| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64) |
|---|---|---|---|---|---|---|---|
# pylint: disable=missing-docstring
from lettuce import step
from lettuce import world
from lettuce import before
from pymongo import MongoClient
from nose.tools import assert_equals
from nose.tools import assert_in
REQUIRED_EVENT_FIELDS = [
'agent',
'event',
'event_source',
'event_type',
'host',
'ip',
'page',
'time',
'username'
]
@before.all
def connect_to_mongodb():
world.mongo_client = MongoClient()
world.event_collection = world.mongo_client['track']['events']
@before.each_scenario
def reset_captured_events(_scenario):
world.event_collection.drop()
@before.outline
def reset_between_outline_scenarios(_scenario, order, outline, reasons_to_fail):
world.event_collection.drop()
@step(r'[aA]n? course url "(.*)" event is emitted$')
def course_url_event_is_emitted(_step, url_regex):
event_type = url_regex.format(world.scenario_dict['COURSE'].id)
n_events_are_emitted(_step, 1, event_type, "server")
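# The step regex below matches scenario lines such as
#   1 "problem_check" server event is emitted
#   a "play_video" browser event is emitted
# (event names are illustrative); when the count reads "a"/"an" instead of
# a number, the handler falls back to expecting a single event.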
@step(r'([aA]n?|\d+) "(.*)" (server|browser) events? is emitted$')
def n_events_are_emitted(_step, count, event_type, event_source):
# Ensure all events are written out to mongo before querying.
world.mongo_client.fsync()
# Note that splinter makes 2 requests when you call browser.visit('/foo'):
# the first just checks to see if the server responds with a status
# code of 200; the next actually uses the browser to submit the request.
# We filter out events associated with the status code checks by ignoring
# events that come directly from splinter.
criteria = {
'event_type': event_type,
'event_source': event_source,
'agent': {
'$ne': 'python/splinter'
}
}
cursor = world.event_collection.find(criteria)
try:
number_events = int(count)
except ValueError:
number_events = 1
assert_equals(cursor.count(), number_events)
event = cursor.next()
expected_field_values = {
"username": world.scenario_dict['USER'].username,
"event_type": event_type,
}
for key, value in expected_field_values.iteritems():
assert_equals(event[key], value)
for field in REQUIRED_EVENT_FIELDS:
assert_in(field, event)
| MakeHer/edx-platform | lms/djangoapps/courseware/features/events.py | Python | agpl-3.0 | 2,247 | ["VisIt"] | 8eacddcd21bea1d1be605cd725484213756aa5a3e4f449caa718eaca23ec99d2 |
import ocl as cam
import camvtk
import time
import vtk
import math
import datetime
def main():
myscreen = camvtk.VTKScreen()
focal = cam.Point(50, 0, 0)
r = 300
theta = (float(45)/360)*2*math.pi
fi=45
campos = cam.Point( r*math.sin(theta)*math.cos(fi), r*math.sin(theta)*math.sin(fi), r*math.cos(theta) )
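# Standard spherical-to-Cartesian placement of the camera around the part:
# x = r*sin(theta)*cos(fi), y = r*sin(theta)*sin(fi), z = r*cos(theta).
# Note that theta is converted to radians above while fi is passed to
# sin/cos as given.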
myscreen.camera.SetPosition(campos.x, campos.y, campos.z)
myscreen.camera.SetFocalPoint(focal.x,focal.y, focal.z)
t = camvtk.Text()
t.SetPos( (myscreen.width-450, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
ytext = "kd-tree debug" #"Y: %3.3f" % (ycoord)
t2.SetText(ytext)
t2.SetPos( (50, myscreen.height-50) )
myscreen.addActor( t2)
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
epos = cam.Epos()
epos.setS(0,1)
t.SetText("OpenCAMLib 10.03-beta, " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
#ycoord = 1.1
stl = camvtk.STLSurf(filename="../stl/demo.stl")
#stl = camvtk.STLSurf(filename="../stl/demo2.stl")
print("STL surface read")
#myscreen.addActor(stl)
#stl.SetWireframe()
#stl.SetColor((0.5,0.5,0.5))
polydata = stl.src.GetOutput()
s= cam.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print("STLSurf with ", s.size(), " triangles")
myscreen.addActor( camvtk.Sphere( center=(0,0,0), radius=0.2, color = camvtk.yellow ) )
s.build_kdtree()
print("built kd-tree")
s.jump_kd_reset()
tlist = s.get_kd_triangles()
print("got", len(tlist), " triangles")
while (s.jump_kd_hi()):
lotris = s.get_kd_triangles()
s.jump_kd_up()
cut = s.get_kd_cut()
s.jump_kd_lo()
hitris = s.get_kd_triangles()
lev = s.get_kd_level()
print("l=", lev, " hi=", len(hitris), " lo=", len(lotris), " cut=", cut)
if ( cut[0] < 2 ):
print("x cut ",)
if ( cut[0] == 0):
print("max" )
myscreen.addActor( camvtk.Line( p1=(cut[1],100,0), p2=(cut[1],-100,0), color = camvtk.green ) )
else:
print("min" )
myscreen.addActor( camvtk.Line( p1=(cut[1],100,0), p2=(cut[1],-100,0), color = camvtk.lgreen ) )
#myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.red ) )
else:
print("y cut ",)
if ( cut[0] == 2):
print("max" )
myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.red ) )
else:
print("min")
myscreen.addActor( camvtk.Line( p1=(100,cut[1],0), p2=(-100,cut[1],0), color = camvtk.pink ) )
slo = camvtk.STLSurf(triangleList=lotris)
slo.SetColor(camvtk.pink)
slo.SetWireframe()
shi = camvtk.STLSurf(triangleList=hitris)
shi.SetColor(camvtk.lgreen)
shi.SetWireframe()
myscreen.addActor(slo)
myscreen.addActor(shi)
myscreen.render()
myscreen.iren.Start()
raw_input("Press Enter to terminate")
time.sleep(1)
myscreen.removeActor(slo)
myscreen.removeActor(shi)
print("done.")
myscreen.render()
#lwr.SetFileName(filename)
#raw_input("Press Enter to terminate")
time.sleep(0.2)
lwr.Write()
myscreen.iren.Start()
if __name__ == "__main__":
main()
#raw_input("Press Enter to terminate")
| aewallin/opencamlib | examples/python/kdtree_debug_1.py | Python | lgpl-2.1 | 3,691 | ["VTK"] | f5530311e2b765db0cb18d2675f2e0c6187937d600f45575cd9529d4e772c054 |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import copy
import random
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
# The test framework used to provide these, but now it doesn't. We'll add
# messages to it as we find bugs (if indeed we find bugs).
msgs = []
rawmsgs = []
class FunctionsTestCase(SupyTestCase):
hostmask = 'foo!bar@baz'
def testHostmaskPatternEqual(self):
for msg in msgs:
if msg.prefix and ircutils.isUserHostmask(msg.prefix):
s = msg.prefix
self.failUnless(ircutils.hostmaskPatternEqual(s, s),
'%r did not match itself.' % s)
banmask = ircutils.banmask(s)
self.failUnless(ircutils.hostmaskPatternEqual(banmask, s),
'%r did not match %r' % (s, banmask))
s = 'supybot!~supybot@dhcp065-024-075-056.columbus.rr.com'
self.failUnless(ircutils.hostmaskPatternEqual(s, s))
s = 'jamessan|work!~jamessan@209-6-166-196.c3-0.' \
'abr-ubr1.sbo-abr.ma.cable.rcn.com'
self.failUnless(ircutils.hostmaskPatternEqual(s, s))
def testIsUserHostmask(self):
self.failUnless(ircutils.isUserHostmask(self.hostmask))
self.failUnless(ircutils.isUserHostmask('a!b@c'))
self.failIf(ircutils.isUserHostmask('!bar@baz'))
self.failIf(ircutils.isUserHostmask('!@baz'))
self.failIf(ircutils.isUserHostmask('!bar@'))
self.failIf(ircutils.isUserHostmask('!@'))
self.failIf(ircutils.isUserHostmask('foo!@baz'))
self.failIf(ircutils.isUserHostmask('foo!bar@'))
self.failIf(ircutils.isUserHostmask(''))
self.failIf(ircutils.isUserHostmask('!'))
self.failIf(ircutils.isUserHostmask('@'))
self.failIf(ircutils.isUserHostmask('!bar@baz'))
def testIsChannel(self):
self.failUnless(ircutils.isChannel('#'))
self.failUnless(ircutils.isChannel('&'))
self.failUnless(ircutils.isChannel('+'))
self.failUnless(ircutils.isChannel('!'))
self.failUnless(ircutils.isChannel('#foo'))
self.failUnless(ircutils.isChannel('&foo'))
self.failUnless(ircutils.isChannel('+foo'))
self.failUnless(ircutils.isChannel('!foo'))
self.failIf(ircutils.isChannel('#foo bar'))
self.failIf(ircutils.isChannel('#foo,bar'))
self.failIf(ircutils.isChannel('#foobar\x07'))
self.failIf(ircutils.isChannel('foo'))
self.failIf(ircutils.isChannel(''))
def testBold(self):
s = ircutils.bold('foo')
self.assertEqual(s[0], '\x02')
self.assertEqual(s[-1], '\x02')
def testUnderline(self):
s = ircutils.underline('foo')
self.assertEqual(s[0], '\x1f')
self.assertEqual(s[-1], '\x1f')
def testReverse(self):
s = ircutils.reverse('foo')
self.assertEqual(s[0], '\x16')
self.assertEqual(s[-1], '\x16')
def testMircColor(self):
# No colors provided should return the same string
s = 'foo'
self.assertEqual(s, ircutils.mircColor(s))
# Test positional args
self.assertEqual('\x0300foo\x03', ircutils.mircColor(s, 'white'))
self.assertEqual('\x031,02foo\x03',ircutils.mircColor(s,'black','blue'))
self.assertEqual('\x0300,03foo\x03', ircutils.mircColor(s, None, 'green'))
# Test keyword args
self.assertEqual('\x0304foo\x03', ircutils.mircColor(s, fg='red'))
self.assertEqual('\x0300,05foo\x03', ircutils.mircColor(s, bg='brown'))
self.assertEqual('\x036,07foo\x03',
ircutils.mircColor(s, bg='orange', fg='purple'))
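# As the expected strings above illustrate, mIRC color codes take the form
# "\x03<fg>[,<bg>]" before the text, with a bare "\x03" terminating the
# colored run.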
# Commented out because we don't map numbers to colors anymore.
## def testMircColors(self):
## # Make sure all (k, v) pairs are also (v, k) pairs.
## for (k, v) in ircutils.mircColors.items():
## if k:
## self.assertEqual(ircutils.mircColors[v], k)
def testStripBold(self):
self.assertEqual(ircutils.stripBold(ircutils.bold('foo')), 'foo')
def testStripColor(self):
self.assertEqual(ircutils.stripColor('\x02bold\x0302,04foo\x03bar\x0f'),
'\x02boldfoobar\x0f')
self.assertEqual(ircutils.stripColor('\x03foo\x03'), 'foo')
self.assertEqual(ircutils.stripColor('\x03foo\x0F'), 'foo\x0F')
self.assertEqual(ircutils.stripColor('\x0312foo\x03'), 'foo')
self.assertEqual(ircutils.stripColor('\x0312,14foo\x03'), 'foo')
self.assertEqual(ircutils.stripColor('\x03,14foo\x03'), 'foo')
self.assertEqual(ircutils.stripColor('\x03,foo\x03'), ',foo')
self.assertEqual(ircutils.stripColor('\x0312foo\x0F'), 'foo\x0F')
self.assertEqual(ircutils.stripColor('\x0312,14foo\x0F'), 'foo\x0F')
self.assertEqual(ircutils.stripColor('\x03,14foo\x0F'), 'foo\x0F')
self.assertEqual(ircutils.stripColor('\x03,foo\x0F'), ',foo\x0F')
def testStripReverse(self):
self.assertEqual(ircutils.stripReverse(ircutils.reverse('foo')), 'foo')
def testStripUnderline(self):
self.assertEqual(ircutils.stripUnderline(ircutils.underline('foo')),
'foo')
def testStripFormatting(self):
self.assertEqual(ircutils.stripFormatting(ircutils.bold('foo')), 'foo')
self.assertEqual(ircutils.stripFormatting(ircutils.reverse('foo')),
'foo')
self.assertEqual(ircutils.stripFormatting(ircutils.underline('foo')),
'foo')
self.assertEqual(ircutils.stripFormatting('\x02bold\x0302,04foo\x03'
'bar\x0f'),
'boldfoobar')
s = ircutils.mircColor('[', 'blue') + ircutils.bold('09:21')
self.assertEqual(ircutils.stripFormatting(s), '[09:21')
def testSafeArgument(self):
s = 'I have been running for 9 seconds'
bolds = ircutils.bold(s)
colors = ircutils.mircColor(s, 'pink', 'orange')
self.assertEqual(s, ircutils.safeArgument(s))
self.assertEqual(bolds, ircutils.safeArgument(bolds))
self.assertEqual(colors, ircutils.safeArgument(colors))
def testSafeArgumentConvertsToString(self):
self.assertEqual('1', ircutils.safeArgument(1))
self.assertEqual(str(None), ircutils.safeArgument(None))
def testIsNick(self):
try:
original = conf.supybot.protocols.irc.strictRfc()
conf.supybot.protocols.irc.strictRfc.setValue(True)
self.failUnless(ircutils.isNick('jemfinch'))
self.failUnless(ircutils.isNick('jemfinch0'))
self.failUnless(ircutils.isNick('[0]'))
self.failUnless(ircutils.isNick('{jemfinch}'))
self.failUnless(ircutils.isNick('[jemfinch]'))
self.failUnless(ircutils.isNick('jem|finch'))
self.failUnless(ircutils.isNick('\\```'))
self.failUnless(ircutils.isNick('`'))
self.failUnless(ircutils.isNick('A'))
self.failIf(ircutils.isNick(''))
self.failIf(ircutils.isNick('8foo'))
self.failIf(ircutils.isNick('10'))
self.failIf(ircutils.isNick('-'))
self.failIf(ircutils.isNick('-foo'))
conf.supybot.protocols.irc.strictRfc.setValue(False)
self.failUnless(ircutils.isNick('services@something.undernet.net'))
finally:
conf.supybot.protocols.irc.strictRfc.setValue(original)
def testIsNickNeverAllowsSpaces(self):
try:
original = conf.supybot.protocols.irc.strictRfc()
conf.supybot.protocols.irc.strictRfc.setValue(True)
self.failIf(ircutils.isNick('foo bar'))
conf.supybot.protocols.irc.strictRfc.setValue(False)
self.failIf(ircutils.isNick('foo bar'))
finally:
conf.supybot.protocols.irc.strictRfc.setValue(original)
def testStandardSubstitute(self):
# Stub out random msg and irc objects that provide what
# standardSubstitute wants
msg = ircmsgs.IrcMsg(':%s PRIVMSG #channel :stuff' % self.hostmask)
class Irc(object):
nick = 'bob'
irc = Irc()
f = ircutils.standardSubstitute
vars = {'foo': 'bar', 'b': 'c', 'i': 100,
'f': lambda: 'called'}
self.assertEqual(f(irc, msg, '$foo', vars), 'bar')
self.assertEqual(f(irc, msg, '${foo}', vars), 'bar')
self.assertEqual(f(irc, msg, '$b', vars), 'c')
self.assertEqual(f(irc, msg, '${b}', vars), 'c')
self.assertEqual(f(irc, msg, '$i', vars), '100')
self.assertEqual(f(irc, msg, '${i}', vars), '100')
self.assertEqual(f(irc, msg, '$f', vars), 'called')
self.assertEqual(f(irc, msg, '${f}', vars), 'called')
self.assertEqual(f(irc, msg, '$b:$i', vars), 'c:100')
def testBanmask(self):
for msg in msgs:
if ircutils.isUserHostmask(msg.prefix):
banmask = ircutils.banmask(msg.prefix)
self.failUnless(ircutils.hostmaskPatternEqual(banmask,
msg.prefix),
'%r didn\'t match %r' % (msg.prefix, banmask))
self.assertEqual(ircutils.banmask('foobar!user@host'), '*!*@host')
self.assertEqual(ircutils.banmask('foobar!user@host.tld'),
'*!*@host.tld')
self.assertEqual(ircutils.banmask('foobar!user@sub.host.tld'),
'*!*@*.host.tld')
self.assertEqual(ircutils.banmask('foo!bar@2001::'), '*!*@2001::*')
def testSeparateModes(self):
self.assertEqual(ircutils.separateModes(['+ooo', 'x', 'y', 'z']),
[('+o', 'x'), ('+o', 'y'), ('+o', 'z')])
self.assertEqual(ircutils.separateModes(['+o-o', 'x', 'y']),
[('+o', 'x'), ('-o', 'y')])
self.assertEqual(ircutils.separateModes(['+s-o', 'x']),
[('+s', None), ('-o', 'x')])
self.assertEqual(ircutils.separateModes(['+sntl', '100']),
[('+s', None),('+n', None),('+t', None),('+l', 100)])
def testNickFromHostmask(self):
self.assertEqual(ircutils.nickFromHostmask('nick!user@host.domain.tld'),
'nick')
def testToLower(self):
self.assertEqual('jemfinch', ircutils.toLower('jemfinch'))
self.assertEqual('{}|^', ircutils.toLower('[]\\~'))
def testReplyTo(self):
prefix = 'foo!bar@baz'
channel = ircmsgs.privmsg('#foo', 'bar baz', prefix=prefix)
private = ircmsgs.privmsg('jemfinch', 'bar baz', prefix=prefix)
self.assertEqual(ircutils.replyTo(channel), channel.args[0])
self.assertEqual(ircutils.replyTo(private), private.nick)
def testJoinModes(self):
plusE = ('+e', '*!*@*ohio-state.edu')
plusB = ('+b', '*!*@*umich.edu')
minusL = ('-l', None)
modes = [plusB, plusE, minusL]
self.assertEqual(ircutils.joinModes(modes),
['+be-l', plusB[1], plusE[1]])
def testDccIpStuff(self):
def randomIP():
def rand():
return random.randrange(0, 256)
return '.'.join(map(str, [rand(), rand(), rand(), rand()]))
for _ in range(100): # 100 should be good :)
ip = randomIP()
self.assertEqual(ip, ircutils.unDccIP(ircutils.dccIP(ip)))
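# dccIP appears to pack a dotted-quad address into the single-integer form
# used by the DCC protocol and unDccIP to reverse it, so the round trip
# above should be the identity for any valid IPv4 address.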
class IrcDictTestCase(SupyTestCase):
def test(self):
d = ircutils.IrcDict()
d['#FOO'] = 'bar'
self.assertEqual(d['#FOO'], 'bar')
self.assertEqual(d['#Foo'], 'bar')
self.assertEqual(d['#foo'], 'bar')
del d['#fOO']
d['jemfinch{}'] = 'bar'
self.assertEqual(d['jemfinch{}'], 'bar')
self.assertEqual(d['jemfinch[]'], 'bar')
self.assertEqual(d['JEMFINCH[]'], 'bar')
def testKeys(self):
d = ircutils.IrcDict()
self.assertEqual(d.keys(), [])
def testSetdefault(self):
d = ircutils.IrcDict()
d.setdefault('#FOO', []).append(1)
self.assertEqual(d['#foo'], [1])
self.assertEqual(d['#fOO'], [1])
self.assertEqual(d['#FOO'], [1])
def testGet(self):
d = ircutils.IrcDict()
self.assertEqual(d.get('#FOO'), None)
d['#foo'] = 1
self.assertEqual(d.get('#FOO'), 1)
def testContains(self):
d = ircutils.IrcDict()
d['#FOO'] = None
self.failUnless('#foo' in d)
d['#fOOBAR[]'] = None
self.failUnless('#foobar{}' in d)
def testGetSetItem(self):
d = ircutils.IrcDict()
d['#FOO'] = 12
self.assertEqual(12, d['#foo'])
d['#fOOBAR[]'] = 'blah'
self.assertEqual('blah', d['#foobar{}'])
def testCopyable(self):
d = ircutils.IrcDict()
d['foo'] = 'bar'
self.failUnless(d == copy.copy(d))
self.failUnless(d == copy.deepcopy(d))
class IrcSetTestCase(SupyTestCase):
def test(self):
s = ircutils.IrcSet()
s.add('foo')
s.add('bar')
self.failUnless('foo' in s)
self.failUnless('FOO' in s)
s.discard('alfkj')
s.remove('FOo')
self.failIf('foo' in s)
self.failIf('FOo' in s)
def testCopy(self):
s = ircutils.IrcSet()
s.add('foo')
s.add('bar')
s1 = copy.deepcopy(s)
self.failUnless('foo' in s)
self.failUnless('FOO' in s)
s.discard('alfkj')
s.remove('FOo')
self.failIf('foo' in s)
self.failIf('FOo' in s)
self.failUnless('foo' in s1)
self.failUnless('FOO' in s1)
s1.discard('alfkj')
s1.remove('FOo')
self.failIf('foo' in s1)
self.failIf('FOo' in s1)
class IrcStringTestCase(SupyTestCase):
def testEquality(self):
self.assertEqual('#foo', ircutils.IrcString('#foo'))
self.assertEqual('#foo', ircutils.IrcString('#FOO'))
self.assertEqual('#FOO', ircutils.IrcString('#foo'))
self.assertEqual('#FOO', ircutils.IrcString('#FOO'))
self.assertEqual(hash(ircutils.IrcString('#FOO')),
hash(ircutils.IrcString('#foo')))
def testInequality(self):
s1 = 'supybot'
s2 = ircutils.IrcString('Supybot')
self.failUnless(s1 == s2)
self.failIf(s1 != s2)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| mazaclub/mazabot-core | test/test_ircutils.py | Python | bsd-3-clause | 16,070 | ["COLUMBUS"] | af10bc33094301be48c2b622e76441de3553085f6859ee9ea5254d1c64ebcd9f |
'''
Created on 2013-3-31
pysa - reverse a complete computer setup
Copyright (C) 2013 MadeiraCloud Ltd.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@author: Michael
'''
import os
import re
import logging
from pysa.config import *
from pysa.scanner.actions.utils import *
from pysa.scanner.actions.base import ScannerBase
class ScannerFile(ScannerBase):
suf_list = ['.conf', '.cfg', '.ini']
def scan(self):
"""
scan config files
"""
logging.info('searching for config files')
# scan the system directories
self.scandir(Config.files_path)
# scan directory and add config files
def scandir(self, pathdir):
# Visit every file under each directory listed in pathdir (the suffix
# filter below is currently disabled).
pathdirs = re.split(":", pathdir)
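# The search path is colon-delimited like $PATH, e.g. a (hypothetical)
# "/etc:/usr/local/etc" is split into the individual directories to walk.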
for p in pathdirs:
if not p: continue
for dirpath, dirnames, filenames in os.walk(p, followlinks=True):
for filename in filenames:
self.addfile(os.path.join(dirpath, filename))
# add per config file
def addfile(self, pathname):
# only plain text files
if valid_txtfile(pathname) == False:
return
# # only include above suffix config file
# suf = os.path.splitext(pathname)[1]
# if suf is None or suf not in self.suf_list:
# return
# get owner, group and mode
s = get_stat(pathname)
# read the config file's content
c = get_content(pathname)
# add the config file:
# checksum, content, group, mode, owner, path, force=False, provider=None,
# recurse=None, recurselimit=None, source=None
self.add_file('md5', c, s[0], s[1], s[2], pathname)
| VisualOps/pysa | pysa/scanner/actions/file.py | Python | gpl-3.0 | 2,386 | ["VisIt"] | dc7b48aab5fa4dbb411c296fc580ce8503d7293de663ba577123344bc5cff580 |
"""The optimizer tries to constant fold expressions and modify the AST
in place so that it should be faster to evaluate.
Because the AST does not contain all the scoping information and the
compiler has to find that out, we cannot do all the optimizations we
want. For example, loop unrolling doesn't work because unrolled loops
would have a different scope. The solution would be a second syntax tree
that stored the scoping rules.
"""
from . import nodes
from .visitor import NodeTransformer
def optimize(node, environment):
"""The context hint can be used to perform an static optimization
based on the context given."""
optimizer = Optimizer(environment)
return optimizer.visit(node)
class Optimizer(NodeTransformer):
def __init__(self, environment):
self.environment = environment
def generic_visit(self, node, *args, **kwargs):
node = super().generic_visit(node, *args, **kwargs)
# Do constant folding. Some other nodes besides Expr have
# as_const, but folding them causes errors later on.
if isinstance(node, nodes.Expr):
try:
return nodes.Const.from_untrusted(
node.as_const(args[0] if args else None),
lineno=node.lineno,
environment=self.environment,
)
except nodes.Impossible:
pass
return node
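# A minimal usage sketch (not part of the module, and assuming a stock
# Environment whose parse() returns the template AST): constant
# sub-expressions are folded in place, e.g.
#
#     from jinja2 import Environment
#     env = Environment()
#     ast = env.parse("{{ 1 + 2 }}")
#     ast = optimize(ast, env)   # the "1 + 2" node is folded to Const(3)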
| pallets/jinja2 | src/jinja2/optimizer.py | Python | bsd-3-clause | 1,418 | ["VisIt"] | 3bb69340f8164218ff896e574950bfeccb7afccbda1c327c2bcbc546f3368c04 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import email.parser
import logging
import math
import os
import pickle
import rfc822
import sys
import unittest
from contextlib import closing, contextmanager, nested
from gzip import GzipFile
from shutil import rmtree
import gc
import time
from textwrap import dedent
from urllib import quote
from hashlib import md5
from pyeclib.ec_iface import ECDriverError
from tempfile import mkdtemp, NamedTemporaryFile
import weakref
import operator
import functools
from swift.obj import diskfile
import re
import random
import mock
from eventlet import sleep, spawn, wsgi, listen, Timeout
from six import BytesIO
from six import StringIO
from six.moves import range
from swift.common.utils import hash_path, json, storage_directory, \
parse_content_type, iter_multipart_mime_documents, public
from test.unit import (
connect_tcp, readuntil2crlfs, FakeLogger, fake_http_connect, FakeRing,
FakeMemcache, debug_logger, patch_policies, write_fake_ring,
mocked_http_conn)
from swift.proxy import server as proxy_server
from swift.proxy.controllers.obj import ReplicatedObjectController
from swift.account import server as account_server
from swift.container import server as container_server
from swift.obj import server as object_server
from swift.common.middleware import proxy_logging, versioned_writes
from swift.common.middleware.acl import parse_acl, format_acl
from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \
APIVersionError
from swift.common import utils, constraints
from swift.common.ring import RingData
from swift.common.utils import mkdirs, normalize_timestamp, NullLogger
from swift.common.wsgi import monkey_patch_mimetools, loadapp
from swift.proxy.controllers import base as proxy_base
from swift.proxy.controllers.base import get_container_memcache_key, \
get_account_memcache_key, cors_validation
import swift.proxy.controllers
import swift.proxy.controllers.obj
from swift.common.swob import Request, Response, HTTPUnauthorized, \
HTTPException, HeaderKeyDict
from swift.common import storage_policy
from swift.common.storage_policy import StoragePolicy, ECStoragePolicy, \
StoragePolicyCollection, POLICIES
from swift.common.request_helpers import get_sys_meta_prefix
# mocks
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
STATIC_TIME = time.time()
_test_coros = _test_servers = _test_sockets = _orig_container_listing_limit = \
_testdir = _orig_SysLogHandler = _orig_POLICIES = _test_POLICIES = None
def do_setup(the_object_server):
utils.HASH_PATH_SUFFIX = 'endcap'
global _testdir, _test_servers, _test_sockets, \
_orig_container_listing_limit, _test_coros, _orig_SysLogHandler, \
_orig_POLICIES, _test_POLICIES
_orig_POLICIES = storage_policy._POLICIES
_orig_SysLogHandler = utils.SysLogHandler
utils.SysLogHandler = mock.MagicMock()
monkey_patch_mimetools()
# Since we're starting up a lot here, we're going to test more than
# just chunked puts; we're also going to test parts of
# proxy_server.Application we couldn't get to easily otherwise.
_testdir = \
os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked')
mkdirs(_testdir)
rmtree(_testdir)
for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1',
'sdf1', 'sdg1', 'sdh1', 'sdi1'):
mkdirs(os.path.join(_testdir, drive, 'tmp'))
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers':
'content-encoding, x-object-manifest, content-disposition, foo',
'allow_versions': 't'}
prolis = listen(('localhost', 0))
acc1lis = listen(('localhost', 0))
acc2lis = listen(('localhost', 0))
con1lis = listen(('localhost', 0))
con2lis = listen(('localhost', 0))
obj1lis = listen(('localhost', 0))
obj2lis = listen(('localhost', 0))
obj3lis = listen(('localhost', 0))
objsocks = [obj1lis, obj2lis, obj3lis]
_test_sockets = \
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
account_devs = [
{'port': acc1lis.getsockname()[1]},
{'port': acc2lis.getsockname()[1]},
]
write_fake_ring(account_ring_path, *account_devs)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
container_devs = [
{'port': con1lis.getsockname()[1]},
{'port': con2lis.getsockname()[1]},
]
write_fake_ring(container_ring_path, *container_devs)
storage_policy._POLICIES = StoragePolicyCollection([
StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False),
ECStoragePolicy(3, 'ec', ec_type='jerasure_rs_vand',
ec_ndata=2, ec_nparity=1, ec_segment_size=4096)])
obj_rings = {
0: ('sda1', 'sdb1'),
1: ('sdc1', 'sdd1'),
2: ('sde1', 'sdf1'),
# sdg1, sdh1, sdi1 taken by policy 3 (see below)
}
for policy_index, devices in obj_rings.items():
policy = POLICIES[policy_index]
obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz')
obj_devs = [
{'port': objsock.getsockname()[1], 'device': dev}
for objsock, dev in zip(objsocks, devices)]
write_fake_ring(obj_ring_path, *obj_devs)
# write_fake_ring can't handle a 3-element ring, and the EC policy needs
# at least 3 devs to work with, so we do it manually
devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1',
'port': obj1lis.getsockname()[1]},
{'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]},
{'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1',
'port': obj3lis.getsockname()[1]}]
pol3_replica2part2dev_id = [[0, 1, 2, 0],
[1, 2, 0, 1],
[2, 0, 1, 2]]
obj3_ring_path = os.path.join(_testdir, POLICIES[3].ring_name + '.ring.gz')
part_shift = 30
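# Each row of pol3_replica2part2dev_id above is one replica and each column
# one partition (part_shift=30 gives 2 ** (32 - 30) = 4 partitions); the
# entries index into devs, so every partition is spread across all three
# devices.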
with closing(GzipFile(obj3_ring_path, 'wb')) as fh:
pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh)
prosrv = proxy_server.Application(conf, FakeMemcacheReturnsNone(),
logger=debug_logger('proxy'))
for policy in POLICIES:
# make sure all the rings are loaded
prosrv.get_object_ring(policy.idx)
# don't lose this one!
_test_POLICIES = storage_policy._POLICIES
acc1srv = account_server.AccountController(
conf, logger=debug_logger('acct1'))
acc2srv = account_server.AccountController(
conf, logger=debug_logger('acct2'))
con1srv = container_server.ContainerController(
conf, logger=debug_logger('cont1'))
con2srv = container_server.ContainerController(
conf, logger=debug_logger('cont2'))
obj1srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj2'))
obj3srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj3'))
_test_servers = \
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv)
nl = NullLogger()
logging_prosv = proxy_logging.ProxyLoggingMiddleware(prosrv, conf,
logger=prosrv.logger)
prospa = spawn(wsgi.server, prolis, logging_prosv, nl)
acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl)
acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl)
con1spa = spawn(wsgi.server, con1lis, con1srv, nl)
con2spa = spawn(wsgi.server, con2lis, con2srv, nl)
obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl)
obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl)
obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl)
_test_coros = \
(prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa)
# Create account
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a')
for node in nodes:
conn = swift.proxy.controllers.obj.http_connect(node['ip'],
node['port'],
node['device'],
partition, 'PUT', '/a',
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
assert(resp.status == 201)
# Create another account
# used for account-to-account tests
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a1')
for node in nodes:
conn = swift.proxy.controllers.obj.http_connect(node['ip'],
node['port'],
node['device'],
partition, 'PUT',
'/a1',
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
assert(resp.status == 201)
# Create containers, 1 per test policy
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
# Create container in other account
# used for account-to-account tests
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write(
'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write(
'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
def unpatch_policies(f):
"""
This will unset a TestCase level patch_policies to use the module level
policies setup for the _test_servers instead.
N.B. You should NEVER modify the _test_server policies or rings during a
test because they persist for the life of the entire module!
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
with patch_policies(_test_POLICIES):
return f(*args, **kwargs)
return wrapper
def setup():
do_setup(object_server)
def teardown():
for server in _test_coros:
server.kill()
rmtree(os.path.dirname(_testdir))
utils.SysLogHandler = _orig_SysLogHandler
storage_policy._POLICIES = _orig_POLICIES
def sortHeaderNames(headerNames):
"""
Return the given string of header names sorted.
headerNames: a comma-delimited string of header names
"""
headers = [a.strip() for a in headerNames.split(',') if a.strip()]
headers.sort()
return ', '.join(headers)
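# For example, sortHeaderNames('Content-Type, Accept ,X-Trans-Id') returns
# 'Accept, Content-Type, X-Trans-Id'.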
def parse_headers_string(headers_str):
headers_dict = HeaderKeyDict()
for line in headers_str.split('\r\n'):
if ': ' in line:
header, value = line.split(': ', 1)
headers_dict[header] = value
return headers_dict
def node_error_count(proxy_app, ring_node):
# Reach into the proxy's internals to get the error count for a
# particular node
node_key = proxy_app._error_limit_node_key(ring_node)
return proxy_app._error_limiting.get(node_key, {}).get('errors', 0)
def node_last_error(proxy_app, ring_node):
# Reach into the proxy's internals to get the last error for a
# particular node
node_key = proxy_app._error_limit_node_key(ring_node)
return proxy_app._error_limiting.get(node_key, {}).get('last_error')
def set_node_errors(proxy_app, ring_node, value, last_error):
# Set the node's error count to value
node_key = proxy_app._error_limit_node_key(ring_node)
stats = proxy_app._error_limiting.setdefault(node_key, {})
stats['errors'] = value
stats['last_error'] = last_error
class FakeMemcacheReturnsNone(FakeMemcache):
def get(self, key):
# Returns None as the timestamp of the container; assumes we're only
# using the FakeMemcache for container existence checks.
return None
@contextmanager
def save_globals():
orig_http_connect = getattr(swift.proxy.controllers.base, 'http_connect',
None)
orig_account_info = getattr(swift.proxy.controllers.Controller,
'account_info', None)
orig_container_info = getattr(swift.proxy.controllers.Controller,
'container_info', None)
try:
yield True
finally:
swift.proxy.controllers.Controller.account_info = orig_account_info
swift.proxy.controllers.base.http_connect = orig_http_connect
swift.proxy.controllers.obj.http_connect = orig_http_connect
swift.proxy.controllers.account.http_connect = orig_http_connect
swift.proxy.controllers.container.http_connect = orig_http_connect
swift.proxy.controllers.Controller.container_info = orig_container_info
def set_http_connect(*args, **kwargs):
new_connect = fake_http_connect(*args, **kwargs)
swift.proxy.controllers.base.http_connect = new_connect
swift.proxy.controllers.obj.http_connect = new_connect
swift.proxy.controllers.account.http_connect = new_connect
swift.proxy.controllers.container.http_connect = new_connect
return new_connect
def _make_callback_func(calls):
def callback(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
context = {}
context['method'] = method
context['path'] = path
context['headers'] = headers or {}
calls.append(context)
return callback
def _limit_max_file_size(f):
"""
This will limit constraints.MAX_FILE_SIZE for the duration of the
wrapped function, based on whether MAX_FILE_SIZE exceeds the
sys.maxsize limit on the system running the tests.
This allows successful testing on 32 bit systems.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_max_file_size = constraints.MAX_FILE_SIZE
if constraints.MAX_FILE_SIZE >= sys.maxsize:
test_max_file_size = (2 ** 30 + 2)
with mock.patch.object(constraints, 'MAX_FILE_SIZE',
test_max_file_size):
return f(*args, **kwargs)
return wrapper
# tests
class TestController(unittest.TestCase):
def setUp(self):
self.account_ring = FakeRing()
self.container_ring = FakeRing()
self.memcache = FakeMemcache()
app = proxy_server.Application(None, self.memcache,
account_ring=self.account_ring,
container_ring=self.container_ring)
self.controller = swift.proxy.controllers.Controller(app)
class FakeReq(object):
def __init__(self):
self.url = "/foo/bar"
self.method = "METHOD"
def as_referer(self):
return self.method + ' ' + self.url
self.account = 'some_account'
self.container = 'some_container'
self.request = FakeReq()
self.read_acl = 'read_acl'
self.write_acl = 'write_acl'
def test_transfer_headers(self):
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
'new-owner': 'Kun'}
dst_headers = {'x-base-meta-owner': 'Gareth',
'x-base-meta-size': '150M'}
self.controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-base-meta-owner': '',
'x-base-meta-size': '151M'}
self.assertEqual(dst_headers, expected_headers)
def check_account_info_return(self, partition, nodes, is_none=False):
if is_none:
p, n = None, None
else:
p, n = self.account_ring.get_nodes(self.account)
self.assertEqual(p, partition)
self.assertEqual(n, nodes)
def test_account_info_container_count(self):
with save_globals():
set_http_connect(200, count=123)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 123)
with save_globals():
set_http_connect(200, count='123')
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 123)
with save_globals():
cache_key = get_account_memcache_key(self.account)
account_info = {'status': 200, 'container_count': 1234}
self.memcache.set(cache_key, account_info)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 1234)
with save_globals():
cache_key = get_account_memcache_key(self.account)
account_info = {'status': 200, 'container_count': '1234'}
self.memcache.set(cache_key, account_info)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 1234)
def test_make_requests(self):
with save_globals():
set_http_connect(200)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
set_http_connect(201, raise_timeout_exc=True)
self.controller._make_request(
nodes, partition, 'POST', '/', '', '',
self.controller.app.logger.thread_locals)
# tests if 200 is cached and used
def test_account_info_200(self):
with save_globals():
set_http_connect(200)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes)
self.assertEqual(count, 12345)
# Test the internal representation in memcache
# 'container_count' changed from int to str
cache_key = get_account_memcache_key(self.account)
container_info = {'status': 200,
'container_count': '12345',
'total_object_count': None,
'bytes': None,
'meta': {},
'sysmeta': {}}
self.assertEqual(container_info,
self.memcache.get(cache_key))
set_http_connect()
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes)
self.assertEqual(count, 12345)
# tests if 404 is cached and used
def test_account_info_404(self):
with save_globals():
set_http_connect(404, 404, 404)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, True)
self.assertEqual(count, None)
# Test the internal representation in memcache
# 'container_count' changed from 0 to None
cache_key = get_account_memcache_key(self.account)
account_info = {'status': 404,
'container_count': None, # internally keep None
'total_object_count': None,
'bytes': None,
'meta': {},
'sysmeta': {}}
self.assertEqual(account_info,
self.memcache.get(cache_key))
set_http_connect()
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, True)
self.assertEqual(count, None)
# tests if some http status codes are not cached
def test_account_info_no_cache(self):
def test(*status_list):
set_http_connect(*status_list)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.assertEqual(len(self.memcache.keys()), 0)
self.check_account_info_return(partition, nodes, True)
self.assertEqual(count, None)
with save_globals():
# We cache if we have two 404 responses - fail if only one
test(503, 503, 404)
test(504, 404, 503)
test(404, 507, 503)
test(503, 503, 503)
def test_account_info_no_account(self):
with save_globals():
self.memcache.store = {}
set_http_connect(404, 404, 404)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, is_none=True)
self.assertEqual(count, None)
def check_container_info_return(self, ret, is_none=False):
if is_none:
partition, nodes, read_acl, write_acl = None, None, None, None
else:
partition, nodes = self.container_ring.get_nodes(self.account,
self.container)
read_acl, write_acl = self.read_acl, self.write_acl
self.assertEqual(partition, ret['partition'])
self.assertEqual(nodes, ret['nodes'])
self.assertEqual(read_acl, ret['read_acl'])
self.assertEqual(write_acl, ret['write_acl'])
def test_container_info_invalid_account(self):
def account_info(self, account, request, autocreate=False):
return None, None
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
ret = self.controller.container_info(self.account,
self.container,
self.request)
self.check_container_info_return(ret, True)
# tests if 200 is cached and used
def test_container_info_200(self):
with save_globals():
headers = {'x-container-read': self.read_acl,
'x-container-write': self.write_acl}
set_http_connect(200, # account_info is found
200, headers=headers) # container_info is found
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret)
cache_key = get_container_memcache_key(self.account,
self.container)
cache_value = self.memcache.get(cache_key)
self.assertTrue(isinstance(cache_value, dict))
self.assertEqual(200, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret)
# tests if 404 is cached and used
def test_container_info_404(self):
def account_info(self, account, request):
return True, True, 0
with save_globals():
set_http_connect(503, 204, # account_info found
504, 404, 404) # container_info 'NotFound'
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
cache_key = get_container_memcache_key(self.account,
self.container)
cache_value = self.memcache.get(cache_key)
self.assertTrue(isinstance(cache_value, dict))
self.assertEqual(404, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
set_http_connect(503, 404, 404) # account_info 'NotFound'
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
cache_key = get_container_memcache_key(self.account,
self.container)
cache_value = self.memcache.get(cache_key)
self.assertTrue(isinstance(cache_value, dict))
self.assertEqual(404, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
# tests if some http status codes are not cached
def test_container_info_no_cache(self):
def test(*status_list):
set_http_connect(*status_list)
ret = self.controller.container_info(
self.account, self.container, self.request)
self.assertEqual(len(self.memcache.keys()), 0)
self.check_container_info_return(ret, True)
with save_globals():
# We cache if we have two 404 responses - fail if only one
test(503, 503, 404)
test(504, 404, 503)
test(404, 507, 503)
test(503, 503, 503)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestProxyServer(unittest.TestCase):
def test_get_object_ring(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
with patch_policies([
StoragePolicy(0, 'a', False, object_ring=123),
StoragePolicy(1, 'b', True, object_ring=456),
StoragePolicy(2, 'd', False, object_ring=789)
]):
# None means legacy so always use policy 0
ring = baseapp.get_object_ring(None)
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('')
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('0')
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('1')
self.assertEqual(ring, 456)
ring = baseapp.get_object_ring('2')
self.assertEqual(ring, 789)
# illegal values
self.assertRaises(ValueError, baseapp.get_object_ring, '99')
self.assertRaises(ValueError, baseapp.get_object_ring, 'asdf')
def test_unhandled_exception(self):
class MyApp(proxy_server.Application):
def get_controller(self, path):
raise Exception('this shouldn\'t be caught')
app = MyApp(None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
app.update_request(req)
resp = app.handle_request(req)
self.assertEqual(resp.status_int, 500)
def test_internal_method_request(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '__init__'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
def test_inexistent_method_request(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '!invalid'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
def test_calls_authorize_allow(self):
called = [False]
def authorize(req):
called[0] = True
with save_globals():
set_http_connect(200)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
app.handle_request(req)
self.assertTrue(called[0])
def test_calls_authorize_deny(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
app.handle_request(req)
self.assertTrue(called[0])
def test_negative_content_length(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
FakeRing(), FakeRing())
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-1'}))
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, 'Invalid Content-Length')
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-123'}))
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, 'Invalid Content-Length')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_adds_transaction_id(self):
swift_dir = mkdtemp()
try:
logger = FakeLogger()
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), logger,
container_ring=FakeLogger(),
account_ring=FakeRing())
baseapp.handle_request(
Request.blank('/info',
environ={'HTTP_X_TRANS_ID_EXTRA': 'sardine',
'REQUEST_METHOD': 'GET'}))
# This is kind of a hokey way to get the transaction ID; it'd be
# better to examine response headers, but the catch_errors
# middleware is what sets the X-Trans-Id header, and we don't have
# that available here.
self.assertTrue(logger.txn_id.endswith('-sardine'))
finally:
rmtree(swift_dir, ignore_errors=True)
def test_adds_transaction_id_length_limit(self):
swift_dir = mkdtemp()
try:
logger = FakeLogger()
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), logger,
container_ring=FakeLogger(),
account_ring=FakeRing())
baseapp.handle_request(
Request.blank('/info',
environ={'HTTP_X_TRANS_ID_EXTRA': 'a' * 1000,
'REQUEST_METHOD': 'GET'}))
self.assertTrue(logger.txn_id.endswith(
'-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
finally:
rmtree(swift_dir, ignore_errors=True)
def test_denied_host_header(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir,
'deny_host_headers':
'invalid_host.com'},
FakeMemcache(),
container_ring=FakeLogger(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a/c/o',
environ={'HTTP_HOST': 'invalid_host.com'}))
self.assertEqual(resp.status, '403 Forbidden')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_node_timing(self):
baseapp = proxy_server.Application({'sorting_method': 'timing'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(baseapp.node_timings, {})
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
baseapp.update_request(req)
resp = baseapp.handle_request(req)
self.assertEqual(resp.status_int, 503) # couldn't connect to anything
exp_timings = {}
self.assertEqual(baseapp.node_timings, exp_timings)
times = [time.time()]
exp_timings = {'127.0.0.1': (0.1, times[0] + baseapp.timing_expiry)}
with mock.patch('swift.proxy.server.time', lambda: times.pop(0)):
baseapp.set_node_timing({'ip': '127.0.0.1'}, 0.1)
self.assertEqual(baseapp.node_timings, exp_timings)
nodes = [{'ip': '127.0.0.1'}, {'ip': '127.0.0.2'}, {'ip': '127.0.0.3'}]
with mock.patch('swift.proxy.server.shuffle', lambda l: l):
res = baseapp.sort_nodes(nodes)
exp_sorting = [{'ip': '127.0.0.2'}, {'ip': '127.0.0.3'},
{'ip': '127.0.0.1'}]
self.assertEqual(res, exp_sorting)
def test_node_affinity(self):
baseapp = proxy_server.Application({'sorting_method': 'affinity',
'read_affinity': 'r1=1'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
nodes = [{'region': 2, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 1, 'zone': 2, 'ip': '127.0.0.2'}]
with mock.patch('swift.proxy.server.shuffle', lambda x: x):
app_sorted = baseapp.sort_nodes(nodes)
exp_sorted = [{'region': 1, 'zone': 2, 'ip': '127.0.0.2'},
{'region': 2, 'zone': 1, 'ip': '127.0.0.1'}]
self.assertEqual(exp_sorted, app_sorted)
def test_info_defaults(self):
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
self.assertTrue(app.expose_info)
self.assertTrue(isinstance(app.disallowed_sections, list))
self.assertEqual(1, len(app.disallowed_sections))
self.assertEqual(['swift.valid_api_versions'],
app.disallowed_sections)
self.assertTrue(app.admin_key is None)
def test_get_info_controller(self):
req = Request.blank('/info')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
controller, path_parts = app.get_controller(req)
self.assertTrue('version' in path_parts)
self.assertTrue(path_parts['version'] is None)
self.assertTrue('disallowed_sections' in path_parts)
self.assertTrue('expose_info' in path_parts)
self.assertTrue('admin_key' in path_parts)
self.assertEqual(controller.__name__, 'InfoController')
def test_error_limit_methods(self):
logger = debug_logger('test')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=logger)
node = app.container_ring.get_part_nodes(0)[0]
# error occurred
app.error_occurred(node, 'test msg')
self.assertTrue('test msg' in
logger.get_lines_for_level('error')[-1])
self.assertEqual(1, node_error_count(app, node))
# exception occurred
try:
raise Exception('kaboom1!')
except Exception as e1:
app.exception_occurred(node, 'test1', 'test1 msg')
line = logger.get_lines_for_level('error')[-1]
self.assertTrue('test1 server' in line)
self.assertTrue('test1 msg' in line)
log_args, log_kwargs = logger.log_dict['error'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e1)
self.assertEqual(2, node_error_count(app, node))
# warning exception occurred
try:
raise Exception('kaboom2!')
except Exception as e2:
app.exception_occurred(node, 'test2', 'test2 msg',
level=logging.WARNING)
line = logger.get_lines_for_level('warning')[-1]
self.assertTrue('test2 server' in line)
self.assertTrue('test2 msg' in line)
log_args, log_kwargs = logger.log_dict['warning'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e2)
self.assertEqual(3, node_error_count(app, node))
# custom exception occurred
try:
raise Exception('kaboom3!')
except Exception as e3:
e3_info = sys.exc_info()
try:
raise Exception('kaboom4!')
except Exception:
pass
app.exception_occurred(node, 'test3', 'test3 msg',
level=logging.WARNING, exc_info=e3_info)
line = logger.get_lines_for_level('warning')[-1]
self.assertTrue('test3 server' in line)
self.assertTrue('test3 msg' in line)
log_args, log_kwargs = logger.log_dict['warning'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e3)
self.assertEqual(4, node_error_count(app, node))
def test_valid_api_version(self):
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
# The version string is only checked for account, container and object
# requests; the raised APIVersionError returns a 404 to the client
for path in [
'/v2/a',
'/v2/a/c',
'/v2/a/c/o']:
req = Request.blank(path)
self.assertRaises(APIVersionError, app.get_controller, req)
# Default valid API versions are ok
for path in [
'/v1/a',
'/v1/a/c',
'/v1/a/c/o',
'/v1.0/a',
'/v1.0/a/c',
'/v1.0/a/c/o']:
req = Request.blank(path)
controller, path_parts = app.get_controller(req)
self.assertTrue(controller is not None)
# Ensure that setting a valid API version constraint works
for version in ["42", 42]:
try:
with NamedTemporaryFile() as f:
f.write('[swift-constraints]\n')
f.write('valid_api_versions = %s\n' % version)
f.flush()
with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
constraints.reload_constraints()
req = Request.blank('/%s/a' % version)
controller, _ = app.get_controller(req)
self.assertTrue(controller is not None)
# In this case v1 is invalid
req = Request.blank('/v1/a')
self.assertRaises(APIVersionError, app.get_controller, req)
finally:
constraints.reload_constraints()
# Check that the valid_api_versions is not exposed by default
req = Request.blank('/info')
controller, path_parts = app.get_controller(req)
self.assertTrue('swift.valid_api_versions' in
path_parts.get('disallowed_sections'))
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
])
class TestProxyServerLoading(unittest.TestCase):
def setUp(self):
self._orig_hash_suffix = utils.HASH_PATH_SUFFIX
utils.HASH_PATH_SUFFIX = 'endcap'
self.tempdir = mkdtemp()
def tearDown(self):
rmtree(self.tempdir)
utils.HASH_PATH_SUFFIX = self._orig_hash_suffix
for policy in POLICIES:
policy.object_ring = None
def test_load_policy_rings(self):
for policy in POLICIES:
self.assertFalse(policy.object_ring)
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % self.tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
account_ring_path = os.path.join(self.tempdir, 'account.ring.gz')
write_fake_ring(account_ring_path)
container_ring_path = os.path.join(self.tempdir, 'container.ring.gz')
write_fake_ring(container_ring_path)
for policy in POLICIES:
object_ring_path = os.path.join(self.tempdir,
policy.ring_name + '.ring.gz')
write_fake_ring(object_ring_path)
app = loadapp(conf_path)
# find the end of the pipeline
while hasattr(app, 'app'):
app = app.app
# validate loaded rings
self.assertEqual(app.account_ring.serialized_path,
account_ring_path)
self.assertEqual(app.container_ring.serialized_path,
container_ring_path)
for policy in POLICIES:
self.assertEqual(policy.object_ring,
app.get_object_ring(int(policy)))
def test_missing_rings(self):
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % self.tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
ring_paths = [
os.path.join(self.tempdir, 'account.ring.gz'),
os.path.join(self.tempdir, 'container.ring.gz'),
]
for policy in POLICIES:
self.assertFalse(policy.object_ring)
object_ring_path = os.path.join(self.tempdir,
policy.ring_name + '.ring.gz')
ring_paths.append(object_ring_path)
for policy in POLICIES:
self.assertFalse(policy.object_ring)
for ring_path in ring_paths:
self.assertFalse(os.path.exists(ring_path))
self.assertRaises(IOError, loadapp, conf_path)
write_fake_ring(ring_path)
# all rings exist, app should load
loadapp(conf_path)
for policy in POLICIES:
self.assertTrue(policy.object_ring)
@patch_policies([StoragePolicy(0, 'zero', True,
object_ring=FakeRing(base_port=3000))])
class TestObjectController(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(
None, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
def tearDown(self):
self.app.account_ring.set_replicas(3)
self.app.container_ring.set_replicas(3)
for policy in POLICIES:
policy.object_ring = FakeRing(base_port=3000)
def put_container(self, policy_name, container_name):
# Note: only works if called with unpatched policies
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: %s\r\n'
'\r\n' % (container_name, policy_name))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
def assert_status_map(self, method, statuses, expected, raise_exc=False):
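        # Helper: drive `method` against a fresh request while the mocked
        # backend connections return `statuses`, then assert the resulting
        # status code; the whole sequence is repeated to make sure a second
        # identical request behaves the same way.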
with save_globals():
kwargs = {}
if raise_exc:
kwargs['raise_exc'] = raise_exc
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
try:
res = method(req)
except HTTPException as res:
pass
self.assertEqual(res.status_int, expected)
# repeat test
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
try:
res = method(req)
except HTTPException as res:
pass
self.assertEqual(res.status_int, expected)
@unpatch_policies
def test_policy_IO(self):
def check_file(policy, cont, devs, check_val):
partition, nodes = policy.object_ring.get_nodes('a', cont, 'o')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
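            # get_diskfile() only constructs the diskfile; open() raises
            # DiskFileNotExist when no data file is present, so check_val=True
            # asserts the object actually landed on the listed devices (when
            # check_val is False nothing is asserted here).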
for dev in devs:
                df = df_mgr.get_diskfile(dev, partition, 'a',
                                         cont, 'o',
                                         policy=policy)
                if check_val is True:
                    df.open()
prolis = _test_sockets[0]
prosrv = _test_servers[0]
# check policy 0: put file on c, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'test_object0'
path = '/v1/a/c/o'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[0], 'c', ['sda1', 'sdb1'], True)
check_file(POLICIES[0], 'c', ['sdc1', 'sdd1', 'sde1', 'sdf1'], False)
# check policy 1: put file on c1, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c1/o'
obj = 'test_object1'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[1], 'c1', ['sdc1', 'sdd1'], True)
check_file(POLICIES[1], 'c1', ['sda1', 'sdb1', 'sde1', 'sdf1'], False)
# check policy 2: put file on c2, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c2/o'
obj = 'test_object2'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[2], 'c2', ['sde1', 'sdf1'], True)
check_file(POLICIES[2], 'c2', ['sda1', 'sdb1', 'sdc1', 'sdd1'], False)
@unpatch_policies
def test_policy_IO_override(self):
if hasattr(_test_servers[-1], '_filesystem'):
# ironically, the _filesystem attribute on the object server means
# the in-memory diskfile is in use, so this test does not apply
return
prosrv = _test_servers[0]
# validate container policy is 1
req = Request.blank('/v1/a/c1', method='HEAD')
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 204) # sanity check
self.assertEqual(POLICIES[1].name, res.headers['x-storage-policy'])
# check overrides: put it in policy 2 (not where the container says)
req = Request.blank(
'/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': BytesIO(b"hello")},
headers={'Content-Type': 'text/plain',
'Content-Length': '5',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 201) # sanity check
# go to disk to make sure it's there
partition, nodes = prosrv.get_object_ring(2).get_nodes(
'a', 'c1', 'wrong-o')
node = nodes[0]
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
df = df_mgr.get_diskfile(node['device'], partition, 'a',
'c1', 'wrong-o', policy=POLICIES[2])
with df.open():
contents = ''.join(df.reader())
self.assertEqual(contents, "hello")
# can't get it from the normal place
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 404) # sanity check
# but we can get it from policy 2
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, 'hello')
# and we can delete it the same way
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 204)
df = df_mgr.get_diskfile(node['device'], partition, 'a',
'c1', 'wrong-o', policy=POLICIES[2])
try:
df.open()
except DiskFileNotExist as e:
self.assertTrue(float(e.timestamp) > 0)
else:
self.fail('did not raise DiskFileNotExist')
@unpatch_policies
def test_GET_newest_large_file(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'a' * (1024 * 1024)
path = '/v1/a/c/o.large'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'application/octet-stream',
'X-Newest': 'true'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
@unpatch_policies
def test_GET_ranges(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = (''.join(
('beans lots of beans lots of beans lots of beans yeah %04d ' % i)
for i in range(100)))
path = '/v1/a/c/o.beans'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# one byte range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=10-200'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 206)
self.assertEqual(res.body, obj[10:201])
# multiple byte ranges
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=10-200,1000-1099,4123-4523'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 206)
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges')
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None)
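        # The multi-range response comes back as multipart/byteranges; split
        # the body on the advertised boundary and verify each part's
        # Content-Range and payload against the original object below.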
got_mime_docs = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(res.body),
boundary):
headers = HeaderKeyDict(rfc822.Message(mime_doc_fh, 0).items())
body = mime_doc_fh.read()
got_mime_docs.append((headers, body))
self.assertEqual(len(got_mime_docs), 3)
first_range_headers = got_mime_docs[0][0]
first_range_body = got_mime_docs[0][1]
self.assertEqual(first_range_headers['Content-Range'],
'bytes 10-200/5800')
self.assertEqual(first_range_body, obj[10:201])
second_range_headers = got_mime_docs[1][0]
second_range_body = got_mime_docs[1][1]
self.assertEqual(second_range_headers['Content-Range'],
'bytes 1000-1099/5800')
self.assertEqual(second_range_body, obj[1000:1100])
        third_range_headers = got_mime_docs[2][0]
        third_range_body = got_mime_docs[2][1]
        self.assertEqual(third_range_headers['Content-Range'],
                         'bytes 4123-4523/5800')
        self.assertEqual(third_range_body, obj[4123:4524])
@unpatch_policies
def test_GET_bad_range_zero_byte(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c/o.zerobyte'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: 0\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (path,))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# bad byte-range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=spaghetti-carbonara'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, '')
# not a byte-range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'Kotta'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, '')
@unpatch_policies
def test_GET_ranges_resuming(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = (''.join(
('Smurf! The smurfing smurf is completely smurfed. %03d ' % i)
for i in range(1000)))
path = '/v1/a/c/o.smurfs'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/smurftet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
kaboomed = [0]
bytes_before_timeout = [None]
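        # FileLikeKaboom wraps a backend response's file-like object and
        # raises ChunkReadTimeout once bytes_before_timeout[0] bytes have
        # been handed out, simulating a backend connection dying mid-read.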
class FileLikeKaboom(object):
def __init__(self, inner_file_like):
self.inner_file_like = inner_file_like
            # delegate close() and anything else we don't override to the
            # wrapped file-like object
def __getattr__(self, attr):
return getattr(self.inner_file_like, attr)
def readline(self, *a, **kw):
if bytes_before_timeout[0] <= 0:
kaboomed[0] += 1
raise ChunkReadTimeout(None)
result = self.inner_file_like.readline(*a, **kw)
if len(result) > bytes_before_timeout[0]:
result = result[:bytes_before_timeout[0]]
bytes_before_timeout[0] -= len(result)
return result
def read(self, length=None):
result = self.inner_file_like.read(length)
if bytes_before_timeout[0] <= 0:
kaboomed[0] += 1
raise ChunkReadTimeout(None)
if len(result) > bytes_before_timeout[0]:
result = result[:bytes_before_timeout[0]]
bytes_before_timeout[0] -= len(result)
return result
orig_hrtdi = proxy_base.http_response_to_document_iters
# Use this to mock out http_response_to_document_iters. On the first
# call, the result will be sabotaged to blow up with
# ChunkReadTimeout after some number of bytes are read. On
# subsequent calls, no sabotage will be added.
def sabotaged_hrtdi(*a, **kw):
resp_parts = orig_hrtdi(*a, **kw)
for sb, eb, l, h, range_file in resp_parts:
if bytes_before_timeout[0] <= 0:
# simulate being unable to read MIME part of
# multipart/byteranges response
kaboomed[0] += 1
raise ChunkReadTimeout(None)
boomer = FileLikeKaboom(range_file)
yield sb, eb, l, h, boomer
sabotaged = [False]
def single_sabotage_hrtdi(*a, **kw):
if not sabotaged[0]:
sabotaged[0] = True
return sabotaged_hrtdi(*a, **kw)
else:
return orig_hrtdi(*a, **kw)
        # We want sort of an end-to-end test of object GET resuming, so what
        # we do is mock out http_response_to_document_iters so the proxy
        # thinks it only read a certain number of bytes before it got a
        # timeout.
bytes_before_timeout[0] = 300
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=0-500'})
res = req.get_response(prosrv)
body = res.body # read the whole thing
self.assertEqual(kaboomed[0], 1) # sanity check
self.assertEqual(res.status_int, 206)
self.assertEqual(len(body), 501)
self.assertEqual(body, obj[:501])
# Sanity-check for multi-range resume: make sure we actually break
# in the middle of the second byterange. This test is partially
# about what happens when all the object servers break at once, and
# partially about validating all these mocks we do. After all, the
# point of resuming is that the client can't tell anything went
# wrong, so we need a test where we can't resume and something
# *does* go wrong so we can observe it.
bytes_before_timeout[0] = 700
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
sabotaged_hrtdi): # perma-broken
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''
try:
for chunk in res.app_iter:
body += chunk
except ChunkReadTimeout:
pass
self.assertEqual(res.status_int, 206)
self.assertTrue(kaboomed[0] > 0) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
rfc822.Message(mime_doc_fh, 0)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(len(got_byteranges[1]), 199) # partial
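        # With the backend permanently sabotaged the proxy cannot resume, so
        # the client only sees the first range in full plus the first
        # 700 - 501 = 199 bytes of the second before the stream dies.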
# Multi-range resume, resuming in the middle of the first byterange
bytes_before_timeout[0] = 300
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertEqual(kaboomed[0], 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
rfc822.Message(mime_doc_fh, 0)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
# Multi-range resume, first GET dies in the middle of the second set
# of MIME headers
bytes_before_timeout[0] = 501
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertTrue(kaboomed[0] >= 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
rfc822.Message(mime_doc_fh, 0)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
# Multi-range resume, first GET dies in the middle of the second
# byterange
bytes_before_timeout[0] = 750
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertTrue(kaboomed[0] >= 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
rfc822.Message(mime_doc_fh, 0)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
@unpatch_policies
def test_PUT_ec(self):
policy = POLICIES[3]
self.put_container("ec", "ec-con")
obj = 'abCD' * 10 # small, so we don't get multiple EC stripes
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/o1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
ecd = policy.pyeclib_driver
expected_pieces = set(ecd.encode(obj))
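        # pyeclib's encode() returns one fragment archive per node (two data
        # plus one parity for this 2+1 scheme); these are what we expect to
        # find on disk.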
# go to disk to make sure it's there and all erasure-coded
partition, nodes = policy.object_ring.get_nodes('a', 'ec-con', 'o1')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[policy]
got_pieces = set()
got_indices = set()
got_durable = []
for node_index, node in enumerate(nodes):
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'o1',
policy=policy)
with df.open():
meta = df.get_metadata()
contents = ''.join(df.reader())
got_pieces.add(contents)
                # check presence of a .durable file for the timestamp
durable_file = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(policy),
partition, hash_path('a', 'ec-con', 'o1')),
utils.Timestamp(df.timestamp).internal + '.durable')
if os.path.isfile(durable_file):
got_durable.append(True)
lmeta = dict((k.lower(), v) for k, v in meta.items())
got_indices.add(
lmeta['x-object-sysmeta-ec-frag-index'])
self.assertEqual(
lmeta['x-object-sysmeta-ec-etag'],
md5(obj).hexdigest())
self.assertEqual(
lmeta['x-object-sysmeta-ec-content-length'],
str(len(obj)))
self.assertEqual(
lmeta['x-object-sysmeta-ec-segment-size'],
'4096')
self.assertEqual(
lmeta['x-object-sysmeta-ec-scheme'],
'jerasure_rs_vand 2+1')
self.assertEqual(
lmeta['etag'],
md5(contents).hexdigest())
self.assertEqual(expected_pieces, got_pieces)
self.assertEqual(set(('0', '1', '2')), got_indices)
        # verify that at least 2 puts made it all the way to the end of the
        # 2nd phase, i.e. at least 2 .durable statuses were written
num_durable_puts = sum(d is True for d in got_durable)
self.assertTrue(num_durable_puts >= 2)
@unpatch_policies
def test_PUT_ec_multiple_segments(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
pyeclib_header_size = len(ec_policy.pyeclib_driver.encode("")[0])
segment_size = ec_policy.ec_segment_size
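        # encode('') yields fragments containing only pyeclib's per-fragment
        # metadata, so its length gives the inline header overhead that gets
        # added for every segment.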
# Big enough to have multiple segments. Also a multiple of the
# segment size to get coverage of that path too.
obj = 'ABC' * segment_size
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/o2 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
        # it's a 2+1 erasure code, so each fragment archive should be half
        # the length of the object, plus three inline pyeclib metadata
        # headers (one per segment)
expected_length = (len(obj) / 2 + pyeclib_header_size * 3)
partition, nodes = ec_policy.object_ring.get_nodes(
'a', 'ec-con', 'o2')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
got_durable = []
fragment_archives = []
for node in nodes:
df = df_mgr.get_diskfile(
node['device'], partition, 'a',
'ec-con', 'o2', policy=ec_policy)
with df.open():
contents = ''.join(df.reader())
fragment_archives.append(contents)
self.assertEqual(len(contents), expected_length)
                # check presence of a .durable file for the timestamp
durable_file = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(ec_policy),
partition, hash_path('a', 'ec-con', 'o2')),
utils.Timestamp(df.timestamp).internal + '.durable')
if os.path.isfile(durable_file):
got_durable.append(True)
# Verify that we can decode each individual fragment and that they
# are all the correct size
fragment_size = ec_policy.fragment_size
nfragments = int(
math.ceil(float(len(fragment_archives[0])) / fragment_size))
for fragment_index in range(nfragments):
fragment_start = fragment_index * fragment_size
fragment_end = (fragment_index + 1) * fragment_size
try:
frags = [fa[fragment_start:fragment_end]
for fa in fragment_archives]
seg = ec_policy.pyeclib_driver.decode(frags)
except ECDriverError:
self.fail("Failed to decode fragments %d; this probably "
"means the fragments are not the sizes they "
"should be" % fragment_index)
segment_start = fragment_index * segment_size
segment_end = (fragment_index + 1) * segment_size
self.assertEqual(seg, obj[segment_start:segment_end])
        # verify that at least 2 puts made it all the way to the end of the
        # 2nd phase, i.e. at least 2 .durable statuses were written
num_durable_puts = sum(d is True for d in got_durable)
self.assertTrue(num_durable_puts >= 2)
@unpatch_policies
def test_PUT_ec_object_etag_mismatch(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
obj = '90:6A:02:60:B1:08-96da3e706025537fc42464916427727e'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/o3 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5('something else').hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 422'
self.assertEqual(headers[:len(exp)], exp)
# nothing should have made it to disk on the object servers
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'o3')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'o3', policy=POLICIES[3])
self.assertRaises(DiskFileNotExist, df.open)
@unpatch_policies
def test_PUT_ec_fragment_archive_etag_mismatch(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
        # Cause a hash mismatch by feeding one particular MD5 hasher some
        # extra data, so that exactly one object server ends up computing a
        # bad fragment etag and fails its part of the PUT.
countdown = [1]
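        # countdown is a one-element list so the closure below can mutate it;
        # the hasher constructed when it reaches zero (i.e. the second one)
        # is the only one that gets fed the extra data.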
def busted_md5_constructor(initial_str=""):
hasher = md5(initial_str)
if countdown[0] == 0:
hasher.update('wrong')
countdown[0] -= 1
return hasher
obj = 'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
with mock.patch('swift.obj.server.md5', busted_md5_constructor):
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/pimento HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 503' # no quorum
self.assertEqual(headers[:len(exp)], exp)
# 2/3 of the fragment archives should have landed on disk
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'pimento')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
found = 0
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'pimento',
policy=POLICIES[3])
try:
# diskfile open won't succeed because no durable was written,
# so look under the hood for data files.
files = os.listdir(df._datadir)
num_data_files = len([f for f in files if f.endswith('.data')])
self.assertEqual(1, num_data_files)
found += 1
except OSError:
pass
self.assertEqual(found, 2)
@unpatch_policies
def test_PUT_ec_if_none_match(self):
self.put_container("ec", "ec-con")
obj = 'ananepionic-lepidophyllous-ropewalker-neglectful'
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'If-None-Match: *\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_GET_ec(self):
self.put_container("ec", "ec-con")
obj = '0123456' * 11 * 17
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'X-Object-Meta-Color: chartreuse\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
self.assertEqual(gotten_obj, obj)
@unpatch_policies
def test_conditional_GET_ec(self):
self.put_container("ec", "ec-con")
obj = 'this object has an etag and is otherwise unimportant'
etag = md5(obj).hexdigest()
not_etag = md5(obj + "blahblah").hexdigest()
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/conditionals HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
for verb in ('GET', 'HEAD'):
# If-Match
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-Match': etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-Match': not_etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 412)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-Match': "*"})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
# If-None-Match
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 304)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': not_etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': "*"})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 304)
@unpatch_policies
def test_GET_ec_big(self):
self.put_container("ec", "ec-con")
# our EC segment size is 4 KiB, so this is multiple (3) segments;
# we'll verify that with a sanity check
obj = 'a moose once bit my sister' * 400
self.assertTrue(
len(obj) > POLICIES.get_by_name("ec").ec_segment_size * 2,
"object is too small for proper testing")
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
# This may look like a redundant test, but when things fail, this
# has a useful failure message while the subsequent one spews piles
# of garbage and demolishes your terminal's scrollback buffer.
self.assertEqual(len(gotten_obj), len(obj))
self.assertEqual(gotten_obj, obj)
@unpatch_policies
def test_GET_ec_failure_handling(self):
self.put_container("ec", "ec-con")
obj = 'look at this object; it is simply amazing ' * 500
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
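        # The helpers below wrap the real ECAppIter so that each document-part
        # iterator yields its first chunk and then blows up, forcing a failure
        # after the client has already started receiving the 200 response.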
def explodey_iter(inner_iter):
yield next(inner_iter)
raise Exception("doom ba doom")
def explodey_doc_parts_iter(inner_iter_iter):
for item in inner_iter_iter:
item = item.copy() # paranoia about mutable data
item['part_iter'] = explodey_iter(item['part_iter'])
yield item
real_ec_app_iter = swift.proxy.controllers.obj.ECAppIter
def explodey_ec_app_iter(path, policy, iterators, *a, **kw):
# Each thing in `iterators` here is a document-parts iterator,
# and we want to fail after getting a little into each part.
#
# That way, we ensure we've started streaming the response to
# the client when things go wrong.
return real_ec_app_iter(
path, policy,
[explodey_doc_parts_iter(i) for i in iterators],
*a, **kw)
with mock.patch("swift.proxy.controllers.obj.ECAppIter",
explodey_ec_app_iter):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
gotten_obj = ''
try:
with Timeout(300): # don't hang the testrun when this fails
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
except Timeout:
self.fail("GET hung when connection failed")
# Ensure we failed partway through, otherwise the mocks could
# get out of date without anyone noticing
self.assertTrue(0 < len(gotten_obj) < len(obj))
@unpatch_policies
def test_HEAD_ec(self):
self.put_container("ec", "ec-con")
obj = '0123456' * 11 * 17
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'X-Object-Meta-Color: chartreuse\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
@unpatch_policies
def test_GET_ec_404(self):
self.put_container("ec", "ec-con")
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_HEAD_ec_404(self):
self.put_container("ec", "ec-con")
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
def test_PUT_expect_header_zero_content_length(self):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
if 'expect' in headers or 'Expect' in headers:
test_errors.append('Expect was in headers for object '
'server!')
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
# The (201, Exception('test')) tuples in there have the effect of
# changing the status of the initial expect response. The default
# expect response from FakeConn for 201 is 100.
            # But the object server won't send a 100 Continue line if the
            # client doesn't send an Expect: 100-continue header (as is the
            # case with zero-byte PUTs, as validated by this test);
            # nevertheless, the object controller calls getexpect without
            # prejudice. In this case the status from the response shows up
            # early, in getexpect, instead of having to wait until
            # getresponse. The Exception is in there to ensure that the
            # object controller also *uses* the result of getexpect instead
            # of calling getresponse, in which case our FakeConn will blow up.
success_codes = [(201, Exception('test'))] * 3
set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertEqual(test_errors, [])
self.assertTrue(res.status.startswith('201 '), res.status)
def test_PUT_expect_header_nonzero_content_length(self):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
if 'Expect' not in headers:
test_errors.append('Expect was not in headers for '
'non-zero byte PUT!')
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
            # the (100, 201) tuples in there are just being extra explicit
            # about the FakeConn returning the 100 Continue status when the
            # object controller calls getexpect, which is FakeConn's default
            # for 201 if no expect_status is specified.
success_codes = [(100, 201)] * 3
set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertEqual(test_errors, [])
self.assertTrue(res.status.startswith('201 '))
def test_PUT_respects_write_affinity(self):
written_to = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
written_to.append((ipaddr, port, device))
with save_globals():
def is_r0(node):
return node['region'] == 0
object_ring = self.app.get_object_ring(None)
object_ring.max_more_nodes = 100
self.app.write_affinity_is_local_fn = is_r0
self.app.write_affinity_node_count = lambda r: 3
controller = \
ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
set_http_connect(200, 200, 201, 201, 201,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertTrue(res.status.startswith('201 '))
self.assertEqual(3, len(written_to))
for ip, port, device in written_to:
# this is kind of a hokey test, but in FakeRing, the port is even
# when the region is 0, and odd when the region is 1, so this test
# asserts that we only wrote to nodes in region 0.
self.assertEqual(0, port % 2)
def test_PUT_respects_write_affinity_with_507s(self):
written_to = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
written_to.append((ipaddr, port, device))
with save_globals():
def is_r0(node):
return node['region'] == 0
object_ring = self.app.get_object_ring(None)
object_ring.max_more_nodes = 100
self.app.write_affinity_is_local_fn = is_r0
self.app.write_affinity_node_count = lambda r: 3
controller = \
ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
self.app.error_limit(
object_ring.get_part_nodes(1)[0], 'test')
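            # error-limit the first primary so the proxy skips it; the
            # assertions below show that as a result only two writes land in
            # region 0 and the third goes to a non-region-0 node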
set_http_connect(200, 200, # account, container
201, 201, 201, # 3 working backends
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertTrue(res.status.startswith('201 '))
self.assertEqual(3, len(written_to))
# this is kind of a hokey test, but in FakeRing, the port is even when
# the region is 0, and odd when the region is 1, so this test asserts
# that we wrote to 2 nodes in region 0, then went to 1 non-r0 node.
self.assertEqual(0, written_to[0][1] % 2) # it's (ip, port, device)
self.assertEqual(0, written_to[1][1] % 2)
self.assertNotEqual(0, written_to[2][1] % 2)
@unpatch_policies
def test_PUT_no_etag_fallocate(self):
with mock.patch('swift.obj.diskfile.fallocate') as mock_fallocate:
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'hemoleucocytic-surfactant'
fd.write('PUT /v1/a/c/o HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# one for each obj server; this test has 2
self.assertEqual(len(mock_fallocate.mock_calls), 2)
@unpatch_policies
def test_PUT_message_length_using_content_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'j' * 20
fd.write('PUT /v1/a/c/o.content-length HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_using_transfer_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
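        # The body below uses HTTP/1.1 chunked transfer coding: each chunk is
        # a hex length line followed by that many bytes, and a zero-length
        # chunk terminates the body.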
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_using_both(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_bad_message_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
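        # With 'Transfer-Encoding: gzip' the request's message length cannot
        # be determined in a valid way, so the expected response (asserted
        # below) is 400 Bad Request.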
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: gzip\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 400'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_unsup_xfr_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
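        # A multi-coding value like 'gzip,chunked' is an unsupported transfer
        # coding here, so the expected response (asserted below) is
        # 501 Not Implemented.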
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: gzip,chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 501'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_too_large(self):
with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n\r\n'
'oh say can you see by the dawns\'\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 413'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_last_modified(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
lm_hdr = 'Last-Modified: '
self.assertEqual(headers[:len(exp)], exp)
last_modified_put = [line for line in headers.split('\r\n')
if lm_hdr in line][0][len(lm_hdr):]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
last_modified_head = [line for line in headers.split('\r\n')
if lm_hdr in line][0][len(lm_hdr):]
self.assertEqual(last_modified_put, last_modified_head)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'If-Modified-Since: %s\r\n'
'X-Storage-Token: t\r\n\r\n' % last_modified_put)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 304'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'If-Unmodified-Since: %s\r\n'
'X-Storage-Token: t\r\n\r\n' % last_modified_put)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
def test_PUT_auto_content_type(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_content_type(filename, expected):
                # The five responses here are for account_info() (HEAD to the
                # account server), container_info() (HEAD to the container
                # server) and three calls to _connect_put_node() (PUTs to
                # three object servers)
set_http_connect(201, 201, 201, 201, 201,
give_content_type=lambda content_type:
self.assertEqual(content_type,
next(expected)))
                # We need to include a transfer-encoding to get past
                # constraints.check_object_creation()
req = Request.blank('/v1/a/c/%s' % filename, {},
headers={'transfer-encoding': 'chunked'})
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
# If we don't check the response here we could miss problems
# in PUT()
self.assertEqual(res.status_int, 201)
test_content_type('test.jpg', iter(['', '', 'image/jpeg',
'image/jpeg', 'image/jpeg']))
test_content_type('test.html', iter(['', '', 'text/html',
'text/html', 'text/html']))
test_content_type('test.css', iter(['', '', 'text/css',
'text/css', 'text/css']))
def test_custom_mime_types_files(self):
swift_dir = mkdtemp()
try:
with open(os.path.join(swift_dir, 'mime.types'), 'w') as fp:
fp.write('foo/bar foo\n')
proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
FakeRing(), FakeRing())
self.assertEqual(proxy_server.mimetypes.guess_type('blah.foo')[0],
'foo/bar')
self.assertEqual(proxy_server.mimetypes.guess_type('blah.jpg')[0],
'image/jpeg')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_PUT(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, 201), 201)
test_status_map((200, 200, 201, 201, 500), 201)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 200, 204, 500, 404), 503)
test_status_map((200, 200, 202, 202, 204), 204)
def test_PUT_connect_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, -1), 201) # connect exc
# connect errors
test_status_map((200, 200, Timeout(), 201, 201, ), 201)
test_status_map((200, 200, 201, 201, Exception()), 201)
# expect errors
test_status_map((200, 200, (Timeout(), None), 201, 201), 201)
test_status_map((200, 200, (Exception(), None), 201, 201), 201)
# response errors
test_status_map((200, 200, (100, Timeout()), 201, 201), 201)
test_status_map((200, 200, (100, Exception()), 201, 201), 201)
test_status_map((200, 200, 507, 201, 201), 201) # error limited
test_status_map((200, 200, -1, 201, -1), 503)
test_status_map((200, 200, 503, -1, 503), 503)
def test_PUT_send_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg',
environ={'REQUEST_METHOD': 'PUT'},
body='some data')
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, -1, 201), 201)
test_status_map((200, 200, 201, -1, -1), 503)
test_status_map((200, 200, 503, 503, -1), 503)
def test_PUT_max_size(self):
with save_globals():
set_http_connect(201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 413)
def test_PUT_bad_content_type(self):
with save_globals():
set_http_connect(201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': 0, 'Content-Type': 'foo/bar;swift_hey=45'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 400)
def test_PUT_getresponse_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
                self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, -1), 201)
test_status_map((200, 200, 201, -1, -1), 503)
test_status_map((200, 200, 503, 503, -1), 503)
def test_POST(self):
with save_globals():
self.app.object_post_as_copy = False
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = req.get_response(self.app)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 202, 202, 202), 202)
test_status_map((200, 200, 202, 202, 500), 202)
test_status_map((200, 200, 202, 500, 500), 503)
test_status_map((200, 200, 202, 404, 500), 503)
test_status_map((200, 200, 202, 404, 404), 404)
test_status_map((200, 200, 404, 500, 500), 503)
test_status_map((200, 200, 404, 404, 404), 404)
@patch_policies([
StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_POST_backend_headers(self):
# reset the router post patch_policies
self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.object_post_as_copy = False
self.app.sort_nodes = lambda nodes: nodes
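        # keep node ordering deterministic so the captured backend requests
        # can be matched positionally against the expectations below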
backend_requests = []
def capture_requests(ip, port, method, path, headers, *args,
**kwargs):
backend_requests.append((method, path, headers))
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue'})
        # we want the container_info response to say a policy index of 1
resp_headers = {'X-Backend-Storage-Policy-Index': 1}
with mocked_http_conn(
200, 200, 202, 202, 202,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 5)
def check_request(req, method, path, headers=None):
req_method, req_path, req_headers = req
self.assertEqual(method, req_method)
# caller can ignore leading path parts
self.assertTrue(req_path.endswith(path),
'expected path to end with %s, it was %s' % (
path, req_path))
headers = headers or {}
# caller can ignore some headers
for k, v in headers.items():
self.assertEqual(req_headers[k], v)
account_request = backend_requests.pop(0)
check_request(account_request, method='HEAD', path='/sda/0/a')
container_request = backend_requests.pop(0)
check_request(container_request, method='HEAD', path='/sda/0/a/c')
# make sure backend requests included expected container headers
container_headers = {}
for request in backend_requests:
req_headers = request[2]
device = req_headers['x-container-device']
host = req_headers['x-container-host']
container_headers[device] = host
expectations = {
'method': 'POST',
'path': '/0/a/c/o',
'headers': {
'X-Container-Partition': '0',
'Connection': 'close',
'User-Agent': 'proxy-server %s' % os.getpid(),
'Host': 'localhost:80',
'Referer': 'POST http://localhost/v1/a/c/o',
'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': '1'
},
}
check_request(request, **expectations)
expected = {}
for i, device in enumerate(['sda', 'sdb', 'sdc']):
expected[device] = '10.0.0.%d:100%d' % (i, i)
self.assertEqual(container_headers, expected)
# and again with policy override
self.app.memcache.store = {}
backend_requests = []
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': 0})
with mocked_http_conn(
200, 200, 202, 202, 202,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 5)
for request in backend_requests[2:]:
expectations = {
'method': 'POST',
'path': '/0/a/c/o', # ignore device bit
'headers': {
'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': '0',
}
}
check_request(request, **expectations)
# and this time with post as copy
self.app.object_post_as_copy = True
self.app.memcache.store = {}
backend_requests = []
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': 0})
with mocked_http_conn(
200, 200, 200, 200, 200, 201, 201, 201,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 8)
policy0 = {'X-Backend-Storage-Policy-Index': '0'}
policy1 = {'X-Backend-Storage-Policy-Index': '1'}
expected = [
# account info
{'method': 'HEAD', 'path': '/0/a'},
# container info
{'method': 'HEAD', 'path': '/0/a/c'},
            # x-newest GETs
{'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
{'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
{'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
# new writes
{'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
{'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
{'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
]
for request, expectations in zip(backend_requests, expected):
check_request(request, **expectations)
def test_POST_as_copy(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = req.get_response(self.app)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
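            # acct cont objc objc objc obj obj obj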
test_status_map((200, 200, 200, 200, 200, 202, 202, 202), 202)
test_status_map((200, 200, 200, 200, 200, 202, 202, 500), 202)
test_status_map((200, 200, 200, 200, 200, 202, 500, 500), 503)
test_status_map((200, 200, 200, 200, 200, 202, 404, 500), 503)
test_status_map((200, 200, 200, 200, 200, 202, 404, 404), 404)
test_status_map((200, 200, 200, 200, 200, 404, 500, 500), 503)
test_status_map((200, 200, 200, 200, 200, 404, 404, 404), 404)
def test_DELETE(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'DELETE'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
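            # acct cont obj obj obj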
test_status_map((200, 200, 204, 204, 204), 204)
test_status_map((200, 200, 204, 204, 500), 204)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 204, 500, 500, 404), 503)
test_status_map((200, 200, 404, 404, 404), 404)
test_status_map((200, 200, 400, 400, 400), 400)
def test_HEAD(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
if expected < 400:
self.assertTrue('x-works' in res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
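            # acct cont obj obj obj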
test_status_map((200, 200, 200, 404, 404), 200)
test_status_map((200, 200, 200, 500, 404), 200)
test_status_map((200, 200, 304, 500, 404), 304)
test_status_map((200, 200, 404, 404, 404), 404)
test_status_map((200, 200, 404, 404, 500), 404)
test_status_map((200, 200, 500, 500, 500), 503)
def test_HEAD_newest(self):
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'},
headers={'x-newest': 'true'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
# acct cont obj obj obj
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
test_status_map((200, 200, 404, 404, 200), 200, ('0', '0', None,
None, '1'), '1')
def test_GET_newest(self):
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'GET'},
headers={'x-newest': 'true'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
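            # acct cont obj obj obj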
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
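            # without x-newest the first good object response wins
            # acct cont obj obj obj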
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
'1', '2'), None)
def test_POST_meta_val_len(self):
with save_globals():
limit = constraints.MAX_META_VALUE_LENGTH
self.app.object_post_as_copy = False
ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * limit})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * (limit + 1)})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_as_copy_meta_val_len(self):
with save_globals():
limit = constraints.MAX_META_VALUE_LENGTH
set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
# acct cont objc objc objc obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * limit})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * (limit + 1)})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_key_len(self):
with save_globals():
limit = constraints.MAX_META_NAME_LENGTH
self.app.object_post_as_copy = False
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * limit): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * (limit + 1)): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_as_copy_meta_key_len(self):
with save_globals():
limit = constraints.MAX_META_NAME_LENGTH
set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
# acct cont objc objc objc obj obj obj
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * limit): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * (limit + 1)): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_count(self):
with save_globals():
limit = constraints.MAX_META_COUNT
headers = dict(
(('X-Object-Meta-' + str(i), 'a') for i in range(limit + 1)))
headers.update({'Content-Type': 'foo/bar'})
set_http_connect(202, 202, 202)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers=headers)
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_size(self):
with save_globals():
limit = constraints.MAX_META_OVERALL_SIZE
            count = limit // 256  # enough to cause the limit to be reached
headers = dict(
(('X-Object-Meta-' + str(i), 'a' * 256)
for i in range(count + 1)))
headers.update({'Content-Type': 'foo/bar'})
set_http_connect(202, 202, 202)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers=headers)
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_PUT_not_autodetect_content_type(self):
with save_globals():
headers = {'Content-Type': 'something/right', 'Content-Length': 0}
it_worked = []
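            # record whether each backend object PUT kept the
            # client-supplied Content-Type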
def verify_content_type(ipaddr, port, device, partition,
method, path, headers=None,
query_string=None):
if path == '/a/c/o.html':
it_worked.append(
headers['Content-Type'].startswith('something/right'))
set_http_connect(204, 204, 201, 201, 201,
give_connect=verify_content_type)
req = Request.blank('/v1/a/c/o.html', {'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
req.get_response(self.app)
            self.assertNotEqual(it_worked, [])
self.assertTrue(all(it_worked))
def test_PUT_autodetect_content_type(self):
with save_globals():
headers = {'Content-Type': 'something/wrong', 'Content-Length': 0,
'X-Detect-Content-Type': 'True'}
it_worked = []
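            # record whether the proxy replaced the bogus Content-Type
            # with one guessed from the .html extension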
def verify_content_type(ipaddr, port, device, partition,
method, path, headers=None,
query_string=None):
if path == '/a/c/o.html':
it_worked.append(
headers['Content-Type'].startswith('text/html'))
set_http_connect(204, 204, 201, 201, 201,
give_connect=verify_content_type)
req = Request.blank('/v1/a/c/o.html', {'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
req.get_response(self.app)
            self.assertNotEqual(it_worked, [])
self.assertTrue(all(it_worked))
def test_client_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
class SlowBody(object):
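                # trickle the 4-byte body out with short pauses so the
                # proxy's client_timeout can expire mid-upload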
def __init__(self):
self.sent = 0
def read(self, size=-1):
if self.sent < 4:
sleep(0.1)
self.sent += 1
return ' '
return ''
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.app.client_timeout = 0.05
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(201, 201, 201)
# obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_client_disconnect(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
class SlowBody(object):
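                # raising from read() simulates the client disconnecting
                # mid-upload; the proxy reports this as 499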
def __init__(self):
self.sent = 0
def read(self, size=-1):
raise Exception('Disconnected')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_node_read_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
set_http_connect(200, 200, 200, slow=0.1)
req.sent_size = 0
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
self.app.recoverable_node_timeout = 0.1
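            # with backend responses slower than the recoverable node
            # timeout, reading the body raises ChunkReadTimeout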
set_http_connect(200, 200, 200, slow=1.0)
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
def test_node_read_timeout_retry(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
self.app.recoverable_node_timeout = 0.1
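            # every programmed response is slower than the timeout, so
            # the body cannot be read even with retries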
set_http_connect(200, 200, 200, slow=[1.0, 1.0, 1.0])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual('', resp.body)
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'a', 'a'])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'b', 'a'])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'b', 'b'])
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
def test_node_write_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'},
body=' ')
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201, slow=0.1)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.app.node_timeout = 0.1
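            # with node_timeout shorter than the backend response time,
            # the PUT fails with 503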
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'},
body=' ')
self.app.update_request(req)
set_http_connect(201, 201, 201, slow=1.0)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_node_request_setting(self):
baseapp = proxy_server.Application({'request_node_count': '3'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(baseapp.request_node_count(3), 3)
def test_iter_nodes(self):
with save_globals():
try:
object_ring = self.app.get_object_ring(None)
object_ring.max_more_nodes = 2
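                # 3 primary nodes plus at most 2 handoffs -> 5 nodes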
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 5)
object_ring.max_more_nodes = 20
self.app.request_node_count = lambda r: 20
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 9)
# zero error-limited primary nodes -> no handoff warnings
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
object_ring.max_more_nodes = 20
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [])
self.assertEqual(self.app.logger.get_increments(), [])
# one error-limited primary node -> one handoff warning
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
self.app._error_limiting = {} # clear out errors
set_node_errors(self.app, object_ring._devs[0], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (5)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count'])
# two error-limited primary nodes -> two handoff warnings
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
self.app._error_limiting = {} # clear out errors
for i in range(2):
set_node_errors(self.app, object_ring._devs[i], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (5)',), {}),
(('Handoff requested (6)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count',
'handoff_count'])
# all error-limited primary nodes -> four handoff warnings,
# plus a handoff-all metric
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 10
object_ring.set_replicas(4) # otherwise we run out of handoffs
self.app._error_limiting = {} # clear out errors
for i in range(4):
set_node_errors(self.app, object_ring._devs[i], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 10)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (7)',), {}),
(('Handoff requested (8)',), {}),
(('Handoff requested (9)',), {}),
(('Handoff requested (10)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count',
'handoff_count',
'handoff_count',
'handoff_count',
'handoff_all_count'])
finally:
object_ring.max_more_nodes = 0
def test_iter_nodes_calls_sort_nodes(self):
with mock.patch.object(self.app, 'sort_nodes') as sort_nodes:
object_ring = self.app.get_object_ring(None)
for node in self.app.iter_nodes(object_ring, 0):
pass
sort_nodes.assert_called_once_with(
object_ring.get_part_nodes(0))
def test_iter_nodes_skips_error_limited(self):
with mock.patch.object(self.app, 'sort_nodes', lambda n: n):
object_ring = self.app.get_object_ring(None)
first_nodes = list(self.app.iter_nodes(object_ring, 0))
second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertTrue(first_nodes[0] in second_nodes)
self.app.error_limit(first_nodes[0], 'test')
second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertTrue(first_nodes[0] not in second_nodes)
def test_iter_nodes_gives_extra_if_error_limited_inline(self):
object_ring = self.app.get_object_ring(None)
with nested(
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 6),
mock.patch.object(object_ring, 'max_more_nodes', 99)):
first_nodes = list(self.app.iter_nodes(object_ring, 0))
second_nodes = []
for node in self.app.iter_nodes(object_ring, 0):
if not second_nodes:
self.app.error_limit(node, 'test')
second_nodes.append(node)
self.assertEqual(len(first_nodes), 6)
self.assertEqual(len(second_nodes), 7)
def test_iter_nodes_with_custom_node_iter(self):
object_ring = self.app.get_object_ring(None)
node_list = [dict(id=n, ip='1.2.3.4', port=n, device='D')
for n in range(10)]
with nested(
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 3)):
got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list[:3], got_nodes)
with nested(
mock.patch.object(self.app, 'sort_nodes', lambda n: n),
mock.patch.object(self.app, 'request_node_count',
lambda r: 1000000)):
got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list, got_nodes)
def test_best_response_sets_headers(self):
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object', headers=[{'X-Test': '1'},
{'X-Test': '2'},
{'X-Test': '3'}])
self.assertEqual(resp.headers['X-Test'], '1')
def test_best_response_sets_etag(self):
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object')
self.assertEqual(resp.etag, None)
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object',
etag='68b329da9893e34099c7d8ad5cb9c940'
)
self.assertEqual(resp.etag, '68b329da9893e34099c7d8ad5cb9c940')
def test_proxy_passes_content_type(self):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
set_http_connect(200, 200, 200)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'x-application/test')
set_http_connect(200, 200, 200)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 0)
set_http_connect(200, 200, 200, slow=True)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 4)
def test_proxy_passes_content_length_on_head(self):
with save_globals():
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 0)
set_http_connect(200, 200, 200, slow=True)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 4)
def test_error_limiting(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
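            # drive the first object node past the error suppression limit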
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]),
self.app.error_suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
self.assert_status_map(controller.PUT, (200, 200, 200, 201, 201,
201), 503)
self.assert_status_map(controller.POST,
(200, 200, 200, 200, 200, 200, 202, 202,
202), 503)
self.assert_status_map(controller.DELETE,
(200, 200, 200, 204, 204, 204), 503)
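            # a negative suppression interval expires the error limiting
            # immediately, so the node is usable again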
self.app.error_suppression_interval = -300
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
200)
self.assertRaises(BaseException,
self.assert_status_map, controller.DELETE,
(200, 200, 200, 204, 204, 204), 503,
raise_exc=True)
def test_error_limiting_survives_ring_reload(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]),
self.app.error_suppression_limit + 1)
# wipe out any state in the ring
for policy in POLICIES:
policy.object_ring = FakeRing(base_port=3000)
# and we still get an error, which proves that the
# error-limiting info survived a ring reload
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
def test_PUT_error_limiting(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
self.assert_status_map(controller.PUT, (200, 200, 503, 200, 200),
200)
# 2, not 1, because assert_status_map() calls the method twice
odevs = object_ring.devs
self.assertEqual(node_error_count(controller.app, odevs[0]), 2)
self.assertEqual(node_error_count(controller.app, odevs[1]), 0)
self.assertEqual(node_error_count(controller.app, odevs[2]), 0)
self.assertTrue(
node_last_error(controller.app, odevs[0]) is not None)
self.assertTrue(node_last_error(controller.app, odevs[1]) is None)
self.assertTrue(node_last_error(controller.app, odevs[2]) is None)
def test_PUT_error_limiting_last_node(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
self.assert_status_map(controller.PUT, (200, 200, 200, 200, 503),
200)
# 2, not 1, because assert_status_map() calls the method twice
odevs = object_ring.devs
self.assertEqual(node_error_count(controller.app, odevs[0]), 0)
self.assertEqual(node_error_count(controller.app, odevs[1]), 0)
self.assertEqual(node_error_count(controller.app, odevs[2]), 2)
self.assertTrue(node_last_error(controller.app, odevs[0]) is None)
self.assertTrue(node_last_error(controller.app, odevs[1]) is None)
self.assertTrue(
node_last_error(controller.app, odevs[2]) is not None)
def test_acc_or_con_missing_returns_404(self):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
self.app._error_limiting = {}
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
self.app.update_request(req)
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 200)
set_http_connect(404, 404, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 503, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 503, 503)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 200, 204, 204, 204)
# acct cont obj obj obj
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 204)
set_http_connect(200, 404, 404, 404)
# acct cont cont cont
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 503, 503, 503)
# acct cont cont cont
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(
self.app, dev, self.app.error_suppression_limit + 1,
time.time())
set_http_connect(200)
# acct [isn't actually called since everything
# is error limited]
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(self.app, dev, 0, last_error=None)
for dev in self.app.container_ring.devs:
set_node_errors(self.app, dev,
self.app.error_suppression_limit + 1,
time.time())
set_http_connect(200, 200)
# acct cont [isn't actually called since
# everything is error limited]
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
def test_PUT_POST_requires_container_exist(self):
with save_globals():
self.app.object_post_as_copy = False
self.app.memcache = FakeMemcacheReturnsNone()
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 404, 404, 404, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'text/plain'})
self.app.update_request(req)
resp = controller.POST(req)
self.assertEqual(resp.status_int, 404)
def test_PUT_POST_as_copy_requires_container_exist(self):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 404, 404, 404, 200, 200, 200, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'text/plain'})
self.app.update_request(req)
resp = controller.POST(req)
self.assertEqual(resp.status_int, 404)
def test_bad_metadata(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-' + (
'a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'Content-Length': '0',
'X-Object-Meta-' + (
'a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
(constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
for x in range(constraints.MAX_META_COUNT):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < constraints.MAX_META_OVERALL_SIZE - 4 - \
constraints.MAX_META_VALUE_LENGTH:
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Object-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Object-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Object-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
@contextmanager
def controller_context(self, req, *args, **kwargs):
_v, account, container, obj = utils.split_path(req.path, 4, 4, True)
controller = ReplicatedObjectController(
self.app, account, container, obj)
self.app.update_request(req)
self.app.memcache.store = {}
with save_globals():
new_connect = set_http_connect(*args, **kwargs)
yield controller
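            # drain any statuses the test body did not consume; leftovers
            # mean fewer backend requests were made than programmed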
unused_status_list = []
while True:
try:
unused_status_list.append(next(new_connect.code_iter))
except StopIteration:
break
if unused_status_list:
raise self.fail('UN-USED STATUS CODES: %r' %
unused_status_list)
def test_basic_put_with_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_basic_put_with_x_copy_from_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_basic_put_with_x_copy_from_across_container(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c2/o'})
status_list = (200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont conc objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c2/o')
def test_basic_put_with_x_copy_from_across_container_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c2/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c2/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_non_zero_content_length(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5',
'X-Copy-From': 'c/o'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
def test_copy_non_zero_content_length_with_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5',
'X-Copy-From': 'c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
def test_copy_with_slashes_in_x_copy_from(self):
# extra source path parsing
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o/o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_copy_with_slashes_in_x_copy_from_and_account(self):
# extra source path parsing
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o/o2',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_spaces_in_x_copy_from(self):
# space in soure path
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o%20o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o%20o2')
def test_copy_with_spaces_in_x_copy_from_and_account(self):
# space in soure path
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o%20o2',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o%20o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_leading_slash_in_x_copy_from(self):
# repeat tests with leading /
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_copy_with_leading_slash_in_x_copy_from_and_account(self):
# repeat tests with leading /
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_leading_slash_and_slashes_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o/o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_copy_with_leading_slash_and_slashes_in_x_copy_from_acct(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o/o2',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_no_object_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
try:
controller.PUT(req)
except HTTPException as resp:
self.assertEqual(resp.status_int // 100, 4) # client error
else:
raise self.fail('Invalid X-Copy-From did not raise '
'client error')
def test_copy_with_no_object_in_x_copy_from_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c',
'X-Copy-From-Account': 'a'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
try:
controller.PUT(req)
except HTTPException as resp:
self.assertEqual(resp.status_int // 100, 4) # client error
else:
raise self.fail('Invalid X-Copy-From did not raise '
'client error')
def test_copy_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status_list = (200, 200, 503, 503, 503)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 503)
def test_copy_server_error_reading_source_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 503, 503, 503)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 503)
def test_copy_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
# not found
status_list = (200, 200, 404, 404, 404)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
def test_copy_not_found_reading_source_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
# not found
status_list = (200, 200, 200, 200, 404, 404, 404)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
def test_copy_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status_list = (200, 200, 404, 404, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
def test_copy_with_some_missing_sources_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
def test_copy_with_object_metadata(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Object-Meta-Ours': 'okay'})
# test object metadata
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'), 'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
def test_copy_with_object_metadata_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Object-Meta-Ours': 'okay',
'X-Copy-From-Account': 'a'})
# test object metadata
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'), 'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
@_limit_max_file_size
def test_copy_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
# copy-from object is too large to fit in target object
class LargeResponseBody(object):
def __len__(self):
return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
copy_from_obj_body = LargeResponseBody()
status_list = (200, 200, 200, 200, 200)
# acct cont objc objc objc
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
self.app.update_request(req)
self.app.memcache.store = {}
try:
resp = controller.PUT(req)
except HTTPException as resp:
pass
self.assertEqual(resp.status_int, 413)
def test_basic_COPY(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_basic_COPY_account(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c1/o2',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_across_containers(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c2/o'})
status_list = (200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont c2 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_COPY_source_with_slashes_in_name(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_COPY_account_source_with_slashes_in_name(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_COPY_account_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_source_with_slashes_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_COPY_account_source_with_slashes_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o'})
status_list = [] # no requests needed
with self.controller_context(req, *status_list) as controller:
self.assertRaises(HTTPException, controller.COPY, req)
def test_COPY_account_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o',
'Destination-Account': 'a1'})
status_list = [] # no requests needed
with self.controller_context(req, *status_list) as controller:
self.assertRaises(HTTPException, controller.COPY, req)
def test_COPY_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 503, 503, 503)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 503)
def test_COPY_account_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 503, 503, 503)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 503)
def test_COPY_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 404, 404, 404)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 404)
def test_COPY_account_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 404, 404, 404)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 404)
def test_COPY_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 404, 404, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
def test_COPY_account_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
def test_COPY_with_metadata(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o',
'X-Object-Meta-Ours': 'okay'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'),
'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
def test_COPY_account_with_metadata(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'X-Object-Meta-Ours': 'okay',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'),
'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
@_limit_max_file_size
def test_COPY_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
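        # A stand-in body object that merely reports a length larger than
        # MAX_FILE_SIZE; no data is actually allocated or read.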
class LargeResponseBody(object):
def __len__(self):
return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
copy_from_obj_body = LargeResponseBody()
status_list = (200, 200, 200, 200, 200)
# acct cont objc objc objc
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
try:
resp = controller.COPY(req)
except HTTPException as resp:
pass
self.assertEqual(resp.status_int, 413)
@_limit_max_file_size
def test_COPY_account_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
class LargeResponseBody(object):
def __len__(self):
return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
copy_from_obj_body = LargeResponseBody()
status_list = (200, 200, 200, 200, 200)
# acct cont objc objc objc
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
try:
resp = controller.COPY(req)
except HTTPException as resp:
pass
self.assertEqual(resp.status_int, 413)
def test_COPY_newest(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
req.account = 'a'
controller.object_name = 'o'
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
# act cont objc objc objc obj obj obj
timestamps=('1', '1', '1', '3', '2', '4', '4',
'4'))
self.app.memcache.store = {}
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from-last-modified'],
'3')
def test_COPY_account_newest(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
req.account = 'a'
controller.object_name = 'o'
set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
# act cont acct cont objc objc objc obj obj obj
timestamps=('1', '1', '1', '1', '3', '2', '1',
'4', '4', '4'))
self.app.memcache.store = {}
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from-last-modified'],
'3')
def test_COPY_delete_at(self):
with save_globals():
backend_requests = []
def capture_requests(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
backend_requests.append((method, path, headers))
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
self.app.update_request(req)
resp = controller.COPY(req)
self.assertEqual(201, resp.status_int) # sanity
for method, path, given_headers in backend_requests:
if method != 'PUT':
continue
self.assertEqual(given_headers.get('X-Delete-At'),
'9876543210')
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_COPY_account_delete_at(self):
with save_globals():
backend_requests = []
def capture_requests(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
backend_requests.append((method, path, headers))
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
self.app.update_request(req)
resp = controller.COPY(req)
self.assertEqual(201, resp.status_int) # sanity
for method, path, given_headers in backend_requests:
if method != 'PUT':
continue
self.assertEqual(given_headers.get('X-Delete-At'),
'9876543210')
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_chunked_put(self):
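        # Simulates a readable request-body stream of a known total size,
        # the way a chunked-transfer upload appears to the proxy.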
class ChunkedFile(object):
def __init__(self, bytes):
self.bytes = bytes
self.read_bytes = 0
@property
def bytes_left(self):
return self.bytes - self.read_bytes
def read(self, amt=None):
if self.read_bytes >= self.bytes:
raise StopIteration()
if not amt:
amt = self.bytes_left
data = 'a' * min(amt, self.bytes_left)
self.read_bytes += len(data)
return data
with save_globals():
set_http_connect(201, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Transfer-Encoding': 'chunked',
'Content-Type': 'foo/bar'})
req.body_file = ChunkedFile(10)
self.app.memcache.store = {}
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int // 100, 2) # success
            # test 413 entity too large
set_http_connect(201, 201, 201, 201)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Transfer-Encoding': 'chunked',
'Content-Type': 'foo/bar'})
req.body_file = ChunkedFile(11)
self.app.memcache.store = {}
self.app.update_request(req)
with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
res = controller.PUT(req)
self.assertEqual(res.status_int, 413)
@unpatch_policies
def test_chunked_put_bad_version(self):
# Check bad version
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v0 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_path(self):
# Check bad path
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET invalid HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_utf8(self):
# Check invalid utf-8
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a%80 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_path_no_controller(self):
# Check bad path, no controller
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_method(self):
# Check bad method
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('LICK /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 405'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_unhandled_exception(self):
# Check unhandled exception
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv) = _test_servers
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
orig_update_request = prosrv.update_request
def broken_update_request(*args, **kwargs):
raise Exception('fake: this should be printed')
prosrv.update_request = broken_update_request
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 500'
self.assertEqual(headers[:len(exp)], exp)
prosrv.update_request = orig_update_request
@unpatch_policies
def test_chunked_put_head_account(self):
        # HEAD the account; mostly a double check, but it also exercises
        # the part of Application.log_request that 'enforces' a
        # content_length on the response.
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('\r\nContent-Length: 0\r\n' in headers)
@unpatch_policies
def test_chunked_put_utf8_all_the_way_down(self):
# Test UTF-8 Unicode all the way through the system
ustr = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xba \xe1\xbc\xb0\xce' \
'\xbf\xe1\xbd\xbb\xce\x87 \xcf\x84\xe1\xbd\xb0 \xcf' \
'\x80\xe1\xbd\xb1\xce\xbd\xcf\x84\xca\xbc \xe1\xbc' \
'\x82\xce\xbd \xe1\xbc\x90\xce\xbe\xe1\xbd\xb5\xce' \
'\xba\xce\xbf\xce\xb9 \xcf\x83\xce\xb1\xcf\x86\xe1' \
'\xbf\x86.Test'
ustr_short = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xbatest'
# Create ustr container
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# List account with ustr container (test plain)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
containers = fd.read().split('\n')
self.assertTrue(ustr in containers)
# List account with ustr container (test json)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a?format=json HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
listing = json.loads(fd.read())
self.assertTrue(ustr.decode('utf8') in [l['name'] for l in listing])
# List account with ustr container (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a?format=xml HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('<name>%s</name>' % ustr in fd.read())
# Create ustr object with ustr metadata in ustr container
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'X-Object-Meta-%s: %s\r\nContent-Length: 0\r\n\r\n' %
(quote(ustr), quote(ustr), quote(ustr_short),
quote(ustr)))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# List ustr container with ustr object (test plain)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
objects = fd.read().split('\n')
self.assertTrue(ustr in objects)
# List ustr container with ustr object (test json)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?format=json HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' %
quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
listing = json.loads(fd.read())
self.assertEqual(listing[0]['name'], ustr.decode('utf8'))
# List ustr container with ustr object (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?format=xml HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' %
quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('<name>%s</name>' % ustr in fd.read())
# Retrieve ustr object with ustr metadata
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' %
(quote(ustr), quote(ustr)))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('\r\nX-Object-Meta-%s: %s\r\n' %
(quote(ustr_short).lower(), quote(ustr)) in headers)
@unpatch_policies
def test_chunked_put_chunked_put(self):
# Do chunked object put
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# Also happens to assert that x-storage-token is taken as a
# replacement for x-auth-token.
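        # The body below is raw HTTP chunked transfer encoding: each chunk
        # is a hex length, CRLF, the chunk data, CRLF, and a zero-length
        # chunk terminates the body ("oh" + " hai" + "123456789abcdef").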
fd.write('PUT /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\noh\r\n4\r\n hai\r\nf\r\n123456789abcdef\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Ensure we get what we put
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
self.assertEqual(body, 'oh hai123456789abcdef')
@unpatch_policies
def test_conditional_range_get(self):
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis,
obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# make a container
fd = sock.makefile()
fd.write('PUT /v1/a/con HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
exp = 'HTTP/1.1 201'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
# put an object in it
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/con/o HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: 10\r\n'
'Content-Type: text/plain\r\n'
'\r\n'
'abcdefghij\r\n')
fd.flush()
exp = 'HTTP/1.1 201'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
# request with both If-None-Match and Range
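        # a matching If-None-Match takes precedence over Range, so the
        # proxy replies 304 Not Modified rather than 206 Partial Content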
etag = md5("abcdefghij").hexdigest()
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/con/o HTTP/1.1\r\n' +
'Host: localhost\r\n' +
'Connection: close\r\n' +
'X-Storage-Token: t\r\n' +
'If-None-Match: "' + etag + '"\r\n' +
'Range: bytes=3-8\r\n' +
'\r\n')
fd.flush()
exp = 'HTTP/1.1 304'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
def test_mismatched_etags(self):
with save_globals():
# no etag supplied, object servers return success w/ diff values
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
set_http_connect(200, 201, 201, 201,
etags=[None,
'68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c941'])
resp = controller.PUT(req)
self.assertEqual(resp.status_int // 100, 5) # server error
# req supplies etag, object servers return 422 - mismatch
headers = {'Content-Length': '0',
'ETag': '68b329da9893e34099c7d8ad5cb9c940'}
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
set_http_connect(200, 422, 422, 503,
etags=['68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c941',
None,
None])
resp = controller.PUT(req)
self.assertEqual(resp.status_int // 100, 4) # client error
def test_response_get_accept_ranges_header(self):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.GET(req)
self.assertTrue('accept-ranges' in resp.headers)
self.assertEqual(resp.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertTrue('accept-ranges' in resp.headers)
self.assertEqual(resp.headers['accept-ranges'], 'bytes')
def test_GET_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.GET(req)
self.assertTrue(called[0])
def test_HEAD_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.HEAD(req)
self.assertTrue(called[0])
def test_POST_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
self.app.object_post_as_copy = False
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_POST_as_copy_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_PUT_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
def test_COPY_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.COPY(req)
self.assertTrue(called[0])
def test_POST_converts_delete_after_to_delete_at(self):
with save_globals():
self.app.object_post_as_copy = False
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
self.app.memcache.store = {}
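            # freeze time.time so the X-Delete-After -> X-Delete-At
            # conversion below is deterministic and easy to assert on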
orig_time = time.time
try:
t = time.time()
time.time = lambda: t
req = Request.blank('/v1/a/c/o', {},
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '60'})
self.app.update_request(req)
res = controller.POST(req)
self.assertEqual(res.status, '202 Fake')
self.assertEqual(req.headers.get('x-delete-at'),
str(int(t + 60)))
finally:
time.time = orig_time
@unpatch_policies
def test_leak_1(self):
_request_instances = weakref.WeakKeyDictionary()
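        # Track every live Request instance via weak references; a leak
        # shows up as an increased count after the requests complete.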
_orig_init = Request.__init__
def request_init(self, *args, **kwargs):
_orig_init(self, *args, **kwargs)
_request_instances[self] = None
with mock.patch.object(Request, "__init__", request_init):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
obj_len = prosrv.client_chunk_size * 2
# PUT test file
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/test_leak_1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Auth-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (obj_len, 'a' * obj_len))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
            # Remember the Request instance count; make sure the GC is run
            # for Pythons without reference counting.
for i in range(4):
sleep(0) # let eventlet do its thing
gc.collect()
else:
sleep(0)
before_request_instances = len(_request_instances)
# GET test file, but disconnect early
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/test_leak_1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Auth-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
fd.read(1)
fd.close()
sock.close()
# Make sure the GC is run again for pythons without reference
# counting
for i in range(4):
sleep(0) # let eventlet do its thing
gc.collect()
else:
sleep(0)
self.assertEqual(
before_request_instances, len(_request_instances))
def test_OPTIONS(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
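            # Walk through container CORS configurations: no container info
            # at all, an explicit null allow_origin, specific allowed
            # origins, and a '*' wildcard.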
def my_empty_container_info(*args):
return {}
controller.container_info = my_empty_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_empty_origin_container_info(*args):
return {'cors': {'allow_origin': None}}
controller.container_info = my_empty_origin_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_container_info(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar:8080 https://foo.bar',
'max_age': '999',
}
}
controller.container_info = my_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
'https://foo.bar',
resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
7)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank('/v1/a/c/o.jpg', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 7)
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
controller.app.cors_allow_origin = ['http://foo.bar', ]
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
def my_container_info_wildcard(*args):
return {
'cors': {
'allow_origin': '*',
'max_age': '999',
}
}
controller.container_info = my_container_info_wildcard
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('*', resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
7)
self.assertEqual('999', resp.headers['access-control-max-age'])
def test_CORS_valid(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
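            # cors_validation wraps the handler and adds CORS response
            # headers based on container info; with strict_cors_mode off,
            # an unlisted Origin is still echoed back to the client.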
def stubContainerInfo(*args):
return {
'cors': {
'allow_origin': 'http://not.foo.bar'
}
}
controller.container_info = stubContainerInfo
controller.app.strict_cors_mode = False
def objectGET(controller, req):
return Response(headers={
'X-Object-Meta-Color': 'red',
'X-Super-Secret': 'hush',
})
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(objectGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('red', resp.headers['x-object-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertTrue('access-control-expose-headers' in resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set(['cache-control', 'content-language',
'content-type', 'expires', 'last-modified',
'pragma', 'etag', 'x-timestamp',
'x-trans-id', 'x-object-meta-color'])
self.assertEqual(expected_exposed, exposed)
controller.app.strict_cors_mode = True
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(objectGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertTrue('access-control-allow-origin' not in resp.headers)
def test_CORS_valid_with_obj_headers(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar'
}
}
controller.container_info = stubContainerInfo
def objectGET(controller, req):
return Response(headers={
'X-Object-Meta-Color': 'red',
'X-Super-Secret': 'hush',
'Access-Control-Allow-Origin': 'http://obj.origin',
'Access-Control-Expose-Headers': 'x-trans-id'
})
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(objectGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://obj.origin',
resp.headers['access-control-allow-origin'])
self.assertEqual('x-trans-id',
resp.headers['access-control-expose-headers'])
def _gather_x_container_headers(self, controller_call, req, *connect_args,
**kwargs):
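        # Drive the controller call against a fake backend that records,
        # for each backend request, the headers named in header_list
        # (X-Container-* by default); the first two captured requests
        # (the account and container HEADs) are discarded.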
header_list = kwargs.pop('header_list', ['X-Container-Device',
'X-Container-Host',
'X-Container-Partition'])
seen_headers = []
def capture_headers(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
captured = {}
for header in header_list:
captured[header] = headers.get(header)
seen_headers.append(captured)
with save_globals():
self.app.allow_account_management = True
set_http_connect(*connect_args, give_connect=capture_headers,
**kwargs)
resp = controller_call(req)
self.assertEqual(2, resp.status_int // 100) # sanity check
# don't care about the account/container HEADs, so chuck
# the first two requests
return sorted(seen_headers[2:],
key=lambda d: d.get(header_list[0]) or 'z')
def test_PUT_x_container_headers_with_equal_replicas(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_PUT_x_container_headers_with_fewer_container_replicas(self):
self.app.container_ring.set_replicas(2)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': None,
'X-Container-Partition': None,
'X-Container-Device': None}])
def test_PUT_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_POST_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
self.app.object_post_as_copy = False
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/stuff'})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.POST, req,
200, 200, 200, 200, 200) # HEAD HEAD POST POST POST
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_DELETE_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'application/stuff'})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.DELETE, req,
200, 200, 200, 200, 200) # HEAD HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}
])
@mock.patch('time.time', new=lambda: STATIC_TIME)
def test_PUT_x_delete_at_with_fewer_container_replicas(self):
self.app.container_ring.set_replicas(2)
delete_at_timestamp = int(time.time()) + 100000
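        # expiring objects get an entry in a separate expirer container
        # whose name is derived from the delete-at timestamp and the
        # container divisor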
delete_at_container = utils.get_expirer_container(
delete_at_timestamp, self.app.expiring_objects_container_divisor,
'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': '0',
'X-Delete-At': str(delete_at_timestamp)})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
header_list=('X-Delete-At-Host', 'X-Delete-At-Device',
'X-Delete-At-Partition', 'X-Delete-At-Container'))
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': None,
'X-Delete-At-Container': None,
'X-Delete-At-Partition': None,
'X-Delete-At-Device': None}
])
@mock.patch('time.time', new=lambda: STATIC_TIME)
def test_PUT_x_delete_at_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
self.app.expiring_objects_account = 'expires'
self.app.expiring_objects_container_divisor = 60
delete_at_timestamp = int(time.time()) + 100000
delete_at_container = utils.get_expirer_container(
delete_at_timestamp, self.app.expiring_objects_container_divisor,
'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': 0,
'X-Delete-At': str(delete_at_timestamp)})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
header_list=('X-Delete-At-Host', 'X-Delete-At-Device',
'X-Delete-At-Partition', 'X-Delete-At-Container'))
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda,sdd'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': '10.0.0.2:1002',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdc'}
])
class TestECMismatchedFA(unittest.TestCase):
def tearDown(self):
prosrv = _test_servers[0]
# don't leak error limits and poison other tests
prosrv._error_limiting = {}
def test_mixing_different_objects_fragment_archives(self):
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv) = _test_servers
ec_policy = POLICIES[3]
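        # stand-in object-server handler simulating a failed disk (507)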
@public
def bad_disk(req):
return Response(status=507, body="borken")
ensure_container = Request.blank(
"/v1/a/ec-crazytown",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Storage-Policy": "ec", "X-Auth-Token": "t"})
resp = ensure_container.get_response(prosrv)
self.assertTrue(resp.status_int in (201, 202))
obj1 = "first version..."
put_req1 = Request.blank(
"/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req1.body = obj1
obj2 = u"versión segundo".encode("utf-8")
put_req2 = Request.blank(
"/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req2.body = obj2
# pyeclib has checks for unequal-length; we don't want to trip those
self.assertEqual(len(obj1), len(obj2))
# Server obj1 will have the first version of the object (obj2 also
# gets it, but that gets stepped on later)
prosrv._error_limiting = {}
with nested(
mock.patch.object(obj3srv, 'PUT', bad_disk),
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum')):
type(ec_policy).quorum = mock.PropertyMock(return_value=2)
resp = put_req1.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# Servers obj2 and obj3 will have the second version of the object.
prosrv._error_limiting = {}
with nested(
mock.patch.object(obj1srv, 'PUT', bad_disk),
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum')):
type(ec_policy).quorum = mock.PropertyMock(return_value=2)
resp = put_req2.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# A GET that only sees 1 fragment archive should fail
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with nested(
mock.patch.object(obj1srv, 'GET', bad_disk),
mock.patch.object(obj2srv, 'GET', bad_disk)):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
# A GET that sees 2 matching FAs will work
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj1srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, obj2)
# A GET that sees 2 mismatching FAs will fail
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj2srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
class TestObjectECRangedGET(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(
None, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
@classmethod
def setUpClass(cls):
cls.obj_name = 'range-get-test'
cls.tiny_obj_name = 'range-get-test-tiny'
cls.aligned_obj_name = 'range-get-test-aligned'
# Note: only works if called with unpatched policies
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: ec\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
assert headers[:len(exp)] == exp, "container PUT failed"
seg_size = POLICIES.get_by_name("ec").ec_segment_size
cls.seg_size = seg_size
# EC segment size is 4 KiB, hence this gives 4 segments, which we
# then verify with a quick sanity check
cls.obj = ' my hovercraft is full of eels '.join(
str(s) for s in range(431))
assert seg_size * 4 > len(cls.obj) > seg_size * 3, \
"object is wrong number of segments"
cls.tiny_obj = 'tiny, tiny object'
assert len(cls.tiny_obj) < seg_size, "tiny_obj too large"
cls.aligned_obj = "".join(
"abcdEFGHijkl%04d" % x for x in range(512))
assert len(cls.aligned_obj) % seg_size == 0, "aligned obj not aligned"
for obj_name, obj in ((cls.obj_name, cls.obj),
(cls.tiny_obj_name, cls.tiny_obj),
(cls.aligned_obj_name, cls.aligned_obj)):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n%s' % (obj_name, len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"object PUT failed %s" % obj_name
def _get_obj(self, range_value, obj_name=None):
if obj_name is None:
obj_name = self.obj_name
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Range: %s\r\n'
'\r\n' % (obj_name, range_value))
fd.flush()
headers = readuntil2crlfs(fd)
# e.g. "HTTP/1.1 206 Partial Content\r\n..."
status_code = int(headers[9:12])
headers = parse_headers_string(headers)
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
# if we get this wrong, clients will either get truncated data or
# they'll hang waiting for bytes that aren't coming, so it warrants
# being asserted for every test case
if 'Content-Length' in headers:
self.assertEqual(int(headers['Content-Length']), len(gotten_obj))
# likewise, if we say MIME and don't send MIME or vice versa,
# clients will be horribly confused
if headers.get('Content-Type', '').startswith('multipart/byteranges'):
self.assertEqual(gotten_obj[:2], "--")
else:
# In general, this isn't true, as you can start an object with
# "--". However, in this test, we don't start any objects with
# "--", or even include "--" in their contents anywhere.
self.assertNotEqual(gotten_obj[:2], "--")
return (status_code, headers, gotten_obj)
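    # Helper: split a multipart/byteranges body into its individual parts
    # using the stdlib email parser, checking for parse defects.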
def _parse_multipart(self, content_type, body):
parser = email.parser.FeedParser()
parser.feed("Content-Type: %s\r\n\r\n" % content_type)
parser.feed(body)
root_message = parser.close()
self.assertTrue(root_message.is_multipart())
byteranges = root_message.get_payload()
self.assertFalse(root_message.defects)
for i, message in enumerate(byteranges):
self.assertFalse(message.defects, "Part %d had defects" % i)
self.assertFalse(message.is_multipart(),
"Nested multipart at %d" % i)
return byteranges
def test_bogus(self):
status, headers, gotten_obj = self._get_obj("tacos=3-5")
self.assertEqual(status, 200)
self.assertEqual(len(gotten_obj), len(self.obj))
self.assertEqual(gotten_obj, self.obj)
def test_unaligned(self):
# One segment's worth of data, but straddling two segment boundaries
# (so it has data from three segments)
status, headers, gotten_obj = self._get_obj("bytes=3783-7878")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 3783-7878/14513")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.obj[3783:7879])
def test_aligned_left(self):
# First byte is aligned to a segment boundary, last byte is not
status, headers, gotten_obj = self._get_obj("bytes=0-5500")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "5501")
self.assertEqual(headers['Content-Range'], "bytes 0-5500/14513")
self.assertEqual(len(gotten_obj), 5501)
self.assertEqual(gotten_obj, self.obj[:5501])
def test_aligned_range(self):
# Ranged GET that wants exactly one segment
status, headers, gotten_obj = self._get_obj("bytes=4096-8191")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 4096-8191/14513")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.obj[4096:8192])
def test_aligned_range_end(self):
# Ranged GET that wants exactly the last segment
status, headers, gotten_obj = self._get_obj("bytes=12288-14512")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "2225")
self.assertEqual(headers['Content-Range'], "bytes 12288-14512/14513")
self.assertEqual(len(gotten_obj), 2225)
self.assertEqual(gotten_obj, self.obj[12288:])
def test_aligned_range_aligned_obj(self):
# Ranged GET that wants exactly the last segment, which is full-size
status, headers, gotten_obj = self._get_obj("bytes=4096-8191",
self.aligned_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 4096-8191/8192")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.aligned_obj[4096:8192])
def test_byte_0(self):
# Just the first byte, but it's index 0, so that's easy to get wrong
status, headers, gotten_obj = self._get_obj("bytes=0-0")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "1")
self.assertEqual(headers['Content-Range'], "bytes 0-0/14513")
self.assertEqual(gotten_obj, self.obj[0])
def test_unsatisfiable(self):
# Goes just one byte too far off the end of the object, so it's
# unsatisfiable
status, _junk, _junk = self._get_obj(
"bytes=%d-%d" % (len(self.obj), len(self.obj) + 100))
self.assertEqual(status, 416)
def test_off_end(self):
# Ranged GET that's mostly off the end of the object, but overlaps
# it in just the last byte
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) + 100))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '1')
self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
self.assertEqual(gotten_obj, self.obj[-1])
def test_aligned_off_end(self):
        # Ranged GET that starts on a segment boundary but extends well
        # past the end of the object
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (8192, len(self.obj) + 100))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '6321')
self.assertEqual(headers['Content-Range'], 'bytes 8192-14512/14513')
self.assertEqual(gotten_obj, self.obj[8192:])
def test_way_off_end(self):
# Ranged GET that's mostly off the end of the object, but overlaps
# it in just the last byte, and wants multiple segments' worth off
# the end
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) * 1000))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '1')
self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
self.assertEqual(gotten_obj, self.obj[-1])
def test_boundaries(self):
# Wants the last byte of segment 1 + the first byte of segment 2
status, headers, gotten_obj = self._get_obj("bytes=4095-4096")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '2')
self.assertEqual(headers['Content-Range'], 'bytes 4095-4096/14513')
self.assertEqual(gotten_obj, self.obj[4095:4097])
def test_until_end(self):
# Wants the last byte of segment 1 + the rest
status, headers, gotten_obj = self._get_obj("bytes=4095-")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '10418')
self.assertEqual(headers['Content-Range'], 'bytes 4095-14512/14513')
self.assertEqual(gotten_obj, self.obj[4095:])
def test_small_suffix(self):
# Small range-suffix GET: the last 100 bytes (less than one segment)
status, headers, gotten_obj = self._get_obj("bytes=-100")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '100')
self.assertEqual(headers['Content-Range'], 'bytes 14413-14512/14513')
self.assertEqual(len(gotten_obj), 100)
self.assertEqual(gotten_obj, self.obj[-100:])
def test_small_suffix_aligned(self):
# Small range-suffix GET: the last 100 bytes, last segment is
# full-size
status, headers, gotten_obj = self._get_obj("bytes=-100",
self.aligned_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '100')
self.assertEqual(headers['Content-Range'], 'bytes 8092-8191/8192')
self.assertEqual(len(gotten_obj), 100)
def test_suffix_two_segs(self):
# Ask for enough data that we need the last two segments. The last
# segment is short, though, so this ensures we compensate for that.
#
# Note that the total range size is less than one full-size segment.
suffix_len = len(self.obj) % self.seg_size + 1
status, headers, gotten_obj = self._get_obj("bytes=-%d" % suffix_len)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], str(suffix_len))
self.assertEqual(headers['Content-Range'],
'bytes %d-%d/%d' % (len(self.obj) - suffix_len,
len(self.obj) - 1,
len(self.obj)))
self.assertEqual(len(gotten_obj), suffix_len)
def test_large_suffix(self):
# Large range-suffix GET: the last 5000 bytes (more than one segment)
status, headers, gotten_obj = self._get_obj("bytes=-5000")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '5000')
self.assertEqual(headers['Content-Range'], 'bytes 9513-14512/14513')
self.assertEqual(len(gotten_obj), 5000)
self.assertEqual(gotten_obj, self.obj[-5000:])
def test_overlarge_suffix(self):
# The last N+1 bytes of an N-byte object
status, headers, gotten_obj = self._get_obj(
"bytes=-%d" % (len(self.obj) + 1))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '14513')
self.assertEqual(headers['Content-Range'], 'bytes 0-14512/14513')
self.assertEqual(len(gotten_obj), len(self.obj))
self.assertEqual(gotten_obj, self.obj)
def test_small_suffix_tiny_object(self):
status, headers, gotten_obj = self._get_obj(
"bytes=-5", self.tiny_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '5')
self.assertEqual(headers['Content-Range'], 'bytes 12-16/17')
self.assertEqual(gotten_obj, self.tiny_obj[12:])
def test_overlarge_suffix_tiny_object(self):
status, headers, gotten_obj = self._get_obj(
"bytes=-1234567890", self.tiny_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '17')
self.assertEqual(headers['Content-Range'], 'bytes 0-16/17')
self.assertEqual(len(gotten_obj), len(self.tiny_obj))
self.assertEqual(gotten_obj, self.tiny_obj)
def test_multiple_ranges(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,4490-5010", self.obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers["Content-Length"], str(len(gotten_obj)))
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertTrue(boundary is not None)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4490-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4490:5011])
def test_multiple_ranges_overlapping_in_segment(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-9,20-29,40-49,60-69,80-89")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 5)
def test_multiple_ranges_off_end(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,14500-14513") # there is no byte 14513, only 0-14512
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 14500-14512/14513")
def test_multiple_ranges_suffix_off_end(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,-13")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 14500-14512/14513")
def test_multiple_ranges_one_barely_unsatisfiable(self):
        # The range 14515-14520 lies past the end of the 14513-byte object,
        # but when we turn it into a fragment range it falls within the
        # last fragment, so the object servers see something satisfiable.
#
# Basically, we'll get 3 byteranges from the object server, but we
# have to filter out the unsatisfiable one on our own.
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,14515-14520,40-50")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[0].get_payload(), self.obj[0:11])
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 40-50/14513")
self.assertEqual(got_byteranges[1].get_payload(), self.obj[40:51])
def test_multiple_ranges_some_unsatisfiable(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,4090-5010,999999-9999999", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertTrue(boundary is not None)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4090-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4090:5011])
def test_two_ranges_one_unsatisfiable(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,999999-9999999", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
# According to RFC 7233, this could be either a multipart/byteranges
# response with one part or it could be a single-part response (just
# the bytes, no MIME). We're locking it down here: single-part
# response. That's what replicated objects do, and we don't want any
# client-visible differences between EC objects and replicated ones.
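        # 'donuts' is the Content-Type the test object was created with
        # earlier in this class's setup, so seeing it here confirms a plain
        # single-part body rather than a multipart/byteranges wrapper.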
self.assertEqual(content_type, 'donuts')
self.assertEqual(gotten_obj, self.obj[:101])
def test_two_ranges_one_unsatisfiable_same_segment(self):
# Like test_two_ranges_one_unsatisfiable(), but where both ranges
# fall within the same EC segment.
status, headers, gotten_obj = self._get_obj(
"bytes=14500-14510,14520-14530")
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
self.assertEqual(content_type, 'donuts')
self.assertEqual(gotten_obj, self.obj[14500:14511])
def test_multiple_ranges_some_unsatisfiable_out_of_order(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,99999998-99999999,4090-5010", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertTrue(boundary is not None)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4090-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4090:5011])
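# The container tests run against three storage policies: 'zero' (default),
# 'one', and 'two', where 'two' is flagged deprecated so that
# test_convert_policy_to_index can verify deprecated policies are rejected.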
@patch_policies([
StoragePolicy(0, 'zero', True, object_ring=FakeRing(base_port=3000)),
StoragePolicy(1, 'one', False, object_ring=FakeRing(base_port=3000)),
StoragePolicy(2, 'two', False, True, object_ring=FakeRing(base_port=3000))
])
class TestContainerController(unittest.TestCase):
"Test swift.proxy_server.ContainerController"
def setUp(self):
self.app = proxy_server.Application(
None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(base_port=2000),
logger=debug_logger())
def test_convert_policy_to_index(self):
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
expected = {
'zero': 0,
'ZeRo': 0,
'one': 1,
'OnE': 1,
}
for name, index in expected.items():
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': name})
self.assertEqual(controller._convert_policy_to_index(req), index)
# default test
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.assertEqual(controller._convert_policy_to_index(req), None)
# negative test
req = Request.blank('/a/c',
headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': 'nada'})
self.assertRaises(HTTPException, controller._convert_policy_to_index,
req)
# storage policy two is deprecated
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': 'two'})
self.assertRaises(HTTPException, controller._convert_policy_to_index,
req)
def test_convert_index_to_name(self):
policy = random.choice(list(POLICIES))
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 200,
headers={'X-Backend-Storage-Policy-Index': int(policy)},
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Storage-Policy'], policy.name)
def test_no_convert_index_to_name_when_container_not_found(self):
policy = random.choice(list(POLICIES))
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 404, 404, 404,
headers={'X-Backend-Storage-Policy-Index':
int(policy)}) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Storage-Policy'], None)
def test_error_convert_index_to_name(self):
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 200,
headers={'X-Backend-Storage-Policy-Index': '-1'}) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Storage-Policy'], None)
error_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(2, len(error_lines))
for msg in error_lines:
expected = "Could not translate " \
"X-Backend-Storage-Policy-Index ('-1')"
self.assertTrue(expected in msg)
def test_transfer_headers(self):
src_headers = {'x-remove-versions-location': 'x',
'x-container-read': '*:user',
'x-remove-container-sync-key': 'x'}
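        # transfer_headers should pass x-container-read through unchanged and
        # turn each x-remove-* header into the corresponding header with an
        # empty value, which is how removal is signalled to the backend.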
dst_headers = {'x-versions-location': 'backup'}
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-versions-location': '',
'x-container-read': '*:user',
'x-container-sync-key': ''}
self.assertEqual(dst_headers, expected_headers)
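    # Helper: mock the backend responses with the given status codes, issue
    # the request through the given controller method (once for /v1/a/c and
    # once with a trailing slash), and assert the proxy's final status code.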
def assert_status_map(self, method, statuses, expected,
raise_exc=False, missing_container=False):
with save_globals():
kwargs = {}
if raise_exc:
kwargs['raise_exc'] = raise_exc
kwargs['missing_container'] = missing_container
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
def test_HEAD_GET(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def test_status_map(statuses, expected,
c_expected=None, a_expected=None, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
self.app.update_request(req)
res = controller.HEAD(req)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
if expected < 400:
self.assertTrue('x-works' in res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
if c_expected:
self.assertTrue('swift.container/a/c' in res.environ)
self.assertEqual(
res.environ['swift.container/a/c']['status'],
c_expected)
else:
self.assertTrue('swift.container/a/c' not in res.environ)
if a_expected:
self.assertTrue('swift.account/a' in res.environ)
self.assertEqual(res.environ['swift.account/a']['status'],
a_expected)
else:
self.assertTrue('swift.account/a' not in res.environ)
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
if expected < 400:
self.assertTrue('x-works' in res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
if c_expected:
self.assertTrue('swift.container/a/c' in res.environ)
self.assertEqual(
res.environ['swift.container/a/c']['status'],
c_expected)
else:
self.assertTrue('swift.container/a/c' not in res.environ)
if a_expected:
self.assertTrue('swift.account/a' in res.environ)
self.assertEqual(res.environ['swift.account/a']['status'],
a_expected)
else:
self.assertTrue('swift.account/a' not in res.environ)
            # In all the following tests the account lookup returns 200 and is
            # cached; caching of the container response varies per case.
            # return 200 and cache 200 for the container
test_status_map((200, 200, 404, 404), 200, 200, 200)
test_status_map((200, 200, 500, 404), 200, 200, 200)
# return 304 don't cache container
test_status_map((200, 304, 500, 404), 304, None, 200)
# return 404 and cache 404 for container
test_status_map((200, 404, 404, 404), 404, 404, 200)
test_status_map((200, 404, 404, 500), 404, 404, 200)
# return 503, don't cache container
test_status_map((200, 500, 500, 500), 503, None, 200)
self.assertFalse(self.app.account_autocreate)
            # In all the following tests the account lookup returns 404 and is
            # cached; return 404 (as the account is not found) and don't cache
            # the container
test_status_map((404, 404, 404), 404, None, 404)
# This should make no difference
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, None, 404)
def test_PUT_policy_headers(self):
backend_requests = []
def capture_requests(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
if method == 'PUT':
backend_requests.append(headers)
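        # For each policy: a container PUT naming a deprecated policy must be
        # rejected with 400 before any backend request is made; otherwise the
        # PUT succeeds and every backend request carries either the requested
        # X-Backend-Storage-Policy-Index or, when no policy was requested,
        # the X-Backend-Storage-Policy-Default header.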
def test_policy(requested_policy):
with save_globals():
mock_conn = set_http_connect(200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/test', method='PUT',
headers={'Content-Length': 0})
if requested_policy:
expected_policy = requested_policy
req.headers['X-Storage-Policy'] = policy.name
else:
expected_policy = POLICIES.default
res = req.get_response(self.app)
if expected_policy.is_deprecated:
self.assertEqual(res.status_int, 400)
self.assertEqual(0, len(backend_requests))
expected = 'is deprecated'
self.assertTrue(expected in res.body,
'%r did not include %r' % (
res.body, expected))
return
self.assertEqual(res.status_int, 201)
self.assertEqual(
expected_policy.object_ring.replicas,
len(backend_requests))
for headers in backend_requests:
if not requested_policy:
self.assertFalse('X-Backend-Storage-Policy-Index' in
headers)
self.assertTrue(
'X-Backend-Storage-Policy-Default' in headers)
self.assertEqual(
int(expected_policy),
int(headers['X-Backend-Storage-Policy-Default']))
else:
self.assertTrue('X-Backend-Storage-Policy-Index' in
headers)
self.assertEqual(int(headers
['X-Backend-Storage-Policy-Index']),
int(policy))
# make sure all mocked responses are consumed
self.assertRaises(StopIteration, mock_conn.code_iter.next)
test_policy(None) # no policy header
for policy in POLICIES:
backend_requests = [] # reset backend requests
test_policy(policy)
def test_PUT(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 201, 201, 201), 201, missing_container=True)
test_status_map((200, 201, 201, 500), 201, missing_container=True)
test_status_map((200, 204, 404, 404), 404, missing_container=True)
test_status_map((200, 204, 500, 404), 503, missing_container=True)
self.assertFalse(self.app.account_autocreate)
test_status_map((404, 404, 404), 404, missing_container=True)
self.app.account_autocreate = True
# fail to retrieve account info
test_status_map(
(503, 503, 503), # account_info fails on 503
404, missing_container=True)
# account fail after creation
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
404, 404, 404), # account_info fail
404, missing_container=True)
test_status_map(
(503, 503, 404, # account_info fails on 404
503, 503, 503, # PUT account
503, 503, 404), # account_info fail
404, missing_container=True)
# put fails
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
503, 503, 201), # put container fail
503, missing_container=True)
# all goes according to plan
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
201, 201, 201), # put container success
201, missing_container=True)
test_status_map(
(503, 404, 404, # account_info fails on 404
503, 201, 201, # PUT account
503, 200, # account_info success
503, 201, 201), # put container success
201, missing_container=True)
def test_PUT_autocreate_account_with_sysmeta(self):
# x-account-sysmeta headers in a container PUT request should be
# transferred to the account autocreate PUT request
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, headers=None, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {}, headers=headers)
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
self.app.account_autocreate = True
calls = []
callback = _make_callback_func(calls)
key, value = 'X-Account-Sysmeta-Blah', 'something'
headers = {key: value}
# all goes according to plan
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
201, 201, 201), # put container success
201, missing_container=True,
headers=headers,
give_connect=callback)
self.assertEqual(10, len(calls))
for call in calls[3:6]:
self.assertEqual('/account', call['path'])
self.assertTrue(key in call['headers'],
'%s call, key %s missing in headers %s' %
(call['method'], key, call['headers']))
self.assertEqual(value, call['headers'][key])
def test_POST(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
req.content_length = 0
self.app.update_request(req)
res = controller.POST(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 201, 201, 201), 201, missing_container=True)
test_status_map((200, 201, 201, 500), 201, missing_container=True)
test_status_map((200, 204, 404, 404), 404, missing_container=True)
test_status_map((200, 204, 500, 404), 503, missing_container=True)
self.assertFalse(self.app.account_autocreate)
test_status_map((404, 404, 404), 404, missing_container=True)
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, missing_container=True)
def test_PUT_max_containers_per_account(self):
with save_globals():
self.app.max_containers_per_account = 12346
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
self.app.max_containers_per_account = 12345
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, 'account',
'container_new')
self.assert_status_map(controller.PUT, (200, 404, 404, 404), 403,
missing_container=True)
self.app.max_containers_per_account = 12345
self.app.max_containers_whitelist = ['account']
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
def test_PUT_max_container_name_length(self):
with save_globals():
limit = constraints.MAX_CONTAINER_NAME_LENGTH
controller = proxy_server.ContainerController(self.app, 'account',
'1' * limit)
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, 'account',
'2' * (limit + 1))
self.assert_status_map(controller.PUT, (201, 201, 201), 400,
missing_container=True)
def test_PUT_connect_exceptions(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT, (200, 201, 201, -1), 201,
missing_container=True)
self.assert_status_map(controller.PUT, (200, 201, -1, -1), 503,
missing_container=True)
self.assert_status_map(controller.PUT, (200, 503, 503, -1), 503,
missing_container=True)
def test_acc_missing_returns_404(self):
for meth in ('DELETE', 'PUT'):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
self.app._error_limiting = {}
controller = proxy_server.ContainerController(self.app,
'account',
'container')
if meth == 'PUT':
set_http_connect(200, 200, 200, 200, 200, 200,
missing_container=True)
else:
set_http_connect(200, 200, 200, 200)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
self.app.update_request(req)
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 200)
set_http_connect(404, 404, 404, 200, 200, 200)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, 404)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, raise_exc=True)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(self.app, dev,
self.app.error_suppression_limit + 1,
time.time())
set_http_connect(200, 200, 200, 200, 200, 200)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
def test_put_locking(self):
class MockMemcache(FakeMemcache):
def __init__(self, allow_lock=None):
self.allow_lock = allow_lock
super(MockMemcache, self).__init__()
@contextmanager
def soft_lock(self, key, timeout=0, retries=5):
if self.allow_lock:
yield True
else:
raise NotImplementedError
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.app.memcache = MockMemcache(allow_lock=True)
set_http_connect(200, 201, 201, 201,
missing_container=True)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 201)
def test_error_limiting(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
container_ring = controller.app.container_ring
controller.app.sort_nodes = lambda l: l
self.assert_status_map(controller.HEAD, (200, 503, 200, 200), 200,
missing_container=False)
self.assertEqual(
node_error_count(controller.app, container_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, container_ring.devs[0])
is not None)
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD,
(200, 503, 503, 503), 503)
self.assertEqual(
node_error_count(controller.app, container_ring.devs[0]),
self.app.error_suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 503)
self.assertTrue(
node_last_error(controller.app, container_ring.devs[0])
is not None)
self.assert_status_map(controller.PUT, (200, 201, 201, 201), 503,
missing_container=True)
self.assert_status_map(controller.DELETE,
(200, 204, 204, 204), 503)
self.app.error_suppression_interval = -300
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 200)
self.assert_status_map(controller.DELETE, (200, 204, 204, 204),
404, raise_exc=True)
def test_DELETE(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.DELETE,
(200, 204, 204, 204), 204)
self.assert_status_map(controller.DELETE,
(200, 204, 204, 503), 204)
self.assert_status_map(controller.DELETE,
(200, 204, 503, 503), 503)
self.assert_status_map(controller.DELETE,
(200, 204, 404, 404), 404)
self.assert_status_map(controller.DELETE,
(200, 404, 404, 404), 404)
self.assert_status_map(controller.DELETE,
(200, 204, 503, 404), 503)
self.app.memcache = FakeMemcacheReturnsNone()
# 200: Account check, 404x3: Container check
self.assert_status_map(controller.DELETE,
(200, 404, 404, 404), 404)
def test_response_get_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c?format=json')
self.app.update_request(req)
res = controller.GET(req)
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c?format=json')
self.app.update_request(req)
res = controller.HEAD(req)
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_PUT_metadata(self):
self.metadata_helper('PUT')
def test_POST_metadata(self):
self.metadata_helper('POST')
def metadata_helper(self, method):
for test_header, test_value in (
('X-Container-Meta-TestHeader', 'TestValue'),
('X-Container-Meta-TestHeader', ''),
('X-Remove-Container-Meta-TestHeader', 'anything'),
('X-Container-Read', '.r:*'),
('X-Remove-Container-Read', 'anything'),
('X-Container-Write', 'anyone'),
('X-Remove-Container-Write', 'anything')):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c':
find_header = test_header
find_value = test_value
if find_header.lower().startswith('x-remove-'):
find_header = \
find_header.lower().replace('-remove', '', 1)
find_value = ''
for k, v in headers.items():
if k.lower() == find_header.lower() and \
v == find_value:
break
else:
test_errors.append('%s: %s not in %s' %
(find_header, find_value, headers))
with save_globals():
controller = \
proxy_server.ContainerController(self.app, 'a', 'c')
set_http_connect(200, 201, 201, 201, give_connect=test_connect)
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': method, 'swift_owner': True},
headers={test_header: test_value})
self.app.update_request(req)
getattr(controller, method)(req)
self.assertEqual(test_errors, [])
def test_PUT_bad_metadata(self):
self.bad_metadata_helper('PUT')
def test_POST_bad_metadata(self):
self.bad_metadata_helper('POST')
def bad_metadata_helper(self, method):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
set_http_connect(200, 201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < (constraints.MAX_META_OVERALL_SIZE - 4
- constraints.MAX_META_VALUE_LENGTH):
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Container-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Container-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
def test_POST_calls_clean_acl(self):
called = [False]
def clean_acl(header, value):
called[0] = True
raise ValueError('fake error')
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Container-Read': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
called[0] = False
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Container-Write': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_PUT_calls_clean_acl(self):
called = [False]
def clean_acl(header, value):
called[0] = True
raise ValueError('fake error')
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Container-Read': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
called[0] = False
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Container-Write': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
def test_GET_no_content(self):
with save_globals():
set_http_connect(200, 204, 204, 204)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c')
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status_int, 204)
self.assertEqual(
res.environ['swift.container/a/c']['status'], 204)
self.assertEqual(res.content_length, 0)
self.assertTrue('transfer-encoding' not in res.headers)
def test_GET_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.environ['swift.container/a/c']['status'], 201)
self.assertTrue(called[0])
def test_HEAD_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.HEAD(req)
self.assertTrue(called[0])
def test_unauthorized_requests_when_account_not_found(self):
# verify unauthorized container requests always return response
# from swift.authorize
called = [0, 0]
def authorize(req):
called[0] += 1
return HTTPUnauthorized(request=req)
def account_info(*args):
called[1] += 1
return None, None, None
def _do_test(method):
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = app.handle_request(req)
return res
for method in ('PUT', 'POST', 'DELETE'):
# no delay_denial on method, expect one call to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(401, res.status_int)
self.assertEqual([1, 0], called)
for method in ('HEAD', 'GET'):
# delay_denial on method, expect two calls to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(401, res.status_int)
self.assertEqual([2, 1], called)
def test_authorized_requests_when_account_not_found(self):
# verify authorized container requests always return 404 when
# account not found
called = [0, 0]
def authorize(req):
called[0] += 1
def account_info(*args):
called[1] += 1
return None, None, None
def _do_test(method):
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = app.handle_request(req)
return res
for method in ('PUT', 'POST', 'DELETE', 'HEAD', 'GET'):
# expect one call to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(404, res.status_int)
self.assertEqual([1, 1], called)
def test_OPTIONS_get_info_drops_origin(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
count = [0]
def my_get_info(app, env, account, container=None,
ret_not_found=False, swift_source=None):
if count[0] > 11:
return {}
count[0] += 1
if not container:
return {'some': 'stuff'}
return proxy_base.was_get_info(
app, env, account, container, ret_not_found, swift_source)
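            # Keep a reference to the real get_info so the stub can delegate
            # to it; the counter caps delegation so a regression that calls
            # get_info too many times trips the assertion below.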
proxy_base.was_get_info = proxy_base.get_info
with mock.patch.object(proxy_base, 'get_info', my_get_info):
proxy_base.get_info = my_get_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
controller.OPTIONS(req)
self.assertTrue(count[0] < 11)
def test_OPTIONS(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def my_empty_container_info(*args):
return {}
controller.container_info = my_empty_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_empty_origin_container_info(*args):
return {'cors': {'allow_origin': None}}
controller.container_info = my_empty_origin_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_container_info(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar:8080 https://foo.bar',
'max_age': '999',
}
}
controller.container_info = my_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
'https://foo.bar',
resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
6)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 6)
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
controller.app.cors_allow_origin = ['http://foo.bar', ]
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
def my_container_info_wildcard(*args):
return {
'cors': {
'allow_origin': '*',
'max_age': '999',
}
}
controller.container_info = my_container_info_wildcard
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('*', resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
6)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Headers':
'x-foo, x-bar, x-auth-token',
'Access-Control-Request-Method': 'GET'}
)
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
sortHeaderNames('x-foo, x-bar, x-auth-token'),
sortHeaderNames(resp.headers['access-control-allow-headers']))
def test_CORS_valid(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def stubContainerInfo(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar'
}
}
controller.container_info = stubContainerInfo
def containerGET(controller, req):
return Response(headers={
'X-Container-Meta-Color': 'red',
'X-Super-Secret': 'hush',
})
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(containerGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('red', resp.headers['x-container-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertTrue('access-control-expose-headers' in resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set(['cache-control', 'content-language',
'content-type', 'expires', 'last-modified',
'pragma', 'etag', 'x-timestamp',
'x-trans-id', 'x-container-meta-color'])
self.assertEqual(expected_exposed, exposed)
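    # Helper: run a container PUT/DELETE with mocked backends and capture the
    # X-Account-Host/Partition/Device headers that each backend container
    # request received, sorted by host (the initial account HEAD is dropped).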
def _gather_x_account_headers(self, controller_call, req, *connect_args,
**kwargs):
seen_headers = []
to_capture = ('X-Account-Partition', 'X-Account-Host',
'X-Account-Device')
def capture_headers(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
captured = {}
for header in to_capture:
captured[header] = headers.get(header)
seen_headers.append(captured)
with save_globals():
self.app.allow_account_management = True
set_http_connect(*connect_args, give_connect=capture_headers,
**kwargs)
resp = controller_call(req)
self.assertEqual(2, resp.status_int // 100) # sanity check
# don't care about the account HEAD, so throw away the
# first element
return sorted(seen_headers[1:],
key=lambda d: d['X-Account-Host'] or 'Z')
def test_PUT_x_account_headers_with_fewer_account_replicas(self):
self.app.account_ring.set_replicas(2)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.PUT, req,
200, 201, 201, 201) # HEAD PUT PUT PUT
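        # With only two account replicas for three container nodes, the third
        # container node has no account replica to update, so its
        # X-Account-* headers are None.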
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
'X-Account-Device': None}
])
def test_PUT_x_account_headers_with_more_account_replicas(self):
self.app.account_ring.set_replicas(4)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.PUT, req,
200, 201, 201, 201) # HEAD PUT PUT PUT
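        # With four account replicas spread over three container nodes, the
        # first container node is told about two account hosts/devices
        # (comma-separated) and the others about one each.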
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
def test_DELETE_x_account_headers_with_fewer_account_replicas(self):
self.app.account_ring.set_replicas(2)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.DELETE, req,
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
'X-Account-Device': None}
])
def test_DELETE_x_account_headers_with_more_account_replicas(self):
self.app.account_ring.set_replicas(4)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.DELETE, req,
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
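    # The following two tests verify that the proxy stamps every backend
    # container PUT/DELETE with one shared X-Timestamp in Swift's normalized
    # "<ten digits>.<five digits>" format.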
def test_PUT_backed_x_timestamp_header(self):
timestamps = []
def capture_timestamps(*args, **kwargs):
headers = kwargs['headers']
timestamps.append(headers.get('X-Timestamp'))
req = Request.blank('/v1/a/c', method='PUT', headers={'': ''})
with save_globals():
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
# sanity
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
                self.assertTrue(re.match(r'[0-9]{10}\.[0-9]{5}', timestamp))
def test_DELETE_backed_x_timestamp_header(self):
timestamps = []
def capture_timestamps(*args, **kwargs):
headers = kwargs['headers']
timestamps.append(headers.get('X-Timestamp'))
req = Request.blank('/v1/a/c', method='DELETE', headers={'': ''})
self.app.update_request(req)
with save_globals():
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
# sanity
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
                self.assertTrue(re.match(r'[0-9]{10}\.[0-9]{5}', timestamp))
def test_node_read_timeout_retry_to_container(self):
with save_globals():
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'GET'})
self.app.node_timeout = 0.1
set_http_connect(200, 200, 200, body='abcdef', slow=[1.0, 1.0])
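            # The mocked backends are slower than the 0.1s node timeout, so
            # even after the proxy retries the container GET, reading
            # resp.body is expected to raise ChunkReadTimeout.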
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
def assert_status_map(self, method, statuses, expected, env_expected=None,
headers=None, **kwargs):
headers = headers or {}
with save_globals():
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a', {}, headers=headers)
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
if env_expected:
self.assertEqual(res.environ['swift.account/a']['status'],
env_expected)
set_http_connect(*statuses)
req = Request.blank('/v1/a/', {})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
if env_expected:
self.assertEqual(res.environ['swift.account/a']['status'],
env_expected)
def test_OPTIONS(self):
with save_globals():
self.app.allow_account_management = False
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 4)
# Test a CORS OPTIONS request (i.e. including Origin and
# Access-Control-Request-Method headers)
self.app.allow_account_management = False
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank(
'/v1/account', {'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 4)
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 6)
def test_GET(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
# GET returns after the first successful call to an Account Server
self.assert_status_map(controller.GET, (200,), 200, 200)
self.assert_status_map(controller.GET, (503, 200), 200, 200)
self.assert_status_map(controller.GET, (503, 503, 200), 200, 200)
self.assert_status_map(controller.GET, (204,), 204, 204)
self.assert_status_map(controller.GET, (503, 204), 204, 204)
self.assert_status_map(controller.GET, (503, 503, 204), 204, 204)
self.assert_status_map(controller.GET, (404, 200), 200, 200)
self.assert_status_map(controller.GET, (404, 404, 200), 200, 200)
self.assert_status_map(controller.GET, (404, 503, 204), 204, 204)
            # If the account servers fail and autocreate is False, return the
            # majority response
self.assert_status_map(controller.GET, (404, 404, 404), 404, 404)
self.assert_status_map(controller.GET, (404, 404, 503), 404, 404)
self.assert_status_map(controller.GET, (404, 503, 503), 503)
self.app.memcache = FakeMemcacheReturnsNone()
self.assert_status_map(controller.GET, (404, 404, 404), 404, 404)
def test_GET_autocreate(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
self.assertFalse(self.app.account_autocreate)
            # Repeat the test with autocreate = False and all nodes returning 404
self.assert_status_map(controller.GET,
(404, 404, 404), 404)
self.assert_status_map(controller.GET,
(404, 503, 404), 404)
            # When autocreate is True, if none of the nodes responds 2xx but a
            # quorum of the nodes responded 404, ALL nodes are asked to create
            # the account. If successful, the GET request is repeated.
controller.app.account_autocreate = True
self.assert_status_map(controller.GET,
(404, 404, 404), 204)
self.assert_status_map(controller.GET,
(404, 503, 404), 204)
# We always return 503 if no majority between 4xx, 3xx or 2xx found
self.assert_status_map(controller.GET,
(500, 500, 400), 503)
def test_HEAD(self):
# Same behaviour as GET
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.HEAD, (200,), 200, 200)
self.assert_status_map(controller.HEAD, (503, 200), 200, 200)
self.assert_status_map(controller.HEAD, (503, 503, 200), 200, 200)
self.assert_status_map(controller.HEAD, (204,), 204, 204)
self.assert_status_map(controller.HEAD, (503, 204), 204, 204)
self.assert_status_map(controller.HEAD, (204, 503, 503), 204, 204)
self.assert_status_map(controller.HEAD, (204,), 204, 204)
self.assert_status_map(controller.HEAD, (404, 404, 404), 404, 404)
self.assert_status_map(controller.HEAD, (404, 404, 200), 200, 200)
self.assert_status_map(controller.HEAD, (404, 200), 200, 200)
self.assert_status_map(controller.HEAD, (404, 404, 503), 404, 404)
self.assert_status_map(controller.HEAD, (404, 503, 503), 503)
self.assert_status_map(controller.HEAD, (404, 503, 204), 204, 204)
def test_HEAD_autocreate(self):
# Same behaviour as GET
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.HEAD,
(404, 404, 404), 404)
controller.app.account_autocreate = True
self.assert_status_map(controller.HEAD,
(404, 404, 404), 204)
self.assert_status_map(controller.HEAD,
(500, 404, 404), 204)
# We always return 503 if no majority between 4xx, 3xx or 2xx found
self.assert_status_map(controller.HEAD,
(500, 500, 400), 503)
def test_POST_autocreate(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
# first test with autocreate being False
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.POST,
(404, 404, 404), 404)
            # next turn it on and test the account being created, then updated
controller.app.account_autocreate = True
self.assert_status_map(
controller.POST,
(404, 404, 404, 202, 202, 202, 201, 201, 201), 201)
            # account_info, PUT account, POST account
self.assert_status_map(
controller.POST,
(404, 404, 503, 201, 201, 503, 204, 204, 504), 204)
# what if create fails
self.assert_status_map(
controller.POST,
(404, 404, 404, 403, 403, 403, 400, 400, 400), 400)
def test_POST_autocreate_with_sysmeta(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
# first test with autocreate being False
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.POST,
(404, 404, 404), 404)
            # next turn it on and test the account being created, then updated
controller.app.account_autocreate = True
calls = []
callback = _make_callback_func(calls)
key, value = 'X-Account-Sysmeta-Blah', 'something'
headers = {key: value}
self.assert_status_map(
controller.POST,
(404, 404, 404, 202, 202, 202, 201, 201, 201), 201,
                # POST, autocreate PUT, POST again
headers=headers,
give_connect=callback)
self.assertEqual(9, len(calls))
for call in calls:
self.assertTrue(key in call['headers'],
'%s call, key %s missing in headers %s' %
(call['method'], key, call['headers']))
self.assertEqual(value, call['headers'][key])
def test_connection_refused(self):
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1 # can't connect on this port
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 503)
def test_other_socket_error(self):
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = -1 # invalid port number
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 503)
def test_response_get_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/a?format=json')
self.app.update_request(req)
res = controller.GET(req)
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/a?format=json')
self.app.update_request(req)
res = controller.HEAD(req)
res.body
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_PUT(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 405)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 201)
test_status_map((201, 201, 500), 201)
test_status_map((201, 500, 500), 503)
test_status_map((204, 500, 404), 503)
def test_PUT_max_account_name_length(self):
with save_globals():
self.app.allow_account_management = True
limit = constraints.MAX_ACCOUNT_NAME_LENGTH
controller = proxy_server.AccountController(self.app, '1' * limit)
self.assert_status_map(controller.PUT, (201, 201, 201), 201)
controller = proxy_server.AccountController(
self.app, '2' * (limit + 1))
self.assert_status_map(controller.PUT, (201, 201, 201), 400)
def test_PUT_connect_exceptions(self):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.PUT, (201, 201, -1), 201)
self.assert_status_map(controller.PUT, (201, -1, -1), 503)
self.assert_status_map(controller.PUT, (503, 503, -1), 503)
def test_PUT_status(self):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.PUT, (201, 201, 202), 202)
def test_PUT_metadata(self):
self.metadata_helper('PUT')
def test_POST_metadata(self):
self.metadata_helper('POST')
def metadata_helper(self, method):
for test_header, test_value in (
('X-Account-Meta-TestHeader', 'TestValue'),
('X-Account-Meta-TestHeader', ''),
('X-Remove-Account-Meta-TestHeader', 'anything')):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a':
find_header = test_header
find_value = test_value
if find_header.lower().startswith('x-remove-'):
find_header = \
find_header.lower().replace('-remove', '', 1)
find_value = ''
for k, v in headers.items():
if k.lower() == find_header.lower() and \
v == find_value:
break
else:
test_errors.append('%s: %s not in %s' %
(find_header, find_value, headers))
with save_globals():
self.app.allow_account_management = True
controller = \
proxy_server.AccountController(self.app, 'a')
set_http_connect(201, 201, 201, give_connect=test_connect)
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': method},
headers={test_header: test_value})
self.app.update_request(req)
getattr(controller, method)(req)
self.assertEqual(test_errors, [])
def test_PUT_bad_metadata(self):
self.bad_metadata_helper('PUT')
def test_POST_bad_metadata(self):
self.bad_metadata_helper('POST')
def bad_metadata_helper(self, method):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'a')
set_http_connect(200, 201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < (constraints.MAX_META_OVERALL_SIZE - 4
- constraints.MAX_META_VALUE_LENGTH):
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Account-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Account-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Account-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
def test_DELETE(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a', {'REQUEST_METHOD': 'DELETE'})
req.content_length = 0
self.app.update_request(req)
res = controller.DELETE(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 405)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 201)
test_status_map((201, 201, 500), 201)
test_status_map((201, 500, 500), 503)
test_status_map((204, 500, 404), 503)
def test_DELETE_with_query_string(self):
# Extra safety in case someone typos a query string for an
# account-level DELETE request that was really meant to be caught by
# some middleware.
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a?whoops',
environ={'REQUEST_METHOD': 'DELETE'})
req.content_length = 0
self.app.update_request(req)
res = controller.DELETE(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 400)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 400)
test_status_map((201, 201, 500), 400)
test_status_map((201, 500, 500), 400)
test_status_map((204, 500, 404), 400)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountControllerFakeGetResponse(unittest.TestCase):
"""
Test all the faked-out GET responses for accounts that don't exist. They
have to match the responses for empty accounts that really exist.
"""
def setUp(self):
conf = {'account_autocreate': 'yes'}
self.app = proxy_server.Application(conf, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
self.app.memcache = FakeMemcacheReturnsNone()
def test_GET_autocreate_accept_json(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank(
'/v1/a', headers={'Accept': 'application/json'},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/json; charset=utf-8',
resp.headers['Content-Type'])
self.assertEqual("[]", resp.body)
def test_GET_autocreate_format_json(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=json',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=json'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/json; charset=utf-8',
resp.headers['Content-Type'])
self.assertEqual("[]", resp.body)
def test_GET_autocreate_accept_xml(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "text/xml"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('text/xml; charset=utf-8',
resp.headers['Content-Type'])
empty_xml_listing = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<account name="a">\n</account>')
self.assertEqual(empty_xml_listing, resp.body)
def test_GET_autocreate_format_xml(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=xml',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=xml'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/xml; charset=utf-8',
resp.headers['Content-Type'])
empty_xml_listing = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<account name="a">\n</account>')
self.assertEqual(empty_xml_listing, resp.body)
def test_GET_autocreate_accept_unknown(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "mystery/meat"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(406, resp.status_int)
def test_GET_autocreate_format_invalid_utf8(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=\xff\xfe',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=\xff\xfe'})
resp = req.get_response(self.app)
self.assertEqual(400, resp.status_int)
def test_account_acl_header_access(self):
acl = {
'admin': ['AUTH_alice'],
'read-write': ['AUTH_bob'],
'read-only': ['AUTH_carol'],
}
prefix = get_sys_meta_prefix('account')
privileged_headers = {(prefix + 'core-access-control'): format_acl(
version=2, acl_dict=acl)}
app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
with save_globals():
# Mock account server will provide privileged information (ACLs)
set_http_connect(200, 200, 200, headers=privileged_headers)
req = Request.blank('/v1/a', environ={'REQUEST_METHOD': 'GET'})
resp = app.handle_request(req)
# Not a swift_owner -- ACLs should NOT be in response
header = 'X-Account-Access-Control'
self.assertTrue(header not in resp.headers, '%r was in %r' % (
header, resp.headers))
# Same setup -- mock acct server will provide ACLs
set_http_connect(200, 200, 200, headers=privileged_headers)
req = Request.blank('/v1/a', environ={'REQUEST_METHOD': 'GET',
'swift_owner': True})
resp = app.handle_request(req)
# For a swift_owner, the ACLs *should* be in response
self.assertTrue(header in resp.headers, '%r not in %r' % (
header, resp.headers))
def test_account_acls_through_delegation(self):
# Define a way to grab the requests sent out from the AccountController
# to the Account Server, and a way to inject responses we'd like the
# Account Server to return.
resps_to_send = []
@contextmanager
def patch_account_controller_method(verb):
old_method = getattr(proxy_server.AccountController, verb)
new_method = lambda self, req, *_, **__: resps_to_send.pop(0)
try:
setattr(proxy_server.AccountController, verb, new_method)
yield
finally:
setattr(proxy_server.AccountController, verb, old_method)
def make_test_request(http_method, swift_owner=True):
env = {
'REQUEST_METHOD': http_method,
'swift_owner': swift_owner,
}
acl = {
'admin': ['foo'],
'read-write': ['bar'],
'read-only': ['bas'],
}
headers = {} if http_method in ('GET', 'HEAD') else {
'x-account-access-control': format_acl(version=2, acl_dict=acl)
}
return Request.blank('/v1/a', environ=env, headers=headers)
# Our AccountController will invoke methods to communicate with the
# Account Server, and they will return responses like these:
def make_canned_response(http_method):
acl = {
'admin': ['foo'],
'read-write': ['bar'],
'read-only': ['bas'],
}
headers = {'x-account-sysmeta-core-access-control': format_acl(
version=2, acl_dict=acl)}
canned_resp = Response(headers=headers)
canned_resp.environ = {
'PATH_INFO': '/acct',
'REQUEST_METHOD': http_method,
}
resps_to_send.append(canned_resp)
app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
app.allow_account_management = True
ext_header = 'x-account-access-control'
with patch_account_controller_method('GETorHEAD_base'):
# GET/HEAD requests should remap sysmeta headers from acct server
for verb in ('GET', 'HEAD'):
make_canned_response(verb)
req = make_test_request(verb)
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
# swift_owner = False: GET/HEAD shouldn't return sensitive info
make_canned_response(verb)
req = make_test_request(verb, swift_owner=False)
resp = app.handle_request(req)
h = resp.headers
self.assertEqual(None, h.get(ext_header))
# swift_owner unset: GET/HEAD shouldn't return sensitive info
make_canned_response(verb)
req = make_test_request(verb, swift_owner=False)
del req.environ['swift_owner']
resp = app.handle_request(req)
h = resp.headers
self.assertEqual(None, h.get(ext_header))
# Verify that PUT/POST requests remap sysmeta headers from acct server
with patch_account_controller_method('make_requests'):
make_canned_response('PUT')
req = make_test_request('PUT')
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
make_canned_response('POST')
req = make_test_request('POST')
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
class FakeObjectController(object):
def __init__(self):
self.app = self
self.logger = self
self.account_name = 'a'
self.container_name = 'c'
self.object_name = 'o'
self.trans_id = 'tx1'
self.object_ring = FakeRing()
self.node_timeout = 1
self.rate_limit_after_segment = 3
self.rate_limit_segments_per_sec = 2
self.GETorHEAD_base_args = []
def exception(self, *args):
self.exception_args = args
self.exception_info = sys.exc_info()
def GETorHEAD_base(self, *args):
self.GETorHEAD_base_args.append(args)
req = args[0]
path = args[4]
body = data = path[-1] * int(path[-1])
if req.range:
r = req.range.ranges_for_length(len(data))
if r:
(start, stop) = r[0]
body = data[start:stop]
resp = Response(app_iter=iter(body))
return resp
def iter_nodes(self, ring, partition):
for node in ring.get_part_nodes(partition):
yield node
for node in ring.get_more_nodes(partition):
yield node
def sort_nodes(self, nodes):
return nodes
def set_node_timing(self, node, timing):
return
class TestProxyObjectPerformance(unittest.TestCase):
def setUp(self):
# This is just a simple test that can be used to verify and debug the
# various data paths between the proxy server and the object
        # server. Used as a playground to debug buffer sizes for sockets.
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# Client is transmitting in 2 MB chunks
fd = sock.makefile('wb', 2 * 1024 * 1024)
# Small, fast for testing
obj_len = 2 * 64 * 1024
# Use 1 GB or more for measurements
# obj_len = 2 * 512 * 1024 * 1024
self.path = '/v1/a/c/o.large'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.path, str(obj_len)))
fd.write('a' * obj_len)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
self.obj_len = obj_len
def test_GET_debug_large_file(self):
for i in range(10):
start = time.time()
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# Client is reading in 2 MB chunks
fd = sock.makefile('wb', 2 * 1024 * 1024)
fd.write('GET %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.path)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
total = 0
while True:
buf = fd.read(100000)
if not buf:
break
total += len(buf)
self.assertEqual(total, self.obj_len)
end = time.time()
print("Run %02d took %07.03f" % (i, end - start))
@patch_policies([StoragePolicy(0, 'migrated', object_ring=FakeRing()),
StoragePolicy(1, 'ernie', True, object_ring=FakeRing()),
StoragePolicy(2, 'deprecated', is_deprecated=True,
object_ring=FakeRing()),
StoragePolicy(3, 'bert', object_ring=FakeRing())])
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_registered_defaults(self):
proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
si = utils.get_swift_info()['swift']
self.assertTrue('version' in si)
self.assertEqual(si['max_file_size'], constraints.MAX_FILE_SIZE)
self.assertEqual(si['max_meta_name_length'],
constraints.MAX_META_NAME_LENGTH)
self.assertEqual(si['max_meta_value_length'],
constraints.MAX_META_VALUE_LENGTH)
self.assertEqual(si['max_meta_count'], constraints.MAX_META_COUNT)
self.assertEqual(si['max_header_size'], constraints.MAX_HEADER_SIZE)
self.assertEqual(si['max_meta_overall_size'],
constraints.MAX_META_OVERALL_SIZE)
self.assertEqual(si['account_listing_limit'],
constraints.ACCOUNT_LISTING_LIMIT)
self.assertEqual(si['container_listing_limit'],
constraints.CONTAINER_LISTING_LIMIT)
self.assertEqual(si['max_account_name_length'],
constraints.MAX_ACCOUNT_NAME_LENGTH)
self.assertEqual(si['max_container_name_length'],
constraints.MAX_CONTAINER_NAME_LENGTH)
self.assertEqual(si['max_object_name_length'],
constraints.MAX_OBJECT_NAME_LENGTH)
self.assertTrue('strict_cors_mode' in si)
self.assertEqual(si['allow_account_management'], False)
self.assertEqual(si['account_autocreate'], False)
# This setting is by default excluded by disallowed_sections
self.assertEqual(si['valid_api_versions'],
constraints.VALID_API_VERSIONS)
# this next test is deliberately brittle in order to alert if
# other items are added to swift info
self.assertEqual(len(si), 18)
self.assertTrue('policies' in si)
sorted_pols = sorted(si['policies'], key=operator.itemgetter('name'))
self.assertEqual(len(sorted_pols), 3)
for policy in sorted_pols:
self.assertNotEquals(policy['name'], 'deprecated')
self.assertEqual(sorted_pols[0]['name'], 'bert')
self.assertEqual(sorted_pols[1]['name'], 'ernie')
self.assertEqual(sorted_pols[2]['name'], 'migrated')
class TestSocketObjectVersions(unittest.TestCase):
def setUp(self):
global _test_sockets
self.prolis = prolis = listen(('localhost', 0))
self._orig_prolis = _test_sockets[0]
allowed_headers = ', '.join([
'content-encoding',
'x-object-manifest',
'content-disposition',
'foo'
])
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers': allowed_headers}
prosrv = versioned_writes.VersionedWritesMiddleware(
proxy_logging.ProxyLoggingMiddleware(
_test_servers[0], conf,
logger=_test_servers[0].logger),
{})
self.coro = spawn(wsgi.server, prolis, prosrv, NullLogger())
# replace global prosrv with one that's filtered with version
# middleware
self.sockets = list(_test_sockets)
self.sockets[0] = prolis
_test_sockets = tuple(self.sockets)
def tearDown(self):
self.coro.kill()
# put the global state back
global _test_sockets
self.sockets[0] = self._orig_prolis
_test_sockets = tuple(self.sockets)
def test_version_manifest(self, oc='versions', vc='vers', o='name'):
versions_to_create = 3
# Create a container for our versioned object testing
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
pre = quote('%03x' % len(o))
osub = '%s/sub' % o
presub = quote('%03x' % len(osub))
osub = quote(osub)
presub = quote(presub)
oc = quote(oc)
vc = quote(vc)
def put_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\nX-Versions-Location: %s\r\n\r\n'
% (oc, vc))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
headers = put_container()
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
def get_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# check that the header was set
headers, body = get_container()
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('X-Versions-Location: %s' % vc, headers)
def put_version_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % vc)
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# make the container for the object versions
headers = put_version_container()
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
def put(version):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish%s'
'\r\n\r\n%05d\r\n' % (oc, o, version, version))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
def get(container=oc, obj=o):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n'
'\r\n' % (container, obj))
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# Create the versioned file
headers = put(0)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create the object versions
for version in range(1, versions_to_create):
sleep(.01) # guarantee that the timestamp changes
headers = put(version)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Ensure retrieving the manifest file gets the latest version
headers, body = get()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('Content-Type: text/jibberish%s' % version, headers)
self.assertNotIn('X-Object-Meta-Foo: barbaz', headers)
self.assertEqual(body, '%05d' % version)
def get_version_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n\r\n' % vc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# Ensure we have the right number of versions saved
headers, body = get_version_container()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), versions_to_create - 1)
def delete():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r'
'\nConnection: close\r\nX-Storage-Token: t\r\n\r\n'
% (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
def copy():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('COPY /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: '
't\r\nDestination: %s/copied_name\r\n'
'Content-Length: 0\r\n\r\n' % (oc, o, oc))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# copy a version and make sure the version info is stripped
headers = copy()
exp = 'HTTP/1.1 2' # 2xx series response to the COPY
self.assertEqual(headers[:len(exp)], exp)
def get_copy():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/copied_name HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\n'
'X-Auth-Token: t\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
headers, body = get_copy()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertEqual(body, '%05d' % version)
def post():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('POST /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: '
't\r\nContent-Type: foo/bar\r\nContent-Length: 0\r\n'
'X-Object-Meta-Bar: foo\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# post and make sure it's updated
headers = post()
exp = 'HTTP/1.1 2' # 2xx series response to the POST
self.assertEqual(headers[:len(exp)], exp)
headers, body = get()
self.assertIn('Content-Type: foo/bar', headers)
self.assertIn('X-Object-Meta-Bar: foo', headers)
self.assertEqual(body, '%05d' % version)
# check container listing
headers, body = get_container()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
# Delete the object versions
for segment in range(versions_to_create - 1, 0, -1):
headers = delete()
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
# Ensure retrieving the manifest file gets the latest version
headers, body = get()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('Content-Type: text/jibberish%s' % (segment - 1),
headers)
self.assertEqual(body, '%05d' % (segment - 1))
# Ensure we have the right number of versions saved
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r'
'\n' % (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), segment - 1)
# there is now one version left (in the manifest)
# Ensure we have no saved versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204 No Content'
self.assertEqual(headers[:len(exp)], exp)
# delete the last version
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
# Ensure it's all gone
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
# make sure manifest files will be ignored
for _junk in range(1, versions_to_create):
sleep(.01) # guarantee that the timestamp changes
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 0\r\n'
'Content-Type: text/jibberish0\r\n'
'Foo: barbaz\r\nX-Object-Manifest: %s/%s/\r\n\r\n'
% (oc, o, oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nhost: '
'localhost\r\nconnection: close\r\nx-auth-token: t\r\n\r\n'
% (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204 No Content'
self.assertEqual(headers[:len(exp)], exp)
# DELETE v1/a/c/obj shouldn't delete v1/a/c/obj/sub versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\n00000\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\n00001\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\nsub1\r\n' % (oc, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\nsub2\r\n' % (oc, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (vc, presub, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), 1)
# Check for when the versions target container doesn't exist
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\nX-Versions-Location: none\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create the versioned file
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\n\r\n00000\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create another version
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\n\r\n00001\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
# Delete the object
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%swhoops/foo HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx response
self.assertEqual(headers[:len(exp)], exp)
def test_version_manifest_utf8(self):
oc = '0_oc_non_ascii\xc2\xa3'
vc = '0_vc_non_ascii\xc2\xa3'
o = '0_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_container(self):
oc = '1_oc_non_ascii\xc2\xa3'
vc = '1_vc_ascii'
o = '1_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_version_container(self):
oc = '2_oc_ascii'
vc = '2_vc_non_ascii\xc2\xa3'
o = '2_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_containers(self):
oc = '3_oc_non_ascii\xc2\xa3'
vc = '3_vc_non_ascii\xc2\xa3'
o = '3_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_object(self):
oc = '4_oc_ascii'
vc = '4_vc_ascii'
o = '4_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_version_container_utf_object(self):
oc = '5_oc_ascii'
vc = '5_vc_non_ascii\xc2\xa3'
o = '5_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_container_utf_object(self):
oc = '6_oc_non_ascii\xc2\xa3'
vc = '6_vc_ascii'
o = '6_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
if __name__ == '__main__':
setup()
try:
unittest.main()
finally:
teardown()
|
gold3bear/swift
|
test/unit/proxy/test_server.py
|
Python
|
apache-2.0
| 403,114
|
[
"MOOSE"
] |
859f9846dc60a7cde8f7daa9b3537333f0e58ba5e989af3aa8d14e0232e5cc9a
|
from django.db import models
from django.dispatch import receiver
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from supersalon.purchases.models import ServicePurchase, ProductPurchase
class Visit(models.Model):
# Customer
customer = models.ForeignKey('customers.Customer', verbose_name=_("Customer"))
# Date Fields
visit_date = models.DateField(_("Visit Date"), default=timezone.now)
arrival_time = models.TimeField(_("Arrival Time"), blank=True, null=True)
departure_time = models.TimeField(_("Departure Time"), blank=True, null=True)
# Guest Fields
female_guest_count = models.PositiveSmallIntegerField(_("Female Guest Count"), default=0)
male_guest_count = models.PositiveSmallIntegerField(_("Male Guest Count"), default=0)
child_guest_count = models.PositiveSmallIntegerField(_("Child Guest Count"), default=0)
# Payment Fields
total_payment_amount = models.DecimalField(_("Total Payment Amount"), decimal_places=2, max_digits=8)
# Notes
notes = models.TextField(_("Notes"), blank=True)
class Meta:
ordering = ('-visit_date',)
verbose_name = _("Visit")
verbose_name_plural = _("Visits")
def __str__(self):
return _("{customer}'s Visit").format(customer=self.customer)
def str_product_purchases(self):
x = ""
for product_purchase in self.product_purchases.all():
x += "%s, " % product_purchase.product.name
return x
def str_service_purchases(self):
x = ""
for service_purchase in self.service_purchases.all():
x += "%s, " % service_purchase.service.name
return x
@receiver([models.signals.post_save], sender=Visit)
def visit_post_save(sender, instance, created, **kwargs):
# Update Customer last_visit
    last_visit = instance.customer.visit_set.latest('visit_date')
instance.customer.last_visit = last_visit.visit_date
instance.customer.save()
# Send SMS to thank for the visit
|
dogukantufekci/supersalon
|
supersalon/visits/models.py
|
Python
|
bsd-3-clause
| 2,022
|
[
"VisIt"
] |
645ca171fb8cd8f2a2f0b791e65b4dcd810f60f96da4cff62be6f33ebc3fad38
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
import functools
import numpy as np
import scipy as sp
import matplotlib.colors
import matplotlib.pyplot as plt
import matplotlib.text as mpl_text
import traits.api as t
from hyperspy import drawing
from hyperspy.exceptions import SignalDimensionError
from hyperspy.axes import AxesManager
from hyperspy.drawing.widgets import VerticalLineWidget
from hyperspy import components1d
from hyperspy.component import Component
from hyperspy.ui_registry import add_gui_method
_logger = logging.getLogger(__name__)
class SpanSelectorInSignal1D(t.HasTraits):
ss_left_value = t.Float(np.nan)
ss_right_value = t.Float(np.nan)
is_ok = t.Bool(False)
def __init__(self, signal):
if signal.axes_manager.signal_dimension != 1:
raise SignalDimensionError(
signal.axes_manager.signal_dimension, 1)
self.signal = signal
self.axis = self.signal.axes_manager.signal_axes[0]
self.span_selector = None
self.signal.plot()
self.span_selector_switch(on=True)
def on_disabling_span_selector(self):
pass
def span_selector_switch(self, on):
if not self.signal._plot.is_active():
return
if on is True:
self.span_selector = \
drawing.widgets.ModifiableSpanSelector(
self.signal._plot.signal_plot.ax,
onselect=self.update_span_selector_traits,
onmove_callback=self.update_span_selector_traits,)
elif self.span_selector is not None:
self.on_disabling_span_selector()
self.span_selector.turn_off()
self.span_selector = None
def update_span_selector_traits(self, *args, **kwargs):
if not self.signal._plot.is_active():
return
self.ss_left_value = self.span_selector.rect.get_x()
self.ss_right_value = self.ss_left_value + \
self.span_selector.rect.get_width()
def reset_span_selector(self):
self.span_selector_switch(False)
self.ss_left_value = np.nan
self.ss_right_value = np.nan
self.span_selector_switch(True)
class LineInSignal1D(t.HasTraits):
"""Adds a vertical draggable line to a spectrum that reports its
position to the position attribute of the class.
Attributes:
-----------
position : float
The position of the vertical line in the one dimensional signal. Moving
the line changes the position but the reverse is not true.
on : bool
Turns on and off the line
color : wx.Colour
The color of the line. It automatically redraws the line.
"""
position = t.Float()
is_ok = t.Bool(False)
on = t.Bool(False)
# The following is disabled because as of traits 4.6 the Color trait
# imports traitsui (!)
# try:
# color = t.Color("black")
# except ModuleNotFoundError: # traitsui is not installed
# pass
color_str = t.Str("black")
def __init__(self, signal):
if signal.axes_manager.signal_dimension != 1:
raise SignalDimensionError(
signal.axes_manager.signal_dimension, 1)
self.signal = signal
self.signal.plot()
axis_dict = signal.axes_manager.signal_axes[0].get_axis_dictionary()
am = AxesManager([axis_dict, ])
am._axes[0].navigate = True
# Set the position of the line in the middle of the spectral
# range by default
am._axes[0].index = int(round(am._axes[0].size / 2))
self.axes_manager = am
self.axes_manager.events.indices_changed.connect(
self.update_position, [])
self.on_trait_change(self.switch_on_off, 'on')
def draw(self):
self.signal._plot.signal_plot.figure.canvas.draw_idle()
def switch_on_off(self, obj, trait_name, old, new):
if not self.signal._plot.is_active():
return
if new is True and old is False:
self._line = VerticalLineWidget(self.axes_manager)
self._line.set_mpl_ax(self.signal._plot.signal_plot.ax)
self._line.patch.set_linewidth(2)
self._color_changed("black", "black")
            # There is no need to call draw because setting the
# color calls it.
elif new is False and old is True:
self._line.close()
self._line = None
self.draw()
def update_position(self, *args, **kwargs):
if not self.signal._plot.is_active():
return
self.position = self.axes_manager.coordinates[0]
def _color_changed(self, old, new):
if self.on is False:
return
self._line.patch.set_color((self.color.Red() / 255.,
self.color.Green() / 255.,
self.color.Blue() / 255.,))
self.draw()
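# --- Illustrative usage sketch (not part of the original module) ------------
# The docstring of ``LineInSignal1D`` above describes its ``position`` and
# ``on`` attributes.  The function below is only a minimal sketch of how the
# helper might be driven from a script; the random 1D signal is a made-up
# example.
def _line_in_signal1d_example():  # pragma: no cover
    import hyperspy.api as hs
    s = hs.signals.Signal1D(np.random.random(1024))
    line = LineInSignal1D(s)
    line.on = True           # draw the draggable vertical line on the plot
    return line.position     # current position of the line on the signal axis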
@add_gui_method(toolkey="Signal1D.calibrate")
class Signal1DCalibration(SpanSelectorInSignal1D):
left_value = t.Float(t.Undefined, label='New left value')
right_value = t.Float(t.Undefined, label='New right value')
offset = t.Float()
scale = t.Float()
units = t.Unicode()
def __init__(self, signal):
super(Signal1DCalibration, self).__init__(signal)
if signal.axes_manager.signal_dimension != 1:
raise SignalDimensionError(
signal.axes_manager.signal_dimension, 1)
self.units = self.axis.units
self.scale = self.axis.scale
self.offset = self.axis.offset
self.last_calibration_stored = True
def _left_value_changed(self, old, new):
if self.span_selector is not None and \
self.span_selector.range is None:
return
else:
self._update_calibration()
def _right_value_changed(self, old, new):
if self.span_selector.range is None:
return
else:
self._update_calibration()
def _update_calibration(self, *args, **kwargs):
# If the span selector or the new range values are not defined do
# nothing
if np.isnan(self.ss_left_value) or np.isnan(self.ss_right_value) or\
t.Undefined in (self.left_value, self.right_value):
return
lc = self.axis.value2index(self.ss_left_value)
rc = self.axis.value2index(self.ss_right_value)
self.offset, self.scale = self.axis.calibrate(
(self.left_value, self.right_value), (lc, rc),
modify_calibration=False)
def apply(self):
if np.isnan(self.ss_left_value) or np.isnan(self.ss_right_value):
_logger.warn("Select a range by clicking on the signal figure "
"and dragging before pressing Apply.")
return
elif self.left_value is t.Undefined or self.right_value is t.Undefined:
_logger.warn("Select the new left and right values before "
"pressing apply.")
return
axis = self.axis
axis.scale = self.scale
axis.offset = self.offset
axis.units = self.units
self.span_selector_switch(on=False)
self.signal._replot()
self.span_selector_switch(on=True)
self.last_calibration_stored = True
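# --- Illustrative sketch (assumption: non-interactive use) ------------------
# ``Signal1DCalibration.apply`` above simply writes the chosen ``scale``,
# ``offset`` and ``units`` back onto the signal axis, so an equivalent
# calibration can be performed directly on the axis.  The numbers below are
# arbitrary example values.
def _manual_axis_calibration_example():  # pragma: no cover
    import hyperspy.api as hs
    s = hs.signals.Signal1D(np.random.random(1024))
    axis = s.axes_manager.signal_axes[0]
    axis.offset = 100.0   # calibrated value of the first channel
    axis.scale = 0.5      # calibrated step between consecutive channels
    axis.units = "eV"
    return s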
class Signal1DRangeSelector(SpanSelectorInSignal1D):
on_close = t.List()
class Smoothing(t.HasTraits):
# The following is disabled because as of traits 4.6 the Color trait
# imports traitsui (!)
# try:
# line_color = t.Color("blue")
# except ModuleNotFoundError:
# # traitsui is required to define this trait so it is not defined when
# # traitsui is not installed.
# pass
line_color_ipy = t.Str("blue")
differential_order = t.Int(0)
@property
def line_color_rgb(self):
if hasattr(self, "line_color"):
try:
# PyQt and WX
return np.array(self.line_color.Get()) / 255.
except AttributeError:
try:
# PySide
return np.array(self.line_color.getRgb()) / 255.
except:
return matplotlib.colors.to_rgb(self.line_color_ipy)
else:
return matplotlib.colors.to_rgb(self.line_color_ipy)
def __init__(self, signal):
self.ax = None
self.data_line = None
self.smooth_line = None
self.signal = signal
self.single_spectrum = self.signal.get_current_signal().deepcopy()
self.axis = self.signal.axes_manager.signal_axes[0].axis
self.plot()
def plot(self):
if self.signal._plot is None or not \
self.signal._plot.is_active():
self.signal.plot()
hse = self.signal._plot
l1 = hse.signal_plot.ax_lines[0]
self.original_color = l1.line.get_color()
l1.set_line_properties(color=self.original_color,
type='scatter')
l2 = drawing.signal1d.Signal1DLine()
l2.data_function = self.model2plot
l2.set_line_properties(
color=self.line_color_rgb,
type='line')
# Add the line to the figure
hse.signal_plot.add_line(l2)
l2.plot()
self.data_line = l1
self.smooth_line = l2
self.smooth_diff_line = None
def update_lines(self):
self.smooth_line.update()
if self.smooth_diff_line is not None:
self.smooth_diff_line.update()
def turn_diff_line_on(self, diff_order):
self.signal._plot.signal_plot.create_right_axis()
self.smooth_diff_line = drawing.signal1d.Signal1DLine()
self.smooth_diff_line.data_function = self.diff_model2plot
self.smooth_diff_line.set_line_properties(
color=self.line_color_rgb,
type='line')
self.signal._plot.signal_plot.add_line(self.smooth_diff_line,
ax='right')
self.smooth_diff_line.axes_manager = self.signal.axes_manager
def _line_color_ipy_changed(self):
if hasattr(self, "line_color"):
self.line_color = str(self.line_color_ipy)
else:
self._line_color_changed(None, None)
def turn_diff_line_off(self):
if self.smooth_diff_line is None:
return
self.smooth_diff_line.close()
self.smooth_diff_line = None
def _differential_order_changed(self, old, new):
if old == 0:
self.turn_diff_line_on(new)
self.smooth_diff_line.plot()
if new == 0:
self.turn_diff_line_off()
return
self.smooth_diff_line.update(force_replot=False)
def _line_color_changed(self, old, new):
self.smooth_line.line_properties = {
'color': self.line_color_rgb}
if self.smooth_diff_line is not None:
self.smooth_diff_line.line_properties = {
'color': self.line_color_rgb}
self.update_lines()
def diff_model2plot(self, axes_manager=None):
smoothed = np.diff(self.model2plot(axes_manager),
self.differential_order)
return smoothed
def close(self):
if self.signal._plot.is_active():
if self.differential_order != 0:
self.turn_diff_line_off()
self.smooth_line.close()
self.data_line.set_line_properties(
color=self.original_color,
type='line')
@add_gui_method(toolkey="Signal1D.smooth_savitzky_golay")
class SmoothingSavitzkyGolay(Smoothing):
polynomial_order = t.Int(
3,
desc="The order of the polynomial used to fit the samples."
"`polyorder` must be less than `window_length`.")
window_length = t.Int(
5,
desc="`window_length` must be a positive odd integer.")
increase_window_length = t.Button(orientation="horizontal", label="+")
decrease_window_length = t.Button(orientation="horizontal", label="-")
def _increase_window_length_fired(self):
if self.window_length % 2:
nwl = self.window_length + 2
else:
nwl = self.window_length + 1
if nwl < self.signal.axes_manager[2j].size:
self.window_length = nwl
def _decrease_window_length_fired(self):
if self.window_length % 2:
nwl = self.window_length - 2
else:
nwl = self.window_length - 1
if nwl > self.polynomial_order:
self.window_length = nwl
else:
_logger.warn(
"The window length must be greater than the polynomial order")
def _polynomial_order_changed(self, old, new):
if self.window_length <= new:
self.window_length = new + 2 if new % 2 else new + 1
_logger.warn(
"Polynomial order must be < window length. "
"Window length set to %i.", self.window_length)
self.update_lines()
def _window_length_changed(self, old, new):
self.update_lines()
def _differential_order_changed(self, old, new):
if new > self.polynomial_order:
self.polynomial_order += 1
_logger.warn(
"Differential order must be <= polynomial order. "
"Polynomial order set to %i.", self.polynomial_order)
super(
SmoothingSavitzkyGolay,
self)._differential_order_changed(
old,
new)
def diff_model2plot(self, axes_manager=None):
self.single_spectrum.data = self.signal().copy()
self.single_spectrum.smooth_savitzky_golay(
polynomial_order=self.polynomial_order,
window_length=self.window_length,
differential_order=self.differential_order)
return self.single_spectrum.data
def model2plot(self, axes_manager=None):
self.single_spectrum.data = self.signal().copy()
self.single_spectrum.smooth_savitzky_golay(
polynomial_order=self.polynomial_order,
window_length=self.window_length,
differential_order=0)
return self.single_spectrum.data
def apply(self):
self.signal.smooth_savitzky_golay(
polynomial_order=self.polynomial_order,
window_length=self.window_length,
differential_order=self.differential_order)
self.signal._replot()
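# --- Illustrative sketch -----------------------------------------------------
# ``SmoothingSavitzkyGolay.apply`` above delegates to
# ``Signal1D.smooth_savitzky_golay``, so the same smoothing can be run without
# the GUI helper.  As the trait descriptions note, ``window_length`` must be a
# positive odd integer greater than ``polynomial_order``.
def _savitzky_golay_example():  # pragma: no cover
    import hyperspy.api as hs
    s = hs.signals.Signal1D(np.random.random(512))
    s.smooth_savitzky_golay(polynomial_order=3, window_length=5,
                            differential_order=0)
    return s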
@add_gui_method(toolkey="Signal1D.smooth_lowess")
class SmoothingLowess(Smoothing):
smoothing_parameter = t.Range(low=0.,
high=1.,
value=0.5,
)
number_of_iterations = t.Range(low=1,
value=1)
def __init__(self, *args, **kwargs):
super(SmoothingLowess, self).__init__(*args, **kwargs)
def _smoothing_parameter_changed(self, old, new):
if new == 0:
self.smoothing_parameter = old
else:
self.update_lines()
def _number_of_iterations_changed(self, old, new):
self.update_lines()
def model2plot(self, axes_manager=None):
self.single_spectrum.data = self.signal().copy()
self.single_spectrum.smooth_lowess(
smoothing_parameter=self.smoothing_parameter,
number_of_iterations=self.number_of_iterations,
show_progressbar=False)
return self.single_spectrum.data
def apply(self):
self.signal.smooth_lowess(
smoothing_parameter=self.smoothing_parameter,
number_of_iterations=self.number_of_iterations)
self.signal._replot()
@add_gui_method(toolkey="Signal1D.smooth_total_variation")
class SmoothingTV(Smoothing):
smoothing_parameter = t.Float(200)
def _smoothing_parameter_changed(self, old, new):
self.update_lines()
def model2plot(self, axes_manager=None):
self.single_spectrum.data = self.signal().copy()
self.single_spectrum.smooth_tv(
smoothing_parameter=self.smoothing_parameter,
show_progressbar=False)
return self.single_spectrum.data
def apply(self):
self.signal.smooth_tv(
smoothing_parameter=self.smoothing_parameter)
self.signal._replot()
@add_gui_method(toolkey="Signal1D.smooth_butterworth")
class ButterworthFilter(Smoothing):
cutoff_frequency_ratio = t.Range(0.01, 1., 0.01)
type = t.Enum('low', 'high')
order = t.Int(2)
def _cutoff_frequency_ratio_changed(self, old, new):
self.update_lines()
def _type_changed(self, old, new):
self.update_lines()
def _order_changed(self, old, new):
self.update_lines()
def model2plot(self, axes_manager=None):
b, a = sp.signal.butter(self.order, self.cutoff_frequency_ratio,
self.type)
smoothed = sp.signal.filtfilt(b, a, self.signal())
return smoothed
def apply(self):
b, a = sp.signal.butter(self.order, self.cutoff_frequency_ratio,
self.type)
f = functools.partial(sp.signal.filtfilt, b, a)
self.signal.map(f)
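# --- Illustrative sketch -----------------------------------------------------
# ``ButterworthFilter`` above is a thin wrapper around ``scipy.signal.butter``
# and ``scipy.signal.filtfilt``.  On a plain array the equivalent low-pass
# operation looks like this; the filter order and cutoff ratio are arbitrary
# example values.
def _butterworth_example():  # pragma: no cover
    from scipy.signal import butter, filtfilt
    x = np.random.random(1000)
    b, a = butter(2, 0.1, 'low')   # order 2, cutoff frequency ratio 0.1
    return filtfilt(b, a, x)       # zero-phase filtering of the data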
class Load(t.HasTraits):
filename = t.File
lazy = t.Bool(False)
@add_gui_method(toolkey="Signal1D.contrast_editor")
class ImageContrastEditor(t.HasTraits):
ss_left_value = t.Float()
ss_right_value = t.Float()
def __init__(self, image):
super(ImageContrastEditor, self).__init__()
self.image = image
f = plt.figure()
self.ax = f.add_subplot(111)
self.plot_histogram()
self.span_selector = None
self.span_selector_switch(on=True)
def on_disabling_span_selector(self):
pass
def span_selector_switch(self, on):
if on is True:
self.span_selector = \
drawing.widgets.ModifiableSpanSelector(
self.ax,
onselect=self.update_span_selector_traits,
onmove_callback=self.update_span_selector_traits)
elif self.span_selector is not None:
self.on_disabling_span_selector()
self.span_selector.turn_off()
self.span_selector = None
def update_span_selector_traits(self, *args, **kwargs):
self.ss_left_value = self.span_selector.rect.get_x()
self.ss_right_value = self.ss_left_value + \
self.span_selector.rect.get_width()
def plot_histogram(self):
vmin, vmax = self.image.vmin, self.image.vmax
pad = (vmax - vmin) * 0.05
vmin -= pad
vmax += pad
data = self.image.data_function().ravel()
self.patches = self.ax.hist(data, 100, range=(vmin, vmax),
color='blue')[2]
self.ax.set_xticks([])
self.ax.set_yticks([])
self.ax.set_xlim(vmin, vmax)
self.ax.figure.canvas.draw_idle()
def reset(self):
data = self.image.data_function().ravel()
self.image.vmin, self.image.vmax = np.nanmin(data), np.nanmax(data)
self.image.update()
self.update_histogram()
def update_histogram(self):
for patch in self.patches:
self.ax.patches.remove(patch)
self.plot_histogram()
def apply(self):
if self.ss_left_value == self.ss_right_value:
return
self.image.vmin = self.ss_left_value
self.image.vmax = self.ss_right_value
self.image.update()
self.update_histogram()
def close(self):
plt.close(self.ax.figure)
@add_gui_method(toolkey="Signal1D.integrate_in_range")
class IntegrateArea(SpanSelectorInSignal1D):
integrate = t.Button()
def __init__(self, signal, signal_range=None):
if signal.axes_manager.signal_dimension != 1:
raise SignalDimensionError(
                signal.axes_manager.signal_dimension, 1)
self.signal = signal
self.axis = self.signal.axes_manager.signal_axes[0]
self.span_selector = None
if not hasattr(self.signal, '_plot'):
self.signal.plot()
elif self.signal._plot is None:
self.signal.plot()
elif self.signal._plot.is_active() is False:
self.signal.plot()
self.span_selector_switch(on=True)
def apply(self):
integrated_spectrum = self.signal._integrate_in_range_commandline(
signal_range=(
self.ss_left_value,
self.ss_right_value)
)
# Replaces the original signal inplace with the new integrated spectrum
plot = False
if self.signal._plot and integrated_spectrum.axes_manager.shape != ():
self.signal._plot.close()
plot = True
self.signal.__init__(**integrated_spectrum._to_dictionary())
self.signal._assign_subclass()
self.signal.axes_manager.set_signal_dimension(0)
if plot is True:
self.signal.plot()
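# --- Illustrative sketch -----------------------------------------------------
# ``IntegrateArea.apply`` above integrates the signal over the selected span
# and replaces the signal in place.  A non-interactive equivalent using signal
# slicing is sketched below; the value range is an arbitrary example.
def _integrate_range_example():  # pragma: no cover
    import hyperspy.api as hs
    s = hs.signals.Signal1D(np.random.random((4, 512)))
    return s.isig[10.0:100.0].integrate1D(axis=-1)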
@add_gui_method(toolkey="Signal1D.remove_background")
class BackgroundRemoval(SpanSelectorInSignal1D):
background_type = t.Enum(
'Power Law',
'Gaussian',
'Offset',
'Polynomial',
default='Power Law')
polynomial_order = t.Range(1, 10)
fast = t.Bool(True,
desc=("Perform a fast (analytic, but possibly less accurate)"
" estimation of the background. Otherwise use "
"non-linear least squares."))
background_estimator = t.Instance(Component)
bg_line_range = t.Enum('from_left_range',
'full',
'ss_range',
default='full')
hi = t.Int(0)
def __init__(self, signal):
super(BackgroundRemoval, self).__init__(signal)
self.set_background_estimator()
self.bg_line = None
def on_disabling_span_selector(self):
if self.bg_line is not None:
self.bg_line.close()
self.bg_line = None
def set_background_estimator(self):
if self.background_type == 'Power Law':
self.background_estimator = components1d.PowerLaw()
self.bg_line_range = 'from_left_range'
elif self.background_type == 'Gaussian':
self.background_estimator = components1d.Gaussian()
self.bg_line_range = 'full'
elif self.background_type == 'Offset':
self.background_estimator = components1d.Offset()
self.bg_line_range = 'full'
elif self.background_type == 'Polynomial':
self.background_estimator = components1d.Polynomial(
self.polynomial_order)
self.bg_line_range = 'full'
def _polynomial_order_changed(self, old, new):
self.background_estimator = components1d.Polynomial(new)
self.span_selector_changed()
def _background_type_changed(self, old, new):
self.set_background_estimator()
self.span_selector_changed()
def _ss_left_value_changed(self, old, new):
if not (np.isnan(self.ss_right_value) or np.isnan(self.ss_left_value)):
self.span_selector_changed()
def _ss_right_value_changed(self, old, new):
if not (np.isnan(self.ss_right_value) or np.isnan(self.ss_left_value)):
self.span_selector_changed()
def create_background_line(self):
self.bg_line = drawing.signal1d.Signal1DLine()
self.bg_line.data_function = self.bg_to_plot
self.bg_line.set_line_properties(
color='blue',
type='line',
scaley=False)
self.signal._plot.signal_plot.add_line(self.bg_line)
self.bg_line.autoscale = False
self.bg_line.plot()
def bg_to_plot(self, axes_manager=None, fill_with=np.nan):
# First try to update the estimation
self.background_estimator.estimate_parameters(
self.signal, self.ss_left_value, self.ss_right_value,
only_current=True)
if self.bg_line_range == 'from_left_range':
bg_array = np.zeros(self.axis.axis.shape)
bg_array[:] = fill_with
from_index = self.axis.value2index(self.ss_left_value)
bg_array[from_index:] = self.background_estimator.function(
self.axis.axis[from_index:])
to_return = bg_array
elif self.bg_line_range == 'full':
to_return = self.background_estimator.function(self.axis.axis)
elif self.bg_line_range == 'ss_range':
bg_array = np.zeros(self.axis.axis.shape)
bg_array[:] = fill_with
from_index = self.axis.value2index(self.ss_left_value)
to_index = self.axis.value2index(self.ss_right_value)
            bg_array[from_index:to_index] = self.background_estimator.function(
self.axis.axis[from_index:to_index])
to_return = bg_array
if self.signal.metadata.Signal.binned is True:
to_return *= self.axis.scale
return to_return
def span_selector_changed(self):
        if np.isnan(self.ss_left_value) or np.isnan(self.ss_right_value) or\
self.ss_right_value <= self.ss_left_value:
return
if self.background_estimator is None:
return
if self.bg_line is None and \
self.background_estimator.estimate_parameters(
self.signal, self.ss_left_value,
self.ss_right_value,
only_current=True) is True:
self.create_background_line()
else:
self.bg_line.update()
def apply(self):
if self.signal._plot:
self.signal._plot.close()
plot = True
else:
plot = False
background_type = ("PowerLaw" if self.background_type == "Power Law"
else self.background_type)
new_spectra = self.signal.remove_background(
signal_range=(self.ss_left_value, self.ss_right_value),
background_type=background_type,
fast=self.fast,
polynomial_order=self.polynomial_order)
self.signal.data = new_spectra.data
self.signal.events.data_changed.trigger(self)
if plot:
self.signal.plot()
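# --- Illustrative sketch -----------------------------------------------------
# ``BackgroundRemoval.apply`` above delegates to ``Signal1D.remove_background``;
# an equivalent non-interactive call is sketched below.  The signal range and
# background model are arbitrary example choices.
def _background_removal_example():  # pragma: no cover
    import hyperspy.api as hs
    s = hs.signals.Signal1D(np.random.random(1024) + 1.0)
    return s.remove_background(signal_range=(10.0, 100.0),
                               background_type='PowerLaw',
                               fast=True)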
SPIKES_REMOVAL_INSTRUCTIONS = (
"To remove spikes from the data:\n\n"
" 1. Click \"Show derivative histogram\" to "
"determine at what magnitude the spikes are present.\n"
" 2. Enter a suitable threshold (lower than the "
"lowest magnitude outlier in the histogram) in the "
"\"Threshold\" box, which will be the magnitude "
"from which to search. \n"
" 3. Click \"Find next\" to find the first spike.\n"
" 4. If desired, the width and position of the "
"boundaries used to replace the spike can be "
"adjusted by clicking and dragging on the displayed "
"plot.\n "
" 5. View the spike (and the replacement data that "
"will be added) and click \"Remove spike\" in order "
"to alter the data as shown. The tool will "
"automatically find the next spike to replace.\n"
" 6. Repeat this process for each spike throughout "
"the dataset, until the end of the dataset is "
"reached.\n"
" 7. Click \"OK\" when finished to close the spikes "
"removal tool.\n\n"
"Note: Various settings can be configured in "
"the \"Advanced settings\" section. Hover the "
"mouse over each parameter for a description of what "
"it does."
"\n")
@add_gui_method(toolkey="SimpleMessage")
class SimpleMessage(t.HasTraits):
text = t.Str
def __init__(self, text=""):
self.text = text
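# --- Illustrative sketch -----------------------------------------------------
# The SPIKES_REMOVAL_INSTRUCTIONS text above describes the interactive
# workflow.  Judging from the toolkey registered just below, the tool is
# normally launched from a one-dimensional signal; a minimal launch sketch
# (purely illustrative) is:
def _spikes_removal_launch_example():  # pragma: no cover
    import hyperspy.api as hs
    s = hs.signals.Signal1D(np.random.random((4, 512)))
    s.spikes_removal_tool()   # opens the GUI described in the instructions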
@add_gui_method(toolkey="Signal1D.spikes_removal_tool")
class SpikesRemoval(SpanSelectorInSignal1D):
interpolator_kind = t.Enum(
'Linear',
'Spline',
default='Linear',
        desc="the type of interpolation to use when\n"
        "replacing the signal where a spike has been removed")
threshold = t.Float(400, desc="the derivative magnitude threshold above\n"
"which to find spikes")
click_to_show_instructions = t.Button()
show_derivative_histogram = t.Button()
spline_order = t.Range(1, 10, 3,
desc="the order of the spline used to\n"
"connect the reconstructed data")
interpolator = None
default_spike_width = t.Int(5,
desc="the width over which to do the interpolation\n"
"when removing a spike (this can be "
"adjusted for each\nspike by clicking "
"and dragging on the display during\n"
"spike replacement)")
index = t.Int(0)
add_noise = t.Bool(True,
desc="whether to add noise to the interpolated\nportion"
"of the spectrum. The noise properties defined\n"
"in the Signal metadata are used if present,"
"otherwise\nshot noise is used as a default")
def __init__(self, signal, navigation_mask=None, signal_mask=None):
super(SpikesRemoval, self).__init__(signal)
self.interpolated_line = None
self.coordinates = [coordinate for coordinate in
signal.axes_manager._am_indices_generator()
if (navigation_mask is None or not
navigation_mask[coordinate[::-1]])]
self.signal = signal
self.line = signal._plot.signal_plot.ax_lines[0]
self.ax = signal._plot.signal_plot.ax
signal._plot.auto_update_plot = False
if len(self.coordinates) > 1:
signal.axes_manager.indices = self.coordinates[0]
self.index = 0
self.argmax = None
self.derivmax = None
self.kind = "linear"
self._temp_mask = np.zeros(self.signal().shape, dtype='bool')
self.signal_mask = signal_mask
self.navigation_mask = navigation_mask
md = self.signal.metadata
from hyperspy.signal import BaseSignal
if "Signal.Noise_properties" in md:
if "Signal.Noise_properties.variance" in md:
self.noise_variance = md.Signal.Noise_properties.variance
if isinstance(md.Signal.Noise_properties.variance, BaseSignal):
self.noise_type = "heteroscedastic"
else:
self.noise_type = "white"
else:
self.noise_type = "shot noise"
else:
self.noise_type = "shot noise"
def _threshold_changed(self, old, new):
self.index = 0
self.update_plot()
def _click_to_show_instructions_fired(self):
from pyface.message_dialog import information
m = information(None, SPIKES_REMOVAL_INSTRUCTIONS,
                        title="Instructions")
def _show_derivative_histogram_fired(self):
self.signal._spikes_diagnosis(signal_mask=self.signal_mask,
navigation_mask=self.navigation_mask)
def detect_spike(self):
derivative = np.diff(self.signal())
if self.signal_mask is not None:
derivative[self.signal_mask[:-1]] = 0
if self.argmax is not None:
left, right = self.get_interpolation_range()
self._temp_mask[left:right] = True
derivative[self._temp_mask[:-1]] = 0
if abs(derivative.max()) >= self.threshold:
self.argmax = derivative.argmax()
self.derivmax = abs(derivative.max())
return True
else:
return False
def _reset_line(self):
if self.interpolated_line is not None:
self.interpolated_line.close()
self.interpolated_line = None
self.reset_span_selector()
def find(self, back=False):
self._reset_line()
ncoordinates = len(self.coordinates)
spike = self.detect_spike()
while not spike and (
(self.index < ncoordinates - 1 and back is False) or
(self.index > 0 and back is True)):
if back is False:
self.index += 1
else:
self.index -= 1
spike = self.detect_spike()
if spike is False:
m = SimpleMessage()
m.text = 'End of dataset reached'
try:
m.gui()
except (NotImplementedError, ImportError):
# This is only available for traitsui, ipywidgets has a
# progress bar instead.
pass
self.index = 0
self._reset_line()
return
else:
minimum = max(0, self.argmax - 50)
maximum = min(len(self.signal()) - 1, self.argmax + 50)
thresh_label = DerivativeTextParameters(
text=r"$\mathsf{\delta}_\mathsf{max}=$",
color="black")
self.ax.legend([thresh_label], [repr(int(self.derivmax))],
handler_map={DerivativeTextParameters:
DerivativeTextHandler()},
loc='best')
self.ax.set_xlim(
self.signal.axes_manager.signal_axes[0].index2value(
minimum),
self.signal.axes_manager.signal_axes[0].index2value(
maximum))
self.update_plot()
self.create_interpolation_line()
def update_plot(self):
if self.interpolated_line is not None:
self.interpolated_line.close()
self.interpolated_line = None
self.reset_span_selector()
self.update_spectrum_line()
if len(self.coordinates) > 1:
self.signal._plot.pointer._update_patch_position()
def update_spectrum_line(self):
self.line.auto_update = True
self.line.update()
self.line.auto_update = False
def _index_changed(self, old, new):
self.signal.axes_manager.indices = self.coordinates[new]
self.argmax = None
self._temp_mask[:] = False
def on_disabling_span_selector(self):
if self.interpolated_line is not None:
self.interpolated_line.close()
self.interpolated_line = None
def _spline_order_changed(self, old, new):
self.kind = self.spline_order
self.span_selector_changed()
def _add_noise_changed(self, old, new):
self.span_selector_changed()
    def _interpolator_kind_changed(self, old, new):
        # The Enum values are capitalized ('Linear'/'Spline'), whereas
        # scipy.interpolate.interp1d expects the lowercase 'linear' kind.
        if new == 'Linear':
            self.kind = 'linear'
        else:
            self.kind = self.spline_order
        self.span_selector_changed()
def _ss_left_value_changed(self, old, new):
if not (np.isnan(self.ss_right_value) or np.isnan(self.ss_left_value)):
self.span_selector_changed()
def _ss_right_value_changed(self, old, new):
if not (np.isnan(self.ss_right_value) or np.isnan(self.ss_left_value)):
self.span_selector_changed()
def create_interpolation_line(self):
self.interpolated_line = drawing.signal1d.Signal1DLine()
self.interpolated_line.data_function = self.get_interpolated_spectrum
self.interpolated_line.set_line_properties(
color='blue',
type='line')
self.signal._plot.signal_plot.add_line(self.interpolated_line)
self.interpolated_line.autoscale = False
self.interpolated_line.plot()
def get_interpolation_range(self):
axis = self.signal.axes_manager.signal_axes[0]
if np.isnan(self.ss_left_value) or np.isnan(self.ss_right_value):
left = self.argmax - self.default_spike_width
right = self.argmax + self.default_spike_width
else:
left = axis.value2index(self.ss_left_value)
right = axis.value2index(self.ss_right_value)
# Clip to the axis dimensions
nchannels = self.signal.axes_manager.signal_shape[0]
left = left if left >= 0 else 0
right = right if right < nchannels else nchannels - 1
return left, right
def get_interpolated_spectrum(self, axes_manager=None):
data = self.signal().copy()
axis = self.signal.axes_manager.signal_axes[0]
left, right = self.get_interpolation_range()
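        # `pad` extra channels on each side of the spike serve as support
        # points for the interpolation: one channel for linear interpolation,
        # `spline_order` channels for spline interpolation.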
if self.kind == 'linear':
pad = 1
else:
pad = self.spline_order
ileft = left - pad
iright = right + pad
ileft = np.clip(ileft, 0, len(data))
iright = np.clip(iright, 0, len(data))
left = int(np.clip(left, 0, len(data)))
right = int(np.clip(right, 0, len(data)))
if ileft == 0:
# Extrapolate to the left
if right == iright:
right -= 1
data[:right] = data[right:iright].mean()
elif iright == len(data):
# Extrapolate to the right
if left == ileft:
left += 1
data[left:] = data[ileft:left].mean()
else:
# Interpolate
x = np.hstack((axis.axis[ileft:left], axis.axis[right:iright]))
y = np.hstack((data[ileft:left], data[right:iright]))
intp = sp.interpolate.interp1d(x, y, kind=self.kind)
data[left:right] = intp(axis.axis[left:right])
# Add noise
if self.add_noise is True:
if self.noise_type == "white":
data[left:right] += np.random.normal(
scale=np.sqrt(self.noise_variance),
size=right - left)
elif self.noise_type == "heteroscedastic":
noise_variance = self.noise_variance(
axes_manager=self.signal.axes_manager)[left:right]
noise = [np.random.normal(scale=np.sqrt(item))
for item in noise_variance]
data[left:right] += noise
else:
data[left:right] = np.random.poisson(
np.clip(data[left:right], 0, np.inf))
return data
def span_selector_changed(self):
if self.interpolated_line is None:
return
else:
self.interpolated_line.update()
def apply(self):
if not self.interpolated_line: # No spike selected
return
self.signal()[:] = self.get_interpolated_spectrum()
self.signal.events.data_changed.trigger(obj=self.signal)
self.update_spectrum_line()
self.interpolated_line.close()
self.interpolated_line = None
self.reset_span_selector()
self.find()
# For creating a text handler in legend (to label derivative magnitude)
class DerivativeTextParameters(object):
def __init__(self, text, color):
self.my_text = text
self.my_color = color
class DerivativeTextHandler(object):
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
x0, y0 = handlebox.xdescent, handlebox.ydescent
width, height = handlebox.width, handlebox.height
patch = mpl_text.Text(
text=orig_handle.my_text,
color=orig_handle.my_color)
handlebox.add_artist(patch)
return patch
|
CodeMonkeyJan/hyperspy
|
hyperspy/signal_tools.py
|
Python
|
gpl-3.0
| 40,200
|
[
"Gaussian"
] |
283eefc94b69c935ac14c9cec30b733b7307058724dfb1e7b8de9b4b2483d343
|
##
# This file is an EasyBuild reciPY as per https://github.com/hpcugent/easybuild
#
# Copyright:: Copyright 2012-2014 Uni.Lu/LCSB, NTUA
# Authors:: Cedric Laczny <cedric.laczny@uni.lu>, Kenneth Hoste
# Authors:: George Tsouloupas <g.tsouloupas@cyi.ac.cy>, Fotis Georgatos <fotis@cern.ch>
# License:: MIT/GPL
# $Id$
#
# This work implements a part of the HPCBIOS project and is a component of the policy:
# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html
##
"""
EasyBuild support for building and installing BWA, implemented as an easyblock
@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
@author: George Tsouloupas <g.tsouloupas@cyi.ac.cy>
"""
import os
import shutil
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
class EB_BWA(ConfigureMake):
"""
Support for building BWA
"""
def __init__(self, *args, **kwargs):
"""Add extra config options specific to BWA."""
super(EB_BWA, self).__init__(*args, **kwargs)
self.files = []
def configure_step(self):
"""
Empty function as bwa comes with _no_ configure script
"""
self.files = ["bwa", "qualfa2fq.pl", "xa2multi.pl"]
if LooseVersion(self.version) < LooseVersion("0.7.0"):
# solid2fastq was dropped in recent versions because the same functionality is covered by other tools already
# cfr. http://osdir.com/ml/general/2010-10/msg26205.html
self.files.append("solid2fastq.pl")
def install_step(self):
"""
Install by copying files to install dir
"""
srcdir = self.cfg['start_dir']
destdir = os.path.join(self.installdir, 'bin')
srcfile = None
try:
os.makedirs(destdir)
for filename in self.files:
srcfile = os.path.join(srcdir, filename)
shutil.copy2(srcfile, destdir)
except OSError, err:
self.log.error("Copying %s to installation dir %s failed: %s" % (srcfile, destdir, err))
def sanity_check_step(self):
"""Custom sanity check for BWA."""
custom_paths = {
'files': ["bin/%s" % x for x in self.files],
'dirs': []
}
super(EB_BWA, self).sanity_check_step(custom_paths=custom_paths)
|
omula/easybuild-easyblocks
|
easybuild/easyblocks/b/bwa.py
|
Python
|
gpl-2.0
| 2,400
|
[
"BWA"
] |
a8ccecd3ab78a47bc6d40764c4b1bb4450f68f0fab4c9f29c295cf53b8757afb
|
from octopus.modules.es.testindex import ESTestCase
from service.models import Record, SpreadsheetJob, OAGRLink
import time
class TestModels(ESTestCase):
def setUp(self):
super(TestModels, self).setUp()
def tearDown(self):
super(TestModels, self).tearDown()
def test_01_spreadsheet(self):
s = SpreadsheetJob()
s.filename = "myfile.csv"
s.contact_email = "contact@email.com"
s.status_code = "processing"
s.status_message = "currently working on it!"
assert s.filename == "myfile.csv"
assert s.contact_email == "contact@email.com"
assert s.status_code == "processing"
assert s.status_message == "currently working on it!"
def test_02_record(self):
r = Record()
r.upload_id = "1234"
r.upload_pos = "234"
r.set_source_data(university="my uni",
pmcid="PMC12345678",
pmid="98765432",
doi="10.whatever",
publisher="wiley",
journal_title="Journal of things",
article_title="A study of sorts",
apc=100,
wellcome_apc=200,
vat=20,
total_cost=300,
grant_code="WELL01",
licence_info="CC BY",
notes="this is a note")
r.pmcid = "PMC12345678"
r.pmid = "98765432"
r.doi = "10.whatever"
r.title = "A study of sorts"
r.has_ft_xml = True
r.aam_from_xml = False
r.aam_from_epmc = True
r.issn = "1234-5678"
r.in_oag = True
r.oag_pmcid = "not_sent"
r.oag_doi = "sent"
r.oag_pmid = "fto"
r.in_epmc = True
r.is_oa = False
r.aam = True
r.licence_type = "CC BY"
r.licence_source = "epmc"
r.journal_type = "hybrid"
r.confidence = "0.8"
r.standard_compliance = True
r.deluxe_compliance = False
r.add_provenance("richard", "provenance 1")
r.add_provenance("wellcome", "provenance 2")
assert r.upload_id == "1234"
assert r.upload_pos == 234
assert r.pmcid == "PMC12345678"
assert r.pmid == "98765432"
assert r.doi == "10.whatever"
assert r.title == "A study of sorts"
assert r.has_ft_xml
assert not r.aam_from_xml
assert r.aam_from_epmc
assert len(r.issn) == 1
assert "1234-5678" in r.issn
assert r.in_epmc
assert not r.is_oa
assert r.aam
assert r.licence_type == "CC BY"
assert r.licence_source == "epmc"
assert r.journal_type == "hybrid"
assert r.confidence == 0.8
assert r.standard_compliance
assert not r.deluxe_compliance
p = r.provenance
assert len(p) == 2
for by, when, note in p:
assert by in ["richard", "wellcome"]
assert note in ["provenance 1", "provenance 2"]
def test_03_oagrlink(self):
l = OAGRLink()
l.spreadsheet_id = "1234"
l.oagrjob_id = "9876"
assert l.spreadsheet_id == "1234"
assert l.oagrjob_id == "9876"
l.save()
time.sleep(1)
l2 = OAGRLink.by_oagr_id("9876")
assert l2.spreadsheet_id == "1234"
assert l2.oagrjob_id == "9876"
def test_04_pc_complete(self):
job = SpreadsheetJob()
job.save()
# a record with no completeness
r = Record()
r.upload_id = job.id
r.save()
# a record with epmc complete
r2 = Record()
r2.upload_id = job.id
r2.epmc_complete = True
r2.save()
# a record with both complete
r3 = Record()
r3.upload_id = job.id
r3.epmc_complete = True
r3.oag_complete = True
r3.save()
time.sleep(1)
comp = job.pc_complete
assert int(comp) == 50
r.epmc_complete = True
r.save()
time.sleep(1)
comp = job.pc_complete
assert int(comp) == 66
r.oag_complete = True
r2.oag_complete = True
r.save()
r2.save()
time.sleep(1)
comp = job.pc_complete
assert int(comp) == 100
def test_05_compliance(self):
record = Record()
# Truth table for standard and deluxe compliance
#
# Note: CC-BY column means CC-BY or CC0
#
# in EPMC | AAM | CC BY | Licence in EPMC | is OA | S | D
# 0 0 0 0 0 0 0
# 0 0 1 0 0 0 0
# 1 0 0 0 0 0 0
# 1 0 0 0 1 0 0
# 1 0 0 1 1 0 0
# 1 0 1 0 0 1 0
# 1 0 1 1 1 1 1
# 1 1 0 0 0 1 1
# 1 1 1 0 0 1 1
# 1 1 1 1 1 1 1
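        # An illustrative reading of the table above (an inferred summary, not
        # necessarily how the Record model implements it):
        #   standard compliance ~ in_epmc and (aam or cc_by)
        #   deluxe compliance   ~ in_epmc and (aam or (cc_by and licence_in_epmc))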
# check the default values, before we've done anything to the record
assert record.deluxe_compliance is False
assert record.standard_compliance is False
# set the various switches, and then check the results
# in EPMC | AAM | CC BY | Licence in EPMC | is OA | S | D
# 0 0 0 0 0 0 0
record.in_epmc = False
record.aam = False
record.licence_type = "Other"
record.licence_source = "publisher"
record.is_oa = False
assert record.deluxe_compliance is False
assert record.standard_compliance is False
# in EPMC | AAM | CC BY | Licence in EPMC | is OA | S | D
# 0 0 1 0 0 0 0
record.licence_type = "cc-by"
assert record.deluxe_compliance is False
assert record.standard_compliance is False
# in EPMC | AAM | CC BY | Licence in EPMC | is OA | S | D
# 1 0 0 0 0 0 0
record.in_epmc = True
record.licence_type = "Other"
assert record.deluxe_compliance is False
assert record.standard_compliance is False
# in EPMC | AAM | CC BY | Licence in EPMC | is OA | S | D
# 1 0 0 0 1 0 0
record.is_oa = True
assert record.deluxe_compliance is False
assert record.standard_compliance is False
# in EPMC | AAM | CC BY | Licence in EPMC | is OA | S | D
# 1 0 0 1 1 0 0
record.licence_source = "epmc_xml"
assert record.deluxe_compliance is False
assert record.standard_compliance is False
# in EPMC | AAM | CC BY | Licence in EPMC | is OA | S | D
# 1 0 1 0 0 1 0
record.licence_type = "CC0"
record.licence_source = "publisher"
record.is_oa = False
assert record.deluxe_compliance is False
assert record.standard_compliance is True
# in EPMC | AAM | CC BY | Licence in EPMC | is OA | S | D
# 1 0 1 1 1 1 1
record.licence_type = "cc-by"
record.licence_source = "epmc"
record.is_oa = True
assert record.deluxe_compliance is True
assert record.standard_compliance is True
# in EPMC | AAM | CC BY | Licence in EPMC | is OA | S | D
# 1 1 0 0 0 1 1
record.aam = True
record.licence_type = "Other"
record.licence_source = "publisher"
assert record.deluxe_compliance is True
assert record.standard_compliance is True
# in EPMC | AAM | CC BY | Licence in EPMC | is OA | S | D
# 1 1 1 0 0 1 1
record.licence_type = "CC-BY"
assert record.deluxe_compliance is True
assert record.standard_compliance is True
# in EPMC | AAM | CC BY | Licence in EPMC | is OA | S | D
# 1 1 1 1 1 1 1
record.licence_source = "epmc_xml"
record.is_oa = True
assert record.deluxe_compliance is True
assert record.standard_compliance is True
def test_06_duplicates(self):
# first make ourselves a job to work on
job = SpreadsheetJob()
job.save()
# now make a bunch of records, some unique and some duplicate
# unique pmcid
r = Record()
r.upload_id = job.id
r.pmcid = "PMCunique"
r.save()
# duplicate pmcid
r = Record()
r.upload_id = job.id
r.pmcid = "PMCdupe"
r.save()
r = Record()
r.upload_id = job.id
r.pmcid = "PMCdupe"
r.save()
# unique pmid
r = Record()
r.upload_id = job.id
r.pmid = "unique"
r.save()
# duplicate pmid
r = Record()
r.upload_id = job.id
r.pmid = "dupe"
r.save()
r = Record()
r.upload_id = job.id
r.pmid = "dupe"
r.save()
# unique doi
r = Record()
r.upload_id = job.id
r.doi = "10.unique"
r.save()
        # duplicate doi
r = Record()
r.upload_id = job.id
r.doi = "10.dupe"
r.save()
r = Record()
r.upload_id = job.id
r.doi = "10.dupe"
r.save()
# one that is a duplicate of everything
r = Record()
r.upload_id = job.id
r.pmcid = "PMCdupe"
r.pmid = "dupe"
r.doi = "10.dupe"
r.save()
# one that is confused about its duplication
r = Record()
r.upload_id = job.id
r.pmcid = "PMCdupe"
r.pmid = "dupe"
r.doi = "10.notdupe"
r.save()
time.sleep(2)
dupes = job.list_duplicate_identifiers()
# check the structure of the response
assert "pmcid" in dupes
assert "pmid" in dupes
assert "doi" in dupes
        # check the contents
assert len(dupes["pmcid"]) == 1
assert "PMCdupe" in dupes["pmcid"]
assert len(dupes["pmid"]) == 1
assert "dupe" in dupes["pmid"]
assert len(dupes["doi"]) == 1
assert "10.dupe" in dupes["doi"]
|
CottageLabs/oacwellcome
|
service/tests/unit/test_models.py
|
Python
|
apache-2.0
| 11,345
|
[
"Octopus"
] |
7c6cb6cb0d05fad94141fcdd4c8d69a4bcba08d8434a694e268d22ee07f0418c
|
#!/usr/bin/env python
"""
Read the Dimer Vector from CP2K.
The vector is read from the cp2k-1.restart file.
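The extracted vector is written to NEWMODECAR, one "x y z" line per atom.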
"""
import sys,re
#Read the cp2k-1.restart file
if ( len(sys.argv) < 2):
print "Usage: %s cp2k-1.restart" %(sys.argv[0])
exit()
CP2KFileName = sys.argv[1]
DimerFileName = "NEWMODECAR"
inp=open(CP2KFileName, "r")
lines=inp.readlines()
inp.close()
#Get the startline and endline of the dimer vector
i=0
VectorStart=0
VectorEnd=0
for line in lines:
if (re.search(ur"&DIMER_VECTOR",line)):
# print "Find the start line in lin %d" % (i)
VectorStart = i+1
elif (re.search(ur"&END DIMER_VECTOR", line)):
# print "Find the end line in lin %d" % (i)
VectorEnd = i
break
i += 1
VectorLines=lines[VectorStart:VectorEnd]
#Print the vector lines
DimerVector = []
NumVector= 0
for line in VectorLines:
line=re.sub(ur"\\","",line)
DimerVector.extend(line.split())
#print "Number of vectors: %d" % (len(DimerVector))
if (len(DimerVector)%3 != 0 ):
print "Error: Number of Dimer vectors is %d, cannot be divided by 3." % (len(DimerVector))
else:
NumVector= len(DimerVector)/3
modelines=""
for i in range(0, NumVector):
modelines += "%f\t%f\t%f\n" % ( float(DimerVector[3*i]), float(DimerVector[3*i+1]), float(DimerVector[3*i+2]))
outp=open(DimerFileName,"w")
outp.writelines(modelines)
outp.close()
|
zevanzhao/TCCL-Code
|
CP2K/GetCP2KDimerVector.py
|
Python
|
gpl-3.0
| 1,370
|
[
"CP2K"
] |
9e98ee4bb6dcad4d84755a93899d0cfb3ac5385065c30fe49d9689da6b2f0fee
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('visit', '0021_auto_20150515_0131'),
]
operations = [
migrations.AddField(
model_name='visit',
name='no_show',
field=models.BooleanField(default=False),
),
]
|
koebbe/homeworks
|
visit/migrations/0022_visit_no_show.py
|
Python
|
mit
| 401
|
[
"VisIt"
] |
c96ef4dea901d60b8b27b7e26dab714bd4a0d24232da221ede6ad070be7fa90f
|
# -*- coding: utf-8 -*-
"""
=====================
Cython related magics
=====================
Magic command interface for interactive work with Cython
.. note::
The ``Cython`` package needs to be installed separately. It
can be obtained using ``easy_install`` or ``pip``.
Usage
=====
To enable the magics below, execute ``%load_ext cythonmagic``.
``%%cython``
{CYTHON_DOC}
``%%cython_inline``
{CYTHON_INLINE_DOC}
``%%cython_pyximport``
{CYTHON_PYXIMPORT_DOC}
Author:
* Brian Granger
Code moved from IPython and adapted by:
* Martín Gaitán
Parts of this code were taken from Cython.inline.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file ipython-COPYING.rst, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import imp
import io
import os
import re
import sys
import time
try:
reload
except NameError: # Python 3
from imp import reload
try:
import hashlib
except ImportError:
import md5 as hashlib
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
from IPython.core import display
from IPython.core import magic_arguments
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.utils import py3compat
try:
from IPython.paths import get_ipython_cache_dir
except ImportError:
# older IPython version
from IPython.utils.path import get_ipython_cache_dir
from IPython.utils.text import dedent
from ..Shadow import __version__ as cython_version
from ..Compiler.Errors import CompileError
from .Inline import cython_inline
from .Dependencies import cythonize
@magics_class
class CythonMagics(Magics):
def __init__(self, shell):
super(CythonMagics,self).__init__(shell)
self._reloads = {}
self._code_cache = {}
self._pyximport_installed = False
def _import_all(self, module):
for k,v in module.__dict__.items():
if not k.startswith('__'):
self.shell.push({k:v})
@cell_magic
def cython_inline(self, line, cell):
"""Compile and run a Cython code cell using Cython.inline.
This magic simply passes the body of the cell to Cython.inline
and returns the result. If the variables `a` and `b` are defined
in the user's namespace, here is a simple example that returns
their sum::
%%cython_inline
return a+b
For most purposes, we recommend the usage of the `%%cython` magic.
"""
locs = self.shell.user_global_ns
globs = self.shell.user_ns
return cython_inline(cell, locals=locs, globals=globs)
@cell_magic
def cython_pyximport(self, line, cell):
"""Compile and import a Cython code cell using pyximport.
The contents of the cell are written to a `.pyx` file in the current
working directory, which is then imported using `pyximport`. This
magic requires a module name to be passed::
%%cython_pyximport modulename
def f(x):
return 2.0*x
The compiled module is then imported and all of its symbols are
injected into the user's namespace. For most purposes, we recommend
the usage of the `%%cython` magic.
"""
module_name = line.strip()
if not module_name:
raise ValueError('module name must be given')
fname = module_name + '.pyx'
with io.open(fname, 'w', encoding='utf-8') as f:
f.write(cell)
if 'pyximport' not in sys.modules or not self._pyximport_installed:
import pyximport
pyximport.install(reload_support=True)
self._pyximport_installed = True
if module_name in self._reloads:
module = self._reloads[module_name]
reload(module)
else:
__import__(module_name)
module = sys.modules[module_name]
self._reloads[module_name] = module
self._import_all(module)
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-3', dest='language_level', action='store_const', const=3, default=None,
help="Select Python 3 syntax."
)
@magic_arguments.argument(
'-2', dest='language_level', action='store_const', const=2, default=None,
help="Select Python 2 syntax."
)
@magic_arguments.argument(
'-c', '--compile-args', action='append', default=[],
help="Extra flags to pass to compiler via the `extra_compile_args` "
"Extension flag (can be specified multiple times)."
)
@magic_arguments.argument(
'--link-args', action='append', default=[],
help="Extra flags to pass to linker via the `extra_link_args` "
"Extension flag (can be specified multiple times)."
)
@magic_arguments.argument(
'-l', '--lib', action='append', default=[],
help="Add a library to link the extension against (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'-n', '--name',
help="Specify a name for the Cython module."
)
@magic_arguments.argument(
'-L', dest='library_dirs', metavar='dir', action='append', default=[],
help="Add a path to the list of library directories (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'-I', '--include', action='append', default=[],
help="Add a path to the list of include directories (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'-S', '--src', action='append', default=[],
help="Add a path to the list of src files (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'-+', '--cplus', action='store_true', default=False,
help="Output a C++ rather than C file."
)
@magic_arguments.argument(
'-f', '--force', action='store_true', default=False,
help="Force the compilation of a new module, even if the source has been "
"previously compiled."
)
@magic_arguments.argument(
'-a', '--annotate', action='store_true', default=False,
help="Produce a colorized HTML version of the source."
)
@cell_magic
def cython(self, line, cell):
"""Compile and import everything from a Cython code cell.
The contents of the cell are written to a `.pyx` file in the
directory `IPYTHONDIR/cython` using a filename with the hash of the
code. This file is then cythonized and compiled. The resulting module
is imported and all of its symbols are injected into the user's
namespace. The usage is similar to that of `%%cython_pyximport` but
you don't have to pass a module name::
%%cython
def f(x):
return 2.0*x
To compile OpenMP codes, pass the required `--compile-args`
and `--link-args`. For example with gcc::
%%cython --compile-args=-fopenmp --link-args=-fopenmp
...
"""
args = magic_arguments.parse_argstring(self.cython, line)
code = cell if cell.endswith('\n') else cell+'\n'
lib_dir = os.path.join(get_ipython_cache_dir(), 'cython')
quiet = True
key = code, line, sys.version_info, sys.executable, cython_version
if not os.path.exists(lib_dir):
os.makedirs(lib_dir)
if args.force:
# Force a new module name by adding the current time to the
# key which is hashed to determine the module name.
key += time.time(),
if args.name:
module_name = py3compat.unicode_to_str(args.name)
else:
module_name = "_cython_magic_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()
module_path = os.path.join(lib_dir, module_name + self.so_ext)
have_module = os.path.isfile(module_path)
need_cythonize = not have_module
if args.annotate:
html_file = os.path.join(lib_dir, module_name + '.html')
if not os.path.isfile(html_file):
need_cythonize = True
if need_cythonize:
c_include_dirs = args.include
c_src_files = list(map(str, args.src))
if 'numpy' in code:
import numpy
c_include_dirs.append(numpy.get_include())
pyx_file = os.path.join(lib_dir, module_name + '.pyx')
pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
with io.open(pyx_file, 'w', encoding='utf-8') as f:
f.write(code)
extension = Extension(
name = module_name,
sources = [pyx_file] + c_src_files,
include_dirs = c_include_dirs,
library_dirs = args.library_dirs,
extra_compile_args = args.compile_args,
extra_link_args = args.link_args,
libraries = args.lib,
language = 'c++' if args.cplus else 'c',
)
build_extension = self._get_build_extension()
try:
opts = dict(
quiet=quiet,
annotate=args.annotate,
force=True,
)
if args.language_level is not None:
assert args.language_level in (2, 3)
opts['language_level'] = args.language_level
elif sys.version_info[0] > 2:
opts['language_level'] = 3
build_extension.extensions = cythonize([extension], **opts)
except CompileError:
return
if not have_module:
build_extension.build_temp = os.path.dirname(pyx_file)
build_extension.build_lib = lib_dir
build_extension.run()
self._code_cache[key] = module_name
module = imp.load_dynamic(module_name, module_path)
self._import_all(module)
if args.annotate:
try:
with io.open(html_file, encoding='utf-8') as f:
annotated_html = f.read()
except IOError as e:
# File could not be opened. Most likely the user has a version
# of Cython before 0.15.1 (when `cythonize` learned the
# `force` keyword argument) and has already compiled this
# exact source without annotation.
print('Cython completed successfully but the annotated '
'source could not be read.', file=sys.stderr)
print(e, file=sys.stderr)
else:
return display.HTML(self.clean_annotated_html(annotated_html))
@property
def so_ext(self):
"""The extension suffix for compiled modules."""
try:
return self._so_ext
except AttributeError:
self._so_ext = self._get_build_extension().get_ext_filename('')
return self._so_ext
def _clear_distutils_mkpath_cache(self):
"""clear distutils mkpath cache
prevents distutils from skipping re-creation of dirs that have been removed
"""
try:
from distutils.dir_util import _path_created
except ImportError:
pass
else:
_path_created.clear()
def _get_build_extension(self):
self._clear_distutils_mkpath_cache()
dist = Distribution()
config_files = dist.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
dist.parse_config_files(config_files)
build_extension = build_ext(dist)
build_extension.finalize_options()
return build_extension
@staticmethod
def clean_annotated_html(html):
"""Clean up the annotated HTML source.
Strips the link to the generated C or C++ file, which we do not
present to the user.
"""
r = re.compile('<p>Raw output: <a href="(.*)">(.*)</a>')
html = '\n'.join(l for l in html.splitlines() if not r.match(l))
return html
__doc__ = __doc__.format(
# rST doesn't see the -+ flag as part of an option list, so we
# hide it from the module-level docstring.
CYTHON_DOC = dedent(CythonMagics.cython.__doc__\
.replace('-+, --cplus','--cplus ')),
CYTHON_INLINE_DOC = dedent(CythonMagics.cython_inline.__doc__),
CYTHON_PYXIMPORT_DOC = dedent(CythonMagics.cython_pyximport.__doc__),
)
|
fabianrost84/cython
|
Cython/Build/IpythonMagic.py
|
Python
|
apache-2.0
| 12,985
|
[
"Brian"
] |
8d5e5c65b951cf0f7531746cb45e02ae18507e04b2bb81ca381e0d608a1eaaed
|
#!/usr/bin/env python
#encoding=utf8
#Copyright [2014] [Wei Zhang]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
###################################################################
# Date: 2014/10/20 #
# Weighted Matrix Factorization for implicit feedback data #
# ALS===>>>> #
# Result: lambda1=1; beta=1; alpha=40 =====> 0.076 #
# lambda1=0.01; beta=0.01; alpha=1 =====> 0.048 #
# lambda1=0.01; beta=0.1; gamma=0.5; alpha=1 ===> 0.113 #
# lambda1=0.01; beta=1; gamma=1; alpha=1 ===> 0.079 #
# SGD===>>>> #
# Result: niters2, lambda2=0.01, neg_sample=1, beta2=1, alpha2=1 #
# ===> 0.046 #
###################################################################
import numpy as np
import json, csv, sys, random, pickle
from collections import defaultdict
from tool import rZero, rPosGaussian, tic, toc
settings = json.loads(open("../../SETTINGS.json").read())
MIN_PREF = -1e5
class WMF():
def __init__(self):
self.ndim = 20
self.tr_method = 4
# ALS
self.niters1 = 20
self.lambda1 = 0.01
self.beta1 = 0.1
self.gamma1 = 0.5
self.alpha1 = 1
# SGD
self.niters2 = 30
self.lr2 = 0.01
self.lambda2 = 0.001
self.neg_sample = 5
self.beta2 = 0.1
self.alpha2 = 1
def model_init(self, train_file, init_choice):
self.user_ids = {}
self.ruser_ids = {}
self.organizer_ids = {}
self.event_ids = {}
data = [entry for entry in csv.reader(open(train_file))]
for entry in data:
uname, eventname, oname = entry[0], entry[1], entry[3]
if uname not in self.user_ids:
self.user_ids[uname] = len(self.user_ids)
self.ruser_ids[self.user_ids[uname]] = uname
if oname not in self.organizer_ids:
self.organizer_ids[oname] = len(self.organizer_ids)
if eventname not in self.event_ids:
self.event_ids[eventname] = len(self.event_ids)
factor_init_method = None
if init_choice == "zero":
factor_init_method = rZero
elif init_choice == "gaussian":
factor_init_method = rPosGaussian
else:
            print 'Error: invalid model initialization choice.'
sys.exit(1)
self.user_factor = np.array([factor_init_method(self.ndim) for i in
xrange(len(self.user_ids))])
self.organizer_factor = np.array([factor_init_method(self.ndim) for i in
xrange(len(self.organizer_ids))])
# ALS learning needed
if self.tr_method == 1:
self.user_pref = {}
self.user_conf = {}
for uid in xrange(len(self.user_ids)):
self.user_pref[uid] = np.array([0 for i in xrange(len(self.organizer_ids))])
self.user_conf[uid] = np.array([self.beta1 for i in xrange(len(self.organizer_ids))])
for entry in data:
uid, oid = self.user_ids[entry[0]], self.organizer_ids[entry[3]]
self.user_pref[uid][oid] = 1
if self.user_conf[uid][oid] == self.beta1:
self.user_conf[uid][oid] = self.gamma1
else:
self.user_conf[uid][oid] += self.alpha1
self.organizer_pref = {}
self.organizer_conf = {}
for oid in xrange(len(self.organizer_ids)):
self.organizer_pref[oid] = np.array([0 for i in xrange(len(self.user_ids))])
self.organizer_conf[oid] = np.array([0 for i in xrange(len(self.user_ids))])
for uid in xrange(len(self.user_ids)):
for oid in xrange(len(self.organizer_ids)):
self.organizer_pref[oid][uid] = self.user_pref[uid][oid]
self.organizer_conf[oid][uid] = self.user_conf[uid][oid]
# SGD learning needed
if self.tr_method == 2:
self.pool_oids = [i for i in xrange(len(self.organizer_ids))]
self.user_interacted_organizer = defaultdict(set)
self.tr_pairs = [[self.user_ids[entry[0]], self.organizer_ids[entry[3]]] for entry in data]
for i, entry in enumerate(data):
uid, oid = self.user_ids[entry[0]], self.organizer_ids[entry[3]]
self.user_interacted_organizer[uid].add(oid)
# SGD1 learning needed
if self.tr_method == 3:
self.user_pref = {}
self.user_conf = {}
for uid in xrange(len(self.user_ids)):
self.user_pref[uid] = np.array([0 for i in xrange(len(self.organizer_ids))])
self.user_conf[uid] = np.array([self.beta1 for i in xrange(len(self.organizer_ids))])
for entry in data:
uid, oid = self.user_ids[entry[0]], self.organizer_ids[entry[3]]
self.user_pref[uid][oid] = 1
if self.user_conf[uid][oid] == self.beta1:
self.user_conf[uid][oid] = self.gamma1
else:
self.user_conf[uid][oid] += self.alpha1
self.tr_pairs = []
for uid in xrange(len(self.user_ids)):
for oid in xrange(len(self.organizer_ids)):
self.tr_pairs.append([uid, oid, self.user_pref[uid][oid], self.user_conf[uid][oid]])
print 'Number of training pairs: %d' % len(self.tr_pairs)
if self.tr_method == 4:
self.user_pref = {}
self.user_conf = {}
for uid in xrange(len(self.user_ids)):
self.user_pref[uid] = np.array([0 for i in xrange(len(self.organizer_ids))])
self.user_conf[uid] = np.array([self.beta1 for i in xrange(len(self.organizer_ids))])
self.user_interacted_event = defaultdict(set)
self.event_organizer = [0 for i in xrange(len(self.event_ids))]
self.tr_pairs = []
for entry in data:
uid, eid, oid = self.user_ids[entry[0]], self.event_ids[entry[1]], self.organizer_ids[entry[3]]
self.event_organizer[eid] = oid
self.tr_pairs.append([uid, eid])
self.user_interacted_event[uid].add(eid)
self.pool_eids = [i for i in xrange(len(self.event_ids))]
del data
print "Number of users: %d" % len(self.user_ids)
print "Number of organizers: %d" % len(self.organizer_ids)
def train(self):
print 'Start training'
if self.tr_method == 1:
for i in xrange(self.niters1):
tic()
self.als_train()
cost = toc()
print 'Iteration %d, time cost: %.3f seconds.' % (i+1, cost)
self.evaluation()
elif self.tr_method == 2:
for i in xrange(self.niters2):
tic()
self.sgd_train()
cost = toc()
print 'Iteration %d, time cost: %.3f seconds.' % (i+1, cost)
elif self.tr_method == 3:
self.evaluation()
for i in xrange(self.niters2):
tic()
self.sgd1_train()
cost = toc()
print 'Iteration %d, time cost: %.3f seconds.' % (i+1, cost)
self.evaluation()
elif self.tr_method == 4:
self.evaluation()
for i in xrange(self.niters2):
tic()
self.sgd2_train()
cost = toc()
print 'Iteration %d, time cost: %.3f seconds.' % (i+1, cost)
self.evaluation()
else:
print 'Error choice of training method!'
sys.exit(1)
self.save_model()
def als_train(self):
for oid in xrange(len(self.organizer_ids)):
sys.stdout.write("\rFINISHED OID NUM: %d. " % (oid+1))
sys.stdout.flush()
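            # Closed-form weighted ridge-regression update, as in weighted ALS for
            # implicit feedback: o = (U^T C_o U + lambda*I)^-1 U^T C_o p_o, where
            # C_o holds the per-user confidences and p_o the preference vector for
            # this organizer. The symmetric update for the user factors follows below.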
self.organizer_factor[oid] = np.dot(np.dot(np.linalg.inv(np.dot(np.transpose(self.user_factor)*self.organizer_conf[oid], self.user_factor)+self.lambda1*np.eye(self.ndim)), np.transpose(self.user_factor))*self.organizer_conf[oid],np.transpose(self.organizer_pref[oid]))
for uid in xrange(len(self.user_ids)):
sys.stdout.write("\rFINISHED UID NUM: %d. " % (uid+1))
sys.stdout.flush()
self.user_factor[uid] = np.dot(np.dot(np.linalg.inv(np.dot(np.transpose(self.organizer_factor)*self.user_conf[uid], self.organizer_factor)+self.lambda1*np.eye(self.ndim)), np.transpose(self.organizer_factor))*self.user_conf[uid], np.transpose(self.user_pref[uid]))
def sgd_train(self):
random.shuffle(self.tr_pairs)
for i, pair in enumerate(self.tr_pairs):
uid, oid = pair
self.sgd_update(uid, oid, 1, self.alpha2)
finished_num = 0
for neg_oid in self.pool_oids:
if neg_oid not in self.user_interacted_organizer[uid]:
self.sgd_update(uid, neg_oid, 0, self.beta2)
finished_num += 1
if finished_num == self.neg_sample:
break
if (i+1) % 100 == 0:
random.shuffle(self.pool_oids)
sys.stdout.write("\rFINISHED PAIR NUM: %d. " % (i+1))
sys.stdout.flush()
def sgd1_train(self):
random.shuffle(self.tr_pairs)
for i, pair in enumerate(self.tr_pairs):
self.sgd_update(pair[0], pair[1], pair[2], 2*pair[3])
if (i+1) % 1000 == 0:
sys.stdout.write("\rFINISHED PAIR NUM: %d. " % (i+1))
sys.stdout.flush()
def sgd2_train(self):
random.shuffle(self.tr_pairs)
scan_idx = 0
for i, pair in enumerate(self.tr_pairs):
uid, eid = pair
oid = self.event_organizer[eid]
self.sgd_update(uid, oid, 1, self.alpha2)
finished_neg_num = 0
for j in xrange(scan_idx, len(self.pool_eids)):
neg_eid = self.pool_eids[j]
if neg_eid not in self.user_interacted_event[uid]:
self.sgd_update(uid, self.event_organizer[neg_eid], 0, self.beta2)
finished_neg_num += 1
if finished_neg_num == self.neg_sample:
break
scan_idx = j+1
if scan_idx == len(self.pool_eids):
scan_idx = 0
random.shuffle(self.pool_eids)
if (i+1) % 1000 == 0:
sys.stdout.write("\rFINISHED PAIR NUM: %d. " % (i+1))
sys.stdout.flush()
def sgd_update(self, uid, oid, label, weight):
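        # One stochastic update on the weighted squared error w*(u.o - label)^2
        # with L2 regularization, as implemented below:
        #   res = w*(u.o - label)
        #   u  <- u - lr*(res*o + lambda*u)
        #   o  <- o - lr*(res*u + lambda*o)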
res = weight*(np.dot(self.user_factor[uid], self.organizer_factor[oid]) - label)
tmp_user_factor = self.user_factor[uid] - self.lr2*(res*self.organizer_factor[oid]+self.lambda2*self.user_factor[uid])
try:
self.organizer_factor[oid] -= self.lr2*(res*self.user_factor[uid]+self.lambda2*self.organizer_factor[oid])
except:
print self.user_factor[uid]
print self.organizer_factor[oid]
raw_input()
self.user_factor[uid] = tmp_user_factor
def evaluation(self):
total_error = 0
for uid in xrange(len(self.user_ids)):
for oid in xrange(len(self.organizer_ids)):
total_error += self.user_conf[uid][oid]*(np.dot(self.user_factor[uid], self.organizer_factor[oid]) - self.user_pref[uid][oid])**2
error = total_error/(len(self.user_ids)*len(self.organizer_ids))
print 'Error: %f' % error
def save_model(self):
model_fd = open(settings["WMF_MODEL"], "wb")
pickle.dump([self.user_factor, self.organizer_factor], model_fd)
def load_model(self):
model_fd = open(settings["WMF_MODEL"], "rb")
self.user_factor, self.organizer_factor = pickle.load(model_fd)
def genRecommendResult(self, restart, train_file, test_file, init_choice, result_path):
if not restart:
self.model_init(train_file, init_choice)
self.load_model()
data = [entry for entry in csv.reader(open(test_file))]
event_oid = {}
for entry in data:
eventname, oname = entry[1], entry[3]
if eventname in event_oid:
continue
if oname in self.organizer_ids:
event_oid[eventname] = self.organizer_ids[oname]
else:
event_oid[eventname] = -1
del data
wfd = open(result_path, 'w')
score = 0
print 'Number of test events: %d' % len(event_oid)
for uid in xrange(len(self.user_ids)):
wfd.write("%s" % self.ruser_ids[uid])
organizer_pref = {}
newevent_pref = []
for eventname in event_oid:
oid = event_oid[eventname]
if oid == -1:
newevent_pref.append([eventname, MIN_PREF])
else:
if oid in organizer_pref:
newevent_pref.append([eventname, organizer_pref[oid]])
else:
score = np.dot(self.user_factor[uid], self.organizer_factor[oid])
                        organizer_pref[oid] = score
newevent_pref.append([eventname, score])
results = sorted(newevent_pref, key=lambda x:x[1], reverse=True)
recommendations = [x[0] for x in results]
for event in recommendations[:settings["RE_TOPK"]]:
wfd.write(",%s" % event)
wfd.write("\n")
sys.stdout.write("\rFINISHED USER NUM: %d. " % (uid+1))
sys.stdout.flush()
wfd.close()
print ''
|
anthonylife/EventRecommendation
|
src/WMF/model.py
|
Python
|
apache-2.0
| 14,466
|
[
"Gaussian"
] |
48db89893d36383560dab357fb0255421a04138b242aacc7c32a23e3d5c80e82
|
"""
Title: Probabilistic Bayesian Neural Networks
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/01/15
Last modified: 2021/01/15
Description: Building probabilistic Bayesian neural network models with TensorFlow Probability.
"""
"""
## Introduction
Taking a probabilistic approach to deep learning allows us to account for *uncertainty*,
so that models can assign lower levels of confidence to incorrect predictions.
Sources of uncertainty can be found in the data, due to measurement error or
noise in the labels, or the model, due to insufficient data availability for
the model to learn effectively.
This example demonstrates how to build basic probabilistic Bayesian neural networks
to account for these two types of uncertainty.
We use the [TensorFlow Probability](https://www.tensorflow.org/probability) library,
which is compatible with the Keras API.
This example requires TensorFlow 2.3 or higher.
You can install Tensorflow Probability using the following command:
```python
pip install tensorflow-probability
```
"""
"""
## The dataset
We use the [Wine Quality](https://archive.ics.uci.edu/ml/datasets/wine+quality)
dataset, which is available in the [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/wine_quality).
We use the white wine subset, which contains 4,898 examples.
The dataset has 11 numerical physicochemical features of the wine, and the task
is to predict the wine quality, which is a score between 0 and 10.
In this example, we treat this as a regression task.
You can install TensorFlow Datasets using the following command:
```python
pip install tensorflow-datasets
```
"""
"""
## Setup
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
"""
## Create training and evaluation datasets
Here, we load the `wine_quality` dataset using `tfds.load()`, and we convert
the target feature to float. Then, we shuffle the dataset and split it into
training and test sets. We take the first `train_size` examples as the train
split, and the rest as the test split.
"""
def get_train_and_test_splits(train_size, batch_size=1):
    # We prefetch with a buffer the same size as the dataset because the dataset
# is very small and fits into memory.
dataset = (
tfds.load(name="wine_quality", as_supervised=True, split="train")
.map(lambda x, y: (x, tf.cast(y, tf.float32)))
.prefetch(buffer_size=dataset_size)
.cache()
)
# We shuffle with a buffer the same size as the dataset.
train_dataset = (
dataset.take(train_size).shuffle(buffer_size=train_size).batch(batch_size)
)
test_dataset = dataset.skip(train_size).batch(batch_size)
return train_dataset, test_dataset
"""
## Compile, train, and evaluate the model
"""
hidden_units = [8, 8]
learning_rate = 0.001
def run_experiment(model, loss, train_dataset, test_dataset):
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=learning_rate),
loss=loss,
metrics=[keras.metrics.RootMeanSquaredError()],
)
print("Start training the model...")
model.fit(train_dataset, epochs=num_epochs, validation_data=test_dataset)
print("Model training finished.")
_, rmse = model.evaluate(train_dataset, verbose=0)
print(f"Train RMSE: {round(rmse, 3)}")
print("Evaluating model performance...")
_, rmse = model.evaluate(test_dataset, verbose=0)
print(f"Test RMSE: {round(rmse, 3)}")
"""
## Create model inputs
"""
FEATURE_NAMES = [
"fixed acidity",
"volatile acidity",
"citric acid",
"residual sugar",
"chlorides",
"free sulfur dioxide",
"total sulfur dioxide",
"density",
"pH",
"sulphates",
"alcohol",
]
def create_model_inputs():
inputs = {}
for feature_name in FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(1,), dtype=tf.float32
)
return inputs
"""
## Experiment 1: standard neural network
We create a standard deterministic neural network model as a baseline.
"""
def create_baseline_model():
inputs = create_model_inputs()
input_values = [value for _, value in sorted(inputs.items())]
features = keras.layers.concatenate(input_values)
features = layers.BatchNormalization()(features)
# Create hidden layers with deterministic weights using the Dense layer.
for units in hidden_units:
features = layers.Dense(units, activation="sigmoid")(features)
# The output is deterministic: a single point estimate.
outputs = layers.Dense(units=1)(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
"""
Let's split the wine dataset into training and test sets, with 85% and 15% of
the examples, respectively.
"""
dataset_size = 4898
batch_size = 256
train_size = int(dataset_size * 0.85)
train_dataset, test_dataset = get_train_and_test_splits(train_size, batch_size)
"""
Now let's train the baseline model. We use the `MeanSquaredError`
as the loss function.
"""
num_epochs = 100
mse_loss = keras.losses.MeanSquaredError()
baseline_model = create_baseline_model()
run_experiment(baseline_model, mse_loss, train_dataset, test_dataset)
"""
We take a sample from the test set and use the model to obtain predictions for them.
Note that since the baseline model is deterministic, we get a single
*point estimate* prediction for each test example, with no information about the
uncertainty of the model nor the prediction.
"""
sample = 10
examples, targets = list(test_dataset.unbatch().shuffle(batch_size * 10).batch(sample))[
0
]
predicted = baseline_model(examples).numpy()
for idx in range(sample):
print(f"Predicted: {round(float(predicted[idx][0]), 1)} - Actual: {targets[idx]}")
"""
## Experiment 2: Bayesian neural network (BNN)
The objective of the Bayesian approach to modeling neural networks is to capture
the *epistemic uncertainty*, which is uncertainty about the model's fitness
due to limited training data.
The idea is that, instead of learning specific weight (and bias) *values* in the
neural network, the Bayesian approach learns weight *distributions*
- from which we can sample to produce an output for a given input -
to encode weight uncertainty.
Thus, we need to define the prior and posterior distributions of these weights,
and the training process is to learn the parameters of these distributions.
"""
# Define the prior weight distribution as Normal of mean=0 and stddev=1.
# Note that, in this example, the prior distribution is not trainable,
# as we fix its parameters.
def prior(kernel_size, bias_size, dtype=None):
n = kernel_size + bias_size
prior_model = keras.Sequential(
[
tfp.layers.DistributionLambda(
lambda t: tfp.distributions.MultivariateNormalDiag(
loc=tf.zeros(n), scale_diag=tf.ones(n)
)
)
]
)
return prior_model
# Define variational posterior weight distribution as multivariate Gaussian.
# Note that the learnable parameters for this distribution are the means,
# variances, and covariances.
def posterior(kernel_size, bias_size, dtype=None):
n = kernel_size + bias_size
posterior_model = keras.Sequential(
[
tfp.layers.VariableLayer(
tfp.layers.MultivariateNormalTriL.params_size(n), dtype=dtype
),
tfp.layers.MultivariateNormalTriL(n),
]
)
return posterior_model
"""
We use the `tfp.layers.DenseVariational` layer instead of the standard
`keras.layers.Dense` layer in the neural network model.
"""
def create_bnn_model(train_size):
inputs = create_model_inputs()
features = keras.layers.concatenate(list(inputs.values()))
features = layers.BatchNormalization()(features)
# Create hidden layers with weight uncertainty using the DenseVariational layer.
for units in hidden_units:
features = tfp.layers.DenseVariational(
units=units,
make_prior_fn=prior,
make_posterior_fn=posterior,
kl_weight=1 / train_size,
activation="sigmoid",
)(features)
# The output is deterministic: a single point estimate.
outputs = layers.Dense(units=1)(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
"""
The epistemic uncertainty can be reduced as we increase the size of the
training data. That is, the more data the BNN model sees, the more certain it is
about its estimates for the weights (distribution parameters).
Let's test this behaviour by training the BNN model on a small subset of
the training set, and then on the full training set, to compare the output variances.
"""
"""
### Train BNN with a small training subset.
"""
num_epochs = 500
train_sample_size = int(train_size * 0.3)
small_train_dataset = train_dataset.unbatch().take(train_sample_size).batch(batch_size)
bnn_model_small = create_bnn_model(train_sample_size)
run_experiment(bnn_model_small, mse_loss, small_train_dataset, test_dataset)
"""
Since we have trained a BNN model, the model produces a different output each time
we call it with the same input, because each time a new set of weights is sampled
from the distributions to construct the network and produce an output.
The less certain the model weights are, the more variability (a wider range) we will
see in the outputs for the same inputs.
"""
def compute_predictions(model, iterations=100):
predicted = []
for _ in range(iterations):
predicted.append(model(examples).numpy())
predicted = np.concatenate(predicted, axis=1)
prediction_mean = np.mean(predicted, axis=1).tolist()
prediction_min = np.min(predicted, axis=1).tolist()
prediction_max = np.max(predicted, axis=1).tolist()
prediction_range = (np.max(predicted, axis=1) - np.min(predicted, axis=1)).tolist()
for idx in range(sample):
print(
f"Predictions mean: {round(prediction_mean[idx], 2)}, "
f"min: {round(prediction_min[idx], 2)}, "
f"max: {round(prediction_max[idx], 2)}, "
f"range: {round(prediction_range[idx], 2)} - "
f"Actual: {targets[idx]}"
)
compute_predictions(bnn_model_small)
"""
### Train BNN with the whole training set.
"""
num_epochs = 500
bnn_model_full = create_bnn_model(train_size)
run_experiment(bnn_model_full, mse_loss, train_dataset, test_dataset)
compute_predictions(bnn_model_full)
"""
Notice that the model trained with the full training dataset shows a smaller range
(uncertainty) in the prediction values for the same inputs, compared to the model
trained with a subset of the training dataset.
"""
"""
## Experiment 3: probabilistic Bayesian neural network
So far, the output of the standard and the Bayesian NN models that we built is
deterministic, that is, produces a point estimate as a prediction for a given example.
We can create a probabilistic NN by letting the model output a distribution.
In this case, the model captures the *aleatoric uncertainty* as well,
which is due to irreducible noise in the data, or to the stochastic nature of the
process generating the data.
In this example, we model the output as an `IndependentNormal` distribution,
with learnable mean and variance parameters. If the task were classification,
we would have used `IndependentBernoulli` with binary classes, and `OneHotCategorical`
with multiple classes, to model the distribution of the model output.
"""
def create_probablistic_bnn_model(train_size):
inputs = create_model_inputs()
features = keras.layers.concatenate(list(inputs.values()))
features = layers.BatchNormalization()(features)
# Create hidden layers with weight uncertainty using the DenseVariational layer.
for units in hidden_units:
features = tfp.layers.DenseVariational(
units=units,
make_prior_fn=prior,
make_posterior_fn=posterior,
kl_weight=1 / train_size,
activation="sigmoid",
)(features)
    # Create a probabilistic output (Normal distribution), and use the `Dense` layer
# to produce the parameters of the distribution.
# We set units=2 to learn both the mean and the variance of the Normal distribution.
distribution_params = layers.Dense(units=2)(features)
outputs = tfp.layers.IndependentNormal(1)(distribution_params)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
"""
Since the output of the model is a distribution, rather than a point estimate,
we use the [negative loglikelihood](https://en.wikipedia.org/wiki/Likelihood_function)
as our loss function, which measures how likely it is to observe the true data (targets)
under the estimated distribution produced by the model.
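Concretely, for a target `y` and a predicted distribution `dist`, the per-example
loss is `-dist.log_prob(y)`, which is exactly what the function below returns.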
"""
def negative_loglikelihood(targets, estimated_distribution):
return -estimated_distribution.log_prob(targets)
num_epochs = 1000
prob_bnn_model = create_probablistic_bnn_model(train_size)
run_experiment(prob_bnn_model, negative_loglikelihood, train_dataset, test_dataset)
"""
Now let's produce an output from the model given the test examples.
The output is now a distribution, and we can use its mean and variance
to compute the confidence intervals (CI) of the prediction.
"""
prediction_distribution = prob_bnn_model(examples)
prediction_mean = prediction_distribution.mean().numpy().tolist()
prediction_stdv = prediction_distribution.stddev().numpy()
# The 95% CI is computed as mean ± (1.96 * stdv)
upper = (prediction_mean + (1.96 * prediction_stdv)).tolist()
lower = (prediction_mean - (1.96 * prediction_stdv)).tolist()
prediction_stdv = prediction_stdv.tolist()
for idx in range(sample):
print(
f"Prediction mean: {round(prediction_mean[idx][0], 2)}, "
f"stddev: {round(prediction_stdv[idx][0], 2)}, "
f"95% CI: [{round(upper[idx][0], 2)} - {round(lower[idx][0], 2)}]"
f" - Actual: {targets[idx]}"
)
|
keras-team/keras-io
|
examples/keras_recipes/bayesian_neural_networks.py
|
Python
|
apache-2.0
| 14,079
|
[
"Gaussian"
] |
ee1f504c050bbad2fa668aea163f5c56283f054ac766ae7c3d6039b4be1f2770
|
# -*- coding: utf-8 -*-
# TODO: Port to pytest
# PEP8 asserts
from copy import deepcopy
import httplib as http
import time
import mock
import pytest
import pytz
import datetime
from nose.tools import * # noqa
from tests.base import OsfTestCase, fake
from osf_tests.factories import (
UserFactory, NodeFactory, ProjectFactory,
AuthUserFactory, RegistrationFactory
)
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from website.exceptions import NodeStateError
from addons.wiki import settings
from addons.wiki import views
from addons.wiki.exceptions import InvalidVersionError
from addons.wiki.models import WikiPage, WikiVersion, render_content
from addons.wiki.utils import (
get_sharejs_uuid, generate_private_uuid, share_db, delete_share_doc,
migrate_uuid, format_wiki_version, serialize_wiki_settings, serialize_wiki_widget
)
from framework.auth import Auth
from django.utils import timezone
from addons.wiki.utils import to_mongo_key
from .config import EXAMPLE_DOCS, EXAMPLE_OPS
pytestmark = pytest.mark.django_db
# forward slashes are not allowed, typically they would be replaced with spaces
SPECIAL_CHARACTERS_ALL = u'`~!@#$%^*()-=_+ []{}\|/?.df,;:''"'
SPECIAL_CHARACTERS_ALLOWED = u'`~!@#$%^*()-=_+ []{}\|?.df,;:''"'
@pytest.mark.enable_bookmark_creation
class TestWikiViews(OsfTestCase):
def setUp(self):
super(TestWikiViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.consolidate_auth = Auth(user=self.project.creator)
def test_wiki_url_get_returns_200(self):
url = self.project.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url)
assert_equal(res.status_code, 200)
def test_wiki_url_404_with_no_write_permission(self): # and not public
url = self.project.web_url_for('project_wiki_view', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_wiki_deleted_404_with_no_write_permission(self, mock_sharejs):
self.project.update_node_wiki('funpage', 'Version 1', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='funpage')
res = self.app.get(url)
assert_equal(res.status_code, 200)
delete_url = self.project.api_url_for('project_wiki_delete', wname='funpage')
self.app.delete(delete_url, auth=self.user.auth)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_wiki_url_with_path_get_returns_200(self):
self.project.update_node_wiki('funpage', 'Version 1', Auth(self.user))
self.project.update_node_wiki('funpage', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for(
'project_wiki_view',
wname='funpage',
) + '?view&compare=1&edit'
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_wiki_url_with_edit_get_redirects_to_no_edit_params_with_no_write_permission(self):
self.project.update_node_wiki('funpage', 'Version 1', Auth(self.user))
self.project.update_node_wiki('funpage', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for(
'project_wiki_view',
wname='funpage',
compare=1,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
# Public project, can_view, redirects without edit params
url = self.project.web_url_for(
'project_wiki_view',
wname='funpage',
) + '?edit'
res = self.app.get(url).maybe_follow()
assert_equal(res.status_code, 200)
# Check publicly editable
wiki = self.project.get_addon('wiki')
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=True)
res = self.app.get(url, auth=AuthUserFactory().auth, expect_errors=False)
assert_equal(res.status_code, 200)
# Check publicly editable but not logged in
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_wiki_url_for_pointer_returns_200(self):
# TODO: explain how this tests a pointer
project = ProjectFactory(is_public=True)
self.project.add_pointer(project, Auth(self.project.creator), save=True)
url = self.project.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url)
assert_equal(res.status_code, 200)
@pytest.mark.skip('#TODO: Fix or mock mongodb for sharejs')
def test_wiki_draft_returns_200(self):
url = self.project.api_url_for('wiki_page_draft', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_wiki_content_returns_200(self):
url = self.project.api_url_for('wiki_page_content', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
@mock.patch('addons.wiki.models.WikiVersion.rendered_before_update', new_callable=mock.PropertyMock)
def test_wiki_content_rendered_before_update(self, mock_rendered_before_update):
content = 'Some content'
self.project.update_node_wiki('somerandomid', content, Auth(self.user))
self.project.save()
mock_rendered_before_update.return_value = True
url = self.project.api_url_for('wiki_page_content', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_true(res.json['rendered_before_update'])
mock_rendered_before_update.return_value = False
res = self.app.get(url, auth=self.user.auth)
assert_false(res.json['rendered_before_update'])
def test_wiki_url_for_component_returns_200(self):
component = NodeFactory(parent=self.project, is_public=True)
url = component.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url)
assert_equal(res.status_code, 200)
def test_project_wiki_edit_post(self):
self.project.update_node_wiki(
'home',
content='old content',
auth=Auth(self.project.creator)
)
url = self.project.web_url_for('project_wiki_edit_post', wname='home')
res = self.app.post(url, {'content': 'new content'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
# page was updated with new content
new_wiki = self.project.get_wiki_version('home')
assert_equal(new_wiki.content, 'new content')
def test_project_wiki_edit_post_with_new_wname_and_no_content(self):
# note: forward slashes not allowed in page_name
page_name = fake.catch_phrase().replace('/', ' ')
old_wiki_page_count = WikiVersion.objects.all().count()
url = self.project.web_url_for('project_wiki_edit_post', wname=page_name)
# User submits to edit form with no content
res = self.app.post(url, {'content': ''}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
new_wiki_page_count = WikiVersion.objects.all().count()
# A new wiki page was created in the db
assert_equal(new_wiki_page_count, old_wiki_page_count + 1)
# Node now has the new wiki page associated with it
self.project.reload()
new_page = self.project.get_wiki_version(page_name)
assert_is_not_none(new_page)
def test_project_wiki_edit_post_with_new_wname_and_content(self):
# note: forward slashes not allowed in page_name
page_name = fake.catch_phrase().replace('/', ' ')
page_content = fake.bs()
old_wiki_page_count = WikiVersion.objects.all().count()
url = self.project.web_url_for('project_wiki_edit_post', wname=page_name)
# User submits to edit form with no content
res = self.app.post(url, {'content': page_content}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
new_wiki_page_count = WikiVersion.objects.all().count()
# A new wiki page was created in the db
assert_equal(new_wiki_page_count, old_wiki_page_count + 1)
# Node now has the new wiki page associated with it
self.project.reload()
new_page = self.project.get_wiki_version(page_name)
assert_is_not_none(new_page)
# content was set
assert_equal(new_page.content, page_content)
def test_project_wiki_edit_post_with_non_ascii_title(self):
# regression test for https://github.com/CenterForOpenScience/openscienceframework.org/issues/1040
# wname doesn't exist in the db, so it will be created
new_wname = u'øˆ∆´ƒøßå√ß'
url = self.project.web_url_for('project_wiki_edit_post', wname=new_wname)
res = self.app.post(url, {'content': 'new content'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
wiki = self.project.get_wiki_page(new_wname)
assert_equal(wiki.page_name, new_wname)
# updating content should return correct url as well.
res = self.app.post(url, {'content': 'updated content'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
def test_project_wiki_edit_post_with_special_characters(self):
new_wname = 'title: ' + SPECIAL_CHARACTERS_ALLOWED
new_wiki_content = 'content: ' + SPECIAL_CHARACTERS_ALL
url = self.project.web_url_for('project_wiki_edit_post', wname=new_wname)
res = self.app.post(url, {'content': new_wiki_content}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
wiki = self.project.get_wiki_version(new_wname)
assert_equal(wiki.wiki_page.page_name, new_wname)
assert_equal(wiki.content, new_wiki_content)
assert_equal(res.status_code, 200)
def test_wiki_edit_get_home(self):
url = self.project.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_project_wiki_view_scope(self):
self.project.update_node_wiki('home', 'Version 1', Auth(self.user))
self.project.update_node_wiki('home', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='home', view=2)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
url = self.project.web_url_for('project_wiki_view', wname='home', view=3)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
url = self.project.web_url_for('project_wiki_view', wname='home', view=0)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_project_wiki_compare_returns_200(self):
self.project.update_node_wiki('home', 'updated content', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='home') + '?compare'
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_project_wiki_compare_scope(self):
self.project.update_node_wiki('home', 'Version 1', Auth(self.user))
self.project.update_node_wiki('home', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='home', compare=2)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
url = self.project.web_url_for('project_wiki_view', wname='home', compare=3)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
url = self.project.web_url_for('project_wiki_view', wname='home', compare=0)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_wiki_page_creation_strips_whitespace(self):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1080
# wname has a trailing space
url = self.project.web_url_for('project_wiki_view', wname='cupcake ')
res = self.app.post(url, {'content': 'blah'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
wiki = self.project.get_wiki_version('cupcake')
assert_is_not_none(wiki)
def test_wiki_validate_name(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='Capslock')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_wiki_validate_name_creates_blank_page(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='newpage', auth=self.consolidate_auth)
self.app.get(url, auth=self.user.auth)
self.project.reload()
assert_is_not_none(self.project.get_wiki_page('newpage'))
def test_wiki_validate_name_collision_doesnt_clear(self):
self.project.update_node_wiki('oldpage', 'some text', self.consolidate_auth)
url = self.project.api_url_for('project_wiki_validate_name', wname='oldpage', auth=self.consolidate_auth)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
url = self.project.api_url_for('wiki_page_content', wname='oldpage', auth=self.consolidate_auth)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json['wiki_content'], 'some text')
def test_wiki_validate_name_cannot_create_home(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='home')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_project_wiki_validate_name_mixed_casing(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='CaPsLoCk')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.update_node_wiki('CaPsLoCk', 'hello', self.consolidate_auth)
assert_equal(self.project.get_wiki_page('CaPsLoCk').page_name, 'CaPsLoCk')
def test_project_wiki_validate_name_display_correct_capitalization(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='CaPsLoCk')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('CaPsLoCk', res)
def test_project_wiki_validate_name_conflict_different_casing(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='CAPSLOCK')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.update_node_wiki('CaPsLoCk', 'hello', self.consolidate_auth)
url = self.project.api_url_for('project_wiki_validate_name', wname='capslock')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_project_dashboard_shows_no_wiki_content_text(self):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1104
project = ProjectFactory(creator=self.user)
url = project.web_url_for('view_project')
res = self.app.get(url, auth=self.user.auth)
assert_in('Add important information, links, or images here to describe your project.', res)
def test_project_dashboard_wiki_wname_get_shows_non_ascii_characters(self):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1104
text = u'你好'
self.project.update_node_wiki('home', text, Auth(self.user))
# can view wiki preview from project dashboard
url = self.project.web_url_for('view_project')
res = self.app.get(url, auth=self.user.auth)
assert_in(text, res)
def test_project_wiki_home_api_route(self):
url = self.project.api_url_for('project_wiki_home')
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.status_code, 302)
# TODO: should this route exist? it redirects you to the web_url_for, not api_url_for.
# page_url = self.project.api_url_for('project_wiki_view', wname='home')
# assert_in(page_url, res.location)
def test_project_wiki_home_web_route(self):
page_url = self.project.web_url_for('project_wiki_view', wname='home', _guid=True)
url = self.project.web_url_for('project_wiki_home')
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.status_code, 302)
assert_in(page_url, res.location)
def test_wiki_id_url_get_returns_302_and_resolves(self):
name = 'page by id'
self.project.update_node_wiki(name, 'some content', Auth(self.project.creator))
page = self.project.get_wiki_page(name)
page_url = self.project.web_url_for('project_wiki_view', wname=page.page_name, _guid=True)
url = self.project.web_url_for('project_wiki_id_page', wid=page._primary_key, _guid=True)
res = self.app.get(url)
assert_equal(res.status_code, 302)
assert_in(page_url, res.location)
res = res.follow()
assert_equal(res.status_code, 200)
assert_in(page_url, res.request.url)
def test_wiki_id_url_get_returns_404(self):
url = self.project.web_url_for('project_wiki_id_page', wid='12345', _guid=True)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_home_is_capitalized_in_web_view(self):
url = self.project.web_url_for('project_wiki_home', wid='home', _guid=True)
res = self.app.get(url, auth=self.user.auth).follow(auth=self.user.auth)
page_name_elem = res.html.find('span', {'id': 'pageName'})
assert_in('Home', page_name_elem.text)
def test_wiki_widget_no_content(self):
res = serialize_wiki_widget(self.project)
assert_is_none(res['wiki_content'])
def test_wiki_widget_short_content_no_cutoff(self):
short_content = 'a' * 150
self.project.update_node_wiki('home', short_content, Auth(self.user))
res = serialize_wiki_widget(self.project)
assert_in(short_content, res['wiki_content'])
assert_not_in('...', res['wiki_content'])
assert_false(res['more'])
def test_wiki_widget_long_content_cutoff(self):
long_content = 'a' * 600
self.project.update_node_wiki('home', long_content, Auth(self.user))
res = serialize_wiki_widget(self.project)
assert_less(len(res['wiki_content']), 520) # wiggle room for closing tags
assert_in('...', res['wiki_content'])
assert_true(res['more'])
def test_wiki_widget_with_multiple_short_pages_has_more(self):
project = ProjectFactory(is_public=True, creator=self.user)
short_content = 'a' * 150
project.update_node_wiki('home', short_content, Auth(self.user))
project.update_node_wiki('andanotherone', short_content, Auth(self.user))
res = serialize_wiki_widget(project)
assert_true(res['more'])
@mock.patch('addons.wiki.models.WikiVersion.rendered_before_update', new_callable=mock.PropertyMock)
def test_wiki_widget_rendered_before_update(self, mock_rendered_before_update):
# New pages use js renderer
mock_rendered_before_update.return_value = False
self.project.update_node_wiki('home', 'updated content', Auth(self.user))
res = serialize_wiki_widget(self.project)
assert_false(res['rendered_before_update'])
# Old pages use a different version of js render
mock_rendered_before_update.return_value = True
res = serialize_wiki_widget(self.project)
assert_true(res['rendered_before_update'])
def test_read_only_users_cannot_view_edit_pane(self):
url = self.project.web_url_for('project_wiki_view', wname='home')
# No write permissions
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_not_in('data-osf-panel="Edit"', res.text)
# Write permissions
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('data-osf-panel="Edit"', res.text)
# Publicly editable
wiki = self.project.get_addon('wiki')
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=True)
res = self.app.get(url, auth=AuthUserFactory().auth)
assert_equal(res.status_code, 200)
assert_in('data-osf-panel="Edit"', res.text)
# Publicly editable but not logged in
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_not_in('data-osf-panel="Edit"', res.text)
def test_wiki_widget_not_show_in_registration_for_contributor(self):
registration = RegistrationFactory(project=self.project)
res = self.app.get(
registration.web_url_for('view_project'),
auth=self.user.auth
)
assert_equal(res.status_code, 200)
assert_not_in('Add important information, links, or images here to describe your project.', res.text)
class TestViewHelpers(OsfTestCase):
def setUp(self):
super(TestViewHelpers, self).setUp()
self.project = ProjectFactory()
self.wname = 'New page'
self.project.update_node_wiki(self.wname, 'some content', Auth(self.project.creator))
def test_get_wiki_web_urls(self):
urls = views._get_wiki_web_urls(self.project, self.wname)
assert_equal(urls['base'], self.project.web_url_for('project_wiki_home', _guid=True))
assert_equal(urls['edit'], self.project.web_url_for('project_wiki_view', wname=self.wname, _guid=True))
assert_equal(urls['home'], self.project.web_url_for('project_wiki_home', _guid=True))
assert_equal(urls['page'], self.project.web_url_for('project_wiki_view', wname=self.wname, _guid=True))
def test_get_wiki_api_urls(self):
urls = views._get_wiki_api_urls(self.project, self.wname)
assert_equal(urls['base'], self.project.api_url_for('project_wiki_home'))
assert_equal(urls['delete'], self.project.api_url_for('project_wiki_delete', wname=self.wname))
assert_equal(urls['rename'], self.project.api_url_for('project_wiki_rename', wname=self.wname))
assert_equal(urls['content'], self.project.api_url_for('wiki_page_content', wname=self.wname))
assert_equal(urls['settings'], self.project.api_url_for('edit_wiki_settings'))
class TestWikiDelete(OsfTestCase):
def setUp(self):
super(TestWikiDelete, self).setUp()
creator = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=creator)
self.consolidate_auth = Auth(user=self.project.creator)
self.auth = creator.auth
self.project.update_node_wiki('Elephants', 'Hello Elephants', self.consolidate_auth)
self.project.update_node_wiki('Lions', 'Hello Lions', self.consolidate_auth)
self.elephant_wiki = self.project.get_wiki_page('Elephants')
self.lion_wiki = self.project.get_wiki_page('Lions')
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_project_wiki_delete(self, mock_sharejs):
page = self.elephant_wiki
assert_equal(page.page_name.lower(), 'elephants')
assert_equal(page.deleted, None)
url = self.project.api_url_for(
'project_wiki_delete',
wname='Elephants'
)
mock_now = datetime.datetime(2017, 3, 16, 11, 00, tzinfo=pytz.utc)
with mock.patch.object(timezone, 'now', return_value=mock_now):
self.app.delete(
url,
auth=self.auth
)
self.project.reload()
page.reload()
assert_equal(page.deleted, mock_now)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_project_wiki_delete_w_valid_special_characters(self, mock_sharejs):
# TODO: Need to understand why calling update_node_wiki with failure causes transaction rollback issue later
# with assert_raises(NameInvalidError):
# self.project.update_node_wiki(SPECIAL_CHARACTERS_ALL, 'Hello Special Characters', self.consolidate_auth)
self.project.update_node_wiki(SPECIAL_CHARACTERS_ALLOWED, 'Hello Special Characters', self.consolidate_auth)
self.special_characters_wiki = self.project.get_wiki_page(SPECIAL_CHARACTERS_ALLOWED)
assert_equal(self.special_characters_wiki.page_name, SPECIAL_CHARACTERS_ALLOWED)
url = self.project.api_url_for(
'project_wiki_delete',
wname=SPECIAL_CHARACTERS_ALLOWED
)
mock_now = datetime.datetime(2017, 3, 16, 11, 00, tzinfo=pytz.utc)
with mock.patch.object(timezone, 'now', return_value=mock_now):
self.app.delete(
url,
auth=self.auth
)
self.project.reload()
self.special_characters_wiki.reload()
assert_equal(self.special_characters_wiki.deleted, mock_now)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_wiki_versions_do_not_reappear_after_delete(self, mock_sharejs):
# Creates a wiki page
self.project.update_node_wiki('Hippos', 'Hello hippos', self.consolidate_auth)
# Edits it two times
wiki_page = self.project.get_wiki_page('Hippos')
assert_equal(wiki_page.deleted, None)
assert_equal(wiki_page.current_version_number, 1)
self.project.update_node_wiki('Hippos', 'Hello hippopotamus', self.consolidate_auth)
wiki_page.reload()
assert_equal(wiki_page.current_version_number, 2)
# Deletes the wiki page
mock_now = datetime.datetime(2017, 3, 16, 11, 00, tzinfo=pytz.utc)
with mock.patch.object(timezone, 'now', return_value=mock_now):
self.project.delete_node_wiki('Hippos', self.consolidate_auth)
wiki_page.reload()
assert_equal(wiki_page.deleted, mock_now)
# Creates new wiki with same name as deleted wiki
self.project.update_node_wiki('Hippos', 'Hello again hippos', self.consolidate_auth)
wiki_page = self.project.get_wiki_page('Hippos')
assert_equal(wiki_page.current_version_number, 1)
self.project.update_node_wiki('Hippos', 'Hello again hippopotamus', self.consolidate_auth)
wiki_page.reload()
assert_equal(wiki_page.current_version_number, 2)
@pytest.mark.enable_implicit_clean
class TestWikiRename(OsfTestCase):
def setUp(self):
super(TestWikiRename, self).setUp()
creator = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=creator)
self.consolidate_auth = Auth(user=self.project.creator)
self.auth = creator.auth
self.project.update_node_wiki('home', 'Hello world', self.consolidate_auth)
self.page_name = 'page2'
self.project.update_node_wiki(self.page_name, 'content', self.consolidate_auth)
self.project.save()
self.page = self.project.get_wiki_version(self.page_name)
self.wiki = self.project.get_wiki_page('home')
self.url = self.project.api_url_for(
'project_wiki_rename',
wname=self.page_name,
)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_rename_wiki_page_valid(self, mock_sharejs, new_name=u'away'):
self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth
)
self.project.reload()
old_wiki = self.project.get_wiki_version(self.page_name)
assert_false(old_wiki)
new_wiki = self.project.get_wiki_version(new_name)
assert_true(new_wiki)
assert_equal(new_wiki.wiki_page._primary_key, self.page.wiki_page._primary_key)
assert_equal(new_wiki.content, self.page.content)
assert_equal(new_wiki.identifier, self.page.identifier)
def test_rename_wiki_page_invalid(self, new_name=u'invalid/name'):
res = self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth,
expect_errors=True,
)
assert_equal(http.BAD_REQUEST, res.status_code)
assert_equal(res.json['message_short'], 'Invalid name')
assert_equal(res.json['message_long'], 'Page name cannot contain forward slashes.')
self.project.reload()
old_wiki = self.project.get_wiki_page(self.page_name)
assert_true(old_wiki)
def test_rename_wiki_page_duplicate(self):
self.project.update_node_wiki('away', 'Hello world', self.consolidate_auth)
new_name = 'away'
res = self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth,
expect_errors=True,
)
assert_equal(res.status_code, 409)
def test_rename_wiki_name_not_found(self):
url = self.project.api_url_for('project_wiki_rename', wname='not_found_page_name')
res = self.app.put_json(url, {'value': 'new name'},
auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_cannot_rename_wiki_page_to_home(self):
user = AuthUserFactory()
# A fresh project where the 'home' wiki page has no content
project = ProjectFactory(creator=user)
project.update_node_wiki('Hello', 'hello world', Auth(user=user))
url = project.api_url_for('project_wiki_rename', wname='Hello')
res = self.app.put_json(url, {'value': 'home'}, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_rename_wiki_name_with_value_missing(self):
# value is missing
res = self.app.put_json(self.url, {}, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_rename_wiki_page_duplicate_different_casing(self):
# attempt to rename 'page2' from setup to different case of 'away'.
old_name = 'away'
new_name = 'AwAy'
self.project.update_node_wiki(old_name, 'Hello world', self.consolidate_auth)
res = self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth,
expect_errors=True
)
assert_equal(res.status_code, 409)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_rename_wiki_page_same_name_different_casing(self, mock_sharejs):
old_name = 'away'
new_name = 'AWAY'
self.project.update_node_wiki(old_name, 'Hello world', self.consolidate_auth)
url = self.project.api_url_for('project_wiki_rename', wname=old_name)
res = self.app.put_json(
url,
{'value': new_name},
auth=self.auth,
expect_errors=False
)
assert_equal(res.status_code, 200)
def test_cannot_rename_home_page(self):
url = self.project.api_url_for('project_wiki_rename', wname='home')
res = self.app.put_json(url, {'value': 'homelol'}, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 400)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_can_rename_to_a_deleted_page(self, mock_sharejs):
self.project.delete_node_wiki(self.page_name, self.consolidate_auth)
self.project.save()
# Creates a new page
self.project.update_node_wiki('page3', 'moarcontent', self.consolidate_auth)
self.project.save()
# Renames the wiki to the deleted page
url = self.project.api_url_for('project_wiki_rename', wname='page3')
res = self.app.put_json(url, {'value': self.page_name}, auth=self.auth)
assert_equal(res.status_code, 200)
def test_rename_wiki_page_with_valid_html(self):
# script is not an issue since data is sanitized via bleach or mako before display.
self.test_rename_wiki_page_valid(new_name=u'<html>hello<html>')
def test_rename_wiki_page_with_invalid_html(self):
# script is not an issue since data is sanitized via bleach or mako before display.
# that said, routes still do not accept forward slashes
self.test_rename_wiki_page_invalid(new_name=u'<html>hello</html>')
def test_rename_wiki_page_with_non_ascii_title(self):
self.test_rename_wiki_page_valid(new_name=u'øˆ∆´ƒøßå√ß')
def test_rename_wiki_page_with_valid_special_character_title(self):
self.test_rename_wiki_page_valid(new_name=SPECIAL_CHARACTERS_ALLOWED)
def test_rename_wiki_page_with_invalid_special_character_title(self):
self.test_rename_wiki_page_invalid(new_name=SPECIAL_CHARACTERS_ALL)
class TestWikiLinks(OsfTestCase):
def test_links(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
wiki_page = WikiFactory(
user=user,
node=project,
)
wiki = WikiVersionFactory(
content='[[wiki2]]',
wiki_page=wiki_page,
)
assert_in(
'/{}/wiki/wiki2/'.format(project._id),
wiki.html(project),
)
# Regression test for https://sentry.osf.io/osf/production/group/310/
def test_bad_links(self):
content = u'<span></span><iframe src="http://httpbin.org/"></iframe>'
user = AuthUserFactory()
node = ProjectFactory()
wiki_page = WikiFactory(
user=user,
node=node,
)
wiki = WikiVersionFactory(
content=content,
wiki_page=wiki_page,
)
expected = render_content(content, node)
assert_equal(
'<p><span></span><iframe src="<a href="http://httpbin.org/" rel="nofollow">http://httpbin.org/</a>"></iframe></p>',
wiki.html(node)
)
@pytest.mark.enable_bookmark_creation
class TestWikiUuid(OsfTestCase):
def setUp(self):
super(TestWikiUuid, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.wname = 'foo.bar'
self.wkey = to_mongo_key(self.wname)
def test_uuid_generated_once(self):
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
private_uuid = self.project.wiki_private_uuids.get(self.wkey)
assert_true(private_uuid)
assert_not_in(private_uuid, res.body)
assert_in(get_sharejs_uuid(self.project, self.wname), res.body)
# Revisit page; uuid has not changed
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(private_uuid, self.project.wiki_private_uuids.get(self.wkey))
def test_uuid_not_visible_without_write_permission(self):
self.project.update_node_wiki(self.wname, 'some content', Auth(self.user))
self.project.save()
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
private_uuid = self.project.wiki_private_uuids.get(self.wkey)
assert_true(private_uuid)
assert_not_in(private_uuid, res.body)
assert_in(get_sharejs_uuid(self.project, self.wname), res.body)
# Users without write permission should not be able to access
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_not_in(get_sharejs_uuid(self.project, self.wname), res.body)
def test_uuid_not_generated_without_write_permission(self):
self.project.update_node_wiki(self.wname, 'some content', Auth(self.user))
self.project.save()
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(url)
assert_equal(res.status_code, 200)
self.project.reload()
private_uuid = self.project.wiki_private_uuids.get(self.wkey)
assert_is_none(private_uuid)
def test_uuids_differ_between_pages(self):
wname1 = 'foo.bar'
url1 = self.project.web_url_for('project_wiki_view', wname=wname1)
res1 = self.app.get(url1, auth=self.user.auth)
assert_equal(res1.status_code, 200)
wname2 = 'bar.baz'
url2 = self.project.web_url_for('project_wiki_view', wname=wname2)
res2 = self.app.get(url2, auth=self.user.auth)
assert_equal(res2.status_code, 200)
self.project.reload()
uuid1 = get_sharejs_uuid(self.project, wname1)
uuid2 = get_sharejs_uuid(self.project, wname2)
assert_not_equal(uuid1, uuid2)
assert_in(uuid1, res1)
assert_in(uuid2, res2)
assert_not_in(uuid1, res2)
assert_not_in(uuid2, res1)
def test_uuids_differ_between_forks(self):
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
project_res = self.app.get(url, auth=self.user.auth)
assert_equal(project_res.status_code, 200)
self.project.reload()
fork = self.project.fork_node(Auth(self.user))
assert_true(fork.is_fork_of(self.project))
fork_url = fork.web_url_for('project_wiki_view', wname=self.wname)
fork_res = self.app.get(fork_url, auth=self.user.auth)
assert_equal(fork_res.status_code, 200)
fork.reload()
# uuids are not copied over to forks
assert_not_equal(
self.project.wiki_private_uuids.get(self.wkey),
fork.wiki_private_uuids.get(self.wkey)
)
project_uuid = get_sharejs_uuid(self.project, self.wname)
fork_uuid = get_sharejs_uuid(fork, self.wname)
assert_not_equal(project_uuid, fork_uuid)
assert_in(project_uuid, project_res)
assert_in(fork_uuid, fork_res)
assert_not_in(project_uuid, fork_res)
assert_not_in(fork_uuid, project_res)
@pytest.mark.skip('#TODO: Fix or mock mongodb for sharejs')
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migration_does_not_affect_forks(self, mock_sharejs):
original_uuid = generate_private_uuid(self.project, self.wname)
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
fork = self.project.fork_node(Auth(self.user))
assert_equal(fork.wiki_private_uuids.get(self.wkey), None)
migrate_uuid(self.project, self.wname)
assert_not_equal(original_uuid, self.project.wiki_private_uuids.get(self.wkey))
assert_equal(fork.wiki_private_uuids.get(self.wkey), None)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_uuid_persists_after_delete(self, mock_sharejs):
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
# Create wiki page
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
# Visit wiki edit page
edit_url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
original_private_uuid = self.project.wiki_private_uuids.get(self.wkey)
original_sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
# Delete wiki
delete_url = self.project.api_url_for('project_wiki_delete', wname=self.wname)
res = self.app.delete(delete_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(original_private_uuid, self.project.wiki_private_uuids.get(self.wkey))
# Revisit wiki edit page
res = self.app.get(edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(original_private_uuid, self.project.wiki_private_uuids.get(self.wkey))
assert_in(original_sharejs_uuid, res.body)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_uuid_persists_after_rename(self, mock_sharejs):
new_wname = 'barbaz'
new_wkey = to_mongo_key(new_wname)
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
assert_is_none(self.project.wiki_private_uuids.get(new_wkey))
# Create wiki page
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
wiki_page = self.project.get_wiki_page(self.wname)
# Visit wiki edit page
original_edit_url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(original_edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
original_private_uuid = self.project.wiki_private_uuids.get(self.wkey)
original_sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
# Rename wiki
rename_url = self.project.api_url_for('project_wiki_rename', wname=self.wname)
res = self.app.put_json(
rename_url,
{'value': new_wname, 'pk': wiki_page._id},
auth=self.user.auth,
)
assert_equal(res.status_code, 200)
self.project.reload()
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
assert_equal(original_private_uuid, self.project.wiki_private_uuids.get(new_wkey))
# Revisit original wiki edit page
res = self.app.get(original_edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_not_equal(original_private_uuid, self.project.wiki_private_uuids.get(self.wkey))
assert_not_in(original_sharejs_uuid, res.body)
@pytest.mark.skip('#TODO: Fix or mock mongodb for sharejs')
class TestWikiShareJSMongo(OsfTestCase):
@classmethod
def setUpClass(cls):
super(TestWikiShareJSMongo, cls).setUpClass()
cls._original_sharejs_db_name = settings.SHAREJS_DB_NAME
settings.SHAREJS_DB_NAME = 'sharejs_test'
def setUp(self):
super(TestWikiShareJSMongo, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.wname = 'foo.bar'
self.wkey = to_mongo_key(self.wname)
self.private_uuid = generate_private_uuid(self.project, self.wname)
self.sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
# Create wiki page
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
self.wiki_page = self.project.get_wiki_page(self.wname)
# Insert mongo data for current project/wiki
self.db = share_db()
example_uuid = EXAMPLE_DOCS[0]['_id']
self.example_docs = deepcopy(EXAMPLE_DOCS)
self.example_docs[0]['_id'] = self.sharejs_uuid
self.db.docs.insert(self.example_docs)
self.example_ops = deepcopy(EXAMPLE_OPS)
for item in self.example_ops:
item['_id'] = item['_id'].replace(example_uuid, self.sharejs_uuid)
item['name'] = item['name'].replace(example_uuid, self.sharejs_uuid)
self.db.docs_ops.insert(self.example_ops)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migrate_uuid(self, mock_sharejs):
migrate_uuid(self.project, self.wname)
assert_is_none(self.db.docs.find_one({'_id': self.sharejs_uuid}))
assert_is_none(self.db.docs_ops.find_one({'name': self.sharejs_uuid}))
new_sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
assert_equal(
EXAMPLE_DOCS[0]['_data'],
self.db.docs.find_one({'_id': new_sharejs_uuid})['_data']
)
assert_equal(
len([item for item in self.example_ops if item['name'] == self.sharejs_uuid]),
len([item for item in self.db.docs_ops.find({'name': new_sharejs_uuid})])
)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migrate_uuid_no_mongo(self, mock_sharejs):
# Case where no edits have been made to the wiki
wname = 'bar.baz'
wkey = to_mongo_key(wname)
share_uuid = generate_private_uuid(self.project, wname)
sharejs_uuid = get_sharejs_uuid(self.project, wname)
self.project.update_node_wiki(wname, 'Hello world', Auth(self.user))
migrate_uuid(self.project, wname)
assert_not_equal(share_uuid, self.project.wiki_private_uuids.get(wkey))
assert_is_none(self.db.docs.find_one({'_id': sharejs_uuid}))
assert_is_none(self.db.docs_ops.find_one({'name': sharejs_uuid}))
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migrate_uuid_updates_node(self, mock_sharejs):
migrate_uuid(self.project, self.wname)
assert_not_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_manage_contributors_updates_uuid(self, mock_sharejs):
user = UserFactory()
self.project.add_contributor(
contributor=user,
permissions=['read', 'write', 'admin'],
auth=Auth(user=self.user),
)
self.project.save()
assert_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
# Removing admin permission does nothing
self.project.manage_contributors(
user_dicts=[
{'id': user._id, 'permission': 'write', 'visible': True},
{'id': self.user._id, 'permission': 'admin', 'visible': True},
],
auth=Auth(user=self.user),
save=True,
)
assert_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
# Removing write permission migrates uuid
self.project.manage_contributors(
user_dicts=[
{'id': user._id, 'permission': 'read', 'visible': True},
{'id': self.user._id, 'permission': 'admin', 'visible': True},
],
auth=Auth(user=self.user),
save=True,
)
assert_not_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_delete_share_doc(self, mock_sharejs):
delete_share_doc(self.project, self.wname)
assert_is_none(self.db.docs.find_one({'_id': self.sharejs_uuid}))
assert_is_none(self.db.docs_ops.find_one({'name': self.sharejs_uuid}))
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_delete_share_doc_updates_node(self, mock_sharejs):
assert_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
delete_share_doc(self.project, self.wname)
assert_not_in(self.wkey, self.project.wiki_private_uuids)
def test_get_draft(self):
# draft is current with latest wiki save
current_content = self.wiki_page.get_draft(self.project)
assert_equals(current_content, self.wiki_page.content)
# modify the sharejs wiki page contents and ensure we
# return the draft contents
new_content = 'I am a teapot'
new_time = int(time.time() * 1000) + 10000
new_version = self.example_docs[0]['_v'] + 1
self.db.docs.update(
{'_id': self.sharejs_uuid},
{'$set': {
'_v': new_version,
'_m.mtime': new_time,
'_data': new_content
}}
)
current_content = self.wiki_page.get_draft(self.project)
assert_equals(current_content, new_content)
def tearDown(self):
super(TestWikiShareJSMongo, self).tearDown()
self.db.drop_collection('docs')
self.db.drop_collection('docs_ops')
@classmethod
def tearDownClass(cls):
share_db().connection.drop_database(settings.SHAREJS_DB_NAME)
settings.SHAREJS_DB_NAME = cls._original_sharejs_db_name
class TestWikiUtils(OsfTestCase):
def setUp(self):
super(TestWikiUtils, self).setUp()
self.project = ProjectFactory()
def test_get_sharejs_uuid(self):
wname = 'foo.bar'
wname2 = 'bar.baz'
private_uuid = generate_private_uuid(self.project, wname)
sharejs_uuid = get_sharejs_uuid(self.project, wname)
# Provides consistent results
assert_equal(sharejs_uuid, get_sharejs_uuid(self.project, wname))
# Provides obfuscation
assert_not_in(wname, sharejs_uuid)
assert_not_in(sharejs_uuid, wname)
assert_not_in(private_uuid, sharejs_uuid)
assert_not_in(sharejs_uuid, private_uuid)
# Differs based on share uuid provided
assert_not_equal(sharejs_uuid, get_sharejs_uuid(self.project, wname2))
# Differs across projects and forks
project = ProjectFactory()
assert_not_equal(sharejs_uuid, get_sharejs_uuid(project, wname))
fork = self.project.fork_node(Auth(self.project.creator))
assert_not_equal(sharejs_uuid, get_sharejs_uuid(fork, wname))
def test_generate_share_uuid(self):
wname = 'bar.baz'
wkey = to_mongo_key(wname)
assert_is_none(self.project.wiki_private_uuids.get(wkey))
share_uuid = generate_private_uuid(self.project, wname)
self.project.reload()
assert_equal(self.project.wiki_private_uuids[wkey], share_uuid)
new_uuid = generate_private_uuid(self.project, wname)
self.project.reload()
assert_not_equal(share_uuid, new_uuid)
assert_equal(self.project.wiki_private_uuids[wkey], new_uuid)
def test_format_wiki_version(self):
assert_is_none(format_wiki_version(None, 5, False))
assert_is_none(format_wiki_version('', 5, False))
assert_equal(format_wiki_version('3', 5, False), 3)
assert_equal(format_wiki_version('4', 5, False), 'previous')
assert_equal(format_wiki_version('5', 5, False), 'current')
assert_equal(format_wiki_version('previous', 5, False), 'previous')
assert_equal(format_wiki_version('current', 5, False), 'current')
assert_equal(format_wiki_version('preview', 5, True), 'preview')
assert_equal(format_wiki_version('current', 0, False), 'current')
assert_equal(format_wiki_version('preview', 0, True), 'preview')
with assert_raises(InvalidVersionError):
format_wiki_version('1', 0, False)
with assert_raises(InvalidVersionError):
format_wiki_version('previous', 0, False)
with assert_raises(InvalidVersionError):
format_wiki_version('6', 5, False)
with assert_raises(InvalidVersionError):
format_wiki_version('0', 5, False)
with assert_raises(InvalidVersionError):
format_wiki_version('preview', 5, False)
with assert_raises(InvalidVersionError):
format_wiki_version('nonsense', 5, True)
class TestPublicWiki(OsfTestCase):
def setUp(self):
super(TestPublicWiki, self).setUp()
self.project = ProjectFactory()
self.consolidate_auth = Auth(user=self.project.creator)
self.user = AuthUserFactory()
def test_addon_on_children(self):
parent = ProjectFactory()
node = NodeFactory(parent=parent, category='project')
sub_component = NodeFactory(parent=node)
parent.delete_addon('wiki', self.consolidate_auth)
node.delete_addon('wiki', self.consolidate_auth)
sub_component.delete_addon('wiki', self.consolidate_auth)
NodeFactory(parent=node)
has_addon_on_child_node =\
node.has_addon_on_children('wiki')
assert_true(has_addon_on_child_node)
def test_check_user_has_addon_excludes_deleted_components(self):
parent = ProjectFactory()
parent.delete_addon('wiki', self.consolidate_auth)
node = NodeFactory(parent=parent, category='project')
mock_now = datetime.datetime(2017, 3, 16, 11, 00, tzinfo=pytz.utc)
with mock.patch.object(timezone, 'now', return_value=mock_now):
node.delete_addon('wiki', self.consolidate_auth)
sub_component = NodeFactory(parent=node)
sub_component.is_deleted = True
sub_component.save()
has_addon_on_child_node =\
node.has_addon_on_children('wiki')
assert_false(has_addon_on_child_node)
def test_set_editing(self):
parent = ProjectFactory()
node = NodeFactory(parent=parent, category='project', is_public=True)
wiki = node.get_addon('wiki')
# Set as publicly editable
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=True)
assert_true(wiki.is_publicly_editable)
assert_equal(node.logs.latest().action, 'made_wiki_public')
# Try to set public when the wiki is already public
with assert_raises(NodeStateError):
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=False)
# Turn off public editing
wiki.set_editing(permissions=False, auth=self.consolidate_auth, log=True)
assert_false(wiki.is_publicly_editable)
assert_equal(node.logs.latest().action, 'made_wiki_private')
node = NodeFactory(parent=parent, category='project')
wiki = node.get_addon('wiki')
# Try to set to private a wiki that is already private
with assert_raises(NodeStateError):
wiki.set_editing(permissions=False, auth=self.consolidate_auth, log=False)
# Try to set public when the project is private
with assert_raises(NodeStateError):
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=False)
def test_serialize_wiki_settings(self):
node = NodeFactory(parent=self.project, creator=self.user, is_public=True)
node.get_addon('wiki').set_editing(
permissions=True, auth=self.consolidate_auth, log=True)
data = serialize_wiki_settings(self.user, [node])
expected = [{
'node': {
'id': node._id,
'title': node.title,
'url': node.url,
},
'children': [
{
'select': {
'title': 'permission',
'permission': 'public'
},
}
],
'kind': 'folder',
'nodeType': 'component',
'category': 'hypothesis',
'permissions': {'view': True}
}]
assert_equal(data, expected)
def test_serialize_wiki_settings_with_pointer(self):
node = NodeFactory(parent=self.project, creator=self.user, is_public=True)
node.get_addon('wiki').set_editing(
permissions=True, auth=self.consolidate_auth, log=True)
node.add_pointer(self.project, Auth(self.user))
node.save()
data = serialize_wiki_settings(self.user, [node])
expected = [{
'node': {
'id': node._id,
'title': node.title,
'url': node.url,
'is_public': True
},
'children': [
{
'select': {
'title': 'permission',
'permission': 'public'
},
}
],
'kind': 'folder',
'nodeType': 'component',
'category': 'hypothesis',
'permissions': {'view': True,
'admin': True}
}]
assert_equal(data, expected)
def test_serialize_wiki_settings_disabled_wiki(self):
node = NodeFactory(parent=self.project, creator=self.user)
node.delete_addon('wiki', self.consolidate_auth)
data = serialize_wiki_settings(self.user, [node])
expected = [{'node':
{'url': node.url,
'is_public': False,
'id': node._id,
'title': node.title},
'category': 'hypothesis',
'kind': 'folder',
'nodeType': 'component',
'children': [],
'permissions': {'admin': True,
'view': True}
}]
assert_equal(data, expected)
@pytest.mark.enable_bookmark_creation
class TestWikiMenu(OsfTestCase):
def setUp(self):
super(TestWikiMenu, self).setUp()
self.user = UserFactory()
self.project = ProjectFactory(creator=self.user, is_public=True)
self.component = NodeFactory(creator=self.user, parent=self.project, is_public=True)
self.consolidate_auth = Auth(user=self.project.creator)
self.non_contributor = UserFactory()
def test_format_home_wiki_page_no_content(self):
data = views.format_home_wiki_page(self.project)
expected = {
'page': {
'url': self.project.web_url_for('project_wiki_home'),
'name': 'Home',
'id': 'None',
}
}
assert_equal(data, expected)
def test_format_project_wiki_pages_contributor(self):
self.project.update_node_wiki('home', 'content here', self.consolidate_auth)
self.project.update_node_wiki('zoo', 'koala', self.consolidate_auth)
home_page = self.project.get_wiki_page(name='home')
zoo_page = self.project.get_wiki_page(name='zoo')
data = views.format_project_wiki_pages(self.project, self.consolidate_auth)
expected = [
{
'page': {
'url': self.project.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': home_page._primary_key,
}
},
{
'page': {
'url': self.project.web_url_for('project_wiki_view', wname='zoo', _guid=True),
'name': 'zoo',
'id': zoo_page._primary_key,
}
}
]
assert_equal(data, expected)
def test_format_project_wiki_pages_no_content_non_contributor(self):
self.project.update_node_wiki('home', 'content here', self.consolidate_auth)
self.project.update_node_wiki('zoo', '', self.consolidate_auth)
home_page = self.project.get_wiki_version(name='home')
data = views.format_project_wiki_pages(self.project, auth=Auth(self.non_contributor))
expected = [
{
'page': {
'url': self.project.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': home_page.wiki_page._primary_key,
}
}
]
assert_equal(data, expected)
def test_format_component_wiki_pages_contributor(self):
self.component.update_node_wiki('home', 'home content', self.consolidate_auth)
self.component.update_node_wiki('zoo', 'koala', self.consolidate_auth)
zoo_page = self.component.get_wiki_page(name='zoo')
expected = [
{
'page': {
'name': self.component.title,
'url': self.component.web_url_for('project_wiki_view', wname='home', _guid=True),
},
'children': [
{
'page': {
'url': self.component.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': self.component._primary_key,
}
},
{
'page': {
'url': self.component.web_url_for('project_wiki_view', wname='zoo', _guid=True),
'name': 'zoo',
'id': zoo_page._primary_key,
},
}
],
'kind': 'component',
'category': self.component.category,
'pointer': False,
}
]
data = views.format_component_wiki_pages(node=self.project, auth=self.consolidate_auth)
assert_equal(data, expected)
def test_format_component_wiki_pages_no_content_non_contributor(self):
data = views.format_component_wiki_pages(node=self.project, auth=Auth(self.non_contributor))
expected = []
assert_equal(data, expected)
def test_project_wiki_grid_data(self):
self.project.update_node_wiki('home', 'project content', self.consolidate_auth)
self.component.update_node_wiki('home', 'component content', self.consolidate_auth)
data = views.project_wiki_grid_data(auth=self.consolidate_auth, wname='home', node=self.project)
expected = [
{
'title': 'Project Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': views.format_project_wiki_pages(node=self.project, auth=self.consolidate_auth),
},
{
'title': 'Component Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': views.format_component_wiki_pages(node=self.project, auth=self.consolidate_auth)
}
]
assert_equal(data, expected)
|
erinspace/osf.io
|
addons/wiki/tests/test_wiki.py
|
Python
|
apache-2.0
| 62,693
|
[
"VisIt"
] |
32a67cf1c32511ffe1313c41b4d4edc77bd3e6d3fdd7ce3b6b9f9441186ae683
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAmpliqueso(RPackage):
"""The package provides tools and reports for the analysis of
amplicon sequencing panels, such as AmpliSeq."""
homepage = "https://www.bioconductor.org/packages/ampliQueso/"
git = "https://git.bioconductor.org/packages/ampliQueso.git"
version('1.14.0', commit='9a4c26ec594171279aba8ab7fe59c4a2ea09b06b')
depends_on('r@3.4.0:3.4.9', when='@1.14.0')
depends_on('r-samr', type=('build', 'run'))
depends_on('r-deseq', type=('build', 'run'))
depends_on('r-edger', type=('build', 'run'))
depends_on('r-xtable', type=('build', 'run'))
depends_on('r-statmod', type=('build', 'run'))
depends_on('r-genefilter', type=('build', 'run'))
depends_on('r-variantannotation', type=('build', 'run'))
depends_on('r-foreach', type=('build', 'run'))
depends_on('r-doparallel', type=('build', 'run'))
depends_on('r-gplots', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-rgl', type=('build', 'run'))
depends_on('r-knitr', type=('build', 'run'))
depends_on('r-rnaseqmap', type=('build', 'run'))
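# A minimal usage sketch, not part of the original recipe: once this package file
# is available in a Spack repository, the library would typically be built and made
# available to R along the lines of
#   spack install r-ampliqueso
#   spack load r-ampliqueso
# The exact spec syntax (for example pinning '@1.14.0') depends on the local Spack setup.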
|
krafczyk/spack
|
var/spack/repos/builtin/packages/r-ampliqueso/package.py
|
Python
|
lgpl-2.1
| 2,383
|
[
"Bioconductor"
] |
4abd3c3012166b86a2b60f8e9d78641c79a487204f7197150af10444a25d8dfd
|
#!/usr/bin/env python
#String in Pi, stringinpi.py v1.01
#Copyright (c) 2015 by Brian Mikolajczyk, brianm12@gmail.com
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
from sympy.mpmath import *
import re #must be imported after sympy
import sys
class color:
red = '\033[91m'
bold = '\033[1m'
end = '\033[0m'
def main(string=None):
string = str(string)
print('Searching for ' + string + ' in ' + u'\U0001D6D1' + '...')
slen = len(string)
splen = 4
do = True
mp.dps = 100000
try:
while do:
pi = str(mp.pi())
if re.search(string, pi):
st = pi.find(string)
nst = st - splen
if nst < 0:
nst = 0
fstr = st + slen
nend = fstr + splen
print('Starting Position of String: ' + str(st))
print(pi[nst:st] + color.bold + color.red + pi[st:fstr] +\
color.end + pi[fstr:nend])
do = False
else:
mp.dps += 100000
except KeyboardInterrupt:
try:
print('\n' + 'Number of Characters Checked: <' + str(len(pi)) +\
'\n' + 'Program Interrupted')
except UnboundLocalError:
print('\n' + 'Number of Characters Checked: <' + str(mp.dps) +\
'\n' + 'Program Interrupted')
finally:
sys.exit(0)
def intcheck(val):
if val.startswith('0'):
oval = val
else:
oval = False
try:
val = int(val)
except ValueError:
raise argparse.ArgumentTypeError("{} is not a positive integer value"\
.format(val))
if val < 1:
raise argparse.ArgumentTypeError("{} is not a positive integer value"\
.format(val))
else:
if oval:
return oval
else:
return val
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('string', type=intcheck)
parser.add_argument('--version', action='version',\
version='stringinpi.py v1.01')
main(**vars(parser.parse_args()))
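# A minimal usage sketch (assuming sympy still bundles mpmath as sympy.mpmath):
#   python stringinpi.py 1415
# would report the starting position of "1415" near the beginning of the decimal
# expansion of pi and highlight the match; pressing Ctrl-C interrupts the search
# and prints how many characters had been checked so far.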
|
bmikolaj/stringinpi
|
stringinpi.py
|
Python
|
gpl-3.0
| 2,823
|
[
"Brian"
] |
1168f6568ed4a194dd75c6d7bc49945ff0f583a11b1dc66dbfa0d10ade6584af
|
import types
import base64
from RESTDIRAC.RESTSystem.Base.RESTHandler import WErr, WOK, RESTHandler
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities import List, Time
class BaseFC( RESTHandler ):
@property
def rpc( self ):
return RPCClient( "DataManagement/FileCatalog" )
def decodeMetadataQuery( self ):
cond = {}
for k in self.request.arguments:
for val in self.request.arguments[ k ]:
if val.find( "|" ) == -1:
continue
val = val.split( "|" )
op = val[0]
val = "|".join( val[1:] )
if 'in' == op:
val = List.fromChar( val, "," )
if k not in cond:
cond[ k ] = {}
cond[ k ][ op ] = val
self.log.info( "Metadata condition is %s" % cond )
return cond
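# Illustrative sketch, not part of the original file: given query arguments such as
# ?size=>|1024&type=in|File,Dir (field names here are made up), the loop above builds
#   {'size': {'>': '1024'}, 'type': {'in': ['File', 'Dir']}}
# i.e. the token before the first '|' is the operator and the remainder is the value,
# with 'in' values additionally split on commas via List.fromChar.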
def intParam( self, key, default = 0 ):
try:
return int( self.request.arguments[ key ][-1] )
except ( KeyError, ValueError ):
return default
def decodePath( self, did ):
if not did:
return "/"
try:
return base64.urlsafe_b64decode( str( did ) ).rstrip( "/" ) or "/"
except TypeError, e:
raise WErr( 400, "Cannot decode path" )
def sanitizeForJSON( self, val ):
vType = type( val )
if vType in Time._allTypes:
return Time.toString( val )
elif vType == types.DictType:
for k in val:
val[ k ] = self.sanitizeForJSON( val[ k ] )
elif vType == types.ListType:
for iP in range( len( val ) ):
val[ iP ] = self.sanitizeForJSON( val[ iP ] )
elif vType == types.TupleType:
nt = []
for iP in range( len( val ) ):
nt.append( self.sanitizeForJSON( val[ iP ] ) )
val = tuple( nt )
return val
|
DIRACGrid/RESTDIRAC
|
RESTSystem/API/FileCatalog/BaseFC.py
|
Python
|
gpl-3.0
| 1,716
|
[
"DIRAC"
] |
3e7bc5e780cc14190db95b61fd842054ae3e3bd70cd7eb58a97e62ffb959a32c
|
# -*- coding: utf-8 -*-
"""
Implements Monte Carlo Reinforcement Learning for iterSemiNFG objects
Created on Mon Feb 18 09:03:32 2013
Copyright (C) 2013 James Bono
GNU Affero General Public License
"""
from __future__ import division
import time
import copy
import numpy as np
import matplotlib.pylab as plt
from pynfg.utilities.utilities import iterated_input_dict
import warnings
import sys
class EWMA_MCRL(object):
"""
Finds the **uncoordinated** best policy using reinforcement learning.
:arg Game: The iterated semi-NFG on which to perform the RL
:type Game: iterSemiNFG
:arg specs: A nested dictionary containing specifications of the
game. See below for details
:type specs: dict
The specs dictionary is a triply nested dictionary. The first
level of keys is player names. For each player there is an entry with key
Level : int
The player's level
The rest of the entries are basenames. The value of each basename is a
dictionary containing:
J : int, list, or np.array
The number of runs per training episode. If a schedule is desired, enter a list or np.array with size equal to N.
N : int
The number of training episodes
L0Dist : ndarray, str, None
If ndarray, then the level 0 CPT is set to
L0Dist. If L0Dist is 'uniform', then all Level 0 CPTs are set to
the uniform distribution. If L0Dist is None, then the level 0 CPT
is set to the CPT of the inputted game.
alpha : int, list or np.array
The exponential weight for the moving average. If a schedule is
desired, enter a list or np.array with size equal to N
delta : float
The discount factor
eps : float
The maximum step-size for policy improvements
pureout : bool
if True, the policy is turned into a pure policy at the end
of training by assigning argmax actions prob 1. Default is False
"""
def __init__(self, Game, specs):
self.Game = copy.deepcopy(Game)
self.specs = specs
self.trained_CPTs = {}
self.figs = {}
for player in Game.players:
basenames = set(map(lambda x: x.basename, Game.partition[player]))
for bn in basenames:
self.figs[bn]={}
self.trained_CPTs[player] = {}
self.trained_CPTs[player][bn] = {}
self.trained_CPTs[player][bn][0] = self._set_L0_CPT()
self.high_level = max(map(lambda x: self.specs[x]['Level'], Game.players))
def _set_L0_CPT(self):
""" Sets the level 0 CPT"""
Game = self.Game
ps = self.specs
for player in ps:
basenames = set(map(lambda x: x.basename, Game.partition[player]))
for bn in basenames:
if ps[player][bn]['L0Dist'] == 'uniform':
return Game.bn_part[bn][0].uniformCPT(setCPT=False)
elif ps[player][bn]['L0Dist'] is None:
warnings.warn("No entry for L0Dist for player %s,\
setting to current CPT" % player)
return Game.bn_part[bn][0].CPT
elif type(ps[player][bn]['L0Dist']) == np.ndarray:
return ps[player][bn]['L0Dist']
def train_node(self, bn, level, setCPT=False):
""" Use EWMA MC RL to approximate the optimal CPT at bn given Game
:arg bn: the basename of the node with the CPT to be trained
:type bn: str
:arg level: The level at which to train the basename
:type level: int
"""
sys.stdout.write('\r')
print 'Training ' + bn + ' at level '+ str(level)
specs = self.specs
Game = copy.deepcopy(self.Game)
player = Game.bn_part[bn][0].player
basedict = specs[player][bn]
J, N, alpha, delta, eps, pureout = basedict['J'], basedict['N'], \
basedict['alpha'], basedict['delta'], basedict['eps'], \
basedict['pureout']
#Set other CPTs to level-1. Works even if CPTs aren't pointers.
for o_player in Game.players:
bn_list = list(set(map(lambda x: x.basename, Game.partition[o_player])))
for base in bn_list:
if base != bn:
for dn in Game.bn_part[base]:
try:
dn.CPT = \
(self.trained_CPTs[o_player][base][level - 1])
except KeyError:
raise KeyError('Need to train other players at level %s'
% str(level-1))
# initializing training schedules from scalar inputs
if isinstance(J, (int)):
J = J*np.ones(N)
if isinstance(alpha, (int, long, float)):
alpha = alpha*np.ones(N)
if isinstance(eps, (int, long, float)):
eps = eps*np.ones(N)
# getting shorter/more descriptive variable names to work with
T0 = Game.starttime
T = Game.endtime+1
shape = Game.bn_part[bn][0].CPT.shape
shape_last = shape[-1]
for dn in Game.bn_part[bn]: # pointing all CPTs to T0, i.e. single policy
dn.CPT = Game.bn_part[bn][0].CPT
visit = set() # set of the messages and mapairs visited throughout training
R = 0 # average reward with initial value of zero
A = 0 # normalizing constant for average reward
B = {} # dict associates messages and mapairs with beta exponents
D = {} # dict associates messages and mapairs with norm constants for Q,V
Q = np.zeros(shape) # Qtable
V = np.zeros(shape[:-1]) # Value table
Rseries = np.zeros(N) # tracking average reward for plotting convergence
np.seterr(invalid='ignore', divide='ignore')
for n in xrange(N):
sys.stdout.write('\r')
sys.stdout.write('Iteration ' + str(n))
sys.stdout.flush()
indicaten = np.zeros(Q.shape) # indicates visited mapairs
visitn = set() # set of messages and mapairs visited in episode n
Rseries[n] = R # adding the most recent ave reward to the data series
A *= alpha[n] # rescaling A at start of new episode, see writeup
for j in xrange(int(J[n])):
visitj = set() # visitj must be cleared at the start of every run
for t in xrange(T0, T):
#import pdb; pdb.set_trace()
#Game.bn_part[bn][t-T0].CPT = copy.copy(Game.bn_part[bn][0].CPT)
Game.sample_timesteps(t, t) # sampling the timestep
rew = Game.reward(player, t) # getting the reward
mapair = Game.bn_part[bn][t-T0].get_CPTindex()
A += 1
r = R
R = (1/A)*((A-1)*r+rew)
xm = set() # used below to keep track of updated messages
for values in visitj:
b = B[values] # past values
d = D[values]
q = Q[values]
bb = (b+1) # update equations double letters are time t
dd = d+1
qq = (1/dd)*(d*q+(delta**(bb-1))*(rew))
B[values] = bb # update dictionaries
D[values] = dd
Q[values] = qq
message = values[:-1] # V indexed by message only
if message not in xm: # updating message only once
b = B[message] # past values
d = D[message]
v = V[message]
bb = (b+1) # update equations double letters are time t
dd = d+1
vv = (1/dd)*(d*v+(delta**(bb-1))*(rew))
B[message] = bb # update dictionaries
D[message] = dd
V[message] = vv
xm.add(message) # so that message isn't updated again
if mapair not in visitj: # first time in j visiting mapair
message = mapair[:-1]
messtrue = (message not in xm) # for checking message visited
B[mapair] = 1 # whenever mapair not in visitj
if mapair not in visitn and mapair not in visit:
D[mapair] = 1
Q[mapair] = rew
if messtrue:
D[message] = 1
V[message] = rew
elif mapair not in visitn:
D[mapair] = alpha[n]*D[mapair]+1
Q[mapair] = (1/D[mapair])*((D[mapair]-1)*Q[mapair]
+(rew))
if messtrue:
D[message] = alpha[n]*D[message]+1
V[message] = (1/D[message])*((D[message]-1)*\
V[message]+(rew))
else:
D[mapair] += 1
Q[mapair] = (1/D[mapair])*((D[mapair]-1)*Q[mapair]\
+ (rew))
if messtrue:
D[message] += 1
V[message] = (1/D[message])*((D[message]-1) *
V[message]+(rew))
if messtrue:
B[message] = 1
visit.add(mapair) # mapair added to visit sets the first time
visitn.add(mapair)
visitj.add(mapair)
indicaten[mapair] = 1 # only visited actions are updated
# update CPT with shift towards Qtable argmax actions.
shift = Q-V[...,np.newaxis]
idx = np.nonzero(shift) # indices of nonzero shifts (avoid divide by 0)
# normalizing shifts to be a % of message's biggest shift
shiftnorm = np.absolute(shift).max(axis=-1)[...,np.newaxis]
# for each mapair shift only eps% of the percent shift
updater = eps[n]*indicaten*Game.bn_part[bn][0].CPT/shiftnorm
# increment the CPT
Game.bn_part[bn][0].CPT[idx] += updater[idx]*shift[idx]
# normalize after the shift
CPTsum = Game.bn_part[bn][0].CPT.sum(axis=-1)
Game.bn_part[bn][0].CPT /= CPTsum[...,np.newaxis]
if pureout: #if True, output is a pure policy
Game.bn_part[bn][0].makeCPTpure()
self.trained_CPTs[player][bn][level] = Game.bn_part[bn][0].CPT
if setCPT:
for node in self.Game.bn_part[bn]:
node.CPT = Game.bn_part[bn][0].CPT
for tau in xrange(1, T-T0): #before exit, make CPTs independent in memory
Game.bn_part[bn][tau].CPT = copy.copy(Game.bn_part[bn][0].CPT)
plt.figure()
plt.plot(Rseries, label = str(bn + ' Level ' + str(level)))
#plotting rseries to gauge convergence
plt.legend()
fig = plt.gcf()
self.figs[bn][str(level)] = fig
sys.stdout.write('\n')
def solve_game(self, setCPT=False):
"""Solves the game for given player levels"""
Game = self.Game
ps = self.specs
for level in np.arange(1, self.high_level):
for player in Game.players:
basenames = set(map(lambda x: x.basename, Game.partition[player]))
for controlled in basenames:
self.train_node(controlled, level, setCPT=setCPT)
for player in Game.players:
basenames = set(map(lambda x: x.basename, Game.partition[player]))
for controlled in basenames:
if ps[player]['Level'] == self.high_level:
self.train_node(controlled, self.high_level, setCPT=setCPT)
def mcrl_dict(Game, Level, J, N, delta, alpha=.5, eps=.2, L0Dist=None,
pureout=False):
"""
Creates the specs shell for a game to be solved using MCRL.
:arg Game: An iterated SemiNFG
:type Game: SemiNFG
.. seealso::
See the EWMA_MCRL documentation (above) for details of the optional arguments
"""
return iterated_input_dict(Game, [('Level', Level)], [('L0Dist', L0Dist), ('J', J),
('N', N), ('delta', delta),
('alpha', alpha), ('eps', eps),
('pureout', pureout)])
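# A short usage sketch (G is a hypothetical iterSemiNFG instance; not part of
# the original module): build the specs shell with mcrl_dict, train every
# decision node, and read back the trained CPTs.
#
#     specs = mcrl_dict(G, Level=1, J=50, N=20, delta=0.9, alpha=0.5, eps=0.2)
#     solver = EWMA_MCRL(G, specs)
#     solver.solve_game(setCPT=True)
#     trained = solver.trained_CPTs    # player -> basename -> level -> CPT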
|
jwbono/PyNFG
|
pynfg/levelksolutions/mcrl.py
|
Python
|
agpl-3.0
| 12,877
|
[
"VisIt"
] |
42793810114e2c7c39d72fd040ade46bc2bd0742bf561d17e0bbbf0d6648c8cb
|
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by XXX.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
Updated versions can be found at http://cars.uchicago.edu/software
Sergey Koposov converted the Mark's Python version from Numeric to numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
koposov@ast.cam.ac.uk
Updated versions can be found at http://code.google.com/p/astrolibpy/source/browse/trunk/
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with default
# flag.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
return([status, (y-model)/err])
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are
suggestive but not required. Any parameters can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and
MPFITEXPR if you need ideas on how to do that. The function *must*
accept a parameter list, P.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
if fjac is not None:
pderiv = zeros([len(x), len(p)], Float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return([status, (y-model)/err, pderiv])
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives the parameter FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will chose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo(2).tied = '2 * p(1)'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example:
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
for i in range(5)]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
EXAMPLE
import mpfit
import numpy as np
x = np.arange(1, 101, dtype=float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = ( p0[0] + p0[1]*x + p0[2]*x**2 + p0[3]*np.sqrt(x) +
p0[4]*np.log(x))
err = np.ones_like(y)
fa = {'x':x, 'y':y, 'err':err}
m = mpfit.mpfit(myfunct, p0, functkw=fa)
print 'status = ', m.status
if (m.status <= 0): print 'error message = ', m.errmsg
print 'parameters = ', m.params
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)
----- --------------- ------------------------------- (1)
Order 0th 1st 2nd
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm) using Newton's method, and
arrives at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
symmetric n x n matrix, and should be positive definite.
The Levenberg - Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, if even by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h, with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
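A short numeric sketch (not part of MPFIT itself; the helper name lm_step
is illustrative) of a single damped step from eqns (2a) and (5): build the
damped normal matrix J^T.J + nu*I from the Jacobian and solve for the
parameter update dx.
import numpy as np
def lm_step(p, residual, jacobian, nu=1e-3):
r = residual(p)                          # residual vector h (length m)
J = jacobian(p)                          # Jacobian h' (m x n)
A = J.T.dot(J) + nu*np.eye(J.shape[1])   # damped normal matrix
dx = np.linalg.solve(A, -J.T.dot(r))     # step toward the minimum
return p + dx
# toy linear model y = a*x + b fitted to three exact points
x = np.array([0., 1., 2.])
y = np.array([1., 3., 5.])
residual = lambda p: p[0]*x + p[1] - y
jacobian = lambda p: np.column_stack([x, np.ones_like(x)])
p1 = lm_step(np.array([0., 0.]), residual, jacobian)   # close to [2., 1.]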
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
Converted from Numeric to numpy (Sergey Koposov, July 2008)
"""
import numpy
import types
import scipy.lib.blas
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,lmpar,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit:
blas_enorm32, = scipy.lib.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float32))
blas_enorm64, = scipy.lib.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float64))
def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,
damp=0., maxiter=200, factor=100., nprint=1,
iterfunct='default', iterkw={}, nocovar=0,
rescale=0, autoderivative=1, quiet=0,
diag=None, epsfcn=None, debug=0):
"""
Inputs:
fcn:
The function to be minimized. The function should return the weighted
deviations between the model and the data, as described above.
xall:
An array of starting values for each of the parameters of the model.
The number of parameters should be fewer than the number of measurements.
This parameter is optional if the parinfo keyword is used (but see
parinfo). The parinfo keyword provides a mechanism to fix or constrain
individual parameters.
Keywords:
autoderivative:
If this is set, derivatives of the function will be computed
automatically via a finite differencing procedure. If not set, then
fcn must provide the (analytical) derivatives.
Default: set (=1)
NOTE: to supply your own analytical derivatives,
explicitly pass autoderivative=0
ftol:
A nonnegative input variable. Termination occurs when both the actual
and predicted relative reductions in the sum of squares are at most
ftol (and status is accordingly set to 1 or 3). Therefore, ftol
measures the relative error desired in the sum of squares.
Default: 1E-10
functkw:
A dictionary which contains the parameters to be passed to the
user-supplied function specified by fcn via the standard Python
keyword dictionary mechanism. This is the way you can pass additional
data to your user-supplied function without using global variables.
Consider the following example:
if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
'errval':[1.,1.,1.] }
then the user supplied function should be declared like this:
def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
Default: {} No extra parameters are passed to the user-supplied
function.
gtol:
A nonnegative input variable. Termination occurs when the cosine of
the angle between fvec and any column of the jacobian is at most gtol
in absolute value (and status is accordingly set to 4). Therefore,
gtol measures the orthogonality desired between the function vector
and the columns of the jacobian.
Default: 1e-10
iterkw:
The keyword arguments to be passed to iterfunct via the dictionary
keyword mechanism. This should be a dictionary and is similar in
operation to FUNCTKW.
Default: {} No arguments are passed.
iterfunct:
The name of a function to be called upon each NPRINT iteration of the
MPFIT routine. It should be declared in the following way:
def iterfunct(myfunct, p, iter, fnorm, functkw=None,
parinfo=None, quiet=0, dof=None, [iterkw keywords here])
# perform custom iteration update
iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
and QUIET).
myfunct: The user-supplied function to be minimized,
p: The current set of model parameters
iter: The iteration number
functkw: The arguments to be passed to myfunct.
fnorm: The chi-squared value.
quiet: Set when no textual output should be printed.
dof: The number of degrees of freedom, normally the number of points
less the number of free parameters.
See below for documentation of parinfo.
In implementation, iterfunct can perform updates to the terminal or
graphical user interface, to provide feedback while the fit proceeds.
If the fit is to be stopped for any reason, then iterfunct should return a
status value between -15 and -1. Otherwise it should return None
(e.g. no return statement) or 0.
In principle, iterfunct should probably not modify the parameter values,
because it may interfere with the algorithm's stability. In practice it
is allowed.
Default: an internal routine is used to print the parameter values.
Set iterfunct=None if there is no user-defined routine and you don't
want the internal default routine to be called.
maxiter:
The maximum number of iterations to perform. If the number is exceeded,
then the status value is set to 5 and MPFIT returns.
Default: 200 iterations
nocovar:
Set this keyword to prevent the calculation of the covariance matrix
before returning (see COVAR)
Default: clear (=0) The covariance matrix is returned
nprint:
The frequency with which iterfunct is called. A value of 1 indicates
that iterfunct is called with every iteration, while 2 indicates every
other iteration, etc. Note that several Levenberg-Marquardt attempts
can be made in a single iteration.
Default value: 1
parinfo
Provides a mechanism for more sophisticated constraints to be placed on
parameter values. When parinfo is not passed, then it is assumed that
all parameters are free and unconstrained. Values in parinfo are never
modified during a call to MPFIT.
See description above for the structure of PARINFO.
Default value: None All parameters are free and unconstrained.
quiet:
Set this keyword when no textual output should be printed by MPFIT
damp:
A scalar number, indicating the cut-off value of residuals where
"damping" will occur. Residuals with magnitudes greater than this
number will be replaced by their hyperbolic tangent. This partially
mitigates the so-called large residual problem inherent in
least-squares solvers (as for the test problem CURVI,
http://www.maxthis.com/curviex.htm).
A value of 0 indicates no damping.
Default: 0
Note: DAMP doesn't work with autoderivative=0
xtol:
A nonnegative input variable. Termination occurs when the relative error
between two consecutive iterates is at most xtol (and status is
accordingly set to 2 or 3). Therefore, xtol measures the relative error
desired in the approximate solution.
Default: 1E-10
Outputs:
Returns an object of type mpfit. The results are attributes of this class,
e.g. mpfit.status, mpfit.errmsg, mpfit.params, mpfit.niter, mpfit.covar.
.status
An integer status code is returned. All values greater than zero can
represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
.fnorm
The value of the summed squared residuals for the returned parameter
values.
.covar
The covariance matrix for the set of parameters returned by MPFIT.
The matrix is NxN where N is the number of parameters. The square root
of the diagonal elements gives the formal 1-sigma statistical errors on
the parameters if errors were treated "properly" in fcn.
Parameter errors are also returned in .perror.
To compute the correlation matrix, pcor, use this example:
cov = mpfit.covar
pcor = cov * 0.
for i in range(n):
for j in range(n):
pcor[i,j] = cov[i,j]/sqrt(cov[i,i]*cov[j,j])
If nocovar is set or MPFIT terminated abnormally, then .covar is set to
a scalar with value None.
.errmsg
A string error or warning message is returned.
.nfev
The number of calls to MYFUNCT performed.
.niter
The number of iterations completed.
.perror
The formal 1-sigma errors in each parameter, computed from the
covariance matrix. If a parameter is held fixed, or if it touches a
boundary, then the error is reported as zero.
If the fit is unweighted (i.e. no errors were given, or the weights
were uniformly set to unity), then .perror will probably not represent
the true parameter uncertainties.
*If* you can assume that the true reduced chi-squared value is unity --
meaning that the fit is implicitly assumed to be of good quality --
then the estimated parameter uncertainties can be computed by scaling
.perror by the square root of the measured reduced chi-squared value:
dof = len(x) - len(mpfit.params) # deg of freedom
# scaled uncertainties
pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)
"""
self.niter = 0
self.params = None
self.covar = None
self.perror = None
self.status = 0 # Invalid input flag set while we check inputs
self.debug = debug
self.errmsg = ''
self.nfev = 0
self.damp = damp
self.dof=0
if fcn==None:
self.errmsg = "Usage: parms = mpfit('myfunt', ... )"
return
if iterfunct == 'default':
iterfunct = self.defiter
# Parameter damping doesn't work when user is providing their own
# gradients.
if (self.damp != 0) and (autoderivative == 0):
self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
return
# Parameters can either be stored in parinfo, or x. x takes precedence if it exists
if (xall is None) and (parinfo is None):
self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
return
# Be sure that PARINFO is of the right type
if parinfo is not None:
if type(parinfo) != types.ListType:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
else:
if type(parinfo[0]) != types.DictionaryType:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if ((xall is not None) and (len(xall) != len(parinfo))):
self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
return
# If the parameters were not specified at the command line, then
# extract them from PARINFO
if xall is None:
xall = self.parinfo(parinfo, 'value')
if xall is None:
self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
return
# Make sure parameters are numpy arrays
xall = numpy.asarray(xall)
# If xall is not a float array, or is a float array with fewer than 64 bits,
# convert it to double precision
if xall.dtype.kind != 'f' or xall.dtype.itemsize<=4:
xall = xall.astype(numpy.float)
npar = len(xall)
self.fnorm = -1.
fnorm1 = -1.
# TIED parameters?
ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
self.qanytied = 0
for i in range(npar):
ptied[i] = ptied[i].strip()
if ptied[i] != '':
self.qanytied = 1
self.ptied = ptied
# FIXED parameters ?
pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
pfixed = (pfixed == 1)
for i in range(npar):
pfixed[i] = pfixed[i] or (ptied[i] != '') # Tied parameters are also effectively fixed
# Finite differencing step, absolute and relative, and sidedness of deriv.
step = self.parinfo(parinfo, 'step', default=0., n=npar)
dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
# Maximum and minimum steps allowed to be taken in one iteration
maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
qmin = minstep != 0
qmin[:] = False # Remove minstep for now!!
qmax = maxstep != 0
if numpy.any(qmin & qmax & (maxstep<minstep)):
self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
return
wh = (numpy.nonzero((qmin!=0.) | (qmax!=0.)))[0]
qminmax = len(wh) > 0
# Finish up the free parameters
ifree = (numpy.nonzero(pfixed != 1))[0]
nfree = len(ifree)
if nfree == 0:
self.errmsg = 'ERROR: no free parameters'
return
# Compose only VARYING parameters
self.params = xall.copy() # self.params is the set of parameters to be returned
x = self.params[ifree] # x is the set of free parameters
# LIMITED parameters ?
limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar)
limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar)
if (limited is not None) and (limits is not None):
# Error checking on limits in parinfo
if numpy.any((limited[:,0] & (xall < limits[:,0])) |
(limited[:,1] & (xall > limits[:,1]))):
self.errmsg = 'ERROR: parameters are not within PARINFO limits'
return
if numpy.any((limited[:,0] & limited[:,1]) &
(limits[:,0] >= limits[:,1]) &
(pfixed == 0)):
self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
return
# Transfer structure values to local variables
qulim = (limited[:,1])[ifree]
ulim = (limits [:,1])[ifree]
qllim = (limited[:,0])[ifree]
llim = (limits [:,0])[ifree]
if numpy.any((qulim!=0.) | (qllim!=0.)):
qanylim = 1
else:
qanylim = 0
else:
# Fill in local variables with dummy values
qulim = numpy.zeros(nfree)
ulim = x * 0.
qllim = qulim
llim = x * 0.
qanylim = 0
n = len(x)
# Check input parameters for errors
if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \
or (maxiter < 0) or (factor <= 0):
self.errmsg = 'ERROR: input keywords are inconsistent'
return
if rescale != 0:
self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
if len(diag) < n:
return
if numpy.any(diag <= 0):
return
self.errmsg = ''
[self.status, fvec] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
return
# If the returned fvec has an itemsize of more than four bytes, assume
# double precision
# It is important that the machar is determined by the precision of
# the returned value, not by the precision of the input array
if numpy.array([fvec]).dtype.itemsize>4:
self.machar = machar(double=1)
self.blas_enorm = mpfit.blas_enorm64
else:
self.machar = machar(double=0)
self.blas_enorm = mpfit.blas_enorm32
machep = self.machar.machep
m = len(fvec)
if m < n:
self.errmsg = 'ERROR: number of parameters must not exceed data'
return
self.dof = m-nfree
self.fnorm = self.enorm(fvec)
# Initialize Levenberg-Marquardt parameter and iteration counter
par = 0.
self.niter = 1
qtf = x * 0.
self.status = 0
# Beginning of the outer loop
while(1):
# If requested, call fcn to enable printing of iterates
self.params[ifree] = x
if self.qanytied:
self.params = self.tie(self.params, ptied)
if (nprint > 0) and (iterfunct is not None):
if ((self.niter-1) % nprint) == 0:
mperr = 0
xnew0 = self.params.copy()
dof = numpy.max([len(fvec) - len(x), 0])
status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
functkw=functkw, parinfo=parinfo, quiet=quiet,
dof=dof, **iterkw)
if status is not None:
self.status = status
# Check for user termination
if self.status < 0:
self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
return
# If parameters were changed (grrr..) then re-tie
if numpy.max(numpy.abs(xnew0-self.params)) > 0:
if self.qanytied:
self.params = self.tie(self.params, ptied)
x = self.params[ifree]
# Calculate the jacobian matrix
self.status = 2
catch_msg = 'calling MPFIT_FDJAC2'
fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
epsfcn=epsfcn,
autoderivative=autoderivative, dstep=dstep,
functkw=functkw, ifree=ifree, xall=self.params)
if fjac is None:
self.errmsg = 'WARNING: premature termination by FDJAC2'
return
# Determine if any of the parameters are pegged at the limits
if qanylim:
catch_msg = 'zeroing derivatives of pegged parameters'
whlpeg = (numpy.nonzero(qllim & (x == llim)))[0]
nlpeg = len(whlpeg)
whupeg = (numpy.nonzero(qulim & (x == ulim)))[0]
nupeg = len(whupeg)
# See if any "pegged" values should keep their derivatives
if nlpeg > 0:
# Total derivative of sum wrt lower pegged parameters
for i in range(nlpeg):
sum0 = sum(fvec * fjac[:,whlpeg[i]])
if sum0 > 0:
fjac[:,whlpeg[i]] = 0
if nupeg > 0:
# Total derivative of sum wrt upper pegged parameters
for i in range(nupeg):
sum0 = sum(fvec * fjac[:,whupeg[i]])
if sum0 < 0:
fjac[:,whupeg[i]] = 0
# Compute the QR factorization of the jacobian
[fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
# On the first iteration if "diag" is unspecified, scale
# according to the norms of the columns of the initial jacobian
catch_msg = 'rescaling diagonal elements'
if self.niter == 1:
if (rescale==0) or (len(diag) < n):
diag = wa2.copy()
diag[diag == 0] = 1.
# On the first iteration, calculate the norm of the scaled x
# and initialize the step bound delta
wa3 = diag * x
xnorm = self.enorm(wa3)
delta = factor*xnorm
if delta == 0.:
delta = factor
# Form (q transpose)*fvec and store the first n components in qtf
catch_msg = 'forming (q transpose)*fvec'
wa4 = fvec.copy()
for j in range(n):
lj = ipvt[j]
temp3 = fjac[j,lj]
if temp3 != 0:
fj = fjac[j:,lj]
wj = wa4[j:]
# *** optimization wa4(j:*)
wa4[j:] = wj - fj * sum(fj*wj) / temp3
fjac[j,lj] = wa1[j]
qtf[j] = wa4[j]
# From this point on, only the square matrix, consisting of the
# triangle of R, is needed.
fjac = fjac[0:n, 0:n]
fjac.shape = [n, n]
temp = fjac.copy()
for i in range(n):
temp[:,i] = fjac[:, ipvt[i]]
fjac = temp.copy()
# Check for overflow. This should be a cheap test here since FJAC
# has been reduced to a (small) square matrix, and the test is
# O(N^2).
#wh = where(finite(fjac) EQ 0, ct)
#if ct GT 0 then goto, FAIL_OVERFLOW
# Compute the norm of the scaled gradient
catch_msg = 'computing the scaled gradient'
gnorm = 0.
if self.fnorm != 0:
for j in range(n):
l = ipvt[j]
if wa2[l] != 0:
sum0 = sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm
gnorm = numpy.max([gnorm,numpy.abs(sum0/wa2[l])])
# Test for convergence of the gradient norm
if gnorm <= gtol:
self.status = 4
break
if maxiter == 0:
self.status = 5
break
# Rescale if necessary
if rescale == 0:
diag = numpy.choose(diag>wa2, (wa2, diag))
# Beginning of the inner loop
while(1):
# Determine the levenberg-marquardt parameter
catch_msg = 'calculating LM parameter (MPFIT_)'
[fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
delta, wa1, wa2, par=par)
# Store the direction p and x+p. Calculate the norm of p
wa1 = -wa1
if (qanylim == 0) and (qminmax == 0):
# No parameter limits, so just move to new position WA2
alpha = 1.
wa2 = x + wa1
else:
# Respect the limits. If a step were to go out of bounds, then
# we should take a step in the same direction but shorter distance.
# The step should take us right to the limit in that case.
alpha = 1.
if qanylim:
# Do not allow any steps out of bounds
catch_msg = 'checking for a step out of bounds'
if nlpeg > 0:
wa1[whlpeg] = numpy.clip( wa1[whlpeg], 0., numpy.max(wa1))
if nupeg > 0:
wa1[whupeg] = numpy.clip(wa1[whupeg], numpy.min(wa1), 0.)
dwa1 = numpy.abs(wa1) > machep
whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)))[0]
if len(whl) > 0:
t = ((llim[whl] - x[whl]) /
wa1[whl])
alpha = numpy.min([alpha, numpy.min(t)])
whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)))[0]
if len(whu) > 0:
t = ((ulim[whu] - x[whu]) /
wa1[whu])
alpha = numpy.min([alpha, numpy.min(t)])
# Obey any max step values.
if qminmax:
nwa1 = wa1 * alpha
whmax = (numpy.nonzero((qmax != 0.) & (maxstep > 0)))[0]
if len(whmax) > 0:
mrat = numpy.max(numpy.abs(nwa1[whmax]) /
numpy.abs(maxstep[ifree[whmax]]))
if mrat > 1:
alpha = alpha / mrat
# Scale the resulting vector
wa1 = wa1 * alpha
wa2 = x + wa1
# Adjust the final output values. If the step put us exactly
# on a boundary, make sure it is exact.
sgnu = (ulim >= 0) * 2. - 1.
sgnl = (llim >= 0) * 2. - 1.
# Handles case of
# ... nonzero *LIM ... ...zero * LIM
ulim1 = ulim * (1 - sgnu * machep) - (ulim == 0) * machep
llim1 = llim * (1 + sgnl * machep) + (llim == 0) * machep
wh = (numpy.nonzero((qulim!=0) & (wa2 >= ulim1)))[0]
if len(wh) > 0:
wa2[wh] = ulim[wh]
wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim1)))[0]
if len(wh) > 0:
wa2[wh] = llim[wh]
# endelse
wa3 = diag * wa1
pnorm = self.enorm(wa3)
# On the first iteration, adjust the initial step bound
if self.niter == 1:
delta = numpy.min([delta,pnorm])
self.params[ifree] = wa2
# Evaluate the function at x+p and calculate its norm
mperr = 0
catch_msg = 'calling '+str(fcn)
[self.status, wa4] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'WARNING: premature termination by "'+str(fcn)+'"'
return
fnorm1 = self.enorm(wa4)
# Compute the scaled actual reduction
catch_msg = 'computing convergence criteria'
actred = -1.
if (0.1 * fnorm1) < self.fnorm:
actred = - (fnorm1/self.fnorm)**2 + 1.
# Compute the scaled predicted reduction and the scaled directional
# derivative
for j in range(n):
wa3[j] = 0
wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
# Remember, alpha is the fraction of the full LM step actually
# taken
temp1 = self.enorm(alpha*wa3)/self.fnorm
temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm
prered = temp1*temp1 + (temp2*temp2)/0.5
dirder = -(temp1*temp1 + temp2*temp2)
# Compute the ratio of the actual to the predicted reduction.
ratio = 0.
if prered != 0:
ratio = actred/prered
# Update the step bound
if ratio <= 0.25:
if actred >= 0:
temp = .5
else:
temp = .5*dirder/(dirder + .5*actred)
if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1):
temp = 0.1
delta = temp*numpy.min([delta,pnorm/0.1])
par = par/temp
else:
if (par == 0) or (ratio >= 0.75):
delta = pnorm/.5
par = .5*par
# Test for successful iteration
if ratio >= 0.0001:
# Successful iteration. Update x, fvec, and their norms
x = wa2
wa2 = diag * x
fvec = wa4
xnorm = self.enorm(wa2)
self.fnorm = fnorm1
self.niter = self.niter + 1
# Tests for convergence
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1):
self.status = 1
if delta <= xtol*xnorm:
self.status = 2
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1) and (self.status == 2):
self.status = 3
if self.status != 0:
break
# Tests for termination and stringent tolerances
if self.niter >= maxiter:
self.status = 5
if (numpy.abs(actred) <= machep) and (prered <= machep) \
and (0.5*ratio <= 1):
self.status = 6
if delta <= machep*xnorm:
self.status = 7
if gnorm <= machep:
self.status = 8
if self.status != 0:
break
# End of inner loop. Repeat if iteration unsuccessful
if ratio >= 0.0001:
break
# Check for over/underflow
if ~numpy.all(numpy.isfinite(wa1) & numpy.isfinite(wa2) & \
numpy.isfinite(x)) or ~numpy.isfinite(ratio):
self.errmsg = ('ERROR: parameter or function value(s) have become '
'infinite; check model function for over- and underflow')
self.status = -16
break
#wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
#if ct GT 0 OR finite(ratio) EQ 0 then begin
if self.status != 0:
break;
# End of outer loop.
catch_msg = 'in the termination phase'
# Termination, either normal or user imposed.
if len(self.params) == 0:
return
if nfree == 0:
self.params = xall.copy()
else:
self.params[ifree] = x
if (nprint > 0) and (self.status > 0):
catch_msg = 'calling ' + str(fcn)
[status, fvec] = self.call(fcn, self.params, functkw)
catch_msg = 'in the termination phase'
self.fnorm = self.enorm(fvec)
if (self.fnorm is not None) and (fnorm1 is not None):
self.fnorm = numpy.max([self.fnorm, fnorm1])
self.fnorm = self.fnorm**2.
self.covar = None
self.perror = None
# (very carefully) set the covariance matrix COVAR
if (self.status > 0) and (nocovar==0) and (n is not None) \
and (fjac is not None) and (ipvt is not None):
sz = fjac.shape
if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \
and (len(ipvt) >= n):
catch_msg = 'computing the covariance matrix'
cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
cv.shape = [n, n]
nn = len(xall)
# Fill in actual covariance matrix, accounting for fixed
# parameters.
self.covar = numpy.zeros([nn, nn], dtype=float)
for i in range(n):
self.covar[ifree,ifree[i]] = cv[:,i]
# Compute errors in parameters
catch_msg = 'computing parameter errors'
self.perror = numpy.zeros(nn, dtype=float)
d = numpy.diagonal(self.covar)
wh = (numpy.nonzero(d >= 0))[0]
if len(wh) > 0:
self.perror[wh] = numpy.sqrt(d[wh])
return
def __str__(self):
return {'params': self.params,
'niter': self.niter,
'covar': self.covar,
'perror': self.perror,
'status': self.status,
'debug': self.debug,
'errmsg': self.errmsg,
'nfev': self.nfev,
'damp': self.damp
#,'machar':self.machar
}.__str__()
# Default procedure to be called every iteration. It simply prints
# the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
quiet=0, iterstop=None, parinfo=None,
format=None, pformat='%.10g', dof=1):
if self.debug:
print ('Entering defiter...')
if quiet:
return
if fnorm is None:
[status, fvec] = self.call(fcn, x, functkw)
fnorm = self.enorm(fvec)**2
# Determine which parameters to print
nprint = len(x)
print ("Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof))
for i in range(nprint):
if (parinfo is not None) and (parinfo[i].has_key('parname')):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo is not None) and (parinfo[i].has_key('mpprint')):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if iprint:
print (p + (pformat % x[i]) + ' ')
return 0
# DO_ITERSTOP:
# if keyword_set(iterstop) then begin
# k = get_kbrd(0)
# if k EQ string(byte(7)) then begin
# message, 'WARNING: minimization not complete', /info
# print, 'Do you want to terminate this procedure? (y/n)', $
# format='(A,$)'
# k = ''
# read, k
# if strupcase(strmid(k,0,1)) EQ 'Y' then begin
# message, 'WARNING: Procedure is terminating.', /info
# mperr = -1
# endif
# endif
# endif
# Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
if self.debug:
print ('Entering parinfo...')
if (n == 0) and (parinfo is not None):
n = len(parinfo)
if n == 0:
values = default
return values
values = []
for i in range(n):
if (parinfo is not None) and (parinfo[i].has_key(key)):
values.append(parinfo[i][key])
else:
values.append(default)
# Convert to numeric arrays if possible
test = default
if type(default) == types.ListType:
test=default[0]
if isinstance(test, types.IntType):
values = numpy.asarray(values, int)
elif isinstance(test, types.FloatType):
values = numpy.asarray(values, float)
return values
# Call user function or procedure, with _EXTRA or not, with
# derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
if self.debug:
print ('Entering call...')
if self.qanytied:
x = self.tie(x, self.ptied)
self.nfev = self.nfev + 1
if fjac is None:
[status, f] = fcn(x, fjac=fjac, **functkw)
if self.damp > 0:
# Apply the damping if requested. This replaces the residuals
# with their hyperbolic tangent. Thus residuals larger than
# DAMP are essentially clipped.
f = numpy.tanh(f/self.damp)
return [status, f]
else:
return fcn(x, fjac=fjac, **functkw)
def enorm(self, vec):
ans = self.blas_enorm(vec)
return ans
def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
epsfcn=None, autoderivative=1,
functkw=None, xall=None, ifree=None, dstep=None):
if self.debug:
print ('Entering fdjac2...')
machep = self.machar.machep
if epsfcn is None:
epsfcn = machep
if xall is None:
xall = x
if ifree is None:
ifree = numpy.arange(len(xall))
if step is None:
step = x * 0.
nall = len(xall)
eps = numpy.sqrt(numpy.max([epsfcn, machep]))
m = len(fvec)
n = len(x)
# Compute analytical derivative if requested
if autoderivative == 0:
mperr = 0
fjac = numpy.zeros(nall, dtype=float)
fjac[ifree] = 1.0 # Specify which parameters need derivatives
[status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
if len(fjac) != m*nall:
print ('ERROR: Derivative matrix was not computed properly.')
return None
# This definition is consistent with CURVEFIT
# Sign error found (thanks Jesus Fernandez <fernande@irm.chu-caen.fr>)
fjac.shape = [m,nall]
fjac = -fjac
# Select only the free parameters
if len(ifree) < nall:
fjac = fjac[:,ifree]
fjac.shape = [m, n]
return fjac
fjac = numpy.zeros([m, n], dtype=float)
h = eps * numpy.abs(x)
# if STEP is given, use that
# STEP includes the fixed parameters
if step is not None:
stepi = step[ifree]
wh = (numpy.nonzero(stepi > 0))[0]
if len(wh) > 0:
h[wh] = stepi[wh]
# if relative step is given, use that
# DSTEP includes the fixed parameters
if len(dstep) > 0:
dstepi = dstep[ifree]
wh = (numpy.nonzero(dstepi > 0))[0]
if len(wh) > 0:
h[wh] = numpy.abs(dstepi[wh]*x[wh])
# In case any of the step values are zero
h[h == 0] = eps
# Reverse the sign of the step if we are up against the parameter
# limit, or if the user requested it.
# DSIDE includes the fixed parameters (ULIMITED/ULIMIT have only
# varying ones)
mask = dside[ifree] == -1
if len(ulimited) > 0 and len(ulimit) > 0:
mask = (mask | ((ulimited!=0) & (x > ulimit-h)))
wh = (numpy.nonzero(mask))[0]
if len(wh) > 0:
h[wh] = - h[wh]
# Loop through parameters, computing the derivative for each
for j in range(n):
xp = xall.copy()
xp[ifree[j]] = xp[ifree[j]] + h[j]
[status, fp] = self.call(fcn, xp, functkw)
if status < 0:
return None
if numpy.abs(dside[ifree[j]]) <= 1:
# COMPUTE THE ONE-SIDED DERIVATIVE
# Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fvec)/h[j]
else:
# COMPUTE THE TWO-SIDED DERIVATIVE
xp[ifree[j]] = xall[ifree[j]] - h[j]
mperr = 0
[status, fm] = self.call(fcn, xp, functkw)
if status < 0:
return None
# Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fm)/(2*h[j])
return fjac
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
#
# PIVOTING / PERMUTING:
#
# Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in
# permuted order.
#
# RDIAG is in permuted order.
# ACNORM is in standard parameter order.
#
#
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident # identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector
# hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix
# endfor
#
# Test the result:
# IDL> print, hh # transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
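# ----------------------------------------------------------------------
# Illustrative numpy cross-check (an added sketch, not part of the
# original MINPACK/IDL documentation): for a full-rank matrix, the
# column-pivoted factorization produced by qrfac below can be compared
# against numpy's own QR of the permuted matrix, e.g.
#
#   a0 = aa.copy()
#   a, ipvt, rdiag, acnorm = <mpfit instance>.qrfac(aa, pivot=1)
#   q, r = numpy.linalg.qr(a0[:, ipvt])
#   numpy.allclose(numpy.abs(numpy.diag(r)), numpy.abs(rdiag))  # -> True
#
# Signs may differ, since a QR factorization is only unique up to the
# signs of the diagonal of R.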
def qrfac(self, a, pivot=0):
if self.debug: print ('Entering qrfac...')
machep = self.machar.machep
sz = a.shape
m = sz[0]
n = sz[1]
# Compute the initial column norms and initialize arrays
acnorm = numpy.zeros(n, dtype=float)
for j in range(n):
acnorm[j] = self.enorm(a[:,j])
rdiag = acnorm.copy()
wa = rdiag.copy()
ipvt = numpy.arange(n)
# Reduce a to r with householder transformations
minmn = numpy.min([m,n])
for j in range(minmn):
if pivot != 0:
# Bring the column of largest norm into the pivot position
rmax = numpy.max(rdiag[j:])
kmax = (numpy.nonzero(rdiag[j:] == rmax))[0]
ct = len(kmax)
kmax = kmax + j
if ct > 0:
kmax = kmax[0]
# Exchange rows via the pivot only. Avoid actually exchanging
# the rows, in case there is lots of memory transfer. The
# exchange occurs later, within the body of MPFIT, after the
# extraneous columns of the matrix have been shed.
if kmax != j:
temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp
rdiag[kmax] = rdiag[j]
wa[kmax] = wa[j]
# Compute the householder transformation to reduce the jth
# column of A to a multiple of the jth unit vector
lj = ipvt[j]
ajj = a[j:,lj]
ajnorm = self.enorm(ajj)
if ajnorm == 0:
break
if a[j,lj] < 0:
ajnorm = -ajnorm
ajj = ajj / ajnorm
ajj[0] = ajj[0] + 1
# *** Note optimization a(j:*,j)
a[j:,lj] = ajj
# Apply the transformation to the remaining columns
# and update the norms
# NOTE to SELF: tried to optimize this by removing the loop,
# but it actually got slower. Reverted to "for" loop to keep
# it simple.
if j+1 < n:
for k in range(j+1, n):
lk = ipvt[k]
ajk = a[j:,lk]
# *** Note optimization a(j:*,lk)
# (corrected 20 Jul 2000)
if a[j,lj] != 0:
a[j:,lk] = ajk - ajj * sum(ajk*ajj)/a[j,lj]
if (pivot != 0) and (rdiag[k] != 0):
temp = a[j,lk]/rdiag[k]
rdiag[k] = rdiag[k] * numpy.sqrt(numpy.max([(1.-temp**2), 0.]))
temp = rdiag[k]/wa[k]
if (0.05*temp*temp) <= machep:
rdiag[k] = self.enorm(a[j+1:,lk])
wa[k] = rdiag[k]
rdiag[j] = -ajnorm
return [a, ipvt, rdiag, acnorm]
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def qrsolv(self, r, ipvt, diag, qtb, sdiag):
if self.debug:
print ('Entering qrsolv...')
sz = r.shape
m = sz[0]
n = sz[1]
# copy r and (q transpose)*b to preserve input and initialize s.
# in particular, save the diagonal elements of r in x.
for j in range(n):
r[j:n,j] = r[j,j:n]
x = numpy.diagonal(r).copy()
wa = qtb.copy()
# Eliminate the diagonal matrix d using a givens rotation
for j in range(n):
l = ipvt[j]
if diag[l] == 0:
break
sdiag[j:] = 0
sdiag[j] = diag[l]
# The transformations to eliminate the row of d modify only a
# single element of (q transpose)*b beyond the first n, which
# is initially zero.
qtbpj = 0.
for k in range(j,n):
if sdiag[k] == 0:
break
if numpy.abs(r[k,k]) < numpy.abs(sdiag[k]):
cotan = r[k,k]/sdiag[k]
sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)
cosine = sine*cotan
else:
tang = sdiag[k]/r[k,k]
cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)
sine = cosine*tang
# Compute the modified diagonal element of r and the
# modified element of ((q transpose)*b,0).
r[k,k] = cosine*r[k,k] + sine*sdiag[k]
temp = cosine*wa[k] + sine*qtbpj
qtbpj = -sine*wa[k] + cosine*qtbpj
wa[k] = temp
# Accumulate the transformation in the row of s
if n > k+1:
temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
r[k+1:n,k] = temp
sdiag[j] = r[j,j]
r[j,j] = x[j]
# Solve the triangular system for z. If the system is singular
# then obtain a least squares solution
nsing = n
wh = (numpy.nonzero(sdiag == 0))[0]
if len(wh) > 0:
nsing = wh[0]
wa[nsing:] = 0
if nsing >= 1:
wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] # Degenerate case
# *** Reverse loop ***
for j in range(nsing-2,-1,-1):
sum0 = sum(r[j+1:nsing,j]*wa[j+1:nsing])
wa[j] = (wa[j]-sum0)/sdiag[j]
# Permute the components of z back to components of x
x[ipvt] = wa
return (r, x, sdiag)
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
if self.debug:
print ('Entering lmpar...')
dwarf = self.machar.minnum
machep = self.machar.machep
sz = r.shape
m = sz[0]
n = sz[1]
# Compute and store in x the gauss-newton direction. If the
# jacobian is rank-deficient, obtain a least-squares solution
nsing = n
wa1 = qtb.copy()
rthresh = numpy.max(numpy.abs(numpy.diagonal(r))) * machep
wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r)) < rthresh))[0]
if len(wh) > 0:
nsing = wh[0]
wa1[wh[0]:] = 0
if nsing >= 1:
# *** Reverse loop ***
for j in range(nsing-1,-1,-1):
wa1[j] = wa1[j]/r[j,j]
if j-1 >= 0:
wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]
# Note: ipvt here is a permutation array
x[ipvt] = wa1
# Initialize the iteration counter. Evaluate the function at the
# origin, and test for acceptance of the gauss-newton direction
iter = 0
wa2 = diag * x
dxnorm = self.enorm(wa2)
fp = dxnorm - delta
if fp <= 0.1*delta:
return [r, 0., x, sdiag]
# If the jacobian is not rank deficient, the newton step provides a
# lower bound, parl, for the zero of the function. Otherwise set
# this bound to zero.
parl = 0.
if nsing >= n:
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
wa1[0] = wa1[0] / r[0,0] # Degenerate case
for j in range(1,n): # Note "1" here, not zero
sum0 = sum(r[0:j,j]*wa1[0:j])
wa1[j] = (wa1[j] - sum0)/r[j,j]
temp = self.enorm(wa1)
parl = ((fp/delta)/temp)/temp
# Calculate an upper bound, paru, for the zero of the function
for j in range(n):
sum0 = sum(r[0:j+1,j]*qtb[0:j+1])
wa1[j] = sum0/diag[ipvt[j]]
gnorm = self.enorm(wa1)
paru = gnorm/delta
if paru == 0:
paru = dwarf/numpy.min([delta,0.1])
# If the input par lies outside of the interval (parl,paru), set
# par to the closer endpoint
par = numpy.max([par,parl])
par = numpy.min([par,paru])
if par == 0:
par = gnorm/dxnorm
# Beginning of an iteration
while(1):
iter = iter + 1
# Evaluate the function at the current value of par
if par == 0:
par = numpy.max([dwarf, paru*0.001])
temp = numpy.sqrt(par)
wa1 = temp * diag
[r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
wa2 = diag*x
dxnorm = self.enorm(wa2)
temp = fp
fp = dxnorm - delta
if (numpy.abs(fp) <= 0.1*delta) or \
((parl == 0) and (fp <= temp) and (temp < 0)) or \
(iter == 10):
break
# Compute the newton correction
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
for j in range(n-1):
wa1[j] = wa1[j]/sdiag[j]
wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
wa1[n-1] = wa1[n-1]/sdiag[n-1] # Degenerate case
temp = self.enorm(wa1)
parc = ((fp/delta)/temp)/temp
# Depending on the sign of the function, update parl or paru
if fp > 0:
parl = numpy.max([parl,par])
if fp < 0:
paru = numpy.min([paru,par])
# Compute an improved estimate for par
par = numpy.max([parl, par+parc])
# End of an iteration
# Termination
return [r, par, x, sdiag]
# Procedure to tie one parameter to another.
def tie(self, p, ptied=None):
if self.debug:
print ('Entering tie...')
if ptied is None:
return
for i in range(len(ptied)):
if ptied[i] == '':
continue
cmd = 'p[' + str(i) + '] = ' + ptied[i]
exec(cmd)
return p
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
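# Illustrative note (an added sketch, not part of the original code): for a
# full-rank, well-conditioned Jacobian A with factorization A*P = Q*R, the
# matrix returned by calc_covar(r, ipvt) is the covariance inverse(A^T A);
# a direct (but numerically less stable) numpy equivalent would be
#
#   cov = numpy.linalg.inv(a.T.dot(a))
#
# calc_covar builds the same quantity from R alone, avoiding the explicit
# normal equations.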
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
if self.debug:
print ('Entering calc_covar...')
if numpy.ndim(rr) != 2:
print ('ERROR: r must be a two-dimensional matrix')
return -1
s = rr.shape
n = s[0]
if s[0] != s[1]:
print ('ERROR: r must be a square matrix')
return -1
if ipvt is None:
ipvt = numpy.arange(n)
r = rr.copy()
r.shape = [n,n]
# Form the inverse of r in the full upper triangle of r
l = -1
tolr = tol * numpy.abs(r[0,0])
for k in range(n):
if numpy.abs(r[k,k]) <= tolr:
break
r[k,k] = 1./r[k,k]
for j in range(k):
temp = r[k,k] * r[j,k]
r[j,k] = 0.
r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
l = k
# Form the full upper triangle of the inverse of (r transpose)*r
# in the full upper triangle of r
if l >= 0:
for k in range(l+1):
for j in range(k):
temp = r[j,k]
r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
temp = r[k,k]
r[0:k+1,k] = temp * r[0:k+1,k]
# Form the full lower triangle of the covariance matrix
# in the strict lower triangle of r and in wa
wa = numpy.repeat([r[0,0]], n)
for j in range(n):
jj = ipvt[j]
sing = j > l
for i in range(j+1):
if sing:
r[i,j] = 0.
ii = ipvt[i]
if ii > jj:
r[ii,jj] = r[i,j]
if ii < jj:
r[jj,ii] = r[i,j]
wa[jj] = r[j,j]
# Symmetrize the covariance matrix in r
for j in range(n):
r[0:j+1,j] = r[j,0:j+1]
r[j,j] = wa[j]
return r
class machar:
def __init__(self, double=1):
if double == 0:
info = numpy.finfo(numpy.float32)
else:
info = numpy.finfo(numpy.float64)
self.machep = info.eps
self.maxnum = info.max
self.minnum = info.tiny
self.maxlog = numpy.log(self.maxnum)
self.minlog = numpy.log(self.minnum)
self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10
self.rgiant = numpy.sqrt(self.maxnum) * 0.1
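# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity, not part of the original
# module): fit a straight line y = p[0] + p[1]*x to noisy data.  The user
# function follows the mpfit convention of returning [status, weighted
# residuals]; the data and starting values below are made up.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    def _linear_resid(p, fjac=None, x=None, y=None, err=None):
        # Residuals of a straight-line model, weighted by the errors
        model = p[0] + p[1] * x
        status = 0
        return [status, (y - model) / err]

    x = numpy.linspace(0., 10., 50)
    y = 1.5 + 2.0 * x + numpy.random.normal(0., 0.3, x.size)
    err = numpy.full(x.size, 0.3)
    fit = mpfit(_linear_resid, numpy.array([0., 1.]),
                functkw={'x': x, 'y': y, 'err': err}, quiet=1)
    print('status =', fit.status, 'params =', fit.params, 'perror =', fit.perror)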
|
gLENTNER/SLiPy
|
astrolibpy/mpfit/mpfit.py
|
Python
|
gpl-2.0
| 78,437
|
[
"Gaussian"
] |
1fc2a5aa285e7f1af90eb503016960d3fefadce18b8586636eb066fb51beb1b0
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from espresso.esutil import pmiimport
pmiimport('espresso.integrator')
from espresso.integrator.MDIntegrator import *
from espresso.integrator.VelocityVerlet import *
from espresso.integrator.VelocityVerletOnGroup import *
from espresso.integrator.Isokinetic import *
from espresso.integrator.StochasticVelocityRescaling import *
from espresso.integrator.TDforce import *
from espresso.integrator.FreeEnergyCompensation import *
from espresso.integrator.Extension import *
from espresso.integrator.Adress import *
from espresso.integrator.BerendsenBarostat import *
from espresso.integrator.BerendsenBarostatAnisotropic import *
from espresso.integrator.BerendsenThermostat import *
from espresso.integrator.LangevinThermostat import *
from espresso.integrator.LangevinThermostat1D import *
from espresso.integrator.DPDThermostat import *
from espresso.integrator.LangevinBarostat import *
from espresso.integrator.FixPositions import *
from espresso.integrator.LatticeBoltzmann import *
from espresso.integrator.LBInit import *
from espresso.integrator.LBInitConstForce import *
from espresso.integrator.LBInitPeriodicForce import *
from espresso.integrator.LBInitPopUniform import *
from espresso.integrator.LBInitPopWave import *
from espresso.integrator.ExtForce import *
from espresso.integrator.CapForce import *
from espresso.integrator.ExtAnalyze import *
from espresso.integrator.VelocityVerletOnRadius import *
from espresso.integrator.EmptyExtension import *
|
BackupTheBerlios/espressopp
|
src/integrator/__init__.py
|
Python
|
gpl-3.0
| 2,344
|
[
"ESPResSo"
] |
7013c954a1622cc3efbc10718f9d9af68d5cb7912cfe600960af2eaa0a866081
|
"""
Async helper function that are invalid syntax on Python 3.5 and below.
This code is best effort, and may have edge cases not behaving as expected. In
particular it contains a number of heuristics to detect whether code is
effectively async and needs to run in an event loop or not.
Some constructs (like top-level `return`, or `yield`) are taken care of
explicitly to actually raise a SyntaxError and stay as close as possible to
Python semantics.
"""
import ast
import sys
import inspect
from textwrap import dedent, indent
class _AsyncIORunner:
def __call__(self, coro):
"""
Handler for asyncio autoawait
"""
import asyncio
return asyncio.get_event_loop().run_until_complete(coro)
def __str__(self):
return 'asyncio'
_asyncio_runner = _AsyncIORunner()
def _curio_runner(coroutine):
"""
handler for curio autoawait
"""
import curio
return curio.run(coroutine)
def _trio_runner(async_fn):
import trio
async def loc(coro):
"""
We need the dummy no-op async def to protect against
trio's internals. See https://github.com/python-trio/trio/issues/89
"""
return await coro
return trio.run(loc, async_fn)
def _pseudo_sync_runner(coro):
"""
A runner that does not really allow async execution, and just advances the coroutine.
See discussion in https://github.com/python-trio/trio/issues/608,
Credit to Nathaniel Smith
"""
try:
coro.send(None)
except StopIteration as exc:
return exc.value
else:
# TODO: do not raise but return an execution result with the right info.
raise RuntimeError(
"{coro_name!r} needs a real async loop".format(coro_name=coro.__name__)
)
def _asyncify(code: str) -> str:
"""wrap code in async def definition.
And setup a bit of context to run it later.
"""
res = dedent(
"""
async def __wrapper__():
try:
{usercode}
finally:
locals()
"""
).format(usercode=indent(code, " " * 8))
return res
class _AsyncSyntaxErrorVisitor(ast.NodeVisitor):
"""
Find syntax that would be an error in an async repl, but is erroneously
allowed because the implementation wraps the repl in an async function
(e.g. yield or return at the top level)
"""
def __init__(self):
if sys.version_info >= (3,8):
raise ValueError('DEPRECATED in Python 3.8+')
self.depth = 0
super().__init__()
def generic_visit(self, node):
func_types = (ast.FunctionDef, ast.AsyncFunctionDef)
invalid_types_by_depth = {
0: (ast.Return, ast.Yield, ast.YieldFrom),
1: (ast.Nonlocal,)
}
should_traverse = self.depth < max(invalid_types_by_depth.keys())
if isinstance(node, func_types) and should_traverse:
self.depth += 1
super().generic_visit(node)
self.depth -= 1
elif isinstance(node, invalid_types_by_depth[self.depth]):
raise SyntaxError()
else:
super().generic_visit(node)
def _async_parse_cell(cell: str) -> ast.AST:
"""
This is a compatibility shim for pre-3.7 when async outside of a function
is a syntax error at the parse stage.
It will return an abstract syntax tree parsed as if async and await outside
of a function were not a syntax error.
"""
if sys.version_info < (3, 7):
# Prior to 3.7 you need to asyncify before parse
wrapped_parse_tree = ast.parse(_asyncify(cell))
return wrapped_parse_tree.body[0].body[0]
else:
return ast.parse(cell)
def _should_be_async(cell: str) -> bool:
"""Detect if a block of code need to be wrapped in an `async def`
Attempt to parse the block of code; if it compiles, we're fine.
Otherwise we wrap it in an async def and try to compile again.
If that works, assume it should be async. Otherwise return False.
Not handled yet: If the block of code has a return statement at the top
level, it will be seen as async. This is a known limitation.
"""
if sys.version_info > (3, 8):
try:
code = compile(cell, "<>", "exec", flags=getattr(ast,'PyCF_ALLOW_TOP_LEVEL_AWAIT', 0x0))
return inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
except (SyntaxError, MemoryError):
return False
try:
# we can't limit ourselves to ast.parse, as it __accepts__ to parse on
# 3.7+, but just does not _compile_
code = compile(cell, "<>", "exec")
except (SyntaxError, MemoryError):
try:
parse_tree = _async_parse_cell(cell)
# Raise a SyntaxError if there are top-level return or yields
v = _AsyncSyntaxErrorVisitor()
v.visit(parse_tree)
except (SyntaxError, MemoryError):
return False
return True
return False
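# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original module):
# how the helpers above fit together.  `_should_be_async` decides whether a
# cell needs an event loop; `_asyncify` shows the wrapper that pre-3.7
# interpreters would compile instead.  The cell strings are made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    print(_should_be_async("print('plain code')"))                   # False
    print(_should_be_async("await __import__('asyncio').sleep(0)"))  # True where top-level await parses
    print(_asyncify("print('wrapped')"))                             # the async def wrapper source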
|
sserrot/champion_relationships
|
venv/Lib/site-packages/IPython/core/async_helpers.py
|
Python
|
mit
| 4,998
|
[
"VisIt"
] |
66fa8e2f2c754cb0087a89e08a1e739131f0eb82668d0c7f125cf188b03f41fb
|
#
#-*- coding:utf-8 -*-
"""
Gentoo-keys - gkey.py
Holds GKEY class and related values
@copyright: 2012-2015 by Brian Dolbec <dol-sen@gentoo.org>
@license: GNU GPL2, see COPYING for details.
"""
from collections import namedtuple
GKEY_STRING = ''' ----------
Name.........: %(name)s
Nick.........: %(nick)s
Keydir.......: %(keydir)s
'''
GKEY_UID = \
''' UID..........: %(uid)s
'''
GKEY_FINGERPRINTS = \
''' Keyid........: %(keyid)s
Fingerprint: %(fingerprint)s
'''
class GKEY(namedtuple('GKEY', ['nick', 'name', 'keydir', 'keys', 'fingerprint', 'uid'])):
'''Class to hold the relevant info about a key'''
field_types = {'nick': str, 'name': str, 'keydir': str, 'keys': list,
'fingerprint': list, 'uid': list}
__slots__ = ()
@property
def keyid(self):
'''Keyid is a substring value of the fingerprint'''
return ['0x' + x[-16:] for x in self.fingerprint]
@property
def pub_keyid(self):
'''Keyid is a substring value of the keys fingerprints'''
return ['0x' + x[-16:] for x in self.keys]
@property
def pretty_print(self):
'''Pretty printing a GKEY'''
gkey = {
'name': self.name,
'nick': self.nick,
'keydir': self.keydir,
}
output = GKEY_STRING % gkey
for uid in self.uid:
output += GKEY_UID % {'uid': uid}
for f in self.fingerprint:
fingerprint = {'fingerprint': f, 'keyid': '0x' + f[-16:]}
output += GKEY_FINGERPRINTS % fingerprint
return output
def update(self, result_list):
'''Processes a results instance from a colon listing
and mines all fingerprints found.
@param result_list: list of pyGPG.output.GPGResult instances
(one for each fingerprint in the list)
@return: A new, updated GKEY instance
'''
fingerprints = set()
uids = set()
for result in result_list:
for data in result.status.data:
if data.name == "FPR":
fingerprints.add(data.fingerprint)
elif data.name == "UID":
uids.add(data.user_ID)
return self._make([self.nick, self.name, self.keydir, self.keys, list(fingerprints), sorted(uids)])
class GKEY_CHECK(namedtuple('GKEY_CHECK', ['keyid', 'revoked', 'expired', 'invalid', 'sign'])):
__slots__ = ()
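# Illustrative usage sketch (added for clarity, not part of the original
# module).  The fingerprint and UID below are made-up example values.
if __name__ == '__main__':
    key = GKEY(nick='example', name='Example Dev', keydir='example',
               keys=['AAAA000011112222333344445555666677778888'],
               fingerprint=['AAAA000011112222333344445555666677778888'],
               uid=['Example Dev <dev@example.org>'])
    print(key.keyid)        # derived from the last 16 hex digits
    print(key.pretty_print)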
|
gentoo/gentoo-keys
|
gkeys/gkeys/gkey.py
|
Python
|
gpl-2.0
| 2,466
|
[
"Brian"
] |
21f9d1325d35129ce727fd7f46c33fa4a35b57aa730e5b8a1cf5626c9aca4231
|
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import chigger
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 4, 9, 16, 25]
line = chigger.graphs.Line(label='x^2', color=[0,1,0], tracer=True)
graph = chigger.graphs.Graph(line)
graph.setOptions('xaxis', lim=[0,6])
graph.setOptions('yaxis', lim=[0,28])
graph.setOptions('legend', visible=True)
window = chigger.RenderWindow(graph, size=[400,400], test=True)
for i in range(len(x)):
line.setOptions(x=[x[i]], y=[y[i]])
window.write('tracer_' + str(i) + '.png')
window.start()
|
nuclear-wizard/moose
|
python/chigger/tests/line/tracer.py
|
Python
|
lgpl-2.1
| 841
|
[
"MOOSE"
] |
e364d9b91bc05e029ee3380d1a1da290cd07ab12568dadc0d6315458d33a03f4
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import glob
import os.path
from spack import *
class Espresso(Package):
"""Quantum-ESPRESSO is an integrated suite of Open-Source computer codes
for electronic-structure calculations and materials modeling at the
nanoscale. It is based on density-functional theory, plane waves, and
pseudopotentials.
"""
homepage = 'http://quantum-espresso.org'
url = 'http://www.qe-forge.org/gf/download/frsrelease/204/912/espresso-5.3.0.tar.gz'
version(
'6.1.0',
'db398edcad76e085f8c8a3f6ecb7aaab',
url='http://www.qe-forge.org/gf/download/frsrelease/240/1075/qe-6.1.tar.gz'
)
version(
'5.4.0',
'8bb78181b39bd084ae5cb7a512c1cfe7',
url='http://www.qe-forge.org/gf/download/frsrelease/211/968/espresso-5.4.0.tar.gz'
)
version('5.3.0', '6848fcfaeb118587d6be36bd10b7f2c3')
variant('mpi', default=True, description='Builds with mpi support')
variant('openmp', default=False, description='Enables openMP support')
variant('scalapack', default=True, description='Enables scalapack support')
variant('elpa', default=True, description='Uses elpa as an eigenvalue solver')
# Support for HDF5 has been added starting in version 6.1.0 and is
# still experimental, therefore we default to False for the variant
variant('hdf5', default=False, description='Builds with HDF5 support')
depends_on('blas')
depends_on('lapack')
depends_on('mpi', when='+mpi')
depends_on('scalapack', when='+scalapack+mpi')
depends_on('fftw+mpi', when='+mpi')
depends_on('fftw~mpi', when='~mpi')
depends_on('elpa+openmp', when='+elpa+openmp')
depends_on('elpa~openmp', when='+elpa~openmp')
depends_on('hdf5', when='+hdf5')
patch('dspev_drv_elpa.patch', when='@6.1+elpa ^elpa@2016.05.004')
patch('dspev_drv_elpa.patch', when='@6.1+elpa ^elpa@2016.05.003')
# We can't ask for scalapack or elpa if we don't want MPI
conflicts(
'+scalapack',
when='~mpi',
msg='scalapack is a parallel library and needs MPI support'
)
conflicts(
'+elpa',
when='~mpi',
msg='elpa is a parallel library and needs MPI support'
)
# Elpa is formally supported by @:5.4.0, but QE configure searches
# for it in the wrong folders (or tries to download it within
# the build directory). Instead of patching Elpa to provide the
# folder QE expects as a link, we issue a conflict here.
conflicts('+elpa', when='@:5.4.0')
conflicts('+hdf5', when='@:5.4.0')
# Spurious problems occur when the Makefile generated by configure
# is run in parallel
parallel = False
def install(self, spec, prefix):
prefix_path = prefix.bin if '@:5.4.0' in spec else prefix
options = ['-prefix={0}'.format(prefix_path)]
if '+mpi' in spec:
options.append('--enable-parallel=yes')
else:
options.append('--enable-parallel=no')
if '+openmp' in spec:
options.append('--enable-openmp')
if '+scalapack' in spec:
scalapack_option = 'intel' if '^intel-mkl' in spec else 'yes'
options.append('--with-scalapack={0}'.format(scalapack_option))
if '+elpa' in spec:
# Spec for elpa
elpa = spec['elpa']
# Find where the Fortran module resides
elpa_module = find(elpa.prefix, 'elpa.mod')
# Compute the include directory from there: versions
# of espresso prior to 6.1 require -I in front of the directory
elpa_include = '' if '@6.1:' in spec else '-I'
elpa_include += os.path.dirname(elpa_module[0])
options.extend([
'--with-elpa-include={0}'.format(elpa_include),
'--with-elpa-lib={0}'.format(elpa.libs[0])
])
if '+hdf5' in spec:
options.append('--with-hdf5={0}'.format(spec['hdf5'].prefix))
# Add a list of directories to search
search_list = []
for dependency_spec in spec.dependencies():
search_list.extend([
dependency_spec.prefix.lib,
dependency_spec.prefix.lib64
])
search_list = " ".join(search_list)
options.extend([
'LIBDIRS={0}'.format(search_list),
'F90={0}'.format(env['SPACK_FC']),
'CC={0}'.format(env['SPACK_CC'])
])
configure(*options)
make('all')
if 'platform=darwin' in spec:
mkdirp(prefix.bin)
for filename in glob.glob("bin/*.x"):
install(filename, prefix.bin)
else:
make('install')
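# Illustrative usage sketch (added for clarity, not part of the original
# recipe), assuming a working Spack installation:
#
#   spack install espresso+mpi+scalapack+elpa   # parallel build
#   spack install espresso~mpi~scalapack~elpa   # serial build
#
# The conflicts() declarations above make sure that +scalapack or +elpa
# without +mpi is rejected at concretization time.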
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/espresso/package.py
|
Python
|
lgpl-2.1
| 5,930
|
[
"ESPResSo"
] |
1f681545527306d312bed598d29185e9cf317903451467ffdc9dfa9d8c1488b2
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Exciting(MakefilePackage):
"""
exciting is a full-potential all-electron density-functional-theory package
implementing the families of linearized augmented planewave methods. It can
be applied to all kinds of materials, irrespective of the atomic species
involved, and also allows for exploring the physics of core electrons. A
particular focus is excited states within many-body perturbation theory.
"""
homepage = "https://exciting-code.org/"
url = "https://exciting.wdfiles.com/local--files/nitrogen-14/exciting.nitrogen-14.tar.gz"
git = "https://github.com/exciting/exciting.git"
version('oxygen', branch='oxygen_release', preferred=True)
version('14', sha256='a7feaffdc23881d6c0737d2f79f94d9bf073e85ea358a57196d7f7618a0a3eff')
# as-of-yet unpublished fix to version 14
patch('dfgather.patch', when='@14', working_dir='src/src_xs', level=0)
# Patch to add aarch64 in config.guess
patch('for_aarch64.patch', when='target=aarch64:')
variant('mpi', default=False, description='Use MPI')
variant('mkl', default=False, description='Use MKL')
variant('omp', default=True, description='Use OpenMP')
variant('scalapack', default=False, description='Use ScaLAPACK')
depends_on('blas')
depends_on('lapack')
depends_on('fftw', when='~mkl')
depends_on('mkl', when='+mkl')
depends_on('mpi', when='+mpi')
depends_on('scalapack', when='+scalapack')
# conflicts('%gcc@10:', msg='exciting cannot be built with GCC 10')
for __compiler in spack.compilers.supported_compilers():
if __compiler != 'intel':
conflicts('%{0}'.format(__compiler), when='^mkl',
msg='Intel MKL only works with the Intel compiler')
conflicts('%{0}'.format(__compiler), when='^intel-mkl',
msg='Intel MKL only works with the Intel compiler')
conflicts('%{0}'.format(__compiler), when='^intel-mpi',
msg='Intel MPI only works with the Intel compiler')
def patch(self):
"""Fix bad logic in m_makespectrum.f90 for the Oxygen release
"""
if self.spec.satisfies('@oxygen'):
filter_file(' '.join(['if ((.not. input%xs%BSE%coupling) .and.',
'input%xs%BSE%chibar0) then']),
' '.join(['if ((.not. input%xs%BSE%coupling)',
'.and. (.not. input%xs%BSE%chibar0)) then']),
'src/src_xs/m_makespectrum.f90', string=True)
def edit(self, spec, prefix):
opts = {}
opts['FCCPP'] = 'cpp'
opts['F90_OPTS'] = '-O3'
opts['F77_OPTS'] = '-O3'
opts['CPP_ON_OPTS'] = '-cpp -DXS -DISO -DLIBXC'
opts['LIB_ARP'] = 'libarpack.a'
opts['F90'] = spack_fc
opts['F77'] = spack_f77
if '+omp' in spec:
opts['SMPF90_OPTS'] = self.compiler.openmp_flag + ' -DUSEOMP'
opts['SMPF77_OPTS'] = self.compiler.openmp_flag + ' -DUSEOMP'
else:
opts['BUILDSMP'] = 'false'
if '%intel' in spec:
opts['F90_OPTS'] += ' -cpp -ip -unroll -scalar_rep '
opts['CPP_ON_OPTS'] += ' -DIFORT -DFFTW'
if '%gcc' in spec:
opts['F90_OPTS'] += ' -march=native -ffree-line-length-0'
if '%gcc@10:' in spec:
# The INSTALL file says this will fix the GCC@10 issues
opts['F90_OPTS'] += ' -fallow-argument-mismatch'
opts['F77_OPTS'] += ' -fallow-argument-mismatch'
filter_file('FCFLAGS = @FCFLAGS@',
' '.join(['FCFLAGS = @FCFLAGS@', '-cpp',
self.compiler.openmp_flag]),
'src/libXC/src/Makefile.in')
if '+mkl' in spec:
opts['LIB_LPK'] = '-mkl=parallel'
opts['INC_MKL'] = spec['mkl'].headers.include_flags
opts['LIB_MKL'] = spec['mkl'].libs.ld_flags
opts['F90_OPTS'] += spec['mkl'].headers.include_flags
else:
opts['LIB_LPK'] = ' '.join([spec['lapack'].libs.ld_flags,
spec['blas'].libs.ld_flags,
self.compiler.openmp_flag])
if '+omp' in spec:
opts['BUILDSMP'] = 'true'
if '+mpi' in spec:
opts['BUILDMPI'] = 'true'
opts['MPIF90'] = spec['mpi'].mpifc
opts['MPIF90_CPP_OPTS'] = '-DMPI -DMPIRHO -DMPISEC'
opts['MPIF90_OPTS'] = ' '.join(['$(F90_OPTS)', '$(CPP_ON_OPTS) '
'$(MPIF90_CPP_OPTS)'])
opts['MPIF90MT'] = '$(MPIF90)'
if '+omp' in spec:
opts['BUILDMPISMP'] = 'true'
opts['SMPF90_OPTS'] = self.compiler.openmp_flag + ' -DUSEOMP'
opts['SMPF77_OPTS'] = opts['SMPF90_OPTS']
opts['SMP_LIBS'] = ''
else:
opts['BUILDMPI'] = 'false'
if '+scalapack' in spec:
opts['LIB_SCLPK'] = spec['scalapack'].libs.ld_flags
opts['CPP_SCLPK'] = ' -DSCAL '
opts['MPI_LIBS'] = '$(LIB_SCLPK)'
opts['MPIF90_CPP_OPTS'] += ' $(CPP_SCLPK) '
opts['USE_SYS_LAPACK'] = 'true'
opts['LIB_FFT'] = 'fftlib.a'
opts['LIB_BZINT'] = 'libbzint.a'
opts['LIBS'] = '$(LIB_ARP) $(LIB_LPK) $(LIB_FFT) $(LIB_BZINT)'
if '+mpi' not in spec or '+omp' not in spec:
opts['BUILDMPISMP'] = 'false'
# Write the build/make.inc file
with open('build/make.inc', 'a') as inc:
for key in opts:
inc.write('{0} = {1}\n'.format(key, opts[key]))
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
install_tree('species', prefix.species)
install_tree('tools', prefix.tools)
def setup_run_environment(self, env):
env.set('WNHOME', self.prefix)
env.set('EXCITINGROOT', self.prefix)
env.set('EXCITINGBIN', self.prefix.bin)
env.set('EXCITINGTOOLS', self.prefix.tools)
env.set('EXCITINGSTM', self.prefix.tools.stm)
env.set('EXCITINGVISUAL', self.prefix.xml.visualizationtemplates)
env.set('EXCITINGCONVERT', self.prefix.xml.inputfileconverter)
env.set('TIMEFORMAT', ' Elapsed time = %0lR')
env.set('WRITEMINMAX', '1')
env.set('USE_SYS_LAPACK', 'true')
env.append_path('PYTHONPATH', self.prefix.tools.stm)
env.append_path('PATH', self.prefix.tools)
env.append_path('PATH', self.prefix)
env.append_path('PATH', self.prefix.tools.stm)
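# Illustrative note (added for clarity, not part of the original recipe):
# the edit() step above only appends "KEY = value" pairs to build/make.inc,
# so a plain gcc+omp build would be expected to produce entries roughly like
#
#   F90 = <spack compiler wrapper for Fortran>
#   SMPF90_OPTS = -fopenmp -DUSEOMP
#   LIB_LPK = <lapack ld flags> <blas ld flags> -fopenmp
#
# The exact paths and flags depend on the concretized spec.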
|
LLNL/spack
|
var/spack/repos/builtin/packages/exciting/package.py
|
Python
|
lgpl-2.1
| 6,878
|
[
"exciting"
] |
86476bc45f9fb864f82ef0d6b30fbeafab331e7eb13977bdc3345c11b75bccf4
|
import codecs
import contextlib
import io
import locale
import sys
import unittest
import encodings
from unittest import mock
from test import support
try:
import _testcapi
except ImportError as exc:
_testcapi = None
try:
import ctypes
except ImportError:
ctypes = None
SIZEOF_WCHAR_T = -1
else:
SIZEOF_WCHAR_T = ctypes.sizeof(ctypes.c_wchar)
def coding_checker(self, coder):
def check(input, expect):
self.assertEqual(coder(input), (expect, len(input)))
return check
# On small versions of Windows like Windows IoT or Windows Nano Server not all codepages are present
def is_code_page_present(cp):
from ctypes import POINTER, WINFUNCTYPE, WinDLL
from ctypes.wintypes import BOOL, UINT, BYTE, WCHAR, DWORD
MAX_LEADBYTES = 12 # 5 ranges, 2 bytes ea., 0 term.
MAX_DEFAULTCHAR = 2 # single or double byte
MAX_PATH = 260
class CPINFOEXW(ctypes.Structure):
_fields_ = [("MaxCharSize", UINT),
("DefaultChar", BYTE*MAX_DEFAULTCHAR),
("LeadByte", BYTE*MAX_LEADBYTES),
("UnicodeDefaultChar", WCHAR),
("CodePage", UINT),
("CodePageName", WCHAR*MAX_PATH)]
prototype = WINFUNCTYPE(BOOL, UINT, DWORD, POINTER(CPINFOEXW))
GetCPInfoEx = prototype(("GetCPInfoExW", WinDLL("kernel32")))
info = CPINFOEXW()
return GetCPInfoEx(cp, 0, info)
class Queue(object):
"""
queue: write bytes at one end, read bytes from the other end
"""
def __init__(self, buffer):
self._buffer = buffer
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = self._buffer[:0] # make empty
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
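# Illustrative note (added for clarity, not part of the original tests): the
# Queue above is just a FIFO byte buffer, e.g.
#   q = Queue(b"")
#   q.write(b"abc")
#   q.read(2)   # -> b"ab"
#   q.read()    # -> b"c"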
class MixInCheckStateHandling:
def check_state_handling_decode(self, encoding, u, s):
for i in range(len(s)+1):
d = codecs.getincrementaldecoder(encoding)()
part1 = d.decode(s[:i])
state = d.getstate()
self.assertIsInstance(state[1], int)
# Check that the condition stated in the documentation for
# IncrementalDecoder.getstate() holds
if not state[1]:
# reset decoder to the default state without anything buffered
d.setstate((state[0][:0], 0))
# Feeding the previous input may not produce any output
self.assertTrue(not d.decode(state[0]))
# The decoder must return to the same state
self.assertEqual(state, d.getstate())
# Create a new decoder and set it to the state
# we extracted from the old one
d = codecs.getincrementaldecoder(encoding)()
d.setstate(state)
part2 = d.decode(s[i:], True)
self.assertEqual(u, part1+part2)
def check_state_handling_encode(self, encoding, u, s):
for i in range(len(u)+1):
d = codecs.getincrementalencoder(encoding)()
part1 = d.encode(u[:i])
state = d.getstate()
d = codecs.getincrementalencoder(encoding)()
d.setstate(state)
part2 = d.encode(u[i:], True)
self.assertEqual(s, part1+part2)
class ReadTest(MixInCheckStateHandling):
def check_partial(self, input, partialresults):
# get a StreamReader for the encoding and feed the bytestring version
# of input to the reader byte by byte. Read everything available from
# the StreamReader and check that the results equal the appropriate
# entries from partialresults.
q = Queue(b"")
r = codecs.getreader(self.encoding)(q)
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
q.write(bytes([c]))
result += r.read()
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(r.read(), "")
self.assertEqual(r.bytebuffer, b"")
# do the check again, this time using an incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# Check whether the reset method works properly
d.reset()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
"".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
)
def test_readline(self):
def getreader(input):
stream = io.BytesIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = "foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = "foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
lineends = ("\n", "\r\n", "\r", "\u2028")
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate(lineends):
vw.append((i*200+200)*"\u3042" + lineend)
vwo.append((i*200+200)*"\u3042")
self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in range(80):
for lineend in lineends:
s = 10*(size*"a" + lineend + "xxx\n")
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=True),
size*"a" + lineend,
)
self.assertEqual(
reader.readline(keepends=True),
"xxx\n",
)
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=False),
size*"a",
)
self.assertEqual(
reader.readline(keepends=False),
"xxx",
)
def test_mixed_readline_and_read(self):
lines = ["Humpty Dumpty sat on a wall,\n",
"Humpty Dumpty had a great fall.\r\n",
"All the king's horses and all the king's men\r",
"Couldn't put Humpty together again."]
data = ''.join(lines)
def getreader():
stream = io.BytesIO(data.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
# Issue #8260: Test readline() followed by read()
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.read(), ''.join(lines[1:]))
self.assertEqual(f.read(), '')
# Issue #32110: Test readline() followed by read(n)
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.read(1), lines[1][0])
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(100), data[len(lines[0]) + 1:][:100])
# Issue #16636: Test readline() followed by readlines()
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.readlines(), lines[1:])
self.assertEqual(f.read(), '')
# Test read(n) followed by read()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(), data[5:])
self.assertEqual(f.read(), '')
# Issue #32110: Test read(n) followed by read(n)
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(1), data[5])
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(100), data[6:106])
# Issue #12446: Test read(n) followed by readlines()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
self.assertEqual(f.read(), '')
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse() # descending\r\n',
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
' #-------------------- TODAY\'S ARTICLES\r\n',
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
' #-------------------- ACTIVE ARTICLES redirect\r\n',
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
' #-------------------- LOGIN PAGE redirect\r\n',
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
' #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
' #-------------------- RECENT ARTICLES\r\n',
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = io.BytesIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue(b"")
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=False), "foo")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=False), "")
self.assertEqual(reader.readline(keepends=False), "bar")
writer.write("baz")
self.assertEqual(reader.readline(keepends=False), "baz")
self.assertEqual(reader.readline(keepends=False), "")
# Lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=True), "foo\r")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=True), "\n")
self.assertEqual(reader.readline(keepends=True), "bar\r")
writer.write("baz")
self.assertEqual(reader.readline(keepends=True), "baz")
self.assertEqual(reader.readline(keepends=True), "")
writer.write("foo\r\n")
self.assertEqual(reader.readline(keepends=True), "foo\r\n")
def test_bug1098990_a(self):
s1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = "offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = "next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), "")
def test_bug1098990_b(self):
s1 = "aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = "bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = "stillokay:bbbbxx\r\n"
s4 = "broken!!!!badbad\r\n"
s5 = "againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), "")
ill_formed_sequence_replace = "\ufffd"
def test_lone_surrogates(self):
self.assertRaises(UnicodeEncodeError, "\ud800".encode, self.encoding)
self.assertEqual("[\uDC80]".encode(self.encoding, "backslashreplace"),
"[\\udc80]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "namereplace"),
"[\\udc80]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "xmlcharrefreplace"),
"[�]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "ignore"),
"[]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "replace"),
"[?]".encode(self.encoding))
# sequential surrogate characters
self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "ignore"),
"[]".encode(self.encoding))
self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "replace"),
"[??]".encode(self.encoding))
bom = "".encode(self.encoding)
for before, after in [("\U00010fff", "A"), ("[", "]"),
("A", "\U00010fff")]:
before_sequence = before.encode(self.encoding)[len(bom):]
after_sequence = after.encode(self.encoding)[len(bom):]
test_string = before + "\uDC80" + after
test_sequence = (bom + before_sequence +
self.ill_formed_sequence + after_sequence)
self.assertRaises(UnicodeDecodeError, test_sequence.decode,
self.encoding)
self.assertEqual(test_string.encode(self.encoding,
"surrogatepass"),
test_sequence)
self.assertEqual(test_sequence.decode(self.encoding,
"surrogatepass"),
test_string)
self.assertEqual(test_sequence.decode(self.encoding, "ignore"),
before + after)
self.assertEqual(test_sequence.decode(self.encoding, "replace"),
before + self.ill_formed_sequence_replace + after)
backslashreplace = ''.join('\\x%02x' % b
for b in self.ill_formed_sequence)
self.assertEqual(test_sequence.decode(self.encoding, "backslashreplace"),
before + backslashreplace + after)
def test_incremental_surrogatepass(self):
# Test incremental decoder for surrogatepass handler:
# see issue #24214
# High surrogate
data = '\uD901'.encode(self.encoding, 'surrogatepass')
for i in range(1, len(data)):
dec = codecs.getincrementaldecoder(self.encoding)('surrogatepass')
self.assertEqual(dec.decode(data[:i]), '')
self.assertEqual(dec.decode(data[i:], True), '\uD901')
# Low surrogate
data = '\uDC02'.encode(self.encoding, 'surrogatepass')
for i in range(1, len(data)):
dec = codecs.getincrementaldecoder(self.encoding)('surrogatepass')
self.assertEqual(dec.decode(data[:i]), '')
self.assertEqual(dec.decode(data[i:]), '\uDC02')
class UTF32Test(ReadTest, unittest.TestCase):
encoding = "utf-32"
if sys.byteorder == 'little':
ill_formed_sequence = b"\x80\xdc\x00\x00"
else:
ill_formed_sequence = b"\x00\x00\xdc\x80"
spamle = (b'\xff\xfe\x00\x00'
b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
spambe = (b'\x00\x00\xfe\xff'
b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = io.BytesIO()
f = writer(s)
f.write("spam")
f.write("spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = io.BytesIO(d)
f = reader(s)
self.assertEqual(f.read(), "spamspam")
def test_badbom(self):
s = io.BytesIO(4*b"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = io.BytesIO(8*b"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"", # first byte of BOM read
"", # second byte of BOM read
"", # third byte of BOM read
"", # fourth byte of BOM read => byteorder known
"",
"",
"",
"\x00",
"\x00",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_handlers(self):
self.assertEqual(('\ufffd', 1),
codecs.utf_32_decode(b'\x01', 'replace', True))
self.assertEqual(('', 1),
codecs.utf_32_decode(b'\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
b"\xff", "strict", True)
def test_decoder_state(self):
self.check_state_handling_decode(self.encoding,
"spamspam", self.spamle)
self.check_state_handling_decode(self.encoding,
"spamspam", self.spambe)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded_le = b'\xff\xfe\x00\x00' + b'\x00\x00\x01\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_decode(encoded_le)[0])
encoded_be = b'\x00\x00\xfe\xff' + b'\x00\x01\x00\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest, unittest.TestCase):
encoding = "utf-32-le"
ill_formed_sequence = b"\x80\xdc\x00\x00"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"",
"",
"",
"\x00",
"\x00",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_simple(self):
self.assertEqual("\U00010203".encode(self.encoding), b"\x03\x02\x01\x00")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
b"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = b'\x00\x00\x01\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest, unittest.TestCase):
encoding = "utf-32-be"
ill_formed_sequence = b"\x00\x00\xdc\x80"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"",
"",
"",
"\x00",
"\x00",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_simple(self):
self.assertEqual("\U00010203".encode(self.encoding), b"\x00\x01\x02\x03")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
b"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = b'\x00\x01\x00\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest, unittest.TestCase):
encoding = "utf-16"
if sys.byteorder == 'little':
ill_formed_sequence = b"\x80\xdc"
else:
ill_formed_sequence = b"\xdc\x80"
spamle = b'\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
spambe = b'\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = io.BytesIO()
f = writer(s)
f.write("spam")
f.write("spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = io.BytesIO(d)
f = reader(s)
self.assertEqual(f.read(), "spamspam")
def test_badbom(self):
s = io.BytesIO(b"\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = io.BytesIO(b"\xff\xff\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"", # first byte of BOM read
"", # second byte of BOM read => byteorder known
"",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_handlers(self):
self.assertEqual(('\ufffd', 1),
codecs.utf_16_decode(b'\x01', 'replace', True))
self.assertEqual(('', 1),
codecs.utf_16_decode(b'\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode,
b"\xff", "strict", True)
def test_decoder_state(self):
self.check_state_handling_decode(self.encoding,
"spamspam", self.spamle)
self.check_state_handling_decode(self.encoding,
"spamspam", self.spambe)
def test_bug691291(self):
# Files are always opened in binary mode, even if no binary mode was
# specified. This means that no automatic conversion of '\n' is done
# on reading and writing.
s1 = 'Hello\r\nworld\r\n'
s = s1.encode(self.encoding)
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, 'wb') as fp:
fp.write(s)
with support.check_warnings(('', DeprecationWarning)):
reader = codecs.open(support.TESTFN, 'U', encoding=self.encoding)
with reader:
self.assertEqual(reader.read(), s1)
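# A compact illustration of the BOM behaviour exercised above (for reading
# only; it assumes nothing beyond the codecs module already imported): the
# endian-specific variants never emit a BOM, while plain "utf-16" prefixes
# exactly one in native byte order.
#
#     >>> "spam".encode("utf-16-le")
#     b's\x00p\x00a\x00m\x00'
#     >>> "spam".encode("utf-16")[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)
#     True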
class UTF16LETest(ReadTest, unittest.TestCase):
encoding = "utf-16-le"
ill_formed_sequence = b"\x80\xdc"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_errors(self):
tests = [
(b'\xff', '\ufffd'),
(b'A\x00Z', 'A\ufffd'),
(b'A\x00B\x00C\x00D\x00Z', 'ABCD\ufffd'),
(b'\x00\xd8', '\ufffd'),
(b'\x00\xd8A', '\ufffd'),
(b'\x00\xd8A\x00', '\ufffdA'),
(b'\x00\xdcA\x00', '\ufffdA'),
]
for raw, expected in tests:
self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
raw, 'strict', True)
self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
def test_nonbmp(self):
self.assertEqual("\U00010203".encode(self.encoding),
b'\x00\xd8\x03\xde')
self.assertEqual(b'\x00\xd8\x03\xde'.decode(self.encoding),
"\U00010203")
class UTF16BETest(ReadTest, unittest.TestCase):
encoding = "utf-16-be"
ill_formed_sequence = b"\xdc\x80"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_errors(self):
tests = [
(b'\xff', '\ufffd'),
(b'\x00A\xff', 'A\ufffd'),
(b'\x00A\x00B\x00C\x00DZ', 'ABCD\ufffd'),
(b'\xd8\x00', '\ufffd'),
(b'\xd8\x00\xdc', '\ufffd'),
(b'\xd8\x00\x00A', '\ufffdA'),
(b'\xdc\x00\x00A', '\ufffdA'),
]
for raw, expected in tests:
self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
raw, 'strict', True)
self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
def test_nonbmp(self):
self.assertEqual("\U00010203".encode(self.encoding),
b'\xd8\x00\xde\x03')
self.assertEqual(b'\xd8\x00\xde\x03'.decode(self.encoding),
"\U00010203")
class UTF8Test(ReadTest, unittest.TestCase):
encoding = "utf-8"
ill_formed_sequence = b"\xed\xb2\x80"
ill_formed_sequence_replace = "\ufffd" * 3
BOM = b''
def test_partial(self):
self.check_partial(
"\x00\xff\u07ff\u0800\uffff\U00010000",
[
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u07ff",
"\x00\xff\u07ff",
"\x00\xff\u07ff",
"\x00\xff\u07ff\u0800",
"\x00\xff\u07ff\u0800",
"\x00\xff\u07ff\u0800",
"\x00\xff\u07ff\u0800\uffff",
"\x00\xff\u07ff\u0800\uffff",
"\x00\xff\u07ff\u0800\uffff",
"\x00\xff\u07ff\u0800\uffff",
"\x00\xff\u07ff\u0800\uffff\U00010000",
]
)
def test_decoder_state(self):
u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
self.check_state_handling_decode(self.encoding,
u, u.encode(self.encoding))
def test_decode_error(self):
for data, error_handler, expected in (
(b'[\x80\xff]', 'ignore', '[]'),
(b'[\x80\xff]', 'replace', '[\ufffd\ufffd]'),
(b'[\x80\xff]', 'surrogateescape', '[\udc80\udcff]'),
(b'[\x80\xff]', 'backslashreplace', '[\\x80\\xff]'),
):
with self.subTest(data=data, error_handler=error_handler,
expected=expected):
self.assertEqual(data.decode(self.encoding, error_handler),
expected)
def test_lone_surrogates(self):
super().test_lone_surrogates()
        # It is not obvious that this behaviour makes sense for
        # UTF-16 and UTF-32, but it is what the codecs currently do.
self.assertEqual("[\uDC80]".encode(self.encoding, "surrogateescape"),
self.BOM + b'[\x80]')
with self.assertRaises(UnicodeEncodeError) as cm:
"[\uDC80\uD800\uDFFF]".encode(self.encoding, "surrogateescape")
exc = cm.exception
self.assertEqual(exc.object[exc.start:exc.end], '\uD800\uDFFF')
def test_surrogatepass_handler(self):
self.assertEqual("abc\ud800def".encode(self.encoding, "surrogatepass"),
self.BOM + b"abc\xed\xa0\x80def")
self.assertEqual("\U00010fff\uD800".encode(self.encoding, "surrogatepass"),
self.BOM + b"\xf0\x90\xbf\xbf\xed\xa0\x80")
self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "surrogatepass"),
self.BOM + b'[\xed\xa0\x80\xed\xb2\x80]')
self.assertEqual(b"abc\xed\xa0\x80def".decode(self.encoding, "surrogatepass"),
"abc\ud800def")
self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode(self.encoding, "surrogatepass"),
"\U00010fff\uD800")
self.assertTrue(codecs.lookup_error("surrogatepass"))
with self.assertRaises(UnicodeDecodeError):
b"abc\xed\xa0".decode(self.encoding, "surrogatepass")
with self.assertRaises(UnicodeDecodeError):
b"abc\xed\xa0z".decode(self.encoding, "surrogatepass")
def test_incremental_errors(self):
# Test that the incremental decoder can fail with final=False.
# See issue #24214
cases = [b'\x80', b'\xBF', b'\xC0', b'\xC1', b'\xF5', b'\xF6', b'\xFF']
for prefix in (b'\xC2', b'\xDF', b'\xE0', b'\xE0\xA0', b'\xEF',
b'\xEF\xBF', b'\xF0', b'\xF0\x90', b'\xF0\x90\x80',
b'\xF4', b'\xF4\x8F', b'\xF4\x8F\xBF'):
for suffix in b'\x7F', b'\xC0':
cases.append(prefix + suffix)
cases.extend((b'\xE0\x80', b'\xE0\x9F', b'\xED\xA0\x80',
b'\xED\xBF\xBF', b'\xF0\x80', b'\xF0\x8F', b'\xF4\x90'))
for data in cases:
with self.subTest(data=data):
dec = codecs.getincrementaldecoder(self.encoding)()
self.assertRaises(UnicodeDecodeError, dec.decode, data)
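# What "fail with final=False" means in practice (illustrative sketch): a byte
# that can never start or continue a valid UTF-8 sequence is rejected at once,
# without waiting for the final call.
#
#     >>> dec = codecs.getincrementaldecoder("utf-8")()
#     >>> dec.decode(b"\xff")     # raises UnicodeDecodeError immediately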
class UTF7Test(ReadTest, unittest.TestCase):
encoding = "utf-7"
def test_ascii(self):
# Set D (directly encoded characters)
set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789'
'\'(),-./:?')
self.assertEqual(set_d.encode(self.encoding), set_d.encode('ascii'))
self.assertEqual(set_d.encode('ascii').decode(self.encoding), set_d)
# Set O (optional direct characters)
set_o = ' !"#$%&*;<=>@[]^_`{|}'
self.assertEqual(set_o.encode(self.encoding), set_o.encode('ascii'))
self.assertEqual(set_o.encode('ascii').decode(self.encoding), set_o)
# +
self.assertEqual('a+b'.encode(self.encoding), b'a+-b')
self.assertEqual(b'a+-b'.decode(self.encoding), 'a+b')
# White spaces
ws = ' \t\n\r'
self.assertEqual(ws.encode(self.encoding), ws.encode('ascii'))
self.assertEqual(ws.encode('ascii').decode(self.encoding), ws)
# Other ASCII characters
other_ascii = ''.join(sorted(set(bytes(range(0x80)).decode()) -
set(set_d + set_o + '+' + ws)))
self.assertEqual(other_ascii.encode(self.encoding),
b'+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU'
b'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-')
def test_partial(self):
self.check_partial(
'a+-b\x00c\x80d\u0100e\U00010000f',
[
'a',
'a',
'a+',
'a+-',
'a+-b',
'a+-b',
'a+-b',
'a+-b',
'a+-b',
'a+-b\x00',
'a+-b\x00c',
'a+-b\x00c',
'a+-b\x00c',
'a+-b\x00c',
'a+-b\x00c',
'a+-b\x00c\x80',
'a+-b\x00c\x80d',
'a+-b\x00c\x80d',
'a+-b\x00c\x80d',
'a+-b\x00c\x80d',
'a+-b\x00c\x80d',
'a+-b\x00c\x80d\u0100',
'a+-b\x00c\x80d\u0100e',
'a+-b\x00c\x80d\u0100e',
'a+-b\x00c\x80d\u0100e',
'a+-b\x00c\x80d\u0100e',
'a+-b\x00c\x80d\u0100e',
'a+-b\x00c\x80d\u0100e',
'a+-b\x00c\x80d\u0100e',
'a+-b\x00c\x80d\u0100e',
'a+-b\x00c\x80d\u0100e\U00010000',
'a+-b\x00c\x80d\u0100e\U00010000f',
]
)
def test_errors(self):
tests = [
(b'\xffb', '\ufffdb'),
(b'a\xffb', 'a\ufffdb'),
(b'a\xff\xffb', 'a\ufffd\ufffdb'),
(b'a+IK', 'a\ufffd'),
(b'a+IK-b', 'a\ufffdb'),
(b'a+IK,b', 'a\ufffdb'),
(b'a+IKx', 'a\u20ac\ufffd'),
(b'a+IKx-b', 'a\u20ac\ufffdb'),
(b'a+IKwgr', 'a\u20ac\ufffd'),
(b'a+IKwgr-b', 'a\u20ac\ufffdb'),
(b'a+IKwgr,', 'a\u20ac\ufffd'),
(b'a+IKwgr,-b', 'a\u20ac\ufffd-b'),
(b'a+IKwgrB', 'a\u20ac\u20ac\ufffd'),
(b'a+IKwgrB-b', 'a\u20ac\u20ac\ufffdb'),
(b'a+/,+IKw-b', 'a\ufffd\u20acb'),
(b'a+//,+IKw-b', 'a\ufffd\u20acb'),
(b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'),
(b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'),
(b'a+IKw-b\xff', 'a\u20acb\ufffd'),
(b'a+IKw\xffb', 'a\u20ac\ufffdb'),
(b'a+@b', 'a\ufffdb'),
]
for raw, expected in tests:
with self.subTest(raw=raw):
self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode,
raw, 'strict', True)
self.assertEqual(raw.decode('utf-7', 'replace'), expected)
def test_nonbmp(self):
self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-')
self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-')
self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0')
self.assertEqual(b'+2AHcoA'.decode(self.encoding), '\U000104A0')
self.assertEqual('\u20ac\U000104A0'.encode(self.encoding), b'+IKzYAdyg-')
self.assertEqual(b'+IKzYAdyg-'.decode(self.encoding), '\u20ac\U000104A0')
self.assertEqual(b'+IKzYAdyg'.decode(self.encoding), '\u20ac\U000104A0')
self.assertEqual('\u20ac\u20ac\U000104A0'.encode(self.encoding),
b'+IKwgrNgB3KA-')
self.assertEqual(b'+IKwgrNgB3KA-'.decode(self.encoding),
'\u20ac\u20ac\U000104A0')
self.assertEqual(b'+IKwgrNgB3KA'.decode(self.encoding),
'\u20ac\u20ac\U000104A0')
def test_lone_surrogates(self):
tests = [
(b'a+2AE-b', 'a\ud801b'),
(b'a+2AE\xffb', 'a\ufffdb'),
(b'a+2AE', 'a\ufffd'),
(b'a+2AEA-b', 'a\ufffdb'),
(b'a+2AH-b', 'a\ufffdb'),
(b'a+IKzYAQ-b', 'a\u20ac\ud801b'),
(b'a+IKzYAQ\xffb', 'a\u20ac\ufffdb'),
(b'a+IKzYAQA-b', 'a\u20ac\ufffdb'),
(b'a+IKzYAd-b', 'a\u20ac\ufffdb'),
(b'a+IKwgrNgB-b', 'a\u20ac\u20ac\ud801b'),
(b'a+IKwgrNgB\xffb', 'a\u20ac\u20ac\ufffdb'),
(b'a+IKwgrNgB', 'a\u20ac\u20ac\ufffd'),
(b'a+IKwgrNgBA-b', 'a\u20ac\u20ac\ufffdb'),
]
for raw, expected in tests:
with self.subTest(raw=raw):
self.assertEqual(raw.decode('utf-7', 'replace'), expected)
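# The '+'/'-' shifting that the UTF-7 cases above exercise, in short
# (illustrative): '+' opens a modified-base64 run, '-' closes it, and a
# literal plus sign is spelled '+-'.
#
#     >>> "a\u20acb".encode("utf-7")
#     b'a+IKw-b'
#     >>> b'a+-b'.decode("utf-7")
#     'a+b'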
class UTF16ExTest(unittest.TestCase):
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_ex_decode, b"\xff", "strict", 0, True)
def test_bad_args(self):
self.assertRaises(TypeError, codecs.utf_16_ex_decode)
class ReadBufferTest(unittest.TestCase):
def test_array(self):
import array
self.assertEqual(
codecs.readbuffer_encode(array.array("b", b"spam")),
(b"spam", 4)
)
def test_empty(self):
self.assertEqual(codecs.readbuffer_encode(""), (b"", 0))
def test_bad_args(self):
self.assertRaises(TypeError, codecs.readbuffer_encode)
self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
class UTF8SigTest(UTF8Test, unittest.TestCase):
encoding = "utf-8-sig"
BOM = codecs.BOM_UTF8
def test_partial(self):
self.check_partial(
"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
[
"",
"",
"", # First BOM has been read and skipped
"",
"",
"\ufeff", # Second BOM has been read and emitted
"\ufeff\x00", # "\x00" read and emitted
"\ufeff\x00", # First byte of encoded "\xff" read
"\ufeff\x00\xff", # Second byte of encoded "\xff" read
"\ufeff\x00\xff", # First byte of encoded "\u07ff" read
"\ufeff\x00\xff\u07ff", # Second byte of encoded "\u07ff" read
"\ufeff\x00\xff\u07ff",
"\ufeff\x00\xff\u07ff",
"\ufeff\x00\xff\u07ff\u0800",
"\ufeff\x00\xff\u07ff\u0800",
"\ufeff\x00\xff\u07ff\u0800",
"\ufeff\x00\xff\u07ff\u0800\uffff",
"\ufeff\x00\xff\u07ff\u0800\uffff",
"\ufeff\x00\xff\u07ff\u0800\uffff",
"\ufeff\x00\xff\u07ff\u0800\uffff",
"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
]
)
def test_bug1601501(self):
# SF bug #1601501: check that the codec works with a buffer
self.assertEqual(str(b"\xef\xbb\xbf", "utf-8-sig"), "")
def test_bom(self):
d = codecs.getincrementaldecoder("utf-8-sig")()
s = "spam"
self.assertEqual(d.decode(s.encode("utf-8-sig")), s)
def test_stream_bom(self):
unistring = "ABC\u00A1\u2200XYZ"
bytestring = codecs.BOM_UTF8 + b"ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + list(range(1, 11)) + \
[64, 128, 256, 512, 1024]:
istream = reader(io.BytesIO(bytestring))
ostream = io.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
def test_stream_bare(self):
unistring = "ABC\u00A1\u2200XYZ"
bytestring = b"ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + list(range(1, 11)) + \
[64, 128, 256, 512, 1024]:
istream = reader(io.BytesIO(bytestring))
ostream = io.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
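# The BOM handling checked above, reduced to one line each (illustrative):
# utf-8-sig strips a single leading BOM when decoding and prepends one when
# encoding.
#
#     >>> (codecs.BOM_UTF8 + b"abc").decode("utf-8-sig")
#     'abc'
#     >>> "abc".encode("utf-8-sig")
#     b'\xef\xbb\xbfabc'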
class EscapeDecodeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.escape_decode(b""), (b"", 0))
self.assertEqual(codecs.escape_decode(bytearray()), (b"", 0))
def test_raw(self):
decode = codecs.escape_decode
for b in range(256):
b = bytes([b])
if b != b'\\':
self.assertEqual(decode(b + b'0'), (b + b'0', 2))
def test_escape(self):
decode = codecs.escape_decode
check = coding_checker(self, decode)
check(b"[\\\n]", b"[]")
check(br'[\"]', b'["]')
check(br"[\']", b"[']")
check(br"[\\]", b"[\\]")
check(br"[\a]", b"[\x07]")
check(br"[\b]", b"[\x08]")
check(br"[\t]", b"[\x09]")
check(br"[\n]", b"[\x0a]")
check(br"[\v]", b"[\x0b]")
check(br"[\f]", b"[\x0c]")
check(br"[\r]", b"[\x0d]")
check(br"[\7]", b"[\x07]")
check(br"[\78]", b"[\x078]")
check(br"[\41]", b"[!]")
check(br"[\418]", b"[!8]")
check(br"[\101]", b"[A]")
check(br"[\1010]", b"[A0]")
check(br"[\501]", b"[A]")
check(br"[\x41]", b"[A]")
check(br"[\x410]", b"[A0]")
for i in range(97, 123):
b = bytes([i])
if b not in b'abfnrtvx':
with self.assertWarns(DeprecationWarning):
check(b"\\" + b, b"\\" + b)
with self.assertWarns(DeprecationWarning):
check(b"\\" + b.upper(), b"\\" + b.upper())
with self.assertWarns(DeprecationWarning):
check(br"\8", b"\\8")
with self.assertWarns(DeprecationWarning):
check(br"\9", b"\\9")
with self.assertWarns(DeprecationWarning):
check(b"\\\xfa", b"\\\xfa")
def test_errors(self):
decode = codecs.escape_decode
self.assertRaises(ValueError, decode, br"\x")
self.assertRaises(ValueError, decode, br"[\x]")
self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
self.assertRaises(ValueError, decode, br"\x0")
self.assertRaises(ValueError, decode, br"[\x0]")
self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
# From RFC 3492
punycode_testcases = [
# A Arabic (Egyptian):
("\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
b"egbpdaj6bu4bxfgehfvwxn"),
# B Chinese (simplified):
("\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
b"ihqwcrb4cv8a8dqg056pqjye"),
# C Chinese (traditional):
("\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
b"ihqwctvzc91f659drss3x8bo0yb"),
# D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
("\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
"\u0065\u0073\u006B\u0079",
b"Proprostnemluvesky-uyb24dma41a"),
# E Hebrew:
("\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
"\u05D1\u05E8\u05D9\u05EA",
b"4dbcagdahymbxekheh6e0a7fei0b"),
# F Hindi (Devanagari):
("\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
"\u0939\u0948\u0902",
b"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
#(G) Japanese (kanji and hiragana):
("\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
b"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
# (H) Korean (Hangul syllables):
("\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
b"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
b"psd879ccm6fea98c"),
# (I) Russian (Cyrillic):
("\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
"\u0438",
b"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
# (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
("\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
"\u0061\u00F1\u006F\u006C",
b"PorqunopuedensimplementehablarenEspaol-fmd56a"),
# (K) Vietnamese:
# T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
# <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
("\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
"\u0056\u0069\u1EC7\u0074",
b"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
#(L) 3<nen>B<gumi><kinpachi><sensei>
("\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
b"3B-ww4c5e180e575a65lsy2b"),
# (M) <amuro><namie>-with-SUPER-MONKEYS
("\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
"\u004F\u004E\u004B\u0045\u0059\u0053",
b"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
# (N) Hello-Another-Way-<sorezore><no><basho>
("\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
b"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
# (O) <hitotsu><yane><no><shita>2
("\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
b"2-u9tlzr9756bt3uc0v"),
# (P) Maji<de>Koi<suru>5<byou><mae>
("\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
"\u308B\u0035\u79D2\u524D",
b"MajiKoi5-783gue6qz075azm5e"),
# (Q) <pafii>de<runba>
("\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
b"de-jg4avhby1noc0d"),
# (R) <sono><supiido><de>
("\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
b"d9juau41awczczp"),
# (S) -> $1.00 <-
("\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
"\u003C\u002D",
b"-> $1.00 <--")
]
for i in punycode_testcases:
    if len(i) != 2:
print(repr(i))
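# A quick illustration of the encoding under test (not taken from RFC 3492):
# punycode keeps the ASCII code points and appends a delta-encoded tail for
# everything else.
#
#     >>> "b\xfccher".encode("punycode")
#     b'bcher-kva'
#     >>> b'bcher-kva'.decode("punycode") == "b\xfccher"
#     True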
class PunycodeTest(unittest.TestCase):
def test_encode(self):
for uni, puny in punycode_testcases:
# Need to convert both strings to lower case, since
# some of the extended encodings use upper case, but our
# code produces only lower case. Converting just puny to
# lower is also insufficient, since some of the input characters
# are upper case.
self.assertEqual(
str(uni.encode("punycode"), "ascii").lower(),
str(puny, "ascii").lower()
)
def test_decode(self):
for uni, puny in punycode_testcases:
self.assertEqual(uni, puny.decode("punycode"))
puny = puny.decode("ascii").encode("ascii")
self.assertEqual(uni, puny.decode("punycode"))
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
nameprep_tests = [
# 3.1 Map to nothing.
(b'foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
b'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
b'\xb8\x8f\xef\xbb\xbf',
b'foobarbaz'),
# 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
(b'CAFE',
b'cafe'),
# 3.3 Case folding 8bit U+00DF (german sharp s).
# The original test case is bogus; it says \xc3\xdf
(b'\xc3\x9f',
b'ss'),
# 3.4 Case folding U+0130 (turkish capital I with dot).
(b'\xc4\xb0',
b'i\xcc\x87'),
# 3.5 Case folding multibyte U+0143 U+037A.
(b'\xc5\x83\xcd\xba',
b'\xc5\x84 \xce\xb9'),
# 3.6 Case folding U+2121 U+33C6 U+1D7BB.
# XXX: skip this as it fails in UCS-2 mode
#('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
# 'telc\xe2\x88\x95kg\xcf\x83'),
(None, None),
# 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
(b'j\xcc\x8c\xc2\xa0\xc2\xaa',
b'\xc7\xb0 a'),
# 3.8 Case folding U+1FB7 and normalization.
(b'\xe1\xbe\xb7',
b'\xe1\xbe\xb6\xce\xb9'),
# 3.9 Self-reverting case folding U+01F0 and normalization.
# The original test case is bogus, it says `\xc7\xf0'
(b'\xc7\xb0',
b'\xc7\xb0'),
# 3.10 Self-reverting case folding U+0390 and normalization.
(b'\xce\x90',
b'\xce\x90'),
# 3.11 Self-reverting case folding U+03B0 and normalization.
(b'\xce\xb0',
b'\xce\xb0'),
# 3.12 Self-reverting case folding U+1E96 and normalization.
(b'\xe1\xba\x96',
b'\xe1\xba\x96'),
# 3.13 Self-reverting case folding U+1F56 and normalization.
(b'\xe1\xbd\x96',
b'\xe1\xbd\x96'),
# 3.14 ASCII space character U+0020.
(b' ',
b' '),
# 3.15 Non-ASCII 8bit space character U+00A0.
(b'\xc2\xa0',
b' '),
# 3.16 Non-ASCII multibyte space character U+1680.
(b'\xe1\x9a\x80',
None),
# 3.17 Non-ASCII multibyte space character U+2000.
(b'\xe2\x80\x80',
b' '),
# 3.18 Zero Width Space U+200b.
(b'\xe2\x80\x8b',
b''),
# 3.19 Non-ASCII multibyte space character U+3000.
(b'\xe3\x80\x80',
b' '),
# 3.20 ASCII control characters U+0010 U+007F.
(b'\x10\x7f',
b'\x10\x7f'),
# 3.21 Non-ASCII 8bit control character U+0085.
(b'\xc2\x85',
None),
# 3.22 Non-ASCII multibyte control character U+180E.
(b'\xe1\xa0\x8e',
None),
# 3.23 Zero Width No-Break Space U+FEFF.
(b'\xef\xbb\xbf',
b''),
# 3.24 Non-ASCII control character U+1D175.
(b'\xf0\x9d\x85\xb5',
None),
# 3.25 Plane 0 private use character U+F123.
(b'\xef\x84\xa3',
None),
# 3.26 Plane 15 private use character U+F1234.
(b'\xf3\xb1\x88\xb4',
None),
# 3.27 Plane 16 private use character U+10F234.
(b'\xf4\x8f\x88\xb4',
None),
# 3.28 Non-character code point U+8FFFE.
(b'\xf2\x8f\xbf\xbe',
None),
# 3.29 Non-character code point U+10FFFF.
(b'\xf4\x8f\xbf\xbf',
None),
# 3.30 Surrogate code U+DF42.
(b'\xed\xbd\x82',
None),
# 3.31 Non-plain text character U+FFFD.
(b'\xef\xbf\xbd',
None),
# 3.32 Ideographic description character U+2FF5.
(b'\xe2\xbf\xb5',
None),
# 3.33 Display property character U+0341.
(b'\xcd\x81',
b'\xcc\x81'),
# 3.34 Left-to-right mark U+200E.
(b'\xe2\x80\x8e',
None),
# 3.35 Deprecated U+202A.
(b'\xe2\x80\xaa',
None),
# 3.36 Language tagging character U+E0001.
(b'\xf3\xa0\x80\x81',
None),
# 3.37 Language tagging character U+E0042.
(b'\xf3\xa0\x81\x82',
None),
# 3.38 Bidi: RandALCat character U+05BE and LCat characters.
(b'foo\xd6\xbebar',
None),
# 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
(b'foo\xef\xb5\x90bar',
None),
# 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
(b'foo\xef\xb9\xb6bar',
b'foo \xd9\x8ebar'),
# 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
(b'\xd8\xa71',
None),
# 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
(b'\xd8\xa71\xd8\xa8',
b'\xd8\xa71\xd8\xa8'),
# 3.43 Unassigned code point U+E0002.
    # Skip this test, as unassigned code points are allowed here
#(b'\xf3\xa0\x80\x82',
# None),
(None, None),
# 3.44 Larger test (shrinking).
# Original test case reads \xc3\xdf
(b'X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
b'\xaa\xce\xb0\xe2\x80\x80',
b'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
# 3.45 Larger test (expanding).
# Original test case reads \xc3\x9f
(b'X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
b'\x80',
b'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
b'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
b'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
]
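# A minimal sketch of what nameprep() does with these vectors (illustrative):
# it maps characters away (e.g. the soft hyphen), case-folds and normalizes,
# and raises UnicodeError for prohibited input.
#
#     >>> from encodings.idna import nameprep
#     >>> nameprep("CAFE\xad")
#     'cafe'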
class NameprepTest(unittest.TestCase):
def test_nameprep(self):
from encodings.idna import nameprep
for pos, (orig, prepped) in enumerate(nameprep_tests):
if orig is None:
# Skipped
continue
# The Unicode strings are given in UTF-8
orig = str(orig, "utf-8", "surrogatepass")
if prepped is None:
# Input contains prohibited characters
self.assertRaises(UnicodeError, nameprep, orig)
else:
prepped = str(prepped, "utf-8", "surrogatepass")
try:
self.assertEqual(nameprep(orig), prepped)
except Exception as e:
raise support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
class IDNACodecTest(unittest.TestCase):
def test_builtin_decode(self):
self.assertEqual(str(b"python.org", "idna"), "python.org")
self.assertEqual(str(b"python.org.", "idna"), "python.org.")
self.assertEqual(str(b"xn--pythn-mua.org", "idna"), "pyth\xf6n.org")
self.assertEqual(str(b"xn--pythn-mua.org.", "idna"), "pyth\xf6n.org.")
def test_builtin_encode(self):
self.assertEqual("python.org".encode("idna"), b"python.org")
self.assertEqual("python.org.".encode("idna"), b"python.org.")
self.assertEqual("pyth\xf6n.org".encode("idna"), b"xn--pythn-mua.org")
self.assertEqual("pyth\xf6n.org.".encode("idna"), b"xn--pythn-mua.org.")
def test_stream(self):
r = codecs.getreader("idna")(io.BytesIO(b"abc"))
r.read(3)
self.assertEqual(r.read(), "")
def test_incremental_decode(self):
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"python.org"), "idna")),
"python.org"
)
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"python.org."), "idna")),
"python.org."
)
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
"pyth\xf6n.org."
)
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
"pyth\xf6n.org."
)
decoder = codecs.getincrementaldecoder("idna")()
self.assertEqual(decoder.decode(b"xn--xam", ), "")
self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
self.assertEqual(decoder.decode(b"rg"), "")
self.assertEqual(decoder.decode(b"", True), "org")
decoder.reset()
self.assertEqual(decoder.decode(b"xn--xam", ), "")
self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
self.assertEqual(decoder.decode(b"rg."), "org.")
self.assertEqual(decoder.decode(b"", True), "")
def test_incremental_encode(self):
self.assertEqual(
b"".join(codecs.iterencode("python.org", "idna")),
b"python.org"
)
self.assertEqual(
b"".join(codecs.iterencode("python.org.", "idna")),
b"python.org."
)
self.assertEqual(
b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
b"xn--pythn-mua.org."
)
self.assertEqual(
b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
b"xn--pythn-mua.org."
)
encoder = codecs.getincrementalencoder("idna")()
self.assertEqual(encoder.encode("\xe4x"), b"")
self.assertEqual(encoder.encode("ample.org"), b"xn--xample-9ta.")
self.assertEqual(encoder.encode("", True), b"org")
encoder.reset()
self.assertEqual(encoder.encode("\xe4x"), b"")
self.assertEqual(encoder.encode("ample.org."), b"xn--xample-9ta.org.")
self.assertEqual(encoder.encode("", True), b"")
def test_errors(self):
"""Only supports "strict" error handler"""
"python.org".encode("idna", "strict")
b"python.org".decode("idna", "strict")
for errors in ("ignore", "replace", "backslashreplace",
"surrogateescape"):
self.assertRaises(Exception, "python.org".encode, "idna", errors)
self.assertRaises(Exception,
b"python.org".decode, "idna", errors)
class CodecsModuleTest(unittest.TestCase):
def test_decode(self):
self.assertEqual(codecs.decode(b'\xe4\xf6\xfc', 'latin-1'),
'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.decode)
self.assertEqual(codecs.decode(b'abc'), 'abc')
self.assertRaises(UnicodeDecodeError, codecs.decode, b'\xff', 'ascii')
# test keywords
self.assertEqual(codecs.decode(obj=b'\xe4\xf6\xfc', encoding='latin-1'),
'\xe4\xf6\xfc')
self.assertEqual(codecs.decode(b'[\xff]', 'ascii', errors='ignore'),
'[]')
def test_encode(self):
self.assertEqual(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
b'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.encode)
self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
self.assertEqual(codecs.encode('abc'), b'abc')
self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')
# test keywords
self.assertEqual(codecs.encode(obj='\xe4\xf6\xfc', encoding='latin-1'),
b'\xe4\xf6\xfc')
self.assertEqual(codecs.encode('[\xff]', 'ascii', errors='ignore'),
b'[]')
def test_register(self):
self.assertRaises(TypeError, codecs.register)
self.assertRaises(TypeError, codecs.register, 42)
def test_lookup(self):
self.assertRaises(TypeError, codecs.lookup)
self.assertRaises(LookupError, codecs.lookup, "__spam__")
self.assertRaises(LookupError, codecs.lookup, " ")
def test_getencoder(self):
self.assertRaises(TypeError, codecs.getencoder)
self.assertRaises(LookupError, codecs.getencoder, "__spam__")
def test_getdecoder(self):
self.assertRaises(TypeError, codecs.getdecoder)
self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
def test_getreader(self):
self.assertRaises(TypeError, codecs.getreader)
self.assertRaises(LookupError, codecs.getreader, "__spam__")
def test_getwriter(self):
self.assertRaises(TypeError, codecs.getwriter)
self.assertRaises(LookupError, codecs.getwriter, "__spam__")
def test_lookup_issue1813(self):
# Issue #1813: under Turkish locales, lookup of some codecs failed
# because 'I' is lowercased as "ı" (dotless i)
oldlocale = locale.setlocale(locale.LC_CTYPE)
self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
try:
locale.setlocale(locale.LC_CTYPE, 'tr_TR')
except locale.Error:
# Unsupported locale on this system
self.skipTest('test needs Turkish locale')
c = codecs.lookup('ASCII')
self.assertEqual(c.name, 'ascii')
def test_all(self):
api = (
"encode", "decode",
"register", "CodecInfo", "Codec", "IncrementalEncoder",
"IncrementalDecoder", "StreamReader", "StreamWriter", "lookup",
"getencoder", "getdecoder", "getincrementalencoder",
"getincrementaldecoder", "getreader", "getwriter",
"register_error", "lookup_error",
"strict_errors", "replace_errors", "ignore_errors",
"xmlcharrefreplace_errors", "backslashreplace_errors",
"namereplace_errors",
"open", "EncodedFile",
"iterencode", "iterdecode",
"BOM", "BOM_BE", "BOM_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_BE", "BOM_UTF16_LE",
"BOM_UTF32", "BOM_UTF32_BE", "BOM_UTF32_LE",
"BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", # Undocumented
"StreamReaderWriter", "StreamRecoder",
)
self.assertCountEqual(api, codecs.__all__)
for api in codecs.__all__:
getattr(codecs, api)
def test_open(self):
self.addCleanup(support.unlink, support.TESTFN)
for mode in ('w', 'r', 'r+', 'w+', 'a', 'a+'):
with self.subTest(mode), \
codecs.open(support.TESTFN, mode, 'ascii') as file:
self.assertIsInstance(file, codecs.StreamReaderWriter)
def test_undefined(self):
self.assertRaises(UnicodeError, codecs.encode, 'abc', 'undefined')
self.assertRaises(UnicodeError, codecs.decode, b'abc', 'undefined')
self.assertRaises(UnicodeError, codecs.encode, '', 'undefined')
self.assertRaises(UnicodeError, codecs.decode, b'', 'undefined')
for errors in ('strict', 'ignore', 'replace', 'backslashreplace'):
self.assertRaises(UnicodeError,
codecs.encode, 'abc', 'undefined', errors)
self.assertRaises(UnicodeError,
codecs.decode, b'abc', 'undefined', errors)
class StreamReaderTest(unittest.TestCase):
def setUp(self):
self.reader = codecs.getreader('utf-8')
self.stream = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
def test_readlines(self):
f = self.reader(self.stream)
self.assertEqual(f.readlines(), ['\ud55c\n', '\uae00'])
class EncodedFileTest(unittest.TestCase):
def test_basic(self):
f = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
ef = codecs.EncodedFile(f, 'utf-16-le', 'utf-8')
self.assertEqual(ef.read(), b'\\\xd5\n\x00\x00\xae')
f = io.BytesIO()
ef = codecs.EncodedFile(f, 'utf-8', 'latin-1')
ef.write(b'\xc3\xbc')
self.assertEqual(f.getvalue(), b'\xfc')
all_unicode_encodings = [
"ascii",
"big5",
"big5hkscs",
"charmap",
"cp037",
"cp1006",
"cp1026",
"cp1125",
"cp1140",
"cp1250",
"cp1251",
"cp1252",
"cp1253",
"cp1254",
"cp1255",
"cp1256",
"cp1257",
"cp1258",
"cp424",
"cp437",
"cp500",
"cp720",
"cp737",
"cp775",
"cp850",
"cp852",
"cp855",
"cp856",
"cp857",
"cp858",
"cp860",
"cp861",
"cp862",
"cp863",
"cp864",
"cp865",
"cp866",
"cp869",
"cp874",
"cp875",
"cp932",
"cp949",
"cp950",
"euc_jis_2004",
"euc_jisx0213",
"euc_jp",
"euc_kr",
"gb18030",
"gb2312",
"gbk",
"hp_roman8",
"hz",
"idna",
"iso2022_jp",
"iso2022_jp_1",
"iso2022_jp_2",
"iso2022_jp_2004",
"iso2022_jp_3",
"iso2022_jp_ext",
"iso2022_kr",
"iso8859_1",
"iso8859_10",
"iso8859_11",
"iso8859_13",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_2",
"iso8859_3",
"iso8859_4",
"iso8859_5",
"iso8859_6",
"iso8859_7",
"iso8859_8",
"iso8859_9",
"johab",
"koi8_r",
"koi8_t",
"koi8_u",
"kz1048",
"latin_1",
"mac_cyrillic",
"mac_greek",
"mac_iceland",
"mac_latin2",
"mac_roman",
"mac_turkish",
"palmos",
"ptcp154",
"punycode",
"raw_unicode_escape",
"shift_jis",
"shift_jis_2004",
"shift_jisx0213",
"tis_620",
"unicode_escape",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8",
]
if hasattr(codecs, "mbcs_encode"):
all_unicode_encodings.append("mbcs")
if hasattr(codecs, "oem_encode"):
all_unicode_encodings.append("oem")
# The following encoding is not tested, because it's not supposed
# to work:
# "undefined"
# The following encodings don't work in stateful mode
broken_unicode_with_stateful = [
"punycode",
]
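# The per-character round trip performed below, reduced to a sketch for a
# single encoding (illustrative):
#
#     >>> enc = codecs.getincrementalencoder("utf-8")()
#     >>> data = b"".join(enc.encode(c) for c in "abc123") + enc.encode("", True)
#     >>> dec = codecs.getincrementaldecoder("utf-8")()
#     >>> "".join(dec.decode(bytes([b])) for b in data) + dec.decode(b"", True)
#     'abc123'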
class BasicUnicodeTest(unittest.TestCase, MixInCheckStateHandling):
def test_basics(self):
s = "abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
name = codecs.lookup(encoding).name
if encoding.endswith("_codec"):
name += "_codec"
elif encoding == "latin_1":
name = "latin_1"
self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
(b, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s), "encoding=%r" % encoding)
(chars, size) = codecs.getdecoder(encoding)(b)
self.assertEqual(chars, s, "encoding=%r" % encoding)
if encoding not in broken_unicode_with_stateful:
# check stream reader/writer
q = Queue(b"")
writer = codecs.getwriter(encoding)(q)
encodedresult = b""
for c in s:
writer.write(c)
chunk = q.read()
self.assertTrue(type(chunk) is bytes, type(chunk))
encodedresult += chunk
q = Queue(b"")
reader = codecs.getreader(encoding)(q)
decodedresult = ""
for c in encodedresult:
q.write(bytes([c]))
decodedresult += reader.read()
self.assertEqual(decodedresult, s, "encoding=%r" % encoding)
if encoding not in broken_unicode_with_stateful:
# check incremental decoder/encoder and iterencode()/iterdecode()
try:
encoder = codecs.getincrementalencoder(encoding)()
except LookupError: # no IncrementalEncoder
pass
else:
# check incremental decoder/encoder
encodedresult = b""
for c in s:
encodedresult += encoder.encode(c)
encodedresult += encoder.encode("", True)
decoder = codecs.getincrementaldecoder(encoding)()
decodedresult = ""
for c in encodedresult:
decodedresult += decoder.decode(bytes([c]))
decodedresult += decoder.decode(b"", True)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
# check iterencode()/iterdecode()
result = "".join(codecs.iterdecode(
codecs.iterencode(s, encoding), encoding))
self.assertEqual(result, s, "encoding=%r" % encoding)
# check iterencode()/iterdecode() with empty string
result = "".join(codecs.iterdecode(
codecs.iterencode("", encoding), encoding))
self.assertEqual(result, "")
if encoding not in ("idna", "mbcs"):
# check incremental decoder/encoder with errors argument
try:
encoder = codecs.getincrementalencoder(encoding)("ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = b"".join(encoder.encode(c) for c in s)
decoder = codecs.getincrementaldecoder(encoding)("ignore")
decodedresult = "".join(decoder.decode(bytes([c]))
for c in encodedresult)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
@support.cpython_only
def test_basics_capi(self):
s = "abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
if encoding not in broken_unicode_with_stateful:
# check incremental decoder/encoder (fetched via the C API)
try:
cencoder = _testcapi.codec_incrementalencoder(encoding)
except LookupError: # no IncrementalEncoder
pass
else:
# check C API
encodedresult = b""
for c in s:
encodedresult += cencoder.encode(c)
encodedresult += cencoder.encode("", True)
cdecoder = _testcapi.codec_incrementaldecoder(encoding)
decodedresult = ""
for c in encodedresult:
decodedresult += cdecoder.decode(bytes([c]))
decodedresult += cdecoder.decode(b"", True)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
if encoding not in ("idna", "mbcs"):
# check incremental decoder/encoder with errors argument
try:
cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = b"".join(cencoder.encode(c) for c in s)
cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
decodedresult = "".join(cdecoder.decode(bytes([c]))
for c in encodedresult)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
def test_seek(self):
# all codecs should be able to encode these
s = "%s\n%s\n" % (100*"abc123", 100*"def456")
for encoding in all_unicode_encodings:
if encoding == "idna": # FIXME: See SF bug #1163178
continue
if encoding in broken_unicode_with_stateful:
continue
reader = codecs.getreader(encoding)(io.BytesIO(s.encode(encoding)))
for t in range(5):
# Test that calling seek resets the internal codec state and buffers
reader.seek(0, 0)
data = reader.read()
self.assertEqual(s, data)
def test_bad_decode_args(self):
for encoding in all_unicode_encodings:
decoder = codecs.getdecoder(encoding)
self.assertRaises(TypeError, decoder)
if encoding not in ("idna", "punycode"):
self.assertRaises(TypeError, decoder, 42)
def test_bad_encode_args(self):
for encoding in all_unicode_encodings:
encoder = codecs.getencoder(encoding)
self.assertRaises(TypeError, encoder)
def test_encoding_map_type_initialized(self):
from encodings import cp1140
        # This used to crash; we only verify that it no longer does.
table_type = type(cp1140.encoding_table)
self.assertEqual(table_type, table_type)
def test_decoder_state(self):
# Check that getstate() and setstate() handle the state properly
u = "abc123"
for encoding in all_unicode_encodings:
if encoding not in broken_unicode_with_stateful:
self.check_state_handling_decode(encoding, u, u.encode(encoding))
self.check_state_handling_encode(encoding, u, u.encode(encoding))
class CharmapTest(unittest.TestCase):
def test_decode_with_string_map(self):
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict", "abc"),
("abc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict", "\U0010FFFFbc"),
("\U0010FFFFbc", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab"
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab\ufffe"
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab"),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab\ufffe"),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace", "ab"),
("ab\\x02", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace", "ab\ufffe"),
("ab\\x02", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab"),
("ab", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab\ufffe"),
("ab", 3)
)
allbytes = bytes(range(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", ""),
("", len(allbytes))
)
def test_decode_with_int2str_map(self):
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: 'c'}),
("abc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 'Aa', 1: 'Bb', 2: 'Cc'}),
("AaBbCc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: '\U0010FFFF', 1: 'b', 2: 'c'}),
("\U0010FFFFbc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: ''}),
("ab", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b'}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: None}
)
# Issue #14850
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: '\ufffe'}
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: 'a', 1: 'b'}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: 'a', 1: 'b', 2: None}),
("ab\ufffd", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: 'a', 1: 'b', 2: '\ufffe'}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
{0: 'a', 1: 'b'}),
("ab\\x02", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
{0: 'a', 1: 'b', 2: None}),
("ab\\x02", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
{0: 'a', 1: 'b', 2: '\ufffe'}),
("ab\\x02", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: 'a', 1: 'b'}),
("ab", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: 'a', 1: 'b', 2: None}),
("ab", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: 'a', 1: 'b', 2: '\ufffe'}),
("ab", 3)
)
allbytes = bytes(range(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", {}),
("", len(allbytes))
)
def test_decode_with_int2int_map(self):
a = ord('a')
b = ord('b')
c = ord('c')
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: a, 1: b, 2: c}),
("abc", 3)
)
# Issue #15379
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 0x10FFFF, 1: b, 2: c}),
("\U0010FFFFbc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: sys.maxunicode, 1: b, 2: c}),
(chr(sys.maxunicode) + "bc", 3)
)
self.assertRaises(TypeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: sys.maxunicode + 1, 1: b, 2: c}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: a, 1: b},
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: a, 1: b, 2: 0xFFFE},
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: a, 1: b}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: a, 1: b, 2: 0xFFFE}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
{0: a, 1: b}),
("ab\\x02", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
{0: a, 1: b, 2: 0xFFFE}),
("ab\\x02", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: a, 1: b}),
("ab", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: a, 1: b, 2: 0xFFFE}),
("ab", 3)
)
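# The mapping semantics exercised above, in short (illustrative): each input
# byte is looked up in the mapping, and anything unmapped goes through the
# error handler.
#
#     >>> codecs.charmap_decode(b"\x00\x01", "strict", {0: 'a', 1: 'b'})
#     ('ab', 2)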
class WithStmtTest(unittest.TestCase):
def test_encodedfile(self):
f = io.BytesIO(b"\xc3\xbc")
with codecs.EncodedFile(f, "latin-1", "utf-8") as ef:
self.assertEqual(ef.read(), b"\xfc")
self.assertTrue(f.closed)
def test_streamreaderwriter(self):
f = io.BytesIO(b"\xc3\xbc")
info = codecs.lookup("utf-8")
with codecs.StreamReaderWriter(f, info.streamreader,
info.streamwriter, 'strict') as srw:
self.assertEqual(srw.read(), "\xfc")
class TypesTest(unittest.TestCase):
def test_decode_unicode(self):
        # Most decoders don't accept str (text) input
decoders = [
codecs.utf_7_decode,
codecs.utf_8_decode,
codecs.utf_16_le_decode,
codecs.utf_16_be_decode,
codecs.utf_16_ex_decode,
codecs.utf_32_decode,
codecs.utf_32_le_decode,
codecs.utf_32_be_decode,
codecs.utf_32_ex_decode,
codecs.latin_1_decode,
codecs.ascii_decode,
codecs.charmap_decode,
]
if hasattr(codecs, "mbcs_decode"):
decoders.append(codecs.mbcs_decode)
for decoder in decoders:
self.assertRaises(TypeError, decoder, "xxx")
def test_unicode_escape(self):
# Escape-decoding a unicode string is supported and gives the same
# result as decoding the equivalent ASCII bytes string.
self.assertEqual(codecs.unicode_escape_decode(r"\u1234"), ("\u1234", 6))
self.assertEqual(codecs.unicode_escape_decode(br"\u1234"), ("\u1234", 6))
self.assertEqual(codecs.raw_unicode_escape_decode(r"\u1234"), ("\u1234", 6))
self.assertEqual(codecs.raw_unicode_escape_decode(br"\u1234"), ("\u1234", 6))
self.assertRaises(UnicodeDecodeError, codecs.unicode_escape_decode, br"\U00110000")
self.assertEqual(codecs.unicode_escape_decode(r"\U00110000", "replace"), ("\ufffd", 10))
self.assertEqual(codecs.unicode_escape_decode(r"\U00110000", "backslashreplace"),
(r"\x5c\x55\x30\x30\x31\x31\x30\x30\x30\x30", 10))
self.assertRaises(UnicodeDecodeError, codecs.raw_unicode_escape_decode, br"\U00110000")
self.assertEqual(codecs.raw_unicode_escape_decode(r"\U00110000", "replace"), ("\ufffd", 10))
self.assertEqual(codecs.raw_unicode_escape_decode(r"\U00110000", "backslashreplace"),
(r"\x5c\x55\x30\x30\x31\x31\x30\x30\x30\x30", 10))
class UnicodeEscapeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.unicode_escape_encode(""), (b"", 0))
self.assertEqual(codecs.unicode_escape_decode(b""), ("", 0))
def test_raw_encode(self):
encode = codecs.unicode_escape_encode
for b in range(32, 127):
if b != b'\\'[0]:
self.assertEqual(encode(chr(b)), (bytes([b]), 1))
def test_raw_decode(self):
decode = codecs.unicode_escape_decode
for b in range(256):
if b != b'\\'[0]:
self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))
def test_escape_encode(self):
encode = codecs.unicode_escape_encode
check = coding_checker(self, encode)
check('\t', br'\t')
check('\n', br'\n')
check('\r', br'\r')
check('\\', br'\\')
for b in range(32):
if chr(b) not in '\t\n\r':
check(chr(b), ('\\x%02x' % b).encode())
for b in range(127, 256):
check(chr(b), ('\\x%02x' % b).encode())
check('\u20ac', br'\u20ac')
check('\U0001d120', br'\U0001d120')
def test_escape_decode(self):
decode = codecs.unicode_escape_decode
check = coding_checker(self, decode)
check(b"[\\\n]", "[]")
check(br'[\"]', '["]')
check(br"[\']", "[']")
check(br"[\\]", r"[\]")
check(br"[\a]", "[\x07]")
check(br"[\b]", "[\x08]")
check(br"[\t]", "[\x09]")
check(br"[\n]", "[\x0a]")
check(br"[\v]", "[\x0b]")
check(br"[\f]", "[\x0c]")
check(br"[\r]", "[\x0d]")
check(br"[\7]", "[\x07]")
check(br"[\78]", "[\x078]")
check(br"[\41]", "[!]")
check(br"[\418]", "[!8]")
check(br"[\101]", "[A]")
check(br"[\1010]", "[A0]")
check(br"[\x41]", "[A]")
check(br"[\x410]", "[A0]")
check(br"\u20ac", "\u20ac")
check(br"\U0001d120", "\U0001d120")
for i in range(97, 123):
b = bytes([i])
if b not in b'abfnrtuvx':
with self.assertWarns(DeprecationWarning):
check(b"\\" + b, "\\" + chr(i))
if b.upper() not in b'UN':
with self.assertWarns(DeprecationWarning):
check(b"\\" + b.upper(), "\\" + chr(i-32))
with self.assertWarns(DeprecationWarning):
check(br"\8", "\\8")
with self.assertWarns(DeprecationWarning):
check(br"\9", "\\9")
with self.assertWarns(DeprecationWarning):
check(b"\\\xfa", "\\\xfa")
def test_decode_errors(self):
decode = codecs.unicode_escape_decode
for c, d in (b'x', 2), (b'u', 4), (b'U', 4):
for i in range(d):
self.assertRaises(UnicodeDecodeError, decode,
b"\\" + c + b"0"*i)
self.assertRaises(UnicodeDecodeError, decode,
b"[\\" + c + b"0"*i + b"]")
data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
self.assertEqual(decode(data, "replace"),
("[\ufffd]\ufffd", len(data)))
self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
class RawUnicodeEscapeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.raw_unicode_escape_encode(""), (b"", 0))
self.assertEqual(codecs.raw_unicode_escape_decode(b""), ("", 0))
def test_raw_encode(self):
encode = codecs.raw_unicode_escape_encode
for b in range(256):
self.assertEqual(encode(chr(b)), (bytes([b]), 1))
def test_raw_decode(self):
decode = codecs.raw_unicode_escape_decode
for b in range(256):
self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))
def test_escape_encode(self):
encode = codecs.raw_unicode_escape_encode
check = coding_checker(self, encode)
for b in range(256):
if b not in b'uU':
check('\\' + chr(b), b'\\' + bytes([b]))
check('\u20ac', br'\u20ac')
check('\U0001d120', br'\U0001d120')
def test_escape_decode(self):
decode = codecs.raw_unicode_escape_decode
check = coding_checker(self, decode)
for b in range(256):
if b not in b'uU':
check(b'\\' + bytes([b]), '\\' + chr(b))
check(br"\u20ac", "\u20ac")
check(br"\U0001d120", "\U0001d120")
def test_decode_errors(self):
decode = codecs.raw_unicode_escape_decode
for c, d in (b'u', 4), (b'U', 4):
for i in range(d):
self.assertRaises(UnicodeDecodeError, decode,
b"\\" + c + b"0"*i)
self.assertRaises(UnicodeDecodeError, decode,
b"[\\" + c + b"0"*i + b"]")
data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
self.assertEqual(decode(data, "replace"),
("[\ufffd]\ufffd", len(data)))
self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
class EscapeEncodeTest(unittest.TestCase):
def test_escape_encode(self):
tests = [
(b'', (b'', 0)),
(b'foobar', (b'foobar', 6)),
(b'spam\0eggs', (b'spam\\x00eggs', 9)),
(b'a\'b', (b"a\\'b", 3)),
(b'b\\c', (b'b\\\\c', 3)),
(b'c\nd', (b'c\\nd', 3)),
(b'd\re', (b'd\\re', 3)),
(b'f\x7fg', (b'f\\x7fg', 3)),
]
for data, output in tests:
with self.subTest(data=data):
self.assertEqual(codecs.escape_encode(data), output)
self.assertRaises(TypeError, codecs.escape_encode, 'spam')
self.assertRaises(TypeError, codecs.escape_encode, bytearray(b'spam'))
class SurrogateEscapeTest(unittest.TestCase):
def test_utf8(self):
# Bad byte
self.assertEqual(b"foo\x80bar".decode("utf-8", "surrogateescape"),
"foo\udc80bar")
self.assertEqual("foo\udc80bar".encode("utf-8", "surrogateescape"),
b"foo\x80bar")
# bad-utf-8 encoded surrogate
self.assertEqual(b"\xed\xb0\x80".decode("utf-8", "surrogateescape"),
"\udced\udcb0\udc80")
self.assertEqual("\udced\udcb0\udc80".encode("utf-8", "surrogateescape"),
b"\xed\xb0\x80")
def test_ascii(self):
# bad byte
self.assertEqual(b"foo\x80bar".decode("ascii", "surrogateescape"),
"foo\udc80bar")
self.assertEqual("foo\udc80bar".encode("ascii", "surrogateescape"),
b"foo\x80bar")
def test_charmap(self):
# bad byte: \xa5 is unmapped in iso-8859-3
self.assertEqual(b"foo\xa5bar".decode("iso-8859-3", "surrogateescape"),
"foo\udca5bar")
self.assertEqual("foo\udca5bar".encode("iso-8859-3", "surrogateescape"),
b"foo\xa5bar")
def test_latin1(self):
# Issue6373
self.assertEqual("\udce4\udceb\udcef\udcf6\udcfc".encode("latin-1", "surrogateescape"),
b"\xe4\xeb\xef\xf6\xfc")
class BomTest(unittest.TestCase):
def test_seek0(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
self.addCleanup(support.unlink, support.TESTFN)
for encoding in tests:
# Check if the BOM is written only once
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# Check that the BOM is written after a seek(0)
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data[0])
self.assertNotEqual(f.tell(), 0)
f.seek(0)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# (StreamWriter) Check that the BOM is written after a seek(0)
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data[0])
self.assertNotEqual(f.writer.tell(), 0)
f.writer.seek(0)
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# Check that the BOM is not written after a seek() at a position
# different than the start
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.seek(f.tell())
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# (StreamWriter) Check that the BOM is not written after a seek()
# at a position different than the start
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data)
f.writer.seek(f.writer.tell())
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
bytes_transform_encodings = [
"base64_codec",
"uu_codec",
"quopri_codec",
"hex_codec",
]
transform_aliases = {
"base64_codec": ["base64", "base_64"],
"uu_codec": ["uu"],
"quopri_codec": ["quopri", "quoted_printable", "quotedprintable"],
"hex_codec": ["hex"],
"rot_13": ["rot13"],
}
try:
import zlib
except ImportError:
zlib = None
else:
bytes_transform_encodings.append("zlib_codec")
transform_aliases["zlib_codec"] = ["zip", "zlib"]
try:
import bz2
except ImportError:
pass
else:
bytes_transform_encodings.append("bz2_codec")
transform_aliases["bz2_codec"] = ["bz2"]
class TransformCodecTest(unittest.TestCase):
def test_basics(self):
binput = bytes(range(256))
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
# generic codecs interface
(o, size) = codecs.getencoder(encoding)(binput)
self.assertEqual(size, len(binput))
(i, size) = codecs.getdecoder(encoding)(o)
self.assertEqual(size, len(o))
self.assertEqual(i, binput)
def test_read(self):
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
sin = codecs.encode(b"\x80", encoding)
reader = codecs.getreader(encoding)(io.BytesIO(sin))
sout = reader.read()
self.assertEqual(sout, b"\x80")
def test_readline(self):
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
sin = codecs.encode(b"\x80", encoding)
reader = codecs.getreader(encoding)(io.BytesIO(sin))
sout = reader.readline()
self.assertEqual(sout, b"\x80")
def test_buffer_api_usage(self):
# We check all the transform codecs accept memoryview input
# for encoding and decoding
# and also that they roundtrip correctly
original = b"12345\x80"
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
data = original
view = memoryview(data)
data = codecs.encode(data, encoding)
view_encoded = codecs.encode(view, encoding)
self.assertEqual(view_encoded, data)
view = memoryview(data)
data = codecs.decode(data, encoding)
self.assertEqual(data, original)
view_decoded = codecs.decode(view, encoding)
self.assertEqual(view_decoded, data)
def test_text_to_binary_blacklists_binary_transforms(self):
# Check binary -> binary codecs give a good error for str input
bad_input = "bad input type"
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
fmt = (r"{!r} is not a text encoding; "
r"use codecs.encode\(\) to handle arbitrary codecs")
msg = fmt.format(encoding)
with self.assertRaisesRegex(LookupError, msg) as failure:
bad_input.encode(encoding)
self.assertIsNone(failure.exception.__cause__)
def test_text_to_binary_blacklists_text_transforms(self):
# Check str.encode gives a good error message for str -> str codecs
msg = (r"^'rot_13' is not a text encoding; "
r"use codecs.encode\(\) to handle arbitrary codecs")
with self.assertRaisesRegex(LookupError, msg):
"just an example message".encode("rot_13")
def test_binary_to_text_blacklists_binary_transforms(self):
# Check bytes.decode and bytearray.decode give a good error
# message for binary -> binary codecs
data = b"encode first to ensure we meet any format restrictions"
for encoding in bytes_transform_encodings:
with self.subTest(encoding=encoding):
encoded_data = codecs.encode(data, encoding)
fmt = (r"{!r} is not a text encoding; "
r"use codecs.decode\(\) to handle arbitrary codecs")
msg = fmt.format(encoding)
with self.assertRaisesRegex(LookupError, msg):
encoded_data.decode(encoding)
with self.assertRaisesRegex(LookupError, msg):
bytearray(encoded_data).decode(encoding)
def test_binary_to_text_blacklists_text_transforms(self):
# Check str -> str codec gives a good error for binary input
for bad_input in (b"immutable", bytearray(b"mutable")):
with self.subTest(bad_input=bad_input):
msg = (r"^'rot_13' is not a text encoding; "
r"use codecs.decode\(\) to handle arbitrary codecs")
with self.assertRaisesRegex(LookupError, msg) as failure:
bad_input.decode("rot_13")
self.assertIsNone(failure.exception.__cause__)
@unittest.skipUnless(zlib, "Requires zlib support")
def test_custom_zlib_error_is_wrapped(self):
# Check zlib codec gives a good error for malformed input
msg = "^decoding with 'zlib_codec' codec failed"
with self.assertRaisesRegex(Exception, msg) as failure:
codecs.decode(b"hello", "zlib_codec")
self.assertIsInstance(failure.exception.__cause__,
type(failure.exception))
def test_custom_hex_error_is_wrapped(self):
# Check hex codec gives a good error for malformed input
msg = "^decoding with 'hex_codec' codec failed"
with self.assertRaisesRegex(Exception, msg) as failure:
codecs.decode(b"hello", "hex_codec")
self.assertIsInstance(failure.exception.__cause__,
type(failure.exception))
# Unfortunately, the bz2 module throws OSError, which the codec
# machinery currently can't wrap :(
# Ensure codec aliases from http://bugs.python.org/issue7475 work
def test_aliases(self):
for codec_name, aliases in transform_aliases.items():
expected_name = codecs.lookup(codec_name).name
for alias in aliases:
with self.subTest(alias=alias):
info = codecs.lookup(alias)
self.assertEqual(info.name, expected_name)
def test_quopri_stateless(self):
# Should encode with quotetabs=True
encoded = codecs.encode(b"space tab\teol \n", "quopri-codec")
self.assertEqual(encoded, b"space=20tab=09eol=20\n")
# But should still support unescaped tabs and spaces
unescaped = b"space tab eol\n"
self.assertEqual(codecs.decode(unescaped, "quopri-codec"), unescaped)
def test_uu_invalid(self):
# Missing "begin" line
self.assertRaises(ValueError, codecs.decode, b"", "uu-codec")
# The codec system tries to wrap exceptions in order to ensure the error
# mentions the operation being performed and the codec involved. We
# currently *only* want this to happen for relatively stateless
# exceptions, where the only significant information they contain is their
# type and a single str argument.
# Use a local codec registry to avoid appearing to leak objects when
# registering multiple search functions
_TEST_CODECS = {}
def _get_test_codec(codec_name):
return _TEST_CODECS.get(codec_name)
codecs.register(_get_test_codec) # Returns None, not usable as a decorator
try:
# Issue #22166: Also need to clear the internal cache in CPython
from _codecs import _forget_codec
except ImportError:
def _forget_codec(codec_name):
pass
class ExceptionChainingTest(unittest.TestCase):
def setUp(self):
# There's no way to unregister a codec search function, so we just
# ensure we render this one fairly harmless after the test
# case finishes by using the test case repr as the codec name
# The codecs module normalizes codec names, although this doesn't
# appear to be formally documented...
# We also make sure we use a truly unique id for the custom codec
# to avoid issues with the codec cache when running these tests
# multiple times (e.g. when hunting for refleaks)
unique_id = repr(self) + str(id(self))
self.codec_name = encodings.normalize_encoding(unique_id).lower()
# We store the object to raise on the instance because of a bad
# interaction between the codec caching (which means we can't
# recreate the codec entry) and regrtest refleak hunting (which
# runs the same test instance multiple times). This means we
# need to ensure the codecs call back in to the instance to find
# out which exception to raise rather than binding them in a
# closure to an object that may change on the next run
self.obj_to_raise = RuntimeError
def tearDown(self):
_TEST_CODECS.pop(self.codec_name, None)
# Issue #22166: Also pop from caches to avoid appearance of ref leaks
encodings._cache.pop(self.codec_name, None)
try:
_forget_codec(self.codec_name)
except KeyError:
pass
def set_codec(self, encode, decode):
codec_info = codecs.CodecInfo(encode, decode,
name=self.codec_name)
_TEST_CODECS[self.codec_name] = codec_info
@contextlib.contextmanager
def assertWrapped(self, operation, exc_type, msg):
full_msg = r"{} with {!r} codec failed \({}: {}\)".format(
operation, self.codec_name, exc_type.__name__, msg)
with self.assertRaisesRegex(exc_type, full_msg) as caught:
yield caught
self.assertIsInstance(caught.exception.__cause__, exc_type)
self.assertIsNotNone(caught.exception.__cause__.__traceback__)
def raise_obj(self, *args, **kwds):
# Helper to dynamically change the object raised by a test codec
raise self.obj_to_raise
def check_wrapped(self, obj_to_raise, msg, exc_type=RuntimeError):
self.obj_to_raise = obj_to_raise
self.set_codec(self.raise_obj, self.raise_obj)
with self.assertWrapped("encoding", exc_type, msg):
"str_input".encode(self.codec_name)
with self.assertWrapped("encoding", exc_type, msg):
codecs.encode("str_input", self.codec_name)
with self.assertWrapped("decoding", exc_type, msg):
b"bytes input".decode(self.codec_name)
with self.assertWrapped("decoding", exc_type, msg):
codecs.decode(b"bytes input", self.codec_name)
def test_raise_by_type(self):
self.check_wrapped(RuntimeError, "")
def test_raise_by_value(self):
msg = "This should be wrapped"
self.check_wrapped(RuntimeError(msg), msg)
def test_raise_grandchild_subclass_exact_size(self):
msg = "This should be wrapped"
class MyRuntimeError(RuntimeError):
__slots__ = ()
self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
def test_raise_subclass_with_weakref_support(self):
msg = "This should be wrapped"
class MyRuntimeError(RuntimeError):
pass
self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
def check_not_wrapped(self, obj_to_raise, msg):
def raise_obj(*args, **kwds):
raise obj_to_raise
self.set_codec(raise_obj, raise_obj)
with self.assertRaisesRegex(RuntimeError, msg):
"str input".encode(self.codec_name)
with self.assertRaisesRegex(RuntimeError, msg):
codecs.encode("str input", self.codec_name)
with self.assertRaisesRegex(RuntimeError, msg):
b"bytes input".decode(self.codec_name)
with self.assertRaisesRegex(RuntimeError, msg):
codecs.decode(b"bytes input", self.codec_name)
def test_init_override_is_not_wrapped(self):
class CustomInit(RuntimeError):
def __init__(self):
pass
self.check_not_wrapped(CustomInit, "")
def test_new_override_is_not_wrapped(self):
class CustomNew(RuntimeError):
def __new__(cls):
return super().__new__(cls)
self.check_not_wrapped(CustomNew, "")
def test_instance_attribute_is_not_wrapped(self):
msg = "This should NOT be wrapped"
exc = RuntimeError(msg)
exc.attr = 1
self.check_not_wrapped(exc, "^{}$".format(msg))
def test_non_str_arg_is_not_wrapped(self):
self.check_not_wrapped(RuntimeError(1), "1")
def test_multiple_args_is_not_wrapped(self):
msg_re = r"^\('a', 'b', 'c'\)$"
self.check_not_wrapped(RuntimeError('a', 'b', 'c'), msg_re)
# http://bugs.python.org/issue19609
def test_codec_lookup_failure_not_wrapped(self):
msg = "^unknown encoding: {}$".format(self.codec_name)
# The initial codec lookup should not be wrapped
with self.assertRaisesRegex(LookupError, msg):
"str input".encode(self.codec_name)
with self.assertRaisesRegex(LookupError, msg):
codecs.encode("str input", self.codec_name)
with self.assertRaisesRegex(LookupError, msg):
b"bytes input".decode(self.codec_name)
with self.assertRaisesRegex(LookupError, msg):
codecs.decode(b"bytes input", self.codec_name)
def test_unflagged_non_text_codec_handling(self):
# The stdlib non-text codecs are now marked so they're
# pre-emptively skipped by the text model related methods
# However, third party codecs won't be flagged, so we still make
# sure the case where an inappropriate output type is produced is
# handled appropriately
def encode_to_str(*args, **kwds):
return "not bytes!", 0
def decode_to_bytes(*args, **kwds):
return b"not str!", 0
self.set_codec(encode_to_str, decode_to_bytes)
# No input or output type checks on the codecs module functions
encoded = codecs.encode(None, self.codec_name)
self.assertEqual(encoded, "not bytes!")
decoded = codecs.decode(None, self.codec_name)
self.assertEqual(decoded, b"not str!")
# Text model methods should complain
fmt = (r"^{!r} encoder returned 'str' instead of 'bytes'; "
r"use codecs.encode\(\) to encode to arbitrary types$")
msg = fmt.format(self.codec_name)
with self.assertRaisesRegex(TypeError, msg):
"str_input".encode(self.codec_name)
fmt = (r"^{!r} decoder returned 'bytes' instead of 'str'; "
r"use codecs.decode\(\) to decode to arbitrary types$")
msg = fmt.format(self.codec_name)
with self.assertRaisesRegex(TypeError, msg):
b"bytes input".decode(self.codec_name)
@unittest.skipUnless(sys.platform == 'win32',
'code pages are specific to Windows')
class CodePageTest(unittest.TestCase):
CP_UTF8 = 65001
def test_invalid_code_page(self):
self.assertRaises(ValueError, codecs.code_page_encode, -1, 'a')
self.assertRaises(ValueError, codecs.code_page_decode, -1, b'a')
self.assertRaises(OSError, codecs.code_page_encode, 123, 'a')
self.assertRaises(OSError, codecs.code_page_decode, 123, b'a')
def test_code_page_name(self):
self.assertRaisesRegex(UnicodeEncodeError, 'cp932',
codecs.code_page_encode, 932, '\xff')
self.assertRaisesRegex(UnicodeDecodeError, 'cp932',
codecs.code_page_decode, 932, b'\x81\x00', 'strict', True)
self.assertRaisesRegex(UnicodeDecodeError, 'CP_UTF8',
codecs.code_page_decode, self.CP_UTF8, b'\xff', 'strict', True)
def check_decode(self, cp, tests):
for raw, errors, expected in tests:
if expected is not None:
try:
decoded = codecs.code_page_decode(cp, raw, errors, True)
except UnicodeDecodeError as err:
self.fail('Unable to decode %a from "cp%s" with '
'errors=%r: %s' % (raw, cp, errors, err))
self.assertEqual(decoded[0], expected,
'%a.decode("cp%s", %r)=%a != %a'
% (raw, cp, errors, decoded[0], expected))
# assert 0 <= decoded[1] <= len(raw)
self.assertGreaterEqual(decoded[1], 0)
self.assertLessEqual(decoded[1], len(raw))
else:
self.assertRaises(UnicodeDecodeError,
codecs.code_page_decode, cp, raw, errors, True)
def check_encode(self, cp, tests):
for text, errors, expected in tests:
if expected is not None:
try:
encoded = codecs.code_page_encode(cp, text, errors)
except UnicodeEncodeError as err:
self.fail('Unable to encode %a to "cp%s" with '
'errors=%r: %s' % (text, cp, errors, err))
self.assertEqual(encoded[0], expected,
'%a.encode("cp%s", %r)=%a != %a'
% (text, cp, errors, encoded[0], expected))
self.assertEqual(encoded[1], len(text))
else:
self.assertRaises(UnicodeEncodeError,
codecs.code_page_encode, cp, text, errors)
def test_cp932(self):
self.check_encode(932, (
('abc', 'strict', b'abc'),
('\uff44\u9a3e', 'strict', b'\x82\x84\xe9\x80'),
# test error handlers
('\xff', 'strict', None),
('[\xff]', 'ignore', b'[]'),
('[\xff]', 'replace', b'[y]'),
('[\u20ac]', 'replace', b'[?]'),
('[\xff]', 'backslashreplace', b'[\\xff]'),
('[\xff]', 'namereplace',
b'[\\N{LATIN SMALL LETTER Y WITH DIAERESIS}]'),
('[\xff]', 'xmlcharrefreplace', b'[ÿ]'),
('\udcff', 'strict', None),
('[\udcff]', 'surrogateescape', b'[\xff]'),
('[\udcff]', 'surrogatepass', None),
))
self.check_decode(932, (
(b'abc', 'strict', 'abc'),
(b'\x82\x84\xe9\x80', 'strict', '\uff44\u9a3e'),
# invalid bytes
(b'[\xff]', 'strict', None),
(b'[\xff]', 'ignore', '[]'),
(b'[\xff]', 'replace', '[\ufffd]'),
(b'[\xff]', 'backslashreplace', '[\\xff]'),
(b'[\xff]', 'surrogateescape', '[\udcff]'),
(b'[\xff]', 'surrogatepass', None),
(b'\x81\x00abc', 'strict', None),
(b'\x81\x00abc', 'ignore', '\x00abc'),
(b'\x81\x00abc', 'replace', '\ufffd\x00abc'),
(b'\x81\x00abc', 'backslashreplace', '\\x81\x00abc'),
))
def test_cp1252(self):
self.check_encode(1252, (
('abc', 'strict', b'abc'),
('\xe9\u20ac', 'strict', b'\xe9\x80'),
('\xff', 'strict', b'\xff'),
# test error handlers
('\u0141', 'strict', None),
('\u0141', 'ignore', b''),
('\u0141', 'replace', b'L'),
('\udc98', 'surrogateescape', b'\x98'),
('\udc98', 'surrogatepass', None),
))
self.check_decode(1252, (
(b'abc', 'strict', 'abc'),
(b'\xe9\x80', 'strict', '\xe9\u20ac'),
(b'\xff', 'strict', '\xff'),
))
def test_cp_utf7(self):
cp = 65000
self.check_encode(cp, (
('abc', 'strict', b'abc'),
('\xe9\u20ac', 'strict', b'+AOkgrA-'),
('\U0010ffff', 'strict', b'+2//f/w-'),
('\udc80', 'strict', b'+3IA-'),
('\ufffd', 'strict', b'+//0-'),
))
self.check_decode(cp, (
(b'abc', 'strict', 'abc'),
(b'+AOkgrA-', 'strict', '\xe9\u20ac'),
(b'+2//f/w-', 'strict', '\U0010ffff'),
(b'+3IA-', 'strict', '\udc80'),
(b'+//0-', 'strict', '\ufffd'),
# invalid bytes
(b'[+/]', 'strict', '[]'),
(b'[\xff]', 'strict', '[\xff]'),
))
def test_multibyte_encoding(self):
self.check_decode(932, (
(b'\x84\xe9\x80', 'ignore', '\u9a3e'),
(b'\x84\xe9\x80', 'replace', '\ufffd\u9a3e'),
))
self.check_decode(self.CP_UTF8, (
(b'\xff\xf4\x8f\xbf\xbf', 'ignore', '\U0010ffff'),
(b'\xff\xf4\x8f\xbf\xbf', 'replace', '\ufffd\U0010ffff'),
))
self.check_encode(self.CP_UTF8, (
('[\U0010ffff\uDC80]', 'ignore', b'[\xf4\x8f\xbf\xbf]'),
('[\U0010ffff\uDC80]', 'replace', b'[\xf4\x8f\xbf\xbf?]'),
))
def test_code_page_decode_flags(self):
# Issue #36312: For some code pages (e.g. UTF-7) flags for
# MultiByteToWideChar() must be set to 0.
if support.verbose:
sys.stdout.write('\n')
for cp in (50220, 50221, 50222, 50225, 50227, 50229,
*range(57002, 57011+1), 65000):
# On small versions of Windows like Windows IoT
# not all codepages are present.
# A missing codepage causes an OSError exception
# so check for the codepage before decoding
if is_code_page_present(cp):
self.assertEqual(codecs.code_page_decode(cp, b'abc'), ('abc', 3), f'cp{cp}')
else:
if support.verbose:
print(f" skipping cp={cp}")
self.assertEqual(codecs.code_page_decode(42, b'abc'),
('\uf061\uf062\uf063', 3))
def test_incremental(self):
decoded = codecs.code_page_decode(932, b'\x82', 'strict', False)
self.assertEqual(decoded, ('', 0))
decoded = codecs.code_page_decode(932,
b'\xe9\x80\xe9', 'strict',
False)
self.assertEqual(decoded, ('\u9a3e', 2))
decoded = codecs.code_page_decode(932,
b'\xe9\x80\xe9\x80', 'strict',
False)
self.assertEqual(decoded, ('\u9a3e\u9a3e', 4))
decoded = codecs.code_page_decode(932,
b'abc', 'strict',
False)
self.assertEqual(decoded, ('abc', 3))
def test_mbcs_alias(self):
# Check that looking up our 'default' codepage will return
# mbcs when we don't have a more specific one available
with mock.patch('_winapi.GetACP', return_value=123):
codec = codecs.lookup('cp123')
self.assertEqual(codec.name, 'mbcs')
@support.bigmemtest(size=2**31, memuse=7, dry_run=False)
def test_large_input(self, size):
# Test input longer than INT_MAX.
# Input should contain undecodable bytes before and after
# the INT_MAX limit.
encoded = (b'01234567' * ((size//8)-1) +
b'\x85\x86\xea\xeb\xec\xef\xfc\xfd\xfe\xff')
self.assertEqual(len(encoded), size+2)
decoded = codecs.code_page_decode(932, encoded, 'surrogateescape', True)
self.assertEqual(decoded[1], len(encoded))
del encoded
self.assertEqual(len(decoded[0]), decoded[1])
self.assertEqual(decoded[0][:10], '0123456701')
self.assertEqual(decoded[0][-20:],
'6701234567'
'\udc85\udc86\udcea\udceb\udcec'
'\udcef\udcfc\udcfd\udcfe\udcff')
@support.bigmemtest(size=2**31, memuse=6, dry_run=False)
def test_large_utf8_input(self, size):
# Test input longer than INT_MAX.
# Input should contain a decodable multi-byte character
# surrounding INT_MAX
encoded = (b'0123456\xed\x84\x80' * (size//8))
self.assertEqual(len(encoded), size // 8 * 10)
decoded = codecs.code_page_decode(65001, encoded, 'ignore', True)
self.assertEqual(decoded[1], len(encoded))
del encoded
self.assertEqual(len(decoded[0]), size)
self.assertEqual(decoded[0][:10], '0123456\ud10001')
self.assertEqual(decoded[0][-11:], '56\ud1000123456\ud100')
class ASCIITest(unittest.TestCase):
def test_encode(self):
self.assertEqual('abc123'.encode('ascii'), b'abc123')
def test_encode_error(self):
for data, error_handler, expected in (
('[\x80\xff\u20ac]', 'ignore', b'[]'),
('[\x80\xff\u20ac]', 'replace', b'[???]'),
('[\x80\xff\u20ac]', 'xmlcharrefreplace', b'[€ÿ€]'),
('[\x80\xff\u20ac\U000abcde]', 'backslashreplace',
b'[\\x80\\xff\\u20ac\\U000abcde]'),
('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'),
):
with self.subTest(data=data, error_handler=error_handler,
expected=expected):
self.assertEqual(data.encode('ascii', error_handler),
expected)
def test_encode_surrogateescape_error(self):
with self.assertRaises(UnicodeEncodeError):
# the first character can be decoded, but not the second
'\udc80\xff'.encode('ascii', 'surrogateescape')
def test_decode(self):
self.assertEqual(b'abc'.decode('ascii'), 'abc')
def test_decode_error(self):
for data, error_handler, expected in (
(b'[\x80\xff]', 'ignore', '[]'),
(b'[\x80\xff]', 'replace', '[\ufffd\ufffd]'),
(b'[\x80\xff]', 'surrogateescape', '[\udc80\udcff]'),
(b'[\x80\xff]', 'backslashreplace', '[\\x80\\xff]'),
):
with self.subTest(data=data, error_handler=error_handler,
expected=expected):
self.assertEqual(data.decode('ascii', error_handler),
expected)
class Latin1Test(unittest.TestCase):
def test_encode(self):
for data, expected in (
('abc', b'abc'),
('\x80\xe9\xff', b'\x80\xe9\xff'),
):
with self.subTest(data=data, expected=expected):
self.assertEqual(data.encode('latin1'), expected)
def test_encode_errors(self):
for data, error_handler, expected in (
('[\u20ac\udc80]', 'ignore', b'[]'),
('[\u20ac\udc80]', 'replace', b'[??]'),
('[\u20ac\U000abcde]', 'backslashreplace',
b'[\\u20ac\\U000abcde]'),
('[\u20ac\udc80]', 'xmlcharrefreplace', b'[€�]'),
('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'),
):
with self.subTest(data=data, error_handler=error_handler,
expected=expected):
self.assertEqual(data.encode('latin1', error_handler),
expected)
def test_encode_surrogateescape_error(self):
with self.assertRaises(UnicodeEncodeError):
# the first character can be decoded, but not the second
'\udc80\u20ac'.encode('latin1', 'surrogateescape')
def test_decode(self):
for data, expected in (
(b'abc', 'abc'),
(b'[\x80\xff]', '[\x80\xff]'),
):
with self.subTest(data=data, expected=expected):
self.assertEqual(data.decode('latin1'), expected)
class StreamRecoderTest(unittest.TestCase):
def test_writelines(self):
bio = io.BytesIO()
codec = codecs.lookup('ascii')
sr = codecs.StreamRecoder(bio, codec.encode, codec.decode,
encodings.ascii.StreamReader, encodings.ascii.StreamWriter)
sr.writelines([b'a', b'b'])
self.assertEqual(bio.getvalue(), b'ab')
def test_write(self):
bio = io.BytesIO()
codec = codecs.lookup('latin1')
# Recode from Latin-1 to utf-8.
sr = codecs.StreamRecoder(bio, codec.encode, codec.decode,
encodings.utf_8.StreamReader, encodings.utf_8.StreamWriter)
text = 'àñé'
sr.write(text.encode('latin1'))
self.assertEqual(bio.getvalue(), text.encode('utf-8'))
def test_seeking_read(self):
bio = io.BytesIO('line1\nline2\nline3\n'.encode('utf-16-le'))
sr = codecs.EncodedFile(bio, 'utf-8', 'utf-16-le')
self.assertEqual(sr.readline(), b'line1\n')
sr.seek(0)
self.assertEqual(sr.readline(), b'line1\n')
self.assertEqual(sr.readline(), b'line2\n')
self.assertEqual(sr.readline(), b'line3\n')
self.assertEqual(sr.readline(), b'')
def test_seeking_write(self):
bio = io.BytesIO('123456789\n'.encode('utf-16-le'))
sr = codecs.EncodedFile(bio, 'utf-8', 'utf-16-le')
# Test that seek() only resets its internal buffer when offset
# and whence are zero.
sr.seek(2)
sr.write(b'\nabc\n')
self.assertEqual(sr.readline(), b'789\n')
sr.seek(0)
self.assertEqual(sr.readline(), b'1\n')
self.assertEqual(sr.readline(), b'abc\n')
self.assertEqual(sr.readline(), b'789\n')
@unittest.skipIf(_testcapi is None, 'need _testcapi module')
class LocaleCodecTest(unittest.TestCase):
"""
Test indirectly _Py_DecodeUTF8Ex() and _Py_EncodeUTF8Ex().
"""
ENCODING = sys.getfilesystemencoding()
STRINGS = ("ascii", "ulatin1:\xa7\xe9",
"u255:\xff",
"UCS:\xe9\u20ac\U0010ffff",
"surrogates:\uDC80\uDCFF")
BYTES_STRINGS = (b"blatin1:\xa7\xe9", b"b255:\xff")
SURROGATES = "\uDC80\uDCFF"
def encode(self, text, errors="strict"):
return _testcapi.EncodeLocaleEx(text, 0, errors)
def check_encode_strings(self, errors):
for text in self.STRINGS:
with self.subTest(text=text):
try:
expected = text.encode(self.ENCODING, errors)
except UnicodeEncodeError:
with self.assertRaises(RuntimeError) as cm:
self.encode(text, errors)
errmsg = str(cm.exception)
self.assertRegex(errmsg, r"encode error: pos=[0-9]+, reason=")
else:
encoded = self.encode(text, errors)
self.assertEqual(encoded, expected)
def test_encode_strict(self):
self.check_encode_strings("strict")
def test_encode_surrogateescape(self):
self.check_encode_strings("surrogateescape")
def test_encode_surrogatepass(self):
try:
self.encode('', 'surrogatepass')
except ValueError as exc:
if str(exc) == 'unsupported error handler':
self.skipTest(f"{self.ENCODING!r} encoder doesn't support "
f"surrogatepass error handler")
else:
raise
self.check_encode_strings("surrogatepass")
def test_encode_unsupported_error_handler(self):
with self.assertRaises(ValueError) as cm:
self.encode('', 'backslashreplace')
self.assertEqual(str(cm.exception), 'unsupported error handler')
def decode(self, encoded, errors="strict"):
return _testcapi.DecodeLocaleEx(encoded, 0, errors)
def check_decode_strings(self, errors):
is_utf8 = (self.ENCODING == "utf-8")
if is_utf8:
encode_errors = 'surrogateescape'
else:
encode_errors = 'strict'
strings = list(self.BYTES_STRINGS)
for text in self.STRINGS:
try:
encoded = text.encode(self.ENCODING, encode_errors)
if encoded not in strings:
strings.append(encoded)
except UnicodeEncodeError:
encoded = None
if is_utf8:
encoded2 = text.encode(self.ENCODING, 'surrogatepass')
if encoded2 != encoded:
strings.append(encoded2)
for encoded in strings:
with self.subTest(encoded=encoded):
try:
expected = encoded.decode(self.ENCODING, errors)
except UnicodeDecodeError:
with self.assertRaises(RuntimeError) as cm:
self.decode(encoded, errors)
errmsg = str(cm.exception)
self.assertTrue(errmsg.startswith("decode error: "), errmsg)
else:
decoded = self.decode(encoded, errors)
self.assertEqual(decoded, expected)
def test_decode_strict(self):
self.check_decode_strings("strict")
def test_decode_surrogateescape(self):
self.check_decode_strings("surrogateescape")
def test_decode_surrogatepass(self):
try:
self.decode(b'', 'surrogatepass')
except ValueError as exc:
if str(exc) == 'unsupported error handler':
self.skipTest(f"{self.ENCODING!r} decoder doesn't support "
f"surrogatepass error handler")
else:
raise
self.check_decode_strings("surrogatepass")
def test_decode_unsupported_error_handler(self):
with self.assertRaises(ValueError) as cm:
self.decode(b'', 'backslashreplace')
self.assertEqual(str(cm.exception), 'unsupported error handler')
class Rot13Test(unittest.TestCase):
"""Test the educational ROT-13 codec."""
def test_encode(self):
ciphertext = codecs.encode("Caesar liked ciphers", 'rot-13')
self.assertEqual(ciphertext, 'Pnrfne yvxrq pvcuref')
def test_decode(self):
plaintext = codecs.decode('Rg gh, Oehgr?', 'rot-13')
self.assertEqual(plaintext, 'Et tu, Brute?')
def test_incremental_encode(self):
encoder = codecs.getincrementalencoder('rot-13')()
ciphertext = encoder.encode('ABBA nag Cheryl Baker')
self.assertEqual(ciphertext, 'NOON ant Purely Onxre')
def test_incremental_decode(self):
decoder = codecs.getincrementaldecoder('rot-13')()
plaintext = decoder.decode('terra Ares envy tha')
self.assertEqual(plaintext, 'green Nerf rail gun')
class Rot13UtilTest(unittest.TestCase):
"""Test the ROT-13 codec via rot13 function,
i.e. the user has done something like:
$ echo "Hello World" | python -m encodings.rot_13
"""
def test_rot13_func(self):
infile = io.StringIO('Gb or, be abg gb or, gung vf gur dhrfgvba')
outfile = io.StringIO()
encodings.rot_13.rot13(infile, outfile)
outfile.seek(0)
plain_text = outfile.read()
self.assertEqual(
plain_text,
'To be, or not to be, that is the question')
if __name__ == "__main__":
unittest.main()
|
kikocorreoso/brython
|
www/src/Lib/test/test_codecs.py
|
Python
|
bsd-3-clause
| 131,220
|
[
"FEFF"
] |
c113d81aff0dfd47b192c9dd9daf2b7423c9fe17e96f4a70679d79b954fc8246
|
#WCS response decoder.
#Decodes response from a WCS (either a Coverages XML document or a Multipart MIME) and extracts the urls of the coverage data.
#Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#Author: Dominic Lowe, STFC
#contact email: d.lowe@rl.ac.uk
#
# Multipart MIME decoding based on http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/86676
#example: used in conjunction with ows lib wcs:
#from owslib import wcsdecoder
#u=wcs.getcoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),format='application/netcdf', store='true')
#decoder=wcsdecoder.WCSDecoder(u)
#decoder.getCoverages()
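#A further illustrative sketch (the file name below is assumed, not taken from this module):
#the MpartMime helper defined later in this file can also be used directly on a
#Multipart MIME response that has already been read into a string:
#  mpart = MpartMime(open('response.mime').read())
#  written_files = mpart.unpackToDir('./unpacked')   # returns the paths written to disk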
import os
import mimetypes  # used below to guess file extensions for unnamed MIME parts
from owslib.etree import etree
import email
import errno
class WCSDecoder(object):
def __init__(self, u):
''' initiate with a urllib url object.'''
self.u=u
self._getType()
def _getType(self):
''' determine whether it is a Multipart Mime or a Coverages XML file'''
#what's the best way to test this?
#for now read start of file
tempu=self.u
if tempu.readline()[:14] == '<?xml version=':
self.urlType='XML'
else:
self.urlType='Multipart'
def getCoverages(self, unpackdir='./unpacked'):
if self.urlType=='XML':
paths=[]
u_xml = self.u.read()
u_tree = etree.fromstring(u_xml)
for ref in u_tree.findall('{http://www.opengis.net/wcs/1.1}Coverage/{http://www.opengis.net/wcs/1.1}Reference'):
path = ref.attrib['{http://www.w3.org/1999/xlink}href']
paths.append(path)
            for ref in u_tree.findall('{http://www.opengis.net/wcs/1.1.0/owcs}Coverage/{http://www.opengis.net/wcs/1.1.0/owcs}Reference'):
path = ref.attrib['{http://www.w3.org/1999/xlink}href']
paths.append(path)
elif self.urlType=='Multipart':
#Decode multipart mime and return fileobjects
            u_mpart = self.u.read()
            mpart = MpartMime(u_mpart)
            paths = mpart.unpackToDir(unpackdir)
return paths
class MpartMime(object):
def __init__ (self,mpartmime):
""" mpartmime is a multipart mime file that has already been read in."""
self.mpartmime=mpartmime
def unpackToDir(self, unpackdir):
""" unpacks contents of Multipart mime to a given directory"""
names=[]
#create the directory if it doesn't exist:
try:
os.mkdir(unpackdir)
        except OSError as e:
            # Ignore directory exists error
            if e.errno != errno.EEXIST:
raise
#now walk through the multipart mime and write out files
msg = email.message_from_string(self.mpartmime)
counter =1
for part in msg.walk():
# multipart/* are just containers, ignore
if part.get_content_maintype() == 'multipart':
continue
# Applications should really check the given filename so that an
# email message can't be used to overwrite important files
filename = part.get_filename()
if not filename:
try:
ext = mimetypes.guess_extension(part.get_type())
except:
ext=None
if not ext:
# Use a generic extension
ext = '.bin'
filename = 'part-%03d%s' % (counter, ext)
fullpath=os.path.join(unpackdir, filename)
names.append(fullpath)
fp = open(fullpath, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
return names
|
sebastic/QGIS
|
python/ext-libs/owslib/coverage/wcsdecoder.py
|
Python
|
gpl-2.0
| 3,839
|
[
"NetCDF"
] |
76c41e0090c526305c274dddb2d96ffc4ffee4a6a310fcd78156429813307405
|
from django.utils.translation import ugettext_lazy as _
from django.forms import ValidationError # noqa
from django.core.urlresolvers import reverse
from horizon import exceptions
from horizon import forms
from horizon import messages
from crystal_dashboard.dashboards.crystal import exceptions as sdsexception
from crystal_dashboard.api import swift as api
class CreateRegion(forms.SelfHandlingForm):
name = forms.CharField(max_length=255,
label=_("Name"),
help_text=_("The name of the new region."),
widget=forms.TextInput(
attrs={"ng-model": "name", "not-blank": ""}
))
description = forms.CharField(widget=forms.widgets.Textarea(
attrs={'rows': 4}),
label=_("Description"),
required=False)
def __init__(self, request, *args, **kwargs):
super(CreateRegion, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
try:
response = api.new_region(request, data)
if 200 <= response.status_code < 300:
return data
else:
raise sdsexception.SdsException(response.text)
except Exception as ex:
redirect = reverse("horizon:crystal:regions:index")
error_message = "Unable to create the new region.\t %s" % ex.message
exceptions.handle(request,
_(error_message),
redirect=redirect)
class UpdateRegion(forms.SelfHandlingForm):
name = forms.CharField(max_length=255,
label=_("Name"),
help_text=_("The name of the new region."))
description = forms.CharField(widget=forms.widgets.Textarea(
attrs={'rows': 4}),
label=_("Description"),
required=False)
region_id = forms.CharField(max_length=255,
label=_("Region ID"),
widget=forms.HiddenInput())
def __init__(self, request, *args, **kwargs):
super(UpdateRegion, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
try:
response = api.update_region(request, data)
if 200 <= response.status_code < 300:
                messages.success(request, _('Successfully updated region: %s') % data['region_id'])
return data
else:
raise sdsexception.SdsException(response.text)
except Exception as ex:
redirect = reverse("horizon:crystal:regions:index")
error_message = "Unable to update region.\t %s" % ex.message
exceptions.handle(request, _(error_message), redirect=redirect)
|
Crystal-SDS/dashboard
|
crystal_dashboard/dashboards/crystal/regions/forms.py
|
Python
|
gpl-3.0
| 2,968
|
[
"CRYSTAL"
] |
9d912cc9a5f09bc971cb999d0ce87b515258342c2e25ad492522e63b94e2b0de
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import sys
import mdtools.constants as const
import mdtraj as md
import itertools
from tqdm import tqdm
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import seaborn as sns
### setup ###
# example...
target_pairs = [
[278, 348],
[165, 280],
[128, 195],
[128, 192],
[271, 350],
[271, 348],
]
include_mainchain = True
plotting = True
structure_file = '/path/to/structure/file'
trajectory_file = '/path/to/trajectory/file'
chunk_size = 100
#############
if include_mainchain:
# atom list (including O and N of backbone)
resid_dict = const.RESID_DICT
else:
# atom list (sidechain only)
resid_dict = const.RESID_DICT_SIDECHAIN
# load the structure file data
sys.stderr.write('Loading data ...\n')
ref = md.load(structure_file)
# prepare selection
sys.stderr.write('Preparing the selection array...\n')
calc_selections = []
for lst in target_pairs:
    sel = ref.topology.select("name CA and (residue %d or residue %d)" % tuple(lst))
    # raise an error if the selection was invalid, before indexing into it
    if len(sel) != 2:
        raise RuntimeError('expected exactly 2 CA atoms for residues %d and %d, got %d' % (lst[0], lst[1], len(sel)))
    ca_atom1 = ref.topology.atom(sel[0])
    ca_atom2 = ref.topology.atom(sel[1])
# get atom names list from the dictionary
try:
atomset1 = resid_dict[ca_atom1.residue.name]
atomset2 = resid_dict[ca_atom2.residue.name]
except KeyError as e:
        sys.stderr.write('!!! Hydrogen-bonding atoms are not defined for one of the selected residues. !!!\n')
        sys.stderr.write('If you also want to allow main-chain atoms in the distance calculation, set "include_mainchain = True".\n')
raise e
atomset_iter = itertools.product(atomset1, atomset2)
selections_tmp = []
for atoms in atomset_iter:
inputs = (ca_atom1.residue.resSeq, atoms[0], ca_atom2.residue.resSeq, atoms[1])
s = ref.topology.select('(residue %d and name %s) or (residue %d and name %s)' % inputs)
selections_tmp.append(s)
calc_selections.append(selections_tmp)
# calc
sys.stderr.write('Start distance calculation...\n')
results = [[] for _ in calc_selections]
itertraj = md.iterload(trajectory_file, top=ref, chunk=chunk_size)
for chunk in tqdm(itertraj):
for i, sel in enumerate(calc_selections):
coor = chunk.xyz[:, sel, :] # 4-dimensional ary: (time, #of selection pair, 2atoms, xyz)
dist_vectors = coor[:, :, 0, :] - coor[:, :, 1, :] # 3-dimensional ary: (time, #of pair, xyz)
dists = np.linalg.norm(dist_vectors, axis=2) # calc norm over xyz
min_dist = np.min(dists, axis=1) # select the minimum value in the pairs -> 1-dimensional ary
results[i].append(min_dist)
# convert into 2-dim numpy array
t_ary = np.array([np.concatenate(e) for e in results]).T
sys.stderr.write('\n')
# write-out to csv file
sys.stderr.write('Saving raw-distance file in rawdata.csv. \n')
np.savetxt('rawdata.csv', t_ary, delimiter=', ')
if not plotting:
sys.stderr.write('Exit without plotting.\n')
sys.exit(0)
# plotting
n_frames = t_ary.shape[0]
t = np.arange(n_frames)
for i in range(len(target_pairs)):
plt.clf()
title = 'mindist_%d-%d' % tuple(target_pairs[i])
plt.plot(t, t_ary[:, i])
plt.title(title)
plt.xlabel('Time step')
plt.ylabel('Minimum dist (nm)')
plt.savefig('%s.png' % title)
sys.stderr.write('%s.png saved !\n' % title)
sys.stderr.write('Exit normally.\n')
|
TaizoAyase/mdtools
|
min_dist/calc_mindist.py
|
Python
|
gpl-2.0
| 3,430
|
[
"MDTraj"
] |
2c075f4541b5e3a774c48f44942d78769b5dda4d291c90ac142864042af4ea72
|
from Firefly import logging
from Firefly.helpers.device_types.switch import Switch
from Firefly.const import SWITCH, LEVEL, AUTHOR
CAPABILITIES = {
LEVEL: True,
SWITCH: True
}
TITLE = 'Virtual Switch'
def Setup(firefly, package, **kwargs):
logging.message('Entering %s setup' % TITLE)
switch = VirtualSwitch(firefly, package, **kwargs)
return firefly.install_component(switch)
class VirtualSwitch(Switch):
def __init__(self, firefly, package, **kwargs):
super().__init__(firefly, package, TITLE, AUTHOR, capabilities=CAPABILITIES, **kwargs)
def set_on(self, **kwargs):
self.update_values(switch='on')
def set_off(self, **kwargs):
self.update_values(switch='off')
def set_level(self, level=-1, **kwargs):
self.update_values(level=int(level))
|
Firefly-Automation/Firefly
|
Firefly/components/virtual_devices/dimmer.py
|
Python
|
apache-2.0
| 794
|
[
"Firefly"
] |
608df4c87fb13b6ed77dd115059a0e071a4b0291884f49adf0751f3c9e33b0d1
|
from __future__ import with_statement
import os
import time
import tempfile
from latex import latex
def preview(expr, output='png', viewer=None, euler=True, **latex_settings):
"""View expression or LaTeX markup in PNG, DVI, PostScript or
PDF form.
If the expr argument is an expression, it will be exported to
    LaTeX and then compiled using the available TeX distribution.
The first argument, 'expr', may also be a LaTeX string.
The function will then run the appropriate viewer for the given
output format or use the user defined one. By default png
output is generated.
By default pretty Euler fonts are used for typesetting (they
were used to typeset the well known "Concrete Mathematics"
book). For that to work, you need the 'eulervm.sty' LaTeX style (in
Debian/Ubuntu, install the texlive-fonts-extra package). If you prefer
default AMS fonts or your system lacks 'eulervm' LaTeX package then
unset the 'euler' keyword argument.
    To use viewer auto-detection, let's say for 'png' output, issue::
>> from sympy import *
>> x, y = symbols("x,y")
>> preview(x + y, output='png')
    This will choose 'pyglet' by default. To select a different one::
>> preview(x + y, output='png', viewer='gimp')
The 'png' format is considered special. For all other formats
the rules are slightly different. As an example we will take
'dvi' output format. If you would run::
>> preview(x + y, output='dvi')
then 'view' will look for available 'dvi' viewers on your
    system (predefined in the function, so it will try evince
first, then kdvi and xdvi). If nothing is found you will
need to set the viewer explicitly::
>> preview(x + y, output='dvi', viewer='superior-dvi-viewer')
    This will skip auto-detection and will run the user-specified
'superior-dvi-viewer'. If 'view' fails to find it on
your system it will gracefully raise an exception. You may also
enter 'file' for the viewer argument. Doing so will cause this function
to return a file object in read-only mode.
    Currently this depends on pexpect, which is not available for Windows.
Additional keyword args will be passed to the latex call. E.g. the
symbol_names flag::
>> phidd = Symbol('phidd')
>> preview(phidd, symbol_names={phidd:r'\ddot{\varphi}'})
"""
# we don't want to depend on anything not in the
# standard library with SymPy by default
import pexpect
special = [ 'pyglet' ]
if viewer is None:
if output == "png":
viewer = "pyglet"
else:
# sorted in order from most pretty to most ugly
# very discussable, but indeed 'gv' looks awful :)
candidates = {
"dvi" : [ "evince", "okular", "kdvi", "xdvi" ],
"ps" : [ "evince", "okular", "gsview", "gv" ],
"pdf" : [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
}
try:
for candidate in candidates[output]:
if pexpect.which(candidate):
viewer = candidate
break
else:
raise SystemError("No viewers found for '%s' output format." % output)
except KeyError:
raise SystemError("Invalid output format: %s" % output)
else:
if viewer not in special and not pexpect.which(viewer):
raise SystemError("Unrecognized viewer: %s" % viewer)
if not euler:
format = r"""\documentclass[12pt]{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\begin{document}
\pagestyle{empty}
%s
\vfill
\end{document}
"""
else:
format = r"""\documentclass[12pt]{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{eulervm}
\begin{document}
\pagestyle{empty}
%s
\vfill
\end{document}
"""
if isinstance(expr, str):
latex_string = expr
else:
latex_string = latex(expr, mode='inline', **latex_settings)
tmp = tempfile.mktemp()
with open(tmp + ".tex", "w") as tex:
tex.write(format % latex_string)
cwd = os.getcwd()
os.chdir(tempfile.gettempdir())
if os.system("latex -halt-on-error %s.tex" % tmp) != 0:
raise SystemError("Failed to generate DVI output.")
os.remove(tmp + ".tex")
os.remove(tmp + ".aux")
os.remove(tmp + ".log")
if output != "dvi":
command = {
"ps" : "dvips -o %s.ps %s.dvi",
"pdf" : "dvipdf %s.dvi %s.pdf",
"png" : "dvipng -T tight -z 9 " + \
"--truecolor -o %s.png %s.dvi",
}
try:
if os.system(command[output] % (tmp, tmp)) != 0:
raise SystemError("Failed to generate '%s' output." % output)
else:
os.remove(tmp + ".dvi")
except KeyError:
raise SystemError("Invalid output format: %s" % output)
src = "%s.%s" % (tmp, output)
src_file = None
if viewer == "file":
src_file = open(src, 'rb')
elif viewer == "pyglet":
try:
from pyglet import window, image, gl
from pyglet.window import key
except ImportError:
raise ImportError("pyglet is required for plotting.\n visit http://www.pyglet.org/")
if output == "png":
from pyglet.image.codecs.png import PNGImageDecoder
img = image.load(src, decoder=PNGImageDecoder())
else:
raise SystemError("pyglet preview works only for 'png' files.")
offset = 25
win = window.Window(
width = img.width + 2*offset,
height = img.height + 2*offset,
caption = "sympy",
resizable = False
)
win.set_vsync(False)
try:
def on_close():
win.has_exit = True
win.on_close = on_close
def on_key_press(symbol, modifiers):
if symbol in [key.Q, key.ESCAPE]:
on_close()
win.on_key_press = on_key_press
def on_expose():
gl.glClearColor(1.0, 1.0, 1.0, 1.0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
img.blit(
(win.width - img.width) / 2,
(win.height - img.height) / 2
)
win.on_expose = on_expose
while not win.has_exit:
win.dispatch_events()
win.flip()
except KeyboardInterrupt:
pass
win.close()
else:
os.system("%s %s &> /dev/null &" % (viewer, src))
time.sleep(2) # wait for the viewer to read data
os.remove(src)
os.chdir(cwd)
if src_file is not None:
return src_file
|
ichuang/sympy
|
sympy/printing/preview.py
|
Python
|
bsd-3-clause
| 7,314
|
[
"VisIt"
] |
d4c71728fa107742e5a083228fe5c3a66da43407e11a21e48c4bff442d815e38
|
# This file is part of MSMBuilder.
#
# Copyright 2011 Stanford University
#
# MSMBuilder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Functions for performing Transition Path Theory calculations.
Written and maintained by TJ Lane <tjlane@stanford.edu>
Contributions from Kyle Beauchamp, Robert McGibbon, Vince Voelz,
Christian Schwantes.
These are the cannonical references for TPT. Note that TPT is really a
specialization of ideas very framiliar to the mathematical study of Markov
chains, and there are many books, manuscripts in the mathematical literature
that cover the same concepts.
References
----------
.. [1] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory
for Markov jump processes. Multiscale Model. Simul. 7, 1192–1219
(2009).
.. [2] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding
pathways in network models of coarse-grained protein dynamics. J.
Chem. Phys. 130, 205102 (2009).
"""
from __future__ import print_function, division, absolute_import
from mdtraj.utils.six.moves import xrange
import numpy as np
import scipy.sparse
from msmbuilder import MSMLib
from msmbuilder import msm_analysis
from msmbuilder.utils import deprecated
import logging
logger = logging.getLogger(__name__)
# turn on debugging printout
# logger.setLogLevel(logging.DEBUG)
###############################################################################
# Typechecking/Utility Functions
#
def _ensure_iterable(arg):
if not hasattr(arg, '__iter__'):
arg = list([int(arg)])
logger.debug("Passed object was not iterable,"
" converted it to: %s" % str(arg))
assert hasattr(arg, '__iter__')
return arg
def _check_sources_sinks(sources, sinks):
sources = _ensure_iterable(sources)
sinks = _ensure_iterable(sinks)
if np.any(sources == sinks):
raise ValueError("Sets `sources` and `sinks` must be disjoint "
"to find paths between them")
return sources, sinks
###############################################################################
# Path Finding Functions
#
def find_top_paths(sources, sinks, tprob, num_paths=10, node_wipe=False, net_flux=None):
r"""
    Calls the Dijkstra algorithm to find the top `num_paths` pathways.
    Does this recursively by first finding the top flux path, then cutting that
    path and relaxing to find the second top path. Continues until `num_paths`
    paths have been found.
Parameters
----------
sources : array_like, int
The indices of the source states
sinks : array_like, int
Indices of sink states
num_paths : int
The number of paths to find
Returns
-------
Paths : list of lists
        The nodes traversed in each path
Bottlenecks : list of tuples
The nodes between which exists the path bottleneck
Fluxes : list of floats
The flux through each path
Optional Parameters
-------------------
node_wipe : bool
If true, removes the bottleneck-generating node from the graph, instead
of just the bottleneck (not recommended, a debugging functionality)
net_flux : sparse matrix
Matrix of the net flux from `sources` to `sinks`, see function `net_flux`.
If not provided, is calculated from scratch. If provided, `tprob` is
ignored.
To Do
-----
-- Add periodic flow check
References
----------
.. [1] Dijkstra, E. W. (1959). "A note on two problems in connexion with
graphs". Numerische Mathematik 1: 269–271. doi:10.1007/BF01386390.
"""
# first, do some checking on the input, esp. `sources` and `sinks`
# we want to make sure all objects are iterable and the sets are disjoint
sources, sinks = _check_sources_sinks(sources, sinks)
msm_analysis.check_transition(tprob)
# check to see if we get net_flux for free, otherwise calculate it
if not net_flux:
net_flux = calculate_net_fluxes(sources, sinks, tprob)
# initialize objects
paths = []
fluxes = []
bottlenecks = []
if scipy.sparse.issparse(net_flux):
net_flux = net_flux.tolil()
# run the initial Dijkstra pass
pi, b = Dijkstra(sources, sinks, net_flux)
logger.info("Path Num | Path | Bottleneck | Flux")
i = 1
done = False
while not done:
# First find the highest flux pathway
(path, (b1, b2), flux) = _backtrack(sinks, b, pi, net_flux)
# Add each result to a Paths, Bottlenecks, Fluxes list
if flux == 0:
logger.info("Only %d possible pathways found. Stopping backtrack.", i)
break
paths.append(path)
bottlenecks.append((b1, b2))
fluxes.append(flux)
logger.info("%s | %s | %s | %s ", i, path, (b1, b2), flux)
# Cut the bottleneck, start relaxing from B side of the cut
if node_wipe:
net_flux[:, b2] = 0
logger.info("Wiped node: %s", b2)
else:
net_flux[b1, b2] = 0
G = scipy.sparse.find(net_flux)
Q = [b2]
b, pi, net_flux = _back_relax(b2, b, pi, net_flux)
# Then relax the graph and repeat
# But only if we still need to
if i != num_paths - 1:
while len(Q) > 0:
w = Q.pop()
for v in G[1][np.where(G[0] == w)]:
if pi[v] == w:
b, pi, net_flux = _back_relax(v, b, pi, net_flux)
Q.append(v)
Q = sorted(Q, key=lambda v: b[v])
i += 1
if i == num_paths + 1:
done = True
if flux == 0:
logger.info("Only %d possible pathways found. Stopping backtrack.", i)
done = True
return paths, bottlenecks, fluxes
def Dijkstra(sources, sinks, net_flux):
r""" A modified Dijkstra algorithm that dynamically computes the cost
of all paths from A to B, weighted by NFlux.
Parameters
----------
sources : array_like, int
The indices of the source states (i.e. for state A in rxn A -> B)
sinks : array_like, int
Indices of sink states (state B)
NFlux : sparse matrix
Matrix of the net flux from A to B, see function GetFlux
Returns
-------
pi : array_like
        The paths from A->B, pi[i] = node preceding i
b : array_like
The flux passing through each node
See Also
--------
DijkstraTopPaths : child function
`DijkstraTopPaths` is probably the function you want to call to find
paths through an MSM network. This is a utility function called by
`DijkstraTopPaths`, but may be useful in some specific cases
References
----------
.. [1] Dijkstra, E. W. (1959). "A note on two problems in connexion with
graphs". Numerische Mathematik 1: 269–271. doi:10.1007/BF01386390.
"""
sources, sinks = _check_sources_sinks(sources, sinks)
# initialize data structures
if scipy.sparse.issparse(net_flux):
net_flux = net_flux.tolil()
else:
net_flux = scipy.sparse.lil_matrix(net_flux)
G = scipy.sparse.find(net_flux)
N = net_flux.shape[0]
b = np.zeros(N)
b[sources] = 1000
pi = np.zeros(N, dtype=int)
pi[sources] = -1
U = []
Q = sorted(list(range(N)), key=lambda v: b[v])
for v in sinks:
Q.remove(v)
# run the Dijkstra algorithm
while len(Q) > 0:
w = Q.pop()
U.append(w)
# relax
for v in G[1][np.where(G[0] == w)]:
if b[v] < min(b[w], net_flux[w, v]):
b[v] = min(b[w], net_flux[w, v])
pi[v] = w
Q = sorted(Q, key=lambda v: b[v])
logger.info("Searched %s nodes", len(U) + len(sinks))
return pi, b
def _back_relax(s, b, pi, NFlux):
r"""
    Updates a Dijkstra calculation once a bottleneck is cut, quickly
    recalculating only the cost of nodes that change due to the cut.
Cuts & relaxes the B-side (sink side) of a cut edge (b2) to source from the
adjacent node with the most flux flowing to it. If there are no
adjacent source nodes, cuts the node out of the graph and relaxes the
nodes that were getting fed by b2 (the cut node).
Parameters
----------
s : int
the node b2
b : array_like
the cost function
pi : array_like
the backtrack array, a list such that pi[i] = source node of node i
NFlux : sparse matrix
Net flux matrix
Returns
-------
b : array_like
updated cost function
pi : array_like
updated backtrack array
NFlux : sparse matrix
net flux matrix
See Also
--------
DijkstraTopPaths : child function
`DijkstraTopPaths` is probably the function you want to call to find
paths through an MSM network. This is a utility function called by
`DijkstraTopPaths`, but may be useful in some specific cases
"""
G = scipy.sparse.find(NFlux)
if len(G[0][np.where(G[1] == s)]) > 0:
# For all nodes connected upstream to the node `s` in question,
# Re-source that node from the best option (lowest cost) one level lower
# Notation: j is node one level below, s is the one being considered
b[s] = 0 # set the cost to zero
for j in G[0][np.where(G[1] == s)]: # for each upstream node
if b[s] < min(b[j], NFlux[j, s]): # if that node has a lower cost
b[s] = min(b[j], NFlux[j, s]) # then set the cost to that node
pi[s] = j # and the source comes from there
# if there are no nodes connected to this one, then we need to go one
# level up and work there first
else:
for sprime in G[1][np.where(G[0] == s)]:
NFlux[s, sprime] = 0
b, pi, NFlux = _back_relax(sprime, b, pi, NFlux)
return b, pi, NFlux
def _backtrack(B, b, pi, NFlux):
"""
Works backwards to pull out a path from pi, where pi is a list such that
    pi[i] = source node of node i. Begins at the node in B with the largest
    incoming flux.
Parameters
----------
B : array_like, int
Indices of sink states (state B)
b : array_like
the cost function
pi : array_like
the backtrack array, a list such that pi[i] = source node of node i
NFlux : sparse matrix
net flux matrix
Returns
-------
bestpath : list
the list of nodes forming the highest flux path
bottleneck : tuple
        a tuple of nodes, between which is the bottleneck
bestflux : float
the flux through `bestpath`
See Also
--------
DijkstraTopPaths : child function
`DijkstraTopPaths` is probably the function you want to call to find
paths through an MSM network. This is a utility function called by
`DijkstraTopPaths`, but may be useful in some specific cases
"""
# Select starting location
bestflux = 0
for Bnode in B:
path = [Bnode]
NotDone = True
while NotDone:
if pi[path[-1]] == -1:
break
else:
path.append(pi[path[-1]])
path.reverse()
bottleneck, flux = find_path_bottleneck(path, NFlux)
logger.debug('In Backtrack: Flux %s, bestflux %s', flux, bestflux)
if flux > bestflux:
bestpath = path
bestbottleneck = bottleneck
bestflux = flux
if flux == 0:
bestpath = []
        bestbottleneck = (np.nan, np.nan)  # keep the name returned below defined when no path is found
bestflux = 0
return (bestpath, bestbottleneck, bestflux)
def find_path_bottleneck(path, net_flux):
"""
Simply finds the bottleneck along a path.
This is the point at which the cost function first goes up along the path,
backtracking from B to A.
Parameters
----------
path : list
a list of nodes along the path of interest
net_flux : matrix
the net flux matrix
Returns
-------
bottleneck : tuple
a tuple of the nodes on either end of the bottleneck
flux : float
the flux at the bottleneck
See Also
--------
find_top_paths : child function
`find_top_paths` is probably the function you want to call to find
paths through an MSM network. This is a utility function called by
`find_top_paths`, but may be useful in some specific cases.
References
----------
.. [1] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory
for Markov jump processes. Multiscale Model. Simul. 7, 1192–1219
(2009).
.. [2] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding
pathways in network models of coarse-grained protein dynamics. J.
Chem. Phys. 130, 205102 (2009).
"""
if scipy.sparse.issparse(net_flux):
net_flux = net_flux.tolil()
flux = 100000. # initialize as large value
for i in range(len(path) - 1):
if net_flux[path[i], path[i + 1]] < flux:
flux = net_flux[path[i], path[i + 1]]
b1 = path[i]
b2 = path[i + 1]
return (b1, b2), flux
def calculate_fluxes(sources, sinks, tprob, populations=None, committors=None):
"""
Compute the transition path theory flux matrix.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
tprob : mm_matrix
The transition matrix.
Returns
    -------
fluxes : mm_matrix
The flux matrix
Optional Parameters
-------------------
populations : nd_array, float
The equilibrium populations, if not provided is re-calculated
committors : nd_array, float
The committors associated with `sources`, `sinks`, and `tprob`.
If not provided, is calculated from scratch. If provided, `sources`
and `sinks` are ignored.
References
----------
.. [1] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory
for Markov jump processes. Multiscale Model. Simul. 7, 1192–1219
(2009).
.. [2] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding
pathways in network models of coarse-grained protein dynamics. J.
Chem. Phys. 130, 205102 (2009).
"""
sources, sinks = _check_sources_sinks(sources, sinks)
msm_analysis.check_transition(tprob)
if scipy.sparse.issparse(tprob):
dense = False
else:
dense = True
# check if we got the populations
if populations is None:
eigens = msm_analysis.get_eigenvectors(tprob, 1)
if np.count_nonzero(np.imag(eigens[1][:, 0])) != 0:
raise ValueError('First eigenvector has imaginary components')
populations = np.real(eigens[1][:, 0])
# check if we got the committors
if committors is None:
committors = calculate_committors(sources, sinks, tprob)
# perform the flux computation
Indx, Indy = tprob.nonzero()
n = tprob.shape[0]
if dense:
X = np.zeros((n, n))
Y = np.zeros((n, n))
X[(np.arange(n), np.arange(n))] = populations * (1.0 - committors)
Y[(np.arange(n), np.arange(n))] = committors
else:
X = scipy.sparse.lil_matrix((n, n))
Y = scipy.sparse.lil_matrix((n, n))
X.setdiag(populations * (1.0 - committors))
Y.setdiag(committors)
if dense:
fluxes = np.dot(np.dot(X, tprob), Y)
fluxes[(np.arange(n), np.arange(n))] = np.zeros(n)
else:
fluxes = (X.tocsr().dot(tprob.tocsr())).dot(Y.tocsr())
# This should be the same as below, but it's a bit messy...
#fluxes = np.dot(np.dot(X.tocsr(), tprob.tocsr()), Y.tocsr())
fluxes = fluxes.tolil()
fluxes.setdiag(np.zeros(n))
return fluxes
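# A minimal sketch of the quantity computed above (toy numbers, not from any real model):
# for i != j the TPT flux is f_ij = pi_i * (1 - q_i) * T_ij * q_j, where pi are the
# equilibrium populations and q the forward committors; the diagonal is zeroed. The chain
# below is doubly stochastic, so its stationary distribution is uniform and a single
# entry can be checked against the elementwise formula.
def _sketch_calculate_fluxes():
    T = np.array([[0.9, 0.1, 0.0],
                  [0.1, 0.8, 0.1],
                  [0.0, 0.1, 0.9]])
    pi = np.ones(3) / 3.0
    q = calculate_committors([0], [2], T)
    f = calculate_fluxes([0], [2], T, populations=pi, committors=q)
    assert np.isclose(f[0, 1], pi[0] * (1.0 - q[0]) * T[0, 1] * q[1])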
def calculate_net_fluxes(sources, sinks, tprob, populations=None, committors=None):
"""
Computes the transition path theory net flux matrix.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
tprob : mm_matrix
The transition matrix.
Returns
    -------
net_fluxes : mm_matrix
The net flux matrix
Optional Parameters
-------------------
populations : nd_array, float
The equilibrium populations, if not provided is re-calculated
committors : nd_array, float
The committors associated with `sources`, `sinks`, and `tprob`.
If not provided, is calculated from scratch. If provided, `sources`
and `sinks` are ignored.
References
----------
.. [1] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory
for Markov jump processes. Multiscale Model. Simul. 7, 1192–1219
(2009).
.. [2] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding
pathways in network models of coarse-grained protein dynamics. J.
Chem. Phys. 130, 205102 (2009).
"""
sources, sinks = _check_sources_sinks(sources, sinks)
msm_analysis.check_transition(tprob)
if scipy.sparse.issparse(tprob):
dense = False
else:
dense = True
n = tprob.shape[0]
flux = calculate_fluxes(sources, sinks, tprob, populations, committors)
ind = flux.nonzero()
if dense:
net_flux = np.zeros((n, n))
else:
net_flux = scipy.sparse.lil_matrix((n, n))
for k in range(len(ind[0])):
i, j = ind[0][k], ind[1][k]
forward = flux[i, j]
reverse = flux[j, i]
net_flux[i, j] = max(0, forward - reverse)
return net_flux
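# Sketch of the relation between the two flux functions above, on the same toy chain as
# before: the net flux keeps only the positive part of the antisymmetric combination,
# net_flux[i, j] = max(0, flux[i, j] - flux[j, i]).
def _sketch_calculate_net_fluxes():
    T = np.array([[0.9, 0.1, 0.0],
                  [0.1, 0.8, 0.1],
                  [0.0, 0.1, 0.9]])
    pi = np.ones(3) / 3.0
    q = calculate_committors([0], [2], T)
    flux = calculate_fluxes([0], [2], T, populations=pi, committors=q)
    net_flux = calculate_net_fluxes([0], [2], T, populations=pi, committors=q)
    for i in range(3):
        for j in range(3):
            assert np.isclose(net_flux[i, j], max(0.0, flux[i, j] - flux[j, i]))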
###############################################################################
# MFPT & Committor Finding Functions
#
def calculate_ensemble_mfpt(sources, sinks, tprob, lag_time):
"""
Calculates the average 'Folding Time' of an MSM defined by T and a LagTime.
The Folding Time is the average of the MFPTs (to F) of all the states in U.
Note here 'Folding Time' is defined as the avg MFPT of {U}, to {F}.
Consider this carefully. This is probably NOT the experimental folding time!
Parameters
----------
sources : array, int
indices of the source states
sinks : array, int
indices of the sink states
tprob : matrix
transition probability matrix
lag_time : float
the lag time used to create T (dictates units of the answer)
Returns
-------
avg : float
the average of the MFPTs
std : float
the standard deviation of the MFPTs
References
----------
.. [1] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory
for Markov jump processes. Multiscale Model. Simul. 7, 1192–1219
(2009).
.. [2] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding
pathways in network models of coarse-grained protein dynamics. J.
Chem. Phys. 130, 205102 (2009).
"""
sources, sinks = _check_sources_sinks(sources, sinks)
msm_analysis.check_transition(tprob)
X = calculate_mfpt(sinks, tprob, lag_time)
times = np.zeros(len(sources))
for i in range(len(sources)):
times[i] = X[sources[i]]
return np.average(times), np.std(times)
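# Sketch with a single source state (hypothetical 3-state chain): with only one state in
# `sources` the ensemble average is just that state's MFPT to the sinks and the spread is
# zero. Copies of the matrix are passed because calculate_mfpt modifies the sink rows of
# a dense input in place.
def _sketch_calculate_ensemble_mfpt():
    T = np.array([[0.9, 0.1, 0.0],
                  [0.1, 0.8, 0.1],
                  [0.0, 0.1, 0.9]])
    avg, std = calculate_ensemble_mfpt([0], [2], T.copy(), lag_time=1.0)
    assert np.isclose(avg, calculate_mfpt([2], T.copy(), lag_time=1.0)[0])
    assert np.isclose(std, 0.0)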
def calculate_avg_TP_time(sources, sinks, tprob, lag_time):
"""
Calculates the Average Transition Path Time for MSM with: T, LagTime.
The TPTime is the average of the MFPTs (to F) of all the states
immediately adjacent to U, with the U states effectively deleted.
Note here 'TP Time' is defined as the avg MFPT of all adjacent states to {U},
to {F}, ignoring {U}.
Consider this carefully.
Parameters
----------
sources : array, int
indices of the unfolded states
sinks : array, int
indices of the folded states
tprob : matrix
transition probability matrix
lag_time : float
the lag time used to create T (dictates units of the answer)
Returns
-------
avg : float
the average of the MFPTs
std : float
the standard deviation of the MFPTs
References
----------
.. [1] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory
for Markov jump processes. Multiscale Model. Simul. 7, 1192–1219
(2009).
.. [2] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding
pathways in network models of coarse-grained protein dynamics. J.
Chem. Phys. 130, 205102 (2009).
"""
sources, sinks = _check_sources_sinks(sources, sinks)
msm_analysis.check_transition(tprob)
n = tprob.shape[0]
if scipy.sparse.issparse(tprob):
T = tprob.tolil()
P = scipy.sparse.lil_matrix((n, n))
    else:
        T = tprob.copy()
        P = np.zeros((n, n))
for u in sources:
for i in range(n):
if i not in sources:
P[u, i] = T[u, i]
for u in sources:
T[u, :] = np.zeros(n)
T[:, u] = 0
for i in sources:
N = T[i, :].sum()
T[i, :] = T[i, :] / N
X = calculate_mfpt(sinks, tprob, lag_time)
TP = P * X.T
TPtimes = []
for time in TP:
if time != 0:
TPtimes.append(time)
return np.average(TPtimes), np.std(TPtimes)
def calculate_mfpt(sinks, tprob, lag_time=1.):
"""
Gets the Mean First Passage Time (MFPT) for all states to a *set*
of sinks.
Parameters
----------
sinks : array, int
indices of the sink states
tprob : matrix
transition probability matrix
    lag_time : float
the lag time used to create T (dictates units of the answer)
Returns
-------
MFPT : array, float
MFPT in time units of LagTime, for each state (in order of state index)
See Also
--------
calculate_all_to_all_mfpt : function
A more efficient way to calculate all the MFPTs in a network
References
----------
.. [1] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory
for Markov jump processes. Multiscale Model. Simul. 7, 1192–1219
(2009).
.. [2] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding
pathways in network models of coarse-grained protein dynamics. J.
Chem. Phys. 130, 205102 (2009).
"""
sinks = _ensure_iterable(sinks)
msm_analysis.check_transition(tprob)
n = tprob.shape[0]
if scipy.sparse.isspmatrix(tprob):
tprob = tprob.tolil()
for state in sinks:
tprob[state, :] = 0.0
tprob[state, state] = 2.0
if scipy.sparse.isspmatrix(tprob):
tprob = tprob - scipy.sparse.eye(n, n)
tprob = tprob.tocsr()
else:
tprob = tprob - np.eye(n)
RHS = -1 * np.ones(n)
for state in sinks:
RHS[state] = 0.0
if scipy.sparse.isspmatrix(tprob):
MFPT = lag_time * scipy.sparse.linalg.spsolve(tprob, RHS)
else:
MFPT = lag_time * np.linalg.solve(tprob, RHS)
return MFPT
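# Illustrative usage on a hypothetical 3-state chain: MFPTs to a single absorbing sink.
# The sink entry of the returned array is zero by construction and every value carries
# the units of `lag_time`. A copy is passed because the dense branch above zeroes the
# sink rows of the array it is given.
def _sketch_calculate_mfpt():
    T = np.array([[0.9, 0.1, 0.0],
                  [0.1, 0.8, 0.1],
                  [0.0, 0.1, 0.9]])
    mfpt = calculate_mfpt([2], T.copy(), lag_time=1.0)
    assert np.isclose(mfpt[2], 0.0)   # the sink itself has zero first passage time
    assert mfpt[0] > mfpt[1] > 0.0    # farther from the sink means a longer MFPT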
def calculate_all_to_all_mfpt(tprob, populations=None):
"""
Calculate the all-states by all-state matrix of mean first passage
times.
This uses the fundamental matrix formalism, and should be much faster
than GetMFPT for calculating many MFPTs.
Parameters
----------
tprob : matrix
transition probability matrix
populations : array_like, float
optional argument, the populations of each state. If not supplied,
it will be computed from scratch
Returns
-------
MFPT : array, float
MFPT in time units of LagTime, square array for MFPT from i -> j
See Also
--------
GetMFPT : function
for calculating a subset of the MFPTs, with functionality for including
a set of sinks
References
----------
.. [1] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory
for Markov jump processes. Multiscale Model. Simul. 7, 1192–1219
(2009).
.. [2] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding
pathways in network models of coarse-grained protein dynamics. J.
Chem. Phys. 130, 205102 (2009).
"""
msm_analysis.check_transition(tprob)
if scipy.sparse.issparse(tprob):
tprob = tprob.toarray()
logger.warning('calculate_all_to_all_mfpt does not support sparse linear algebra')
if populations is None:
eigens = msm_analysis.get_eigenvectors(tprob, 1)
if np.count_nonzero(np.imag(eigens[1][:, 0])) != 0:
raise ValueError('First eigenvector has imaginary parts')
populations = np.real(eigens[1][:, 0])
# ensure that tprob is a transition matrix
msm_analysis.check_transition(tprob)
num_states = len(populations)
if tprob.shape[0] != num_states:
raise ValueError("Shape of tprob and populations vector don't match")
eye = np.transpose(np.matrix(np.ones(num_states)))
limiting_matrix = eye * populations
#z = scipy.linalg.inv(scipy.sparse.eye(num_states, num_states) - (tprob - limiting_matrix))
z = scipy.linalg.inv(np.eye(num_states) - (tprob - limiting_matrix))
    # mfpt[i,j] = (z[j,j] - z[i,j]) / pi[j]
mfpt = -z
for j in range(num_states):
mfpt[:, j] += z[j, j]
mfpt[:, j] /= populations[j]
return mfpt
def calculate_committors(sources, sinks, tprob):
"""
Get the forward committors of the reaction sources -> sinks.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
tprob : mm_matrix
The transition matrix.
Returns
-------
Q : array_like
The forward committors for the reaction U -> F.
References
----------
.. [1] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory
for Markov jump processes. Multiscale Model. Simul. 7, 1192–1219
(2009).
.. [2] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding
pathways in network models of coarse-grained protein dynamics. J.
Chem. Phys. 130, 205102 (2009).
"""
sources, sinks = _check_sources_sinks(sources, sinks)
msm_analysis.check_transition(tprob)
if scipy.sparse.issparse(tprob):
dense = False
tprob = tprob.tolil()
else:
dense = True
# construct the committor problem
n = tprob.shape[0]
if dense:
T = np.eye(n) - tprob
else:
T = scipy.sparse.eye(n, n, 0, format='lil') - tprob
T = T.tolil()
for a in sources:
T[a, :] = 0.0 # np.zeros(n)
T[:, a] = 0.0
T[a, a] = 1.0
for b in sinks:
T[b, :] = 0.0 # np.zeros(n)
T[:, b] = 0.0
T[b, b] = 1.0
IdB = np.zeros(n)
IdB[sinks] = 1.0
if dense:
RHS = np.dot(tprob, IdB)
else:
RHS = tprob.dot(IdB)
# This should be the same as below
#RHS = tprob * IdB
RHS[sources] = 0.0
RHS[sinks] = 1.0
# solve for the committors
if dense == False:
Q = scipy.sparse.linalg.spsolve(T.tocsr(), RHS)
else:
Q = np.linalg.solve(T, RHS)
epsilon = 0.001
assert np.all(Q <= 1.0 + epsilon)
assert np.all(Q >= 0.0 - epsilon)
return Q
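# Usage sketch on a hypothetical symmetric 3-state chain with source 0 and sink 2:
# committors are 0 at the source, 1 at the sink, and by symmetry 0.5 at the middle state.
def _sketch_calculate_committors():
    T = np.array([[0.9, 0.1, 0.0],
                  [0.1, 0.8, 0.1],
                  [0.0, 0.1, 0.9]])
    q = calculate_committors([0], [2], T)
    assert np.allclose(q, [0.0, 0.5, 1.0])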
######################################################################
# Functions for computing hub scores, conditional committors, and
# related quantities
#
def calculate_fraction_visits(tprob, waypoint, source, sink, return_cond_Q=False):
"""
Calculate the fraction of times a walker on `tprob` going from `sources`
to `sinks` will travel through the set of states `waypoints` en route.
Computes the conditional committors q^{ABC^+} and uses them to find the
    fraction of paths mentioned above. The conditional committors can be
    returned as well by passing `return_cond_Q=True`.
    Note that in the notation of Dickson et al. this computes h_c(A,B), with
sources = A
sinks = B
waypoint = C
Parameters
----------
tprob : matrix
The transition probability matrix
waypoint : int
The index of the intermediate state
    source : int
        The index of the source state (a length-1 array is also accepted)
    sink : int
        The index of the sink state (a length-1 array is also accepted)
return_cond_Q : bool
Whether or not to return the conditional committors
Returns
-------
fraction_paths : float
The fraction of times a walker going from `sources` -> `sinks` stops
by `waypoints` on its way.
cond_Q : nd_array, float (optional)
Optionally returned (`return_cond_Q`)
See Also
--------
calculate_hub_score : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
calculate_all_hub_scores : function
Wrapper to compute all the hub scores in a network.
Notes
-----
Employs dense linear algebra,
memory use scales as N^2
cycle use scales as N^3
References
----------
..[1] Dickson & Brooks (2012), J. Chem. Theory Comput.,
Article ASAP DOI: 10.1021/ct300537s
"""
# do some typechecking - we need to be sure that the lumped sources are in
# the second to last row, and the lumped sinks are in the last row
# check `tprob`
msm_analysis.check_transition(tprob)
if type(tprob) != np.ndarray:
try:
tprob = tprob.todense()
except AttributeError as e:
            raise TypeError('Argument `tprob` must be convertible to a dense '
                            'numpy array. \n%s' % e)
# typecheck
for data in [source, sink, waypoint]:
if type(data) == int:
pass
        elif hasattr(data, '__len__'):
if len(data) == 1:
data = data[0]
else:
raise TypeError('Arguments source/sink/waypoint must be an int')
if (source == waypoint) or (sink == waypoint) or (sink == source):
raise ValueError('source, sink, waypoint must all be disjoint!')
N = tprob.shape[0]
Q = calculate_committors([source], [sink], tprob)
    # permute the transition matrix into canonical form - send waypoint to the
    # last row, and source + sink just before it
Bsink_indices = [source, sink, waypoint]
perm = np.arange(N)
perm = np.delete(perm, Bsink_indices)
perm = np.append(perm, Bsink_indices)
T = MSMLib.permute_mat(tprob, perm)
# extract P, R
n = N - len(Bsink_indices)
P = T[:n, :n]
R = T[:n, n:]
# calculate the conditional committors ( B = N*R ), B[i,j] is the prob
# state i ends in j, where j runs over the source + sink + waypoint
# (waypoint is position -1)
B = np.dot(np.linalg.inv(np.eye(n) - P), R)
# Not sure if this is sparse or not...
# add probs for the sinks, waypoint / b[i] is P( i --> {C & not A, B} )
b = np.append(B[:, -1].flatten(), [0.0] * (len(Bsink_indices) - 1) + [1.0])
cond_Q = b * Q[waypoint]
epsilon = 1e-6 # some numerical give, hard-coded
assert cond_Q.shape == (N,)
assert np.all(cond_Q <= 1.0 + epsilon)
assert np.all(cond_Q >= 0.0 - epsilon)
assert np.all(cond_Q <= Q[perm] + epsilon)
# finally, calculate the fraction of paths h_C(A,B) (eq. 7 in [1])
fraction_paths = np.sum(T[-3, :] * cond_Q) / np.sum(T[-3, :] * Q[perm])
assert fraction_paths <= 1.0
assert fraction_paths >= 0.0
if return_cond_Q:
cond_Q = cond_Q[np.argsort(perm)] # put back in orig. order
return fraction_paths, cond_Q
else:
return fraction_paths
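# Sketch on a hypothetical linear 4-state chain 0 - 1 - 2 - 3: every reactive path from
# state 0 to state 3 has to pass through state 1, so the fraction of visits h_1(0, 3)
# should come out as 1.
def _sketch_calculate_fraction_visits():
    T = np.array([[0.9, 0.1, 0.0, 0.0],
                  [0.1, 0.8, 0.1, 0.0],
                  [0.0, 0.1, 0.8, 0.1],
                  [0.0, 0.0, 0.1, 0.9]])
    frac = calculate_fraction_visits(T, waypoint=1, source=0, sink=3)
    assert np.isclose(frac, 1.0)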
def calculate_hub_score(tprob, waypoint):
"""
Calculate the hub score for the states `waypoint`.
The "hub score" is a measure of how well traveled a certain state or
set of states is in a network. Specifically, it is the fraction of
times that a walker visits a state en route from some state A to another
state B, averaged over all combinations of A and B.
Parameters
----------
tprob : matrix
The transition probability matrix
    waypoint : int
        The index of the intermediate state (a length-1 list/array is also accepted)
Returns
-------
Hc : float
The hub score for the state composed of `waypoints`
See Also
--------
calculate_fraction_visits : function
Calculate the fraction of times a state is visited on pathways going
from a set of "sources" to a set of "sinks".
calculate_all_hub_scores : function
A more efficient way to compute the hub score for every state in a
network.
Notes
-----
Employs dense linear algebra,
memory use scales as N^2
cycle use scales as N^5
References
----------
..[1] Dickson & Brooks (2012), J. Chem. Theory Comput.,
Article ASAP DOI: 10.1021/ct300537s
"""
msm_analysis.check_transition(tprob)
# typecheck
if type(waypoint) != int:
if hasattr(waypoint, '__len__'):
if len(waypoint) == 1:
waypoint = waypoint[0]
else:
raise ValueError('Must pass waypoints as int or list/array of ints')
else:
raise ValueError('Must pass waypoints as int or list/array of ints')
# find out which states to include in A, B (i.e. everything but C)
N = tprob.shape[0]
states_to_include = list(range(N))
states_to_include.remove(waypoint)
# calculate the hub score
Hc = 0.0
for s1 in states_to_include:
for s2 in states_to_include:
if (s1 != s2) and (s1 != waypoint) and (s2 != waypoint):
Hc += calculate_fraction_visits(tprob, waypoint,
s1, s2, return_cond_Q=False)
Hc /= ((N - 1) * (N - 2))
return Hc
def calculate_all_hub_scores(tprob):
"""
Calculate the hub scores for all states in a network defined by `tprob`.
The "hub score" is a measure of how well traveled a certain state or
set of states is in a network. Specifically, it is the fraction of
times that a walker visits a state en route from some state A to another
state B, averaged over all combinations of A and B.
Parameters
----------
tprob : matrix
The transition probability matrix
Returns
-------
Hc_array : nd_array, float
The hub score for each state in `tprob`
See Also
--------
calculate_fraction_visits : function
Calculate the fraction of times a state is visited on pathways going
from a set of "sources" to a set of "sinks".
calculate_hub_score : function
A function that computes just one hub score, can compute the hub score
for a set of states.
Notes
-----
Employs dense linear algebra,
memory use scales as N^2
cycle use scales as N^6
References
----------
..[1] Dickson & Brooks (2012), J. Chem. Theory Comput.,
Article ASAP DOI: 10.1021/ct300537s
"""
N = tprob.shape[0]
states = list(range(N))
# calculate the hub score
Hc_array = np.zeros(N)
    # loop over each state and compute its hub score
for i, waypoint in enumerate(states):
Hc = 0.0
# now loop over all combinations of sources/sinks and average
for s1 in states:
if waypoint != s1:
for s2 in states:
if s1 != s2:
if waypoint != s2:
Hc += calculate_fraction_visits(tprob, waypoint, s1, s2)
# store the hub score in an array
Hc_array[i] = Hc / ((N - 1) * (N - 2))
return Hc_array
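# Consistency sketch on a hypothetical 4-state chain: each entry of the vector returned
# above is the same quantity computed by calculate_hub_score for that waypoint, since
# both average calculate_fraction_visits over every ordered source/sink pair that
# excludes the waypoint.
def _sketch_hub_scores():
    T = np.array([[0.9, 0.1, 0.0, 0.0],
                  [0.1, 0.8, 0.1, 0.0],
                  [0.0, 0.1, 0.8, 0.1],
                  [0.0, 0.0, 0.1, 0.9]])
    all_scores = calculate_all_hub_scores(T)
    for state in range(4):
        assert np.isclose(all_scores[state], calculate_hub_score(T, state))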
|
msmbuilder/msmbuilder-legacy
|
MSMBuilder/tpt.py
|
Python
|
gpl-2.0
| 36,692
|
[
"MDTraj"
] |
ac8943cae6fb0639d7a6157f749d3f2c52ac4992a9eade08f6c8500cbfc4fe82
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import re
import pytest
import tarfile
import tempfile
import time
from io import BytesIO, StringIO
from units.compat.mock import MagicMock
from ansible import context
from ansible.errors import AnsibleError
from ansible.galaxy import api as galaxy_api
from ansible.galaxy.api import CollectionVersionMetadata, GalaxyAPI, GalaxyError
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.six.moves.urllib import error as urllib_error
from ansible.utils import context_objects as co
from ansible.utils.display import Display
@pytest.fixture(autouse='function')
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
# Required to initialise the GalaxyAPI object
context.CLIARGS._store = {'ignore_certs': False}
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(tmp_path_factory):
''' Creates a collection artifact tarball that is ready to be published '''
output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Output'))
tar_path = os.path.join(output_dir, 'namespace-collection-v1.0.0.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(b"\x00\x01\x02\x03")
tar_info = tarfile.TarInfo('test')
tar_info.size = 4
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
yield tar_path
def get_test_galaxy_api(url, version, token_ins=None, token_value=None):
token_value = token_value or "my token"
token_ins = token_ins or GalaxyToken(token_value)
api = GalaxyAPI(None, "test", url)
    # Warning, this doesn't test g_connect() because _available_api_versions is set here. That means
# that urls for v2 servers have to append '/api/' themselves in the input data.
api._available_api_versions = {version: '%s' % version}
api.token = token_ins
return api
def test_api_no_auth():
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
actual = {}
api._add_auth_token(actual, "")
assert actual == {}
def test_api_no_auth_but_required():
expected = "No access token or username set. A token can be set with --api-key, with 'ansible-galaxy login', " \
"or set in ansible.cfg."
with pytest.raises(AnsibleError, match=expected):
GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")._add_auth_token({}, "", required=True)
def test_api_token_auth():
token = GalaxyToken(token=u"my_token")
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
api._add_auth_token(actual, "", required=True)
assert actual == {'Authorization': 'Token my_token'}
def test_api_token_auth_with_token_type(monkeypatch):
token = KeycloakToken(auth_url='https://api.test/')
mock_token_get = MagicMock()
mock_token_get.return_value = 'my_token'
monkeypatch.setattr(token, 'get', mock_token_get)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
api._add_auth_token(actual, "", token_type="Bearer", required=True)
assert actual == {'Authorization': 'Bearer my_token'}
def test_api_token_auth_with_v3_url(monkeypatch):
token = KeycloakToken(auth_url='https://api.test/')
mock_token_get = MagicMock()
mock_token_get.return_value = 'my_token'
monkeypatch.setattr(token, 'get', mock_token_get)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
api._add_auth_token(actual, "https://galaxy.ansible.com/api/v3/resource/name", required=True)
assert actual == {'Authorization': 'Bearer my_token'}
def test_api_token_auth_with_v2_url():
token = GalaxyToken(token=u"my_token")
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
    # Add 'v3' to a random part of the URL; only 'v2' appears as a full URI path segment, so Token auth is still used.
api._add_auth_token(actual, "https://galaxy.ansible.com/api/v2/resourcev3/name", required=True)
assert actual == {'Authorization': 'Token my_token'}
def test_api_basic_auth_password():
token = BasicAuthToken(username=u"user", password=u"pass")
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
api._add_auth_token(actual, "", required=True)
assert actual == {'Authorization': 'Basic dXNlcjpwYXNz'}
def test_api_basic_auth_no_password():
token = BasicAuthToken(username=u"user")
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
api._add_auth_token(actual, "", required=True)
assert actual == {'Authorization': 'Basic dXNlcjo='}
def test_api_dont_override_auth_header():
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
actual = {'Authorization': 'Custom token'}
api._add_auth_token(actual, "", required=True)
assert actual == {'Authorization': 'Custom token'}
def test_initialise_galaxy(monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(u'{"available_versions":{"v1":"v1/"}}'),
StringIO(u'{"token":"my token"}'),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
actual = api.authenticate("github_token")
assert len(api.available_api_versions) == 2
assert api.available_api_versions['v1'] == u'v1/'
assert api.available_api_versions['v2'] == u'v2/'
assert actual == {u'token': u'my token'}
assert mock_open.call_count == 2
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
assert mock_open.mock_calls[1][1][0] == 'https://galaxy.ansible.com/api/v1/tokens/'
assert 'ansible-galaxy' in mock_open.mock_calls[1][2]['http_agent']
assert mock_open.mock_calls[1][2]['data'] == 'github_token=github_token'
def test_initialise_galaxy_with_auth(monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(u'{"available_versions":{"v1":"v1/"}}'),
StringIO(u'{"token":"my token"}'),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=GalaxyToken(token='my_token'))
actual = api.authenticate("github_token")
assert len(api.available_api_versions) == 2
assert api.available_api_versions['v1'] == u'v1/'
assert api.available_api_versions['v2'] == u'v2/'
assert actual == {u'token': u'my token'}
assert mock_open.call_count == 2
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
assert mock_open.mock_calls[1][1][0] == 'https://galaxy.ansible.com/api/v1/tokens/'
assert 'ansible-galaxy' in mock_open.mock_calls[1][2]['http_agent']
assert mock_open.mock_calls[1][2]['data'] == 'github_token=github_token'
def test_initialise_automation_hub(monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(u'{"available_versions":{"v2": "v2/", "v3":"v3/"}}'),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
token = KeycloakToken(auth_url='https://api.test/')
mock_token_get = MagicMock()
mock_token_get.return_value = 'my_token'
monkeypatch.setattr(token, 'get', mock_token_get)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
assert len(api.available_api_versions) == 2
assert api.available_api_versions['v2'] == u'v2/'
assert api.available_api_versions['v3'] == u'v3/'
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
assert mock_open.mock_calls[0][2]['headers'] == {'Authorization': 'Bearer my_token'}
def test_initialise_unknown(monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = [
urllib_error.HTTPError('https://galaxy.ansible.com/api/', 500, 'msg', {}, StringIO(u'{"msg":"raw error"}')),
urllib_error.HTTPError('https://galaxy.ansible.com/api/api/', 500, 'msg', {}, StringIO(u'{"msg":"raw error"}')),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=GalaxyToken(token='my_token'))
expected = "Error when finding available api versions from test (%s) (HTTP Code: 500, Message: msg)" \
% api.api_server
with pytest.raises(AnsibleError, match=re.escape(expected)):
api.authenticate("github_token")
def test_get_available_api_versions(monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(u'{"available_versions":{"v1":"v1/","v2":"v2/"}}'),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
actual = api.available_api_versions
assert len(actual) == 2
assert actual['v1'] == u'v1/'
assert actual['v2'] == u'v2/'
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
def test_publish_collection_missing_file():
fake_path = u'/fake/ÅÑŚÌβŁÈ/path'
expected = to_native("The collection path specified '%s' does not exist." % fake_path)
api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v2")
with pytest.raises(AnsibleError, match=expected):
api.publish_collection(fake_path)
def test_publish_collection_not_a_tarball():
expected = "The collection path specified '{0}' is not a tarball, use 'ansible-galaxy collection build' to " \
"create a proper release artifact."
api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v2")
with tempfile.NamedTemporaryFile(prefix=u'ÅÑŚÌβŁÈ') as temp_file:
temp_file.write(b"\x00")
temp_file.flush()
with pytest.raises(AnsibleError, match=expected.format(to_native(temp_file.name))):
api.publish_collection(temp_file.name)
def test_publish_collection_unsupported_version():
expected = "Galaxy action publish_collection requires API versions 'v2, v3' but only 'v1' are available on test " \
"https://galaxy.ansible.com/api/"
api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v1")
with pytest.raises(AnsibleError, match=expected):
api.publish_collection("path")
@pytest.mark.parametrize('api_version, collection_url', [
('v2', 'collections'),
('v3', 'artifacts/collections'),
])
def test_publish_collection(api_version, collection_url, collection_artifact, monkeypatch):
api = get_test_galaxy_api("https://galaxy.ansible.com/api/", api_version)
mock_call = MagicMock()
mock_call.return_value = {'task': 'http://task.url/'}
monkeypatch.setattr(api, '_call_galaxy', mock_call)
actual = api.publish_collection(collection_artifact)
assert actual == 'http://task.url/'
assert mock_call.call_count == 1
assert mock_call.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/%s/%s/' % (api_version, collection_url)
assert mock_call.mock_calls[0][2]['headers']['Content-length'] == len(mock_call.mock_calls[0][2]['args'])
assert mock_call.mock_calls[0][2]['headers']['Content-type'].startswith(
'multipart/form-data; boundary=--------------------------')
assert mock_call.mock_calls[0][2]['args'].startswith(b'--------------------------')
assert mock_call.mock_calls[0][2]['method'] == 'POST'
assert mock_call.mock_calls[0][2]['auth_required'] is True
@pytest.mark.parametrize('api_version, collection_url, response, expected', [
('v2', 'collections', {},
'Error when publishing collection to test (%s) (HTTP Code: 500, Message: msg Code: Unknown)'),
('v2', 'collections', {
'message': u'Galaxy error messäge',
'code': 'GWE002',
}, u'Error when publishing collection to test (%s) (HTTP Code: 500, Message: Galaxy error messäge Code: GWE002)'),
('v3', 'artifact/collections', {},
'Error when publishing collection to test (%s) (HTTP Code: 500, Message: msg Code: Unknown)'),
('v3', 'artifact/collections', {
'errors': [
{
'code': 'conflict.collection_exists',
'detail': 'Collection "mynamespace-mycollection-4.1.1" already exists.',
'title': 'Conflict.',
'status': '400',
},
{
'code': 'quantum_improbability',
'title': u'Rändom(?) quantum improbability.',
'source': {'parameter': 'the_arrow_of_time'},
'meta': {'remediation': 'Try again before'},
},
],
}, u'Error when publishing collection to test (%s) (HTTP Code: 500, Message: Collection '
u'"mynamespace-mycollection-4.1.1" already exists. Code: conflict.collection_exists), (HTTP Code: 500, '
u'Message: Rändom(?) quantum improbability. Code: quantum_improbability)')
])
def test_publish_failure(api_version, collection_url, response, expected, collection_artifact, monkeypatch):
api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version)
expected_url = '%s/api/%s/%s' % (api.api_server, api_version, collection_url)
mock_open = MagicMock()
mock_open.side_effect = urllib_error.HTTPError(expected_url, 500, 'msg', {},
StringIO(to_text(json.dumps(response))))
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
with pytest.raises(GalaxyError, match=re.escape(to_native(expected % api.api_server))):
api.publish_collection(collection_artifact)
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
('https://galaxy.server.com/api', 'v2', 'Token', GalaxyToken('my token'),
'1234',
'https://galaxy.server.com/api/v2/collection-imports/1234'),
('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
'1234',
'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.return_value = StringIO(u'{"state":"success","finished_at":"time"}')
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
api.wait_import_task(import_uri)
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == full_import_uri
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my token'),
'1234',
'https://galaxy.server.com/api/v2/collection-imports/1234'),
('https://galaxy.server.com/api/automation-hub', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
'1234',
'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_multiple_requests(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(u'{"state":"test"}'),
StringIO(u'{"state":"success","finished_at":"time"}'),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_vvv = MagicMock()
monkeypatch.setattr(Display, 'vvv', mock_vvv)
monkeypatch.setattr(time, 'sleep', MagicMock())
api.wait_import_task(import_uri)
assert mock_open.call_count == 2
assert mock_open.mock_calls[0][1][0] == full_import_uri
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_open.mock_calls[1][1][0] == full_import_uri
assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
assert mock_vvv.call_count == 1
assert mock_vvv.mock_calls[0][1][0] == \
'Galaxy import process has a status of test, wait 2 seconds before trying again'
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri,', [
('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my token'),
'1234',
'https://galaxy.server.com/api/v2/collection-imports/1234'),
('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
'1234',
'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_with_failure(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(to_text(json.dumps({
'finished_at': 'some_time',
'state': 'failed',
'error': {
'code': 'GW001',
'description': u'Becäuse I said so!',
},
'messages': [
{
'level': 'error',
'message': u'Somé error',
},
{
'level': 'warning',
'message': u'Some wärning',
},
{
'level': 'info',
'message': u'Somé info',
},
],
}))),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_vvv = MagicMock()
monkeypatch.setattr(Display, 'vvv', mock_vvv)
mock_warn = MagicMock()
monkeypatch.setattr(Display, 'warning', mock_warn)
mock_err = MagicMock()
monkeypatch.setattr(Display, 'error', mock_err)
expected = to_native(u'Galaxy import process failed: Becäuse I said so! (Code: GW001)')
with pytest.raises(AnsibleError, match=re.escape(expected)):
api.wait_import_task(import_uri)
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == full_import_uri
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
assert mock_vvv.call_count == 1
assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: info - Somé info'
assert mock_warn.call_count == 1
assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning'
assert mock_err.call_count == 1
assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error'
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my_token'),
'1234',
'https://galaxy.server.com/api/v2/collection-imports/1234'),
('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
'1234',
'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_with_failure_no_error(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(to_text(json.dumps({
'finished_at': 'some_time',
'state': 'failed',
'error': {},
'messages': [
{
'level': 'error',
'message': u'Somé error',
},
{
'level': 'warning',
'message': u'Some wärning',
},
{
'level': 'info',
'message': u'Somé info',
},
],
}))),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_vvv = MagicMock()
monkeypatch.setattr(Display, 'vvv', mock_vvv)
mock_warn = MagicMock()
monkeypatch.setattr(Display, 'warning', mock_warn)
mock_err = MagicMock()
monkeypatch.setattr(Display, 'error', mock_err)
expected = 'Galaxy import process failed: Unknown error, see %s for more details \\(Code: UNKNOWN\\)' % full_import_uri
with pytest.raises(AnsibleError, match=expected):
api.wait_import_task(import_uri)
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == full_import_uri
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
assert mock_vvv.call_count == 1
assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: info - Somé info'
assert mock_warn.call_count == 1
assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning'
assert mock_err.call_count == 1
assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error'
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
('https://galaxy.server.com/api', 'v2', 'Token', GalaxyToken('my token'),
'1234',
'https://galaxy.server.com/api/v2/collection-imports/1234'),
('https://galaxy.server.com/api/automation-hub', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
'1234',
'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_timeout(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
def return_response(*args, **kwargs):
return StringIO(u'{"state":"waiting"}')
mock_open = MagicMock()
mock_open.side_effect = return_response
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_vvv = MagicMock()
monkeypatch.setattr(Display, 'vvv', mock_vvv)
monkeypatch.setattr(time, 'sleep', MagicMock())
expected = "Timeout while waiting for the Galaxy import process to finish, check progress at '%s'" % full_import_uri
with pytest.raises(AnsibleError, match=expected):
api.wait_import_task(import_uri, 1)
assert mock_open.call_count > 1
assert mock_open.mock_calls[0][1][0] == full_import_uri
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_open.mock_calls[1][1][0] == full_import_uri
assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
# expected_wait_msg = 'Galaxy import process has a status of waiting, wait {0} seconds before trying again'
assert mock_vvv.call_count > 9 # 1st is opening Galaxy token file.
# FIXME:
# assert mock_vvv.mock_calls[1][1][0] == expected_wait_msg.format(2)
# assert mock_vvv.mock_calls[2][1][0] == expected_wait_msg.format(3)
# assert mock_vvv.mock_calls[3][1][0] == expected_wait_msg.format(4)
# assert mock_vvv.mock_calls[4][1][0] == expected_wait_msg.format(6)
# assert mock_vvv.mock_calls[5][1][0] == expected_wait_msg.format(10)
# assert mock_vvv.mock_calls[6][1][0] == expected_wait_msg.format(15)
# assert mock_vvv.mock_calls[7][1][0] == expected_wait_msg.format(22)
# assert mock_vvv.mock_calls[8][1][0] == expected_wait_msg.format(30)
@pytest.mark.parametrize('api_version, token_type, version, token_ins', [
('v2', None, 'v2.1.13', None),
('v3', 'Bearer', 'v1.0.0', KeycloakToken(auth_url='https://api.test/api/automation-hub/')),
])
def test_get_collection_version_metadata_no_version(api_version, token_type, version, token_ins, monkeypatch):
api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(to_text(json.dumps({
'download_url': 'https://downloadme.com',
'artifact': {
'sha256': 'ac47b6fac117d7c171812750dacda655b04533cf56b31080b82d1c0db3c9d80f',
},
'namespace': {
'name': 'namespace',
},
'collection': {
'name': 'collection',
},
'version': version,
'metadata': {
'dependencies': {},
}
}))),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual = api.get_collection_version_metadata('namespace', 'collection', version)
assert isinstance(actual, CollectionVersionMetadata)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.download_url == u'https://downloadme.com'
assert actual.artifact_sha256 == u'ac47b6fac117d7c171812750dacda655b04533cf56b31080b82d1c0db3c9d80f'
assert actual.version == version
assert actual.dependencies == {}
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == '%s%s/collections/namespace/collection/versions/%s' \
% (api.api_server, api_version, version)
    # v2 calls don't need auth, so no authz header or token_type
if token_type:
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
@pytest.mark.parametrize('api_version, token_type, token_ins, response', [
('v2', None, None, {
'count': 2,
'next': None,
'previous': None,
'results': [
{
'version': '1.0.0',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
},
{
'version': '1.0.1',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
},
],
}),
# TODO: Verify this once Automation Hub is actually out
('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), {
'count': 2,
'next': None,
'previous': None,
'data': [
{
'version': '1.0.0',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
},
{
'version': '1.0.1',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
},
],
}),
])
def test_get_collection_versions(api_version, token_type, token_ins, response, monkeypatch):
api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(to_text(json.dumps(response))),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual = api.get_collection_versions('namespace', 'collection')
assert actual == [u'1.0.0', u'1.0.1']
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
'versions' % api_version
if token_ins:
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
@pytest.mark.parametrize('api_version, token_type, token_ins, responses', [
('v2', None, None, [
{
'count': 6,
'next': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=2',
'previous': None,
'results': [
{
'version': '1.0.0',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
},
{
'version': '1.0.1',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
},
],
},
{
'count': 6,
'next': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=3',
'previous': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions',
'results': [
{
'version': '1.0.2',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.2',
},
{
'version': '1.0.3',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.3',
},
],
},
{
'count': 6,
'next': None,
'previous': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=2',
'results': [
{
'version': '1.0.4',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.4',
},
{
'version': '1.0.5',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.5',
},
],
},
]),
('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), [
{
'count': 6,
'links': {
'next': 'https://galaxy.server.com/api/v3/collections/namespace/collection/versions/?page=2',
'previous': None,
},
'data': [
{
'version': '1.0.0',
'href': 'https://galaxy.server.com/api/v3/collections/namespace/collection/versions/1.0.0',
},
{
'version': '1.0.1',
'href': 'https://galaxy.server.com/api/v3/collections/namespace/collection/versions/1.0.1',
},
],
},
{
'count': 6,
'links': {
'next': 'https://galaxy.server.com/api/v3/collections/namespace/collection/versions/?page=3',
'previous': 'https://galaxy.server.com/api/v3/collections/namespace/collection/versions',
},
'data': [
{
'version': '1.0.2',
'href': 'https://galaxy.server.com/api/v3/collections/namespace/collection/versions/1.0.2',
},
{
'version': '1.0.3',
'href': 'https://galaxy.server.com/api/v3/collections/namespace/collection/versions/1.0.3',
},
],
},
{
'count': 6,
'links': {
'next': None,
'previous': 'https://galaxy.server.com/api/v3/collections/namespace/collection/versions/?page=2',
},
'data': [
{
'version': '1.0.4',
'href': 'https://galaxy.server.com/api/v3/collections/namespace/collection/versions/1.0.4',
},
{
'version': '1.0.5',
'href': 'https://galaxy.server.com/api/v3/collections/namespace/collection/versions/1.0.5',
},
],
},
]),
])
def test_get_collection_versions_pagination(api_version, token_type, token_ins, responses, monkeypatch):
api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [StringIO(to_text(json.dumps(r))) for r in responses]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual = api.get_collection_versions('namespace', 'collection')
assert actual == [u'1.0.0', u'1.0.1', u'1.0.2', u'1.0.3', u'1.0.4', u'1.0.5']
assert mock_open.call_count == 3
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
'versions' % api_version
assert mock_open.mock_calls[1][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
'versions/?page=2' % api_version
assert mock_open.mock_calls[2][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
'versions/?page=3' % api_version
if token_type:
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_open.mock_calls[2][2]['headers']['Authorization'] == '%s my token' % token_type
@pytest.mark.parametrize('responses', [
[
{
'count': 2,
'results': [{'name': '3.5.1', }, {'name': '3.5.2'}],
'next_link': None,
'next': None,
'previous_link': None,
'previous': None
},
],
[
{
'count': 2,
'results': [{'name': '3.5.1'}],
'next_link': '/api/v1/roles/432/versions/?page=2&page_size=50',
'next': '/roles/432/versions/?page=2&page_size=50',
'previous_link': None,
'previous': None
},
{
'count': 2,
'results': [{'name': '3.5.2'}],
'next_link': None,
'next': None,
'previous_link': '/api/v1/roles/432/versions/?&page_size=50',
'previous': '/roles/432/versions/?page_size=50',
},
]
])
def test_get_role_versions_pagination(monkeypatch, responses):
api = get_test_galaxy_api('https://galaxy.com/api/', 'v1')
mock_open = MagicMock()
mock_open.side_effect = [StringIO(to_text(json.dumps(r))) for r in responses]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual = api.fetch_role_related('versions', 432)
assert actual == [{'name': '3.5.1'}, {'name': '3.5.2'}]
assert mock_open.call_count == len(responses)
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.com/api/v1/roles/432/versions/?page_size=50'
if len(responses) == 2:
assert mock_open.mock_calls[1][1][0] == 'https://galaxy.com/api/v1/roles/432/versions/?page=2&page_size=50'
|
Lujeni/ansible
|
test/units/galaxy/test_api.py
|
Python
|
gpl-3.0
| 37,758
|
[
"Galaxy"
] |
becd90f73001169cb499c5794217cfd7cb8b5a6b61705a33302531b460d3b1ba
|
"""
Single Bubble Model: Inert particles
=====================================
Use the ``TAMOC`` `single_bubble_model` to simulate the trajectory of an inert
particle (e.g., a dead oil) rising through the water column. This script
demonstrates the typical steps involved in running the single bubble model.
It uses the ambient data stored in the file `../test/output/test_bm54.nc`,
created by the `test_ambient` module. Please make sure all tests have
passed before running this script or modify the script to use a different
source of ambient data.
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function)
from tamoc import ambient
from tamoc import dbm
from tamoc import seawater
from tamoc import single_bubble_model
import numpy as np
if __name__ == '__main__':
# Open an ambient profile object from the netCDF dataset
nc = '../../test/output/test_bm54.nc'
bm54 = ambient.Profile(nc, chem_names='all')
bm54.close_nc()
# Initialize a single_bubble_model.Model object with this data
sbm = single_bubble_model.Model(bm54)
# Create an inert particle that is compressible
oil = dbm.InsolubleParticle(True, True, rho_p=840., k_bio=0.001,
t_bio = 0.)
mol_frac = np.array([1.])
# Specify the remaining particle initial conditions
de = 0.03
z0 = 1000.
T0 = 273.15 + 30.
# Simulate the trajectory through the water column and plot the results
sbm.simulate(oil, z0, de, mol_frac, T0, K_T=1, delta_t=10.)
sbm.post_process()
# Save the simulation to a netCDF file
sbm.save_sim('./particle.nc', '../../test/output/test_bm54.nc',
'Results of ./particle.py script')
# Save the data for importing into Matlab
sbm.save_txt('./particle.txt', '../../test/output/test_bm54.nc',
'Results of ./particle.py script')
|
socolofs/tamoc
|
bin/sbm/particle.py
|
Python
|
mit
| 1,951
|
[
"NetCDF"
] |
cc31855f5087662882b010e950155a643e08ea28bb4d39c575bcb77524f9d5b0
|
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d
class TestStatistic(object):
"""Base test statistic class
"""
use_pmf_cache = True
statistic_name = 's'
n_trials = int(1e4) # Number of trials used in monte carlo training of p(s|mu) for a given mu
mu_dependent = False # Set to True if the statistic is hypothesis-dependent
def __init__(self, **kwargs):
"""Use kwargs to override any class-level settings"""
for k, v in kwargs.items():
setattr(self, k, v)
self.pmf_cache = {}
def get_values_and_likelihoods(self, mu, precision_digits=3):
# TODO: do something with precision_digits...
if self.use_pmf_cache and mu in self.pmf_cache:
return self.pmf_cache[mu]
# Simulate self.n_trials observations
# TODO: shortcut if self.__call__ is vectorized. Maybe implement in __call__ and have children override
# something else instead (sounds nicer)
values = np.zeros(self.n_trials)
for i, obs in enumerate(self.generate_observations(mu, n_trials=self.n_trials)):
values[i] = self(obs)
# Summarize values to pdf/pmf
# Allow statistic implementation to choose method for this, there is no universal solution
# (e.g. very different for discrete or continuous statistics)
# TODO: handle under- and overflow here?
values, likelihoods = self.build_pdf(values, mu=mu)
likelihoods /= likelihoods.sum()
# Cache the pmf
if self.use_pmf_cache:
self.pmf_cache[mu] = values, likelihoods
return values, likelihoods
def build_pdf(self, values, mu):
"""Return possible values, likelihoods. Can bin, can even use mu if desired (not usually needed).
By default uses a KDE (implemented as fine histogram + Gaussian filter)
KDE bandwith = Silverman rule
"""
# First take a very fine histogram...
# TODO: somehow make bins dependent on n_trials, and n_trials on desired precision...
hist, bin_edges = np.histogram(values, bins=1000)
        hist = hist.astype(float)
hist /= hist.sum()
# ... then apply a Gaussian filter.
# The filter is not applied on the outermost bins, since these might be accumulation points
# we don't want to smear (e.g. a statistic generally may take an extreme value if there are no events)
# The Bandwidth is determined by the Silverman rule of thumb, looking at the non-extreme values.
# TODO: this behaviour should be configurable per-statistic, don't assume accumulation points by default
bin_spacing = bin_edges[1] - bin_edges[0]
non_extreme_values = values[(values != np.min(values)) & (values != np.max(values))]
bandwidth = 1.06 * non_extreme_values.std() / len(non_extreme_values)**(1/5)
center_hist = gaussian_filter1d(hist[1:-1], sigma=bandwidth / bin_spacing)
# Ensure the Gauss filter has not changed the sum of the bins it was applied to:
        center_hist *= np.sum(hist[1:-1]) / center_hist.sum()
hist[1:-1] = center_hist
if not np.isclose(np.sum(hist), 1):
raise RuntimeError("WTF? Density histogram sums to %s, not 1 after filtering!" % np.sum(hist))
# The values representing to the histogram bins are the bin centers...
# ... except at the edges, where we use the outer boundaries.
# This is again necessary to deal with accumulation points.
values = (bin_edges[1:] + bin_edges[:-1]) * 0.5
values[0] = bin_edges[0]
values[-1] = bin_edges[-1]
return values, hist
def __call__(self, observation, hypothesis=None):
raise NotImplementedError
def generate_observations(self, mu, n_trials=1):
"""Generate n_trials observations for the statistic under hypothesis mu"""
n_per_trial = np.random.poisson(mu, n_trials)
# Last array will always be empty, because we passed the very last index + 1 as split point
return np.split(self.generate_single_observation(np.sum(n_per_trial)),
np.cumsum(n_per_trial))[:-1]
def generate_single_observation(self, n):
"""Generate a single observation of n events"""
return np.zeros(n)
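# Minimal sketch of a concrete statistic (a hypothetical subclass, shown only to make the
# hooks above concrete): __call__ maps one observation, an array of per-event values, to
# a scalar. For a pure counting statistic the default generate_single_observation, which
# returns all-zero event values, is already sufficient.
class _ExampleEventCount(TestStatistic):
    statistic_name = 'n'
    def __call__(self, observation, hypothesis=None):
        # The statistic ignores the event values and simply counts the events.
        return len(observation)
# Typical use would then look like:
#     values, likelihoods = _ExampleEventCount(n_trials=2000).get_values_and_likelihoods(mu=3.0)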
|
JelleAalbers/plunc
|
plunc/statistics/base.py
|
Python
|
mit
| 4,325
|
[
"Gaussian"
] |
31293c16f6e8e9d37f2514f8da3c8877b3dec9447bad6a22618280b1851aa98c
|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# vizplotgui_vmd.py
# Purpose: viz running LAMMPS simulation via VMD with plot and GUI
# Syntax: vizplotgui_vmd.py in.lammps Nfreq compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point and viz snapshot every this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
# IMPORTANT: this script cannot yet be run in parallel via Pypar,
# because I can't seem to do an MPI-style broadcast in Pypar
from __future__ import print_function
import sys,time
sys.path.append("./pizza")
# methods called by GUI
def run():
global runflag
runflag = 1
def stop():
global runflag
runflag = 0
def settemp(value):
global temptarget
temptarget = slider.get()
def quit():
global breakflag
breakflag = 1
# method called by timestep loop every Nfreq steps
# read dump snapshot and viz it, update plot with compute value
def update(ntimestep):
next(d)
d.unscale()
p.single(ntimestep)
v.append('tmp.pdb','pdb')
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
gn.plot(xaxis,yaxis)
# parse command line
argv = sys.argv
if len(argv) != 4:
print("Syntax: vizplotgui_vmd.py in.lammps Nfreq compute-ID")
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
compute = sys.argv[3]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate initial 1-point plot, dump file, and image
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
breakflag = 0
runflag = 0
temptarget = 1.0
# wrapper on VMD window via Pizza.py vmd tool
# just proc 0 handles reading of dump file and viz
if me == 0:
from vmd import vmd
v = vmd()
v('menu main off')
v.rep('VDW')
from dump import dump
from pdbfile import pdbfile
d = dump('tmp.dump',0)
p = pdbfile(d)
next(d)
d.unscale()
p.single(ntimestep)
v.new('tmp.pdb','pdb')
# display GUI with run/stop buttons and slider for temperature
if me == 0:
try:
from Tkinter import *
except:
from tkinter import *
tkroot = Tk()
tkroot.withdraw()
root = Toplevel(tkroot)
root.title("LAMMPS GUI")
frame = Frame(root)
Button(frame,text="Run",command=run).pack(side=LEFT)
Button(frame,text="Stop",command=stop).pack(side=LEFT)
slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
orient=HORIZONTAL,label="Temperature")
slider.bind('<ButtonRelease-1>',settemp)
slider.set(temptarget)
slider.pack(side=LEFT)
Button(frame,text="Quit",command=quit).pack(side=RIGHT)
frame.pack()
tkroot.update()
# wrapper on GnuPlot via Pizza.py gnu tool
if me == 0:
from gnu import gnu
gn = gnu()
gn.plot(xaxis,yaxis)
gn.title(compute,"Timestep","Temperature")
# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes
running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
while 1:
if me == 0: tkroot.update()
if temp != temptarget:
temp = temptarget
seed += me+1
lmp.command("fix 2 all langevin %g %g 0.1 12345" % (temp,temp))
running = 0
if runflag and running:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif runflag and not running:
lmp.command("run %d pre yes post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif not runflag and running:
lmp.command("run %d pre no post yes" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
if breakflag: break
if runflag: running = 1
else: running = 0
time.sleep(0.01)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
#pypar.finalize()
|
SGenheden/lammps
|
python/examples/vizplotgui_vmd.py
|
Python
|
gpl-2.0
| 4,473
|
[
"LAMMPS",
"VMD"
] |
63fa46e38c7e86b0842aa52a375f09609ffc8e894cf2e34186eaf9ea7af97a6b
|
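The chunked-run pattern used above (repeated "run N" commands with pre/post flags, then querying a compute) can be exercised without VMD, Tk or GnuPlot. A minimal sketch, assuming the LAMMPS Python module is installed and a hypothetical input script in.demo sets up the system without its own run command:

from lammps import lammps

lmp = lammps()
lmp.file("in.demo")                    # hypothetical setup script, no "run" inside
lmp.command("run 0 pre yes post no")   # initialise forces and neighbour lists once
for chunk in range(10):
    lmp.command("run 100 pre no post no")             # continue without re-initialising
    temp = lmp.extract_compute("thermo_temp", 0, 0)   # built-in temperature compute
    print(chunk, temp)
lmp.command("run 0 pre no post yes")   # final statistics and cleanup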
from __future__ import print_function
import sys
import argparse
from collections import Counter, defaultdict
import operator
import itertools
import warnings
import traceback
import os.path
import multiprocessing
import pysam
import HTSeq
class UnknownChrom(Exception):
pass
def invert_strand(iv):
iv2 = iv.copy()
if iv2.strand == "+":
iv2.strand = "-"
elif iv2.strand == "-":
iv2.strand = "+"
else:
raise ValueError("Illegal strand")
return iv2
def count_reads_with_barcodes(
sam_filename,
features,
feature_attr,
order,
max_buffer_size,
stranded,
overlap_mode,
multimapped_mode,
secondary_alignment_mode,
supplementary_alignment_mode,
feature_type,
id_attribute,
additional_attributes,
quiet,
minaqual,
samout_format,
samout_filename,
nprocesses,
cb_tag,
ub_tag,
):
def write_to_samout(r, assignment, samoutfile, template=None):
if samoutfile is None:
return
if not pe_mode:
r = (r,)
for read in r:
if read is not None:
read.optional_fields.append(('XF', assignment))
if samout_format in ('SAM', 'sam'):
samoutfile.write(read.get_sam_line() + "\n")
else:
samoutfile.write(read.to_pysam_AlignedSegment(template))
def identify_barcodes(r):
'''Identify barcode from the read or pair (both must have the same)'''
if not pe_mode:
r = (r,)
# cell, UMI
barcodes = [None, None]
nbar = 0
for read in r:
if read is not None:
for tag, val in read.optional_fields:
if tag == cb_tag:
barcodes[0] = val
nbar += 1
if nbar == 2:
return barcodes
elif tag == ub_tag:
barcodes[1] = val
nbar += 1
if nbar == 2:
return barcodes
return barcodes
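# Example with illustrative values: a read whose optional fields include
# ('CB', 'AAACCTGAGT') and ('UB', 'TTGCAT') yields ['AAACCTGAGT', 'TTGCAT'];
# if either tag is absent, the corresponding entry stays None.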
try:
if sam_filename == "-":
read_seq_file = HTSeq.BAM_Reader(sys.stdin)
else:
read_seq_file = HTSeq.BAM_Reader(sam_filename)
# Get template for output BAM
if samout_filename is None:
template = None
samoutfile = None
elif samout_format in ('bam', 'BAM'):
template = read_seq_file.get_template()
samoutfile = pysam.AlignmentFile(
samout_filename, 'wb',
template=template,
)
else:
template = None
samoutfile = open(samout_filename, 'w')
read_seq_iter = iter(read_seq_file)
# Catch empty BAM files
try:
first_read = next(read_seq_iter)
pe_mode = first_read.paired_end
# FIXME: catchall can hide subtle bugs
except:
first_read = None
pe_mode = False
if first_read is not None:
read_seq = itertools.chain([first_read], read_seq_iter)
else:
read_seq = []
except:
sys.stderr.write(
"Error occured when reading beginning of SAM/BAM file.\n")
raise
# CIGAR match characters (including alignment match, sequence match, and
# sequence mismatch)
com = ('M', '=', 'X')
try:
if pe_mode:
if ((supplementary_alignment_mode == 'ignore') and
(secondary_alignment_mode == 'ignore')):
primary_only = True
else:
primary_only = False
if order == "name":
read_seq = HTSeq.pair_SAM_alignments(
read_seq,
primary_only=primary_only)
elif order == "pos":
read_seq = HTSeq.pair_SAM_alignments_with_buffer(
read_seq,
max_buffer_size=max_buffer_size,
primary_only=primary_only)
else:
raise ValueError("Illegal order specified.")
# The nesting is cell barcode, UMI, feature
counts = defaultdict(lambda: defaultdict(Counter))
i = 0
for r in read_seq:
if i > 0 and i % 100000 == 0 and not quiet:
sys.stderr.write(
"%d alignment record%s processed.\n" %
(i, "s" if not pe_mode else " pairs"))
sys.stderr.flush()
i += 1
cb, ub = identify_barcodes(r)
if not pe_mode:
if not r.aligned:
counts[cb][ub]['__not_aligned'] += 1
write_to_samout(
r, "__not_aligned", samoutfile,
template)
continue
if ((secondary_alignment_mode == 'ignore') and
r.not_primary_alignment):
continue
if ((supplementary_alignment_mode == 'ignore') and
r.supplementary):
continue
try:
if r.optional_field("NH") > 1:
counts[cb][ub]['__alignment_not_unique'] += 1
write_to_samout(
r,
"__alignment_not_unique",
samoutfile,
template)
if multimapped_mode == 'none':
continue
except KeyError:
pass
if r.aQual < minaqual:
counts[cb][ub]['__too_low_aQual'] += 1
write_to_samout(
r, "__too_low_aQual", samoutfile,
template)
continue
if stranded != "reverse":
iv_seq = (co.ref_iv for co in r.cigar if co.type in com
and co.size > 0)
else:
iv_seq = (invert_strand(co.ref_iv)
for co in r.cigar if (co.type in com and
co.size > 0))
else:
if r[0] is not None and r[0].aligned:
if stranded != "reverse":
iv_seq = (co.ref_iv for co in r[0].cigar
if co.type in com and co.size > 0)
else:
iv_seq = (invert_strand(co.ref_iv) for co in r[0].cigar
if co.type in com and co.size > 0)
else:
iv_seq = tuple()
if r[1] is not None and r[1].aligned:
if stranded != "reverse":
iv_seq = itertools.chain(
iv_seq,
(invert_strand(co.ref_iv) for co in r[1].cigar
if co.type in com and co.size > 0))
else:
iv_seq = itertools.chain(
iv_seq,
(co.ref_iv for co in r[1].cigar
if co.type in com and co.size > 0))
else:
if (r[0] is None) or not (r[0].aligned):
write_to_samout(
r, "__not_aligned", samoutfile,
template)
counts[cb][ub]['__not_aligned'] += 1
continue
if secondary_alignment_mode == 'ignore':
if (r[0] is not None) and r[0].not_primary_alignment:
continue
elif (r[1] is not None) and r[1].not_primary_alignment:
continue
if supplementary_alignment_mode == 'ignore':
if (r[0] is not None) and r[0].supplementary:
continue
elif (r[1] is not None) and r[1].supplementary:
continue
try:
if ((r[0] is not None and r[0].optional_field("NH") > 1) or
(r[1] is not None and r[1].optional_field("NH") > 1)):
write_to_samout(
r, "__alignment_not_unique", samoutfile,
template)
counts[cb][ub]['__alignment_not_unique'] += 1
if multimapped_mode == 'none':
continue
except KeyError:
pass
if ((r[0] and r[0].aQual < minaqual) or
(r[1] and r[1].aQual < minaqual)):
write_to_samout(
r, "__too_low_aQual", samoutfile,
template)
counts[cb][ub]['__too_low_aQual'] += 1
continue
try:
if overlap_mode == "union":
fs = set()
for iv in iv_seq:
if iv.chrom not in features.chrom_vectors:
raise UnknownChrom
for iv2, fs2 in features[iv].steps():
fs = fs.union(fs2)
elif overlap_mode in ("intersection-strict",
"intersection-nonempty"):
fs = None
for iv in iv_seq:
if iv.chrom not in features.chrom_vectors:
raise UnknownChrom
for iv2, fs2 in features[iv].steps():
if ((len(fs2) > 0) or
(overlap_mode == "intersection-strict")):
if fs is None:
fs = fs2.copy()
else:
fs = fs.intersection(fs2)
else:
sys.exit("Illegal overlap mode.")
if fs is None or len(fs) == 0:
write_to_samout(
r, "__no_feature", samoutfile,
template)
counts[cb][ub]['__no_feature'] += 1
elif len(fs) > 1:
write_to_samout(
r, "__ambiguous[" + '+'.join(fs) + "]",
samoutfile,
template)
counts[cb][ub]['__ambiguous'] += 1
else:
write_to_samout(
r, list(fs)[0], samoutfile,
template)
if fs is not None and len(fs) > 0:
if multimapped_mode == 'none':
if len(fs) == 1:
counts[cb][ub][list(fs)[0]] += 1
elif multimapped_mode == 'all':
for fsi in list(fs):
counts[cb][ub][fsi] += 1
else:
sys.exit("Illegal multimap mode.")
except UnknownChrom:
write_to_samout(
r, "__no_feature", samoutfile,
template)
counts[cb][ub]['__no_feature'] += 1
except:
sys.stderr.write(
"Error occured when processing input (%s):\n" %
(read_seq_file.get_line_number_string()))
raise
if not quiet:
sys.stderr.write(
"%d %s processed.\n" %
(i, "alignments " if not pe_mode else "alignment pairs"))
sys.stderr.flush()
if samoutfile is not None:
samoutfile.close()
# Get rid of UMI by majority rule
cbs = sorted(counts.keys())
counts_noumi = {}
for cb in cbs:
counts_cell = Counter()
for ub, udic in counts.pop(cb).items():
# In case of a tie, do not increment either feature
top = udic.most_common(2)
if (len(top) == 2) and (top[0][1] == top[1][1]):
continue
counts_cell[top[0][0]] += 1
counts_noumi[cb] = counts_cell
return {
'cell_barcodes': cbs,
'counts': counts_noumi,
}
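# Illustrative sketch (not called by this script; the name is hypothetical) of the
# UMI-collapsing rule applied above: within one cell, each UMI votes for the feature
# it hit most often, and ties between the top two features are discarded.
def _collapse_umis_by_majority(umi_to_feature_counts):
    """Map {umi: Counter(feature -> reads)} to a per-feature Counter, one vote per UMI."""
    collapsed = Counter()
    for umi, feature_counts in umi_to_feature_counts.items():
        top = feature_counts.most_common(2)
        if not top:
            continue
        if len(top) == 2 and top[0][1] == top[1][1]:
            continue  # ambiguous UMI: no feature gets the vote
        collapsed[top[0][0]] += 1
    return collapsed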
def count_reads_in_features(
sam_filename,
gff_filename,
order,
max_buffer_size,
stranded,
overlap_mode,
multimapped_mode,
secondary_alignment_mode,
supplementary_alignment_mode,
feature_type,
id_attribute,
additional_attributes,
quiet,
minaqual,
samout,
samout_format,
output_delimiter,
output_filename,
nprocesses,
cb_tag,
ub_tag,
):
'''Count reads in features, parallelizing by file'''
if samout is not None:
# Try to open samout file early in case any of them has issues
if samout_format in ('SAM', 'sam'):
with open(samout, 'w'):
pass
else:
# We don't have a template if the input is stdin
if sam_filename != '-':
with pysam.AlignmentFile(sam_filename, 'r') as sf:
with pysam.AlignmentFile(samout, 'w', template=sf):
pass
# Try to open samfiles to fail early in case any of them is not there
if sam_filename != '-':
with pysam.AlignmentFile(sam_filename, 'r') as sf:
pass
features = HTSeq.GenomicArrayOfSets("auto", stranded != "no")
gff = HTSeq.GFF_Reader(gff_filename)
feature_attr = set()
attributes = {}
i = 0
try:
for f in gff:
if f.type == feature_type:
try:
feature_id = f.attr[id_attribute]
except KeyError:
raise ValueError(
"Feature %s does not contain a '%s' attribute" %
(f.name, id_attribute))
if stranded != "no" and f.iv.strand == ".":
raise ValueError(
"Feature %s at %s does not have strand information but you are "
"running htseq-count in stranded mode. Use '--stranded=no'." %
(f.name, f.iv))
features[f.iv] += feature_id
feature_attr.add(f.attr[id_attribute])
attributes[f.attr[id_attribute]] = [
f.attr[attr] if attr in f.attr else ''
for attr in additional_attributes]
i += 1
if i % 100000 == 0 and not quiet:
sys.stderr.write("%d GFF lines processed.\n" % i)
sys.stderr.flush()
except:
sys.stderr.write(
"Error occured when processing GFF file (%s):\n" %
gff.get_line_number_string())
raise
feature_attr = sorted(feature_attr)
if not quiet:
sys.stderr.write("%d GFF lines processed.\n" % i)
sys.stderr.flush()
if len(feature_attr) == 0:
sys.stderr.write(
"Warning: No features of type '%s' found.\n" % feature_type)
# Count reads
results = count_reads_with_barcodes(
sam_filename,
features,
feature_attr,
order,
max_buffer_size,
stranded,
overlap_mode,
multimapped_mode,
secondary_alignment_mode,
supplementary_alignment_mode,
feature_type,
id_attribute,
additional_attributes,
quiet,
minaqual,
samout_format,
samout,
nprocesses,
cb_tag,
ub_tag,
)
# Cell barcodes
cbs = results['cell_barcodes']
counts = results['counts']
# Write output
other_features = [
'__no_feature',
'__ambiguous',
'__too_low_aQual',
'__not_aligned',
'__alignment_not_unique',
]
pad = ['' for attr in additional_attributes]
# Header
fields = [''] + pad + cbs
line = output_delimiter.join(fields)
if output_filename == '':
print(line)
else:
with open(output_filename, 'w') as f:
f.write(line)
f.write('\n')
# Features
for ifn, fn in enumerate(feature_attr):
fields = [fn] + attributes[fn] + [str(counts[cb][fn]) for cb in cbs]
line = output_delimiter.join(fields)
if output_filename == '':
print(line)
else:
with open(output_filename, 'a') as f:
f.write(line)
f.write('\n')
# Other features
for fn in other_features:
fields = [fn] + pad + [str(counts[cb][fn]) for cb in cbs]
line = output_delimiter.join(fields)
if output_filename == '':
print(line)
else:
with open(output_filename, 'a') as f:
f.write(line)
f.write('\n')
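# Illustrative sketch (independent of the functions above; names are hypothetical) of
# how the GenomicArrayOfSets built in count_reads_in_features resolves overlaps in
# "union" mode: each step of the queried interval contributes its set of feature IDs,
# and the sets are unioned.
def _union_overlap_example():
    gas = HTSeq.GenomicArrayOfSets("auto", stranded=False)
    gas[HTSeq.GenomicInterval("chr1", 100, 300, ".")] += "geneA"
    gas[HTSeq.GenomicInterval("chr1", 250, 400, ".")] += "geneB"
    read_iv = HTSeq.GenomicInterval("chr1", 200, 350, ".")
    fs = set()
    for iv, step_set in gas[read_iv].steps():
        fs = fs.union(step_set)
    return fs  # {"geneA", "geneB"}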
def my_showwarning(message, category, filename, lineno=None, file=None,
line=None):
sys.stderr.write("Warning: %s\n" % message)
def main():
pa = argparse.ArgumentParser(
usage="%(prog)s [options] alignment_file gff_file",
description="This script takes one alignment file in SAM/BAM " +
"format and a feature file in GFF format and calculates for each feature " +
"the number of reads mapping to it, accounting for barcodes. See " +
"http://htseq.readthedocs.io/en/master/count.html for details.",
epilog="Written by Simon Anders (sanders@fs.tum.de), " +
"European Molecular Biology Laboratory (EMBL) and Fabio Zanini " +
"(fabio.zanini@unsw.edu.au), UNSW Sydney. (c) 2010-2020. " +
"Released under the terms of the GNU General Public License v3. " +
"Part of the 'HTSeq' framework, version %s." % HTSeq.__version__)
pa.add_argument(
"samfilename", type=str,
help="Path to the SAM/BAM file containing the barcoded, mapped " +
"reads. If '-' is selected, read from standard input")
pa.add_argument(
"featuresfilename", type=str,
help="Path to the GTF file containing the features")
pa.add_argument(
"-f", "--format", dest="samtype",
choices=("sam", "bam", "auto"), default="auto",
help="Type of <alignment_file> data. DEPRECATED: " +
"file format is detected automatically. This option is ignored.")
pa.add_argument(
"-r", "--order", dest="order",
choices=("pos", "name"), default="name",
help="'pos' or 'name'. Sorting order of <alignment_file> (default: name). Paired-end sequencing " +
"data must be sorted either by position or by read name, and the sorting order " +
"must be specified. Ignored for single-end data.")
pa.add_argument(
"--max-reads-in-buffer", dest="max_buffer_size", type=int,
default=30000000,
help="When <alignment_file> is paired end sorted by position, " +
"allow only so many reads to stay in memory until the mates are " +
"found (raising this number will use more memory). Has no effect " +
"for single end or paired end sorted by name")
pa.add_argument(
"-s", "--stranded", dest="stranded",
choices=("yes", "no", "reverse"), default="yes",
help="Whether the data is from a strand-specific assay. Specify 'yes', " +
"'no', or 'reverse' (default: yes). " +
"'reverse' means 'yes' with reversed strand interpretation")
pa.add_argument(
"-a", "--minaqual", type=int, dest="minaqual",
default=10,
help="Skip all reads with MAPQ alignment quality lower than the given " +
"minimum value (default: 10). MAPQ is the 5th column of a SAM/BAM " +
"file and its usage depends on the software used to map the reads.")
pa.add_argument(
"-t", "--type", type=str, dest="featuretype",
default="exon",
help="Feature type (3rd column in GTF file) to be used, " +
"all features of other type are ignored (default, suitable for Ensembl " +
"GTF files: exon)")
pa.add_argument(
"-i", "--idattr", type=str, dest="idattr",
default="gene_id",
help="GTF attribute to be used as feature ID (default, " +
"suitable for Ensembl GTF files: gene_id)")
pa.add_argument(
"--additional-attr", type=str,
action='append',
default=[],
help="Additional feature attributes (default: none, " +
"suitable for Ensembl GTF files: gene_name). Use multiple times " +
"for each different attribute")
pa.add_argument(
"-m", "--mode", dest="mode",
choices=("union", "intersection-strict", "intersection-nonempty"),
default="union",
help="Mode to handle reads overlapping more than one feature " +
"(choices: union, intersection-strict, intersection-nonempty; default: union)")
pa.add_argument(
"--nonunique", dest="nonunique", type=str,
choices=("none", "all"), default="none",
help="Whether to score reads that are not uniquely aligned " +
"or ambiguously assigned to features")
pa.add_argument(
"--secondary-alignments", dest="secondary_alignments", type=str,
choices=("score", "ignore"), default="ignore",
help="Whether to score secondary alignments (0x100 flag)")
pa.add_argument(
"--supplementary-alignments", dest="supplementary_alignments", type=str,
choices=("score", "ignore"), default="ignore",
help="Whether to score supplementary alignments (0x800 flag)")
pa.add_argument(
"-o", "--samout", type=str, dest="samout",
default=None,
help="Write out all SAM alignment records into a" +
"SAM/BAM file, annotating each line " +
"with its feature assignment (as an optional field with tag 'XF')" +
". See the -p option to use BAM instead of SAM.")
pa.add_argument(
"-p", '--samout-format', type=str, dest='samout_format',
choices=('SAM', 'BAM', 'sam', 'bam'), default='SAM',
help="Format to use with the --samout option."
)
pa.add_argument(
"-d", '--delimiter', type=str, dest='output_delimiter',
default='\t',
help="Column delimiter in output (default: TAB)."
)
pa.add_argument(
"-c", '--counts_output', type=str, dest='output_filename',
default='',
help="TSV/CSV filename to output the counts to instead of stdout."
)
pa.add_argument(
"-n", '--nprocesses', type=int, dest='nprocesses',
default=1,
help="Number of parallel CPU processes to use (default: 1)."
)
pa.add_argument(
'--cell-barcode', type=str, dest='cb_tag',
default='CB',
help='BAM tag used for the cell barcode (default compatible ' +
'with 10X Genomics Chromium is CB).',
)
pa.add_argument(
'--UMI', type=str, dest='ub_tag',
default='UB',
help='BAM tag used for the unique molecular identifier, also ' +
'known as molecular barcode (default compatible ' +
'with 10X Genomics Chromium is UB).',
)
pa.add_argument(
"-q", "--quiet", action="store_true", dest="quiet",
help="Suppress progress report") # and warnings" )
pa.add_argument(
"--version", action="store_true",
help='Show software version and exit')
args = pa.parse_args()
if args.version:
print(HTSeq.__version__)
sys.exit()
warnings.showwarning = my_showwarning
try:
count_reads_in_features(
args.samfilename,
args.featuresfilename,
args.order,
args.max_buffer_size,
args.stranded,
args.mode,
args.nonunique,
args.secondary_alignments,
args.supplementary_alignments,
args.featuretype,
args.idattr,
args.additional_attr,
args.quiet,
args.minaqual,
args.samout,
args.samout_format,
args.output_delimiter,
args.output_filename,
args.nprocesses,
args.cb_tag,
args.ub_tag,
)
except:
sys.stderr.write(" %s\n" % str(sys.exc_info()[1]))
sys.stderr.write(" [Exception type: %s, raised in %s:%d]\n" %
(sys.exc_info()[1].__class__.__name__,
os.path.basename(traceback.extract_tb(
sys.exc_info()[2])[-1][0]),
traceback.extract_tb(sys.exc_info()[2])[-1][1]))
sys.exit(1)
if __name__ == "__main__":
main()
|
simon-anders/htseq
|
python2/HTSeq/scripts/count_with_barcodes.py
|
Python
|
gpl-3.0
| 25,733
|
[
"HTSeq",
"pysam"
] |
be9259b9ed1683315c40206d570a1d11faad032c98f10b6b49691c0ff937b1dc
|
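The script above reads the cell and UMI barcodes through HTSeq's optional_fields; the same BAM tags can also be inspected directly with pysam. A minimal hedged sketch (the file name is hypothetical):

import pysam

with pysam.AlignmentFile("barcoded.bam", "rb") as bam:   # hypothetical input file
    for read in bam.fetch(until_eof=True):
        if read.has_tag("CB") and read.has_tag("UB"):
            cell = read.get_tag("CB")   # cell barcode
            umi = read.get_tag("UB")    # unique molecular identifier
            print(read.query_name, cell, umi)
        break  # only inspect the first record in this sketch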
from .Singleton import Singleton
from kalliope.core.Models.settings.Resources import Resources
from .Brain import Brain
from .Synapse import Synapse
from .Neuron import Neuron
from .Signal import Signal
|
kalliope-project/kalliope
|
kalliope/core/Models/__init__.py
|
Python
|
gpl-3.0
| 203
|
[
"NEURON"
] |
1a6591786da9b02b0dbcf52c554e11ca54cbef2cb31343fcf86dc5ab89948649
|
from __future__ import division
from warnings import warn
from collections import Iterable
import numpy as np
import PIL.Image as PILImage
from menpo.compatibility import basestring
from menpo.base import (Vectorizable, MenpoDeprecationWarning,
copy_landmarks_and_path)
from menpo.shape import PointCloud, bounding_box
from menpo.landmark import Landmarkable
from menpo.transform import (Translation, NonUniformScale, Rotation,
AlignmentUniformScale, Affine, scale_about_centre,
transform_about_centre)
from menpo.visualize.base import ImageViewer, LandmarkableViewable, Viewable
from .interpolation import scipy_interpolation, cython_interpolation
from .patches import extract_patches, set_patches
# Cache the greyscale luminosity coefficients as they are invariant.
_greyscale_luminosity_coef = None
class ImageBoundaryError(ValueError):
r"""
Exception that is thrown when an attempt is made to crop an image beyond
the edge of its boundary.
Parameters
----------
requested_min : ``(d,)`` `ndarray`
The per-dimension minimum index requested for the crop
requested_max : ``(d,)`` `ndarray`
The per-dimension maximum index requested for the crop
snapped_min : ``(d,)`` `ndarray`
The per-dimension minimum index that could be used if the crop was
constrained to the image boundaries.
snapped_max : ``(d,)`` `ndarray`
The per-dimension maximum index that could be used if the crop was
constrained to the image boundaries.
"""
def __init__(self, requested_min, requested_max, snapped_min,
snapped_max):
super(ImageBoundaryError, self).__init__()
self.requested_min = requested_min
self.requested_max = requested_max
self.snapped_min = snapped_min
self.snapped_max = snapped_max
def indices_for_image_of_shape(shape):
r"""
The indices of all pixels in an image with a given shape (without
channel information).
Parameters
----------
shape : `tuple`
The shape of the image.
Returns
-------
indices : `ndarray`
The indices of all the pixels in the image.
"""
return np.indices(shape).reshape([len(shape), -1]).T
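# For example, indices_for_image_of_shape((2, 2)) returns
# array([[0, 0], [0, 1], [1, 0], [1, 1]]): one (row, col) index per pixel.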
def normalize_pixels_range(pixels, error_on_unknown_type=True):
r"""
Normalize the given pixels to the Menpo valid floating point range, [0, 1].
This is a single place to handle normalising pixels ranges. At the moment
the supported types are uint8 and uint16.
Parameters
----------
pixels : `ndarray`
The pixels to normalize in the floating point range.
error_on_unknown_type : `bool`, optional
If ``True``, this method throws a ``ValueError`` if the given pixels
array is an unknown type. If ``False``, this method performs no
operation.
Returns
-------
normalized_pixels : `ndarray`
The normalized pixels in the range [0, 1].
Raises
------
ValueError
If ``pixels`` is an unknown type and ``error_on_unknown_type==True``
"""
dtype = pixels.dtype
if dtype == np.uint8:
max_range = 255.0
elif dtype == np.uint16:
max_range = 65535.0
else:
if error_on_unknown_type:
raise ValueError('Unexpected dtype ({}) - normalisation range '
'is unknown'.format(dtype))
else:
# Do nothing
return pixels
# This multiplication is quite a bit faster than just dividing - will
# automatically cast it up to float64
return pixels * (1.0 / max_range)
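# For example, a uint8 array [0, 128, 255] is scaled to float64 [0.0, 128/255, 1.0];
# other dtypes raise a ValueError by default, or pass through unchanged when
# error_on_unknown_type is False.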
def denormalize_pixels_range(pixels, out_dtype):
"""
Denormalize the given pixels array into the range of the given out dtype.
If the given pixels are floating point or boolean then the values
are scaled appropriately and cast to the output dtype. If the pixels
are already the correct dtype they are immediately returned.
Floating point pixels must be in the range [0, 1].
Currently uint8 and uint16 output dtypes are supported.
Parameters
----------
pixels : `ndarray`
The pixels to denormalize.
out_dtype : `np.dtype`
The numpy data type to output and scale the values into.
Returns
-------
out_pixels : `ndarray`
Will be in the correct range and will have type ``out_dtype``.
Raises
------
ValueError
Pixels are floating point and range outside [0, 1]
ValueError
Input pixels dtype not in the set {float32, float64, bool}.
ValueError
Output dtype not in the set {uint8, uint16}
"""
in_dtype = pixels.dtype
if in_dtype == out_dtype:
return pixels
if np.issubclass_(in_dtype.type, np.floating) or in_dtype == np.float:
if np.issubclass_(out_dtype, np.floating) or out_dtype == np.float:
return pixels.astype(out_dtype)
else:
p_min = pixels.min()
p_max = pixels.max()
if p_min < 0.0 or p_max > 1.0:
raise ValueError('Unexpected input range [{}, {}] - pixels '
'must be in the range [0, 1]'.format(p_min,
p_max))
elif in_dtype != np.bool:
raise ValueError('Unexpected input dtype ({}) - only float32, float64 '
'and bool supported'.format(in_dtype))
if out_dtype == np.uint8:
max_range = 255.0
elif out_dtype == np.uint16:
max_range = 65535.0
else:
raise ValueError('Unexpected output dtype ({}) - normalisation range '
'is unknown'.format(out_dtype))
return (pixels * max_range).astype(out_dtype)
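# For example, float pixels [0.0, 0.5, 1.0] with out_dtype=np.uint8 become
# [0, 127, 255] (0.5 * 255 = 127.5 truncates to 127); float values outside [0, 1]
# raise before any scaling is applied.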
def channels_to_back(pixels):
r"""
Roll the channels from the front to the back for an image. If the image
that is passed is already a numpy array, then that is also fine.
Always returns a numpy array because our :map:`Image` containers do not
support channels at the back.
Parameters
----------
pixels : `ndarray`
The pixels or image to roll the channel back for.
Returns
-------
rolled_pixels : `ndarray`
The numpy array of pixels with the channels on the last axis.
"""
return np.require(np.rollaxis(pixels, 0, pixels.ndim), dtype=pixels.dtype,
requirements=['C'])
def channels_to_front(pixels):
r"""
Convert the given pixels array (channels assumed to be at the last axis
as is common in other imaging packages) into a numpy array.
Parameters
----------
pixels : ``(H, W, C)`` `buffer`
The pixels to convert to the Menpo channels at axis 0.
Returns
-------
pixels : ``(C, H, W)`` `ndarray`
Numpy array, channels as axis 0.
"""
if not isinstance(pixels, np.ndarray):
pixels = np.array(pixels)
return np.require(np.rollaxis(pixels, -1), dtype=pixels.dtype,
requirements=['C'])
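# For example, channels_to_front on an (H, W, C) = (480, 640, 3) array returns a
# C-contiguous (3, 480, 640) array; channels_to_back performs the inverse roll.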
class Image(Vectorizable, Landmarkable, Viewable, LandmarkableViewable):
r"""
An n-dimensional image.
Images are n-dimensional homogeneous regular arrays of data. Each
spatially distinct location in the array is referred to as a `pixel`.
At a pixel, ``k`` distinct pieces of information can be stored. Each
datum at a pixel is referred to as being in a `channel`. All pixels in
the image have the same number of channels, and all channels have the
same data-type (`float64`).
Parameters
----------
image_data : ``(C, M, N ..., Q)`` `ndarray`
Array representing the image pixels, with the first axis being
channels.
copy : `bool`, optional
If ``False``, the ``image_data`` will not be copied on assignment.
Note that this will miss out on additional checks. Further note that we
still demand that the array is C-contiguous - if it isn't, a copy will
be generated anyway.
In general, this should only be used if you know what you are doing.
Raises
------
Warning
If ``copy=False`` cannot be honoured
ValueError
If the pixel array is malformed
"""
def __init__(self, image_data, copy=True):
super(Image, self).__init__()
if not copy:
if not image_data.flags.c_contiguous:
image_data = np.array(image_data, copy=True, order='C')
warn('The copy flag was NOT honoured. A copy HAS been made. '
'Please ensure the data you pass is C-contiguous.')
else:
image_data = np.array(image_data, copy=True, order='C')
# Degenerate case whereby we can just put the extra axis
# on ourselves
if image_data.ndim == 2:
# Ensures that the data STAYS C-contiguous
image_data = image_data.reshape((1,) + image_data.shape)
if image_data.ndim < 2:
raise ValueError(
"Pixel array has to be 2D (implicitly 1 channel, "
"2D shape) or 3D+ (n_channels, 2D+ shape) "
" - a {}D array "
"was provided".format(image_data.ndim))
self.pixels = image_data
@classmethod
def init_blank(cls, shape, n_channels=1, fill=0, dtype=np.float):
r"""
Returns a blank image.
Parameters
----------
shape : `tuple` or `list`
The shape of the image. Any floating point values are rounded up
to the nearest integer.
n_channels : `int`, optional
The number of channels to create the image with.
fill : `int`, optional
The value to fill all pixels with.
dtype : numpy data type, optional
The data type of the image.
Returns
-------
blank_image : :map:`Image`
A new image of the requested size.
"""
# Ensure that the '+' operator means concatenate tuples
shape = tuple(np.ceil(shape).astype(np.int))
if fill == 0:
pixels = np.zeros((n_channels,) + shape, dtype=dtype)
else:
pixels = np.ones((n_channels,) + shape, dtype=dtype) * fill
# We know there is no need to copy...
return cls(pixels, copy=False)
@classmethod
def init_from_rolled_channels(cls, pixels):
r"""
Deprecated - please use the equivalent ``init_from_channels_at_back`` method.
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .init_from_channels_at_back instead.',
MenpoDeprecationWarning)
return cls.init_from_channels_at_back(pixels)
@classmethod
def init_from_channels_at_back(cls, pixels):
r"""
Create an Image from a set of pixels where the channels axis is on
the last axis (the back). This is common in other frameworks, and
therefore this method provides a convenient means of creating a menpo
Image from such data. Note that a copy is always created due to the
need to rearrange the data.
Parameters
----------
pixels : ``(M, N ..., Q, C)`` `ndarray`
Array representing the image pixels, with the last axis being
channels.
Returns
-------
image : :map:`Image`
A new image from the given pixels, with the FIRST axis as the
channels.
Raises
------
ValueError
If image is not at least 2D, i.e. has at least 2 dimensions plus
the channels in the end.
"""
if pixels.ndim == 2:
pixels = pixels[..., None]
if pixels.ndim < 2:
raise ValueError(
"Pixel array has to be 2D "
"(2D shape, implicitly 1 channel) "
"or 3D+ (2D+ shape, n_channels) "
" - a {}D array "
"was provided".format(pixels.ndim))
return cls(channels_to_front(pixels))
@classmethod
def init_from_pointcloud(cls, pointcloud, group=None, boundary=0,
n_channels=1, fill=0, dtype=np.float,
return_transform=False):
r"""
Create an Image that is big enough to contain the given pointcloud.
The pointcloud will be translated to the origin and then translated
according to its bounds in order to fit inside the new image.
An optional boundary can be provided in order to increase the space
around the boundary of the pointcloud. The boundary will be added
to *all sides of the image* and so a boundary of 5 provides 10 pixels
of boundary total for each dimension.
Parameters
----------
pointcloud : :map:`PointCloud`
Pointcloud to place inside the newly created image.
group : `str`, optional
If ``None``, the pointcloud will only be used to create the image.
If a `str` then the pointcloud will be attached as a landmark
group to the image, with the given string as key.
boundary : `float`
An optional padding distance that is added to the pointcloud bounds.
Default is ``0``, meaning the max/min of tightest possible
containing image is returned.
n_channels : `int`, optional
The number of channels to create the image with.
fill : `int`, optional
The value to fill all pixels with.
dtype : numpy data type, optional
The data type of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
adjust the PointCloud in order to build the image, is returned.
Returns
-------
image : ``type(cls)`` Image or subclass
A new image with the same size as the given pointcloud, optionally
with the pointcloud attached as landmarks.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
# Translate pointcloud to the origin
minimum = pointcloud.bounds(boundary=boundary)[0]
tr = Translation(-minimum)
origin_pc = tr.apply(pointcloud)
image_shape = origin_pc.range(boundary=boundary)
new_image = cls.init_blank(image_shape, n_channels=n_channels,
fill=fill, dtype=dtype)
if group is not None:
new_image.landmarks[group] = origin_pc
if return_transform:
return new_image, tr
else:
return new_image
def as_masked(self, mask=None, copy=True):
r"""
Return a copy of this image with an attached mask behavior.
A custom mask may be provided, or ``None``. See the :map:`MaskedImage`
constructor for details of how the kwargs will be handled.
Parameters
----------
mask : ``(self.shape)`` `ndarray` or :map:`BooleanImage`
A mask to attach to the newly generated masked image.
copy : `bool`, optional
If ``False``, the produced :map:`MaskedImage` will share pixels with
``self``. Only suggested to be used for performance.
Returns
-------
masked_image : :map:`MaskedImage`
An image with the same pixels and landmarks as this one, but with
a mask.
"""
from menpo.image import MaskedImage
return copy_landmarks_and_path(self,
MaskedImage(self.pixels,
mask=mask, copy=copy))
@property
def n_dims(self):
r"""
The number of dimensions in the image. The minimum possible ``n_dims``
is 2.
:type: `int`
"""
return len(self.shape)
@property
def n_pixels(self):
r"""
Total number of pixels in the image ``(prod(shape),)``
:type: `int`
"""
return self.pixels[0, ...].size
@property
def n_elements(self):
r"""
Total number of data points in the image
``(prod(shape), n_channels)``
:type: `int`
"""
return self.pixels.size
@property
def n_channels(self):
"""
The number of channels on each pixel in the image.
:type: `int`
"""
return self.pixels.shape[0]
@property
def width(self):
r"""
The width of the image.
This is the width according to image semantics, and is thus the size
of the **last** dimension.
:type: `int`
"""
return self.pixels.shape[-1]
@property
def height(self):
r"""
The height of the image.
This is the height according to image semantics, and is thus the size
of the **second to last** dimension.
:type: `int`
"""
return self.pixels.shape[-2]
@property
def shape(self):
r"""
The shape of the image
(with ``n_channel`` values at each point).
:type: `tuple`
"""
return self.pixels.shape[1:]
def bounds(self):
r"""
The bounds of the image, minimum is always (0, 0). The maximum is
the maximum **index** that can be used to index into the image for each
dimension. Therefore, bounds will be of the form:
((0, 0), (self.height - 1, self.width - 1)) for a 2D image.
Note that this is akin to supporting a nearest neighbour interpolation.
Although the *actual* maximum subpixel value would be something
like ``self.height - eps`` where ``eps`` is some value arbitrarily
close to 0, this value at least allows sampling without worrying about
floating point error.
:type: `tuple`
"""
return (0,) * self.n_dims, tuple(s - 1 for s in self.shape)
def diagonal(self):
r"""
The diagonal size of this image
:type: `float`
"""
return np.sqrt(np.sum(np.array(self.shape) ** 2))
def centre(self):
r"""
The geometric centre of the Image - the subpixel that is in the
middle.
Useful for aligning shapes and images.
:type: (``n_dims``,) `ndarray`
"""
return np.array(self.shape, dtype=np.double) / 2
def _str_shape(self):
if self.n_dims > 2:
return ' x '.join(str(dim) for dim in self.shape)
elif self.n_dims == 2:
return '{}W x {}H'.format(self.width, self.height)
def indices(self):
r"""
Return the indices of all pixels in this image.
:type: (``n_dims``, ``n_pixels``) ndarray
"""
return indices_for_image_of_shape(self.shape)
def _as_vector(self, keep_channels=False):
r"""
The vectorized form of this image.
Parameters
----------
keep_channels : `bool`, optional
========== =============================
Value Return shape
========== =============================
`False` ``(n_channels * n_pixels,)``
`True` ``(n_channels, n_pixels)``
========== =============================
Returns
-------
vec : (See ``keep_channels`` above) `ndarray`
Flattened representation of this image, containing all pixel
and channel information.
"""
if keep_channels:
return self.pixels.reshape([self.n_channels, -1])
else:
return self.pixels.ravel()
def from_vector(self, vector, n_channels=None, copy=True):
r"""
Takes a flattened vector and returns a new image formed by reshaping
the vector to the correct pixels and channels.
The `n_channels` argument is useful for when we want to add an extra
channel to an image but maintain the shape. For example, when
calculating the gradient.
Note that landmarks are transferred in the process.
Parameters
----------
vector : ``(n_parameters,)`` `ndarray`
A flattened vector of all pixels and channels of an image.
n_channels : `int`, optional
If given, will assume that vector is the same shape as this image,
but with a possibly different number of channels.
copy : `bool`, optional
If ``False``, the vector will not be copied in creating the new
image.
Returns
-------
image : :map:`Image`
New image of same shape as this image and the number of
specified channels.
Raises
------
Warning
If the ``copy=False`` flag cannot be honored
"""
# This is useful for when we want to add an extra channel to an image
# but maintain the shape. For example, when calculating the gradient
n_channels = self.n_channels if n_channels is None else n_channels
image_data = vector.reshape((n_channels,) + self.shape)
new_image = Image(image_data, copy=copy)
new_image.landmarks = self.landmarks
return new_image
def _from_vector_inplace(self, vector, copy=True):
r"""
Takes a flattened vector and update this image by
reshaping the vector to the correct dimensions.
Parameters
----------
vector : ``(n_pixels,)`` `bool ndarray`
A vector of all the pixels of a :map:`BooleanImage`.
copy: `bool`, optional
If ``False``, the vector will be set as the pixels. If ``True``, a
copy of the vector is taken.
Raises
------
Warning
If ``copy=False`` flag cannot be honored
Note
----
For :map:`BooleanImage` this is rebuilding a boolean image **itself**
from boolean values. The mask is in no way interpreted in performing
the operation, in contrast to :map:`MaskedImage`, where only the masked
region is used in :meth:`from_vector_inplace` and :meth:`as_vector`.
"""
image_data = vector.reshape(self.pixels.shape)
if not copy:
if not image_data.flags.c_contiguous:
warn('The copy flag was NOT honoured. A copy HAS been made. '
'Please ensure the data you pass is C-contiguous.')
image_data = np.array(image_data, copy=True, order='C',
dtype=image_data.dtype)
else:
image_data = np.array(image_data, copy=True, order='C',
dtype=image_data.dtype)
self.pixels = image_data
def extract_channels(self, channels):
r"""
A copy of this image with only the specified channels.
Parameters
----------
channels : `int` or `[int]`
The channel index or `list` of channel indices to retain.
Returns
-------
image : `type(self)`
A copy of this image with only the channels requested.
"""
copy = self.copy()
if not isinstance(channels, list):
channels = [channels] # ensure we don't remove the channel axis
copy.pixels = self.pixels[channels]
return copy
def as_histogram(self, keep_channels=True, bins='unique'):
r"""
Histogram binning of the values of this image.
Parameters
----------
keep_channels : `bool`, optional
If set to ``False``, it returns a single histogram for all the
channels of the image. If set to ``True``, it returns a `list` of
histograms, one for each channel.
bins : ``{unique}``, positive `int` or sequence of scalars, optional
If set equal to ``'unique'``, the bins of the histograms are centred
on the unique values of each channel. If set equal to a positive
`int`, then this is the number of bins. If set equal to a
sequence of scalars, these will be used as bins centres.
Returns
-------
hist : `ndarray` or `list` with ``n_channels`` `ndarrays` inside
The histogram(s). If ``keep_channels=False``, then hist is an
`ndarray`. If ``keep_channels=True``, then hist is a `list` with
``len(hist)=n_channels``.
bin_edges : `ndarray` or `list` with `n_channels` `ndarrays` inside
An array or a list of arrays corresponding to the above histograms
that store the bins' edges.
Raises
------
ValueError
Bins can be either 'unique', positive int or a sequence of scalars.
Examples
--------
Visualizing the histogram when a list of array bin edges is provided:
>>> hist, bin_edges = image.as_histogram()
>>> for k in range(len(hist)):
>>> plt.subplot(1,len(hist),k)
>>> width = 0.7 * (bin_edges[k][1] - bin_edges[k][0])
>>> centre = (bin_edges[k][:-1] + bin_edges[k][1:]) / 2
>>> plt.bar(centre, hist[k], align='center', width=width)
"""
# parse options
if isinstance(bins, basestring):
if bins == 'unique':
bins = 0
else:
raise ValueError("Bins can be either 'unique', positive int or"
"a sequence of scalars.")
elif isinstance(bins, int) and bins < 1:
raise ValueError("Bins can be either 'unique', positive int or a "
"sequence of scalars.")
# compute histogram
vec = self.as_vector(keep_channels=keep_channels)
if len(vec.shape) == 1 or vec.shape[0] == 1:
if bins == 0:
bins = np.unique(vec)
hist, bin_edges = np.histogram(vec, bins=bins)
else:
hist = []
bin_edges = []
num_bins = bins
for ch in range(vec.shape[0]):
if bins == 0:
num_bins = np.unique(vec[ch, :])
h_tmp, c_tmp = np.histogram(vec[ch, :], bins=num_bins)
hist.append(h_tmp)
bin_edges.append(c_tmp)
return hist, bin_edges
def _view_2d(self, figure_id=None, new_figure=False, channels=None,
interpolation='bilinear', cmap_name=None, alpha=1.,
render_axes=False, axes_font_name='sans-serif',
axes_font_size=10, axes_font_style='normal',
axes_font_weight='normal', axes_x_limits=None,
axes_y_limits=None, axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
r"""
View the image using the default image viewer. This method will appear
on the Image as ``view`` if the Image is 2D.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated.
Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36,
hanning, hamming, hermite, kaiser, quadric, catrom, gaussian,
bessel, mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Returns
-------
viewer : `ImageViewer`
The image viewing object.
"""
return ImageViewer(figure_id, new_figure, self.n_dims,
self.pixels, channels=channels).render(
interpolation=interpolation, cmap_name=cmap_name, alpha=alpha,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size)
def view_widget(self, browser_style='buttons', figure_size=(10, 8),
style='coloured'):
r"""
Visualizes the image object using an interactive widget. Currently
only supports the rendering of 2D images.
Parameters
----------
browser_style : {``'buttons'``, ``'slider'``}, optional
It defines whether the selector of the images will have the form of
plus/minus buttons or a slider.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
"""
try:
from menpowidgets import visualize_images
visualize_images(self, figure_size=figure_size, style=style,
browser_style=browser_style)
except ImportError:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError()
def _view_landmarks_2d(self, channels=None, group=None,
with_labels=None, without_labels=None,
figure_id=None, new_figure=False,
interpolation='bilinear', cmap_name=None, alpha=1.,
render_lines=True, line_colour=None, line_style='-',
line_width=1, render_markers=True, marker_style='o',
marker_size=5, marker_face_colour=None,
marker_edge_colour=None, marker_edge_width=1.,
render_numbering=False,
numbers_horizontal_align='center',
numbers_vertical_align='bottom',
numbers_font_name='sans-serif', numbers_font_size=10,
numbers_font_style='normal',
numbers_font_weight='normal',
numbers_font_colour='k', render_legend=False,
legend_title='', legend_font_name='sans-serif',
legend_font_style='normal', legend_font_size=10,
legend_font_weight='normal',
legend_marker_scale=None,
legend_location=2, legend_bbox_to_anchor=(1.05, 1.),
legend_border_axes_pad=None, legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None, legend_border=True,
legend_border_padding=None, legend_shadow=False,
legend_rounded_corners=False, render_axes=False,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None,
axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
"""
Visualize the landmarks. This method will appear on the Image as
``view_landmarks`` if the Image is 2D.
Parameters
----------
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
group : `str` or``None`` optional
The landmark group to be visualized. If ``None`` and there are more
than one landmark groups, an error is raised.
with_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, only show the given label(s). Should **not** be
used with the ``without_labels`` kwarg.
without_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, show all except the given label(s). Should **not**
be used with the ``with_labels`` kwarg.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated. Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36, hanning,
hamming, hermite, kaiser, quadric, catrom, gaussian, bessel,
mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
render_lines : `bool`, optional
If ``True``, the edges will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_style : ``{-, --, -., :}``, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : ``{center, right, left}``, optional
The horizontal alignment of the numbers' texts.
numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
The vertical alignment of the numbers' texts.
numbers_font_name : See Below, optional
The font of the numbers. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : See Below, optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_legend : `bool`, optional
If ``True``, the legend will be rendered.
legend_title : `str`, optional
The title of the legend.
legend_font_name : See below, optional
The font of the legend. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
legend_font_style : ``{normal, italic, oblique}``, optional
The font style of the legend.
legend_font_size : `int`, optional
The font size of the legend.
legend_font_weight : See Below, optional
The font weight of the legend.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
legend_marker_scale : `float`, optional
The relative size of the legend markers with respect to the original
legend_location : `int`, optional
The location of the legend. The predefined values are:
=============== ==
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== ==
legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional
The bbox that the legend will be anchored.
legend_border_axes_pad : `float`, optional
The pad between the axes and legend border.
legend_n_columns : `int`, optional
The number of the legend's columns.
legend_horizontal_spacing : `float`, optional
The spacing between the columns.
legend_vertical_spacing : `float`, optional
The vertical space between the legend entries.
legend_border : `bool`, optional
If ``True``, a frame will be drawn around the legend.
legend_border_padding : `float`, optional
The fractional whitespace inside the legend border.
legend_shadow : `bool`, optional
If ``True``, a shadow will be drawn behind legend.
legend_rounded_corners : `bool`, optional
If ``True``, the frame's corners will be rounded (fancybox).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold,demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None`` optional
The size of the figure in inches.
Raises
------
ValueError
If both ``with_labels`` and ``without_labels`` are passed.
ValueError
If the landmark manager doesn't contain the provided group label.
"""
from menpo.visualize import view_image_landmarks
return view_image_landmarks(
self, channels, False, group, with_labels, without_labels,
figure_id, new_figure, interpolation, cmap_name, alpha,
render_lines, line_colour, line_style, line_width,
render_markers, marker_style, marker_size, marker_face_colour,
marker_edge_colour, marker_edge_width, render_numbering,
numbers_horizontal_align, numbers_vertical_align,
numbers_font_name, numbers_font_size, numbers_font_style,
numbers_font_weight, numbers_font_colour, render_legend,
legend_title, legend_font_name, legend_font_style,
legend_font_size, legend_font_weight, legend_marker_scale,
legend_location, legend_bbox_to_anchor, legend_border_axes_pad,
legend_n_columns, legend_horizontal_spacing,
legend_vertical_spacing, legend_border, legend_border_padding,
legend_shadow, legend_rounded_corners, render_axes, axes_font_name,
axes_font_size, axes_font_style, axes_font_weight, axes_x_limits,
axes_y_limits, axes_x_ticks, axes_y_ticks, figure_size)
def crop(self, min_indices, max_indices, constrain_to_boundary=False,
return_transform=False):
r"""
Return a cropped copy of this image using the given minimum and
maximum indices. Landmarks are correctly adjusted so they maintain
their position relative to the newly cropped image.
Parameters
----------
min_indices : ``(n_dims,)`` `ndarray`
The minimum index over each dimension.
max_indices : ``(n_dims,)`` `ndarray`
The maximum index over each dimension.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
cropped_image : `type(self)`
A new instance of self, but cropped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
``min_indices`` and ``max_indices`` both have to be of length
``n_dims``. All ``max_indices`` must be greater than
``min_indices``.
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
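Examples
--------
A minimal usage sketch (the built-in asset and window size below are
purely illustrative) ::
import menpo.io as mio
im = mio.import_builtin_asset.lenna_png()
# keep the top-left 100 x 100 window of the image
cropped = im.crop((0, 0), (100, 100))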
"""
min_indices = np.floor(min_indices)
max_indices = np.ceil(max_indices)
if not (min_indices.size == max_indices.size == self.n_dims):
raise ValueError(
"Both min and max indices should be 1D numpy arrays of"
" length n_dims ({})".format(self.n_dims))
elif not np.all(max_indices > min_indices):
raise ValueError("All max indices must be greater that the min "
"indices")
min_bounded = self.constrain_points_to_bounds(min_indices)
max_bounded = self.constrain_points_to_bounds(max_indices)
all_min_bounded = np.all(min_bounded == min_indices)
all_max_bounded = np.all(max_bounded == max_indices)
if not (constrain_to_boundary or all_min_bounded or all_max_bounded):
# points have been constrained and the user didn't want this
raise ImageBoundaryError(min_indices, max_indices,
min_bounded, max_bounded)
new_shape = (max_bounded - min_bounded).astype(np.int)
return self.warp_to_shape(new_shape, Translation(min_bounded), order=0,
warp_landmarks=True,
return_transform=return_transform)
def crop_to_pointcloud(self, pointcloud, boundary=0,
constrain_to_boundary=True,
return_transform=False):
r"""
Return a copy of this image cropped so that it is bounded around a
pointcloud with an optional ``n_pixel`` boundary.
Parameters
----------
pointcloud : :map:`PointCloud`
The pointcloud to crop around.
boundary : `int`, optional
An extra padding to be added all around the landmarks bounds.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to the bounds of the pointcloud.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
min_indices, max_indices = pointcloud.bounds(boundary=boundary)
return self.crop(min_indices, max_indices,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform)
def crop_to_landmarks(self, group=None, boundary=0,
constrain_to_boundary=True,
return_transform=False):
r"""
Return a copy of this image cropped so that it is bounded around a set
of landmarks with an optional ``n_pixel`` boundary
Parameters
----------
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
boundary : `int`, optional
An extra padding to be added all around the landmarks bounds.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to its landmarks.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
pc = self.landmarks[group]
return self.crop_to_pointcloud(
pc, boundary=boundary, constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform)
def crop_to_pointcloud_proportion(self, pointcloud, boundary_proportion,
minimum=True,
constrain_to_boundary=True,
return_transform=False):
r"""
Return a copy of this image cropped so that it is bounded around a
pointcloud with a border proportional to the pointcloud spread or range.
Parameters
----------
pointcloud : :map:`PointCloud`
The pointcloud to crop around.
boundary_proportion : `float`
Additional padding to be added all around the landmarks
bounds defined as a proportion of the landmarks range. See
the minimum parameter for a definition of how the range is
calculated.
minimum : `bool`, optional
If ``True`` the specified proportion is relative to the minimum
value of the pointcloud's per-dimension range; if ``False`` w.r.t.
the maximum value of the pointcloud's per-dimension range.
constrain_to_boundary : `bool`, optional
If ``True``, the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to the border proportional to
the pointcloud spread or range.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
if minimum:
boundary = boundary_proportion * np.min(pointcloud.range())
else:
boundary = boundary_proportion * np.max(pointcloud.range())
return self.crop_to_pointcloud(
pointcloud, boundary=boundary,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform)
def crop_to_landmarks_proportion(self, boundary_proportion,
group=None, minimum=True,
constrain_to_boundary=True,
return_transform=False):
r"""
Return a copy of this image cropped so that it is bounded around a set
of landmarks with a border proportional to the landmark spread or range.
Parameters
----------
boundary_proportion : `float`
Additional padding to be added all around the landmarks
bounds defined as a proportion of the landmarks range. See
the minimum parameter for a definition of how the range is
calculated.
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
minimum : `bool`, optional
If ``True`` the specified proportion is relative to the minimum
value of the landmarks' per-dimension range; if ``False`` w.r.t. the
maximum value of the landmarks' per-dimension range.
constrain_to_boundary : `bool`, optional
If ``True``, the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image, cropped to its landmarks with a border proportional to
the landmark spread or range.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
pc = self.landmarks[group]
return self.crop_to_pointcloud_proportion(
pc, boundary_proportion, minimum=minimum,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform)
def constrain_points_to_bounds(self, points):
r"""
Constrains the points provided to be within the bounds of this image.
Parameters
----------
points : ``(d,)`` `ndarray`
Points to be snapped to the image boundaries.
Returns
-------
bounded_points : ``(d,)`` `ndarray`
Points snapped to not stray outside the image edges.
"""
bounded_points = points.copy()
# check we don't stray under any edges
bounded_points[bounded_points < 0] = 0
# check we don't stray over any edges
shape = np.array(self.shape)
over_image = (shape - bounded_points) < 0
bounded_points[over_image] = shape[over_image]
return bounded_points
def extract_patches(self, patch_centers, patch_shape=(16, 16),
sample_offsets=None, as_single_array=True):
r"""
Extract a set of patches from an image. Given a set of patch centers
and a patch size, patches are extracted from within the image, centred
on the given coordinates. Sample offsets denote a set of offsets to
extract from within a patch. This is very useful if you want to extract
a dense set of features around a set of landmarks and simply sample the
same grid of patches around the landmarks.
If sample offsets are used, to access the offsets for each patch you
need to slice the resulting `list`. So for 2 offsets, the offset patches
for the first centre would be ``patches[:2]``.
Currently only 2D images are supported.
Parameters
----------
patch_centers : :map:`PointCloud`
The centers to extract patches around.
patch_shape : ``(1, n_dims)`` `tuple` or `ndarray`, optional
The size of the patch to extract
sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
The offsets to sample from within a patch. So ``(0, 0)`` is the
centre of the patch (no offset) and ``(1, 0)`` would be sampling the
patch from 1 pixel up the first axis away from the centre.
If ``None``, then no offsets are applied.
as_single_array : `bool`, optional
If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
`ndarray`, thus a single numpy array is returned containing each
patch. If ``False``, a `list` of ``n_center * n_offset``
:map:`Image` objects is returned representing each patch.
Returns
-------
patches : `list` or `ndarray`
Returns the extracted patches. Returns a `list` if
``as_single_array=False`` and an `ndarray` if
``as_single_array=True``.
Raises
------
ValueError
If image is not 2D
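Examples
--------
A minimal sketch (asset name and centre coordinates are illustrative) ::
import numpy as np
import menpo.io as mio
from menpo.shape import PointCloud
im = mio.import_builtin_asset.lenna_png()
centres = PointCloud(np.array([[100.0, 100.0], [200.0, 150.0]]))
patches = im.extract_patches(centres, patch_shape=(16, 16))
# patches is an ndarray of shape (2, 1, 3, 16, 16) for this RGB image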
"""
if self.n_dims != 2:
raise ValueError('Only two dimensional patch extraction is '
'currently supported.')
if sample_offsets is None:
sample_offsets = np.zeros([1, 2], dtype=np.intp)
else:
sample_offsets = np.require(sample_offsets, dtype=np.intp)
patch_centers = np.require(patch_centers.points, dtype=np.float,
requirements=['C'])
single_array = extract_patches(self.pixels, patch_centers,
np.asarray(patch_shape, dtype=np.intp),
sample_offsets)
if as_single_array:
return single_array
else:
return [Image(o, copy=False) for p in single_array for o in p]
def extract_patches_around_landmarks(
self, group=None, patch_shape=(16, 16),
sample_offsets=None, as_single_array=True):
r"""
Extract patches around landmarks existing on this image. Provided the
group label and optionally the landmark label extract a set of patches.
See `extract_patches` for more information.
Currently only 2D images are supported.
Parameters
----------
group : `str` or ``None``, optional
The landmark group to use as patch centres.
patch_shape : `tuple` or `ndarray`, optional
The size of the patch to extract
sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
The offsets to sample from within a patch. So ``(0, 0)`` is the
centre of the patch (no offset) and ``(1, 0)`` would be sampling the
patch from 1 pixel up the first axis away from the centre.
If ``None``, then no offsets are applied.
as_single_array : `bool`, optional
If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
`ndarray`, thus a single numpy array is returned containing each
patch. If ``False``, a `list` of ``n_center * n_offset``
:map:`Image` objects is returned representing each patch.
Returns
-------
patches : `list` or `ndarray`
Returns the extracted patches. Returns a `list` if
``as_single_array=False`` and an `ndarray` if
``as_single_array=True``.
Raises
------
ValueError
If image is not 2D
"""
return self.extract_patches(self.landmarks[group],
patch_shape=patch_shape,
sample_offsets=sample_offsets,
as_single_array=as_single_array)
def set_patches(self, patches, patch_centers, offset=None,
offset_index=None):
r"""
Set the values of a group of patches into the correct regions of a copy
of this image. Given an array of patches and a set of patch centers,
the patches' values are copied in the regions of the image that are
centred on the coordinates of the given centers.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Currently only 2D images are supported.
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that
are returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image`
objects.
patch_centers : :map:`PointCloud`
The centers to set the patches around.
offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional
The offset to apply on the patch centers within the image.
If ``None``, then ``(0, 0)`` is used.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the
index of the second dimension from which to sample. If ``None``,
then ``0`` is used.
Raises
------
ValueError
If image is not 2D
ValueError
If offset does not have shape (1, 2)
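Examples
--------
A minimal round-trip sketch, re-inserting patches that were previously
extracted (asset name and centre coordinates are illustrative) ::
import numpy as np
import menpo.io as mio
from menpo.shape import PointCloud
im = mio.import_builtin_asset.lenna_png()
centres = PointCloud(np.array([[100.0, 100.0], [200.0, 150.0]]))
patches = im.extract_patches(centres, patch_shape=(16, 16))
# blank out the extracted regions in a copy of the image
blanked = im.set_patches(np.zeros_like(patches), centres)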
"""
# parse arguments
if self.n_dims != 2:
raise ValueError('Only two dimensional patch insertion is '
'currently supported.')
if offset is None:
offset = np.zeros([1, 2], dtype=np.intp)
elif isinstance(offset, tuple) or isinstance(offset, list):
offset = np.asarray([offset])
offset = np.require(offset, dtype=np.intp)
if not offset.shape == (1, 2):
raise ValueError('The offset must be a tuple, a list or a '
'numpy.array with shape (1, 2).')
if offset_index is None:
offset_index = 0
# if patches is a list, convert it to array
if isinstance(patches, list):
patches = _convert_patches_list_to_single_array(
patches, patch_centers.n_points)
copy = self.copy()
# set patches
set_patches(patches, copy.pixels, patch_centers.points, offset,
offset_index)
return copy
def set_patches_around_landmarks(self, patches, group=None,
offset=None, offset_index=None):
r"""
Set the values of a group of patches around the landmarks existing in a
copy of this image. Given an array of patches, a group and a label, the
patches' values are copied in the regions of the image that are
centred on the coordinates of corresponding landmarks.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Currently only 2D images are supported.
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that
are returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image`
objects.
group : `str` or ``None``, optional
The landmark group to use as patch centres.
offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional
The offset to apply on the patch centers within the image.
If ``None``, then ``(0, 0)`` is used.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the
index of the second dimension from which to sample. If ``None``,
then ``0`` is used.
Raises
------
ValueError
If image is not 2D
ValueError
If offset does not have shape (1, 2)
"""
return self.set_patches(patches, self.landmarks[group],
offset=offset, offset_index=offset_index)
def warp_to_mask(self, template_mask, transform, warp_landmarks=True,
order=1, mode='constant', cval=0.0, batch_size=None,
return_transform=False):
r"""
Return a copy of this image warped into a different reference space.
Note that warping into a mask is slower than warping into a full image.
If you don't need a non-linear mask, consider :meth:`warp_to_shape`
instead.
Parameters
----------
template_mask : :map:`BooleanImage`
Defines the shape of the result, and what pixels should be sampled.
transform : :map:`Transform`
Transform **from the template space back to this image**.
Defines, for each pixel location on the template, which pixel
location should be sampled from on this image.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as ``self``, but with each landmark updated to the warped position.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
batch_size : `int` or ``None``, optional
This should only be considered for large images. Setting this
value can cause warping to become much slower, particular for
cached warps such as Piecewise Affine. This size indicates
how many points in the image should be warped at a time, which
keeps memory usage low. If ``None``, no batching is used and all
points are warped at once.
return_transform : `bool`, optional
This argument is for internal use only. If ``True``, then the
:map:`Transform` object is also returned.
Returns
-------
warped_image : :map:`MaskedImage`
A copy of this image, warped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
if self.n_dims != transform.n_dims:
raise ValueError(
"Trying to warp a {}D image with a {}D transform "
"(they must match)".format(self.n_dims, transform.n_dims))
template_points = template_mask.true_indices()
points_to_sample = transform.apply(template_points,
batch_size=batch_size)
sampled = self.sample(points_to_sample,
order=order, mode=mode, cval=cval)
# set any nan values to 0
sampled[np.isnan(sampled)] = 0
# build a warped version of the image
warped_image = self._build_warp_to_mask(template_mask, sampled)
if warp_landmarks and self.has_landmarks:
warped_image.landmarks = self.landmarks
transform.pseudoinverse()._apply_inplace(warped_image.landmarks)
if hasattr(self, 'path'):
warped_image.path = self.path
# optionally return the transform
if return_transform:
return warped_image, transform
else:
return warped_image
def _build_warp_to_mask(self, template_mask, sampled_pixel_values):
r"""
Builds the warped image from the template mask and sampled pixel values.
Overridden for :map:`BooleanImage` as we can't use the usual
:meth:`from_vector_inplace` method. All other :map:`Image` classes
share the :map:`Image` implementation.
Parameters
----------
template_mask : :map:`BooleanImage` or 2D `bool ndarray`
Mask for warping.
sampled_pixel_values : ``(n_true_pixels_in_mask,)`` `ndarray`
Sampled value to rebuild the masked image from.
"""
from menpo.image import MaskedImage
warped_image = MaskedImage.init_blank(template_mask.shape,
n_channels=self.n_channels,
mask=template_mask)
warped_image._from_vector_inplace(sampled_pixel_values.ravel())
return warped_image
def sample(self, points_to_sample, order=1, mode='constant', cval=0.0):
r"""
Sample this image at the given sub-pixel accurate points. The input
PointCloud should have the same number of dimensions as the image e.g.
a 2D PointCloud for a 2D multi-channel image. A numpy array will be
returned the has the values for every given point across each channel
of the image.
Parameters
----------
points_to_sample : :map:`PointCloud`
Array of points to sample from the image. Should be
`(n_points, n_dims)`
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5].
See warp_to_shape for more information.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
Returns
-------
sampled_pixels : (`n_points`, `n_channels`) `ndarray`
The interpolated values taken across every channel of the image.
"""
# The public interface is a PointCloud, but when this is used internally
# a numpy array is passed. So let's just treat the PointCloud as a
# 'special case' and not document the ndarray ability.
if isinstance(points_to_sample, PointCloud):
points_to_sample = points_to_sample.points
return scipy_interpolation(self.pixels, points_to_sample,
order=order, mode=mode, cval=cval)
def warp_to_shape(self, template_shape, transform, warp_landmarks=True,
order=1, mode='constant', cval=0.0, batch_size=None,
return_transform=False):
"""
Return a copy of this image warped into a different reference space.
Parameters
----------
template_shape : `tuple` or `ndarray`
Defines the shape of the result, and what pixel indices should be
sampled (all of them).
transform : :map:`Transform`
Transform **from the template_shape space back to this image**.
Defines, for each index on template_shape, which pixel location
should be sampled from on this image.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
batch_size : `int` or ``None``, optional
This should only be considered for large images. Setting this
value can cause warping to become much slower, particular for
cached warps such as Piecewise Affine. This size indicates
how many points in the image should be warped at a time, which
keeps memory usage low. If ``None``, no batching is used and all
points are warped at once.
return_transform : `bool`, optional
This argument is for internal use only. If ``True``, then the
:map:`Transform` object is also returned.
Returns
-------
warped_image : `type(self)`
A copy of this image, warped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
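Examples
--------
A minimal sketch performing a crop-like warp with a pure translation
(offsets and window size are illustrative) ::
import menpo.io as mio
from menpo.transform import Translation
im = mio.import_builtin_asset.lenna_png()
# sample a 100 x 100 window whose top-left corner sits at (20, 30)
window = im.warp_to_shape((100, 100), Translation([20, 30]))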
"""
template_shape = np.array(template_shape, dtype=np.int)
if (isinstance(transform, Affine) and order in range(4) and
self.n_dims == 2):
# we are going to be able to go fast.
if isinstance(transform, Translation) and order == 0:
# an integer translation (e.g. a crop) If this lies entirely
# in the bounds then we can just do a copy. We need to match
# the behavior of cython_interpolation exactly, which means
# matching its rounding behavior too:
t = transform.translation_component.copy()
pos_t = t > 0.0
t[pos_t] += 0.5
t[~pos_t] -= 0.5
min_ = t.astype(np.int)
max_ = template_shape + min_
if np.all(max_ <= np.array(self.shape)) and np.all(min_ >= 0):
# we have a crop - slice the pixels.
warped_pixels = self.pixels[:,
int(min_[0]):int(max_[0]),
int(min_[1]):int(max_[1])].copy()
return self._build_warp_to_shape(warped_pixels, transform,
warp_landmarks,
return_transform)
# we couldn't do the crop, but skimage has an optimised Cython
# interpolation for 2D affine warps - let's use that
sampled = cython_interpolation(self.pixels, template_shape,
transform, order=order,
mode=mode, cval=cval)
else:
template_points = indices_for_image_of_shape(template_shape)
points_to_sample = transform.apply(template_points,
batch_size=batch_size)
sampled = self.sample(points_to_sample,
order=order, mode=mode, cval=cval)
# set any nan values to 0
sampled[np.isnan(sampled)] = 0
# build a warped version of the image
warped_pixels = sampled.reshape(
(self.n_channels,) + tuple(template_shape))
return self._build_warp_to_shape(warped_pixels, transform,
warp_landmarks, return_transform)
def _build_warp_to_shape(self, warped_pixels, transform, warp_landmarks,
return_transform):
# factored out common logic from the different paths we can take in
# warp_to_shape. Rebuilds an image post-warp, adjusting landmarks
# as necessary.
warped_image = Image(warped_pixels, copy=False)
# warp landmarks if requested.
if warp_landmarks and self.has_landmarks:
warped_image.landmarks = self.landmarks
transform.pseudoinverse()._apply_inplace(warped_image.landmarks)
if hasattr(self, 'path'):
warped_image.path = self.path
# optionally return the transform
if return_transform:
return warped_image, transform
else:
return warped_image
def rescale(self, scale, round='ceil', order=1,
return_transform=False):
r"""
Return a copy of this image, rescaled by a given factor.
Landmarks are rescaled appropriately.
Parameters
----------
scale : `float` or `tuple` of `floats`
The scale factor. If a tuple, the scale to apply to each dimension.
If a single `float`, the scale will be applied uniformly across
each dimension.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError:
If fewer scales than dimensions are provided.
If any scale is less than or equal to 0.
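Examples
--------
A minimal sketch (the asset name and scale factors are illustrative) ::
import menpo.io as mio
im = mio.import_builtin_asset.lenna_png()
half = im.rescale(0.5)              # uniform rescale across dimensions
stretched = im.rescale((0.5, 2.0))  # a different scale per dimension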
"""
# Pythonic way of converting to list if we are passed a single float
try:
if len(scale) < self.n_dims:
raise ValueError(
'Must provide a scale per dimension.'
'{} scales were provided, {} were expected.'.format(
len(scale), self.n_dims
)
)
except TypeError: # Thrown when len() is called on a float
scale = [scale] * self.n_dims
# Make sure we have a numpy array
scale = np.asarray(scale)
for s in scale:
if s <= 0:
raise ValueError('Scales must be positive floats.')
transform = NonUniformScale(scale)
# use the scale factor to make the template mask bigger
# while respecting the user's rounding preference.
template_shape = round_image_shape(transform.apply(self.shape),
round)
# due to image indexing, we can't just apply the pseudoinverse
# transform to achieve the scaling we want though!
# Consider a 3x rescale on a 2x4 image. Looking at each dimension:
# H 2 -> 6 so [0-1] -> [0-5] = 5/1 = 5x
# W 4 -> 12 [0-3] -> [0-11] = 11/3 = 3.67x
# => need to make the correct scale per dimension!
shape = np.array(self.shape, dtype=np.float)
# scale factors = max_index_after / current_max_index
# (note that max_index = length - 1, as 0 based)
scale_factors = (scale * shape - 1) / (shape - 1)
inverse_transform = NonUniformScale(scale_factors).pseudoinverse()
# for rescaling we enforce that mode is nearest to avoid num. errors
return self.warp_to_shape(template_shape, inverse_transform,
warp_landmarks=True, order=order,
mode='nearest',
return_transform=return_transform)
def rescale_to_diagonal(self, diagonal, round='ceil',
return_transform=False):
r"""
Return a copy of this image, rescaled so that its diagonal is a
new size.
Parameters
----------
diagonal: `int`
The diagonal size of the new image.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : type(self)
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
return self.rescale(diagonal / self.diagonal(), round=round,
return_transform=return_transform)
def rescale_to_pointcloud(self, pointcloud, group=None,
round='ceil', order=1,
return_transform=False):
r"""
Return a copy of this image, rescaled so that the scale of a
particular group of landmarks matches the scale of the passed
reference pointcloud.
Parameters
----------
pointcloud: :map:`PointCloud`
The reference pointcloud to which the landmarks specified by
``group`` will be scaled to match.
group : `str`, optional
The key of the landmark set that should be used. If ``None``,
and if there is only one set of landmarks, this set will be used.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
pc = self.landmarks[group]
scale = AlignmentUniformScale(pc, pointcloud).as_vector().copy()
return self.rescale(scale, round=round, order=order,
return_transform=return_transform)
def rescale_landmarks_to_diagonal_range(self, diagonal_range, group=None,
round='ceil', order=1,
return_transform=False):
r"""
Return a copy of this image, rescaled so that the diagonal of the
bounding box containing its landmarks matches the specified
``diagonal_range``.
Parameters
----------
diagonal_range: ``(n_dims,)`` `ndarray`
The diagonal range that we want the landmarks of the returned
image to have.
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
round : ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
x, y = self.landmarks[group].range()
scale = diagonal_range / np.sqrt(x ** 2 + y ** 2)
return self.rescale(scale, round=round, order=order,
return_transform=return_transform)
def resize(self, shape, order=1, return_transform=False):
r"""
Return a copy of this image, resized to a particular shape.
All image information (landmarks, and mask in the case of
:map:`MaskedImage`) is resized appropriately.
Parameters
----------
shape : `tuple`
The new shape to resize to.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the resize is also returned.
Returns
-------
resized_image : ``type(self)``
A copy of this image, resized.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError:
If the number of dimensions of the new shape does not match
the number of dimensions of the image.
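Examples
--------
A minimal sketch (the target shape is illustrative) ::
import menpo.io as mio
im = mio.import_builtin_asset.lenna_png()
small = im.resize((64, 128))
# small.shape == (64, 128)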
"""
shape = np.asarray(shape, dtype=np.float)
if len(shape) != self.n_dims:
raise ValueError(
'Dimensions must match.'
'{} dimensions provided, {} were expected.'.format(
shape.shape, self.n_dims))
scales = shape / self.shape
# Have to round the shape when scaling to deal with floating point
# errors. For example, if we want (250, 250), we need to ensure that
# we get (250, 250) even if the number we obtain is 250 to some
# floating point inaccuracy.
return self.rescale(scales, round='round', order=order,
return_transform=return_transform)
def zoom(self, scale, cval=0.0, return_transform=False):
r"""
Return a copy of this image, zoomed about the centre point. ``scale``
values greater than 1.0 denote zooming **in** to the image and values
less than 1.0 denote zooming **out** of the image. The size of the
image will not change; if you wish to scale an image, please see
:meth:`rescale`.
Parameters
----------
scale : `float`
``scale > 1.0`` denotes zooming in. Thus the image will appear
larger and areas at the edge of the zoom will be 'cropped' out.
``scale < 1.0`` denotes zooming out. The image will be padded
by the value of ``cval``.
cval : ``float``, optional
The value to be set outside the zoomed image boundaries.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the zooming is also returned.
Returns
-------
zoomed_image : ``type(self)``
A copy of this image, zoomed.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
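Examples
--------
A minimal sketch (the asset name and scale factors are illustrative) ::
import menpo.io as mio
im = mio.import_builtin_asset.lenna_png()
zoomed_in = im.zoom(2.0)    # edges are cropped away, shape unchanged
zoomed_out = im.zoom(0.5)   # padded with cval, shape unchanged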
"""
t = scale_about_centre(self, 1.0 / scale)
return self.warp_to_shape(self.shape, t, cval=cval,
return_transform=return_transform)
def rotate_ccw_about_centre(self, theta, degrees=True, retain_shape=False,
cval=0.0, round='round', order=1,
return_transform=False):
r"""
Return a copy of this image, rotated counter-clockwise about its centre.
Note that the `retain_shape` argument defines the shape of the rotated
image. If ``retain_shape=True``, then the shape of the rotated image
will be the same as that of the current image, so some regions will
probably be cropped. If ``retain_shape=False``, then the returned image
has the correct size so that the whole area of the current image is
included.
Parameters
----------
theta : `float`
The angle of rotation about the centre.
degrees : `bool`, optional
If ``True``, `theta` is interpreted in degrees. If ``False``,
``theta`` is interpreted as radians.
retain_shape : `bool`, optional
If ``True``, then the shape of the rotated image will be the same as
that of the current image, so some regions will probably be cropped.
If ``False``, then the returned image has the correct size so that
the whole area of the current image is included.
cval : `float`, optional
The value to be set outside the rotated image boundaries.
round : ``{'ceil', 'floor', 'round'}``, optional
Rounding function to be applied to floating point shapes. This is
only used in case ``retain_shape=True``.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``. This is only used in case ``retain_shape=True``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rotation is also returned.
Returns
-------
rotated_image : ``type(self)``
The rotated image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
Image rotation is presently only supported on 2D images
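Examples
--------
A minimal sketch (the asset name and angles are illustrative) ::
import menpo.io as mio
im = mio.import_builtin_asset.lenna_png()
rotated = im.rotate_ccw_about_centre(90)
# keep the original shape, cropping the corners instead
rotated_same_shape = im.rotate_ccw_about_centre(30, retain_shape=True)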
"""
if self.n_dims != 2:
raise ValueError('Image rotation is presently only supported on '
'2D images')
rotation = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees)
return self.transform_about_centre(rotation, retain_shape=retain_shape,
cval=cval, round=round, order=order,
return_transform=return_transform)
def transform_about_centre(self, transform, retain_shape=False,
cval=0.0, round='round', order=1,
return_transform=False):
r"""
Return a copy of this image, transformed about its centre.
Note that the `retain_shape` argument defines the shape of the
transformed image. If ``retain_shape=True``, then the shape of the
transformed image will be the same as that of the current image, so some
regions will probably be cropped. If ``retain_shape=False``, then the
returned image has the correct size so that the whole area of the
current image is included.
.. note::
This method will not work for transforms that result in a transform
chain as :map:`TransformChain` is not invertible.
.. note::
Be careful when defining transforms for warping images. All pixel
locations must fall within a valid range as expected by the
transform. Therefore, your transformation must accept 'negative'
pixel locations as the pixel locations provided to your transform
will have the object centre subtracted from them.
Parameters
----------
transform : :map:`ComposableTransform` and :map:`VInvertible` type
A composable transform. ``pseudoinverse`` will be invoked on the
resulting transform so it must implement a valid inverse.
retain_shape : `bool`, optional
If ``True``, then the shape of the transformed image will be the same
as that of the current image, so some regions will probably be cropped.
If ``False``, then the returned image has the correct size so that
the whole area of the current image is included.
cval : `float`, optional
The value to be set outside the transformed image boundaries.
round : ``{'ceil', 'floor', 'round'}``, optional
Rounding function to be applied to floating point shapes. This is
only used in case ``retain_shape=True``.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``. This is only used in case ``retain_shape=True``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the transformation is also returned.
Returns
-------
transformed_image : ``type(self)``
The transformed image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Examples
--------
This is an example for rotating an image about its center. Let's
first load an image, create the rotation transform and then apply it ::
import matplotlib.pyplot as plt
import menpo.io as mio
from menpo.transform import Rotation
# Load image
im = mio.import_builtin_asset.lenna_png()
# Create rotation transform
rot_tr = Rotation.init_from_2d_ccw_angle(45)
# Render original image
plt.subplot(131)
im.view_landmarks()
plt.title('Original')
# Render rotated image
plt.subplot(132)
im.transform_about_centre(rot_tr).view_landmarks()
plt.title('Rotated')
# Render rotated image that has shape equal to the original image
plt.subplot(133)
im.transform_about_centre(rot_tr, retain_shape=True).view_landmarks()
plt.title('Rotated (Retain original shape)')
Similarly, in order to apply a shear transform ::
import matplotlib.pyplot as plt
import menpo.io as mio
from menpo.transform import Affine
# Load image
im = mio.import_builtin_asset.lenna_png()
# Create shearing transform
shear_tr = Affine.init_from_2d_shear(25, 10)
# Render original image
plt.subplot(131)
im.view_landmarks()
plt.title('Original')
# Render sheared image
plt.subplot(132)
im.transform_about_centre(shear_tr).view_landmarks()
plt.title('Sheared')
# Render sheared image that has shape equal to the original image
plt.subplot(133)
im.transform_about_centre(shear_tr,
retain_shape=True).view_landmarks()
plt.title('Sheared (Retain original shape)')
"""
if retain_shape:
shape = self.shape
applied_transform = transform_about_centre(self, transform)
else:
# Get image's bounding box coordinates
original_bbox = bounding_box((0, 0),
np.array(self.shape) - 1)
# Translate to origin and apply transform
trans = Translation(-self.centre(),
skip_checks=True).compose_before(transform)
transformed_bbox = trans.apply(original_bbox)
# Create new translation so that min bbox values go to 0
t = Translation(-transformed_bbox.bounds()[0])
applied_transform = trans.compose_before(t)
transformed_bbox = applied_transform.apply(original_bbox)
# Output image's shape is the range of the transformed bounding box
# while respecting the user's rounding preference.
shape = round_image_shape(transformed_bbox.range() + 1, round)
# Warp image
return self.warp_to_shape(
shape, applied_transform.pseudoinverse(), order=order,
warp_landmarks=True, cval=cval, return_transform=return_transform)
def mirror(self, axis=1, return_transform=False):
r"""
Return a copy of this image, mirrored/flipped about a certain axis.
Parameters
----------
axis : `int`, optional
The axis about which to mirror the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the mirroring is also returned.
Returns
-------
mirrored_image : ``type(self)``
The mirrored image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
axis cannot be negative
ValueError
axis={} but the image has {} dimensions
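Examples
--------
A minimal sketch (the asset name is illustrative) ::
import menpo.io as mio
im = mio.import_builtin_asset.lenna_png()
flipped_lr = im.mirror()        # mirror about axis 1 (left-right)
flipped_ud = im.mirror(axis=0)  # mirror about axis 0 (up-down)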
"""
# Check axis argument
if axis < 0:
raise ValueError('axis cannot be negative')
elif axis >= self.n_dims:
raise ValueError("axis={} but the image has {} "
"dimensions".format(axis, self.n_dims))
# Create transform that includes ...
# ... flipping about the selected axis ...
rot_matrix = np.eye(self.n_dims)
rot_matrix[axis, axis] = -1
# ... and translating back to the image's bbox
tr_matrix = np.zeros(self.n_dims)
tr_matrix[axis] = self.shape[axis] - 1
# Create transform object
trans = Rotation(rot_matrix, skip_checks=True).compose_before(
Translation(tr_matrix, skip_checks=True))
# Warp image
return self.warp_to_shape(self.shape, trans.pseudoinverse(),
warp_landmarks=True,
return_transform=return_transform)
def pyramid(self, n_levels=3, downscale=2):
r"""
Return a rescaled pyramid of this image. The first image of the
pyramid will be a copy of the original, unmodified, image, and counts
as level 1.
Parameters
----------
n_levels : `int`, optional
Total number of levels in the pyramid, including the original
unmodified image
downscale : `float`, optional
Downscale factor.
Yields
------
image_pyramid: `generator`
Generator yielding pyramid layers as :map:`Image` objects.
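Examples
--------
A minimal sketch (the asset name is illustrative) ::
import menpo.io as mio
im = mio.import_builtin_asset.lenna_png()
levels = list(im.pyramid(n_levels=3, downscale=2))
# levels[0] is a copy of im, levels[1] is half size, levels[2] a quarter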
"""
image = self.copy()
yield image
for _ in range(n_levels - 1):
image = image.rescale(1.0 / downscale)
yield image
def gaussian_pyramid(self, n_levels=3, downscale=2, sigma=None):
r"""
Return the gaussian pyramid of this image. The first image of the
pyramid will be a copy of the original, unmodified, image, and counts
as level 1.
Parameters
----------
n_levels : `int`, optional
Total number of levels in the pyramid, including the original
unmodified image
downscale : `float`, optional
Downscale factor.
sigma : `float`, optional
Sigma for gaussian filter. Default is ``downscale / 3.`` which
corresponds to a filter mask twice the size of the scale factor
that covers more than 99% of the gaussian distribution.
Yields
------
image_pyramid: `generator`
Generator yielding pyramid layers as :map:`Image` objects.
"""
from menpo.feature import gaussian_filter
if sigma is None:
sigma = downscale / 3.
image = self.copy()
yield image
for level in range(n_levels - 1):
image = gaussian_filter(image, sigma).rescale(1.0 / downscale)
yield image
def as_greyscale(self, mode='luminosity', channel=None):
r"""
Returns a greyscale version of the image. If the image does *not*
represent a 2D RGB image, then the ``luminosity`` mode will fail.
Parameters
----------
mode : ``{average, luminosity, channel}``, optional
============== =====================================================
mode Greyscale Algorithm
============== =====================================================
average Equal average of all channels
luminosity Calculates the luminance using the CCIR 601 formula:
| .. math:: Y' = 0.2989 R' + 0.5870 G' + 0.1140 B'
channel A specific channel is chosen as the intensity value.
============== =====================================================
channel: `int`, optional
The channel to be taken. Only used if mode is ``channel``.
Returns
-------
greyscale_image : ``type(self)``
A copy of this image in greyscale.
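Examples
--------
A minimal sketch on an RGB asset (the asset name is illustrative) ::
import menpo.io as mio
im = mio.import_builtin_asset.lenna_png()
grey = im.as_greyscale()                           # luminosity (default)
grey_avg = im.as_greyscale(mode='average')
grey_red = im.as_greyscale(mode='channel', channel=0)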
"""
greyscale = self.copy()
if mode == 'luminosity':
if self.n_dims != 2:
raise ValueError("The 'luminosity' mode only works on 2D RGB"
"images. {} dimensions found, "
"2 expected.".format(self.n_dims))
elif self.n_channels != 3:
raise ValueError("The 'luminosity' mode only works on RGB"
"images. {} channels found, "
"3 expected.".format(self.n_channels))
# Only compute the coefficients once.
global _greyscale_luminosity_coef
if _greyscale_luminosity_coef is None:
_greyscale_luminosity_coef = np.linalg.inv(
np.array([[1.0, 0.956, 0.621],
[1.0, -0.272, -0.647],
[1.0, -1.106, 1.703]]))[0, :]
# Compute greyscale via dot product
pixels = np.dot(_greyscale_luminosity_coef,
greyscale.pixels.reshape(3, -1))
# Reshape image back to original shape (with 1 channel)
pixels = pixels.reshape(greyscale.shape)
elif mode == 'average':
pixels = np.mean(greyscale.pixels, axis=0)
elif mode == 'channel':
if channel is None:
raise ValueError("For the 'channel' mode you have to provide"
" a channel index")
pixels = greyscale.pixels[channel]
else:
raise ValueError("Unknown mode {} - expected 'luminosity', "
"'average' or 'channel'.".format(mode))
# Set new pixels - ensure channel axis and maintain
greyscale.pixels = pixels[None, ...].astype(greyscale.pixels.dtype,
copy=False)
return greyscale
def as_PILImage(self, out_dtype=np.uint8):
r"""
Return a PIL copy of the image scaled and cast to the correct
values for the provided ``out_dtype``.
Image must only have 1 or 3 channels and be 2 dimensional.
Non `uint8` floating point images must be in the range ``[0, 1]`` to be
converted.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
pil_image : `PILImage`
PIL copy of image
Raises
------
ValueError
If image is not 2D or does not have exactly 1 or 3 channels.
ValueError
If pixels data type is `float32` or `float64` and the pixel
range is outside of ``[0, 1]``
ValueError
If the output dtype is unsupported. Currently uint8 is supported.
"""
if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3):
raise ValueError(
'Can only convert greyscale or RGB 2D images. '
'Received a {} channel {}D image.'.format(self.n_channels,
self.n_dims))
# Slice off the channel for greyscale images
if self.n_channels == 1:
pixels = self.pixels[0]
else:
pixels = channels_to_back(self.pixels)
pixels = denormalize_pixels_range(pixels, out_dtype)
return PILImage.fromarray(pixels)
def as_imageio(self, out_dtype=np.uint8):
r"""
Return an Imageio copy of the image scaled and cast to the correct
values for the provided ``out_dtype``.
Image must only have 1 or 3 channels and be 2 dimensional.
Non `uint8` floating point images must be in the range ``[0, 1]`` to be
converted.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
imageio_image : `ndarray`
Imageio image (which is just a numpy ndarray with the channels
as the last axis).
Raises
------
ValueError
If image is not 2D or does not have exactly 1 or 3 channels.
ValueError
If pixels data type is `float32` or `float64` and the pixel
range is outside of ``[0, 1]``
ValueError
If the output dtype is unsupported. Currently uint8 and uint16
are supported.
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .pixels_with_channels_at_back instead.',
MenpoDeprecationWarning)
if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3):
raise ValueError(
'Can only convert greyscale or RGB 2D images. '
'Received a {} channel {}D image.'.format(self.n_channels,
self.n_dims))
# Slice off the channel for greyscale images
if self.n_channels == 1:
pixels = self.pixels[0]
else:
pixels = channels_to_back(self.pixels)
return denormalize_pixels_range(pixels, out_dtype)
def pixels_range(self):
r"""
The range of the pixel values (min and max pixel values).
Returns
-------
min_max : ``(dtype, dtype)``
The minimum and maximum value of the pixels array.
"""
return self.pixels.min(), self.pixels.max()
def rolled_channels(self):
r"""
Deprecated - please use the equivalent ``pixels_with_channels_at_back`` method.
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .pixels_with_channels_at_back() instead.',
MenpoDeprecationWarning)
return self.pixels_with_channels_at_back()
def pixels_with_channels_at_back(self, out_dtype=None):
r"""
Returns the pixels matrix, with the channels rolled to the back axis.
This may be required for interacting with external code bases that
require images to have channels as the last axis, rather than the
Menpo convention of channels as the first axis.
If this image is single channel, the final axis is dropped.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
rolled_channels : `ndarray`
Pixels with channels as the back (last) axis. If single channel,
the last axis will be dropped.
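Examples
--------
A minimal sketch (the asset name is illustrative) ::
import menpo.io as mio
im = mio.import_builtin_asset.lenna_png()
arr = im.pixels_with_channels_at_back()
# arr.shape is (height, width, 3) for this RGB image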
"""
p = channels_to_back(self.pixels)
if out_dtype is not None:
p = denormalize_pixels_range(p, out_dtype=out_dtype)
return np.squeeze(p)
def __str__(self):
return ('{} {}D Image with {} channel{}'.format(
self._str_shape(), self.n_dims, self.n_channels,
's' * (self.n_channels > 1)))
def has_landmarks_outside_bounds(self):
"""
Indicates whether there are landmarks located outside the image bounds.
:type: `bool`
"""
if self.has_landmarks:
for l_group in self.landmarks:
pc = self.landmarks[l_group].points
if np.any(np.logical_or(self.shape - pc < 1, pc < 0)):
return True
return False
def constrain_landmarks_to_bounds(self):
r"""
Deprecated - please use the equivalent ``constrain_to_bounds`` method
now on PointCloud, in conjunction with the new Image ``bounds()``
method. For example:
>>> im.constrain_landmarks_to_bounds() # Equivalent to below
>>> im.landmarks['test'] = im.landmarks['test'].constrain_to_bounds(im.bounds())
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .constrain_to_bounds() instead (on PointCloud).',
MenpoDeprecationWarning)
for l_group in self.landmarks:
l = self.landmarks[l_group]
for k in range(l.points.shape[1]):
tmp = l.points[:, k]
tmp[tmp < 0] = 0
tmp[tmp > self.shape[k] - 1] = self.shape[k] - 1
l.points[:, k] = tmp
self.landmarks[l_group] = l
def normalize_std(self, mode='all', **kwargs):
r"""
Returns a copy of this image normalized such that its
pixel values have zero mean and unit variance.
Parameters
----------
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
Returns
-------
image : ``type(self)``
A copy of this image, normalized.
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .normalize_std() instead (features package).',
MenpoDeprecationWarning)
return self._normalize(np.std, mode=mode)
def normalize_norm(self, mode='all', **kwargs):
r"""
Returns a copy of this image normalized such that its pixel values
have zero mean and its norm equals 1.
Parameters
----------
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
unit norm.
Returns
-------
image : ``type(self)``
A copy of this image, normalized.
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .normalize_norm() instead (features package).',
MenpoDeprecationWarning)
def scale_func(pixels, axis=None):
return np.linalg.norm(pixels, axis=axis, **kwargs)
return self._normalize(scale_func, mode=mode)
def _normalize(self, scale_func, mode='all'):
from menpo.feature import normalize
return normalize(self, scale_func=scale_func, mode=mode)
def rescale_pixels(self, minimum, maximum, per_channel=True):
r"""A copy of this image with pixels linearly rescaled to fit a range.
Note that the only pixels that will be considered and rescaled are those
that feature in the vectorized form of this image. If you want to use
this routine on all the pixels in a :map:`MaskedImage`, consider
using `as_unmasked()` prior to this call.
Parameters
----------
minimum: `float`
The minimal value of the rescaled pixels
maximum: `float`
The maximal value of the rescaled pixels
per_channel: `boolean`, optional
If ``True``, each channel will be rescaled independently. If
``False``, the scaling will be over all channels.
Returns
-------
rescaled_image: ``type(self)``
A copy of this image with pixels linearly rescaled to fit in the
range provided.
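Examples
--------
A minimal sketch rescaling every channel independently to ``[0, 1]``
(the asset name is illustrative) ::
import menpo.io as mio
im = mio.import_builtin_asset.lenna_png()
unit_range = im.rescale_pixels(0.0, 1.0)
# unit_range.pixels_range() == (0.0, 1.0)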
"""
v = self.as_vector(keep_channels=True).T
if per_channel:
min_, max_ = v.min(axis=0), v.max(axis=0)
else:
min_, max_ = v.min(), v.max()
sf = ((maximum - minimum) * 1.0) / (max_ - min_)
v_new = ((v - min_) * sf) + minimum
return self.from_vector(v_new.T.ravel())
def clip_pixels(self, minimum=None, maximum=None):
r"""A copy of this image with pixels linearly clipped to fit a range.
Parameters
----------
minimum: `float`, optional
The minimal value of the clipped pixels. If None is provided, the
default value will be 0.
maximum: `float`, optional
The maximal value of the clipped pixels. If None is provided, the
default value will depend on the dtype.
Returns
-------
clipped_image : ``type(self)``
A copy of this image with pixels clipped to the range provided.
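Examples
--------
A minimal sketch (the asset name and clipping bounds are illustrative) ::
import menpo.io as mio
im = mio.import_builtin_asset.lenna_png()
clipped = im.clip_pixels(minimum=0.2, maximum=0.8)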
"""
if minimum is None:
minimum = 0
if maximum is None:
dtype = self.pixels.dtype
if dtype == np.uint8:
maximum = 255
elif dtype == np.uint16:
maximum = 65535
elif dtype in [np.float32, np.float64]:
maximum = 1.0
else:
m1 = 'Could not recognise the dtype ({}) to set the maximum.'
raise ValueError(m1.format(dtype))
copy = self.copy()
copy.pixels = copy.pixels.clip(min=minimum, max=maximum)
return copy
def rasterize_landmarks(self, group=None, render_lines=True, line_style='-',
line_colour='b', line_width=1, render_markers=True,
marker_style='o', marker_size=1,
marker_face_colour='b', marker_edge_colour='b',
marker_edge_width=1, backend='matplotlib'):
r"""
This method provides the ability to rasterize 2D landmarks onto the
image. The returned image has the specified landmark groups rasterized
onto the image - which is useful for things like creating result
examples or rendering videos with annotations.
Since multiple landmark groups can be specified, all arguments can take
lists of parameters that map to the provided groups list. Therefore, the
parameters must be lists of the correct length or a single parameter to
apply to every landmark group.
Multiple backends are provided, all with different strengths. The
'pillow' backend is very fast, but not very flexible. The `matplotlib`
backend should be feature compatible with other Menpo rendering methods,
but is much slower due to the overhead of creating a figure to render
into.
Parameters
----------
group : `str` or `list` of `str`, optional
The landmark group key, or a list of keys.
render_lines : `bool`, optional
If ``True``, and the provided landmark group is a
:map:`PointDirectedGraph`, the edges are rendered.
line_style : `str`, optional
The style of the edge line. Not all backends support this argument.
line_colour : `str` or `tuple`, optional
A Matplotlib style colour or a backend dependant colour.
line_width : `int`, optional
The width of the line to rasterize.
render_markers : `bool`, optional
If ``True``, render markers at the coordinates of each landmark.
marker_style : `str`, optional
A Matplotlib marker style. Not all backends support all marker
styles.
marker_size : `int`, optional
The size of the marker - different backends use different scale
spaces so consistent output may be difficult.
marker_face_colour : `str`, optional
A Matplotlib style colour or a backend dependant colour.
marker_edge_colour : `str`, optional
A Matplotlib style colour or a backend dependant colour.
marker_edge_width : `int`, optional
The width of the marker edge. Not all backends support this.
backend : {'matplotlib', 'pillow'}, optional
The backend to use.
Returns
-------
rasterized_image : :map:`Image`
The image with the landmarks rasterized directly into the pixels.
Raises
------
ValueError
Only 2D images are supported.
ValueError
Only RGB (3-channel) or Greyscale (1-channel) images are supported.
"""
from .rasterize import rasterize_landmarks_2d
return rasterize_landmarks_2d(
self, group=group, render_lines=render_lines,
line_style=line_style, line_colour=line_colour,
line_width=line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width, backend=backend)
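# A minimal usage sketch (illustrative only); 'PTS' is a hypothetical landmark
# group key, any key present in img.landmarks works:
#
#     annotated = img.rasterize_landmarks(group='PTS', backend='matplotlib')
#     # `annotated` has the same spatial shape as `img`, with the landmarks
#     # burnt directly into its pixels.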
def round_image_shape(shape, round):
if round not in ['ceil', 'round', 'floor']:
raise ValueError('round must be either ceil, round or floor')
# Apply the chosen numpy rounding function to the shape and cast to int
return tuple(getattr(np, round)(shape).astype(np.int))
def _convert_patches_list_to_single_array(patches_list, n_center):
r"""
Converts patches from a `list` of :map:`Image` objects to a single `ndarray`
with shape ``(n_center, n_offset, self.n_channels, patch_shape)``.
Note that these two are the formats returned by the `extract_patches()`
and `extract_patches_around_landmarks()` methods of :map:`Image` class.
Parameters
----------
patches_list : `list` of `n_center * n_offset` :map:`Image` objects
A `list` that contains all the patches as :map:`Image` objects.
n_center : `int`
The number of centers from which the patches are extracted.
Returns
-------
patches_array : `ndarray` ``(n_center, n_offset, n_channels, patch_shape)``
The numpy array that contains all the patches.
"""
n_offsets = np.int(len(patches_list) / n_center)
n_channels = patches_list[0].n_channels
height = patches_list[0].height
width = patches_list[0].width
patches_array = np.empty((n_center, n_offsets, n_channels, height, width),
dtype=patches_list[0].pixels.dtype)
total_index = 0
for p in range(n_center):
for o in range(n_offsets):
patches_array[p, o, ...] = patches_list[total_index].pixels
total_index += 1
return patches_array
def _create_patches_image(patches, patch_centers, patches_indices=None,
offset_index=None, background='black'):
r"""
Creates an :map:`Image` object in which the patches are located on the
correct regions based on the centers. Thus, the image is a block-sparse
matrix. It has also two attached :map:`PointCloud` objects. The
`all_patch_centers` one contains all the patch centers, while the
`selected_patch_centers` one contains only the centers that correspond to
the patches that the user selected to set.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods of the :map:`Image` class. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that are
returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects.
patch_centers : :map:`PointCloud`
The centers to set the patches around.
patches_indices : `int` or `list` of `int` or ``None``, optional
Defines the patches that will be set (copied) to the image. If ``None``,
then all the patches are copied.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the index
of the second dimension from which to sample. If ``None``, then ``0`` is
used.
background : ``{'black', 'white'}``, optional
If ``'black'``, then the background is set equal to the minimum value
of `patches`. If ``'white'``, then the background is set equal to the
maximum value of `patches`.
Returns
-------
patches_image : :map:`Image`
The output patches image object.
Raises
------
ValueError
Background must be either ''black'' or ''white''.
"""
# If patches is a list, convert it to array
if isinstance(patches, list):
patches = _convert_patches_list_to_single_array(patches,
patch_centers.n_points)
# Parse inputs
if offset_index is None:
offset_index = 0
if patches_indices is None:
patches_indices = np.arange(patches.shape[0])
elif not isinstance(patches_indices, Iterable):
patches_indices = [patches_indices]
# Compute patches image's shape
n_channels = patches.shape[2]
patch_shape0 = patches.shape[3]
patch_shape1 = patches.shape[4]
top, left = np.min(patch_centers.points, 0)
bottom, right = np.max(patch_centers.points, 0)
min_0 = np.floor(top - patch_shape0)
min_1 = np.floor(left - patch_shape1)
max_0 = np.ceil(bottom + patch_shape0)
max_1 = np.ceil(right + patch_shape1)
height = max_0 - min_0 + 1
width = max_1 - min_1 + 1
# Translate the patch centers to fit in the new image
new_patch_centers = patch_centers.copy()
new_patch_centers.points = patch_centers.points - np.array([[min_0, min_1]])
# Create temporary pointcloud with the selected patch centers
tmp_centers = PointCloud(new_patch_centers.points[patches_indices])
# Create new image with the correct background values
if background == 'black':
patches_image = Image.init_blank(
(height, width), n_channels,
fill=np.min(patches[patches_indices]),
dtype=patches.dtype)
elif background == 'white':
patches_image = Image.init_blank(
(height, width), n_channels,
fill=np.max(patches[patches_indices]),
dtype=patches.dtype)
else:
raise ValueError('Background must be either ''black'' or ''white''.')
# Attach the corrected patch centers
patches_image.landmarks['all_patch_centers'] = new_patch_centers
patches_image.landmarks['selected_patch_centers'] = tmp_centers
# Set the patches
return patches_image.set_patches_around_landmarks(
patches[patches_indices], group='selected_patch_centers',
offset_index=offset_index)
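# A minimal sketch tying the two helpers above together (illustrative only),
# assuming `patches` is the list form returned by `extract_patches()` and
# `centers` is the PointCloud of extraction centers:
#
#     patches_array = _convert_patches_list_to_single_array(patches,
#                                                            centers.n_points)
#     vis = _create_patches_image(patches_array, centers, background='black')
#
# `vis` is the block-sparse Image described above, carrying the
# 'all_patch_centers' and 'selected_patch_centers' landmark groups.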
|
grigorisg9gr/menpo
|
menpo/image/base.py
|
Python
|
bsd-3-clause
| 128,607
|
[
"Gaussian"
] |
b3f39d2098959d98f70be553f77ce5ed9d42857f94d29c49bf9468dc8df3895e
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 15 15:46:50 2016
@author: camacho
"""
import numpy as np
#import matplotlib.pyplot as pl
#pl.close("all") #closes all previously open figures
from time import time
import george
from george.kernels import *
from Kernel import *
def lnlike(K, r):
from scipy.linalg import cho_factor, cho_solve
L1 = cho_factor(K) # tuple (L, lower)
# this is K^-1*(r)
sol = cho_solve(L1, r)
n = r.size
logLike = -0.5*np.dot(r, sol) \
- np.sum(np.log(np.diag(L1[0]))) \
- n*0.5*np.log(2*np.pi)
return logLike
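# lnlike evaluates the usual GP log marginal likelihood
#     log p(y) = -0.5*y^T K^{-1} y - sum(log(diag(L))) - (n/2)*log(2*pi)
# where K = L L^T; cho_solve(L1, r) returns K^{-1} r without forming K^{-1}.
# A tiny sanity check (illustrative only):
#     assert np.isclose(lnlike(np.eye(3), np.zeros(3)), -1.5*np.log(2*np.pi))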
#### DATA #####################################################################
#np.random.seed(1000)
x = 10 * np.sort(np.random.rand(2000))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))
#pl.plot(x,y,'.')
#### CALCULATION USING GEORGE #################################################
#Set up the Gaussian process.
kernel = ExpSquaredKernel(1.0)
gp = george.GP(kernel)
#Pre-compute the factorization of the matrix.
gp.compute(x,yerr)
#Compute the log likelihood.
start = time()
log_p_george = gp.lnlikelihood(y)
print 'Took %f seconds' % (time() - start), ('log_p_george',log_p_george)
#### LIKELIHOOD CALCULATION ###################################################
#define the kernel to use
#kernel = ExpSquared + ExpSineSquared
ES_theta = 1
ES_l = 1
x1=x
x2=x
ESS_theta = 2
ESS_l = 2
ESS_P = 5
#compute the covariance matrix K
K=np.zeros((len(x),len(x)))
for i in range(len(x)):
for j in range(len(x)):
#K[i,j]=kernel(x1[i],x2[j],ES_theta,ES_l)
K[i,j]=ExpSquared(x1[i],x2[j],ES_theta,ES_l) \
+ ExpSineSquared(x1[i],x2[j],ESS_theta,ESS_l,ESS_P)
K=K+yerr**2*np.identity(len(x))
#### MY VERSION STARTS HERE
start = time()
#to use Cholesky the matrix must be positive definite
L = np.linalg.cholesky(K)
L_inv= np.linalg.inv(L)
y = np.array(y)
#Log likelihood calculation
n=len(x)
alpha = np.dot(L_inv, y)  # L^{-1} y, so that y^T K^{-1} y = alpha^T alpha
log_p = -0.5*np.dot(alpha, alpha) - sum(np.log(np.diag(L))) \
- n*0.5*np.log(2*np.pi)
print 'Took %f seconds' % (time() - start), ('log_p',log_p)
#### CORRECTED VERSION STARTS HERE
start = time()
log_p_correct = lnlike(K, y)
print 'Took %f seconds' % (time() - start), ('log_p_correct',log_p_correct)
#note: the george GP above uses a different kernel, so only the two
#calculations based on the same K are compared here
assert np.allclose(log_p, log_p_correct)
#### CONCLUSIONS
#My version takes about 8 times longer
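#The O(n^2) Python double loop that fills K above is the other bottleneck.
#Assuming the usual squared-exponential parameterisation
#theta**2 * exp(-0.5*(x_i - x_j)**2 / l**2), the same term can be built with
#numpy broadcasting (the periodic term can be vectorised the same way):
# dx = x[:, None] - x[None, :]
# K_fast = ES_theta**2 * np.exp(-0.5 * dx**2 / ES_l**2) + yerr**2*np.identity(len(x))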
|
jdavidrcamacho/Tests_GP
|
01 - Trials and attempts/03_nova_likelihood.py
|
Python
|
mit
| 2,428
|
[
"Gaussian"
] |
a4eaae6df0529b9ef0b6216ab4f79e40b4efa24064900e4053e06a3cd4c59496
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
from neutron.agent.common import utils # noqa
from neutron.agent.linux import ip_lib
from neutron.common import exceptions
from neutron.tests import base
NETNS_SAMPLE = [
'12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc']
LINK_SAMPLE = [
'1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\'
'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0',
'2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP '
'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'
'\ alias openvswitch',
'3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
'\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff promiscuity 0',
'4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop '
'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'5: foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'6: foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'7: foo:foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'8: foo@foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'9: bar.9@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 9 <REORDER_HDR>',
'10: bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 10 <REORDER_HDR>',
'11: bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 11 <REORDER_HDR>',
'12: bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 12 <REORDER_HDR>',
'13: bar:bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 13 <REORDER_HDR>',
'14: bar@bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 14 <REORDER_HDR>']
ADDR_SAMPLE = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
ADDR_SAMPLE2 = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
GATEWAY_SAMPLE1 = ("""
default via 10.35.19.254 metric 100
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE2 = ("""
default via 10.35.19.254 metric 100
""")
GATEWAY_SAMPLE3 = ("""
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE4 = ("""
default via 10.35.19.254
""")
GATEWAY_SAMPLE5 = ("""
default via 192.168.99.1 proto static
""")
GATEWAY_SAMPLE6 = ("""
default via 192.168.99.1 proto static metric 100
""")
IPv6_GATEWAY_SAMPLE1 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE2 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
""")
IPv6_GATEWAY_SAMPLE3 = ("""
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE4 = ("""
default via fe80::dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE5 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 1024
""")
DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2")
SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n"
"10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2")
SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n"
"10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1")
RULE_V4_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
101: from 192.168.45.100 lookup 2
""")
RULE_V6_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
201: from 2001:db8::1 lookup 3
""")
class TestSubProcessBase(base.BaseTestCase):
def setUp(self):
super(TestSubProcessBase, self).setUp()
self.execute_p = mock.patch('neutron.agent.common.utils.execute')
self.execute = self.execute_p.start()
def test_execute_wrapper(self):
ip_lib.SubProcessBase._execute(['o'], 'link', ('list',),
run_as_root=True)
self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_execute_wrapper_int_options(self):
ip_lib.SubProcessBase._execute([4], 'link', ('list',))
self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_execute_wrapper_no_options(self):
ip_lib.SubProcessBase._execute([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_run_no_namespace(self):
base = ip_lib.SubProcessBase()
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_run_namespace(self):
base = ip_lib.SubProcessBase(namespace='ns')
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_as_root_namespace(self):
base = ip_lib.SubProcessBase(namespace='ns')
base._as_root([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
class TestIpWrapper(base.BaseTestCase):
def setUp(self):
super(TestIpWrapper, self).setUp()
self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute')
self.execute = self.execute_p.start()
@mock.patch('os.path.islink')
@mock.patch('os.listdir', return_value=['lo'])
def test_get_devices(self, mocked_listdir, mocked_islink):
retval = ip_lib.IPWrapper().get_devices()
mocked_islink.assert_called_once_with('/sys/class/net/lo')
self.assertEqual(retval, [ip_lib.IPDevice('lo')])
@mock.patch('neutron.agent.common.utils.execute')
def test_get_devices_namespaces(self, mocked_execute):
fake_str = mock.Mock()
fake_str.split.return_value = ['lo']
mocked_execute.return_value = fake_str
retval = ip_lib.IPWrapper(namespace='foo').get_devices()
mocked_execute.assert_called_once_with(
['ip', 'netns', 'exec', 'foo', 'find', '/sys/class/net',
'-maxdepth', '1', '-type', 'l', '-printf', '%f '],
run_as_root=True, log_fail_as_error=True)
self.assertTrue(fake_str.split.called)
self.assertEqual(retval, [ip_lib.IPDevice('lo', namespace='foo')])
def test_get_namespaces(self):
self.execute.return_value = '\n'.join(NETNS_SAMPLE)
retval = ip_lib.IPWrapper.get_namespaces()
self.assertEqual(retval,
['12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc'])
self.execute.assert_called_once_with([], 'netns', ('list',))
def test_add_tuntap(self):
ip_lib.IPWrapper().add_tuntap('tap0')
self.execute.assert_called_once_with([], 'tuntap',
('add', 'tap0', 'mode', 'tap'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_veth(self):
ip_lib.IPWrapper().add_veth('tap0', 'tap1')
self.execute.assert_called_once_with([], 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_del_veth(self):
ip_lib.IPWrapper().del_veth('fpr-1234')
self.execute.assert_called_once_with([], 'link',
('del', 'fpr-1234'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_veth_with_namespaces(self):
ns2 = 'ns2'
with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en:
ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2)
en.assert_has_calls([mock.call(ns2)])
self.execute.assert_called_once_with([], 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1',
'netns', ns2),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_get_device(self):
dev = ip_lib.IPWrapper(namespace='ns').device('eth0')
self.assertEqual(dev.namespace, 'ns')
self.assertEqual(dev.name, 'eth0')
def test_ensure_namespace(self):
with mock.patch.object(ip_lib, 'IPDevice') as ip_dev:
ip = ip_lib.IPWrapper()
with mock.patch.object(ip.netns, 'exists') as ns_exists:
with mock.patch('neutron.agent.common.utils.execute'):
ns_exists.return_value = False
ip.ensure_namespace('ns')
self.execute.assert_has_calls(
[mock.call([], 'netns', ('add', 'ns'),
run_as_root=True, namespace=None,
log_fail_as_error=True)])
ip_dev.assert_has_calls([mock.call('lo', namespace='ns'),
mock.call().link.set_up()])
def test_ensure_namespace_existing(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd:
ip_ns_cmd.exists.return_value = True
ns = ip_lib.IPWrapper().ensure_namespace('ns')
self.assertFalse(self.execute.called)
self.assertEqual(ns.namespace, 'ns')
def test_namespace_is_empty_no_devices(self):
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = []
self.assertTrue(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_namespace_is_empty(self):
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = [mock.Mock()]
self.assertFalse(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_garbage_collect_namespace_does_not_exist(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = False
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
self.assertFalse(ip.garbage_collect_namespace())
ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')])
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.return_value.mock_calls)
self.assertEqual(mock_is_empty.mock_calls, [])
def test_garbage_collect_namespace_existing_empty_ns(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = True
self.assertTrue(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call().exists('ns'),
mock.call().delete('ns')]
ip_ns_cmd_cls.assert_has_calls(expected)
def test_garbage_collect_namespace_existing_not_empty(self):
lo_device = mock.Mock()
lo_device.name = 'lo'
tap_device = mock.Mock()
tap_device.name = 'tap1'
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = False
self.assertFalse(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call(ip),
mock.call().exists('ns')]
self.assertEqual(ip_ns_cmd_cls.mock_calls, expected)
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.mock_calls)
def test_add_vxlan_valid_port_length(self):
retval = ip_lib.IPWrapper().add_vxlan('vxlan0', 'vni0',
group='group0',
dev='dev0', ttl='ttl0',
tos='tos0',
local='local0', proxy=True,
port=('1', '2'))
self.assertIsInstance(retval, ip_lib.IPDevice)
self.assertEqual(retval.name, 'vxlan0')
self.execute.assert_called_once_with([], 'link',
['add', 'vxlan0', 'type',
'vxlan', 'id', 'vni0', 'group',
'group0', 'dev', 'dev0',
'ttl', 'ttl0', 'tos', 'tos0',
'local', 'local0', 'proxy',
'port', '1', '2'],
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_vxlan_invalid_port_length(self):
wrapper = ip_lib.IPWrapper()
self.assertRaises(exceptions.NetworkVxlanPortRangeError,
wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0',
dev='dev0', ttl='ttl0', tos='tos0',
local='local0', proxy=True,
port=('1', '2', '3'))
def test_add_device_to_namespace(self):
dev = mock.Mock()
ip_lib.IPWrapper(namespace='ns').add_device_to_namespace(dev)
dev.assert_has_calls([mock.call.link.set_netns('ns')])
def test_add_device_to_namespace_is_none(self):
dev = mock.Mock()
ip_lib.IPWrapper().add_device_to_namespace(dev)
self.assertEqual(dev.mock_calls, [])
class TestIPDevice(base.BaseTestCase):
def test_eq_same_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap0')
self.assertEqual(dev1, dev2)
def test_eq_diff_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap1')
self.assertNotEqual(dev1, dev2)
def test_eq_same_namespace(self):
dev1 = ip_lib.IPDevice('tap0', 'ns1')
dev2 = ip_lib.IPDevice('tap0', 'ns1')
self.assertEqual(dev1, dev2)
def test_eq_diff_namespace(self):
dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
dev2 = ip_lib.IPDevice('tap0', namespace='ns2')
self.assertNotEqual(dev1, dev2)
def test_eq_other_is_none(self):
dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
self.assertIsNotNone(dev1)
def test_str(self):
self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0')
class TestIPCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPCommandBase, self).setUp()
self.ip = mock.Mock()
self.ip.namespace = 'namespace'
self.ip_cmd = ip_lib.IpCommandBase(self.ip)
self.ip_cmd.COMMAND = 'foo'
def test_run(self):
self.ip_cmd._run([], ('link', 'show'))
self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))])
def test_run_with_options(self):
self.ip_cmd._run(['o'], ('link'))
self.ip.assert_has_calls([mock.call._run(['o'], 'foo', ('link'))])
def test_as_root_namespace_false(self):
self.ip_cmd._as_root([], ('link'))
self.ip.assert_has_calls(
[mock.call._as_root([],
'foo',
('link'),
use_root_namespace=False)])
def test_as_root_namespace_true(self):
self.ip_cmd._as_root([], ('link'), use_root_namespace=True)
self.ip.assert_has_calls(
[mock.call._as_root([],
'foo',
('link'),
use_root_namespace=True)])
def test_as_root_namespace_true_with_options(self):
self.ip_cmd._as_root('o', 'link', use_root_namespace=True)
self.ip.assert_has_calls(
[mock.call._as_root('o',
'foo',
('link'),
use_root_namespace=True)])
class TestIPDeviceCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPDeviceCommandBase, self).setUp()
self.ip_dev = mock.Mock()
self.ip_dev.name = 'eth0'
self.ip_dev._execute = mock.Mock(return_value='executed')
self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev)
self.ip_cmd.COMMAND = 'foo'
def test_name_property(self):
self.assertEqual(self.ip_cmd.name, 'eth0')
class TestIPCmdBase(base.BaseTestCase):
def setUp(self):
super(TestIPCmdBase, self).setUp()
self.parent = mock.Mock()
self.parent.name = 'eth0'
def _assert_call(self, options, args):
self.parent.assert_has_calls([
mock.call._run(options, self.command, args)])
def _assert_sudo(self, options, args, use_root_namespace=False):
self.parent.assert_has_calls(
[mock.call._as_root(options, self.command, args,
use_root_namespace=use_root_namespace)])
class TestIpRuleCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRuleCommand, self).setUp()
self.parent._as_root.return_value = ''
self.command = 'rule'
self.rule_cmd = ip_lib.IpRuleCommand(self.parent)
def _test_add_rule(self, ip, table, priority):
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.add(ip, table, priority)
self._assert_sudo([ip_version], (['show']))
self._assert_sudo([ip_version], ('add', 'from', ip,
'table', table, 'priority', priority))
def _test_add_rule_exists(self, ip, table, priority, output):
self.parent._as_root.return_value = output
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.add(ip, table, priority)
self._assert_sudo([ip_version], (['show']))
def _test_delete_rule(self, ip, table, priority):
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.delete(ip, table, priority)
self._assert_sudo([ip_version],
('del', 'table', table,
'priority', priority))
def test_add_rule_v4(self):
self._test_add_rule('192.168.45.100', 2, 100)
def test_add_rule_v4_exists(self):
self._test_add_rule_exists('192.168.45.100', 2, 101, RULE_V4_SAMPLE)
def test_add_rule_v6(self):
self._test_add_rule('2001:db8::1', 3, 200)
def test_add_rule_v6_exists(self):
self._test_add_rule_exists('2001:db8::1', 3, 201, RULE_V6_SAMPLE)
def test_delete_rule_v4(self):
self._test_delete_rule('192.168.45.100', 2, 100)
def test_delete_rule_v6(self):
self._test_delete_rule('2001:db8::1', 3, 200)
class TestIpLinkCommand(TestIPCmdBase):
def setUp(self):
super(TestIpLinkCommand, self).setUp()
self.parent._run.return_value = LINK_SAMPLE[1]
self.command = 'link'
self.link_cmd = ip_lib.IpLinkCommand(self.parent)
def test_set_address(self):
self.link_cmd.set_address('aa:bb:cc:dd:ee:ff')
self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff'))
def test_set_mtu(self):
self.link_cmd.set_mtu(1500)
self._assert_sudo([], ('set', 'eth0', 'mtu', 1500))
def test_set_up(self):
self.link_cmd.set_up()
self._assert_sudo([], ('set', 'eth0', 'up'))
def test_set_down(self):
self.link_cmd.set_down()
self._assert_sudo([], ('set', 'eth0', 'down'))
def test_set_netns(self):
self.link_cmd.set_netns('foo')
self._assert_sudo([], ('set', 'eth0', 'netns', 'foo'))
self.assertEqual(self.parent.namespace, 'foo')
def test_set_name(self):
self.link_cmd.set_name('tap1')
self._assert_sudo([], ('set', 'eth0', 'name', 'tap1'))
self.assertEqual(self.parent.name, 'tap1')
def test_set_alias(self):
self.link_cmd.set_alias('openvswitch')
self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch'))
def test_delete(self):
self.link_cmd.delete()
self._assert_sudo([], ('delete', 'eth0'))
def test_address_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd')
def test_mtu_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.mtu, 1500)
def test_qdisc_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qdisc, 'mq')
def test_qlen_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qlen, 1000)
def test_alias_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.alias, 'openvswitch')
def test_state_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.state, 'UP')
def test_settings_property(self):
expected = {'mtu': 1500,
'qlen': 1000,
'state': 'UP',
'qdisc': 'mq',
'brd': 'ff:ff:ff:ff:ff:ff',
'link/ether': 'cc:dd:ee:ff:ab:cd',
'alias': 'openvswitch'}
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.attributes, expected)
self._assert_call(['o'], ('show', 'eth0'))
class TestIpAddrCommand(TestIPCmdBase):
def setUp(self):
super(TestIpAddrCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'addr'
self.addr_cmd = ip_lib.IpAddrCommand(self.parent)
def test_add_address(self):
self.addr_cmd.add('192.168.45.100/24')
self._assert_sudo([4],
('add', '192.168.45.100/24',
'scope', 'global',
'dev', 'tap0',
'brd', '192.168.45.255'))
def test_add_address_scoped(self):
self.addr_cmd.add('192.168.45.100/24', scope='link')
self._assert_sudo([4],
('add', '192.168.45.100/24',
'scope', 'link',
'dev', 'tap0',
'brd', '192.168.45.255'))
def test_del_address(self):
self.addr_cmd.delete('192.168.45.100/24')
self._assert_sudo([4],
('del', '192.168.45.100/24', 'dev', 'tap0'))
def test_flush(self):
self.addr_cmd.flush(6)
self._assert_sudo([6], ('flush', 'tap0'))
def test_list(self):
expected = [
dict(scope='global',
dynamic=False, cidr='172.16.77.240/24'),
dict(scope='global',
dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64'),
dict(scope='global',
dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64'),
dict(scope='global',
dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64'),
dict(scope='global',
dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64'),
dict(scope='link',
dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
self.parent._run = mock.Mock(return_value=test_case)
self.assertEqual(self.addr_cmd.list(), expected)
self._assert_call([], ('show', 'tap0'))
def test_list_filtered(self):
expected = [
dict(scope='global',
dynamic=False, cidr='172.16.77.240/24')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
output = '\n'.join(test_case.split('\n')[0:4])
self.parent._run.return_value = output
self.assertEqual(self.addr_cmd.list('global',
filters=['permanent']), expected)
self._assert_call([], ('show', 'tap0', 'permanent', 'scope',
'global'))
class TestIpRouteCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRouteCommand, self).setUp()
self.parent.name = 'eth0'
self.command = 'route'
self.route_cmd = ip_lib.IpRouteCommand(self.parent)
self.ip_version = 4
self.table = 14
self.metric = 100
self.cidr = '192.168.45.100/24'
self.ip = '10.0.0.1'
self.gateway = '192.168.45.100'
self.test_cases = [{'sample': GATEWAY_SAMPLE1,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE2,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE3,
'expected': None},
{'sample': GATEWAY_SAMPLE4,
'expected': {'gateway': '10.35.19.254'}},
{'sample': GATEWAY_SAMPLE5,
'expected': {'gateway': '192.168.99.1'}},
{'sample': GATEWAY_SAMPLE6,
'expected': {'gateway': '192.168.99.1',
'metric': 100}}]
def test_add_gateway(self):
self.route_cmd.add_gateway(self.gateway, self.metric, self.table)
self._assert_sudo([self.ip_version],
('replace', 'default',
'via', self.gateway,
'metric', self.metric,
'dev', self.parent.name,
'table', self.table))
def test_del_gateway(self):
self.route_cmd.delete_gateway(self.gateway, table=self.table)
self._assert_sudo([self.ip_version],
('del', 'default',
'via', self.gateway,
'dev', self.parent.name,
'table', self.table))
def test_get_gateway(self):
for test_case in self.test_cases:
self.parent._run = mock.Mock(return_value=test_case['sample'])
self.assertEqual(self.route_cmd.get_gateway(),
test_case['expected'])
def test_pullup_route(self):
# NOTE(brian-haley) Currently we do not have any IPv6-specific usecase
# for pullup_route, hence skipping. Revisit, if required, in future.
if self.ip_version == 6:
return
# interface is not the first in the list - requires
# deleting and creating existing entries
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10')
self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2'))
self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel',
'src', '10.0.0.1', 'dev', 'qr-23380d11-d2'))
def test_pullup_route_first(self):
# NOTE(brian-haley) Currently we do not have any IPv6-specific usecase
# for pullup_route, hence skipping. Revisit, if required, in future.
if self.ip_version == 6:
return
# interface is first in the list - no changes
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10')
# Check two calls - device get and subnet get
self.assertEqual(len(self.parent._run.mock_calls), 2)
def test_add_route(self):
self.route_cmd.add_route(self.cidr, self.ip, self.table)
self._assert_sudo([self.ip_version],
('replace', self.cidr,
'via', self.ip,
'dev', self.parent.name,
'table', self.table))
def test_delete_route(self):
self.route_cmd.delete_route(self.cidr, self.ip, self.table)
self._assert_sudo([self.ip_version],
('del', self.cidr,
'via', self.ip,
'dev', self.parent.name,
'table', self.table))
class TestIPv6IpRouteCommand(TestIpRouteCommand):
def setUp(self):
super(TestIPv6IpRouteCommand, self).setUp()
self.ip_version = 6
self.cidr = '2001:db8::/64'
self.ip = '2001:db8::100'
self.gateway = '2001:db8::1'
self.test_cases = [{'sample': IPv6_GATEWAY_SAMPLE1,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 100}},
{'sample': IPv6_GATEWAY_SAMPLE2,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 100}},
{'sample': IPv6_GATEWAY_SAMPLE3,
'expected': None},
{'sample': IPv6_GATEWAY_SAMPLE4,
'expected':
{'gateway': 'fe80::dfcc:aaff:feb9:76ce'}},
{'sample': IPv6_GATEWAY_SAMPLE5,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 1024}}]
class TestIpNetnsCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNetnsCommand, self).setUp()
self.command = 'netns'
self.netns_cmd = ip_lib.IpNetnsCommand(self.parent)
def test_add_namespace(self):
with mock.patch('neutron.agent.common.utils.execute') as execute:
ns = self.netns_cmd.add('ns')
self._assert_sudo([], ('add', 'ns'), use_root_namespace=True)
self.assertEqual(ns.namespace, 'ns')
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns',
'sysctl', '-w', 'net.ipv4.conf.all.promote_secondaries=1'],
run_as_root=True, check_exit_code=True, extra_ok_codes=None)
def test_delete_namespace(self):
with mock.patch('neutron.agent.common.utils.execute'):
self.netns_cmd.delete('ns')
self._assert_sudo([], ('delete', 'ns'), use_root_namespace=True)
def test_namespace_exists_use_helper(self):
self.config(group='AGENT', use_helper_for_ns_read=True)
retval = '\n'.join(NETNS_SAMPLE)
# need another instance to avoid mocking
netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
with mock.patch('neutron.agent.common.utils.execute') as execute:
execute.return_value = retval
self.assertTrue(
netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'))
execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_namespace_does_not_exist_no_helper(self):
self.config(group='AGENT', use_helper_for_ns_read=False)
retval = '\n'.join(NETNS_SAMPLE)
# need another instance to avoid mocking
netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
with mock.patch('neutron.agent.common.utils.execute') as execute:
execute.return_value = retval
self.assertFalse(
netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb'))
execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_execute(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.common.utils.execute') as execute:
self.netns_cmd.execute(['ip', 'link', 'list'])
execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip',
'link', 'list'],
run_as_root=True,
check_exit_code=True,
extra_ok_codes=None)
def test_execute_env_var_prepend(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.common.utils.execute') as execute:
env = dict(FOO=1, BAR=2)
self.netns_cmd.execute(['ip', 'link', 'list'], env)
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns', 'env'] +
['%s=%s' % (k, v) for k, v in env.items()] +
['ip', 'link', 'list'],
run_as_root=True, check_exit_code=True, extra_ok_codes=None)
def test_execute_nosudo_with_no_namespace(self):
with mock.patch('neutron.agent.common.utils.execute') as execute:
self.parent.namespace = None
self.netns_cmd.execute(['test'])
execute.assert_called_once_with(['test'],
check_exit_code=True,
extra_ok_codes=None)
class TestDeviceExists(base.BaseTestCase):
def test_device_exists(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = LINK_SAMPLE[1]
self.assertTrue(ip_lib.device_exists('eth0'))
_execute.assert_called_once_with(['o'], 'link', ('show', 'eth0'),
log_fail_as_error=False)
def test_device_does_not_exist(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = ''
_execute.side_effect = RuntimeError
self.assertFalse(ip_lib.device_exists('eth0'))
def test_ensure_device_is_ready(self):
ip_lib_mock = mock.Mock()
with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock):
self.assertTrue(ip_lib.ensure_device_is_ready("eth0"))
self.assertTrue(ip_lib_mock.link.set_up.called)
ip_lib_mock.reset_mock()
# device doesn't exist
ip_lib_mock.link.set_up.side_effect = RuntimeError
self.assertFalse(ip_lib.ensure_device_is_ready("eth0"))
class TestIpNeighCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNeighCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'neigh'
self.neigh_cmd = ip_lib.IpNeighCommand(self.parent)
def test_add_entry(self):
self.neigh_cmd.add('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
self._assert_sudo([4],
('replace', '192.168.45.100',
'lladdr', 'cc:dd:ee:ff:ab:cd',
'nud', 'permanent',
'dev', 'tap0'))
def test_delete_entry(self):
self.neigh_cmd.delete('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
self._assert_sudo([4],
('del', '192.168.45.100',
'lladdr', 'cc:dd:ee:ff:ab:cd',
'dev', 'tap0'))
class TestArpPing(TestIPCmdBase):
def _test_arping(self, function, address, spawn_n, mIPWrapper):
spawn_n.side_effect = lambda f: f()
ARPING_COUNT = 3
function(mock.sentinel.ns_name,
mock.sentinel.iface_name,
address,
ARPING_COUNT)
self.assertTrue(spawn_n.called)
mIPWrapper.assert_called_once_with(namespace=mock.sentinel.ns_name)
ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name)
# Just test that arping is called with the right arguments
arping_cmd = ['arping', '-A',
'-I', mock.sentinel.iface_name,
'-c', ARPING_COUNT,
'-w', mock.ANY,
address]
ip_wrapper.netns.execute.assert_any_call(arping_cmd,
check_exit_code=True)
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch('eventlet.spawn_n')
def test_send_gratuitous_arp(self, spawn_n, mIPWrapper):
self._test_arping(
ip_lib.send_gratuitous_arp, '20.0.0.1', spawn_n, mIPWrapper)
@mock.patch.object(ip_lib, 'IPDevice')
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch('eventlet.spawn_n')
def test_send_garp_for_proxy_arp(self, spawn_n, mIPWrapper, mIPDevice):
addr = '20.0.0.1'
ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name)
mIPWrapper.reset_mock()
device = mIPDevice(mock.sentinel.iface_name,
namespace=mock.sentinel.ns_name)
mIPDevice.reset_mock()
# Check that the address was added to the interface before arping
def check_added_address(*args, **kwargs):
mIPDevice.assert_called_once_with(mock.sentinel.iface_name,
namespace=mock.sentinel.ns_name)
device.addr.add.assert_called_once_with(addr + '/32')
self.assertFalse(device.addr.delete.called)
device.addr.reset_mock()
ip_wrapper.netns.execute.side_effect = check_added_address
self._test_arping(
ip_lib.send_garp_for_proxyarp, addr, spawn_n, mIPWrapper)
# Test that the address was removed after arping
device = mIPDevice(mock.sentinel.iface_name,
namespace=mock.sentinel.ns_name)
device.addr.delete.assert_called_once_with(addr + '/32')
# If this was called then check_added_address probably had an assert failure
self.assertFalse(device.addr.add.called)
class TestAddNamespaceToCmd(base.BaseTestCase):
def test_add_namespace_to_cmd_with_namespace(self):
cmd = ['ping', '8.8.8.8']
self.assertEqual(['ip', 'netns', 'exec', 'tmp'] + cmd,
ip_lib.add_namespace_to_cmd(cmd, 'tmp'))
def test_add_namespace_to_cmd_without_namespace(self):
cmd = ['ping', '8.8.8.8']
self.assertEqual(cmd, ip_lib.add_namespace_to_cmd(cmd, None))
|
waltBB/neutron_read
|
neutron/tests/unit/test_linux_ip_lib.py
|
Python
|
apache-2.0
| 44,577
|
[
"Brian"
] |
09c6384bb95c6a98200090ed766abd6bd18ba74727aee0ecc4edf4a68287ddba
|
"""
Utility functionality (:mod:`skbio.util`)
=========================================
.. currentmodule:: skbio.util
This package provides general exception/warning definitions used throughout
scikit-bio, as well as various utility functionality, including I/O and
unit-testing convenience functions.
Testing functionality
---------------------
Common functionality to support testing in skbio.
.. autosummary::
:toctree: generated/
get_data_path
TestRunner
assert_data_frame_almost_equal
Decorators
----------
.. autosummary::
:toctree: generated/
classproperty
overrides
Miscellaneous functionality
---------------------------
Generally useful functions that don't fit in more specific locations.
.. autosummary::
:toctree: generated/
cardinal_to_ordinal
create_dir
find_duplicates
flatten
is_casava_v180_or_later
remove_files
safe_md5
Exceptions
----------
.. autosummary::
:toctree: generated/
TestingUtilError
Warnings
--------
.. autosummary::
:toctree: generated/
EfficiencyWarning
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from ._warning import EfficiencyWarning
from ._exception import TestingUtilError
from ._decorator import classproperty, overrides
from ._misc import (cardinal_to_ordinal, create_dir, find_duplicates, flatten,
is_casava_v180_or_later, remove_files, safe_md5)
from ._testing import (get_data_path, TestRunner,
assert_data_frame_almost_equal)
__all__ = ['EfficiencyWarning', 'TestingUtilError', 'classproperty',
'cardinal_to_ordinal', 'create_dir', 'find_duplicates', 'flatten',
'is_casava_v180_or_later', 'remove_files', 'safe_md5',
'get_data_path', 'TestRunner', 'overrides',
'assert_data_frame_almost_equal']
test = TestRunner(__file__).test
|
Achuth17/scikit-bio
|
skbio/util/__init__.py
|
Python
|
bsd-3-clause
| 2,160
|
[
"scikit-bio"
] |
c28d7b281e6118d6357dc7e811d65b1671b4278b678581437ae0d654e134a4c5
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
print "hello world"
# <codecell>
from apiclient.discovery import build
# <codecell>
from oauth2client.client import OAuth2WebServerFlow
# <codecell>
import httplib2
# <codecell>
import gflags
import httplib2
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications
# The client_id and client_secret can be found in Google Cloud Console
#Fetch client id and client secret from a file
lines = open("API_CREDENTIALS","r").readlines()
client_id_from_file = lines[0].strip("\r").strip("\n").strip(" ")
client_secret_from_file = lines[1].strip("\r").strip("\n").strip(" ")
FLOW = OAuth2WebServerFlow(
client_id=client_id_from_file,
client_secret=client_secret_from_file,
scope='https://www.googleapis.com/auth/calendar',
user_agent='Scheduler/1.0')
# To disable the local server feature, uncomment the following line:
FLAGS.auth_local_webserver = False
# If the Credentials don't exist or are invalid, run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('calendar.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
# Build a service object for interacting with the API. Visit
# the Google Cloud Console
# to get a developerKey for your own application.
service = build(serviceName='calendar', version='v3', http=http)
#,
#developerKey='AIzaSyCeELBlxSDK_26Y3WKW4VeesKZ5vYTU2uM')
# <codecell>
#Note: Created a new client ID with "native application" as the application type, which corrected the redirect URL.
# <codecell>
calendar = service.calendars().get(calendarId='primary').execute()
# <codecell>
print calendar['summary']
# <codecell>
#Note: if you ever get the error "Google Calendar API - (403) Access Not Configured", then comment out "developerKey" in the build() function
# <codecell>
#source:http://stackoverflow.com/questions/16173295/google-calendar-api-403-access-not-configured
# <codecell>
print [item for item in calendar.iteritems()]
# <codecell>
#Let's test printing event details
# <codecell>
page_token = None
while True:
events = service.events().list(calendarId='primary', pageToken=page_token).execute()
for event in events['items']:
print event['summary']
page_token = events.get('nextPageToken')
if not page_token:
break
# <codecell>
#Let's quick add
# <codecell>
created_event = service.events().quickAdd(
calendarId='primary',
text='Eat food at home on January 17th 1pm-1:25pm').execute()
print created_event['id']
# <codecell>
# <codecell>
#Try to take list of items and schedule them (write a function)
# <codecell>
class Task:
"""
This class has important variables and functions related to individual tasks.
"""
task_name = ""
task_label = ""
task_priority = 0 #should be between 0 to 9 (low to high priority)
task_time_start = 0
task_time_end = 0
task_time_zone = "" #evening, afternoon, morning, night
task_deadline = "" #deadline of the task
task_score = 0 #should be 0 to 9 (low to high)
def __init__(self, task_name="", task_label="", task_priority=0, task_time_start=0, task_time_end=0, task_time_zone="", task_deadline="", task_score=0):
"""
This function adds task to the queue
"""
self.task_name = task_name
self.task_label = task_label
self.task_priority = task_priority
self.task_time_start = task_time_start
self.task_time_end = task_time_end
self.task_time_zone = task_time_zone
self.task_deadline = task_deadline
self.task_score = task_score
class TaskList:
"""
This class operates on tasks
"""
taskList = [] #Queue of all tasks
def __init__(self):
print 'Created operations task object'
self.taskList = []
def addTask(self, task):
"""
Adding task to the task list
"""
self.taskList.append(task)
print "Added task: ",task.task_name
def removeTask(self, task):
"""
Remove task from the task list
"""
self.taskList.remove(task)
print "Removed task: ",task.task_name
def showAllTasks(self):
"""
Shows all tasks
"""
for task in self.taskList:
print task.task_name
# <codecell>
if __name__ == "__main__":
"""
main function
"""
print "Testing task list"
task1 = Task("eat food")
task2 = Task("read book")
taskList = TaskList()
taskList.addTask(task1)
taskList.addTask(task2)
print "\nAll tasks:"
taskList.showAllTasks()
# <codecell>
|
nakul225/Scheduler
|
Scheduler.py
|
Python
|
gpl-2.0
| 5,308
|
[
"VisIt"
] |
6c7de8a5350b69412d5e96ce91a47254ad58e10fa91cc5393e324dee6d3371be
|
# -*- Mode: python; coding: utf-8 -*-
from __future__ import division
#from __future__ import with_statement
LAYOUT_ALGORITHM = 'neato' # ['neato'|'dot'|'twopi'|'circo'|'fdp'|'nop']
REPRESENT_CHANNELS_AS_NODES = 1
DEFAULT_NODE_SIZE = 2.0 # default node size in meters
DEFAULT_TRANSMISSIONS_MEMORY = 5 # default number of past intervals whose transmissions are remembered
BITRATE_FONT_SIZE = 10
# internal constants, normally not meant to be changed
SAMPLE_PERIOD = 0.1
PRIORITY_UPDATE_MODEL = -100
PRIORITY_UPDATE_VIEW = 200
import platform
if platform.system() == "Windows":
SHELL_FONT = "Lucida Console 9"
else:
SHELL_FONT = "Luxi Mono 10"
import ns.core
import ns.network
import ns.visualizer
import ns.internet
import ns.mobility
import math
import os
import sys
import gobject
import time
try:
import pygraphviz
import gtk
import pango
import goocanvas
import cairo
import threading
import hud
#import time
import cairo
from higcontainer import HIGContainer
gobject.threads_init()
try:
import svgitem
except ImportError:
svgitem = None
except ImportError, _import_error:
import dummy_threading as threading
else:
_import_error = None
try:
import ipython_view
except ImportError:
ipython_view = None
from base import InformationWindow, PyVizObject, Link, lookup_netdevice_traits, PIXELS_PER_METER
from base import transform_distance_simulation_to_canvas, transform_point_simulation_to_canvas
from base import transform_distance_canvas_to_simulation, transform_point_canvas_to_simulation
from base import load_plugins, register_plugin, plugins
PI_OVER_2 = math.pi/2
PI_TIMES_2 = math.pi*2
class Node(PyVizObject):
__gsignals__ = {
# signal emitted whenever a tooltip is about to be shown for the node
# the first signal parameter is a python list of strings, to which information can be appended
'query-extra-tooltip-info': (gobject.SIGNAL_RUN_LAST, None, (object,)),
}
def __init__(self, visualizer, node_index):
super(Node, self).__init__()
self.visualizer = visualizer
self.node_index = node_index
self.canvas_item = goocanvas.Ellipse()
self.canvas_item.set_data("pyviz-object", self)
self.links = []
self._has_mobility = None
self._selected = False
self._highlighted = False
self._color = 0x808080ff
self._size = DEFAULT_NODE_SIZE
self.canvas_item.connect("enter-notify-event", self.on_enter_notify_event)
self.canvas_item.connect("leave-notify-event", self.on_leave_notify_event)
self.menu = None
self.svg_item = None
self.svg_align_x = None
self.svg_align_y = None
ns3_node = ns.network.NodeList.GetNode(self.node_index)
self._label = '%i' % self.node_index
node_name = ns.core.Names.FindName (ns3_node)
if len(node_name)!=0:
self._label += ' (' + node_name + ')'
self._label_canvas_item = None
self._update_appearance() # call this last
def set_svg_icon(self, file_base_name, width=None, height=None, align_x=0.5, align_y=0.5):
"""
Set a background SVG icon for the node.
@param file_base_name: base file name, including .svg
extension, of the svg file. Place the file in the folder
src/contrib/visualizer/resource.
@param width: scale to the specified width, in meters
@param height: scale to the specified height, in meters
@param align_x: horizontal alignment of the icon relative to
the node position, from 0 (icon fully to the left of the node)
to 1.0 (icon fully to the right of the node)
@param align_y: vertical alignment of the icon relative to the
node position, from 0 (icon fully to the top of the node) to
1.0 (icon fully to the bottom of the node)
"""
if width is None and height is None:
raise ValueError("either width or height must be given")
rsvg_handle = svgitem.rsvg_handle_factory(file_base_name)
x = self.canvas_item.props.center_x
y = self.canvas_item.props.center_y
self.svg_item = svgitem.SvgItem(x, y, rsvg_handle)
self.svg_item.props.parent = self.visualizer.canvas.get_root_item()
self.svg_item.props.pointer_events = 0
self.svg_item.lower(None)
self.svg_item.props.visibility = goocanvas.ITEM_VISIBLE_ABOVE_THRESHOLD
if width is not None:
self.svg_item.props.width = transform_distance_simulation_to_canvas(width)
if height is not None:
self.svg_item.props.height = transform_distance_simulation_to_canvas(height)
#threshold1 = 10.0/self.svg_item.props.height
#threshold2 = 10.0/self.svg_item.props.width
#self.svg_item.props.visibility_threshold = min(threshold1, threshold2)
self.svg_align_x = align_x
self.svg_align_y = align_y
self._update_svg_position(x, y)
self._update_appearance()
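# A hypothetical decoration call (illustrative only; "router.svg" stands in
# for any SVG placed under src/contrib/visualizer/resource):
#
#     node.set_svg_icon("router.svg", width=4)
#
# The width is given in simulation meters and is converted to canvas units
# internally, as done above.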
def set_label(self, label):
assert isinstance(label, basestring)
self._label = label
self._update_appearance()
def _update_svg_position(self, x, y):
w = self.svg_item.width
h = self.svg_item.height
self.svg_item.set_properties(x=(x - (1-self.svg_align_x)*w),
y=(y - (1-self.svg_align_y)*h))
def tooltip_query(self, tooltip):
self.visualizer.simulation.lock.acquire()
try:
ns3_node = ns.network.NodeList.GetNode(self.node_index)
ipv4 = ns3_node.GetObject(ns.internet.Ipv4.GetTypeId())
ipv6 = ns3_node.GetObject(ns.internet.Ipv6.GetTypeId())
name = '<b><u>Node %i</u></b>' % self.node_index
node_name = ns.core.Names.FindName (ns3_node)
if len(node_name)!=0:
name += ' <b>(' + node_name + ')</b>'
lines = [name]
lines.append('')
self.emit("query-extra-tooltip-info", lines)
mob = ns.mobility.MobilityModel.GetMobilityModel (ns3_node)
if mob is not None:
lines.append(' <b>Mobility Model</b>: %s' % mob.GetInstanceTypeId().GetName())
for devI in range(ns3_node.GetNDevices()):
lines.append('')
lines.append(' <u>NetDevice %i:</u>' % devI)
dev = ns3_node.GetDevice(devI)
name = ns.core.Names.FindName(dev)
if name:
lines.append(' <b>Name:</b> %s' % name)
devname = dev.GetInstanceTypeId().GetName()
lines.append(' <b>Type:</b> %s' % devname)
if ipv4 is not None:
ipv4_idx = ipv4.GetInterfaceForDevice(dev)
if ipv4_idx != -1:
addresses = [
'%s/%s' % (ipv4.GetAddress(ipv4_idx, i).GetLocal(),
ipv4.GetAddress(ipv4_idx, i).GetMask())
for i in range(ipv4.GetNAddresses(ipv4_idx))]
lines.append(' <b>IPv4 Addresses:</b> %s' % '; '.join(addresses))
if ipv6 is not None:
ipv6_idx = ipv6.GetInterfaceForDevice(dev)
if ipv6_idx != -1:
addresses = [
'%s/%s' % (ipv6.GetAddress(ipv6_idx, i).GetAddress(),
ipv6.GetAddress(ipv6_idx, i).GetPrefix())
for i in range(ipv6.GetNAddresses(ipv6_idx))]
lines.append(' <b>IPv6 Addresses:</b> %s' % '; '.join(addresses))
lines.append(' <b>MAC Address:</b> %s' % (dev.GetAddress(),))
tooltip.set_markup('\n'.join(lines))
finally:
self.visualizer.simulation.lock.release()
def on_enter_notify_event(self, view, target, event):
self.highlighted = True
def on_leave_notify_event(self, view, target, event):
self.highlighted = False
def _set_selected(self, value):
self._selected = value
self._update_appearance()
def _get_selected(self):
return self._selected
selected = property(_get_selected, _set_selected)
def _set_highlighted(self, value):
self._highlighted = value
self._update_appearance()
def _get_highlighted(self):
return self._highlighted
highlighted = property(_get_highlighted, _set_highlighted)
def set_size(self, size):
self._size = size
self._update_appearance()
def _update_appearance(self):
"""Update the node aspect to reflect the selected/highlighted state"""
size = transform_distance_simulation_to_canvas(self._size)
if self.svg_item is not None:
alpha = 0x80
else:
alpha = 0xff
fill_color_rgba = (self._color & 0xffffff00) | alpha
self.canvas_item.set_properties(radius_x=size, radius_y=size,
fill_color_rgba=fill_color_rgba)
if self._selected:
line_width = size*.3
else:
line_width = size*.15
if self.highlighted:
stroke_color = 'yellow'
else:
stroke_color = 'black'
self.canvas_item.set_properties(line_width=line_width, stroke_color=stroke_color)
if self._label is not None:
if self._label_canvas_item is None:
self._label_canvas_item = goocanvas.Text(visibility_threshold=0.2,
font="Sans Serif 6",
fill_color_rgba=0x808080ff,
alignment=pango.ALIGN_CENTER,
anchor=gtk.ANCHOR_N,
parent=self.visualizer.canvas.get_root_item(),
pointer_events=0)
self._label_canvas_item.lower(None)
self._label_canvas_item.set_properties(visibility=goocanvas.ITEM_VISIBLE_ABOVE_THRESHOLD,
text=self._label)
self._update_position()
def set_position(self, x, y):
self.canvas_item.set_property("center_x", x)
self.canvas_item.set_property("center_y", y)
if self.svg_item is not None:
self._update_svg_position(x, y)
for link in self.links:
link.update_points()
if self._label_canvas_item is not None:
self._label_canvas_item.set_properties(x=x, y=(y+self._size*3))
def get_position(self):
return (self.canvas_item.get_property("center_x"), self.canvas_item.get_property("center_y"))
def _update_position(self):
x, y = self.get_position()
self.set_position(x, y)
def set_color(self, color):
if isinstance(color, str):
color = gtk.gdk.color_parse(color)
color = ((color.red>>8) << 24) | ((color.green>>8) << 16) | ((color.blue>>8) << 8) | 0xff
self._color = color
self._update_appearance()
def add_link(self, link):
assert isinstance(link, Link)
self.links.append(link)
def remove_link(self, link):
assert isinstance(link, Link)
self.links.remove(link)
@property
def has_mobility(self):
if self._has_mobility is None:
node = ns.network.NodeList.GetNode(self.node_index)
mobility = ns.mobility.MobilityModel.GetMobilityModel (node)
self._has_mobility = (mobility is not None)
return self._has_mobility
class Channel(PyVizObject):
def __init__(self, channel):
self.channel = channel
self.canvas_item = goocanvas.Ellipse(radius_x=30, radius_y=30,
fill_color="white",
stroke_color="grey", line_width=2.0,
line_dash=goocanvas.LineDash([10.0, 10.0 ]),
visibility=goocanvas.ITEM_VISIBLE)
self.canvas_item.set_data("pyviz-object", self)
self.links = []
def set_position(self, x, y):
self.canvas_item.set_property("center_x", x)
self.canvas_item.set_property("center_y", y)
for link in self.links:
link.update_points()
def get_position(self):
return (self.canvas_item.get_property("center_x"), self.canvas_item.get_property("center_y"))
class WiredLink(Link):
def __init__(self, node1, node2):
assert isinstance(node1, Node)
assert isinstance(node2, (Node, Channel))
self.node1 = node1
self.node2 = node2
self.canvas_item = goocanvas.Path(line_width=1.0, stroke_color="black")
self.canvas_item.set_data("pyviz-object", self)
self.node1.links.append(self)
self.node2.links.append(self)
def update_points(self):
pos1_x, pos1_y = self.node1.get_position()
pos2_x, pos2_y = self.node2.get_position()
self.canvas_item.set_property("data", "M %r %r L %r %r" % (pos1_x, pos1_y, pos2_x, pos2_y))
class SimulationThread(threading.Thread):
def __init__(self, viz):
super(SimulationThread, self).__init__()
assert isinstance(viz, Visualizer)
self.viz = viz # Visualizer object
self.lock = threading.Lock()
self.go = threading.Event()
self.go.clear()
self.target_time = 0 # in seconds
self.quit = False
self.sim_helper = ns.visualizer.PyViz()
self.pause_messages = []
def set_nodes_of_interest(self, nodes):
self.lock.acquire()
try:
self.sim_helper.SetNodesOfInterest(nodes)
finally:
self.lock.release()
def run(self):
while not self.quit:
#print "sim: Wait for go"
self.go.wait() # wait until the main (view) thread gives us the go signal
self.go.clear()
if self.quit:
break
#self.go.clear()
#print "sim: Acquire lock"
self.lock.acquire()
try:
if 0:
if ns3.core.Simulator.IsFinished():
self.viz.play_button.set_sensitive(False)
break
#print "sim: Current time is %f; Run until: %f" % (ns3.Simulator.Now ().GetSeconds (), self.target_time)
#if ns3.Simulator.Now ().GetSeconds () > self.target_time:
# print "skipping, model is ahead of view!"
self.sim_helper.SimulatorRunUntil(ns.core.Seconds(self.target_time))
#print "sim: Run until ended at current time: ", ns3.Simulator.Now ().GetSeconds ()
self.pause_messages.extend(self.sim_helper.GetPauseMessages())
gobject.idle_add(self.viz.update_model, priority=PRIORITY_UPDATE_MODEL)
#print "sim: Run until: ", self.target_time, ": finished."
finally:
self.lock.release()
#print "sim: Release lock, loop."
# enumeration
class ShowTransmissionsMode(object):
__slots__ = []
ShowTransmissionsMode.ALL = ShowTransmissionsMode()
ShowTransmissionsMode.NONE = ShowTransmissionsMode()
ShowTransmissionsMode.SELECTED = ShowTransmissionsMode()
class Visualizer(gobject.GObject):
INSTANCE = None
if _import_error is None:
__gsignals__ = {
# signal emitted whenever a right-click-on-node popup menu is being constructed
'populate-node-menu': (gobject.SIGNAL_RUN_LAST, None, (object, gtk.Menu,)),
# signal emitted after every simulation period (SAMPLE_PERIOD seconds of simulated time)
# the simulation lock is acquired while the signal is emitted
'simulation-periodic-update': (gobject.SIGNAL_RUN_LAST, None, ()),
# signal emitted right after the topology is scanned
'topology-scanned': (gobject.SIGNAL_RUN_LAST, None, ()),
# signal emitted when it's time to update the view objects
'update-view': (gobject.SIGNAL_RUN_LAST, None, ()),
}
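    # A hedged sketch of connecting to one of the signals above (the handler name and
    # menu item label are hypothetical; plugins typically hook in like this):
    #   def _populate_node_menu_cb(viz, node, menu):
    #       menu_item = gtk.MenuItem("Show details")
    #       menu_item.show()
    #       menu.add(menu_item)
    #   viz.connect("populate-node-menu", _populate_node_menu_cb)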
def __init__(self):
assert Visualizer.INSTANCE is None
Visualizer.INSTANCE = self
super(Visualizer, self).__init__()
self.nodes = {} # node index -> Node
self.channels = {} # id(ns3.Channel) -> Channel
self.window = None # toplevel window
self.canvas = None # goocanvas.Canvas
self.time_label = None # gtk.Label
self.play_button = None # gtk.ToggleButton
self.zoom = None # gtk.Adjustment
self._scrolled_window = None # gtk.ScrolledWindow
self.links_group = goocanvas.Group()
self.channels_group = goocanvas.Group()
self.nodes_group = goocanvas.Group()
self._update_timeout_id = None
self.simulation = SimulationThread(self)
self.selected_node = None # node currently selected
self.speed = 1.0
self.information_windows = []
self._transmission_arrows = []
self._last_transmissions = []
self._drop_arrows = []
self._last_drops = []
self._show_transmissions_mode = None
self.set_show_transmissions_mode(ShowTransmissionsMode.ALL)
self._panning_state = None
self.node_size_adjustment = None
self.transmissions_smoothing_adjustment = None
self.sample_period = SAMPLE_PERIOD
self.node_drag_state = None
self.follow_node = None
self.shell_window = None
self._topology_scan_timeout_id = None
self.last_discoverd_node = 0
self.create_gui()
for plugin in plugins:
plugin(self)
def set_show_transmissions_mode(self, mode):
assert isinstance(mode, ShowTransmissionsMode)
self._show_transmissions_mode = mode
if self._show_transmissions_mode == ShowTransmissionsMode.ALL:
self.simulation.set_nodes_of_interest(range(ns.network.NodeList.GetNNodes()))
elif self._show_transmissions_mode == ShowTransmissionsMode.NONE:
self.simulation.set_nodes_of_interest([])
elif self._show_transmissions_mode == ShowTransmissionsMode.SELECTED:
if self.selected_node is None:
self.simulation.set_nodes_of_interest([])
else:
self.simulation.set_nodes_of_interest([self.selected_node.node_index])
def _create_advanced_controls(self):
expander = gtk.Expander("Advanced")
expander.show()
main_vbox = gobject.new(gtk.VBox, border_width=8, visible=True)
expander.add(main_vbox)
main_hbox1 = gobject.new(gtk.HBox, border_width=8, visible=True)
main_vbox.pack_start(main_hbox1)
show_transmissions_group = HIGContainer("Show transmissions")
show_transmissions_group.show()
main_hbox1.pack_start(show_transmissions_group, False, False, 8)
vbox = gtk.VBox(True, 4)
vbox.show()
show_transmissions_group.add(vbox)
all_nodes = gtk.RadioButton(None)
all_nodes.set_label("All nodes")
all_nodes.set_active(True)
all_nodes.show()
vbox.add(all_nodes)
selected_node = gtk.RadioButton(all_nodes)
selected_node.show()
selected_node.set_label("Selected node")
selected_node.set_active(False)
vbox.add(selected_node)
no_node = gtk.RadioButton(all_nodes)
no_node.show()
no_node.set_label("Disabled")
no_node.set_active(False)
vbox.add(no_node)
def toggled(radio):
if radio.get_active():
self.set_show_transmissions_mode(ShowTransmissionsMode.ALL)
all_nodes.connect("toggled", toggled)
def toggled(radio):
if radio.get_active():
self.set_show_transmissions_mode(ShowTransmissionsMode.NONE)
no_node.connect("toggled", toggled)
def toggled(radio):
if radio.get_active():
self.set_show_transmissions_mode(ShowTransmissionsMode.SELECTED)
selected_node.connect("toggled", toggled)
# -- misc settings
misc_settings_group = HIGContainer("Misc Settings")
misc_settings_group.show()
main_hbox1.pack_start(misc_settings_group, False, False, 8)
settings_hbox = gobject.new(gtk.HBox, border_width=8, visible=True)
misc_settings_group.add(settings_hbox)
# --> node size
vbox = gobject.new(gtk.VBox, border_width=0, visible=True)
scale = gobject.new(gtk.HScale, visible=True, digits=2)
vbox.pack_start(scale, True, True, 0)
vbox.pack_start(gobject.new(gtk.Label, label="Node Size", visible=True), True, True, 0)
settings_hbox.pack_start(vbox, False, False, 6)
self.node_size_adjustment = scale.get_adjustment()
def node_size_changed(adj):
for node in self.nodes.itervalues():
node.set_size(adj.value)
self.node_size_adjustment.connect("value-changed", node_size_changed)
self.node_size_adjustment.set_all(DEFAULT_NODE_SIZE, 0.01, 20, 0.1)
# --> transmissions smooth factor
vbox = gobject.new(gtk.VBox, border_width=0, visible=True)
scale = gobject.new(gtk.HScale, visible=True, digits=1)
vbox.pack_start(scale, True, True, 0)
vbox.pack_start(gobject.new(gtk.Label, label="Tx. Smooth Factor (s)", visible=True), True, True, 0)
settings_hbox.pack_start(vbox, False, False, 6)
self.transmissions_smoothing_adjustment = scale.get_adjustment()
self.transmissions_smoothing_adjustment.set_all(DEFAULT_TRANSMISSIONS_MEMORY*0.1, 0.1, 10, 0.1)
return expander
class _PanningState(object):
__slots__ = ['initial_mouse_pos', 'initial_canvas_pos', 'motion_signal']
def _begin_panning(self, widget, event):
self.canvas.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
self._panning_state = self._PanningState()
x, y, dummy = widget.window.get_pointer()
self._panning_state.initial_mouse_pos = (x, y)
x = self._scrolled_window.get_hadjustment().value
y = self._scrolled_window.get_vadjustment().value
self._panning_state.initial_canvas_pos = (x, y)
self._panning_state.motion_signal = self.canvas.connect("motion-notify-event", self._panning_motion)
def _end_panning(self, event):
if self._panning_state is None:
return
self.canvas.window.set_cursor(None)
self.canvas.disconnect(self._panning_state.motion_signal)
self._panning_state = None
def _panning_motion(self, widget, event):
assert self._panning_state is not None
if event.is_hint:
x, y, dummy = widget.window.get_pointer()
else:
x, y = event.x, event.y
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
mx0, my0 = self._panning_state.initial_mouse_pos
cx0, cy0 = self._panning_state.initial_canvas_pos
dx = x - mx0
dy = y - my0
hadj.value = cx0 - dx
vadj.value = cy0 - dy
return True
def _canvas_button_press(self, widget, event):
if event.button == 2:
self._begin_panning(widget, event)
return True
return False
def _canvas_button_release(self, dummy_widget, event):
if event.button == 2:
self._end_panning(event)
return True
return False
def _canvas_scroll_event(self, dummy_widget, event):
if event.direction == gtk.gdk.SCROLL_UP:
self.zoom.value *= 1.25
return True
elif event.direction == gtk.gdk.SCROLL_DOWN:
self.zoom.value /= 1.25
return True
return False
def get_hadjustment(self):
return self._scrolled_window.get_hadjustment()
def get_vadjustment(self):
return self._scrolled_window.get_vadjustment()
def create_gui(self):
self.window = gtk.Window()
vbox = gtk.VBox(); vbox.show()
self.window.add(vbox)
# canvas
self.canvas = goocanvas.Canvas()
self.canvas.connect_after("button-press-event", self._canvas_button_press)
self.canvas.connect_after("button-release-event", self._canvas_button_release)
self.canvas.connect("scroll-event", self._canvas_scroll_event)
self.canvas.props.has_tooltip = True
self.canvas.connect("query-tooltip", self._canvas_tooltip_cb)
self.canvas.show()
sw = gtk.ScrolledWindow(); sw.show()
self._scrolled_window = sw
sw.add(self.canvas)
vbox.pack_start(sw, True, True, 4)
self.canvas.set_size_request(600, 450)
self.canvas.set_bounds(-10000, -10000, 10000, 10000)
self.canvas.scroll_to(0, 0)
self.canvas.get_root_item().add_child(self.links_group)
self.links_group.set_property("visibility", goocanvas.ITEM_VISIBLE)
self.canvas.get_root_item().add_child(self.channels_group)
self.channels_group.set_property("visibility", goocanvas.ITEM_VISIBLE)
self.channels_group.raise_(self.links_group)
self.canvas.get_root_item().add_child(self.nodes_group)
self.nodes_group.set_property("visibility", goocanvas.ITEM_VISIBLE)
self.nodes_group.raise_(self.channels_group)
self.hud = hud.Axes(self)
hbox = gtk.HBox(); hbox.show()
vbox.pack_start(hbox, False, False, 4)
# zoom
zoom_adj = gtk.Adjustment(1.0, 0.01, 10.0, 0.02, 1.0, 0)
self.zoom = zoom_adj
def _zoom_changed(adj):
self.canvas.set_scale(adj.value)
zoom_adj.connect("value-changed", _zoom_changed)
zoom = gtk.SpinButton(zoom_adj)
zoom.set_digits(3)
zoom.show()
hbox.pack_start(gobject.new(gtk.Label, label=" Zoom:", visible=True), False, False, 4)
hbox.pack_start(zoom, False, False, 4)
_zoom_changed(zoom_adj)
# speed
speed_adj = gtk.Adjustment(1.0, 0.01, 10.0, 0.02, 1.0, 0)
def _speed_changed(adj):
self.speed = adj.value
self.sample_period = SAMPLE_PERIOD*adj.value
self._start_update_timer()
speed_adj.connect("value-changed", _speed_changed)
speed = gtk.SpinButton(speed_adj)
speed.set_digits(3)
speed.show()
hbox.pack_start(gobject.new(gtk.Label, label=" Speed:", visible=True), False, False, 4)
hbox.pack_start(speed, False, False, 4)
_speed_changed(speed_adj)
# Current time
        self.time_label = gobject.new(gtk.Label, label=" Time:", visible=True)
self.time_label.set_width_chars(20)
hbox.pack_start(self.time_label, False, False, 4)
# Screenshot button
screenshot_button = gobject.new(gtk.Button,
label="Snapshot",
relief=gtk.RELIEF_NONE, focus_on_click=False,
visible=True)
hbox.pack_start(screenshot_button, False, False, 4)
def load_button_icon(button, icon_name):
try:
import gnomedesktop
except ImportError:
sys.stderr.write("Could not load icon %s due to missing gnomedesktop Python module\n" % icon_name)
else:
icon = gnomedesktop.find_icon(gtk.icon_theme_get_default(), icon_name, 16, 0)
if icon is not None:
button.props.image = gobject.new(gtk.Image, file=icon, visible=True)
load_button_icon(screenshot_button, "applets-screenshooter")
screenshot_button.connect("clicked", self._take_screenshot)
# Shell button
if ipython_view is not None:
shell_button = gobject.new(gtk.Button,
label="Shell",
relief=gtk.RELIEF_NONE, focus_on_click=False,
visible=True)
hbox.pack_start(shell_button, False, False, 4)
load_button_icon(shell_button, "gnome-terminal")
shell_button.connect("clicked", self._start_shell)
# Play button
self.play_button = gobject.new(gtk.ToggleButton,
image=gobject.new(gtk.Image, stock=gtk.STOCK_MEDIA_PLAY, visible=True),
label="Simulate (F3)",
relief=gtk.RELIEF_NONE, focus_on_click=False,
use_stock=True, visible=True)
accel_group = gtk.AccelGroup()
self.window.add_accel_group(accel_group)
self.play_button.add_accelerator("clicked", accel_group,
gtk.keysyms.F3, 0, gtk.ACCEL_VISIBLE)
self.play_button.connect("toggled", self._on_play_button_toggled)
hbox.pack_start(self.play_button, False, False, 4)
self.canvas.get_root_item().connect("button-press-event", self.on_root_button_press_event)
vbox.pack_start(self._create_advanced_controls(), False, False, 4)
self.window.show()
def scan_topology(self):
if (self.last_discoverd_node >= ns.network.NodeList.GetNNodes()):
return True
print "scanning topology: [%i, %i) nodes..." % (self.last_discoverd_node, ns.network.NodeList.GetNNodes(),)
graph = pygraphviz.AGraph()
seen_nodes = 0
for nodeI in range(self.last_discoverd_node, ns.network.NodeList.GetNNodes()):
seen_nodes += 1
if seen_nodes > 100:
print "scan topology... %i nodes visited (%.1f%%)" % (nodeI, 100*nodeI/ns.network.NodeList.GetNNodes())
seen_nodes = 0
node = ns.network.NodeList.GetNode(nodeI)
node_name = "Node %i" % nodeI
node_view = self.get_node(nodeI)
mobility = ns.mobility.MobilityModel.GetMobilityModel (node)
# print "Mobility type: " + mobility.GetInstanceTypeId().GetName()
if mobility is not None:
node_view.set_color("red")
pos = mobility.GetPosition()
node_view.set_position(*transform_point_simulation_to_canvas(pos.x, pos.y))
#print "node has mobility position -> ", "%f,%f" % (pos.x, pos.y)
else:
graph.add_node(node_name)
for devI in range(node.GetNDevices()):
device = node.GetDevice(devI)
device_traits = lookup_netdevice_traits(type(device))
if device_traits.is_wireless:
continue
if device_traits.is_virtual:
continue
channel = device.GetChannel()
if channel.GetNDevices() > 2:
if REPRESENT_CHANNELS_AS_NODES:
# represent channels as white nodes
if mobility is None:
channel_name = "Channel %s" % id(channel)
graph.add_edge(node_name, channel_name)
self.get_channel(channel)
self.create_link(self.get_node(nodeI), self.get_channel(channel))
else:
# don't represent channels, just add links between nodes in the same channel
for otherDevI in range(channel.GetNDevices()):
otherDev = channel.GetDevice(otherDevI)
otherNode = otherDev.GetNode()
otherNodeView = self.get_node(otherNode.GetId())
if otherNode is not node:
if mobility is None and not otherNodeView.has_mobility:
other_node_name = "Node %i" % otherNode.GetId()
graph.add_edge(node_name, other_node_name)
self.create_link(self.get_node(nodeI), otherNodeView)
else:
for otherDevI in range(channel.GetNDevices()):
otherDev = channel.GetDevice(otherDevI)
otherNode = otherDev.GetNode()
otherNodeView = self.get_node(otherNode.GetId())
if otherNode is not node:
if os.environ.get ('NS_VIS_ASSIGN') is not None:
other_node_name = "Node %i" % otherNode.GetId()
graph.add_edge(node_name, other_node_name)
else:
if mobility is None and not otherNodeView.has_mobility:
other_node_name = "Node %i" % otherNode.GetId()
graph.add_edge(node_name, other_node_name)
self.create_link(self.get_node(nodeI), otherNodeView)
# print "scanning topology: calling graphviz layout"
graph.layout(LAYOUT_ALGORITHM)
for node in graph.iternodes():
#print node, "=>", node.attr['pos']
node_type, node_id = node.split(' ')
pos_x, pos_y = [float(s) for s in node.attr['pos'].split(',')]
if node_type == 'Node':
obj = self.nodes[int(node_id)]
# If node reordering is requested
if os.environ.get ('NS_VIS_ASSIGN') is not None:
node = ns.network.NodeList.GetNode(int(node_id))
mobility = ns.mobility.MobilityModel.GetMobilityModel (node)
if mobility is not None:
pos = ns.core.Vector (pos_x, pos_y, 0)
mobility.SetPosition (pos)
elif node_type == 'Channel':
obj = self.channels[int(node_id)]
obj.set_position(pos_x, pos_y)
# print "scanning topology: all done."
self.emit("topology-scanned")
self.last_discoverd_node = ns.network.NodeList.GetNNodes();
return True
def get_node(self, index):
try:
return self.nodes[index]
except KeyError:
node = Node(self, index)
self.nodes[index] = node
self.nodes_group.add_child(node.canvas_item)
node.canvas_item.connect("button-press-event", self.on_node_button_press_event, node)
node.canvas_item.connect("button-release-event", self.on_node_button_release_event, node)
return node
def get_channel(self, ns3_channel):
try:
return self.channels[id(ns3_channel)]
except KeyError:
channel = Channel(ns3_channel)
self.channels[id(ns3_channel)] = channel
self.channels_group.add_child(channel.canvas_item)
return channel
def create_link(self, node, node_or_channel):
link = WiredLink(node, node_or_channel)
self.links_group.add_child(link.canvas_item)
link.canvas_item.lower(None)
def update_view(self):
#print "update_view"
self.time_label.set_text("Time: %f s" % ns.core.Simulator.Now().GetSeconds())
self._update_node_positions()
# Update information
for info_win in self.information_windows:
info_win.update()
self._update_transmissions_view()
self._update_drops_view()
self.emit("update-view")
def _update_node_positions(self):
for node in self.nodes.itervalues():
if node.has_mobility:
ns3_node = ns.network.NodeList.GetNode(node.node_index)
mobility = ns.mobility.MobilityModel.GetMobilityModel (ns3_node)
if mobility is not None:
pos = mobility.GetPosition()
x, y = transform_point_simulation_to_canvas(pos.x, pos.y)
node.set_position(x, y)
if node is self.follow_node:
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
px, py = self.canvas.convert_to_pixels(x, y)
hadj.value = px - hadj.page_size/2
vadj.value = py - vadj.page_size/2
def center_on_node(self, node):
if isinstance(node, ns.network.Node):
node = self.nodes[node.GetId()]
elif isinstance(node, (int, long)):
node = self.nodes[node]
elif isinstance(node, Node):
pass
else:
raise TypeError("expected int, viz.Node or ns.network.Node, not %r" % node)
x, y = node.get_position()
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
px, py = self.canvas.convert_to_pixels(x, y)
hadj.value = px - hadj.page_size/2
vadj.value = py - vadj.page_size/2
def update_model(self):
self.simulation.lock.acquire()
try:
self.emit("simulation-periodic-update")
finally:
self.simulation.lock.release()
def do_simulation_periodic_update(self):
smooth_factor = int(self.transmissions_smoothing_adjustment.value*10)
transmissions = self.simulation.sim_helper.GetTransmissionSamples()
self._last_transmissions.append(transmissions)
while len(self._last_transmissions) > smooth_factor:
self._last_transmissions.pop(0)
drops = self.simulation.sim_helper.GetPacketDropSamples()
self._last_drops.append(drops)
while len(self._last_drops) > smooth_factor:
self._last_drops.pop(0)
def _get_label_over_line_position(self, pos1_x, pos1_y, pos2_x, pos2_y):
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
bounds_x1, bounds_y1 = self.canvas.convert_from_pixels(hadj.value, vadj.value)
bounds_x2, bounds_y2 = self.canvas.convert_from_pixels(hadj.value + hadj.page_size,
vadj.value + vadj.page_size)
pos1_x, pos1_y, pos2_x, pos2_y = ns.visualizer.PyViz.LineClipping(bounds_x1, bounds_y1,
bounds_x2, bounds_y2,
pos1_x, pos1_y,
pos2_x, pos2_y)
return (pos1_x + pos2_x)/2, (pos1_y + pos2_y)/2
def _update_transmissions_view(self):
transmissions_average = {}
for transmission_set in self._last_transmissions:
for transmission in transmission_set:
key = (transmission.transmitter.GetId(), transmission.receiver.GetId())
rx_bytes, count = transmissions_average.get(key, (0, 0))
rx_bytes += transmission.bytes
count += 1
transmissions_average[key] = rx_bytes, count
old_arrows = self._transmission_arrows
for arrow, label in old_arrows:
arrow.set_property("visibility", goocanvas.ITEM_HIDDEN)
label.set_property("visibility", goocanvas.ITEM_HIDDEN)
new_arrows = []
k = self.node_size_adjustment.value/5
for (transmitter_id, receiver_id), (rx_bytes, rx_count) in transmissions_average.iteritems():
transmitter = self.get_node(transmitter_id)
receiver = self.get_node(receiver_id)
try:
arrow, label = old_arrows.pop()
except IndexError:
arrow = goocanvas.Polyline(line_width=2.0, stroke_color_rgba=0x00C000C0, close_path=False, end_arrow=True)
arrow.set_property("parent", self.canvas.get_root_item())
arrow.props.pointer_events = 0
arrow.raise_(None)
label = goocanvas.Text(parent=self.canvas.get_root_item(), pointer_events=0)
label.raise_(None)
arrow.set_property("visibility", goocanvas.ITEM_VISIBLE)
line_width = max(0.1, math.log(float(rx_bytes)/rx_count/self.sample_period)*k)
arrow.set_property("line-width", line_width)
pos1_x, pos1_y = transmitter.get_position()
pos2_x, pos2_y = receiver.get_position()
points = goocanvas.Points([(pos1_x, pos1_y), (pos2_x, pos2_y)])
arrow.set_property("points", points)
kbps = float(rx_bytes*8)/1e3/rx_count/self.sample_period
label.set_properties(visibility=goocanvas.ITEM_VISIBLE_ABOVE_THRESHOLD,
visibility_threshold=0.5,
font=("Sans Serif %f" % int(1+BITRATE_FONT_SIZE*k)))
angle = math.atan2((pos2_y - pos1_y), (pos2_x - pos1_x))
if -PI_OVER_2 <= angle <= PI_OVER_2:
label.set_properties(text=("%.2f kbit/s →" % (kbps,)),
alignment=pango.ALIGN_CENTER,
anchor=gtk.ANCHOR_S,
x=0, y=-line_width/2)
M = cairo.Matrix()
M.translate(*self._get_label_over_line_position(pos1_x, pos1_y, pos2_x, pos2_y))
M.rotate(angle)
label.set_transform(M)
else:
label.set_properties(text=("← %.2f kbit/s" % (kbps,)),
alignment=pango.ALIGN_CENTER,
anchor=gtk.ANCHOR_N,
x=0, y=line_width/2)
M = cairo.Matrix()
M.translate(*self._get_label_over_line_position(pos1_x, pos1_y, pos2_x, pos2_y))
M.rotate(angle)
M.scale(-1, -1)
label.set_transform(M)
new_arrows.append((arrow, label))
self._transmission_arrows = new_arrows + old_arrows
def _update_drops_view(self):
drops_average = {}
for drop_set in self._last_drops:
for drop in drop_set:
key = drop.transmitter.GetId()
drop_bytes, count = drops_average.get(key, (0, 0))
drop_bytes += drop.bytes
count += 1
drops_average[key] = drop_bytes, count
old_arrows = self._drop_arrows
for arrow, label in old_arrows:
arrow.set_property("visibility", goocanvas.ITEM_HIDDEN)
label.set_property("visibility", goocanvas.ITEM_HIDDEN)
new_arrows = []
# get the coordinates for the edge of screen
vadjustment = self._scrolled_window.get_vadjustment()
bottom_y = vadjustment.value + vadjustment.page_size
dummy, edge_y = self.canvas.convert_from_pixels(0, bottom_y)
k = self.node_size_adjustment.value/5
for transmitter_id, (drop_bytes, drop_count) in drops_average.iteritems():
transmitter = self.get_node(transmitter_id)
try:
arrow, label = old_arrows.pop()
except IndexError:
arrow = goocanvas.Polyline(line_width=2.0, stroke_color_rgba=0xC00000C0, close_path=False, end_arrow=True)
arrow.props.pointer_events = 0
arrow.set_property("parent", self.canvas.get_root_item())
arrow.raise_(None)
label = goocanvas.Text()#, fill_color_rgba=0x00C000C0)
label.props.pointer_events = 0
label.set_property("parent", self.canvas.get_root_item())
label.raise_(None)
arrow.set_property("visibility", goocanvas.ITEM_VISIBLE)
arrow.set_property("line-width", max(0.1, math.log(float(drop_bytes)/drop_count/self.sample_period)*k))
pos1_x, pos1_y = transmitter.get_position()
pos2_x, pos2_y = pos1_x, edge_y
points = goocanvas.Points([(pos1_x, pos1_y), (pos2_x, pos2_y)])
arrow.set_property("points", points)
label.set_properties(visibility=goocanvas.ITEM_VISIBLE_ABOVE_THRESHOLD,
visibility_threshold=0.5,
font=("Sans Serif %i" % int(1+BITRATE_FONT_SIZE*k)),
text=("%.2f kbit/s" % (float(drop_bytes*8)/1e3/drop_count/self.sample_period,)),
alignment=pango.ALIGN_CENTER,
x=(pos1_x + pos2_x)/2,
y=(pos1_y + pos2_y)/2)
new_arrows.append((arrow, label))
self._drop_arrows = new_arrows + old_arrows
def update_view_timeout(self):
#print "view: update_view_timeout called at real time ", time.time()
# while the simulator is busy, run the gtk event loop
while not self.simulation.lock.acquire(False):
while gtk.events_pending():
gtk.main_iteration()
pause_messages = self.simulation.pause_messages
self.simulation.pause_messages = []
try:
self.update_view()
self.simulation.target_time = ns.core.Simulator.Now ().GetSeconds () + self.sample_period
#print "view: target time set to %f" % self.simulation.target_time
finally:
self.simulation.lock.release()
if pause_messages:
#print pause_messages
dialog = gtk.MessageDialog(parent=self.window, flags=0, type=gtk.MESSAGE_WARNING, buttons=gtk.BUTTONS_OK,
message_format='\n'.join(pause_messages))
dialog.connect("response", lambda d, r: d.destroy())
dialog.show()
self.play_button.set_active(False)
# if we're paused, stop the update timer
if not self.play_button.get_active():
self._update_timeout_id = None
return False
#print "view: self.simulation.go.set()"
self.simulation.go.set()
#print "view: done."
return True
def _start_topology_scan(self):
if self._topology_scan_timeout_id is not None:
gobject.source_remove(self._topology_scan_timeout_id)
self._topology_scan_timeout_id = gobject.timeout_add (1000, self.scan_topology, priority=PRIORITY_UPDATE_VIEW)
def _start_update_timer(self):
if self._update_timeout_id is not None:
gobject.source_remove(self._update_timeout_id)
#print "start_update_timer"
self._update_timeout_id = gobject.timeout_add(int(SAMPLE_PERIOD/min(self.speed, 1)*1e3),
self.update_view_timeout,
priority=PRIORITY_UPDATE_VIEW)
def _on_play_button_toggled(self, button):
if button.get_active():
self._start_update_timer()
else:
if self._update_timeout_id is not None:
gobject.source_remove(self._update_timeout_id)
def _quit(self, *dummy_args):
if self._update_timeout_id is not None:
gobject.source_remove(self._update_timeout_id)
self._update_timeout_id = None
self.simulation.quit = True
self.simulation.go.set()
self.simulation.join()
gtk.main_quit()
def _monkey_patch_ipython(self):
# The user may want to access the NS 3 simulation state, but
# NS 3 is not thread safe, so it could cause serious problems.
# To work around this, monkey-patch IPython to automatically
# acquire and release the simulation lock around each code
# that is executed.
original_runcode = self.ipython.runcode
def runcode(ip, *args):
#print "lock"
self.simulation.lock.acquire()
try:
return original_runcode(*args)
finally:
#print "unlock"
self.simulation.lock.release()
import types
self.ipython.runcode = types.MethodType(runcode, self.ipython)
def autoscale_view(self):
if not self.nodes:
return
self._update_node_positions()
positions = [node.get_position() for node in self.nodes.itervalues()]
min_x, min_y = min(x for (x,y) in positions), min(y for (x,y) in positions)
max_x, max_y = max(x for (x,y) in positions), max(y for (x,y) in positions)
min_x_px, min_y_px = self.canvas.convert_to_pixels(min_x, min_y)
max_x_px, max_y_px = self.canvas.convert_to_pixels(max_x, max_y)
dx = max_x - min_x
dy = max_y - min_y
dx_px = max_x_px - min_x_px
dy_px = max_y_px - min_y_px
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
new_dx, new_dy = 1.5*dx_px, 1.5*dy_px
if new_dx == 0 or new_dy == 0:
return
self.zoom.value = min(hadj.page_size/new_dx, vadj.page_size/new_dy)
x1, y1 = self.canvas.convert_from_pixels(hadj.value, vadj.value)
x2, y2 = self.canvas.convert_from_pixels(hadj.value+hadj.page_size, vadj.value+vadj.page_size)
width = x2 - x1
height = y2 - y1
center_x = (min_x + max_x) / 2
center_y = (min_y + max_y) / 2
self.canvas.scroll_to(center_x - width/2, center_y - height/2)
return False
def start(self):
self.scan_topology()
self._start_topology_scan()
self.window.connect("delete-event", self._quit)
#self._start_update_timer()
gobject.timeout_add(200, self.autoscale_view)
self.simulation.start()
try:
__IPYTHON__
except NameError:
pass
else:
self._monkey_patch_ipython()
gtk.main()
def on_root_button_press_event(self, view, target, event):
if event.button == 1:
self.select_node(None)
return True
def on_node_button_press_event(self, view, target, event, node):
if event.button == 1:
self.select_node(node)
return True
elif event.button == 3:
self.popup_node_menu(node, event)
return True
elif event.button == 2:
self.begin_node_drag(node)
return True
return False
def on_node_button_release_event(self, view, target, event, node):
if event.button == 2:
self.end_node_drag(node)
return True
return False
class NodeDragState(object):
def __init__(self, canvas_x0, canvas_y0, sim_x0, sim_y0):
self.canvas_x0 = canvas_x0
self.canvas_y0 = canvas_y0
self.sim_x0 = sim_x0
self.sim_y0 = sim_y0
self.motion_signal = None
def begin_node_drag(self, node):
self.simulation.lock.acquire()
try:
ns3_node = ns.network.NodeList.GetNode(node.node_index)
mob = ns.mobility.MobilityModel.GetMobilityModel (ns3_node)
if mob is None:
return
if self.node_drag_state is not None:
return
pos = mob.GetPosition()
finally:
self.simulation.lock.release()
x, y, dummy = self.canvas.window.get_pointer()
x0, y0 = self.canvas.convert_from_pixels(x, y)
self.node_drag_state = self.NodeDragState(x0, y0, pos.x, pos.y)
self.node_drag_state.motion_signal = node.canvas_item.connect("motion-notify-event", self.node_drag_motion, node)
def node_drag_motion(self, item, targe_item, event, node):
self.simulation.lock.acquire()
try:
ns3_node = ns.network.NodeList.GetNode(node.node_index)
mob = ns.mobility.MobilityModel.GetMobilityModel (ns3_node)
if mob is None:
return False
if self.node_drag_state is None:
return False
x, y, dummy = self.canvas.window.get_pointer()
canvas_x, canvas_y = self.canvas.convert_from_pixels(x, y)
dx = (canvas_x - self.node_drag_state.canvas_x0)
dy = (canvas_y - self.node_drag_state.canvas_y0)
pos = mob.GetPosition()
pos.x = self.node_drag_state.sim_x0 + transform_distance_canvas_to_simulation(dx)
pos.y = self.node_drag_state.sim_y0 + transform_distance_canvas_to_simulation(dy)
#print "SetPosition(%G, %G)" % (pos.x, pos.y)
mob.SetPosition(pos)
node.set_position(*transform_point_simulation_to_canvas(pos.x, pos.y))
finally:
self.simulation.lock.release()
return True
def end_node_drag(self, node):
if self.node_drag_state is None:
return
node.canvas_item.disconnect(self.node_drag_state.motion_signal)
self.node_drag_state = None
def popup_node_menu(self, node, event):
menu = gtk.Menu()
self.emit("populate-node-menu", node, menu)
menu.popup(None, None, None, event.button, event.time)
def _update_ipython_selected_node(self):
# If we are running under ipython -gthread, make this new
# selected node available as a global 'selected_node'
# variable.
try:
__IPYTHON__
except NameError:
pass
else:
if self.selected_node is None:
ns3_node = None
else:
self.simulation.lock.acquire()
try:
ns3_node = ns.network.NodeList.GetNode(self.selected_node.node_index)
finally:
self.simulation.lock.release()
self.ipython.updateNamespace({'selected_node': ns3_node})
def select_node(self, node):
if isinstance(node, ns.network.Node):
node = self.nodes[node.GetId()]
elif isinstance(node, (int, long)):
node = self.nodes[node]
elif isinstance(node, Node):
pass
elif node is None:
pass
else:
raise TypeError("expected None, int, viz.Node or ns.network.Node, not %r" % node)
if node is self.selected_node:
return
if self.selected_node is not None:
self.selected_node.selected = False
self.selected_node = node
if self.selected_node is not None:
self.selected_node.selected = True
if self._show_transmissions_mode == ShowTransmissionsMode.SELECTED:
if self.selected_node is None:
self.simulation.set_nodes_of_interest([])
else:
self.simulation.set_nodes_of_interest([self.selected_node.node_index])
self._update_ipython_selected_node()
def add_information_window(self, info_win):
self.information_windows.append(info_win)
self.simulation.lock.acquire()
try:
info_win.update()
finally:
self.simulation.lock.release()
def remove_information_window(self, info_win):
self.information_windows.remove(info_win)
def _canvas_tooltip_cb(self, canvas, x, y, keyboard_mode, tooltip):
#print "tooltip query: ", x, y
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
x, y = self.canvas.convert_from_pixels(hadj.value + x, vadj.value + y)
item = self.canvas.get_item_at(x, y, True)
#print "items at (%f, %f): %r | keyboard_mode=%r" % (x, y, item, keyboard_mode)
if not item:
return False
while item is not None:
obj = item.get_data("pyviz-object")
if obj is not None:
obj.tooltip_query(tooltip)
return True
item = item.props.parent
return False
def _get_export_file_name(self):
sel = gtk.FileChooserDialog("Save...", self.canvas.get_toplevel(),
gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
sel.set_default_response(gtk.RESPONSE_OK)
sel.set_local_only(True)
sel.set_do_overwrite_confirmation(True)
sel.set_current_name("Unnamed.pdf")
filter = gtk.FileFilter()
filter.set_name("Embedded PostScript")
filter.add_mime_type("image/x-eps")
sel.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("Portable Document Graphics")
filter.add_mime_type("application/pdf")
sel.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("Scalable Vector Graphics")
filter.add_mime_type("image/svg+xml")
sel.add_filter(filter)
resp = sel.run()
if resp != gtk.RESPONSE_OK:
sel.destroy()
return None
file_name = sel.get_filename()
sel.destroy()
return file_name
def _take_screenshot(self, dummy_button):
#print "Cheese!"
file_name = self._get_export_file_name()
if file_name is None:
return
# figure out the correct bounding box for what is visible on screen
x1 = self._scrolled_window.get_hadjustment().value
y1 = self._scrolled_window.get_vadjustment().value
x2 = x1 + self._scrolled_window.get_hadjustment().page_size
y2 = y1 + self._scrolled_window.get_vadjustment().page_size
bounds = goocanvas.Bounds()
bounds.x1, bounds.y1 = self.canvas.convert_from_pixels(x1, y1)
bounds.x2, bounds.y2 = self.canvas.convert_from_pixels(x2, y2)
dest_width = bounds.x2 - bounds.x1
dest_height = bounds.y2 - bounds.y1
#print bounds.x1, bounds.y1, " -> ", bounds.x2, bounds.y2
dummy, extension = os.path.splitext(file_name)
extension = extension.lower()
if extension == '.eps':
surface = cairo.PSSurface(file_name, dest_width, dest_height)
elif extension == '.pdf':
surface = cairo.PDFSurface(file_name, dest_width, dest_height)
elif extension == '.svg':
surface = cairo.SVGSurface(file_name, dest_width, dest_height)
else:
dialog = gtk.MessageDialog(parent = self.canvas.get_toplevel(),
flags = gtk.DIALOG_DESTROY_WITH_PARENT,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = "Unknown extension '%s' (valid extensions are '.eps', '.svg', and '.pdf')"
% (extension,))
dialog.run()
dialog.destroy()
return
# draw the canvas to a printing context
cr = cairo.Context(surface)
cr.translate(-bounds.x1, -bounds.y1)
self.canvas.render(cr, bounds, self.zoom.value)
cr.show_page()
surface.finish()
def set_follow_node(self, node):
if isinstance(node, ns.network.Node):
node = self.nodes[node.GetId()]
self.follow_node = node
def _start_shell(self, dummy_button):
if self.shell_window is not None:
self.shell_window.present()
return
self.shell_window = gtk.Window()
self.shell_window.set_size_request(750,550)
self.shell_window.set_resizable(True)
scrolled_window = gtk.ScrolledWindow()
scrolled_window.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
self.ipython = ipython_view.IPythonView()
self.ipython.modify_font(pango.FontDescription(SHELL_FONT))
self.ipython.set_wrap_mode(gtk.WRAP_CHAR)
self.ipython.show()
scrolled_window.add(self.ipython)
scrolled_window.show()
self.shell_window.add(scrolled_window)
self.shell_window.show()
self.shell_window.connect('destroy', self._on_shell_window_destroy)
self._update_ipython_selected_node()
self.ipython.updateNamespace({'viz': self})
def _on_shell_window_destroy(self, window):
self.shell_window = None
initialization_hooks = []
def add_initialization_hook(hook, *args):
"""
Adds a callback to be called after
the visualizer is initialized, like this::
initialization_hook(visualizer, *args)
"""
global initialization_hooks
initialization_hooks.append((hook, args))
def set_bounds(x1, y1, x2, y2):
assert x2>x1
assert y2>y1
def hook(viz):
cx1, cy1 = transform_point_simulation_to_canvas(x1, y1)
cx2, cy2 = transform_point_simulation_to_canvas(x2, y2)
viz.canvas.set_bounds(cx1, cy1, cx2, cy2)
add_initialization_hook(hook)
def start():
assert Visualizer.INSTANCE is None
if _import_error is not None:
import sys
print >> sys.stderr, "No visualization support (%s)." % (str(_import_error),)
ns.core.Simulator.Run()
return
load_plugins()
viz = Visualizer()
for hook, args in initialization_hooks:
gobject.idle_add(hook, viz, *args)
ns.network.Packet.EnablePrinting()
viz.start()
|
srene/ndnSIM-inrpp
|
src/visualizer/visualizer/core.py
|
Python
|
gpl-2.0
| 62,118
|
[
"FLEUR"
] |
9ee74f367db211aad330f138870b379375fd281159756828c881b2612f522b9d
|
class node(object):
"""
This class represents a node in the AST built while parsing command lines.
It's basically an object container for various attributes, with a slightly
specialised representation to make it a little easier to debug the parser.
"""
def __init__(self, **kwargs):
assert 'kind' in kwargs
self.__dict__.update(kwargs)
def dump(self, indent=' '):
return _dump(self, indent)
def __repr__(self):
chunks = []
d = dict(self.__dict__)
kind = d.pop('kind')
for k, v in sorted(d.items()):
chunks.append('%s=%r' % (k, v))
return '%sNode(%s)' % (kind.title(), ' '.join(chunks))
def __eq__(self, other):
if not isinstance(other, node):
return False
return self.__dict__ == other.__dict__
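# A small illustrative sketch of how these nodes are built and displayed
# (the attribute values here are made up, not taken from a real parse):
#   n = node(kind='word', word='ls', pos=(0, 2), parts=[])
#   repr(n)   # -> "WordNode(parts=[] pos=(0, 2) word='ls')"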
class nodevisitor(object):
def _visitnode(self, n, *args, **kwargs):
k = n.kind
self.visitnode(n)
return getattr(self, 'visit%s' % k)(n, *args, **kwargs)
def visit(self, n):
k = n.kind
if k == 'operator':
self._visitnode(n, n.op)
elif k == 'list':
dochild = self._visitnode(n, n.parts)
if dochild is None or dochild:
for child in n.parts:
self.visit(child)
elif k == 'reservedword':
self._visitnode(n, n.word)
elif k == 'pipe':
self._visitnode(n, n.pipe)
elif k == 'pipeline':
dochild = self._visitnode(n, n.parts)
if dochild is None or dochild:
for child in n.parts:
self.visit(child)
elif k == 'compound':
dochild = self._visitnode(n, n.list, n.redirects)
if dochild is None or dochild:
for child in n.list:
self.visit(child)
for child in n.redirects:
self.visit(child)
elif k in ('if', 'for', 'while', 'until'):
dochild = self._visitnode(n, n.parts)
if dochild is None or dochild:
for child in n.parts:
self.visit(child)
elif k == 'command':
dochild = self._visitnode(n, n.parts)
if dochild is None or dochild:
for child in n.parts:
self.visit(child)
elif k == 'function':
dochild = self._visitnode(n, n.name, n.body, n.parts)
if dochild is None or dochild:
for child in n.parts:
self.visit(child)
elif k == 'redirect':
dochild = self._visitnode(n, n.input, n.type, n.output, n.heredoc)
if dochild is None or dochild:
if isinstance(n.output, node):
self.visit(n.output)
if n.heredoc:
self.visit(n.heredoc)
elif k in ('word', 'assignment'):
dochild = self._visitnode(n, n.word)
if dochild is None or dochild:
for child in n.parts:
self.visit(child)
elif k in ('parameter', 'tilde', 'heredoc'):
self._visitnode(n, n.value)
elif k in ('commandsubstitution', 'processsubstitution'):
dochild = self._visitnode(n, n.command)
if dochild is None or dochild:
self.visit(n.command)
else:
raise ValueError('unknown node kind %r' % k)
self.visitnodeend(n)
def visitnode(self, n):
pass
def visitnodeend(self, n):
pass
def visitoperator(self, n, op):
pass
def visitlist(self, n, parts):
pass
def visitpipe(self, n, pipe):
pass
def visitpipeline(self, n, parts):
pass
def visitcompound(self, n, list, redirects):
pass
def visitif(self, node, parts):
pass
def visitfor(self, node, parts):
pass
def visitwhile(self, node, parts):
pass
def visituntil(self, node, parts):
pass
def visitcommand(self, n, parts):
pass
def visitfunction(self, n, name, body, parts):
pass
def visitword(self, n, word):
pass
def visitassignment(self, n, word):
pass
def visitreservedword(self, n, word):
pass
def visitparameter(self, n, value):
pass
def visittilde(self, n, value):
pass
def visitredirect(self, n, input, type, output, heredoc):
pass
def visitheredoc(self, n, value):
pass
def visitprocesssubstitution(self, n, command):
pass
def visitcommandsubstitution(self, n, command):
pass
def _dump(tree, indent=' '):
def _format(n, level=0):
if isinstance(n, node):
d = dict(n.__dict__)
kind = d.pop('kind')
if kind == 'list' and level > 0:
level = level + 1
fields = []
v = d.pop('s', None)
if v:
fields.append(('s', _format(v, level)))
for k, v in sorted(d.items()):
if not v or k == 'parts':
continue
llevel = level
if isinstance(v, node):
llevel += 1
fields.append((k, '\n' + (indent * llevel) + _format(v, llevel)))
else:
fields.append((k, _format(v, level)))
if kind == 'function':
fields = [f for f in fields if f[0] not in ('name', 'body')]
v = d.pop('parts', None)
if v:
fields.append(('parts', _format(v, level)))
return ''.join([
'%sNode' % kind.title(),
'(',
', '.join(('%s=%s' % field for field in fields)),
')'])
elif isinstance(n, list):
lines = ['[']
lines.extend((indent * (level + 1) + _format(x, level + 1) + ','
for x in n))
if len(lines) > 1:
lines.append(indent * (level) + ']')
else:
lines[-1] += ']'
return '\n'.join(lines)
return repr(n)
if not isinstance(tree, node):
raise TypeError('expected node, got %r' % tree.__class__.__name__)
return _format(tree)
def findfirstkind(parts, kind):
for i, node in enumerate(parts):
if node.kind == kind:
return i
return -1
class posconverter(nodevisitor):
def __init__(self, string):
self.string = string
def visitnode(self, node):
assert hasattr(node, 'pos'), 'node %r is missing pos attr' % node
start, end = node.__dict__.pop('pos')
node.s = self.string[start:end]
class posshifter(nodevisitor):
def __init__(self, count):
self.count = count
def visitnode(self, node):
#assert node.pos[1] + base <= endlimit
node.pos = (node.pos[0] + self.count, node.pos[1] + self.count)
|
blurrymoi/bashlex
|
bashlex/ast.py
|
Python
|
gpl-3.0
| 7,001
|
[
"VisIt"
] |
6cca36aee97a4b32651794c9211b1e27cb4064c4eefd2d2608d0efe925b7d4bf
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 2 14:42:50 2015
@author: timmonen
"""
#####################
'''
Iterate over sam-file in read pairs
-------------------------------------
0.a Get the reference positions of each (fwd and bwd) primer (only once)
0.b Make dictionary of fragment and beginning and end positions
Preliminary clean-up of bad reads
Track overall number thrown out due to prelim cleaning
1. If reads unmapped, trash
track number unmapped
2. If reads not proper pair, trash
track number not proper pair
3. If insert size < 350 or > 750, trash
track number too small, number too big
4. See which fragment(s) the insert belongs to: compare first and last position of
read to span of each fragment (if in array?)
def is_slice_in_list(s,l):
len_s = len(s) #so we don't recompute length of s on every iteration
return any(s == l[i:len_s+i] for i in xrange(len(l) - len_s+1))
5. If insert is in zero or more than one fragment, trash
track how many are thrown out due to 0 or more than 1 fragment
keep how many in 3 fragments (should be none! sanity check)
---------------------------------------------------------------------
We have exclusively mapped, paired, reasonably-sized inserts which belong to
exactly one fragment.
Trim reads in regions with primer overlap using pre-mapping information
--------------------------------------------------------------------------
We know from pre-mapping the position of each read wrt to same reference:
We know the length of each primer
Say primer starts at position 2300 and ends at position 2320
If read starts at position 2315, trim beginning to start at position 2321
Since accept only insert sizes > 350, only need to worry about forward
primers for forward reads, and backward primers for backward reads
Reverse read will never reach into forward primer and vice versa
If we need to accept smaller insert sizes, we also need to check the forward
primer against reverse reads, and the reverse primer against forward reads,
because reads could start looping back
---------------------------------------------------------------------------
We already know which fragment each reads belongs to, so sort the trimmed reads
into 6 different bam/sam files with fragment name
'''
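# A minimal sketch of the primer-trimming arithmetic described above, using the
# hypothetical coordinates from the note (primer spanning 2300-2320):
#   primer_start, primer_end = 2300, 2320
#   read_start = 2315
#   if read_start <= primer_end:
#       read_start = primer_end + 1   # -> 2321, first base past the forward primer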
from pipeline.sample import Sample
import pysam
import sys
import os
import subprocess as sp
import argparse
import numpy as np
def pair_generator(iterable):
'''Generator for pairs in interleaved files, such as BAM files'''
# Note: the last item is lost if odd
it = iter(iterable)
while True:
try:
p1 = it.next()
p2 = it.next()
yield (p1, p2)
except StopIteration:
raise
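# Example use of pair_generator (illustrative only; assumes an interleaved,
# name-sorted SAM file called 'premapped.sam'):
#   with pysam.Samfile('premapped.sam', 'r') as sf:
#       for read1, read2 in pair_generator(sf):
#           print read1.qname, read2.qname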
def assign_to_fragment(reads, fragment_full, VERBOSE=0, **kwargs):
'''Assign read pair to fragments'''
i_fwd = reads[0].is_reverse
# Insert coordinates
ins_start = reads[i_fwd].pos
ins_end = reads[i_fwd].pos + reads[i_fwd].isize
# What fragments could the read pair come from?
frags_pot = []
for n_frag, (fr_start, fr_end) in enumerate(fragment_full.values()):
if np.isscalar(fr_start):
frag_ind = (ins_start >= fr_start) and (ins_end <= fr_end)
if frag_ind:
frags_pot.append(str(fragment_full.keys()[n_frag]))
    # If no fragments are compatible, it's cross-boundary (PCR craziness)
if len(frags_pot) == 0:
pair_identity = 'Lost'
    # If it is compatible with exactly one fragment, it's ok
elif len(frags_pot) == 1:
pair_identity = frags_pot[0]
    # If it is compatible with more than one fragment, it's ambiguous
elif len(frags_pot) > 1:
pair_identity = "Ambiguous"
if VERBOSE > 0:
print fragment_full, ins_start, ins_end, pair_identity
return pair_identity
def trim_primers(reads, fragment, fragments_trim, **kwargs):
i_fwd = reads[0].is_reverse
i_rev = not i_fwd
tampered = "Intact"
frag_pos = fragments_trim[fragment]
# FWD primer
if reads[i_fwd].pos < frag_pos[0]:
tampered = "Trimmed"
ref_pos = reads[i_fwd].pos
read_pos = 0
cigar = reads[i_fwd].cigar[::-1]
for i, (bt, bl) in enumerate(reads[i_fwd].cigar):
# Match
if bt == 0:
if ref_pos + bl > frag_pos[0]:
cut = frag_pos[0] - ref_pos
cigar[-1] = (bt, bl - cut)
read_pos += cut
ref_pos = frag_pos[0]
break
cigar.pop(-1)
read_pos += bl
ref_pos += bl
# Insertion
elif bt == 1:
# Move up in read, nothing to move up in reference
cigar.pop(-1)
read_pos += bl
# Deletion
elif bt == 2:
# Starting with a deletion is not allowed
cigar.pop(-1)
# Move up in reference, nothing to move up in read
ref_pos += bl
if ref_pos > frag_pos[0]:
break
cigar = cigar[::-1]
# If you cut away everything, trash
if not len(cigar):
return "Trash"
# Alter the read!!!
seq = reads[i_fwd].seq
qual = reads[i_fwd].qual
reads[i_fwd].pos = ref_pos
reads[i_fwd].seq = seq[read_pos:]
reads[i_fwd].qual = qual[read_pos:]
reads[i_fwd].cigar = cigar
# REV primer
    # Get the end position of the read by summing over the cigar (matches and deletions only)
ref_pos = reads[i_rev].pos + sum(bl for (bt, bl) in reads[i_rev].cigar if bt in (0, 2))
if ref_pos > frag_pos[1]:
tampered = "Trimmed"
# Get the last position in the read, trim backwards
read_pos = reads[i_rev].rlen
cigar = reads[i_rev].cigar
# Go through all cigar statuses cutting them off as needed to match
for i, (bt, bl) in enumerate(reads[i_rev].cigar[::-1]):
            # If substitution (match)
if bt == 0:
if ref_pos - bl < frag_pos[1]:
cut = ref_pos - frag_pos[1]
cigar[-1] = (bt, bl - cut)
read_pos -= ref_pos - frag_pos[1]
break
cigar.pop(-1)
read_pos -= bl
ref_pos -= bl
# If insertion
elif bt == 1:
cigar.pop(-1)
read_pos -= bl
# If deletion
elif bt == 2:
# Ending with a deletion is not allowed
cigar.pop(-1)
ref_pos -= bl
if ref_pos < frag_pos[1]:
break
# If you cut away everything, trash
if not len(cigar):
return "Trash"
seq = reads[i_rev].seq
qual = reads[i_rev].qual
reads[i_rev].seq = seq[:read_pos]
reads[i_rev].qual = qual[:read_pos]
reads[i_rev].cigar = cigar
# Fix mate pair
if tampered == "Trimmed":
reads[i_fwd].mpos = reads[i_rev].pos
reads[i_rev].mpos = reads[i_fwd].pos
isize = reads[i_rev].pos + sum(bl for bt, bl in reads[i_rev].cigar
if bt in (0, 2)) - reads[i_fwd].pos
reads[i_fwd].isize = isize
reads[i_rev].isize = -isize
return tampered
def trim_and_divide(sample, min_isize=350, max_isize=750, VERBOSE=0, keep_trash=1, **kwargs):
fragments = sample.fragment_names
fragment_full = sample.get_fragment_positions()
fragments_trim = sample.get_fragment_trim_positions()
primers = sample.primer_dict
primer_positions = sample.get_primer_positions()
data_folder = Sample.get_data_foldername()
input_filename = sample.get_pre_map_filename()['data']
fragment_output_names = sample.get_fragment_output_names()
trashed_read_names = sample.get_trashed_read_names()
fn_outsum = sample.get_divide_summary_filenames()
# Are reads ok?
unmapped = 0
too_small = 0
too_large = 0
# Are fragments identified?
frag_found = 0
ambiguous = 0
lost = 0
# Do we trim primers or not?
bad_primer = 0
trimmed_primers = 0
intact_primers = 0
# How many of each type of fragment
frag1 = 0
frag2 = 0
frag3 = 0
frag4 = 0
frag5 = 0
frag6 = 0
with pysam.Samfile(input_filename, 'r') as samfile:
try:
# If working with SAM-files, want to write the header as well
file_handles = [pysam.Samfile(ofn, 'wh', template=samfile)
for ofn in fragment_output_names[:len(fragments)]]
if keep_trash == 1:
trash_handles = [pysam.Samfile(otr, 'wh', template = samfile)
for otr in trashed_read_names[:len(trashed_read_names)]]
for irp, reads in enumerate(pair_generator(samfile)):
trash = 0
i_fwd = reads[0].is_reverse
if (reads[0].is_unmapped) or (reads[1].is_unmapped) or (not reads[0].is_proper_pair) or (not reads[1].is_proper_pair):
unmapped += 1
if keep_trash == 1:
trash_handles[0].write(reads[0])
trash_handles[0].write(reads[1])
trash = 1
continue
if (reads[i_fwd].isize < min_isize):
too_small += 1
if keep_trash == 1:
trash_handles[1].write(reads[0])
trash_handles[1].write(reads[1])
trash = 1
continue
if (reads[i_fwd].isize > max_isize):
too_large += 1
if keep_trash == 1:
trash_handles[2].write(reads[0])
trash_handles[2].write(reads[1])
trash = 1
continue
if trash == 0:
fragment = assign_to_fragment(reads, fragment_full, VERBOSE = 0)
if fragment == "Ambiguous":
ambiguous += 1
if keep_trash == 1:
trash_handles[4].write(reads[0])
trash_handles[4].write(reads[1])
if fragment == "Lost":
lost += 1
if keep_trash == 1:
trash_handles[5].write(reads[0])
trash_handles[5].write(reads[1])
if fragment in fragment_full.keys():
frag_found += 1
n_frag = fragments.index(fragment)
primer_status = trim_primers(reads, fragment, fragments_trim)
if primer_status == "Trash":
bad_primer += 1
if keep_trash == 1:
trash_handles[3].write(reads[0])
trash_handles[3].write(reads[1])
if primer_status == "Trimmed":
trimmed_primers += 1
file_handles[n_frag].write(reads[0])
file_handles[n_frag].write(reads[1])
if primer_status == "Intact":
intact_primers += 1
file_handles[n_frag].write(reads[0])
file_handles[n_frag].write(reads[1])
if (primer_status == "Trimmed") or (primer_status == "Intact"):
if fragment == "F1":
frag1 += 1
if fragment == "F2":
frag2 += 1
if fragment == "F3":
frag3 += 1
if fragment == "F4":
frag4 += 1
if fragment == "F5":
frag5 += 1
if fragment == "F6":
frag6 += 1
finally:
for f in file_handles:
f.close()
if keep_trash == 1:
for t in trash_handles:
t.close()
summary = {'sample name': sample.name,
'sequencing run': sample.run,
'read pairs': irp + 1,
'unmapped reads': unmapped,
'too short inserts': too_small,
'too long inserts': too_large,
               'ambiguous fragments': ambiguous,
'non-assigned fragments': lost,
'matching fragments': frag_found,
'reads with untrimmed primers': intact_primers,
'reads with trimmed primers': trimmed_primers,
'reads lost due to trimming primers': bad_primer,
'F1': frag1,
'F2': frag2,
'F3': frag3,
'F4': frag4,
'F5': frag5,
'F6': frag6,
}
sample.write_json(summary, fn_outsum)
# Script
if __name__ == '__main__':
# Parse input args
    parser = argparse.ArgumentParser(description='Divide into fragments and trim primers of reads')
parser.add_argument('--sample', required=True,
help='MiSeq sample to analyze')
parser.add_argument('--verbose', type=int, default=0,
help='Verbosity level [0-3]')
parser.add_argument('--min_isize', type=int, default = 350, help='Minimum insert size required')
parser.add_argument('--max_isize', type=int, default = 750, help='Maximum insert size allowed')
parser.add_argument('--keep_trash',type=int, default = 1, help = 'Keep trashed reads in separate files')
args = parser.parse_args()
sample = Sample(args.sample)
    trim_and_divide(sample, VERBOSE=args.verbose, min_isize=args.min_isize, max_isize=args.max_isize, keep_trash=args.keep_trash)
|
timmonen/pipeline
|
pipeline/trim_and_divide.py
|
Python
|
mit
| 14,762
|
[
"pysam"
] |
e7f5e8526ce61928d2576f929f28f7cd3587eca0f1caf2a513292cc8d4ac2627
|
#!/usr/bin/env python3
from olctools.accessoryFunctions.accessoryFunctions import make_path, run_subprocess
from genemethods.sipprCommon.bowtie import Bowtie2CommandLine, Bowtie2BuildCommandLine
from sipprverse.sippr.method import Method
import genemethods
from Bio.Sequencing.Applications import SamtoolsFaidxCommandline, SamtoolsIndexCommandline, \
SamtoolsSortCommandline, SamtoolsViewCommandline
from Bio.Blast.Applications import NcbiblastnCommandline
from argparse import ArgumentParser
from subprocess import call
import multiprocessing
from time import time
import psutil
import shutil
import sys
import os
mem = psutil.virtual_memory()
testpath = os.path.abspath(os.path.dirname(__file__))
scriptpath = os.path.join(testpath, '..')
scriptlocation = genemethods.sipprCommon.editsamheaders.__file__
sys.path.append(scriptpath)
__author__ = 'adamkoziol'
def variables():
v = ArgumentParser()
v.outputpath = os.path.join(testpath, 'testdata', 'results')
v.referencefilepath = os.path.join(testpath, 'testdata', 'targets')
v.miseqpath = os.path.join(testpath, 'testdata')
v.miseqfolder = 'flowcell'
v.readlengthforward = '1'
v.readlengthreverse = '0'
v.customsamplesheet = os.path.join(v.miseqpath, v.miseqfolder, 'SampleSheet.csv')
v.copy = True
v.debug = True
v.demultiplex = True
return v
def method_init():
global var
var = variables()
method_obj = Method(var, '', time(), scriptpath)
return method_obj
method = method_init()
def test_bcl2fastq():
method.createobjects()
assert os.path.isfile(os.path.join(var.outputpath, var.miseqfolder, '1_0',
'Undetermined_S0_L001_R1_001.fastq.gz'))
def metadata_update(analysistype):
"""
:param analysistype:
:return:
"""
method.sequencepath = os.path.join(testpath, 'testdata', 'sequences', analysistype)
method.reportpath = os.path.join(testpath, 'testdata', 'results', 'reports')
for sample in method.runmetadata.samples:
sample.name = 'unit_test'
sample.general.outputdirectory = method.sequencepath
sample.run.outputdirectory = method.sequencepath
sample.general.fastqfiles = [os.path.join(method.sequencepath, 'reads.fastq.gz')]
sample.general.trimmedcorrectedfastqfiles = sample.general.fastqfiles
sample.general.logout = os.path.join(method.sequencepath, 'logout')
sample.general.logerr = os.path.join(method.sequencepath, 'logerr')
def test_fastq_bait():
outfile = os.path.join(var.outputpath, 'bait', 'baited.fastq')
targetpath = os.path.join(var.referencefilepath, 'bait')
baitcall = 'bbduk.sh ref={ref} -Xmx5G in={input} threads={cpus} outm={out}'.format(
ref=os.path.join(targetpath, 'combinedtargets.fasta'),
input=os.path.join(targetpath, 'genesippr.fastq.gz'),
cpus=multiprocessing.cpu_count(),
out=os.path.join(outfile)
)
call(baitcall, shell=True)
size = os.stat(outfile)
assert size.st_size > 0
def test_reverse_bait():
outfile = os.path.join(var.outputpath, 'reverse_bait', 'baited_targets.fasta')
targetpath = os.path.join(var.referencefilepath, 'bait')
baitcall = 'bbduk.sh -Xmx5G ref={ref} in={input} threads={cpus} outm={out}'.format(
ref=os.path.join(targetpath, 'genesippr.fastq.gz'),
input=os.path.join(targetpath, 'combinedtargets.fasta'),
cpus=multiprocessing.cpu_count(),
out=os.path.join(outfile)
)
call(baitcall, shell=True)
size = os.stat(outfile)
assert size.st_size > 0
def test_bowtie2_build():
# Use bowtie2 wrapper to create index the target file
targetpath = os.path.join(var.referencefilepath, 'bait')
bowtie2build = Bowtie2BuildCommandLine(reference=os.path.join(targetpath, 'baitedtargets.fa'),
bt2=os.path.join(targetpath, 'baitedtargets'))
bowtie2build()
size = os.stat(os.path.join(targetpath, 'baitedtargets.1.bt2'))
assert size.st_size > 0
def test_bowtie2_align():
outpath = os.path.join(var.outputpath, 'bait')
outfile = os.path.join(outpath, 'map_test_sorted.bam')
targetpath = os.path.join(var.referencefilepath, 'bait')
# Use samtools wrapper to set up the bam sorting command
samsort = SamtoolsSortCommandline(input=outfile,
o=True,
out_prefix="-")
samtools = [
# When bowtie2 maps reads to all possible locations rather than choosing a 'best' placement, the
# SAM header for that read is set to 'secondary alignment', or 256. Please see:
# http://davetang.org/muse/2014/03/06/understanding-bam-flags/ The script below reads in the stdin
# and subtracts 256 from headers which include 256
'python3 {}'.format(scriptlocation),
# Use samtools wrapper to set up the samtools view
SamtoolsViewCommandline(b=True,
S=True,
h=True,
input_file="-"),
samsort]
# Add custom parameters to a dictionary to be used in the bowtie2 alignment wrapper
indict = {'--very-sensitive-local': True,
'-U': os.path.join(targetpath, 'genesippr.fastq.gz'),
'-a': True,
'--threads': multiprocessing.cpu_count(),
'--local': True}
# Create the bowtie2 reference mapping command
bowtie2align = Bowtie2CommandLine(bt2=os.path.join(targetpath, 'baitedtargets'),
threads=multiprocessing.cpu_count(),
samtools=samtools,
**indict)
run_subprocess(command=str(bowtie2align))
size = os.stat(outfile)
assert size.st_size > 0
def test_index_target():
targetpath = os.path.join(var.referencefilepath, 'bait')
target_index = SamtoolsFaidxCommandline(reference=os.path.join(targetpath, 'baitedtargets.fa'))
target_index()
size = os.stat(os.path.join(targetpath, 'baitedtargets.fa.fai'))
assert size.st_size > 0
def test_index_bam():
targetpath = os.path.join(var.referencefilepath, 'bait')
bam_index = SamtoolsIndexCommandline(input=os.path.join(targetpath, 'genesippr_sorted.bam'))
bam_index()
size = os.stat(os.path.join(targetpath, 'genesippr_sorted.bam.bai'))
assert size.st_size > 0
def test_subsample():
targetpath = os.path.join(var.referencefilepath, 'blast')
outpath = os.path.join(var.outputpath, 'blast')
make_path(outpath)
outfile = os.path.join(outpath, 'subsampled_reads.fastq.gz')
cmd = 'reformat.sh in={input} out={output} samplebasestarget=100000'.format(
input=os.path.join(targetpath, 'reads.fastq.gz'),
output=os.path.join(outfile))
call(cmd, shell=True)
size = os.stat(outfile)
assert size.st_size > 0
def test_downsample():
outpath = os.path.join(var.outputpath, 'blast')
outfile = os.path.join(outpath, 'subsampled_reads.fastq')
cmd = 'seqtk sample {input} 1000 > {output}' .format(
input=os.path.join(outpath, 'subsampled_reads.fastq.gz'),
output=outfile)
call(cmd, shell=True)
size = os.stat(outfile)
assert size.st_size > 0
def test_fastq_to_fasta():
outfile = os.path.join(var.outputpath, 'blast', 'subsampled_reads.fasta')
cmd = 'reformat.sh in={input} out={output}' \
.format(input=os.path.join(os.path.join(var.outputpath, 'blast', 'subsampled_reads.fastq')),
output=outfile)
call(cmd, shell=True)
size = os.stat(outfile)
assert size.st_size > 0
def test_make_blastdb():
targetpath = os.path.join(var.referencefilepath, 'blast')
command = 'makeblastdb -in {targets} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {output}'.format(
targets=os.path.join(targetpath, 'baitedtargets.fa'),
output=os.path.join(targetpath, 'baitedtargets'))
call(command, shell=True)
outfile = os.path.join(targetpath, 'baitedtargets.ndb')
size = os.stat(outfile)
assert size.st_size > 0
def test_blast():
targetpath = os.path.join(var.referencefilepath, 'blast')
outpath = os.path.join(var.outputpath, 'blast')
outfile = os.path.join(outpath, 'blast_results.csv')
# Use the NCBI BLASTn command line wrapper module from BioPython to set the parameters of the search
blastn = NcbiblastnCommandline(query=os.path.join(outpath, 'subsampled_reads.fasta'),
db=os.path.join(targetpath, 'baitedtargets'),
max_target_seqs=1,
num_threads=multiprocessing.cpu_count(),
outfmt='6 qseqid sseqid positive mismatch gaps evalue bitscore slen length qstart '
'qend qseq sstart send sseq',
out=outfile)
blastn()
size = os.stat(outfile)
assert size.st_size > 0
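# The BLAST report written above is tab-separated with the custom column order passed
# via outfmt. A minimal parsing helper is sketched below (not called by the tests; the
# field list simply mirrors the outfmt string handed to NcbiblastnCommandline):
def _parse_blast_report(report_path):
    """Load the custom outfmt 6 BLAST report into a list of dictionaries."""
    fields = ['qseqid', 'sseqid', 'positive', 'mismatch', 'gaps', 'evalue', 'bitscore',
              'slen', 'length', 'qstart', 'qend', 'qseq', 'sstart', 'send', 'sseq']
    with open(report_path) as report:
        return [dict(zip(fields, line.rstrip('\n').split('\t'))) for line in report]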
def clean_folder(analysistype):
"""
:param analysistype: the name of the current typing analysis
"""
try:
shutil.rmtree(os.path.join(method.sequencepath, analysistype))
except FileNotFoundError:
pass
try:
os.remove(os.path.join(method.sequencepath, 'logout'))
except FileNotFoundError:
pass
try:
os.remove(os.path.join(method.sequencepath, 'logerr'))
except FileNotFoundError:
pass
try:
os.remove(os.path.join(method.sequencepath, 'unit_test_metadata.json'))
except FileNotFoundError:
pass
def test_confindr():
analysistype = 'ConFindr'
metadata_update(analysistype)
method.contamination_detection()
shutil.rmtree(os.path.join(method.sequencepath, 'confindr'))
for sample in method.runmetadata.samples:
assert sample.confindr.num_contaminated_snvs == 0
def test_genesippr():
analysistype = 'genesippr'
metadata_update(analysistype)
method.run_genesippr()
outfile = os.path.join(method.reportpath, '{}.csv'.format(analysistype))
size = os.stat(outfile)
clean_folder(analysistype)
assert size.st_size > 0
def test_sixteens():
analysistype = 'sixteens_full'
metadata_update(analysistype)
method.run_sixteens()
outfile = os.path.join(method.reportpath, '{}.csv'.format(analysistype))
size = os.stat(outfile)
clean_folder(analysistype)
assert size.st_size > 0
def test_mash():
analysistype = 'mash'
metadata_update(analysistype)
method.run_mash()
outfile = os.path.join(method.reportpath, '{}.csv'.format(analysistype))
size = os.stat(outfile)
clean_folder(analysistype)
assert size.st_size > 0
def test_gdcs():
analysistype = 'GDCS'
metadata_update(analysistype)
method.run_gdcs()
outfile = os.path.join(method.reportpath, '{}.csv'.format(analysistype))
size = os.stat(outfile)
assert size.st_size > 0
clean_folder(analysistype)
def test_clear_results():
shutil.rmtree(var.outputpath)
def test_clear_targets():
targetpath = os.path.join(var.referencefilepath, 'bait')
os.remove(os.path.join(targetpath, 'baitedtargets.1.bt2'))
os.remove(os.path.join(targetpath, 'baitedtargets.2.bt2'))
os.remove(os.path.join(targetpath, 'baitedtargets.3.bt2'))
os.remove(os.path.join(targetpath, 'baitedtargets.4.bt2'))
os.remove(os.path.join(targetpath, 'baitedtargets.rev.1.bt2'))
os.remove(os.path.join(targetpath, 'baitedtargets.rev.2.bt2'))
os.remove(os.path.join(targetpath, 'baitedtargets.fa.fai'))
os.remove(os.path.join(targetpath, 'genesippr_sorted.bam.bai'))
def test_clear_blast():
targetpath = os.path.join(var.referencefilepath, 'blast')
os.remove(os.path.join(targetpath, 'baitedtargets.ndb'))
os.remove(os.path.join(targetpath, 'baitedtargets.nhr'))
os.remove(os.path.join(targetpath, 'baitedtargets.nin'))
os.remove(os.path.join(targetpath, 'baitedtargets.nnd'))
os.remove(os.path.join(targetpath, 'baitedtargets.nni'))
os.remove(os.path.join(targetpath, 'baitedtargets.nog'))
os.remove(os.path.join(targetpath, 'baitedtargets.nos'))
os.remove(os.path.join(targetpath, 'baitedtargets.ntf'))
os.remove(os.path.join(targetpath, 'baitedtargets.nsq'))
os.remove(os.path.join(targetpath, 'baitedtargets.nto'))
os.remove(os.path.join(targetpath, 'baitedtargets.not'))
def test_clear_kma():
targetpath = os.path.join(var.referencefilepath, 'ConFindr')
os.remove(os.path.join(targetpath, 'rMLST_combined_kma.length.b'))
os.remove(os.path.join(targetpath, 'rMLST_combined_kma.name'))
os.remove(os.path.join(targetpath, 'rMLST_combined_kma.seq.b'))
def test_clear_logs():
# Use os.walk to find all log files in the subfolders within the reference file path
for root, folders, files in os.walk(var.referencefilepath):
for sub_file in files:
# Only target log files
if '.log' in sub_file:
# Remove the file
os.remove(os.path.join(root, sub_file))
|
adamkoziol/geneSipprV2
|
tests/test_method.py
|
Python
|
mit
| 13,128
|
[
"BLAST",
"Biopython",
"Bowtie"
] |
5f89cf90e436170b37dbb902290dbb6f7224b4227f29612a1c2bb45869b2f442
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robotide.lib.robot.conf import RebotSettings
from robotide.lib.robot.errors import DataError
from robotide.lib.robot.model import ModelModifier
from robotide.lib.robot.output import LOGGER
from robotide.lib.robot.result import ExecutionResult, Result
from robotide.lib.robot.utils import unic
from .jsmodelbuilders import JsModelBuilder
from .logreportwriters import LogWriter, ReportWriter
from .xunitwriter import XUnitWriter
class ResultWriter(object):
"""A class to create log, report, output XML and xUnit files.
:param sources: Either one :class:`~robot.result.executionresult.Result`
object, or one or more paths to existing output XML files.
By default writes ``report.html`` and ``log.html``, but no output XML
or xUnit files. Custom file names can be given and results disabled
or enabled using ``settings`` or ``options`` passed to the
:meth:`write_results` method. The latter is typically more convenient::
writer = ResultWriter(result)
writer.write_results(report='custom.html', log=None, xunit='xunit.xml')
"""
def __init__(self, *sources):
self._sources = sources
def write_results(self, settings=None, **options):
"""Writes results based on the given ``settings`` or ``options``.
:param settings: :class:`~robot.conf.settings.RebotSettings` object
to configure result writing.
:param options: Used to construct new
:class:`~robot.conf.settings.RebotSettings` object if ``settings``
are not given.
"""
settings = settings or RebotSettings(options)
results = Results(settings, *self._sources)
if settings.output:
self._write_output(results.result, settings.output)
if settings.xunit:
self._write_xunit(results.result, settings.xunit,
settings.xunit_skip_noncritical)
if settings.log:
config = dict(settings.log_config,
minLevel=results.js_result.min_level)
self._write_log(results.js_result, settings.log, config)
if settings.report:
results.js_result.remove_data_not_needed_in_report()
self._write_report(results.js_result, settings.report,
settings.report_config)
return results.return_code
def _write_output(self, result, path):
self._write('Output', result.save, path)
def _write_xunit(self, result, path, skip_noncritical):
self._write('XUnit', XUnitWriter(result, skip_noncritical).write, path)
def _write_log(self, js_result, path, config):
self._write('Log', LogWriter(js_result).write, path, config)
def _write_report(self, js_result, path, config):
self._write('Report', ReportWriter(js_result).write, path, config)
def _write(self, name, writer, path, *args):
try:
writer(path, *args)
except DataError as err:
LOGGER.error(unicode(err))
except EnvironmentError as err:
# `err.filename` can be different than `path` at least if reading
# log/report templates or writing split log fails.
# `unic` is needed due to http://bugs.jython.org/issue1825.
LOGGER.error("Writing %s file '%s' failed: %s: %s" %
(name.lower(), path, err.strerror, unic(err.filename)))
else:
LOGGER.output_file(name, path)
class Results(object):
def __init__(self, settings, *sources):
self._settings = settings
self._sources = sources
if len(sources) == 1 and isinstance(sources[0], Result):
self._result = sources[0]
self._prune = False
self.return_code = self._result.return_code
else:
self._result = None
self._prune = True
self.return_code = -1
self._js_result = None
@property
def result(self):
if self._result is None:
include_keywords = bool(self._settings.log or self._settings.output)
flattened = self._settings.flatten_keywords
self._result = ExecutionResult(include_keywords=include_keywords,
flattened_keywords=flattened,
merge=self._settings.merge,
*self._sources)
self._result.configure(self._settings.status_rc,
self._settings.suite_config,
self._settings.statistics_config)
modifier = ModelModifier(self._settings.pre_rebot_modifiers,
self._settings.process_empty_suite,
LOGGER)
self._result.suite.visit(modifier)
self.return_code = self._result.return_code
return self._result
@property
def js_result(self):
if self._js_result is None:
builder = JsModelBuilder(log_path=self._settings.log,
split_log=self._settings.split_log,
prune_input_to_save_memory=self._prune)
self._js_result = builder.build_from(self.result)
if self._prune:
self._result = None
return self._js_result
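# Minimal usage sketch (the output XML path is hypothetical; mirrors the paths-based
# constructor described in the ResultWriter docstring):
#
#   from robotide.lib.robot.reporting.resultwriter import ResultWriter
#   rc = ResultWriter('output.xml').write_results(report='report.html', log='log.html')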
|
fingeronthebutton/RIDE
|
src/robotide/lib/robot/reporting/resultwriter.py
|
Python
|
apache-2.0
| 6,011
|
[
"VisIt"
] |
2c06a360cfe32691b0ba919a5300c53805c8bb4140ce87d3cd9a63c74f7ecd45
|
#!/usr/bin/env python
import sys, os, pysam
from optparse import OptionParser
from jobTree.scriptTree.target import Target
from jobTree.scriptTree.stack import Stack
from jobTree.src.bioio import fastqRead, fastaRead, setLoggingFromOptions, logger
from subprocess import Popen, PIPE
from itertools import izip
"""
Analysis script to determine if template/complement reads can be 'rescued' by providing the small region of the
reference where the 2D read they combined into aligned.
Should be run via run_muscle.sh, which takes three arguments:
--template_sam, --twoD_sam, --complement_sam
The sam files should be from the same aligner and read/reference set.
This program will dig through the nanopore pipeline's directory structure looking for reference and read files.
This assumes that you are on the version of the pipeline whereby reads are sorted by type into respective folders
(I.E. nanopore/readFastqFiles/template/a_template_file.fastq, etc)
"""
def find_analyses(target, recordsToAnalyze, templateFastqFiles, complementFastqFiles, references, outputDir):
"""takes a set of records to analyze and finds the corresponding sequences and creates alignment targets"""
files = {"template":[], "complement":[]}
logger.info("Finding template analyses")
for fastqFile in templateFastqFiles:
for name, seq, qual in fastqRead(fastqFile):
if name in recordsToAnalyze:
outfile = os.path.join(target.getGlobalTempDir(), "template_" + name)
files["template"].append(outfile)
ref_name, ref_start, ref_stop = recordsToAnalyze[name]
ref_seq = references[ref_name][ref_start : ref_stop]
analysis = [name, seq, ref_name, ref_seq, outfile]
target.addChildTarget(Target.makeTargetFn(analyze, args=analysis))
logger.info("Finding complement analyses")
for fastqFile in complementFastqFiles:
for name, seq, qual in fastqRead(fastqFile):
if name in recordsToAnalyze:
outfile = os.path.join(target.getGlobalTempDir(), "complement_" + name)
files["complement"].append(outfile)
ref_name, ref_start, ref_stop = recordsToAnalyze[name]
ref_seq = references[ref_name][ref_start : ref_stop]
analysis = [name, seq, ref_name, ref_seq, outfile]
target.addChildTarget(Target.makeTargetFn(analyze, args=analysis))
target.setFollowOnTargetFn(merge, args=(files, outputDir))
def analyze(target, name, seq, ref_name, ref_seq, outfile):
"""main analysis target; runs muscle on each pair of sequences"""
outf = open(outfile, "w")
p = Popen(["muscle", "-quiet"], stdout=PIPE, stdin=PIPE).communicate(">{}\n{}\n>{}\n{}\n".format(name, seq, ref_name, ref_seq))[0]
outf.write(p); outf.close()
def merge(target, files, outputDir):
"""merges all muscle output into one fasta and runs metrics() on each"""
for typeof in files:
outmetrics = open(os.path.join(outputDir, typeof + "_metrics.tsv"), "w")
outmetrics.write("Read\tReference\tMatches\tMismatches\tReadDeletionLength\tReadInsertionLength\tIdentity\tReferenceCoverage\n")
for f in files[typeof]:
handle = fastaRead(f)
name, seq = handle.next()
ref_name, ref_seq = handle.next()
name = name.lstrip(">"); ref_name = ref_name.lstrip(">")
outmetrics.write("\t".join([name, ref_name] + metrics(seq, ref_seq))); outmetrics.write("\n")
outmetrics.close()
def metrics(seq, ref_seq):
"""takes in two aligned fasta sequences and calculates identity/coverage metrics"""
matches = 0.0; mismatches = 0.0; readDeletionLength = 0.0; readInsertionLength = 0.0
for s, r in izip(seq, ref_seq):
if s == "-":
readDeletionLength += 1
elif r == "-":
readInsertionLength += 1
elif s == r == "-":
continue #just in case?
elif s == r:
matches += 1
elif s != r:
mismatches += 1
identity = matches / (matches + mismatches)
referenceCoverage = (matches + mismatches) / (matches + mismatches + readDeletionLength)
return map(str, [matches, mismatches, readDeletionLength, readInsertionLength, identity, referenceCoverage])
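# Worked example (checkable by hand): for the aligned pair
#   read: A C G - T
#   ref : A C G A T
# metrics() counts 4 matches, 0 mismatches, 1 read deletion and 0 read insertions,
# giving identity = 4/4 = 1.0 and reference coverage = 4/(4 + 1) = 0.8.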
def main():
parser = OptionParser()
Stack.addJobTreeOptions(parser)
options, args = parser.parse_args()
setLoggingFromOptions(options)
outputDir = "muscle_compare_2d/output/"
if not os.path.exists(outputDir):
logger.info("Output dir {} does not exist. Creating.")
os.mkdir(outputDir)
if len(os.listdir(outputDir)) > 0:
logger.info("Output dir not empty.")
if len(args) != 3:
raise RuntimeError("Error: expected three arguments got %s arguments: %s" % (len(args), " ".join(args)))
templateRecords = {x.qname for x in pysam.Samfile(args[0]) if not x.is_unmapped}
complementRecords = {x.qname for x in pysam.Samfile(args[1]) if not x.is_unmapped}
twodSamFile = pysam.Samfile(args[2])
twodRecords = {x.qname : x for x in twodSamFile if not x.is_unmapped}
recordsToAnalyze = dict()
for name, record in twodRecords.iteritems():
if name not in templateRecords and name not in complementRecords:
ref_name = twodSamFile.getrname(record.tid)
ref_start, ref_stop = int(record.aend - record.alen), int(record.aend)
recordsToAnalyze[name] = [ref_name, ref_start, ref_stop]
if os.path.exists("../readFastqFiles/template/") and os.path.exists("../readFastqFiles/complement"):
templateFastqFiles = [os.path.join("../readFastqFiles/template/", x) for x in os.listdir("../readFastqFiles/template/") if x.endswith(".fastq") or x.endswith(".fq")]
complementFastqFiles = [os.path.join("../readFastqFiles/complement/", x) for x in os.listdir("../readFastqFiles/complement/") if x.endswith(".fastq") or x.endswith(".fq")]
else:
raise RuntimeError("Error: readFastqFiles does not contain template and/or complement folders")
referenceFastaFiles = [os.path.join("../referenceFastaFiles", x) for x in os.listdir("../referenceFastaFiles") if x.endswith(".fa") or x.endswith(".fasta")]
if len(referenceFastaFiles) > 0:
references = { y[0].split(" ")[0] : y[1] for x in referenceFastaFiles for y in fastaRead(x) }
else:
raise RuntimeError("Error: no reference fasta files")
if len(recordsToAnalyze) == 0:
raise RuntimeError("Error: none of the mappable twoD reads in this set did not map as template/complement.")
logger.info("Starting to find analyses to run...")
args = (recordsToAnalyze, templateFastqFiles, complementFastqFiles, references, outputDir)
i = Stack(Target.makeTargetFn(find_analyses, args=args)).startJobTree(options)
if i != 0:
raise RuntimeError("Got {} failed jobs".format(i))
if __name__ == "__main__":
from scripts.muscle_compare_2d.muscle_compare_2d import *
main()
|
mitenjain/nanopore
|
scripts/muscle_compare_2d/muscle_compare_2d.py
|
Python
|
mit
| 7,051
|
[
"pysam"
] |
607c868259044dcd44e1656fcddb169313638b59c0a7ada989c33e6224730629
|
#
# Copyright 2014-2015, 2017, 2020-2021 Lars Pastewka (U. Freiburg)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2014-2017) James Kermode, Warwick University
# Lars Pastewka, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
import unittest
from ase.build import bulk
import matscipytest
from matscipy.calculators import EAM, SupercellCalculator
###
class TestSupercellCalculator(matscipytest.MatSciPyTestCase):
def test_eam(self):
for calc in [EAM('Au-Grochola-JCP05.eam.alloy')]:
a = bulk('Au')
a *= (2, 2, 2)
a.rattle(0.1)
a.calc = calc
e = a.get_potential_energy()
f = a.get_forces()
s = a.get_stress()
a.set_calculator(SupercellCalculator(calc, (3, 3, 3)))
self.assertAlmostEqual(e, a.get_potential_energy())
self.assertArrayAlmostEqual(f, a.get_forces())
self.assertArrayAlmostEqual(s, a.get_stress())
###
if __name__ == '__main__':
unittest.main()
|
libAtoms/matscipy
|
tests/test_supercell_calculator.py
|
Python
|
lgpl-2.1
| 2,641
|
[
"ASE",
"Matscipy"
] |
807ba18a463feba659bb91d6740b48436ebbfd106b2c3d3c9016a38c6071dd55
|
""" Base Storage Class provides the base interface for all storage plug-ins
exists()
These are the methods for manipulating files:
isFile()
getFile()
putFile()
removeFile()
getFileMetadata()
getFileSize()
prestageFile()
getTransportURL()
These are the methods for manipulating directories:
isDirectory()
getDirectory()
putDirectory()
createDirectory()
removeDirectory()
listDirectory()
getDirectoryMetadata()
getDirectorySize()
These are the methods for manipulating the client:
changeDirectory()
getCurrentDirectory()
getName()
getParameters()
getCurrentURL()
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
import os
PROTOCOL_PARAMETERS = [ "Protocol", "Host", "Path", "Port", "SpaceToken", "WSUrl" ]
class StorageBase( object ):
"""
.. class:: StorageBase
"""
def __init__( self, name, parameterDict ):
self.name = name
self.pluginName = ''
self.protocolParameters = {}
self.__updateParameters( parameterDict )
self.basePath = parameterDict['Path']
self.cwd = self.basePath
self.se = None
self.isok = True
def setStorageElement( self, se ):
self.se = se
def setParameters( self, parameterDict ):
""" Set standard parameters, method can be overriden in subclasses
to process specific parameters
"""
self.__updateParameters( parameterDict )
def __updateParameters( self, parameterDict ):
""" setParameters implementation method
"""
for item in PROTOCOL_PARAMETERS:
self.protocolParameters[item] = parameterDict.get( item, '' )
def getParameters( self ):
""" Get the parameters with which the storage was instantiated
"""
parameterDict = dict( self.protocolParameters )
parameterDict["StorageName"] = self.name
parameterDict["PluginName"] = self.pluginName
return parameterDict
def exists( self, *parms, **kws ):
"""Check if the given path exists
"""
return S_ERROR( "Storage.exists: implement me!" )
#############################################################
#
# These are the methods for file manipulation
#
def isFile( self, *parms, **kws ):
"""Check if the given path exists and it is a file
"""
return S_ERROR( "Storage.isFile: implement me!" )
def getFile( self, *parms, **kws ):
"""Get a local copy of the file specified by its path
"""
return S_ERROR( "Storage.getFile: implement me!" )
def putFile( self, *parms, **kws ):
"""Put a copy of the local file to the current directory on the
physical storage
"""
return S_ERROR( "Storage.putFile: implement me!" )
def removeFile( self, *parms, **kws ):
"""Remove physically the file specified by its path
"""
return S_ERROR( "Storage.removeFile: implement me!" )
def getFileMetadata( self, *parms, **kws ):
""" Get metadata associated to the file
"""
return S_ERROR( "Storage.getFileMetadata: implement me!" )
def getFileSize( self, *parms, **kws ):
"""Get the physical size of the given file
"""
return S_ERROR( "Storage.getFileSize: implement me!" )
def prestageFile( self, *parms, **kws ):
""" Issue prestage request for file
"""
return S_ERROR( "Storage.prestageFile: implement me!" )
def prestageFileStatus( self, *parms, **kws ):
""" Obtain the status of the prestage request
"""
return S_ERROR( "Storage.prestageFileStatus: implement me!" )
def pinFile( self, *parms, **kws ):
""" Pin the file on the destination storage element
"""
return S_ERROR( "Storage.pinFile: implement me!" )
def releaseFile( self, *parms, **kws ):
""" Release the file on the destination storage element
"""
return S_ERROR( "Storage.releaseFile: implement me!" )
#############################################################
#
# These are the methods for directory manipulation
#
def isDirectory( self, *parms, **kws ):
"""Check if the given path exists and it is a directory
"""
return S_ERROR( "Storage.isDirectory: implement me!" )
def getDirectory( self, *parms, **kws ):
"""Get locally a directory from the physical storage together with all its
files and subdirectories.
"""
return S_ERROR( "Storage.getDirectory: implement me!" )
def putDirectory( self, *parms, **kws ):
"""Put a local directory to the physical storage together with all its
files and subdirectories.
"""
return S_ERROR( "Storage.putDirectory: implement me!" )
def createDirectory( self, *parms, **kws ):
""" Make a new directory on the physical storage
"""
return S_ERROR( "Storage.createDirectory: implement me!" )
def removeDirectory( self, *parms, **kws ):
"""Remove a directory on the physical storage together with all its files and
subdirectories.
"""
return S_ERROR( "Storage.removeDirectory: implement me!" )
def listDirectory( self, *parms, **kws ):
""" List the supplied path
"""
return S_ERROR( "Storage.listDirectory: implement me!" )
def getDirectoryMetadata( self, *parms, **kws ):
""" Get the metadata for the directory
"""
return S_ERROR( "Storage.getDirectoryMetadata: implement me!" )
def getDirectorySize( self, *parms, **kws ):
""" Get the size of the directory on the storage
"""
return S_ERROR( "Storage.getDirectorySize: implement me!" )
#############################################################
#
# These are the methods for manipulating the client
#
def isOK( self ):
return self.isok
def resetCurrentDirectory( self ):
""" Reset the working directory to the base dir
"""
self.cwd = self.basePath
def changeDirectory( self, directory ):
""" Change the directory to the supplied directory
"""
if directory.startswith( '/' ):
self.cwd = "%s/%s" % ( self.basePath, directory )
else:
self.cwd = '%s/%s' % ( self.cwd, directory )
def getCurrentDirectory( self ):
""" Get the current directory
"""
return self.cwd
def getCurrentURL( self, fileName ):
""" Obtain the current file URL from the current working directory and the filename
:param self: self reference
:param str fileName: path on storage
"""
urlDict = dict( self.protocolParameters )
if not fileName.startswith( '/' ):
# Relative path is given
urlDict['Path'] = self.cwd
result = pfnunparse( urlDict )
if not result['OK']:
return result
cwdUrl = result['Value']
fullUrl = '%s%s' % ( cwdUrl, fileName )
return S_OK( fullUrl )
def getName( self ):
""" The name with which the storage was instantiated
"""
return self.name
def getURLBase( self, withWSUrl = False ):
""" This will get the URL base. This is then appended with the LFN in DIRAC convention.
:param self: self reference
:param bool withWSUrl: flag to include Web Service part of the url
:returns URL
"""
urlDict = dict( self.protocolParameters )
if not withWSUrl:
urlDict['WSUrl'] = ''
return pfnunparse( urlDict )
def isURL( self, path ):
""" Guess if the path looks like a URL
:param self: self reference
:param string path: input file LFN or URL
:returns boolean: True if URL, False otherwise
"""
if self.basePath and path.startswith( self.basePath ):
return S_OK( True )
result = pfnparse( path )
if not result['OK']:
return result
if len( result['Value']['Protocol'] ) != 0:
return S_OK( True )
if result['Value']['Path'].startswith( self.basePath ):
return S_OK( True )
return S_OK( False )
def getTransportURL( self, pathDict, protocols ):
""" Get a transport URL for a given URL. For a simple storage plugin
it is just returning input URL if the plugin protocol is one of the
requested protocols
:param dict pathDict: URL obtained from File Catalog or constructed according
to convention
:param list protocols: a list of acceptable transport protocols in priority order
"""
res = checkArgumentFormat( pathDict )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
if protocols and not self.protocolParameters['Protocol'] in protocols:
return S_ERROR( 'No native protocol requested' )
for url in urls:
successful[url] = url
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def constructURLFromLFN( self, lfn, withWSUrl = False ):
""" Construct URL from the given LFN according to the VO convention for the
primary protocol of the storage plugin
:param str lfn: file LFN
:param boolean withWSUrl: flag to include the web service part into the resulting URL
:return result: result['Value'] - resulting URL
"""
# Check the LFN convention:
# 1. LFN must start with the VO name as the top level directory
# 2. VO name must not appear as any subdirectory or file name
lfnSplitList = lfn.split( '/' )
voLFN = lfnSplitList[1]
# TODO comparison to Sandbox below is for backward compatibility, should be removed in the next release
if ( voLFN != self.se.vo and voLFN != "SandBox" and voLFN != "Sandbox" ):
return S_ERROR( 'LFN does not follow the DIRAC naming convention %s' % lfn )
result = self.getURLBase( withWSUrl = withWSUrl )
if not result['OK']:
return result
urlBase = result['Value']
url = os.path.join( urlBase, lfn.lstrip( '/' ) )
return S_OK( url )
def updateURL( self, url, withWSUrl = False ):
""" Update the URL according to the current SE parameters
"""
result = pfnparse( url )
if not result['OK']:
return result
urlDict = result['Value']
urlDict['Protocol'] = self.protocolParameters['Protocol']
urlDict['Host'] = self.protocolParameters['Host']
urlDict['Port'] = self.protocolParameters['Port']
urlDict['WSUrl'] = ''
if withWSUrl:
urlDict['WSUrl'] = self.protocolParameters['WSUrl']
return pfnunparse( urlDict )
def isNativeURL( self, url ):
""" Check if URL :url: is valid for :self.protocol:
:param self: self reference
:param str url: URL
"""
res = pfnparse( url )
if not res['OK']:
return res
urlDict = res['Value']
return S_OK( urlDict['Protocol'] == self.protocolParameters['Protocol'] )
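# Illustrative subclass sketch (not part of DIRAC; a real plugin would live in its own
# module and override the remaining file/directory methods in the same fashion):
#
#   class DummyStorage( StorageBase ):
#       """Toy plugin that reports every URL as existing."""
#       def exists( self, path ):
#           res = checkArgumentFormat( path )
#           if not res['OK']:
#               return res
#           successful = dict.fromkeys( res['Value'], True )
#           return S_OK( { 'Successful' : successful, 'Failed' : {} } )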
|
vmendez/DIRAC
|
Resources/Storage/StorageBase.py
|
Python
|
gpl-3.0
| 10,756
|
[
"DIRAC"
] |
5bf1507cc037bf8d8dc48f696bd42f90138ff0c9c58ed32a49d8464ea1efaa19
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyParmed(PythonPackage):
"""ParmEd is a general tool for aiding in investigations of
biomolecular systems using popular molecular simulation
packages, like Amber, CHARMM, and OpenMM written in
Python."""
homepage = "https://parmed.github.io/ParmEd/html/index.html"
pypi = "ParmEd/ParmEd-3.4.3.tar.gz"
version('3.4.3', sha256='90afb155e3ffe69230a002922b28968464126d4450059f0bd97ceca679c6627c')
depends_on('python@2.7:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
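# Typical command-line usage once this recipe is available in a Spack instance
# (standard Spack workflow; the version matches the one declared above):
#   spack install py-parmed@3.4.3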
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-parmed/package.py
|
Python
|
lgpl-2.1
| 753
|
[
"Amber",
"CHARMM",
"OpenMM"
] |
3686d595c435394681e54626beaf76cb1b20a08f549823065c287411624e8d14
|
"""
How to get a particular Neuron's gap junctions from the database.
"""
from __future__ import absolute_import
from __future__ import print_function
import PyOpenWorm as P
from PyOpenWorm.worm import Worm
from PyOpenWorm.context import Context
from OpenWormData import BIO_ENT_NS
# Connect to existing database.
P.connect('default.conf')
ctx = Context(ident=BIO_ENT_NS['worm0']).stored
# Put the Worm's Network object in a variable.
net = ctx(Worm)().get_neuron_network()
# Put a particular Neuron object in a variable ('AVAL' in this example).
aval = net.aneuron('AVAL')
print("Getting all Connections to/from AVAL, and printing the gap junctions")
# We could also put them into an array or do other things with them other
# than print.
num_gjs = 0
for c in aval.connection():
# the `one()` returns a string like "gapJunction" instead of
# "syntype=gapJunction"
if c.syntype.one() == 'gapJunction':
num_gjs += 1
print(num_gjs, c)
|
gsarma/PyOpenWorm
|
examples/gap_junctions.py
|
Python
|
mit
| 968
|
[
"NEURON"
] |
8ba05d01c3449eac34592cd82637d4e91fac26d364a5d717235b225029d53daf
|
"""
Test utils for CCX
"""
import datetime
import pytz
from django.conf import settings
from student.roles import (
CourseCcxCoachRole,
CourseInstructorRole,
CourseStaffRole
)
from student.tests.factories import (
UserFactory
)
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
SharedModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE
)
from xmodule.modulestore.tests.factories import (
CourseFactory,
ItemFactory,
)
from lms.djangoapps.ccx.overrides import override_field_for_ccx
from lms.djangoapps.ccx.tests.factories import CcxFactory
class CcxTestCase(SharedModuleStoreTestCase):
"""
General test class to be used in other CCX tests classes.
It creates a course that can be used as master course for CCXs.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(CcxTestCase, cls).setUpClass()
cls.course = course = CourseFactory.create()
# Create a course outline
cls.mooc_start = start = datetime.datetime(
2010, 5, 12, 2, 42, tzinfo=pytz.UTC
)
cls.mooc_due = due = datetime.datetime(
2010, 7, 7, 0, 0, tzinfo=pytz.UTC
)
cls.chapters = [
ItemFactory.create(start=start, parent=course) for _ in xrange(2)
]
cls.sequentials = flatten([
[
ItemFactory.create(parent=chapter) for _ in xrange(2)
] for chapter in cls.chapters
])
cls.verticals = flatten([
[
ItemFactory.create(
start=start, due=due, parent=sequential, graded=True, format='Homework', category=u'vertical'
) for _ in xrange(2)
] for sequential in cls.sequentials
])
# Trying to wrap the whole thing in a bulk operation fails because it
# doesn't find the parents. But we can at least wrap this part...
with cls.store.bulk_operations(course.id, emit_signals=False):
blocks = flatten([ # pylint: disable=unused-variable
[
ItemFactory.create(parent=vertical) for _ in xrange(2)
] for vertical in cls.verticals
])
def setUp(self):
"""
Set up tests
"""
super(CcxTestCase, self).setUp()
# Create instructor account
self.coach = UserFactory.create()
# create an instance of modulestore
self.mstore = modulestore()
def make_staff(self):
"""
create staff user.
"""
staff = UserFactory.create(password="test")
role = CourseStaffRole(self.course.id)
role.add_users(staff)
return staff
def make_instructor(self):
"""
create instructor user.
"""
instructor = UserFactory.create(password="test")
role = CourseInstructorRole(self.course.id)
role.add_users(instructor)
return instructor
def make_coach(self):
"""
create coach user
"""
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.coach)
def make_ccx(self, max_students_allowed=settings.CCX_MAX_STUDENTS_ALLOWED):
"""
create ccx
"""
ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
override_field_for_ccx(ccx, self.course, 'max_student_enrollments_allowed', max_students_allowed)
return ccx
def get_outbox(self):
"""
get fake outbox
"""
from django.core import mail
return mail.outbox
def flatten(seq):
"""
For [[1, 2], [3, 4]] returns [1, 2, 3, 4]. Does not recurse.
"""
return [x for sub in seq for x in sub]
def iter_blocks(course):
"""
Returns an iterator over all of the blocks in a course.
"""
def visit(block):
""" get child blocks """
yield block
for child in block.get_children():
for descendant in visit(child): # wish they'd backport yield from
yield descendant
return visit(course)
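# Usage sketch for the helpers above (call sites are hypothetical; `course` would be a
# CourseFactory-created course such as CcxTestCase.course):
#   all_blocks = list(iter_blocks(course))                        # depth-first outline walk
#   children = flatten([list(b.get_children()) for b in all_blocks])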
|
ampax/edx-platform
|
lms/djangoapps/ccx/tests/utils.py
|
Python
|
agpl-3.0
| 4,160
|
[
"VisIt"
] |
8ea59661a55286b832fa74f551f480e49ab014f1c9b8273d93193263837becad
|
# Copyright (C) 2012 Mathias Brodala
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import division
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
import cairo
from collections import namedtuple
import sys
from math import pi
from xl import (
event,
player,
settings
)
from xl.nls import gettext as _
from xl.player.adapters import PlaybackAdapter
from xlgui.widgets import info
import migration
from alphacolor import alphacolor_parse
import osd_preferences
OSDWINDOW = None
def enable(exaile):
"""
Enables the on screen display plugin
"""
migration.migrate_settings()
global OSDWINDOW
OSDWINDOW = OSDWindow()
def disable(exaile):
"""
Disables the on screen display plugin
"""
global OSDWINDOW
OSDWINDOW.destroy()
OSDWINDOW = None
def get_preferences_pane():
return osd_preferences
Point = namedtuple('Point', 'x y')
class OSDWindow(Gtk.Window, PlaybackAdapter):
"""
A popup window showing information
of the currently playing track
"""
autohide = GObject.property(
type=GObject.TYPE_BOOLEAN,
nick='autohide',
blurb='Whether to automatically hide the window after some time',
default=True,
flags=GObject.PARAM_READWRITE
)
__gsignals__ = {}
def __init__(self):
"""
Initializes the window
"""
Gtk.Window.__init__(self, Gtk.WindowType.TOPLEVEL)
# for whatever reason, calling set_opacity seems
# to crash on Windows when using PyGTK that comes with
# the GStreamer SDK. Since this plugin is enabled by
# default, just don't fade in/out on windows
#
# https://bugs.freedesktop.org/show_bug.cgi?id=54682
self.use_fade = True
if sys.platform == 'win32':
self.use_fade = False
self.fadeout_id = None
self.drag_origin = None
self.hide_id = None
self.set_type_hint(Gdk.WindowTypeHint.NOTIFICATION)
self.set_title('Exaile OSD')
self.set_decorated(False)
self.set_keep_above(True)
self.set_skip_pager_hint(True)
self.set_skip_taskbar_hint(True)
self.set_resizable(True)
self.set_app_paintable(True)
self.stick()
self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK | Gdk.EventMask.POINTER_MOTION_MASK)
# Cached option values
self.__options = {
'background': None,
'display_duration': None,
'border_radius': None
}
self.info_area = info.TrackInfoPane(player.PLAYER)
self.info_area.set_default_text('')
self.info_area.set_auto_update(True)
self.add(self.info_area)
event.add_callback(self.on_track_tags_changed, 'track_tags_changed')
event.add_callback(self.on_option_set, 'plugin_osd_option_set')
# Trigger initial setup trough options
for option in ('format', 'background', 'display_duration',
'show_progress', 'position', 'width', 'height',
'border_radius'):
self.on_option_set('plugin_osd_option_set', settings,
'plugin/osd/{option}'.format(option=option))
# Trigger color map update
self.emit('screen-changed', self.get_screen())
PlaybackAdapter.__init__(self, player.PLAYER)
def destroy(self):
"""
Cleanups
"""
event.remove_callback(self.on_option_set, 'plugin_osd_option_set')
event.remove_callback(self.on_track_tags_changed, 'track_tags_changed')
Gtk.Window.destroy(self)
def hide(self):
"""
Starts fadeout of the window
"""
if not self.use_fade:
Gtk.Window.hide(self)
return
if self.fadeout_id is None:
self.fadeout_id = GLib.timeout_add(50, self.__fade_out)
def show(self):
"""
Stops fadeout and immediately shows the window
"""
if self.use_fade:
try:
GLib.source_remove(self.fadeout_id)
except Exception:
pass
finally:
self.fadeout_id = None
self.set_opacity(1)
Gtk.Window.show_all(self)
def __fade_out(self):
"""
Constantly decreases the opacity to fade out the window
"""
opacity = self.get_opacity()
if opacity == 0:
GLib.source_remove(self.fadeout_id)
self.fadeout_id = None
Gtk.Window.hide(self)
return False
self.set_opacity(opacity - 0.1)
return True
def do_notify(self, parameter):
"""
Triggers hiding if autohide is enabled
"""
if parameter.name == 'autohide':
if self.props.autohide:
self.hide()
def do_expose_event(self, event):
"""
Draws the background of the window
"""
context = self.props.window.cairo_create()
context.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
context.clip()
context.set_source_rgba(
self.__options['background'].red_float,
self.__options['background'].green_float,
self.__options['background'].blue_float,
self.__options['background'].alpha_float
)
context.set_operator(cairo.OPERATOR_SOURCE)
context.paint()
Gtk.Window.do_expose_event(self, event)
def do_screen_changed(self, screen):
"""
Updates the used colormap
"""
visual = screen.get_rgba_visual()
if visual is None:
visual = screen.get_system_visual()
self.unrealize()
self.set_visual(visual)
self.realize()
def do_size_allocate(self, allocation):
"""
Applies the non-rectangular shape
"""
width, height = allocation.width, allocation.height
mask = Gdk.Pixmap(None, width, height, 1)
context = mask.cairo_create()
context.set_source_rgb(0, 0, 0)
context.set_operator(cairo.OPERATOR_CLEAR)
context.paint()
radius = self.__options['border_radius']
# Use a namedtuple so the corner centres can be referenced by name below
Rect = namedtuple('Rect', 'x y width height')
inner = Rect(radius, radius, width - radius, height - radius)
context.set_source_rgb(1, 1, 1)
context.set_operator(cairo.OPERATOR_SOURCE)
# Top left corner
context.arc(inner.x, inner.y, radius, 1.0 * pi, 1.5 * pi)
# Top right corner
context.arc(inner.width, inner.y, radius, 1.5 * pi, 2.0 * pi)
# Bottom right corner
context.arc(inner.width, inner.height, radius, 0.0 * pi, 0.5 * pi)
# Bottom left corner
context.arc(inner.x, inner.height, radius, 0.5 * pi, 1.0 * pi)
context.fill()
self.shape_combine_mask(mask, 0, 0)
Gtk.Window.do_size_allocate(self, allocation)
def do_configure_event(self, e):
"""
Stores the window size
"""
width, height = self.get_size()
settings.set_option('plugin/osd/width', width)
settings.set_option('plugin/osd/height', height)
Gtk.Window.do_configure_event(self, e)
def do_button_press_event(self, e):
"""
Starts the dragging process
"""
if e.button == 1:
self.drag_origin = Point(e.x, e.y)
self.window.set_cursor(Gdk.Cursor.new(Gdk.CursorType.FLEUR))
return True
elif e.button == 3 and e.state & Gdk.ModifierType.MOD1_MASK:
self.begin_resize_drag(Gdk.WindowEdge.SOUTH_EAST, 3, int(e.x_root), int(e.y_root), e.time)
def do_button_release_event(self, e):
"""
Finishes the dragging process and
saves the window position
"""
if e.button == 1:
settings.set_option('plugin/osd/position', list(self.get_position()))
self.drag_origin = None
self.window.set_cursor(Gdk.Cursor.new(Gdk.CursorType.ARROW))
return True
def do_motion_notify_event(self, e):
"""
Moves the window while dragging, makes sure
the window is always visible upon mouse hover
"""
drag_origin = self.drag_origin
if drag_origin is not None:
position = Point(e.x_root, e.y_root)
self.move(
int(position.x - drag_origin.x),
int(position.y - drag_origin.y)
)
try:
GLib.source_remove(self.hide_id)
except Exception:
pass
finally:
self.hide_id = None
self.show()
def do_leave_notify_event(self, e):
"""
Hides the window upon mouse leave
"""
try:
GLib.source_remove(self.hide_id)
except Exception:
pass
finally:
self.hide_id = None
if self.props.autohide:
self.hide_id = GLib.timeout_add_seconds(
self.__options['display_duration'], self.hide)
Gtk.Window.do_leave_notify_event(self, e)
def on_track_tags_changed(self, e, track, tag):
if not tag.startswith('__') and track == player.PLAYER.current:
self.on_playback_track_start(e, player.PLAYER, track)
def on_playback_track_start(self, e, player, track):
"""
Shows the OSD upon track change
"""
GLib.idle_add(self.show)
try:
GLib.source_remove(self.hide_id)
except Exception:
pass
finally:
self.hide_id = None
if self.props.autohide:
self.hide_id = GLib.timeout_add_seconds(
self.__options['display_duration'], self.hide)
def on_playback_toggle_pause(self, e, player, track):
"""
Shows the OSD after resuming playback
"""
if not player.is_playing(): return
GLib.idle_add(self.show)
try:
GLib.source_remove(self.hide_id)
except Exception:
pass
finally:
self.hide_id = None
if self.props.autohide:
self.hide_id = GLib.timeout_add_seconds(
self.__options['display_duration'], self.hide)
def on_playback_player_end(self, e, player, track):
"""
Hides the OSD upon playback end
"""
if self.props.autohide:
self.hide_id = GLib.timeout_add_seconds(
self.__options['display_duration'], self.hide)
def on_option_set(self, event, settings, option):
"""
Updates appearance on setting change
"""
if option == 'plugin/osd/format':
self.info_area.set_info_format(settings.get_option(option,
_('<span font_desc="Sans 11" foreground="#fff"><b>$title</b></span>\n'
'by $artist\n'
'from $album')
))
if option == 'plugin/osd/background':
self.__options['background'] = alphacolor_parse(settings.get_option(option, '#333333cc'))
GLib.idle_add(self.queue_draw)
elif option == 'plugin/osd/display_duration':
self.__options['display_duration'] = int(settings.get_option(option, 4))
elif option == 'plugin/osd/show_progress':
self.info_area.set_display_progress(settings.get_option(option, True))
elif option == 'plugin/osd/position':
position = Point._make(settings.get_option(option, [20, 20]))
GLib.idle_add(self.move, position.x, position.y)
elif option == 'plugin/osd/border_radius':
value = settings.get_option(option, 10)
self.set_border_width(max(6, int(value / 2)))
self.__options['border_radius'] = value
self.emit('size-allocate', self.get_allocation())
|
Zarokka/exaile
|
plugins/osd/__init__.py
|
Python
|
gpl-2.0
| 12,804
|
[
"FLEUR"
] |
e2e9f37fe9f747fe2b2a52f5f86a1c0aba7203c7a52e3e124dd37e560caa9343
|
from iminuit import Minuit
from probfit import BinnedLH, Extended, AddPdf, gen_toy
from probfit.pdf import HistogramPdf
from probfit.plotting import draw_pdf
import numpy as np
bound = (0, 10)
np.random.seed(0)
bkg = gen_toy(lambda x : x**2, 100000, bound=bound) # a parabola background
sig= np.random.randn(50000)+5 # a Gaussian signal
data= np.concatenate([sig,bkg])
# fill histograms with large statistics
hsig,be= np.histogram(sig, bins=40, range=bound);
hbkg,be= np.histogram(bkg, bins=be, range=bound);
# randomize data
data= np.random.permutation(data)
fitdata= data[:1000]
psig= HistogramPdf(hsig,be)
pbkg= HistogramPdf(hbkg,be)
epsig= Extended(psig, extname='N1')
epbkg= Extended(pbkg, extname='N2')
pdf= AddPdf(epbkg,epsig)
blh= BinnedLH(pdf, fitdata, bins=40, bound=bound, extended=True)
m= Minuit(blh, N1=330, N2= 670, error_N1=20, error_N2=30)
#m.migrad()
blh.draw(m, parts=True)
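# To actually run the fit, uncomment m.migrad() above; the fitted yields are then
# available through Minuit, e.g. m.values['N1'] and m.values['N2'] (sketch; exact
# accessors depend on the installed iminuit version).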
|
mtresch/probfit
|
doc/pyplots/pdf/histogrampdf.py
|
Python
|
mit
| 899
|
[
"Gaussian"
] |
1e942f0a175b064b8f33aae192bd6b8061a053902a34efbcc5f570f2da710f32
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
from pyscf import lib
from pyscf.pbc import gto as pgto
from pyscf.pbc import scf as pscf
from pyscf.pbc.scf import kuhf
cell = pgto.Cell()
cell.atom = '''
He 0 0 1
He 1 0 1
'''
cell.basis = '321g'
cell.a = np.eye(3) * 3
cell.mesh = [8] * 3
cell.verbose = 7
cell.output = '/dev/null'
cell.spin = 2
cell.build()
nk = [2, 2, 1]
kpts = cell.make_kpts(nk, wrap_around=True)
kmf = pscf.KUHF(cell, kpts).run()
mf = pscf.UHF(cell).run()
def tearDownModule():
global cell, kmf, mf
cell.stdout.close()
del cell, kmf, mf
class KnownValues(unittest.TestCase):
def test_kuhf_kernel(self):
self.assertAlmostEqual(kmf.e_tot, -4.586720023431593, 8)
def test_uhf_kernel(self):
self.assertAlmostEqual(mf.e_tot, -3.3634535013441855, 8)
def test_kuhf_vs_uhf(self):
np.random.seed(1)
k = np.random.random(3)
mf = pscf.UHF(cell, k, exxdiv='vcut_sph')
dm = mf.get_init_guess(key='1e')
mf.max_cycle = 1
mf.diis = None
e1 = mf.kernel(dm)
nao = cell.nao
kmf = pscf.KUHF(cell, [k], exxdiv='vcut_sph')
kmf.max_cycle = 1
kmf.diis = None
e2 = kmf.kernel(dm.reshape(2,1,nao,nao))
self.assertAlmostEqual(e1, e2, 9)
self.assertAlmostEqual(e1, -3.498612316383892, 9)
def test_init_guess_by_chkfile(self):
np.random.seed(1)
k = np.random.random(3)
mf = pscf.KUHF(cell, [k], exxdiv='vcut_sph')
mf.max_cycle = 1
mf.diis = None
e1 = mf.kernel()
self.assertAlmostEqual(e1, -3.4070772194665477, 9)
mf1 = pscf.UHF(cell, exxdiv='vcut_sph')
mf1.chkfile = mf.chkfile
mf1.init_guess = 'chkfile'
mf1.diis = None
mf1.max_cycle = 1
e1 = mf1.kernel()
self.assertAlmostEqual(e1, -3.4272925247351256, 9)
self.assertTrue(mf1.mo_coeff[0].dtype == np.double)
def test_dipole_moment(self):
dip = mf.dip_moment()
self.assertAlmostEqual(lib.fp(dip), 1.644379056097664, 7)
dip = kmf.dip_moment()
self.assertAlmostEqual(lib.fp(dip), 0.6934317735537686, 6)
def test_spin_square(self):
ss = kmf.spin_square()[0]
self.assertAlmostEqual(ss, 2.077383024287556, 4)
def test_bands(self):
np.random.seed(1)
kpts_bands = np.random.random((1,3))
e = mf.get_bands(kpts_bands)[0]
self.assertAlmostEqual(lib.fp(e), 0.9038555558945438, 6)
e = kmf.get_bands(kpts_bands)[0]
self.assertAlmostEqual(lib.fp(e), -0.3020614, 6)
def test_small_system(self):
mol = pgto.Cell(
atom='H 0 0 0;',
a=[[3, 0, 0], [0, 3, 0], [0, 0, 3]],
basis=[[0, [1, 1]]],
spin=1,
verbose=7,
output='/dev/null'
)
mf = pscf.KUHF(mol,kpts=[[0., 0., 0.]]).run()
self.assertAlmostEqual(mf.e_tot, -0.10439957735616917, 8)
mol = pgto.Cell(
atom='He 0 0 0;',
a=[[3, 0, 0], [0, 3, 0], [0, 0, 3]],
basis=[[0, [1, 1]]],
verbose=7,
output='/dev/null'
)
mf = pscf.KUHF(mol,kpts=[[0., 0., 0.]]).run()
self.assertAlmostEqual(mf.e_tot, -2.2719576422665635, 8)
if __name__ == '__main__':
print("Tests for PBC UHF and PBC KUHF")
unittest.main()
|
sunqm/pyscf
|
pyscf/pbc/scf/test/test_uhf.py
|
Python
|
apache-2.0
| 4,007
|
[
"PySCF"
] |
627ee54614fbdd463041ea75cf87ce07016b17d4568365b071eefa433b210b67
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 18:38:07 2018
Handler for the big nasty integrals pertaining to generating correlation models
from CAMB.
The model desired here is:
omega_{DM}(theta) = pi times integral of (z = 0 to infinity)
times integral of (k = 0 to infinity)
times the dimensionless power spectrum
(which is equal to the power spectrum times
k^3 divided by 2 pi squared)
times the zeroth order bessel function of the first kind,
whose input is k times theta times the comoving
distance along the line of sight
times the derivative of the comoving distance with respect
to redshift
times the PDZ of one sample, times the PDZ of the other sample
dz dk
All detailed in DiPompeo et al. 2017.
@author: csh4
"""
from scipy import interpolate
import os
import camb
import pickle
import scipy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mp_manager as mpm
"""
Initializing the CAMB parameters and getting the matter power interpolator
"""
params, results, PK = 0,0,0
params = camb.CAMBparams()
params.set_cosmology()#H0=67.5, ombh2=0.022, omch2=0.122)
params.InitPower.set_params()#ns=0.965)
results = camb.get_results(params)
results.calc_power_spectra()
c = 2.998 * ( 10 ** 5 ) #km/s
PK = camb.get_matter_power_interpolator(params, k_hunit=False, hubble_units=False,
nonlinear=True, kmax=1000, return_z_k = True)
"""
BEGIN HELPER FUNCTIONS (which mostly go into the integrand)
"""
def p_spec(z, k):
"""
matter power spectrum as function of z
"""
return PK[0].P(z, k)
def p_spec_nodims(z, k):
"""
dimensionless power spectrum at redshift z, wavenumber k
"""
return p_spec(z,k)/(2*(np.pi**2))
def comoving_distance(z):
"""
comoving distance along line of sight
"""
return results.comoving_radial_distance(z)
def dzdchi(z):
"""
dz/dchi = H(z)/c = (H0/c) * [Ωm*(1 + z)**3 + ΩΛ]**(1/2)
"""
return results.hubble_parameter(z)/c
def bessel0(x):
"""
zeroth order bessel function of 1st kind
"""
return scipy.special.jn(0, x)
def deg_2_rad(theta):
"""
Convert degrees to radians
"""
return np.pi*theta/(180.0)
def and_them(arrs):
"""
And the arrays of booleans given
"""
out = np.ones(len(arrs[0]), dtype=bool)
for arr in arrs:
out = np.logical_and(out, arr)
return out
def or_them(arrs):
"""
Or the arrays of booleans given
"""
out = np.zeros(len(arrs[0]), dtype=bool)
for arr in arrs:
out = np.logical_or(out, arr)
return out
def gauss(x, mu, sigma):
"""
Return the probability associated with a normal distribution of a given
mean and sigma for some given x value
"""
return (1/(sigma*np.sqrt(2*np.pi)))*np.exp((-((x-mu)**2))/(2*(sigma**2)))
def inbin(x, lo, hi):
"""
Return true if x is between lo and hi
"""
good = np.logical_and(np.greater_equal(x, lo),
np.less_equal(x, hi))
return good
def norm(x, y):
"""
Normalize the distribution to 1.
"""
return y/(np.trapz(y, x=x))
"""
BEGIN INTEGRATION FUNCTIONS
"""
def k_integrand(z, k, theta):
"""
the k part of the integrand from DiPompeo et al 2017
"""
return (k*
p_spec_nodims(z,k)*
bessel0(k*
deg_2_rad(theta)*
comoving_distance(z)))
"""
BEGIN K_INTEGRAL LOGGING
"""
class kstore:
"""
This class basically pre-generates the k integrand values for some given
parameter set of k and z and theta, and holds them in memory as a numpy
interpolator object (as a function of z and theta)
Thus, you can quickly generate models for a range of z and theta values.
"""
def __init__(self, gen = False, def_name="/kstore.hdf"):
"""
Load or generate the k_integral values for an ensemble of z and theta
values
Make a 2d interpolator
"""
self.krange = np.linspace(0.0, 100.0, num=200000)
self.zrange = np.arange(0, 4.0, 0.01)
self.trange = np.logspace(np.log10(0.0025),np.log10(1.3),num=50)
self.name = def_name
self.df = pd.DataFrame()
if gen:
self.gen_self()
else:
try:
self.load()
except FileNotFoundError:
print("Could not find kstore = " + def_name + ". Generating new one.")
self.gen_self()
zvals = self.df.values
self.f = interpolate.interp2d(self.zrange, self.trange, zvals.T)
def k_int(self, z, theta):
"""
Retrieve a value from the interpolator
"""
return self.f(z, theta)
def z_integrand_internal(self, z, pdz1_x, pdz1_y, pdz2_x, pdz2_y, theta):
"""
Helper function for z_integral. What is being integrated over.
Details in DiPompeo et al. 2017
"""
return (np.pi*
np.interp(z, pdz1_x, pdz1_y)*
np.interp(z, pdz2_x, pdz2_y)*
dzdchi(z)*
self.k_int(z, theta))
def z_integral(self, pdz1_x, pdz1_y, pdz2_x, pdz2_y, theta):
"""
Integrate the k_integral over z space given some PDZs and a theta value
Uses the PDZs.
"""
zvals = []
for z in self.zrange:
zvals.append(self.z_integrand_internal(z, pdz1_x, pdz1_y,
pdz2_x, pdz2_y, theta)[0])
return np.trapz(np.array(zvals), x=self.zrange)
def load(self):
"""
Load the internal data frame from disk
"""
directory = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(directory)
self.df = pd.read_hdf(parent+self.name, key='primary')
def save(self):
"""
Save the internal data frame to disk
"""
directory = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(directory)
self.df.to_hdf(parent+self.name, key='primary', format='table')
def gen_self(self):
"""
Generate k_integral values for an ensemble of z and theta values
Save in the internal data frame
"""
dct = {}
for t in self.trange:
k_ints = []
for z in self.zrange:
k_ints.append(np.trapz(k_integrand(z, self.krange, t),
x=self.krange))
dct.update({t:k_ints})
self.df = pd.DataFrame.from_dict(dct)
self.save()
"""
CREATE A KSTORE OBJECT FOR MODEL FITTING
automatically generates a new integrand set if it does not exist. Alternatively,
pass in gen = True
I know this is sloppy. The model fitting functions should probably be methods
within the class kstore, so you don't have this scripted element. But it works ok.
"""
kst = kstore(gen = False)
"""
BEGIN MODEL FITTING FUNCTIONS
"""
def mod(pdz1_x, pdz1_y, pdz2_x, pdz2_y, theta):
"""
Generate a model value from the pdzs and a theta value.
Poorly named helper function
"""
return kst.z_integral(pdz1_x, pdz1_y, pdz2_x, pdz2_y, theta)
def model(pdz1_x, pdz1_y, pdz2_x, pdz2_y,
trange = np.logspace(np.log10(0.0025),np.log10(1.3),num=50)):
"""
Given some PDZs, generate a model and return.
"""
modvals = [mod(pdz1_x, pdz1_y, pdz2_x, pdz2_y, t) for t in trange]
return trange, modvals
def likelihood(x_data, y_data, yerr, x_model, y_model):
"""
Generate the likelihood of a given model being correct
(via product of normal distributions)
Your standard bayesian model fitting routine
"""
y_mod_interp = np.interp(x_data, x_model, y_model)
prob = 1.0
for i in range(len(y_data)):
prob = prob * gauss(y_data[i], y_mod_interp[i], yerr[i])
return prob
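# A minimal alternative sketch (added for illustration, not used elsewhere in
# this module): the product of Gaussian densities above underflows to 0.0 when
# many data points are involved, so summing log-densities is numerically safer.
def log_likelihood(x_data, y_data, yerr, x_model, y_model):
    """
    Log of the likelihood above, accumulated as a sum of log Gaussian
    densities to avoid floating point underflow.
    """
    y_mod_interp = np.interp(x_data, x_model, y_model)
    logp = 0.0
    for i in range(len(y_data)):
        logp = logp + np.log(gauss(y_data[i], y_mod_interp[i], yerr[i]))
    return logp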
def chisquare(y1, y2, yerr):
"""
Figure out the chi square value for the model, given some data points,
model points, and errors
@params
y1 - data points
y2 - model points
yerr - data point errorbars
"""
chisq= 0.0
for i in range(len(y1)):
chisq = chisq + ((y1[i] - y2[i])**2)/(yerr[i]**2)
return chisq
def bias_fit(x_data, y_data, yerr, x_model, y_model):
"""
Given a model, find the best fit bias measurement to some data, using a
bog-standard gaussian product likelihood function
Recall, if you have two sets of data (say D1 and D2) and you are running
the correlation, the model is multiplied by the product of the two biases,
b1 and b2
So what you are getting here is the square root of the product of the
biases b1 and b2. If this is an autocorrelation, this is the bias itself.
If this is a cross correlation, you must square the value returned here and
divide it by the (known) bias of one data set to get the bias of the other
set.
@params
x_data - The central angles of the correlation measurements
y_data - The values of the correlation measurements
yerr - The errorbars of the correlation measurements
x_model - The angular coordinates of the model
y_model - The values of the models
@returns
brange - The range of bias values tested
likelihoods - The probability associated with each bias value
chisq - The not reduced chi square value associated with the best
fit bias value
best - The best fit bias value
(i.e. square this and multiply it by the base model for
the best fitting model)
"""
brange = np.linspace(0.0, 8.0, num=400)
likelihoods = np.array([likelihood(x_data, y_data, yerr,
x_model, np.array(y_model)*(b**2)) for b in brange], dtype=float)
likelihoods = likelihoods/np.trapz(likelihoods, x=brange)
best = brange[np.argmax(likelihoods)]
y_mod_interp = np.interp(x_data, x_model, y_model)
chisq = chisquare(y_data, np.array(y_mod_interp)*(best**2), yerr)
return brange, likelihoods, chisq, best
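# Hedged usage sketch (numbers below are hypothetical, not from any paper):
# bias_fit returns the best-fit value of sqrt(b1*b2). For a cross-correlation
# where one sample's bias is known independently, the other follows directly:
#
#     best = 2.1                       # hypothetical sqrt(b1*b2) from bias_fit
#     b_known = 1.5                    # hypothetical, known bias b1
#     b_other = best**2 / b_known      # = b1*b2 / b1 = b2
#
# For an autocorrelation (both samples identical), `best` is the bias itself.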
def corr_bias(x_data, y_data, yerr, pdz1_x, pdz1_y, pdz2_x, pdz2_y):
"""
Given a correlation measurement and associated PDZs, generate a model and
fit as a bias to the measurement. Return:
1) the model [unbiased] (x and y float arrays)
2) best fit bias (float)
3) the bias PDF (x and y float arrays)
@params
x_data - The central angles of the correlation measurements
y_data - The values of the correlation measurements
yerr - The errorbars of the correlation measurements
pdz1_x - PDZ 1 redshift range to generate models from
pdz1_y - PDZ 1 probability values to generate models from
pdz2_x - PDZ 2 redshift range to generate models from
pdz2_y - PDZ 2 probability values to generate models from
pdz1_x and pdz2_x, pdz1_y and pdz2_y should be the same for an autocorrelation
@returns
xmod - the angular range associated with the generated model
ymod - the value of the model at each angle
best - The best fit bias value
(i.e. square this and multiply it by the base model for
the best fitting model)
xbias - The range of bias values tested
ybias - The probability associated with each bias value
chisq - The not reduced chi square value associated with the best
fit bias value
"""
xmod, ymod = model(pdz1_x, pdz1_y, pdz2_x, pdz2_y)
xbias, ybias, chisq, best = bias_fit(x_data, y_data, yerr, xmod, ymod)
return xmod, ymod, best, xbias, ybias, chisq
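# Hedged usage sketch (all inputs hypothetical): build two normalized Gaussian
# PDZs with the helpers above and fit a bias to measured correlation points.
# Left commented out because it requires the kstore interpolator `kst` above
# to have been generated, which is expensive.
#
#     zgrid  = np.arange(0.0, 4.0, 0.01)
#     pdz1_y = norm(zgrid, gauss(zgrid, 1.0, 0.3))   # hypothetical sample 1
#     pdz2_y = norm(zgrid, gauss(zgrid, 1.2, 0.4))   # hypothetical sample 2
#     xmod, ymod, best, xbias, ybias, chisq = corr_bias(
#         theta_data, w_data, w_err,                 # measured correlation
#         zgrid, pdz1_y, zgrid, pdz2_y)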
def corr_bias_plot(x_data, y_data, yerr, pdz1_x, pdz1_y, pdz2_x, pdz2_y, title=""):
"""
Plot the correlation and bias given some data and PDZs (sort of a test function)
"""
xmod, ymod, best, xbias, ybias, chisq = corr_bias(x_data, y_data, yerr, pdz1_x, pdz1_y, pdz2_x, pdz2_y)
plt.figure()
plt.errorbar(x_data, y_data, yerr=yerr, ms=6.0, fmt='o', capthick=1.5,
capsize=4.0, elinewidth=1.5, label='Data')
plt.plot(xmod, ymod, label="Model (no bias)")
plt.plot(xmod, np.array(ymod)*(best**2), label='Model (best bias of '+str(best)+')')
plt.legend()
plt.xlabel("Degrees")
plt.ylabel("Correlation")
plt.loglog()
plt.title(title + " correlation and model")
plt.figure()
plt.step(xbias, ybias, color='black')
plt.xlabel("Bias")
plt.ylabel("Probability")
plt.title(title + " bias fit")
plt.show()
"""
Some old stuff from before I used OOP for model generation. OOP is certainly
the better way of doing things
"""
#def k_integral(z, theta):
# """
# The k integral itself
# """
# krange = np.linspace(0.0, 100.0, num=200000)
# #krange = np.linspace(0.0, 5.0, num=100)
# #kvals = np.array([k_integrand(z, k, theta) for k in krange], dtype=float)
# kvals = k_integrand(z, krange, theta)
# return np.trapz(kvals, x=krange)
#def z_integrand(z, theta, pdz1_x, pdz1_y, pdz2_x, pdz2_y):
# return (np.pi*
# np.interp(z, pdz1_x, pdz1_y)*
# np.interp(z, pdz2_x, pdz2_y)*
# dzdchi(z)*
# k_integral(z, theta))
#
#def z_integral(theta, pdz1_x, pdz1_y, pdz2_x, pdz2_y):
# zrange = np.arange(0, 4.0, 0.001)
# zvals = np.array([z_integrand(z, theta, pdz1_x, pdz1_y, pdz2_x, pdz2_y) for z in zrange])
# #zvals = z_integrand(zrange, theta, pdz1_x, pdz1_y, pdz2_x, pdz2_y)
# return np.trapz(zvals, x=zrange)
if __name__ == "__main__":
print("done")
|
cassandra-sh/corrset
|
camb_model_new.py
|
Python
|
mit
| 14,255
|
[
"Gaussian"
] |
1fdbb1e11dc06b0468f8bfb18cbae0d9e2ab1086dfde0a1fe3aad7dde3b0cb40
|
# -*- coding: utf-8 -*-
from pymol.cgo import *
from pymol import cmd
from random import randint
#############################################################################
#
# drawBoundingBox.py -- Draws a box surrounding a selection
#
#
# AUTHOR: Jason Vertrees
# DATE : 2/20/2009
# NOTES : See comments below.
#
#############################################################################
def drawBoundingBox(selection="(all)", padding=0.0, linewidth=2.0, r=1.0, g=1.0, b=1.0):
"""
DESCRIPTION
Given selection, draw the bounding box around it.
USAGE:
drawBoundingBox [selection, [padding, [linewidth, [r, [g, b]]]]]
PARAMETERS:
selection, the selection to enboxen. :-)
defaults to (all)
padding, defaults to 0
linewidth, width of box lines
defaults to 2.0
r, red color component, valid range is [0.0, 1.0]
defaults to 1.0
g, green color component, valid range is [0.0, 1.0]
defaults to 1.0
b, blue color component, valid range is [0.0, 1.0]
defaults to 1.0
RETURNS
string, the name of the CGO box
NOTES
* This function creates a randomly named CGO box that minimally spans the protein. The
user can specify the width of the lines, the padding and also the color.
"""
([minX, minY, minZ],[maxX, maxY, maxZ]) = cmd.get_extent(selection)
print "Box dimensions (%.2f, %.2f, %.2f)" % (maxX-minX, maxY-minY, maxZ-minZ)
minX = minX - float(padding)
minY = minY - float(padding)
minZ = minZ - float(padding)
maxX = maxX + float(padding)
maxY = maxY + float(padding)
maxZ = maxZ + float(padding)
if padding != 0:
print "Box dimensions + padding (%.2f, %.2f, %.2f)" % (maxX-minX, maxY-minY, maxZ-minZ)
boundingBox = [
LINEWIDTH, float(linewidth),
BEGIN, LINES,
COLOR, float(r), float(g), float(b),
VERTEX, minX, minY, minZ, #1
VERTEX, minX, minY, maxZ, #2
VERTEX, minX, maxY, minZ, #3
VERTEX, minX, maxY, maxZ, #4
VERTEX, maxX, minY, minZ, #5
VERTEX, maxX, minY, maxZ, #6
VERTEX, maxX, maxY, minZ, #7
VERTEX, maxX, maxY, maxZ, #8
VERTEX, minX, minY, minZ, #1
VERTEX, maxX, minY, minZ, #5
VERTEX, minX, maxY, minZ, #3
VERTEX, maxX, maxY, minZ, #7
VERTEX, minX, maxY, maxZ, #4
VERTEX, maxX, maxY, maxZ, #8
VERTEX, minX, minY, maxZ, #2
VERTEX, maxX, minY, maxZ, #6
VERTEX, minX, minY, minZ, #1
VERTEX, minX, maxY, minZ, #3
VERTEX, maxX, minY, minZ, #5
VERTEX, maxX, maxY, minZ, #7
VERTEX, minX, minY, maxZ, #2
VERTEX, minX, maxY, maxZ, #4
VERTEX, maxX, minY, maxZ, #6
VERTEX, maxX, maxY, maxZ, #8
END
]
boxName = "box_" + str(randint(0,10000))
while boxName in cmd.get_names():
boxName = "box_" + str(randint(0,10000))
cmd.load_cgo(boundingBox,boxName)
return boxName
cmd.extend ("drawBoundingBox", drawBoundingBox)
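# Hedged usage sketch (selection names are hypothetical): after running this
# script inside PyMOL, the command is available both ways:
#     drawBoundingBox                            # box around everything
#     drawBoundingBox chain A, padding=2.0       # padded box around chain A
# or from Python:
#     drawBoundingBox("polymer", padding=1.0, r=1.0, g=0.5, b=0.0)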
|
yorzh86/Step1
|
scripts/drawBox.py
|
Python
|
gpl-2.0
| 5,047
|
[
"PyMOL"
] |
5198bd864b5190d2bcdfcbe2b6ee7dcb055b8441964b9b35c129d4f14ff87e25
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* copyright 1998-2000 by Warren Lyford Delano of DeLano Scientific.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
from chempy import Storage,Atom,feedback
from chempy.models import Indexed,Connected
import string
import copy
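# Default mapping from (AMBER-style) text atom types to the numeric atom types
# written out by XYZ.toList() below, used when the model carries no explicit
# 'numeric_type' and no mapping argument is supplied.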
generics = {
'CT' : 1,
'C' : 2,
'CA' : 3,
'CM' : 4,
'CC' : 5,
'CV' : 6,
'CW' : 7,
'CR' : 8,
'CB' : 9,
'C*' : 10,
'CN' : 11,
'CK' : 12,
'CQ' : 13,
'N' : 14,
'NA' : 15,
'NB' : 16,
'NC' : 17,
'N*' : 18,
'N2' : 19,
'N3' : 20,
'OW' : 21,
'OH' : 22,
'OS' : 23,
'O' : 24,
'O2' : 25,
'S' : 26,
'SH' : 27,
'P' : 28,
'H' : 29,
'HW' : 30,
'HO' : 31,
'HS' : 32,
'HA' : 33,
'HC' : 34,
'H1' : 35,
'H2' : 36,
'H3' : 37,
'HP' : 38,
'H4' : 39,
'H5' : 40,
}
class XYZ(Storage):
#------------------------------------------------------------------------------
def toList(self,model,mapping=None):
conn = copy.deepcopy(model)
conn = conn.convert_to_connected()
list = []
if len(model.atom):
if not model.atom[0].has('numeric_type'):
if not mapping:
mapping = generics
if feedback['warnings']:
print(' ' + str(self.__class__) + ': no numeric atom types found, using defaults.')
list.append("%6d\n" % conn.nAtom)
c = 0
for a in conn.atom:
if mapping:
n_type = mapping[a.text_type]
else:
n_type = a.numeric_type
if n_type<0:
print(str(self.__class__) +
'-WARNING: negative numeric type (%d) for atom %d' % (n_type, c))
st = "%6d %-3s%12.6f%12.6f%12.6f%6d" % (
c+1,a.text_type,a.coord[0],a.coord[1],a.coord[2],int(n_type))
for b in conn.bond[c]:
idx = b.index[0]
if idx == c:
idx = b.index[1]
st = st + "%6d" % (idx+1)
st = st + "\n"
list.append(st)
c = c + 1
return(list)
#----------------------------------------------------------------------------
def updateFromList(self,model,list):
c = 0
for a in list[1:]:
model.atom[c].coord = [ float(a[11:23]),float(a[23:35]),float(a[35:47])]
c = c + 1
|
gratefulfrog/lib
|
python/chempy/xyz.py
|
Python
|
gpl-2.0
| 3,096
|
[
"ChemPy",
"PyMOL"
] |
f8f82abc5919082d1b11fbb39aa891d4ea569e4551faf500223771f1c6035ad4
|
"""
Sample problems on which to test algorithms.
XXX: get some standard optimization problems from literature
"""
import numpy as np
import base
from pyll import as_apply
from pyll import scope
from pyll_utils import hp_choice
from pyll_utils import hp_uniform, hp_loguniform, hp_quniform, hp_qloguniform
from pyll_utils import hp_normal, hp_lognormal, hp_qnormal, hp_qlognormal
@base.as_bandit(loss_target=0)
def quadratic1():
"""
About the simplest problem you could ask for:
optimize a one-variable quadratic function.
"""
return {'loss': (hp_uniform('x', -5, 5) - 3) ** 2}
@base.as_bandit(loss_target=0)
def q1_choice():
o_x = hp_choice('o_x', [
(-3, hp_uniform('x_neg', -5, 5)),
( 3, hp_uniform('x_pos', -5, 5)),
])
return {'loss': (o_x[0] - o_x[1]) ** 2}
@base.as_bandit(loss_target=0)
def q1_lognormal():
"""
About the simplest problem you could ask for:
optimize a one-variable quadratic function.
"""
return {'loss': scope.max(-(hp_lognormal('x', 0, 2) - 3) ** 2, -100)}
@base.as_bandit(loss_target=-2, rseed=123)
def n_arms(N=2):
"""
Each arm yields a reward from a different Gaussian.
The correct arm is arm 0.
"""
x = hp_choice('x', [0, 1])
reward_mus = as_apply([-1] + [0] * (N - 1))
reward_sigmas = as_apply([1] * N)
return {'loss': scope.normal(reward_mus[x], reward_sigmas[x]),
'loss_variance': 1.0}
@base.as_bandit(loss_target=-2)
def distractor():
"""
This is a nasty function: the optimum sits in a narrow spike near x = -10,
while a long, easy-to-find asymptote guides hill-climbing approaches away
from the true optimum.
The second peak is at x=-10.
The prior mean is 0.
"""
loss_target = -2
x = hp_uniform('x', -15, 15)
f1 = 1.0 / (1.0 + scope.exp(-x)) # climbs rightward from 0.0 to 1.0
f2 = 2 * scope.exp(-(x + 10) ** 2) # bump with height 2 at (x=-10)
return {'loss': -f1 - f2}
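# Hedged illustration (not part of the original module): a plain-numpy version
# of the distractor loss makes the two regimes described above concrete. At
# x = -10 the loss reaches roughly -2.00005 (the narrow spike, i.e. the true
# optimum), while the easy asymptote at large positive x only reaches about -1.
def _distractor_loss_check(x):
    """Plain-numpy distractor loss, for illustration only."""
    f1 = 1.0 / (1.0 + np.exp(-x))
    f2 = 2 * np.exp(-(x + 10) ** 2)
    return -f1 - f2
# _distractor_loss_check(np.array([-10.0, 15.0])) -> approx [-2.00005, -1.0]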
@base.as_bandit(loss_target=-1)
def gauss_wave():
"""
Essentially, this is a high-frequency sinusoidal function plus a broad quadratic.
One variable controls the position along the curve.
The binary variable determines whether the sinusoidal is shifted by pi.
So there are actually two maxima in this problem, it's just one is more
probable. The tricky thing here is dealing with the fact that there are two
variables and one is discrete.
"""
x = hp_uniform('x', -20, 20)
t = hp_choice('curve', [x, x + np.pi])
f1 = scope.sin(t)
f2 = 2 * scope.exp(-(t / 5.0) ** 2)
return {'loss': - (f1 + f2)}
@base.as_bandit(loss_target=-2.5, rseed=123)
def gauss_wave2():
"""
Variant of the GaussWave problem in which noise is added to the score
function, and there is an option to either have no sinusoidal variation, or
a negative cosine with variable amplitude.
Immediate local max is to sample x from spec and turn off the neg cos.
Better solution is to move x a bit to the side, turn on the neg cos and turn
up the amp to 1.
"""
var = .1
x = hp_uniform('x', -20, 20)
amp = hp_uniform('amp', 0, 1)
t = (scope.normal(0, var) + 2 * scope.exp(-(x / 5.0) ** 2))
return {'loss': - hp_choice('hf', [t, t + scope.sin(x) * amp]),
'loss_variance': var}
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/hyperopt-0.0.2/hyperopt/bandits.py
|
Python
|
bsd-3-clause
| 3,342
|
[
"Gaussian"
] |
3841e3762662dc1d537792304d045ae44a0e4ccc74cd39e621c2b0d5c95cc7f2
|
"""
This module provides the AST. Subclass :py:class:`Context` and override the
various methods to allow minivect visitors over the AST, to promote and map types,
etc. Subclass and override :py:class:`ASTBuilder`'s methods to provide alternative
AST nodes or different implementations.
"""
import copy
import string
import types
import minierror
import minitypes
import miniutils
import minivisitor
import specializers
import type_promoter
import minicode
import codegen
import llvm_codegen
import graphviz
try:
import llvm.core
import llvm.ee
import llvm.passes
except ImportError:
llvm = None
class UndocClassAttribute(object):
"Use this to document class attributes for Sphinx"
def __init__(self, cls):
self.cls = cls
def __call__(self, *args, **kwargs):
return self.cls(*args, **kwargs)
def make_cls(cls1, cls2):
"Fuse two classes together."
name = "%s_%s" % (cls1.__name__, cls2.__name__)
return type(name, (cls1, cls2), {})
class Context(object):
"""
A context that knows how to map ASTs back and forth, how to wrap nodes
and types, and how to instantiate a code generator for specialization.
An opaque_node or foreign node is a node that is not from our AST,
and a normal node is one that has an interface compatible with ours.
To provide custom functionality, set the following attributes, or
subclass this class.
:param astbuilder: the :py:class:`ASTBuilder` or ``None``
:param typemapper: the :py:class:`minivect.minitypes.Typemapper` or
``None`` for the default.
.. attribute:: codegen_cls
The code generator class that is used to generate code.
The default is :py:class:`minivect.codegen.CodeGen`
.. attribute:: cleanup_codegen_cls
The code generator that generates code to dispose of any
garbage (e.g. intermediate object temporaries).
The default is :py:class:`minivect.codegen.CodeGenCleanup`
.. attribute:: codewriter_cls
The code writer that the code generator writes its generated code
to. This may be strings or arbitrary objects.
The default is :py:class:`minivect.minicode.CodeWriter`, which accepts
arbitrary objects.
.. attribute:: codeformatter_cls
A formatter to format the generated code.
The default is :py:class:`minivect.minicode.CodeFormatter`,
which returns a list of objects written. Set this to
:py:class:`minivect.minicode.CodeStringFormatter`
to have the strings joined together.
.. attribute:: specializer_mixin_cls
A specializer mixin class that can override or intercept
functionality. This class should likely participate
cooperatively in MI.
.. attribute:: variable_resolving_mixin_cls
A specializer mixin class that resolves wrapped miniasts in a foreign
AST. This is only needed if you are using :py:class:`NodeWrapper`,
which wraps a miniast somewhere at the leaves.
.. attribute:: graphviz_cls
Visitor to generate a Graphviz graph. See the :py:mod:`graphviz`
module.
.. attribute:: minifunction
The current minifunction that is being translated.
Use subclass :py:class:`CContext` to get the defaults for C code generation.
"""
debug = False
debug_elements = False
use_llvm = False
optimize_broadcasting = True
shape_type = minitypes.Py_ssize_t.pointer()
strides_type = shape_type
astbuilder_cls = None
codegen_cls = UndocClassAttribute(codegen.VectorCodegen)
cleanup_codegen_cls = UndocClassAttribute(codegen.CodeGenCleanup)
codewriter_cls = UndocClassAttribute(minicode.CodeWriter)
codeformatter_cls = UndocClassAttribute(minicode.CodeFormatter)
graphviz_cls = UndocClassAttribute(graphviz.GraphvizGenerator)
specializer_mixin_cls = None
variable_resolving_mixin_cls = None
func_counter = 0
final_specializer = specializers.FinalSpecializer
def __init__(self):
self.init()
if self.use_llvm:
if llvm is None:
import llvm.core as llvm_py_not_available # llvm-py not available
self.llvm_module = llvm.core.Module.new('default_module')
# self.llvm_ee = llvm.ee.ExecutionEngine.new(self.llvm_module)
self.llvm_ee = llvm.ee.EngineBuilder.new(self.llvm_module).force_jit().opt(3).create()
self.llvm_fpm = llvm.passes.FunctionPassManager.new(self.llvm_module)
self.llvm_fpm.initialize()
if not self.debug:
for llvm_pass in self.llvm_passes():
self.llvm_fpm.add(llvm_pass)
else:
self.llvm_ee = None
self.llvm_module = None
def init(self):
self.astbuilder = self.astbuilder_cls(self)
self.typemapper = minitypes.TypeMapper(self)
def run_opaque(self, astmapper, opaque_ast, specializers):
return self.run(astmapper.visit(opaque_ast), specializers)
def run(self, ast, specializer_classes, graphviz_outfile=None,
print_tree=False):
"""
Specialize the given AST with all given specializers and return
an iterable of generated code in the form of
``(specializer, new_ast, codewriter, code_obj)``
The code_obj is the generated code (e.g. a string of C code),
depending on the code formatter used.
"""
for specializer_class in specializer_classes:
self.init()
pipeline = self.pipeline(specializer_class)
specialized_ast = specializers.specialize_ast(ast)
self.astbuilder.minifunction = specialized_ast
for transform in pipeline:
specialized_ast = transform.visit(specialized_ast)
if print_tree:
specialized_ast.print_tree(self)
if graphviz_outfile is not None:
data = self.graphviz(specialized_ast)
graphviz_outfile.write(data)
codewriter = self.codewriter_cls(self)
codegen = self.codegen_cls(self, codewriter)
codegen.visit(specialized_ast)
yield (pipeline[0], specialized_ast, codewriter,
self.codeformatter_cls().format(codewriter))
def debug_c(self, ast, specializer, astbuilder_cls=None):
"Generate C code (for debugging)"
context = CContext()
if astbuilder_cls:
context.astbuilder_cls = astbuilder_cls
else:
context.astbuilder_cls = self.astbuilder_cls
context.shape_type = self.shape_type
context.strides_type = self.strides_type
context.debug = self.debug
result = context.run(ast, [specializer]).next()
_, specialized_ast, _, (proto, impl) = result
return impl
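# Hedged usage sketch (the array type construction is hypothetical; only
# builder/context calls visible in this file are assumed):
#
#     context = CContext()
#     b = context.astbuilder
#     x = b.variable(some_array_type, 'x')      # hypothetical array type
#     y = b.variable(some_array_type, 'y')
#     func = b.build_function([x, y], b.assign(x, y), name='copy_kernel')
#     for specializer, ast, codewriter, (proto, impl) in context.run(
#             func, [specializers.StridedSpecializer]):
#         ...                                   # `impl` is the generated C code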
def pipeline(self, specializer_class):
# add specializer mixin and run specializer
if self.specializer_mixin_cls:
specializer_class = make_cls(self.specializer_mixin_cls,
specializer_class)
specializer = specializer_class(self)
pipeline = [specializer]
# Add variable resolving mixin to the final specializer and run
# transform
final_specializer_cls = self.final_specializer
if final_specializer_cls:
if self.variable_resolving_mixin_cls:
final_specializer_cls = make_cls(
self.variable_resolving_mixin_cls,
final_specializer_cls)
pipeline.append(final_specializer_cls(self, specializer))
pipeline.append(type_promoter.TypePromoter(self))
return pipeline
def generate_disposal_code(self, code, node):
"Run the disposal code generator on an (sub)AST"
transform = self.cleanup_codegen_cls(self, code)
transform.visit(node)
#
### Override in subclasses where needed
#
def llvm_passes(self):
"Returns a list of LLVM optimization passes"
return []
return [
# llvm.passes.PASS_CFG_SIMPLIFICATION
llvm.passes.PASS_BLOCK_PLACEMENT,
llvm.passes.PASS_BASIC_ALIAS_ANALYSIS,
llvm.passes.PASS_NO_AA,
llvm.passes.PASS_SCALAR_EVOLUTION_ALIAS_ANALYSIS,
# llvm.passes.PASS_ALIAS_ANALYSIS_COUNTER,
llvm.passes.PASS_AAEVAL,
llvm.passes.PASS_LOOP_DEPENDENCE_ANALYSIS,
llvm.passes.PASS_BREAK_CRITICAL_EDGES,
llvm.passes.PASS_LOOP_SIMPLIFY,
llvm.passes.PASS_PROMOTE_MEMORY_TO_REGISTER,
llvm.passes.PASS_CONSTANT_PROPAGATION,
llvm.passes.PASS_LICM,
# llvm.passes.PASS_CONSTANT_MERGE,
llvm.passes.PASS_LOOP_STRENGTH_REDUCE,
llvm.passes.PASS_LOOP_UNROLL,
# llvm.passes.PASS_FUNCTION_ATTRS,
# llvm.passes.PASS_GLOBAL_OPTIMIZER,
# llvm.passes.PASS_GLOBAL_DCE,
llvm.passes.PASS_DEAD_CODE_ELIMINATION,
llvm.passes.PASS_INSTRUCTION_COMBINING,
llvm.passes.PASS_CODE_GEN_PREPARE,
]
def mangle_function_name(self, name):
name = "%s_%d" % (name, self.func_counter)
self.func_counter += 1
return name
def promote_types(self, type1, type2):
"Promote types in an arithmetic operation"
if type1 == type2:
return type1
return self.typemapper.promote_types(type1, type2)
def getchildren(self, node):
"Implement to allow a minivisitor.Visitor over a foreign AST."
return node.child_attrs
def getpos(self, opaque_node):
"Get the position of a foreign node"
filename, line, col = opaque_node.pos
return Position(filename, line, col)
def gettype(self, opaque_node):
"Get a type of a foreign node"
return opaque_node.type
def may_error(self, opaque_node):
"Return whether this node may result in an exception."
raise NotImplementedError
def declare_type(self, type):
"Return a declaration for a type"
raise NotImplementedError
def to_llvm(self, type):
"Return an LLVM type for the given minitype"
return self.typemapper.to_llvm(type)
def graphviz(self, node, graphviz_name="AST"):
visitor = self.graphviz_cls(self, graphviz_name)
graphviz_graph = visitor.visit(node)
return graphviz_graph.to_string()
def is_object(self, type):
return isinstance(type, minitypes.ObjectType)
class CContext(Context):
"Set defaults for C code generation."
codegen_cls = codegen.VectorCodegen
codewriter_cls = minicode.CCodeWriter
codeformatter_cls = minicode.CCodeStringFormatter
class LLVMContext(Context):
"Context with default for LLVM code generation"
use_llvm = True
codegen_cls = llvm_codegen.LLVMCodeGen
class ASTBuilder(object):
"""
This class is used to build up a minivect AST. It can be used by a user
from a transform or otherwise, but the important bit is that we use it
in our code to build up an AST that can be overridden by the user,
and which makes it convenient to build up complex ASTs concisely.
"""
# the 'pos' attribute is set for each visit to each node by
# the ASTMapper
pos = None
temp_reprname_counter = 0
def __init__(self, context):
"""
:param context: the :py:class:`Context`
"""
self.context = context
def _infer_type(self, value):
"Used to infer types for self.constant()"
if isinstance(value, (int, long)):
return minitypes.long_
elif isinstance(value, float):
return minitypes.double
elif isinstance(value, str):
return minitypes.CStringType()
else:
raise minierror.InferTypeError()
def create_function_type(self, function, strides_args=True):
arg_types = []
for arg in function.arguments + function.scalar_arguments:
if arg.used:
if arg.type and arg.type.is_array and not strides_args:
arg_types.append(arg.data_pointer.type)
arg.variables = [arg.data_pointer]
else:
for variable in arg.variables:
arg_types.append(variable.type)
function.type = minitypes.FunctionType(
return_type=function.success_value.type, args=arg_types)
def function(self, name, body, args, shapevar=None, posinfo=None,
omp_size=None):
"""
Create a new function.
:type name: str
:param name: name of the function
:type args: [:py:class:`FunctionArgument`]
:param args: all array and scalar arguments to the function, excluding
shape or position information.
:param shapevar: the :py:class:`Variable` for the total broadcast shape
If ``None``, a default of ``Py_ssize_t *`` is assumed.
:type posinfo: :py:class:`FunctionArgument`
:param posinfo: if given, this will be the second, third and fourth
arguments to the function ``(filename, lineno, column)``.
"""
if shapevar is None:
shapevar = self.variable(self.context.shape_type, 'shape')
arguments, scalar_arguments = [], []
for arg in args:
if arg.type.is_array:
arguments.append(arg)
else:
scalar_arguments.append(arg)
arguments.insert(0, self.funcarg(shapevar))
if posinfo:
arguments.insert(1, posinfo)
body = self.stats(self.nditerate(body))
error_value = self.constant(-1)
success_value = self.constant(0)
function = FunctionNode(self.pos, name, body,
arguments, scalar_arguments,
shapevar, posinfo,
error_value=error_value,
success_value=success_value,
omp_size=omp_size or self.constant(1024))
# prepending statements, used during specialization
function.prepending = self.stats()
function.body = self.stats(function.prepending, function.body)
self.create_function_type(function)
return function
def build_function(self, variables, body, name=None, shapevar=None):
"Convenience method for building a minivect function"
args = []
for var in variables:
if var.type.is_array:
args.append(self.array_funcarg(var))
else:
args.append(self.funcarg(var))
name = name or 'function'
return self.function(name, body, args, shapevar=shapevar)
def funcarg(self, variable, *variables):
"""
Create a (compound) function argument consisting of one or multiple
argument Variables.
"""
if variable.type is not None and variable.type.is_array:
assert not variables
return self.array_funcarg(variable)
if not variables:
variables = [variable]
return FunctionArgument(self.pos, variable, list(variables))
def array_funcarg(self, variable):
"Create an array function argument"
return ArrayFunctionArgument(
self.pos, variable.type, name=variable.name,
variable=variable,
data_pointer=self.data_pointer(variable),
#shape_pointer=self.shapevar(variable),
strides_pointer=self.stridesvar(variable))
def incref(self, var, funcname='Py_INCREF'):
"Generate a Py_INCREF() statement"
functype = minitypes.FunctionType(return_type=minitypes.void,
args=[minitypes.object_])
py_incref = self.funcname(functype, funcname)
return self.expr_stat(self.funccall(py_incref, [var]))
def decref(self, var):
"Generate a Py_DECCREF() statement"
return self.incref(var, funcname='Py_DECREF')
def print_(self, *args):
"Print out all arguments to stdout"
return PrintNode(self.pos, args=list(args))
def funccall(self, func_or_pointer, args, inline=False):
"""
Generate a call to the given function (a :py:class:`FuncNameNode`) of
:py:class:`minivect.minitypes.FunctionType` or a
pointer to a function type and the given arguments.
"""
type = func_or_pointer.type
if type.is_pointer:
type = func_or_pointer.type.base_type
return FuncCallNode(self.pos, type.return_type,
func_or_pointer=func_or_pointer, args=args,
inline=inline)
def funcname(self, type, name, is_external=True):
assert type.is_function
return FuncNameNode(self.pos, type, name=name, is_external=is_external)
def nditerate(self, body):
"""
This node wraps the given AST expression in an :py:class:`NDIterate`
node, which will be expanded by the specializers to one or several
loops.
"""
return NDIterate(self.pos, body)
def for_(self, body, init, condition, step, index=None):
"""
Create a for loop node.
:param body: loop body
:param init: assignment expression
:param condition: boolean loop condition
:param step: step clause (assignment expression)
"""
return ForNode(self.pos, init, condition, step, body, index=index)
def for_range_upwards(self, body, upper, lower=None, step=None):
"""
Create a single upwards for loop, typically used from a specializer to
replace an :py:class:`NDIterate` node.
:param body: the loop body
:param upper: expression specifying an upper bound
"""
index_type = upper.type.unqualify("const")
if lower is None:
lower = self.constant(0, index_type)
if step is None:
step = self.constant(1, index_type)
temp = self.temp(index_type)
init = self.assign_expr(temp, lower)
condition = self.binop(minitypes.bool_, '<', temp, upper)
step = self.assign_expr(temp, self.add(temp, step))
result = self.for_(body, init, condition, step)
result.target = temp
return result
def omp_for(self, for_node, if_clause):
"""
Annotate the for loop with an OpenMP parallel for clause.
:param if_clause: the expression node that determines whether the
parallel section is executed or whether it is
executed sequentially (to avoid synchronization
overhead)
"""
if isinstance(for_node, PragmaForLoopNode):
for_node = for_node.for_node
return OpenMPLoopNode(self.pos, for_node=for_node,
if_clause=if_clause,
lastprivates=[for_node.init.lhs],
privates=[])
def omp_if(self, if_body, else_body=None):
return OpenMPConditionalNode(self.pos, if_body=if_body,
else_body=else_body)
def pragma_for(self, for_node):
"""
Annotate the for loop with pragmas.
"""
return PragmaForLoopNode(self.pos, for_node=for_node)
def stats(self, *statements):
"""
Wrap a bunch of statements in an AST node.
"""
return StatListNode(self.pos, list(statements))
def expr_stat(self, expr):
"Turn an expression into a statement"
return ExprStatNode(expr.pos, type=expr.type, expr=expr)
def expr(self, stats=(), expr=None):
"Evaluate a bunch of statements before evaluating an expression."
return ExprNodeWithStatement(self.pos, type=expr.type,
stat=self.stats(*stats), expr=expr)
def if_(self, cond, body):
"If statement"
return self.if_else(cond, body, None)
def if_else_expr(self, cond, lhs, rhs):
"If/else expression, resulting in lhs if cond else rhs"
type = self.context.promote_types(lhs.type, rhs.type)
return IfElseExprNode(self.pos, type=type, cond=cond, lhs=lhs, rhs=rhs)
def if_else(self, cond, if_body, else_body):
return IfNode(self.pos, cond=cond, body=if_body, else_body=else_body)
def promote(self, dst_type, node):
"Promote or demote the node to the given dst_type"
if node.type != dst_type:
if node.is_constant and node.type.kind == dst_type.kind:
node.type = dst_type
return node
return PromotionNode(self.pos, dst_type, node)
return node
def binop(self, type, op, lhs, rhs):
"""
Binary operation on two nodes.
:param type: the result type of the expression
:param op: binary operator
:type op: str
"""
return BinopNode(self.pos, type, op, lhs, rhs)
def add(self, lhs, rhs, result_type=None, op='+'):
"""
Shorthand for the + binop. Filters out adding 0 constants.
"""
if lhs.is_constant and lhs.value == 0:
return rhs
elif rhs.is_constant and rhs.value == 0:
return lhs
if result_type is None:
result_type = self.context.promote_types(lhs.type, rhs.type)
return self.binop(result_type, op, lhs, rhs)
def sub(self, lhs, rhs, result_type=None):
return self.add(lhs, rhs, result_type, op='-')
def mul(self, lhs, rhs, result_type=None, op='*'):
"""
Shorthand for the * binop. Filters out multiplication with 1 constants.
"""
if op == '*' and lhs.is_constant and lhs.value == 1:
return rhs
elif rhs.is_constant and rhs.value == 1:
return lhs
if result_type is None:
result_type = self.context.promote_types(lhs.type, rhs.type)
return self.binop(result_type, op, lhs, rhs)
def div(self, lhs, rhs, result_type=None):
return self.mul(lhs, rhs, result_type=result_type, op='/')
def min(self, lhs, rhs):
"""
Returns min(lhs, rhs) expression.
.. NOTE:: Make lhs and rhs temporaries if they should only be
evaluated once.
"""
type = self.context.promote_types(lhs.type, rhs.type)
cmp_node = self.binop(type, '<', lhs, rhs)
return self.if_else_expr(cmp_node, lhs, rhs)
def index(self, pointer, index, dest_pointer_type=None):
"""
Index a pointer with the given index node.
:param dest_pointer_type: if given, cast the result (*after* adding
the index) to the destination type and
dereference.
"""
if dest_pointer_type:
return self.index_multiple(pointer, [index], dest_pointer_type)
return SingleIndexNode(self.pos, pointer.type.base_type,
pointer, index)
def index_multiple(self, pointer, indices, dest_pointer_type=None):
"""
Same as :py:meth:`index`, but accepts multiple indices. This is
useful e.g. after multiplication of the indices with the strides.
"""
for index in indices:
pointer = self.add(pointer, index)
if dest_pointer_type is not None:
pointer = self.cast(pointer, dest_pointer_type)
return self.dereference(pointer)
def assign_expr(self, node, value, may_reorder=False):
"Create an assignment expression assigning ``value`` to ``node``"
assert node is not None
if not isinstance(value, Node):
value = self.constant(value)
return AssignmentExpr(self.pos, node.type, node, value,
may_reorder=may_reorder)
def assign(self, node, value, may_reorder=False):
"Assignment statement"
expr = self.assign_expr(node, value, may_reorder=may_reorder)
return self.expr_stat(expr)
def dereference(self, pointer):
"Dereference a pointer"
return DereferenceNode(self.pos, pointer.type.base_type, pointer)
def unop(self, type, operator, operand):
"Unary operation. ``type`` indicates the result type of the expression."
return UnopNode(self.pos, type, operator, operand)
def coerce_to_temp(self, expr):
"Coerce the given expression to a temporary"
type = expr.type
if type.is_array:
type = type.dtype
temp = self.temp(type)
return self.expr(stats=[self.assign(temp, expr)], expr=temp)
def temp(self, type, name=None):
"Allocate a temporary of a given type"
name = name or 'temp'
repr_name = '%s%d' % (name.rstrip(string.digits),
self.temp_reprname_counter)
self.temp_reprname_counter += 1
return TempNode(self.pos, type, name=name, repr_name=repr_name)
def constant(self, value, type=None):
"""
Create a constant from a Python value. If type is not given, it is
inferred (or it will raise a
:py:class:`minivect.minierror.InferTypeError`).
"""
if type is None:
type = self._infer_type(value)
return ConstantNode(self.pos, type, value)
def variable(self, type, name):
"""
Create a variable with a name and type. Variables
may refer to function arguments, functions, etc.
"""
return Variable(self.pos, type, name)
def resolved_variable(self, array_type, name, element):
"""
Creates a node that keeps the array operand information such as the
original array type, but references an actual element in the array.
:param type: original array type
:param name: original array's name
:param element: arbitrary expression that resolves some element in the
array
"""
return ResolvedVariable(self.pos, element.type, name,
element=element, array_type=array_type)
def cast(self, node, dest_type):
"Cast node to the given destination type"
return CastNode(self.pos, dest_type, node)
def return_(self, result):
"Return a result"
return ReturnNode(self.pos, result)
def data_pointer(self, variable):
"Return the data pointer of an array variable"
assert variable.type.is_array
return DataPointer(self.pos, variable.type.dtype.pointer(),
variable)
def shape_index(self, index, function):
"Index the shape of the array operands with integer `index`"
return self.index(function.shape, self.constant(index))
def extent(self, variable, index, function):
"Index the shape of a specific variable with integer `index`"
assert variable.type.is_array
offset = function.ndim - variable.type.ndim
return self.index(function.shape, self.constant(index + offset))
def stridesvar(self, variable):
"Return the strides variable for the given array operand"
return StridePointer(self.pos, self.context.strides_type, variable)
def stride(self, variable, index):
"Return the stride of array operand `variable` at integer `index`"
return self.index(self.stridesvar(variable), self.constant(index))
def sizeof(self, type):
"Return the expression sizeof(type)"
return SizeofNode(self.pos, minitypes.size_t, sizeof_type=type)
def jump(self, label):
"Jump to a label"
return JumpNode(self.pos, label)
def jump_target(self, label):
"""
Return a target that can be jumped to given a label. The label is
shared between the jumpers and the target.
"""
return JumpTargetNode(self.pos, label)
def label(self, name):
"Return a label with a name"
return LabelNode(self.pos, name)
def raise_exc(self, posinfo, exc_var, msg_val, fmt_args):
"""
Raise an exception given the positional information (see the `posinfo`
method), the exception type (PyExc_*), a formatted message string and
a list of values to be used for the format string.
"""
return RaiseNode(self.pos, posinfo, exc_var, msg_val, fmt_args)
def posinfo(self, posvars):
"""
Return position information given a list of position variables
(filename, lineno, column). This can be used for raising exceptions.
"""
return PositionInfoNode(self.pos, posinfo=posvars)
def error_handler(self, node):
"""
Wrap the given node, which may raise exceptions, in an error handler.
An error handler allows the code to clean up before propagating the
error, and finally returning an error indicator from the function.
"""
return ErrorHandler(self.pos, body=node,
error_label=self.label('error'),
cleanup_label=self.label('cleanup'))
def wrap(self, opaque_node, specialize_node_callback, **kwds):
"""
Wrap a node and type and return a NodeWrapper node. This node
will have to be handled by the caller in a code generator. The
specialize_node_callback is called when the NodeWrapper is
specialized by a Specializer.
"""
type = minitypes.TypeWrapper(self.context.gettype(opaque_node),
self.context)
return NodeWrapper(self.context.getpos(opaque_node), type,
opaque_node, specialize_node_callback, **kwds)
#
### Vectorization Functionality
#
def _vector_type(self, base_type, size):
return minitypes.VectorType(element_type=base_type, vector_size=size)
def vector_variable(self, variable, size):
"Return a vector variable for a data pointer variable"
type = self._vector_type(variable.type.dtype, size)
if size == 4:
name = 'xmm_%s' % variable.name
else:
name = 'ymm_%s' % variable.name
return VectorVariable(self.pos, type, name, variable=variable)
def vector_load(self, data_pointer, size):
"Load a SIMD vector of size `size` given an array operand variable"
type = self._vector_type(data_pointer.type.base_type, size)
return VectorLoadNode(self.pos, type, data_pointer, size=size)
def vector_store(self, data_pointer, vector_expr):
"Store a SIMD vector of size `size`"
assert data_pointer.type.base_type == vector_expr.type.element_type
return VectorStoreNode(self.pos, None, "=", data_pointer, vector_expr)
def vector_binop(self, operator, lhs, rhs):
"Perform a binary SIMD operation between two operands of the same type"
assert lhs.type == rhs.type, (lhs.type, rhs.type)
type = lhs.type
return VectorBinopNode(self.pos, type, operator, lhs=lhs, rhs=rhs)
def vector_unop(self, type, operator, operand):
return VectorUnopNode(self.pos, type, operator, operand)
def vector_const(self, type, constant):
return ConstantVectorNode(self.pos, type, constant=constant)
def noop_expr(self):
return NoopExpr(self.pos, type=None)
class DynamicArgumentASTBuilder(ASTBuilder):
"""
Create a function with a dynamic number of arguments. This means the
signature looks like
func(int *shape, float *data[n_ops], int *strides[n_ops])
To create minivect kernels supporting this signature, set the
astbuilder_cls attribute of Context to this class.
"""
def data_pointer(self, variable):
if not hasattr(variable, 'data_pointer'):
temp = self.temp(variable.type.dtype.pointer(),
variable.name + "_data_temp")
variable.data_pointer = temp
return variable.data_pointer
def _create_data_pointer(self, function, argument, i):
variable = argument.variable
temp = self.data_pointer(variable)
p = self.index(function.data_pointers, self.constant(i))
p = self.cast(p, variable.type.dtype.pointer())
assmt = self.assign(temp, p)
function.body.stats.insert(0, assmt)
return temp
def stridesvar(self, variable):
"Return the strides variable for the given array operand"
if not hasattr(variable, 'strides_pointer'):
temp = self.temp(self.context.strides_type,
variable.name + "_stride_temp")
variable.strides_pointer = temp
return variable.strides_pointer
def _create_strides_pointer(self, function, argument, i):
variable = argument.variable
temp = self.stridesvar(variable)
strides = self.index(function.strides_pointers, self.constant(i))
function.body.stats.insert(0, self.assign(temp, strides))
return temp
def function(self, name, body, args, shapevar=None, posinfo=None,
omp_size=None):
function = super(DynamicArgumentASTBuilder, self).function(
name, body, args, shapevar, posinfo, omp_size)
function.data_pointers = self.variable(
minitypes.void.pointer().pointer(), 'data_pointers')
function.strides_pointers = self.variable(
function.shape.type.pointer(), 'strides_pointer')
i = len(function.arrays) - 1
for argument in function.arrays[::-1]:
data_p = self._create_data_pointer(function, argument, i)
strides_p = self._create_strides_pointer(function, argument, i)
argument.data_pointer = data_p
argument.strides_pointer = strides_p
argument.used = False
i -= 1
argpos = 1
if posinfo:
argpos = 4
function.arguments.insert(argpos,
self.funcarg(function.strides_pointers))
function.arguments.insert(argpos,
self.funcarg(function.data_pointers))
self.create_function_type(function)
# print function.type
# print self.context.debug_c(
# function, specializers.StridedSpecializer, type(self))
return function
Context.astbuilder_cls = UndocClassAttribute(ASTBuilder)
class Position(object):
"Each node has a position which is an instance of this type."
def __init__(self, filename, line, col):
self.filename = filename
self.line = line
self.col = col
def __str__(self):
return "%s:%d:%d" % (self.filename, self.line, self.col)
class Node(miniutils.ComparableObjectMixin):
"""
Base class for AST nodes.
"""
is_expression = False
is_statlist = False
is_constant = False
is_assignment = False
is_unop = False
is_binop = False
is_node_wrapper = False
is_data_pointer = False
is_jump = False
is_label = False
is_temp = False
is_statement = False
is_sizeof = False
is_variable = False
is_function = False
is_funcarg = False
is_array_funcarg = False
is_specialized = False
child_attrs = []
def __init__(self, pos, **kwds):
self.pos = pos
vars(self).update(kwds)
def may_error(self, context):
"""
Return whether something may go wrong and we need to jump to an
error handler.
"""
visitor = minivisitor.MayErrorVisitor(context)
visitor.visit(self)
return visitor.may_error
def print_tree(self, context):
visitor = minivisitor.PrintTree(context)
visitor.visit(self)
@property
def children(self):
return [getattr(self, attr) for attr in self.child_attrs
if getattr(self, attr) is not None]
@property
def comparison_objects(self):
type = getattr(self, 'type', None)
if type is None:
return self.children
return tuple(self.children) + (type,)
def __eq__(self, other):
# Don't use isinstance here, compare on exact type to be consistent
# with __hash__. Override where sensible
return (type(self) is type(other) and
self.comparison_objects == other.comparison_objects)
def __hash__(self):
h = hash(type(self))
for obj in self.comparison_objects:
h = h ^ hash(obj)
return h
class ExprNode(Node):
"Base class for expressions. Each node has a type."
is_expression = True
hoistable = False
need_temp = False
def __init__(self, pos, type, **kwds):
super(ExprNode, self).__init__(pos, **kwds)
self.type = type
class FunctionNode(Node):
"""
Function node. error_value and success_value are returned in case of
exceptions and success respectively.
.. attribute:: shape
the broadcast shape for all operands
.. attribute:: ndim
the ndim of the total broadcast shape
.. attribute:: arguments
all array arguments
.. attribute:: scalar arguments
all non-array arguments
.. attribute:: posinfo
the position variables we can write to in case of an exception
.. attribute:: omp_size
the threshold of minimum data size needed before starting a parallel
section. May be overridden at any time before specialization time.
"""
is_function = True
child_attrs = ['body', 'arguments', 'scalar_arguments']
def __init__(self, pos, name, body, arguments, scalar_arguments,
shape, posinfo, error_value, success_value, omp_size):
super(FunctionNode, self).__init__(pos)
self.type = None # see ASTBuilder.create_function_type
self.name = name
self.body = body
self.arrays = [arg for arg in arguments if arg.type and arg.type.is_array]
self.arguments = arguments
self.scalar_arguments = scalar_arguments
self.shape = shape
self.posinfo = posinfo
self.error_value = error_value
self.success_value = success_value
self.omp_size = omp_size
self.args = dict((v.name, v) for v in arguments)
self.ndim = max(arg.type.ndim for arg in arguments
if arg.type and arg.type.is_array)
class FuncCallNode(ExprNode):
"""
Call a function given a pointer or its name (FuncNameNode)
"""
inline = False
child_attrs = ['func_or_pointer', 'args']
class FuncNameNode(ExprNode):
"""
Load an external function by its name.
"""
name = None
class ReturnNode(Node):
"Return an operand"
child_attrs = ['operand']
def __init__(self, pos, operand):
super(ReturnNode, self).__init__(pos)
self.operand = operand
class RaiseNode(Node):
"Raise a Python exception. The callee must hold the GIL."
child_attrs = ['posinfo', 'exc_var', 'msg_val', 'fmt_args']
def __init__(self, pos, posinfo, exc_var, msg_val, fmt_args):
super(RaiseNode, self).__init__(pos)
self.posinfo = posinfo
self.exc_var, self.msg_val, self.fmt_args = (exc_var, msg_val, fmt_args)
class PositionInfoNode(Node):
"""
Node that holds a position of where an error occurred. This position
needs to be reported back to the caller if the caller supports it.
"""
class FunctionArgument(ExprNode):
"""
Argument to the FunctionNode. Array arguments contain multiple
actual arguments, e.g. the data and stride pointer.
.. attribute:: variable
some argument to the function (array or otherwise)
.. attribute:: variables
the actual variables this operand should be unpacked into
"""
child_attrs = ['variables']
is_funcarg = True
used = True
def __init__(self, pos, variable, variables):
super(FunctionArgument, self).__init__(pos, variable.type)
self.variables = variables
self.variable = variable
self.name = variable.name
self.args = dict((v.name, v) for v in variables)
class ArrayFunctionArgument(ExprNode):
"Array operand to the function"
child_attrs = ['data_pointer', 'strides_pointer']
is_array_funcarg = True
used = True
def __init__(self, pos, type, data_pointer, strides_pointer, **kwargs):
super(ArrayFunctionArgument, self).__init__(pos, type, **kwargs)
self.data_pointer = data_pointer
self.strides_pointer = strides_pointer
self.variables = [data_pointer, strides_pointer]
class PrintNode(Node):
"Print node for some arguments"
child_attrs = ['args']
class NDIterate(Node):
"""
Iterate in N dimensions. See :py:class:`ASTBuilder.nditerate`
"""
child_attrs = ['body']
def __init__(self, pos, body):
super(NDIterate, self).__init__(pos)
self.body = body
class ForNode(Node):
"""
A for loop, see :py:class:`ASTBuilder.for_`
"""
child_attrs = ['init', 'condition', 'step', 'body']
is_controlling_loop = False
is_tiling_loop = False
should_vectorize = False
is_fixup = False
def __init__(self, pos, init, condition, step, body, index=None):
super(ForNode, self).__init__(pos)
self.init = init
self.condition = condition
self.step = step
self.body = body
self.index = index or init.lhs
class IfNode(Node):
"An 'if' statement, see A for loop, see :py:class:`ASTBuilder.if_`"
child_attrs = ['cond', 'body', 'else_body']
should_vectorize = False
is_fixup = False
class StatListNode(Node):
"""
A node to wrap multiple statements, see :py:class:`ASTBuilder.stats`
"""
child_attrs = ['stats']
is_statlist = True
def __init__(self, pos, statements):
super(StatListNode, self).__init__(pos)
self.stats = statements
class ExprStatNode(Node):
"Turn an expression into a statement, see :py:class:`ASTBuilder.expr_stat`"
child_attrs = ['expr']
is_statement = True
class ExprNodeWithStatement(Node):
child_attrs = ['stat', 'expr']
class NodeWrapper(ExprNode):
"""
Adapt an opaque node to provide a consistent interface. This has to be
handled by the user's specializer. See :py:class:`ASTBuilder.wrap`
"""
is_node_wrapper = True
is_constant_scalar = False
child_attrs = []
def __init__(self, pos, type, opaque_node, specialize_node_callback,
**kwds):
super(NodeWrapper, self).__init__(pos, type)
self.opaque_node = opaque_node
self.specialize_node_callback = specialize_node_callback
vars(self).update(kwds)
def __hash__(self):
return hash(self.opaque_node)
def __eq__(self, other):
if getattr(other, 'is_node_wrapper', False):
return self.opaque_node == other.opaque_node
return NotImplemented
def __deepcopy__(self, memo):
kwds = dict(vars(self))
kwds.pop('opaque_node')
kwds.pop('specialize_node_callback')
kwds = copy.deepcopy(kwds, memo)
opaque_node = self.specialize_node_callback(self, memo)
return type(self)(opaque_node=opaque_node,
specialize_node_callback=self.specialize_node_callback,
**kwds)
class BinaryOperationNode(ExprNode):
"Base class for binary operations"
child_attrs = ['lhs', 'rhs']
def __init__(self, pos, type, lhs, rhs, **kwds):
super(BinaryOperationNode, self).__init__(pos, type, **kwds)
self.lhs, self.rhs = lhs, rhs
class BinopNode(BinaryOperationNode):
"Node for binary operations"
is_binop = True
def __init__(self, pos, type, operator, lhs, rhs, **kwargs):
super(BinopNode, self).__init__(pos, type, lhs, rhs, **kwargs)
self.operator = operator
@property
def comparison_objects(self):
return (self.operator, self.lhs, self.rhs)
class SingleOperandNode(ExprNode):
"Base class for operations with one operand"
child_attrs = ['operand']
def __init__(self, pos, type, operand, **kwargs):
super(SingleOperandNode, self).__init__(pos, type, **kwargs)
self.operand = operand
class AssignmentExpr(BinaryOperationNode):
is_assignment = True
class IfElseExprNode(ExprNode):
child_attrs = ['cond', 'lhs', 'rhs']
class PromotionNode(SingleOperandNode):
pass
class UnopNode(SingleOperandNode):
is_unop = True
def __init__(self, pos, type, operator, operand, **kwargs):
super(UnopNode, self).__init__(pos, type, operand, **kwargs)
self.operator = operator
@property
def comparison_objects(self):
return (self.operator, self.operand)
class CastNode(SingleOperandNode):
is_cast = True
class DereferenceNode(SingleOperandNode):
is_dereference = True
class SingleIndexNode(BinaryOperationNode):
is_index = True
class ConstantNode(ExprNode):
is_constant = True
def __init__(self, pos, type, value):
super(ConstantNode, self).__init__(pos, type)
self.value = value
class SizeofNode(ExprNode):
is_sizeof = True
class Variable(ExprNode):
"""
Represents use of a function argument in the function.
"""
is_variable = True
mangled_name = None
hoisted = False
def __init__(self, pos, type, name, **kwargs):
super(Variable, self).__init__(pos, type, **kwargs)
self.name = name
self.array_type = None
def __eq__(self, other):
return isinstance(other, Variable) and self.name == other.name
def __hash__(self):
return hash(self.name)
class ResolvedVariable(Variable):
child_attrs = ['element']
def __eq__(self, other):
return (isinstance(other, ResolvedVariable) and
self.element == other.element)
class ArrayAttribute(Variable):
"Denotes an attribute of array operands, e.g. the data or stride pointers"
def __init__(self, pos, type, arrayvar):
super(ArrayAttribute, self).__init__(pos, type,
arrayvar.name + self._name)
self.arrayvar = arrayvar
class DataPointer(ArrayAttribute):
"Reference to the start of an array operand"
_name = '_data'
class StridePointer(ArrayAttribute):
"Reference to the stride pointer of an array variable operand"
_name = '_strides'
#class ShapePointer(ArrayAttribute):
# "Reference to the shape pointer of an array operand."
# _name = '_shape'
class TempNode(Variable):
"A temporary of a certain type"
is_temp = True
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(id(self))
class OpenMPLoopNode(Node):
"""
Execute a loop in parallel.
"""
child_attrs = ['for_node', 'if_clause', 'lastprivates', 'privates']
class OpenMPConditionalNode(Node):
"""
Execute if_body if _OPENMP, otherwise execute else_body.
"""
child_attrs = ['if_body', 'else_body']
class PragmaForLoopNode(Node):
"""
Generate compiler-specific pragmas to aid things like SIMDization.
"""
child_attrs = ['for_node']
class ErrorHandler(Node):
"""
A node to handle errors. If there is an error handler in the outer scope,
the specializer will first make this error handler generate disposal code
for the wrapped AST body, and then jump to the error label of the parent
error handler. At the outermost (function) level, the error handler simply
returns an error indication.
.. attribute:: error_label
point to jump to in case of an error
.. attribute:: cleanup_label
point to jump to in the normal case
It generates the following:
.. code-block:: c
error_var = 0;
...
goto cleanup;
error:
error_var = 1;
cleanup:
...
if (error_var)
goto outer_error_label;
"""
child_attrs = ['error_var_init', 'body', 'cleanup_jump',
'error_target_label', 'error_set', 'cleanup_target_label',
'cascade']
error_var_init = None
cleanup_jump = None
error_target_label = None
error_set = None
cleanup_target_label = None
cascade = None
class JumpNode(Node):
"A jump to a jump target"
child_attrs = ['label']
def __init__(self, pos, label):
Node.__init__(self, pos)
self.label = label
class JumpTargetNode(JumpNode):
"A point to jump to"
class LabelNode(ExprNode):
"A goto label or memory address that we can jump to"
def __init__(self, pos, name):
super(LabelNode, self).__init__(pos, None)
self.name = name
self.mangled_name = None
class NoopExpr(ExprNode):
"Do nothing expression"
#
### Vectorization Functionality
#
class VectorVariable(Variable):
child_attrs = ['variable']
class VectorLoadNode(SingleOperandNode):
"Load a SIMD vector"
class VectorStoreNode(BinopNode):
"Store a SIMD vector"
class VectorBinopNode(BinopNode):
"Binary operation on SIMD vectors"
class VectorUnopNode(SingleOperandNode):
"Unary operation on SIMD vectors"
class ConstantVectorNode(ExprNode):
"Load the constant into the vector register"
|
markflorisson/minivect
|
minivect/miniast.py
|
Python
|
bsd-2-clause
| 49,678
|
[
"VisIt"
] |
368d73ae3f9ec8e8335725f7468550d9cc759ac0cb8368c0488a1877e6a44936
|
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
import os
from GangaGaudi.Lib.RTHandlers.RunTimeHandlerUtils import get_share_path, master_sandbox_prepare, sandbox_prepare, script_generator
from GangaDirac.Lib.RTHandlers.DiracRTHUtils import dirac_inputdata, dirac_ouputdata, mangle_job_name, diracAPI_script_template, diracAPI_script_settings, API_nullifier, dirac_outputfile_jdl
from GangaDirac.Lib.Files.DiracFile import DiracFile
from GangaCore.GPIDev.Lib.File.OutputFileManager import getOutputSandboxPatterns, getWNCodeForOutputPostprocessing
from GangaCore.GPIDev.Adapters.IRuntimeHandler import IRuntimeHandler
from GangaCore.GPIDev.Adapters.StandardJobConfig import StandardJobConfig
from GangaCore.Core.exceptions import ApplicationConfigurationError
from GangaCore.GPIDev.Lib.File import FileBuffer
from GangaCore.GPIDev.Lib.File.File import File
from GangaCore.Utility.Config import getConfig
from GangaCore.Utility.logging import getLogger
from GangaCore.Utility.util import unique
logger = getLogger()
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
class Im3ShapeDiracRTHandler(IRuntimeHandler):
"""The runtime handler to run the Im3ShapeApp on the Dirac backend"""
def master_prepare(self, app, appmasterconfig):
"""
This function prepares the application of a master job during submit. Im3ShapeApp does not currently make use of this step, but the hook is kept until it is confirmed that it can safely be removed.
Args:
app (IApplication): This is the application given in the master job
appmasterconfig (tuple): This is the configuration used to prepare the app in the master job # TODO check type and this interface
"""
inputsandbox, outputsandbox = master_sandbox_prepare(app, appmasterconfig)
return StandardJobConfig(inputbox=unique(inputsandbox),
outputbox=unique(outputsandbox))
def prepare(self, app, appsubconfig, appmasterconfig, jobmasterconfig):
"""
This function prepares the application of the actual job being submitted, master or not
Args:
app (IApplication): This is the application actually being submitted belonging to the master or sub job being configured
appsubconfig (tuple): This is used to prepare the inputsandbox according to the configuration for each subjob if it varies
appmasterconfig (tuple): This is also used to prepare the inputsandbox but contains the config of the app for the master job
jobmasterconfig (StandardJobConfig): This is the configuration of the master job which may or may not be the same job as owning the app
"""
# Construct some common objects used in job submission here
inputsandbox, outputsandbox = sandbox_prepare(app, appsubconfig, appmasterconfig, jobmasterconfig)
input_data, parametricinput_data = dirac_inputdata(app, hasOtherInputData=True)
job = app.getJobObject()
# Construct the im3shape-script which is used by this job. i.e. the script and full command line to be used in this job
exe_script_name = 'im3shape-script.py'
output_filename = os.path.basename(job.inputdata[0].lfn) + '.' + str(app.rank) + '.' + str(app.size)
im3shape_args = ' '.join([ os.path.basename(job.inputdata[0].lfn), os.path.basename(app.ini_location.namePattern), # input.fz, config.ini
app.catalog, output_filename, # catalog, output
str(app.rank), str(app.size) ])
full_cmd = app.exe_name + ' ' + im3shape_args
outputfiles = [this_file for this_file in job.outputfiles if isinstance(this_file, DiracFile)]
inputsandbox.append(FileBuffer( name=exe_script_name,
contents=script_generator(Im3Shape_script_template(),
## ARGS for app from job.app
RUN_DIR = app.run_dir,
BLACKLIST = os.path.basename(app.blacklist.namePattern),
COMMAND = full_cmd,
## Stuff for Ganga
OUTPUTFILES = repr([this_file.namePattern for this_file in job.outputfiles]),
OUTPUTFILESINJECTEDCODE = getWNCodeForOutputPostprocessing(job, ' '),
),
executable=True)
)
# TODO once there is a common IApplication.getMeFilesForThisApp function, replace this list with a getter, as it shouldn't really be hard-coded
app_file_list = [app.im3_location, app.ini_location, app.blacklist]
app_file_list = [this_file for this_file in app_file_list if isinstance(this_file, DiracFile)]
job.inputfiles.extend(app_file_list)
# Slightly mis-using this here but it would be nice to have these files
#job.inputfiles.extend(job.inputdata)
# NOTE special case for replicas: replicate string must be empty for no
# replication
dirac_script = script_generator(diracAPI_script_template(),
DIRAC_IMPORT = 'from DIRAC.Interfaces.API.Dirac import Dirac',
DIRAC_JOB_IMPORT = 'from DIRAC.Interfaces.API.Job import Job',
DIRAC_OBJECT = 'Dirac()',
JOB_OBJECT = 'Job()',
NAME = mangle_job_name(app),
EXE = exe_script_name,
EXE_ARG_STR = '',
EXE_LOG_FILE = 'Ganga_Executable.log',
ENVIRONMENT = None,
INPUTDATA = input_data,
PARAMETRIC_INPUTDATA = parametricinput_data,
OUTPUT_SANDBOX = API_nullifier(outputsandbox),
OUTPUTFILESSCRIPT = dirac_outputfile_jdl(outputfiles, False),
OUTPUT_PATH = "", # job.fqid,
SETTINGS = diracAPI_script_settings(app),
DIRAC_OPTS = job.backend.diracOpts,
REPLICATE = 'True' if getConfig('DIRAC')['ReplicateOutputData'] else '',
# leave the sandbox for altering later as needs
# to be done in backend.submit to combine master.
# Note only using 2 #s as auto-remove 3
INPUT_SANDBOX = '##INPUT_SANDBOX##'
)
return StandardJobConfig(dirac_script,
inputbox=unique(inputsandbox),
outputbox=unique(outputsandbox))
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
def Im3Shape_script_template():
"""
This function returns the script to be run on the worker nodes for Im3ShapeApp
"""
script_template = """#!/usr/bin/env python
'''Script to run Im3Shape application'''
from __future__ import print_function
from os import system, environ, pathsep, getcwd, listdir, path, chdir
from shutil import move
from glob import glob
from subprocess import call
from sys import exit
from copy import deepcopy
def run_Im3ShapeApp():
# Some useful paths to store
wn_dir = getcwd()
run_dir = '###RUN_DIR###'
run_dir = path.join(wn_dir, run_dir)
## Move all needed files into run_dir
#Blacklist is currently hard-coded lets move the file on the WN
blacklist_file = '###BLACKLIST###'
move(path.join(wn_dir, blacklist_file), path.join(run_dir, 'blacklist-y1.txt'))
# Move all .txt, .ini and .fz files in the WN to the run_dir of the executable by convention
for pattern in ['./*.txt', './*.fz', './*.ini']:
for this_file in glob(pattern):
move(path.join(wn_dir, this_file), run_dir)
## Fully construct the command we're about to run
chdir(run_dir)
full_cmd = '###COMMAND###'
print("full_cmd: %s" % full_cmd)
my_env = deepcopy(environ)
my_env['PATH'] = getcwd() + ':' + my_env['PATH']
rc = call(full_cmd, env=my_env, shell=True)
output_filePattern = ###OUTPUTFILES###
for pattern in output_filePattern:
for this_file in glob(pattern):
try:
move(path.join(run_dir, this_file), wn_dir)
except:
# Let the job fail at a later stage if 1 of the outputs are missing,
# don't crash, we may still be returning useful stuff
pass
print("files in run_dir: " + str(listdir('.')))
chdir(wn_dir)
return rc
# Main
if __name__ == '__main__':
err = None
try:
rc = run_Im3ShapeApp()
except Exception as x:
rc = -9999
print('Exception occured in running app.')
print('Err was: ' + str(x))
print("files on WN: " + str(listdir('.')))
print("environ: %s" % environ)
raise
print("files on WN: " + str(listdir('.')))
###OUTPUTFILESINJECTEDCODE###
exit(rc)
"""
return script_template
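# --- Illustrative sketch (not part of the original source) -----------------
# The ###NAME### placeholders in the template above are filled in by
# script_generator(), imported from GangaGaudi. A minimal stand-in for that
# substitution step could look like the helper below; the real
# script_generator may behave differently, so treat this purely as an
# illustration of the idea.
def _example_fill_template(template, **values):
    # Replace each ###KEY### marker with the string form of the given value.
    for key, value in values.items():
        template = template.replace('###' + str(key) + '###', str(value))
    return template
# e.g. _example_fill_template(Im3Shape_script_template(), RUN_DIR='im3shape',
#                             COMMAND='im3shape input.fz config.ini ...')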
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
from GangaCore.GPIDev.Adapters.ApplicationRuntimeHandlers import allHandlers
allHandlers.add('Im3ShapeApp', 'Dirac', Im3ShapeDiracRTHandler)
|
ganga-devs/ganga
|
ganga/GangaLSST/Lib/Im3ShapeApp/Im3ShapeRTHandler.py
|
Python
|
gpl-3.0
| 9,462
|
[
"DIRAC"
] |
90db995d3e9d6f0a5c2d868b6e09c3ea5fcda6d9702e262805e72e2f9115f6af
|
# IM - Infrastructure Manager
# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import yaml
import json
import os
import string
import random
import logging
import threading
import IM.InfrastructureInfo
import IM.InfrastructureList
from IM.VMRC import VMRC
from IM.CloudInfo import CloudInfo
from IM.auth import Authentication
from IM.recipe import Recipe
from IM.config import Config
from IM.VirtualMachine import VirtualMachine
from radl import radl_parse
from radl.radl import Feature, RADL
from radl.radl_json import dump_radl as dump_radl_json
from IM.openid.JWT import JWT
from IM.openid.OpenIDClient import OpenIDClient
if Config.MAX_SIMULTANEOUS_LAUNCHES > 1:
from multiprocessing.pool import ThreadPool
try:
unicode("hola")
except NameError:
unicode = str
class UnauthorizedUserException(Exception):
""" Invalid InfrastructureManager credentials to access an infrastructure"""
def __init__(self, msg="Access to this infrastructure not granted."):
Exception.__init__(self, msg)
self.message = msg
class IncorrectInfrastructureException(Exception):
""" Invalid infrastructure ID or access not granted. """
def __init__(self, msg="Invalid infrastructure ID or access not granted."):
Exception.__init__(self, msg)
self.message = msg
class DeletedInfrastructureException(Exception):
""" Deleted infrastructure. """
def __init__(self, msg="Deleted infrastructure."):
Exception.__init__(self, msg)
self.message = msg
class InvaliddUserException(Exception):
""" Invalid InfrastructureManager credentials """
def __init__(self, msg="Invalid InfrastructureManager credentials"):
Exception.__init__(self, msg)
self.message = msg
class IncorrectVMCrecentialsException(Exception):
""" Invalid InfrastructureManager credentials """
def __init__(self, msg="Incorrect VM credentials"):
Exception.__init__(self, msg)
self.message = msg
class DisabledFunctionException(Exception):
""" Disabled function called"""
def __init__(self, msg="Function currently disabled."):
Exception.__init__(self, msg)
self.message = msg
class InfrastructureManager:
"""
Front-end to the functionality of the service.
"""
logger = logging.getLogger('InfrastructureManager')
"""Logger object."""
@staticmethod
def _reinit():
"""Restart the class attributes to initial values."""
IM.InfrastructureList.InfrastructureList._reinit()
@staticmethod
def _compute_deploy_groups(radl):
"""
Group the virtual machines that had to be deployed together.
Args:
- radl(RADL): RADL to consider.
Return(list of list of deploy): list of group of deploys.
"""
# If some virtual machine is in two private networks, the machines in both
# networks will be in the same group
# NOTE: net_groups is a *Disjoint-set data structure*
net_groups = {}
for net in radl.networks:
net_groups[net.id] = net.id
def root(n):
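# Follow the parent links in net_groups until reaching the
# representative element of the set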
while True:
n0 = net_groups[n]
if n0 == n:
return n
n = n0
for d in radl.deploys:
private_nets = [net.id for net in radl.networks if not net.isPublic() and
net.id in radl.get_system_by_name(d.id).getNetworkIDs()]
if not private_nets:
continue
for n in private_nets[1:]:
net_groups[root(n)] = net_groups[root(private_nets[0])]
deploy_groups = []
deploy_groups_net = {}
for d in radl.deploys:
private_nets = [net.id for net in radl.networks if not net.isPublic() and
net.id in radl.get_system_by_name(d.id).getNetworkIDs()]
# If no private net is set, every launch can go in a separate group
if not private_nets:
for _ in range(d.vm_number):
d0 = d.clone()
d0.vm_number = 1
deploy_groups.append([d0])
continue
# Otherwise the deploy goes to some group
net = net_groups[root(private_nets[0])]
if net not in deploy_groups_net:
deploy_groups_net[net] = [d]
else:
deploy_groups_net[net].append(d)
deploy_groups.extend(deploy_groups_net.values())
return deploy_groups
@staticmethod
def _launch_deploy(sel_inf, deploy, cloud_id, cloud, concrete_systems, radl, auth, deployed_vm):
"""Launch a deploy."""
if deploy.vm_number <= 0:
InfrastructureManager.logger.warning(
"Inf ID: %s: deploy %s with 0 num: Ignoring." % (sel_inf.id, deploy.id))
return
if not deploy.id.startswith(IM.InfrastructureInfo.InfrastructureInfo.FAKE_SYSTEM):
concrete_system = concrete_systems[cloud_id][deploy.id][0]
launched_vms = []
launch_radl = radl.clone()
requested_radl = radl.clone()
requested_radl.systems = [radl.get_system_by_name(deploy.id)]
if not concrete_system:
InfrastructureManager.logger.error("Inf ID: " + str(sel_inf.id) +
". Error, no concrete system to deploy: " +
deploy.id + " in cloud: " + cloud_id +
". Check if a correct image is being used")
for _ in range(deploy.vm_number):
launched_vms.append((False, "Error, no concrete system to deploy: " + deploy.id +
" in cloud: " + cloud_id + ". Check if a correct image is being used"))
else:
launch_radl = radl.clone()
launch_radl.systems = [concrete_system.clone()]
requested_radl = radl.clone()
requested_radl.systems = [radl.get_system_by_name(concrete_system.name)]
(username, _, _, _) = concrete_system.getCredentialValues()
if not username:
for _ in range(deploy.vm_number):
launched_vms.append((False, "No username for deploy: " + deploy.id))
else:
InfrastructureManager.logger.debug("Inf ID: %s. Launching %d VMs of type %s" %
(sel_inf.id, deploy.vm_number, concrete_system.name))
launched_vms = cloud.cloud.getCloudConnector(sel_inf).launch_with_retry(
sel_inf, launch_radl, requested_radl, deploy.vm_number, auth, Config.MAX_VM_FAILS,
Config.DELAY_BETWEEN_VM_RETRIES)
# this must never happen ...
if len(launched_vms) < deploy.vm_number:
for _ in range(deploy.vm_number - len(launched_vms)):
launched_vms.append((False, "Error in deploy: " + deploy.id))
for success, launched_vm in launched_vms:
if success:
InfrastructureManager.logger.debug("Inf ID: %s. VM successfully launched: %s" % (sel_inf.id,
launched_vm.id))
deployed_vm.setdefault(deploy, []).append(launched_vm)
deploy.cloud_id = cloud_id
else:
InfrastructureManager.logger.error("Inf ID: %s. Error launching some of the "
"VMs: %s" % (sel_inf.id, launched_vm))
vm = VirtualMachine(sel_inf, None, cloud.cloud, launch_radl, requested_radl)
vm.state = VirtualMachine.FAILED
vm.info.systems[0].setValue('state', VirtualMachine.FAILED)
vm.error_msg = "Error launching the VMs of type %s to cloud ID %s of type %s. %s" % (
deploy.id, cloud.cloud.id, cloud.cloud.type, launched_vm)
sel_inf.add_vm(vm)
deployed_vm.setdefault(deploy, []).append(vm)
deploy.cloud_id = cloud_id
@staticmethod
def get_infrastructure(inf_id, auth):
"""Return infrastructure info with some id if valid authorization provided."""
if inf_id not in IM.InfrastructureList.InfrastructureList.get_inf_ids():
InfrastructureManager.logger.error("Error, incorrect Inf ID: %s" % inf_id)
raise IncorrectInfrastructureException()
sel_inf = IM.InfrastructureList.InfrastructureList.get_infrastructure(inf_id)
if not sel_inf:
InfrastructureManager.logger.error("Error loading Inf ID: %s" % inf_id)
raise IncorrectInfrastructureException("Error loading Inf ID data.")
if not sel_inf.is_authorized(auth):
InfrastructureManager.logger.error("Access Error to Inf ID: %s" % inf_id)
raise UnauthorizedUserException()
if sel_inf.deleted:
InfrastructureManager.logger.error("Inf ID: %s is deleted." % inf_id)
raise DeletedInfrastructureException()
return sel_inf
@staticmethod
def get_vm_from_inf(inf_id, vm_id, auth):
"""Return VirtualMachie info with some id of an infrastructure if valid authorization provided."""
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
return sel_inf.get_vm(vm_id)
@staticmethod
def Reconfigure(inf_id, radl_data, auth, vm_list=None):
"""
Add and update RADL definitions and reconfigure the infrastructure.
Args:
- inf_id(str): infrastructure id.
- radl_data(str): RADL description, it can be empty.
- auth(Authentication): parsed authentication tokens.
- vm_list(list of int): List of VM ids to reconfigure. If None all VMs will be reconfigured.
Return: "" if success.
"""
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info("Reconfiguring the Inf ID: " + str(inf_id))
if isinstance(radl_data, RADL):
radl = radl_data
else:
radl = radl_parse.parse_radl(radl_data)
InfrastructureManager.logger.debug("Inf ID: " + str(inf_id) + ": \n" + str(radl))
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
# Update infrastructure RADL with this new RADL
# Add or update configures
for s in radl.configures:
# first check that the YAML is correct
try:
yaml.safe_load(s.recipes)
except Exception as ex:
raise Exception("Error parsing YAML: %s" % str(ex))
sel_inf.radl.add(s.clone(), "replace")
InfrastructureManager.logger.info(
"Inf ID: " + sel_inf.id + ": " +
"(Re)definition of %s %s" % (type(s), s.getId()))
# and update contextualize
sel_inf.radl.add(radl.contextualize)
# Check if the user want to set a new password to any system:
for system in sel_inf.radl.systems:
new_system = radl.get_system_by_name(system.name)
if new_system:
new_creds = new_system.getCredentialValues(new=True)
# The user has specified a credential:
if len(list(set(new_creds))) > 1 or list(set(new_creds))[0] is not None:
creds = system.getCredentialValues()
if new_creds != creds:
# The credentials have changed
(_, password, public_key, private_key) = new_creds
system.setCredentialValues(
password=password, public_key=public_key, private_key=private_key, new=True)
# The user has new applications
curr_apps = system.getValue("disk.0.applications")
curr_apps_names = {}
if curr_apps:
for app_name in curr_apps.keys():
orig_app_name = app_name
if "," in app_name:
# remove version substring
pos = app_name.find(",")
app_name = app_name[:pos]
curr_apps_names[app_name] = orig_app_name
new_apps = new_system.getValue("disk.0.applications")
if new_apps:
for app_name, app in new_apps.items():
orig_app_name = app_name
if "," in app_name:
# remove version substring
pos = app_name.find(",")
app_name = app_name[:pos]
if app_name in list(curr_apps_names.keys()):
del curr_apps[curr_apps_names[app_name]]
curr_apps[orig_app_name] = app
# Mark all the virtual machines to be reconfigured
InfrastructureManager.logger.info("Contextualize the Inf ID: " + sel_inf.id)
# reset ansible_configured to force the re-installation of galaxy roles
sel_inf.ansible_configured = None
sel_inf.Contextualize(auth, vm_list)
IM.InfrastructureList.InfrastructureList.save_data(inf_id)
return ""
@staticmethod
def _compute_score(system_score, requested_radl):
"""
Computes the score of a concrete radl comparing with the requested one.
Args:
- system_score(tuple(radl.system, int)): System object to deploy and the score
- requested_radl(radl.system): Original system requested by the user.
Return(tuple(radl.system, int)): System object to deploy and the new computed score
"""
concrete_system, score = system_score
req_apps = requested_radl.getApplications()
inst_apps = concrete_system.getApplications()
# Set highest priority to the original score
score *= 10000
# For each requested app installed in the VMI score with +100
if inst_apps:
for req_app in req_apps:
for inst_app in inst_apps:
if inst_app.isNewerThan(req_app):
score += 100
# For each installed app that is not requested score with -1
if inst_apps:
for inst_app in inst_apps:
if inst_app in req_apps:
# Check the version
for req_app in req_apps:
if req_app.isNewerThan(inst_app):
score -= 1
elif inst_app.getValue("version"):
# Only penalise the score when the unrequested installed app pins a
# specific version; extra packages in the image that the user might
# still want are not penalised
score -= 1
return concrete_system, score
@staticmethod
def systems_with_vmrc(sel_inf, radl, auth):
"""
Concrete systems using VMRC
NOTE: consider not-fake deploys (vm_number > 0)
"""
# Get VMRC credentials
vmrc_list = []
for vmrc_elem in auth.getAuthInfo('VMRC'):
if 'host' in vmrc_elem and 'username' in vmrc_elem and 'password' in vmrc_elem:
vmrc_list.append(VMRC(vmrc_elem['host'], vmrc_elem['username'], vmrc_elem['password']))
systems_with_vmrc = {}
for system_id in set([d.id for d in radl.deploys if d.vm_number > 0]):
s = radl.get_system_by_name(system_id)
if not s.getValue("disk.0.image.url") and len(vmrc_list) == 0:
raise Exception("No correct VMRC auth data provided nor image URL")
if Config.SINGLE_SITE:
image_id = os.path.basename(s.getValue("disk.0.image.url"))
url_prefix = Config.SINGLE_SITE_IMAGE_URL_PREFIX
if not url_prefix.endswith("/"):
url_prefix = url_prefix + "/"
s.setValue("disk.0.image.url", url_prefix + image_id)
# Remove the requested apps from the system
s_without_apps = radl.get_system_by_name(system_id).clone()
s_without_apps.delValue("disk.0.applications")
# Set the default values for cpu, memory
defaults = (Feature("cpu.count", ">=", Config.DEFAULT_VM_CPUS),
Feature("memory.size", ">=", Config.DEFAULT_VM_MEMORY, Config.DEFAULT_VM_MEMORY_UNIT),
Feature("cpu.arch", "=", Config.DEFAULT_VM_CPU_ARCH))
for f in defaults:
if not s_without_apps.hasFeature(f.prop, check_softs=True):
s_without_apps.addFeature(f)
vmrc_res = [s0 for vmrc in vmrc_list for s0 in vmrc.search_vm(s)]
# Check that now the image URL is in the RADL
if not s.getValue("disk.0.image.url") and not vmrc_res:
sel_inf.add_cont_msg("No VMI obtained from VMRC to system: " + system_id)
raise Exception("No VMI obtained from VMRC to system: " + system_id)
n = [s_without_apps.clone().applyFeatures(s0, conflict="other", missing="other")
for s0 in vmrc_res]
systems_with_vmrc[system_id] = n if n else [s_without_apps]
return systems_with_vmrc
@staticmethod
def sort_by_score(sel_inf, concrete_systems, cloud_list, deploy_groups, auth):
"""
Sort by score the cloud providers
NOTE: consider fake deploys (vm_number == 0)
"""
deploys_group_cloud = {}
# List of cloud ids in the order given in the auth data, reversed so that
# the descending sort below prefers the cloud listed first when scores tie
ordered_cloud_list = [c.id for c in CloudInfo.get_cloud_list(auth)]
ordered_cloud_list.reverse()
for deploy_group in deploy_groups:
suggested_cloud_ids = list(set([d.cloud_id for d in deploy_group if d.cloud_id]))
if len(suggested_cloud_ids) > 1:
raise Exception("Two deployments that have to be launched in the same cloud provider "
"are asked to be deployed in different cloud providers: %s" % deploy_group)
elif len(suggested_cloud_ids) == 1:
if suggested_cloud_ids[0] not in cloud_list:
InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": Cloud Provider list:")
InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + " - " + str(cloud_list))
raise Exception("No auth data for cloud with ID: %s" % suggested_cloud_ids[0])
else:
cloud_list0 = [(suggested_cloud_ids[0], cloud_list[suggested_cloud_ids[0]])]
else:
cloud_list0 = cloud_list.items()
scored_clouds = []
for cloud_id, _ in cloud_list0:
total = 0
for d in deploy_group:
if d.vm_number:
total += d.vm_number * concrete_systems[cloud_id][d.id][1]
else:
total += 1
scored_clouds.append((cloud_id, total))
# Order the clouds first by the score and then using the cloud
# order in the auth data
sorted_scored_clouds = sorted(scored_clouds,
key=lambda x: (x[1], ordered_cloud_list.index(x[0])),
reverse=True)
if sorted_scored_clouds and sorted_scored_clouds[0]:
deploys_group_cloud[id(deploy_group)] = sorted_scored_clouds[0][0]
else:
sel_inf.configured = False
sel_inf.add_cont_msg("No cloud provider available")
raise Exception("No cloud provider available")
return deploys_group_cloud
@staticmethod
def AddResource(inf_id, radl_data, auth, context=True):
"""
Add the resources in the RADL to the infrastructure.
Args:
- inf_id(str): infrastructure id.
- radl(str): RADL description.
- auth(Authentication): parsed authentication tokens.
- context(bool): Flag to specify if the ctxt step will be made
Return(list of int): ids of the new virtual machine created.
"""
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info("Adding resources to Inf ID: " + str(inf_id))
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
try:
if isinstance(radl_data, RADL):
radl = radl_data
else:
radl = radl_parse.parse_radl(radl_data)
InfrastructureManager.logger.debug("Inf ID: " + str(inf_id) + ": \n" + str(radl))
radl.check()
# Update infrastructure RADL with this new RADL
sel_inf.complete_radl(radl)
sel_inf.update_radl(radl, [])
# If any deploy is defined, only update definitions.
if not radl.deploys:
InfrastructureManager.logger.warn("Inf ID: " + sel_inf.id + ": without any deploy. Exiting.")
sel_inf.add_cont_msg("Infrastructure without any deploy. Exiting.")
if sel_inf.configured is None:
sel_inf.configured = False
return []
except Exception as ex:
sel_inf.configured = False
sel_inf.add_cont_msg("Error parsing RADL: %s" % str(ex))
InfrastructureManager.logger.exception("Inf ID: " + sel_inf.id + " error parsing RADL")
raise ex
for system in radl.systems:
# Add apps requirements to the RADL
apps_to_install = system.getApplications()
for app_to_install in apps_to_install:
for app_avail, _, _, _, requirements in Recipe.getInstallableApps():
if requirements and app_avail.isNewerThan(app_to_install):
# This app must be installed and it has special
# requirements
try:
requirements_radl = radl_parse.parse_radl(requirements).systems[0]
system.applyFeatures(requirements_radl, conflict="other", missing="other")
except Exception:
InfrastructureManager.logger.exception(
"Inf ID: " + sel_inf.id + ": Error in the requirements of the app: " +
app_to_install.getValue("name") + ". Ignore them.")
InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + str(requirements))
break
# Concrete systems using VMRC
try:
systems_with_vmrc = InfrastructureManager.systems_with_vmrc(sel_inf, radl, auth)
except Exception as ex:
sel_inf.configured = False
sel_inf.add_cont_msg("Error getting VM images: %s" % str(ex))
InfrastructureManager.logger.exception("Inf ID: " + sel_inf.id + " error getting VM images")
raise ex
# Concrete systems with cloud providers and select systems with the greatest score
# in every cloud
cloud_list = dict([(c.id, c.getCloudConnector(sel_inf)) for c in CloudInfo.get_cloud_list(auth)])
concrete_systems = {}
for cloud_id, cloud in cloud_list.items():
for system_id, systems in systems_with_vmrc.items():
s1 = [InfrastructureManager._compute_score(s.clone().applyFeatures(s0,
conflict="other",
missing="other").concrete(),
radl.get_system_by_name(system_id))
for s in systems for s0 in cloud.concreteSystem(s, auth)]
# Store the concrete system with largest score
concrete_systems.setdefault(cloud_id, {})[system_id] = (
max(s1, key=lambda x: x[1]) if s1 else (None, -1e9))
# Group virtual machines to deploy by network dependencies
deploy_groups = InfrastructureManager._compute_deploy_groups(radl)
InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": Groups of VMs with dependencies")
InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + "\n" + str(deploy_groups))
# Sort by score the cloud providers
deploys_group_cloud = InfrastructureManager.sort_by_score(sel_inf, concrete_systems, cloud_list,
deploy_groups, auth)
# We are going to start adding resources
sel_inf.set_adding()
# Launch every group in the same cloud provider
deployed_vm = {}
for deploy_group in deploy_groups:
if not deploy_group:
InfrastructureManager.logger.warning("Inf ID: %s: No VMs to deploy!" % sel_inf.id)
sel_inf.add_cont_msg("No VMs to deploy. Exiting.")
if sel_inf.configured is None:
sel_inf.configured = False
return []
cloud_id = deploys_group_cloud[id(deploy_group)]
cloud = cloud_list[cloud_id]
if Config.MAX_SIMULTANEOUS_LAUNCHES > 1:
pool = ThreadPool(processes=Config.MAX_SIMULTANEOUS_LAUNCHES)
pool.map(
lambda deploy: InfrastructureManager._launch_deploy(sel_inf, deploy, cloud_id,
cloud, concrete_systems, radl, auth,
deployed_vm),
deploy_group)
pool.close()
else:
for deploy in deploy_group:
InfrastructureManager._launch_deploy(sel_inf, deploy, cloud_id,
cloud, concrete_systems, radl,
auth, deployed_vm)
# We do this to maintain the order of the VMs in sel_inf.vm_list
# according to the deploys shown in the RADL
new_vms = []
for orig_dep in radl.deploys:
for deploy in deployed_vm.keys():
if orig_dep.id == deploy.id:
for vm in deployed_vm.get(deploy, []):
if vm not in new_vms:
new_vms.append(vm)
# Remove the VMs in creating state
sel_inf.remove_creating_vms()
all_failed = True
for vm in new_vms:
# Set now the VM as "created"
vm.creating = False
# and add it to the Inf
sel_inf.add_vm(vm)
if vm.state != VirtualMachine.FAILED:
all_failed = False
(_, passwd, _, _) = vm.info.systems[0].getCredentialValues()
(_, new_passwd, _, _) = vm.info.systems[0].getCredentialValues(new=True)
if passwd and not new_passwd:
# The VM uses the VMI password, set to change it
random_password = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
vm.info.systems[0].setCredentialValues(password=random_password, new=True)
error_msg = ""
# Add the new virtual machines to the infrastructure
sel_inf.update_radl(radl,
[(d, deployed_vm[d], concrete_systems[d.cloud_id][d.id][0]) for d in deployed_vm],
False)
if all_failed:
InfrastructureManager.logger.error("VMs failed when adding to Inf ID: %s" % sel_inf.id)
sel_inf.add_cont_msg("All VMs failed. No contextualize.")
# if all VMs have failed, delete them
delete_list = list(reversed(sel_inf.get_vm_list()))
for vm in new_vms:
if vm.error_msg:
error_msg += "%s\n" % vm.error_msg
vm.delete(delete_list, auth, [])
sel_inf.add_cont_msg(error_msg)
else:
InfrastructureManager.logger.info("VMs %s successfully added to Inf ID: %s" % (new_vms, sel_inf.id))
# The resources have been added
sel_inf.set_adding(False)
# Let's contextualize!
if context and new_vms and not all_failed:
sel_inf.Contextualize(auth)
IM.InfrastructureList.InfrastructureList.save_data(inf_id)
if all_failed and new_vms:
# if there are no VMs, set it as unconfigured
if not sel_inf.get_vm_list():
sel_inf.configured = False
raise Exception("Error adding VMs: %s" % error_msg)
return [vm.im_id for vm in new_vms]
@staticmethod
def RemoveResource(inf_id, vm_list, auth, context=True):
"""
Remove a list of resources from the infrastructure.
Args:
- inf_id(str): infrastructure id.
- vm_list(str, int or list of str): list of virtual machine ids.
- auth(Authentication): parsed authentication tokens.
- context(bool): Flag to specify if the ctxt step will be made
Return(int): number of undeployed virtual machines.
"""
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info("Removing the VMs: " + str(vm_list) + " from Inf ID: '" + str(inf_id) + "'")
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
if isinstance(vm_list, str):
vm_ids = vm_list.split(",")
elif isinstance(vm_list, int):
vm_ids = [str(vm_list)]
elif isinstance(vm_list, list):
vm_ids = vm_list
else:
raise Exception(
'Incorrect parameter type to RemoveResource function: expected: str, int or list of str.')
cont = 0
exceptions = []
delete_list = [sel_inf.get_vm(vmid) for vmid in vm_ids]
for vm in delete_list:
if vm.delete(delete_list, auth, exceptions):
cont += 1
InfrastructureManager.logger.info("Inf ID: " + sel_inf.id + ": %d VMs successfully removed" % cont)
if context and cont > 0:
# Now test again if the infrastructure is contextualizing
sel_inf.Contextualize(auth)
IM.InfrastructureList.InfrastructureList.save_data(inf_id)
if exceptions:
InfrastructureManager.logger.exception("Inf ID: " + sel_inf.id + ": Error removing resources")
raise Exception("Error removing resources: %s" % exceptions)
return cont
@staticmethod
def GetVMProperty(inf_id, vm_id, property_name, auth):
"""
Get a particular property about a virtual machine in an infrastructure.
Args:
- inf_id(str): infrastructure id.
- vm_id(str): virtual machine id.
- property(str): RADL property to get.
- auth(Authentication): parsed authentication tokens.
Return: a str with the property value
"""
auth = InfrastructureManager.check_auth_data(auth)
radl = InfrastructureManager.GetVMInfo(inf_id, vm_id, auth)
res = None
if radl.systems:
res = radl.systems[0].getValue(property_name)
return res
@staticmethod
def GetVMInfo(inf_id, vm_id, auth, json_res=False):
"""
Get information about a virtual machine in an infrastructure.
Args:
- inf_id(str): infrastructure id.
- vm_id(str): virtual machine id.
- auth(Authentication): parsed authentication tokens.
- json_res(bool): Flag to return the info in RADL JSON format
Return: the RADL with the information about the VM or a str with the JSON data if json_res flag.
"""
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info(
"Get information about the vm: '" + str(vm_id) + "' from Inf ID: " + str(inf_id))
vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth)
success = vm.update_status(auth)
if not success:
InfrastructureManager.logger.debug(
"Inf ID: " + str(inf_id) + ": " +
"Information not updated. Using last information retrieved")
if json_res:
return dump_radl_json(vm.get_vm_info())
else:
return vm.get_vm_info()
@staticmethod
def GetVMContMsg(inf_id, vm_id, auth):
"""
Get the contextualization log of a virtual machine in an infrastructure.
Args:
- inf_id(str): infrastructure id.
- vm_id(str): virtual machine id.
- auth(Authentication): parsed authentication tokens.
Return: a str with the contextualization log of the VM
"""
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info(
"Get contextualization log of the vm: '" + str(vm_id) + "' from Inf ID: " + str(inf_id))
vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth)
cont_msg = vm.get_cont_msg()
InfrastructureManager.logger.debug("Inf ID: " + str(inf_id) + ": " + cont_msg)
return cont_msg
@staticmethod
def AlterVM(inf_id, vm_id, radl_data, auth):
"""
Modify the features of a virtual machine in an infrastructure.
Args:
- inf_id(str): infrastructure id.
- vm_id(str): virtual machine id.
- radl(str): RADL description.
- auth(Authentication): parsed authentication tokens.
Return: the updated information of the VM
"""
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info(
"Modifying the VM: '" + str(vm_id) + "' from Inf ID: " + str(inf_id))
vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth)
if not vm:
InfrastructureManager.logger.info(
"Inf ID: " + str(inf_id) + ": " +
"VM does not exist or Access Error")
raise Exception("VM does not exist or Access Error")
if isinstance(radl_data, RADL):
radl = radl_data
else:
radl = radl_parse.parse_radl(radl_data)
(success, alter_res) = vm.alter(radl, auth)
if not success:
raise Exception("Error modifying the information about the VM %s: %s" % (vm_id, alter_res))
vm.update_status(auth)
IM.InfrastructureList.InfrastructureList.save_data(inf_id)
return vm.info
@staticmethod
def GetInfrastructureRADL(inf_id, auth):
"""
Get the original RADL of an infrastructure.
Args:
- inf_id(str): infrastructure id.
- auth(Authentication): parsed authentication tokens.
Return: str with the RADL
"""
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info("Getting RADL of the Inf ID: " + str(inf_id))
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
radl = str(sel_inf.get_radl())
InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + radl)
return radl
@staticmethod
def GetInfrastructureInfo(inf_id, auth):
"""
Get information about an infrastructure.
Args:
- inf_id(str): infrastructure id.
- auth(Authentication): parsed authentication tokens.
Return: a list of str: list of virtual machine ids.
"""
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info("Getting information about the Inf ID: " + str(inf_id))
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
res = [str(vm.im_id) for vm in sel_inf.get_vm_list()]
InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + str(res))
return res
@staticmethod
def GetInfrastructureContMsg(inf_id, auth, headeronly=False):
"""
Get cont msg of an infrastructure.
Args:
- inf_id(str): infrastructure id.
- auth(Authentication): parsed authentication tokens.
- headeronly(bool): Flag to return only the header part of the infra log.
Return: a str with the cont msg
"""
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info(
"Getting cont msg of the Inf ID: " + str(inf_id))
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
res = sel_inf.cont_out
if not headeronly:
for vm in sel_inf.get_vm_list():
if vm.get_cont_msg():
res += "VM " + str(vm.im_id) + ":\n" + vm.get_cont_msg() + "\n"
res += "***************************************************************************\n"
InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + res)
return res
@staticmethod
def GetInfrastructureState(inf_id, auth):
"""
Get the aggregated state of an infrastructure.
Args:
- inf_id(str): infrastructure id.
- auth(Authentication): parsed authentication tokens.
Return: a dict with two elements:
- 'state': str with the aggregated state of the infrastructure
- 'vm_states': a dict indexed with the id of the VM and its state as value
"""
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info("Getting state of the Inf ID: " + str(inf_id))
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
vm_list = sel_inf.get_vm_list()
vm_states = {}
for vm in vm_list:
# First try to update the status of the VM
vm.update_status(auth)
vm_states[str(vm.im_id)] = vm.state
state = None
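# Aggregation precedence: FAILED or UNKNOWN on any VM decides the overall
# state immediately; otherwise PENDING takes precedence over RUNNING, and
# STOPPED, OFF or CONFIGURED only apply if nothing with higher priority was
# seen. UNCONFIGURED overrides a previously seen CONFIGURED.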
for vm in vm_list:
# Compute the aggregated state from the individual VM states
if vm.state == VirtualMachine.FAILED:
state = VirtualMachine.FAILED
break
elif vm.state == VirtualMachine.UNKNOWN:
state = VirtualMachine.UNKNOWN
break
elif vm.state == VirtualMachine.PENDING:
state = VirtualMachine.PENDING
elif vm.state == VirtualMachine.RUNNING:
if state != VirtualMachine.PENDING:
state = VirtualMachine.RUNNING
elif vm.state == VirtualMachine.STOPPED:
if state is None:
state = VirtualMachine.STOPPED
elif vm.state == VirtualMachine.OFF:
if state is None:
state = VirtualMachine.OFF
elif vm.state == VirtualMachine.CONFIGURED:
if state is None:
state = VirtualMachine.CONFIGURED
elif vm.state == VirtualMachine.UNCONFIGURED:
if state is None or state == VirtualMachine.CONFIGURED:
state = VirtualMachine.UNCONFIGURED
if state is None:
if sel_inf.configured is False:
state = VirtualMachine.FAILED
elif not vm_list and sel_inf.configured is None:
# if there are no vms we probably are in the vm creation process
state = VirtualMachine.PENDING
else:
state = VirtualMachine.UNKNOWN
if sel_inf.deleting:
state = VirtualMachine.DELETING
InfrastructureManager.logger.info("Inf ID: " + str(inf_id) + " is in state: " + state)
return {'state': state, 'vm_states': vm_states}
@staticmethod
def _stop_vm(vm, auth, exceptions):
try:
success = False
InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": Stopping the VM id: " + vm.id)
(success, msg) = vm.stop(auth)
except Exception as e:
msg = str(e)
if not success:
InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": The VM cannot be stopped")
exceptions.append(msg)
@staticmethod
def StopInfrastructure(inf_id, auth):
"""
Stop all virtual machines in an infrastructure.
Args:
- inf_id(str): infrastructure id.
- auth(Authentication): parsed authentication tokens.
Return(str): error messages; empty string means all was ok.
"""
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info("Stopping the Inf ID: " + str(inf_id))
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
exceptions = []
if Config.MAX_SIMULTANEOUS_LAUNCHES > 1:
pool = ThreadPool(processes=Config.MAX_SIMULTANEOUS_LAUNCHES)
pool.map(
lambda vm: InfrastructureManager._stop_vm(
vm, auth, exceptions),
reversed(sel_inf.get_vm_list())
)
pool.close()
else:
for vm in sel_inf.get_vm_list():
InfrastructureManager._stop_vm(vm, auth, exceptions)
if exceptions:
msg = ""
for e in exceptions:
msg += str(e) + "\n"
raise Exception("Error stopping the infrastructure: %s" % msg)
InfrastructureManager.logger.info("Inf ID: " + sel_inf.id + ": Successfully stopped")
return ""
@staticmethod
def _start_vm(vm, auth, exceptions):
try:
success = False
InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": Starting the VM id: " + vm.id)
(success, msg) = vm.start(auth)
except Exception as e:
msg = str(e)
if not success:
InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": The VM cannot be restarted")
exceptions.append(msg)
@staticmethod
def StartInfrastructure(inf_id, auth):
"""
Start all virtual machines in an infrastructure previously stopped.
Args:
- inf_id(str): infrastructure id.
- auth(Authentication): parsed authentication tokens.
Return(str): error messages; empty string means all was ok.
"""
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info("Starting the Inf ID: " + str(inf_id))
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
exceptions = []
if Config.MAX_SIMULTANEOUS_LAUNCHES > 1:
pool = ThreadPool(processes=Config.MAX_SIMULTANEOUS_LAUNCHES)
pool.map(
lambda vm: InfrastructureManager._start_vm(
vm, auth, exceptions),
reversed(sel_inf.get_vm_list())
)
pool.close()
else:
for vm in sel_inf.get_vm_list():
InfrastructureManager._start_vm(vm, auth, exceptions)
if exceptions:
msg = ""
for e in exceptions:
msg += str(e) + "\n"
raise Exception("Error starting the infrastructure: %s" % msg)
InfrastructureManager.logger.info("Inf ID: " + sel_inf.id + ": Successfully restarted")
return ""
@staticmethod
def StartVM(inf_id, vm_id, auth):
"""
Start the specified virtual machine in an infrastructure previously stopped.
Args:
- inf_id(str): infrastructure id.
- vm_id(str): virtual machine id.
- auth(Authentication): parsed authentication tokens.
Return(str): error messages; empty string means all was ok.
"""
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info("Starting the VM id %s from the Inf ID: %s" % (vm_id, inf_id))
vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth)
success = False
try:
(success, msg) = vm.start(auth)
except Exception as e:
msg = str(e)
if not success:
InfrastructureManager.logger.info(
"Inf ID: " + str(inf_id) + ": " +
"The VM %s cannot be restarted: %s" % (vm_id, msg))
raise Exception("Error starting the VM: %s" % msg)
else:
InfrastructureManager.logger.info(
"Inf ID: " + str(inf_id) + ": " +
"The VM %s successfully restarted" % vm_id)
return ""
@staticmethod
def StopVM(inf_id, vm_id, auth):
"""
Stop the specified virtual machine in an infrastructure
Args:
- inf_id(str): infrastructure id.
- vm_id(str): virtual machine id.
- auth(Authentication): parsed authentication tokens.
Return(str): error messages; empty string means all was ok.
"""
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
# First check the auth data
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info(
"Stopping the VM id %s from the Inf ID: %s" % (vm_id, inf_id))
vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth)
success = False
try:
(success, msg) = vm.stop(auth)
except Exception as e:
msg = str(e)
if not success:
InfrastructureManager.logger.info(
"Inf ID: " + str(inf_id) + ": " +
"The VM %s cannot be stopped: %s" % (vm_id, msg))
raise Exception("Error stopping the VM: %s" % msg)
else:
InfrastructureManager.logger.info(
"Inf ID: " + str(inf_id) + ": " +
"The VM %s successfully stopped" % vm_id)
return ""
@staticmethod
def RebootVM(inf_id, vm_id, auth):
"""
Reboot the specified virtual machine in an infrastructure
Args:
- inf_id(str): infrastructure id.
- vm_id(str): virtual machine id.
- auth(Authentication): parsed authentication tokens.
Return(str): error messages; empty string means all was ok.
"""
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
# First check the auth data
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info(
"Rebooting the VM id %s from the Inf ID: %s" % (vm_id, inf_id))
vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth)
success = False
try:
(success, msg) = vm.reboot(auth)
except Exception as e:
msg = str(e)
if not success:
InfrastructureManager.logger.info(
"Inf ID: " + str(inf_id) + ": " +
"The VM %s cannot be rebooted: %s" % (vm_id, msg))
raise Exception("Error rebooting the VM: %s" % msg)
else:
InfrastructureManager.logger.info(
"Inf ID: " + str(inf_id) + ": " +
"The VM %s successfully rebooted" % vm_id)
return ""
@staticmethod
def DestroyInfrastructure(inf_id, auth, force=False, async_call=False):
"""
Destroy all virtual machines in an infrastructure.
Args:
- inf_id(str): infrastructure id.
- auth(Authentication): parsed authentication tokens.
- force(bool): delete the infra from the IM although not all resources are deleted.
- async_call(bool): Destroy the inf in an async way.
Return: None.
"""
if Config.BOOT_MODE == 1:
raise DisabledFunctionException()
# First check the auth data
auth = InfrastructureManager.check_auth_data(auth)
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
# First set this infra as "deleting"
sel_inf.set_deleting()
if async_call:
t = threading.Thread(name="DestroyResource-%s" % sel_inf.id,
target=sel_inf.destroy,
args=(auth, force))
t.daemon = True
t.start()
else:
sel_inf.destroy(auth, force)
return ""
@staticmethod
def check_im_user(auth):
"""
Check if the IM user is valid
Args:
- auth(Authentication): IM parsed authentication tokens.
Return(bool): true if the user is valid or false otherwise.
"""
if Config.USER_DB:
if os.path.isfile(Config.USER_DB):
try:
found = False
user_db = json.load(open(Config.USER_DB, "r"))
for user in user_db['users']:
if user['username'] == auth[0]['username'] and user['password'] == auth[0]['password']:
found = True
break
return found
except Exception:
InfrastructureManager.logger.exception("Incorrect format in the User DB file %s" % Config.USER_DB)
return False
else:
InfrastructureManager.logger.error("User DB file %s not found" % Config.USER_DB)
return False
else:
return True
@staticmethod
def check_oidc_token(im_auth):
token = im_auth["token"]
success = False
try:
# decode the token to get the info
decoded_token = JWT().get_info(token)
except Exception as ex:
InfrastructureManager.logger.exception("Error trying decode OIDC auth token: %s" % str(ex))
raise Exception("Error trying to decode OIDC auth token: %s" % str(ex))
# First check if the issuer is valid
if decoded_token['iss'] not in Config.OIDC_ISSUERS:
InfrastructureManager.logger.error("Incorrect OIDC issuer: %s" % decoded_token['iss'])
raise InvaliddUserException("Invalid InfrastructureManager credentials. Issuer not accepted.")
# Now check the audience
if Config.OIDC_AUDIENCE:
if 'aud' in decoded_token and decoded_token['aud']:
found = False
for aud in decoded_token['aud'].split(","):
if aud == Config.OIDC_AUDIENCE:
found = True
break
if found:
InfrastructureManager.logger.debug("Audience %s successfully checked." % Config.OIDC_AUDIENCE)
else:
InfrastructureManager.logger.error("Audience %s not found in access token." % Config.OIDC_AUDIENCE)
raise InvaliddUserException("Invalid InfrastructureManager credentials. Audience not accepted.")
else:
InfrastructureManager.logger.error("Audience %s not found in access token." % Config.OIDC_AUDIENCE)
raise InvaliddUserException("Invalid InfrastructureManager credentials. Audience not accepted.")
if Config.OIDC_SCOPES and Config.OIDC_CLIENT_ID and Config.OIDC_CLIENT_SECRET:
success, res = OpenIDClient.get_token_introspection(token,
Config.OIDC_CLIENT_ID,
Config.OIDC_CLIENT_SECRET)
if not success:
raise InvaliddUserException("Invalid InfrastructureManager credentials. "
"Invalid token or Client credentials.")
else:
if not res["scope"]:
raise InvaliddUserException("Invalid InfrastructureManager credentials. "
"No scope obtained from introspection.")
else:
scopes = res["scope"].split(" ")
if not all([elem in scopes for elem in Config.OIDC_SCOPES]):
raise InvaliddUserException("Invalid InfrastructureManager credentials. Scopes %s "
"not in introspection scopes: %s" % (" ".join(Config.OIDC_SCOPES),
res["scope"]))
# Now check if the token is not expired
expired, msg = OpenIDClient.is_access_token_expired(token)
if expired:
InfrastructureManager.logger.error("OIDC auth %s." % msg)
raise InvaliddUserException("Invalid InfrastructureManager credentials. OIDC auth %s." % msg)
try:
# Now try to get user info
success, userinfo = OpenIDClient.get_user_info_request(token)
if success:
# convert to username to use it in the rest of the IM
im_auth['username'] = IM.InfrastructureInfo.InfrastructureInfo.OPENID_USER_PREFIX
if userinfo.get("preferred_username"):
im_auth['username'] += str(userinfo.get("preferred_username"))
elif userinfo.get("name"):
im_auth['username'] += str(userinfo.get("name"))
else:
im_auth['username'] += str(userinfo.get("sub"))
im_auth['password'] = str(decoded_token['iss']) + str(userinfo.get("sub"))
except Exception as ex:
InfrastructureManager.logger.exception("Error trying to validate OIDC auth token: %s" % str(ex))
raise Exception("Error trying to validate OIDC auth token: %s" % str(ex))
if not success:
InfrastructureManager.logger.error("Incorrect OIDC auth token: %s" % userinfo)
raise InvaliddUserException("Invalid InfrastructureManager credentials. %s." % userinfo)
@staticmethod
def check_auth_data(auth):
# First check if it is configured to check the users from a list
im_auth = auth.getAuthInfo("InfrastructureManager")
if not im_auth:
raise IncorrectVMCrecentialsException("No credentials provided for the InfrastructureManager.")
if Config.FORCE_OIDC_AUTH and "token" not in im_auth[0]:
raise IncorrectVMCrecentialsException("No token provided for the InfrastructureManager.")
# First check if an OIDC token is included
if "token" in im_auth[0]:
InfrastructureManager.check_oidc_token(im_auth[0])
elif "username" in im_auth[0]:
if im_auth[0]['username'].startswith(IM.InfrastructureInfo.InfrastructureInfo.OPENID_USER_PREFIX):
# This is a OpenID user do not enable to get data using user/pass creds
raise IncorrectVMCrecentialsException("Invalid username used for the InfrastructureManager.")
else:
raise IncorrectVMCrecentialsException("No username nor token for the InfrastructureManager.")
# Now check if the user is authorized
if not InfrastructureManager.check_im_user(im_auth):
raise InvaliddUserException()
if Config.SINGLE_SITE:
vmrc_auth = auth.getAuthInfo("VMRC")
single_site_auth = auth.getAuthInfo(Config.SINGLE_SITE_TYPE)
single_site_auth[0]["host"] = Config.SINGLE_SITE_AUTH_HOST
auth_list = []
auth_list.extend(im_auth)
auth_list.extend(vmrc_auth)
auth_list.extend(single_site_auth)
auth = Authentication(auth_list)
# We have to check if TTS is needed for other auth item
return auth
@staticmethod
def CreateInfrastructure(radl_data, auth, async_call=False):
"""
Create a new infrastructure.
IM creates an infrastructure based on the RADL description and associates it with
the first valid IM user in the authentication tokens.
Args:
- radl_data(RADL): RADL description.
- auth(Authentication): parsed authentication tokens.
- async_call(bool): Create the inf in an async way.
Return(int): the new infrastructure ID if successful.
"""
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
# First check the auth data
auth = InfrastructureManager.check_auth_data(auth)
# Then parse the RADL
if isinstance(radl_data, RADL):
radl = radl_data
else:
radl = radl_parse.parse_radl(radl_data)
radl.check()
# Create a new infrastructure
inf = IM.InfrastructureInfo.InfrastructureInfo()
inf.auth = Authentication(auth.getAuthInfo("InfrastructureManager"))
IM.InfrastructureList.InfrastructureList.add_infrastructure(inf)
IM.InfrastructureList.InfrastructureList.save_data(inf.id)
InfrastructureManager.logger.info("Creating new Inf ID: " + str(inf.id))
# Add the resources in radl_data
try:
if async_call:
InfrastructureManager.logger.debug("Inf ID: " + str(inf.id) + " created Async.")
t = threading.Thread(name="AddResource-%s" % inf.id,
target=InfrastructureManager.AddResource,
args=(inf.id, radl, auth))
t.daemon = True
t.start()
else:
# In case of sync call
vms = InfrastructureManager.AddResource(inf.id, radl, auth)
all_failed = False
error_msg = ""
for vmid in vms:
vm = inf.get_vm(vmid)
if vm.state == VirtualMachine.FAILED:
all_failed = True
if vm.error_msg:
error_msg += "%s\n" % vm.error_msg
else:
all_failed = False
break
if all_failed:
# If all VMs have failed, destroy the inf and return the error
try:
inf.destroy(auth)
except Exception as de:
error_msg += "%s" % de
raise Exception(error_msg)
except Exception as e:
InfrastructureManager.logger.exception("Error Creating Inf ID " + str(inf.id))
inf.delete()
IM.InfrastructureList.InfrastructureList.save_data(inf.id)
IM.InfrastructureList.InfrastructureList.remove_inf(inf)
raise e
InfrastructureManager.logger.info("Inf ID:" + str(inf.id) + ": Successfully created")
return inf.id
@staticmethod
def GetInfrastructureList(auth, flt=None):
"""
Return the infrastructure ids associated to IM tokens.
Args:
- auth(Authentication): parsed authentication tokens.
- flt(string): string to filter the list of returned infrastructures.
A regex to be applied in the RADL or TOSCA of the infra.
Return(list of int): list of infrastructure ids.
"""
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info("Listing the user infrastructures")
auths = auth.getAuthInfo('InfrastructureManager')
if not auths:
InfrastructureManager.logger.error("No correct auth data has been specified.")
raise InvaliddUserException()
inf_ids = IM.InfrastructureList.InfrastructureList.get_inf_ids(auth)
if flt:
res = []
for infid in inf_ids:
inf = InfrastructureManager.get_infrastructure(infid, auth)
radl = str(inf.get_radl())
tosca = ""
if "TOSCA" in inf.extra_info:
tosca = inf.extra_info["TOSCA"].serialize()
if re.search(flt, radl) or re.search(flt, tosca):
res.append(infid)
else:
res = inf_ids
return res
@staticmethod
def ExportInfrastructure(inf_id, delete, auth_data):
if delete and Config.BOOT_MODE == 1:
raise DisabledFunctionException()
auth = Authentication(auth_data)
auth = InfrastructureManager.check_auth_data(auth)
sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
str_inf = sel_inf.serialize()
InfrastructureManager.logger.info("Exporting Inf ID: " + str(sel_inf.id))
if delete:
sel_inf.delete()
IM.InfrastructureList.InfrastructureList.save_data(sel_inf.id)
IM.InfrastructureList.InfrastructureList.remove_inf(sel_inf)
return str_inf
@staticmethod
def ImportInfrastructure(str_inf, auth_data):
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
auth = Authentication(auth_data)
auth = InfrastructureManager.check_auth_data(auth)
try:
new_inf = IM.InfrastructureInfo.InfrastructureInfo.deserialize(str_inf)
except Exception as ex:
InfrastructureManager.logger.exception("Error importing the infrastructure, incorrect data")
raise Exception("Error importing the infrastructure, incorrect data: " + str(ex))
new_inf.auth = Authentication(auth.getAuthInfo("InfrastructureManager"))
IM.InfrastructureList.InfrastructureList.add_infrastructure(new_inf)
InfrastructureManager.logger.info("Importing new infrastructure with Inf ID: " + str(new_inf.id))
# Save the state
IM.InfrastructureList.InfrastructureList.save_data(new_inf.id)
return new_inf.id
@staticmethod
def CreateDiskSnapshot(inf_id, vm_id, disk_num, image_name, auto_delete, auth):
"""
Create a snapshot of the specified num disk in a
virtual machine in an infrastructure.
Args:
- inf_id(str): infrastructure id.
- vm_id(str): virtual machine id.
- image_name(str): A name to set to the image
- disk_num(int): Number of the disk.
- auto_delete(bool): A flag to specify that the snapshot will be deleted when the
infrastructure is destroyed.
- auth(Authentication): parsed authentication tokens.
Return: a str with url of the saved snapshot.
"""
if Config.BOOT_MODE in [1, 2]:
raise DisabledFunctionException()
auth = InfrastructureManager.check_auth_data(auth)
InfrastructureManager.logger.info("Creating a snapshot of VM id: %s Inf ID: %s" % (vm_id, inf_id))
vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth)
success, image_url = vm.create_snapshot(disk_num, image_name, auto_delete, auth)
if not success:
InfrastructureManager.logger.error("Error creating a snapshot: %s of VM id: %s "
"Inf ID: %s" % (image_url, vm_id, inf_id))
raise Exception("Error creating snapshot: %s" % image_url)
else:
return image_url
@staticmethod
def stop():
IM.InfrastructureList.InfrastructureList.stop()
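# --- Usage sketch (illustrative; not part of the original module) ----------
# A minimal, hedged example of driving the static API above from client code.
# The auth entries and the RADL text below are placeholders: the exact keys
# each cloud provider expects depend on the deployment, so treat this as a
# sketch rather than a tested recipe.
def _example_create_and_list():
    auth = Authentication([
        {'id': 'im', 'type': 'InfrastructureManager', 'username': 'user', 'password': 'pass'},
        {'id': 'one', 'type': 'OpenNebula', 'host': 'onehost:2633', 'username': 'u', 'password': 'p'},
    ])
    radl = ("system node ( cpu.count >= 1 and memory.size >= 512m and "
            "disk.0.os.name = 'linux' )\n"
            "deploy node 1\n")
    # Synchronous creation: returns the new infrastructure id or raises on error
    inf_id = InfrastructureManager.CreateInfrastructure(radl, auth)
    # The new id should now appear in the caller's infrastructure list
    assert inf_id in InfrastructureManager.GetInfrastructureList(auth)
    return inf_id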
|
indigo-dc/im
|
IM/InfrastructureManager.py
|
Python
|
gpl-3.0
| 65,292
|
[
"Galaxy"
] |
4c1867fa2f420b8c55c56e9c934a9e174080976b6ede28de10be5ace0a199760
|
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
""" Handle the SCOP CLAssification file, which describes SCOP domains.
The file format is described in the scop
"release notes.":http://scop.berkeley.edu/release-notes-1.55.html
The latest CLA file can be found
"elsewhere at SCOP.":http://scop.mrc-lmb.cam.ac.uk/scop/parse/
"Release 1.55":http://scop.berkeley.edu/parse/dir.cla.scop.txt_1.55 (July 2001)
"""
from types import *
from Residues import *
from FileIndex import FileIndex
class Record:
"""Holds information for one SCOP domain
sid -- SCOP identifier. e.g. d1danl2
residues -- The domain definition as a Residues object
sccs -- SCOP concise classification strings. e.g. b.1.2.1
sunid -- SCOP unique identifier for this domain
hierarchy -- A sequence of tuples (nodetype, sunid) describing the
location of this domain in the SCOP hierarchy.
See the Scop module for a description of nodetypes.
"""
def __init__(self):
self.sid = ''
self.residues = None
self.sccs = ''
self.sunid =''
self.hierarchy = []
def __str__(self):
s = []
s.append(self.sid)
s += str(self.residues).split(" ")
s.append(self.sccs)
s.append(self.sunid)
h=[]
for ht in self.hierarchy:
h.append("=".join(map(str,ht)))
s.append(",".join(h))
return "\t".join(map(str,s)) + "\n"
class Iterator:
"""Iterates over a CLA file.
"""
def __init__(self, handle, parser=None):
"""Create an object that iterates over a DES file.
handle -- file-like object.
parser -- an optional Parser object to change the results into
another form. If set to None, then the raw contents
of the file will be returned.
"""
if type(handle) is not FileType and type(handle) is not InstanceType:
raise TypeError, "I expected a file handle or file-like object"
self._handle = handle
self._parser = parser
def next(self):
"""Retrieve the next CLA record."""
while 1:
line = self._handle.readline()
if not line: return None
if line[0] !='#': break # Not a comment line
if self._parser is not None :
return self._parser.parse(line)
return line
def __iter__(self):
return iter(self.next, None)
class Parser:
"""Parses tab-deliminated CLA records.
"""
def parse(self, entry):
"""Returns a Cla Record """
entry = entry.rstrip() # no trailing whitespace
columns = entry.split('\t') # separate the tab-delimited columns
if len(columns) != 6:
raise ValueError, "I don't understand the format of %s" % entry
rec = Record()
rec.sid, pdbid, residues, rec.sccs, rec.sunid, hierarchy = columns
rec.residues = Residues(residues)
rec.residues.pdbid = pdbid
rec.sunid = int(rec.sunid)
h = []
for ht in hierarchy.split(",") :
h.append( ht.split('='))
for ht in h:
ht[1] = int(ht[1])
rec.hierarchy = h
return rec
class Index(FileIndex):
"""A CLA file indexed by SCOP identifiers."""
def __init__(self, filename, ) :
iterator = lambda f : Iterator(f, Parser())
key_gen = lambda rec : rec.sid
FileIndex.__init__(self, filename, iterator, key_gen)
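# --- Usage sketch (illustrative; not part of the original module) ----------
# A minimal example of reading a SCOP CLA release file with the classes above.
# The file name is a placeholder for any dir.cla.scop release file.
def _example_read_cla(filename="dir.cla.scop.txt_1.55"):
    handle = open(filename)
    try:
        for record in Iterator(handle, Parser()):
            # comment lines are skipped by Iterator; each record is a Record
            print record.sid, record.sccs, record.sunid
    finally:
        handle.close()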
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/SCOP/Cla.py
|
Python
|
apache-2.0
| 3,824
|
[
"Biopython"
] |
5abaf20f57c89c1c31a4ab3f49e38fa8fc2d50169c2bc462a1c155bf044a1d68
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Complete pipeline for online processing using OnACID.
@author: Andrea Giovannucci @agiovann and Eftychios Pnevmatikakis @epnev
Special thanks to Andreas Tolias and his lab at Baylor College of Medicine
for sharing their data used in this demo.
"""
from copy import deepcopy
import glob
import numpy as np
import os
import pylab as pl
import scipy
import sys
from time import time
try:
if __IPYTHON__:
print('Detected iPython')
# this is used for debugging purposes only. It allows reloading classes when they are changed
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
except NameError:
pass
import caiman as cm
from caiman.utils.visualization import view_patches_bar
from caiman.utils.utils import download_demo, load_object, save_object
from caiman.components_evaluation import evaluate_components_CNN
from caiman.motion_correction import motion_correct_iteration_fast
import cv2
from caiman.utils.visualization import plot_contours
from caiman.source_extraction.cnmf.online_cnmf import bare_initialization
from caiman.source_extraction.cnmf.utilities import detrend_df_f_auto
from caiman.paths import caiman_datadir
#%%
def main():
pass # For compatibility between running under Spyder and the CLI
#%% download and list all files to be processed
# folder inside ./example_movies where files will be saved
fld_name = 'Mesoscope'
download_demo('Tolias_mesoscope_1.hdf5', fld_name)
download_demo('Tolias_mesoscope_2.hdf5', fld_name)
download_demo('Tolias_mesoscope_3.hdf5', fld_name)
# folder where files are located
folder_name = os.path.join(caiman_datadir(), 'example_movies', fld_name)
extension = 'hdf5' # extension of files
# read all files to be processed
fls = glob.glob(folder_name + '/*' + extension)
# your list of files should look something like this
print(fls)
#%% Set up some parameters
# frame rate (Hz)
fr = 15
# approximate length of transient event in seconds
decay_time = 0.5
# expected half size of neurons
gSig = (3, 3)
# order of AR indicator dynamics
p = 1
# minimum SNR for accepting new components
min_SNR = 2.5
# correlation threshold for new component inclusion
rval_thr = 0.85
# spatial downsampling factor (increases speed but may lose some fine structure)
ds_factor = 1
# number of background components
gnb = 2
# recompute gSig if downsampling is involved
gSig = tuple(np.ceil(np.array(gSig) / ds_factor).astype('int'))
# flag for online motion correction
mot_corr = True
# maximum allowed shift during motion correction
max_shift = np.ceil(10. / ds_factor).astype('int')
# set up some additional supporting parameters needed for the algorithm (these are default values but change according to dataset characteristics)
# number of shapes to be updated each time (put this to a finite small value to increase speed)
max_comp_update_shape = np.inf
# number of files used for initialization
init_files = 1
# number of files used for online
online_files = len(fls) - 1
# number of frames for initialization (presumably from the first file)
initbatch = 200
# maximum number of expected components used for memory pre-allocation (exaggerate here)
expected_comps = 300
# initial number of components
K = 2
# number of timesteps to consider when testing new neuron candidates
N_samples = np.ceil(fr * decay_time)
# exceptionality threshold
thresh_fitness_raw = scipy.special.log_ndtr(-min_SNR) * N_samples
# number of passes over the data
epochs = 2
# upper bound for number of frames in each file (used right below)
len_file = 1000
# total length of all files (if not known use a large number, then truncate at the end)
T1 = len(fls) * len_file * epochs
#%% Initialize movie
# load only the first initbatch frames and possibly downsample them
if ds_factor > 1:
Y = cm.load(fls[0], subindices=slice(0, initbatch, None)).astype(
np.float32).resize(1. / ds_factor, 1. / ds_factor)
else:
Y = cm.load(fls[0], subindices=slice(
0, initbatch, None)).astype(np.float32)
if mot_corr: # perform motion correction on the first initbatch frames
mc = Y.motion_correct(max_shift, max_shift)
Y = mc[0].astype(np.float32)
borders = np.max(mc[1])
else:
Y = Y.astype(np.float32)
# minimum value of movie. Subtract it to make the data non-negative
img_min = Y.min()
Y -= img_min
img_norm = np.std(Y, axis=0)
# normalizing factor to equalize the FOV
img_norm += np.median(img_norm)
Y = Y / img_norm[None, :, :] # normalize data
_, d1, d2 = Y.shape
dims = (d1, d2) # dimensions of FOV
Yr = Y.to_2D().T # convert data into 2D array
Cn_init = Y.local_correlations(swap_dim=False) # compute correlation image
#pl.imshow(Cn_init)
#pl.title('Correlation Image on initial batch')
#pl.colorbar()
bnd_Y = np.percentile(Y,(0.001,100-0.001)) # plotting boundaries for Y
#%% initialize OnACID with bare initialization
cnm_init = bare_initialization(Y[:initbatch].transpose(1, 2, 0), init_batch=initbatch, k=K, gnb=gnb,
gSig=gSig, p=p, minibatch_shape=100, minibatch_suff_stat=5,
update_num_comps=True, rval_thr=rval_thr,
thresh_fitness_raw=thresh_fitness_raw,
batch_update_suff_stat=True, max_comp_update_shape=max_comp_update_shape,
deconv_flag=False, use_dense=False,
simultaneously=False, n_refit=0,
max_num_added=3, min_num_trial=3,
sniper_mode=False, use_peak_max=False,
expected_comps=expected_comps)
#%% Plot initialization results
crd = plot_contours(cnm_init.estimates.A.tocsc(), Cn_init, thr=0.9)
A, C, b, f, YrA, sn = cnm_init.estimates.A, cnm_init.estimates.C, cnm_init.estimates.b, cnm_init.estimates.f, \
cnm_init.estimates.YrA, cnm_init.estimates.sn
view_patches_bar(Yr, scipy.sparse.coo_matrix(
A.tocsc()[:, :]), C[:, :], b, f, dims[0], dims[1], YrA=YrA[:, :], img=Cn_init)
bnd_AC = np.percentile(A.dot(C),(0.001,100-0.005))
bnd_BG = np.percentile(b.dot(f),(0.001,100-0.001))
#%% create a function for plotting results in real time if needed
def create_frame(cnm2, img_norm, captions):
cnm2_est = cnm2.estimates
A, b = cnm2_est.Ab[:, gnb:], cnm2_est.Ab[:, :gnb].toarray()
C, f = cnm2_est.C_on[gnb:cnm2.M, :], cnm2_est.C_on[:gnb, :]
# inferred activity due to components (no background)
frame_plot = (frame_cor.copy() - bnd_Y[0])/np.diff(bnd_Y)
comps_frame = A.dot(C[:, t - 1]).reshape(cnm2.dims, order='F')
bgkrnd_frame = b.dot(f[:, t - 1]).reshape(cnm2.dims, order='F') # denoised frame (components + background)
denoised_frame = comps_frame + bgkrnd_frame
denoised_frame = (denoised_frame.copy() - bnd_Y[0])/np.diff(bnd_Y)
comps_frame = (comps_frame.copy() - bnd_AC[0])/np.diff(bnd_AC)
if show_residuals:
#all_comps = np.reshape(cnm2.Yres_buf.mean(0), cnm2.dims, order='F')
all_comps = np.reshape(cnm2_est.mean_buff, cnm2.dims, order='F')
all_comps = np.minimum(np.maximum(all_comps, 0)*2 + 0.25, 255)
else:
all_comps = np.array(A.sum(-1)).reshape(cnm2.dims, order='F')
# spatial shapes
frame_comp_1 = cv2.resize(np.concatenate([frame_plot, all_comps * 1.], axis=-1),
(2 * np.int(cnm2.dims[1] * resize_fact), np.int(cnm2.dims[0] * resize_fact)))
frame_comp_2 = cv2.resize(np.concatenate([comps_frame, denoised_frame], axis=-1),
(2 * np.int(cnm2.dims[1] * resize_fact), np.int(cnm2.dims[0] * resize_fact)))
frame_pn = np.concatenate([frame_comp_1, frame_comp_2], axis=0).T
vid_frame = np.repeat(frame_pn[:, :, None], 3, axis=-1)
vid_frame = np.minimum((vid_frame * 255.), 255).astype('u1')
if show_residuals and cnm2_est.ind_new:
add_v = np.int(cnm2.dims[1]*resize_fact)
for ind_new in cnm2_est.ind_new:
cv2.rectangle(vid_frame,(int(ind_new[0][1]*resize_fact),int(ind_new[1][1]*resize_fact)+add_v),
(int(ind_new[0][0]*resize_fact),int(ind_new[1][0]*resize_fact)+add_v),(255,0,255),2)
cv2.putText(vid_frame, captions[0], (5, 20), fontFace=5, fontScale=0.8, color=(
0, 255, 0), thickness=1)
cv2.putText(vid_frame, captions[1], (np.int(
cnm2.dims[0] * resize_fact) + 5, 20), fontFace=5, fontScale=0.8, color=(0, 255, 0), thickness=1)
cv2.putText(vid_frame, captions[2], (5, np.int(
cnm2.dims[1] * resize_fact) + 20), fontFace=5, fontScale=0.8, color=(0, 255, 0), thickness=1)
cv2.putText(vid_frame, captions[3], (np.int(cnm2.dims[0] * resize_fact) + 5, np.int(
cnm2.dims[1] * resize_fact) + 20), fontFace=5, fontScale=0.8, color=(0, 255, 0), thickness=1)
cv2.putText(vid_frame, 'Frame = ' + str(t), (vid_frame.shape[1] // 2 - vid_frame.shape[1] //
10, vid_frame.shape[0] - 20), fontFace=5, fontScale=0.8, color=(0, 255, 255), thickness=1)
return vid_frame
#%% Prepare object for OnACID
cnm2 = deepcopy(cnm_init)
save_init = False # flag for saving initialization object. Useful if you want to check OnACID with different parameters but same initialization
if save_init:
cnm_init.dview = None
save_object(cnm_init, fls[0][:-4] + '_DS_' + str(ds_factor) + '.pkl')
cnm_init = load_object(fls[0][:-4] + '_DS_' + str(ds_factor) + '.pkl')
cnm2._prepare_object(np.asarray(Yr), T1, idx_components=None)
cnm2.thresh_CNN_noisy = 0.5
#%% Run OnACID and optionally plot results in real time
epochs = 1
cnm2.estimates.Ab_epoch = [] # save the shapes at the end of each epoch
t = initbatch # current timestep
tottime = []
Cn = Cn_init.copy()
# flag for removing components with bad shapes
remove_flag = False
T_rm = 650 # remove bad components every T_rm frames
rm_thr = 0.1 # CNN classifier removal threshold
# flag for plotting contours of detected components at the end of each file
plot_contours_flag = False
# flag for showing results video online (turn off flags for improving speed)
play_reconstr = True
# flag for saving movie (file could be quite large..)
save_movie = False
movie_name = os.path.join(folder_name, 'sniper_meso_0.995_new.avi') # name of movie to be saved
resize_fact = 1.2 # image resizing factor
if online_files == 0: # check whether there are any additional files
process_files = fls[:init_files] # end processing at this file
init_batc_iter = [initbatch] # place where to start
end_batch = T1
else:
process_files = fls[:init_files + online_files] # additional files
# where to start reading at each file
init_batc_iter = [initbatch] + [0] * online_files
shifts = []
show_residuals = True
if show_residuals:
caption = 'Mean Residual Buffer'
else:
caption = 'Identified Components'
captions = ['Raw Data', 'Inferred Activity', caption, 'Denoised Data']
if save_movie and play_reconstr:
fourcc = cv2.VideoWriter_fourcc('8', 'B', 'P', 'S')
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(movie_name, fourcc, 30.0, tuple(
[int(2 * x * resize_fact) for x in cnm2.dims]))
for iter in range(epochs):
if iter > 0:
# if not on first epoch process all files from scratch
process_files = fls[:init_files + online_files]
init_batc_iter = [0] * (online_files + init_files)
# np.array(fls)[np.array([1,2,3,4,5,-5,-4,-3,-2,-1])]:
for file_count, ffll in enumerate(process_files):
print('Now processing file ' + ffll)
Y_ = cm.load(ffll, subindices=slice(
init_batc_iter[file_count], T1, None))
# update max-correlation (and perform offline motion correction) just for illustration purposes
if plot_contours_flag:
if ds_factor > 1:
Y_1 = Y_.resize(1. / ds_factor, 1. / ds_factor, 1)
else:
Y_1 = Y_.copy()
if mot_corr:
templ = (cnm2.estimates.Ab.data[:cnm2.estimates.Ab.indptr[1]] * cnm2.estimates.C_on[0, t - 1]).reshape(cnm2.estimates.dims, order='F') * img_norm
newcn = (Y_1 - img_min).motion_correct(max_shift, max_shift,
template=templ)[0].local_correlations(swap_dim=False)
Cn = np.maximum(Cn, newcn)
else:
Cn = np.maximum(Cn, Y_1.local_correlations(swap_dim=False))
old_comps = cnm2.N # number of existing components
for frame_count, frame in enumerate(Y_): # now process each file
if np.isnan(np.sum(frame)):
raise Exception('Frame ' + str(frame_count) + ' contains nan')
if t % 100 == 0:
print('Epoch: ' + str(iter + 1) + '. ' + str(t) + ' frames have been processed in total. ' + str(cnm2.N -
old_comps) + ' new components were added. Total number of components is ' + str(cnm2.estimates.Ab.shape[-1] - gnb))
old_comps = cnm2.N
t1 = time() # count time only for the processing part
frame_ = frame.copy().astype(np.float32) #
if ds_factor > 1:
frame_ = cv2.resize(
frame_, img_norm.shape[::-1]) # downsampling
frame_ -= img_min # make data non-negative
if mot_corr: # motion correct
templ = cnm2.estimates.Ab.dot(
cnm2.estimates.C_on[:cnm2.M, t - 1]).reshape(cnm2.dims, order='F') * img_norm
frame_cor, shift = motion_correct_iteration_fast(
frame_, templ, max_shift, max_shift)
shifts.append(shift)
else:
templ = None
frame_cor = frame_
frame_cor = frame_cor / img_norm # normalize data-frame
cnm2.fit_next(t, frame_cor.reshape(-1, order='F')) # run OnACID on this frame
# store time
tottime.append(time() - t1)
t += 1
if t % T_rm == 0 and remove_flag:
prd, _ = evaluate_components_CNN(cnm2.estimates.Ab[:, gnb:], dims, gSig)
ind_rem = np.where(prd[:, 1] < rm_thr)[0].tolist()
cnm2.remove_components(ind_rem)
print('Removing '+str(len(ind_rem))+' components')
if t % 1000 == 0 and plot_contours_flag:
pl.cla()
A = cnm2.estimates.Ab[:, gnb:]
# update the contour plot every 1000 frames
crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)
pl.pause(1)
if play_reconstr: # generate movie with the results
vid_frame = create_frame(cnm2, img_norm, captions)
if save_movie:
out.write(vid_frame)
if t-initbatch < 100:
#for rp in np.int32(np.ceil(np.exp(-np.arange(1,100)/30)*20)):
for rp in range(len(cnm2.estimates.ind_new)*2):
out.write(vid_frame)
cv2.imshow('frame', vid_frame)
if t-initbatch < 100:
for rp in range(len(cnm2.estimates.ind_new)*2):
cv2.imshow('frame', vid_frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print('Cumulative processing speed is ' + str((t - initbatch) /
np.sum(tottime))[:5] + ' frames per second.')
# save the shapes at the end of each epoch
cnm2.estimates.Ab_epoch.append(cnm2.estimates.Ab.copy())
if save_movie:
out.release()
cv2.destroyAllWindows()
#%% save results (optional)
save_results = False
if save_results:
np.savez('results_analysis_online_MOT_CORR.npz',
Cn=Cn, Ab=cnm2.estimates.Ab, Cf=cnm2.estimates.C_on, b=cnm2.estimates.b, f=cnm2.estimates.f,
dims=cnm2.dims, tottime=tottime, noisyC=cnm2.estimates.noisyC, shifts=shifts)
#%% extract results from the objects and do some plotting
A, b = cnm2.estimates.Ab[:, gnb:], cnm2.estimates.Ab[:, :gnb].toarray()
C, f = cnm2.estimates.C_on[gnb:cnm2.M, t - t //
epochs:t], cnm2.estimates.C_on[:gnb, t - t // epochs:t]
noisyC = cnm2.estimates.noisyC[:, t - t // epochs:t]
b_trace = [osi.b for osi in cnm2.estimates.OASISinstances] if hasattr(
cnm2, 'OASISinstances') else [0] * C.shape[0]
pl.figure()
crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, :]), C[:, :], b, f,
dims[0], dims[1], YrA=noisyC[gnb:cnm2.M] - C, img=Cn)
#%%
# This is to mask the differences between running this demo in Spyder
# versus from the CLI
if __name__ == "__main__":
main()
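# --- Note on the exceptionality threshold (illustrative) -------------------
# thresh_fitness_raw above is derived from min_SNR: roughly speaking, a
# candidate is accepted when its trace is less likely under the noise model
# than a min_SNR-sigma event sustained over N_samples frames, i.e.
# N_samples * log(Phi(-min_SNR)). For the defaults used here (fr=15,
# decay_time=0.5 -> N_samples=8, min_SNR=2.5) this is roughly 8 * (-5.08),
# i.e. about -40.7. A stand-alone sketch of the same computation:
def _example_exceptionality_threshold(fr=15, decay_time=0.5, min_SNR=2.5):
    N_samples = np.ceil(fr * decay_time)                  # frames per transient
    return scipy.special.log_ndtr(-min_SNR) * N_samples   # log-probability threshold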
|
agiovann/Constrained_NMF
|
demos/obsolete/1_1/demo_OnACID_mesoscope_1_1.py
|
Python
|
gpl-2.0
| 18,568
|
[
"NEURON"
] |
8c1c2aeff042f137023138303aad056c907a826c9f3e2a9c1842f9de1692ea32
|
from traceHandler import *
from modelHandler import *
from optimizerHandler import *
from optionHandler import optionHandler
from scipy.interpolate import interp1d
from scipy import linspace
import time
import numpy
class coreModul():
"""
This class is responsible for carrying out the main steps of the optimization process by
interacting with the other modules. The main attributes are the following:
:attr: data_handler:
performs input operations and handles input data
:attr: option_handler:
stores the settings
:attr: model_handler:
handles the model and runs the simulations and carries out other model related tasks
:attr: optimizer:
carries out the optimization process
:attr: optimal_params:
contains the resulting parameters
:attr: ffun_calc_list:
contains the list of available fitness functions in a dictionary
"""
def __init__(self):
self.data_handler=DATA()
self.option_handler=optionHandler()
self.model_handler=None
self.optimizer=None
self.wfits = []
self.wfits2 = []
f_m={"MSE": "calc_ase",
"Spike count": "calc_spike",
"MSE (excl. spikes)": "calc_spike_ase",
"Spike count (stim.)": "spike_rate",
"ISI differences": "isi_differ",
"Latency to 1st spike": "first_spike",
"AP amplitude": "AP_overshoot",
"AHP depth": "AHP_depth",
"AP width": "AP_width",
"Derivative difference" : "calc_grad_dif"}
#"PPTD" : "pyelectro_pptd"}
self.ffun_mapper=dict((v,k) for k,v in list(f_m.items()))
self.ffun_calc_list=["MSE",
"MSE (excl. spikes)",
"Spike count",
"Spike count (stim.)",
"ISI differences",
"Latency to 1st spike",
"AP amplitude",
"AHP depth",
"AP width",
"Derivative difference"]
#"PPTD"]
self.grid_result=None
def htmlStrBold(self,inp):
return "<b>"+str(inp)+"</b>"
def htmlStr(self,inp):
return "<p>"+str(inp)+"</p>"
def htmlUnderline(self):
return "text-decoration:underline"
def htmlResize(self,size):
return "font-size:"+str(int(size))+"%"
def htmlAlign(self,align_to):
if align_to not in ["left","right","center"]:
raise ValueError
return "text-align:"+align_to
def htmlStyle(self,inp,*args):
tmp_str="<span style=\""
for n in args:
tmp_str+=n+";"
tmp_str+="\">"+str(inp)+"</span>"
return tmp_str
def htmlTable(self,header_list,data):
tmp_str="<table border=\"1\" align=\"center\">"
for h in header_list:
tmp_str+="\n<th>"+str(h)+"</th>"
for r in data:
tmp_str+="\n<tr>"
for c in r:
tmp_str+="\n<td>"+str(c)+"</td>"
tmp_str+="\n</tr>"
tmp_str+="\n</table>"
return tmp_str
def htmlPciture(self,inp):
return "<p align=\"center\"><img style=\"border:none;\" src=\""+inp+"\" ></p>"
def Print(self):
print([self.option_handler.GetFileOption(),
self.option_handler.GetInputOptions(),
self.option_handler.GetModelOptions(),
self.option_handler.GetModelStim(),
self.option_handler.GetModelStimParam(),
self.option_handler.GetObjTOOpt(),
self.option_handler.GetOptParam(),
self.option_handler.GetFitnessParam(),
self.option_handler.GetOptimizerOptions()])
print("\n")
def FirstStep(self,args):
"""
Stores the location of the input, and the base directory in the ``option_handler`` object
and reads the data from the file into the ``data_handler`` object.
:param args: dictionary with keys "file" and "input"
"""
self.option_handler.SetFileOptions(args.get("file"))
self.option_handler.SetInputOptions(args.get("input"))
self.data_handler.Read([self.option_handler.input_dir],self.option_handler.input_size,self.option_handler.input_scale,self.option_handler.input_length,self.option_handler.input_freq,self.option_handler.type[-1])
if self.option_handler.type[-1]=='features':
self.option_handler.input_size= len(self.data_handler.features_data['stim_amp'])
def LoadModel(self,args):
"""
Stores the type of the simulator as well as the optional parameters passed to it.
Creates the ``model_handler`` object, which can be either ``modelHandlerNeuron`` or ``externalHandler``.
If the ``externalHandler`` is selected then the number of parameters subject to optimization is also set.
:param args: dictionary with keys "simulator" and "sim_command"
"""
print(args)
self.model_handler=None
self.option_handler.SetSimParam([args.get("simulator","Neuron"),args.get("sim_command"),None])
if self.option_handler.GetSimParam()[0]=="Neuron":
self.option_handler.SetModelOptions(args.get("model"))
self.model_handler=modelHandlerNeuron(self.option_handler.model_path,self.option_handler.model_spec_dir,self.option_handler.base_dir)
else:
self.model_handler=externalHandler(self.option_handler.GetSimParam()[1])
self.model_handler.SetNParams(self.option_handler)
self.option_handler.SetModelStimParam([[0]*self.data_handler.number_of_traces(),0,0])
def ReturnSections(self):
"""
:return: the sections found in the model including "None" in a ``string`` ``list``.
"""
temp=self.model_handler.GetParameters()
sections=[]
for n in temp:
sections.append(n[0])
sections=list(set(sections))
sections.append("None")
return sections
def ReturnMorphology(self):
"""
:return: the morphological parameters found in the model including "None" in a ``string`` ``list``.
"""
temp=self.model_handler.GetParameters()
morphs=(str.split(temp[0][1], ", "))
morphs=list(set(morphs))
morphs.append("None")
return morphs
def ReturnChannels(self,section):
"""
Collects the channels from the given section.
:param section: the name of the section
:return: the channels in the given section including "None" in a ``string`` ``list``.
"""
temp=self.model_handler.GetParameters()
channels=[]
for n in temp:
if n[0]==section:
for k in str.split(n[2]," "):
if k!="":
for s in str.split(n[3]," "):
if str.count(k,s)==1 and s!="":
channels.append(s)
channels=list(set(channels))
channels.append("None")
return channels
def ReturnChParams(self,channel):
"""
Collects channel parameters from the given channel
:param channel: the name of the channel mechanism
:return: the channel parameters in the given channel including "None" in a ``string`` ``list``.
.. note::
This function returns everything from the channel object not only the parameters.
"""
temp=self.model_handler.GetParameters()
ch_param=[]
for n in temp:
if str.find(n[3],channel)!=-1:
for p in n[2].split():
if str.find(p,channel)!=-1:
ch_param.append(p)
ch_param=list(set(ch_param))
ch_param.append("None")
return ch_param
#not in use
def SetModel(self,args):
if args.get("channel")!="None":
self.model_handler.SetChannelParameters(args.get("section"), args.get("segment"), args.get("channel"), args.get("params"), args.get("values"))
else:
self.model_handler.SetMorphParameters(args.get("section"), args.get("morph"), args.get("values"))
def SetModel2(self,args):
"""
Stores the selected parameter as subject to optimization in the ``option_handler`` object.
For future use it offers a way to store initial value (not in use at the moment).
:param args: must be a string-string dictionary containing the following keys:
* section
* channel
* params
* value
or:
* section
* morph
* values
"""
if args.get("channel")!="None":
self.option_handler.SetObjTOOpt(args.get("section")+" "+args.get("segment")+" "+args.get("channel")+" "+args.get("params"))
self.option_handler.SetOptParam(args.get("values"))
else:
self.option_handler.SetObjTOOpt(args.get("section")+" "+args.get("morph"))
self.option_handler.SetOptParam(args.get("values"))
def SecondStep(self,args):
"""
Stores the stimulation settings in the option object.
:param args: must be a dictionary with the following keys:
* stim
must hold a ``list`` as value, which contains:
* stimulus type as ``string``, must be either "IClamp" or "VClamp"
* position of stimulus inside the section as of real value (0-1)
* name of stimulated section as ``string``
* stimparam
must hold a ``list`` as value which contains:
* stimulus amplitudes as a ``list`` of real values
* delay of stimulus as real value
* duration of stimulus as real value
"""
self.option_handler.SetModelStim(args.get("stim"))
self.option_handler.SetModelStimParam(args.get("stimparam"))
def ThirdStep(self,args):
"""
Stores the parameters in the ``option_handler`` object regarding the optimization process.
If the sampling rate of the simulation is higher than the sampling rate of the input trace,
then it re-samples the input using linear interpolation to create more points.
Currently running a simulation with lower sampling rate than the input trace is not supported!
After storing the necessary settings the ``optimizer`` object is initialized and the optimization is performed.
The raw results are stored in the ``final_pop`` variable in the ``optimizer`` object.
:param args: a dictionary containing the following keys:
* runparam
must be a list containing the following values:
* length of simulation as real value
* integration step as real value
* parameter to record as ``string``
* name of the section where the recording takes place as ``string``
* position inside the section as real value (0-1)
* initial voltage as a real value
* feat
must be a ``list`` with the names of the selected fitness functions
* weights
must be a list of real values
* algo_options
must be a dictionary containing options related to the optimization algorithm
mandatory parameters:
* seed
* evo_strat
* pop_size
* num_params
* boundaries
optional parameters belonging to the different algorithms (see the optimizerHandler module for more)
* max_evaluation
* mutation_rate
* cooling_rate
* m_gauss
* std_gauss
* schedule
* init_temp
* final_temp
* acc
* dwell
* x_tol
* f_tol
* inertia
* social_rate
* cognitive_rate
* neighoborhood_size
optional parameter shared by every algorithm
* starting_points
"""
self.grid_result=None
if args!=None:
self.option_handler.SetModelRun(args.get("runparam"))
fit_par=[]
#fit_par.append(args.get("ffun",[]))
fit_par.append(args.get("feat",[]))
fit_par.append(args.get("weights",[]))
self.option_handler.SetFitnesParam(fit_par)
tmp=args.get("algo_options")
"""
if self.option_handler.type[-1]=='features':
tmp.update({"num_params" : len(self.data_handler.features_data['stim_amp'])})
"""
if len(tmp.get("boundaries")[0])<1:
raise sizeError("No boundaries were given!")
#tmp.append(args.get("starting_points"))
self.option_handler.SetOptimizerOptions(tmp)
if self.option_handler.type[-1]!='features':
if self.option_handler.run_controll_dt<self.data_handler.data.step:
print("re-sampling because integration step is smaller then data step")
print((self.option_handler.run_controll_dt,self.data_handler.data.step))
#we have to resample the input trace so it would match the model output
#will use lin interpolation
x=linspace(0,self.option_handler.run_controll_tstop,int(self.option_handler.run_controll_tstop*(1/self.data_handler.data.step)))#x axis of data points
tmp=[]
for i in range(self.data_handler.number_of_traces()):
y=self.data_handler.data.GetTrace(i)#y axis, the values from the input traces, corresponding to x
f=interp1d(x,y)
#we have the continuous trace, we could re-sample it now
new_x=linspace(0,self.option_handler.run_controll_tstop,int(self.option_handler.run_controll_tstop/self.option_handler.run_controll_dt))
#self.trace_reader.SetColumn(i,f(new_x)) the resampled vector replaces the original in the trace reader object
tmp.append(f(new_x))
self.data_handler.data.t_length=len(tmp[0])
self.data_handler.data.freq=self.option_handler.run_controll_tstop/self.option_handler.run_controll_dt
self.data_handler.data.step=self.option_handler.run_controll_dt
transp=list(map(list,list(zip(*tmp))))
self.data_handler.data.data=[]
for n in transp:
self.data_handler.data.SetTrace(n)
#running simulation with smaller resolution is not supported
if self.option_handler.run_controll_dt>self.data_handler.data.step:
self.option_handler.run_controll_dt=self.data_handler.data.step
import re
algo_str=re.sub('_+',"_",re.sub("[\(\[].*?[\)\]]", "", self.option_handler.evo_strat).replace("-","_").replace(" ","_"))
exec("self.optimizer="+algo_str.upper()+"(self.data_handler,self.option_handler)")
f_handler=open(self.option_handler.GetFileOption()+"/"+self.option_handler.GetFileOption().split("/")[-1]+"_settings.xml", 'w')
f_handler.write(self.option_handler.dump(self.ffun_mapper))
f_handler.close()
if self.option_handler.type[-1]!= 'features':
self.feat_str=", ".join([self.ffun_mapper[x.__name__] for x in self.option_handler.feats])
else:
self.feat_str=", ".join(self.option_handler.feats)
try:
if(self.option_handler.simulator == 'Neuron'):
del self.model_handler
except Exception:
pass  # no model yet, nothing to delete
start_time=time.time()
self.optimizer.Optimize()
stop_time=time.time()
self.cands,self.fits = [],[]
if self.option_handler.evo_strat.split(" ")[-1] == "Bluepyopt":
self.cands=[list(normalize(hof,self.optimizer)) for hof in self.optimizer.hall_of_fame]
self.fits=[x.fitness.values for x in self.optimizer.hall_of_fame]
popsize=int(self.option_handler.pop_size)
self.allfits=[self.optimizer.hist.genealogy_history[x].fitness.values for x in self.optimizer.hist.genealogy_history]
self.allpop=[self.optimizer.hist.genealogy_history[x] for x in self.optimizer.hist.genealogy_history]
if self.option_handler.type[-1]!="features":
number_of_traces=self.data_handler.number_of_traces()
else:
number_of_traces=len(self.data_handler.features_data["stim_amp"])
allgens=[]
minfits=[]
maxfits=[]
medfits=[]
cumminfits=[]
with open(self.option_handler.base_dir + "/bpopt_stats.txt" , "w") as out_handler:
out_handler.write("Gen \t Min \t \t Max \t \t Median \t Cumulative Min \n")
for idx in range(0,int(self.option_handler.max_evaluation*2),2):
current_gen=self.allfits[idx*popsize:(idx+1)*popsize]
weighted_sum=numpy.dot(current_gen,self.option_handler.weights*int(number_of_traces))
min_e=numpy.min(weighted_sum)
max_e=numpy.max(weighted_sum)
med_e=numpy.median(weighted_sum)
minfits.append(min_e)
maxfits.append(max_e)
medfits.append(med_e)
if cumminfits:
cumminfits.append(cumminfits[-1]) if min_e>cumminfits[-1] else cumminfits.append(min_e)
else:
cumminfits.append(minfits[-1])
out_handler.write(str(int(idx/2+1))+","+str(min_e)+","+str(max_e)+","+str(med_e)+","+str(cumminfits[-1])+"\n")
with open(self.option_handler.base_dir + "/bpopt_pop.txt" , "w") as out_handler:
out_handler.write("Gen \t Parameters \t \t Fitnesses \n")
for idx in range(0,int(self.option_handler.max_evaluation*2),2):
current_fits=self.allfits[idx*popsize:(idx+1)*popsize]
current_gen=self.allpop[idx*popsize:(idx+1)*popsize]
for gen,fit in zip(current_gen,current_fits):
out_handler.write(str(int(idx/2+1))+":"+str(gen)+":"+str(fit)+"\n")
elif(self.option_handler.evo_strat.split(" ")[-1] == "Pygmo"):
'''
Currently only the best individual with its fitness is passed
'''
self.cands = [self.optimizer.best]
self.fits = [self.optimizer.best_fitness]
print((self.cands, "CANDS"))
print((self.fits, "FITS"))
elif(self.option_handler.evo_strat.split(" ")[-1] == "Base"):
'''
Currently only the best individual with its fitness is passed
'''
self.cands = [x.candidate[0] for x in reversed(self.optimizer.final_pop)]
self.fits = [x.fitness[0] for x in reversed(self.optimizer.final_pop)]
print((self.cands[0], "CANDS"))
print((self.fits[0], "FITS"))
else:
self.optimizer.final_pop.sort(reverse=True)
for i in range(len(self.optimizer.final_pop)):
self.cands.append(self.optimizer.final_pop[i].candidate[0:len(self.option_handler.adjusted_params)])
self.fits.append(self.optimizer.final_pop[i].fitness)
print(("Optimization lasted for ", stop_time-start_time, " s"))
self.optimal_params=self.cands[0]
def FourthStep(self,args={}):
"""
Renormalizes the output of the ``optimizer`` (see optimizerHandler module for more), and runs
a simulation with the optimal parameters to receive an optimal trace.
The components of the fitness value is calculated on this optimal trace.
Settings of the entire work flow are saved into a configuration file named "model name"_settings.xml.
A report of the results is generated in the form of a html document.
:param args: currently not in use
"""
self.final_result=[]
self.error_comps=[]
self.last_fitness=self.optimizer.fit_obj.combineFeatures([self.optimal_params],delete_model=False)
self.renormed_params=self.optimizer.fit_obj.ReNormalize(self.optimal_params)
#calculate the error components
if self.option_handler.type[-1]!= 'features':
k_range=self.data_handler.number_of_traces()
else:
k_range=len(self.data_handler.features_data["stim_amp"])
for k in range(k_range):
self.error_comps.append(self.optimizer.fit_obj.getErrorComponents(k, self.optimizer.fit_obj.model_trace[k]))
trace_handler=open("result_trace"+str(k)+".txt","w")
for l in self.optimizer.fit_obj.model_trace[k]:
trace_handler.write(str(l))
trace_handler.write("\n")
trace_handler.close()
self.final_result.append(self.optimizer.fit_obj.model_trace[k])
if isinstance(self.optimizer.fit_obj.model, externalHandler):
self.optimizer.fit_obj.model.record[0]=[]
name=self.option_handler.model_path.split("/")[-1].split(".")[0]
f_handler=open(name+"_results.html","w")
tmp_str="<!DOCTYPE html>\n<html>\n<body>\n"
tmp_str+=self.htmlStr(str(time.asctime( time.localtime(time.time()) )))+"\n"
tmp_str+="<p>"+self.htmlStyle("Optimization of <b>"+name+".hoc</b> based on: "+self.option_handler.input_dir,self.htmlAlign("center"))+"</p>\n"
tmp_list=[]
#tmp_fit=self.optimizer.fit_obj.ReNormalize(self.optimizer.final_pop[0].candidate[0:len(self.option_handler.adjusted_params)])
tmp_fit=self.renormed_params
for name,mmin,mmax,f in zip(self.option_handler.GetObjTOOpt(),self.option_handler.boundaries[0],self.option_handler.boundaries[1],tmp_fit):
tmp_list.append([str(name),str(mmin),str(mmax),str(f)])
tmp_str+="<center><p>"+self.htmlStyle("Results",self.htmlUnderline(),self.htmlResize(200))+"</p></center>\n"
tmp_str+=self.htmlTable(["Parameter Name","Minimum","Maximum","Optimum"], tmp_list)+"\n"
tmp_str+="<center><p>"+self.htmlStrBold("Fitness: ")
#tmp_str+=self.htmlStrBold(str(self.optimizer.final_pop[0].fitness))+"</p></center>\n"
tmp_str+=self.htmlStrBold(str(self.last_fitness))+"</p></center>\n"
tmp_str+=self.htmlPciture("result_trace.png")+"\n"
for k in list(self.option_handler.GetOptimizerOptions().keys()):
tmp_str+="<p><b>"+k+" =</b> "+str(self.option_handler.GetOptimizerOptions()[k])+"</p>\n"
tmp_str+="<p><b>feats =</b> "+self.feat_str +"</p>\n"
tmp_str+="<p><b>weights =</b> "+ str(self.option_handler.weights)+"</p>\n"
tmp_str+="<p><b>user function =</b></p>\n"
for l in (self.option_handler.u_fun_string.split("\n")[4:-1]):
tmp_str+="<p>"+l+"</p>"
tmp_str+="</body>\n</html>\n"
#error components
tmp_str+="<p><b>Fitness Components:</b></p>\n"
tmp_w_sum=0
tmp_list=[]
for t in self.error_comps:
for c in t:
if self.option_handler.type[-1]!='features':
#tmp_str.append( "*".join([str(c[0]),c[1].__name__]))
tmp_list.append([self.ffun_mapper[c[1].__name__],
str(c[2]),
str(c[0]),
str(c[0]*c[2]),""])
tmp_w_sum +=c[0]*c[2]
else:
tmp_list.append([c[1],
str(c[2]),
str(c[0]),
str(c[0]*c[2]),""])
tmp_w_sum +=c[0]*c[2]
tmp_list.append(["","","","",tmp_w_sum])
tmp_w_sum=0
#print tmp_list
tmp_str+=self.htmlTable(["Name","Value","Weight","Weighted Value","Weighted Sum"], tmp_list)+"\n"
#print tmp_str
#transpose the error comps
tmp_list=[]
for c in zip(*self.error_comps):
tmp=[0]*4
for t_idx in range(len(c)):
#print c[t_idx]
tmp[1]+=c[t_idx][2]
tmp[2]=c[t_idx][0]
tmp[3]+=c[t_idx][2]*c[t_idx][0]
if self.option_handler.type[-1]!='features':
tmp[0]=self.ffun_mapper[c[t_idx][1].__name__]
else:
tmp[0]=(c[t_idx][1])
tmp=list(map(str,tmp))
tmp_list.append(tmp)
#print tmp_list
tmp_str+=self.htmlTable(["Name","Value","Weight","Weighted Value"], tmp_list)+"\n"
#tmp_str+="<center><p><b>weighted sum = "+(str(tmp_w_sum)[0:5])+"</b></p></centered>"
#print tmp_str
f_handler.write(tmp_str)
f_handler.close()
def callGrid(self,resolution):
"""
Calculates fitness values on a defined grid (see optimizerHandler module for more).
This tool is purely for analyzing results, and we do not recommend using it to obtain parameter values.
"""
import copy
self.prev_result=copy.copy(self.optimizer.final_pop)
self.optimizer=grid(self.data_handler,self.optimizer.fit_obj.model,self.option_handler,resolution)
self.optimizer.Optimize(self.optimal_params)
self.grid_result=copy.copy(self.optimizer.final_pop)
self.optimizer.final_pop=self.prev_result
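# --- Illustration (not part of the class above) -----------------------------
# Stand-alone sketch of the re-sampling step performed in ThirdStep: when the
# integration step is finer than the recorded data step, every input trace is
# modelled with a piecewise-linear interpolant and re-evaluated on the denser
# simulation time grid. The numbers below are made up for the illustration.
def _example_resample_trace(tstop=100.0, data_step=0.2, sim_dt=0.05):
    x = linspace(0, tstop, int(tstop / data_step))      # original sample times
    y = numpy.sin(0.1 * x)                              # a fake recorded trace
    f = interp1d(x, y)                                  # linear interpolant, as in ThirdStep
    new_x = linspace(0, tstop, int(tstop / sim_dt))     # simulation time grid
    return f(new_x)                                     # trace re-sampled at sim_dt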
|
KaliLab/optimizer
|
optimizer/Core.py
|
Python
|
lgpl-2.1
| 21,650
|
[
"NEURON"
] |
663e035054da403abd7247357228cde09cc16fac1cac1b6411d3a141b0805731
|
import unittest,zlib
from DIRAC.WorkloadManagementSystem.DB.SandboxDB import SandboxDB
class JobDBTestCase(unittest.TestCase):
""" Base class for the SandboxDB test cases
"""
def setUp(self):
print
self.sDB = SandboxDB('Test',20)
class SandboxCase(JobDBTestCase):
""" TestJobDB represents a test suite for the JobDB database front-end
"""
def test_uploadFile(self):
sandbox = 'out'
#testfile = open('test.jdl','r')
testfile = open('/home/atsareg/distributive/skype-1.3.0.53-1mdk.i586.rpm','r')
body = testfile.read()
#body = zlib.compress(body)
testfile.close()
result = self.sDB.storeSandboxFile(1,sandbox+'putFile1',body,sandbox)
print result
self.assert_( result['OK'])
result = self.sDB.getSandboxFile(1,sandbox+'putFile1',sandbox)
self.assert_( result['OK'])
newbody = result['Value']
self.assertEqual(body,newbody)
result = self.sDB.getFileNames(1,sandbox)
self.assert_( result['OK'])
print result
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(SandboxCase)
# suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(JobRemovalCase))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
|
avedaee/DIRAC
|
WorkloadManagementSystem/DB/test/TestSandboxDB.py
|
Python
|
gpl-3.0
| 1,326
|
[
"DIRAC"
] |
25fa3ab95dc73e8da44789520411b0c75c556ab18b8bf3e5c52b9d2780217eee
|
# Copyright (C) 2013-2020 2ndQuadrant Limited
#
# Client Utilities for Barman, Backup and Recovery Manager for PostgreSQL
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import random
import re
import subprocess
import tarfile
from contextlib import closing
from io import BytesIO
import mock
import pytest
from barman.clients import walarchive
def pipe_helper():
"""
Create two BytesIO objects (input_mock, output_mock) to simulate a pipe.
When the input_mock is closed, the content is copied in output_mock,
ready to be used.
:rtype: tuple[BytesIO, BytesIO]
"""
input_mock = BytesIO()
output_mock = BytesIO()
# Save the content of input_mock into the output_mock before closing it
def save_before_close(orig_close=input_mock.close):
output_mock.write(input_mock.getvalue())
output_mock.seek(0)
orig_close()
input_mock.close = save_before_close
return input_mock, output_mock
# noinspection PyMethodMayBeStatic
class TestMain(object):
@mock.patch('barman.clients.walarchive.subprocess.Popen')
def test_ok(self, popen_mock, tmpdir):
# Prepare some content
source = tmpdir.join('wal_dir/000000080000ABFF000000C1')
source.write('something', ensure=True)
source_hash = source.computehash()
# Prepare the fake Pipe
input_mock, output_mock = pipe_helper()
popen_mock.return_value.stdin = input_mock
popen_mock.return_value.returncode = 0
walarchive.main(['-c', '/etc/bwa.conf', '-U', 'user', 'a.host',
'a-server', source.strpath])
popen_mock.assert_called_once_with(
['ssh', '-q', '-T', 'user@a.host',
'barman', "--config='/etc/bwa.conf'", 'put-wal', 'a-server'],
stdin=subprocess.PIPE)
# Verify the tar content
tar = tarfile.open(mode='r|', fileobj=output_mock)
first = tar.next()
with closing(tar.extractfile(first)) as fp:
first_content = fp.read().decode()
assert first.name == '000000080000ABFF000000C1'
assert first_content == 'something'
second = tar.next()
with closing(tar.extractfile(second)) as fp:
second_content = fp.read().decode()
assert second.name == 'MD5SUMS'
assert second_content == \
'%s *000000080000ABFF000000C1\n' % source_hash
assert tar.next() is None
@mock.patch('barman.clients.walarchive.RemotePutWal')
def test_error_dir(self, rpw_mock, tmpdir, capsys):
with pytest.raises(SystemExit) as exc:
walarchive.main(['a.host', 'a-server', tmpdir.strpath])
assert exc.value.code == 2
assert not rpw_mock.called
out, err = capsys.readouterr()
assert not out
assert 'WAL_PATH cannot be a directory' in err
@mock.patch('barman.clients.walarchive.RemotePutWal')
def test_error_io(self, rpw_mock, tmpdir, capsys):
# Prepare some content
source = tmpdir.join('wal_dir/000000080000ABFF000000C1')
source.write('something', ensure=True)
rpw_mock.side_effect = EnvironmentError
with pytest.raises(SystemExit) as exc:
walarchive.main(['a.host', 'a-server', source.strpath])
assert exc.value.code == 2
out, err = capsys.readouterr()
assert not out
assert 'Error executing ssh' in err
@mock.patch('barman.clients.walarchive.RemotePutWal')
def test_error_ssh(self, rpw_mock, tmpdir, capsys):
# Prepare some content
source = tmpdir.join('wal_dir/000000080000ABFF000000C1')
source.write('something', ensure=True)
rpw_mock.return_value.returncode = 255
with pytest.raises(SystemExit) as exc:
walarchive.main(['a.host', 'a-server', source.strpath])
assert exc.value.code == 3
out, err = capsys.readouterr()
assert not out
assert 'Connection problem with ssh' in err
@mock.patch('barman.clients.walarchive.RemotePutWal')
def test_error_barman(self, rpw_mock, tmpdir, capsys):
# Prepare some content
source = tmpdir.join('wal_dir/000000080000ABFF000000C1')
source.write('something', ensure=True)
rpw_mock.return_value.returncode = 1
with pytest.raises(SystemExit) as exc:
walarchive.main(['a.host', 'a-server', source.strpath])
assert exc.value.code == 1
out, err = capsys.readouterr()
assert not out
assert "Remote 'barman put-wal' command has failed" in err
@mock.patch('barman.clients.walarchive.subprocess.Popen')
def test_connectivity_test_ok(self, popen_mock, capsys):
popen_mock.return_value.communicate.return_value = ('Good test!', '')
with pytest.raises(SystemExit) as exc:
walarchive.main(['a.host', 'a-server', '--test', 'dummy_wal'])
assert exc.value.code == 0
out, err = capsys.readouterr()
assert "Good test!" in out
assert not err
@mock.patch('barman.clients.walarchive.subprocess.Popen')
def test_connectivity_test_error(self, popen_mock, capsys):
popen_mock.return_value.communicate.side_effect = subprocess.\
CalledProcessError(255, "remote barman")
with pytest.raises(SystemExit) as exc:
walarchive.main(['a.host', 'a-server', '--test', 'dummy_wal'])
assert exc.value.code == 2
out, err = capsys.readouterr()
assert not out
assert ("ERROR: Impossible to invoke remote put-wal: "
"Command 'remote barman' returned non-zero "
"exit status 255") in err
# noinspection PyMethodMayBeStatic
class TestRemotePutWal(object):
@mock.patch('barman.clients.walarchive.subprocess.Popen')
def test_str_source_file(self, popen_mock, tmpdir):
input_mock, output_mock = pipe_helper()
popen_mock.return_value.stdin = input_mock
popen_mock.return_value.returncode = 0
config = mock.Mock(
user='barman',
barman_host='remote.barman.host',
config=None,
server_name='this-server',
test=False)
source_file = tmpdir.join('test-source/000000010000000000000001')
source_file.write("test-content", ensure=True)
source_path = source_file.strpath
# In python2 the source_path can be a unicode object
if hasattr(source_path, 'decode'):
source_path = source_path.decode()
rpw = walarchive.RemotePutWal(config, source_path)
popen_mock.assert_called_once_with(
['ssh', '-q', '-T', 'barman@remote.barman.host',
'barman', 'put-wal', 'this-server'], stdin=subprocess.PIPE)
assert rpw.returncode == 0
tar = tarfile.open(mode='r|', fileobj=output_mock)
first = tar.next()
with closing(tar.extractfile(first)) as fp:
first_content = fp.read().decode()
assert first.name == '000000010000000000000001'
assert first_content == 'test-content'
second = tar.next()
with closing(tar.extractfile(second)) as fp:
second_content = fp.read().decode()
assert second.name == 'MD5SUMS'
assert second_content == \
'%s *000000010000000000000001\n' % source_file.computehash('md5')
assert tar.next() is None
@mock.patch('barman.clients.walarchive.subprocess.Popen')
def test_error(self, popen_mock, tmpdir):
input_mock = BytesIO()
popen_mock.return_value.stdin = input_mock
config = mock.Mock(
user='barman',
barman_host='remote.barman.host',
config=None,
server_name='this-server',
test=False)
source_file = tmpdir.join('test-source/000000010000000000000001')
source_file.write("test-content", ensure=True)
source_path = source_file.strpath
# Simulate a remote failure
popen_mock.return_value.returncode = 5
# In python2 the source_path can be a unicode object
if hasattr(source_path, 'decode'):
source_path = source_path.decode()
rwa = walarchive.RemotePutWal(config, source_path)
popen_mock.assert_called_once_with(
['ssh', '-q', '-T', 'barman@remote.barman.host',
'barman', 'put-wal', 'this-server'], stdin=subprocess.PIPE)
assert rwa.returncode == 5
# noinspection PyMethodMayBeStatic
class TestChecksumTarFile(object):
def test_tar(self, tmpdir):
# Prepare some content
source = tmpdir.join('source.file')
source.write('something', ensure=True)
source.setmtime(source.mtime() - 100) # Set mtime to 100 seconds ago
source_hash = source.computehash()
# Write the content in a tar file
storage = tmpdir.join('storage.tar')
with closing(walarchive.ChecksumTarFile.open(
storage.strpath, mode='w:')) as tar:
tar.add(source.strpath, source.basename)
checksum = tar.members[0].data_checksum
assert checksum == source_hash
# Double close should not give any issue
tar.close()
lab = tmpdir.join('lab').ensure(dir=True)
tar = tarfile.open(storage.strpath, mode='r:')
tar.extractall(lab.strpath)
tar.close()
dest_file = lab.join(source.basename)
sum_file = lab.join('MD5SUMS')
sums = {}
for line in sum_file.readlines():
checksum, name = re.split(r' [* ]', line.rstrip(), 1)
sums[name] = checksum
assert list(sums.keys()) == [source.basename]
assert sums[source.basename] == source_hash
assert dest_file.computehash() == source_hash
# Verify file mtime
# Use a round(2) comparison because float is not precise in Python 2.x
assert round(dest_file.mtime(), 2) == round(source.mtime(), 2)
@pytest.mark.parametrize(
['size', 'mode'],
[
[0, 0],
[10, None],
[10, 0],
[10, 1],
[10, -5],
[16 * 1024, 0],
[32 * 1024 - 1, -1],
[32 * 1024 - 1, 0],
[32 * 1024 - 1, 1],
])
def test_md5copyfileobj(self, size, mode):
"""
Test md5copyfileobj different size.
If mode is None, copy the whole data.
If mode is <= 0, copy the data passing the exact length.
If mode is > 0, require more bytes than available, raising an error
:param int size: The size of random data to use for the test
:param int|None mode: the mode of operation, see above description
"""
src = BytesIO()
dst = BytesIO()
# Generate `size` random bytes
src_string = bytearray(random.getrandbits(8) for _ in range(size))
src.write(src_string)
src.seek(0)
if mode and mode > 0:
# Require more bytes than available. Make sure we get an exception
with pytest.raises(IOError):
walarchive.md5copyfileobj(src, dst, size + mode)
else:
if mode is None:
# Copy the whole file until the end
md5 = walarchive.md5copyfileobj(src, dst)
else:
# Copy only a portion of the file
md5 = walarchive.md5copyfileobj(src, dst, size + mode)
src_string = src_string[0:size + mode]
# Validate the content and the checksum
assert dst.getvalue() == src_string
assert md5 == hashlib.md5(bytes(src_string)).hexdigest()
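# --- Usage sketch (illustrative; not part of the test suite) ----------------
# md5copyfileobj, exercised by the parametrized test above, copies bytes from
# one file object to another and returns the MD5 of what was copied. A
# minimal stand-alone call:
def _example_md5copyfileobj():
    src, dst = BytesIO(b"some WAL bytes"), BytesIO()
    digest = walarchive.md5copyfileobj(src, dst)   # copy everything, get hex MD5
    assert dst.getvalue() == b"some WAL bytes"
    assert digest == hashlib.md5(b"some WAL bytes").hexdigest()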
|
2ndquadrant-it/barman
|
tests/test_barman_wal_archive.py
|
Python
|
gpl-3.0
| 12,267
|
[
"BWA"
] |
5f56b12835873abed0c7c9c7c33430504907df3393ca269edcb3c1eb9bd57aed
|
# Natural Language Toolkit: Expectation Maximization Clusterer
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import numpy
from api import *
from util import *
class EMClusterer(VectorSpaceClusterer):
"""
The Gaussian EM clusterer models the vectors as being produced by
a mixture of k Gaussian sources. The parameters of these sources
(prior probability, mean and covariance matrix) are then found to
maximise the likelihood of the given data. This is done with the
expectation maximisation algorithm. It starts with k arbitrarily
chosen means, priors and covariance matrices. It then calculates
the membership probabilities for each vector in each of the
clusters; this is the 'E' step. The cluster parameters are then
updated in the 'M' step using the maximum likelihood estimate from
the cluster membership probabilities. This process continues until
the likelihood of the data does not significantly increase.
"""
def __init__(self, initial_means, priors=None, covariance_matrices=None,
conv_threshold=1e-6, bias=0.1, normalise=False,
svd_dimensions=None):
"""
Creates an EM clusterer with the given starting parameters,
convergence threshold and vector mangling parameters.
@param initial_means: the means of the gaussian cluster centers
@type initial_means: [seq of] numpy array or seq of SparseArray
@param priors: the prior probability for each cluster
@type priors: numpy array or seq of float
@param covariance_matrices: the covariance matrix for each cluster
@type covariance_matrices: [seq of] numpy array
@param conv_threshold: maximum change in likelihood before deemed
convergent
@type conv_threshold: int or float
@param bias: variance bias used to ensure non-singular covariance
matrices
@type bias: float
@param normalise: should vectors be normalised to length 1
@type normalise: boolean
@param svd_dimensions: number of dimensions to use in reducing vector
dimensionality with SVD
@type svd_dimensions: int
"""
VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
self._means = numpy.array(initial_means, numpy.float64)
self._num_clusters = len(initial_means)
self._conv_threshold = conv_threshold
self._covariance_matrices = covariance_matrices
self._priors = priors
self._bias = bias
def num_clusters(self):
return self._num_clusters
def cluster_vectorspace(self, vectors, trace=False):
assert len(vectors) > 0
# set the parameters to initial values
dimensions = len(vectors[0])
means = self._means
priors = self._priors
if not priors:
priors = self._priors = numpy.ones(self._num_clusters,
numpy.float64) / self._num_clusters
covariances = self._covariance_matrices
if not covariances:
covariances = self._covariance_matrices = \
[ numpy.identity(dimensions, numpy.float64)
for i in range(self._num_clusters) ]
# do the E and M steps until the likelihood plateaus
lastl = self._loglikelihood(vectors, priors, means, covariances)
converged = False
while not converged:
if trace: print 'iteration; loglikelihood', lastl
# E-step, calculate hidden variables, h[i,j]
h = numpy.zeros((len(vectors), self._num_clusters),
numpy.float64)
for i in range(len(vectors)):
for j in range(self._num_clusters):
h[i,j] = priors[j] * self._gaussian(means[j],
covariances[j], vectors[i])
h[i,:] /= sum(h[i,:])
# M-step, update parameters - cvm, p, mean
for j in range(self._num_clusters):
covariance_before = covariances[j]
new_covariance = numpy.zeros((dimensions, dimensions),
numpy.float64)
new_mean = numpy.zeros(dimensions, numpy.float64)
sum_hj = 0.0
for i in range(len(vectors)):
delta = vectors[i] - means[j]
new_covariance += h[i,j] * \
numpy.multiply.outer(delta, delta)
sum_hj += h[i,j]
new_mean += h[i,j] * vectors[i]
covariances[j] = new_covariance / sum_hj
means[j] = new_mean / sum_hj
priors[j] = sum_hj / len(vectors)
# bias term to stop covariance matrix being singular
covariances[j] += self._bias * \
numpy.identity(dimensions, numpy.float64)
# calculate likelihood - FIXME: may be broken
l = self._loglikelihood(vectors, priors, means, covariances)
# check for convergence
if abs(lastl - l) < self._conv_threshold:
converged = True
lastl = l
def classify_vectorspace(self, vector):
best = None
for j in range(self._num_clusters):
p = self._priors[j] * self._gaussian(self._means[j],
self._covariance_matrices[j], vector)
if not best or p > best[0]:
best = (p, j)
return best[1]
def likelihood_vectorspace(self, vector, cluster):
cid = self.cluster_names().index(cluster)
return self._priors[cid] * self._gaussian(self._means[cid],
self._covariance_matrices[cid], vector)
def _gaussian(self, mean, cvm, x):
m = len(mean)
assert cvm.shape == (m, m), \
'bad sized covariance matrix, %s' % str(cvm.shape)
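# density evaluated below:
#   N(x | mean, cvm) = (2*pi)**(-m/2) * det(cvm)**(-0.5)
#                      * exp(-0.5 * (x - mean)^T * inv(cvm) * (x - mean))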
try:
det = numpy.linalg.det(cvm)
inv = numpy.linalg.inv(cvm)
a = det ** -0.5 * (2 * numpy.pi) ** (-m / 2.0)
dx = x - mean
b = -0.5 * numpy.dot( numpy.dot(dx, inv), dx)
return a * numpy.exp(b)
except OverflowError:
# happens when the exponent is negative infinity - i.e. b = 0
# i.e. the inverse of cvm is huge (cvm is almost zero)
return 0
def _loglikelihood(self, vectors, priors, means, covariances):
llh = 0.0
for vector in vectors:
p = 0
for j in range(len(priors)):
p += priors[j] * \
self._gaussian(means[j], covariances[j], vector)
llh += numpy.log(p)
return llh
def __repr__(self):
return '<EMClusterer means=%s>' % list(self._means)
def demo():
"""
Non-interactive demonstration of the clusterers with simple 2-D data.
"""
from nltk import cluster
# example from figure 14.10, page 519, Manning and Schutze
vectors = [numpy.array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]]
means = [[4, 2], [4, 2.01]]
clusterer = cluster.EMClusterer(means, bias=0.1)
clusters = clusterer.cluster(vectors, True, trace=True)
print 'Clustered:', vectors
print 'As: ', clusters
print
for c in range(2):
print 'Cluster:', c
print 'Prior: ', clusterer._priors[c]
print 'Mean: ', clusterer._means[c]
print 'Covar: ', clusterer._covariance_matrices[c]
print
# classify a new vector
vector = numpy.array([2, 2])
print 'classify(%s):' % vector,
print clusterer.classify(vector)
# show the classification probabilities
vector = numpy.array([2, 2])
print 'classification_probdist(%s):' % vector
pdist = clusterer.classification_probdist(vector)
for sample in pdist.samples():
print '%s => %.0f%%' % (sample,
pdist.prob(sample) *100)
#
# The following demo code is broken.
#
# # use a set of tokens with 2D indices
# vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
# # test the EM clusterer with means given by k-means (2) and
# # dimensionality reduction
# clusterer = cluster.KMeans(2, euclidean_distance, svd_dimensions=1)
# print 'Clusterer:', clusterer
# clusters = clusterer.cluster(vectors)
# means = clusterer.means()
# print 'Means:', clusterer.means()
# print
# clusterer = cluster.EMClusterer(means, svd_dimensions=1)
# clusters = clusterer.cluster(vectors, True)
# print 'Clusterer:', clusterer
# print 'Clustered:', str(vectors)[:60], '...'
# print 'As:', str(clusters)[:60], '...'
# print
# # classify a new vector
# vector = numpy.array([3, 3])
# print 'classify(%s):' % vector,
# print clusterer.classify(vector)
# print
# # show the classification probabilities
# vector = numpy.array([2.2, 2])
# print 'classification_probdist(%s)' % vector
# pdist = clusterer.classification_probdist(vector)
# for sample in pdist:
# print '%s => %.0f%%' % (sample, pdist.prob(sample) *100)
if __name__ == '__main__':
demo()
|
tadgh/ArgoRevisit
|
third_party/nltk/cluster/em.py
|
Python
|
apache-2.0
| 9,501
|
[
"Gaussian"
] |
4ebdd5745eb13a432f979be9ee83095f6d7440052b14699182f964dcca8795fa
|
import logging
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponseRedirect
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic.list import ListView
from api.models import GalaxyInstance
from registration.backends.simple.views import RegistrationView
log = logging.getLogger(__name__)
class GalaxyInstanceEdit(UpdateView):
model = GalaxyInstance
slug_field = 'id'
fields = ('url', 'title', 'description')
class GalaxyInstanceView(DetailView):
model = GalaxyInstance
slug_field = 'id'
def get_context_data(self, **kwargs):
context = super(GalaxyInstanceView, self).get_context_data(**kwargs)
context['url'] = "{}://{}{}".format(self.request.scheme, self.request.get_host(), reverse_lazy('home'))
return context
class GalaxyInstanceConfig(DetailView):
model = GalaxyInstance
slug_field = 'id'
template_name_suffix = '.yml'
class GalaxyInstanceCreateSuccess(DetailView):
model = GalaxyInstance
slug_field = 'id'
template_name_suffix = '_create_success'
def get_context_data(self, **kwargs):
context = super(GalaxyInstanceCreateSuccess, self).get_context_data(**kwargs)
full_url = self.request.build_absolute_uri(str(reverse_lazy('galaxy-instance-create-success', args=(self.object.id, ))))
components = full_url.split('/')[0:-3] + ['api', 'v1', 'upload']
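# hypothetical example of the rewrite (the actual layout depends on the urlconf):
# .../instance/<id>/success -> .../api/v1/upload, i.e. drop the last three path
# components and append the upload endpoint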
context['api_url'] = '/'.join(components)
return context
class GalaxyInstanceCreate(CreateView):
model = GalaxyInstance
fields = ('url', 'title', 'description')
template_name_suffix = '_create'
def get_success_url(self):
return reverse_lazy(
'galaxy-instance-create-success',
kwargs={'slug': self.object.id}
)
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.save()
self.object.owners.add(self.request.user)
return HttpResponseRedirect(self.get_success_url())
class GalaxyInstanceListView(ListView):
model = GalaxyInstance
class CustomRegistrationView(RegistrationView):
def get_success_url(self, user):
return reverse_lazy('home')
|
erasche/galactic-radio-telescope
|
web/views.py
|
Python
|
agpl-3.0
| 2,279
|
[
"Galaxy"
] |
d1d4748cf1f1189c649727061eabc62239c7b6a75aa0ed1f4b314f33bd81c8d7
|
#!/usr/bin/python
# sim1: simulate effect of B/F on shape measurement if it's not accounted for
import numpy as np
import os
import sys
import math
import matplotlib
matplotlib.use('Pdf')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.font_manager as fm
try:
import galsim
from galsim.cdmodel import *
except ImportError:
path, filename = os.path.split(__file__)
sys.path.append(os.path.abspath(os.path.join(path, "..")))
import galsim
from galsim.cdmodel import *
def secondmoments ( stamp ):
# calculates second moments and returns list [ q11, q22, q12, norm, x, y ]
# this is less elegant than it could be
# stamp needs to be a background-subtracted image
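# the returned Qij are flux-normalised central second moments about the
# geometric stamp centre, e.g. q11 = sum_xy I(x,y)*(x-xc)**2 / sum_xy I(x,y)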
moments = [ 0, 0, 0, 0, 0, 0 ] # q11 q22 q12 norm x y
centx = (stamp.xmin+stamp.xmax+1)/2.
centy = (stamp.ymin+stamp.ymax+1)/2.
for x in range(stamp.xmin,stamp.xmax+1):
for y in range(stamp.ymin,stamp.ymax+1):
moments[0] += stamp.at(x,y)*(x-centx)*(x-centx) # q11
moments[1] += stamp.at(x,y)*(y-centy)*(y-centy) # q22
moments[2] += stamp.at(x,y)*(x-centx)*(y-centy) # q12
moments[3] += stamp.at(x,y)
moments[4] += stamp.at(x,y)*(x-centx)
moments[5] += stamp.at(x,y)*(y-centy)
moments[0] /= moments[3]
moments[1] /= moments[3]
moments[2] /= moments[3]
moments[4] /= moments[3]
moments[5] /= moments[3]
return moments
def m(delta_Ixx, delta_Iyy, r_gal):
return - (delta_Ixx + delta_Iyy)/(r_gal**2)
def c1(delta_Ixx, delta_Iyy, r_gal):
return (delta_Ixx - delta_Iyy)/(2*r_gal**2)
def c2(delta_Ixy, r_gal):
return - (delta_Ixy)/(r_gal**2)
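# These translate moment changes into approximate shear-calibration biases for
# a source of characteristic size r_gal: m is the multiplicative bias from the
# change of the trace (Ixx + Iyy), while c1/c2 are additive biases from the
# changes of the moment differences.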
flux_vec=[1000, 3000, 5000, 10000, 15000] #,30000, 50000, 100000]
e1, e2 = float(sys.argv[1]), float(sys.argv[2])
m_nl_vec,c1_nl_vec,c2_nl_vec=[],[],[]
m_cd_vec,c1_cd_vec,c2_cd_vec=[],[],[]
f=lambda x: x - 3.566e-7*x*x
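# simple quadratic voltage-nonlinearity model applied to the images below:
# measured = Q - 3.566e-7 * Q**2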
for flux in flux_vec:
## Gaussian PSF
#create the object at given flux
psf_flux=flux
psf_e1, psf_e2= e1, e2
psf_sigma=0.5/0.27/2.35
psf=galsim.Gaussian (flux=flux, sigma=psf_sigma)
psf = psf.shear(galsim.Shear(g1=psf_e1, g2=psf_e2))
psf= psf.shift (0.5, 0.5)
psf_image_original=psf.drawImage (scale=0.27)
# work on a copy: applyNonlinearity modifies the image in place and the
# original is reused below for the charge-deflection (B/F) measurement
psf_image = psf_image_original.copy()
psf_moments= secondmoments (psf_image_original)
Ixx_psf=psf_moments [0]
Iyy_psf=psf_moments [1]
Ixy_psf=psf_moments [2]
# Apply non-linearity
psf_image.applyNonlinearity(f)
nl_psf_moments= secondmoments (psf_image)
Ixx_nl_psf=nl_psf_moments [0]
Iyy_nl_psf=nl_psf_moments [1]
Ixy_nl_psf=nl_psf_moments [2]
m_var=m(Ixx_psf - Ixx_nl_psf,Iyy_psf - Iyy_nl_psf, psf_sigma)
c1_var=c1(Ixx_psf - Ixx_nl_psf,Iyy_psf - Iyy_nl_psf, psf_sigma)
c2_var=c2(Ixy_psf - Ixy_nl_psf, psf_sigma)
m_nl_vec.append(m_var)
c1_nl_vec.append(c1_var)
c2_nl_vec.append(c2_var)
# Apply BF from CCDs
cd = PowerLawCD(5, 5.e-7, 5.e-7, 1.5e-7, 1.5e-7, 2.5e-7, 2.5e-7, 1.3)
# a symmetric version similar to DECam or slightly stronger
# note that this is not a particularly physical model, but similar enough for testing
# for real simulations, use the actual fitted a coefficient with the base class
# apply charge deflection
psf_image_cd = cd.applyForward(psf_image_original)
cd_psf_moments = secondmoments (psf_image_cd)
Ixx_cd_psf=cd_psf_moments [0]
Iyy_cd_psf=cd_psf_moments [1]
Ixy_cd_psf=cd_psf_moments [2]
m_var=m(Ixx_psf - Ixx_cd_psf,Iyy_psf - Iyy_cd_psf, psf_sigma)
c1_var=c1(Ixx_psf - Ixx_cd_psf,Iyy_psf - Iyy_cd_psf, psf_sigma)
c2_var=c2(Ixy_psf - Ixy_cd_psf, psf_sigma)
m_cd_vec.append(m_var)
c1_cd_vec.append(c1_var)
c2_cd_vec.append(c2_var)
pp=PdfPages("mc.pdf")
#### PLOTS
#### Do the plotting here
plt.minorticks_on()
#plt.tight_layout()
### matplotlib 1.1's 'style' package is not available here; modify the matplotlibrc parameters instead
import matplotlib as mpl
mpl.rc('lines', linewidth=1, color='black', linestyle='-')
mpl.rc('font', family='serif',weight='normal', size=10.0 )
mpl.rc('text', color='black', usetex=False)
mpl.rc('axes', edgecolor='black', linewidth=1, grid=False, titlesize='x-large', labelsize='x-large', labelweight='normal',labelcolor='black')
mpl.rc('axes.formatter', limits=[-4,4])
mpl.rcParams['xtick.major.size']=7
mpl.rcParams['xtick.minor.size']=4
mpl.rcParams['xtick.major.pad']=8
mpl.rcParams['xtick.minor.pad']=8
mpl.rcParams['xtick.labelsize']= 'x-large'
mpl.rcParams['xtick.minor.width']= 1.0
mpl.rcParams['xtick.major.width']= 1.0
mpl.rcParams['ytick.major.size']=7
mpl.rcParams['ytick.minor.size']=4
mpl.rcParams['ytick.major.pad']=8
mpl.rcParams['ytick.minor.pad']=8
mpl.rcParams['ytick.labelsize']= 'x-large'
mpl.rcParams['ytick.minor.width']= 1.0
mpl.rcParams['ytick.major.width']= 1.0
mpl.rc ('legend', numpoints=1, fontsize='x-large', shadow=False, frameon=False)
print flux_vec
print "NL: "
print "m: ", m_nl_vec
print "c1: ", c1_nl_vec
print "c2: ", c2_nl_vec
print "BF: "
print "m: ", m_cd_vec
print "c1: ", c1_cd_vec
print "c2: ", c2_cd_vec
#### Change PSF profile: optical?
m_nl_opt_vec,c1_nl_opt_vec, c2_nl_opt_vec=[],[],[]
m_cd_opt_vec,c1_cd_opt_vec, c2_cd_opt_vec=[],[],[]
opt_defocus=0. #0.53 # wavelengths
opt_a1=0. #-0.29 # wavelengths
opt_a2=0. #0.12 # wavelengths
opt_c1=0. #0.64 # wavelengths
opt_c2=0. #-0.33 # wavelengths
opt_obscuration=0.4 # linear scale size of secondary mirror obscuration
lam = 1000 # nm NB: don't use lambda - that's a reserved word.
tel_diam = 2.4 # meters
pixel_scale = 0.23 # arcsec / pixel
lam_over_diam = lam * 1.e-9 / tel_diam * galsim.radians
lam_over_diam = lam_over_diam / galsim.arcsec
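# lam_over_diam is lambda/D in radians converted to arcsec, i.e. the
# diffraction scale passed to galsim.OpticalPSF below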
for flux in flux_vec:
### Optical PSF
psf_flux=flux
psf_e1, psf_e2= e1, e2
psf=galsim.OpticalPSF(lam_over_diam,
defocus = opt_defocus,
coma1 = opt_c1, coma2 = opt_c2,
astig1 = opt_a1, astig2 = opt_a2,
obscuration = opt_obscuration,
flux=psf_flux)
psf = psf.shear(galsim.Shear(g1=psf_e1, g2=psf_e2))
psf= psf.shift (0.5, 0.5)
psf_image_original=psf.drawImage (scale=0.27)
# work on a copy: applyNonlinearity modifies the image in place and the
# original is reused below for the charge-deflection (B/F) measurement
psf_image = psf_image_original.copy()
psf_moments= secondmoments (psf_image_original)
Ixx_psf=psf_moments [0]
Iyy_psf=psf_moments [1]
Ixy_psf=psf_moments [2]
# Apply non-linearity
psf_image.applyNonlinearity(f)
nl_psf_moments= secondmoments (psf_image)
Ixx_nl_psf=nl_psf_moments [0]
Iyy_nl_psf=nl_psf_moments [1]
Ixy_nl_psf=nl_psf_moments [2]
m_var=m(Ixx_psf - Ixx_nl_psf,Iyy_psf - Iyy_nl_psf, psf_sigma)
c1_var=c1(Ixx_psf - Ixx_nl_psf,Iyy_psf - Iyy_nl_psf, psf_sigma)
c2_var=c2(Ixy_psf - Ixy_nl_psf, psf_sigma)
m_nl_opt_vec.append(m_var)
c1_nl_opt_vec.append(c1_var)
c2_nl_opt_vec.append(c2_var)
# Apply BF from CCDs
cd = PowerLawCD(5, 5.e-7, 5.e-7, 1.5e-7, 1.5e-7, 2.5e-7, 2.5e-7, 1.3)
# a symmetric version similar to DECam or slightly stronger
# note that this is not a particularly physical model, but similar enough for testing
# for real simulations, use the actual fitted a coefficient with the base class
# apply charge deflection
psf_image_cd = cd.applyForward(psf_image_original)
cd_psf_moments = secondmoments (psf_image_cd)
Ixx_cd_psf=cd_psf_moments [0]
Iyy_cd_psf=cd_psf_moments [1]
Ixy_cd_psf=cd_psf_moments [2]
m_var=m(Ixx_psf - Ixx_cd_psf,Iyy_psf - Iyy_cd_psf, psf_sigma)
c1_var=c1(Ixx_psf - Ixx_cd_psf,Iyy_psf - Iyy_cd_psf, psf_sigma)
c2_var=c2(Ixy_psf - Ixy_cd_psf, psf_sigma)
m_cd_opt_vec.append(m_var)
c1_cd_opt_vec.append(c1_var)
c2_cd_opt_vec.append(c2_var)
print " "
print " "
print "Optical PSF"
print flux_vec
print "NL: "
print "m: ", m_nl_opt_vec
print "c1: ", c1_nl_opt_vec
print "c2: ", c2_nl_opt_vec
print "BF: "
print "m: ", m_cd_opt_vec
print "c1: ", c1_cd_opt_vec
print "c2: ", c2_cd_opt_vec
###### PLOTS
## 1: m
## Plot parameters
plt.subplots_adjust(hspace=0.01, wspace=0.01)
prop = fm.FontProperties(size=10)
marker_size=11
loc_label = "upper left"
visible_x, visible_y = True, True
grid=True
ymin, ymax = -0.02, 0.02
m_req=1e-3
c_req=1e-4
fig = plt.figure()
ax = fig.add_subplot (111)
ax.errorbar( flux_vec, m_nl_vec, yerr=None, ecolor = 'r', label='Gaussian, vnl', fmt='r.-', markersize=marker_size)
ax.errorbar( flux_vec, m_cd_vec, yerr=None, ecolor = 'r', label='Gaussian, BF' , fmt='rx--', markersize=marker_size)
ax.errorbar( flux_vec, m_nl_opt_vec, yerr=None, ecolor = 'b', label='Optical, vnl', fmt='b.-', markersize=marker_size)
ax.errorbar( flux_vec, m_cd_opt_vec, yerr=None , ecolor = 'b', label='Optical, BF' , fmt='bx--', markersize=marker_size)
plt.axhspan(-m_req, m_req, facecolor='0.5', alpha=0.3)
ax.set_xticklabels([int(x) for x in ax.get_xticks()], visible=visible_x)
lx=ax.set_xlabel(r"Flux", visible=visible_x)
#lx.set_fontsize(font_size)
ax.set_xscale('linear')
ax.set_yticklabels(ax.get_yticks(), visible= visible_y)
ly=ax.set_ylabel(r"m", visible=visible_y)
#ly.set_fontsize(font_size)
ax.set_yscale('linear')
#plt.ylim ([ymin, ymax])
ax.legend(loc=loc_label , fancybox=True, ncol=1, numpoints=1, prop = prop)
plt.grid(grid)
pp.savefig()
## 2: c1
fig = plt.figure()
ax = fig.add_subplot (111)
ax.errorbar( flux_vec, c1_nl_vec, yerr=None, ecolor = 'r', label='Gaussian, vnl', fmt='r.-', markersize=marker_size)
ax.errorbar( flux_vec, c1_cd_vec, yerr=None, ecolor = 'r', label='Gaussian, BF' , fmt='rx--', markersize=marker_size)
ax.errorbar( flux_vec, c1_nl_opt_vec, yerr=None, ecolor = 'b', label='Optical, vnl', fmt='b.-', markersize=marker_size)
ax.errorbar( flux_vec, c1_cd_opt_vec, yerr=None , ecolor = 'b', label='Optical, BF' , fmt='bx--', markersize=marker_size)
plt.axhspan(-c_req, c_req, facecolor='0.5', alpha=0.3)
ax.set_xticklabels([int(x) for x in ax.get_xticks()], visible=visible_x)
lx=ax.set_xlabel(r"Flux", visible=visible_x)
#lx.set_fontsize(font_size)
ax.set_xscale('linear')
ax.set_yticklabels(ax.get_yticks(), visible= visible_y)
ly=ax.set_ylabel(r"c1", visible=visible_y)
#ly.set_fontsize(font_size)
ax.set_yscale('linear')
#plt.ylim ([ymin, ymax])
ax.legend(loc=loc_label , fancybox=True, ncol=1, numpoints=1, prop = prop)
plt.grid(grid)
pp.savefig()
## 3: c2
fig = plt.figure()
ax = fig.add_subplot (111)
ax.errorbar( flux_vec, c2_nl_vec, yerr=None, ecolor = 'r', label='Gaussian, vnl', fmt='r.-', markersize=marker_size)
ax.errorbar( flux_vec, c2_cd_vec, yerr=None, ecolor = 'r', label='Gaussian, BF' , fmt='rx--', markersize=marker_size)
ax.errorbar( flux_vec, c2_nl_opt_vec, yerr=None, ecolor = 'b', label='Optical, vnl', fmt='b.-', markersize=marker_size)
ax.errorbar( flux_vec, c2_cd_opt_vec, yerr=None , ecolor = 'b', label='Optical, BF' , fmt='bx--', markersize=marker_size)
plt.axhspan(-c_req, c_req, facecolor='0.5', alpha=0.3)
ax.set_xticklabels([int(x) for x in ax.get_xticks()], visible=visible_x)
lx=ax.set_xlabel(r"Flux", visible=visible_x)
#lx.set_fontsize(font_size)
ax.set_xscale('linear')
ax.set_yticklabels(ax.get_yticks(), visible= visible_y)
ly=ax.set_ylabel(r"c2", visible=visible_y)
#ly.set_fontsize(font_size)
ax.set_yscale('linear')
#plt.ylim ([ymin, ymax])
ax.legend(loc=loc_label , fancybox=True, ncol=1, numpoints=1, prop = prop)
plt.grid(grid)
pp.savefig()
pp.close()
|
plazas/wfirst-detectors-vnl
|
code/nonlinearity.py
|
Python
|
mit
| 11,762
|
[
"Gaussian"
] |
3ae37aa4221ae0465ef079da3e9f169278f6e521d6e40bff631fcf01512b9e39
|
import json
import nose
import datetime
import pylons
import sqlalchemy.orm as orm
import ckan.plugins as p
import ckan.lib.create_test_data as ctd
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.tests.helpers as helpers
import ckanext.datastore.db as db
from ckanext.datastore.tests.helpers import rebuild_all_dbs, set_url_type
assert_equal = nose.tools.assert_equal
class TestDatastoreUpsert(tests.WsgiAppCase):
sysadmin_user = None
normal_user = None
@classmethod
def setup_class(cls):
if not tests.is_datastore_supported():
raise nose.SkipTest("Datastore not supported")
p.load('timeseries')
helpers.reset_db()
ctd.CreateTestData.create()
cls.sysadmin_user = model.User.get('testsysadmin')
cls.normal_user = model.User.get('annafan')
set_url_type(
model.Package.get('annakarenina').resources, cls.sysadmin_user)
resource = model.Package.get('annakarenina').resources[0]
cls.data = {
'resource_id': resource.id,
'fields': [{'id': u'b\xfck', 'type': 'text'},
{'id': 'author', 'type': 'text'},
{'id': 'nested', 'type': 'json'},
{'id': 'characters', 'type': 'text[]'},
{'id': 'published'}],
'primary_key': u'b\xfck',
'records': [{u'b\xfck': 'annakarenina', 'author': 'tolstoy',
'published': '2005-03-01', 'nested': ['b', {'moo': 'moo'}]},
{u'b\xfck': 'warandpeace', 'author': 'tolstoy',
'nested': {'a':'b'}}
]
}
postparams = '%s=1' % json.dumps(cls.data)
auth = {'Authorization': str(cls.sysadmin_user.apikey)}
res = cls.app.post('/api/action/datastore_ts_create', params=postparams,
extra_environ=auth)
res_dict = json.loads(res.body)
assert res_dict['success'] is True
engine = db._get_engine(
{'connection_url': pylons.config['ckan.datastore.write_url']})
cls.Session = orm.scoped_session(orm.sessionmaker(bind=engine))
@classmethod
def teardown_class(cls):
rebuild_all_dbs(cls.Session)
p.unload('timeseries')
def test_insert_timeseries(self):
hhguide = u"hitchhiker's guide to the galaxy"
data = {
'resource_id': self.data['resource_id'],
'method': 'insert',
'records': [{
'author': 'adams',
'characters': ['Arthur Dent', 'Marvin'],
'nested': {'foo': 'bar', 'baz': 3},
u'b\xfck': hhguide}]
}
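# With method 'insert' the upsert action simply appends this record; the
# timeseries extension is expected to stamp every stored row with an
# _autogen_timestamp column, which is what the assertions below verify.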
postparams = '%s=1' % json.dumps(data)
auth = {'Authorization': str(self.sysadmin_user.apikey)}
res = self.app.post('/api/action/datastore_ts_upsert', params=postparams,
extra_environ=auth)
res_dict = json.loads(res.body)
assert res_dict['success'] is True
c = self.Session.connection()
results = c.execute('select * from "{0}"'.format(self.data['resource_id']))
self.Session.remove()
for row in results:
assert '_autogen_timestamp' in row
assert results.rowcount == 3
|
namgk/ckan-timeseries
|
ckanext/timeseries/tests/test_upsert_short.py
|
Python
|
agpl-3.0
| 3,333
|
[
"Galaxy"
] |
7b7e2235f7b28a417ad0b409ad0189388e10c2fe4a34f40a7b5ce8ed5dd11c3c
|
"""Test the JobInfo"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import sys
from six import StringIO
from mock import MagicMock as Mock
from parameterized import parameterized, param
from DIRAC import S_OK, S_ERROR, gLogger
import DIRAC
from DIRAC.TransformationSystem.Utilities.JobInfo import TaskInfoException, JobInfo
import DIRAC.Interfaces.API.Dirac
gLogger.setLevel("DEBUG")
__RCSID__ = "$Id$"
# pylint: disable=W0212, E1101
class TestJI(unittest.TestCase):
"""Test the JobInfo Module"""
def setUp(self):
self.jbi = JobInfo(jobID=123, status="Failed", tID=1234, tType="MCReconstruction")
self.diracAPI = Mock(name="dilcMock", spec=DIRAC.Interfaces.API.Dirac.Dirac)
self.jobMon = Mock(
name="jobMonMock", spec=DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient.JobMonitoringClient)
self.jobMon.getInputData = Mock(return_value=S_OK([]))
self.jobMon.getJobAttribute = Mock(return_value=S_OK('0'))
self.jobMon.getJobParameter = Mock(return_value=S_OK({}))
self.diracAPI.getJobJDL = Mock()
self.jdl2 = {
'LogTargetPath': "/ilc/prod/clic/500gev/yyveyx_o/ILD/REC/00006326/LOG/00006326_015.tar",
'Executable': "dirac-jobexec",
'TaskID': 15,
'SoftwareDistModule': "ILCDIRAC.Core.Utilities.CombinedSoftwareInstallation",
'JobName': "00006326_00000015",
'Priority': 1,
'Platform': "x86_64-slc5-gcc43-opt",
'JobRequirements': {
'OwnerDN': "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=sailer/CN=683529/CN=Andre Sailer",
'VirtualOrganization': "ilc",
'Setup': "ILC-Production",
'CPUTime': 300000,
'OwnerGroup': "ilc_prod",
'Platforms': [
"x86_64-slc6-gcc44-opt",
"x86_64-slc5-gcc43-opt",
"slc5_amd64_gcc43",
"Linux_x86_64_glibc-2.12",
"Linux_x86_64_glibc-2.5",
],
'UserPriority': 1,
'Sites': [
"LCG.LAPP.fr",
"LCG.UKI-SOUTHGRID-RALPP.uk",
],
'BannedSites': "LCG.KEK.jp",
'JobTypes': "MCReconstruction_Overlay",
},
'Arguments': "jobDescription.xml -o LogLevel=verbose",
'SoftwarePackages': [
"overlayinput.1",
"marlin.v0111Prod",
],
'DebugLFNs': "",
'Status': "Created",
'InputDataModule': "DIRAC.WorkloadManagementSystem.Client.InputDataResolution",
'BannedSites': "LCG.KEK.jp",
'LogLevel': "verbose",
'InputSandbox': [
"jobDescription.xml",
"SB:ProductionSandboxSE2|/SandBox/i/ilc_prod/5d3/92f/5d392f5266a796018ab6774ef84cbd31.tar.bz2",
],
'OwnerName': "sailer",
'StdOutput': "std.out",
'JobType': "MCReconstruction_Overlay",
'GridEnv': "/cvmfs/grid.cern.ch/emi-ui-3.7.3-1_sl6v2/etc/profile.d/setup-emi3-ui-example",
'TransformationID': 6326,
'DIRACSetup': "ILC-Production",
'StdError': "std.err",
'IS_PROD': "True",
'OwnerDN': "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=sailer/CN=683529/CN=Andre Sailer",
'JobGroup': 0o0006326,
'OutputSandbox': [
"std.err",
"std.out",
],
'JobID': 15756436,
'VirtualOrganization': "ilc",
'ProductionOutputData': [
"/ilc/prod/clic/500gev/yyveyx_o/ILD/REC/00006326/000/yyveyx_o_rec_6326_15.slcio",
"/ilc/prod/clic/500gev/yyveyx_o/ILD/DST/00006326/000/yyveyx_o_dst_6326_15.slcio",
],
'Site': "ANY",
'OwnerGroup': "ilc_prod",
'Owner': "sailer",
'LogFilePath': "/ilc/prod/clic/500gev/yyveyx_o/ILD/REC/00006326/LOG/000",
'InputData': "/ilc/prod/clic/500gev/yyveyx_o/ILD/SIM/00006325/000/yyveyx_o_sim_6325_17.slcio",
}
self.jdlBrokenContent = {
'LogTargetPath': "/ilc/prod/clic/500gev/yyveyx_o/ILD/REC/00006326/LOG/00006326_015.tar",
'Executable': "dirac-jobexec",
'TaskID': 'muahahaha',
'SoftwareDistModule': "ILCDIRAC.Core.Utilities.CombinedSoftwareInstallation",
'JobName': "00006326_00000015",
'Priority': 1,
'Platform': "x86_64-slc5-gcc43-opt",
'JobRequirements': {
'OwnerDN': "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=sailer/CN=683529/CN=Andre Sailer",
'VirtualOrganization': "ilc",
'Setup': "ILC-Production",
'CPUTime': 300000,
'OwnerGroup': "ilc_prod",
'Platforms': [
"x86_64-slc6-gcc44-opt",
"x86_64-slc5-gcc43-opt",
"slc5_amd64_gcc43",
"Linux_x86_64_glibc-2.12",
"Linux_x86_64_glibc-2.5",
],
'UserPriority': 1,
'Sites': [
"LCG.LAPP.fr",
"LCG.UKI-SOUTHGRID-RALPP.uk",
],
'BannedSites': "LCG.KEK.jp",
'JobTypes': "MCReconstruction_Overlay",
},
'Arguments': "jobDescription.xml -o LogLevel=verbose",
'SoftwarePackages': [
"overlayinput.1",
"marlin.v0111Prod",
],
'DebugLFNs': "",
'Status': "Created",
'InputDataModule': "DIRAC.WorkloadManagementSystem.Client.InputDataResolution",
'BannedSites': "LCG.KEK.jp",
'LogLevel': "verbose",
'InputSandbox': [
"jobDescription.xml",
"SB:ProductionSandboxSE2|/SandBox/i/ilc_prod/5d3/92f/5d392f5266a796018ab6774ef84cbd31.tar.bz2",
],
'OwnerName': "sailer",
'StdOutput': "std.out",
'JobType': "MCReconstruction_Overlay",
'GridEnv': "/cvmfs/grid.cern.ch/emi-ui-3.7.3-1_sl6v2/etc/profile.d/setup-emi3-ui-example",
'TransformationID': 6326,
'DIRACSetup': "ILC-Production",
'StdError': "std.err",
'IS_PROD': "True",
'OwnerDN': "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=sailer/CN=683529/CN=Andre Sailer",
'JobGroup': 0o0006326,
'OutputSandbox': [
"std.err",
"std.out",
],
'JobID': 15756436,
'VirtualOrganization': "ilc",
'ProductionOutputData': [
"/ilc/prod/clic/500gev/yyveyx_o/ILD/REC/00006326/000/yyveyx_o_rec_6326_15.slcio",
"/ilc/prod/clic/500gev/yyveyx_o/ILD/DST/00006326/000/yyveyx_o_dst_6326_15.slcio",
],
'Site': "ANY",
'OwnerGroup': "ilc_prod",
'Owner': "sailer",
'LogFilePath': "/ilc/prod/clic/500gev/yyveyx_o/ILD/REC/00006326/LOG/000",
'InputData': "/ilc/prod/clic/500gev/yyveyx_o/ILD/SIM/00006325/000/yyveyx_o_sim_6325_17.slcio",
}
# jdl with single outputdata,
self.jdl1 = {
'LogTargetPath': "/ilc/prod/clic/3tev/e1e1_o/SID/SIM/00006301/LOG/00006301_10256.tar",
'Executable': "dirac-jobexec",
'TaskID': 10256,
'SoftwareDistModule': "ILCDIRAC.Core.Utilities.CombinedSoftwareInstallation",
'JobName': "00006301_00010256",
'Priority': 1,
'Platform': "x86_64-slc5-gcc43-opt",
'JobRequirements': {
'OwnerDN': "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=sailer/CN=683529/CN=Andre Sailer",
'VirtualOrganization': "ilc",
'Setup': "ILC-Production",
'CPUTime': 300000,
'OwnerGroup': "ilc_prod",
'Platforms': [
"x86_64-slc6-gcc44-opt",
"x86_64-slc5-gcc43-opt",
"slc5_amd64_gcc43",
"Linux_x86_64_glibc-2.12",
"Linux_x86_64_glibc-2.5",
],
'UserPriority': 1,
'Sites': [
"LCG.LAPP.fr",
"LCG.UKI-SOUTHGRID-RALPP.uk",
],
'BannedSites': [
"OSG.MIT.us",
"OSG.SPRACE.br",
],
'JobTypes': "MCSimulation",
},
'Arguments': "jobDescription.xml -o LogLevel=verbose",
'SoftwarePackages': "slic.v2r9p8",
'DebugLFNs': "",
'Status': "Created",
'InputDataModule': "DIRAC.WorkloadManagementSystem.Client.InputDataResolution",
'BannedSites': [
"OSG.MIT.us",
"OSG.SPRACE.br",
],
'LogLevel': "verbose",
'InputSandbox': [
"jobDescription.xml",
"SB:ProductionSandboxSE2|/SandBox/i/ilc_prod/042/d64/042d64cb0fe73720cbd114a73506c582.tar.bz2",
],
'OwnerName': "sailer",
'StdOutput': "std.out",
'JobType': "MCSimulation",
'GridEnv': "/cvmfs/grid.cern.ch/emi-ui-3.7.3-1_sl6v2/etc/profile.d/setup-emi3-ui-example",
'TransformationID': 6301,
'DIRACSetup': "ILC-Production",
'StdError': "std.err",
'IS_PROD': "True",
'OwnerDN': "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=sailer/CN=683529/CN=Andre Sailer",
'JobGroup': '00006301',
'OutputSandbox': [
"std.err",
"std.out",
],
'JobID': 15756456,
'VirtualOrganization': "ilc",
'ProductionOutputData': "/ilc/prod/clic/3tev/e1e1_o/SID/SIM/00006301/010/e1e1_o_sim_6301_10256.slcio",
'Site': "ANY",
'OwnerGroup': "ilc_prod",
'Owner': "sailer",
'LogFilePath': "/ilc/prod/clic/3tev/e1e1_o/SID/SIM/00006301/LOG/010",
'InputData': "/ilc/prod/clic/3tev/e1e1_o/gen/00006300/004/e1e1_o_gen_6300_4077.stdhep",
}
self.jdlNoInput = {
'LogTargetPath': "/ilc/prod/clic/1.4tev/ea_qqqqnu/gen/00006498/LOG/00006498_1307.tar",
'Executable': "dirac-jobexec",
'TaskID': 1307,
'SoftwareDistModule': "ILCDIRAC.Core.Utilities.CombinedSoftwareInstallation",
'JobName': "00006498_00001307",
'Priority': 1,
'Platform': "x86_64-slc5-gcc43-opt",
'JobRequirements': {
'OwnerDN': "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=sailer/CN=683529/CN=Andre Sailer",
'VirtualOrganization': "ilc",
'Setup': "ILC-Production",
'CPUTime': 300000,
'OwnerGroup': "ilc_prod",
'Platforms': [
"x86_64-slc6-gcc44-opt",
"x86_64-slc5-gcc43-opt",
"slc5_amd64_gcc43",
"Linux_x86_64_glibc-2.12",
"Linux_x86_64_glibc-2.5",
],
'UserPriority': 1,
'BannedSites': "LCG.KEK.jp",
'JobTypes': "MCGeneration",
},
'Arguments': "jobDescription.xml -o LogLevel=verbose",
'SoftwarePackages': "whizard.SM_V57",
'DebugLFNs': "",
'Status': "Created",
'InputDataModule': "DIRAC.WorkloadManagementSystem.Client.InputDataResolution",
'BannedSites': "LCG.KEK.jp",
'LogLevel': "verbose",
'InputSandbox': [
"jobDescription.xml",
"SB:ProductionSandboxSE2|/SandBox/i/ilc_prod/b2a/d98/b2ad98c3e240361a4253c4bb277be478.tar.bz2",
],
'OwnerName': "sailer",
'StdOutput': "std.out",
'JobType': "MCGeneration",
'GridEnv': "/cvmfs/grid.cern.ch/emi-ui-3.7.3-1_sl6v2/etc/profile.d/setup-emi3-ui-example",
'TransformationID': 6498,
'DIRACSetup': "ILC-Production",
'StdError': "std.err",
'IS_PROD': "True",
'OwnerDN': "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=sailer/CN=683529/CN=Andre Sailer",
'JobGroup': '00006498',
'OutputSandbox': [
"std.err",
"std.out",
],
'JobID': 15762268,
'VirtualOrganization': "ilc",
'ProductionOutputData': "/ilc/prod/clic/1.4tev/ea_qqqqnu/gen/00006498/001/ea_qqqqnu_gen_6498_1307.stdhep",
'Site': "ANY",
'OwnerGroup': "ilc_prod",
'Owner': "sailer",
'LogFilePath': "/ilc/prod/clic/1.4tev/ea_qqqqnu/gen/00006498/LOG/001",
'InputData': "",
}
def tearDown(self):
pass
def test_Init(self):
"""Transformation.Utilities.JobInfo init ...................................................."""
assert self.jbi.outputFiles == []
self.assertFalse(self.jbi.pendingRequest)
def test_allFilesExist(self):
"""Transformation.Utilities.JobInfo.allFilesExist............................................"""
self.jbi.outputFileStatus = ["Exists", "Exists"]
self.assertTrue(self.jbi.allFilesExist())
self.jbi.outputFileStatus = ["Exists", "Missing"]
self.assertFalse(self.jbi.allFilesExist())
self.jbi.outputFileStatus = ["Missing", "Exists"]
self.assertFalse(self.jbi.allFilesExist())
self.jbi.outputFileStatus = ["Missing", "Missing"]
self.assertFalse(self.jbi.allFilesExist())
self.jbi.outputFileStatus = []
self.assertFalse(self.jbi.allFilesExist())
def test_allFilesMissing(self):
"""Transformation.Utilities.JobInfo.allFilesMissing.........................................."""
self.jbi.outputFileStatus = ["Exists", "Exists"]
self.assertFalse(self.jbi.allFilesMissing())
self.jbi.outputFileStatus = ["Exists", "Missing"]
self.assertFalse(self.jbi.allFilesMissing())
self.jbi.outputFileStatus = ["Missing", "Exists"]
self.assertFalse(self.jbi.allFilesMissing())
self.jbi.outputFileStatus = ["Missing", "Missing"]
self.assertTrue(self.jbi.allFilesMissing())
self.jbi.outputFileStatus = []
self.assertFalse(self.jbi.allFilesMissing())
@parameterized.expand([('someFilesMissing', 'outputFileStatus', ['Exists', 'Exists'], False),
('someFilesMissing', 'outputFileStatus', ['Exists', 'Missing'], True),
('someFilesMissing', 'outputFileStatus', ['Missing', 'Exists'], True),
('someFilesMissing', 'outputFileStatus', ['Missing', 'Missing'], False),
('someFilesMissing', 'outputFileStatus', [], False),
('allInputFilesExist', 'inputFileStatus', ['Exists', 'Exists'], True),
('allInputFilesExist', 'inputFileStatus', ['Exists', 'Missing'], False),
('allInputFilesExist', 'inputFileStatus', ['Missing', 'Missing'], False),
('allInputFilesExist', 'inputFileStatus', [], False),
('allInputFilesMissing', 'inputFileStatus', ['Exists', 'Exists'], False),
('allInputFilesMissing', 'inputFileStatus', ['Exists', 'Missing'], False),
('allInputFilesMissing', 'inputFileStatus', ['Missing', 'Missing'], True),
('allInputFilesMissing', 'inputFileStatus', [], False),
('someInputFilesMissing', 'inputFileStatus', ['Exists', 'Exists'], False),
('someInputFilesMissing', 'inputFileStatus', ['Exists', 'Missing'], True),
('someInputFilesMissing', 'inputFileStatus', ['Missing', 'Exists'], True),
('someInputFilesMissing', 'inputFileStatus', ['Missing', 'Missing'], False),
('someInputFilesMissing', 'inputFileStatus', [], False),
('allFilesProcessed', 'transFileStatus', ['Processed', 'Processed'], True),
('allFilesProcessed', 'transFileStatus', ['Processed', 'Assigned'], False),
('allFilesProcessed', 'transFileStatus', ['Assigned', 'Assigned'], False),
('allFilesProcessed', 'transFileStatus', ['Deleted', 'Deleted'], False),
('allFilesProcessed', 'transFileStatus', ['Unused', 'Unused'], False),
('allFilesProcessed', 'transFileStatus', [], False),
('allFilesAssigned', 'transFileStatus', ['Processed', 'Processed'], True),
('allFilesAssigned', 'transFileStatus', ['Processed', 'Assigned'], True),
('allFilesAssigned', 'transFileStatus', ['Assigned', 'Assigned'], True),
('allFilesAssigned', 'transFileStatus', ['Assigned', 'Unused'], False),
('allFilesAssigned', 'transFileStatus', ['Deleted', 'Deleted'], False),
('allFilesAssigned', 'transFileStatus', ['Unused', 'Unused'], False),
('allFilesAssigned', 'transFileStatus', [], False),
('checkErrorCount', 'errorCounts', [0, 9], False),
('checkErrorCount', 'errorCounts', [0, 10], False),
('checkErrorCount', 'errorCounts', [0, 11], True),
('checkErrorCount', 'errorCounts', [0, 12], True),
('allTransFilesDeleted', 'transFileStatus', ['Deleted', 'Deleted'], True),
('allTransFilesDeleted', 'transFileStatus', ['Deleted', 'Assigned'], False),
('allTransFilesDeleted', 'transFileStatus', ['Assigned', 'Deleted'], False),
('allTransFilesDeleted', 'transFileStatus', ['Assigned', 'Assigned'], False),
])
def test_fileChecker(self, func, attr, value, expected):
setattr(self.jbi, attr, value)
gLogger.notice('%s, %s, %s, %s, %s' % (getattr(self.jbi, func)(), func, attr, value, expected))
assert expected == getattr(self.jbi, func)()
def test_getJDL(self):
"""Transformation.Utilities.JobInfo.getJDL..................................................."""
self.diracAPI.getJobJDL.return_value = S_OK(self.jdl1)
jdlList = self.jbi._JobInfo__getJDL(self.diracAPI)
self.assertIsInstance(jdlList, dict)
self.diracAPI.getJobJDL.return_value = S_ERROR("no mon")
with self.assertRaises(RuntimeError) as contextManagedException:
jdlList = self.jbi._JobInfo__getJDL(self.diracAPI)
self.assertIn("Failed to get jobJDL", str(contextManagedException.exception))
def test_getTaskInfo_1(self):
# task is only one
wit = ['MCReconstruction']
self.jbi.taskID = 1234
self.jbi.inputFiles = ["lfn"]
tasksDict = {1234: [dict(FileID=123456, LFN="lfn", Status="Assigned", ErrorCount=7)]}
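# tasksDict maps a transformation task ID to its list of transformation-file
# records (FileID/LFN/Status/ErrorCount), the structure getTaskInfo consumes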
lfnTaskDict = {}
self.jbi.getTaskInfo(tasksDict, lfnTaskDict, wit)
self.assertEqual(self.jbi.transFileStatus, ['Assigned'])
self.assertEqual(self.jbi.otherTasks, [])
def test_getTaskInfo_2(self):
# there are other tasks
wit = ['MCReconstruction']
self.jbi.taskID = 1234
self.jbi.inputFiles = ["lfn"]
tasksDict = {12: [dict(FileID=123456, LFN="lfn", Status="Processed", ErrorCount=7)]}
lfnTaskDict = {"lfn": 12}
self.jbi.getTaskInfo(tasksDict, lfnTaskDict, wit)
self.assertEqual(self.jbi.transFileStatus, ['Processed'])
self.assertEqual(self.jbi.otherTasks, [12])
def test_getTaskInfo_3(self):
# raise
wit = ['MCReconstruction']
self.jbi.taskID = 1234
self.jbi.inputFiles = ['otherLFN']
tasksDict = {1234: [dict(FileID=123456, LFN='lfn', Status='Processed', ErrorCount=23)]}
lfnTaskDict = {}
with self.assertRaisesRegexp(TaskInfoException, "InputFiles do not agree"):
self.jbi.getTaskInfo(tasksDict, lfnTaskDict, wit)
# def test_getTaskInfo_4(self):
# # raise keyError
# wit = ['MCReconstruction']
# self.jbi.taskID = 1235
# self.jbi.inputFiles = []
# tasksDict = {1234: dict(FileID=123456, LFN="lfn", Status="Processed")}
# lfnTaskDict = {}
# with self.assertRaisesRegexp(KeyError, ""):
# self.jbi.getTaskInfo(tasksDict, lfnTaskDict, wit)
def test_getTaskInfo_5(self):
# raise inputFile
wit = ['MCReconstruction']
self.jbi.taskID = 1235
self.jbi.inputFiles = []
tasksDict = {1234: dict(FileID=123456, LFN="lfn", Status="Processed")}
lfnTaskDict = {}
with self.assertRaisesRegexp(TaskInfoException, "InputFiles is empty"):
self.jbi.getTaskInfo(tasksDict, lfnTaskDict, wit)
def test_getJobInformation(self):
"""Transformation.Utilities.JobInfo.getJobInformation........................................"""
self.diracAPI.getJobJDL.return_value = S_OK(self.jdl1)
self.jbi.getJobInformation(self.diracAPI, self.jobMon)
self.assertEqual(self.jbi.outputFiles,
['/ilc/prod/clic/3tev/e1e1_o/SID/SIM/00006301/010/e1e1_o_sim_6301_10256.slcio'])
self.assertEqual(10256, self.jbi.taskID)
self.assertEqual(self.jbi.inputFiles, ["/ilc/prod/clic/3tev/e1e1_o/gen/00006300/004/e1e1_o_gen_6300_4077.stdhep"])
# empty jdl
self.setUp()
self.diracAPI.getJobJDL.return_value = S_OK({})
self.jbi.getJobInformation(self.diracAPI, self.jobMon)
self.assertEqual(self.jbi.outputFiles, [])
self.assertIsNone(self.jbi.taskID)
self.assertEqual(self.jbi.inputFiles, [])
def test_getOutputFiles(self):
"""Transformation.Utilities.JobInfo.getOutputFiles..........................................."""
# singleLFN
self.diracAPI.getJobJDL.return_value = S_OK(self.jdl1)
jdlList = self.jbi._JobInfo__getJDL(self.diracAPI)
self.jbi._JobInfo__getOutputFiles(jdlList)
self.assertEqual(
self.jbi.outputFiles,
["/ilc/prod/clic/3tev/e1e1_o/SID/SIM/00006301/010/e1e1_o_sim_6301_10256.slcio"])
# two LFNs
self.diracAPI.getJobJDL.return_value = S_OK(self.jdl2)
jdlList = self.jbi._JobInfo__getJDL(self.diracAPI)
self.jbi._JobInfo__getOutputFiles(jdlList)
self.assertEqual(self.jbi.outputFiles,
["/ilc/prod/clic/500gev/yyveyx_o/ILD/REC/00006326/000/yyveyx_o_rec_6326_15.slcio",
"/ilc/prod/clic/500gev/yyveyx_o/ILD/DST/00006326/000/yyveyx_o_dst_6326_15.slcio"])
def test_getTaskID(self):
"""Transformation.Utilities.JobInfo.getTaskID................................................"""
# singleLFN
self.diracAPI.getJobJDL.return_value = S_OK(self.jdl1)
jdlList = self.jbi._JobInfo__getJDL(self.diracAPI)
self.jbi._JobInfo__getTaskID(jdlList)
self.assertEqual(10256, self.jbi.taskID)
# broken jdl
out = StringIO()
sys.stdout = out
self.diracAPI.getJobJDL.return_value = S_OK(self.jdlBrokenContent)
jdlList = self.jbi._JobInfo__getJDL(self.diracAPI)
with self.assertRaises(ValueError):
self.jbi._JobInfo__getTaskID(jdlList)
def test_getInputFile(self):
"""Test the extraction of the inputFile from the JDL parameters."""
# singleLFN
self.jbi._JobInfo__getInputFile({'InputData': '/single/lfn2'})
self.assertEqual(self.jbi.inputFiles, ['/single/lfn2'])
# list with singleLFN
self.jbi._JobInfo__getInputFile({'InputData': ['/single/lfn1']})
self.assertEqual(self.jbi.inputFiles, ['/single/lfn1'])
# list with two LFN
self.jbi._JobInfo__getInputFile({'InputData': ['/lfn1', '/lfn2']})
self.assertEqual(self.jbi.inputFiles, ['/lfn1', '/lfn2'])
def test_checkFileExistence(self):
"""Transformation.Utilities.JobInfo.checkFileExistance......................................."""
# input and output files
repStatus = {'inputFile1': True, 'inputFile2': False, 'outputFile1': False, 'outputFile2': True}
self.jbi.inputFiles = ['inputFile1', 'inputFile2', 'inputFile3']
self.jbi.outputFiles = ['outputFile1', 'outputFile2', 'unknownFile']
self.jbi.checkFileExistence(repStatus)
self.assertTrue(self.jbi.inputFilesExist[0])
self.assertFalse(self.jbi.inputFilesExist[1])
self.assertFalse(self.jbi.inputFilesExist[2])
self.assertEqual(self.jbi.inputFileStatus, ["Exists", "Missing", "Unknown"])
self.assertEqual(self.jbi.outputFileStatus, ["Missing", "Exists", "Unknown"])
# just output files
self.setUp()
repStatus = {'inputFile': True, 'outputFile1': False, 'outputFile2': True}
self.jbi.inputFiles = []
self.jbi.outputFiles = ["outputFile1", "outputFile2", "unknownFile"]
self.jbi.checkFileExistence(repStatus)
self.assertEqual(self.jbi.outputFileStatus, ["Missing", "Exists", "Unknown"])
@parameterized.expand([
param(['123: Failed MCReconstruction Transformation: 1234 -- 5678 ', 'inputFile (True, Assigned, Errors 0'], []),
param(['123: Failed MCReconstruction Transformation: 1234 -- 5678 (Last task [7777])'], [], otherTasks=[7777]),
param([], ['MCReconstruction Transformation'], trID=0, taID=0),
param([], ['(Last task'], otherTasks=[]),
param(['PENDING REQUEST IGNORE THIS JOB'], [], pendingRequest=True, ),
param(['No Pending Requests'], [], pendingRequest=False,),
])
def test__str__(self, asserts, assertNots, trID=1234, taID=5678, otherTasks=False, pendingRequest=False):
jbi = JobInfo(jobID=123, status="Failed", tID=trID, tType="MCReconstruction")
jbi.pendingRequest = pendingRequest
jbi.otherTasks = otherTasks
gLogger.notice('otherTasks: ', jbi.otherTasks)
jbi.taskID = taID
jbi.inputFiles = ['inputFile']
jbi.inputFilesExist = [True]
jbi.transFileStatus = ['Assigned']
jbi.outputFiles = ["outputFile"]
jbi.errorCounts = [0]
info = str(jbi)
for assertStr in asserts:
self.assertIn(assertStr, info)
for assertStr in assertNots:
self.assertNotIn(assertStr, info)
def test_TaskInfoException(self):
"""Transformation.Utilities.JobInfo.TaskInfoException........................................"""
tie = TaskInfoException("notTasked")
self.assertIsInstance(tie, Exception)
self.assertIn("notTasked", str(tie))
if __name__ == "__main__":
SUITE = unittest.defaultTestLoader.loadTestsFromTestCase(TestJI)
TESTRESULT = unittest.TextTestRunner(verbosity=3).run(SUITE)
|
yujikato/DIRAC
|
src/DIRAC/TransformationSystem/test/Test_JobInfo.py
|
Python
|
gpl-3.0
| 25,699
|
[
"DIRAC"
] |
9061650b38f2a9feccbb3e136c1611ed0e2d70d503264df853f7197bb94dd4ee
|
#!/usr/bin/env python2
'''
description: Wrapper to create a single output file in LITTLE_R format from a
list of netcdf files defined in an input file.
Time window is extracted from obsproc.namelist.
Uses external packages: convert_littler_single and cdo
license: APACHE 2.0
author: Ronald van Haren, NLeSC (r.vanharen@esciencecenter.nl)
'''
# import main packages
import argparse
from namelist import namelist_get, namelist_set
import os
class wrapper_littler:
'''
Wrapper class to create a single output file in LITTLE_R format from a
list of netcdf files defined in an input file.
'''
def __init__(self,filelist, obsproc_namelist):
self.filelist = filelist
self.workdir = './workdir'
self.obsproc_namelist = obsproc_namelist
self.cleanup_workdir()
self.test_input()
self.read_filelist() # create list of filenames
self.namelist_obsproc(self.obsproc_namelist) # extract time-window
for idx, filename in enumerate(self.files): # loop over all files
self.process_file(filename,idx) # process file
self.combine_output_files() # combine all LITTLE_R files
def test_input(self):
if not os.path.exists(self.filelist):
raise IOError(self.filelist + ' not found.')
elif not os.path.exists(self.obsproc_namelist):
raise IOError(self.obsproc_namelist + ' not found.')
else:
pass
def cleanup_workdir(self):
'''
cleanup previous results and copy files to workdir
'''
import shutil
if os.path.exists(self.workdir):
# remove workdir if exists
shutil.rmtree(self.workdir)
# create workdir
try:
os.makedirs(self.workdir)
except OSError:
raise IOError('Cannot create work directory: ' + self.workdir)
# copy files to workdir
files = ['convert_littler', 'wageningen.namelist']
[ shutil.copy(file, self.workdir) for file in files ]
def read_filelist(self):
'''
read list of files from file
discard lines with length 0
add files to list
'''
self.files = [line.strip() for line in open(
self.filelist, 'r') if len(line.strip())>0]
def namelist_obsproc(self, obsproc_namelist):
'''
extract time window from obsproc namelist
'''
self.t_min = namelist_get(obsproc_namelist, 'record2:time_window_min')
self.t_max = namelist_get(obsproc_namelist, 'record2:time_window_max')
def process_file(self, filename, idx):
'''
process input file:
- extract time interval netcdf file
- convert extracted time interval to LITTLE_R format
'''
import subprocess
import sys
# extract time interval from netcdf file using cdo
# remove out.nc file if it exists
try:
os.remove('workdir/out.nc')
except OSError:
pass
# extract time interval from input netcdf file, save as out.nc
commands = ['cdo seldate,' + self.t_min + ',' + self.t_max + ' ' + filename + ' workdir/out.nc',
'ncks -A -v longitude,latitude ' + filename + ' workdir/out.nc']
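# cdo seldate cuts the requested time window out of the input file; the ncks
# call then appends the longitude/latitude variables from the original file so
# that convert_littler can still find the station coordinates (assumption
# based on the -A append flag)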
for command in commands:
# execute command, catch exceptions
try:
# cdo requires shell=True in subprocess.call
retcode = subprocess.call(command, shell=True, stdout=open(os.devnull,
'wb'))
except OSError as e:
print >>sys.stderr, "Execution failed:", e
retcode = -1
# if retcode!=0, no out.nc file is created, skip rest of function
if retcode != 0:
print "Execution of command failed: " + command
return
# edit namelist
namelist_set('workdir/wageningen.namelist', 'group_name:filename', 'out.nc')
namelist_set('workdir/wageningen.namelist', 'group_name:outfile',
'results' + str(idx).zfill(3) +'.txt')
# convert resulting ncdf file to little_R format
owd = os.getcwd()
try:
os.chdir('workdir')
retcode = subprocess.call('./convert_littler',
stdout=open(os.devnull, 'wb'))
except OSError as e:
print >>sys.stderr, "Execution failed:", e
finally:
os.chdir(owd)
def combine_output_files(self):
'''
concatenate all txt files to a single outputfile
'''
import fileinput
import glob
outfilename = 'output.test'
#filenames = ['workdir/results.txt', 'workdir/results.txt', 'workdir/results.txt']
filenames = glob.glob('workdir/results*txt')
with open(outfilename, 'w') as fout:
for line in fileinput.input(filenames):
fout.write(line)
if __name__=="__main__":
# define logger
#logname = os.path.basename(__file__) + '.log'
#logger = utils.start_logging(filename=logname, level='info')
#global logger
# define argument menu
description = 'Create a single output file in LITTLE_R format from a list of netCDF files'
parser = argparse.ArgumentParser(description=description)
# fill argument groups
parser.add_argument('-f', '--filelist', help='filelist containing netcdf files',
default='wrapper.filelist', required=False)
parser.add_argument('-o', '--obsproc', help='obsproc namelist',
default='namelist.obsproc', required=False)
opts = parser.parse_args()
# main function
#wrapper_littler('filelist', '/data/github/WRFDA/var/obsproc/namelist.obsproc')
wrapper_littler(opts.filelist, opts.obsproc)
|
rvanharen/ERA_URBAN
|
scripts/wrapper_littler/wrapper_littler.py
|
Python
|
apache-2.0
| 5,397
|
[
"NetCDF"
] |
24201d9b1ddcf39439ea79982b27b3ea1618dc068bcfa411a00a45b264fce9e5
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.basics.models Contains the SersicModel, ExponentialDiskModel and DeprojectionModel classes.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import math
import numpy as np
from abc import ABCMeta, abstractmethod, abstractproperty
from scipy.special import gammaincinv
from scipy import ndimage
import itertools
import warnings
# Import astronomical modules
from astropy.coordinates import Angle
from astropy.units import dimensionless_angles
# Import the relevant PTS classes and modules
from ...core.basics.composite import SimplePropertyComposite
from ...core.units.parsing import parse_unit as u
from ...magic.core.frame import Frame
from ...core.tools import filesystem as fs
from ..basics.projection import GalaxyProjection, FaceOnProjection, EdgeOnProjection
from ..basics.instruments import FrameInstrument, SEDInstrument, SimpleInstrument, FullInstrument
from ...magic.basics.pixelscale import Pixelscale
from ...core.tools import numbers
from ...magic.basics.vector import PixelShape
from ...magic.basics.coordinate import PixelCoordinate
from ...core.basics.range import QuantityRange
from pts.core.tools.utils import lazyproperty
# -----------------------------------------------------------------
default_truncation = 4.
# -----------------------------------------------------------------
sersic = "sersic"
exponential = "exponential"
deprojection = "deprojection"
# -----------------------------------------------------------------
models_2D = [sersic, exponential]
models_3D = [sersic, exponential, deprojection]
# -----------------------------------------------------------------
class Model(SimplePropertyComposite):
"""
This class ...
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
@abstractproperty
def xmin(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@abstractproperty
def xmax(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@abstractproperty
def xrange(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@property
def x_range(self):
"""
This function ...
:return:
"""
return self.xrange
# -----------------------------------------------------------------
@abstractproperty
def ymin(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@abstractproperty
def ymax(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@abstractproperty
def yrange(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@property
def y_range(self):
"""
This function ...
:return:
"""
return self.yrange
# -----------------------------------------------------------------
class Model2D(Model):
"""
This class ...
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
class Model3D(Model):
"""
This class ...
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
@abstractproperty
def zmin(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@abstractproperty
def zmax(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@abstractproperty
def zrange(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@property
def z_range(self):
"""
This function ...
:return:
"""
return self.zrange
# -----------------------------------------------------------------
#
# 3D MODELS
#
# -----------------------------------------------------------------
def load_3d_model(path):
"""
This function ...
:param path:
:return:
"""
first_line = fs.get_first_line(path)
# Create the appropriate model
if "SersicModel3D" in first_line: return SersicModel3D.from_file(path)
elif "ExponentialDiskModel3D" in first_line: return ExponentialDiskModel3D.from_file(path)
elif "DeprojectionModel3D" in first_line: return DeprojectionModel3D.from_file(path)
else: raise ValueError("Unrecognized model file")
# -----------------------------------------------------------------
def create_3d_model(model_type, **properties):
"""
This function ...
:param model_type:
:param properties:
:return:
"""
# Create the appropriate model
if model_type == sersic: return SersicModel3D(**properties)
elif model_type == exponential: return ExponentialDiskModel3D(**properties)
elif model_type == deprojection: return DeprojectionModel3D(**properties)
else: raise ValueError("Unrecognized model type: " + model_type)
# -----------------------------------------------------------------
# FROM SKIRT:
#SersicFunction::SersicFunction(double n)
# if (n<0.5 || n>10.0)
# throw FATALERROR("The Sersic parameter should be between 0.5 and 10 (n = " + QString::number(n) + ")");
# double b = 2.0*n - 1.0/3.0 + 4.0/405.0/n + 46.0/25515.0/(n*n) + 131.0/1148175.0/(n*n*n);
# double I0 = pow(b,2.0*n) / (M_PI*SpecialFunctions::gamma(2.0*n+1));
# int Ns = 101;
# _sv.resize(Ns);
# _Sv.resize(Ns);
# _Mv.resize(Ns);
# double logsmin = -6.0;
# double logsmax = 4.0;
# double dlogs = (logsmax-logsmin)/(Ns-1.0);
# for (int i=0; i<Ns; i++)
# {
# double logs = logsmin + i*dlogs;
# double s = pow(10.0,logs);
# _sv[i] = s;
# double alpha = b*pow(s,1.0/n);
# double sum = 0.0;
# int Nu = 10000;
# double tmax = 100.0;
# double umax = sqrt((tmax+1.0)*(tmax-1.0));
# double du = umax/Nu;
# for (int j=0; j<=Nu; j++)
# {
# double weight = 1.0;
# if (j==0 || j==Nu) weight=0.5;
# double u = j*du;
# double u2 = u*u;
# double w;
# if (u>1e-3)
# w = (pow(1.0+u2,2.0*n)-1.0)/u2;
# else
# w = 2.0*n + n*(2.0*n-1.0)*u2 + 2.0/3.0*n*(2.0*n-1.0)*(n-1.0)*u2*u2;
# double integrandum = 2.0*exp(-alpha*(1.0+u2))/sqrt(w);
# sum += weight*integrandum;
# }
# _Sv[i] = I0 * pow(b,n) * pow(alpha,1.0-n) / M_PI * du * sum;
# }
#
# // calculate the cumulative mass
#
# for (int i=1; i<Ns; i++)
# {
# double sum = 0.0;
# for (int j=0; j<=32; j++)
# {
# double weight=1.0;
# if (j==0 || j==32) weight=0.5;
# double ds = (_sv[i]-_sv[i-1])/32.0;
# double s = _sv[i-1] + j*ds;
# double S = operator()(s);
# sum += weight*S*s*s*ds;
# }
# double dM = 4.0 * M_PI * sum;
# _Mv[i] = _Mv[i-1] + dM;
# }
# for (int i=0; i<Ns; i++) _Mv[i] /= _Mv[Ns-1];
# -----------------------------------------------------------------
# FROM GalSIM:
# https://github.com/GalSim-developers/GalSim/blob/4a2a281f96d11eab63c7584b34e2598a978e5b86/src/SBSersic.cpp
# // Find what radius encloses (1-missing_flux_frac) of the total flux in a Sersic profile,
# // in units of half-light radius re.
# double SBSersic::SersicInfo::findMaxR(double missing_flux_frac, double gamma2n)
# {
# // int(exp(-b r^1/n) r, r=R..inf) = x * int(exp(-b r^1/n) r, r=0..inf)
# // = x n b^-2n Gamma(2n) [x == missing_flux_frac]
# // Change variables: u = b r^1/n,
# // du = b/n r^(1-n)/n dr
# // = b/n r^1/n dr/r
# // = u/n dr/r
# // r dr = n du r^2 / u
# // = n du (u/b)^2n / u
# // n b^-2n int(u^(2n-1) exp(-u), u=bR^1/n..inf) = x n b^-2n Gamma(2n)
# // Let z = b R^1/n
# //
# // int(u^(2n-1) exp(-u), u=z..inf) = x Gamma(2n)
# //
# // The lhs is an incomplete gamma function: Gamma(2n,z), which according to
# // Abramowitz & Stegun (6.5.32) has a high-z asymptotic form of:
# // Gamma(2n,z) ~= z^(2n-1) exp(-z) (1 + (2n-2)/z + (2n-2)(2n-3)/z^2 + ... )
# // ln(x Gamma(2n)) = (2n-1) ln(z) - z + 2(n-1)/z + 2(n-1)(n-2)/z^2
# // z = -ln(x Gamma(2n) + (2n-1) ln(z) + 2(n-1)/z + 2(n-1)(n-2)/z^2
# // Iterate this until it converges. Should be quick.
# dbg<<"Find maxR for missing_flux_frac = "<<missing_flux_frac<<std::endl;
# double z0 = -std::log(missing_flux_frac * gamma2n);
# // Successive approximation method:
# double z = 4.*(_n+1.); // A decent starting guess for a range of n.
# double oldz = 0.;
# const int MAXIT = 15;
# dbg<<"Start with z = "<<z<<std::endl;
# for(int niter=0; niter < MAXIT; ++niter) {
# oldz = z;
# z = z0 + (2.*_n-1.) * std::log(z) + 2.*(_n-1.)/z + 2.*(_n-1.)*(_n-2.)/(z*z);
# dbg<<"z = "<<z<<", dz = "<<z-oldz<<std::endl;
# if (std::abs(z-oldz) < 0.01) break;
# }
# dbg<<"Converged at z = "<<z<<std::endl;
# double R=std::pow(z/_b, _n);
# dbg<<"R = (z/b)^n = "<<R<<std::endl;
# return R;
# }
# -----------------------------------------------------------------
class SersicModel3D(Model3D):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(SersicModel3D, self).__init__()
# Define properties
self.add_property("effective_radius", "quantity", "effective radius")
self.add_property("index", "real", "sersic index")
self.add_property("y_flattening", "real", "flattening along y direction", 1.)
self.add_property("z_flattening", "real", "flattening along z direction", 1.)
self.add_property("azimuth", "angle", "azimuth angle", Angle(0.0, "deg"))
self.add_property("tilt", "angle", "tilt angle", Angle(0.0, "deg"))
# Truncation
self.add_property("truncation", "real", "truncation limit (in units of effective_radius)", default_truncation)
# Set properties
self.set_properties(kwargs)
# -----------------------------------------------------------------
@classmethod
def from_2d(cls, sersic2d, inclination, position_angle, azimuth_or_tilt="azimuth"):
"""
:param sersic2d:
:param inclination:
:param position_angle:
:param azimuth_or_tilt:
:return:
"""
# Get effective radius and Sersic index
effective_radius = sersic2d.effective_radius
index = sersic2d.index
# Tilt angle and z flattening
if azimuth_or_tilt == "tilt":
# Calculate the intrinsic flattening
y_flattening = 1.
z_flattening = intrinsic_z_flattening(sersic2d.axial_ratio, inclination)
# Set azimuth
azimuth = 0. * u("deg")
# Calculate the tilt angle of the bulge (tilt w.r.t. the x-axis)
tilt = deproject_pa_to_tilt(sersic2d.position_angle - position_angle, inclination)
tilt = Angle(90., "deg") - tilt
# Azimuth angle and y flattening
elif azimuth_or_tilt == "azimuth":
# TODO: this is not working right and therefore unusable for the moment
# Calculate the intrinsic flattening
y_flattening = intrinsic_z_flattening(sersic2d.axial_ratio, inclination)
#z_flattening = intrinsic_z_flattening(sersic2d.axial_ratio, inclination)
z_flattening = 1.
# Calculate the azimuth angle of the bulge
azimuth = deproject_pa_to_azimuth(sersic2d.position_angle - position_angle, inclination)
# Set tilt
#tilt = Angle(90., "deg")
tilt = Angle(0., "deg")
# Other input
else: raise ValueError("Incorrect value for 'azimuth_or_tilt'")
# Create a new Sersic model and return it
return cls(effective_radius=effective_radius, index=index, y_flattening=y_flattening, z_flattening=z_flattening,
azimuth=azimuth, tilt=tilt)
# -----------------------------------------------------------------
@property
def xmin(self):
"""
This function ...
:return:
"""
return - self.truncation * self.effective_radius
# -----------------------------------------------------------------
@property
def xmax(self):
"""
This function ...
:return:
"""
return self.truncation * self.effective_radius
# -----------------------------------------------------------------
@property
def xrange(self):
"""
This function ...
:return:
"""
return QuantityRange(self.xmin, self.xmax)
# -----------------------------------------------------------------
@property
def ymin(self):
"""
This function ...
:return:
"""
return - self.truncation * self.effective_radius * self.y_flattening
# -----------------------------------------------------------------
@property
def ymax(self):
"""
This function ...
:return:
"""
return self.truncation * self.effective_radius * self.y_flattening
# -----------------------------------------------------------------
@property
def yrange(self):
"""
        This function ...
:return:
"""
return QuantityRange(self.ymin, self.ymax)
# -----------------------------------------------------------------
@property
def zmin(self):
"""
This function ...
:return:
"""
return - self.truncation * self.effective_radius * self.z_flattening
# -----------------------------------------------------------------
@property
def zmax(self):
"""
This function ...
:return:
"""
return self.truncation * self.effective_radius * self.z_flattening
# -----------------------------------------------------------------
@property
def zrange(self):
"""
This function ...
:return:
"""
return QuantityRange(self.zmin, self.zmax)
# -----------------------------------------------------------------
@property
def rho0(self):
"""
This function ...
:return:
"""
# FROM SKIRT
return 1.0 / self.effective_radius**3
# -----------------------------------------------------------------
@property
def b(self):
"""
This function ...
:return:
"""
# FROM SKIRT
return 2.0 * self.index - 1.0 / 3.0 + 4.0 / 405.0 / self.index + 46.0 / 25515.0 / self.index**2 + 131.0 / 1148175.0 / (self.index**3)
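        # NOTE (added check, not in the original source): the polynomial above
        # is the standard asymptotic expansion for the Sersic b_n parameter.
        # It can be compared against the 'exact' value used elsewhere in this
        # module, the root of Gamma(2n) = 2*gamma(2n, b):
        #
        #   from scipy.special import gammaincinv
        #   gammaincinv(2. * 4., 0.5)   # ~ 7.669; the expansion above also gives ~ 7.669 for n = 4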
# -----------------------------------------------------------------
def _sersic_function(self, s):
"""
This function ...
:param s:
:return:
"""
# FROM SKIRT
# Ns = _sv.size()
# if s <= _sv[0]: return _Sv[0]
# elif (s >= _sv[Ns - 1])
# return _Sv[Ns - 1]
# else:
# i = NR::locate_clip(_sv, s)
# return NR::interpolate_loglog(s, _sv[i], _sv[i + 1], _Sv[i], _Sv[i + 1]);
pass
# -----------------------------------------------------------------
def _spherical_density(self, radius):
"""
This function ...
:param radius:
:return:
"""
# FROM SKIRT
#amplitude = 1.
#s = radius / self.effective_radius
#return self.rho0 * sersic_function(s) #(*_sersicfunction)(s)
# ADAPTED FROM 2D SERSIC (ASTROPY):
#amplitude = self.rho0
#return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
# -----------------------------------------------------------------
def density(self, x, y, height):
"""
This function ...
:param x:
:param y:
:param height:
:return:
"""
# FROM SKIRT
#m = np.sqrt(radius**2 + height**2 / self.z_flattening**2)
#return 1.0 / self.z_flattening * self._spherical_density(m)
# ADAPTED FROM 2D SERSIC (ASTROPY)
bn = gammaincinv(2. * self.index, 0.5)
x0 = 0.0
y0 = 0.0
z0 = 0.0
#a, b = self.effective_radius, (1 - ellip) * self.effective_radius
a, b = self.effective_radius, self.y_flattening * self.effective_radius
c = self.z_flattening * self.effective_radius
#print("a", a)
#print("b", b)
#print("c", c)
# TODO: for now, we ignored the tilt angle
#cos_theta, sin_theta = np.cos(theta), np.sin(theta)
#x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
#x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
x_maj = x - x0
x_min = y - y0
zz = height - z0
amplitude = self.rho0
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2 + (zz / c) **2)
#print("z", z)
return amplitude * np.exp(-bn * (z ** (1 / self.index) - 1))
# -----------------------------------------------------------------
def density_function(self, unit="pc", normalize=False):
"""
This function ...
:param unit:
:param normalize:
:return:
"""
# Parse the unit
unit = u(unit)
density_unit = unit**(-3)
amplitude = self.rho0.to(density_unit).value
bn = gammaincinv(2. * self.index, 0.5)
        # Calculate unitless values
a = self.effective_radius.to(unit).value
b = (self.y_flattening * self.effective_radius).to(unit).value
c = (self.z_flattening * self.effective_radius).to(unit).value
# Define the function
def sersic(x, y, z):
"""
This function ...
:param x:
:param y:
:param z:
:return:
"""
t = np.sqrt((x / a) ** 2 + (y / b) ** 2 + (z / c) ** 2)
result = amplitude * np.exp(-bn * (t ** (1 / self.index) - 1))
if normalize: result /= np.sum(result)
return result
# Return the function
return sersic
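        # NOTE (added usage sketch, hypothetical values): the returned closure
        # expects plain (unitless) coordinates in the chosen unit, e.g.
        #
        #   bulge = SersicModel3D(effective_radius=1. * u("kpc"), index=4.)
        #   f = bulge.density_function(unit="pc")
        #   x, y, z = np.meshgrid(np.linspace(-3e3, 3e3, 64),
        #                         np.linspace(-3e3, 3e3, 64),
        #                         np.linspace(-3e3, 3e3, 64), indexing="ij")
        #   rho = f(x, y, z)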
# -----------------------------------------------------------------
# IN SKIFILE:
# # Create and add the new exponential disk geometry
#attrs = {"radialScale": str(radial_scale), "axialScale": str(axial_scale), "radialTrunc": str(radial_truncation), "axialTrunc": str(axial_truncation), "innerRadius": str(inner_radius)}
# _Rmax = radialTrunc
# _zmax = axialTrunc
# _Rmin = innerRadius
# -----------------------------------------------------------------
class ExponentialDiskModel3D(Model3D):
"""
This function ...
"""
def __init__(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(ExponentialDiskModel3D, self).__init__()
# Define properties
self.add_property("radial_scale", "quantity", "radial scale")
self.add_property("axial_scale", "quantity", "axial scale")
self.add_property("radial_truncation", "real", "radial truncation", 0)
self.add_property("axial_truncation", "real", "axial truncation", 0)
self.add_property("inner_radius", "real", "inner radius", 0)
self.add_property("tilt", "angle", "tilt", Angle(0.0, "deg"))
# Truncation
self.add_property("truncation", "real", "truncation limit (in units of the radial and axial scales", default_truncation)
# Set properties
self.set_properties(kwargs)
# -----------------------------------------------------------------
@classmethod
def from_2d(cls, exponentialdiskmodel2d, inclination, position_angle):
"""
This function ...
:param exponentialdiskmodel2d:
:param inclination:
:param position_angle:
:return:
"""
# Get the radial scale
radial_scale = exponentialdiskmodel2d.scalelength
# Calculate the intrinsic flattening
flattening = intrinsic_z_flattening(exponentialdiskmodel2d.axial_ratio, inclination)
# Calculate the axial scale
axial_scale = flattening * radial_scale
# Calculate the tilt angle of the disk (tilt w.r.t. the x-axis)
#tilt = deproject_pa_to_tilt(parameters.PA - position_angle, inclination)
tilt = deproject_pa_to_tilt(position_angle - exponentialdiskmodel2d.position_angle, inclination)
tilt = Angle(90., "deg") - tilt
# Create a new exponential disk model and return it
return cls(radial_scale=radial_scale, axial_scale=axial_scale, tilt=tilt)
# -----------------------------------------------------------------
@property
def xmin(self):
"""
This function ...
:return:
"""
if self.radial_truncation > 0: return - self.radial_truncation
else: return - self.truncation * self.radial_scale
# -----------------------------------------------------------------
@property
def xmax(self):
"""
This function ...
:return:
"""
if self.radial_truncation > 0: return self.radial_truncation
else: return self.truncation * self.radial_scale
# -----------------------------------------------------------------
@property
def xrange(self):
"""
This function ...
:return:
"""
return QuantityRange(self.xmin, self.xmax)
# -----------------------------------------------------------------
@property
def ymin(self):
"""
This function ...
:return:
"""
if self.radial_truncation > 0: return - self.radial_truncation
else: return - self.truncation * self.radial_scale
# -----------------------------------------------------------------
@property
def ymax(self):
"""
This function ...
:return:
"""
if self.radial_truncation > 0: return self.radial_truncation
else: return self.truncation * self.radial_scale
# -----------------------------------------------------------------
@property
def yrange(self):
"""
This function ...
:return:
"""
return QuantityRange(self.ymin, self.ymax)
# -----------------------------------------------------------------
@property
def zmin(self):
"""
This function ...
:return:
"""
if self.axial_truncation > 0: return - self.axial_truncation
else: return - self.truncation * self.axial_scale
# -----------------------------------------------------------------
@property
def zmax(self):
"""
This function ...
:return:
"""
if self.axial_truncation > 0: return self.axial_truncation
else: return self.truncation * self.axial_scale
# -----------------------------------------------------------------
@property
def zrange(self):
"""
This function ...
:return:
"""
return QuantityRange(self.zmin, self.zmax)
# -----------------------------------------------------------------
@property
def intz(self):
"""
This function ...
:return:
"""
if self.axial_truncation > 0: return -2.0 * self.axial_scale * (np.exp(- self.axial_truncation / self.axial_scale) - 1.)
else: return 2.0 * self.axial_scale
# -----------------------------------------------------------------
@property
def tmin(self):
"""
This function ...
:return:
"""
if self.inner_radius > 0: return np.exp(- self.inner_radius / self.radial_scale) * (1.0 + self.inner_radius / self.radial_scale)
else: return 1.0
# -----------------------------------------------------------------
@property
def tmax(self):
"""
This function ...
:return:
"""
if self.radial_truncation > 0: return np.exp(- self.radial_truncation / self.radial_scale) * (1.0 + self.radial_truncation / self.radial_scale)
else: return 0.0
# -----------------------------------------------------------------
@property
def intR(self):
"""
        This function ...
:return:
"""
return self.radial_scale**2 * (self.tmin - self.tmax)
# -----------------------------------------------------------------
@property
def intphi(self):
"""
This function ...
:return:
"""
return 2.0 * np.pi
# -----------------------------------------------------------------
@property
def rho0(self):
"""
This function ...
:return:
"""
return 1.0 / (self.intR * self.intphi * self.intz)
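        # NOTE (added explanation): intphi * intR * intz is the analytic
        # integral of exp(-R / radial_scale) * exp(-|z| / axial_scale) over
        # phi, R and z (including the optional inner-radius and truncation
        # corrections), so dividing by it normalizes the profile:
        #
        #   integral of rho over all space = rho0 * intphi * intR * intz = 1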
# -----------------------------------------------------------------
def surface_density(self, x, y):
"""
This function ...
:param x:
:param y:
:return:
"""
radius = np.sqrt(x ** 2 + y ** 2)
# Checks
if self.radial_truncation > 0.0 and radius > self.radial_truncation: return 0.0
if radius < self.inner_radius: return 0.0
# Return the surface density
return self.rho0 * np.exp(- radius / self.radial_scale)
# -----------------------------------------------------------------
def density(self, x, y, z):
"""
This function ...
:param x:
:param y:
:param z:
:return:
"""
absz = abs(z)
# Check
if self.axial_truncation > 0.0 and absz > self.axial_truncation: return 0.0
# Return the density
return self.surface_density(x, y) * np.exp(- absz / self.axial_scale)
# -----------------------------------------------------------------
def surface_density_function(self, unit="pc", normalize=False):
"""
This function ...
:param unit:
:param normalize:
:return:
"""
# Parse the unit
unit = u(unit)
density_unit = unit ** -3
amplitude = self.rho0.to(density_unit).value
radialtrunc = self.radial_truncation.to(unit).value if self.radial_truncation > 0.0 else None
innerradius = self.inner_radius.to(unit).value if self.inner_radius > 0.0 else None
radialscale = self.radial_scale.to(unit).value
# Define the function
def exponentialdisk(x, y):
radius = np.sqrt(x ** 2 + y ** 2)
density = amplitude * np.exp(- radius / radialscale)
if radialtrunc is not None: density[radius > radialtrunc] = 0.0
if innerradius is not None: density[radius < innerradius] = 0.0
if normalize: density /= np.sum(density)
return density
# Return the function
return exponentialdisk
# -----------------------------------------------------------------
def density_function(self, unit="pc", normalize=False):
"""
This function ...
:param unit:
:param normalize:
:return:
"""
# Parse the unit
unit = u(unit)
density_unit = unit**-3
amplitude = self.rho0.to(density_unit).value
radialtrunc = self.radial_truncation.to(unit).value if self.radial_truncation > 0.0 else None
axialtrunc = self.axial_truncation.to(unit).value if self.axial_truncation > 0.0 else None
innerradius = self.inner_radius.to(unit).value if self.inner_radius > 0.0 else None
radialscale = self.radial_scale.to(unit).value
axialscale = self.axial_scale.to(unit).value
# Define the function
def exponentialdisk(x, y, z):
"""
This function ...
:param x:
:param y:
:param z:
:return:
"""
radius = np.sqrt(x ** 2 + y ** 2)
height = abs(z)
density = amplitude * np.exp(- radius / radialscale) * np.exp(- height / axialscale)
if radialtrunc is not None: density[radius > radialtrunc] = 0.0
if axialtrunc is not None: density[height > axialtrunc] = 0.0
if innerradius is not None: density[radius < innerradius] = 0.0
if normalize: density /= np.sum(density)
return density
# Return the function
return exponentialdisk
# -----------------------------------------------------------------
class RingModel3D(Model):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
This function ...
:param kwargs:
"""
# Call the constructor of the base class
super(RingModel3D, self).__init__()
# Define the properties
self.add_property("radius", "quantity", "radius of the ring")
self.add_property("width", "quantity", "width of the ring")
self.add_property("height", "quantity", "height of the ring")
# Set the properties
self.set_properties(kwargs)
# -----------------------------------------------------------------
class DeprojectionModel3D(Model3D):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(DeprojectionModel3D, self).__init__()
# position angle: -360 deg to 360 deg
# inclination: 0 deg to 90 deg
# center in pixel coordinates!
# Define the properties
self.add_property("filename", "string", "name of the input FITS file")
self.add_property("pixelscale", "quantity", "physical pixelscale of the FITS image")
self.add_property("position_angle", "angle", "position angle")
self.add_property("inclination", "angle", "inclination")
self.add_property("x_size", "positive_integer", "number of x pixels in the map")
self.add_property("y_size", "positive_integer", "number of y pixels in the map")
self.add_property("x_center", "real", "x center in image coordinates")
self.add_property("y_center", "real", "y center in image coordinates")
self.add_property("scale_height", "quantity", "scale height")
# Path of the directory containing the map
self.add_property("dirpath", "string", "path to the directory where the map may be present", fs.cwd())
# Distance (if not defined in the map)
self.add_property("distance", "quantity", "distance to the galaxy")
# Truncation
self.add_property("axial_truncation", "real", "truncation in units of the scale height", default_truncation)
# Truncate zero values around the luminosity of the map
self.add_property("truncate_zeros", "boolean", "truncate in the xy plane around where the map is above zero", True)
# Set the properties
self.set_properties(kwargs)
# The map that is loaded
self._map = None
# -----------------------------------------------------------------
@classmethod
def from_wcs(cls, wcs, galaxy_center, distance, pa, inclination, filepath, scale_height):
"""
This function ...
:param wcs:
:param galaxy_center:
:param distance:
:param pa:
:param inclination:
:param filepath
:param scale_height:
:return:
"""
# Get the center pixel
pixel_center = galaxy_center.to_pixel(wcs)
xc = pixel_center.x
yc = pixel_center.y
# Get the pixelscale in physical units
pixelscale_angular = wcs.average_pixelscale.to("deg") # in deg
pixelscale = (pixelscale_angular * distance).to("pc", equivalencies=dimensionless_angles())
# Get the number of x and y pixels
x_size = wcs.xsize
y_size = wcs.ysize
# Create the deprojection model
deprojection = cls(filename=filepath, pixelscale=pixelscale, position_angle=pa, inclination=inclination,
x_size=x_size, y_size=y_size, x_center=xc, y_center=yc, scale_height=scale_height, distance=distance)
# Return the deprojection model
return deprojection
# -----------------------------------------------------------------
@property
def center(self):
return PixelCoordinate(x=self.x_center, y=self.y_center)
# -----------------------------------------------------------------
@property
def filepath(self):
return fs.absolute_or_in(self.filename, self.dirpath)
# -----------------------------------------------------------------
@property
def has_map(self):
"""
has_map == True means that the map can be accessed via self.map
:return:
"""
return self._map is not None or fs.is_file(self.filepath)
# -----------------------------------------------------------------
@property
def map_is_loaded(self):
return self._map is not None
# -----------------------------------------------------------------
@property
def map(self):
"""
This function ...
:return:
"""
if self._map is not None: return self._map
elif fs.is_file(self.filepath):
# Load the frame
frame = Frame.from_file(self.filepath)
# Verify the image properties
if self.x_size != frame.xsize: raise ValueError("Number of x pixels does not correspond with the number of x pixels of the image")
if self.y_size != frame.ysize: raise ValueError("Number of y pixels does not correspond with the number of y pixels of the image")
# Normalize the map
frame.normalize()
# Set the map
self._map = frame
# Return the map
return self._map
# Error
else: raise ValueError("Map cannot be loaded from '" + self.filepath + "'")
# -----------------------------------------------------------------
@map.setter
def map(self, value):
"""
This function ...
:param value:
:return:
"""
# Make copy
frame = value.copy()
# Verify properties
if self.x_size != frame.xsize: raise ValueError("Number of x pixels does not correspond with the number of x pixels of the image")
if self.y_size != frame.ysize: raise ValueError("Number of y pixels does not correspond with the number of y pixels of the image")
# Normalize
frame.normalize()
# Set the map
self._map = frame
# -----------------------------------------------------------------
@lazyproperty
def map_zeros(self):
return self.map.zeroes
# -----------------------------------------------------------------
@lazyproperty
def map_nonzeroes(self):
return self.map.nonzeroes
# -----------------------------------------------------------------
@lazyproperty
def map_zeros_pixels(self):
return self.map.zeroes_pixels
# -----------------------------------------------------------------
@lazyproperty
def map_nonzeroes_pixels(self):
return self.map.nonzeroes_pixels
# -----------------------------------------------------------------
@lazyproperty
def map_nonzeroes_x(self):
return self.map.nonzeroes_x
# -----------------------------------------------------------------
@lazyproperty
def map_nonzeroes_y(self):
return self.map.nonzeroes_y
# -----------------------------------------------------------------
@lazyproperty
def min_nonzero_x(self):
return np.min(self.map_nonzeroes_x)
# -----------------------------------------------------------------
@lazyproperty
def max_nonzero_x(self):
return np.max(self.map_nonzeroes_x)
# -----------------------------------------------------------------
@lazyproperty
def min_nonzero_y(self):
return np.min(self.map_nonzeroes_y)
# -----------------------------------------------------------------
@lazyproperty
def max_nonzero_y(self):
return np.max(self.map_nonzeroes_y)
# -----------------------------------------------------------------
@lazyproperty
def ntruncated_min_x(self):
if self.truncate_zeros: return self.min_nonzero_x
else: return 0.
# -----------------------------------------------------------------
@lazyproperty
def ntruncated_max_x(self):
if self.truncate_zeros: return self.xsize - self.max_nonzero_x
else: return 0.
# -----------------------------------------------------------------
@lazyproperty
def ntruncated_min_y(self):
if self.truncate_zeros: return self.min_nonzero_y
else: return 0.
# -----------------------------------------------------------------
@lazyproperty
def ntruncated_max_y(self):
if self.truncate_zeros: return self.ysize - self.max_nonzero_y
else: return 0.0
# -----------------------------------------------------------------
@property
def xsize(self):
## NO: don't return the map xsize, return the deprojected xsize? -> NO: not needed so far
assert self.map.xsize == self.x_size
return self.map.xsize
# -----------------------------------------------------------------
@property
def ysize(self):
## NO: don't return the map ysize, return the deprojected ysize? -> NO: not needed so far
assert self.map.ysize == self.y_size
return self.map.ysize
# -----------------------------------------------------------------
@property
def galaxy_distance(self):
if self.distance is not None: return self.distance
else: return self.map.distance
# -----------------------------------------------------------------
@property
def has_distance(self):
return self.distance is not None
# -----------------------------------------------------------------
@property
def has_galaxy_distance(self):
return self.has_distance or (self.has_map and self.map.distance is not None)
# -----------------------------------------------------------------
@property
def angular_pixelscale(self):
"""
This function ....
:return:
"""
# Check whether the distance is defined
if not self.has_galaxy_distance: raise ValueError("Distance of the map is not defined")
# Calculate the pixelscale in degrees
pixelscale = self.pixelscale / self.galaxy_distance * u("rad")
return Pixelscale(pixelscale.to("arcsec"))
# -----------------------------------------------------------------
@lazyproperty
def projection(self):
"""
This function ...
:return:
"""
# Check whether the distance is defined
if not self.has_galaxy_distance: raise ValueError("Distance of the map is not defined")
azimuth = 0.0
# Create the 'earth' projection system
return GalaxyProjection.from_deprojection(self, self.galaxy_distance, azimuth)
# -----------------------------------------------------------------
@lazyproperty
def faceon_projection(self):
"""
This function ...
:return:
"""
# Check whether the distance is defined
if self.galaxy_distance is None: raise ValueError("Distance of the map is not defined")
# Create the face-on projection system
return FaceOnProjection.from_deprojection(self, self.galaxy_distance)
# -----------------------------------------------------------------
@lazyproperty
def edgeon_projection(self):
"""
This function ...
:return:
"""
# Check whether the distance is defined
if self.galaxy_distance is None: raise ValueError("Distance of the map is not defined")
# Create the edge-on projection system
return EdgeOnProjection.from_deprojection(self, self.galaxy_distance)
# -----------------------------------------------------------------
@lazyproperty
def frame_instrument(self):
return FrameInstrument.from_projection(self.projection)
# -----------------------------------------------------------------
@lazyproperty
def simple_instrument(self):
return SimpleInstrument.from_projection(self.projection)
# -----------------------------------------------------------------
@lazyproperty
def sed_instrument(self):
return SEDInstrument.from_projection(self.projection)
# -----------------------------------------------------------------
@lazyproperty
def full_instrument(self):
return FullInstrument.from_projection(self.projection)
# -----------------------------------------------------------------
@lazyproperty
def faceon_frame_instrument(self):
return FrameInstrument.from_projection(self.faceon_projection)
# -----------------------------------------------------------------
@lazyproperty
def faceon_simple_instrument(self):
return SimpleInstrument.from_projection(self.faceon_projection)
# -----------------------------------------------------------------
@lazyproperty
def faceon_sed_instrument(self):
return SEDInstrument.from_projection(self.faceon_projection)
# -----------------------------------------------------------------
@lazyproperty
def faceon_full_instrument(self):
return FullInstrument.from_projection(self.faceon_projection)
# -----------------------------------------------------------------
@lazyproperty
def edgeon_frame_instrument(self):
return FrameInstrument.from_projection(self.edgeon_projection)
# -----------------------------------------------------------------
@lazyproperty
def edgeon_simple_instrument(self):
return SimpleInstrument.from_projection(self.edgeon_projection)
# -----------------------------------------------------------------
@lazyproperty
def edgeon_sed_instrument(self):
return SEDInstrument.from_projection(self.edgeon_projection)
# -----------------------------------------------------------------
@lazyproperty
def edgeon_full_instrument(self):
return FullInstrument.from_projection(self.edgeon_projection)
# -----------------------------------------------------------------
@property
def xmax(self):
#print("ntruncated max x: " + str(self.ntruncated_max_x) + " of " + str(self.xsize))
return (self.x_size - self.x_center - self.ntruncated_max_x) * self.pixelscale
# -----------------------------------------------------------------
@property
def map_xmax(self):
return (self.x_size - self.x_center) * self.pixelscale
# -----------------------------------------------------------------
@property
def xmin(self):
#print("ntruncated min x: " + str(self.ntruncated_min_x) + " of " + str(self.xsize))
return - (self.x_center - self.ntruncated_min_x) * self.pixelscale
# -----------------------------------------------------------------
@property
def map_xmin(self):
return - self.x_center * self.pixelscale
# -----------------------------------------------------------------
@property
def xrange(self):
return QuantityRange(self.xmin, self.xmax)
# -----------------------------------------------------------------
@property
def ymax(self):
#print("ntruncated max y: " + str(self.ntruncated_max_y) + " of " + str(self.ysize))
return (self.y_size - self.y_center - self.ntruncated_max_y) * self.pixelscale
# -----------------------------------------------------------------
@property
def map_ymax(self):
return (self.y_size - self.y_center) * self.pixelscale
# -----------------------------------------------------------------
@property
def ymin(self):
#print("ntruncated min y: " + str(self.ntruncated_min_y) + " of " + str(self.ysize))
return - (self.y_center - self.ntruncated_min_y) * self.pixelscale
# -----------------------------------------------------------------
@property
def map_ymin(self):
return - self.y_center * self.pixelscale
# -----------------------------------------------------------------
@property
def yrange(self):
return QuantityRange(self.ymin, self.ymax)
# -----------------------------------------------------------------
@property
def zmin(self):
return - self.axial_truncation * self.scale_height
# -----------------------------------------------------------------
@property
def zmax(self):
return self.axial_truncation * self.scale_height
# -----------------------------------------------------------------
@property
def zrange(self):
return QuantityRange(self.zmin, self.zmax)
# -----------------------------------------------------------------
@property
def position_angle_radians(self):
return self.position_angle.to("rad").value
# -----------------------------------------------------------------
@property
def inclination_radians(self):
return self.inclination.to("rad").value
# -----------------------------------------------------------------
@property
def cospa(self):
# Calculate the sines and cosines of the position angle and inclination
return np.cos(self.position_angle_radians)
# -----------------------------------------------------------------
@property
def sinpa(self):
return np.sin(self.position_angle_radians)
# -----------------------------------------------------------------
@property
def cosi(self):
return np.cos(self.inclination_radians)
# -----------------------------------------------------------------
@property
def sini(self):
return np.sin(self.inclination_radians)
# -----------------------------------------------------------------
@property
def deltay(self):
return self.pixelscale
# -----------------------------------------------------------------
@property
def deltax(self):
# Calculate the physical pixel size in the x direction of the galactic plane
return self.pixelscale / self.cosi
# -----------------------------------------------------------------
@lazyproperty
def corner1(self):
return self.derotate(self.xmax, self.ymax)
# -----------------------------------------------------------------
@lazyproperty
def corner2(self):
return self.derotate(self.xmin, self.ymax)
# -----------------------------------------------------------------
@lazyproperty
def corner3(self):
return self.derotate(self.xmin, self.ymin)
# -----------------------------------------------------------------
@lazyproperty
def corner4(self):
return self.derotate(self.xmax, self.ymin)
# -----------------------------------------------------------------
def rotate(self, x, y):
"""
This function ...
:param x:
:param y:
"""
# Cache the original values of x and y
xorig = x
yorig = y
# Calculate the coordinates in the plane of the image
x = self.sinpa * xorig + self.cospa * yorig
y = -self.cospa * xorig + self.sinpa * yorig
# Return
return x, y
# -----------------------------------------------------------------
def rotate_arrays(self, x, y):
"""
This function ...
:param x:
:param y:
:return:
"""
xorig = np.copy(x)
yorig = np.copy(y)
# Calculate the coordinates in the plane of the image
x = self.sinpa * xorig + self.cospa * yorig
y = -self.cospa * xorig + self.sinpa * yorig
# Return
return x, y
# -----------------------------------------------------------------
def derotate(self, x, y):
"""
This function ...
:param x:
:param y:
:return:
"""
# Cache the original values of x and y
xorig = x
yorig = y
#// Calculate the coordinates in the rotated plane
x = (self.sinpa * xorig) - (self.cospa * yorig)
y = (self.cospa * xorig) + (self.sinpa * yorig)
# Return
return x, y
# -----------------------------------------------------------------
def project(self, x):
"""
This function ...
:return:
"""
return x * self.cosi
# -----------------------------------------------------------------
def project_array(self, x):
"""
This function ...
:param x:
:return:
"""
return x * self.cosi
# -----------------------------------------------------------------
def deproject(self, x):
"""
This function ...
:param x:
:return:
"""
return x / self.cosi
# -----------------------------------------------------------------
def surface_density(self, x, y):
"""
This function ...
:param x:
:param y:
:return:
"""
# Project and rotate the x and y coordinates
x = self.project(x)
x, y = self.rotate(x, y)
# Find the corresponding pixel in the image
i = numbers.round_down_to_int((x - self.map_xmin) / self.deltay)
j = numbers.round_down_to_int((y - self.map_ymin) / self.deltay)
# Not on the image
if i < 0 or i >= self.x_size or j < 0 or j >= self.y_size: return 0.0
# Return the density
return self.map[j, i]
# -----------------------------------------------------------------
@property
def density_normalization(self):
return 1. / (2. * self.scale_height) / (self.deltax * self.deltay)
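        # NOTE (added explanation): the map is normalized so its pixel values
        # sum to one; dividing by the deprojected pixel area (deltax * deltay)
        # and by the vertical integral of exp(-|z| / scale_height), which is
        # 2 * scale_height, turns each pixel weight into a volume density
        # whose integral over all space is again one.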
# -----------------------------------------------------------------
def density(self, x, y, z):
"""
This function ...
:param x:
:param y:
:param z:
:return:
"""
# Return the density
z = abs(z)
return self.surface_density(x, y) * np.exp(- z / self.scale_height) * self.density_normalization
# -----------------------------------------------------------------
@property
def shape(self):
return PixelShape(self.ysize, self.xsize)
# -----------------------------------------------------------------
@property
def npixels(self):
return self.shape.ntotalpixels
# -----------------------------------------------------------------
def surface_density_function(self, unit="pc", normalize=False):
"""
This function ...
:param unit:
:param normalize:
:return:
"""
# Get properties as scalars (no units)
xmin_scalar = self.map_xmin.to(unit).value
ymin_scalar = self.map_ymin.to(unit).value
deltay_scalar = self.deltay.to(unit).value
deltax_scalar = self.deltax.to(unit).value
# Define the density function
def deprojection(x, y):
# Project and rotate coordinates
x = self.project_array(x)
x, y = self.rotate_arrays(x, y)
# Determine the coordinate mapping
x_mapping = ((x - xmin_scalar) / deltay_scalar - 0.5).astype(int)
y_mapping = ((y - ymin_scalar) / deltay_scalar - 0.5).astype(int)
xy = x_mapping + self.xsize * y_mapping
nx = xy.shape[0]
ny = xy.shape[1]
#xy = xy[:, :, 0]
arrangement_x = range(self.xsize) * self.ysize
lists = [[value] * self.xsize for value in range(self.ysize)]
arrangement_y = list(itertools.chain.from_iterable(lists))
xy_flattened = xy.flatten()
x_mapping = [arrangement_x[k] if 0 <= k < self.npixels else -1 for k in xy_flattened]
y_mapping = [arrangement_y[k] if 0 <= k < self.npixels else -1 for k in xy_flattened]
mapping = [y_mapping, x_mapping]
cval = 0.0
data = ndimage.map_coordinates(self.map.data, mapping, mode='constant', cval=cval)
            # Reshape into the output grid shape
deprojected = data.reshape((ny, nx))
# Normalize?
if normalize: deprojected /= np.sum(deprojected)
return deprojected
# Return the function
return deprojection
# -----------------------------------------------------------------
def density_function(self, unit="pc", normalize=False):
"""
This function ...
:param unit:
:param normalize:
:return:
"""
# Get properties as scalars (no units)
xmin_scalar = self.map_xmin.to(unit).value
ymin_scalar = self.map_ymin.to(unit).value
deltay_scalar = self.deltay.to(unit).value
deltax_scalar = self.deltax.to(unit).value
scale_height_scalar = self.scale_height.to(unit).value
# Define the density function
def deprojection(x, y, z):
"""
            This function ...
:param x:
:param y:
:param z:
:return:
"""
# Project and rotate coordinates
x = self.project_array(x)
x, y = self.rotate_arrays(x, y)
#i_array = numbers.round_down_to_int((x - xmin_scalar) / deltay_scalar)
#j_array = numbers.round_down_to_int((y - ymin_scalar) / deltay_scalar)
# grid = mapping
#grid = np.array([yy1.reshape(outshape), xx1.reshape(outshape)])
# Use Scipy to create the new image
#data = scipy.ndimage.map_coordinates(frame, mapping, **kwargs)
# Determine the coordinate mapping
#x_mapping = (x - xmin_scalar) / deltay_scalar - 0.5
#y_mapping = (y - ymin_scalar) / deltay_scalar - 0.5
x_mapping = ((x - xmin_scalar) / deltay_scalar - 0.5).astype(int)
y_mapping = ((y - ymin_scalar) / deltay_scalar - 0.5).astype(int)
#x_mapping = x_mapping.flatten()
#y_mapping = y_mapping.flatten()
xy = x_mapping + self.xsize * y_mapping
#print("xy", xy)
#print("x_mapping", x_mapping)
#print("y_mapping", y_mapping)
#mapping = np.array([np.array([xi, yi]) for xi, yi in zip(x_mapping.flat, y_mapping.flat)])
#mapping = np.array([x_mapping, y_mapping])
#mapping = np.vstack((x_mapping,y_mapping))
#print("mapping", mapping)
#print(mapping.shape)
#print("output shape", mapping.shape[1:])
#print("input.ndim", self.map.data.ndim)
# output_shape = coordinates.shape[1:]
# input.ndim < 1 or len(output_shape) < 1:
#print(xy)
#print(xy.shape)
nx = xy.shape[0]
ny = xy.shape[1]
#nz = xy.shape[2]
#print(xy[:,:,0])
xy = xy[:, :, 0]
#print(xy)
arrangement_x = range(self.xsize) * self.ysize
lists = [[value] * self.xsize for value in range(self.ysize)]
#print(lists)
#arrangement_y = itertools.chain(*lists)
arrangement_y = list(itertools.chain.from_iterable(lists))
#print(xy.shape)
#print("output shape", xy.shape[1:])
#print("input.ndim", self.map.data.flatten().ndim)
# Create mapping
#mapping = [[arrangement_y[yi], arrangement_x[xi]] for xi, yi in xy]
#print("mapping", mapping)
#print([k for k in xy])
xy_flattened = xy.flatten()
#print(arrangement_x)
#print(arrangement_y.shape)
x_mapping = [arrangement_x[k] if 0<=k<self.npixels else -1 for k in xy_flattened]
y_mapping = [arrangement_y[k] if 0<=k<self.npixels else -1 for k in xy_flattened]
#mapping = [x_mapping, y_mapping]
mapping = [y_mapping, x_mapping]
#cval = float('nan')
cval = 0.0
data = ndimage.map_coordinates(self.map.data, mapping, mode='constant', cval=cval)
#data = ndimage.map_coordinates(self.map.data.flatten(), xy, mode="constant", cval=cval)
#nx = x.shape[2]
#ny = y.shape[1]
#print("nx", nx)
#print("")
            # Reshape into the output grid shape
deprojected = data.reshape((ny, nx, 1))
# Return
z = abs(z)
result = deprojected * np.exp(- z / scale_height_scalar) / (2. * scale_height_scalar) / (deltax_scalar * deltay_scalar)
# Normalize?
if normalize: result /= np.sum(result)
return result
# Return the function
return deprojection
# -----------------------------------------------------------------
def intrinsic_z_flattening(qprime, inclination):
"""
This function ...
:param qprime:
:param inclination:
"""
# Get the inclination angle in radians
i = inclination.to("radian").value
# Calculate the intrinsic flattening
difference = qprime**2 - math.cos(i)**2
if difference < 0:
# Give warning
warnings.warn("Could not convert an apparent flattening of " + str(qprime) + " with an inclination of " + str(inclination) + " to a vertical flattening: using the apparent flattening")
q = qprime
else: q = math.sqrt(difference/math.sin(i)**2)
# Return the intrinsic flattening
return q
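    # NOTE (added worked example, hypothetical numbers): for an apparent axial
    # ratio q' = 0.3 observed at an inclination of 75 degrees,
    #   q = sqrt((0.3**2 - cos(75 deg)**2) / sin(75 deg)**2) ~ 0.16
    # i.e. the intrinsic disk is flatter than the projected axial ratio suggests.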
# -----------------------------------------------------------------
def intrinsic_y_flattening(qprime, inclination):
"""
This function ...
:param qprime:
:param inclination:
:return:
"""
# Get the inclination angle in radians
i = inclination.to("radian").value
    # Calculate the 'inclination' w.r.t. the y axis
    i_wrt_y = 0.5 * math.pi - i
    # Calculate the intrinsic flattening
    q = math.sqrt((qprime ** 2 - math.cos(i_wrt_y) ** 2) / math.sin(i_wrt_y) ** 2)
# Return the intrinsic flattening
return q
# -----------------------------------------------------------------
def project_azimuth_to_pa(azimuth, inclination):
"""
This function ...
:param azimuth:
:param inclination:
"""
# Get the azimuth angle and inclination in radians
azimuth_radian = azimuth.to("radian").value
i_radian = inclination.to("radian").value
denominator = math.sqrt(math.cos(azimuth_radian)**2 * math.cos(i_radian)**2 + math.sin(azimuth_radian)**2)
cos_pa = math.cos(azimuth_radian) * math.cos(i_radian) / denominator
sin_pa = math.sin(azimuth_radian) / denominator
pa_radian = math.atan2(sin_pa, cos_pa) * u("radian")
return pa_radian.to("deg")
# -----------------------------------------------------------------
def deproject_pa_to_azimuth(pa, inclination):
"""
This function ...
:param pa:
:param inclination:
:return:
"""
# Get the PA and inclination in radians
pa_radian = pa.to("radian").value
i_radian = inclination.to("radian").value
denominator = math.sqrt(math.cos(pa_radian)**2 + math.sin(pa_radian)**2 * math.cos(i_radian)**2)
cos_azimuth = math.cos(pa_radian) / denominator
sin_azimuth = math.sin(pa_radian) * math.cos(i_radian) / denominator
azimuth_radian = math.atan2(sin_azimuth, cos_azimuth) * u("radian")
return azimuth_radian.to("deg")
# -----------------------------------------------------------------
def project_tilt_to_pa(tilt, inclination):
"""
This function ...
:param tilt:
:param inclination:
:return:
"""
# Get the tilt angle and inclination in radians
tilt_radian = tilt.to("radian").value
i_radian = inclination.to("radian").value
denominator = math.sqrt(math.sin(tilt_radian)**2 * math.sin(i_radian)**2 + math.cos(tilt_radian)**2)
cos_pa = math.sin(tilt_radian) * math.sin(i_radian) / denominator
sin_pa = math.cos(tilt_radian) / denominator
pa_radian = math.atan2(sin_pa, cos_pa) * u("radian")
return pa_radian.to("deg")
# -----------------------------------------------------------------
def deproject_pa_to_tilt(pa, inclination):
"""
This function ...
:param pa:
:param inclination:
:return:
"""
# Get the PA and inclination in radians
pa_radian = pa.to("radian").value
i_radian = inclination.to("radian").value
denominator = math.sqrt(math.sin(pa_radian)**2 * math.sin(i_radian)**2 + math.cos(pa_radian)**2)
cos_tilt = math.sin(pa_radian) * math.sin(i_radian) / denominator
sin_tilt = math.cos(pa_radian) / denominator
tilt_radian = math.atan2(sin_tilt, cos_tilt) * u("radian")
return tilt_radian.to("deg")
# -----------------------------------------------------------------
# Test the deprojection functions:
#test_angles = []
#test_angles.append(Angle(33, "deg"))
#test_angles.append(Angle(45, "deg"))
#test_angles.append(Angle(0, "deg"))
#test_angles.append(Angle(90, "deg"))
#test_angles.append(Angle(189, "deg"))
#for test_angle in test_angles:
# result = deproject_pa_to_tilt(test_angle, Angle(0.0, "deg"))
# print("Should fail/be zero:", Angle(90., "deg") - result, test_angle)
# result = deproject_pa_to_tilt(test_angle, Angle(90., "deg"))
# print("Should be the same:", Angle(90., "deg") - result, test_angle)
#result = project_tilt_to_pa(test_angle, Angle(0.0, "deg"))
#print("Should fail/be zero:", Angle(90., "deg") - result, test_angle)
#result = project_tilt_to_pa(test_angle, Angle(90., "deg"))
#print("Should be the same:", Angle(90., "deg") - result, test_angle)
# -----------------------------------------------------------------
#
# 2D MODELS
#
# -----------------------------------------------------------------
def load_2d_model(path):
"""
This function ...
:param path:
:return:
"""
# Get the first line of the file
with open(path, 'r') as f: first_line = f.readline()
# Create the appropriate model
if "SersicModel2D" in first_line: return SersicModel2D.from_file(path)
elif "ExponentialDiskModel2D" in first_line: return ExponentialDiskModel2D.from_file(path)
else: raise ValueError("Unrecognized model file")
# -----------------------------------------------------------------
class SersicModel2D(Model2D):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
The constructor ...
:param kwargs:
"""
# Call the constructor of the base class
super(SersicModel2D, self).__init__()
# Define properties
self.add_property("rel_contribution", "real", "relative contribution")
self.add_property("fluxdensity", "quantity", "flux density")
self.add_property("axial_ratio", "real", "axial ratio")
self.add_property("position_angle", "angle", "position angle") # (degrees ccw from North)
self.add_property("effective_radius", "quantity", "effective radius")
self.add_property("index", "real", "sersic index")
# Truncation
self.add_property("truncation", "real", "truncation radius relative to the effective radius", default_truncation)
# Set properties
self.set_properties(kwargs)
# -----------------------------------------------------------------
@property
def xmin(self):
return - self.truncation * self.effective_radius
# -----------------------------------------------------------------
@property
def xmax(self):
return self.truncation * self.effective_radius
# -----------------------------------------------------------------
@property
def xrange(self):
return QuantityRange(self.xmin, self.xmax)
# -----------------------------------------------------------------
@property
def ymin(self):
return - self.truncation * self.effective_radius
# -----------------------------------------------------------------
@property
def ymax(self):
return self.truncation * self.effective_radius
# -----------------------------------------------------------------
@property
def yrange(self):
return QuantityRange(self.ymin, self.ymax)
# -----------------------------------------------------------------
@property
def position_angle_radian(self):
return self.position_angle.to("rad").value
# -----------------------------------------------------------------
def density(self, x, y):
"""
This function ...
:param x:
:param y:
:return:
"""
# FROM ASTROPY, SERSIC2D FUNCTION
bn = gammaincinv(2. * self.index, 0.5)
# 1 - ELLIP = AXIAL RATIO?
x_0 = y_0 = 0.0
#a, b = self.effective_radius, (1. - ellip) * self.effective_radius
a, b = self.effective_radius, self.axial_ratio * self.effective_radius
cos_theta, sin_theta = np.cos(self.position_angle_radian), np.sin(self.position_angle_radian)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
amplitude = 1.
return amplitude * np.exp( -bn * (z ** (1 / self.index) - 1))
# -----------------------------------------------------------------
class ExponentialDiskModel2D(Model2D):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
The constructor ...
"""
# Call the constructor of the base class
super(ExponentialDiskModel2D, self).__init__()
# Define properties
self.add_property("rel_contribution", "real", "relative contribution")
self.add_property("fluxdensity", "quantity", "flux density")
self.add_property("axial_ratio", "real", "axial ratio")
self.add_property("position_angle", "angle", "position_angle") # (degrees ccw from North)
self.add_property("mu0", "quantity", "surface brightness at center")
self.add_property("scalelength", "quantity", "scale length")
# The truncation
self.add_property("truncation", "real", "truncation length in units of the scalelength", default_truncation)
# Set properties
self.set_properties(kwargs)
# -----------------------------------------------------------------
@property
def xmin(self):
return - self.truncation * self.scalelength
# -----------------------------------------------------------------
@property
def xmax(self):
return self.truncation * self.scalelength
# -----------------------------------------------------------------
@property
def xrange(self):
return QuantityRange(self.xmin, self.xmax)
# -----------------------------------------------------------------
@property
def ymin(self):
return - self.truncation * self.scalelength
# -----------------------------------------------------------------
@property
def ymax(self):
return self.truncation * self.scalelength
# -----------------------------------------------------------------
@property
def yrange(self):
return QuantityRange(self.ymin, self.ymax)
# -----------------------------------------------------------------
|
SKIRT/PTS
|
modeling/basics/models.py
|
Python
|
agpl-3.0
| 69,727
|
[
"Galaxy"
] |
d20709532cb8d8e55e845a34c615325b2f633ec04f37d43afc0a23e538471ba0
|
""" Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
from math import log, sqrt
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, check_random_state, as_float_array
from ..utils import atleast2d_or_csr
from ..utils import deprecated
from ..utils.sparsefuncs import mean_variance_axis0
from ..utils.extmath import (fast_logdet, safe_sparse_dot, randomized_svd,
fast_dot)
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
data spectrum
rank: int,
tested rank value
n_samples: int,
number of samples
    n_features: int,
        embedding/empirical dimension
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
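# NOTE (added usage sketch, hypothetical data, not part of upstream
# scikit-learn): for a spectrum with a clear gap the inferred rank should
# match the number of dominant components, e.g.
#
#   spectrum = np.array([10., 8., 6., 0.1, 0.09, 0.08])
#   _infer_dimension_(spectrum, n_samples=100, n_features=6)  # expected to favour 3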
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If n_components is not set then all components are stored and the sum
        of explained variances is equal to 1.0
`mean_` : array, [n_features]
Per-feature empirical mean, estimated from the training set.
`n_components_` : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
`noise_variance_` : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
ProbabilisticPCA
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = array2d(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
if self.whiten:
components_ = V / (S[:, np.newaxis] / sqrt(n_samples))
else:
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = array2d(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
        exact inverse operation of transform.
"""
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
X = array2d(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
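# --- Illustrative sketch (not part of the original module) ------------------
# score_samples above evaluates the Gaussian log-likelihood implied by the
# probabilistic PCA model through get_precision(). The helper below is a
# hypothetical, self-contained consistency check of that relationship; the
# name `_check_score_samples_identity` is an assumption used only for
# illustration.
def _check_score_samples_identity(n_samples=50, n_features=5, n_components=2):
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    pca = PCA(n_components=n_components).fit(X)
    Xr = X - pca.mean_
    precision = pca.get_precision()
    # Recompute the per-sample log-likelihood directly from the precision
    # matrix, mirroring the expression used in score_samples.
    ll = -.5 * (Xr * np.dot(Xr, precision)).sum(axis=1)
    ll -= .5 * (n_features * log(2. * np.pi) - fast_logdet(precision))
    return np.allclose(ll, pca.score_samples(X))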
@deprecated("ProbabilisticPCA will be removed in 0.16. WARNING: the "
"covariance estimation was previously incorrect, your "
"output might be different than under the previous versions. "
"Use PCA that implements score and score_samples. To work with "
"homoscedastic=False, you should use FactorAnalysis.")
class ProbabilisticPCA(PCA):
"""Additional layer on top of PCA that adds a probabilistic evaluation"""
__doc__ += PCA.__doc__
def fit(self, X, y=None, homoscedastic=True):
"""Additionally to PCA.fit, learns a covariance model
Parameters
----------
X : array of shape(n_samples, n_features)
The data to fit
homoscedastic : bool, optional,
If True, average variance across remaining dimensions
"""
PCA.fit(self, X)
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
explained_variance = self.explained_variance_.copy()
if homoscedastic:
explained_variance -= self.noise_variance_
# Make the low rank part of the estimated covariance
self.covariance_ = np.dot(self.components_[:n_components].T *
explained_variance,
self.components_[:n_components])
if n_features == n_components:
delta = 0.
elif homoscedastic:
delta = self.noise_variance_
else:
Xr = X - self.mean_
Xr -= np.dot(np.dot(Xr, self.components_.T), self.components_)
delta = (Xr ** 2).mean(axis=0) / (n_features - n_components)
# Add delta to the diagonal without extra allocation
self.covariance_.flat[::n_features + 1] += delta
return self
def score(self, X, y=None):
"""Return a score associated to new data
Parameters
----------
X: array of shape(n_samples, n_features)
The data to test
Returns
-------
ll: array of shape (n_samples),
log-likelihood of each row of X under the current model
"""
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
self.precision_ = linalg.inv(self.covariance_)
log_like = -.5 * (Xr * (np.dot(Xr, self.precision_))).sum(axis=1)
log_like -= .5 * (fast_logdet(self.covariance_)
+ n_features * log(2. * np.pi))
return log_like
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If k is not set then all components are stored and the sum of explained
        variances is equal to 1.0
`mean_` : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
ProbabilisticPCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
        (arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
Notes
-----
This class supports sparse matrix input for backward compatibility, but
actually computes a truncated SVD instead of a PCA in that case (i.e. no
centering is performed). This support is deprecated; use the class
TruncatedSVD for sparse matrix support.
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
if sparse.issparse(X):
warnings.warn("Sparse matrix support is deprecated in 0.15"
" and will be dropped in 0.17. In particular"
" computed explained variance is incorrect on"
" sparse data. Use TruncatedSVD instead.",
DeprecationWarning)
else:
# not a sparse matrix, ensure this is a 2D array
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
if sparse.issparse(X):
self.mean_ = None
else:
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
if sparse.issparse(X):
_, full_var = mean_variance_axis0(X)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
# XXX remove scipy.sparse support here in 0.16
X = atleast2d_or_csr(X)
if self.mean_ is not None:
X = X - self.mean_
X = safe_sparse_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = self._fit(atleast2d_or_csr(X))
X = safe_sparse_dot(X, self.components_.T)
return X
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
# XXX remove scipy.sparse support here in 0.16
X_original = safe_sparse_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
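# --- Illustrative sketch (not part of the original module) ------------------
# Quick comparison of the exact solver (PCA) and the randomized solver
# (RandomizedPCA) defined above. Wrapped in a function so that importing the
# module stays side-effect free; the name `_compare_exact_and_randomized` is
# hypothetical.
def _compare_exact_and_randomized(n_samples=200, n_features=20,
                                  n_components=3):
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    exact = PCA(n_components=n_components).fit(X)
    approx = RandomizedPCA(n_components=n_components, random_state=0).fit(X)
    # For well-separated singular values the leading explained-variance
    # ratios of the two solvers should agree closely.
    return exact.explained_variance_ratio_, approx.explained_variance_ratio_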
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/sklearn/decomposition/pca.py
|
Python
|
apache-2.0
| 26,604
|
[
"Gaussian"
] |
252e7747c9076ec8b6575c7a7c5b407bdc5aa61e60ea86c3cd237a5eeb1a39c4
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
#          Satrajit Ghosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import sys
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import csr_matrix
from scipy.sparse import lil_matrix
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state
from ..linear_model import SGDClassifier
from ..linear_model import SGDRegressor
from ..linear_model import LinearRegression
from ..linear_model import Lasso
from ..linear_model import LassoCV
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
from tree_pruning import Desision
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
self.l1_clf_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity,
            # as opposed to y[:, np.newaxis], which does not
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in range(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X, x_std=None, mean = 0.0, std = 0.0):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict_options(X, self.l1_clf_, x_std, mean, std)
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
elif self.l1_clf_ is None:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
else:
return proba
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
return self.tree_.compute_feature_importances()
def random_pruning(self, version=1, proba=0.1):
"""Excute a post pruning method on a tree
"""
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if version < 1 or version > 3:
raise Exception("Invalid pruning version [1-3]")
coin = Desision(propability=proba)
return self.tree_.random_pruning(version, coin)
def l1_pruning(self, X, y, alpha = 1.0):
"""Excute a post pruning method on a tree
"""
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# No regularization
if alpha == 0:
return
# Determine the size of the sparse matrix of nodes
n_samples, n_features = X.shape
if n_samples != y.shape[0]:
raise ValueError("X and Y arrays doesn t match")
node_count = self.tree_.get_node_count()
        # Build the sparse matrix of nodes
lil_nodes = lil_matrix((n_samples, node_count), dtype=np.int8)
self.tree_.usage_init(X, lil_nodes)
csr_nodes = lil_nodes.tocsr()
self.instantiate_l1_clf(loss='squared_loss', penalty='l1', alpha=alpha)
self.l1_clf_.fit(csr_nodes, y)
if self.l1_clf_.coef_.ndim > 1:
coef = np.amax(np.fabs(self.l1_clf_.coef_), axis=0)
else:
coef = self.l1_clf_.coef_
# Pruning in itself
self.tree_.usage_pruning(0, coef)
def get_l1_clf(self):
"""Return the l1 classifier associated to L1 based pruning
"""
return self.l1_clf_
def get_size(self):
"""return the size of the tree (in Bytes)
"""
return sys.getsizeof(self) + self.tree_.get_size()
def get_nodes_number(self):
"""return the number of nodes of the tree
"""
return 1 + self.tree_.get_nb_childs(0)
def get_leafs_number(self):
"""return the number of leaf of the tree
"""
return self.tree_.get_nb_leaf(0)
def get_mean_depth(self):
"""return the average depth of the three
"""
return float(
self.tree_.get_sum_leaf_depth(0, 0)) / self.tree_.get_nb_leaf(0)
def get_max_depth(self):
"""return the max depth of the tree
"""
return self.tree_.get_max_depth(0, 0)
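# --- Illustrative sketch (not part of the original module) ------------------
# random_pruning, l1_pruning and the get_* size helpers above are extensions
# specific to this fork and rely on its custom Cython Tree methods. The helper
# below is a hypothetical end-to-end use of l1_pruning on a regression tree;
# the name `_l1_pruning_demo` and the chosen alpha are assumptions made only
# for illustration.
def _l1_pruning_demo(alpha=0.01):
    rng = np.random.RandomState(0)
    X = rng.rand(200, 4)
    y = X[:, 0] + 0.1 * rng.randn(200)
    reg = DecisionTreeRegressor(random_state=0).fit(X, y)
    n_nodes_before = reg.get_nodes_number()
    # Fits a Lasso on the node-indicator matrix and drops nodes whose
    # coefficient is zero (see l1_pruning above).
    reg.l1_pruning(X, y, alpha=alpha)
    return n_nodes_before, reg.get_nodes_number()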
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
tree_ : Tree object
The underlying Tree object.
max_features_ : int,
        The inferred value of max_features.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
def predict_proba(self, X, x_std=None, mean=0.0, std=0.0):
"""Predict class probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict_options(X, self.l1_clf_, x_std, mean, std)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X, x_std=None, mean=0.0, std=0.0):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X, x_std, mean, std)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
def instantiate_l1_clf(self, loss, penalty, alpha):
self.l1_clf_ = SGDClassifier(loss='squared_hinge', penalty=penalty, alpha=alpha)
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
tree_ : Tree object
The underlying Tree object.
max_features_ : int,
        The inferred value of max_features.
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
def instantiate_l1_clf(self, loss, penalty, alpha):
        if alpha == -1:
self.l1_clf_ = LassoCV()
else:
self.l1_clf_ = Lasso(alpha=alpha)
# self.l1_clf_ = SGDRegressor(loss='squared_loss', penalty=penalty, alpha=alpha)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
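# --- Illustrative sketch (not part of the original module) ------------------
# Hypothetical use of the random_pruning extension defined on BaseDecisionTree;
# it depends on this fork's compiled Tree.random_pruning and on the external
# tree_pruning.Desision helper, so treat it as a sketch rather than a tested
# recipe. The name `_random_pruning_demo` is an assumption.
def _random_pruning_demo(version=1, proba=0.2):
    rng = np.random.RandomState(0)
    X = rng.rand(300, 5)
    y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
    clf = DecisionTreeClassifier(random_state=0).fit(X, y)
    n_nodes_before = clf.get_nodes_number()
    clf.random_pruning(version=version, proba=proba)
    return n_nodes_before, clf.get_nodes_number()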
|
thilbern/scikit-learn
|
sklearn/tree/tree.py
|
Python
|
bsd-3-clause
| 34,935
|
[
"Brian"
] |
3450ad19e0ba2c9d47f0ac1dcac1a352b0de1a427fc329684e591b4fe787b311
|
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# _generate_pyx.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(name, doc):
docdict[name] = doc
add_newdoc("_sf_error_test_function",
"""
Private function; do not use.
""")
add_newdoc("sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
The spherical harmonics are defined as
.. math::
Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}}
e^{i m \theta} P^m_n(\cos(\phi))
where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.
Parameters
----------
m : array_like
Order of the harmonic (int); must have ``|m| <= n``.
n : array_like
Degree of the harmonic (int); must have ``n >= 0``. This is
often denoted by ``l`` (lower case L) in descriptions of
spherical harmonics.
theta : array_like
Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
phi : array_like
Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.
Notes
-----
There are different conventions for the meanings of the input
arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
azimuthal angle and ``phi`` is the polar angle. It is common to
see the opposite convention, that is, ``theta`` as the polar angle
and ``phi`` as the azimuthal angle.
Note that SciPy's spherical harmonics include the Condon-Shortley
phase [2]_ because it is part of `lpmv`.
With SciPy's conventions, the first several spherical harmonics
are
.. math::
Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{-i\theta} \sin(\phi) \\
Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
\cos(\phi) \\
Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{i\theta} \sin(\phi).
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
https://dlmf.nist.gov/14.30
.. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
""")
add_newdoc("_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("voigt_profile",
r"""
voigt_profile(x, sigma, gamma, out=None)
Voigt profile.
The Voigt profile is a convolution of a 1D Normal distribution with
standard deviation ``sigma`` and a 1D Cauchy distribution with half-width at
half-maximum ``gamma``.
Parameters
----------
x : array_like
Real argument
sigma : array_like
The standard deviation of the Normal distribution part
gamma : array_like
The half-width at half-maximum of the Cauchy distribution part
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
The Voigt profile at the given arguments
Notes
-----
It can be expressed in terms of Faddeeva function
.. math:: V(x; \sigma, \gamma) = \frac{Re[w(z)]}{\sigma\sqrt{2\pi}},
.. math:: z = \frac{x + i\gamma}{\sqrt{2}\sigma}
where :math:`w(z)` is the Faddeeva function.
See Also
--------
wofz : Faddeeva function
References
----------
.. [1] https://en.wikipedia.org/wiki/Voigt_profile
""")
add_newdoc("wrightomega",
r"""
wrightomega(z, out=None)
Wright Omega function.
Defined as the solution to
.. math::
\omega + \log(\omega) = z
where :math:`\log` is the principal branch of the complex logarithm.
Parameters
----------
z : array_like
Points at which to evaluate the Wright Omega function
Returns
-------
omega : ndarray
Values of the Wright Omega function
Notes
-----
.. versionadded:: 0.19.0
The function can also be defined as
.. math::
\omega(z) = W_{K(z)}(e^z)
where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the
unwinding number and :math:`W` is the Lambert W function.
The implementation here is taken from [1]_.
See Also
--------
lambertw : The Lambert W function
References
----------
.. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex
Double-Precision Evaluation of the Wright :math:`\omega`
Function." ACM Transactions on Mathematical Software,
2012. :doi:`10.1145/2168773.2168779`.
""")
add_newdoc("agm",
"""
agm(a, b)
Compute the arithmetic-geometric mean of `a` and `b`.
Start with a_0 = a and b_0 = b and iteratively compute::
a_{n+1} = (a_n + b_n)/2
b_{n+1} = sqrt(a_n*b_n)
a_n and b_n converge to the same limit as n increases; their common
limit is agm(a, b).
Parameters
----------
a, b : array_like
Real values only. If the values are both negative, the result
is negative. If one value is negative and the other is positive,
`nan` is returned.
Returns
-------
float
The arithmetic-geometric mean of `a` and `b`.
Examples
--------
>>> from scipy.special import agm
>>> a, b = 24.0, 6.0
>>> agm(a, b)
13.458171481725614
Compare that result to the iteration:
>>> while a != b:
... a, b = (a + b)/2, np.sqrt(a*b)
... print("a = %19.16f b=%19.16f" % (a, b))
...
a = 15.0000000000000000 b=12.0000000000000000
a = 13.5000000000000000 b=13.4164078649987388
a = 13.4582039324993694 b=13.4581390309909850
a = 13.4581714817451772 b=13.4581714817060547
a = 13.4581714817256159 b=13.4581714817256159
When array-like arguments are given, broadcasting applies:
>>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1).
>>> b = np.array([6, 12, 24, 48]) # b has shape (4,).
>>> agm(a, b)
array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756],
[ 4.37037309, 6.72908574, 10.84726853, 18.11597502],
[ 6. , 8.74074619, 13.45817148, 21.69453707]])
""")
add_newdoc("airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
Compute the Airy functions on the interval [-15, 5].
>>> from scipy import special
>>> x = np.linspace(-15, 5, 201)
>>> ai, aip, bi, bip = special.airy(x)
Plot Ai(x) and Bi(x).
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, ai, 'r', label='Ai(x)')
>>> plt.plot(x, bi, 'b--', label='Bi(x)')
>>> plt.ylim(-0.5, 1.0)
>>> plt.grid()
>>> plt.legend(loc='upper left')
>>> plt.show()
""")
add_newdoc("airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `k` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `k + 1` through `n` of the binomial probability density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `k + 1` or more successes in `n` independent events
with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("bdtri",
"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
    and the relation::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
Returns
-------
k : ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
Returns
-------
n : ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("binom",
"""
binom(n, k)
Binomial coefficient
See Also
--------
comb : The number of combinations of N things taken k at a time.
""")
add_newdoc("btdtria",
r"""
btdtria(p, b, x)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
Returns
-------
a : ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative distribution function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("btdtrib",
r"""
    btdtrib(a, p, x)
Inverse of `btdtr` with respect to `b`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `b`, returning the value of `b` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
The quantile, in [0, 1].
Returns
-------
b : ndarray
The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative distribution function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtria : Inverse with respect to `a`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("beip",
"""
beip(x)
Derivative of the Kelvin function `bei`
""")
add_newdoc("ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("berp",
"""
berp(x)
Derivative of the Kelvin function `ber`
""")
add_newdoc("besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("beta",
r"""
beta(a, b, out=None)
Beta function.
This function is defined in [1]_ as
.. math::
B(a, b) = \int_0^1 t^{a-1}(1-t)^{b-1}dt
= \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)},
where :math:`\Gamma` is the gamma function.
Parameters
----------
a, b : array-like
Real-valued arguments
out : ndarray, optional
Optional output array for the function result
Returns
-------
scalar or ndarray
Value of the beta function
See Also
--------
gamma : the gamma function
betainc : the incomplete beta function
betaln : the natural logarithm of the absolute
value of the beta function
References
----------
.. [1] NIST Digital Library of Mathematical Functions,
Eq. 5.12.1. https://dlmf.nist.gov/5.12
Examples
--------
>>> import scipy.special as sc
The beta function relates to the gamma function by the
definition given above:
>>> sc.beta(2, 3)
0.08333333333333333
>>> sc.gamma(2)*sc.gamma(3)/sc.gamma(2 + 3)
0.08333333333333333
As this relationship demonstrates, the beta function
is symmetric:
>>> sc.beta(1.7, 2.4)
0.16567527689031739
>>> sc.beta(2.4, 1.7)
0.16567527689031739
This function satisfies :math:`B(1, b) = 1/b`:
>>> sc.beta(1, 4)
0.25
""")
add_newdoc("betainc",
r"""
betainc(a, b, x, out=None)
Incomplete beta function.
Computes the incomplete beta function, defined as [1]_:
.. math::
I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} \int_0^x
t^{a-1}(1-t)^{b-1}dt,
for :math:`0 \leq x \leq 1`.
Parameters
----------
a, b : array-like
Positive, real-valued parameters
x : array-like
Real-valued such that :math:`0 \leq x \leq 1`,
the upper limit of integration
out : ndarray, optional
Optional output array for the function values
Returns
-------
array-like
Value of the incomplete beta function
See Also
--------
beta : beta function
betaincinv : inverse of the incomplete beta function
Notes
-----
The incomplete beta function is also sometimes defined
without the `gamma` terms, in which case the above
definition is the so-called regularized incomplete beta
function. Under that convention, the unregularized incomplete
beta function can be obtained by multiplying the result of
this function by ``beta(a, b)``.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.17
Examples
--------
Let :math:`B(a, b)` be the `beta` function.
>>> import scipy.special as sc
The coefficient in terms of `gamma` is equal to
:math:`1/B(a, b)`. Also, when :math:`x=1`
the integral is equal to :math:`B(a, b)`.
Therefore, :math:`I_{x=1}(a, b) = 1` for any :math:`a, b`.
>>> sc.betainc(0.2, 3.5, 1.0)
1.0
It satisfies
:math:`I_x(a, b) = x^a F(a, 1-b, a+1, x)/ (aB(a, b))`,
where :math:`F` is the hypergeometric function `hyp2f1`:
>>> a, b, x = 1.4, 3.1, 0.5
>>> x**a * sc.hyp2f1(a, 1 - b, a + 1, x)/(a * sc.beta(a, b))
0.8148904036225295
>>> sc.betainc(a, b, x)
0.8148904036225296
This function satisfies the relationship
:math:`I_x(a, b) = 1 - I_{1-x}(b, a)`:
>>> sc.betainc(2.2, 3.1, 0.4)
0.49339638807619446
>>> 1 - sc.betainc(3.1, 2.2, 1 - 0.4)
0.49339638807619446
""")
add_newdoc("betaincinv",
r"""
betaincinv(a, b, y, out=None)
Inverse of the incomplete beta function.
Computes :math:`x` such that:
.. math::
y = I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}
\int_0^x t^{a-1}(1-t)^{b-1}dt,
where :math:`I_x` is the normalized incomplete beta
function `betainc` and
:math:`\Gamma` is the `gamma` function [1]_.
Parameters
----------
a, b : array-like
Positive, real-valued parameters
y : array-like
Real-valued input
out : ndarray, optional
Optional output array for function values
Returns
-------
array-like
Value of the inverse of the incomplete beta function
See Also
--------
betainc : incomplete beta function
gamma : gamma function
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.17
Examples
--------
>>> import scipy.special as sc
This function is the inverse of `betainc` for fixed
values of :math:`a` and :math:`b`.
>>> a, b = 1.2, 3.1
>>> y = sc.betainc(a, b, 0.2)
>>> sc.betaincinv(a, b, y)
0.2
>>>
>>> a, b = 7.5, 0.4
>>> x = sc.betaincinv(a, b, 0.5)
>>> sc.betainc(a, b, x)
0.5
""")
add_newdoc("betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
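Examples
--------
An illustrative check against `beta` (compared with `numpy.allclose`):
>>> import numpy as np
>>> import scipy.special as sc
>>> np.allclose(sc.betaln(5, 3), np.log(abs(sc.beta(5, 3))))
True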
""")
add_newdoc("boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("btdtr",
r"""
btdtr(a, b, x)
Cumulative distribution function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative distribution function of the beta distribution with
parameters `a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
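Examples
--------
Since this function is identical to the regularized incomplete beta
integral `betainc`, a quick illustrative comparison:
>>> import numpy as np
>>> import scipy.special as sc
>>> np.allclose(sc.btdtr(2.0, 3.0, 0.4), sc.betainc(2.0, 3.0, 0.4))
True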
""")
add_newdoc("btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
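Examples
--------
An illustrative round trip through `btdtr` (checked with `numpy.allclose`):
>>> import numpy as np
>>> import scipy.special as sc
>>> a, b, x = 2.0, 3.0, 0.4
>>> p = sc.btdtr(a, b, x)
>>> np.allclose(sc.btdtri(a, b, p), x)
True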
""")
add_newdoc("cbrt",
"""
cbrt(x)
Element-wise cube root of `x`.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
The cube root of each value in `x`.
Examples
--------
>>> from scipy.special import cbrt
>>> cbrt(8)
2.0
>>> cbrt([-8, -3, 0.125, 1.331])
array([-2. , -1.44224957, 0.5 , 1.1 ])
""")
add_newdoc("chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
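Examples
--------
Illustrative checks: `chdtr` and `chdtrc` sum to one, and the standard
relation to the regularized lower incomplete gamma function (not stated
above) holds:
>>> import numpy as np
>>> import scipy.special as sc
>>> v, x = 3.0, 2.5
>>> np.allclose(sc.chdtr(v, x) + sc.chdtrc(v, x), 1.0)
True
>>> np.allclose(sc.chdtr(v, x), sc.gammainc(v/2, x/2))
True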
""")
add_newdoc("chdtrc",
"""
chdtrc(v, x)
Chi square survival function
Returns the area under the right hand tail (from `x` to
infinity) of the Chi square probability density function with `v`
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("chdtri",
"""
chdtri(v, p)
Inverse to `chdtrc`
Returns the argument x such that ``chdtrc(v, x) == p``.
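Examples
--------
An illustrative round trip through `chdtrc`:
>>> import numpy as np
>>> import scipy.special as sc
>>> v, x = 3.0, 2.5
>>> p = sc.chdtrc(v, x)
>>> np.allclose(sc.chdtri(v, p), x)
True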
""")
add_newdoc("chdtriv",
"""
chdtriv(p, x)
Inverse to `chdtr` vs `v`
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
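Examples
--------
An illustrative round trip through `chndtr` (values are arbitrary):
>>> import numpy as np
>>> import scipy.special as sc
>>> x, df, nc = 4.0, 3.0, 1.5
>>> p = sc.chndtr(x, df, nc)
>>> np.allclose(sc.chndtrix(p, df, nc), x)
True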
""")
add_newdoc("chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("cosdg",
"""
cosdg(x)
Cosine of the angle `x` given in degrees.
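Examples
--------
An illustrative comparison with `numpy.cos` applied to the angle converted
to radians:
>>> import numpy as np
>>> import scipy.special as sc
>>> np.allclose(sc.cosdg(60.0), np.cos(np.deg2rad(60.0)))
True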
""")
add_newdoc("cosm1",
"""
cosm1(x)
cos(x) - 1 for use when `x` is near zero.
""")
add_newdoc("cotdg",
"""
cotdg(x)
Cotangent of the angle `x` given in degrees.
""")
add_newdoc("dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("ellipe",
r"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \approx P(1-m) - (1-m) \log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
.. math:: E(m) = E(m/(m - 1)) \sqrt{1-m}
is used.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
This function is used in finding the circumference of an
ellipse with semi-major axis `a` and semi-minor axis `b`.
>>> from scipy import special
>>> a = 3.5
>>> b = 2.1
>>> e_sq = 1.0 - b**2/a**2 # eccentricity squared
Then the circumference is found using the following:
>>> C = 4*a*special.ellipe(e_sq) # circumference formula
>>> C
17.868899204378693
When `a` and `b` are the same (meaning eccentricity is 0),
this reduces to the circumference of a circle.
>>> 4*a*special.ellipe(0.0) # formula for ellipse with a = b
21.991148575128552
>>> 2*np.pi*a # formula for circle of radius a
21.991148575128552
""")
add_newdoc("ellipeinc",
r"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
Computation uses arithmetic-geometric means algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
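Examples
--------
An illustrative check: at amplitude ``phi = pi/2`` the incomplete integral
reduces to the complete integral `ellipe`:
>>> import numpy as np
>>> import scipy.special as sc
>>> m = 0.4
>>> np.allclose(sc.ellipeinc(np.pi/2, m), sc.ellipe(m))
True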
""")
add_newdoc("ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
m : array_like
Parameter.
u : array_like
Argument.
Returns
-------
sn, cn, dn, ph : ndarrays
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipkinc(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to incomplete elliptic integral: If `u = ellipkinc(phi,m)`, then
`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
See also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
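Examples
--------
Illustrative checks of the standard identities ``sn**2 + cn**2 = 1`` and
``dn**2 + m*sn**2 = 1`` (not stated above, but classical properties of the
Jacobian elliptic functions):
>>> import numpy as np
>>> import scipy.special as sc
>>> u, m = 0.5, 0.7
>>> sn, cn, dn, ph = sc.ellipj(u, m)
>>> np.allclose(sn**2 + cn**2, 1.0)
True
>>> np.allclose(dn**2 + m*sn**2, 1.0)
True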
""")
add_newdoc("ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt{p}
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
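Examples
--------
An illustrative comparison with `ellipk`, using the relation `m = 1 - p`
given above:
>>> import numpy as np
>>> import scipy.special as sc
>>> p = 0.3
>>> np.allclose(sc.ellipkm1(p), sc.ellipk(1 - p))
True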
""")
add_newdoc("ellipk",
r"""
ellipk(m)
Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`, which this
function calls.
The parameterization in terms of :math:`m` follows that of section
17.2 in [1]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
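Examples
--------
An illustrative check of the value at ``m = 0``, where the integrand above
is identically 1 and the integral equals ``pi/2``:
>>> import numpy as np
>>> import scipy.special as sc
>>> np.allclose(sc.ellipk(0.0), np.pi/2)
True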
""")
add_newdoc("ellipkinc",
r"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{-1/2} dt
This function is also called `F(phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
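Examples
--------
An illustrative check: at amplitude ``phi = pi/2`` the incomplete integral
reduces to the complete integral `ellipk`:
>>> import numpy as np
>>> import scipy.special as sc
>>> m = 0.4
>>> np.allclose(sc.ellipkinc(np.pi/2, m), sc.ellipk(m))
True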
""")
add_newdoc("entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.15.0
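Examples
--------
An illustrative check of the formula above for positive arguments:
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([0.5, 1.0, 2.0])
>>> np.allclose(sc.entr(x), -x * np.log(x))
True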
""")
add_newdoc("erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative distribution function of the standard normal
distribution is given by ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("erfc",
"""
erfc(x, out=None)
Complementary error function, ``1 - erf(x)``.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the complementary error function
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("erfi",
"""
erfi(z, out=None)
Imaginary error function, ``-i erf(i z)``.
Parameters
----------
z : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the imaginary error function
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("erfcx",
"""
erfcx(x, out=None)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the scaled complementary error function
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("eval_jacobi",
r"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
The Jacobi polynomials can be defined via the Gauss hypergeometric
function :math:`{}_2F_1` as
.. math::
P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - x)/2)
where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
beta : array_like
Parameter
x : array_like
Points at which to evaluate the polynomial
Returns
-------
P : ndarray
Values of the Jacobi polynomial
See Also
--------
roots_jacobi : roots and quadrature weights of Jacobi polynomials
jacobi : Jacobi polynomial object
hyp2f1 : Gauss hypergeometric function
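Examples
--------
An illustrative special case (a standard identity not stated above): for
``alpha = beta = 0`` the Jacobi polynomial reduces to the Legendre
polynomial:
>>> import numpy as np
>>> import scipy.special as sc
>>> n, x = 4, 0.3
>>> np.allclose(sc.eval_jacobi(n, 0.0, 0.0, x), sc.eval_legendre(n, x))
True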
""")
add_newdoc("eval_sh_jacobi",
r"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi polynomial.
Parameters
----------
n : int
Degree of the polynomial. If not an integer, the result is
determined via the relation to `binom` and `eval_jacobi`.
p : float
Parameter
q : float
Parameter
x : float
Points at which to evaluate the shifted Jacobi polynomial
Returns
-------
G : ndarray
Values of the shifted Jacobi polynomial.
See Also
--------
roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
polynomials
sh_jacobi : shifted Jacobi polynomial object
eval_jacobi : evaluate Jacobi polynomials
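Examples
--------
An illustrative numerical check of the defining relation above (the
particular parameter values are arbitrary):
>>> import numpy as np
>>> import scipy.special as sc
>>> n, p, q, x = 3, 2.5, 1.5, 0.4
>>> lhs = sc.eval_sh_jacobi(n, p, q, x)
>>> rhs = sc.eval_jacobi(n, p - q, q - 1, 2*x - 1) / sc.binom(2*n + p - 1, n)
>>> np.allclose(lhs, rhs)
True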
""")
add_newdoc("eval_gegenbauer",
r"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
The Gegenbauer polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
C_n^{(\alpha)}(x) = \frac{(2\alpha)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
x : array_like
Points at which to evaluate the Gegenbauer polynomial
Returns
-------
C : ndarray
Values of the Gegenbauer polynomial
See Also
--------
roots_gegenbauer : roots and quadrature weights of Gegenbauer
polynomials
gegenbauer : Gegenbauer polynomial object
hyp2f1 : Gauss hypergeometric function
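Examples
--------
An illustrative special case (a standard identity not stated above): for
``alpha = 1`` the Gegenbauer polynomial coincides with the Chebyshev
polynomial of the second kind:
>>> import numpy as np
>>> import scipy.special as sc
>>> n, x = 4, 0.3
>>> np.allclose(sc.eval_gegenbauer(n, 1.0, x), sc.eval_chebyu(n, x))
True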
""")
add_newdoc("eval_chebyt",
r"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind at a point.
The Chebyshev polynomials of the first kind can be defined via the
Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
T : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyt : roots and quadrature weights of Chebyshev
polynomials of the first kind
chebyt : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
Notes
-----
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
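Examples
--------
An illustrative check of the classical identity
``T_n(cos(theta)) = cos(n*theta)`` (not stated above):
>>> import numpy as np
>>> import scipy.special as sc
>>> n, theta = 5, 0.3
>>> np.allclose(sc.eval_chebyt(n, np.cos(theta)), np.cos(n*theta))
True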
""")
add_newdoc("eval_chebyu",
r"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind at a point.
The Chebyshev polynomials of the second kind can be defined via
the Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
U : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyu : roots and quadrature weights of Chebyshev
polynomials of the second kind
chebyu : Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
hyp2f1 : Gauss hypergeometric function
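Examples
--------
An illustrative check of the classical identity
``U_n(cos(theta)) = sin((n+1)*theta)/sin(theta)`` (not stated above):
>>> import numpy as np
>>> import scipy.special as sc
>>> n, theta = 4, 0.3
>>> np.allclose(sc.eval_chebyu(n, np.cos(theta)), np.sin((n + 1)*theta)/np.sin(theta))
True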
""")
add_newdoc("eval_chebys",
r"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
S_n(x) = U_n(x/2)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
S : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebys : roots and quadrature weights of Chebyshev
polynomials of the second kind on [-2, 2]
chebys : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
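Examples
--------
An illustrative check of the defining relation ``S_n(x) = U_n(x/2)`` above:
>>> import numpy as np
>>> import scipy.special as sc
>>> n, x = 3, 1.2
>>> np.allclose(sc.eval_chebys(n, x), sc.eval_chebyu(n, x/2))
True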
""")
add_newdoc("eval_chebyc",
r"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
C_n(x) = 2 T_n(x/2)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
C : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyc : roots and quadrature weights of Chebyshev
polynomials of the first kind on [-2, 2]
chebyc : Chebyshev polynomial object
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
eval_chebyt : evaluate Chebyshev polynomials of the first kind
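Examples
--------
An illustrative check of the defining relation ``C_n(x) = 2*T_n(x/2)``
above:
>>> import numpy as np
>>> import scipy.special as sc
>>> n, x = 3, 1.2
>>> np.allclose(sc.eval_chebyc(n, x), 2*sc.eval_chebyt(n, x/2))
True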
""")
add_newdoc("eval_sh_chebyt",
r"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the first kind at a
point.
These polynomials are defined as
.. math::
T_n^*(x) = T_n(2x - 1)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
T : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyt : roots and quadrature weights of shifted
Chebyshev polynomials of the first kind
sh_chebyt : shifted Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
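Examples
--------
An illustrative check of the defining relation ``T_n^*(x) = T_n(2x - 1)``
above:
>>> import numpy as np
>>> import scipy.special as sc
>>> n, x = 3, 0.4
>>> np.allclose(sc.eval_sh_chebyt(n, x), sc.eval_chebyt(n, 2*x - 1))
True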
""")
add_newdoc("eval_sh_chebyu",
r"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the second kind at a
point.
These polynomials are defined as
.. math::
U_n^*(x) = U_n(2x - 1)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
U : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyu : roots and quadrature weights of shifted
Chebyshev polynomials of the second kind
sh_chebyu : shifted Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
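Examples
--------
An illustrative check of the defining relation ``U_n^*(x) = U_n(2x - 1)``
above:
>>> import numpy as np
>>> import scipy.special as sc
>>> n, x = 3, 0.4
>>> np.allclose(sc.eval_sh_chebyu(n, x), sc.eval_chebyu(n, 2*x - 1))
True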
""")
add_newdoc("eval_legendre",
r"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
The Legendre polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Legendre polynomial
Returns
-------
P : ndarray
Values of the Legendre polynomial
See Also
--------
roots_legendre : roots and quadrature weights of Legendre
polynomials
legendre : Legendre polynomial object
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.legendre.Legendre : Legendre series
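Examples
--------
An illustrative check against the explicit low-order polynomial
``P_2(x) = (3*x**2 - 1)/2`` (a standard formula not stated above):
>>> import numpy as np
>>> import scipy.special as sc
>>> x = 0.3
>>> np.allclose(sc.eval_legendre(2, x), (3*x**2 - 1)/2)
True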
""")
add_newdoc("eval_sh_legendre",
r"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
These polynomials are defined as
.. math::
P_n^*(x) = P_n(2x - 1)
where :math:`P_n` is a Legendre polynomial.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the value is
determined via the relation to `eval_legendre`.
x : array_like
Points at which to evaluate the shifted Legendre polynomial
Returns
-------
P : ndarray
Values of the shifted Legendre polynomial
See Also
--------
roots_sh_legendre : roots and quadrature weights of shifted
Legendre polynomials
sh_legendre : shifted Legendre polynomial object
eval_legendre : evaluate Legendre polynomials
numpy.polynomial.legendre.Legendre : Legendre series
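Examples
--------
An illustrative check of the defining relation ``P_n^*(x) = P_n(2x - 1)``
above:
>>> import numpy as np
>>> import scipy.special as sc
>>> n, x = 3, 0.4
>>> np.allclose(sc.eval_sh_legendre(n, x), sc.eval_legendre(n, 2*x - 1))
True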
""")
add_newdoc("eval_genlaguerre",
r"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
The generalized Laguerre polynomials can be defined via the
confluent hypergeometric function :math:`{}_1F_1` as
.. math::
L_n^{(\alpha)}(x) = \binom{n + \alpha}{n}
{}_1F_1(-n, \alpha + 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. The Laguerre polynomials are the special case where
:math:`\alpha = 0`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
alpha : array_like
Parameter; must have ``alpha > -1``
x : array_like
Points at which to evaluate the generalized Laguerre
polynomial
Returns
-------
L : ndarray
Values of the generalized Laguerre polynomial
See Also
--------
roots_genlaguerre : roots and quadrature weights of generalized
Laguerre polynomials
genlaguerre : generalized Laguerre polynomial object
hyp1f1 : confluent hypergeometric function
eval_laguerre : evaluate Laguerre polynomials
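Examples
--------
An illustrative check of the special case ``alpha = 0`` mentioned above,
which reduces to the ordinary Laguerre polynomial:
>>> import numpy as np
>>> import scipy.special as sc
>>> n, x = 3, 1.2
>>> np.allclose(sc.eval_genlaguerre(n, 0.0, x), sc.eval_laguerre(n, x))
True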
""")
add_newdoc("eval_laguerre",
r"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
The Laguerre polynomials can be defined via the confluent
hypergeometric function :math:`{}_1F_1` as
.. math::
L_n(x) = {}_1F_1(-n, 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
x : array_like
Points at which to evaluate the Laguerre polynomial
Returns
-------
L : ndarray
Values of the Laguerre polynomial
See Also
--------
roots_laguerre : roots and quadrature weights of Laguerre
polynomials
laguerre : Laguerre polynomial object
numpy.polynomial.laguerre.Laguerre : Laguerre series
eval_genlaguerre : evaluate generalized Laguerre polynomials
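Examples
--------
An illustrative check against the explicit low-order polynomial
``L_1(x) = 1 - x`` (a standard formula not stated above):
>>> import numpy as np
>>> import scipy.special as sc
>>> x = 0.7
>>> np.allclose(sc.eval_laguerre(1, x), 1 - x)
True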
""")
add_newdoc("eval_hermite",
r"""
eval_hermite(n, x, out=None)
Evaluate physicist's Hermite polynomial at a point.
Defined by
.. math::
H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
H : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermite : roots and quadrature weights of physicist's
Hermite polynomials
hermite : physicist's Hermite polynomial object
numpy.polynomial.hermite.Hermite : Physicist's Hermite series
eval_hermitenorm : evaluate Probabilist's Hermite polynomials
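Examples
--------
An illustrative check against the explicit low-order polynomial
``H_2(x) = 4*x**2 - 2`` (a standard formula not stated above):
>>> import numpy as np
>>> import scipy.special as sc
>>> x = 0.5
>>> np.allclose(sc.eval_hermite(2, x), 4*x**2 - 2)
True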
""")
add_newdoc("eval_hermitenorm",
r"""
eval_hermitenorm(n, x, out=None)
Evaluate probabilist's (normalized) Hermite polynomial at a
point.
Defined by
.. math::
He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
He : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermitenorm : roots and quadrature weights of probabilist's
Hermite polynomials
hermitenorm : probabilist's Hermite polynomial object
numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series
eval_hermite : evaluate physicist's Hermite polynomials
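Examples
--------
An illustrative check against the explicit low-order polynomial
``He_2(x) = x**2 - 1`` (a standard formula not stated above):
>>> import numpy as np
>>> import scipy.special as sc
>>> x = 0.5
>>> np.allclose(sc.eval_hermitenorm(2, x), x**2 - 1)
True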
""")
add_newdoc("exp1",
r"""
exp1(z, out=None)
Exponential integral E1.
For complex :math:`z \ne 0` the exponential integral can be defined as
[1]_
.. math::
E_1(z) = \int_z^\infty \frac{e^{-t}}{t} dt,
where the path of the integral does not cross the negative real
axis or pass through the origin.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the exponential integral E1
See Also
--------
expi : exponential integral :math:`Ei`
expn : generalization of :math:`E_1`
Notes
-----
For :math:`x > 0` it is related to the exponential integral
:math:`Ei` (see `expi`) via the relation
.. math::
E_1(x) = -Ei(-x).
References
----------
.. [1] Digital Library of Mathematical Functions, 6.2.1
https://dlmf.nist.gov/6.2#E1
Examples
--------
>>> import scipy.special as sc
It has a pole at 0.
>>> sc.exp1(0)
inf
It has a branch cut on the negative real axis.
>>> sc.exp1(-1)
nan
>>> sc.exp1(complex(-1, 0))
(-1.8951178163559368-3.141592653589793j)
>>> sc.exp1(complex(-1, -0.0))
(-1.8951178163559368+3.141592653589793j)
It approaches 0 along the positive real axis.
>>> sc.exp1([1, 10, 100, 1000])
array([2.19383934e-01, 4.15696893e-06, 3.68359776e-46, 0.00000000e+00])
It is related to `expi`.
>>> x = np.array([1, 2, 3, 4])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> -sc.expi(-x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
""")
add_newdoc("exp10",
"""
exp10(x)
Compute ``10**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``10**x``, computed element-wise.
Examples
--------
>>> from scipy.special import exp10
>>> exp10(3)
1000.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp10(x)
array([[ 0.1 , 0.31622777, 1. ],
[ 3.16227766, 10. , 31.6227766 ]])
""")
add_newdoc("exp2",
"""
exp2(x)
Compute ``2**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``2**x``, computed element-wise.
Examples
--------
>>> from scipy.special import exp2
>>> exp2(3)
8.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp2(x)
array([[ 0.5 , 0.70710678, 1. ],
[ 1.41421356, 2. , 2.82842712]])
""")
add_newdoc("expi",
r"""
expi(x, out=None)
Exponential integral Ei.
For real :math:`x`, the exponential integral is defined as [1]_
.. math::
Ei(x) = \int_{-\infty}^x \frac{e^t}{t} dt.
For :math:`x > 0` the integral is understood as a Cauchy principle
value.
It is extended to the complex plane by analytic continuation of
the function on the interval :math:`(0, \infty)`. The complex
variant has a branch cut on the negative real axis.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the exponential integral
Notes
-----
The exponential integrals :math:`E_1` and :math:`Ei` satisfy the
relation
.. math::
E_1(x) = -Ei(-x)
for :math:`x > 0`.
See Also
--------
exp1 : Exponential integral :math:`E_1`
expn : Generalized exponential integral :math:`E_n`
References
----------
.. [1] Digital Library of Mathematical Functions, 6.2.5
https://dlmf.nist.gov/6.2#E5
Examples
--------
>>> import scipy.special as sc
It is related to `exp1`.
>>> x = np.array([1, 2, 3, 4])
>>> -sc.expi(-x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
The complex variant has a branch cut on the negative real axis.
>>> import scipy.special as sc
>>> sc.expi(-1 + 1e-12j)
(-0.21938393439552062+3.1415926535894254j)
>>> sc.expi(-1 - 1e-12j)
(-0.21938393439552062-3.1415926535894254j)
As the complex variant approaches the branch cut, the real parts
approach the value of the real variant.
>>> sc.expi(-1)
-0.21938393439552062
The SciPy implementation returns the real variant for complex
values on the branch cut.
>>> sc.expi(complex(-1, 0.0))
(-0.21938393439552062-0j)
>>> sc.expi(complex(-1, -0.0))
(-0.21938393439552062-0j)
""")
add_newdoc('expit',
"""
expit(x)
Expit (a.k.a. logistic sigmoid) ufunc for ndarrays.
The expit function, also known as the logistic sigmoid function, is
defined as ``expit(x) = 1/(1+exp(-x))``. It is the inverse of the
logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are `expit` of the corresponding entry of x.
See Also
--------
logit
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.special import expit, logit
>>> expit([-np.inf, -1.5, 0, 1.5, np.inf])
array([ 0. , 0.18242552, 0.5 , 0.81757448, 1. ])
`logit` is the inverse of `expit`:
>>> logit(expit([-2.5, 0, 3.1, 5.0]))
array([-2.5, 0. , 3.1, 5. ])
Plot expit(x) for x in [-6, 6]:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-6, 6, 121)
>>> y = expit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.xlim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('expit(x)')
>>> plt.show()
""")
add_newdoc("expm1",
"""
expm1(x)
Compute ``exp(x) - 1``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``expm1(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``exp(x) - 1`` computed element-wise.
Examples
--------
>>> from scipy.special import expm1
>>> expm1(1.0)
1.7182818284590451
>>> expm1([-0.2, -0.1, 0, 0.1, 0.2])
array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276])
The exact value of ``exp(7.5e-13) - 1`` is::
7.5000000000028125000000007031250000001318...*10**-13.
Here is what ``expm1(7.5e-13)`` gives:
>>> expm1(7.5e-13)
7.5000000000028135e-13
Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in
a "catastrophic" loss of precision:
>>> np.exp(7.5e-13) - 1
7.5006667543675576e-13
""")
add_newdoc("expn",
r"""
expn(n, x, out=None)
Generalized exponential integral En.
For integer :math:`n \geq 0` and real :math:`x \geq 0` the
generalized exponential integral is defined as [dlmf]_
.. math::
E_n(x) = x^{n - 1} \int_x^\infty \frac{e^{-t}}{t^n} dt.
Parameters
----------
n : array_like
Non-negative integers
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the generalized exponential integral
See Also
--------
exp1 : special case of :math:`E_n` for :math:`n = 1`
expi : related to :math:`E_n` when :math:`n = 1`
References
----------
.. [dlmf] Digital Library of Mathematical Functions, 8.19.2
https://dlmf.nist.gov/8.19#E2
Examples
--------
>>> import scipy.special as sc
Its domain is nonnegative n and x.
>>> sc.expn(-1, 1.0), sc.expn(1, -1.0)
(nan, nan)
It has a pole at ``x = 0`` for ``n = 0, 1``; for larger ``n`` its
value at ``x = 0`` is ``1 / (n - 1)``.
>>> sc.expn([0, 1, 2, 3, 4], 0)
array([ inf, inf, 1. , 0.5 , 0.33333333])
For n equal to 0 it reduces to ``exp(-x) / x``.
>>> x = np.array([1, 2, 3, 4])
>>> sc.expn(0, x)
array([0.36787944, 0.06766764, 0.01659569, 0.00457891])
>>> np.exp(-x) / x
array([0.36787944, 0.06766764, 0.01659569, 0.00457891])
For n equal to 1 it reduces to `exp1`.
>>> sc.expn(1, x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
""")
add_newdoc("exprel",
r"""
exprel(x)
Relative error exponential, ``(exp(x) - 1)/x``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``exprel(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : ndarray
Input array. `x` must contain real numbers.
Returns
-------
float
``(exp(x) - 1)/x``, computed element-wise.
See Also
--------
expm1
Notes
-----
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.special import exprel
>>> exprel(0.01)
1.0050167084168056
>>> exprel([-0.25, -0.1, 0, 0.1, 0.25])
array([ 0.88479687, 0.95162582, 1. , 1.05170918, 1.13610167])
Compare ``exprel(5e-9)`` to the naive calculation. The exact value
is ``1.00000000250000000416...``.
>>> exprel(5e-9)
1.0000000025
>>> (np.exp(5e-9) - 1)/5e-9
0.99999999392252903
""")
add_newdoc("fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative distribution function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
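Examples
--------
An illustrative check that `fdtr` and the survival function `fdtrc` sum to
one (the particular values are arbitrary):
>>> import numpy as np
>>> import scipy.special as sc
>>> dfn, dfd, x = 5.0, 8.0, 2.0
>>> np.allclose(sc.fdtr(dfn, dfd, x) + sc.fdtrc(dfn, dfd, x), 1.0)
True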
""")
add_newdoc("fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
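Examples
--------
An illustrative round trip through `fdtr`:
>>> import numpy as np
>>> import scipy.special as sc
>>> dfn, dfd, x = 5.0, 8.0, 2.0
>>> p = sc.fdtr(dfn, dfd, x)
>>> np.allclose(sc.fdtri(dfn, dfd, p), x)
True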
""")
add_newdoc("fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
Finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("fresnel",
r"""
fresnel(z, out=None)
Fresnel integrals.
The Fresnel integrals are defined as
.. math::
S(z) &= \int_0^z \sin(\pi t^2 /2) dt \\
C(z) &= \int_0^z \cos(\pi t^2 /2) dt.
See [dlmf]_ for details.
Parameters
----------
z : array_like
Real or complex valued argument
out : 2-tuple of ndarrays, optional
Optional output arrays for the function results
Returns
-------
S, C : 2-tuple of scalar or ndarray
Values of the Fresnel integrals
See Also
--------
fresnel_zeros : zeros of the Fresnel integrals
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/7.2#iii
Examples
--------
>>> import scipy.special as sc
As z goes to infinity along the real axis, S and C converge to 0.5.
>>> S, C = sc.fresnel([0.1, 1, 10, 100, np.inf])
>>> S
array([0.00052359, 0.43825915, 0.46816998, 0.4968169 , 0.5 ])
>>> C
array([0.09999753, 0.7798934 , 0.49989869, 0.4999999 , 0.5 ])
They are related to the error function `erf`.
>>> z = np.array([1, 2, 3, 4])
>>> zeta = 0.5 * np.sqrt(np.pi) * (1 - 1j) * z
>>> S, C = sc.fresnel(z)
>>> C + 1j*S
array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,
0.60572079+0.496313j , 0.49842603+0.42051575j])
>>> 0.5 * (1 + 1j) * sc.erf(zeta)
array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,
0.60572079+0.496313j , 0.49842603+0.42051575j])
""")
add_newdoc("gamma",
r"""
gamma(z)
Gamma function.
The Gamma function is defined as
.. math::
\Gamma(z) = \int_0^\infty t^{z-1} e^{-t} dt
for :math:`\Re(z) > 0` and is extended to the rest of the complex
plane by analytic continuation. See [dlmf]_ for more details.
Parameters
----------
z : array_like
Real or complex valued argument
Returns
-------
scalar or ndarray
Values of the Gamma function
Notes
-----
The Gamma function is often referred to as the generalized
factorial since :math:`\Gamma(n + 1) = n!` for natural numbers
:math:`n`. More generally it satisfies the recurrence relation
:math:`\Gamma(z + 1) = z \cdot \Gamma(z)` for complex :math:`z`,
which, combined with the fact that :math:`\Gamma(1) = 1`, implies
the above identity for :math:`z = n`.
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5.2#E1
Examples
--------
>>> from scipy.special import gamma, factorial
>>> gamma([0, 0.5, 1, 5])
array([ inf, 1.77245385, 1. , 24. ])
>>> z = 2.5 + 1j
>>> gamma(z)
(0.77476210455108352+0.70763120437959293j)
>>> gamma(z+1), z*gamma(z) # Recurrence property
((1.2292740569981171+2.5438401155000685j),
(1.2292740569981158+2.5438401155000658j))
>>> gamma(0.5)**2 # gamma(0.5) = sqrt(pi)
3.1415926535897927
Plot gamma(x) for real x
>>> x = np.linspace(-3.5, 5.5, 2251)
>>> y = gamma(x)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)')
>>> k = np.arange(1, 7)
>>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6,
... label='(x-1)!, x = 1, 2, ...')
>>> plt.xlim(-3.5, 5.5)
>>> plt.ylim(-10, 25)
>>> plt.grid()
>>> plt.xlabel('x')
>>> plt.legend(loc='lower right')
>>> plt.show()
""")
add_newdoc("gammainc",
r"""
gammainc(a, x)
Regularized lower incomplete gamma function.
It is defined as
.. math::
P(a, x) = \frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details.
Parameters
----------
a : array_like
Positive parameter
x : array_like
Nonnegative argument
Returns
-------
scalar or ndarray
Values of the lower incomplete gamma function
Notes
-----
The function satisfies the relation ``gammainc(a, x) +
gammaincc(a, x) = 1`` where `gammaincc` is the regularized upper
incomplete gamma function.
The implementation largely follows that of [boost]_.
See also
--------
gammaincc : regularized upper incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma
function with respect to `x`
gammainccinv : inverse of the regularized upper incomplete gamma
function with respect to `x`
References
----------
.. [dlmf] NIST Digital Library of Mathematical functions
https://dlmf.nist.gov/8.2#E4
.. [boost] Maddock et. al., "Incomplete Gamma Functions",
https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
Examples
--------
>>> import scipy.special as sc
It is the CDF of the gamma distribution, so it starts at 0 and
monotonically increases to 1.
>>> sc.gammainc(0.5, [0, 1, 10, 100])
array([0. , 0.84270079, 0.99999226, 1. ])
It is equal to one minus the upper incomplete gamma function.
>>> a, x = 0.5, 0.4
>>> sc.gammainc(a, x)
0.6289066304773024
>>> 1 - sc.gammaincc(a, x)
0.6289066304773024
""")
add_newdoc("gammaincc",
r"""
gammaincc(a, x)
Regularized upper incomplete gamma function.
It is defined as
.. math::
Q(a, x) = \frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details.
Parameters
----------
a : array_like
Positive parameter
x : array_like
Nonnegative argument
Returns
-------
scalar or ndarray
Values of the upper incomplete gamma function
Notes
-----
The function satisfies the relation ``gammainc(a, x) +
gammaincc(a, x) = 1`` where `gammainc` is the regularized lower
incomplete gamma function.
The implementation largely follows that of [boost]_.
See also
--------
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma
function with respect to `x`
gammainccinv : inverse of the regularized upper incomplete
gamma function with respect to `x`
References
----------
.. [dlmf] NIST Digital Library of Mathematical functions
https://dlmf.nist.gov/8.2#E4
.. [boost] Maddock et. al., "Incomplete Gamma Functions",
https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
Examples
--------
>>> import scipy.special as sc
It is the survival function of the gamma distribution, so it
starts at 1 and monotonically decreases to 0.
>>> sc.gammaincc(0.5, [0, 1, 10, 100, 1000])
array([1.00000000e+00, 1.57299207e-01, 7.74421643e-06, 2.08848758e-45,
0.00000000e+00])
It is equal to one minus the lower incomplete gamma function.
>>> a, x = 0.5, 0.4
>>> sc.gammaincc(a, x)
0.37109336952269756
>>> 1 - sc.gammainc(a, x)
0.37109336952269756
""")
add_newdoc("gammainccinv",
"""
gammainccinv(a, y)
Inverse of the regularized upper incomplete gamma function with respect to `x`.
Given an input :math:`y` between 0 and 1, returns :math:`x` such
that :math:`y = Q(a, x)`. Here :math:`Q` is the regularized upper
incomplete gamma function; see `gammaincc`. This is well-defined because the
upper incomplete gamma function is monotonic as can be seen from
its definition in [dlmf]_.
Parameters
----------
a : array_like
Positive parameter
y : array_like
Argument between 0 and 1, inclusive
Returns
-------
scalar or ndarray
Values of the inverse of the upper incomplete gamma function
See Also
--------
gammaincc : regularized upper incomplete gamma function
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma
function with respect to `x`
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.2#E4
Examples
--------
>>> import scipy.special as sc
It starts at infinity and monotonically decreases to 0.
>>> sc.gammainccinv(0.5, [0, 0.1, 0.5, 1])
array([ inf, 1.35277173, 0.22746821, 0. ])
It inverts the upper incomplete gamma function.
>>> a, x = 0.5, [0, 0.1, 0.5, 1]
>>> sc.gammaincc(a, sc.gammainccinv(a, x))
array([0. , 0.1, 0.5, 1. ])
>>> a, x = 0.5, [0, 10, 50]
>>> sc.gammainccinv(a, sc.gammaincc(a, x))
array([ 0., 10., 50.])
""")
add_newdoc("gammaincinv",
"""
gammaincinv(a, y)
Inverse to the lower incomplete gamma function with respect to `x`.
Given an input :math:`y` between 0 and 1, returns :math:`x` such
that :math:`y = P(a, x)`. Here :math:`P` is the regularized lower
incomplete gamma function; see `gammainc`. This is well-defined
because the lower incomplete gamma function is monotonic as can be
seen from its definition in [dlmf]_.
Parameters
----------
a : array_like
Positive parameter
y : array_like
Parameter between 0 and 1, inclusive
Returns
-------
scalar or ndarray
Values of the inverse of the lower incomplete gamma function
See Also
--------
gammainc : regularized lower incomplete gamma function
gammaincc : regularized upper incomplete gamma function
gammainccinv : inverse of the regularized upper incomplete gamma
function with respect to `x`
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.2#E4
Examples
--------
>>> import scipy.special as sc
It starts at 0 and monotonically increases to infinity.
>>> sc.gammaincinv(0.5, [0, 0.1 ,0.5, 1])
array([0. , 0.00789539, 0.22746821, inf])
It inverts the lower incomplete gamma function.
>>> a, x = 0.5, [0, 0.1, 0.5, 1]
>>> sc.gammainc(a, sc.gammaincinv(a, x))
array([0. , 0.1, 0.5, 1. ])
>>> a, x = 0.5, [0, 10, 25]
>>> sc.gammaincinv(a, sc.gammainc(a, x))
array([ 0. , 10. , 25.00001465])
""")
add_newdoc("gammaln",
r"""
gammaln(x, out=None)
Logarithm of the absolute value of the Gamma function.
Defined as
.. math::
\ln(\lvert\Gamma(x)\rvert)
where :math:`\Gamma` is the Gamma function. For more details on
the Gamma function, see [dlmf]_.
Parameters
----------
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the log of the absolute value of Gamma
See Also
--------
gammasgn : sign of the gamma function
loggamma : principal branch of the logarithm of the gamma function
Notes
-----
It is the same function as the Python standard library function
:func:`math.lgamma`.
When used in conjunction with `gammasgn`, this function is useful
for working in logspace on the real axis without having to deal
with complex numbers via the relation ``exp(gammaln(x)) =
gammasgn(x) * gamma(x)``.
For complex-valued log-gamma, use `loggamma` instead of `gammaln`.
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5
Examples
--------
>>> import scipy.special as sc
It has two positive zeros.
>>> sc.gammaln([1, 2])
array([0., 0.])
It has poles at nonpositive integers.
>>> sc.gammaln([0, -1, -2, -3, -4])
array([inf, inf, inf, inf, inf])
It asymptotically approaches ``x * log(x)`` (Stirling's formula).
>>> x = np.array([1e10, 1e20, 1e40, 1e80])
>>> sc.gammaln(x)
array([2.20258509e+11, 4.50517019e+21, 9.11034037e+41, 1.83206807e+82])
>>> x * np.log(x)
array([2.30258509e+11, 4.60517019e+21, 9.21034037e+41, 1.84206807e+82])
""")
add_newdoc("gammasgn",
r"""
gammasgn(x)
Sign of the gamma function.
It is defined as
.. math::
\text{gammasgn}(x) =
\begin{cases}
+1 & \Gamma(x) > 0 \\
-1 & \Gamma(x) < 0
\end{cases}
where :math:`\Gamma` is the Gamma function; see `gamma`. This
definition is complete since the Gamma function is never zero;
see the discussion after [dlmf]_.
Parameters
----------
x : array_like
Real argument
Returns
-------
scalar or ndarray
Sign of the Gamma function
Notes
-----
The Gamma function can be computed as ``gammasgn(x) *
np.exp(gammaln(x))``.
See Also
--------
gamma : the Gamma function
gammaln : log of the absolute value of the Gamma function
loggamma : analytic continuation of the log of the Gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5.2#E1
Examples
--------
>>> import scipy.special as sc
It is 1 for `x > 0`.
>>> sc.gammasgn([1, 2, 3, 4])
array([1., 1., 1., 1.])
It alternates between -1 and 1 for negative integers.
>>> sc.gammasgn([-0.5, -1.5, -2.5, -3.5])
array([-1., 1., -1., 1.])
It can be used to compute the Gamma function.
>>> x = [1.5, 0.5, -0.5, -1.5]
>>> sc.gammasgn(x) * np.exp(sc.gammaln(x))
array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])
>>> sc.gamma(x)
array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])
""")
add_newdoc("gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative distribution function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
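Examples
--------
An illustrative check of the relation to the regularized lower
incomplete gamma function mentioned in the Notes (agreement is expected
to within floating-point tolerance, not bit-for-bit):
>>> import numpy as np
>>> from scipy.special import gdtr, gammainc
>>> a, b, x = 1.2, 3.4, 5.6
>>> bool(np.isclose(gdtr(a, b, x), gammainc(b, a * x)))
True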
""")
add_newdoc("gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtrix
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
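Examples
--------
An illustrative check against the regularized upper incomplete gamma
function, which the Notes say underlies the evaluation (tolerance-based,
not bit-for-bit):
>>> import numpy as np
>>> from scipy.special import gdtrc, gammaincc
>>> a, b, x = 1.2, 3.4, 5.6
>>> bool(np.isclose(gdtrc(a, b, x), gammaincc(b, a * x)))
True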
""")
add_newdoc("gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `p`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `p` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `p`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
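Examples
--------
An illustrative check of the standard decomposition
:math:`H^{(1)}_v(z) = J_v(z) + \imath Y_v(z)` (a sanity check only, not
the AMOS relation used internally):
>>> import numpy as np
>>> from scipy.special import hankel1, jv, yv
>>> v, z = 1.5, 3.0
>>> bool(np.isclose(hankel1(v, z), jv(v, z) + 1j * yv(v, z)))
True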
""")
add_newdoc("hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
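Examples
--------
An illustrative check of the standard decomposition
:math:`H^{(2)}_v(z) = J_v(z) - \imath Y_v(z)` (a sanity check only, not
the AMOS relation used internally):
>>> import numpy as np
>>> from scipy.special import hankel2, jv, yv
>>> v, z = 1.5, 3.0
>>> bool(np.isclose(hankel2(v, z), jv(v, z) - 1j * yv(v, z)))
True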
""")
add_newdoc("hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
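Examples
--------
An illustrative check of the two branches of the definition above
(values chosen arbitrarily):
>>> import numpy as np
>>> from scipy.special import huber
>>> delta = 1.0
>>> bool(np.isclose(huber(delta, 0.5), 0.5 * 0.5**2))
True
>>> bool(np.isclose(huber(delta, 3.0), delta * (3.0 - 0.5 * delta)))
True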
""")
add_newdoc("hyp0f1",
r"""
hyp0f1(v, z, out=None)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v : array_like
Real valued parameter
z : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The confluent hypergeometric limit function
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`zf''(z) + vf'(z) =
f(z)`. See [1]_ for more information.
References
----------
.. [1] Wolfram MathWorld, "Confluent Hypergeometric Limit Function",
http://mathworld.wolfram.com/ConfluentHypergeometricLimitFunction.html
Examples
--------
>>> import scipy.special as sc
It is one when `z` is zero.
>>> sc.hyp0f1(1, 0)
1.0
It is the limit of the confluent hypergeometric function as `q`
goes to infinity.
>>> q = np.array([1, 10, 100, 1000])
>>> v = 1
>>> z = 1
>>> sc.hyp1f1(q, v, z / q)
array([2.71828183, 2.31481985, 2.28303778, 2.27992985])
>>> sc.hyp0f1(v, z)
2.2795853023360673
It is related to Bessel functions.
>>> n = 1
>>> x = np.linspace(0, 1, 5)
>>> sc.jv(n, x)
array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
>>> (0.5 * x)**n / sc.factorial(n) * sc.hyp0f1(n + 1, -0.25 * x**2)
array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
""")
add_newdoc("hyp1f1",
r"""
hyp1f1(a, b, x, out=None)
Confluent hypergeometric function 1F1.
The confluent hypergeometric function is defined by the series
.. math::
{}_1F_1(a; b; x) = \sum_{k = 0}^\infty \frac{(a)_k}{(b)_k k!} x^k.
See [dlmf]_ for more details. Here :math:`(\cdot)_k` is the
Pochhammer symbol; see `poch`.
Parameters
----------
a, b : array_like
Real parameters
x : array_like
Real or complex argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the confluent hypergeometric function
See also
--------
hyperu : another confluent hypergeometric function
hyp0f1 : confluent hypergeometric limit function
hyp2f1 : Gaussian hypergeometric function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/13.2#E2
Examples
--------
>>> import scipy.special as sc
It is one when `x` is zero:
>>> sc.hyp1f1(0.5, 0.5, 0)
1.0
It is singular when `b` is a nonpositive integer.
>>> sc.hyp1f1(0.5, -1, 0)
inf
It is a polynomial when `a` is a nonpositive integer.
>>> a, b, x = -1, 0.5, np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.hyp1f1(a, b, x)
array([-1., -3., -5., -7.])
>>> 1 + (a / b) * x
array([-1., -3., -5., -7.])
It reduces to the exponential function when `a = b`.
>>> sc.hyp1f1(2, 2, [1, 2, 3, 4])
array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])
>>> np.exp([1, 2, 3, 4])
array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])
""")
add_newdoc("hyp2f1",
r"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z)
Parameters
----------
a, b, c : array_like
Arguments, should be real-valued.
z : array_like
Argument, real or complex.
Returns
-------
hyp2f1 : scalar or ndarray
The values of the Gaussian hypergeometric function.
See also
--------
hyp0f1 : confluent hypergeometric limit function.
hyp1f1 : Kummer's (confluent hypergeometric) function.
Notes
-----
This function is defined for :math:`|z| < 1` as
.. math::
\mathrm{hyp2f1}(a, b, c, z) = \sum_{n=0}^\infty
\frac{(a)_n (b)_n}{(c)_n}\frac{z^n}{n!},
and defined on the rest of the complex z-plane by analytic
continuation [1]_.
Here :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`a` or :math:`b` is a nonpositive integer :math:`-n`, the series
terminates and the result is a polynomial of degree :math:`n`.
The implementation for complex values of ``z`` is described in [2]_.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/15.2
.. [2] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley 1996
.. [3] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
>>> import scipy.special as sc
It has poles when `c` is a negative integer.
>>> sc.hyp2f1(1, 1, -2, 1)
inf
It is a polynomial when `a` or `b` is a negative integer.
>>> a, b, c = -1, 1, 1.5
>>> z = np.linspace(0, 1, 5)
>>> sc.hyp2f1(a, b, c, z)
array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333])
>>> 1 + a * b * z / c
array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333])
It is symmetric in `a` and `b`.
>>> a = np.linspace(0, 1, 5)
>>> b = np.linspace(0, 1, 5)
>>> sc.hyp2f1(a, b, 1, 0.5)
array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. ])
>>> sc.hyp2f1(b, a, 1, 0.5)
array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. ])
It contains many other functions as special cases.
>>> z = 0.5
>>> sc.hyp2f1(1, 1, 2, z)
1.3862943611198901
>>> -np.log(1 - z) / z
1.3862943611198906
>>> sc.hyp2f1(0.5, 1, 1.5, z**2)
1.098612288668109
>>> np.log((1 + z) / (1 - z)) / (2 * z)
1.0986122886681098
>>> sc.hyp2f1(0.5, 1, 1.5, -z**2)
0.9272952180016117
>>> np.arctan(z) / z
0.9272952180016123
""")
add_newdoc("hyperu",
r"""
hyperu(a, b, x, out=None)
Confluent hypergeometric function U
It is defined as the solution to the equation
.. math::
x \frac{d^2w}{dx^2} + (b - x) \frac{dw}{dx} - aw = 0
which satisfies the property
.. math::
U(a, b, x) \sim x^{-a}
as :math:`x \to \infty`. See [dlmf]_ for more details.
Parameters
----------
a, b : array_like
Real valued parameters
x : array_like
Real valued argument
out : ndarray
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of `U`
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/13.2#E6
Examples
--------
>>> import scipy.special as sc
It has a branch cut along the negative `x` axis.
>>> x = np.linspace(-0.1, -10, 5)
>>> sc.hyperu(1, 1, x)
array([nan, nan, nan, nan, nan])
It approaches zero as `x` goes to infinity.
>>> x = np.array([1, 10, 100])
>>> sc.hyperu(1, 1, x)
array([0.59634736, 0.09156333, 0.00990194])
It satisfies Kummer's transformation.
>>> a, b, x = 2, 1, 1
>>> sc.hyperu(a, b, x)
0.1926947246463881
>>> x**(1 - b) * sc.hyperu(a - b + 1, 2 - b, x)
0.1926947246463881
""")
add_newdoc("i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
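Examples
--------
An illustrative consistency check against the general-order routine
`iv` (the two use different backends, so only tolerance-level agreement
is assumed):
>>> import numpy as np
>>> from scipy.special import i0, iv
>>> x = np.array([0.5, 1.0, 2.0])
>>> bool(np.allclose(i0(x), iv(0, x)))
True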
""")
add_newdoc("i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("_igam_fac",
"""
Internal function, do not use.
""")
add_newdoc("it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``integral(k0(t)/t, t=x..inf)``
""")
add_newdoc("it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order modified
Bessel functions `i0` and `k0`.
Returns
-------
ii0, ik0
""")
add_newdoc("itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order Bessel
functions `j0` and `y0`.
Returns
-------
ij0, iy0
""")
add_newdoc("itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
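Examples
--------
An illustrative numerical check of the definition, using quadrature
(the quadrature result is itself only an approximation of the integral):
>>> import numpy as np
>>> from scipy.integrate import quad
>>> from scipy.special import itstruve0, struve
>>> value, _ = quad(lambda t: struve(0, t), 0, 2.0)
>>> bool(np.isclose(itstruve0(2.0), value))
True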
""")
add_newdoc("iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptotic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\imath\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptotic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptotic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\imath\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
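Examples
--------
An illustrative check of the scaling relation in the definition above,
for a real argument (so ``abs(z.real)`` is simply ``abs(z)``):
>>> import numpy as np
>>> from scipy.special import iv, ive
>>> v, z = 1.0, 10.0
>>> bool(np.isclose(ive(v, z), iv(v, z) * np.exp(-abs(z))))
True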
""")
add_newdoc("j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
It should not be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv : Bessel function of real order and complex argument.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
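Examples
--------
An illustrative consistency check against the general-order routine
`jv` (different backends, so only tolerance-level agreement is assumed):
>>> import numpy as np
>>> from scipy.special import j0, jv
>>> x = np.array([0.5, 1.5, 3.0])
>>> bool(np.allclose(j0(x), jv(0, x)))
True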
""")
add_newdoc("j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
It should not be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
""")
add_newdoc("jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
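Examples
--------
An illustrative check of the scaling relation in the definition above:
>>> import numpy as np
>>> from scipy.special import k0, k0e
>>> x = np.array([0.5, 2.0, 10.0])
>>> bool(np.allclose(k0e(x), np.exp(x) * k0(x)))
True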
""")
add_newdoc("k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in `x` and `y`.
.. versionadded:: 0.15.0
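Examples
--------
An illustrative check of the first branch of the definition for
positive arguments:
>>> import numpy as np
>>> from scipy.special import kl_div
>>> x, y = 2.0, 3.0
>>> bool(np.isclose(kl_div(x, y), x * np.log(x / y) - x + y))
True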
""")
add_newdoc("kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
z : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("kolmogi",
"""
kolmogi(p)
Inverse Survival Function of Kolmogorov distribution
It is the inverse function to `kolmogorov`.
Returns y such that ``kolmogorov(y) == p``.
Parameters
----------
p : float array_like
Probability
Returns
-------
float
The value(s) of kolmogi(p)
Notes
-----
`kolmogorov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.kstwobign` distribution.
See Also
--------
kolmogorov : The Survival Function for the distribution
scipy.stats.kstwobign : Provides the functionality as a continuous distribution
smirnov, smirnovi : Functions for the one-sided distribution
Examples
--------
>>> from scipy.special import kolmogi
>>> kolmogi([0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0])
array([ inf, 1.22384787, 1.01918472, 0.82757356, 0.67644769,
0.57117327, 0. ])
""")
add_newdoc("kolmogorov",
r"""
kolmogorov(y)
Complementary cumulative distribution (Survival Function) function of
Kolmogorov distribution.
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (``D_n*\sqrt(n)`` as n goes to infinity)
of a two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that ``sqrt(n) * max absolute deviation > y``.
Parameters
----------
y : float array_like
Absolute deviation between the Empirical CDF (ECDF) and the target CDF,
multiplied by sqrt(n).
Returns
-------
float
The value(s) of kolmogorov(y)
Notes
-----
`kolmogorov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.kstwobign` distribution.
See Also
--------
kolmogi : The Inverse Survival Function for the distribution
scipy.stats.kstwobign : Provides the functionality as a continuous distribution
smirnov, smirnovi : Functions for the one-sided distribution
Examples
--------
Show the probability of a gap at least as big as 0, 0.5 and 1.0.
>>> from scipy.special import kolmogorov
>>> from scipy.stats import kstwobign
>>> kolmogorov([0, 0.5, 1.0])
array([ 1. , 0.96394524, 0.26999967])
Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against
the target distribution, a Normal(0, 1) distribution.
>>> from scipy.stats import norm, laplace
>>> n = 1000
>>> np.random.seed(seed=233423)
>>> lap01 = laplace(0, 1)
>>> x = np.sort(lap01.rvs(n))
>>> np.mean(x), np.std(x)
(-0.083073685397609842, 1.3676426568399822)
Construct the Empirical CDF and the K-S statistic Dn.
>>> target = norm(0,1) # Normal mean 0, stddev 1
>>> cdfs = target.cdf(x)
>>> ecdfs = np.arange(n+1, dtype=float)/n
>>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs])
>>> Dn = np.max(gaps)
>>> Kn = np.sqrt(n) * Dn
>>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn))
Dn=0.058286, sqrt(n)*Dn=1.843153
>>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:',
... ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' % (Kn, kolmogorov(Kn)),
... ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' % (Kn, kstwobign.cdf(Kn))]))
For a sample of size n drawn from a N(0, 1) distribution:
the approximate Kolmogorov probability that sqrt(n)*Dn>=1.843153 is 0.002240
the approximate Kolmogorov probability that sqrt(n)*Dn<=1.843153 is 0.997760
Plot the Empirical CDF against the target N(0, 1) CDF.
>>> import matplotlib.pyplot as plt
>>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')
>>> x3 = np.linspace(-3, 3, 100)
>>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')
>>> plt.ylim([0, 1]); plt.grid(True); plt.legend();
>>> # Add vertical lines marking Dn+ and Dn-
>>> iminus, iplus = np.argmax(gaps, axis=0)
>>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4)
>>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='r', linestyle='dashed', lw=4)
>>> plt.show()
""")
add_newdoc("_kolmogc",
r"""
Internal function, do not use.
""")
add_newdoc("_kolmogci",
r"""
Internal function, do not use.
""")
add_newdoc("_kolmogp",
r"""
Internal function, do not use.
""")
add_newdoc("kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. https://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
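Examples
--------
An illustrative check of the scaling relation in the definition above,
for a real positive argument:
>>> import numpy as np
>>> from scipy.special import kv, kve
>>> v, z = 2.5, 3.0
>>> bool(np.isclose(kve(v, z), kv(v, z) * np.exp(z)))
True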
""")
add_newdoc("_lanczos_sum_expg_scaled",
"""
Internal function, do not use.
""")
add_newdoc("_lgam1p",
"""
Internal function, do not use.
""")
add_newdoc("log1p",
"""
log1p(x)
Calculates log(1+x) for use when `x` is near zero
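Examples
--------
An illustrative comparison with the naive formula; for tiny `x` the
naive expression loses all precision while `log1p` does not:
>>> import numpy as np
>>> from scipy.special import log1p
>>> float(log1p(1e-20))
1e-20
>>> float(np.log(1 + 1e-20))
0.0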
""")
add_newdoc("_log1pmx",
"""
Internal function, do not use.
""")
add_newdoc('logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
See Also
--------
expit
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.special import logit, expit
>>> logit([0, 0.25, 0.5, 0.75, 1])
array([ -inf, -1.09861229, 0. , 1.09861229, inf])
`expit` is the inverse of `logit`:
>>> expit(logit([0.1, 0.75, 0.999]))
array([ 0.1 , 0.75 , 0.999])
Plot logit(x) for x in [0, 1]:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 1, 501)
>>> y = logit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.ylim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('logit(x)')
>>> plt.show()
""")
add_newdoc("lpmv",
r"""
lpmv(m, v, x)
Associated Legendre function of integer order and real degree.
Defined as
.. math::
P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x)
where
.. math::
P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2}
\left(\frac{1 - x}{2}\right)^k
is the Legendre function of the first kind. Here :math:`(\cdot)_k`
is the Pochhammer symbol; see `poch`.
Parameters
----------
m : array_like
Order (int or float). If passed a float not equal to an
integer the function returns NaN.
v : array_like
Degree (float).
x : array_like
Argument (float). Must have ``|x| <= 1``.
Returns
-------
pmv : ndarray
Value of the associated Legendre function.
See Also
--------
lpmn : Compute the associated Legendre function for all orders
``0, ..., m`` and degrees ``0, ..., n``.
clpmn : Compute the associated Legendre function at complex
arguments.
Notes
-----
Note that this implementation includes the Condon-Shortley phase.
References
----------
.. [1] Zhang, Jin, "Computation of Special Functions", John Wiley
and Sons, Inc, 1996.
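Examples
--------
An illustrative check of the definition above for :math:`m = 1`,
:math:`v = 2`, where :math:`P_2(x) = (3x^2 - 1)/2` and hence
:math:`P_2^1(x) = -3x\sqrt{1 - x^2}`:
>>> import numpy as np
>>> from scipy.special import lpmv
>>> x = 0.5
>>> bool(np.isclose(lpmv(1, 2, x), -3 * x * np.sqrt(1 - x**2)))
True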
""")
add_newdoc("mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fm``
""")
add_newdoc("modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(\imath x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|x| < |v| + 20`)
- asymptotic large-x expansion (if :math:`x \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/11
""")
add_newdoc("nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
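Examples
--------
An illustrative check of the incomplete-beta relation given in the
Notes:
>>> import numpy as np
>>> from scipy.special import nbdtr, betainc
>>> k, n, p = 5, 3, 0.5
>>> bool(np.isclose(nbdtr(k, n, p), betainc(n, k + 1, p)))
True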
""")
add_newdoc("nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
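Examples
--------
An illustrative check of the incomplete-beta relation given in the
Notes:
>>> import numpy as np
>>> from scipy.special import nbdtrc, betainc
>>> k, n, p = 5, 3, 0.5
>>> bool(np.isclose(nbdtrc(k, n, p), betainc(k + 1, n, 1 - p)))
True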
""")
add_newdoc("nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("ncfdtri",
"""
ncfdtri(dfn, dfd, nc, p)
Inverse with respect to `f` of the CDF of the non-central F distribution.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
Returns
-------
f : float
Quantiles, i.e. the upper limit of integration.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtri
Compute the CDF for several values of `f`:
>>> f = [0.5, 1, 1.5]
>>> p = ncfdtr(2, 3, 1.5, f)
>>> p
array([ 0.20782291, 0.36107392, 0.47345752])
Compute the inverse. We recover the values of `f`, as expected:
>>> ncfdtri(2, 3, 1.5, p)
array([ 0.5, 1. , 1.5])
""")
add_newdoc("ncfdtridfd",
"""
ncfdtridfd(dfn, p, nc, f)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
This is the inverse with respect to `dfd` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
dfd : float
Degrees of freedom of the denominator sum of squares.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtridfd
Compute the CDF for several values of `dfd`:
>>> dfd = [1, 2, 3]
>>> p = ncfdtr(2, dfd, 0.25, 15)
>>> p
array([ 0.8097138 , 0.93020416, 0.96787852])
Compute the inverse. We recover the values of `dfd`, as expected:
>>> ncfdtridfd(2, p, 0.25, 15)
array([ 1., 2., 3.])
""")
add_newdoc("ncfdtridfn",
"""
ncfdtridfn(p, dfd, nc, f)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
This is the inverse with respect to `dfn` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : float
Quantiles, i.e. the upper limit of integration.
Returns
-------
dfn : float
Degrees of freedom of the numerator sum of squares.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtridfn
Compute the CDF for several values of `dfn`:
>>> dfn = [1, 2, 3]
>>> p = ncfdtr(dfn, 2, 0.25, 15)
>>> p
array([ 0.92562363, 0.93020416, 0.93188394])
Compute the inverse. We recover the values of `dfn`, as expected:
>>> ncfdtridfn(p, 2, 0.25, 15)
array([ 1., 2., 3.])
""")
add_newdoc("ncfdtrinc",
"""
ncfdtrinc(dfn, dfd, p, f)
Calculate non-centrality parameter for non-central F distribution.
This is the inverse with respect to `nc` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
nc : float
Noncentrality parameter.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtrinc
Compute the CDF for several values of `nc`:
>>> nc = [0.5, 1.5, 2.0]
>>> p = ncfdtr(2, 3, nc, 15)
>>> p
array([ 0.96309246, 0.94327955, 0.93304098])
Compute the inverse. We recover the values of `nc`, as expected:
>>> ncfdtrinc(2, 3, p, 15)
array([ 0.5, 1.5, 2. ])
""")
add_newdoc("nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("ndtr",
r"""
ndtr(x)
Gaussian cumulative distribution function.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
log_ndtr
""")
add_newdoc("nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
ndtr
""")
add_newdoc("log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
""")
add_newdoc("ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
""")
add_newdoc("obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("pbwa",
r"""
pbwa(a, x)
Parabolic cylinder function W.
The function is a particular solution to the differential equation
.. math::
y'' + \left(\frac{1}{4}x^2 - a\right)y = 0,
for a full definition see section 12.14 in [1]_.
Parameters
----------
a : array_like
Real parameter
x : array_like
Real argument
Returns
-------
w : scalar or ndarray
Value of the function
wp : scalar or ndarray
Value of the derivative in x
Notes
-----
The function is a wrapper for a Fortran routine by Zhang and Jin
[2]_. The implementation is accurate only for ``|a|, |x| < 5`` and
returns NaN outside that range.
References
----------
.. [1] Digital Library of Mathematical Functions, 12.14.
https://dlmf.nist.gov/12.14
.. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first `k` terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and `k` an integer.
""")
add_newdoc("pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and `k` an integer.
""")
add_newdoc("pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
""")
add_newdoc("pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("poch",
r"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as
.. math::
(z)_m = \frac{\Gamma(z + m)}{\Gamma(z)}
For positive integer `m` it reads
.. math::
(z)_m = z (z + 1) ... (z + m - 1)
Parameters
----------
z : array_like
(int or float)
m : array_like
(int or float)
Returns
-------
poch : ndarray
The value of the function.
""")
add_newdoc("pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.15.0
""")
add_newdoc("rgamma",
"""
rgamma(z)
Reciprocal of the gamma function.
Returns ``1/gamma(z)``
""")
add_newdoc("round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to `x` as a double precision floating
point result. If `x` ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("shichi",
r"""
shichi(x, out=None)
Hyperbolic sine and cosine integrals.
The hyperbolic sine integral is
.. math::
\int_0^x \frac{\sinh{t}}{t}dt
and the hyperbolic cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the hyperbolic sine
and cosine integrals.
Returns
-------
si : ndarray
Hyperbolic sine integral at ``x``
ci : ndarray
Hyperbolic cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``chi`` is the real part of the
hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x
+ 0j)`` differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *shichi* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *shi* and *chi* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("sici",
r"""
sici(x, out=None)
Sine and cosine integrals.
The sine integral is
.. math::
\int_0^x \frac{\sin{t}}{t}dt
and the cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the sine and cosine
integrals.
Returns
-------
si : ndarray
Sine integral at ``x``
ci : ndarray
Cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``ci`` is the real part of the
cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``
differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *sici* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *si* and *ci* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("smirnov",
r"""
smirnov(n, d)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function,(aka the Survival Function) of Dn+ (or Dn-)
for a one-sided test of equality between an empirical and a
theoretical distribution. It is equal to the probability that the
maximum difference between a theoretical distribution and an empirical
one based on `n` samples is greater than d.
Parameters
----------
n : int
Number of samples
d : float array_like
Deviation between the Empirical CDF (ECDF) and the target CDF.
Returns
-------
float
The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d))
Notes
-----
`smirnov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.ksone` distribution.
See Also
--------
smirnovi : The Inverse Survival Function for the distribution
scipy.stats.ksone : Provides the functionality as a continuous distribution
kolmogorov, kolmogi : Functions for the two-sided distribution
Examples
--------
>>> from scipy.special import smirnov
Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a sample of size 5
>>> smirnov(5, [0, 0.5, 1.0])
array([ 1. , 0.056, 0. ])
Compare a sample of size 5 drawn from a source N(0.5, 1) distribution against
a target N(0, 1) CDF.
>>> from scipy.stats import norm
>>> n = 5
>>> gendist = norm(0.5, 1) # Normal distribution, mean 0.5, stddev 1
>>> np.random.seed(seed=233423) # Set the seed for reproducibility
>>> x = np.sort(gendist.rvs(size=n))
>>> x
array([-0.20946287, 0.71688765, 0.95164151, 1.44590852, 3.08880533])
>>> target = norm(0, 1)
>>> cdfs = target.cdf(x)
>>> cdfs
array([ 0.41704346, 0.76327829, 0.82936059, 0.92589857, 0.99899518])
# Construct the Empirical CDF and the K-S statistics (Dn+, Dn-, Dn)
>>> ecdfs = np.arange(n+1, dtype=float)/n
>>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n], ecdfs[1:] - cdfs])
>>> np.set_printoptions(precision=3)
>>> cols
array([[ -2.095e-01, 2.000e-01, 4.170e-01, 4.170e-01, -2.170e-01],
[ 7.169e-01, 4.000e-01, 7.633e-01, 5.633e-01, -3.633e-01],
[ 9.516e-01, 6.000e-01, 8.294e-01, 4.294e-01, -2.294e-01],
[ 1.446e+00, 8.000e-01, 9.259e-01, 3.259e-01, -1.259e-01],
[ 3.089e+00, 1.000e+00, 9.990e-01, 1.990e-01, 1.005e-03]])
>>> gaps = cols[:, -2:]
>>> Dnpm = np.max(gaps, axis=0)
>>> print('Dn-=%f, Dn+=%f' % (Dnpm[0], Dnpm[1]))
Dn-=0.563278, Dn+=0.001005
>>> probs = smirnov(n, Dnpm)
>>> print(chr(10).join(['For a sample of size %d drawn from a N(0, 1) distribution:' % n,
... ' Smirnov n=%d: Prob(Dn- >= %f) = %.4f' % (n, Dnpm[0], probs[0]),
... ' Smirnov n=%d: Prob(Dn+ >= %f) = %.4f' % (n, Dnpm[1], probs[1])]))
For a sample of size 5 drawn from a N(0, 1) distribution:
Smirnov n=5: Prob(Dn- >= 0.563278) = 0.0250
Smirnov n=5: Prob(Dn+ >= 0.001005) = 0.9990
Plot the Empirical CDF against the target N(0, 1) CDF
>>> import matplotlib.pyplot as plt
>>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')
>>> x3 = np.linspace(-3, 3, 100)
>>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')
>>> plt.ylim([0, 1]); plt.grid(True); plt.legend();
# Add vertical lines marking Dn+ and Dn-
>>> iminus, iplus = np.argmax(gaps, axis=0)
>>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4)
>>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m', linestyle='dashed', lw=4)
>>> plt.show()
""")
add_newdoc("smirnovi",
"""
smirnovi(n, p)
Inverse to `smirnov`
Returns `d` such that ``smirnov(n, d) == p``, the critical value
corresponding to `p`.
Parameters
----------
n : int
Number of samples
p : float array_like
Probability
Returns
-------
float
The value(s) of smirnovi(n, p), the critical values.
Notes
-----
`smirnov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.ksone` distribution.
See Also
--------
smirnov : The Survival Function (SF) for the distribution
scipy.stats.ksone : Provides the functionality as a continuous distribution
kolmogorov, kolmogi, scipy.stats.kstwobign : Functions for the two-sided distribution
""")
add_newdoc("_smirnovc",
"""
_smirnovc(n, d)
Internal function, do not use.
""")
add_newdoc("_smirnovci",
"""
Internal function, do not use.
""")
add_newdoc("_smirnovp",
"""
_smirnovp(n, p)
Internal function, do not use.
""")
add_newdoc("spence",
r"""
spence(z, out=None)
Spence's function, also known as the dilogarithm.
It is defined to be
.. math::
\int_0^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Parameters
----------
z : array_like
Points at which to evaluate Spence's function
Returns
-------
s : ndarray
Computed values of Spence's function
Notes
-----
There is a different convention which defines Spence's function by
the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
""")
add_newdoc("stdtr",
"""
stdtr(df, t)
Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/11
""")
add_newdoc("tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> z = special.wofz(x)
>>> plt.plot(x, z.real, label='wofz(x).real')
>>> plt.plot(x, z.imag, label='wofz(x).imag')
>>> plt.xlabel('$x$')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.show()
""")
add_newdoc("xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) - 1 = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``. For ``x < 1`` the analytic continuation is
computed. For more information on the Riemann zeta function, see
[dlmf]_.
Parameters
----------
x : array_like of float
Values at which to compute zeta(x) - 1 (must be real).
Returns
-------
out : array_like
Values of zeta(x) - 1.
See Also
--------
zeta
Examples
--------
>>> from scipy.special import zetac, zeta
Some special values:
>>> zetac(2), np.pi**2/6 - 1
(0.64493406684822641, 0.6449340668482264)
>>> zetac(-1), -1.0/12 - 1
(-1.0833333333333333, -1.0833333333333333)
Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`:
>>> zetac(60), zeta(60) - 1
(8.673617380119933e-19, 0.0)
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/25
""")
add_newdoc("_riemann_zeta",
"""
Internal function, use `zeta` instead.
""")
add_newdoc("_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the Gamma function.
Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
function has a single branch cut on the negative real axis.
.. versionadded:: 0.18.0
Parameters
----------
z : array_like
Values in the complex plane at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : ndarray
Values of ``loggamma`` at z.
Notes
-----
It is not generally true that :math:`\log\Gamma(z) =
\log(\Gamma(z))`, though the real parts of the functions do
agree. The benefit of not defining `loggamma` as
:math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas `loggamma` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make `loggamma` useful for working in complex logspace.
On the real line `loggamma` is related to `gammaln` via
``exp(loggamma(x + 0j)) = gammasgn(x)*exp(gammaln(x))``, up to
rounding error.
The implementation here is based on [hare1997]_.
See also
--------
gammaln : logarithm of the absolute value of the Gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
""")
add_newdoc("_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("_cospi",
"""
Internal function, do not use.
""")
add_newdoc("owens_t",
"""
owens_t(h, a)
Owen's T Function.
The function T(h, a) gives the probability of the event
(X > h and 0 < Y < a * X) where X and Y are independent
standard normal random variables.
Parameters
----------
h : array_like
Input value.
a : array_like
Input value.
Returns
-------
t: scalar or ndarray
Probability of the event (X > h and 0 < Y < a * X),
where X and Y are independent standard normal random variables.
Examples
--------
>>> from scipy import special
>>> a = 3.5
>>> h = 0.78
>>> special.owens_t(h, a)
0.10877216734852274
References
----------
.. [1] M. Patefield and D. Tandy, "Fast and accurate calculation of
Owen's T Function", Statistical Software vol. 5, pp. 1-25, 2000.
""")
|
jor-/scipy
|
scipy/special/add_newdocs.py
|
Python
|
bsd-3-clause
| 208,122
|
[
"Gaussian"
] |
5aaff20b2a145ac89c0296663520adc612d807ed58b71a533fa88499349a1c01
|
#!/usr/bin/env python
import sys
import gd_util
from Population import Population
################################################################################
if len(sys.argv) != 6:
gd_util.die('Usage')
input, input_type, ind_arg, pop_input, output = sys.argv[1:]
p_total = Population()
p_total.from_wrapped_dict(ind_arg)
p1 = Population()
p1.from_population_file(pop_input)
if not p_total.is_superset(p1):
gd_util.die('There is an individual in the population that is not in the SNP table')
################################################################################
prog = 'kinship_prep'
args = [ prog ]
args.append(input) # a Galaxy SNP table
args.append(0) # required number of reads for each individual to use a SNP
args.append(0) # required genotype quality for each individual to use a SNP
args.append(0) # minimum spacing between SNPs on the same scaffold
for tag in p1.tag_list():
if input_type == 'gd_genotype':
column, name = tag.split(':')
tag = '{0}:{1}'.format(int(column) - 2, name)
args.append(tag)
gd_util.run_program(prog, args)
# kinship.map
# kinship.ped
# kinship.dat
################################################################################
prog = 'king'
args = [ prog ]
args.append('-d')
args.append('kinship.dat')
args.append('-p')
args.append('kinship.ped')
args.append('-m')
args.append('kinship.map')
args.append('--kinship')
gd_util.run_program(prog, args)
# king.kin
################################################################################
valid_header = 'FID\tID1\tID2\tN_SNP\tZ0\tPhi\tHetHet\tIBS0\tKinship\tError\n'
with open('king.kin') as fh:
header = fh.readline()
if header != valid_header:
gd_util.die('crap')
with open(output, 'w') as ofh:
for line in fh:
elems = line.split('\t')
if len(elems) != 10:
gd_util.die('crap')
x = elems[1]
y = elems[2]
z = elems[8]
f = float(z)
message = ''
if f > 0.354:
message = 'duplicate or MZ twin'
elif f >= 0.177:
message = '1st degree relatives'
elif f >= 0.0884:
message = '2nd degree relatives'
elif f >= 0.0442:
message = '3rd degree relatives'
print >> ofh, '\t'.join([x, y, z, message])
################################################################################
sys.exit(0)
|
gigascience/galaxy-genome-diversity
|
tools/discover_familial_relationships/discover_familial_relationships.py
|
Python
|
gpl-3.0
| 2,516
|
[
"Galaxy"
] |
cbe4c440a8430ea34711424b20d530582d251cadfc53296108965a530528a28f
|
"""Neighbour Exploring Routing (NER) algorithm from J. Navaridas et al.
Algorithm reference: J. Navaridas et al. SpiNNaker: Enhanced multicast routing,
Parallel Computing (2014).
`http://dx.doi.org/10.1016/j.parco.2015.01.002`
"""
import heapq
from collections import deque
from ...geometry import concentric_hexagons, to_xyz, \
shortest_mesh_path_length, shortest_mesh_path, \
shortest_torus_path_length, shortest_torus_path
from .utils import longest_dimension_first, links_between
from ..exceptions import MachineHasDisconnectedSubregion
from ..constraints import RouteEndpointConstraint
from ..machine import Cores
from ...links import Links
from ...routing_table import Routes
from ..routing_tree import RoutingTree
_concentric_hexagons = {}
"""Memoized concentric_hexagons outputs, as lists. Access via
:py:func:`.memoized_concentric_hexagons`.
"""
def memoized_concentric_hexagons(radius):
"""A memoized wrapper around :py:func:`rig.geometry.concentric_hexagons`
which memoizes the coordinates and stores them as a tuple. Note that the
caller must manually offset the coordinates as required.
This wrapper is used to avoid the need to repeatedly call
:py:func:`rig.geometry.concentric_hexagons` for every sink in a network.
This results in a relatively minor speedup (but at equally minor cost) in
large networks.
"""
out = _concentric_hexagons.get(radius)
if out is None:
out = tuple(concentric_hexagons(radius))
_concentric_hexagons[radius] = out
return out
def ner_net(source, destinations, width, height, wrap_around=False, radius=10):
"""Produce a shortest path tree for a given net using NER.
This is the kernel of the NER algorithm.
Parameters
----------
source : (x, y)
The coordinate of the source vertex.
destinations : iterable([(x, y), ...])
The coordinates of destination vertices.
width : int
Width of the system (nodes)
height : int
Height of the system (nodes)
wrap_around : bool
True if wrap-around links should be used, false if they should be
avoided.
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice.
Returns
-------
(:py:class:`~.rig.place_and_route.routing_tree.RoutingTree`,
{(x,y): :py:class:`~.rig.place_and_route.routing_tree.RoutingTree`, ...})
A RoutingTree is produced rooted at the source and visiting all
destinations but which does not contain any vertices etc. For
convenience, a dictionary mapping from destination (x, y) coordinates
to the associated RoutingTree is provided to allow the caller to insert
these items.
"""
# Map from (x, y) to RoutingTree objects
route = {source: RoutingTree(source)}
# Handle each destination, sorted by distance from the source, closest
# first.
for destination in sorted(destinations,
key=(lambda destination:
shortest_mesh_path_length(
to_xyz(source), to_xyz(destination))
if not wrap_around else
shortest_torus_path_length(
to_xyz(source), to_xyz(destination),
width, height))):
# We shall attempt to find our nearest neighbouring placed node.
neighbour = None
# Try to find a nearby (within radius hops) node in the routing tree
# that we can route to (falling back on just routing to the source).
#
# An implementation according to the algorithm's original
# specification looks for nodes at each point in a growing set of rings
# of concentric hexagons. If it doesn't find any route nodes this
# means an awful lot of checks: 1261 for a radius of 20.
#
# An alternative (but behaviourally identical) implementation scans the
# list of all route nodes created so far and finds the closest node
# which is < radius hops (falling back on the origin if no node is
# closer than radius hops). This implementation requires one check per
# existing route node. In most routes this is probably a lot less than
# 1261 since most routes will probably have at most a few hundred route
# nodes by the time the last destination is being routed.
#
# Which implementation is best is a difficult question to answer:
# * In principle nets with quite localised connections (e.g.
# nearest-neighbour or centroids traffic) may route slightly more
# quickly with the original algorithm since it may very quickly find
# a neighbour.
# * In nets which connect very spaced-out destinations the second
# implementation may be quicker since in such a scenario it is
# unlikely that a neighbour will be found.
# * In extremely high-fan-out nets (e.g. broadcasts), the original
# method is very likely to perform *far* better than the alternative
# method since most iterations will complete immediately while the
# alternative method must scan *all* the route vertices.
# As such, it should be clear that neither method alone is 'best' and
# both have degenerate performance in certain completely reasonable
# styles of net. As a result, a simple heuristic is used to decide
# which technique to use.
#
# The following micro-benchmarks are crude estimate of the
# runtime-per-iteration of each approach (at least in the case of a
# torus topology)::
#
# $ # Original approach
# $ python -m timeit --setup 'x, y, w, h, r = 1, 2, 5, 10, \
# {x:None for x in range(10)}' \
# 'x += 1; y += 1; x %= w; y %= h; (x, y) in r'
# 1000000 loops, best of 3: 0.207 usec per loop
# $ # Alternative approach
# $ python -m timeit --setup 'from rig.geometry import \
# shortest_torus_path_length' \
# 'shortest_torus_path_length( \
# (0, 1, 2), (3, 2, 1), 10, 10)'
# 1000000 loops, best of 3: 0.666 usec per loop
#
# From this we can approximately suggest that the alternative approach
# is 3x more expensive per iteration. A very crude heuristic is to use
# the original approach when the number of route nodes is more than
# 1/3rd of the number of routes checked by the original method.
concentric_hexagons = memoized_concentric_hexagons(radius)
if len(concentric_hexagons) < len(route) / 3:
# Original approach: Start looking for route nodes in a concentric
# spiral pattern out from the destination node.
for x, y in concentric_hexagons:
x += destination[0]
y += destination[1]
if wrap_around:
x %= width
y %= height
if (x, y) in route:
neighbour = (x, y)
break
else:
# Alternative approach: Scan over every route node and check to see
# if any are < radius, picking the closest one if so.
neighbour = None
neighbour_distance = None
for candidate_neighbour in route:
if wrap_around:
distance = shortest_torus_path_length(
to_xyz(candidate_neighbour), to_xyz(destination),
width, height)
else:
distance = shortest_mesh_path_length(
to_xyz(candidate_neighbour), to_xyz(destination))
if distance <= radius and (neighbour is None or
distance < neighbour_distance):
neighbour = candidate_neighbour
neighbour_distance = distance
# Fall back on routing directly to the source if no node within radius
# hops of the destination was found.
if neighbour is None:
neighbour = source
# Find the shortest vector from the neighbour to this destination
if wrap_around:
vector = shortest_torus_path(to_xyz(neighbour),
to_xyz(destination),
width, height)
else:
vector = shortest_mesh_path(to_xyz(neighbour), to_xyz(destination))
# The longest-dimension-first route may inadvertently pass through an
# already connected node. If the route is allowed to pass through that
# node it would create a cycle in the route which would be VeryBad(TM).
# As a result, we work backward through the route and truncate it at
# the first point where the route intersects with a connected node.
ldf = longest_dimension_first(vector, neighbour, width, height)
i = len(ldf)
for direction, (x, y) in reversed(ldf):
i -= 1
if (x, y) in route:
# We've just bumped into a node which is already part of the
# route, this becomes our new neighbour and we truncate the LDF
# route. (Note ldf list is truncated just after the current
# position since it gives (direction, destination) pairs).
neighbour = (x, y)
ldf = ldf[i + 1:]
break
# Take the longest dimension first route.
last_node = route[neighbour]
for direction, (x, y) in ldf:
this_node = RoutingTree((x, y))
route[(x, y)] = this_node
last_node.children.append((Routes(direction), this_node))
last_node = this_node
return (route[source], route)
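# Illustrative usage sketch for ner_net (added comment, not part of the
# original module; the machine size and coordinates below are hypothetical):
# >>> root, lookup = ner_net((0, 0), [(3, 1), (1, 4)], width=8, height=8)
# >>> root.chip
# (0, 0)
# >>> all(d in lookup for d in [(3, 1), (1, 4)])
# True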
def copy_and_disconnect_tree(root, machine):
"""Copy a RoutingTree (containing nothing but RoutingTrees), disconnecting
nodes which are not connected in the machine.
Note that if a dead chip is part of the input RoutingTree, no corresponding
node will be included in the copy. The assumption behind this is that the
only reason a tree would visit a dead chip is because a route passed
through the chip and wasn't actually destined to arrive at that chip. This
situation is impossible to confirm since the input routing trees have not
yet been populated with vertices. The caller is responsible for being
sensible.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree that contains nothing but RoutingTrees
(i.e. no children which are vertices or links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
(root, lookup, broken_links)
Where:
* `root` is the new root of the tree
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`
* `lookup` is a dict {(x, y):
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`, ...}
* `broken_links` is a set ([(parent, child), ...]) containing all
disconnected parent and child (x, y) pairs due to broken links.
"""
new_root = None
# Lookup for copied routing tree {(x, y): RoutingTree, ...}
new_lookup = {}
# List of missing connections in the copied routing tree [(new_parent,
# new_child), ...]
broken_links = set()
# A queue [(new_parent, direction, old_node), ...]
to_visit = deque([(None, None, root)])
while to_visit:
new_parent, direction, old_node = to_visit.popleft()
if old_node.chip in machine:
# Create a copy of the node
new_node = RoutingTree(old_node.chip)
new_lookup[new_node.chip] = new_node
else:
# This chip is dead, move all its children into the parent node
assert new_parent is not None, \
"Net cannot be sourced from a dead chip."
new_node = new_parent
if new_parent is None:
# This is the root node
new_root = new_node
elif new_node is not new_parent:
# If this node is not dead, check connectivity to parent node (no
# reason to check connectivity between a dead node and its parent).
if direction in links_between(new_parent.chip,
new_node.chip,
machine):
# Is connected via working link
new_parent.children.append((direction, new_node))
else:
# Link to parent is dead (or original parent was dead and the
# new parent is not adjacent)
broken_links.add((new_parent.chip, new_node.chip))
# Copy children
for child_direction, child in old_node.children:
to_visit.append((new_node, child_direction, child))
return (new_root, new_lookup, broken_links)
def a_star(sink, heuristic_source, sources, machine, wrap_around):
"""Use A* to find a path from any of the sources to the sink.
Note that the heuristic means that the search will proceed towards
heuristic_source without any concern for any other sources. This means that
the algorithm may miss a very close neighbour in order to pursue its goal
of reaching heuristic_source. This is not considered a problem since 1) the
heuristic source will typically be in the direction of the rest of the tree
and nearby, often the closest entity, and 2) it prevents us accidentally
forming loops in the rest of the tree since we'll stop as soon as we touch
any part of it.
Parameters
----------
sink : (x, y)
heuristic_source : (x, y)
An element from `sources` which is used as a guiding heuristic for the
A* algorithm.
sources : set([(x, y), ...])
machine : :py:class:`~rig.place_and_route.Machine`
wrap_around : bool
Consider wrap-around links in heuristic distance calculations.
Returns
-------
[(:py:class:`~rig.routing_table.Routes`, (x, y)), ...]
A path starting with a coordinate in `sources` and terminating at a
connected neighbour of `sink` (i.e. the path does not include `sink`).
The direction given is the link down which to proceed from the given
(x, y) to arrive at the next point in the path.
Raises
------
:py:class:`~rig.place_and_route.exceptions.MachineHasDisconnectedSubregion`
If a path cannot be found.
"""
# Select the heuristic function to use for distances
if wrap_around:
heuristic = (lambda node:
shortest_torus_path_length(to_xyz(node),
to_xyz(heuristic_source),
machine.width, machine.height))
else:
heuristic = (lambda node:
shortest_mesh_path_length(to_xyz(node),
to_xyz(heuristic_source)))
# A dictionary {node: (direction, previous_node)}. An entry indicates that
# 1) the node has been visited and 2) which previous_node we hopped from
# (and the direction of the link used) to reach it. The value is None if
# the node is the sink.
visited = {sink: None}
# The node which the tree will be reconnected to
selected_source = None
# A heap (accessed via heapq) of (distance, (x, y)) where distance is the
# distance between (x, y) and heuristic_source and (x, y) is a node to
# explore.
to_visit = [(heuristic(sink), sink)]
while to_visit:
_, node = heapq.heappop(to_visit)
# Terminate if we've found the destination
if node in sources:
selected_source = node
break
# Try all neighbouring locations. Note: link identifiers are from the
# perspective of the neighbour, not the current node!
for neighbour_link in Links:
vector = neighbour_link.opposite.to_vector()
neighbour = ((node[0] + vector[0]) % machine.width,
(node[1] + vector[1]) % machine.height)
# Skip links which are broken
if (neighbour[0], neighbour[1], neighbour_link) not in machine:
continue
# Skip neighbours who have already been visited
if neighbour in visited:
continue
# Explore all other neighbours
visited[neighbour] = (neighbour_link, node)
heapq.heappush(to_visit, (heuristic(neighbour), neighbour))
# Fail if no path exists
if selected_source is None:
raise MachineHasDisconnectedSubregion(
"Could not find path from {} to {}".format(
sink, heuristic_source))
# Reconstruct the discovered path, starting from the source we found and
# working back until the sink.
path = [(Routes(visited[selected_source][0]), selected_source)]
while visited[path[-1][1]][1] != sink:
node = visited[path[-1][1]][1]
direction = Routes(visited[node][0])
path.append((direction, node))
return path
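# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the ``visited``
# mapping built above is unwound into a path. Plain strings stand in for
# Routes values and the coordinates are made up.
def _example_backtrack():
    sink = (0, 0)
    visited = {
        (0, 0): None,            # the sink itself
        (1, 0): ("W", (0, 0)),   # (1, 0) was reached by hopping from (0, 0)
        (2, 0): ("W", (1, 0)),   # (2, 0) was reached by hopping from (1, 0)
    }
    selected_source = (2, 0)
    path = [(visited[selected_source][0], selected_source)]
    while visited[path[-1][1]][1] != sink:
        node = visited[path[-1][1]][1]
        path.append((visited[node][0], node))
    return path  # [("W", (2, 0)), ("W", (1, 0))]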
def route_has_dead_links(root, machine):
"""Quickly determine if a route uses any dead links.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
bool
True if the route uses any dead/missing links, False otherwise.
"""
for direction, (x, y), routes in root.traverse():
for route in routes:
if (x, y, route) not in machine:
return True
return False
def avoid_dead_links(root, machine, wrap_around=False):
"""Modify a RoutingTree to route-around dead links in a Machine.
Uses A* to reconnect disconnected branches of the tree (due to dead links
in the machine).
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
wrap_around : bool
Consider wrap-around links in pathfinding heuristics.
Returns
-------
(:py:class:`~.rig.place_and_route.routing_tree.RoutingTree`,
{(x,y): :py:class:`~.rig.place_and_route.routing_tree.RoutingTree`, ...})
A new RoutingTree is produced, rooted as before. A dictionary mapping
from (x, y) to the associated RoutingTree is provided for convenience.
Raises
------
:py:class:`~rig.place_and_route.exceptions.MachineHasDisconnectedSubregion`
If a path to reconnect the tree cannot be found.
"""
# Make a copy of the RoutingTree with all broken parts disconnected
root, lookup, broken_links = copy_and_disconnect_tree(root, machine)
# For each disconnected subtree, use A* to connect the tree to *any* other
# disconnected subtree. Note that this process will eventually result in
# all disconnected subtrees being connected, the result is a fully
# connected tree.
for parent, child in broken_links:
child_chips = set(c.chip for c in lookup[child])
# Try to reconnect broken links to any other part of the tree
# (excluding this broken subtree itself since that would create a
# cycle).
path = a_star(child, parent,
set(lookup).difference(child_chips),
machine, wrap_around)
# Add new RoutingTree nodes to reconnect the child to the tree.
last_node = lookup[path[0][1]]
last_direction = path[0][0]
for direction, (x, y) in path[1:]:
if (x, y) not in child_chips:
# This path segment traverses new ground so we must create a
# new RoutingTree for the segment.
new_node = RoutingTree((x, y))
# A* will not traverse anything but chips in this tree so this
# assert is merely a sanity check that this occurred correctly.
assert (x, y) not in lookup, "Cycle created."
lookup[(x, y)] = new_node
else:
# This path segment overlaps part of the disconnected tree
# (A* doesn't know where the disconnected tree is and thus
# doesn't avoid it). To prevent cycles being introduced, this
# overlapped node is severed from its parent and merged as part
# of the A* path.
new_node = lookup[(x, y)]
# Find the node's current parent and disconnect it.
for node in lookup[child]: # pragma: no branch
dn = [(d, n) for d, n in node.children if n == new_node]
assert len(dn) <= 1
if dn:
node.children.remove(dn[0])
# A node can only have one parent so we can stop now.
break
last_node.children.append((Routes(last_direction), new_node))
last_node = new_node
last_direction = direction
last_node.children.append((last_direction, lookup[child]))
return (root, lookup)
def route(vertices_resources, nets, machine, constraints, placements,
allocations={}, core_resource=Cores, radius=20):
"""Routing algorithm based on Neighbour Exploring Routing (NER).
Algorithm reference: J. Navaridas et al. SpiNNaker: Enhanced multicast
routing, Parallel Computing (2014).
http://dx.doi.org/10.1016/j.parco.2015.01.002
This algorithm attempts to use NER to generate routing trees for all nets
and routes around broken links using A* graph search. If the system is
fully connected, this algorithm will always succeed though no consideration
of congestion or routing-table usage is attempted.
Parameters
----------
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice. If set to zero, this
method becomes longest dimension first routing.
"""
wrap_around = machine.has_wrap_around_links()
# Vertices constrained to route to a specific link. {vertex: route}
route_to_endpoint = {}
for constraint in constraints:
if isinstance(constraint, RouteEndpointConstraint):
route_to_endpoint[constraint.vertex] = constraint.route
routes = {}
for net in nets:
# Generate routing tree (assuming a perfect machine)
root, lookup = ner_net(placements[net.source],
set(placements[sink] for sink in net.sinks),
machine.width, machine.height,
wrap_around, radius)
# Fix routes to avoid dead chips/links
if route_has_dead_links(root, machine):
root, lookup = avoid_dead_links(root, machine, wrap_around)
# Add the sinks in the net to the RoutingTree
for sink in net.sinks:
tree_node = lookup[placements[sink]]
if sink in route_to_endpoint:
# Sinks with route-to-endpoint constraints must be routed
# in the specified directions.
tree_node.children.append((route_to_endpoint[sink], sink))
else:
cores = allocations.get(sink, {}).get(core_resource, None)
if cores is not None:
# Sinks with the core_resource resource specified must be
# routed to that set of cores.
for core in range(cores.start, cores.stop):
tree_node.children.append((Routes.core(core), sink))
else:
# Sinks without that resource are simply included without
# an associated route
tree_node.children.append((None, sink))
routes[net] = root
return routes
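# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way to consume
# the result of route(). It relies only on the RoutingTree.traverse()
# behaviour already exercised in route_has_dead_links() above; the tally it
# produces (links used per net) is simply an example metric.
def _example_count_route_hops(routes):
    hops = {}
    for net, root in routes.items():
        count = 0
        for _direction, (_x, _y), out_routes in root.traverse():
            count += sum(1 for _ in out_routes)
        hops[net] = count
    return hops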
|
project-rig/rig
|
rig/place_and_route/route/ner.py
|
Python
|
gpl-2.0
| 24,812
|
[
"VisIt"
] |
bf95ddb1b0c3f0d20021d5628d3b5bfea4eb96832d72ba9d15461ed871405e3a
|
#!/usr/bin/env python
#===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
'''
Script to generate KML file of nominal scene footprints for each NBAR scene in DB
Created on 25/09/2013
@author: u76345
'''
import xml.dom.minidom
import argparse
from datetime import datetime
import logging, os, re, copy
from agdc import DataCube
from EOtools.utils import log_multiline
#===============================================================================
# # Set top level standard output
# console_handler = logging.StreamHandler(sys.stdout)
# console_handler.setLevel(logging.INFO)
# console_formatter = logging.Formatter('%(message)s')
# console_handler.setFormatter(console_formatter)
#===============================================================================
logger = logging.getLogger('datacube.' + __name__)
class SceneKMLGenerator(DataCube):
'''
classdocs
'''
def parse_args(self):
"""Parse the command line arguments.
Returns:
argparse namespace object
"""
logger.debug(' Calling parse_args()')
_arg_parser = argparse.ArgumentParser('stacker')
# N.B: modtran_root is a direct override of config entries
# and its variable name must be prefixed with "_" to allow lookup in conf file
_arg_parser.add_argument('-C', '--config', dest='config_file',
default=os.path.join(self.agdc_root, 'agdc_default.conf'),
help='Stacker configuration file')
_arg_parser.add_argument('-d', '--debug', dest='debug',
default=False, action='store_const', const=True,
help='Debug mode flag')
_arg_parser.add_argument('-o', '--output', dest='output_file',
required=False, default=None,
help='Output file path')
_arg_parser.add_argument('-s', '--start_date', dest='start_date',
required=False, default=None,
help='Start Date in dd/mm/yyyy format')
_arg_parser.add_argument('-e', '--end_date', dest='end_date',
required=False, default=None,
help='End Date in dd/mm/yyyy format')
_arg_parser.add_argument('-a', '--satellite', dest='satellite',
required=False, default=None,
help='Short Satellite name (e.g. LS5, LS7)')
_arg_parser.add_argument('-t', '--thumbnail', dest='thumbnail_size',
required=False, default=512,
help='Thumbnail side length in pixels')
_arg_parser.add_argument('-n', '--sensor', dest='sensor',
required=False, default=None,
help='Sensor Name (e.g. TM, ETM+)')
return _arg_parser.parse_args()
def getChildNodesByName(self, node, nodeName):
return [child_node for child_node in node.childNodes if child_node.nodeName == nodeName]
def __init__(self, source_datacube=None, default_tile_type_id=1):
"""Constructor
Arguments:
source_datacube: Optional DataCube object whose connection and data will be shared
tile_type_id: Optional tile_type_id value (defaults to 1)
"""
if source_datacube:
# Copy values from source_datacube and then override command line args
self.__dict__ = copy.copy(source_datacube.__dict__)
args = self.parse_args()
# Set instance attributes for every value in command line arguments file
for attribute_name in args.__dict__.keys():
attribute_value = args.__dict__[attribute_name]
self.__setattr__(attribute_name, attribute_value)
else:
DataCube.__init__(self) # Call inherited constructor
# Attempt to parse dates from command line arguments or config file
try:
self.start_date = datetime.strptime(self.start_date, '%Y%m%d').date()
except:
try:
self.start_date = datetime.strptime(self.start_date, '%d/%m/%Y').date()
except:
try:
self.start_date = datetime.strptime(self.start_date, '%Y-%m-%d').date()
except:
self.start_date= None
try:
self.end_date = datetime.strptime(self.end_date, '%Y%m%d').date()
except:
try:
self.end_date = datetime.strptime(self.end_date, '%d/%m/%Y').date()
except:
try:
self.end_date = datetime.strptime(self.end_date, '%Y-%m-%d').date()
except:
self.end_date= None
try:
self.thumbnail_size = int(self.thumbnail_size)
except:
self.thumbnail_size = 512
# Other variables set from config file only - not used
try:
self.min_path = int(self.min_path)
except:
self.min_path = None
try:
self.max_path = int(self.max_path)
except:
self.max_path = None
try:
self.min_row = int(self.min_row)
except:
self.min_row = None
try:
self.max_row = int(self.max_row)
except:
self.max_row = None
self.style_dict = {
# 'IconStyle': {'scale': 0.4, 'Icon': {'href': 'http://maps.google.com/mapfiles/kml/shapes/star.png'}},
'LabelStyle': {'color': '9900ffff', 'scale': 1},
'LineStyle': {'color': '990000ff', 'width': 2},
'PolyStyle': {'color': '997f7fff', 'fill': 1, 'outline': 1}
}
def generate(self, kml_filename=None, wrs_shapefile='WRS-2_bound_world.kml'):
'''
Generate a KML file
'''
def write_xml_file(filename, dom_tree, save_backup=False):
"""Function write the metadata contained in self._metadata_dict to an XML file
Argument:
filename: Metadata file to be written
uses_attributes: Boolean flag indicating whether to write values to tag attributes
"""
logger.debug('write_file(%s) called', filename)
if save_backup and os.path.exists(filename + '.bck'):
os.remove(filename + '.bck')
if os.path.exists(filename):
if save_backup:
os.rename(filename, filename + '.bck')
else:
os.remove(filename)
# Open XML document
try:
outfile = open(filename, 'w')
assert outfile is not None, 'Unable to open XML file ' + filename + ' for writing'
logger.debug('Writing XML file %s', filename)
# Strip all tabs and EOLs from around values, remove all empty lines
outfile.write(re.sub('\>(\s+)(\n\t*)\<',
'>\\2<',
re.sub('(\<\w*[^/]\>)\n(\t*\n)*(\t*)([^<>\n]*)\n\t*\n*(\t+)(\</\w+\>)',
'\\1\\4\\6',
dom_tree.toprettyxml(encoding='utf-8')
)
)
)
finally:
outfile.close()
def get_wrs_placemark_node(wrs_document_node, placemark_name):
"""
Return a clone of the WRS placemark node with the specified name
"""
try:
return [placemark_node for placemark_node in self.getChildNodesByName(wrs_document_node, 'Placemark')
if self.getChildNodesByName(placemark_node, 'name')[0].childNodes[0].nodeValue == placemark_name][0].cloneNode(True)
except:
return None
def create_placemark_node(wrs_document_node, acquisition_info):
"""
Create a new placemark node for the specified acquisition
"""
logger.info('Processing %s', acquisition_info['dataset_name'])
wrs_placemark_name = '%d_%d' % (acquisition_info['path'], acquisition_info['row'])
kml_placemark_name = acquisition_info['dataset_name']
placemark_node = get_wrs_placemark_node(wrs_document_node, wrs_placemark_name)
self.getChildNodesByName(placemark_node, 'name')[0].childNodes[0].nodeValue = kml_placemark_name
kml_time_span_node = kml_dom_tree.createElement('TimeSpan')
placemark_node.appendChild(kml_time_span_node)
kml_time_begin_node = kml_dom_tree.createElement('begin')
kml_time_begin_text_node = kml_dom_tree.createTextNode(acquisition_info['start_datetime'].isoformat())
kml_time_begin_node.appendChild(kml_time_begin_text_node)
kml_time_span_node.appendChild(kml_time_begin_node)
kml_time_end_node = kml_dom_tree.createElement('end')
kml_time_end_text_node = kml_dom_tree.createTextNode(acquisition_info['end_datetime'].isoformat())
kml_time_end_node.appendChild(kml_time_end_text_node)
kml_time_span_node.appendChild(kml_time_end_node)
description_node = self.getChildNodesByName(placemark_node, 'description')[0]
description_node.childNodes[0].data = '''<strong>Geoscience Australia ARG25 Dataset</strong>
<table cellspacing="1" cellpadding="1">
<tr>
<td>Satellite:</td>
<td>%(satellite)s</td>
</tr>
<tr>
<td>Sensor:</td>
<td>%(sensor)s</td>
</tr>
<tr>
<td>Start date/time (UTC):</td>
<td>%(start_datetime)s</td>
</tr>
<tr>
<td>End date/time (UTC):</td>
<td>%(end_datetime)s</td>
</tr>
<tr>
<td>WRS Path-Row:</td>
<td>%(path)03d-%(row)03d</td>
</tr>
<tr>
<td>Bounding Box (LL,UR):</td>
<td>(%(ll_lon)f,%(lr_lat)f),(%(ur_lon)f,%(ul_lat)f)</td>
</tr>
<tr>
<td>Est. Cloud Cover (USGS):</td>
<td>%(cloud_cover)s%%</td>
</tr>
<tr>
<td>GCP Count:</td>
<td>%(gcp_count)s</td>
</tr>
<tr>
<td>
<a href="http://eos.ga.gov.au/thredds/wms/LANDSAT/%(year)04d/%(month)02d/%(dataset_name)s_BX.nc?REQUEST=GetMap&SERVICE=WMS&VERSION=1.3.0&LAYERS=FalseColour741&STYLES=&FORMAT=image/png&TRANSPARENT=TRUE&CRS=CRS:84&BBOX=%(ll_lon)f,%(lr_lat)f,%(ur_lon)f,%(ul_lat)f&WIDTH=%(thumbnail_size)d&HEIGHT=%(thumbnail_size)d">View thumbnail</a>
</td>
<td>
<a href="http://eos.ga.gov.au/thredds/fileServer/LANDSAT/%(year)04d/%(month)02d/%(dataset_name)s_BX.nc">Download full NetCDF file</a>
</td>
</tr>
</table>''' % acquisition_info
return placemark_node
kml_filename = kml_filename or self.output_file
assert kml_filename, 'Output filename must be specified'
wrs_dom_tree = xml.dom.minidom.parse(wrs_shapefile)
wrs_document_element = wrs_dom_tree.documentElement
wrs_document_node = self.getChildNodesByName(wrs_document_element, 'Document')[0]
kml_dom_tree = xml.dom.minidom.getDOMImplementation().createDocument(wrs_document_element.namespaceURI,
'kml',
wrs_dom_tree.doctype)
kml_document_element = kml_dom_tree.documentElement
# Copy document attributes
for attribute_value in wrs_document_element.attributes.items():
kml_document_element.setAttribute(attribute_value[0], attribute_value[1])
kml_document_node = kml_dom_tree.createElement('Document')
kml_document_element.appendChild(kml_document_node)
# Copy all child nodes of the "Document" node except placemarks
for wrs_child_node in [child_node for child_node in wrs_document_node.childNodes
if child_node.nodeName != 'Placemark']:
kml_child_node = kml_dom_tree.importNode(wrs_child_node, True)
kml_document_node.appendChild(kml_child_node)
# Update document name
doc_name = 'Geoscience Australia ARG-25 Landsat Scenes'
if self.satellite or self.sensor:
doc_name += ' for'
if self.satellite:
doc_name += ' %s' % self.satellite
if self.sensor:
doc_name += ' %s' % self.sensor
if self.start_date:
doc_name += ' from %s' % self.start_date
if self.end_date:
doc_name += ' to %s' % self.end_date
logger.debug('Setting document name to "%s"', doc_name)
self.getChildNodesByName(kml_document_node, 'name')[0].childNodes[0].data = doc_name
# Update style nodes as specified in self.style_dict
for style_node in self.getChildNodesByName(kml_document_node, 'Style'):
logger.debug('Style node found')
for tag_name in self.style_dict.keys():
tag_nodes = self.getChildNodesByName(style_node, tag_name)
if tag_nodes:
logger.debug('\tExisting tag node found for %s', tag_name)
tag_node = tag_nodes[0]
else:
logger.debug('\tCreating new tag node for %s', tag_name)
tag_node = kml_dom_tree.createElement(tag_name)
style_node.appendChild(tag_node)
for attribute_name in self.style_dict[tag_name].keys():
attribute_nodes = self.getChildNodesByName(tag_node, attribute_name)
if attribute_nodes:
logger.debug('\t\tExisting attribute node found for %s', attribute_name)
attribute_node = attribute_nodes[0]
text_node = attribute_node.childNodes[0]
text_node.data = str(self.style_dict[tag_name][attribute_name])
else:
logger.debug('\t\tCreating new attribute node for %s', attribute_name)
attribute_node = kml_dom_tree.createElement(attribute_name)
tag_node.appendChild(attribute_node)
text_node = kml_dom_tree.createTextNode(str(self.style_dict[tag_name][attribute_name]))
attribute_node.appendChild(text_node)
self.db_cursor = self.db_connection.cursor()
sql = """-- Find all NBAR acquisitions
select satellite_name as satellite, sensor_name as sensor,
x_ref as path, y_ref as row,
start_datetime, end_datetime,
dataset_path,
ll_lon, ll_lat,
lr_lon, lr_lat,
ul_lon, ul_lat,
ur_lon, ur_lat,
cloud_cover::integer, gcp_count::integer
from
(
select *
from dataset
where level_id = 2 -- NBAR
) dataset
inner join acquisition a using(acquisition_id)
inner join satellite using(satellite_id)
inner join sensor using(satellite_id, sensor_id)
where (%(start_date)s is null or end_datetime::date >= %(start_date)s)
and (%(end_date)s is null or end_datetime::date <= %(end_date)s)
and (%(satellite)s is null or satellite_tag = %(satellite)s)
and (%(sensor)s is null or sensor_name = %(sensor)s)
order by end_datetime
;
"""
params = {
'start_date': self.start_date,
'end_date': self.end_date,
'satellite': self.satellite,
'sensor': self.sensor
}
log_multiline(logger.debug, self.db_cursor.mogrify(sql, params), 'SQL', '\t')
self.db_cursor.execute(sql, params)
field_list = ['satellite',
'sensor',
'path',
'row',
'start_datetime',
'end_datetime',
'dataset_path',
'll_lon',
'll_lat',
'lr_lon',
'lr_lat',
'ul_lon',
'ul_lat',
'ur_lon',
'ur_lat',
'cloud_cover',
'gcp_count'
]
for record in self.db_cursor:
acquisition_info = {}
for field_index in range(len(field_list)):
acquisition_info[field_list[field_index]] = record[field_index]
acquisition_info['year'] = acquisition_info['end_datetime'].year
acquisition_info['month'] = acquisition_info['end_datetime'].month
acquisition_info['thumbnail_size'] = self.thumbnail_size
acquisition_info['dataset_name'] = re.search('[^/]+$', acquisition_info['dataset_path']).group(0)
log_multiline(logger.debug, acquisition_info, 'acquisition_info', '\t')
placemark_node = create_placemark_node(wrs_document_node, acquisition_info)
kml_document_node.appendChild(placemark_node)
logger.info('Writing KML to %s', kml_filename)
write_xml_file(kml_filename, kml_dom_tree)
def main():
skg = SceneKMLGenerator()
assert skg.output_file, 'No output file specified'
skg.generate()
if __name__ == '__main__':
main()
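#===============================================================================
# Illustrative usage (not part of the original script). The option names come
# from parse_args() above; the config path, date range, satellite/sensor and
# output filename are made-up example values.
#
#   python scene_kml_generator.py -C agdc_default.conf \
#       -s 01/01/2013 -e 31/12/2013 -a LS7 -n 'ETM+' \
#       -o ls7_2013_scenes.kml
#===============================================================================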
|
ama-jharrison/agdc
|
agdc/deprecated/scene_kml_generator.py
|
Python
|
apache-2.0
| 18,633
|
[
"NetCDF"
] |
c3419a2c884b43ec2aac6f7fa36c74ed199d5e3586e7bffdeb2411e539115f13
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
# Copyright 2017, National University of Ireland and The James Hutton Insitute
# Author: Nicholas Waters
#
# This code is part of the riboSeed package, and is governed by its licence.
# Please see the LICENSE file that should have been included as part of
# this package.
"""
"""
import os
import sys
import subprocess
import argparse
import multiprocessing
import glob
from Bio import SeqIO
import pandas as pd
from Bio.Blast.Applications import NcbiblastnCommandline
from .shared_methods import set_up_logging, combine_contigs
def get_args(test_args=None): # pragma: no cover
parser = argparse.ArgumentParser(prog="ribo score",
description="This does some simple blasting to detect correctness " +
"of riboSeed results")
parser.prog = "ribo score"
parser.add_argument("indir",
help="dir containing a genbank file, assembly files" +
"as fastas. Usually the 'mauve' dir in the riboSeed " +
"results")
parser.add_argument("-o", "--output", dest='output',
help="directory in which to place the output files",
default=None)
parser.add_argument("-l", "--flanking_length",
help="length of flanking regions, in bp; " +
"default: %(default)s",
default=1000, type=int, dest="flanking")
parser.add_argument("-p", "--min_percent", dest="min_percent",
help="minimum percent identity",
default=97, type=int)
parser.add_argument("-f", "--assembly_ext", dest="assembly_ext",
help="extenssion of reference, usually fasta",
default="fasta", type=str)
parser.add_argument("-g", "--ref_ext", dest="ref_ext",
help="extension of reference, usually .gb",
default="gb", type=str)
parser.add_argument("-F", "--blast_Full", dest="blast_full",
help="if true, blast full sequences along with " +
"just the flanking. Interpretation is not " +
"implemented currently as false positives cant " +
"be detected this way",
default=False, action="store_true")
parser.add_argument("-v", "--verbosity", dest='verbosity',
action="store",
default=2, type=int, choices=[1, 2, 3, 4, 5],
help="Logger writes debug to file in output dir; " +
"this sets verbosity level sent to stderr. " +
" 1 = debug(), 2 = info(), 3 = warning(), " +
"4 = error() and 5 = critical(); " +
"default: %(default)s")
# parser.add_argument("-t", "--blast_type",
# help="blastn or tblastx", default="tblastx")
if test_args is None:
args = parser.parse_args(sys.argv[2:])
else:
args = parser.parse_args(test_args)
return(args)
def make_nuc_nuc_recip_blast_cmds(
query_list, output, subject_file=None, logger=None):
"""given a file, make a blast cmd, and return path to output csv
"""
assert logger is not None, "must use logging"
blast_cmds = []
blast_outputs = []
recip_blast_outputs = []
for f in query_list:
# run forward, nuc against nuc, blast
output_path_tab = str(
os.path.join(output,
os.path.splitext(os.path.basename(f))[0] +
"_vs_ref.tab"))
blast_cline = NcbiblastnCommandline(query=f,
subject=subject_file,
# evalue=.001,
outfmt=6,
#outfmt="'6 qaccver saccver pident length mismatch gapopen qstart qend sstart send evalue bitscore qlen'",
out=output_path_tab)
add_params = str(" -num_threads 1 -num_alignments 50")
blast_command = str(str(blast_cline) + add_params)
blast_cmds.append(blast_command)
blast_outputs.append(output_path_tab)
# run reverse, nuc against nuc, blast
recip_output_path_tab = os.path.join(
output,
"ref_vs_" + os.path.splitext(os.path.basename(f))[0] + ".tab")
recip_blast_cline = NcbiblastnCommandline(
query=subject_file,
subject=f,
# evalue=.001,
outfmt=6,
#outfmt="'6 qaccver saccver pident length mismatch gapopen qstart qend sstart send evalue bitscore qlen'",
out=recip_output_path_tab)
recip_blast_command = str(str(recip_blast_cline) + add_params)
blast_cmds.append(recip_blast_command)
recip_blast_outputs.append(recip_output_path_tab)
return(blast_cmds, blast_outputs, recip_blast_outputs)
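# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): for a made-up query
# file "ref_region_1.fasta" and subject "combinedSnagFlanking.fasta", the
# forward/reciprocal command pair assembled above looks roughly like:
#
#   blastn -out <output>/ref_region_1_vs_ref.tab -outfmt 6 \
#       -query ref_region_1.fasta -subject combinedSnagFlanking.fasta \
#       -num_threads 1 -num_alignments 50
#   blastn -out <output>/ref_vs_ref_region_1.tab -outfmt 6 \
#       -query combinedSnagFlanking.fasta -subject ref_region_1.fasta \
#       -num_threads 1 -num_alignments 50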
def merge_outfiles(filelist, outfile):
"""
"""
# only grab .tab files, ie, the blast output
filelist = [i for i in filelist if i.split(".")[-1:] == ['tab']]
if len(filelist) == 1:
# print("only one file found! no merging needed")
return(filelist[0])
else:
# print("merging all the blast results to %s" % outfile)
nfiles = len(filelist)
fout = open(outfile, "a")
# first file:
with open(filelist[0]) as firstf:
for line in firstf:
fout.write(line)
# now the rest:
for num in range(1, nfiles):
with open(filelist[num]) as otherf:
for line in otherf:
fout.write(line)
fout.close()
return(outfile)
def BLAST_tab_to_df(path):
colnames = ["query_id", "subject_id", "identity_perc", "alignment_length",
"mismatches", "gap_opens", "q_start", "q_end", "s_start",
"s_end", "evalue", "bit_score"]
with open(path) as tab:
raw_csv_results = pd.read_csv(
tab, comment="#", sep="\t", names=colnames)
return raw_csv_results
def filter_recip_BLAST_df(df1, df2, min_percent, min_lens, logger=None):
""" results from pd.read_csv with default BLAST output 6 columns
returns a df
"""
assert logger is not None, "must use a logger"
logger.debug("shape of blast results")
logger.debug("shape of recip blast results")
# df1['genome'] = df1.query_id.str.split('_').str.get(0)
# df2['genome'] = df2.subject_id.str.split('_').str.get(0)
df1['genome'] = df1.query_id
df2['genome'] = df2.subject_id
logger.debug(df1.shape)
logger.debug(df2.shape)
# recip structure
filtered = pd.DataFrame(columns=df1.columns)
unq_subject = df1.subject_id.unique()
unq_query = df1.genome.unique()
recip_hits = []
nonrecip_hits = []
for gene in unq_subject:
for genome in unq_query:
logger.debug("Checking %s in %s for reciprocity" % (gene, genome))
tempdf1 = df1.loc[(df1["subject_id"] == gene) &
(df1["genome"] == genome), ]
tempdf2 = df2.loc[(df2["query_id"] == gene) &
(df2["genome"] == genome), ]
if tempdf1.empty or tempdf2.empty:
logger.info("skipping %s in %s", gene, genome)
else:
subset1 = tempdf1.loc[
(tempdf1["identity_perc"] > min_percent)
]
subset2 = tempdf2.loc[
(tempdf2["identity_perc"] > min_percent)
]
logger.debug("grouped df shape: ")
logger.debug(tempdf1.shape)
logger.debug("grouped df2 shape: " )
logger.debug(tempdf2.shape)
if subset1.empty or subset2.empty:
logger.info("No reciprocol hits for %s in %s", gene, genome)
logger.debug(tempdf1)
logger.debug(tempdf2)
nonrecip_hits.append([gene, genome])
else:
if subset1.iloc[0]["query_id"] == subset2.iloc[0]["subject_id"]:
recip_hits.append([gene, genome])
logger.debug("Reciprocol hits for %s in %s!", gene, genome)
if subset1.iloc[0]["alignment_length"] >= \
(min_lens[subset1.iloc[0]["query_id"]] - 0):
filtered = filtered.append(subset1)
logger.info("%s in %s passed min len test!", gene, genome)
else:
pass
else:
nonrecip_hits.append([gene, genome])
logger.debug("No reciprocol hits for %s in %s",
gene, genome)
# logger.debug(subset.shape)
logger.debug("Non-reciprocal genes:")
logger.debug(nonrecip_hits)
logger.debug("Reciprocal genes:")
logger.debug(recip_hits)
logger.debug("filtered shape:")
logger.debug(filtered.shape)
return(filtered)
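# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the reciprocity rule
# applied above, reduced to two single-row hits held in plain dicts. The
# sequence names are made up; a reference-region/assembly pair is kept only
# when the best forward hit and the best reverse hit name each other and both
# pass the identity threshold.
def _example_reciprocal_pair():
    forward_hit = {"query_id": "ref_16S_1", "subject_id": "assembly_region_1",
                   "identity_perc": 99.0}
    reverse_hit = {"query_id": "assembly_region_1", "subject_id": "ref_16S_1",
                   "identity_perc": 99.0}
    min_percent = 97
    reciprocal = (forward_hit["identity_perc"] > min_percent and
                  reverse_hit["identity_perc"] > min_percent and
                  forward_hit["query_id"] == reverse_hit["subject_id"])
    return reciprocal  # True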
def checkBlastForMisjoin(df, fasta, ref_lens, BUF, flanking, logger=None):
""" results from pd.read_csv with default BLAST output 6 columns
returns a list of [fasta, verdict, query, start_contig, end_contig] rows
"""
logger.debug("length of references:")
logger.debug(ref_lens)
df['name'] = df.query_id.str.replace("_upstream", "").str.replace("_downstream", "")
# df['name2'] = df.name.str.replace("_downstream", "")
df['query_name'] = df['name'].str.split('flanking').str.get(0)
where = []
for i, row in df.iterrows():
where.append("down" if "downstream" in row['query_id'] else "up")
df['where'] = where
assert logger is not None, "must use a logger"
# print(ref_lens)
print("\n")
queries = df.query_name.unique()
# subjects = df.subject_id.unique()
sdf = df.loc[(df["alignment_length"] > (flanking * 0.9) - BUF)]
naughty_nice_list = []
for query in queries:
logger.debug("checking hits for %s", query)
tempdf = sdf.loc[(df["query_name"] == query)]
for i, row in tempdf.iterrows():
# print("outer row")
subject_start = None
# if both start the same (around 1), we have the first hit
if row["s_start"] - 1 < BUF and abs(row["q_start"] - 1) < BUF:
subject_start = row["subject_id"]
ref_len = ref_lens[row["subject_id"]]
logger.debug("checking %s and %s, len %d",
query, subject_start, ref_len)
# print(tempdf)
foundpair = False
for i, innerrow in tempdf.iterrows():
subject_len = ref_lens[innerrow["subject_id"]]
subject_end = innerrow["subject_id"]
# if hit extends to end of reference
logger.debug("subject len: %s", subject_len)
logger.debug(innerrow)
logger.debug(abs(innerrow["s_end"] - subject_len))
if (abs(innerrow["s_end"] - subject_len)) < BUF:
# if same contig
if subject_start == subject_end:
naughty_nice_list.append(
[fasta, "good", query, subject_start, subject_end])
foundpair = True
else:
naughty_nice_list.append(
[fasta, "bad", query, subject_start, subject_end]
)
foundpair = True
if not foundpair:
naughty_nice_list.append(
[fasta, "?", query, subject_start, "?"])
print("Results for %s:" % fasta)
for line in naughty_nice_list:
print("\t".join(line))
print("\n")
return(naughty_nice_list)
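# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the verdict logic
# used above once the positional checks have been resolved. A flank whose
# hits start and end in the same reference region is "good", one bridging two
# different regions is "bad" (a likely misjoin), and one with no hit reaching
# a region end is "?".
def _example_misjoin_verdict(start_region, end_region):
    if end_region is None:
        return "?"
    return "good" if start_region == end_region else "bad"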
def write_results(df, fasta_name, outfile, logger=None):
#% parse output
assert logger is not None, "must use a logger"
logger.debug("writing out the results")
with open(outfile, "a") as outf:
outf.write("# {0} \n".format(fasta_name))
df.to_csv(outf)
def parseDirContents(dirname, ref_ext, assembly_ext):
"""retursn a tuple (ref, [assembly1, assembly2, etc])
"""
return (glob.glob(dirname + "*" + ref_ext)[0],
glob.glob(dirname + "*" + assembly_ext))
def getScanCmd(ref, outroot, other_args):
""" returns (cmd, path/to/dir/)
"""
if other_args != "":
other_args = " " + other_args # pad with space for easier testing
if ref.endswith(".gb"):
return (None, ref)
resulting_gb = os.path.join(outroot, "scan", "scannedScaffolds.gb")
return (
"ribo scan {0} --min_length 5000 -o {1}{2}".format(
ref,
os.path.join(outroot, "scan"),
other_args
), resulting_gb
)
def getSelectCmd(gb, outroot, other_args):
resulting_clusters = os.path.join(outroot, "select",
"riboSelect_grouped_loci.txt")
if other_args != "":
other_args = " " + other_args # pad with space for easier testing
return ("ribo select {0} -o {1}{2}".format(
gb,
os.path.join(outroot, "select"),
other_args
), resulting_clusters)
def getSnagCmd(scangb, cluster, flank, outroot, other_args=""):
if other_args != "":
other_args = " " + other_args # pad with space for easier testing
return ("ribo snag {0} {1} -l {2} --just_extract -o {3}{4}".format(
scangb,
cluster,
flank,
os.path.join(outroot, "snag"),
other_args
), os.path.join(outroot, "snag"))
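# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): with a made-up
# assembly "assembly.fasta", output root "scored", the default 1000 bp flank
# and the extra arguments wired up in main() below, the three helpers above
# yield commands along the lines of:
#
#   ribo scan assembly.fasta --min_length 5000 -o scored/scan --name riboScore
#   ribo select scored/scan/scannedScaffolds.gb -o scored/select -s 16S:23S
#   ribo snag scored/scan/scannedScaffolds.gb \
#       scored/select/riboSelect_grouped_loci.txt -l 1000 --just_extract \
#       -o scored/snag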
def check_scan_select_snag_retruncodes(subreturns, logger):
if subreturns[0].returncode != 0:
logger.error("error with riboScan! Check the riboScan log files")
sys.exit(1)
if subreturns[1].returncode != 0:
logger.error("error with riboSelect! Check the riboSelect log files")
sys.exit(1)
if subreturns[2].returncode != 0:
logger.info("error with riboSnag! This often happens if " +
"the assembly doesnt reconstruct any rDNAs.")
# note the lack of sys exit
def main(args, logger=None):
if args.output is None:
args.output = os.path.dirname(
os.path.join(args.indir, "")
) + "_riboScored"
output_root = os.path.abspath(os.path.expanduser(args.output))
if not os.path.isdir(output_root):
sys.stderr.write("creating output directory %s\n" % output_root)
os.makedirs(output_root)
else:
sys.stderr.write("Output Directory already exists!\n")
sys.exit(1)
log_path = os.path.join(output_root, "riboScore.log")
if logger is None:
logger = set_up_logging(verbosity=args.verbosity,
outfile=log_path,
name=__name__)
logger.debug("All settings used:")
for k, v in sorted(vars(args).items()):
logger.debug("{0}: {1}".format(k, v))
if not os.path.isdir(os.path.join(args.indir, "")) or len(
os.listdir(os.path.join(args.indir, ""))) == 0:
logger.error("input directory doesnt exist or is empty! Exiting...")
sys.exit(1)
gb, fastas = parseDirContents(dirname=os.path.join(args.indir, ""),
ref_ext=args.ref_ext,
assembly_ext=args.assembly_ext)
# snags from reference
bs_dir1 = os.path.join(output_root, "bridgeSeeds_ref")
scancmd1, scangb1 = getScanCmd(ref=gb, outroot=bs_dir1, other_args="--name riboScore")
selectcmd1, cluster1 = getSelectCmd(gb=scangb1, outroot=bs_dir1,
other_args="-s 16S:23S")
snagcmd1, snagdir1 = getSnagCmd(scangb=scangb1, cluster=cluster1,
flank=args.flanking,
outroot=bs_dir1,
other_args="")
logger.info(
"Running riboScan, riboSelect, and riboSnag on reference: %s", gb)
report_list = []
for i in [scancmd1, selectcmd1, snagcmd1]:
if i is None:
continue
logger.debug(i)
subprocess.run(
[i],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
for index, fasta in enumerate(fastas):
logger.debug("processing %s", fasta)
this_root = os.path.join(
args.output, os.path.splitext(os.path.basename(fasta))[0])
bs_dir2 = os.path.join(this_root, "bridgeSeeds_contigs")
os.makedirs(bs_dir2)
# snags from assembly
scancmd2, scangb2 = getScanCmd(ref=fasta, outroot=bs_dir2,
other_args='--name riboScore')
selectcmd2, cluster2 = getSelectCmd(gb=scangb2, outroot=bs_dir2,
other_args="-s 16S:23S")
snagcmd2, snagdir2 = getSnagCmd(scangb=scangb2, cluster=cluster2,
flank=args.flanking,
outroot=bs_dir2)
logger.info(
"Running riboScan, riboSelect, and riboSnag on " +
"%s, assembly %d of %d",
fasta, index + 1, len(fastas))
returncodes = []
for i in [scancmd2, selectcmd2, snagcmd2]:
if i is None:
continue
logger.debug(i)
returncodes.append(subprocess.run(
[i],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False)) # we check later due to likely de novo failure
check_scan_select_snag_retruncodes(
subreturns=returncodes, logger=logger)
ref_snags = sorted(glob.glob(
snagdir1 + "/*_riboSnag.fasta"))
if args.blast_full:
full_blast_results = os.path.join(this_root, "BLAST")
os.makedirs(full_blast_results)
combined_full_snags = combine_contigs(
contigs_dir=snagdir2,
pattern="*riboSnag",
contigs_name="combinedSnags",
logger=logger)
commands, paths_to_outputs, paths_to_recip_outputs = \
make_nuc_nuc_recip_blast_cmds(
query_list=ref_snags,
subject_file=combined_full_snags,
output=full_blast_results,
logger=logger)
else:
commands = []
contig_snags = sorted(glob.glob(
os.path.join(snagdir2, "") +
"*_riboSnag.fasta"))
contig_snags_flanking = sorted(glob.glob(
os.path.join(snagdir2, "flanking_regions_output", "") +
"*_riboSnag_flanking_regions.fasta"))
logger.debug(contig_snags)
logger.debug(contig_snags_flanking)
# combine the assembly contigs
if len(contig_snags) == 0:
report_list.append("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n".format(
os.path.abspath(os.path.expanduser(args.indir)), # 0
os.path.basename(fasta), # 1
len(ref_snags), # 2
0, # 3
0, # 4
0 # 5
))
continue
combined_flanking_snags = combine_contigs(
contigs_dir=os.path.join(
snagdir2, "flanking_regions_output", ""),
pattern="*riboSnag_flanking_regions",
contigs_name="combinedSnagFlanking",
logger=logger)
ref_snag_dict = {}
contig_snag_dict = {}
for snag in ref_snags:
rec = SeqIO.read(snag, "fasta")
ref_snag_dict[rec.id] = len(rec.seq)
for snag in contig_snags:
rec = SeqIO.read(snag, "fasta")
contig_snag_dict[rec.id] = len(rec.seq)
logger.debug(ref_snag_dict)
logger.debug(contig_snag_dict)
flanking_blast_results = os.path.join(this_root, "BLAST_flanking")
os.makedirs(flanking_blast_results)
f_commands, f_paths_to_outputs, f_paths_to_recip_outputs = \
make_nuc_nuc_recip_blast_cmds(
query_list=ref_snags,
subject_file=combined_flanking_snags,
output=flanking_blast_results,
logger=logger)
# check for existing blast results
pool = multiprocessing.Pool()
logger.debug("Running the following commands in parallel " +
"(this could take a while):")
logger.debug("\n" + "\n".join([x for x in commands + f_commands]))
logger.info("Running BLAST commands")
results = [
pool.apply_async(subprocess.run,
(cmd,),
{"shell": sys.platform != "win32",
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
"check": True})
for cmd in commands + f_commands]
pool.close()
pool.join()
reslist = []
reslist.append([r.get() for r in results])
logger.info("Parsing BLAST results")
if args.blast_full:
merged_tab = merge_outfiles(
filelist=paths_to_outputs,
outfile=os.path.join(this_root, "merged_results.tab"))
recip_merged_tab = merge_outfiles(
filelist=paths_to_recip_outputs,
outfile=os.path.join(this_root, "recip_merged_results.tab"))
resultsdf = BLAST_tab_to_df(merged_tab)
recip_resultsdf = BLAST_tab_to_df(recip_merged_tab)
filtered_hits = filter_recip_BLAST_df(
df1=resultsdf,
df2=recip_resultsdf,
min_lens=ref_snag_dict,
min_percent=args.min_percent,
logger=logger)
write_results(
outfile=os.path.join(output_root,
"riboScore_hits_fulllength.txt"),
fasta_name=fasta,
df=filtered_hits, logger=logger)
f_merged_tab = merge_outfiles(
filelist=f_paths_to_outputs,
outfile=os.path.join(
this_root, "merged_flanking_results.tab"))
f_recip_merged_tab = merge_outfiles(
filelist=f_paths_to_recip_outputs,
outfile=os.path.join(
this_root, "recip_merged_flanking_results.tab"))
# this currently doesn't get used
f_resultsdf = BLAST_tab_to_df(f_merged_tab)
# we use the reciprocal results
f_recip_resultsdf = BLAST_tab_to_df(f_recip_merged_tab)
# 5 columns: [fasta, good/bad/?, query, startseq, end_seq]
flanking_hits = checkBlastForMisjoin(
fasta=fasta,
df=f_recip_resultsdf,
ref_lens=ref_snag_dict,
flanking=args.flanking,
BUF=50, logger=logger)
with open(os.path.join(output_root, "riboScore_hits.txt"), "a") as f:
for line in flanking_hits:
f.write("\t".join(line) + "\n")
good_hits = 0 + sum([1 for x in flanking_hits if x[1] == "good"])
ambig_hits = 0 + sum([1 for x in flanking_hits if x[1] == "?"])
bad_hits = 0 + sum([1 for x in flanking_hits if x[1] == "bad"])
report_list.append("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n".format(
os.path.abspath(os.path.expanduser(args.indir)), # 0
os.path.basename(fasta), # 1
len(ref_snags), # 2
good_hits, # 3
ambig_hits, # 4
bad_hits # 5
))
logger.debug("report list:")
logger.debug(report_list)
with open(os.path.join(output_root, "riboScore_report.txt"), "a") as r:
for line in report_list:
r.write(line)
|
nickp60/riboSeed
|
riboSeed/riboScore.py
|
Python
|
mit
| 24,260
|
[
"BLAST"
] |
a2c23051fdcd6d7dc64b5c772e2269c1df7e95c00538f7a055216b165627b96d
|
#ImportModules
import ShareYourSystem as SYS
#Definition
MyBrianer=SYS.BrianerClass(
).set(
'-Populations',
[
(
'ManagingBeforeSetVariable',
{
'NeurongroupingBrianKwargDict':
{
'model':
'''
dv/dt = (ge+gi-(v+49*mV))/(20*ms) : volt
dge/dt = -ge/(5*ms) : volt
dgi/dt = -gi/(10*ms) : volt
''',
'threshold':'v>-50*mV',
'reset':'v=-60*mV'
},
'get':'/-Spikes/|Run',
}
),
(
'set',
{
'#liarg:#lambda':{
'|#NeuronStr':{
'get':'>>self.NeurongroupingBrianKwargDict[\'N\']=#UnitsInt',
}
},
'#map':[
['#NeuronStr','#UnitsInt'],
[
['E','3200'],
['I','800']
]
]
}
)
]
).network(
['Populations']
).brian()
#print
print('MyBrianer is ')
SYS._print(MyBrianer)
print(
MyBrianer['/-Populations/|E'].NeurongroupedBrianVariable.equations._equations.keys()
)
"""
#init
import brian2
map(
lambda __BrianedNeuronGroup:
__BrianedNeuronGroup.__setattr__(
'v',
-60*brian2.mV
),
MyBrianer.BrianedNeuronGroupsList
)
#run
MyBrianer.simulate(300)
#plot
ME=MyBrianer['/-Populations/|E/-Spikes/|Run'].SpikeMonitor
MI=MyBrianer['/-Populations/|I/-Spikes/|Run'].SpikeMonitor
from matplotlib import pyplot
pyplot.plot(ME.t/brian2.ms, ME.i, 'r.')
pyplot.plot(MI.t/brian2.ms, ME.source.N+MI.i, 'b.')
pyplot.show()
"""
|
Ledoux/ShareYourSystem
|
Pythonlogy/draft/Simulaters/Brianer/draft/01_ExampleDoc.py
|
Python
|
mit
| 1,386
|
[
"Brian"
] |
9ef1d86a69149e29639bb1489d65f8a4c37673ad54af40852922d71e37a967ea
|
from distutils.core import setup
setup(
name='RMQ-Utils',
version='0.0.5',
author='Brian Hines',
author_email='brian@projectweekend.net',
packages=['rmq_utils'],
url='https://github.com/projectweekend/RMQ-Utils',
license='LICENSE.txt',
description='Utilities for managing RabbitMQ.',
long_description=open('README.txt').read(),
install_requires=[
"pyrabbit == 1.1.0",
"pika == 0.9.14",
],
)
|
projectweekend/RMQ-Utils
|
setup.py
|
Python
|
mit
| 452
|
[
"Brian"
] |
be68ed3a85a70cd66546b367aeccad58aab82b83d0623160202e37b2f266c1d2
|
from octopus.modules.es.testindex import ESTestCase
from service import licences
class TestModels(ESTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_01_variations(self):
vars = licences.make_variations(["a", "b", "c"])
assert "a b c" in vars
assert "a b-c" in vars
assert "a-b c" in vars
assert "a-b-c" in vars
assert len(vars) == 4
def test_02_variant_map(self):
vmap = licences.make_variation_map(["a", "b", "c"], "abc")
vars = vmap.keys()
assert "a b c" in vars
assert "a b-c" in vars
assert "a-b c" in vars
assert "a-b-c" in vars
assert "A B C" in vars
assert "A B-C" in vars
assert "A-B C" in vars
assert "A-B-C" in vars
assert len(vars) == 8
vals = vmap.values()
for v in vals:
assert v == "abc"
|
CottageLabs/oacwellcome
|
service/tests/unit/test_licences.py
|
Python
|
apache-2.0
| 926
|
[
"Octopus"
] |
480cec6b7623059255f0101a73755c03061e7de6ba538a02166813bb5466e75c
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
from peacock.utils.FileCache import FileCache
from peacock.utils import Testing
from PyQt5 import QtWidgets
class Tests(Testing.PeacockTester):
qapp = QtWidgets.QApplication([])
def checkFileCache(self, fc, dirty=True, val={}, path_data={}):
self.assertEqual(fc.dirty, dirty)
self.assertEqual(fc.val, val)
self.assertEqual(fc.path_data, path_data)
@unittest.skip("Needs update for Python 3")
def testBasic(self):
key = "test_FileCache"
obj = {"foo": "bar"}
FileCache.clearAll(key)
fc = FileCache(key, "/no_exist", 1)
self.checkFileCache(fc)
self.assertEqual(fc.no_exist, True)
ret = fc.add(obj)
self.assertEqual(ret, False)
self.assertEqual(fc.dirty, True)
exe_path = Testing.find_moose_test_exe()
fc = FileCache(key, exe_path, 1)
self.checkFileCache(fc)
val = fc.read()
self.assertEqual(val, None)
ret = fc.add(obj)
self.assertEqual(ret, True)
self.assertEqual(fc.dirty, False)
ret = fc.add(obj)
self.assertEqual(ret, False)
self.assertEqual(fc.dirty, False)
val = fc.read()
self.assertEqual(val, obj)
self.assertEqual(fc.dirty, False)
fc = FileCache(key, exe_path, 1)
self.assertEqual(fc.dirty, False)
# different data version
fc = FileCache(key, exe_path, 2)
self.assertEqual(fc.dirty, True)
FileCache.clearAll(key)
if __name__ == '__main__':
Testing.run_tests()
|
nuclear-wizard/moose
|
python/peacock/tests/utils/test_FileCache.py
|
Python
|
lgpl-2.1
| 1,899
|
[
"MOOSE"
] |
cdf9a31ab1682b8b338c01356139c5789a5d8a636af688d634c0ecf6c4dd8699
|
from __future__ import absolute_import
from . import TypeSlots
from .ExprNodes import not_a_constant
import cython
cython.declare(UtilityCode=object, EncodedString=object, BytesLiteral=object,
Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object,
UtilNodes=object)
from . import Nodes
from . import ExprNodes
from . import PyrexTypes
from . import Visitor
from . import Builtin
from . import UtilNodes
from . import Options
from .Code import UtilityCode, TempitaUtilityCode
from .StringEncoding import EncodedString, BytesLiteral
from .Errors import error
from .ParseTreeTransforms import SkipDeclarations
import copy
import codecs
try:
from __builtin__ import reduce
except ImportError:
from functools import reduce
try:
from __builtin__ import basestring
except ImportError:
basestring = str # Python 3
def load_c_utility(name):
return UtilityCode.load_cached(name, "Optimize.c")
def unwrap_coerced_node(node, coercion_nodes=(ExprNodes.CoerceToPyTypeNode, ExprNodes.CoerceFromPyTypeNode)):
if isinstance(node, coercion_nodes):
return node.arg
return node
def unwrap_node(node):
while isinstance(node, UtilNodes.ResultRefNode):
node = node.expression
return node
def is_common_value(a, b):
a = unwrap_node(a)
b = unwrap_node(b)
if isinstance(a, ExprNodes.NameNode) and isinstance(b, ExprNodes.NameNode):
return a.name == b.name
if isinstance(a, ExprNodes.AttributeNode) and isinstance(b, ExprNodes.AttributeNode):
return not a.is_py_attr and is_common_value(a.obj, b.obj) and a.attribute == b.attribute
return False
def filter_none_node(node):
if node is not None and node.constant_result is None:
return None
return node
class _YieldNodeCollector(Visitor.TreeVisitor):
"""
YieldExprNode finder for generator expressions.
"""
def __init__(self):
Visitor.TreeVisitor.__init__(self)
self.yield_stat_nodes = {}
self.yield_nodes = []
visit_Node = Visitor.TreeVisitor.visitchildren
def visit_YieldExprNode(self, node):
self.yield_nodes.append(node)
self.visitchildren(node)
def visit_ExprStatNode(self, node):
self.visitchildren(node)
if node.expr in self.yield_nodes:
self.yield_stat_nodes[node.expr] = node
# everything below these nodes is out of scope:
def visit_GeneratorExpressionNode(self, node):
pass
def visit_LambdaNode(self, node):
pass
def _find_single_yield_expression(node):
collector = _YieldNodeCollector()
collector.visitchildren(node)
if len(collector.yield_nodes) != 1:
return None, None
yield_node = collector.yield_nodes[0]
try:
return yield_node.arg, collector.yield_stat_nodes[yield_node]
except KeyError:
return None, None
class IterationTransform(Visitor.EnvTransform):
"""Transform some common for-in loop patterns into efficient C loops:
- for-in-dict loop becomes a while loop calling PyDict_Next()
- for-in-enumerate is replaced by an external counter variable
- for-in-range loop becomes a plain C for loop
"""
def visit_PrimaryCmpNode(self, node):
if node.is_ptr_contains():
# for t in operand2:
# if operand1 == t:
# res = True
# break
# else:
# res = False
pos = node.pos
result_ref = UtilNodes.ResultRefNode(node)
if isinstance(node.operand2, ExprNodes.IndexNode):
base_type = node.operand2.base.type.base_type
else:
base_type = node.operand2.type.base_type
target_handle = UtilNodes.TempHandle(base_type)
target = target_handle.ref(pos)
cmp_node = ExprNodes.PrimaryCmpNode(
pos, operator=u'==', operand1=node.operand1, operand2=target)
if_body = Nodes.StatListNode(
pos,
stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=1)),
Nodes.BreakStatNode(pos)])
if_node = Nodes.IfStatNode(
pos,
if_clauses=[Nodes.IfClauseNode(pos, condition=cmp_node, body=if_body)],
else_clause=None)
for_loop = UtilNodes.TempsBlockNode(
pos,
temps = [target_handle],
body = Nodes.ForInStatNode(
pos,
target=target,
iterator=ExprNodes.IteratorNode(node.operand2.pos, sequence=node.operand2),
body=if_node,
else_clause=Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=0))))
for_loop = for_loop.analyse_expressions(self.current_env())
for_loop = self.visit(for_loop)
new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop)
if node.operator == 'not_in':
new_node = ExprNodes.NotNode(pos, operand=new_node)
return new_node
else:
self.visitchildren(node)
return node
def visit_ForInStatNode(self, node):
self.visitchildren(node)
return self._optimise_for_loop(node, node.iterator.sequence)
def _optimise_for_loop(self, node, iterator, reversed=False):
if iterator.type is Builtin.dict_type:
# like iterating over dict.keys()
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_dict_iteration(
node, dict_obj=iterator, method=None, keys=True, values=False)
# C array (slice) iteration?
if iterator.type.is_ptr or iterator.type.is_array:
return self._transform_carray_iteration(node, iterator, reversed=reversed)
if iterator.type is Builtin.bytes_type:
return self._transform_bytes_iteration(node, iterator, reversed=reversed)
if iterator.type is Builtin.unicode_type:
return self._transform_unicode_iteration(node, iterator, reversed=reversed)
# the rest is based on function calls
if not isinstance(iterator, ExprNodes.SimpleCallNode):
return node
if iterator.args is None:
arg_count = iterator.arg_tuple and len(iterator.arg_tuple.args) or 0
else:
arg_count = len(iterator.args)
if arg_count and iterator.self is not None:
arg_count -= 1
function = iterator.function
# dict iteration?
if function.is_attribute and not reversed and not arg_count:
base_obj = iterator.self or function.obj
method = function.attribute
# in Py3, items() is equivalent to Py2's iteritems()
is_safe_iter = self.global_scope().context.language_level >= 3
if not is_safe_iter and method in ('keys', 'values', 'items'):
# try to reduce this to the corresponding .iter*() methods
if isinstance(base_obj, ExprNodes.SimpleCallNode):
inner_function = base_obj.function
if (inner_function.is_name and inner_function.name == 'dict'
and inner_function.entry
and inner_function.entry.is_builtin):
# e.g. dict(something).items() => safe to use .iter*()
is_safe_iter = True
keys = values = False
if method == 'iterkeys' or (is_safe_iter and method == 'keys'):
keys = True
elif method == 'itervalues' or (is_safe_iter and method == 'values'):
values = True
elif method == 'iteritems' or (is_safe_iter and method == 'items'):
keys = values = True
if keys or values:
return self._transform_dict_iteration(
node, base_obj, method, keys, values)
# enumerate/reversed ?
if iterator.self is None and function.is_name and \
function.entry and function.entry.is_builtin:
if function.name == 'enumerate':
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_enumerate_iteration(node, iterator)
elif function.name == 'reversed':
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_reversed_iteration(node, iterator)
# range() iteration?
if Options.convert_range and node.target.type.is_int:
if iterator.self is None and function.is_name and \
function.entry and function.entry.is_builtin and \
function.name in ('range', 'xrange'):
return self._transform_range_iteration(node, iterator, reversed=reversed)
return node
def _transform_reversed_iteration(self, node, reversed_function):
args = reversed_function.arg_tuple.args
if len(args) == 0:
error(reversed_function.pos,
"reversed() requires an iterable argument")
return node
elif len(args) > 1:
error(reversed_function.pos,
"reversed() takes exactly 1 argument")
return node
arg = args[0]
# reversed(list/tuple) ?
if arg.type in (Builtin.tuple_type, Builtin.list_type):
node.iterator.sequence = arg.as_none_safe_node("'NoneType' object is not iterable")
node.iterator.reversed = True
return node
return self._optimise_for_loop(node, arg, reversed=True)
PyBytes_AS_STRING_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_char_ptr_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
])
PyBytes_GET_SIZE_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
])
def _transform_bytes_iteration(self, node, slice_node, reversed=False):
target_type = node.target.type
if not target_type.is_int and target_type is not Builtin.bytes_type:
# bytes iteration returns bytes objects in Py2, but
# integers in Py3
return node
unpack_temp_node = UtilNodes.LetRefNode(
slice_node.as_none_safe_node("'NoneType' is not iterable"))
slice_base_node = ExprNodes.PythonCapiCallNode(
slice_node.pos, "PyBytes_AS_STRING",
self.PyBytes_AS_STRING_func_type,
args = [unpack_temp_node],
is_temp = 0,
)
len_node = ExprNodes.PythonCapiCallNode(
slice_node.pos, "PyBytes_GET_SIZE",
self.PyBytes_GET_SIZE_func_type,
args = [unpack_temp_node],
is_temp = 0,
)
return UtilNodes.LetNode(
unpack_temp_node,
self._transform_carray_iteration(
node,
ExprNodes.SliceIndexNode(
slice_node.pos,
base = slice_base_node,
start = None,
step = None,
stop = len_node,
type = slice_base_node.type,
is_temp = 1,
),
reversed = reversed))
PyUnicode_READ_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ucs4_type, [
PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_type, None),
PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None)
])
init_unicode_iteration_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_int_type, [
PyrexTypes.CFuncTypeArg("s", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_ptr_type, None),
PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_ptr_type, None)
],
exception_value = '-1')
def _transform_unicode_iteration(self, node, slice_node, reversed=False):
if slice_node.is_literal:
# try to reduce to byte iteration for plain Latin-1 strings
try:
bytes_value = BytesLiteral(slice_node.value.encode('latin1'))
except UnicodeEncodeError:
pass
else:
bytes_slice = ExprNodes.SliceIndexNode(
slice_node.pos,
base=ExprNodes.BytesNode(
slice_node.pos, value=bytes_value,
constant_result=bytes_value,
type=PyrexTypes.c_char_ptr_type).coerce_to(
PyrexTypes.c_uchar_ptr_type, self.current_env()),
start=None,
stop=ExprNodes.IntNode(
slice_node.pos, value=str(len(bytes_value)),
constant_result=len(bytes_value),
type=PyrexTypes.c_py_ssize_t_type),
type=Builtin.unicode_type, # hint for Python conversion
)
return self._transform_carray_iteration(node, bytes_slice, reversed)
unpack_temp_node = UtilNodes.LetRefNode(
slice_node.as_none_safe_node("'NoneType' is not iterable"))
start_node = ExprNodes.IntNode(
node.pos, value='0', constant_result=0, type=PyrexTypes.c_py_ssize_t_type)
length_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
end_node = length_temp.ref(node.pos)
if reversed:
relation1, relation2 = '>', '>='
start_node, end_node = end_node, start_node
else:
relation1, relation2 = '<=', '<'
kind_temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
data_temp = UtilNodes.TempHandle(PyrexTypes.c_void_ptr_type)
counter_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
target_value = ExprNodes.PythonCapiCallNode(
slice_node.pos, "__Pyx_PyUnicode_READ",
self.PyUnicode_READ_func_type,
args = [kind_temp.ref(slice_node.pos),
data_temp.ref(slice_node.pos),
counter_temp.ref(node.target.pos)],
is_temp = False,
)
if target_value.type != node.target.type:
target_value = target_value.coerce_to(node.target.type,
self.current_env())
target_assign = Nodes.SingleAssignmentNode(
pos = node.target.pos,
lhs = node.target,
rhs = target_value)
body = Nodes.StatListNode(
node.pos,
stats = [target_assign, node.body])
loop_node = Nodes.ForFromStatNode(
node.pos,
bound1=start_node, relation1=relation1,
target=counter_temp.ref(node.target.pos),
relation2=relation2, bound2=end_node,
step=None, body=body,
else_clause=node.else_clause,
from_range=True)
setup_node = Nodes.ExprStatNode(
node.pos,
expr = ExprNodes.PythonCapiCallNode(
slice_node.pos, "__Pyx_init_unicode_iteration",
self.init_unicode_iteration_func_type,
args = [unpack_temp_node,
ExprNodes.AmpersandNode(slice_node.pos, operand=length_temp.ref(slice_node.pos),
type=PyrexTypes.c_py_ssize_t_ptr_type),
ExprNodes.AmpersandNode(slice_node.pos, operand=data_temp.ref(slice_node.pos),
type=PyrexTypes.c_void_ptr_ptr_type),
ExprNodes.AmpersandNode(slice_node.pos, operand=kind_temp.ref(slice_node.pos),
type=PyrexTypes.c_int_ptr_type),
],
is_temp = True,
result_is_used = False,
utility_code=UtilityCode.load_cached("unicode_iter", "Optimize.c"),
))
return UtilNodes.LetNode(
unpack_temp_node,
UtilNodes.TempsBlockNode(
node.pos, temps=[counter_temp, length_temp, data_temp, kind_temp],
body=Nodes.StatListNode(node.pos, stats=[setup_node, loop_node])))
def _transform_carray_iteration(self, node, slice_node, reversed=False):
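        # Illustrative note (added for clarity, not an original comment): a
        # loop like
        #     for x in my_carray[2:10]: ...
        # becomes a pointer-based for-from loop running from my_carray + 2 up
        # to (but excluding) my_carray + 10, dereferencing the pointer to
        # assign the loop target on each iteration.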
neg_step = False
if isinstance(slice_node, ExprNodes.SliceIndexNode):
slice_base = slice_node.base
start = filter_none_node(slice_node.start)
stop = filter_none_node(slice_node.stop)
step = None
if not stop:
if not slice_base.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
elif isinstance(slice_node, ExprNodes.IndexNode):
assert isinstance(slice_node.index, ExprNodes.SliceNode)
slice_base = slice_node.base
index = slice_node.index
start = filter_none_node(index.start)
stop = filter_none_node(index.stop)
step = filter_none_node(index.step)
if step:
if not isinstance(step.constant_result, (int,long)) \
or step.constant_result == 0 \
or step.constant_result > 0 and not stop \
or step.constant_result < 0 and not start:
if not slice_base.type.is_pyobject:
error(step.pos, "C array iteration requires known step size and end index")
return node
else:
# step sign is handled internally by ForFromStatNode
step_value = step.constant_result
if reversed:
step_value = -step_value
neg_step = step_value < 0
step = ExprNodes.IntNode(step.pos, type=PyrexTypes.c_py_ssize_t_type,
value=str(abs(step_value)),
constant_result=abs(step_value))
elif slice_node.type.is_array:
if slice_node.type.size is None:
error(slice_node.pos, "C array iteration requires known end index")
return node
slice_base = slice_node
start = None
stop = ExprNodes.IntNode(
slice_node.pos, value=str(slice_node.type.size),
type=PyrexTypes.c_py_ssize_t_type, constant_result=slice_node.type.size)
step = None
else:
if not slice_node.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
if start:
start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop:
stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop is None:
if neg_step:
stop = ExprNodes.IntNode(
slice_node.pos, value='-1', type=PyrexTypes.c_py_ssize_t_type, constant_result=-1)
else:
error(slice_node.pos, "C array iteration requires known step size and end index")
return node
if reversed:
if not start:
start = ExprNodes.IntNode(slice_node.pos, value="0", constant_result=0,
type=PyrexTypes.c_py_ssize_t_type)
# if step was provided, it was already negated above
start, stop = stop, start
ptr_type = slice_base.type
if ptr_type.is_array:
ptr_type = ptr_type.element_ptr_type()
carray_ptr = slice_base.coerce_to_simple(self.current_env())
if start and start.constant_result != 0:
start_ptr_node = ExprNodes.AddNode(
start.pos,
operand1=carray_ptr,
operator='+',
operand2=start,
type=ptr_type)
else:
start_ptr_node = carray_ptr
if stop and stop.constant_result != 0:
stop_ptr_node = ExprNodes.AddNode(
stop.pos,
operand1=ExprNodes.CloneNode(carray_ptr),
operator='+',
operand2=stop,
type=ptr_type
).coerce_to_simple(self.current_env())
else:
stop_ptr_node = ExprNodes.CloneNode(carray_ptr)
counter = UtilNodes.TempHandle(ptr_type)
counter_temp = counter.ref(node.target.pos)
if slice_base.type.is_string and node.target.type.is_pyobject:
# special case: char* -> bytes/unicode
if slice_node.type is Builtin.unicode_type:
target_value = ExprNodes.CastNode(
ExprNodes.DereferenceNode(
node.target.pos, operand=counter_temp,
type=ptr_type.base_type),
PyrexTypes.c_py_ucs4_type).coerce_to(
node.target.type, self.current_env())
else:
# char* -> bytes coercion requires slicing, not indexing
target_value = ExprNodes.SliceIndexNode(
node.target.pos,
start=ExprNodes.IntNode(node.target.pos, value='0',
constant_result=0,
type=PyrexTypes.c_int_type),
stop=ExprNodes.IntNode(node.target.pos, value='1',
constant_result=1,
type=PyrexTypes.c_int_type),
base=counter_temp,
type=Builtin.bytes_type,
is_temp=1)
elif node.target.type.is_ptr and not node.target.type.assignable_from(ptr_type.base_type):
# Allow iteration with pointer target to avoid copy.
target_value = counter_temp
else:
# TODO: can this safely be replaced with DereferenceNode() as above?
target_value = ExprNodes.IndexNode(
node.target.pos,
index=ExprNodes.IntNode(node.target.pos, value='0',
constant_result=0,
type=PyrexTypes.c_int_type),
base=counter_temp,
is_buffer_access=False,
type=ptr_type.base_type)
if target_value.type != node.target.type:
target_value = target_value.coerce_to(node.target.type,
self.current_env())
target_assign = Nodes.SingleAssignmentNode(
pos = node.target.pos,
lhs = node.target,
rhs = target_value)
body = Nodes.StatListNode(
node.pos,
stats = [target_assign, node.body])
relation1, relation2 = self._find_for_from_node_relations(neg_step, reversed)
for_node = Nodes.ForFromStatNode(
node.pos,
bound1=start_ptr_node, relation1=relation1,
target=counter_temp,
relation2=relation2, bound2=stop_ptr_node,
step=step, body=body,
else_clause=node.else_clause,
from_range=True)
return UtilNodes.TempsBlockNode(
node.pos, temps=[counter],
body=for_node)
def _transform_enumerate_iteration(self, node, enumerate_function):
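        # Illustrative note (added for clarity, not an original comment): a
        # loop like
        #     for i, x in enumerate(seq, start): ...
        # is rewritten roughly as
        #     counter = start
        #     for x in seq:
        #         i = counter
        #         counter = counter + 1
        #         ...
        # so that only the plain iterable is iterated and the index becomes a
        # simple increment.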
args = enumerate_function.arg_tuple.args
if len(args) == 0:
error(enumerate_function.pos,
"enumerate() requires an iterable argument")
return node
elif len(args) > 2:
error(enumerate_function.pos,
"enumerate() takes at most 2 arguments")
return node
if not node.target.is_sequence_constructor:
# leave this untouched for now
return node
targets = node.target.args
if len(targets) != 2:
# leave this untouched for now
return node
enumerate_target, iterable_target = targets
counter_type = enumerate_target.type
if not counter_type.is_pyobject and not counter_type.is_int:
# nothing we can do here, I guess
return node
if len(args) == 2:
start = unwrap_coerced_node(args[1]).coerce_to(counter_type, self.current_env())
else:
start = ExprNodes.IntNode(enumerate_function.pos,
value='0',
type=counter_type,
constant_result=0)
temp = UtilNodes.LetRefNode(start)
inc_expression = ExprNodes.AddNode(
enumerate_function.pos,
operand1 = temp,
operand2 = ExprNodes.IntNode(node.pos, value='1',
type=counter_type,
constant_result=1),
operator = '+',
type = counter_type,
#inplace = True, # not worth using in-place operation for Py ints
is_temp = counter_type.is_pyobject
)
loop_body = [
Nodes.SingleAssignmentNode(
pos = enumerate_target.pos,
lhs = enumerate_target,
rhs = temp),
Nodes.SingleAssignmentNode(
pos = enumerate_target.pos,
lhs = temp,
rhs = inc_expression)
]
if isinstance(node.body, Nodes.StatListNode):
node.body.stats = loop_body + node.body.stats
else:
loop_body.append(node.body)
node.body = Nodes.StatListNode(
node.body.pos,
stats = loop_body)
node.target = iterable_target
node.item = node.item.coerce_to(iterable_target.type, self.current_env())
node.iterator.sequence = args[0]
# recurse into loop to check for further optimisations
return UtilNodes.LetNode(temp, self._optimise_for_loop(node, node.iterator.sequence))
def _find_for_from_node_relations(self, neg_step_value, reversed):
if reversed:
if neg_step_value:
return '<', '<='
else:
return '>', '>='
else:
if neg_step_value:
return '>=', '>'
else:
return '<=', '<'
def _transform_range_iteration(self, node, range_function, reversed=False):
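        # Illustrative note (added for clarity, not an original comment): a
        # loop like
        #     for i in range(2, 10, 3): ...
        # is turned into a C-level for-from loop roughly equivalent to
        #     for i from 2 <= i < 10 by 3: ...
        # provided the step is a compile-time constant with known sign.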
args = range_function.arg_tuple.args
if len(args) < 3:
step_pos = range_function.pos
step_value = 1
step = ExprNodes.IntNode(step_pos, value='1', constant_result=1)
else:
step = args[2]
step_pos = step.pos
if not isinstance(step.constant_result, (int, long)):
# cannot determine step direction
return node
step_value = step.constant_result
if step_value == 0:
# will lead to an error elsewhere
return node
step = ExprNodes.IntNode(step_pos, value=str(step_value),
constant_result=step_value)
if len(args) == 1:
bound1 = ExprNodes.IntNode(range_function.pos, value='0',
constant_result=0)
bound2 = args[0].coerce_to_integer(self.current_env())
else:
bound1 = args[0].coerce_to_integer(self.current_env())
bound2 = args[1].coerce_to_integer(self.current_env())
relation1, relation2 = self._find_for_from_node_relations(step_value < 0, reversed)
bound2_ref_node = None
if reversed:
bound1, bound2 = bound2, bound1
abs_step = abs(step_value)
if abs_step != 1:
if (isinstance(bound1.constant_result, (int, long)) and
isinstance(bound2.constant_result, (int, long))):
# calculate final bounds now
if step_value < 0:
begin_value = bound2.constant_result
end_value = bound1.constant_result
bound1_value = begin_value - abs_step * ((begin_value - end_value - 1) // abs_step) - 1
else:
begin_value = bound1.constant_result
end_value = bound2.constant_result
bound1_value = end_value + abs_step * ((begin_value - end_value - 1) // abs_step) + 1
bound1 = ExprNodes.IntNode(
bound1.pos, value=str(bound1_value), constant_result=bound1_value,
type=PyrexTypes.spanning_type(bound1.type, bound2.type))
else:
# evaluate the same expression as above at runtime
bound2_ref_node = UtilNodes.LetRefNode(bound2)
spanning_type = PyrexTypes.spanning_type(bound1.type, bound2.type)
if step.type.is_int and abs(step_value) < 0x7FFF:
# Avoid loss of integer precision warnings.
spanning_step_type = PyrexTypes.spanning_type(spanning_type, PyrexTypes.c_int_type)
else:
spanning_step_type = PyrexTypes.spanning_type(spanning_type, step.type)
if step_value < 0:
begin_value = bound2_ref_node
end_value = bound1
final_op = '-'
else:
begin_value = bound1
end_value = bound2_ref_node
final_op = '+'
bound1 = ExprNodes.binop_node(
bound1.pos,
operand1=ExprNodes.binop_node(
bound1.pos,
operand1=bound2_ref_node,
operator=final_op, # +/-
operand2=ExprNodes.MulNode(
bound1.pos,
operand1=ExprNodes.IntNode(
bound1.pos,
value=str(abs_step),
                                constant_result=abs_step,
type=spanning_step_type),
operator='*',
operand2=ExprNodes.DivNode(
bound1.pos,
operand1=ExprNodes.SubNode(
bound1.pos,
operand1=ExprNodes.SubNode(
bound1.pos,
operand1=begin_value,
operator='-',
operand2=end_value,
type=spanning_type),
operator='-',
operand2=ExprNodes.IntNode(
bound1.pos,
value='1',
constant_result=1),
type=spanning_step_type),
operator='//',
operand2=ExprNodes.IntNode(
bound1.pos,
value=str(abs_step),
                                constant_result=abs_step,
type=spanning_step_type),
type=spanning_step_type),
type=spanning_step_type),
type=spanning_step_type),
operator=final_op, # +/-
operand2=ExprNodes.IntNode(
bound1.pos,
value='1',
constant_result=1),
type=spanning_type)
if step_value < 0:
step_value = -step_value
step.value = str(step_value)
step.constant_result = step_value
step = step.coerce_to_integer(self.current_env())
if not bound2.is_literal:
# stop bound must be immutable => keep it in a temp var
bound2_is_temp = True
bound2 = bound2_ref_node or UtilNodes.LetRefNode(bound2)
else:
bound2_is_temp = False
for_node = Nodes.ForFromStatNode(
node.pos,
target=node.target,
bound1=bound1, relation1=relation1,
relation2=relation2, bound2=bound2,
step=step, body=node.body,
else_clause=node.else_clause,
from_range=True)
if bound2_is_temp:
for_node = UtilNodes.LetNode(bound2, for_node)
return for_node
def _transform_dict_iteration(self, node, dict_obj, method, keys, values):
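        # Illustrative note (added for clarity, not an original comment):
        # loops like
        #     for k, v in d.items(): ...
        # are rewritten into a while loop driven by __Pyx_dict_iterator() and
        # DictIterationNextNode, i.e. roughly PyDict_Next()-style iteration
        # for real dicts, without building an intermediate list of items.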
temps = []
temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
temps.append(temp)
dict_temp = temp.ref(dict_obj.pos)
temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
temps.append(temp)
pos_temp = temp.ref(node.pos)
key_target = value_target = tuple_target = None
if keys and values:
if node.target.is_sequence_constructor:
if len(node.target.args) == 2:
key_target, value_target = node.target.args
else:
# unusual case that may or may not lead to an error
return node
else:
tuple_target = node.target
elif keys:
key_target = node.target
else:
value_target = node.target
if isinstance(node.body, Nodes.StatListNode):
body = node.body
else:
body = Nodes.StatListNode(pos = node.body.pos,
stats = [node.body])
# keep original length to guard against dict modification
dict_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
temps.append(dict_len_temp)
dict_len_temp_addr = ExprNodes.AmpersandNode(
node.pos, operand=dict_len_temp.ref(dict_obj.pos),
type=PyrexTypes.c_ptr_type(dict_len_temp.type))
temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
temps.append(temp)
is_dict_temp = temp.ref(node.pos)
is_dict_temp_addr = ExprNodes.AmpersandNode(
node.pos, operand=is_dict_temp,
type=PyrexTypes.c_ptr_type(temp.type))
iter_next_node = Nodes.DictIterationNextNode(
dict_temp, dict_len_temp.ref(dict_obj.pos), pos_temp,
key_target, value_target, tuple_target,
is_dict_temp)
iter_next_node = iter_next_node.analyse_expressions(self.current_env())
body.stats[0:0] = [iter_next_node]
if method:
method_node = ExprNodes.StringNode(
dict_obj.pos, is_identifier=True, value=method)
dict_obj = dict_obj.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error = "PyExc_AttributeError",
format_args = [method])
else:
method_node = ExprNodes.NullNode(dict_obj.pos)
dict_obj = dict_obj.as_none_safe_node("'NoneType' object is not iterable")
def flag_node(value):
value = value and 1 or 0
return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)
result_code = [
Nodes.SingleAssignmentNode(
node.pos,
lhs = pos_temp,
rhs = ExprNodes.IntNode(node.pos, value='0',
constant_result=0)),
Nodes.SingleAssignmentNode(
dict_obj.pos,
lhs = dict_temp,
rhs = ExprNodes.PythonCapiCallNode(
dict_obj.pos,
"__Pyx_dict_iterator",
self.PyDict_Iterator_func_type,
utility_code = UtilityCode.load_cached("dict_iter", "Optimize.c"),
args = [dict_obj, flag_node(dict_obj.type is Builtin.dict_type),
method_node, dict_len_temp_addr, is_dict_temp_addr,
],
is_temp=True,
)),
Nodes.WhileStatNode(
node.pos,
condition = None,
body = body,
else_clause = node.else_clause
)
]
return UtilNodes.TempsBlockNode(
node.pos, temps=temps,
body=Nodes.StatListNode(
node.pos,
stats = result_code
))
PyDict_Iterator_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("is_dict", PyrexTypes.c_int_type, None),
PyrexTypes.CFuncTypeArg("method_name", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("p_is_dict", PyrexTypes.c_int_ptr_type, None),
])
class SwitchTransform(Visitor.EnvTransform):
"""
This transformation tries to turn long if statements into C switch statements.
    The requirement is that every clause is a comparison of the form
    'var == value' (or an 'or' of such comparisons), where 'var' is common
    to all clauses and both 'var' and the values are ints.
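
    For example (illustrative sketch, not verbatim compiler output), an if
    chain such as

        if x == 1: f()
        elif x == 2 or x == 3: g()
        else: h()

    can be emitted as a single C switch over x with cases 1, 2 and 3.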
"""
NO_MATCH = (None, None, None)
def extract_conditions(self, cond, allow_not_in):
while True:
if isinstance(cond, (ExprNodes.CoerceToTempNode,
ExprNodes.CoerceToBooleanNode)):
cond = cond.arg
elif isinstance(cond, ExprNodes.BoolBinopResultNode):
cond = cond.arg.arg
elif isinstance(cond, UtilNodes.EvalWithTempExprNode):
# this is what we get from the FlattenInListTransform
cond = cond.subexpression
elif isinstance(cond, ExprNodes.TypecastNode):
cond = cond.operand
else:
break
if isinstance(cond, ExprNodes.PrimaryCmpNode):
if cond.cascade is not None:
return self.NO_MATCH
elif cond.is_c_string_contains() and \
isinstance(cond.operand2, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
not_in = cond.operator == 'not_in'
if not_in and not allow_not_in:
return self.NO_MATCH
if isinstance(cond.operand2, ExprNodes.UnicodeNode) and \
cond.operand2.contains_surrogates():
# dealing with surrogates leads to different
# behaviour on wide and narrow Unicode
# platforms => refuse to optimise this case
return self.NO_MATCH
return not_in, cond.operand1, self.extract_in_string_conditions(cond.operand2)
elif not cond.is_python_comparison():
if cond.operator == '==':
not_in = False
elif allow_not_in and cond.operator == '!=':
not_in = True
else:
return self.NO_MATCH
# this looks somewhat silly, but it does the right
# checks for NameNode and AttributeNode
if is_common_value(cond.operand1, cond.operand1):
if cond.operand2.is_literal:
return not_in, cond.operand1, [cond.operand2]
elif getattr(cond.operand2, 'entry', None) \
and cond.operand2.entry.is_const:
return not_in, cond.operand1, [cond.operand2]
if is_common_value(cond.operand2, cond.operand2):
if cond.operand1.is_literal:
return not_in, cond.operand2, [cond.operand1]
elif getattr(cond.operand1, 'entry', None) \
and cond.operand1.entry.is_const:
return not_in, cond.operand2, [cond.operand1]
elif isinstance(cond, ExprNodes.BoolBinopNode):
if cond.operator == 'or' or (allow_not_in and cond.operator == 'and'):
allow_not_in = (cond.operator == 'and')
not_in_1, t1, c1 = self.extract_conditions(cond.operand1, allow_not_in)
not_in_2, t2, c2 = self.extract_conditions(cond.operand2, allow_not_in)
if t1 is not None and not_in_1 == not_in_2 and is_common_value(t1, t2):
if (not not_in_1) or allow_not_in:
return not_in_1, t1, c1+c2
return self.NO_MATCH
def extract_in_string_conditions(self, string_literal):
if isinstance(string_literal, ExprNodes.UnicodeNode):
charvals = list(map(ord, set(string_literal.value)))
charvals.sort()
return [ ExprNodes.IntNode(string_literal.pos, value=str(charval),
constant_result=charval)
for charval in charvals ]
else:
# this is a bit tricky as Py3's bytes type returns
# integers on iteration, whereas Py2 returns 1-char byte
# strings
characters = string_literal.value
characters = list(set([ characters[i:i+1] for i in range(len(characters)) ]))
characters.sort()
return [ ExprNodes.CharNode(string_literal.pos, value=charval,
constant_result=charval)
for charval in characters ]
def extract_common_conditions(self, common_var, condition, allow_not_in):
not_in, var, conditions = self.extract_conditions(condition, allow_not_in)
if var is None:
return self.NO_MATCH
elif common_var is not None and not is_common_value(var, common_var):
return self.NO_MATCH
elif not (var.type.is_int or var.type.is_enum) or sum([not (cond.type.is_int or cond.type.is_enum) for cond in conditions]):
return self.NO_MATCH
return not_in, var, conditions
def has_duplicate_values(self, condition_values):
# duplicated values don't work in a switch statement
seen = set()
for value in condition_values:
if value.has_constant_result():
if value.constant_result in seen:
return True
seen.add(value.constant_result)
else:
# this isn't completely safe as we don't know the
# final C value, but this is about the best we can do
try:
if value.entry.cname in seen:
return True
except AttributeError:
return True # play safe
seen.add(value.entry.cname)
return False
def visit_IfStatNode(self, node):
if not self.current_directives.get('optimize.use_switch'):
self.visitchildren(node)
return node
common_var = None
cases = []
for if_clause in node.if_clauses:
_, common_var, conditions = self.extract_common_conditions(
common_var, if_clause.condition, False)
if common_var is None:
self.visitchildren(node)
return node
cases.append(Nodes.SwitchCaseNode(pos = if_clause.pos,
conditions = conditions,
body = if_clause.body))
condition_values = [
cond for case in cases for cond in case.conditions]
if len(condition_values) < 2:
self.visitchildren(node)
return node
if self.has_duplicate_values(condition_values):
self.visitchildren(node)
return node
common_var = unwrap_node(common_var)
switch_node = Nodes.SwitchStatNode(pos = node.pos,
test = common_var,
cases = cases,
else_clause = node.else_clause)
return switch_node
def visit_CondExprNode(self, node):
if not self.current_directives.get('optimize.use_switch'):
self.visitchildren(node)
return node
not_in, common_var, conditions = self.extract_common_conditions(
None, node.test, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
node.true_val, node.false_val)
def visit_BoolBinopNode(self, node):
if not self.current_directives.get('optimize.use_switch'):
self.visitchildren(node)
return node
not_in, common_var, conditions = self.extract_common_conditions(
None, node, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
node.wrap_operands(self.current_env()) # in case we changed the operands
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
def visit_PrimaryCmpNode(self, node):
if not self.current_directives.get('optimize.use_switch'):
self.visitchildren(node)
return node
not_in, common_var, conditions = self.extract_common_conditions(
None, node, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
def build_simple_switch_statement(self, node, common_var, conditions,
not_in, true_val, false_val):
result_ref = UtilNodes.ResultRefNode(node)
true_body = Nodes.SingleAssignmentNode(
node.pos,
lhs=result_ref,
rhs=true_val.coerce_to(node.type, self.current_env()),
first=True)
false_body = Nodes.SingleAssignmentNode(
node.pos,
lhs=result_ref,
rhs=false_val.coerce_to(node.type, self.current_env()),
first=True)
if not_in:
true_body, false_body = false_body, true_body
cases = [Nodes.SwitchCaseNode(pos = node.pos,
conditions = conditions,
body = true_body)]
common_var = unwrap_node(common_var)
switch_node = Nodes.SwitchStatNode(pos = node.pos,
test = common_var,
cases = cases,
else_clause = false_body)
replacement = UtilNodes.TempResultFromStatNode(result_ref, switch_node)
return replacement
def visit_EvalWithTempExprNode(self, node):
if not self.current_directives.get('optimize.use_switch'):
self.visitchildren(node)
return node
# drop unused expression temp from FlattenInListTransform
orig_expr = node.subexpression
temp_ref = node.lazy_temp
self.visitchildren(node)
if node.subexpression is not orig_expr:
# node was restructured => check if temp is still used
if not Visitor.tree_contains(node.subexpression, temp_ref):
return node.subexpression
return node
visit_Node = Visitor.VisitorTransform.recurse_to_children
class FlattenInListTransform(Visitor.VisitorTransform, SkipDeclarations):
"""
This transformation flattens "x in [val1, ..., valn]" into a sequential list
of comparisons.
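
    For example (illustrative): "x in (1, 2, 3)" becomes
    "x == 1 or x == 2 or x == 3", and "x not in (1, 2, 3)" becomes
    "x != 1 and x != 2 and x != 3" (non-trivial operands are kept in temps).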
"""
def visit_PrimaryCmpNode(self, node):
self.visitchildren(node)
if node.cascade is not None:
return node
elif node.operator == 'in':
conjunction = 'or'
eq_or_neq = '=='
elif node.operator == 'not_in':
conjunction = 'and'
eq_or_neq = '!='
else:
return node
if not isinstance(node.operand2, (ExprNodes.TupleNode,
ExprNodes.ListNode,
ExprNodes.SetNode)):
return node
args = node.operand2.args
if len(args) == 0:
# note: lhs may have side effects
return node
lhs = UtilNodes.ResultRefNode(node.operand1)
conds = []
temps = []
for arg in args:
try:
# Trial optimisation to avoid redundant temp
# assignments. However, since is_simple() is meant to
# be called after type analysis, we ignore any errors
# and just play safe in that case.
is_simple_arg = arg.is_simple()
except Exception:
is_simple_arg = False
if not is_simple_arg:
# must evaluate all non-simple RHS before doing the comparisons
arg = UtilNodes.LetRefNode(arg)
temps.append(arg)
cond = ExprNodes.PrimaryCmpNode(
pos = node.pos,
operand1 = lhs,
operator = eq_or_neq,
operand2 = arg,
cascade = None)
conds.append(ExprNodes.TypecastNode(
pos = node.pos,
operand = cond,
type = PyrexTypes.c_bint_type))
def concat(left, right):
return ExprNodes.BoolBinopNode(
pos = node.pos,
operator = conjunction,
operand1 = left,
operand2 = right)
condition = reduce(concat, conds)
new_node = UtilNodes.EvalWithTempExprNode(lhs, condition)
for temp in temps[::-1]:
new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
return new_node
visit_Node = Visitor.VisitorTransform.recurse_to_children
class DropRefcountingTransform(Visitor.VisitorTransform):
"""Drop ref-counting in safe places.
"""
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_ParallelAssignmentNode(self, node):
"""
Parallel swap assignments like 'a,b = b,a' are safe.
"""
left_names, right_names = [], []
left_indices, right_indices = [], []
temps = []
for stat in node.stats:
if isinstance(stat, Nodes.SingleAssignmentNode):
if not self._extract_operand(stat.lhs, left_names,
left_indices, temps):
return node
if not self._extract_operand(stat.rhs, right_names,
right_indices, temps):
return node
elif isinstance(stat, Nodes.CascadedAssignmentNode):
# FIXME
return node
else:
return node
if left_names or right_names:
# lhs/rhs names must be a non-redundant permutation
lnames = [ path for path, n in left_names ]
rnames = [ path for path, n in right_names ]
if set(lnames) != set(rnames):
return node
if len(set(lnames)) != len(right_names):
return node
if left_indices or right_indices:
# base name and index of index nodes must be a
# non-redundant permutation
lindices = []
for lhs_node in left_indices:
index_id = self._extract_index_id(lhs_node)
if not index_id:
return node
lindices.append(index_id)
rindices = []
for rhs_node in right_indices:
index_id = self._extract_index_id(rhs_node)
if not index_id:
return node
rindices.append(index_id)
if set(lindices) != set(rindices):
return node
if len(set(lindices)) != len(right_indices):
return node
# really supporting IndexNode requires support in
# __Pyx_GetItemInt(), so let's stop short for now
return node
temp_args = [t.arg for t in temps]
for temp in temps:
temp.use_managed_ref = False
for _, name_node in left_names + right_names:
if name_node not in temp_args:
name_node.use_managed_ref = False
for index_node in left_indices + right_indices:
index_node.use_managed_ref = False
return node
def _extract_operand(self, node, names, indices, temps):
node = unwrap_node(node)
if not node.type.is_pyobject:
return False
if isinstance(node, ExprNodes.CoerceToTempNode):
temps.append(node)
node = node.arg
name_path = []
obj_node = node
while isinstance(obj_node, ExprNodes.AttributeNode):
if obj_node.is_py_attr:
return False
name_path.append(obj_node.member)
obj_node = obj_node.obj
if isinstance(obj_node, ExprNodes.NameNode):
name_path.append(obj_node.name)
names.append( ('.'.join(name_path[::-1]), node) )
elif isinstance(node, ExprNodes.IndexNode):
if node.base.type != Builtin.list_type:
return False
if not node.index.type.is_int:
return False
if not isinstance(node.base, ExprNodes.NameNode):
return False
indices.append(node)
else:
return False
return True
def _extract_index_id(self, index_node):
base = index_node.base
index = index_node.index
if isinstance(index, ExprNodes.NameNode):
index_val = index.name
elif isinstance(index, ExprNodes.ConstNode):
# FIXME:
return None
else:
return None
return (base.name, index_val)
class EarlyReplaceBuiltinCalls(Visitor.EnvTransform):
"""Optimize some common calls to builtin types *before* the type
analysis phase and *after* the declarations analysis phase.
This transform cannot make use of any argument types, but it can
restructure the tree in a way that the type analysis phase can
respond to.
Introducing C function calls here may not be a good idea. Move
them to the OptimizeBuiltinCalls transform instead, which runs
after type analysis.
"""
# only intercept on call nodes
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_SimpleCallNode(self, node):
self.visitchildren(node)
function = node.function
if not self._function_is_builtin_name(function):
return node
return self._dispatch_to_handler(node, function, node.args)
def visit_GeneralCallNode(self, node):
self.visitchildren(node)
function = node.function
if not self._function_is_builtin_name(function):
return node
arg_tuple = node.positional_args
if not isinstance(arg_tuple, ExprNodes.TupleNode):
return node
args = arg_tuple.args
return self._dispatch_to_handler(
node, function, args, node.keyword_args)
def _function_is_builtin_name(self, function):
if not function.is_name:
return False
env = self.current_env()
entry = env.lookup(function.name)
if entry is not env.builtin_scope().lookup_here(function.name):
return False
# if entry is None, it's at least an undeclared name, so likely builtin
return True
def _dispatch_to_handler(self, node, function, args, kwargs=None):
if kwargs is None:
handler_name = '_handle_simple_function_%s' % function.name
else:
handler_name = '_handle_general_function_%s' % function.name
handle_call = getattr(self, handler_name, None)
if handle_call is not None:
if kwargs is None:
return handle_call(node, args)
else:
return handle_call(node, args, kwargs)
return node
def _inject_capi_function(self, node, cname, func_type, utility_code=None):
node.function = ExprNodes.PythonCapiFunctionNode(
node.function.pos, node.function.name, cname, func_type,
utility_code = utility_code)
def _error_wrong_arg_count(self, function_name, node, args, expected=None):
if not expected: # None or 0
arg_str = ''
elif isinstance(expected, basestring) or expected > 1:
arg_str = '...'
elif expected == 1:
arg_str = 'x'
else:
arg_str = ''
if expected is not None:
expected_str = 'expected %s, ' % expected
else:
expected_str = ''
error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
function_name, arg_str, expected_str, len(args)))
# specific handlers for simple call nodes
def _handle_simple_function_float(self, node, pos_args):
if not pos_args:
return ExprNodes.FloatNode(node.pos, value='0.0')
if len(pos_args) > 1:
self._error_wrong_arg_count('float', node, pos_args, 1)
arg_type = getattr(pos_args[0], 'type', None)
if arg_type in (PyrexTypes.c_double_type, Builtin.float_type):
return pos_args[0]
return node
def _handle_simple_function_slice(self, node, pos_args):
arg_count = len(pos_args)
start = step = None
if arg_count == 1:
stop, = pos_args
elif arg_count == 2:
start, stop = pos_args
elif arg_count == 3:
start, stop, step = pos_args
else:
self._error_wrong_arg_count('slice', node, pos_args)
return node
return ExprNodes.SliceNode(
node.pos,
start=start or ExprNodes.NoneNode(node.pos),
stop=stop,
step=step or ExprNodes.NoneNode(node.pos))
def _handle_simple_function_ord(self, node, pos_args):
"""Unpack ord('X').
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if isinstance(arg, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
if len(arg.value) == 1:
return ExprNodes.IntNode(
arg.pos, type=PyrexTypes.c_long_type,
value=str(ord(arg.value)),
constant_result=ord(arg.value)
)
elif isinstance(arg, ExprNodes.StringNode):
if arg.unicode_value and len(arg.unicode_value) == 1 \
and ord(arg.unicode_value) <= 255: # Py2/3 portability
return ExprNodes.IntNode(
arg.pos, type=PyrexTypes.c_int_type,
value=str(ord(arg.unicode_value)),
constant_result=ord(arg.unicode_value)
)
return node
# sequence processing
def _handle_simple_function_all(self, node, pos_args):
"""Transform
_result = all(x for L in LL for x in L)
into
for L in LL:
for x in L:
if not x:
_result = False
break
else:
continue
break
else:
_result = True
"""
return self._transform_any_all(node, pos_args, False)
def _handle_simple_function_any(self, node, pos_args):
"""Transform
_result = any(x for L in LL for x in L)
into
for L in LL:
for x in L:
if x:
_result = True
break
else:
continue
break
else:
_result = False
"""
return self._transform_any_all(node, pos_args, True)
def _transform_any_all(self, node, pos_args, is_any):
if len(pos_args) != 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
generator_body = gen_expr_node.def_node.gbody
loop_node = generator_body.body
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
if yield_expression is None:
return node
if is_any:
condition = yield_expression
else:
condition = ExprNodes.NotNode(yield_expression.pos, operand=yield_expression)
test_node = Nodes.IfStatNode(
yield_expression.pos, else_clause=None, if_clauses=[
Nodes.IfClauseNode(
yield_expression.pos,
condition=condition,
body=Nodes.ReturnStatNode(
node.pos,
value=ExprNodes.BoolNode(yield_expression.pos, value=is_any, constant_result=is_any))
)]
)
loop = loop_node
while isinstance(loop.body, Nodes.LoopNode):
next_loop = loop.body
loop.body = Nodes.StatListNode(loop.body.pos, stats=[
loop.body,
Nodes.BreakStatNode(yield_expression.pos)
])
next_loop.else_clause = Nodes.ContinueStatNode(yield_expression.pos)
loop = next_loop
loop_node.else_clause = Nodes.ReturnStatNode(
node.pos,
value=ExprNodes.BoolNode(yield_expression.pos, value=not is_any, constant_result=not is_any))
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, test_node)
return ExprNodes.InlinedGeneratorExpressionNode(
gen_expr_node.pos, gen=gen_expr_node, orig_func='any' if is_any else 'all')
PySequence_List_func_type = PyrexTypes.CFuncType(
Builtin.list_type,
[PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
def _handle_simple_function_sorted(self, node, pos_args):
"""Transform sorted(genexpr) and sorted([listcomp]) into
[listcomp].sort(). CPython just reads the iterable into a
list and calls .sort() on it. Expanding the iterable in a
listcomp is still faster and the result can be sorted in
place.
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if isinstance(arg, ExprNodes.ComprehensionNode) and arg.type is Builtin.list_type:
list_node = pos_args[0]
loop_node = list_node.loop
elif isinstance(arg, ExprNodes.GeneratorExpressionNode):
gen_expr_node = arg
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
if yield_expression is None:
return node
list_node = ExprNodes.InlinedGeneratorExpressionNode(
node.pos, gen_expr_node, orig_func='sorted',
comprehension_type=Builtin.list_type)
append_node = ExprNodes.ComprehensionAppendNode(
yield_expression.pos,
expr=yield_expression,
target=list_node.target)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
elif arg.is_sequence_constructor:
# sorted([a, b, c]) or sorted((a, b, c)). The result is always a list,
# so starting off with a fresh one is more efficient.
list_node = loop_node = arg.as_list()
else:
# Interestingly, PySequence_List works on a lot of non-sequence
# things as well.
list_node = loop_node = ExprNodes.PythonCapiCallNode(
node.pos, "PySequence_List", self.PySequence_List_func_type,
args=pos_args, is_temp=True)
result_node = UtilNodes.ResultRefNode(
pos=loop_node.pos, type=Builtin.list_type, may_hold_none=False)
list_assign_node = Nodes.SingleAssignmentNode(
node.pos, lhs=result_node, rhs=list_node, first=True)
sort_method = ExprNodes.AttributeNode(
node.pos, obj=result_node, attribute=EncodedString('sort'),
# entry ? type ?
needs_none_check=False)
sort_node = Nodes.ExprStatNode(
node.pos, expr=ExprNodes.SimpleCallNode(
node.pos, function=sort_method, args=[]))
sort_node.analyse_declarations(self.current_env())
return UtilNodes.TempResultFromStatNode(
result_node,
Nodes.StatListNode(node.pos, stats=[list_assign_node, sort_node]))
def __handle_simple_function_sum(self, node, pos_args):
"""Transform sum(genexpr) into an equivalent inlined aggregation loop.
"""
if len(pos_args) not in (1,2):
return node
if not isinstance(pos_args[0], (ExprNodes.GeneratorExpressionNode,
ExprNodes.ComprehensionNode)):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
if isinstance(gen_expr_node, ExprNodes.GeneratorExpressionNode):
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
# FIXME: currently nonfunctional
yield_expression = None
if yield_expression is None:
return node
else: # ComprehensionNode
yield_stat_node = gen_expr_node.append
yield_expression = yield_stat_node.expr
try:
if not yield_expression.is_literal or not yield_expression.type.is_int:
return node
except AttributeError:
return node # in case we don't have a type yet
# special case: old Py2 backwards compatible "sum([int_const for ...])"
# can safely be unpacked into a genexpr
if len(pos_args) == 1:
start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
else:
start = pos_args[1]
result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.py_object_type)
add_node = Nodes.SingleAssignmentNode(
yield_expression.pos,
lhs = result_ref,
rhs = ExprNodes.binop_node(node.pos, '+', result_ref, yield_expression)
)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, add_node)
exec_code = Nodes.StatListNode(
node.pos,
stats = [
Nodes.SingleAssignmentNode(
start.pos,
lhs = UtilNodes.ResultRefNode(pos=node.pos, expression=result_ref),
rhs = start,
first = True),
loop_node
])
return ExprNodes.InlinedGeneratorExpressionNode(
gen_expr_node.pos, loop = exec_code, result_node = result_ref,
expr_scope = gen_expr_node.expr_scope, orig_func = 'sum',
has_local_scope = gen_expr_node.has_local_scope)
def _handle_simple_function_min(self, node, pos_args):
return self._optimise_min_max(node, pos_args, '<')
def _handle_simple_function_max(self, node, pos_args):
return self._optimise_min_max(node, pos_args, '>')
def _optimise_min_max(self, node, args, operator):
"""Replace min(a,b,...) and max(a,b,...) by explicit comparison code.
"""
if len(args) <= 1:
if len(args) == 1 and args[0].is_sequence_constructor:
args = args[0].args
else:
# leave this to Python
return node
cascaded_nodes = list(map(UtilNodes.ResultRefNode, args[1:]))
last_result = args[0]
for arg_node in cascaded_nodes:
result_ref = UtilNodes.ResultRefNode(last_result)
last_result = ExprNodes.CondExprNode(
arg_node.pos,
true_val = arg_node,
false_val = result_ref,
test = ExprNodes.PrimaryCmpNode(
arg_node.pos,
operand1 = arg_node,
operator = operator,
operand2 = result_ref,
)
)
last_result = UtilNodes.EvalWithTempExprNode(result_ref, last_result)
for ref_node in cascaded_nodes[::-1]:
last_result = UtilNodes.EvalWithTempExprNode(ref_node, last_result)
return last_result
# builtin type creation
def _DISABLED_handle_simple_function_tuple(self, node, pos_args):
if not pos_args:
return ExprNodes.TupleNode(node.pos, args=[], constant_result=())
# This is a bit special - for iterables (including genexps),
# Python actually overallocates and resizes a newly created
# tuple incrementally while reading items, which we can't
# easily do without explicit node support. Instead, we read
# the items into a list and then copy them into a tuple of the
# final size. This takes up to twice as much memory, but will
# have to do until we have real support for genexps.
result = self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
if result is not node:
return ExprNodes.AsTupleNode(node.pos, arg=result)
return node
def _handle_simple_function_frozenset(self, node, pos_args):
"""Replace frozenset([...]) by frozenset((...)) as tuples are more efficient.
"""
if len(pos_args) != 1:
return node
if pos_args[0].is_sequence_constructor and not pos_args[0].args:
del pos_args[0]
elif isinstance(pos_args[0], ExprNodes.ListNode):
pos_args[0] = pos_args[0].as_tuple()
return node
def _handle_simple_function_list(self, node, pos_args):
if not pos_args:
return ExprNodes.ListNode(node.pos, args=[], constant_result=[])
return self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
def _handle_simple_function_set(self, node, pos_args):
if not pos_args:
return ExprNodes.SetNode(node.pos, args=[], constant_result=set())
return self._transform_list_set_genexpr(node, pos_args, Builtin.set_type)
def _transform_list_set_genexpr(self, node, pos_args, target_type):
"""Replace set(genexpr) and list(genexpr) by an inlined comprehension.
"""
if len(pos_args) > 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
if yield_expression is None:
return node
result_node = ExprNodes.InlinedGeneratorExpressionNode(
node.pos, gen_expr_node,
orig_func='set' if target_type is Builtin.set_type else 'list',
comprehension_type=target_type)
append_node = ExprNodes.ComprehensionAppendNode(
yield_expression.pos,
expr=yield_expression,
target=result_node.target)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
return result_node
def _handle_simple_function_dict(self, node, pos_args):
"""Replace dict( (a,b) for ... ) by an inlined { a:b for ... }
"""
if len(pos_args) == 0:
return ExprNodes.DictNode(node.pos, key_value_pairs=[], constant_result={})
if len(pos_args) > 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
if yield_expression is None:
return node
if not isinstance(yield_expression, ExprNodes.TupleNode):
return node
if len(yield_expression.args) != 2:
return node
result_node = ExprNodes.InlinedGeneratorExpressionNode(
node.pos, gen_expr_node, orig_func='dict',
comprehension_type=Builtin.dict_type)
append_node = ExprNodes.DictComprehensionAppendNode(
yield_expression.pos,
key_expr = yield_expression.args[0],
value_expr = yield_expression.args[1],
target=result_node.target)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
return result_node
# specific handlers for general call nodes
def _handle_general_function_dict(self, node, pos_args, kwargs):
"""Replace dict(a=b,c=d,...) by the underlying keyword dict
construction which is done anyway.
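
        For example (illustrative): dict(a=1, b=2) simply becomes the dict
        literal {'a': 1, 'b': 2} that the keyword arguments already form.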
"""
if len(pos_args) > 0:
return node
if not isinstance(kwargs, ExprNodes.DictNode):
return node
return kwargs
class InlineDefNodeCalls(Visitor.NodeRefCleanupMixin, Visitor.EnvTransform):
visit_Node = Visitor.VisitorTransform.recurse_to_children
def get_constant_value_node(self, name_node):
if name_node.cf_state is None:
return None
if name_node.cf_state.cf_is_null:
return None
entry = self.current_env().lookup(name_node.name)
if not entry or (not entry.cf_assignments
or len(entry.cf_assignments) != 1):
# not just a single assignment in all closures
return None
return entry.cf_assignments[0].rhs
def visit_SimpleCallNode(self, node):
self.visitchildren(node)
if not self.current_directives.get('optimize.inline_defnode_calls'):
return node
function_name = node.function
if not function_name.is_name:
return node
function = self.get_constant_value_node(function_name)
if not isinstance(function, ExprNodes.PyCFunctionNode):
return node
inlined = ExprNodes.InlinedDefNodeCallNode(
node.pos, function_name=function_name,
function=function, args=node.args)
if inlined.can_be_inlined():
return self.replace(node, inlined)
return node
class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
Visitor.MethodDispatcherTransform):
"""Optimize some common methods calls and instantiation patterns
for builtin types *after* the type analysis phase.
Running after type analysis, this transform can only perform
function replacements that do not alter the function return type
in a way that was not anticipated by the type analysis.
"""
### cleanup to avoid redundant coercions to/from Python types
def _visit_PyTypeTestNode(self, node):
# disabled - appears to break assignments in some cases, and
# also drops a None check, which might still be required
"""Flatten redundant type checks after tree changes.
"""
old_arg = node.arg
self.visitchildren(node)
if old_arg is node.arg or node.arg.type != node.type:
return node
return node.arg
def _visit_TypecastNode(self, node):
# disabled - the user may have had a reason to put a type
# cast, even if it looks redundant to Cython
"""
Drop redundant type casts.
"""
self.visitchildren(node)
if node.type == node.operand.type:
return node.operand
return node
def visit_ExprStatNode(self, node):
"""
Drop useless coercions.
"""
self.visitchildren(node)
if isinstance(node.expr, ExprNodes.CoerceToPyTypeNode):
node.expr = node.expr.arg
return node
def visit_CoerceToBooleanNode(self, node):
"""Drop redundant conversion nodes after tree changes.
"""
self.visitchildren(node)
arg = node.arg
if isinstance(arg, ExprNodes.PyTypeTestNode):
arg = arg.arg
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.type in (PyrexTypes.py_object_type, Builtin.bool_type):
return arg.arg.coerce_to_boolean(self.current_env())
return node
def visit_CoerceFromPyTypeNode(self, node):
"""Drop redundant conversion nodes after tree changes.
Also, optimise away calls to Python's builtin int() and
float() if the result is going to be coerced back into a C
type anyway.
"""
self.visitchildren(node)
arg = node.arg
if not arg.type.is_pyobject:
# no Python conversion left at all, just do a C coercion instead
if node.type == arg.type:
return arg
else:
return arg.coerce_to(node.type, self.current_env())
if isinstance(arg, ExprNodes.PyTypeTestNode):
arg = arg.arg
if arg.is_literal:
if (node.type.is_int and isinstance(arg, ExprNodes.IntNode) or
node.type.is_float and isinstance(arg, ExprNodes.FloatNode) or
node.type.is_int and isinstance(arg, ExprNodes.BoolNode)):
return arg.coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.type is PyrexTypes.py_object_type:
if node.type.assignable_from(arg.arg.type):
# completely redundant C->Py->C coercion
return arg.arg.coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.SimpleCallNode):
if node.type.is_int or node.type.is_float:
return self._optimise_numeric_cast_call(node, arg)
elif isinstance(arg, ExprNodes.IndexNode) and not arg.is_buffer_access:
index_node = arg.index
if isinstance(index_node, ExprNodes.CoerceToPyTypeNode):
index_node = index_node.arg
if index_node.type.is_int:
return self._optimise_int_indexing(node, arg, index_node)
return node
PyBytes_GetItemInt_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_char_type, [
PyrexTypes.CFuncTypeArg("bytes", Builtin.bytes_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("check_bounds", PyrexTypes.c_int_type, None),
],
exception_value = "((char)-1)",
exception_check = True)
def _optimise_int_indexing(self, coerce_node, arg, index_node):
env = self.current_env()
bound_check_bool = env.directives['boundscheck'] and 1 or 0
if arg.base.type is Builtin.bytes_type:
if coerce_node.type in (PyrexTypes.c_char_type, PyrexTypes.c_uchar_type):
# bytes[index] -> char
bound_check_node = ExprNodes.IntNode(
coerce_node.pos, value=str(bound_check_bool),
constant_result=bound_check_bool)
node = ExprNodes.PythonCapiCallNode(
coerce_node.pos, "__Pyx_PyBytes_GetItemInt",
self.PyBytes_GetItemInt_func_type,
args=[
arg.base.as_none_safe_node("'NoneType' object is not subscriptable"),
index_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env),
bound_check_node,
],
is_temp=True,
utility_code=UtilityCode.load_cached(
'bytes_index', 'StringTools.c'))
if coerce_node.type is not PyrexTypes.c_char_type:
node = node.coerce_to(coerce_node.type, env)
return node
return coerce_node
def _optimise_numeric_cast_call(self, node, arg):
function = arg.function
if not isinstance(function, ExprNodes.NameNode) \
or not function.type.is_builtin_type \
or not isinstance(arg.arg_tuple, ExprNodes.TupleNode):
return node
args = arg.arg_tuple.args
if len(args) != 1:
return node
func_arg = args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
func_arg = func_arg.arg
elif func_arg.type.is_pyobject:
# play safe: Python conversion might work on all sorts of things
return node
if function.name == 'int':
if func_arg.type.is_int or node.type.is_int:
if func_arg.type == node.type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
elif function.name == 'float':
if func_arg.type.is_float or node.type.is_float:
if func_arg.type == node.type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
return node
def _error_wrong_arg_count(self, function_name, node, args, expected=None):
if not expected: # None or 0
arg_str = ''
elif isinstance(expected, basestring) or expected > 1:
arg_str = '...'
elif expected == 1:
arg_str = 'x'
else:
arg_str = ''
if expected is not None:
expected_str = 'expected %s, ' % expected
else:
expected_str = ''
error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
function_name, arg_str, expected_str, len(args)))
### generic fallbacks
def _handle_function(self, node, function_name, function, arg_list, kwargs):
return node
def _handle_method(self, node, type_name, attr_name, function,
arg_list, is_unbound_method, kwargs):
"""
Try to inject C-API calls for unbound method calls to builtin types.
While the method declarations in Builtin.py already handle this, we
can additionally resolve bound and unbound methods here that were
assigned to variables ahead of time.
"""
if kwargs:
return node
if not function or not function.is_attribute or not function.obj.is_name:
# cannot track unbound method calls over more than one indirection as
# the names might have been reassigned in the meantime
return node
type_entry = self.current_env().lookup(type_name)
if not type_entry:
return node
method = ExprNodes.AttributeNode(
node.function.pos,
obj=ExprNodes.NameNode(
function.pos,
name=type_name,
entry=type_entry,
type=type_entry.type),
attribute=attr_name,
is_called=True).analyse_as_unbound_cmethod_node(self.current_env())
if method is None:
return node
args = node.args
if args is None and node.arg_tuple:
args = node.arg_tuple.args
call_node = ExprNodes.SimpleCallNode(
node.pos,
function=method,
args=args)
if not is_unbound_method:
call_node.self = function.obj
call_node.analyse_c_function_call(self.current_env())
call_node.analysed = True
return call_node.coerce_to(node.type, self.current_env())
### builtin types
PyDict_Copy_func_type = PyrexTypes.CFuncType(
Builtin.dict_type, [
PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None)
])
def _handle_simple_function_dict(self, node, function, pos_args):
"""Replace dict(some_dict) by PyDict_Copy(some_dict).
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if arg.type is Builtin.dict_type:
arg = arg.as_none_safe_node("'NoneType' is not iterable")
return ExprNodes.PythonCapiCallNode(
node.pos, "PyDict_Copy", self.PyDict_Copy_func_type,
args = [arg],
is_temp = node.is_temp
)
return node
PySequence_List_func_type = PyrexTypes.CFuncType(
Builtin.list_type,
[PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
def _handle_simple_function_list(self, node, function, pos_args):
"""Turn list(ob) into PySequence_List(ob).
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
return ExprNodes.PythonCapiCallNode(
node.pos, "PySequence_List", self.PySequence_List_func_type,
args=pos_args, is_temp=node.is_temp)
PyList_AsTuple_func_type = PyrexTypes.CFuncType(
Builtin.tuple_type, [
PyrexTypes.CFuncTypeArg("list", Builtin.list_type, None)
])
PySequence_Tuple_func_type = PyrexTypes.CFuncType(
Builtin.tuple_type,
[PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
def _handle_simple_function_tuple(self, node, function, pos_args):
"""Replace tuple([...]) by PyList_AsTuple or PySequence_Tuple.
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if arg.type is Builtin.tuple_type and not arg.may_be_none():
return arg
if arg.type is Builtin.list_type:
pos_args[0] = arg.as_none_safe_node(
"'NoneType' object is not iterable")
return ExprNodes.PythonCapiCallNode(
node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type,
args=pos_args, is_temp=node.is_temp)
else:
return ExprNodes.PythonCapiCallNode(
node.pos, "PySequence_Tuple", self.PySequence_Tuple_func_type,
args=pos_args, is_temp=node.is_temp)
PySet_New_func_type = PyrexTypes.CFuncType(
Builtin.set_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_set(self, node, function, pos_args):
if len(pos_args) != 1:
return node
if pos_args[0].is_sequence_constructor:
# We can optimise set([x,y,z]) safely into a set literal,
# but only if we create all items before adding them -
# adding an item may raise an exception if it is not
# hashable, but creating the later items may have
# side-effects.
args = []
temps = []
for arg in pos_args[0].args:
if not arg.is_simple():
arg = UtilNodes.LetRefNode(arg)
temps.append(arg)
args.append(arg)
result = ExprNodes.SetNode(node.pos, is_temp=1, args=args)
self.replace(node, result)
for temp in temps[::-1]:
result = UtilNodes.EvalWithTempExprNode(temp, result)
return result
else:
# PySet_New(it) is better than a generic Python call to set(it)
return self.replace(node, ExprNodes.PythonCapiCallNode(
node.pos, "PySet_New",
self.PySet_New_func_type,
args=pos_args,
is_temp=node.is_temp,
py_name="set"))
PyFrozenSet_New_func_type = PyrexTypes.CFuncType(
Builtin.frozenset_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_frozenset(self, node, function, pos_args):
if not pos_args:
pos_args = [ExprNodes.NullNode(node.pos)]
elif len(pos_args) > 1:
return node
elif pos_args[0].type is Builtin.frozenset_type and not pos_args[0].may_be_none():
return pos_args[0]
# PyFrozenSet_New(it) is better than a generic Python call to frozenset(it)
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyFrozenSet_New",
self.PyFrozenSet_New_func_type,
args=pos_args,
is_temp=node.is_temp,
utility_code=UtilityCode.load_cached('pyfrozenset_new', 'Builtins.c'),
py_name="frozenset")
PyObject_AsDouble_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_double_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value = "((double)-1)",
exception_check = True)
def _handle_simple_function_float(self, node, function, pos_args):
"""Transform float() into either a C type cast or a faster C
function call.
"""
# Note: this requires the float() function to be typed as
# returning a C 'double'
if len(pos_args) == 0:
return ExprNodes.FloatNode(
                node.pos, value="0.0", constant_result=0.0
).coerce_to(Builtin.float_type, self.current_env())
elif len(pos_args) != 1:
self._error_wrong_arg_count('float', node, pos_args, '0 or 1')
return node
func_arg = pos_args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
func_arg = func_arg.arg
if func_arg.type is PyrexTypes.c_double_type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_numeric:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_AsDouble",
self.PyObject_AsDouble_func_type,
args = pos_args,
is_temp = node.is_temp,
utility_code = load_c_utility('pyobject_as_double'),
py_name = "float")
PyNumber_Int_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_int(self, node, function, pos_args):
"""Transform int() into a faster C function call.
"""
if len(pos_args) == 0:
return ExprNodes.IntNode(node.pos, value="0", constant_result=0,
type=PyrexTypes.py_object_type)
elif len(pos_args) != 1:
return node # int(x, base)
func_arg = pos_args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
return node # handled in visit_CoerceFromPyTypeNode()
if func_arg.type.is_pyobject and node.type.is_pyobject:
return ExprNodes.PythonCapiCallNode(
node.pos, "PyNumber_Int", self.PyNumber_Int_func_type,
args=pos_args, is_temp=True)
return node
def _handle_simple_function_bool(self, node, function, pos_args):
"""Transform bool(x) into a type coercion to a boolean.
"""
if len(pos_args) == 0:
return ExprNodes.BoolNode(
node.pos, value=False, constant_result=False
).coerce_to(Builtin.bool_type, self.current_env())
elif len(pos_args) != 1:
self._error_wrong_arg_count('bool', node, pos_args, '0 or 1')
return node
else:
# => !!<bint>(x) to make sure it's exactly 0 or 1
operand = pos_args[0].coerce_to_boolean(self.current_env())
operand = ExprNodes.NotNode(node.pos, operand = operand)
operand = ExprNodes.NotNode(node.pos, operand = operand)
# coerce back to Python object as that's the result we are expecting
return operand.coerce_to_pyobject(self.current_env())
### builtin functions
Pyx_strlen_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_size_t_type, [
PyrexTypes.CFuncTypeArg("bytes", PyrexTypes.c_char_ptr_type, None)
])
Pyx_Py_UNICODE_strlen_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_size_t_type, [
PyrexTypes.CFuncTypeArg("unicode", PyrexTypes.c_py_unicode_ptr_type, None)
])
PyObject_Size_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
],
exception_value="-1")
_map_to_capi_len_function = {
Builtin.unicode_type: "__Pyx_PyUnicode_GET_LENGTH",
Builtin.bytes_type: "PyBytes_GET_SIZE",
Builtin.list_type: "PyList_GET_SIZE",
Builtin.tuple_type: "PyTuple_GET_SIZE",
Builtin.set_type: "PySet_GET_SIZE",
Builtin.frozenset_type: "PySet_GET_SIZE",
Builtin.dict_type: "PyDict_Size",
}.get
_ext_types_with_pysize = set(["cpython.array.array"])
def _handle_simple_function_len(self, node, function, pos_args):
"""Replace len(char*) by the equivalent call to strlen(),
len(Py_UNICODE) by the equivalent Py_UNICODE_strlen() and
len(known_builtin_type) by an equivalent C-API call.
"""
if len(pos_args) != 1:
self._error_wrong_arg_count('len', node, pos_args, 1)
return node
arg = pos_args[0]
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
arg = arg.arg
if arg.type.is_string:
new_node = ExprNodes.PythonCapiCallNode(
node.pos, "strlen", self.Pyx_strlen_func_type,
args = [arg],
is_temp = node.is_temp,
utility_code = UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
elif arg.type.is_pyunicode_ptr:
new_node = ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_Py_UNICODE_strlen", self.Pyx_Py_UNICODE_strlen_func_type,
args = [arg],
is_temp = node.is_temp)
elif arg.type.is_pyobject:
cfunc_name = self._map_to_capi_len_function(arg.type)
if cfunc_name is None:
arg_type = arg.type
if ((arg_type.is_extension_type or arg_type.is_builtin_type)
and arg_type.entry.qualified_name in self._ext_types_with_pysize):
cfunc_name = 'Py_SIZE'
else:
return node
arg = arg.as_none_safe_node(
"object of type 'NoneType' has no len()")
new_node = ExprNodes.PythonCapiCallNode(
node.pos, cfunc_name, self.PyObject_Size_func_type,
args = [arg],
is_temp = node.is_temp)
elif arg.type.is_unicode_char:
return ExprNodes.IntNode(node.pos, value='1', constant_result=1,
type=node.type)
else:
return node
if node.type not in (PyrexTypes.c_size_t_type, PyrexTypes.c_py_ssize_t_type):
new_node = new_node.coerce_to(node.type, self.current_env())
return new_node
Pyx_Type_func_type = PyrexTypes.CFuncType(
Builtin.type_type, [
PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_type(self, node, function, pos_args):
"""Replace type(o) by a macro call to Py_TYPE(o).
"""
if len(pos_args) != 1:
return node
node = ExprNodes.PythonCapiCallNode(
node.pos, "Py_TYPE", self.Pyx_Type_func_type,
args = pos_args,
is_temp = False)
return ExprNodes.CastNode(node, PyrexTypes.py_object_type)
Py_type_check_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("arg", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_isinstance(self, node, function, pos_args):
"""Replace isinstance() checks against builtin types by the
corresponding C-API call.
"""
if len(pos_args) != 2:
return node
arg, types = pos_args
temps = []
if isinstance(types, ExprNodes.TupleNode):
types = types.args
            if len(types) == 1 and types[0].type is not Builtin.type_type:
return node # nothing to improve here
if arg.is_attribute or not arg.is_simple():
arg = UtilNodes.ResultRefNode(arg)
temps.append(arg)
elif types.type is Builtin.type_type:
types = [types]
else:
return node
tests = []
test_nodes = []
env = self.current_env()
for test_type_node in types:
builtin_type = None
if test_type_node.is_name:
if test_type_node.entry:
entry = env.lookup(test_type_node.entry.name)
if entry and entry.type and entry.type.is_builtin_type:
builtin_type = entry.type
if builtin_type is Builtin.type_type:
# all types have type "type", but there's only one 'type'
if entry.name != 'type' or not (
entry.scope and entry.scope.is_builtin_scope):
builtin_type = None
if builtin_type is not None:
type_check_function = entry.type.type_check_function(exact=False)
if type_check_function in tests:
continue
tests.append(type_check_function)
type_check_args = [arg]
elif test_type_node.type is Builtin.type_type:
type_check_function = '__Pyx_TypeCheck'
type_check_args = [arg, test_type_node]
else:
if not test_type_node.is_literal:
test_type_node = UtilNodes.ResultRefNode(test_type_node)
temps.append(test_type_node)
type_check_function = 'PyObject_IsInstance'
type_check_args = [arg, test_type_node]
test_nodes.append(
ExprNodes.PythonCapiCallNode(
test_type_node.pos, type_check_function, self.Py_type_check_func_type,
args=type_check_args,
is_temp=True,
))
def join_with_or(a, b, make_binop_node=ExprNodes.binop_node):
or_node = make_binop_node(node.pos, 'or', a, b)
or_node.type = PyrexTypes.c_bint_type
or_node.wrap_operands(env)
return or_node
test_node = reduce(join_with_or, test_nodes).coerce_to(node.type, env)
for temp in temps[::-1]:
test_node = UtilNodes.EvalWithTempExprNode(temp, test_node)
return test_node
def _handle_simple_function_ord(self, node, function, pos_args):
"""Unpack ord(Py_UNICODE) and ord('X').
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.arg.type.is_unicode_char:
return ExprNodes.TypecastNode(
arg.pos, operand=arg.arg, type=PyrexTypes.c_long_type
).coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.UnicodeNode):
if len(arg.value) == 1:
return ExprNodes.IntNode(
arg.pos, type=PyrexTypes.c_int_type,
value=str(ord(arg.value)),
constant_result=ord(arg.value)
).coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.StringNode):
if arg.unicode_value and len(arg.unicode_value) == 1 \
and ord(arg.unicode_value) <= 255: # Py2/3 portability
return ExprNodes.IntNode(
arg.pos, type=PyrexTypes.c_int_type,
value=str(ord(arg.unicode_value)),
constant_result=ord(arg.unicode_value)
).coerce_to(node.type, self.current_env())
return node
### special methods
Pyx_tp_new_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
])
Pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
PyrexTypes.CFuncTypeArg("kwargs", Builtin.dict_type, None),
])
def _handle_any_slot__new__(self, node, function, args,
is_unbound_method, kwargs=None):
"""Replace 'exttype.__new__(exttype, ...)' by a call to exttype->tp_new()
"""
obj = function.obj
if not is_unbound_method or len(args) < 1:
return node
type_arg = args[0]
if not obj.is_name or not type_arg.is_name:
# play safe
return node
if obj.type != Builtin.type_type or type_arg.type != Builtin.type_type:
# not a known type, play safe
return node
if not type_arg.type_entry or not obj.type_entry:
if obj.name != type_arg.name:
return node
# otherwise, we know it's a type and we know it's the same
# type for both - that should do
elif type_arg.type_entry != obj.type_entry:
# different types - may or may not lead to an error at runtime
return node
args_tuple = ExprNodes.TupleNode(node.pos, args=args[1:])
args_tuple = args_tuple.analyse_types(
self.current_env(), skip_children=True)
if type_arg.type_entry:
ext_type = type_arg.type_entry.type
if (ext_type.is_extension_type and ext_type.typeobj_cname and
ext_type.scope.global_scope() == self.current_env().global_scope()):
# known type in current module
tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
slot_func_cname = TypeSlots.get_slot_function(ext_type.scope, tp_slot)
if slot_func_cname:
cython_scope = self.context.cython_scope
PyTypeObjectPtr = PyrexTypes.CPtrType(
cython_scope.lookup('PyTypeObject').type)
pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", PyTypeObjectPtr, None),
PyrexTypes.CFuncTypeArg("args", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("kwargs", PyrexTypes.py_object_type, None),
])
type_arg = ExprNodes.CastNode(type_arg, PyTypeObjectPtr)
if not kwargs:
kwargs = ExprNodes.NullNode(node.pos, type=PyrexTypes.py_object_type) # hack?
return ExprNodes.PythonCapiCallNode(
node.pos, slot_func_cname,
pyx_tp_new_kwargs_func_type,
args=[type_arg, args_tuple, kwargs],
is_temp=True)
else:
# arbitrary variable, needs a None check for safety
type_arg = type_arg.as_none_safe_node(
"object.__new__(X): X is not a type object (NoneType)")
utility_code = UtilityCode.load_cached('tp_new', 'ObjectHandling.c')
if kwargs:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_tp_new_kwargs", self.Pyx_tp_new_kwargs_func_type,
args=[type_arg, args_tuple, kwargs],
utility_code=utility_code,
is_temp=node.is_temp
)
else:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_tp_new", self.Pyx_tp_new_func_type,
args=[type_arg, args_tuple],
utility_code=utility_code,
is_temp=node.is_temp
)
### methods of builtin types
PyObject_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("item", PyrexTypes.py_object_type, None),
],
exception_value="-1")
def _handle_simple_method_object_append(self, node, function, args, is_unbound_method):
"""Optimistic optimisation as X.append() is almost always
referring to a list.
"""
if len(args) != 2 or node.result_is_used:
return node
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_Append", self.PyObject_Append_func_type,
args=args,
may_return_none=False,
is_temp=node.is_temp,
result_is_used=False,
utility_code=load_c_utility('append')
)
PyByteArray_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_int_type, None),
],
exception_value="-1")
PyByteArray_AppendObject_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("value", PyrexTypes.py_object_type, None),
],
exception_value="-1")
def _handle_simple_method_bytearray_append(self, node, function, args, is_unbound_method):
if len(args) != 2:
return node
func_name = "__Pyx_PyByteArray_Append"
func_type = self.PyByteArray_Append_func_type
value = unwrap_coerced_node(args[1])
if value.type.is_int or isinstance(value, ExprNodes.IntNode):
value = value.coerce_to(PyrexTypes.c_int_type, self.current_env())
utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
elif value.is_string_literal:
if not value.can_coerce_to_char_literal():
return node
value = value.coerce_to(PyrexTypes.c_char_type, self.current_env())
utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
elif value.type.is_pyobject:
func_name = "__Pyx_PyByteArray_AppendObject"
func_type = self.PyByteArray_AppendObject_func_type
utility_code = UtilityCode.load_cached("ByteArrayAppendObject", "StringTools.c")
else:
return node
new_node = ExprNodes.PythonCapiCallNode(
node.pos, func_name, func_type,
args=[args[0], value],
may_return_none=False,
is_temp=node.is_temp,
utility_code=utility_code,
)
if node.result_is_used:
new_node = new_node.coerce_to(node.type, self.current_env())
return new_node
PyObject_Pop_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
])
PyObject_PopIndex_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("py_index", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("c_index", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("is_signed", PyrexTypes.c_int_type, None),
],
has_varargs=True) # to fake the additional macro args that lack a proper C type
def _handle_simple_method_list_pop(self, node, function, args, is_unbound_method):
return self._handle_simple_method_object_pop(
node, function, args, is_unbound_method, is_list=True)
def _handle_simple_method_object_pop(self, node, function, args, is_unbound_method, is_list=False):
"""Optimistic optimisation as X.pop([n]) is almost always
referring to a list.
"""
if not args:
return node
obj = args[0]
if is_list:
type_name = 'List'
obj = obj.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error="PyExc_AttributeError",
format_args=['pop'])
else:
type_name = 'Object'
if len(args) == 1:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_Py%s_Pop" % type_name,
self.PyObject_Pop_func_type,
args=[obj],
may_return_none=True,
is_temp=node.is_temp,
utility_code=load_c_utility('pop'),
)
elif len(args) == 2:
index = unwrap_coerced_node(args[1])
py_index = ExprNodes.NoneNode(index.pos)
orig_index_type = index.type
if not index.type.is_int:
if isinstance(index, ExprNodes.IntNode):
py_index = index.coerce_to_pyobject(self.current_env())
index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
elif is_list:
if index.type.is_pyobject:
py_index = index.coerce_to_simple(self.current_env())
index = ExprNodes.CloneNode(py_index)
index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
else:
return node
elif not PyrexTypes.numeric_type_fits(index.type, PyrexTypes.c_py_ssize_t_type):
return node
elif isinstance(index, ExprNodes.IntNode):
py_index = index.coerce_to_pyobject(self.current_env())
# real type might still be larger at runtime
if not orig_index_type.is_int:
orig_index_type = index.type
if not orig_index_type.create_to_py_utility_code(self.current_env()):
return node
convert_func = orig_index_type.to_py_function
conversion_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [PyrexTypes.CFuncTypeArg("intval", orig_index_type, None)])
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_Py%s_PopIndex" % type_name,
self.PyObject_PopIndex_func_type,
args=[obj, py_index, index,
ExprNodes.IntNode(index.pos, value=str(orig_index_type.signed and 1 or 0),
constant_result=orig_index_type.signed and 1 or 0,
type=PyrexTypes.c_int_type),
ExprNodes.RawCNameExprNode(index.pos, PyrexTypes.c_void_type,
orig_index_type.empty_declaration_code()),
ExprNodes.RawCNameExprNode(index.pos, conversion_type, convert_func)],
may_return_none=True,
is_temp=node.is_temp,
utility_code=load_c_utility("pop_index"),
)
return node
single_param_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value = "-1")
def _handle_simple_method_list_sort(self, node, function, args, is_unbound_method):
"""Call PyList_Sort() instead of the 0-argument l.sort().
"""
if len(args) != 1:
return node
return self._substitute_method_call(
node, function, "PyList_Sort", self.single_param_func_type,
            'sort', is_unbound_method, args).coerce_to(node.type, self.current_env())
Pyx_PyDict_GetItem_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
])
def _handle_simple_method_dict_get(self, node, function, args, is_unbound_method):
"""Replace dict.get() by a call to PyDict_GetItem().
"""
if len(args) == 2:
args.append(ExprNodes.NoneNode(node.pos))
elif len(args) != 3:
self._error_wrong_arg_count('dict.get', node, args, "2 or 3")
return node
return self._substitute_method_call(
node, function,
"__Pyx_PyDict_GetItemDefault", self.Pyx_PyDict_GetItem_func_type,
'get', is_unbound_method, args,
may_return_none = True,
utility_code = load_c_utility("dict_getitem_default"))
Pyx_PyDict_SetDefault_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("is_safe_type", PyrexTypes.c_int_type, None),
])
def _handle_simple_method_dict_setdefault(self, node, function, args, is_unbound_method):
"""Replace dict.setdefault() by calls to PyDict_GetItem() and PyDict_SetItem().
"""
if len(args) == 2:
args.append(ExprNodes.NoneNode(node.pos))
elif len(args) != 3:
self._error_wrong_arg_count('dict.setdefault', node, args, "2 or 3")
return node
key_type = args[1].type
if key_type.is_builtin_type:
is_safe_type = int(key_type.name in
'str bytes unicode float int long bool')
elif key_type is PyrexTypes.py_object_type:
is_safe_type = -1 # don't know
else:
is_safe_type = 0 # definitely not
args.append(ExprNodes.IntNode(
node.pos, value=str(is_safe_type), constant_result=is_safe_type))
return self._substitute_method_call(
node, function,
"__Pyx_PyDict_SetDefault", self.Pyx_PyDict_SetDefault_func_type,
'setdefault', is_unbound_method, args,
may_return_none=True,
utility_code=load_c_utility('dict_setdefault'))
Pyx_PyInt_BinopInt_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("op2", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("intval", PyrexTypes.c_long_type, None),
PyrexTypes.CFuncTypeArg("inplace", PyrexTypes.c_bint_type, None),
])
Pyx_PyFloat_BinopInt_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("op2", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("fval", PyrexTypes.c_double_type, None),
PyrexTypes.CFuncTypeArg("inplace", PyrexTypes.c_bint_type, None),
])
def _handle_simple_method_object___add__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Add', node, function, args, is_unbound_method)
def _handle_simple_method_object___sub__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method)
def _handle_simple_method_object___eq__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Eq', node, function, args, is_unbound_method)
def _handle_simple_method_object___neq__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Ne', node, function, args, is_unbound_method)
def _handle_simple_method_object___and__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('And', node, function, args, is_unbound_method)
def _handle_simple_method_object___or__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Or', node, function, args, is_unbound_method)
def _handle_simple_method_object___xor__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Xor', node, function, args, is_unbound_method)
def _handle_simple_method_object___rshift__(self, node, function, args, is_unbound_method):
if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode):
return node
if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63):
return node
return self._optimise_num_binop('Rshift', node, function, args, is_unbound_method)
def _handle_simple_method_object___mod__(self, node, function, args, is_unbound_method):
return self._optimise_num_div('Remainder', node, function, args, is_unbound_method)
def _handle_simple_method_object___floordiv__(self, node, function, args, is_unbound_method):
return self._optimise_num_div('FloorDivide', node, function, args, is_unbound_method)
def _handle_simple_method_object___truediv__(self, node, function, args, is_unbound_method):
return self._optimise_num_div('TrueDivide', node, function, args, is_unbound_method)
def _handle_simple_method_object___div__(self, node, function, args, is_unbound_method):
return self._optimise_num_div('Divide', node, function, args, is_unbound_method)
def _optimise_num_div(self, operator, node, function, args, is_unbound_method):
if len(args) != 2 or not args[1].has_constant_result() or args[1].constant_result == 0:
return node
if isinstance(args[1], ExprNodes.IntNode):
if not (-2**30 <= args[1].constant_result <= 2**30):
return node
elif isinstance(args[1], ExprNodes.FloatNode):
if not (-2**53 <= args[1].constant_result <= 2**53):
return node
else:
return node
return self._optimise_num_binop(operator, node, function, args, is_unbound_method)
def _handle_simple_method_float___add__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Add', node, function, args, is_unbound_method)
def _handle_simple_method_float___sub__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method)
def _handle_simple_method_float___truediv__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('TrueDivide', node, function, args, is_unbound_method)
def _handle_simple_method_float___div__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Divide', node, function, args, is_unbound_method)
def _handle_simple_method_float___mod__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Remainder', node, function, args, is_unbound_method)
def _handle_simple_method_float___eq__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Eq', node, function, args, is_unbound_method)
def _handle_simple_method_float___neq__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Ne', node, function, args, is_unbound_method)
def _optimise_num_binop(self, operator, node, function, args, is_unbound_method):
"""
Optimise math operators for (likely) float or small integer operations.
"""
if len(args) != 2:
return node
if not node.type.is_pyobject:
return node
        # When adding an IntNode/FloatNode to something else, assume the other operand is also numeric.
        # Prefer constants on the RHS as they allow better size control for some operators.
num_nodes = (ExprNodes.IntNode, ExprNodes.FloatNode)
if isinstance(args[1], num_nodes):
if args[0].type is not PyrexTypes.py_object_type:
return node
numval = args[1]
arg_order = 'ObjC'
elif isinstance(args[0], num_nodes):
if args[1].type is not PyrexTypes.py_object_type:
return node
numval = args[0]
arg_order = 'CObj'
else:
return node
if not numval.has_constant_result():
return node
is_float = isinstance(numval, ExprNodes.FloatNode)
if is_float:
if operator not in ('Add', 'Subtract', 'Remainder', 'TrueDivide', 'Divide', 'Eq', 'Ne'):
return node
elif operator == 'Divide':
# mixed old-/new-style division is not currently optimised for integers
return node
elif abs(numval.constant_result) > 2**30:
return node
args = list(args)
args.append((ExprNodes.FloatNode if is_float else ExprNodes.IntNode)(
numval.pos, value=numval.value, constant_result=numval.constant_result,
type=PyrexTypes.c_double_type if is_float else PyrexTypes.c_long_type))
inplace = node.inplace if isinstance(node, ExprNodes.NumBinopNode) else False
args.append(ExprNodes.BoolNode(node.pos, value=inplace, constant_result=inplace))
utility_code = TempitaUtilityCode.load_cached(
"PyFloatBinop" if is_float else "PyIntBinop", "Optimize.c",
context=dict(op=operator, order=arg_order))
return self._substitute_method_call(
node, function, "__Pyx_Py%s_%s%s" % ('Float' if is_float else 'Int', operator, arg_order),
self.Pyx_PyFloat_BinopInt_func_type if is_float else self.Pyx_PyInt_BinopInt_func_type,
'__%s__' % operator[:3].lower(), is_unbound_method, args,
may_return_none=True,
with_none_check=False,
utility_code=utility_code)
### unicode type methods
PyUnicode_uchar_predicate_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
])
def _inject_unicode_predicate(self, node, function, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
not ustring.arg.type.is_unicode_char:
return node
uchar = ustring.arg
method_name = function.attribute
if method_name == 'istitle':
# istitle() doesn't directly map to Py_UNICODE_ISTITLE()
utility_code = UtilityCode.load_cached(
"py_unicode_istitle", "StringTools.c")
function_name = '__Pyx_Py_UNICODE_ISTITLE'
else:
utility_code = None
function_name = 'Py_UNICODE_%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function,
function_name, self.PyUnicode_uchar_predicate_func_type,
method_name, is_unbound_method, [uchar],
utility_code = utility_code)
if node.type.is_pyobject:
            func_call = func_call.coerce_to_pyobject(self.current_env())
return func_call
_handle_simple_method_unicode_isalnum = _inject_unicode_predicate
_handle_simple_method_unicode_isalpha = _inject_unicode_predicate
_handle_simple_method_unicode_isdecimal = _inject_unicode_predicate
_handle_simple_method_unicode_isdigit = _inject_unicode_predicate
_handle_simple_method_unicode_islower = _inject_unicode_predicate
_handle_simple_method_unicode_isnumeric = _inject_unicode_predicate
_handle_simple_method_unicode_isspace = _inject_unicode_predicate
_handle_simple_method_unicode_istitle = _inject_unicode_predicate
_handle_simple_method_unicode_isupper = _inject_unicode_predicate
PyUnicode_uchar_conversion_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ucs4_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
])
def _inject_unicode_character_conversion(self, node, function, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
not ustring.arg.type.is_unicode_char:
return node
uchar = ustring.arg
method_name = function.attribute
function_name = 'Py_UNICODE_TO%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function,
function_name, self.PyUnicode_uchar_conversion_func_type,
method_name, is_unbound_method, [uchar])
if node.type.is_pyobject:
            func_call = func_call.coerce_to_pyobject(self.current_env())
return func_call
_handle_simple_method_unicode_lower = _inject_unicode_character_conversion
_handle_simple_method_unicode_upper = _inject_unicode_character_conversion
_handle_simple_method_unicode_title = _inject_unicode_character_conversion
PyUnicode_Splitlines_func_type = PyrexTypes.CFuncType(
Builtin.list_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("keepends", PyrexTypes.c_bint_type, None),
])
def _handle_simple_method_unicode_splitlines(self, node, function, args, is_unbound_method):
"""Replace unicode.splitlines(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (1,2):
self._error_wrong_arg_count('unicode.splitlines', node, args, "1 or 2")
return node
self._inject_bint_default_argument(node, args, 1, False)
return self._substitute_method_call(
node, function,
"PyUnicode_Splitlines", self.PyUnicode_Splitlines_func_type,
'splitlines', is_unbound_method, args)
PyUnicode_Split_func_type = PyrexTypes.CFuncType(
Builtin.list_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("sep", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("maxsplit", PyrexTypes.c_py_ssize_t_type, None),
]
)
def _handle_simple_method_unicode_split(self, node, function, args, is_unbound_method):
"""Replace unicode.split(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (1,2,3):
self._error_wrong_arg_count('unicode.split', node, args, "1-3")
return node
if len(args) < 2:
args.append(ExprNodes.NullNode(node.pos))
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "-1")
return self._substitute_method_call(
node, function,
"PyUnicode_Split", self.PyUnicode_Split_func_type,
'split', is_unbound_method, args)
PyUnicode_Join_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("seq", PyrexTypes.py_object_type, None),
])
def _handle_simple_method_unicode_join(self, node, function, args, is_unbound_method):
"""
unicode.join() builds a list first => see if we can do this more efficiently
"""
if len(args) != 2:
self._error_wrong_arg_count('unicode.join', node, args, "2")
return node
if isinstance(args[1], ExprNodes.GeneratorExpressionNode):
gen_expr_node = args[1]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
if yield_expression is not None:
inlined_genexpr = ExprNodes.InlinedGeneratorExpressionNode(
node.pos, gen_expr_node, orig_func='list',
comprehension_type=Builtin.list_type)
append_node = ExprNodes.ComprehensionAppendNode(
yield_expression.pos,
expr=yield_expression,
target=inlined_genexpr.target)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
args[1] = inlined_genexpr
return self._substitute_method_call(
node, function,
"PyUnicode_Join", self.PyUnicode_Join_func_type,
'join', is_unbound_method, args)
PyString_Tailmatch_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("str", PyrexTypes.py_object_type, None), # bytes/str/unicode
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
],
exception_value = '-1')
def _handle_simple_method_unicode_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'unicode', 'endswith',
unicode_tailmatch_utility_code, +1)
def _handle_simple_method_unicode_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'unicode', 'startswith',
unicode_tailmatch_utility_code, -1)
def _inject_tailmatch(self, node, function, args, is_unbound_method, type_name,
method_name, utility_code, direction):
"""Replace unicode.startswith(...) and unicode.endswith(...)
by a direct call to the corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('%s.%s' % (type_name, method_name), node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
args.append(ExprNodes.IntNode(
node.pos, value=str(direction), type=PyrexTypes.c_int_type))
method_call = self._substitute_method_call(
node, function,
"__Pyx_Py%s_Tailmatch" % type_name.capitalize(),
self.PyString_Tailmatch_func_type,
method_name, is_unbound_method, args,
utility_code = utility_code)
return method_call.coerce_to(Builtin.bool_type, self.current_env())
PyUnicode_Find_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
],
exception_value = '-2')
def _handle_simple_method_unicode_find(self, node, function, args, is_unbound_method):
return self._inject_unicode_find(
node, function, args, is_unbound_method, 'find', +1)
def _handle_simple_method_unicode_rfind(self, node, function, args, is_unbound_method):
return self._inject_unicode_find(
node, function, args, is_unbound_method, 'rfind', -1)
def _inject_unicode_find(self, node, function, args, is_unbound_method,
method_name, direction):
"""Replace unicode.find(...) and unicode.rfind(...) by a
direct call to the corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('unicode.%s' % method_name, node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
args.append(ExprNodes.IntNode(
node.pos, value=str(direction), type=PyrexTypes.c_int_type))
method_call = self._substitute_method_call(
node, function, "PyUnicode_Find", self.PyUnicode_Find_func_type,
method_name, is_unbound_method, args)
return method_call.coerce_to_pyobject(self.current_env())
PyUnicode_Count_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
],
exception_value = '-1')
def _handle_simple_method_unicode_count(self, node, function, args, is_unbound_method):
"""Replace unicode.count(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('unicode.count', node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
method_call = self._substitute_method_call(
node, function, "PyUnicode_Count", self.PyUnicode_Count_func_type,
'count', is_unbound_method, args)
return method_call.coerce_to_pyobject(self.current_env())
PyUnicode_Replace_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("replstr", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("maxcount", PyrexTypes.c_py_ssize_t_type, None),
])
def _handle_simple_method_unicode_replace(self, node, function, args, is_unbound_method):
"""Replace unicode.replace(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (3,4):
self._error_wrong_arg_count('unicode.replace', node, args, "3-4")
return node
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "-1")
return self._substitute_method_call(
node, function, "PyUnicode_Replace", self.PyUnicode_Replace_func_type,
'replace', is_unbound_method, args)
PyUnicode_AsEncodedString_func_type = PyrexTypes.CFuncType(
Builtin.bytes_type, [
PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
])
PyUnicode_AsXyzString_func_type = PyrexTypes.CFuncType(
Builtin.bytes_type, [
PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
])
_special_encodings = ['UTF8', 'UTF16', 'Latin1', 'ASCII',
'unicode_escape', 'raw_unicode_escape']
_special_codecs = [ (name, codecs.getencoder(name))
for name in _special_encodings ]
def _handle_simple_method_unicode_encode(self, node, function, args, is_unbound_method):
"""Replace unicode.encode(...) by a direct C-API call to the
corresponding codec.
"""
if len(args) < 1 or len(args) > 3:
self._error_wrong_arg_count('unicode.encode', node, args, '1-3')
return node
string_node = args[0]
if len(args) == 1:
null_node = ExprNodes.NullNode(node.pos)
return self._substitute_method_call(
node, function, "PyUnicode_AsEncodedString",
self.PyUnicode_AsEncodedString_func_type,
'encode', is_unbound_method, [string_node, null_node, null_node])
parameters = self._unpack_encoding_and_error_mode(node.pos, args)
if parameters is None:
return node
encoding, encoding_node, error_handling, error_handling_node = parameters
if encoding and isinstance(string_node, ExprNodes.UnicodeNode):
# constant, so try to do the encoding at compile time
try:
value = string_node.value.encode(encoding, error_handling)
            except Exception:
# well, looks like we can't
pass
else:
value = BytesLiteral(value)
value.encoding = encoding
return ExprNodes.BytesNode(
string_node.pos, value=value, type=Builtin.bytes_type)
if encoding and error_handling == 'strict':
# try to find a specific encoder function
codec_name = self._find_special_codec_name(encoding)
if codec_name is not None:
encode_function = "PyUnicode_As%sString" % codec_name
return self._substitute_method_call(
node, function, encode_function,
self.PyUnicode_AsXyzString_func_type,
'encode', is_unbound_method, [string_node])
return self._substitute_method_call(
node, function, "PyUnicode_AsEncodedString",
self.PyUnicode_AsEncodedString_func_type,
'encode', is_unbound_method,
[string_node, encoding_node, error_handling_node])
PyUnicode_DecodeXyz_func_ptr_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("size", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
]))
_decode_c_string_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
])
_decode_bytes_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
])
_decode_cpp_string_func_type = None # lazy init
def _handle_simple_method_bytes_decode(self, node, function, args, is_unbound_method):
"""Replace char*.decode() by a direct C-API call to the
corresponding codec, possibly resolving a slice on the char*.
"""
if not (1 <= len(args) <= 3):
self._error_wrong_arg_count('bytes.decode', node, args, '1-3')
return node
# normalise input nodes
string_node = args[0]
start = stop = None
if isinstance(string_node, ExprNodes.SliceIndexNode):
index_node = string_node
string_node = index_node.base
start, stop = index_node.start, index_node.stop
if not start or start.constant_result == 0:
start = None
if isinstance(string_node, ExprNodes.CoerceToPyTypeNode):
string_node = string_node.arg
string_type = string_node.type
if string_type in (Builtin.bytes_type, Builtin.bytearray_type):
if is_unbound_method:
string_node = string_node.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=['decode', string_type.name])
else:
string_node = string_node.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error="PyExc_AttributeError",
format_args=['decode'])
elif not string_type.is_string and not string_type.is_cpp_string:
# nothing to optimise here
return node
parameters = self._unpack_encoding_and_error_mode(node.pos, args)
if parameters is None:
return node
encoding, encoding_node, error_handling, error_handling_node = parameters
if not start:
start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
elif not start.type.is_int:
start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop and not stop.type.is_int:
stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
        # try to find a specific decoder function
codec_name = None
if encoding is not None:
codec_name = self._find_special_codec_name(encoding)
if codec_name is not None:
decode_function = ExprNodes.RawCNameExprNode(
node.pos, type=self.PyUnicode_DecodeXyz_func_ptr_type,
cname="PyUnicode_Decode%s" % codec_name)
encoding_node = ExprNodes.NullNode(node.pos)
else:
decode_function = ExprNodes.NullNode(node.pos)
# build the helper function call
temps = []
if string_type.is_string:
# C string
if not stop:
# use strlen() to find the string length, just as CPython would
if not string_node.is_name:
string_node = UtilNodes.LetRefNode(string_node) # used twice
temps.append(string_node)
stop = ExprNodes.PythonCapiCallNode(
string_node.pos, "strlen", self.Pyx_strlen_func_type,
args=[string_node],
is_temp=False,
utility_code=UtilityCode.load_cached("IncludeStringH", "StringTools.c"),
).coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
helper_func_type = self._decode_c_string_func_type
utility_code_name = 'decode_c_string'
elif string_type.is_cpp_string:
# C++ std::string
if not stop:
stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
constant_result=ExprNodes.not_a_constant)
if self._decode_cpp_string_func_type is None:
# lazy init to reuse the C++ string type
self._decode_cpp_string_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", string_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("decode_func", self.PyUnicode_DecodeXyz_func_ptr_type, None),
])
helper_func_type = self._decode_cpp_string_func_type
utility_code_name = 'decode_cpp_string'
else:
# Python bytes/bytearray object
if not stop:
stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
constant_result=ExprNodes.not_a_constant)
helper_func_type = self._decode_bytes_func_type
if string_type is Builtin.bytes_type:
utility_code_name = 'decode_bytes'
else:
utility_code_name = 'decode_bytearray'
node = ExprNodes.PythonCapiCallNode(
node.pos, '__Pyx_%s' % utility_code_name, helper_func_type,
args=[string_node, start, stop, encoding_node, error_handling_node, decode_function],
is_temp=node.is_temp,
utility_code=UtilityCode.load_cached(utility_code_name, 'StringTools.c'),
)
for temp in temps[::-1]:
node = UtilNodes.EvalWithTempExprNode(temp, node)
return node
_handle_simple_method_bytearray_decode = _handle_simple_method_bytes_decode
def _find_special_codec_name(self, encoding):
try:
requested_codec = codecs.getencoder(encoding)
except LookupError:
return None
for name, codec in self._special_codecs:
if codec == requested_codec:
if '_' in name:
name = ''.join([s.capitalize()
for s in name.split('_')])
return name
return None
def _unpack_encoding_and_error_mode(self, pos, args):
null_node = ExprNodes.NullNode(pos)
if len(args) >= 2:
encoding, encoding_node = self._unpack_string_and_cstring_node(args[1])
if encoding_node is None:
return None
else:
encoding = None
encoding_node = null_node
if len(args) == 3:
error_handling, error_handling_node = self._unpack_string_and_cstring_node(args[2])
if error_handling_node is None:
return None
if error_handling == 'strict':
error_handling_node = null_node
else:
error_handling = 'strict'
error_handling_node = null_node
return (encoding, encoding_node, error_handling, error_handling_node)
def _unpack_string_and_cstring_node(self, node):
if isinstance(node, ExprNodes.CoerceToPyTypeNode):
node = node.arg
if isinstance(node, ExprNodes.UnicodeNode):
encoding = node.value
node = ExprNodes.BytesNode(
node.pos, value=BytesLiteral(encoding.utf8encode()),
type=PyrexTypes.c_char_ptr_type)
elif isinstance(node, (ExprNodes.StringNode, ExprNodes.BytesNode)):
encoding = node.value.decode('ISO-8859-1')
node = ExprNodes.BytesNode(
node.pos, value=node.value, type=PyrexTypes.c_char_ptr_type)
elif node.type is Builtin.bytes_type:
encoding = None
node = node.coerce_to(PyrexTypes.c_char_ptr_type, self.current_env())
elif node.type.is_string:
encoding = None
else:
encoding = node = None
return encoding, node
def _handle_simple_method_str_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'str', 'endswith',
str_tailmatch_utility_code, +1)
def _handle_simple_method_str_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'str', 'startswith',
str_tailmatch_utility_code, -1)
def _handle_simple_method_bytes_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytes', 'endswith',
bytes_tailmatch_utility_code, +1)
def _handle_simple_method_bytes_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytes', 'startswith',
bytes_tailmatch_utility_code, -1)
''' # disabled for now, enable when we consider it worth it (see StringTools.c)
def _handle_simple_method_bytearray_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytearray', 'endswith',
bytes_tailmatch_utility_code, +1)
def _handle_simple_method_bytearray_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytearray', 'startswith',
bytes_tailmatch_utility_code, -1)
'''
### helpers
def _substitute_method_call(self, node, function, name, func_type,
attr_name, is_unbound_method, args=(),
utility_code=None, is_temp=None,
may_return_none=ExprNodes.PythonCapiCallNode.may_return_none,
with_none_check=True):
args = list(args)
if with_none_check and args and not args[0].is_literal:
self_arg = args[0]
if is_unbound_method:
self_arg = self_arg.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=[attr_name, function.obj.name])
else:
self_arg = self_arg.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error = "PyExc_AttributeError",
format_args = [attr_name])
args[0] = self_arg
if is_temp is None:
is_temp = node.is_temp
return ExprNodes.PythonCapiCallNode(
node.pos, name, func_type,
args = args,
is_temp = is_temp,
utility_code = utility_code,
may_return_none = may_return_none,
result_is_used = node.result_is_used,
)
def _inject_int_default_argument(self, node, args, arg_index, type, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
args.append(ExprNodes.IntNode(node.pos, value=str(default_value),
type=type, constant_result=default_value))
else:
args[arg_index] = args[arg_index].coerce_to(type, self.current_env())
def _inject_bint_default_argument(self, node, args, arg_index, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
default_value = bool(default_value)
args.append(ExprNodes.BoolNode(node.pos, value=default_value,
constant_result=default_value))
else:
args[arg_index] = args[arg_index].coerce_to_boolean(self.current_env())
unicode_tailmatch_utility_code = UtilityCode.load_cached('unicode_tailmatch', 'StringTools.c')
bytes_tailmatch_utility_code = UtilityCode.load_cached('bytes_tailmatch', 'StringTools.c')
str_tailmatch_utility_code = UtilityCode.load_cached('str_tailmatch', 'StringTools.c')
class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
"""Calculate the result of constant expressions to store it in
``expr_node.constant_result``, and replace trivial cases by their
constant result.
General rules:
- We calculate float constants to make them available to the
compiler, but we do not aggregate them into a single literal
node to prevent any loss of precision.
- We recursively calculate constants from non-literal nodes to
make them available to the compiler, but we only aggregate
literal nodes at each step. Non-literal nodes are never merged
into a single node.
"""
def __init__(self, reevaluate=False):
"""
The reevaluate argument specifies whether constant values that were
previously computed should be recomputed.
"""
super(ConstantFolding, self).__init__()
self.reevaluate = reevaluate
def _calculate_const(self, node):
if (not self.reevaluate and
node.constant_result is not ExprNodes.constant_value_not_set):
return
# make sure we always set the value
not_a_constant = ExprNodes.not_a_constant
node.constant_result = not_a_constant
# check if all children are constant
children = self.visitchildren(node)
for child_result in children.values():
if type(child_result) is list:
for child in child_result:
if getattr(child, 'constant_result', not_a_constant) is not_a_constant:
return
elif getattr(child_result, 'constant_result', not_a_constant) is not_a_constant:
return
# now try to calculate the real constant value
try:
node.calculate_constant_result()
# if node.constant_result is not ExprNodes.not_a_constant:
# print node.__class__.__name__, node.constant_result
except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
# ignore all 'normal' errors here => no constant result
pass
except Exception:
# this looks like a real error
import traceback, sys
traceback.print_exc(file=sys.stdout)
NODE_TYPE_ORDER = [ExprNodes.BoolNode, ExprNodes.CharNode,
ExprNodes.IntNode, ExprNodes.FloatNode]
def _widest_node_class(self, *nodes):
try:
return self.NODE_TYPE_ORDER[
max(map(self.NODE_TYPE_ORDER.index, map(type, nodes)))]
except ValueError:
return None
def _bool_node(self, node, value):
value = bool(value)
return ExprNodes.BoolNode(node.pos, value=value, constant_result=value)
def visit_ExprNode(self, node):
self._calculate_const(node)
return node
def visit_UnopNode(self, node):
self._calculate_const(node)
if not node.has_constant_result():
if node.operator == '!':
return self._handle_NotNode(node)
return node
if not node.operand.is_literal:
return node
if node.operator == '!':
return self._bool_node(node, node.constant_result)
elif isinstance(node.operand, ExprNodes.BoolNode):
return ExprNodes.IntNode(node.pos, value=str(int(node.constant_result)),
type=PyrexTypes.c_int_type,
constant_result=int(node.constant_result))
elif node.operator == '+':
return self._handle_UnaryPlusNode(node)
elif node.operator == '-':
return self._handle_UnaryMinusNode(node)
return node
_negate_operator = {
'in': 'not_in',
'not_in': 'in',
'is': 'is_not',
'is_not': 'is'
}.get
def _handle_NotNode(self, node):
operand = node.operand
if isinstance(operand, ExprNodes.PrimaryCmpNode):
operator = self._negate_operator(operand.operator)
if operator:
node = copy.copy(operand)
node.operator = operator
node = self.visit_PrimaryCmpNode(node)
return node
def _handle_UnaryMinusNode(self, node):
def _negate(value):
if value.startswith('-'):
value = value[1:]
else:
value = '-' + value
return value
node_type = node.operand.type
if isinstance(node.operand, ExprNodes.FloatNode):
# this is a safe operation
return ExprNodes.FloatNode(node.pos, value=_negate(node.operand.value),
type=node_type,
constant_result=node.constant_result)
        if (node_type.is_int and node_type.signed) or \
                (isinstance(node.operand, ExprNodes.IntNode) and node_type.is_pyobject):
return ExprNodes.IntNode(node.pos, value=_negate(node.operand.value),
type=node_type,
longness=node.operand.longness,
constant_result=node.constant_result)
return node
def _handle_UnaryPlusNode(self, node):
if (node.operand.has_constant_result() and
node.constant_result == node.operand.constant_result):
return node.operand
return node
def visit_BoolBinopNode(self, node):
self._calculate_const(node)
if not node.operand1.has_constant_result():
return node
if node.operand1.constant_result:
if node.operator == 'and':
return node.operand2
else:
return node.operand1
else:
if node.operator == 'and':
return node.operand1
else:
return node.operand2
def visit_BinopNode(self, node):
self._calculate_const(node)
if node.constant_result is ExprNodes.not_a_constant:
return node
if isinstance(node.constant_result, float):
return node
operand1, operand2 = node.operand1, node.operand2
if not operand1.is_literal or not operand2.is_literal:
return node
# now inject a new constant node with the calculated value
try:
type1, type2 = operand1.type, operand2.type
if type1 is None or type2 is None:
return node
except AttributeError:
return node
if type1.is_numeric and type2.is_numeric:
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
else:
widest_type = PyrexTypes.py_object_type
target_class = self._widest_node_class(operand1, operand2)
if target_class is None:
return node
elif target_class is ExprNodes.BoolNode and node.operator in '+-//<<%**>>':
# C arithmetic results in at least an int type
target_class = ExprNodes.IntNode
elif target_class is ExprNodes.CharNode and node.operator in '+-//<<%**>>&|^':
# C arithmetic results in at least an int type
target_class = ExprNodes.IntNode
if target_class is ExprNodes.IntNode:
unsigned = getattr(operand1, 'unsigned', '') and \
getattr(operand2, 'unsigned', '')
longness = "LL"[:max(len(getattr(operand1, 'longness', '')),
len(getattr(operand2, 'longness', '')))]
new_node = ExprNodes.IntNode(pos=node.pos,
unsigned=unsigned, longness=longness,
value=str(int(node.constant_result)),
constant_result=int(node.constant_result))
# IntNode is smart about the type it chooses, so we just
# make sure we were not smarter this time
if widest_type.is_pyobject or new_node.type.is_pyobject:
new_node.type = PyrexTypes.py_object_type
else:
new_node.type = PyrexTypes.widest_numeric_type(widest_type, new_node.type)
else:
if target_class is ExprNodes.BoolNode:
node_value = node.constant_result
else:
node_value = str(node.constant_result)
new_node = target_class(pos=node.pos, type = widest_type,
value = node_value,
constant_result = node.constant_result)
return new_node
def visit_AddNode(self, node):
self._calculate_const(node)
if node.constant_result is ExprNodes.not_a_constant:
return node
if node.operand1.is_string_literal and node.operand2.is_string_literal:
# some people combine string literals with a '+'
str1, str2 = node.operand1, node.operand2
if isinstance(str1, ExprNodes.UnicodeNode) and isinstance(str2, ExprNodes.UnicodeNode):
bytes_value = None
if str1.bytes_value is not None and str2.bytes_value is not None:
if str1.bytes_value.encoding == str2.bytes_value.encoding:
bytes_value = BytesLiteral(str1.bytes_value + str2.bytes_value)
bytes_value.encoding = str1.bytes_value.encoding
string_value = EncodedString(node.constant_result)
return ExprNodes.UnicodeNode(
str1.pos, value=string_value, constant_result=node.constant_result, bytes_value=bytes_value)
elif isinstance(str1, ExprNodes.BytesNode) and isinstance(str2, ExprNodes.BytesNode):
if str1.value.encoding == str2.value.encoding:
bytes_value = BytesLiteral(node.constant_result)
bytes_value.encoding = str1.value.encoding
return ExprNodes.BytesNode(str1.pos, value=bytes_value, constant_result=node.constant_result)
# all other combinations are rather complicated
# to get right in Py2/3: encodings, unicode escapes, ...
return self.visit_BinopNode(node)
def visit_MulNode(self, node):
self._calculate_const(node)
if node.operand1.is_sequence_constructor:
return self._calculate_constant_seq(node, node.operand1, node.operand2)
if isinstance(node.operand1, ExprNodes.IntNode) and \
node.operand2.is_sequence_constructor:
return self._calculate_constant_seq(node, node.operand2, node.operand1)
return self.visit_BinopNode(node)
def _calculate_constant_seq(self, node, sequence_node, factor):
if factor.constant_result != 1 and sequence_node.args:
if isinstance(factor.constant_result, (int, long)) and factor.constant_result <= 0:
del sequence_node.args[:]
sequence_node.mult_factor = None
elif sequence_node.mult_factor is not None:
if (isinstance(factor.constant_result, (int, long)) and
isinstance(sequence_node.mult_factor.constant_result, (int, long))):
value = sequence_node.mult_factor.constant_result * factor.constant_result
sequence_node.mult_factor = ExprNodes.IntNode(
sequence_node.mult_factor.pos,
value=str(value), constant_result=value)
else:
# don't know if we can combine the factors, so don't
return self.visit_BinopNode(node)
else:
sequence_node.mult_factor = factor
return sequence_node
def visit_MergedDictNode(self, node):
"""Unpack **args in place if we can."""
self.visitchildren(node)
args = []
items = []
def add(arg):
if arg.is_dict_literal:
if items:
items[0].key_value_pairs.extend(arg.key_value_pairs)
else:
items.append(arg)
elif isinstance(arg, ExprNodes.MergedDictNode):
for child_arg in arg.keyword_args:
add(child_arg)
else:
if items:
args.append(items[0])
del items[:]
args.append(arg)
for arg in node.keyword_args:
add(arg)
if items:
args.append(items[0])
if len(args) == 1:
arg = args[0]
if arg.is_dict_literal or isinstance(arg, ExprNodes.MergedDictNode):
return arg
node.keyword_args[:] = args
self._calculate_const(node)
return node
def visit_MergedSequenceNode(self, node):
"""Unpack *args in place if we can."""
self.visitchildren(node)
is_set = node.type is Builtin.set_type
args = []
values = []
def add(arg):
if (is_set and arg.is_set_literal) or (arg.is_sequence_constructor and not arg.mult_factor):
if values:
values[0].args.extend(arg.args)
else:
values.append(arg)
elif isinstance(arg, ExprNodes.MergedSequenceNode):
for child_arg in arg.args:
add(child_arg)
else:
if values:
args.append(values[0])
del values[:]
args.append(arg)
for arg in node.args:
add(arg)
if values:
args.append(values[0])
if len(args) == 1:
arg = args[0]
if ((is_set and arg.is_set_literal) or
(arg.is_sequence_constructor and arg.type is node.type) or
isinstance(arg, ExprNodes.MergedSequenceNode)):
return arg
node.args[:] = args
self._calculate_const(node)
return node
def visit_SequenceNode(self, node):
"""Unpack *args in place if we can."""
self.visitchildren(node)
args = []
for arg in node.args:
if not arg.is_starred:
args.append(arg)
elif arg.target.is_sequence_constructor and not arg.target.mult_factor:
args.extend(arg.target.args)
else:
args.append(arg)
node.args[:] = args
self._calculate_const(node)
return node
def visit_PrimaryCmpNode(self, node):
# calculate constant partial results in the comparison cascade
self.visitchildren(node, ['operand1'])
left_node = node.operand1
cmp_node = node
while cmp_node is not None:
self.visitchildren(cmp_node, ['operand2'])
right_node = cmp_node.operand2
cmp_node.constant_result = not_a_constant
if left_node.has_constant_result() and right_node.has_constant_result():
try:
cmp_node.calculate_cascaded_constant_result(left_node.constant_result)
except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
pass # ignore all 'normal' errors here => no constant result
left_node = right_node
cmp_node = cmp_node.cascade
if not node.cascade:
if node.has_constant_result():
return self._bool_node(node, node.constant_result)
return node
# collect partial cascades: [[value, CmpNode...], [value, CmpNode, ...], ...]
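        # For example, given a hypothetical chain "a < 1 < 2 < b": the constant
        # middle comparison "1 < 2" is True, so the chain splits into the two
        # runtime cascades "a < 1" and "2 < b", which are recombined with 'and'
        # further down.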
cascades = [[node.operand1]]
final_false_result = []
def split_cascades(cmp_node):
if cmp_node.has_constant_result():
if not cmp_node.constant_result:
# False => short-circuit
final_false_result.append(self._bool_node(cmp_node, False))
return
else:
# True => discard and start new cascade
cascades.append([cmp_node.operand2])
else:
# not constant => append to current cascade
cascades[-1].append(cmp_node)
if cmp_node.cascade:
split_cascades(cmp_node.cascade)
split_cascades(node)
cmp_nodes = []
for cascade in cascades:
if len(cascade) < 2:
continue
cmp_node = cascade[1]
pcmp_node = ExprNodes.PrimaryCmpNode(
cmp_node.pos,
operand1=cascade[0],
operator=cmp_node.operator,
operand2=cmp_node.operand2,
constant_result=not_a_constant)
cmp_nodes.append(pcmp_node)
last_cmp_node = pcmp_node
for cmp_node in cascade[2:]:
last_cmp_node.cascade = cmp_node
last_cmp_node = cmp_node
last_cmp_node.cascade = None
if final_false_result:
# last cascade was constant False
cmp_nodes.append(final_false_result[0])
elif not cmp_nodes:
# only constants, but no False result
return self._bool_node(node, True)
node = cmp_nodes[0]
if len(cmp_nodes) == 1:
if node.has_constant_result():
return self._bool_node(node, node.constant_result)
else:
for cmp_node in cmp_nodes[1:]:
node = ExprNodes.BoolBinopNode(
node.pos,
operand1=node,
operator='and',
operand2=cmp_node,
constant_result=not_a_constant)
return node
def visit_CondExprNode(self, node):
self._calculate_const(node)
if not node.test.has_constant_result():
return node
if node.test.constant_result:
return node.true_val
else:
return node.false_val
def visit_IfStatNode(self, node):
self.visitchildren(node)
# eliminate dead code based on constant condition results
if_clauses = []
for if_clause in node.if_clauses:
condition = if_clause.condition
if condition.has_constant_result():
if condition.constant_result:
# always true => subsequent clauses can safely be dropped
node.else_clause = if_clause.body
break
# else: false => drop clause
else:
# unknown result => normal runtime evaluation
if_clauses.append(if_clause)
if if_clauses:
node.if_clauses = if_clauses
return node
elif node.else_clause:
return node.else_clause
else:
return Nodes.StatListNode(node.pos, stats=[])
def visit_SliceIndexNode(self, node):
self._calculate_const(node)
# normalise start/stop values
if node.start is None or node.start.constant_result is None:
start = node.start = None
else:
start = node.start.constant_result
if node.stop is None or node.stop.constant_result is None:
stop = node.stop = None
else:
stop = node.stop.constant_result
# cut down sliced constant sequences
if node.constant_result is not not_a_constant:
base = node.base
if base.is_sequence_constructor and base.mult_factor is None:
base.args = base.args[start:stop]
return base
elif base.is_string_literal:
base = base.as_sliced_node(start, stop)
if base is not None:
return base
return node
def visit_ComprehensionNode(self, node):
self.visitchildren(node)
if isinstance(node.loop, Nodes.StatListNode) and not node.loop.stats:
# loop was pruned already => transform into literal
if node.type is Builtin.list_type:
return ExprNodes.ListNode(
node.pos, args=[], constant_result=[])
elif node.type is Builtin.set_type:
return ExprNodes.SetNode(
node.pos, args=[], constant_result=set())
elif node.type is Builtin.dict_type:
return ExprNodes.DictNode(
node.pos, key_value_pairs=[], constant_result={})
return node
def visit_ForInStatNode(self, node):
self.visitchildren(node)
sequence = node.iterator.sequence
if isinstance(sequence, ExprNodes.SequenceNode):
if not sequence.args:
if node.else_clause:
return node.else_clause
else:
# don't break list comprehensions
return Nodes.StatListNode(node.pos, stats=[])
# iterating over a list literal? => tuples are more efficient
if isinstance(sequence, ExprNodes.ListNode):
node.iterator.sequence = sequence.as_tuple()
return node
def visit_WhileStatNode(self, node):
self.visitchildren(node)
if node.condition and node.condition.has_constant_result():
if node.condition.constant_result:
node.condition = None
node.else_clause = None
else:
return node.else_clause
return node
def visit_ExprStatNode(self, node):
self.visitchildren(node)
if not isinstance(node.expr, ExprNodes.ExprNode):
# ParallelRangeTransform does this ...
return node
# drop unused constant expressions
if node.expr.has_constant_result():
return None
return node
# in the future, other nodes can have their own handler method here
# that can replace them with a constant result node
visit_Node = Visitor.VisitorTransform.recurse_to_children
class FinalOptimizePhase(Visitor.CythonTransform, Visitor.NodeRefCleanupMixin):
"""
This visitor handles several commuting optimizations, and is run
just before the C code generation phase.
The optimizations currently implemented in this class are:
- eliminate None assignment and refcounting for first assignment.
- isinstance -> typecheck for cdef types
- eliminate checks for None and/or types that became redundant after tree changes
- replace Python function calls that look like method calls by a faster PyMethodCallNode
"""
def visit_SingleAssignmentNode(self, node):
"""Avoid redundant initialisation of local variables before their
first assignment.
"""
self.visitchildren(node)
if node.first:
lhs = node.lhs
lhs.lhs_of_first_assignment = True
return node
def visit_SimpleCallNode(self, node):
"""
Replace generic calls to isinstance(x, type) by a more efficient type check.
Replace likely Python method calls by a specialised PyMethodCallNode.
"""
self.visitchildren(node)
function = node.function
if function.type.is_cfunction and function.is_name:
if function.name == 'isinstance' and len(node.args) == 2:
type_arg = node.args[1]
if type_arg.type.is_builtin_type and type_arg.type.name == 'type':
cython_scope = self.context.cython_scope
function.entry = cython_scope.lookup('PyObject_TypeCheck')
function.type = function.entry.type
PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type)
node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
elif (self.current_directives.get("optimize.unpack_method_calls")
and node.is_temp and function.type.is_pyobject):
# optimise simple Python methods calls
if isinstance(node.arg_tuple, ExprNodes.TupleNode) and not (
node.arg_tuple.mult_factor or (node.arg_tuple.is_literal and node.arg_tuple.args)):
# simple call, now exclude calls to objects that are definitely not methods
may_be_a_method = True
if function.type is Builtin.type_type:
may_be_a_method = False
elif function.is_name:
if function.entry.is_builtin:
may_be_a_method = False
elif function.cf_state:
# local functions/classes are definitely not methods
non_method_nodes = (ExprNodes.PyCFunctionNode, ExprNodes.ClassNode, ExprNodes.Py3ClassNode)
may_be_a_method = any(
assignment.rhs and not isinstance(assignment.rhs, non_method_nodes)
for assignment in function.cf_state)
if may_be_a_method:
node = self.replace(node, ExprNodes.PyMethodCallNode.from_node(
node, function=function, arg_tuple=node.arg_tuple, type=node.type))
return node
def visit_PyTypeTestNode(self, node):
"""Remove tests for alternatively allowed None values from
type tests when we know that the argument cannot be None
anyway.
"""
self.visitchildren(node)
if not node.notnone:
if not node.arg.may_be_none():
node.notnone = True
return node
def visit_NoneCheckNode(self, node):
"""Remove None checks from expressions that definitely do not
carry a None value.
"""
self.visitchildren(node)
if not node.arg.may_be_none():
return node.arg
return node
class ConsolidateOverflowCheck(Visitor.CythonTransform):
"""
This class facilitates the sharing of overflow checking among all nodes
of a nested arithmetic expression. For example, given the expression
    a*b + c, where a, b, and c are all possibly overflowing ints, the entire
sequence will be evaluated and the overflow bit checked only at the end.
"""
overflow_bit_node = None
def visit_Node(self, node):
if self.overflow_bit_node is not None:
saved = self.overflow_bit_node
self.overflow_bit_node = None
self.visitchildren(node)
self.overflow_bit_node = saved
else:
self.visitchildren(node)
return node
def visit_NumBinopNode(self, node):
if node.overflow_check and node.overflow_fold:
top_level_overflow = self.overflow_bit_node is None
if top_level_overflow:
self.overflow_bit_node = node
else:
node.overflow_bit_node = self.overflow_bit_node
node.overflow_check = False
self.visitchildren(node)
if top_level_overflow:
self.overflow_bit_node = None
else:
self.visitchildren(node)
return node
|
madjar/cython
|
Cython/Compiler/Optimize.py
|
Python
|
apache-2.0
| 182,733
|
[
"VisIt"
] |
9dbaaf411ff0d515362857d112ea32f0193ab442008c1b567684d1e6e0a2efb0
|
#!/usr/bin/python
#
# Copyright (c) 2012, Psiphon Inc.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from psi_api import Psiphon3Server
from psi_ssh_connection import SSHConnection, OSSHConnection
import json
import os
import subprocess
import optparse
DATA_FILENAME = 'psi_client.dat'
CLIENT_VERSION = 1
CLIENT_PLATFORM = 'Python'
SOCKS_PORT = 1080
LOCAL_HOST_IP = '127.0.0.1'
GLOBAL_HOST_IP = '0.0.0.0'
class Data(object):
def __init__(self, data):
self.data = data
@staticmethod
def load():
try:
with open(DATA_FILENAME, 'r') as data_file:
data = Data(json.loads(data_file.read()))
# Validate
data.servers()[0]
data.propagation_channel_id()
data.sponsor_id()
except (IOError, ValueError, KeyError, IndexError, TypeError) as error:
print '\nPlease obtain a valid %s file and try again.\n' % (DATA_FILENAME,)
raise
return data
def save(self):
with open(DATA_FILENAME+'.new', 'w') as data_file:
data_file.write(json.dumps(self.data))
os.rename(DATA_FILENAME+'.new', DATA_FILENAME)
def servers(self):
return self.data['servers']
def propagation_channel_id(self):
return self.data['propagation_channel_id']
def sponsor_id(self):
return self.data['sponsor_id']
def move_first_server_entry_to_bottom(self):
servers = self.servers()
if len(servers) > 1:
servers.append(servers.pop(0))
return True
else:
return False
def do_handshake(server, data, relay):
handshake_response = server.handshake(relay)
# handshake might update the server list with newly discovered servers
data.save()
return handshake_response
def print_sponsor_message(handshake_response):
home_pages = handshake_response['Homepage']
if len(home_pages) > 0:
print '\nPlease visit our sponsor\'s homepage%s:' % ('s' if len(home_pages) > 1 else '',)
for home_page in home_pages:
print home_page
print ''
def make_ssh_connection(server, relay, bind_all):
if bind_all:
listen_address=GLOBAL_HOST_IP
else:
listen_address=LOCAL_HOST_IP
if relay == 'OSSH':
ssh_connection = OSSHConnection(server, SOCKS_PORT, str(listen_address))
elif relay == 'SSH':
ssh_connection = SSHConnection(server, SOCKS_PORT, str(listen_address))
else:
assert False
ssh_connection.connect()
return ssh_connection
def connect_to_server(data, relay, bind_all, test=False):
assert relay in ['SSH', 'OSSH']
server = Psiphon3Server(data.servers(), data.propagation_channel_id(), data.sponsor_id(), CLIENT_VERSION, CLIENT_PLATFORM)
if server.relay_not_supported(relay):
raise Exception('Server does not support %s' % relay)
handshake_performed = False
if not server.can_attempt_relay_before_handshake(relay):
handshake_response = do_handshake(server, data, relay)
handshake_performed = True
ssh_connection = make_ssh_connection(server, relay, bind_all)
ssh_connection.test_connection()
server.set_socks_proxy(SOCKS_PORT)
if not handshake_performed:
try:
handshake_response = do_handshake(server, data, relay)
handshake_performed = True
except Exception as e:
print 'DEBUG: handshake request: ' + str(e)
connected_performed = False
if handshake_performed:
print_sponsor_message(handshake_response)
try:
server.connected(relay)
connected_performed = True
except Exception as e:
print 'DEBUG: connected request: ' + str(e)
if test:
print 'Testing connection to ip %s' % server.ip_address
ssh_connection.disconnect_on_success(test_site=test)
else:
print 'Press Ctrl-C to terminate.'
try:
ssh_connection.wait_for_disconnect()
except KeyboardInterrupt as e:
if connected_performed:
try:
server.disconnected(relay)
except Exception as e:
print 'DEBUG: disconnected request: ' + str(e)
ssh_connection.disconnect()
def _test_executable(path):
if os.path.isfile(path):
try:
with open(os.devnull, 'w') as devnull:
subprocess.call(path, stdout=devnull, stderr=devnull)
return True
except OSError:
pass
return False
def connect(bind_all, test=False):
while True:
data = Data.load()
try:
relay = 'SSH'
# NOTE that this path is also hard-coded in psi_ssh_connection
ossh_path = './ssh'
if _test_executable(ossh_path):
relay = 'OSSH'
else:
print '%s is not a valid executable. Using standard ssh.' % (ossh_path,)
connect_to_server(data, relay, bind_all, test)
break
except Exception as error:
print 'DEBUG: %s connection: %s' % (relay, str(error))
if test:
break
if not data.move_first_server_entry_to_bottom():
print 'DEBUG: could not reorder servers'
break
data.save()
print 'Trying next server...'
def test_all_servers(bind_all=False):
data = Data.load()
for _ in data.servers():
connect(bind_all, test=True)
print 'DEBUG: moving server to bottom'
if not data.move_first_server_entry_to_bottom():
print "could not reorder servers"
break
data.save()
if __name__ == "__main__":
parser = optparse.OptionParser('usage: %prog [options]')
parser.add_option("--expose", "-e", dest="expose",
action="store_true", help="Expose SOCKS proxy to the network")
parser.add_option("--test-servers", "-t", dest="test_servers",
action="store_true", help="Test all servers")
(options, _) = parser.parse_args()
if options.test_servers:
test_all_servers()
elif options.expose:
connect(True)
else:
connect(False)
|
MewX/Psiphon3-for-Linux
|
pyclient/psi_client.py
|
Python
|
gpl-2.0
| 6,909
|
[
"VisIt"
] |
3c0b4c25db017a7f11c62cf24410f139e141dafc491c8711fd81e6dbff598c78
|
"""
Acceptance tests for Studio related to course reruns.
"""
import random
from nose.plugins.attrib import attr
from bok_choy.promise import EmptyPromise
from ..pages.studio.index import DashboardPage
from ..pages.studio.course_rerun import CourseRerunPage
from ..pages.studio.overview import CourseOutlinePage
from ..pages.lms.courseware import CoursewarePage
from ..fixtures.course import XBlockFixtureDesc
from .base_studio_test import StudioCourseTest
@attr('shard_2')
class CourseRerunTest(StudioCourseTest):
"""
Feature: Courses can be rerun
"""
__test__ = True
SECTION_NAME = 'Rerun Section'
SUBSECITON_NAME = 'Rerun Subsection'
UNIT_NAME = 'Rerun Unit'
COMPONENT_NAME = 'Rerun Component'
COMPONENT_CONTENT = 'Test Content'
def setUp(self):
"""
Login as global staff because that's the only way to rerun a course.
"""
super(CourseRerunTest, self).setUp(is_staff=True)
self.dashboard_page = DashboardPage(self.browser)
def populate_course_fixture(self, course_fixture):
"""
Create a sample course with one section, one subsection, one unit, and one component.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', self.SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', self.SUBSECITON_NAME).add_children(
XBlockFixtureDesc('vertical', self.UNIT_NAME).add_children(
XBlockFixtureDesc('html', self.COMPONENT_NAME, self.COMPONENT_CONTENT)
)
)
)
)
def test_course_rerun(self):
"""
        Scenario: Courses can be rerun
        Given I have a course with a section, subsection, vertical, and html component with content 'Test Content'
When I visit the course rerun page
And I type 'test_rerun' in the course run field
And I click Create Rerun
And I visit the course listing page
And I wait for all courses to finish processing
And I click on the course with run 'test_rerun'
Then I see a rerun notification on the course outline page
And when I click 'Dismiss' on the notification
Then I do not see a rerun notification
And when I expand the subsection and click on the unit
And I click 'View Live Version'
Then I see one html component with the content 'Test Content'
"""
course_info = (self.course_info['org'], self.course_info['number'], self.course_info['run'])
self.dashboard_page.visit()
self.dashboard_page.create_rerun(self.course_info['display_name'])
rerun_page = CourseRerunPage(self.browser, *course_info)
rerun_page.wait_for_page()
course_run = 'test_rerun_' + str(random.randrange(1000000, 9999999))
rerun_page.course_run = course_run
rerun_page.create_rerun()
def finished_processing():
self.dashboard_page.visit()
return not self.dashboard_page.has_processing_courses
EmptyPromise(finished_processing, "Rerun finished processing", try_interval=5, timeout=60).fulfill()
self.dashboard_page.click_course_run(course_run)
outline_page = CourseOutlinePage(self.browser, *course_info)
outline_page.wait_for_page()
self.assertTrue(outline_page.has_rerun_notification)
outline_page.dismiss_rerun_notification()
EmptyPromise(lambda: not outline_page.has_rerun_notification, "Rerun notification dismissed").fulfill()
subsection = outline_page.section(self.SECTION_NAME).subsection(self.SUBSECITON_NAME)
subsection.toggle_expand()
unit_page = subsection.unit(self.UNIT_NAME).go_to()
unit_page.view_published_version()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 1)
self.assertEqual(courseware.xblock_component_html_content(), self.COMPONENT_CONTENT)
|
epam-mooc/edx-platform
|
common/test/acceptance/tests/test_studio_rerun.py
|
Python
|
agpl-3.0
| 4,115
|
[
"VisIt"
] |
ed05acd520abe00fcbf99652478ca9210247260f50213b0c4bf579f95557d683
|
"""
A program to solve the 2D Klein Gordon equation using a
second order semi-explicit method
More information on visualization can be found on the Mayavi
website, in particular:
http://github.enthought.com/mayavi/mayavi/mlab.html
which was last checked on 6 April 2012
"""
import math
import numpy
from mayavi import mlab
import matplotlib.pyplot as plt
import time
# Grid
Lx=3.0 # Period 2*pi*Lx
Ly=3.0 # Period 2*pi*Ly
Nx=512 # Number of harmonics
Ny=512 # Number of harmonics
Nt=200 # Number of time slices
tmax=5.0 # Maximum time
dt=tmax/Nt # time step
plotgap=10 # time steps between plots
Es= 1.0 # focusing (+1) or defocusing (-1) parameter
numplots=Nt/plotgap # number of plots to make
x = [i*2.0*math.pi*(Lx/Nx) for i in xrange(-Nx/2,1+Nx/2)]
y = [i*2.0*math.pi*(Ly/Ny) for i in xrange(-Ny/2,1+Ny/2)]
k_x = (1.0/Lx)*numpy.array([complex(0,1)*n for n in range(0,Nx/2) \
+ [0] + range(-Nx/2+1,0)])
k_y = (1.0/Ly)*numpy.array([complex(0,1)*n for n in range(0,Ny/2) \
+ [0] + range(-Ny/2+1,0)])
kxm=numpy.zeros((Nx,Ny), dtype=complex)
kym=numpy.zeros((Nx,Ny), dtype=complex)
xx=numpy.zeros((Nx,Ny), dtype=float)
yy=numpy.zeros((Nx,Ny), dtype=float)
for i in xrange(Nx):
for j in xrange(Ny):
kxm[i,j] = k_x[i]
kym[i,j] = k_y[j]
xx[i,j] = x[i]
yy[i,j] = y[j]
# allocate arrays
unew=numpy.zeros((Nx,Ny), dtype=float)
u=numpy.zeros((Nx,Ny), dtype=float)
uold=numpy.zeros((Nx,Ny), dtype=float)
vnew=numpy.zeros((Nx,Ny), dtype=complex)
v=numpy.zeros((Nx,Ny), dtype=complex)
vold=numpy.zeros((Nx,Ny), dtype=complex)
ux=numpy.zeros((Nx,Ny), dtype=float)
uy=numpy.zeros((Nx,Ny), dtype=float)
vx=numpy.zeros((Nx,Ny), dtype=complex)
vy=numpy.zeros((Nx,Ny), dtype=complex)
Kineticenergy=numpy.zeros((Nx,Ny), dtype=complex)
Potentialenergy=numpy.zeros((Nx,Ny), dtype=complex)
Strainenergy=numpy.zeros((Nx,Ny), dtype=complex)
EnKin=numpy.zeros((numplots), dtype=float)
EnPot=numpy.zeros((numplots), dtype=float)
EnStr=numpy.zeros((numplots), dtype=float)
En=numpy.zeros((numplots), dtype=float)
Enchange=numpy.zeros((numplots-1),dtype=float)
tdata=numpy.zeros((numplots), dtype=float)
nonlin=numpy.zeros((Nx,Ny), dtype=float)
nonlinhat=numpy.zeros((Nx,Ny), dtype=complex)
u=0.1*numpy.exp(-(xx**2 + yy**2))*numpy.sin(10*xx+12*yy)
uold=u
v=numpy.fft.fft2(u)
vold=numpy.fft.fft2(uold)
src = mlab.surf(xx,yy,u,colormap='YlGnBu',warp_scale='auto')
mlab.scalarbar(object=src)
mlab.xlabel('x',object=src)
mlab.ylabel('y',object=src)
mlab.zlabel('u',object=src)
# initial energy
vx=0.5*kxm*(v+vold)
vy=0.5*kym*(v+vold)
ux=numpy.fft.ifft2(vx)
uy=numpy.fft.ifft2(vy)
Kineticenergy=0.5*((u-uold)/dt)**2
Strainenergy=0.5*(ux)**2 + 0.5*(uy)**2
Potentialenergy=0.5*(0.5*(u+uold))**2 - Es*0.25*(0.5*(u+uold))**4
Kineticenergy=numpy.fft.fft2(Kineticenergy)
Strainenergy=numpy.fft.fft2(Strainenergy)
Potentialenergy=numpy.fft.fft2(Potentialenergy)
EnKin[0]=numpy.real(Kineticenergy[0,0])
EnPot[0]=numpy.real(Potentialenergy[0,0])
EnStr[0]=numpy.real(Strainenergy[0,0])
En[0]=EnStr[0]+EnPot[0]+EnKin[0]
EnO=En[0]
t=0.0
tdata[0]=t
plotnum=0
#solve pde and plot results
for nt in xrange(numplots-1):
for n in xrange(plotgap):
nonlin=u**3
nonlinhat=numpy.fft.fft2(nonlin)
vnew=( (0.25*(kxm**2 + kym**2 - 1)*(2*v+vold)
+(2*v-vold)/(dt*dt) +Es*nonlinhat)/
(1/(dt*dt) - (kxm**2 + kym**2 -1)*0.25 ) )
unew=numpy.real(numpy.fft.ifft2(vnew))
t+=dt
# update old terms
vold=v
v=vnew
uold=u
u=unew
plotnum+=1
src.mlab_source.scalars = unew
vx=0.5*kxm*(v+vold)
vy=0.5*kym*(v+vold)
ux=numpy.fft.ifft2(vx)
uy=numpy.fft.ifft2(vy)
Kineticenergy=0.5*((u-uold)/dt)**2
Strainenergy=0.5*(ux)**2 + 0.5*(uy)**2
Potentialenergy=0.5*(0.5*(u+uold))**2 - Es*0.25*(0.5*(u+uold))**4
Kineticenergy=numpy.fft.fft2(Kineticenergy)
Strainenergy=numpy.fft.fft2(Strainenergy)
Potentialenergy=numpy.fft.fft2(Potentialenergy)
EnKin[plotnum]=numpy.real(Kineticenergy[0,0])
EnPot[plotnum]=numpy.real(Potentialenergy[0,0])
EnStr[plotnum]=numpy.real(Strainenergy[0,0])
En[plotnum]=EnStr[plotnum]+EnPot[plotnum]+EnKin[plotnum]
Enchange[plotnum-1]=numpy.log(abs(1-En[plotnum]/EnO))
tdata[plotnum]=t
plt.figure()
plt.plot(tdata,En,'r+',tdata,EnKin,'b:',tdata,EnPot,'g-.',tdata,EnStr,'y--')
plt.xlabel('Time')
plt.ylabel('Energy')
plt.legend(('Total', 'Kinetic','Potential','Strain'))
plt.title('Time Dependence of Energy Components')
plt.show()
plt.figure()
plt.plot(Enchange,'r-')
plt.title('Time Dependence of Change in Total Energy')
plt.show()
|
openmichigan/PSNM
|
PythonPrograms/Programs/PythonCode/KleinGordon2D.py
|
Python
|
bsd-2-clause
| 4,657
|
[
"Mayavi"
] |
e7d9ae5de1fc4574202b230b9ac8c244873703fce4791b5fed196d77b15be910
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El, time
m = 500
n = 250
display = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
def Rectang(height,width):
A = El.DistMatrix()
El.Uniform( A, height, width )
return A
A = Rectang(m,n)
b = El.DistMatrix()
El.Gaussian( b, m, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
startNNLS = time.clock()
x = El.NNLS( A, b )
endNNLS = time.clock()
if worldRank == 0:
print "NNLS time:", endNNLS-startNNLS, "seconds"
if display:
El.Display( x, "x" )
e = El.DistMatrix()
El.Copy( b, e )
El.Gemv( El.NORMAL, -1., A, x, 1., e )
if display:
El.Display( e, "e" )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
print "|| A x - b ||_2 =", eTwoNorm
startLS = time.clock()
xLS = El.LeastSquares( A, b )
endLS = time.clock()
if worldRank == 0:
print "LS time:", endLS-startLS, "seconds"
El.Copy( b, e )
El.Gemv( El.NORMAL, -1., A, xLS, 1., e )
if display:
El.Display( e, "e" )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
print "|| A x_{LS} - b ||_2 =", eTwoNorm
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
|
justusc/Elemental
|
examples/interface/NNLSDense.py
|
Python
|
bsd-3-clause
| 1,406
|
[
"Gaussian"
] |
1411467af6c482986daf7a2d5776e22826e74c51b6255156cae869bbd18390ab
|
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test CF-NetCDF file loading and saving.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# Import iris tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import os
import os.path
import shutil
import stat
import tempfile
import biggus
import netCDF4 as nc
import numpy as np
import numpy.ma as ma
import iris
import iris.analysis.trajectory
import iris.fileformats._pyke_rules.compiled_krb.fc_rules_cf_fc as pyke_rules
import iris.fileformats.netcdf
import iris.std_names
import iris.util
import iris.coord_systems as icoord_systems
from iris.tests import mock
import iris.tests.stock as stock
@tests.skip_data
class TestNetCDFLoad(tests.IrisTest):
def test_monotonic(self):
cubes = iris.load(tests.get_data_path(
('NetCDF', 'testing', 'test_monotonic_coordinate.nc')))
cubes = sorted(cubes, key=lambda cube: cube.var_name)
self.assertCML(cubes, ('netcdf', 'netcdf_monotonic.cml'))
def test_load_global_xyt_total(self):
# Test loading single xyt CF-netCDF file.
cube = iris.load_cube(
tests.get_data_path(('NetCDF', 'global', 'xyt',
'SMALL_total_column_co2.nc')))
self.assertCML(cube, ('netcdf', 'netcdf_global_xyt_total.cml'))
def test_load_global_xyt_hires(self):
# Test loading another single xyt CF-netCDF file.
cube = iris.load_cube(tests.get_data_path(
('NetCDF', 'global', 'xyt', 'SMALL_hires_wind_u_for_ipcc4.nc')))
self.assertCML(cube, ('netcdf', 'netcdf_global_xyt_hires.cml'))
def test_missing_time_bounds(self):
# Check we can cope with a missing bounds variable.
with self.temp_filename(suffix='nc') as filename:
# Tweak a copy of the test data file to rename (we can't delete)
# the time bounds variable.
src = tests.get_data_path(('NetCDF', 'global', 'xyt',
'SMALL_hires_wind_u_for_ipcc4.nc'))
shutil.copyfile(src, filename)
dataset = nc.Dataset(filename, mode='a')
dataset.renameVariable('time_bnds', 'foo')
dataset.close()
cube = iris.load_cube(filename, 'eastward_wind')
def test_load_global_xyzt_gems(self):
# Test loading single xyzt CF-netCDF file (multi-cube).
cubes = iris.load(tests.get_data_path(('NetCDF', 'global', 'xyz_t',
'GEMS_CO2_Apr2006.nc')))
cubes = sorted(cubes, key=lambda cube: cube.name())
self.assertCML(cubes, ('netcdf', 'netcdf_global_xyzt_gems.cml'))
        # Check the masked array fill value is propagated through the data
# manager loading.
lnsp = cubes[1]
self.assertTrue(ma.isMaskedArray(lnsp.data))
self.assertEqual(-32767.0, lnsp.data.fill_value)
def test_load_global_xyzt_gems_iter(self):
# Test loading stepped single xyzt CF-netCDF file (multi-cube).
for i, cube in enumerate(sorted(
iris.load(
tests.get_data_path(('NetCDF', 'global', 'xyz_t',
'GEMS_CO2_Apr2006.nc'))),
key=lambda cube: cube.name())):
self.assertCML(cube, ('netcdf',
'netcdf_global_xyzt_gems_iter_%d.cml' % i))
def test_load_rotated_xy_land(self):
# Test loading single xy rotated pole CF-netCDF file.
cube = iris.load_cube(tests.get_data_path(
('NetCDF', 'rotated', 'xy', 'rotPole_landAreaFraction.nc')))
# Make sure the AuxCoords have lazy data.
self.assertIsInstance(cube.coord('latitude')._points, biggus.Array)
self.assertCML(cube, ('netcdf', 'netcdf_rotated_xy_land.cml'))
def test_load_rotated_xyt_precipitation(self):
# Test loading single xyt rotated pole CF-netCDF file.
cube = iris.load_cube(
tests.get_data_path(('NetCDF', 'rotated', 'xyt',
'small_rotPole_precipitation.nc')))
self.assertCML(cube, ('netcdf',
'netcdf_rotated_xyt_precipitation.cml'))
def test_load_tmerc_grid_and_clim_bounds(self):
# Test loading a single CF-netCDF file with a transverse Mercator
# grid_mapping and a time variable with climatology.
cube = iris.load_cube(
tests.get_data_path(('NetCDF', 'transverse_mercator',
'tmean_1910_1910.nc')))
self.assertCML(cube, ('netcdf', 'netcdf_tmerc_and_climatology.cml'))
def test_load_tmerc_grid_with_projection_origin(self):
# Test loading a single CF-netCDF file with a transverse Mercator
# grid_mapping that uses longitude_of_projection_origin and
# scale_factor_at_projection_origin instead of
# longitude_of_central_meridian and scale_factor_at_central_meridian.
cube = iris.load_cube(
tests.get_data_path(('NetCDF', 'transverse_mercator',
'projection_origin_attributes.nc')))
expected = icoord_systems.TransverseMercator(
latitude_of_projection_origin=49.0,
longitude_of_central_meridian=-2.0,
false_easting=400000.0,
false_northing=-100000.0,
scale_factor_at_central_meridian=0.9996012717,
ellipsoid=icoord_systems.GeogCS(
semi_major_axis=6377563.396, semi_minor_axis=6356256.91))
self.assertEqual(cube.coord('projection_x_coordinate').coord_system,
expected)
self.assertEqual(cube.coord('projection_y_coordinate').coord_system,
expected)
def test_load_lcc_grid(self):
# Test loading a single CF-netCDF file with Lambert conformal conic
# grid mapping.
cube = iris.load_cube(
tests.get_data_path(('NetCDF', 'lambert_conformal',
'test_lcc.nc')))
self.assertCML(cube, ('netcdf', 'netcdf_lcc.cml'))
def test_missing_climatology(self):
# Check we can cope with a missing climatology variable.
with self.temp_filename(suffix='nc') as filename:
# Tweak a copy of the test data file to rename (we can't delete)
# the climatology variable.
src = tests.get_data_path(('NetCDF', 'transverse_mercator',
'tmean_1910_1910.nc'))
shutil.copyfile(src, filename)
dataset = nc.Dataset(filename, mode='a')
dataset.renameVariable('climatology_bounds', 'foo')
dataset.close()
cube = iris.load_cube(filename, 'Mean temperature')
def test_load_merc_grid(self):
# Test loading a single CF-netCDF file with a Mercator grid_mapping
cube = iris.load_cube(
tests.get_data_path(('NetCDF', 'mercator',
'toa_brightness_temperature.nc')))
self.assertCML(cube, ('netcdf', 'netcdf_merc.cml'))
def test_load_stereographic_grid(self):
# Test loading a single CF-netCDF file with a stereographic
# grid_mapping.
cube = iris.load_cube(
tests.get_data_path(('NetCDF', 'stereographic',
'toa_brightness_temperature.nc')))
self.assertCML(cube, ('netcdf', 'netcdf_stereo.cml'))
def test_cell_methods(self):
# Test exercising CF-netCDF cell method parsing.
cubes = iris.load(tests.get_data_path(('NetCDF', 'testing',
'cell_methods.nc')))
# TEST_COMPAT mod - new cube merge doesn't sort in the same way - test
# can pass by manual sorting...
cubes = iris.cube.CubeList(sorted(cubes, key=lambda cube: cube.name()))
# TEST_COMPAT mod - different versions of the Python module
# `netCDF4` give different data arrays: MaskedArray vs ndarray
# Since we're not interested in the data we can just normalise
# to MaskedArray (to minimise the change).
for cube in cubes:
# Force the fill value to be the default netCDF fill value
# to ensure it matches the previous behaviour.
cube.data = ma.masked_equal(cube.data, -2147483647)
self.assertCML(cubes, ('netcdf', 'netcdf_cell_methods.cml'))
def test_deferred_loading(self):
# Test exercising CF-netCDF deferred loading and deferred slicing.
# shape (31, 161, 320)
cube = iris.load_cube(tests.get_data_path(
('NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc')))
# Consecutive index on same dimension.
self.assertCML(cube[0], ('netcdf', 'netcdf_deferred_index_0.cml'))
self.assertCML(cube[0][0], ('netcdf', 'netcdf_deferred_index_1.cml'))
self.assertCML(cube[0][0][0], ('netcdf',
'netcdf_deferred_index_2.cml'))
# Consecutive slice on same dimension.
self.assertCML(cube[0:20], ('netcdf', 'netcdf_deferred_slice_0.cml'))
self.assertCML(cube[0:20][0:10], ('netcdf',
'netcdf_deferred_slice_1.cml'))
self.assertCML(cube[0:20][0:10][0:5], ('netcdf',
'netcdf_deferred_slice_2.cml'))
# Consecutive tuple index on same dimension.
self.assertCML(cube[(0, 8, 4, 2, 14, 12), ],
('netcdf', 'netcdf_deferred_tuple_0.cml'))
self.assertCML(cube[(0, 8, 4, 2, 14, 12), ][(0, 2, 4, 1), ],
('netcdf', 'netcdf_deferred_tuple_1.cml'))
subcube = cube[(0, 8, 4, 2, 14, 12), ][(0, 2, 4, 1), ][(1, 3), ]
self.assertCML(subcube, ('netcdf', 'netcdf_deferred_tuple_2.cml'))
# Consecutive mixture on same dimension.
self.assertCML(cube[0:20:2][(9, 5, 8, 0), ][3],
('netcdf', 'netcdf_deferred_mix_0.cml'))
self.assertCML(cube[(2, 7, 3, 4, 5, 0, 9, 10), ][2:6][3],
('netcdf', 'netcdf_deferred_mix_0.cml'))
self.assertCML(cube[0][(0, 2), (1, 3)],
('netcdf', 'netcdf_deferred_mix_1.cml'))
def test_units(self):
# Test exercising graceful cube and coordinate units loading.
cube0, cube1 = sorted(iris.load(tests.get_data_path(('NetCDF',
'testing',
'units.nc'))),
key=lambda cube: cube.var_name)
self.assertCML(cube0, ('netcdf', 'netcdf_units_0.cml'))
self.assertCML(cube1, ('netcdf', 'netcdf_units_1.cml'))
class TestNetCDFCRS(tests.IrisTest):
def setUp(self):
class Var(object):
pass
self.grid = Var()
def test_lat_lon_major_minor(self):
major = 63781370
minor = 63567523
self.grid.semi_major_axis = major
self.grid.semi_minor_axis = minor
crs = pyke_rules.build_coordinate_system(self.grid)
self.assertEqual(crs, icoord_systems.GeogCS(major, minor))
def test_lat_lon_earth_radius(self):
earth_radius = 63700000
self.grid.earth_radius = earth_radius
crs = pyke_rules.build_coordinate_system(self.grid)
self.assertEqual(crs, icoord_systems.GeogCS(earth_radius))
class SaverPermissions(tests.IrisTest):
def test_noexist_directory(self):
# Test capture of suitable exception raised on writing to a
# non-existent directory.
dir_name = os.path.join(tempfile.gettempdir(), 'non_existent_dir')
fnme = os.path.join(dir_name, 'tmp.nc')
with self.assertRaises(IOError):
with iris.fileformats.netcdf.Saver(fnme, 'NETCDF4'):
pass
def test_bad_permissions(self):
# Non-exhaustive check that wrong permissions results in a suitable
# exception being raised.
dir_name = tempfile.mkdtemp()
fnme = os.path.join(dir_name, 'tmp.nc')
try:
os.chmod(dir_name, stat.S_IREAD)
with self.assertRaises(IOError):
iris.fileformats.netcdf.Saver(fnme, 'NETCDF4')
self.assertFalse(os.path.exists(fnme))
finally:
os.rmdir(dir_name)
@tests.skip_data
class TestSave(tests.IrisTest):
def test_hybrid(self):
cube = stock.realistic_4d()
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out, netcdf_format='NETCDF3_CLASSIC')
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf',
'netcdf_save_realistic_4d.cdl'))
def test_no_hybrid(self):
cube = stock.realistic_4d()
cube.remove_aux_factory(cube.aux_factories[0])
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out, netcdf_format='NETCDF3_CLASSIC')
# Check the netCDF file against CDL expected output.
self.assertCDL(
file_out, ('netcdf', 'netcdf_save_realistic_4d_no_hybrid.cdl'))
def test_scalar_cube(self):
cube = stock.realistic_4d()[0, 0, 0, 0]
with self.temp_filename(suffix='.nc') as filename:
iris.save(cube, filename, netcdf_format='NETCDF3_CLASSIC')
self.assertCDL(filename, ('netcdf',
'netcdf_save_realistic_0d.cdl'))
def test_no_name_cube(self):
# Cube with no names.
cube = iris.cube.Cube(np.arange(20, dtype=np.float64).reshape((4, 5)))
dim0 = iris.coords.DimCoord(np.arange(4, dtype=np.float64))
dim1 = iris.coords.DimCoord(np.arange(5, dtype=np.float64), units='m')
other = iris.coords.AuxCoord('foobar', units='no_unit')
cube.add_dim_coord(dim0, 0)
cube.add_dim_coord(dim1, 1)
cube.add_aux_coord(other)
with self.temp_filename(suffix='.nc') as filename:
iris.save(cube, filename, netcdf_format='NETCDF3_CLASSIC')
self.assertCDL(filename, ('netcdf', 'netcdf_save_no_name.cdl'))
class TestNetCDFSave(tests.IrisTest):
def setUp(self):
self.cubell = iris.cube.Cube(np.arange(4).reshape(2, 2),
'air_temperature')
self.cube = iris.cube.Cube(np.zeros([2, 2]),
standard_name='surface_temperature',
long_name=None,
var_name='temp',
units='K')
self.cube2 = iris.cube.Cube(np.ones([1, 2, 2]),
standard_name=None,
long_name='Something Random',
var_name='temp2',
units='K')
self.cube3 = iris.cube.Cube(np.ones([2, 2, 2]),
standard_name=None,
long_name='Something Random',
var_name='temp3',
units='K')
self.cube4 = iris.cube.Cube(np.zeros([10]),
standard_name='air_temperature',
long_name=None,
var_name='temp',
units='K')
self.cube5 = iris.cube.Cube(np.ones([20]),
standard_name=None,
long_name='air_temperature',
var_name='temp2',
units='K')
self.cube6 = iris.cube.Cube(np.ones([10]),
standard_name=None,
long_name='air_temperature',
var_name='temp3',
units='K')
@tests.skip_data
def test_netcdf_save_format(self):
# Read netCDF input file.
file_in = tests.get_data_path(
('NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc'))
cube = iris.load_cube(file_in)
with self.temp_filename(suffix='.nc') as file_out:
# Test default NETCDF4 file format saving.
iris.save(cube, file_out)
ds = nc.Dataset(file_out)
self.assertEqual(ds.file_format, 'NETCDF4',
'Failed to save as NETCDF4 format')
ds.close()
# Test NETCDF4_CLASSIC file format saving.
iris.save(cube, file_out, netcdf_format='NETCDF4_CLASSIC')
ds = nc.Dataset(file_out)
self.assertEqual(ds.file_format, 'NETCDF4_CLASSIC',
'Failed to save as NETCDF4_CLASSIC format')
ds.close()
# Test NETCDF3_CLASSIC file format saving.
iris.save(cube, file_out, netcdf_format='NETCDF3_CLASSIC')
ds = nc.Dataset(file_out)
self.assertEqual(ds.file_format, 'NETCDF3_CLASSIC',
'Failed to save as NETCDF3_CLASSIC format')
ds.close()
# Test NETCDF4_64BIT file format saving.
iris.save(cube, file_out, netcdf_format='NETCDF3_64BIT')
ds = nc.Dataset(file_out)
self.assertTrue(ds.file_format in ['NETCDF3_64BIT',
'NETCDF3_64BIT_OFFSET'],
'Failed to save as NETCDF3_64BIT format')
ds.close()
# Test invalid file format saving.
with self.assertRaises(ValueError):
iris.save(cube, file_out, netcdf_format='WIBBLE')
@tests.skip_data
def test_netcdf_save_single(self):
# Test saving a single CF-netCDF file.
# Read PP input file.
file_in = tests.get_data_path(
('PP', 'cf_processing',
'000003000000.03.236.000128.1990.12.01.00.00.b.pp'))
cube = iris.load_cube(file_in)
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_single.cdl'))
    # TODO investigate why merge now makes time an AuxCoord rather than a
# DimCoord and why forecast_period is 'preferred'.
@tests.skip_data
def test_netcdf_save_multi2multi(self):
# Test saving multiple CF-netCDF files.
# Read PP input file.
file_in = tests.get_data_path(('PP', 'cf_processing',
'abcza_pa19591997_daily_29.b.pp'))
cubes = iris.load(file_in)
# Save multiple cubes to multiple files.
for index, cube in enumerate(cubes):
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf',
'netcdf_save_multi_%d.cdl' % index))
@tests.skip_data
def test_netcdf_save_multi2single(self):
# Test saving multiple cubes to a single CF-netCDF file.
# Read PP input file.
file_in = tests.get_data_path(('PP', 'cf_processing',
'abcza_pa19591997_daily_29.b.pp'))
cubes = iris.load(file_in)
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
# Check that it is the same on loading
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_multiple.cdl'))
def test_netcdf_multi_nocoord(self):
        # Testing the saving of a cubelist with no coords.
cubes = iris.cube.CubeList([self.cube, self.cube2, self.cube3])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_nocoord.cdl'))
def test_netcdf_multi_samevarnme(self):
        # Testing the saving of a cubelist with cubes of the same var_name.
self.cube2.var_name = self.cube.var_name
cubes = iris.cube.CubeList([self.cube, self.cube2])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_samevar.cdl'))
def test_netcdf_multi_with_coords(self):
        # Testing the saving of a cubelist with coordinates.
lat = iris.coords.DimCoord(np.arange(2),
long_name=None, var_name='lat',
units='degree_north')
lon = iris.coords.DimCoord(np.arange(2), standard_name='longitude',
long_name=None, var_name='lon',
units='degree_east')
rcoord = iris.coords.DimCoord(np.arange(1), standard_name=None,
long_name='Rnd Coordinate',
units=None)
self.cube.add_dim_coord(lon, 0)
self.cube.add_dim_coord(lat, 1)
self.cube2.add_dim_coord(lon, 1)
self.cube2.add_dim_coord(lat, 2)
self.cube2.add_dim_coord(rcoord, 0)
cubes = iris.cube.CubeList([self.cube, self.cube2])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_wcoord.cdl'))
def test_netcdf_multi_wtih_samedimcoord(self):
time1 = iris.coords.DimCoord(np.arange(10),
standard_name='time',
var_name='time')
time2 = iris.coords.DimCoord(np.arange(20),
standard_name='time',
var_name='time')
self.cube4.add_dim_coord(time1, 0)
self.cube5.add_dim_coord(time2, 0)
self.cube6.add_dim_coord(time1, 0)
cubes = iris.cube.CubeList([self.cube4, self.cube5, self.cube6])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf',
'netcdf_save_samedimcoord.cdl'))
def test_netcdf_multi_conflict_name_dup_coord(self):
# Duplicate coordinates with modified variable names lookup.
latitude1 = iris.coords.DimCoord(np.arange(10),
standard_name='latitude')
time2 = iris.coords.DimCoord(np.arange(2),
standard_name='time')
latitude2 = iris.coords.DimCoord(np.arange(2),
standard_name='latitude')
self.cube6.add_dim_coord(latitude1, 0)
self.cube.add_dim_coord(latitude2[:], 1)
self.cube.add_dim_coord(time2[:], 0)
cubes = iris.cube.CubeList([self.cube, self.cube6, self.cube6.copy()])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(
file_out, ('netcdf', 'multi_dim_coord_slightly_different.cdl'))
@tests.skip_data
def test_netcdf_hybrid_height(self):
# Test saving a CF-netCDF file which contains a hybrid height
# (i.e. dimensionless vertical) coordinate.
# Read PP input file.
file_in = tests.get_data_path(
('PP', 'COLPEX', 'small_colpex_theta_p_alt.pp'))
cube = iris.load_cube(file_in, 'air_potential_temperature')
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out,
('netcdf', 'netcdf_save_hybrid_height.cdl'))
# Read netCDF file.
cube = iris.load_cube(file_out)
# Check the PP read, netCDF write, netCDF read mechanism.
self.assertCML(cube,
('netcdf', 'netcdf_save_load_hybrid_height.cml'))
@tests.skip_data
def test_netcdf_save_ndim_auxiliary(self):
# Test saving CF-netCDF with multi-dimensional auxiliary coordinates.
# Read netCDF input file.
file_in = tests.get_data_path(
('NetCDF', 'rotated', 'xyt', 'small_rotPole_precipitation.nc'))
cube = iris.load_cube(file_in)
        # Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf',
'netcdf_save_ndim_auxiliary.cdl'))
# Read the netCDF file.
cube = iris.load_cube(file_out)
# Check the netCDF read, write, read mechanism.
self.assertCML(cube, ('netcdf',
'netcdf_save_load_ndim_auxiliary.cml'))
def test_netcdf_save_conflicting_aux(self):
# Test saving CF-netCDF with multi-dimensional auxiliary coordinates,
# with conflicts.
self.cube4.add_aux_coord(iris.coords.AuxCoord(np.arange(10),
'time'), 0)
self.cube6.add_aux_coord(iris.coords.AuxCoord(np.arange(10, 20),
'time'), 0)
cubes = iris.cube.CubeList([self.cube4, self.cube6])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_conf_aux.cdl'))
def test_netcdf_save_gridmapping(self):
# Test saving CF-netCDF from a cubelist with various grid mappings.
c1 = self.cubell
c2 = self.cubell.copy()
c3 = self.cubell.copy()
coord_system = icoord_systems.GeogCS(6371229)
coord_system2 = icoord_systems.GeogCS(6371228)
coord_system3 = icoord_systems.RotatedGeogCS(30, 30)
c1.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'latitude', long_name='1', units='degrees',
coord_system=coord_system), 1)
c1.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'longitude', long_name='1', units='degrees',
coord_system=coord_system), 0)
c2.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'latitude', long_name='2', units='degrees',
coord_system=coord_system2), 1)
c2.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'longitude', long_name='2', units='degrees',
coord_system=coord_system2), 0)
c3.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'grid_latitude', long_name='3', units='degrees',
coord_system=coord_system3), 1)
c3.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'grid_longitude', long_name='3', units='degrees',
coord_system=coord_system3), 0)
cubes = iris.cube.CubeList([c1, c2, c3])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf',
'netcdf_save_gridmapmulti.cdl'))
def test_netcdf_save_conflicting_names(self):
# Test saving CF-netCDF with a dimension name corresponding to
# an existing variable name (conflict).
self.cube4.add_dim_coord(iris.coords.DimCoord(np.arange(10),
'time'), 0)
self.cube6.add_aux_coord(iris.coords.AuxCoord(1, 'time'), None)
cubes = iris.cube.CubeList([self.cube4, self.cube6])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_conf_name.cdl'))
@tests.skip_data
def test_trajectory(self):
file_in = tests.get_data_path(('PP', 'aPPglob1', 'global.pp'))
cube = iris.load_cube(file_in)
# extract a trajectory
xpoint = cube.coord('longitude').points[:10]
ypoint = cube.coord('latitude').points[:10]
sample_points = [('latitude', xpoint), ('longitude', ypoint)]
traj = iris.analysis.trajectory.interpolate(cube, sample_points)
# save, reload and check
with self.temp_filename(suffix='.nc') as temp_filename:
iris.save(traj, temp_filename)
reloaded = iris.load_cube(temp_filename)
self.assertCML(reloaded,
('netcdf', 'save_load_traj.cml'),
checksum=False)
self.assertArrayEqual(traj.data, reloaded.data)
def test_attributes(self):
# Should be global attributes.
aglobals = {'history': 'A long time ago...',
'title': 'Attribute test',
'foo': 'bar'}
for k, v in six.iteritems(aglobals):
self.cube.attributes[k] = v
        # Should be overridden.
aover = {'Conventions': 'TEST'}
for k, v in six.iteritems(aover):
self.cube.attributes[k] = v
        # Should be data variable attributes.
avars = {'standard_error_multiplier': 23,
'flag_masks': 'a',
'flag_meanings': 'b',
'flag_values': 'c',
'STASH': iris.fileformats.pp.STASH(1, 2, 3)}
for k, v in six.iteritems(avars):
self.cube.attributes[k] = v
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename)
# Load the dataset.
ds = nc.Dataset(filename, 'r')
exceptions = []
# Should be global attributes.
for gkey in aglobals:
if getattr(ds, gkey) != aglobals.get(gkey):
exceptions.append('{} != {}'.format(getattr(ds, gkey),
aglobals.get(gkey)))
            # Should be overridden.
for okey in aover:
if getattr(ds, okey) == aover.get(okey):
exceptions.append('{} != {}'.format(getattr(ds, okey),
avars.get(okey)))
dv = ds['temp']
            # Should be data variable attributes;
# except STASH -> um_stash_source.
for vkey in avars:
if vkey != 'STASH' and (getattr(dv, vkey) != avars.get(vkey)):
exceptions.append('{} != {}'.format(getattr(dv, vkey),
avars.get(vkey)))
if getattr(dv, 'um_stash_source') != avars.get('STASH'):
exc = '{} != {}'.format(getattr(dv, 'um_stash_source'),
avars.get(vkey))
exceptions.append(exc)
self.assertEqual(exceptions, [])
def test_conflicting_attributes(self):
# Should be data variable attributes.
self.cube.attributes['foo'] = 'bar'
self.cube2.attributes['foo'] = 'orange'
with self.temp_filename(suffix='.nc') as filename:
iris.save([self.cube, self.cube2], filename)
self.assertCDL(filename, ('netcdf', 'netcdf_save_confl_attr.cdl'))
def test_conflicting_global_attributes(self):
# Should be data variable attributes, but raise a warning.
attr_name = 'history'
self.cube.attributes[attr_name] = 'Team A won.'
self.cube2.attributes[attr_name] = 'Team B won.'
expected_msg = '{attr_name!r} is being added as CF data variable ' \
'attribute, but {attr_name!r} should only be a CF ' \
'global attribute.'.format(attr_name=attr_name)
with self.temp_filename(suffix='.nc') as filename:
with mock.patch('warnings.warn') as warn:
iris.save([self.cube, self.cube2], filename)
warn.assert_called_with(expected_msg)
self.assertCDL(filename,
('netcdf', 'netcdf_save_confl_global_attr.cdl'))
def test_no_global_attributes(self):
# Should all be data variable attributes.
# Different keys.
self.cube.attributes['a'] = 'a'
self.cube2.attributes['b'] = 'a'
self.cube3.attributes['c'] = 'a'
self.cube4.attributes['d'] = 'a'
self.cube5.attributes['e'] = 'a'
self.cube6.attributes['f'] = 'a'
# Different values.
self.cube.attributes['g'] = 'p'
self.cube2.attributes['g'] = 'q'
self.cube3.attributes['g'] = 'r'
self.cube4.attributes['g'] = 's'
self.cube5.attributes['g'] = 't'
self.cube6.attributes['g'] = 'u'
# One different value.
self.cube.attributes['h'] = 'v'
self.cube2.attributes['h'] = 'v'
self.cube3.attributes['h'] = 'v'
self.cube4.attributes['h'] = 'w'
self.cube5.attributes['h'] = 'v'
self.cube6.attributes['h'] = 'v'
cubes = [self.cube, self.cube2, self.cube3,
self.cube4, self.cube5, self.cube6]
with self.temp_filename(suffix='.nc') as filename:
iris.save(cubes, filename)
self.assertCDL(filename, ('netcdf',
'netcdf_save_no_global_attr.cdl'))
class TestNetCDF3SaveInteger(tests.IrisTest):
def setUp(self):
self.cube = iris.cube.Cube(np.zeros((2, 2), dtype=np.float64),
standard_name='surface_temperature',
long_name=None,
var_name='temp',
units='K')
def test_int64_dimension_coord_netcdf3(self):
coord = iris.coords.DimCoord(np.array([1, 2], dtype=np.int64),
long_name='x')
self.cube.add_dim_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'int64_dimension_coord_netcdf3.cml'),
checksum=False)
def test_int64_auxiliary_coord_netcdf3(self):
coord = iris.coords.AuxCoord(np.array([1, 2], dtype=np.int64),
long_name='x')
self.cube.add_aux_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'int64_auxiliary_coord_netcdf3.cml'),
checksum=False)
def test_int64_data_netcdf3(self):
self.cube.data = self.cube.data.astype(np.int64)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'int64_data_netcdf3.cml'))
def test_uint32_dimension_coord_netcdf3(self):
coord = iris.coords.DimCoord(np.array([1, 2], dtype=np.uint32),
long_name='x')
self.cube.add_dim_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'uint32_dimension_coord_netcdf3.cml'),
checksum=False)
def test_uint32_auxiliary_coord_netcdf3(self):
coord = iris.coords.AuxCoord(np.array([1, 2], dtype=np.uint32),
long_name='x')
self.cube.add_aux_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'uint32_auxiliary_coord_netcdf3.cml'),
checksum=False)
def test_uint32_data_netcdf3(self):
self.cube.data = self.cube.data.astype(np.uint32)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'uint32_data_netcdf3.cml'))
def test_uint64_dimension_coord_netcdf3(self):
# Points that cannot be safely cast to int32.
coord = iris.coords.DimCoord(np.array([0, 18446744073709551615],
dtype=np.uint64),
long_name='x')
self.cube.add_dim_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
with self.assertRaises(ValueError):
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
def test_uint64_auxiliary_coord_netcdf3(self):
# Points that cannot be safely cast to int32.
coord = iris.coords.AuxCoord(np.array([0, 18446744073709551615],
dtype=np.uint64),
long_name='x')
self.cube.add_aux_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
with self.assertRaises(ValueError):
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
def test_uint64_data_netcdf3(self):
# Data that cannot be safely cast to int32.
self.cube.data = self.cube.data.astype(np.uint64)
self.cube.data[0, 1] = 18446744073709551615
with self.temp_filename(suffix='.nc') as filename:
with self.assertRaises(ValueError):
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
class TestCFStandardName(tests.IrisTest):
def setUp(self):
pass
def test_std_name_lookup_pass(self):
# Test performing a CF standard name look-up hit.
self.assertTrue('time' in iris.std_names.STD_NAMES)
def test_std_name_lookup_fail(self):
# Test performing a CF standard name look-up miss.
self.assertFalse('phenomenon_time' in iris.std_names.STD_NAMES)
@tests.skip_data
class TestNetCDFUKmoProcessFlags(tests.IrisTest):
def test_process_flags(self):
# Test single process flags
for _, process_desc in iris.fileformats.pp.LBPROC_PAIRS[1:]:
# Get basic cube and set process flag manually
ll_cube = stock.lat_lon_cube()
ll_cube.attributes["ukmo__process_flags"] = (process_desc,)
# Save cube to netCDF
with self.temp_filename(suffix='.nc') as temp_filename:
iris.save(ll_cube, temp_filename)
# Reload cube
cube = iris.load_cube(temp_filename)
# Check correct number and type of flags
self.assertTrue(
len(cube.attributes["ukmo__process_flags"]) == 1,
"Mismatch in number of process flags.")
process_flag = cube.attributes["ukmo__process_flags"][0]
self.assertEqual(process_flag, process_desc)
        # Test multiple process flags
multiple_bit_values = ((128, 64), (4096, 1024), (8192, 1024))
# Maps lbproc value to the process flags that should be created
multiple_map = {bits: [iris.fileformats.pp.lbproc_map[bit] for
bit in bits] for bits in multiple_bit_values}
for bits, descriptions in six.iteritems(multiple_map):
ll_cube = stock.lat_lon_cube()
ll_cube.attributes["ukmo__process_flags"] = descriptions
# Save cube to netCDF
with self.temp_filename(suffix='.nc') as temp_filename:
iris.save(ll_cube, temp_filename)
# Reload cube
cube = iris.load_cube(temp_filename)
# Check correct number and type of flags
process_flags = cube.attributes["ukmo__process_flags"]
self.assertTrue(len(process_flags) == len(bits), 'Mismatch in '
'number of process flags.')
self.assertEqual(set(process_flags), set(descriptions))
if __name__ == "__main__":
tests.main()
|
zak-k/iris
|
lib/iris/tests/test_netcdf.py
|
Python
|
gpl-3.0
| 42,650
|
[
"NetCDF"
] |
41e1ce50522f4e5ed69df79cc2dab0cdedaf04763f4ff5521cde8daead1afd92
|
"""
This module calculates corrections for the species listed below, fitted to the experimental and computed
entries given to the CorrectionCalculator constructor.
"""
import os
import warnings
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import plotly.graph_objects as go
from monty.serialization import loadfn
from ruamel import yaml
from scipy.optimize import curve_fit
from pymatgen.analysis.reaction_calculator import ComputedReaction
from pymatgen.analysis.structure_analyzer import sulfide_type
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
def _func(x, *m):
"""
Helper function for curve_fit.
"""
return np.dot(x, m)
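# Note added for clarity (not part of the original module): compute_corrections
# below passes _func to scipy's curve_fit so that it solves
#     per-atom energy difference ~ coefficient_matrix . corrections
# as an ordinary linear least-squares problem, optionally weighted by the
# experimental uncertainties (sigma).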
class CorrectionCalculator:
"""
A CorrectionCalculator contains experimental and computed entries which it uses to compute corrections.
It graphs residual errors after applying the computed corrections and creates the MPCompatibility.yaml
file the Correction classes use.
Attributes:
species: list of species that corrections are being calculated for
exp_compounds: list of dictionaries which each contain a compound's formula and experimental data
calc_compounds: dictionary of ComputedEntry objects
corrections: list of corrections in same order as species list
corrections_std_error: list of the variances of the corrections in same order as species list
corrections_dict: dictionary of format {'species': (value, uncertainty)} for easier correction lookup
"""
def __init__(
self,
species: List[str] = [
"oxide",
"peroxide",
"superoxide",
"S",
"F",
"Cl",
"Br",
"I",
"N",
"Se",
"Si",
"Sb",
"Te",
"V",
"Cr",
"Mn",
"Fe",
"Co",
"Ni",
"W",
"Mo",
"H",
],
max_error: float = 0.1,
allow_unstable: Union[float, bool] = 0.1,
exclude_polyanions: List[str] = [
"SO4",
"SO3",
"CO3",
"NO3",
"NO2",
"OCl3",
"ClO3",
"ClO4",
"HO",
"ClO",
"SeO3",
"TiO3",
"TiO4",
"WO4",
"SiO3",
"SiO4",
"Si2O5",
"PO3",
"PO4",
"P2O7",
],
) -> None:
"""
Initializes a CorrectionCalculator.
Args:
species: list of species to calculate corrections for
max_error: maximum tolerable relative uncertainty in experimental energy.
Compounds with relative uncertainty greater than this value will be excluded from the fit
allow_unstable: whether unstable entries are to be included in the fit. If True, all compounds will
be included regardless of their energy above hull. If False or a float, compounds with
energy above hull greater than the given value (defaults to 0.1 eV/atom) will be
excluded
exclude_polyanions: a list of polyanions that contain additional sources of error that may negatively
influence the quality of the fitted corrections. Compounds with these polyanions
will be excluded from the fit
"""
self.species = species
self.max_error = max_error
if not allow_unstable:
self.allow_unstable = 0.1
else:
self.allow_unstable = allow_unstable
self.exclude_polyanions = exclude_polyanions
self.corrections: List[float] = []
self.corrections_std_error: List[float] = []
self.corrections_dict: Dict[str, Tuple[float, float]] = {} # {'species': (value, uncertainty)}
# to help the graph_residual_error_per_species() method differentiate between oxygen containing compounds
if "oxide" in self.species:
self.oxides: List[str] = []
if "peroxide" in self.species:
self.peroxides: List[str] = []
if "superoxide" in self.species:
self.superoxides: List[str] = []
if "S" in self.species:
self.sulfides: List[str] = []
def compute_from_files(self, exp_gz: str, comp_gz: str):
"""
Args:
exp_gz: name of .json.gz file that contains experimental data
data in .json.gz file should be a list of dictionary objects with the following keys/values:
{"formula": chemical formula, "exp energy": formation energy in eV/formula unit,
"uncertainty": uncertainty in formation energy}
comp_gz: name of .json.gz file that contains computed entries
data in .json.gz file should be a dictionary of {chemical formula: ComputedEntry}
"""
exp_entries = loadfn(exp_gz)
calc_entries = loadfn(comp_gz)
return self.compute_corrections(exp_entries, calc_entries)
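    # Illustrative sketch (formulas and numbers are invented) of the data
    # layouts that compute_from_files expects after loading the .json.gz files:
    #
    #     exp_entries = [
    #         {"formula": "Fe2O3", "exp energy": -8.43, "uncertainty": 0.02},
    #         ...
    #     ]
    #     calc_entries = {"Fe2O3": <ComputedEntry>, "Fe": <ComputedEntry>, ...}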
def compute_corrections(self, exp_entries: list, calc_entries: dict) -> dict:
"""
Computes the corrections and fills in correction, corrections_std_error, and corrections_dict.
Args:
exp_entries: list of dictionary objects with the following keys/values:
{"formula": chemical formula, "exp energy": formation energy in eV/formula unit,
"uncertainty": uncertainty in formation energy}
calc_entries: dictionary of computed entries, of the form {chemical formula: ComputedEntry}
Raises:
ValueError: calc_compounds is missing an entry
"""
self.exp_compounds = exp_entries
self.calc_compounds = calc_entries
self.names: List[str] = []
self.diffs: List[float] = []
self.coeff_mat: List[List[float]] = []
self.exp_uncer: List[float] = []
# remove any corrections in calc_compounds
for entry in self.calc_compounds.values():
entry.correction = 0
for cmpd_info in self.exp_compounds:
# to get consistent element ordering in formula
name = Composition(cmpd_info["formula"]).reduced_formula
allow = True
compound = self.calc_compounds.get(name, None)
if not compound:
warnings.warn(f"Compound {name} is not found in provided computed entries and is excluded from the fit")
continue
# filter out compounds with large uncertainties
relative_uncertainty = abs(cmpd_info["uncertainty"] / cmpd_info["exp energy"])
if relative_uncertainty > self.max_error:
allow = False
warnings.warn(
"Compound {} is excluded from the fit due to high experimental uncertainty ({}%)".format(
name, relative_uncertainty
)
)
# filter out compounds containing certain polyanions
for anion in self.exclude_polyanions:
if anion in name or anion in cmpd_info["formula"]:
allow = False
warnings.warn(f"Compound {name} contains the polyanion {anion} and is excluded from the fit")
break
# filter out compounds that are unstable
if isinstance(self.allow_unstable, float):
try:
eah = compound.data["e_above_hull"]
except KeyError:
raise ValueError("Missing e above hull data")
if eah > self.allow_unstable:
allow = False
warnings.warn(f"Compound {name} is unstable and excluded from the fit (e_above_hull = {eah})")
if allow:
comp = Composition(name)
elems = list(comp.as_dict())
reactants = []
for elem in elems:
try:
elem_name = Composition(elem).reduced_formula
reactants.append(self.calc_compounds[elem_name])
except KeyError:
raise ValueError("Computed entries missing " + elem)
rxn = ComputedReaction(reactants, [compound])
rxn.normalize_to(comp)
energy = rxn.calculated_reaction_energy
coeff = []
for specie in self.species:
if specie == "oxide":
if compound.data["oxide_type"] == "oxide":
coeff.append(comp["O"])
self.oxides.append(name)
else:
coeff.append(0)
elif specie == "peroxide":
if compound.data["oxide_type"] == "peroxide":
coeff.append(comp["O"])
self.peroxides.append(name)
else:
coeff.append(0)
elif specie == "superoxide":
if compound.data["oxide_type"] == "superoxide":
coeff.append(comp["O"])
self.superoxides.append(name)
else:
coeff.append(0)
elif specie == "S":
if Element("S") in comp:
sf_type = "sulfide"
if compound.data.get("sulfide_type"):
sf_type = compound.data["sulfide_type"]
elif hasattr(compound, "structure"):
sf_type = sulfide_type(compound.structure)
if sf_type == "sulfide":
coeff.append(comp["S"])
self.sulfides.append(name)
else:
coeff.append(0)
else:
coeff.append(0)
else:
try:
coeff.append(comp[specie])
except ValueError:
raise ValueError(f"We can't detect this specie: {specie}")
self.names.append(name)
self.diffs.append((cmpd_info["exp energy"] - energy) / comp.num_atoms)
self.coeff_mat.append([i / comp.num_atoms for i in coeff])
self.exp_uncer.append((cmpd_info["uncertainty"]) / comp.num_atoms)
# for any exp entries with no uncertainty value, assign average uncertainty value
sigma = np.array(self.exp_uncer)
sigma[sigma == 0] = np.nan
with warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=RuntimeWarning
) # numpy raises warning if the entire array is nan values
mean_uncer = np.nanmean(sigma)
sigma = np.where(np.isnan(sigma), mean_uncer, sigma)
if np.isnan(mean_uncer):
# no uncertainty values for any compounds, don't try to weight
popt, self.pcov = curve_fit(_func, self.coeff_mat, self.diffs, p0=np.ones(len(self.species)))
else:
popt, self.pcov = curve_fit(
_func,
self.coeff_mat,
self.diffs,
p0=np.ones(len(self.species)),
sigma=sigma,
absolute_sigma=True,
)
self.corrections = popt.tolist()
self.corrections_std_error = np.sqrt(np.diag(self.pcov)).tolist()
for i, v in enumerate(self.species):
self.corrections_dict[v] = (
round(self.corrections[i], 3),
round(self.corrections_std_error[i], 4),
)
# set ozonide correction to 0 so that this species does not receive a correction
# while other oxide types do
self.corrections_dict["ozonide"] = (0, 0)
return self.corrections_dict
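    # Shape of the returned dictionary (numbers invented for illustration,
    # not actual fitted values):
    #
    #     {"oxide": (-0.687, 0.002), "S": (-0.503, 0.009), ..., "ozonide": (0, 0)}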
def graph_residual_error(self) -> go.Figure:
"""
Graphs the residual errors for all compounds after applying computed corrections.
"""
if len(self.corrections) == 0:
raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
abs_errors = [abs(i) for i in self.diffs - np.dot(self.coeff_mat, self.corrections)]
labels_graph = self.names.copy()
abs_errors, labels_graph = (list(t) for t in zip(*sorted(zip(abs_errors, labels_graph)))) # sort by error
num = len(abs_errors)
fig = go.Figure(
data=go.Scatter(
x=np.linspace(1, num, num),
y=abs_errors,
mode="markers",
text=labels_graph,
),
layout=go.Layout(
title=go.layout.Title(text="Residual Errors"),
yaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text="Residual Error (eV/atom)")),
),
)
print("Residual Error:")
print("Median = " + str(np.median(np.array(abs_errors))))
print("Mean = " + str(np.mean(np.array(abs_errors))))
print("Std Dev = " + str(np.std(np.array(abs_errors))))
print("Original Error:")
print("Median = " + str(abs(np.median(np.array(self.diffs)))))
print("Mean = " + str(abs(np.mean(np.array(self.diffs)))))
print("Std Dev = " + str(np.std(np.array(self.diffs))))
return fig
def graph_residual_error_per_species(self, specie: str) -> go.Figure:
"""
Graphs the residual errors for each compound that contains specie after applying computed corrections.
Args:
specie: the specie/group that residual errors are being plotted for
Raises:
ValueError: the specie is not a valid specie that this class fits corrections for
"""
if specie not in self.species:
raise ValueError("not a valid specie")
if len(self.corrections) == 0:
raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
abs_errors = [abs(i) for i in self.diffs - np.dot(self.coeff_mat, self.corrections)]
labels_species = self.names.copy()
diffs_cpy = self.diffs.copy()
num = len(labels_species)
if specie in ("oxide", "peroxide", "superoxide", "S"):
if specie == "oxide":
compounds = self.oxides
elif specie == "peroxide":
compounds = self.peroxides
elif specie == "superoxides":
compounds = self.superoxides
else:
compounds = self.sulfides
for i in range(num):
if labels_species[num - i - 1] not in compounds:
del labels_species[num - i - 1]
del abs_errors[num - i - 1]
del diffs_cpy[num - i - 1]
else:
for i in range(num):
if not Composition(labels_species[num - i - 1])[specie]:
del labels_species[num - i - 1]
del abs_errors[num - i - 1]
del diffs_cpy[num - i - 1]
abs_errors, labels_species = (list(t) for t in zip(*sorted(zip(abs_errors, labels_species)))) # sort by error
num = len(abs_errors)
fig = go.Figure(
data=go.Scatter(
x=np.linspace(1, num, num),
y=abs_errors,
mode="markers",
text=labels_species,
),
layout=go.Layout(
title=go.layout.Title(text="Residual Errors for " + specie),
yaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text="Residual Error (eV/atom)")),
),
)
print("Residual Error:")
print("Median = " + str(np.median(np.array(abs_errors))))
print("Mean = " + str(np.mean(np.array(abs_errors))))
print("Std Dev = " + str(np.std(np.array(abs_errors))))
print("Original Error:")
print("Median = " + str(abs(np.median(np.array(diffs_cpy)))))
print("Mean = " + str(abs(np.mean(np.array(diffs_cpy)))))
print("Std Dev = " + str(np.std(np.array(diffs_cpy))))
return fig
def make_yaml(self, name: str = "MP2020", dir: Optional[str] = None) -> None:
"""
Creates the _name_Compatibility.yaml that stores corrections as well as _name_CompatibilityUncertainties.yaml
for correction uncertainties.
Args:
name: str, alternate name for the created .yaml file.
Default: "MP2020"
dir: str, directory in which to save the file. Pass None (default) to
save the file in the current working directory.
"""
if len(self.corrections) == 0:
raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
# elements with U values
ggaucorrection_species = ["V", "Cr", "Mn", "Fe", "Co", "Ni", "W", "Mo"]
comp_corr: Dict[str, float] = {}
o: Dict[str, float] = {}
f: Dict[str, float] = {}
comp_corr_error: Dict[str, float] = {}
o_error: Dict[str, float] = {}
f_error: Dict[str, float] = {}
for specie in list(self.species) + ["ozonide"]:
if specie in ggaucorrection_species:
o[specie] = self.corrections_dict[specie][0]
f[specie] = self.corrections_dict[specie][0]
o_error[specie] = self.corrections_dict[specie][1]
f_error[specie] = self.corrections_dict[specie][1]
else:
comp_corr[specie] = self.corrections_dict[specie][0]
comp_corr_error[specie] = self.corrections_dict[specie][1]
outline = """\
Name:
Corrections:
GGAUMixingCorrections:
O:
F:
CompositionCorrections:
Uncertainties:
GGAUMixingCorrections:
O:
F:
CompositionCorrections:
"""
fn = name + "Compatibility.yaml"
if dir:
path = os.path.join(dir, fn)
else:
path = fn
yml = yaml.YAML()
yml.default_flow_style = False
contents = yml.load(outline)
contents["Name"] = name
# make CommentedMap so comments can be added
contents["Corrections"]["GGAUMixingCorrections"]["O"] = yaml.comments.CommentedMap(o)
contents["Corrections"]["GGAUMixingCorrections"]["F"] = yaml.comments.CommentedMap(f)
contents["Corrections"]["CompositionCorrections"] = yaml.comments.CommentedMap(comp_corr)
contents["Uncertainties"]["GGAUMixingCorrections"]["O"] = yaml.comments.CommentedMap(o_error)
contents["Uncertainties"]["GGAUMixingCorrections"]["F"] = yaml.comments.CommentedMap(f_error)
contents["Uncertainties"]["CompositionCorrections"] = yaml.comments.CommentedMap(comp_corr_error)
contents["Corrections"].yaml_set_start_comment("Energy corrections in eV/atom", indent=2)
contents["Corrections"]["GGAUMixingCorrections"].yaml_set_start_comment(
"Composition-based corrections applied to transition metal oxides\nand fluorides to "
+ 'make GGA and GGA+U energies compatible\nwhen compat_type = "Advanced" (default)',
indent=4,
)
contents["Corrections"]["CompositionCorrections"].yaml_set_start_comment(
"Composition-based corrections applied to any compound containing\nthese species as anions",
indent=4,
)
contents["Uncertainties"].yaml_set_start_comment(
"Uncertainties corresponding to each energy correction (eV/atom)", indent=2
)
with open(path, "w") as file:
yml.dump(contents, file)
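# Hypothetical example call (name and directory are illustrative, not part of
# the original module):
#
#     calc.make_yaml(name="MP2020", dir="/tmp")
#
# Following the outline above, the written file groups the GGA/GGA+U mixing
# corrections for O and F separately from the anion CompositionCorrections,
# with a matching Uncertainties section.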
|
materialsproject/pymatgen
|
pymatgen/entries/correction_calculator.py
|
Python
|
mit
| 20,358
|
[
"pymatgen"
] |
67537230563733f2a79e9209acdffefb412021dbc4a9dff54e9746ce8f4faee3
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 - 2020 by Pedro Mendes, Rector and Visitors of the
# University of Virginia, University of Heidelberg, and University
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2017 - 2018 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University of
# Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 - 2009 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# Copyright (C) 2006 - 2007 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc. and EML Research, gGmbH.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CDataVector(unittest.TestCase):
def setUp(self):
self.datamodel=COPASI.CRootContainer.addDatamodel()
self.model=self.datamodel.getModel()
self.compartment=self.model.createCompartment("Comp1")
self.model.createMetabolite("metab1","Comp1")
self.model.createMetabolite("metab2","Comp1")
self.metab=self.model.createMetabolite("metab3","Comp1")
self.model.createMetabolite("metab4","Comp1")
self.model.createMetabolite("metab5","Comp1")
self.vector=self.compartment.getMetabolites()
def test_add(self):
v=COPASI.CompartmentVectorNS()
comp=COPASI.CCompartment("test_compartment")
self.assert_(v.addAndOwn(comp))
self.assert_(v.size()==1)
def test_remove(self):
n=self.vector.size()
self.vector.removeObject(self.metab)
self.assert_(self.vector.size()==n-1)
self.vector.remove(0)
self.assert_(self.vector.size()==n-2)
def test_remove_viaName(self):
n=self.vector.size()
self.vector.removeByName(self.metab.getObjectName())
self.assert_(self.vector.size()==n-1)
def test_size(self):
n=self.vector.size()
self.assert_(type(n)==IntType)
self.assert_(n==5)
def test_getIndex(self):
index=self.vector.getIndex(self.metab)
self.assert_(type(index)==IntType)
self.assert_(index==2)
index=self.vector.getIndexByName(self.metab.getObjectName())
self.assert_(type(index)==IntType)
self.assert_(index==2)
def suite():
tests=[
'test_add'
,'test_remove'
,'test_remove_viaName'
,'test_size'
,'test_getIndex'
]
return unittest.TestSuite(map(Test_CDataVector,tests))
if(__name__ == '__main__'):
unittest.TextTestRunner(verbosity=2).run(suite())
|
copasi/COPASI
|
copasi/bindings/python/unittests/Test_CCopasiVector.py
|
Python
|
artistic-2.0
| 2,748
|
[
"COPASI"
] |
d90bd092b1a59d249317fb1fdce725830bf0da10b1ada5cef60b58629ade7c0e
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
This module implements input and output processing from Nwchem.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "6/5/13"
import re
from string import Template
from six import string_types
from six.moves import zip
from monty.io import zopen
from pymatgen.core import Molecule, Structure
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.core.units import Energy
from pymatgen.core.units import FloatWithUnit
class NwTask(PMGSONable):
"""
Base task for Nwchem.
"""
theories = {"g3gn": "some description",
"scf": "Hartree-Fock",
"dft": "DFT",
"esp": "ESP",
"sodft": "Spin-Orbit DFT",
"mp2": "MP2 using a semi-direct algorithm",
"direct_mp2": "MP2 using a full-direct algorithm",
"rimp2": "MP2 using the RI approximation",
"ccsd": "Coupled-cluster single and double excitations",
"ccsd(t)": "Coupled-cluster linearized triples approximation",
"ccsd+t(ccsd)": "Fourth order triples contribution",
"mcscf": "Multiconfiguration SCF",
"selci": "Selected CI with perturbation correction",
"md": "Classical molecular dynamics simulation",
"pspw": "Pseudopotential plane-wave DFT for molecules and "
"insulating solids using NWPW",
"band": "Pseudopotential plane-wave DFT for solids using NWPW",
"tce": "Tensor Contraction Engine"}
operations = {"energy": "Evaluate the single point energy.",
"gradient": "Evaluate the derivative of the energy with "
"respect to nuclear coordinates.",
"optimize": "Minimize the energy by varying the molecular "
"structure.",
"saddle": "Conduct a search for a transition state (or "
"saddle point).",
"hessian": "Compute second derivatives.",
"frequencies": "Compute second derivatives and print out an "
"analysis of molecular vibrations.",
"freq": "Same as frequencies.",
"vscf": "Compute anharmonic contributions to the "
"vibrational modes.",
"property": "Calculate the properties for the wave "
"function.",
"dynamics": "Perform classical molecular dynamics.",
"thermodynamics": "Perform multi-configuration "
"thermodynamic integration using "
"classical MD.",
"": "dummy"}
def __init__(self, charge, spin_multiplicity, basis_set,
title=None, theory="dft", operation="optimize",
theory_directives=None, alternate_directives=None):
"""
Very flexible arguments to support many types of potential setups.
Users should use more friendly static methods unless they need the
flexibility.
Args:
charge: Charge of the molecule. If None, charge on molecule is
used. Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
basis_set: The basis set used for the task as a dict. E.g.,
{"C": "6-311++G**", "H": "6-31++G**"}.
title: Title for the task. Defaults to None, which means a title
based on the theory and operation of the task is
autogenerated.
theory: The theory used for the task. Defaults to "dft".
operation: The operation for the task. Defaults to "optimize".
theory_directives: A dict of theory directives. For example,
if you are running dft calculations, you may specify the
exchange correlation functional using {"xc": "b3lyp"}.
alternate_directives: A dict of alternate directives. For
example, to perform cosmo calculations and dielectric
constant of 78, you'd supply {'cosmo': {"dielectric": 78}}.
"""
#Basic checks.
if theory.lower() not in NwTask.theories.keys():
raise NwInputError("Invalid theory {}".format(theory))
if operation.lower() not in NwTask.operations.keys():
raise NwInputError("Invalid operation {}".format(operation))
self.charge = charge
self.spin_multiplicity = spin_multiplicity
self.title = title if title is not None else "{} {}".format(theory,
operation)
self.theory = theory
self.basis_set = basis_set
self.operation = operation
self.theory_directives = theory_directives \
if theory_directives is not None else {}
self.alternate_directives = alternate_directives \
if alternate_directives is not None else {}
def __str__(self):
bset_spec = []
for el, bset in sorted(self.basis_set.items(), key=lambda x: x[0]):
bset_spec.append(" {} library \"{}\"".format(el, bset))
theory_spec = []
if self.theory_directives:
theory_spec.append("{}".format(self.theory))
for k in sorted(self.theory_directives.keys()):
theory_spec.append(" {} {}".format(k, self.theory_directives[
k]))
theory_spec.append("end")
for k in sorted(self.alternate_directives.keys()):
theory_spec.append(k)
for k2 in sorted(self.alternate_directives[k].keys()):
theory_spec.append(" {} {}".format(
k2, self.alternate_directives[k][k2]))
theory_spec.append("end")
t = Template("""title "$title"
charge $charge
basis
$bset_spec
end
$theory_spec
task $theory $operation""")
return t.substitute(
title=self.title, charge=self.charge,
bset_spec="\n".join(bset_spec),
theory_spec="\n".join(theory_spec),
theory=self.theory, operation=self.operation)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"title": self.title, "theory": self.theory,
"operation": self.operation, "basis_set": self.basis_set,
"theory_directives": self.theory_directives,
"alternate_directives": self.alternate_directives}
@classmethod
def from_dict(cls, d):
return NwTask(charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
title=d["title"], theory=d["theory"],
operation=d["operation"], basis_set=d["basis_set"],
theory_directives=d["theory_directives"],
alternate_directives=d["alternate_directives"])
@classmethod
def from_molecule(cls, mol, theory, charge=None, spin_multiplicity=None,
basis_set="6-31g", title=None,
operation="optimize", theory_directives=None,
alternate_directives=None):
"""
Very flexible arguments to support many types of potential setups.
Users should use more friendly static methods unless they need the
flexibility.
Args:
mol: Input molecule
charge: Charge of the molecule. If None, charge on molecule is
used. Defaults to None. This allows the input file to be set a
charge independently from the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
basis_set: The basis set to be used as string or a dict. E.g.,
{"C": "6-311++G**", "H": "6-31++G**"} or "6-31G". If string,
same basis set is used for all elements.
title: Title for the task. Defaults to None, which means a title
based on the theory and operation of the task is
autogenerated.
theory: The theory used for the task. Defaults to "dft".
operation: The operation for the task. Defaults to "optimize".
theory_directives: A dict of theory directives. For example,
if you are running dft calculations, you may specify the
exchange correlation functional using {"xc": "b3lyp"}.
alternate_directives: A dict of alternate directives. For
example, to perform cosmo calculations with DFT, you'd supply
{'cosmo': "cosmo"}.
"""
title = title if title is not None else "{} {} {}".format(
re.sub("\s", "", mol.formula), theory, operation)
charge = charge if charge is not None else mol.charge
nelectrons = - charge + mol.charge + mol.nelectrons
if spin_multiplicity is not None:
spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(
charge, spin_multiplicity))
elif charge == mol.charge:
spin_multiplicity = mol.spin_multiplicity
else:
spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
elements = set(mol.composition.get_el_amt_dict().keys())
if isinstance(basis_set, string_types):
basis_set = {el: basis_set for el in elements}
return NwTask(charge, spin_multiplicity, basis_set,
title=title, theory=theory, operation=operation,
theory_directives=theory_directives,
alternate_directives=alternate_directives)
@classmethod
def dft_task(cls, mol, xc="b3lyp", **kwargs):
"""
        A class method for quickly creating DFT tasks with an optional
        cosmo parameter.
Args:
mol: Input molecule
xc: Exchange correlation to use.
dielectric: Using water dielectric
\*\*kwargs: Any of the other kwargs supported by NwTask. Note the
theory is always "dft" for a dft task.
"""
t = NwTask.from_molecule(mol, theory="dft", **kwargs)
t.theory_directives.update({"xc": xc,
"mult": t.spin_multiplicity})
return t
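    # Hedged usage sketch (the molecule and settings are illustrative only and
    # not taken from the original file):
    #
    #     from pymatgen.core import Molecule
    #     mol = Molecule(["O", "H", "H"],
    #                    [[0.0, 0.0, 0.0], [0.95, 0.0, 0.0], [-0.24, 0.92, 0.0]])
    #     task = NwTask.dft_task(mol, xc="b3lyp", basis_set="6-31g",
    #                            operation="optimize")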
@classmethod
def esp_task(cls, mol, **kwargs):
"""
A class method for quickly creating ESP tasks with RESP
charge fitting.
Args:
mol: Input molecule
\*\*kwargs: Any of the other kwargs supported by NwTask. Note the
theory is always "dft" for a dft task.
"""
return NwTask.from_molecule(mol, theory="esp", **kwargs)
class NwInput(PMGSONable):
"""
An object representing a Nwchem input file, which is essentially a list
of tasks on a particular molecule.
Args:
mol: Input molecule. If molecule is a single string, it is used as a
direct input to the geometry section of the Gaussian input
file.
tasks: List of NwTasks.
directives: List of root level directives as tuple. E.g.,
[("start", "water"), ("print", "high")]
geometry_options: Additional list of options to be supplied to the
geometry. E.g., ["units", "angstroms", "noautoz"]. Defaults to
("units", "angstroms").
symmetry_options: Addition list of option to be supplied to the
symmetry. E.g. ["c1"] to turn off the symmetry
memory_options: Memory controlling options. str.
E.g "total 1000 mb stack 400 mb"
"""
def __init__(self, mol, tasks, directives=None,
geometry_options=("units", "angstroms"),
symmetry_options=None,
memory_options=None):
"""
"""
self._mol = mol
self.directives = directives if directives is not None else []
self.tasks = tasks
self.geometry_options = geometry_options
self.symmetry_options = symmetry_options
self.memory_options = memory_options
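    # Hedged construction sketch (directive and memory values are illustrative,
    # not part of the original file):
    #
    #     tasks = [NwTask.dft_task(mol, operation="optimize"),
    #              NwTask.dft_task(mol, operation="freq")]
    #     nwi = NwInput(mol, tasks, directives=[("start", "h2o_calc")],
    #                   memory_options="total 1000 mb")
    #     nwi.write_file("h2o.nw")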
@property
def molecule(self):
"""
Returns molecule associated with this GaussianInput.
"""
return self._mol
def __str__(self):
o = []
if self.memory_options:
o.append('memory ' + self.memory_options)
for d in self.directives:
o.append("{} {}".format(d[0], d[1]))
o.append("geometry "
+ " ".join(self.geometry_options))
if self.symmetry_options:
o.append(" symmetry " + " ".join(self.symmetry_options))
for site in self._mol:
o.append(" {} {} {} {}".format(site.specie.symbol, site.x, site.y,
site.z))
o.append("end\n")
for t in self.tasks:
o.append(str(t))
o.append("")
return "\n".join(o)
def write_file(self, filename):
with zopen(filename, "w") as f:
f.write(self.__str__())
def as_dict(self):
return {
"mol": self._mol.as_dict(),
"tasks": [t.as_dict() for t in self.tasks],
"directives": [list(t) for t in self.directives],
"geometry_options": list(self.geometry_options),
"symmetry_options": self.symmetry_options,
"memory_options": self.memory_options
}
@classmethod
def from_dict(cls, d):
return NwInput(Molecule.from_dict(d["mol"]),
tasks=[NwTask.from_dict(dt) for dt in d["tasks"]],
directives=[tuple(li) for li in d["directives"]],
geometry_options=d["geometry_options"],
symmetry_options=d["symmetry_options"],
memory_options=d["memory_options"])
@classmethod
def from_string(cls, string_input):
"""
Read an NwInput from a string. Currently tested to work with
files generated from this class itself.
Args:
string_input: string_input to parse.
Returns:
NwInput object
"""
directives = []
tasks = []
charge = None
spin_multiplicity = None
title = None
basis_set = None
theory_directives = {}
geom_options = None
symmetry_options = None
memory_options = None
lines = string_input.strip().split("\n")
while len(lines) > 0:
l = lines.pop(0).strip()
if l == "":
continue
toks = l.split()
if toks[0].lower() == "geometry":
geom_options = toks[1:]
l = lines.pop(0).strip()
toks = l.split()
if toks[0].lower() == "symmetry":
symmetry_options = toks[1:]
l = lines.pop(0).strip()
#Parse geometry
species = []
coords = []
while l.lower() != "end":
toks = l.split()
species.append(toks[0])
coords.append([float(i) for i in toks[1:]])
l = lines.pop(0).strip()
mol = Molecule(species, coords)
elif toks[0].lower() == "charge":
charge = int(toks[1])
elif toks[0].lower() == "title":
title = l[5:].strip().strip("\"")
elif toks[0].lower() == "basis":
#Parse basis sets
l = lines.pop(0).strip()
basis_set = {}
while l.lower() != "end":
toks = l.split()
basis_set[toks[0]] = toks[-1].strip("\"")
l = lines.pop(0).strip()
elif toks[0].lower() in NwTask.theories:
#Parse theory directives.
theory = toks[0].lower()
l = lines.pop(0).strip()
theory_directives[theory] = {}
while l.lower() != "end":
toks = l.split()
theory_directives[theory][toks[0]] = toks[-1]
if toks[0] == "mult":
spin_multiplicity = float(toks[1])
l = lines.pop(0).strip()
elif toks[0].lower() == "task":
tasks.append(
NwTask(charge=charge,
spin_multiplicity=spin_multiplicity,
title=title, theory=toks[1],
operation=toks[2], basis_set=basis_set,
theory_directives=theory_directives.get(toks[1])))
elif toks[0].lower() == "memory":
memory_options = ' '.join(toks[1:])
else:
directives.append(l.strip().split())
return NwInput(mol, tasks=tasks, directives=directives,
geometry_options=geom_options,
symmetry_options=symmetry_options,
memory_options=memory_options)
@classmethod
def from_file(cls, filename):
"""
Read an NwInput from a file. Currently tested to work with
files generated from this class itself.
Args:
filename: Filename to parse.
Returns:
NwInput object
"""
with zopen(filename) as f:
return cls.from_string(f.read())
class NwInputError(Exception):
"""
Error class for NwInput.
"""
pass
class NwOutput(object):
"""
A Nwchem output file parser. Very basic for now - supports only dft and
only parses energies and geometries. Please note that Nwchem typically
outputs energies in either au or kJ/mol. All energies are converted to
eV in the parser.
Args:
filename: Filename to read.
"""
def __init__(self, filename):
self.filename = filename
with zopen(filename) as f:
data = f.read()
chunks = re.split("NWChem Input Module", data)
if re.search("CITATION", chunks[-1]):
chunks.pop()
preamble = chunks.pop(0)
self.job_info = self._parse_preamble(preamble)
self.data = [self._parse_job(c) for c in chunks]
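    # Usage sketch (the file name is hypothetical): each element of self.data
    # is the dictionary returned by _parse_job below, one per NWChem job found
    # in the output file.
    #
    #     out = NwOutput("h2o_opt.nwout")
    #     final_energy = out.data[-1]["energies"][-1]   # in eV
    #     last_geometry = out.data[-1]["molecules"][-1]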
def _parse_preamble(self, preamble):
info = {}
for l in preamble.split("\n"):
toks = l.split("=")
if len(toks) > 1:
info[toks[0].strip()] = toks[-1].strip()
return info
def _parse_job(self, output):
energy_patt = re.compile("Total \w+ energy\s+=\s+([\.\-\d]+)")
#In cosmo solvation results; gas phase energy = -152.5044774212
energy_gas_patt = re.compile("gas phase energy\s+=\s+([\.\-\d]+)")
#In cosmo solvation results; sol phase energy = -152.5044774212
energy_sol_patt = re.compile("sol phase energy\s+=\s+([\.\-\d]+)")
coord_patt = re.compile("\d+\s+(\w+)\s+[\.\-\d]+\s+([\.\-\d]+)\s+"
"([\.\-\d]+)\s+([\.\-\d]+)")
lat_vector_patt = re.compile("a[123]=<\s+([\.\-\d]+)\s+"
"([\.\-\d]+)\s+([\.\-\d]+)\s+>")
corrections_patt = re.compile("([\w\-]+ correction to \w+)\s+="
"\s+([\.\-\d]+)")
preamble_patt = re.compile("(No. of atoms|No. of electrons"
"|SCF calculation type|Charge|Spin "
"multiplicity)\s*:\s*(\S+)")
error_defs = {
"calculations not reaching convergence": "Bad convergence",
"Calculation failed to converge": "Bad convergence",
"geom_binvr: #indep variables incorrect": "autoz error",
"dft optimize failed": "Geometry optimization failed"}
data = {}
energies = []
frequencies = None
corrections = {}
molecules = []
structures = []
species = []
coords = []
lattice = []
errors = []
basis_set = {}
bset_header = []
parse_geom = False
parse_freq = False
parse_bset = False
job_type = ""
for l in output.split("\n"):
for e, v in error_defs.items():
if l.find(e) != -1:
errors.append(v)
if parse_geom:
if l.strip() == "Atomic Mass":
if lattice:
structures.append(Structure(lattice, species, coords,
coords_are_cartesian=True))
else:
molecules.append(Molecule(species, coords))
species = []
coords = []
lattice = []
parse_geom = False
else:
m = coord_patt.search(l)
if m:
species.append(m.group(1).capitalize())
coords.append([float(m.group(2)), float(m.group(3)),
float(m.group(4))])
m = lat_vector_patt.search(l)
if m:
lattice.append([float(m.group(1)), float(m.group(2)),
float(m.group(3))])
if parse_freq:
if len(l.strip()) == 0:
if len(frequencies[-1][1]) == 0:
continue
else:
parse_freq = False
else:
vibs = [float(vib) for vib in l.strip().split()[1:]]
num_vibs = len(vibs)
for mode, dis in zip(frequencies[-num_vibs:], vibs):
mode[1].append(dis)
elif parse_bset:
if l.strip() == "":
parse_bset = False
else:
toks = l.split()
if toks[0] != "Tag" and not re.match("\-+", toks[0]):
basis_set[toks[0]] = dict(zip(bset_header[1:],
toks[1:]))
elif toks[0] == "Tag":
bset_header = toks
bset_header.pop(4)
bset_header = [h.lower() for h in bset_header]
else:
m = energy_patt.search(l)
if m:
energies.append(Energy(m.group(1), "Ha").to("eV"))
continue
m = energy_gas_patt.search(l)
if m:
cosmo_scf_energy = energies[-1]
energies[-1] = dict()
energies[-1].update({"cosmo scf": cosmo_scf_energy})
energies[-1].update({"gas phase":
Energy(m.group(1), "Ha").to("eV")})
m = energy_sol_patt.search(l)
if m:
energies[-1].update(
{"sol phase": Energy(m.group(1), "Ha").to("eV")})
m = preamble_patt.search(l)
if m:
try:
val = int(m.group(2))
except ValueError:
val = m.group(2)
k = m.group(1).replace("No. of ", "n").replace(" ", "_")
data[k.lower()] = val
elif l.find("Geometry \"geometry\"") != -1:
parse_geom = True
elif l.find("Summary of \"ao basis\"") != -1:
parse_bset = True
elif l.find("P.Frequency") != -1:
parse_freq = True
if not frequencies:
frequencies = []
frequencies.extend([(float(freq), []) for freq
in l.strip().split()[1:]])
elif job_type == "" and l.strip().startswith("NWChem"):
job_type = l.strip()
if job_type == "NWChem DFT Module" and \
"COSMO solvation results" in output:
job_type += " COSMO"
else:
m = corrections_patt.search(l)
if m:
corrections[m.group(1)] = FloatWithUnit(
m.group(2), "kJ mol^-1").to("eV atom^-1")
if frequencies:
for freq, mode in frequencies:
mode[:] = zip(*[iter(mode)]*3)
data.update({"job_type": job_type, "energies": energies,
"corrections": corrections,
"molecules": molecules,
"structures": structures,
"basis_set": basis_set,
"errors": errors,
"has_error": len(errors) > 0,
"frequencies": frequencies})
return data
|
Dioptas/pymatgen
|
pymatgen/io/nwchemio.py
|
Python
|
mit
| 26,194
|
[
"Gaussian",
"NWChem",
"pymatgen"
] |
4db58270fbbd571e4dffab64cff3dfa14cb05984b0a095e3643436cfcb084541
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Train a small multi-layer perceptron with fully connected layers on MNIST data.
This example has some command line arguments that enable different neon features.
Examples:
python examples/mnist_mlp.py -b gpu -e 10
Run the example for 10 epochs using the NervanaGPU backend
python examples/mnist_mlp.py --eval_freq 1
After each training epoch, process the validation/test data
set through the model and display the cost.
python examples/mnist_mlp.py --serialize 1 -s checkpoint.pkl
After every iteration of training, dump the model to a pickle
file named "checkpoint.pkl". Changing the serialize parameter
changes the frequency at which the model is saved.
python examples/mnist_mlp.py --model_file checkpoint.pkl
Before starting to train the model, set the model state to
the values stored in the checkpoint file named checkpoint.pkl.
"""
from neon.callbacks.callbacks import Callbacks
from neon.data import MNIST
from neon.initializers import Gaussian
from neon.layers import GeneralizedCost, Affine, Tree, Sequential
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary, Misclassification
from neon.util.argparser import NeonArgparser
from neon import logger as neon_logger
import deepstacks
from deepstacks.macros import *
from deepstacks.neon import curr_layer,curr_stacks,curr_flags,curr_model
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# load up the mnist data set
dataset = MNIST(path=args.data_dir)
train_set = dataset.train_iter
valid_set = dataset.valid_iter
# setup weight initialization function
init_norm = Gaussian(loc=0.0, scale=0.01)
# setup model layers
#layers = [Affine(nout=100, init=init_norm, activation=Rectlin()),
# Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
import neon
l_in = deepstacks.neon.InputLayer((None,)+train_set.shape,'image')
network,stacks,paramlayers,errors,watchpoints=deepstacks.neon.build_network(l_in,(
(0,100,0,0,0,0,{'dense'}),
(0,100,0,0,0,0,{'dense'}),
((0,1),0,0,0,0,0,{'add'}),
(0,10,0,0,0,0,{'dense':True,'nonlinearity':Logistic(shortcut=True)}),
))
# setup cost function as CrossEntropy
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
cost,extra_layers,tagslice = deepstacks.neon.get_loss(errors,watchpoints,cost)
network = Tree([network]+extra_layers)
inputs = deepstacks.neon.get_inputs(network)
#assert tuple(inputs)==('image',)
#print network.get_description()
layers = network
#cost = GeneralizedCost(costfunc=CrossEntropyBinary())
#layers = [
# Sequential(layers=(
# l_in,
# Affine(nout=100, init=init_norm, activation=Rectlin()),
# Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))
# ))
# ]
# setup optimizer
optimizer = GradientDescentMomentum(
0.1, momentum_coef=0.9, stochastic_round=args.rounding)
# initialize model object
mlp = Model(layers=layers)
# configure callbacks
callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
# run fit
mlp.fit(train_set, optimizer=optimizer,
num_epochs=args.epochs, cost=cost, callbacks=callbacks)
error_rate = mlp.eval(valid_set, metric=Misclassification())
neon_logger.display('Misclassification error = %.1f%%' % (error_rate * 100))
|
guoxuesong/deepstacks
|
examples/neon/mnist_mergesum.py
|
Python
|
mit
| 4,240
|
[
"Gaussian"
] |
c1b5167ecaf1f4b171d7ed0f8f81a43ca0940c30f794b94411d68c4225b9aedc
|
import POVME.packages.binana.peel as peel
import POVME.packages.pymolecule.pymolecule as pymolecule
my_params = peel.defaultParams
trp = pymolecule.Molecule()
trp.fileio.load_pdb_into( 'trp.pdb', bonds_by_distance=True, serial_reindex = True, resseq_reindex=False)
my_peel = peel.peel(trp, my_params)
my_peel.write_vmd_script('visualize_trp.vmd', peel.defaultParams)
my_feature_maps = my_peel.create_feature_maps([-20,20,-20,20,-20,20], 0.5)
my_feature_maps['hbondAcceptor'].write_pdb('HBA.pdb')
my_feature_maps['hbondDonor'].write_pdb('HBD.pdb')
my_feature_maps['aromatic'].write_pdb('ARO.pdb')
print "Done!"
|
POVME/POVME
|
POVME/packages/binana/tests/peel_trp_basic/trpTest.py
|
Python
|
mit
| 613
|
[
"VMD"
] |
68fdca0e615db5978754f2fa3a0a9843558fcbe33c6978fd9e107e1e6952a708
|