gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
import curses
import curses.textpad
from mbf import *
# http://docs.python.org/2/library/curses.html
class MainScreen:
    """Curses UI for one pynances month.

    Lays out three panes inside the root screen: an accounts pane (left), a
    categories pane (middle) and, when the terminal is wide enough, an edit
    pane (right) backed by a curses.textpad.Textbox.  Focus is tracked in
    ``self.whichwin`` ("acct" / "cat" / "edit").
    """

    def __init__(self, screen, month):
        self.filedir = ""        # path currently loaded in the edit pad ("" = none)
        self.whichwin = "cat"    # pane that currently has focus
        self.screen = screen     # root curses window
        self.month = month       # project Month object being displayed
        self.loadscreens()

    def loadscreens(self):
        """(Re)create every sub-window.  Safe to call again after KEY_RESIZE."""
        self.screen.erase()
        curses.mousemask(1)
        curses.start_color()
        curses.noecho()
        curses.init_pair(1, curses.COLOR_RED, curses.COLOR_WHITE)
        curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_BLUE)
        curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_WHITE)
        self.screen.bkgd(curses.color_pair(1))
        self.screen.box()
        screenheight, screenwidth = self.screen.getmaxyx()
        screenheight -= 1
        self.cmdheight = screenheight - 1   # row used for the command prompt
        acctwinwidth = 42
        # Centre the month name; // keeps integer division under Python 3 too.
        self.screen.addstr(1, acctwinwidth + 2 - len(self.month.name) // 2,
                           " " + self.month.name + " ", curses.A_REVERSE)
        self.screen.refresh()
        self.winheight = screenheight - 3
        self.acctwin = curses.newwin(self.winheight, acctwinwidth, 2, 2)
        self.acctwinstartline = 2
        self.printtoacctwin()
        catwinx = 2 + acctwinwidth + 1
        catwinwidth = min(70, screenwidth - catwinx - 2)
        self.catwin = curses.newwin(self.winheight, catwinwidth, 2, catwinx)
        self.catwinstartline = 2  # start at line 2 and go for it
        self.printtocatwin()
        editx = catwinx + 1 + catwinwidth
        if editx > screenwidth - 30:
            # Terminal too narrow for an edit pane; edit() will refuse politely.
            self.editpad = False
        else:
            editwidth = screenwidth - editx - 2
            self.editwin = curses.newwin(self.winheight, editwidth, 2, editx)
            self.editpad = curses.newwin(screenheight - 9, editwidth - 4,
                                         4, editx + 2)
            self.editwin.clear()
            self.editwin.box()
            self.editwin.bkgd(curses.color_pair(3))
            self.editwin.addstr(0, 2, "Edit")
            self.editwin.refresh()
            self.editpad.clear()
            self.editpad.bkgd(curses.color_pair(3))
            self.editpad.refresh()
            self.editor = curses.textpad.Textbox(self.editpad)
        self.resetblinker()

    def switchwhichwin(self, towin=0):
        """Move focus to *towin* ("cat"/"acct"/"edit"); with no argument,
        toggle between the categories and accounts panes."""
        if towin:
            if towin == "cat":
                if self.editpad:
                    if self.filedir == "":
                        self.editwin.addstr(0, 2, "Edit")
                    else:
                        self.editwin.addstr(0, 2, "Edit " + self.filedir)
                self.acctwin.addstr(0, 2, "Accounts")
                self.catwin.addstr(0, 2, "Categories", curses.A_UNDERLINE)
                self.whichwin = "cat"
            elif towin == "acct":
                if self.editpad:
                    if self.filedir == "":
                        self.editwin.addstr(0, 2, "Edit")
                    else:
                        self.editwin.addstr(0, 2, "Edit " + self.filedir)
                self.catwin.addstr(0, 2, "Categories")  # erase special text effect on Categories
                self.acctwin.addstr(0, 2, "Accounts", curses.A_UNDERLINE)
                self.whichwin = "acct"
            elif self.editpad:  # towin == "edit"
                if self.filedir == "":
                    self.editwin.addstr(0, 2, "Edit", curses.A_UNDERLINE)
                else:
                    self.editwin.addstr(0, 2, "Edit " + self.filedir, curses.A_UNDERLINE)
                self.catwin.addstr(0, 2, "Categories")  # erase special text effect on Categories
                self.acctwin.addstr(0, 2, "Accounts")
                # Cheat-sheet of Textbox (emacs-like) bindings on the bottom rows.
                self.editwin.addstr(self.winheight - 3, 2, "C-g", curses.A_BOLD)
                self.editwin.addstr(self.winheight - 3, 5, ": get out")
                self.editwin.addstr(self.winheight - 3, 18, "C-o / C-k", curses.A_BOLD)
                self.editwin.addstr(self.winheight - 3, 27, ": insert/kill line")
                self.editwin.addstr(self.winheight - 2, 2, "C-a", curses.A_BOLD)
                self.editwin.addstr(self.winheight - 2, 5, ": start left")
                self.editwin.addstr(self.winheight - 2, 19, "C-n / C-p", curses.A_BOLD)
                self.editwin.addstr(self.winheight - 2, 28, ": next/prev line")
                self.whichwin = "edit"
            self.acctwin.refresh()
            self.catwin.refresh()
            if self.editpad:
                self.editwin.refresh()
        else:
            # if no window was given, then switch between categories and accounts
            if self.whichwin == "cat":
                self.switchwhichwin("acct")
            elif self.whichwin == "acct":
                self.switchwhichwin("cat")

    def printtoacctwin(self):
        """Redraw the accounts pane: per-account start/end balances + totals.

        ``line`` may start negative (scrolled up); rows are only drawn while
        0 < line < height-1, but counters still advance so scrolling works.
        """
        line = self.acctwinstartline
        acctwinheight, acctwinwidth = self.acctwin.getmaxyx()
        self.acctwin.clear()
        self.acctwin.bkgd(curses.color_pair(3))
        self.acctwin.box()
        if self.whichwin == "acct":
            self.acctwin.addstr(0, 2, "Accounts", curses.A_UNDERLINE)
        else:
            self.acctwin.addstr(0, 2, "Accounts")
        starttotaldough = Dough(0)
        endtotaldough = Dough(0)
        # .items() behaves the same as the old .iteritems() here and is
        # portable across Python versions.
        for account, accountname in self.month.accountlist.items():
            if 0 < line < acctwinheight - 1:
                self.acctwin.addstr(line, 2, accountname + " (" + account + ")",
                                    curses.A_BOLD)
            line += 1
            try:
                sbalance = self.month.categories[account.upper()].metavalues["startingbalance"]
            except KeyError:
                sbalance = Dough(0)
            starttotaldough += sbalance
            if 0 < line < acctwinheight - 1:
                self.acctwin.addstr(line, 3, "start " + str(sbalance))
            line += 1
            ebalance = self.month.categories[account.upper()].metavalues["endingbalance"]
            if 0 < line < acctwinheight - 1:
                self.acctwin.addstr(line, 5, "end " + str(ebalance))
            line += 1
            endtotaldough += ebalance
            line += 1  # blank spacer row between accounts
        if 0 < line < acctwinheight - 1:
            self.acctwin.addstr(line, 2, "Totals", curses.A_BOLD)
        line += 1
        if 0 < line < acctwinheight - 1:
            self.acctwin.addstr(line, 3, "start " + str(starttotaldough.clean()))
        line += 1
        if 0 < line < acctwinheight - 1:
            self.acctwin.addstr(line, 5, "end " + str(endtotaldough.clean()))
        line += 1
        if 0 < line < acctwinheight - 1:
            self.acctwin.addstr(line, 3, "delta " + str(endtotaldough - starttotaldough))
        line += 1
        self.acctwin.refresh()

    def printcat(self, cat, line):
        """Draw one category starting at row *line*; return the next free row."""
        catwinheight, catwinwidth = self.catwin.getmaxyx()
        # if not an account, it's a category we can analyze more
        if cat.metaflags["business"]:
            if 0 < line < catwinheight - 1:
                self.catwin.addstr(line, 2, "Business " + str(cat.name), curses.A_BOLD)
        else:
            if 0 < line < catwinheight - 1:
                self.catwin.addstr(line, 2, str(cat.name), curses.A_BOLD)
        if cat.metaflags["income"]:
            catincome = cat.metavalues["changebudget"]
            line += 1
            if 0 < line < catwinheight - 1:
                self.catwin.addstr(line, 5, "got " + str(catincome))
        else:
            try:
                catbudget = cat.metavalues["budget"].clean()
                budgetenough = False
            except KeyError:
                # assume budget enough
                budgetenough = True
                catbudget = -cat.metavalues["changebudget"].clean()
            rightadjustx = 32
            if not cat.metaflags["business"]:
                if 0 < line < catwinheight - 1:
                    self.catwin.addstr(line, rightadjustx,
                                       ("budget " + str(catbudget)).rjust(catwinwidth - rightadjustx - 2))
            line += 1
            try:
                catchange = -cat.metavalues["changeactual"]
            except KeyError:
                catchange = Dough(0)
            if 0 < line < catwinheight - 1:
                self.catwin.addstr(line, 5, "spent " + str(catchange))
            if budgetenough:
                catbalance = Dough(0)
            else:
                try:
                    catbalance = cat.metavalues["endingbalance"]
                except KeyError:
                    catbalance = Dough(0)
            if catbalance != Dough(0):
                if 0 < line < catwinheight - 1:
                    self.catwin.addstr(line, rightadjustx,
                                       ("left " + str(catbalance)).rjust(catwinwidth - rightadjustx - 2))
        line += 1
        return line

    def printtocatwin(self):
        """Redraw the categories pane: personal categories, monthly summary,
        business categories, then actual-income/spendings totals."""
        catwinheight, catwinwidth = self.catwin.getmaxyx()
        line = self.catwinstartline
        self.catwin.clear()
        self.catwin.bkgd(curses.color_pair(3))
        self.catwin.box()
        self.catwin.addstr(0, 2, "Categories")
        if self.whichwin == "cat":
            self.catwin.addstr(0, 2, "Categories", curses.A_UNDERLINE)
        else:
            self.catwin.addstr(0, 2, "Categories")
        # sorted() replaces the old keys()+sort() pair and works on both
        # Python 2 and 3 (dict views aren't sortable in place on py3).
        sortedcategories = sorted(self.month.categories.keys())
        for category in sortedcategories:
            cat = self.month.categories[category]
            if not (cat.metaflags["account"] or cat.metaflags["business"]):
                line = self.printcat(cat, line)
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 2, "Monthly expected income", curses.A_BOLD)
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 5, str(self.month.monthlyexpectedincome))
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 2, "Monthly expected outpour", curses.A_BOLD)
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 5, str(self.month.monthlyexpectedoutpour))
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 2, "Accumulated anti-savings", curses.A_BOLD)
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 5, str(self.month.accumulatedantisavings))
        line += 2
        for category in sortedcategories:
            if category in self.month.categories:
                cat = self.month.categories[category]
                if cat.metaflags["business"] and not cat.metaflags["account"]:
                    line = self.printcat(cat, line)
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 2, "Total actual income", curses.A_BOLD)
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 5, str(self.month.totalactualincome))
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 2, "Total actual spendings", curses.A_BOLD)
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 5, str(self.month.totalactualspendings))
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 2, "Delta", curses.A_BOLD)
        line += 1
        if 0 < line < catwinheight - 1:
            self.catwin.addstr(line, 5,
                               str(self.month.totalactualincome - self.month.totalactualspendings))
        line += 1
        self.catwin.refresh()

    def modifystartline(self, deltaline):
        """Scroll the focused pane by *deltaline* rows and redraw it."""
        if self.whichwin == "acct":
            self.acctwinstartline += deltaline
            self.printtoacctwin()
        elif self.whichwin == "cat":
            self.catwinstartline += deltaline
            self.printtocatwin()

    def resetblinker(self):
        """Park the cursor on the command row."""
        self.screen.addstr(self.cmdheight, 1, "")

    def getcmd(self, cmdstring="cmd:"):
        """Prompt on the command row (echo on) and return the typed string."""
        self.screen.addstr(self.cmdheight, 1, " ")
        self.screen.addstr(self.cmdheight, 1, cmdstring, curses.A_REVERSE)
        self.screen.refresh()
        curses.echo()
        s = self.screen.getstr(self.cmdheight, len(cmdstring) + 2, 64)
        # set back to no echo
        curses.noecho()
        self.screen.addstr(self.cmdheight, 1, " ")
        self.resetblinker()
        self.screen.refresh()
        return s

    def show(self, something):
        """Display *something* on the command row."""
        self.screen.addstr(self.cmdheight, 2, str(something))

    def edit(self, filename=""):
        """Load *filename* (relative to the month's root dir) into the edit
        pad, let the user edit it, and optionally save it back.

        Falls back to the last-edited file, or a "scratch" buffer, when no
        filename is given.  Does nothing (with a hint) on narrow terminals.
        """
        if self.editpad:
            # erase the old edit window
            self.editwin.erase()
            self.editwin.box()
            # get ready the name to put on the top of the edit window
            if filename != "":
                self.filedir = os.path.join(self.month.rootdir, filename)
            elif self.filedir == "":
                self.filedir = "scratch"
            # then officially switch
            self.switchwhichwin("edit")
            # grab data from the file; a missing file just starts empty
            # (narrowed from a bare except so real bugs still surface)
            try:
                with open(self.filedir) as f:
                    data = f.readlines()
            except (IOError, OSError):
                data = [""]
            # get the editting pad ready
            self.editpad.erase()
            # add in the data
            for i, row in enumerate(data):
                self.editpad.addstr(i, 0, row)
            # then refresh it
            self.editpad.refresh()
            # make it so the editor doesn't strip newlines... we'll take care of it later
            self.editor.stripspaces = False
            # the unintended side effect is that it puts lots of white space at the end
            # of lines.
            data = self.editor.edit()
            s = "notavalue"
            # guard with `s and` so an empty answer re-prompts instead of
            # raising IndexError on s[0]
            while not (s and (s[0] == "Y" or s[0] == "N")):
                s = self.getcmd("save file? (y/n):")
                s = s.upper()
            if s[0] == "Y":
                # save data to filedir, but first we clean it up a bit:
                # strip trailing whitespace from every line...
                data = [row.rstrip() for row in data.split('\n')]
                # ...then drop all empty lines at the end
                while data and data[-1] == "":
                    del data[-1]
                # then save
                with open(self.filedir, 'w') as f:
                    for line in data:
                        f.write(line + "\n")
                # reset everything
                self.month.reset()
                self.month.grandtotal()
                self.printtoacctwin()
                self.printtocatwin()
                self.show("file " + self.filedir + " written.")
            else:
                self.show("not saving.")
            # erase the last lines of the edit window
            # (NOTE(review): original likely wrote a full-width run of spaces
            # here; the dump collapsed it to one — confirm against upstream)
            self.editwin.addstr(self.winheight - 3, 2, " ")
            self.editwin.addstr(self.winheight - 2, 2, " ")
            # after done editing, switch to category moving
            self.switchwhichwin("cat")
        else:
            self.show("widen screen")
# define a main function
def main(screen, month, rootdir):
    """Event loop: dispatch keys until ESC (27).

    Commands (typed after '/', ':' or ';'): q[uit], e[dit] [file], reload,
    load/open YYYY/mm, generate.  j/k/J/K and arrows scroll, TAB switches
    panes, KEY_RESIZE rebuilds the layout.
    """
    mainscreen = MainScreen(screen, month)
    c = 0
    while c != 27:  # 27 == ESC
        mainscreen.resetblinker()
        c = screen.getch()
        if c == ord('/') or c == ord(':') or c == ord(';'):
            # for input, use echo (so user can see what is going on)
            s = mainscreen.getcmd()
            if len(s) > 0:
                split = s.split()
                if split[0][0].lower() == "q":
                    c = 27
                elif split[0][0].lower() == "e":
                    if len(split) > 1:
                        mainscreen.edit(split[1])
                    else:
                        mainscreen.edit()
                elif s == "reload":
                    month.reset()
                    month.grandtotal()
                    mainscreen = MainScreen(screen, month)
                elif split[0] == "load" or split[0] == "open":
                    if len(split) == 1:
                        mainscreen.show("use load YYYY/mm, or open mm")
                    else:
                        # BUG FIX: the year was unpacked as YYYY0 but used
                        # as YYYY below; unify the name.
                        root, YYYY, mm = getrootYYYYmm(split, rootdir)
                        if root:
                            month = Month(root, YYYY, mm)
                            month.grandtotal()
                            mainscreen = MainScreen(screen, month)
                            rootdir = root
                        else:
                            mainscreen.show("not valid directory")
                elif s == "generate":
                    if month.generatenextmonth():
                        # s is "generate" here, so the loop always prompts at
                        # least once; `s and` guards s[0] on empty answers.
                        while not (s and (s[0] == "Y" or s[0].lower() == "n")):
                            s = mainscreen.getcmd("destroy existing " + month.nextyearmonth + "? Y/n")
                        if s[0] == "Y":
                            month.generatenextmonth(True)
                            mainscreen.show(month.nextyearmonth + " generated!")
                    else:
                        mainscreen.show(month.nextyearmonth + " generated!")
                else:
                    mainscreen.show("unknown command: " + s)
        elif c == ord('h') or c == curses.KEY_LEFT:
            pass
        elif c == ord('j') or c == curses.KEY_DOWN:
            mainscreen.modifystartline(-1)
        elif c == ord('J') or c == 336:  # 336: shift-down on common terminals
            mainscreen.modifystartline(-5)
        elif c == ord('k') or c == curses.KEY_UP:
            mainscreen.modifystartline(1)
        elif c == ord('K') or c == 337:  # 337: shift-up on common terminals
            mainscreen.modifystartline(5)
        elif c == ord('l') or c == curses.KEY_RIGHT:
            pass
        elif c == ord('\r') or c == ord('\n'):  # enter
            pass
        elif c == 9:
            # tab
            mainscreen.switchwhichwin()
        elif c == curses.KEY_MOUSE:
            m = curses.getmouse()
            mainscreen.show("mouse")
        elif c == curses.KEY_RESIZE:
            ok = True
            if mainscreen.whichwin == "edit":
                mainscreen.switchwhichwin("cat")
                ok = False
            mainscreen.loadscreens()
            if not ok:
                mainscreen.show("Don't resize when editting. Changes are lost.")
        else:
            mainscreen.show(c)
            curses.flash()
            curses.beep()
# run the main function only if this module is executed as the main script
# (if you import this as a module then nothing is executed)
if __name__ == "__main__":
    root, YYYY, mm = getrootYYYYmm(sys.argv)
    if root:
        month = Month(root, YYYY, mm)
    else:
        sys.exit(" Month is unavailable in pynances. Try YYYY" + os.sep + "mm")
    month.grandtotal()
    # call the main function, but wrap it so that the terminal will be ok
    # if the program screws up.
    try:
        curses.wrapper(main, month, root)
    except KeyboardInterrupt:
        # print() with one argument behaves identically under Python 2 and 3;
        # sys.exit() is preferred over the site-injected exit() helper.
        print("Got KeyboardInterrupt exception. Exiting...")
        sys.exit()
|
|
import logging
from datetime import datetime, timedelta
from .models import Challenge
from .utils import get_challenge_id as _id
from users.models import UserNode, Profile
from users.utils import user_node
from geo.continents import (AFRICA, ASIA, EUROPE, NORTH_AMERICA, SOUTH_AMERICA,
OCEANIA, ANTARCTICA)
from geo.groups import (european_countries, island_countries, countries_with_desert, french_speaking_countries,
original_eu_countries, original_us_states, continental_us_states, all_us_states)
from stats.models import SharingHistory, VIA_TWITTER, VIA_FACEBOOK, VIA_QR, VIA_POSTER
# Registry mapping challenge id (built by geo-agnostic helper _id) to the
# singleton implementation that can check completion for a profile.
all_challenges = {}


class ChallengeImpl:
    """ Base class for the implementation of challenges. """
    def is_completed_by(self, profile):
        # Default: never completed; subclasses override.
        return False


class TheEmber(ChallengeImpl):
    """ Share your Spark with one other person """
    def is_completed_by(self, profile):
        return profile.total_shares >= 1
all_challenges[_id(1, 1)] = TheEmber()


class OnLocation(ChallengeImpl):
    """ Add your location """
    def is_completed_by(self, profile):
        return profile.boost1_completed
all_challenges[_id(1, 2)] = OnLocation()


class ParentTrap(ChallengeImpl):
    """ Tell us where you got your Spark """
    def is_completed_by(self, profile):
        return profile.boost2_completed
all_challenges[_id(1, 3)] = ParentTrap()


class Socialized(ChallengeImpl):
    """ Obtain a share from Facebook """
    def is_completed_by(self, profile):
        return SharingHistory.has_gained_shares_via(profile, VIA_FACEBOOK)
all_challenges[_id(2, 1)] = Socialized()


class TwitterThreat(ChallengeImpl):
    """ Obtain a share from Twitter """
    def is_completed_by(self, profile):
        return SharingHistory.has_gained_shares_via(profile, VIA_TWITTER)
all_challenges[_id(2, 2)] = TwitterThreat()


class Multisparker(ChallengeImpl):
    """ Sign in on both your phone and your desktop Web browser """
    def is_completed_by(self, profile):
        return profile.login_desktop and profile.login_mobile
all_challenges[_id(2, 3)] = Multisparker()


class FaceOff(ChallengeImpl):
    """ Complete a face-to-face share via the QR code on your phone """
    def is_completed_by(self, profile):
        return SharingHistory.has_gained_shares_via(profile, VIA_QR)
all_challenges[_id(2, 4)] = FaceOff()


class MilesAway(ChallengeImpl):
    """ Share with someone new who lives over 100 miles away """
    def is_completed_by(self, profile):
        from spark.utils import distance
        DISTANCE = 160.9344  # 100 miles to km
        # Both ends must have a location (boost1) for the distance to mean anything.
        if profile.boost1_completed:
            for child in profile.children_profiles:
                if (child.boost1_completed and
                        distance((profile.latitude, profile.longitude),
                                 (child.latitude, child.longitude)) >= DISTANCE):
                    return True
        return False
all_challenges[_id(2, 5)] = MilesAway()


class LongDistanceRelationship(ChallengeImpl):
    """ Share with someone new in a different country """
    def is_completed_by(self, profile):
        if profile.boost1_completed:
            for child in profile.children_profiles:
                if child.boost1_completed and (profile.country_code != child.country_code):
                    return True
        return False
all_challenges[_id(2, 6)] = LongDistanceRelationship()


class BakersDozen(ChallengeImpl):
    """ Complete 13 shares """
    def is_completed_by(self, profile):
        return profile.total_shares >= 13
all_challenges[_id(2, 7)] = BakersDozen()
class DawnPatrol(ChallengeImpl):
    """ Share with someone between 6am and 10am (Local time for the recipient.) """
    def is_completed_by(self, profile):
        shares = SharingHistory.objects.filter(parent=profile)
        for share in shares:
            # local_hour of 0 (midnight) is falsy and skipped — outside the
            # 6-10 window anyway, so behavior is unaffected.
            if share.local_hour:
                if share.local_hour >= 6 and share.local_hour <= 10:
                    return True
        return False
all_challenges[_id(3, 1)] = DawnPatrol()


class StreetTeam(ChallengeImpl):
    """ Share with someone via a printed flyer """
    def is_completed_by(self, profile):
        return SharingHistory.has_gained_shares_via(profile, VIA_POSTER)
all_challenges[_id(3, 2)] = StreetTeam()


class ContinentalCrown(ChallengeImpl):
    """ Share with someone new on a different continent """
    def is_completed_by(self, profile):
        if profile.boost1_completed:
            for child in profile.children_profiles:
                if child.boost1_completed and (profile.continent_code != child.continent_code):
                    return True
        return False
all_challenges[_id(3, 3)] = ContinentalCrown()


class TripleThreat(ChallengeImpl):
    """ Complete 3 shares in a single 12-hour period """
    def is_completed_by(self, profile):
        if profile.total_shares >= 3:
            now = datetime.now()
            # BUG FIX: the query must be restricted to this profile's shares;
            # the unfiltered version looked at everyone's history.  The [2:3]
            # slice also avoids IndexError on a short queryset.
            recent = SharingHistory.objects.filter(parent=profile).order_by('-date_shared')[2:3]
            if recent and (recent[0].date_shared > now - timedelta(hours=12)):
                return True
        return False
all_challenges[_id(3, 4)] = TripleThreat()


class ChainGang(ChallengeImpl):
    """ Create a chain by having someone you've shared with share with someone else """
    def is_completed_by(self, profile):
        return profile.longest_chain >= 2
all_challenges[_id(3, 5)] = ChainGang()


class XXSparks(ChallengeImpl):
    """ Complete 20 shares """
    def is_completed_by(self, profile):
        return profile.total_shares >= 20
all_challenges[_id(3, 6)] = XXSparks()


class NightShift(ChallengeImpl):
    """ Share with someone between 2am and 4am. (Local time for the recipient.) """
    def is_completed_by(self, profile):
        shares = SharingHistory.objects.filter(parent=profile)
        for share in shares:
            if share.local_hour:
                if share.local_hour >= 2 and share.local_hour <= 4:
                    return True
        return False
all_challenges[_id(4, 1)] = NightShift()


class OctoSparker(ChallengeImpl):
    """ Share your Spark to 8 different U.S. states """
    def is_completed_by(self, profile):
        states = set()
        for child in profile.children_profiles:
            if child.us_state and child.us_state in all_us_states:
                states.add(child.us_state)
        return len(states) >= 8
all_challenges[_id(4, 2)] = OctoSparker()
class Euroflame(ChallengeImpl):
    """ Share your Spark to 5 different E.U. countries """
    def is_completed_by(self, profile):
        countries = set()
        # NOTE(review): this tests continent == EUROPE, not EU membership,
        # despite the docstring (and an unused european_countries import at
        # the top of the file) — confirm intent before changing.
        for child in profile.children_profiles:
            if child.continent_code and child.continent_code == EUROPE:
                countries.add(child.country_code)
        return len(countries) >= 5
all_challenges[_id(4, 3)] = Euroflame()


class OptimalVelocity(ChallengeImpl):
    """ Complete 6 shares within a single 12-hour period """
    def is_completed_by(self, profile):
        if profile.total_shares >= 6:
            now = datetime.now()
            # BUG FIX: restrict to this profile's shares (was a global query);
            # slicing avoids IndexError on a short queryset.
            recent = SharingHistory.objects.filter(parent=profile).order_by('-date_shared')[5:6]
            if recent and (recent[0].date_shared > now - timedelta(hours=12)):
                return True
        return False
all_challenges[_id(4, 4)] = OptimalVelocity()


class SpeedSpark(ChallengeImpl):
    """ Complete 2 or more shares in a single hour """
    def is_completed_by(self, profile):
        if profile.total_shares >= 2:
            now = datetime.now()
            # BUG FIX: restrict to this profile's shares (was a global query).
            recent = SharingHistory.objects.filter(parent=profile).order_by('-date_shared')[1:2]
            if recent and (recent[0].date_shared > now - timedelta(hours=1)):
                return True
        return False
all_challenges[_id(4, 5)] = SpeedSpark()


class XLSparks(ChallengeImpl):
    """ Complete 40 shares """
    def is_completed_by(self, profile):
        return profile.total_shares >= 40
all_challenges[_id(4, 6)] = XLSparks()


class Super60(ChallengeImpl):
    """ Share with 60 people """
    def is_completed_by(self, profile):
        return profile.total_shares >= 60
all_challenges[_id(5, 1)] = Super60()


class HundredHitter(ChallengeImpl):
    """ Share with 100 people """
    def is_completed_by(self, profile):
        return profile.total_shares >= 100
all_challenges[_id(5, 2)] = HundredHitter()


class Super250(ChallengeImpl):
    """ Share with 250 people """
    def is_completed_by(self, profile):
        return profile.total_shares >= 250
all_challenges[_id(5, 3)] = Super250()


class Super500(ChallengeImpl):
    """ Share with 500 people """
    def is_completed_by(self, profile):
        return profile.total_shares >= 500
all_challenges[_id(5, 4)] = Super500()


class Super1000(ChallengeImpl):
    """ Share with 1000 people """
    def is_completed_by(self, profile):
        return profile.total_shares >= 1000
all_challenges[_id(5, 5)] = Super1000()
class Trifecta(ChallengeImpl):
    """ Non-Android user who shares with three people """
    def is_completed_by(self, profile):
        return profile.is_non_android and profile.total_shares >= 3
all_challenges[_id(6, 1)] = Trifecta()


class YoureADime(ChallengeImpl):
    """ Non-Android user who shares with ten people """
    def is_completed_by(self, profile):
        return profile.is_non_android and profile.total_shares >= 10
all_challenges[_id(6, 2)] = YoureADime()


class Backpacker(ChallengeImpl):
    """ Share your Spark to 3 continents """
    def is_completed_by(self, profile):
        return profile.total_continents_sparked >= 3
all_challenges[_id(6, 3)] = Backpacker()


class Super7(ChallengeImpl):
    """ Share your Spark to all 7 continents """
    def is_completed_by(self, profile):
        return profile.total_continents_sparked == 7
all_challenges[_id(6, 4)] = Super7()


class PenguinSuit(ChallengeImpl):
    """ Share to Antarctica """
    def is_completed_by(self, profile):
        for child in profile.children_profiles:
            if child.continent_code and child.continent_code == ANTARCTICA:
                return True
        return False
all_challenges[_id(6, 5)] = PenguinSuit()


class PolarPower(ChallengeImpl):
    """ Share to Arctic Circle """
    def is_completed_by(self, profile):
        # 66.5622°N is the latitude of the Arctic Circle.
        for child in profile.children_profiles:
            if child.latitude and child.latitude > 66.5622:
                return True
        return False
all_challenges[_id(6, 6)] = PolarPower()


class CapitalPower(ChallengeImpl):
    """ Share to the capital of any country """
    def is_completed_by(self, profile):
        for child in profile.children_profiles:
            if child.major_city and child.major_city.is_capital:
                return True
        return False
all_challenges[_id(6, 7)] = CapitalPower()


class PuddleJumper(ChallengeImpl):
    """ Share between the US and UK """
    def is_completed_by(self, profile):
        # NOTE(review): ISO 3166-1 uses 'GB' for the United Kingdom; confirm
        # the project's geo layer really emits 'UK' before changing this.
        if profile.country_code == 'UK':
            for child in profile.children_profiles:
                if child.country_code and child.country_code == 'US':
                    return True
        if profile.country_code == 'US':
            for child in profile.children_profiles:
                if child.country_code and child.country_code == 'UK':
                    return True
        return False
all_challenges[_id(6, 8)] = PuddleJumper()


class TimeWarp(ChallengeImpl):
    """ Share with someone in each of the 10 different timezones """
    def is_completed_by(self, profile):
        shares = SharingHistory.objects.filter(parent=profile)
        timezones = set(share.timezone for share in shares if share.timezone)
        return len(timezones) >= 10
all_challenges[_id(6, 9)] = TimeWarp()


class IslandHopper(ChallengeImpl):
    """ Share your Spark with someone on an island (Hawaii, Japan, etc) """
    def is_completed_by(self, profile):
        for child in profile.children_profiles:
            if child.country_code and child.country_code in island_countries:
                return True
        return False
all_challenges[_id(6, 10)] = IslandHopper()


class ViveLaLumiere(ChallengeImpl):
    """ Share your Spark to someone in a French-speaking country """
    def is_completed_by(self, profile):
        for child in profile.children_profiles:
            if child.country_code and child.country_code in french_speaking_countries:
                return True
        return False
all_challenges[_id(6, 11)] = ViveLaLumiere()


class EarthSandwich(ChallengeImpl):
    """ Share with someone roughly on the other side of the globe """
    def is_completed_by(self, profile):
        from spark.utils import distance
        if profile.boost1_completed:
            for child in profile.children_profiles:
                if child.boost1_completed:
                    d = distance((profile.latitude, profile.longitude),
                                 (child.latitude, child.longitude))
                    # Earth's circumference is 40,075 km.
                    # Check if the child is located farther than 40,000 km
                    # divided by 2, with a 2,000 km tolerance
                    if d > 18000:
                        return True
        return False
all_challenges[_id(6, 12)] = EarthSandwich()
class PanAmericano(ChallengeImpl):
    """ Share your Spark between a North and South American city """
    def is_completed_by(self, profile):
        if profile.continent_code:
            if profile.continent_code == NORTH_AMERICA:
                for child in profile.children_profiles:
                    if child.continent_code and child.continent_code == SOUTH_AMERICA:
                        return True
            if profile.continent_code == SOUTH_AMERICA:
                for child in profile.children_profiles:
                    if child.continent_code and child.continent_code == NORTH_AMERICA:
                        return True
        return False
all_challenges[_id(6, 13)] = PanAmericano()


class FeelTheHeat(ChallengeImpl):
    """ Share to a country with a desert in it """
    def is_completed_by(self, profile):
        for child in profile.children_profiles:
            if child.country_code and child.country_code in countries_with_desert:
                return True
        return False
all_challenges[_id(6, 14)] = FeelTheHeat()


class TheColonial(ChallengeImpl):
    """ Share to a friend in each of the original 13 US states """
    def is_completed_by(self, profile):
        states = set()
        for child in profile.children_profiles:
            if child.us_state and child.us_state in original_us_states:
                states.add(child.us_state)
        return len(states) == len(original_us_states)
all_challenges[_id(6, 15)] = TheColonial()


class AllAmerican(ChallengeImpl):
    """ Share to someone in each continental state """
    def is_completed_by(self, profile):
        states = set()
        for child in profile.children_profiles:
            if child.us_state and child.us_state in continental_us_states:
                states.add(child.us_state)
        return len(states) == len(continental_us_states)
all_challenges[_id(6, 16)] = AllAmerican()


class Brussels(ChallengeImpl):
    """ Share with someone in each original EU country """
    def is_completed_by(self, profile):
        countries = set()
        for child in profile.children_profiles:
            if child.country_code and child.country_code in original_eu_countries:
                countries.add(child.country_code)
        return len(countries) == len(original_eu_countries)
all_challenges[_id(6, 17)] = Brussels()


class TheAmazon(ChallengeImpl):
    """ Share to or from Brazil """
    def is_completed_by(self, profile):
        # Share from Brazil
        if profile.total_shares > 0 and profile.country_code == 'BR':
            return True
        # Share to Brazil
        for child in profile.children_profiles:
            if child.country_code and child.country_code == 'BR':
                return True
        return False
all_challenges[_id(6, 18)] = TheAmazon()


class HallOfFamer(ChallengeImpl):
    """ Person with the most shares """
    def is_completed_by(self, profile):
        max_share_count = SharingHistory.get_max_share_count()
        profile_share_count = SharingHistory.get_num_shares(profile)
        return max_share_count == profile_share_count
all_challenges[_id(6, 19)] = HallOfFamer()
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "funnel.marker"
_path_str = "funnel.marker.line"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorscale",
"colorsrc",
"reversescale",
"width",
"widthsrc",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is true, the
default palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.line.color`) or the
bounds set in `marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be equidistant
to this point. Has an effect only if in `marker.line.color`is
set to a numerical array. Value should have the same units as
in `marker.line.color`. Has no effect when `marker.line.cauto`
is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
    """
    Marker-line color. Accepts either one specific color or an array
    of numbers that are mapped onto the colorscale — relative to the
    array's own min/max, or to `marker.line.cmin` / `marker.line.cmax`
    when those are set.

    The 'color' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (e.g. 'red', 'steelblue', 'rebeccapurple')
      - A number that will be interpreted as a color
        according to funnel.marker.line.colorscale
      - A list or array of any of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["color"]

@color.setter
def color(self, val):
    self["color"] = val

# coloraxis
# ---------
@property
def coloraxis(self):
    """
    Reference to a shared color axis ("coloraxis", "coloraxis2",
    "coloraxis3", ...). Settings for shared color axes live in the
    layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
    Multiple color scales may be linked to the same color axis.

    The 'coloraxis' property is an identifier of a particular
    subplot, of type 'coloraxis': the string 'coloraxis' optionally
    followed by an integer >= 1
    (e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)

    Returns
    -------
    str
    """
    return self["coloraxis"]

@coloraxis.setter
def coloraxis(self, val):
    self["coloraxis"] = val

# colorscale
# ----------
@property
def colorscale(self):
    """
    Colorscale used when `marker.line.color` is a numerical array.
    May be given as a list of colors (spaced evenly to build the
    scale), a list of [normalized-level, color] pairs covering at
    least the levels 0 and 1 (e.g. `[[0, 'rgb(0,0,255)'],
    [1, 'rgb(255,0,0)']]`), or the name of a predefined palette such
    as Viridis, Cividis, Jet, RdBu, YlGnBu, etc. (full list in the
    plotly.colors package). Appending '_r' to a named colorscale
    reverses it. The bounds of the scale in color space are
    controlled with `marker.line.cmin` and `marker.line.cmax`.

    Returns
    -------
    str
    """
    return self["colorscale"]

@colorscale.setter
def colorscale(self, val):
    self["colorscale"] = val

# colorsrc
# --------
@property
def colorsrc(self):
    """
    Source reference on Chart Studio Cloud for `color`.

    The 'colorsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["colorsrc"]

@colorsrc.setter
def colorsrc(self, val):
    self["colorsrc"] = val

# reversescale
# ------------
@property
def reversescale(self):
    """
    When True, the color mapping is reversed: `marker.line.cmin`
    maps to the last color of the scale and `marker.line.cmax` to
    the first. Only relevant when `marker.line.color` is a numerical
    array.

    The 'reversescale' property must be specified as a bool
    (either True, or False)

    Returns
    -------
    bool
    """
    return self["reversescale"]

@reversescale.setter
def reversescale(self, val):
    self["reversescale"] = val

# width
# -----
@property
def width(self):
    """
    Width (in px) of the lines bounding the marker points.

    The 'width' property is a number and may be specified as:
      - An int or float in the interval [0, inf]
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    int|float|numpy.ndarray
    """
    return self["width"]

@width.setter
def width(self, val):
    self["width"] = val

# widthsrc
# --------
@property
def widthsrc(self):
    """
    Source reference on Chart Studio Cloud for `width`.

    The 'widthsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["widthsrc"]

@widthsrc.setter
def widthsrc(self, val):
    self["widthsrc"] = val

# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Plain-text summary of every property on this object. Plotly's
    # base machinery presumably embeds this text in constructor
    # docstrings / validation errors — the literal below is returned
    # verbatim, so its content must not be edited casually.
    return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color`is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets themarker.linecolor. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color`is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
"""
def __init__(
    self,
    arg=None,
    autocolorscale=None,
    cauto=None,
    cmax=None,
    cmid=None,
    cmin=None,
    color=None,
    coloraxis=None,
    colorscale=None,
    colorsrc=None,
    reversescale=None,
    width=None,
    widthsrc=None,
    **kwargs
):
    """
    Construct a new Line object.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of
        :class:`plotly.graph_objs.funnel.marker.Line`
    autocolorscale, cauto, cmax, cmid, cmin, color, coloraxis,
    colorscale, colorsrc, reversescale, width, widthsrc
        Property values; see the matching property docstrings on
        this class for the full description of each one.

    Returns
    -------
    Line
    """
    super(Line, self).__init__("line")

    # Internal construction path: a parent object was handed in
    # directly, so skip all validation/population below.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Validate arg
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.funnel.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnel.marker.Line`"""
        )

    # Handle skip_invalid
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate the data dict: an explicit keyword argument takes
    # precedence over the corresponding entry popped out of `arg`,
    # and only non-None values are assigned.
    explicit_values = {
        "autocolorscale": autocolorscale,
        "cauto": cauto,
        "cmax": cmax,
        "cmid": cmid,
        "cmin": cmin,
        "color": color,
        "coloraxis": coloraxis,
        "colorscale": colorscale,
        "colorsrc": colorsrc,
        "reversescale": reversescale,
        "width": width,
        "widthsrc": widthsrc,
    }
    for prop, explicit in explicit_values.items():
        value = arg.pop(prop, None)
        if explicit is not None:
            value = explicit
        if value is not None:
            self[prop] = value

    # Process unknown kwargs
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid
    self._skip_invalid = False
|
|
import speech_recognition as sr
import argparse
import sys
import time
import base64
import enum
class SpeechController():
def __init__(self, gestureController, state_machine=None):
self._state_machine = state_machine
self._gesture_controller = gestureController
self.init_state = enum.SpeechState.INIT
self.done_init_seq = False
self.processed_speech = ""
self.n = 0
self.r = sr.Recognizer()
self.m = sr.Microphone()
self.listen_string = ""
def start_init_seq(self):
while not self.done_init_seq:
self._start_init_seq()
if self._state_machine != None:
self._state_machine.handler_speech_done_init_seq()
else:
print "Done init seq"
def _start_init_seq(self):
if self.init_state == enum.SpeechState.INIT:
test_initial = self.ask_user()
self._gesture_controller.animatedSay(test_initial)
print test_initial
self.n = 0
#resp = self.wait_for_response_or_until_timeout()
resp = True
if resp:
self.init_state = enum.SpeechState.WAIT_FOR_RESPONSE
else:
self.init_state = enum.SpeechState.TIMEOUT
return
if self.init_state == enum.SpeechState.WAIT_FOR_RESPONSE:
self.n += 1
if self.n >= 4:
self.init_state = enum.SpeechState.DONE
return
self.listen_string = self.listen()
if self.listen_string == "":
self.init_state = enum.SpeechState.WAIT_FOR_RESPONSE
self.n = self.n - 1
else:
self.init_state = enum.SpeechState.RESPONSE_RECEIVED
return
if self.init_state == enum.SpeechState.RESPONSE_RECEIVED:
response = self.process_response(self.listen_string, self.n)
print response
self._gesture_controller.animatedSay(response)
#call function that says response
#gestureController.animatedSpeechProxy.say(response)
if response == None:
self.n = self.n - 1
self._gesture_controller.animatedSay("Sorry, I did not catch that, could you please repeat?")
self.init_state = enum.SpeechState.WAIT_FOR_RESPONSE
return
if self.init_state == enum.SpeechState.DONE:
self.done_init_seq = True
return
# if not done:
# self.init_state = enum.SpeechState.ANSWER_BACK
# else:
# self.init_state = enum.SpeechState.DONE
# return
# if self.init_state == enum.SpeechState.DONE:
# call gesture controller and say text
# self.done_init_seq = True
def listen(self):
# print("A moment of silence, please...")
with self.m as source:
self.r.adjust_for_ambient_noise(source)
# print("Set minimum energy threshold to {}".format(self.r.energy_threshold))
# print("test1")
print("Say something!")
with self.m as source:
audio = self.r.listen(source)
print("Got it! Now to recognize it...")
var1 = ""
try:
# recognize speech using Google Speech Recognition
value = self.r.recognize_google(audio)
# we need some special handling here to correctly print unicode characters to standard output
if str is bytes: # this version of Python uses bytes for strings (Python 2)
print(u"You said {}".format(value).encode("utf-8"))
var1 = u"{}".format(value).encode("utf-8")
else:
print("You said {}".format(value))
print("test3")
except sr.UnknownValueError:
print("Oops! Didn't catch that")
except sr.RequestError as e:
print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e))
return var1
def process_response(self, var1, n):
my_dict = self.get_dictionary(n)
#print "[n = %d] MY_DICT: %s" % (n, my_dict)
#print "var1 = %s" % var1
#print "n = %d" % n
ret = ""
funct_dict = {}
for keyword, funct in my_dict.iteritems():
print "var1: %s, keyword: %s, find: %d" % (var1, keyword, var1.find(keyword))
if var1.find(keyword) >= 0:
funct_dict[keyword] = funct
if 'you' in funct_dict:
ret = funct_dict['you']
elif len(funct_dict) > 0:
ret = funct_dict.itervalues().next()
return ret
def ask_user(self):
return("Hey! My name is NAO, how are you?")
def get_dictionary(self, n):
if n == 1:
return {"you" : self.how_are_you(),\
"fine" : self.describe_exp(),\
"good" : self.describe_exp(),\
"thanks" : self.describe_exp(),\
"hi" : self.describe_exp(),\
"hello" : self.describe_exp(),\
"well" : self.describe_exp(),\
}
if n == 2:
return {"high" : self.increase_vol(),\
"higher" : self.increase_vol(),\
"hi" : self.increase_vol(),\
"hiya" : self.increase_vol(),\
"more" : self.increase_vol(),\
"low" : self.decrease_vol(),\
"lower" : self.decrease_vol(),\
"less" : self.decrease_vol(),\
"ok" : self.ok_vol(),\
"fine" : self.ok_vol(),\
}
if n == 3:
return {"fast" : self.increase_speed(),\
"faster" : self.increase_speed(),\
"hi" : self.increase_speed(),\
"slow" : self.decrease_speed(),\
"slower" : self.decrease_speed(),\
"ok" : self.ok_speed(),\
"fine" : self.ok_speed(),\
}
# if n == 4:
# return {"ok" : self.start_read(),\
# "alright" : self.start_read(),\
# "fine" : self.start_read(),\
# "sure" : self.start_read(),\
# }
def ask_user(self):
return("Hey! My name is Pinaoqio, how are you?")
def how_are_you(self):
return("I'm fine, thanks. " + self.describe_exp())
def describe_exp(self):
return ("Today, I am going to be reading a passage from an eagle in the snow, by Michael Morpurgo. Let's check the volume. Could you please say more or less if you would like higher or lower volume, or OK if the volume is fine.")
def increase_vol(self):
self._gesture_controller.incrementVol()
return("Great, I have increased the volume. What do you think about the speed? Please say faster, slower, or ok.")
def decrease_vol(self):
self._gesture_controller.decrementVol()
return("Great, I have decreased the volume. What do you think about the speed? Please say faster, slower, or ok.")
def ok_vol(self):
return("I am glad you like the volume. What do you think about the speed? Please say faster, slower, or ok.")
def increase_speed(self):
self._gesture_controller.incrementSpeed()
return("Alright, I have increased the speed. I am now ready to read you a story! Please say ok when you are ready!")
def decrease_speed(self):
self._gesture_controller.decrementSpeed()
return("Alright, I have decreased the speed. I am now ready to read you a story! Please say ok when you are ready!")
def ok_speed(self):
return("Perfect. I am now ready to read the a story! I will take a picture of the book to process its text, sorry for the wait!")
# def start_read(self):
# #start reading
# return("starting")
# def listen_while_reading()
# print("A moment of silence, please...")
# with m as source: r.adjust_for_ambient_noise(source)
# print("Set minimum energy threshold to {}".format(r.energy_threshold))
# while var1 != "stop":
# print("test1")
# print("Say something!")
# with m as source: audio = r.listen(source)
# print("Got it! Now to recognize it...")
# try:
# # recognize speech using Google Speech Recognition
# value = r.recognize_google(audio)
# # we need some special handling here to correctly print unicode characters to standard output
# if str is bytes: # this version of Python uses bytes for strings (Python 2)
# print(u"You said {}".format(value).encode("utf-8"))
# var1 = u"You said {}".format(value).encode("utf-8")
# else: # else: # this version of Python uses unicode for strings (Python 3+)
# this version of Python uses unicode for strings (Python 3+)
# print("You said {}".format(value))
# print("test3")
# except sr.UnknownValueError:
# print("Oops! Didn't catch that")
# except sr.RequestError as e:
# print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e))
# return var1
def main ():
    # NOTE(review): SpeechController.__init__ requires a gestureController
    # argument, so this call raises TypeError as written — supply a
    # gesture controller instance here (TODO: confirm intended source).
    speech_controller = SpeechController()
    speech_controller.start_init_seq()

if __name__ == "__main__":
    main()
|
|
from collections import OrderedDict, defaultdict, namedtuple, Counter
from collections.abc import Iterable
from copy import deepcopy
from numbers import Real
from pathlib import Path
import re
import warnings
from xml.etree import ElementTree as ET
import numpy as np
import openmc
import openmc.data
import openmc.checkvalue as cv
from ._xml import clean_indentation, reorder_attributes
from .mixin import IDManagerMixin
# Units for density supported by OpenMC
DENSITY_UNITS = ('g/cm3', 'g/cc', 'kg/m3', 'atom/b-cm', 'atom/cm3', 'sum',
                 'macro')

# Record for one nuclide entry: name, amount, and whether the amount is
# atom percent ('ao') or weight percent ('wo').
NuclideTuple = namedtuple('NuclideTuple', ['name', 'percent', 'percent_type'])
class Material(IDManagerMixin):
    """A material composed of a collection of nuclides/elements.

    To create a material, one should create an instance of this class, add
    nuclides or elements with :meth:`Material.add_nuclide` or
    :meth:`Material.add_element`, respectively, and set the total material
    density with :meth:`Material.set_density`. The material can then be
    assigned to a cell using the :attr:`Cell.fill` attribute.

    Parameters
    ----------
    material_id : int, optional
        Unique identifier for the material. If not specified, an identifier will
        automatically be assigned.
    name : str, optional
        Name of the material. If not specified, the name will be the empty
        string.
    temperature : float, optional
        Temperature of the material in Kelvin. If not specified, the material
        inherits the default temperature applied to the model.

    Attributes
    ----------
    id : int
        Unique identifier for the material
    temperature : float
        Temperature of the material in Kelvin.
    density : float
        Density of the material (units defined separately)
    density_units : str
        Units used for `density`. Can be one of 'g/cm3', 'g/cc', 'kg/m3',
        'atom/b-cm', 'atom/cm3', 'sum', or 'macro'. The 'macro' unit only
        applies in the case of a multi-group calculation.
    depletable : bool
        Indicate whether the material is depletable.
    nuclides : list of namedtuple
        List in which each item is a namedtuple consisting of a nuclide string,
        the percent density, and the percent type ('ao' or 'wo'). The namedtuple
        has field names ``name``, ``percent``, and ``percent_type``.
    isotropic : list of str
        Nuclides for which elastic scattering should be treated as though it
        were isotropic in the laboratory system.
    average_molar_mass : float
        The average molar mass of nuclides in the material in units of grams per
        mol. For example, UO2 with 3 nuclides will have an average molar mass
        of 270 / 3 = 90 g / mol.
    volume : float
        Volume of the material in cm^3. This can either be set manually or
        calculated in a stochastic volume calculation and added via the
        :meth:`Material.add_volume_information` method.
    paths : list of str
        The paths traversed through the CSG tree to reach each material
        instance. This property is initialized by calling the
        :meth:`Geometry.determine_paths` method.
    num_instances : int
        The number of instances of this material throughout the geometry. This
        property is initialized by calling the :meth:`Geometry.determine_paths`
        method.
    fissionable_mass : float
        Mass of fissionable nuclides in the material in [g]. Requires that the
        :attr:`volume` attribute is set.
    """

    # ID bookkeeping consumed by IDManagerMixin to hand out unique
    # material IDs automatically.
    next_id = 1
    used_ids = set()
def __init__(self, material_id=None, name='', temperature=None):
    # Initialize class attributes. Public attributes go through their
    # property setters, which validate types (and, for `id`, allocate
    # an identifier via IDManagerMixin when material_id is None).
    self.id = material_id
    self.name = name
    self.temperature = temperature
    # Density is unset until set_density() is called; 'sum' means the
    # density is taken as the sum of the nuclide amounts.
    self._density = None
    self._density_units = 'sum'
    self._depletable = False
    # Filled in later by Geometry.determine_paths() / volume calcs.
    self._paths = None
    self._num_instances = None
    self._volume = None
    self._atoms = {}
    self._isotropic = []
    # A list of tuples (nuclide, percent, percent type)
    self._nuclides = []
    # The single instance of Macroscopic data present in this material
    # (only one is allowed, hence this is different than _nuclides, etc)
    self._macroscopic = None
    # If specified, a list of table names
    self._sab = []
def __repr__(self):
    """Return a multi-line, human-readable summary of the material."""
    parts = ['Material\n']
    parts.append('{: <16}=\t{}\n'.format('\tID', self._id))
    parts.append('{: <16}=\t{}\n'.format('\tName', self._name))
    parts.append('{: <16}=\t{}\n'.format('\tTemperature', self._temperature))
    parts.append('{: <16}=\t{}'.format('\tDensity', self._density))
    parts.append(' [{}]\n'.format(self._density_units))

    parts.append('{: <16}\n'.format('\tS(a,b) Tables'))
    for table in self._sab:
        parts.append('{: <16}=\t{}\n'.format('\tS(a,b)', table))

    parts.append('{: <16}\n'.format('\tNuclides'))
    for nuc_name, percent, percent_type in self._nuclides:
        parts.append('{: <16}'.format('\t{}'.format(nuc_name)))
        parts.append('=\t{: <12} [{}]\n'.format(percent, percent_type))

    if self._macroscopic is not None:
        parts.append('{: <16}\n'.format('\tMacroscopic Data'))
        parts.append('{: <16}'.format('\t{}'.format(self._macroscopic)))

    return ''.join(parts)
@property
def name(self):
    """str : Name of the material ('' when unset)."""
    return self._name

@property
def temperature(self):
    """float or None : Temperature of the material in Kelvin."""
    return self._temperature

@property
def density(self):
    """float or None : Density value (units given by density_units)."""
    return self._density

@property
def density_units(self):
    """str : Units for density ('sum' means derived from nuclide amounts)."""
    return self._density_units

@property
def depletable(self):
    """bool : Whether the material is treated as depletable."""
    return self._depletable

@property
def paths(self):
    """list of str : CSG paths to each instance of this material.

    Raises ValueError if Geometry.determine_paths() has not been run.
    """
    if self._paths is None:
        raise ValueError('Material instance paths have not been determined. '
                         'Call the Geometry.determine_paths() method.')
    return self._paths

@property
def num_instances(self):
    """int : Number of instances of this material in the geometry.

    Raises ValueError if Geometry.determine_paths() has not been run.
    """
    if self._num_instances is None:
        raise ValueError(
            'Number of material instances have not been determined. Call '
            'the Geometry.determine_paths() method.')
    return self._num_instances

@property
def nuclides(self):
    """list of namedtuple : (name, percent, percent_type) entries."""
    return self._nuclides

@property
def isotropic(self):
    """list of str : Nuclides given isotropic-in-lab elastic scattering."""
    return self._isotropic
@property
def average_molar_mass(self):
    """float : Average molar mass (g/mol) over the material's nuclides.

    Uses the specified atom ('ao') or weight ('wo') amounts as the
    weighting basis when accumulating total mass and total moles.
    """
    total_mass = 0.
    total_moles = 0.
    for nuc in self.nuclides:
        molar_mass = openmc.data.atomic_mass(nuc.name)
        if nuc.percent_type == 'ao':
            # Atom amount: moles add directly; mass scales by molar mass.
            total_mass += nuc.percent * molar_mass
            total_moles += nuc.percent
        else:
            # Weight amount: mass adds directly; moles scale inversely.
            total_mass += nuc.percent
            total_moles += nuc.percent / molar_mass
    return total_mass / total_moles
@property
def volume(self):
    """float or None : Volume of the material in cm^3."""
    return self._volume

@name.setter
def name(self, name):
    # None is accepted and normalized to the empty string.
    if name is not None:
        cv.check_type('name for Material ID="{}"'.format(self._id),
                      name, str)
        self._name = name
    else:
        self._name = ''

@temperature.setter
def temperature(self, temperature):
    # None is allowed: the material then inherits the model default.
    cv.check_type('Temperature for Material ID="{}"'.format(self._id),
                  temperature, (Real, type(None)))
    self._temperature = temperature

@depletable.setter
def depletable(self, depletable):
    cv.check_type('Depletable flag for Material ID="{}"'.format(self.id),
                  depletable, bool)
    self._depletable = depletable

@volume.setter
def volume(self, volume):
    # None clears any previously assigned volume.
    if volume is not None:
        cv.check_type('material volume', volume, Real)
    self._volume = volume

@isotropic.setter
def isotropic(self, isotropic):
    cv.check_iterable_type('Isotropic scattering nuclides', isotropic,
                           str)
    self._isotropic = list(isotropic)
@property
def fissionable_mass(self):
    """float : Mass [g] of fissionable nuclides (Z >= 90).

    Requires that the volume attribute has been set.
    """
    if self.volume is None:
        raise ValueError("Volume must be set in order to determine mass.")
    mass_per_cc = 0.0
    for nuc, atoms_per_cc in self.get_nuclide_atom_densities().values():
        if openmc.data.zam(nuc)[0] >= 90:
            # 1e24 presumably converts atom/b-cm to atom/cm^3 — then
            # molar mass / Avogadro gives grams per atom.
            mass_per_cc += 1e24 * atoms_per_cc \
                * openmc.data.atomic_mass(nuc) / openmc.data.AVOGADRO
    return mass_per_cc * self.volume
@classmethod
def from_hdf5(cls, group):
    """Create material from HDF5 group

    Parameters
    ----------
    group : h5py.Group
        Group in HDF5 file

    Returns
    -------
    openmc.Material
        Material instance
    """
    # The group name ends in the material's numeric id.
    mat_id = int(group.name.split('/')[-1].lstrip('material '))
    mat_name = group['name'][()].decode() if 'name' in group else ''
    density = group['atom_density'][()]

    # Create the Material
    material = cls(mat_id, mat_name)
    material.depletable = bool(group.attrs['depletable'])
    if 'volume' in group.attrs:
        material.volume = group.attrs['volume']
    if "temperature" in group.attrs:
        material.temperature = group.attrs["temperature"]

    # Read the names of the S(a,b) tables for this Material and add them
    if 'sab_names' in group:
        for sab_table in group['sab_names'][()]:
            material.add_s_alpha_beta(sab_table.decode())

    # Set the Material's density to atom/b-cm as used by OpenMC
    material.set_density(density=density, units='atom/b-cm')

    if 'nuclides' in group:
        # Bug fix: read the densities here rather than unconditionally
        # above — previously a file with a 'nuclides' dataset but no
        # 'nuclide_densities' dataset crashed with a NameError on an
        # unbound local; now it raises a clear KeyError from h5py.
        nuc_densities = group['nuclide_densities'][()]
        # Add all nuclides to the Material
        for fullname, nuc_density in zip(group['nuclides'][()],
                                         nuc_densities):
            material.add_nuclide(fullname.decode().strip(),
                                 percent=nuc_density, percent_type='ao')

    if 'macroscopics' in group:
        # Add all macroscopics to the Material
        for fullname in group['macroscopics'][()]:
            material.add_macroscopic(fullname.decode().strip())

    return material
    def add_volume_information(self, volume_calc):
        """Add volume information to a material.
        Parameters
        ----------
        volume_calc : openmc.VolumeCalculation
            Results from a stochastic volume calculation
        Raises
        ------
        ValueError
            If the calculation has no entry for this material
        """
        if volume_calc.domain_type == 'material':
            if self.id in volume_calc.volumes:
                # .n: nominal value (presumably an uncertainties ufloat) —
                # TODO confirm against VolumeCalculation.
                self._volume = volume_calc.volumes[self.id].n
                self._atoms = volume_calc.atoms[self.id]
            else:
                raise ValueError('No volume information found for material ID={}.'
                                 .format(self.id))
        else:
            # NOTE(review): same message as above even though this branch is a
            # domain-type mismatch rather than a missing entry.
            raise ValueError('No volume information found for material ID={}.'
                             .format(self.id))
    def set_density(self, units, density=None):
        """Set the density of the material
        Parameters
        ----------
        units : {'g/cm3', 'g/cc', 'kg/m3', 'atom/b-cm', 'atom/cm3', 'sum', 'macro'}
            Physical units of density.
        density : float, optional
            Value of the density. Must be specified unless units is given as
            'sum'.
        Raises
        ------
        ValueError
            If no density value is given for units other than 'sum'
        """
        cv.check_value('density units', units, DENSITY_UNITS)
        self._density_units = units
        if units == 'sum':
            # 'sum' derives the density from the nuclide densities, so any
            # explicit value is ignored (with a warning).
            if density is not None:
                msg = 'Density "{}" for Material ID="{}" is ignored ' \
                      'because the unit is "sum"'.format(density, self.id)
                warnings.warn(msg)
        else:
            if density is None:
                msg = 'Unable to set the density for Material ID="{}" ' \
                      'because a density value must be given when not using ' \
                      '"sum" unit'.format(self.id)
                raise ValueError(msg)
            cv.check_type('the density for Material ID="{}"'.format(self.id),
                          density, Real)
            self._density = density
    def add_nuclide(self, nuclide, percent, percent_type='ao'):
        """Add a nuclide to the material
        Parameters
        ----------
        nuclide : str
            Nuclide to add, e.g., 'Mo95'
        percent : float
            Atom or weight percent
        percent_type : {'ao', 'wo'}
            'ao' for atom percent and 'wo' for weight percent
        Raises
        ------
        ValueError
            If a macroscopic data set has already been added
        """
        cv.check_type('nuclide', nuclide, str)
        cv.check_type('percent', percent, Real)
        cv.check_value('percent type', percent_type, {'ao', 'wo'})
        # Nuclides and macroscopic cross sections are mutually exclusive.
        if self._macroscopic is not None:
            msg = 'Unable to add a Nuclide to Material ID="{}" as a ' \
                  'macroscopic data-set has already been added'.format(self._id)
            raise ValueError(msg)
        # If nuclide name doesn't look valid, give a warning
        try:
            Z, _, _ = openmc.data.zam(nuclide)
        except ValueError as e:
            warnings.warn(str(e))
        else:
            # For actinides, have the material be depletable by default
            if Z >= 89:
                self.depletable = True
        self._nuclides.append(NuclideTuple(nuclide, percent, percent_type))
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the material
Parameters
----------
nuclide : str
Nuclide to remove
"""
cv.check_type('nuclide', nuclide, str)
# If the Material contains the Nuclide, delete it
for nuc in reversed(self.nuclides):
if nuclide == nuc.name:
self.nuclides.remove(nuc)
    def add_macroscopic(self, macroscopic):
        """Add a macroscopic to the material. This will also set the
        density of the material to 1.0, unless it has been otherwise set,
        as a default for Macroscopic cross sections.
        Parameters
        ----------
        macroscopic : str
            Macroscopic to add
        Raises
        ------
        ValueError
            If nuclides/S(a,b) tables are present, the name is not a string,
            or a macroscopic has already been set
        """
        # Ensure no nuclides, elements, or sab are added since these would be
        # incompatible with macroscopics
        if self._nuclides or self._sab:
            msg = 'Unable to add a Macroscopic data set to Material ID="{}" ' \
                  'with a macroscopic value "{}" as an incompatible data ' \
                  'member (i.e., nuclide or S(a,b) table) ' \
                  'has already been added'.format(self._id, macroscopic)
            raise ValueError(msg)
        if not isinstance(macroscopic, str):
            msg = 'Unable to add a Macroscopic to Material ID="{}" with a ' \
                  'non-string value "{}"'.format(self._id, macroscopic)
            raise ValueError(msg)
        # Only one macroscopic data set is allowed per material.
        if self._macroscopic is None:
            self._macroscopic = macroscopic
        else:
            msg = 'Unable to add a Macroscopic to Material ID="{}". ' \
                  'Only one Macroscopic allowed per ' \
                  'Material.'.format(self._id)
            raise ValueError(msg)
        # Generally speaking, the density for a macroscopic object will
        # be 1.0. Therefore, lets set density to 1.0 so that the user
        # doesnt need to set it unless its needed.
        # Of course, if the user has already set a value of density,
        # then we will not override it.
        if self._density is None:
            self.set_density('macro', 1.0)
def remove_macroscopic(self, macroscopic):
"""Remove a macroscopic from the material
Parameters
----------
macroscopic : str
Macroscopic to remove
"""
if not isinstance(macroscopic, str):
msg = 'Unable to remove a Macroscopic "{}" in Material ID="{}" ' \
'since it is not a string'.format(self._id, macroscopic)
raise ValueError(msg)
# If the Material contains the Macroscopic, delete it
if macroscopic == self._macroscopic:
self._macroscopic = None
    def add_element(self, element, percent, percent_type='ao', enrichment=None,
                    enrichment_target=None, enrichment_type=None):
        """Add a natural element to the material
        Parameters
        ----------
        element : str
            Element to add, e.g., 'Zr' or 'Zirconium'
        percent : float
            Atom or weight percent
        percent_type : {'ao', 'wo'}, optional
            'ao' for atom percent and 'wo' for weight percent. Defaults to atom
            percent.
        enrichment : float, optional
            Enrichment of an enrichment_target nuclide in percent (ao or wo).
            If enrichment_target is not supplied then it is enrichment for U235
            in weight percent. For example, input 4.95 for 4.95 weight percent
            enriched U.
            Default is None (natural composition).
        enrichment_target: str, optional
            Single nuclide name to enrich from a natural composition (e.g., 'O16')
            .. versionadded:: 0.12
        enrichment_type: {'ao', 'wo'}, optional
            'ao' for enrichment as atom percent and 'wo' for weight percent.
            Default is: 'ao' for two-isotope enrichment; 'wo' for U enrichment
            .. versionadded:: 0.12
        Notes
        -----
        General enrichment procedure is allowed only for elements composed of
        two isotopes. If `enrichment_target` is given without `enrichment`
        natural composition is added to the material.
        """
        cv.check_type('nuclide', element, str)
        cv.check_type('percent', percent, Real)
        cv.check_value('percent type', percent_type, {'ao', 'wo'})
        # Make sure element name is just that
        if not element.isalpha():
            raise ValueError("Element name should be given by the "
                             "element's symbol or name, e.g., 'Zr', 'zirconium'")
        # Allow for element identifier to be given as a symbol or name
        if len(element) > 2:
            el = element.lower()
            element = openmc.data.ELEMENT_SYMBOL.get(el)
            if element is None:
                msg = 'Element name "{}" not recognised'.format(el)
                raise ValueError(msg)
        # Elements and macroscopic cross sections are mutually exclusive.
        if self._macroscopic is not None:
            msg = 'Unable to add an Element to Material ID="{}" as a ' \
                  'macroscopic data-set has already been added'.format(self._id)
            raise ValueError(msg)
        if enrichment is not None and enrichment_target is None:
            # Legacy uranium-only enrichment path (U235 weight percent).
            if not isinstance(enrichment, Real):
                msg = 'Unable to add an Element to Material ID="{}" with a ' \
                      'non-floating point enrichment value "{}"'\
                      .format(self._id, enrichment)
                raise ValueError(msg)
            elif element != 'U':
                msg = 'Unable to use enrichment for element {} which is not ' \
                      'uranium for Material ID="{}"'.format(element, self._id)
                raise ValueError(msg)
            # Check that the enrichment is in the valid range
            cv.check_less_than('enrichment', enrichment, 100./1.008)
            cv.check_greater_than('enrichment', enrichment, 0., equality=True)
            if enrichment > 5.0:
                msg = 'A uranium enrichment of {} was given for Material ID='\
                      '"{}". OpenMC assumes the U234/U235 mass ratio is '\
                      'constant at 0.008, which is only valid at low ' \
                      'enrichments. Consider setting the isotopic ' \
                      'composition manually for enrichments over 5%.'.\
                      format(enrichment, self._id)
                warnings.warn(msg)
        # Add naturally-occurring isotopes
        element = openmc.Element(element)
        for nuclide in element.expand(percent,
                                      percent_type,
                                      enrichment,
                                      enrichment_target,
                                      enrichment_type):
            self.add_nuclide(*nuclide)
def add_elements_from_formula(self, formula, percent_type='ao', enrichment=None,
enrichment_target=None, enrichment_type=None):
"""Add a elements from a chemical formula to the material.
.. versionadded:: 0.12
Parameters
----------
formula : str
Formula to add, e.g., 'C2O', 'C6H12O6', or (NH4)2SO4.
Note this is case sensitive, elements must start with an uppercase
character. Multiplier numbers must be integers.
percent_type : {'ao', 'wo'}, optional
'ao' for atom percent and 'wo' for weight percent. Defaults to atom
percent.
enrichment : float, optional
Enrichment of an enrichment_target nuclide in percent (ao or wo).
If enrichment_target is not supplied then it is enrichment for U235
in weight percent. For example, input 4.95 for 4.95 weight percent
enriched U. Default is None (natural composition).
enrichment_target : str, optional
Single nuclide name to enrich from a natural composition (e.g., 'O16')
enrichment_type : {'ao', 'wo'}, optional
'ao' for enrichment as atom percent and 'wo' for weight percent.
Default is: 'ao' for two-isotope enrichment; 'wo' for U enrichment
Notes
-----
General enrichment procedure is allowed only for elements composed of
two isotopes. If `enrichment_target` is given without `enrichment`
natural composition is added to the material.
"""
cv.check_type('formula', formula, str)
if '.' in formula:
msg = 'Non-integer multiplier values are not accepted. The ' \
'input formula {} contains a "." character.'.format(formula)
raise ValueError(msg)
# Tokenizes the formula and check validity of tokens
tokens = re.findall(r"([A-Z][a-z]*)(\d*)|(\()|(\))(\d*)", formula)
for row in tokens:
for token in row:
if token.isalpha():
if token == "n" or token not in openmc.data.ATOMIC_NUMBER:
msg = 'Formula entry {} not an element symbol.' \
.format(token)
raise ValueError(msg)
elif token not in ['(', ')', ''] and not token.isdigit():
msg = 'Formula must be made from a sequence of ' \
'element symbols, integers, and backets. ' \
'{} is not an allowable entry.'.format(token)
raise ValueError(msg)
# Checks that the number of opening and closing brackets are equal
if formula.count('(') != formula.count(')'):
msg = 'Number of opening and closing brackets is not equal ' \
'in the input formula {}.'.format(formula)
raise ValueError(msg)
# Checks that every part of the original formula has been tokenized
for row in tokens:
for token in row:
formula = formula.replace(token, '', 1)
if len(formula) != 0:
msg = 'Part of formula was not successfully parsed as an ' \
'element symbol, bracket or integer. {} was not parsed.' \
.format(formula)
raise ValueError(msg)
# Works through the tokens building a stack
mat_stack = [Counter()]
for symbol, multi1, opening_bracket, closing_bracket, multi2 in tokens:
if symbol:
mat_stack[-1][symbol] += int(multi1 or 1)
if opening_bracket:
mat_stack.append(Counter())
if closing_bracket:
stack_top = mat_stack.pop()
for symbol, value in stack_top.items():
mat_stack[-1][symbol] += int(multi2 or 1) * value
# Normalizing percentages
percents = mat_stack[0].values()
norm_percents = [float(i) / sum(percents) for i in percents]
elements = mat_stack[0].keys()
# Adds each element and percent to the material
for element, percent in zip(elements, norm_percents):
if enrichment_target is not None and element == re.sub(r'\d+$', '', enrichment_target):
self.add_element(element, percent, percent_type, enrichment,
enrichment_target, enrichment_type)
elif enrichment is not None and enrichment_target is None and element == 'U':
self.add_element(element, percent, percent_type, enrichment)
else:
self.add_element(element, percent, percent_type)
    def add_s_alpha_beta(self, name, fraction=1.0):
        r"""Add an :math:`S(\alpha,\beta)` table to the material
        Parameters
        ----------
        name : str
            Name of the :math:`S(\alpha,\beta)` table
        fraction : float
            The fraction of relevant nuclei that are affected by the
            :math:`S(\alpha,\beta)` table. For example, if the material is a
            block of carbon that is 60% graphite and 40% amorphous then add a
            graphite :math:`S(\alpha,\beta)` table with fraction=0.6.
        Raises
        ------
        ValueError
            If a macroscopic data set is present or the name is not a string
        """
        # S(a,b) tables and macroscopic cross sections are mutually exclusive.
        if self._macroscopic is not None:
            msg = 'Unable to add an S(a,b) table to Material ID="{}" as a ' \
                  'macroscopic data-set has already been added'.format(self._id)
            raise ValueError(msg)
        if not isinstance(name, str):
            msg = 'Unable to add an S(a,b) table to Material ID="{}" with a ' \
                  'non-string table name "{}"'.format(self._id, name)
            raise ValueError(msg)
        cv.check_type('S(a,b) fraction', fraction, Real)
        cv.check_greater_than('S(a,b) fraction', fraction, 0.0, True)
        cv.check_less_than('S(a,b) fraction', fraction, 1.0, True)
        # Normalize legacy table names to the GND convention, warning when a
        # rename actually happens.
        new_name = openmc.data.get_thermal_name(name)
        if new_name != name:
            msg = 'OpenMC S(a,b) tables follow the GND naming convention. ' \
                  'Table "{}" is being renamed as "{}".'.format(name, new_name)
            warnings.warn(msg)
        self._sab.append((new_name, fraction))
def make_isotropic_in_lab(self):
self.isotropic = [x.name for x in self._nuclides]
def get_elements(self):
"""Returns all elements in the material
.. versionadded:: 0.12
Returns
-------
elements : list of str
List of element names
"""
return sorted({re.split(r'(\d+)', i)[0] for i in self.get_nuclides()})
def get_nuclides(self):
"""Returns all nuclides in the material
Returns
-------
nuclides : list of str
List of nuclide names
"""
return [x.name for x in self._nuclides]
def get_nuclide_densities(self):
"""Returns all nuclides in the material and their densities
Returns
-------
nuclides : dict
Dictionary whose keys are nuclide names and values are 3-tuples of
(nuclide, density percent, density percent type)
"""
# keep ordered dictionary for testing purposes
nuclides = OrderedDict()
for nuclide in self._nuclides:
nuclides[nuclide.name] = nuclide
return nuclides
    def get_nuclide_atom_densities(self):
        """Returns all nuclides in the material and their atomic densities in
        units of atom/b-cm
        Returns
        -------
        nuclides : dict
            Dictionary whose keys are nuclide names and values are tuples of
            (nuclide, density in atom/b-cm)
        """
        sum_density = False
        # Encode the stored density/units as a single signed value: positive
        # means an atom density, negative means a mass density.
        if self.density_units == 'sum':
            sum_density = True
            density = 0.
        elif self.density_units == 'macro':
            density = self.density
        elif self.density_units == 'g/cc' or self.density_units == 'g/cm3':
            density = -self.density
        elif self.density_units == 'kg/m3':
            density = -0.001 * self.density
        elif self.density_units == 'atom/b-cm':
            density = self.density
        elif self.density_units == 'atom/cm3' or self.density_units == 'atom/cc':
            density = 1.E-24 * self.density
        # For ease of processing split out nuc, nuc_density,
        # and nuc_density_type into separate arrays
        nucs = []
        nuc_densities = []
        nuc_density_types = []
        for nuclide in self.nuclides:
            nucs.append(nuclide.name)
            nuc_densities.append(nuclide.percent)
            nuc_density_types.append(nuclide.percent_type)
        nucs = np.array(nucs)
        nuc_densities = np.array(nuc_densities)
        nuc_density_types = np.array(nuc_density_types)
        # 'sum' units: total density is simply the sum of the stored percents.
        if sum_density:
            density = np.sum(nuc_densities)
        percent_in_atom = np.all(nuc_density_types == 'ao')
        density_in_atom = density > 0.
        sum_percent = 0.
        # Convert the weight amounts to atomic amounts
        if not percent_in_atom:
            for n, nuc in enumerate(nucs):
                nuc_densities[n] *= self.average_molar_mass / \
                                    openmc.data.atomic_mass(nuc)
        # Now that we have the atomic amounts, lets finish calculating densities
        sum_percent = np.sum(nuc_densities)
        nuc_densities = nuc_densities / sum_percent
        # Convert the mass density to an atom density
        if not density_in_atom:
            density = -density / self.average_molar_mass * 1.E-24 \
                      * openmc.data.AVOGADRO
        nuc_densities = density * nuc_densities
        # keep ordered dictionary for reproducible iteration order
        nuclides = OrderedDict()
        for n, nuc in enumerate(nucs):
            nuclides[nuc] = (nuc, nuc_densities[n])
        return nuclides
def get_mass_density(self, nuclide=None):
"""Return mass density of one or all nuclides
Parameters
----------
nuclides : str, optional
Nuclide for which density is desired. If not specified, the density
for the entire material is given.
Returns
-------
float
Density of the nuclide/material in [g/cm^3]
"""
mass_density = 0.0
for nuc, atoms_per_cc in self.get_nuclide_atom_densities().values():
if nuclide is None or nuclide == nuc:
density_i = 1e24 * atoms_per_cc * openmc.data.atomic_mass(nuc) \
/ openmc.data.AVOGADRO
mass_density += density_i
return mass_density
def get_mass(self, nuclide=None):
"""Return mass of one or all nuclides.
Note that this method requires that the :attr:`Material.volume` has
already been set.
Parameters
----------
nuclides : str, optional
Nuclide for which mass is desired. If not specified, the density
for the entire material is given.
Returns
-------
float
Mass of the nuclide/material in [g]
"""
if self.volume is None:
raise ValueError("Volume must be set in order to determine mass.")
return self.volume*self.get_mass_density(nuclide)
    def clone(self, memo=None):
        """Create a copy of this material with a new unique ID.
        Parameters
        ----------
        memo : dict or None
            A nested dictionary of previously cloned objects. This parameter
            is used internally and should not be specified by the user.
        Returns
        -------
        clone : openmc.Material
            The clone of this material
        """
        if memo is None:
            memo = {}
        # If no memoized clone exists, instantiate one
        if self not in memo:
            # Temporarily remove paths -- this is done so that when the clone is
            # made, it doesn't create a copy of the paths (which are specific to
            # an instance)
            paths = self._paths
            self._paths = None
            clone = deepcopy(self)
            # id = None triggers assignment of a fresh auto-generated ID.
            clone.id = None
            clone._num_instances = None
            # Restore paths on original instance
            self._paths = paths
            # Memoize the clone
            memo[self] = clone
        return memo[self]
def _get_nuclide_xml(self, nuclide):
xml_element = ET.Element("nuclide")
xml_element.set("name", nuclide.name)
if nuclide.percent_type == 'ao':
xml_element.set("ao", str(nuclide.percent))
else:
xml_element.set("wo", str(nuclide.percent))
return xml_element
def _get_macroscopic_xml(self, macroscopic):
xml_element = ET.Element("macroscopic")
xml_element.set("name", macroscopic)
return xml_element
def _get_nuclides_xml(self, nuclides):
xml_elements = []
for nuclide in nuclides:
xml_elements.append(self._get_nuclide_xml(nuclide))
return xml_elements
    def to_xml_element(self, cross_sections=None):
        """Return XML representation of the material
        Parameters
        ----------
        cross_sections : str
            Path to an XML cross sections listing file.
            NOTE(review): this parameter is accepted but never used in the
            body below — confirm whether it can be dropped by callers.
        Returns
        -------
        element : xml.etree.ElementTree.Element
            XML element containing material data
        Raises
        ------
        ValueError
            If no density has been set for this material
        """
        # Create Material XML element
        element = ET.Element("material")
        element.set("id", str(self._id))
        if len(self._name) > 0:
            element.set("name", str(self._name))
        if self._depletable:
            element.set("depletable", "true")
        if self._volume:
            element.set("volume", str(self._volume))
        # Create temperature XML subelement
        if self.temperature is not None:
            element.set("temperature", str(self.temperature))
        # Create density XML subelement
        if self._density is not None or self._density_units == 'sum':
            subelement = ET.SubElement(element, "density")
            # 'sum' densities carry only the units attribute, no value.
            if self._density_units != 'sum':
                subelement.set("value", str(self._density))
            subelement.set("units", self._density_units)
        else:
            raise ValueError('Density has not been set for material {}!'
                             .format(self.id))
        if self._macroscopic is None:
            # Create nuclide XML subelements
            subelements = self._get_nuclides_xml(self._nuclides)
            for subelement in subelements:
                element.append(subelement)
        else:
            # Create macroscopic XML subelements
            subelement = self._get_macroscopic_xml(self._macroscopic)
            element.append(subelement)
        if self._sab:
            for sab in self._sab:
                subelement = ET.SubElement(element, "sab")
                subelement.set("name", sab[0])
                # A fraction of exactly 1.0 is the default and is omitted.
                if sab[1] != 1.0:
                    subelement.set("fraction", str(sab[1]))
        if self._isotropic:
            subelement = ET.SubElement(element, "isotropic")
            subelement.text = ' '.join(self._isotropic)
        return element
    @classmethod
    def mix_materials(cls, materials, fracs, percent_type='ao', name=None):
        """Mix materials together based on atom, weight, or volume fractions
        .. versionadded:: 0.12
        Parameters
        ----------
        materials : Iterable of openmc.Material
            Materials to combine
        fracs : Iterable of float
            Fractions of each material to be combined
        percent_type : {'ao', 'wo', 'vo'}
            Type of percentage, must be one of 'ao', 'wo', or 'vo', to signify atom
            percent (molar percent), weight percent, or volume percent,
            optional. Defaults to 'ao'
        name : str
            The name for the new material, optional. Defaults to concatenated
            names of input materials with percentages indicated inside
            parentheses.
        Returns
        -------
        openmc.Material
            Mixture of the materials
        Raises
        ------
        ValueError
            If fractions do not sum to 1 for 'ao'/'wo' mixing
        NotImplementedError
            If any input material carries S(a,b) tables
        """
        cv.check_type('materials', materials, Iterable, Material)
        cv.check_type('fracs', fracs, Iterable, Real)
        cv.check_value('percent type', percent_type, {'ao', 'wo', 'vo'})
        fracs = np.asarray(fracs)
        void_frac = 1. - np.sum(fracs)
        # Warn that fractions don't add to 1, set remainder to void, or raise
        # an error if percent_type isn't 'vo'
        if not np.isclose(void_frac, 0.):
            if percent_type in ('ao', 'wo'):
                msg = ('A non-zero void fraction is not acceptable for '
                       'percent_type: {}'.format(percent_type))
                raise ValueError(msg)
            else:
                msg = ('Warning: sum of fractions do not add to 1, void '
                       'fraction set to {}'.format(void_frac))
                warnings.warn(msg)
        # Calculate appropriate weights which are how many cc's of each
        # material are found in 1cc of the composite material
        amms = np.asarray([mat.average_molar_mass for mat in materials])
        mass_dens = np.asarray([mat.get_mass_density() for mat in materials])
        if percent_type == 'ao':
            wgts = fracs * amms / mass_dens
            wgts /= np.sum(wgts)
        elif percent_type == 'wo':
            wgts = fracs / mass_dens
            wgts /= np.sum(wgts)
        elif percent_type == 'vo':
            wgts = fracs
        # If any of the involved materials contain S(a,b) tables raise an error
        sab_names = set(sab[0] for mat in materials for sab in mat._sab)
        if sab_names:
            msg = ('Currently we do not support mixing materials containing '
                   'S(a,b) tables')
            raise NotImplementedError(msg)
        # Add nuclide densities weighted by appropriate fractions
        nuclides_per_cc = defaultdict(float)
        mass_per_cc = defaultdict(float)
        for mat, wgt in zip(materials, wgts):
            for nuc, atoms_per_bcm in mat.get_nuclide_atom_densities().values():
                # atom/b-cm -> atom/cm^3 scaled by this material's volume share
                nuc_per_cc = wgt*1.e24*atoms_per_bcm
                nuclides_per_cc[nuc] += nuc_per_cc
                mass_per_cc[nuc] += nuc_per_cc*openmc.data.atomic_mass(nuc) / \
                                    openmc.data.AVOGADRO
        # Create the new material with the desired name
        if name is None:
            name = '-'.join(['{}({})'.format(m.name, f) for m, f in
                             zip(materials, fracs)])
        new_mat = openmc.Material(name=name)
        # Compute atom fractions of nuclides and add them to the new material
        tot_nuclides_per_cc = np.sum([dens for dens in nuclides_per_cc.values()])
        for nuc, atom_dens in nuclides_per_cc.items():
            new_mat.add_nuclide(nuc, atom_dens/tot_nuclides_per_cc, 'ao')
        # Compute mass density for the new material and set it
        new_density = np.sum([dens for dens in mass_per_cc.values()])
        new_mat.set_density('g/cm3', new_density)
        return new_mat
    @classmethod
    def from_xml_element(cls, elem):
        """Generate material from an XML element
        Parameters
        ----------
        elem : xml.etree.ElementTree.Element
            XML element
        Returns
        -------
        openmc.Material
            Material generated from XML element
        """
        mat_id = int(elem.get('id'))
        mat = cls(mat_id)
        mat.name = elem.get('name')
        if "temperature" in elem.attrib:
            mat.temperature = float(elem.get("temperature"))
        if 'volume' in elem.attrib:
            mat.volume = float(elem.get('volume'))
        # NOTE(review): bool() of any non-empty string is True, so a present
        # 'depletable' attribute marks the material depletable even if its
        # text is "false"; absence gives bool(None) == False.
        mat.depletable = bool(elem.get('depletable'))
        # Get each nuclide
        for nuclide in elem.findall('nuclide'):
            name = nuclide.attrib['name']
            if 'ao' in nuclide.attrib:
                mat.add_nuclide(name, float(nuclide.attrib['ao']))
            elif 'wo' in nuclide.attrib:
                mat.add_nuclide(name, float(nuclide.attrib['wo']), 'wo')
        # Get each S(a,b) table
        for sab in elem.findall('sab'):
            fraction = float(sab.get('fraction', 1.0))
            mat.add_s_alpha_beta(sab.get('name'), fraction)
        # Get total material density
        density = elem.find('density')
        units = density.get('units')
        if units == 'sum':
            mat.set_density(units)
        else:
            value = float(density.get('value'))
            mat.set_density(units, value)
        # Check for isotropic scattering nuclides
        isotropic = elem.find('isotropic')
        if isotropic is not None:
            mat.isotropic = isotropic.text.split()
        return mat
class Materials(cv.CheckedList):
    """Collection of Materials used for an OpenMC simulation.
    This class corresponds directly to the materials.xml input file. It can be
    thought of as a normal Python list where each member is a
    :class:`Material`. It behaves like a list as the following example
    demonstrates:
    >>> fuel = openmc.Material()
    >>> clad = openmc.Material()
    >>> water = openmc.Material()
    >>> m = openmc.Materials([fuel])
    >>> m.append(water)
    >>> m += [clad]
    Parameters
    ----------
    materials : Iterable of openmc.Material
        Materials to add to the collection
    cross_sections : str
        Indicates the path to an XML cross section listing file (usually named
        cross_sections.xml). If it is not set, the
        :envvar:`OPENMC_CROSS_SECTIONS` environment variable will be used for
        continuous-energy calculations and
        :envvar:`OPENMC_MG_CROSS_SECTIONS` will be used for multi-group
        calculations to find the path to the HDF5 cross section file.
    """
    def __init__(self, materials=None):
        # CheckedList enforces that every element is a Material.
        super().__init__(Material, 'materials collection')
        self._cross_sections = None
        if materials is not None:
            self += materials
    @property
    def cross_sections(self):
        # Path to the cross_sections.xml listing file, or None if unset.
        return self._cross_sections
    @cross_sections.setter
    def cross_sections(self, cross_sections):
        cv.check_type('cross sections', cross_sections, str)
        self._cross_sections = cross_sections
    def append(self, material):
        """Append material to collection
        Parameters
        ----------
        material : openmc.Material
            Material to append
        """
        super().append(material)
    def insert(self, index, material):
        """Insert material before index
        Parameters
        ----------
        index : int
            Index in list
        material : openmc.Material
            Material to insert
        """
        super().insert(index, material)
    def make_isotropic_in_lab(self):
        # Apply isotropic lab-frame scattering to every contained material.
        for material in self:
            material.make_isotropic_in_lab()
    def export_to_xml(self, path='materials.xml'):
        """Export material collection to an XML file.
        Parameters
        ----------
        path : str
            Path to file to write. Defaults to 'materials.xml'.
        """
        # Check if path is a directory
        p = Path(path)
        if p.is_dir():
            p /= 'materials.xml'
        # Write materials to the file one-at-a-time. This significantly reduces
        # memory demand over allocating a complete ElementTree and writing it in
        # one go.
        with open(str(p), 'w', encoding='utf-8',
                  errors='xmlcharrefreplace') as fh:
            # Write the header and the opening tag for the root element.
            fh.write("<?xml version='1.0' encoding='utf-8'?>\n")
            fh.write('<materials>\n')
            # Write the <cross_sections> element.
            if self._cross_sections is not None:
                element = ET.Element('cross_sections')
                element.text = str(self._cross_sections)
                clean_indentation(element, level=1)
                element.tail = element.tail.strip(' ')
                fh.write(' ')
                reorder_attributes(element)  # TODO: Remove when support is Python 3.8+
                ET.ElementTree(element).write(fh, encoding='unicode')
            # Write the <material> elements, sorted by ID for a stable file.
            for material in sorted(self, key=lambda x: x.id):
                element = material.to_xml_element(self.cross_sections)
                clean_indentation(element, level=1)
                element.tail = element.tail.strip(' ')
                fh.write(' ')
                reorder_attributes(element)  # TODO: Remove when support is Python 3.8+
                ET.ElementTree(element).write(fh, encoding='unicode')
            # Write the closing tag for the root element.
            fh.write('</materials>\n')
    @classmethod
    def from_xml(cls, path='materials.xml'):
        """Generate materials collection from XML file
        Parameters
        ----------
        path : str, optional
            Path to materials XML file
        Returns
        -------
        openmc.Materials
            Materials collection
        """
        tree = ET.parse(path)
        root = tree.getroot()
        # Generate each material
        materials = cls()
        for material in root.findall('material'):
            materials.append(Material.from_xml_element(material))
        # Check for cross sections settings
        xs = tree.find('cross_sections')
        if xs is not None:
            materials.cross_sections = xs.text
        return materials
|
|
r"""
Convert a restructured text document to html.
Inline math markup can use the *math* directive, or it can use latex
style *\$expression\$*. Math can be rendered using simple html and
unicode, or with mathjax.
"""
import re
from contextlib import contextmanager
# CRUFT: locale.getlocale() fails on some versions of OS X
# See https://bugs.python.org/issue18378
import locale
if hasattr(locale, '_parse_localename'):
    # Probe whether this interpreter's locale parser rejects a bare 'UTF-8'
    # localename (the OS X issue referenced above); if so, monkey-patch it.
    try:
        locale._parse_localename('UTF-8')
    except ValueError:
        _old_parse_localename = locale._parse_localename
        def _parse_localename(localename):
            # Treat plain 'UTF-8' as "no language, UTF-8 encoding"; defer
            # every other name to the original implementation.
            code = locale.normalize(localename)
            if code == 'UTF-8':
                return None, code
            else:
                return _old_parse_localename(localename)
        locale._parse_localename = _parse_localename
from docutils.core import publish_parts
from docutils.writers.html4css1 import HTMLTranslator
from docutils.nodes import SkipNode
# pylint: disable=unused-import
try:
from typing import Tuple
except ImportError:
pass
# pylint: enable=unused-import
def rst2html(rst, part="whole", math_output="mathjax"):
    r"""
    Convert restructured text into simple html.
    Valid *math_output* formats for formulas include:
    - html
    - mathml
    - mathjax
    See `<http://docutils.sourceforge.net/docs/user/config.html#math-output>`_
    for details.
    The following *part* choices are available:
    - whole: the entire html document
    - html_body: document division with title and contents and footer
    - body: contents only
    There are other parts, but they don't make sense alone:
        subtitle, version, encoding, html_prolog, header, meta,
        html_title, title, stylesheet, html_subtitle, html_body,
        body, head, body_suffix, fragment, docinfo, html_head,
        head_prefix, body_prefix, footer, body_pre_docinfo, whole
    """
    # Ick! mathjax doesn't work properly with math-output, and the
    # others don't work properly with math_output!
    if math_output == "mathjax":
        # TODO: this is copied from docs/conf.py; there should be only one
        mathjax_path = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-MML-AM_CHTML"
        settings = {"math_output": math_output + " " + mathjax_path}
    else:
        settings = {"math-output": math_output}
    # TODO: support stylesheets
    #html_root = "/full/path/to/_static/"
    #sheets = [html_root+s for s in ["basic.css","classic.css"]]
    #settings["embed_styesheet"] = True
    #settings["stylesheet_path"] = sheets
    # math2html and mathml do not support \frac12
    rst = replace_compact_fraction(rst)
    # mathml, html do not support \tfrac
    if math_output in ("mathml", "html"):
        rst = rst.replace(r'\tfrac', r'\frac')
    # Convert inline $...$ markup to :math:`...` roles before parsing.
    rst = replace_dollar(rst)
    # Parse with docutils, keeping system-message noise out of the output.
    with suppress_html_errors():
        parts = publish_parts(source=rst, writer_name='html',
                              settings_overrides=settings)
    return parts[part]
@contextmanager
def suppress_html_errors():
    r"""
    Context manager for keeping error reports out of the generated HTML.
    Within the context, system message nodes in the docutils parse tree
    will be ignored. After the context, the usual behaviour will be restored.
    """
    visit_system_message = HTMLTranslator.visit_system_message
    HTMLTranslator.visit_system_message = _skip_node
    try:
        yield None
    finally:
        # Bug fix: restore the handler even when the body raises; previously
        # an exception left system messages suppressed for the whole process.
        HTMLTranslator.visit_system_message = visit_system_message
def _skip_node(self, node):
    # Docutils visitor hook: raising SkipNode omits the node (and its
    # children) from the rendered output entirely.
    raise SkipNode
# Matches \frac, \cfrac, \dfrac or \tfrac followed by two bare digits.
_compact_fraction = re.compile(r"(\\[cdt]?frac)([0-9])([0-9])")
def replace_compact_fraction(content):
    r"""
    Rewrite compact fractions such as \frac12 as \frac{1}{2} so that
    latex parsers lacking the shorthand can still render them.
    """
    def _expand(match):
        command, numerator, denominator = match.groups()
        return "%s{%s}{%s}" % (command, numerator, denominator)
    return _compact_fraction.sub(_expand, content)
# A $...$ span: preceded by start/whitespace/'-'/'(' and followed by
# end/whitespace/punctuation, with the closing '$' not escaped.
_dollar = re.compile(r"(?:^|(?<=\s|[-(]))[$]([^\n]*?)(?<![\\])[$](?:$|(?=\s|[-.,;:?\\)]))")
_notdollar = re.compile(r"\\[$]")
def replace_dollar(content):
    r"""
    Rewrite $...$ spans as :math:`...` roles and unescape literal \$ signs.
    """
    converted = _dollar.sub(r":math:`\1`", content)
    return _notdollar.sub("$", converted)
def test_dollar():
    """
    Test substitution of dollar signs with equivalent RST math markup
    """
    # (input, expected) pairs covering matches, escapes, and non-matches.
    cases = [
        (u"no dollar", u"no dollar"),
        (u"$only$", u":math:`only`"),
        (u"$first$ is good", u":math:`first` is good"),
        (u"so is $last$", u"so is :math:`last`"),
        (u"and $mid$ too", u"and :math:`mid` too"),
        (u"$first$, $mid$, $last$", u":math:`first`, :math:`mid`, :math:`last`"),
        (u"dollar\\$ escape", u"dollar$ escape"),
        (u"dollar \\$escape\\$ too", u"dollar $escape$ too"),
        (u"spaces $in the$ math", u"spaces :math:`in the` math"),
        (u"emb\\ $ed$\\ ed", u"emb\\ :math:`ed`\\ ed"),
        (u"$first$a", u"$first$a"),
        (u"a$last$", u"a$last$"),
        (u"$37", u"$37"),
        (u"($37)", u"($37)"),
        (u"$37 - $43", u"$37 - $43"),
        (u"($37, $38)", u"($37, $38)"),
        (u"a $mid$dle a", u"a $mid$dle a"),
        (u"a ($in parens$) a", u"a (:math:`in parens`) a"),
        (u"a (again $in parens$) a", u"a (again :math:`in parens`) a"),
    ]
    for text, expected in cases:
        assert replace_dollar(text) == expected
def load_rst_as_html(filename):
    # type: (str) -> str
    """Load rst from file and convert to html"""
    from os.path import expanduser
    # Expand ~ so paths like "~/doc.rst" work, then convert in one pass
    with open(expanduser(filename)) as fid:
        return rst2html(fid.read())
def wxview(html, url="", size=(850, 540)):
    # type: (str, str, Tuple[int, int]) -> "wx.Frame"
    """View HTML in a wx dialog.

    *url* is handed to WebView.SetPage along with the html (presumably the
    base URI for relative links -- see wx WebView docs).  Returns the frame
    so the caller can keep it alive.
    """
    import wx  # imported lazily so the module loads without wx installed
    from wx.html2 import WebView
    frame = wx.Frame(None, -1, size=size)
    view = WebView.New(frame)
    view.SetPage(html, url)
    frame.Show()
    return frame
def view_html_wxapp(html, url=""):
    # type: (str, str) -> None
    """HTML viewer app in wx.  Blocks until the viewer window is closed."""
    import wx  # type: ignore
    app = wx.App()
    # Keep a reference so the frame isn't garbage collected during the loop
    frame = wxview(html, url)  # pylint: disable=unused-variable
    app.MainLoop()
def view_url_wxapp(url):
    # type: (str) -> None
    """URL viewer app in wx.  Blocks until the viewer window is closed."""
    import wx  # type: ignore
    from wx.html2 import WebView
    app = wx.App()
    frame = wx.Frame(None, -1, size=(850, 540))
    view = WebView.New(frame)
    view.LoadURL(url)
    frame.Show()
    app.MainLoop()
def qtview(html, url=""):
    # type: (str, str) -> "QWebView"
    """View HTML in a Qt dialog.

    Returns the view so the caller can hold a reference to it.
    """
    # Prefer PyQt5; fall back to the PyQt4 equivalents if unavailable
    try:
        from PyQt5.QtWebKitWidgets import QWebView
        from PyQt5.QtCore import QUrl
    except ImportError:
        from PyQt4.QtWebKit import QWebView
        from PyQt4.QtCore import QUrl
    helpView = QWebView()
    helpView.setHtml(html, QUrl(url))
    helpView.show()
    return helpView
def view_html_qtapp(html, url=""):
    # type: (str, str) -> None
    """HTML viewer app in Qt.  Exits the process when the window closes."""
    import sys
    # Prefer PyQt5; fall back to PyQt4
    try:
        from PyQt5.QtWidgets import QApplication
    except ImportError:
        from PyQt4.QtGui import QApplication
    app = QApplication([])
    # Keep a reference so the view isn't garbage collected during exec_
    frame = qtview(html, url)  # pylint: disable=unused-variable
    sys.exit(app.exec_())
def view_url_qtapp(url):
    # type: (str) -> None
    """URL viewer app in Qt.  Exits the process when the window closes."""
    import sys
    # Prefer PyQt5; fall back to PyQt4 for both widgets and webkit
    try:
        from PyQt5.QtWidgets import QApplication
    except ImportError:
        from PyQt4.QtGui import QApplication
    app = QApplication([])
    try:
        from PyQt5.QtWebKitWidgets import QWebView
        from PyQt5.QtCore import QUrl
    except ImportError:
        from PyQt4.QtWebKit import QWebView
        from PyQt4.QtCore import QUrl
    frame = QWebView()
    frame.load(QUrl(url))
    frame.show()
    sys.exit(app.exec_())
# Set default html viewer; rebind to view_html_wxapp to prefer wx instead
view_html = view_html_qtapp
def can_use_qt():
    # type: () -> bool
    """
    Return True if QWebView exists.
    Checks first in PyQt5 then in PyQt4
    """
    try:
        from PyQt5.QtWebKitWidgets import QWebView  # pylint: disable=unused-import
        return True
    except ImportError:
        pass
    try:
        from PyQt4.QtWebKit import QWebView  # pylint: disable=unused-import
        return True
    except ImportError:
        return False
def view_help(filename, qt=False):
    # type: (str, bool) -> None
    """View rst or html file. If *qt* use the qt viewer, otherwise use wx."""
    import os
    # Downgrade to wx if Qt's QWebView isn't actually importable
    if qt:
        qt = can_use_qt()
    url = "file:///" + os.path.abspath(filename).replace("\\", "/")
    if filename.endswith('.rst'):
        # rst needs converting to html first; pass the file url as base
        html = load_rst_as_html(filename)
        viewer = view_html_qtapp if qt else view_html_wxapp
        viewer(html, url)
    else:
        viewer = view_url_qtapp if qt else view_url_wxapp
        viewer(url)
def main():
    # type: () -> None
    """Command line interface to rst or html viewer."""
    import sys
    # sys.argv[1] is the path to the .rst/.html file; wx viewer by default
    view_help(sys.argv[1], qt=False)
if __name__ == "__main__":
    main()
|
|
# oxy.io
# File: oxyio/views/object.py
# Desc: Flask routes for internal object calls
from flask import abort, request, g
from jinja2 import TemplateNotFound
from oxyio.models.user import User
from oxyio.data import get_object_or_404
from oxyio.web.route import html_api_route
from oxyio.web.request import get_request_data
from oxyio.web.response import redirect_or_jsonify, render_or_jsonify
from oxyio.web.user import (
has_object_permission, login_required, has_global_objects_permission
)
def view_object(object_id, module_name, object_type):
    # Permission is checked manually here: the decorator can't be used
    # because object_type is only known at call time
    if not has_object_permission(module_name, object_type, object_id, 'view'):
        return abort(403)
    g.module = module_name
    g.object = object_type
    # Fetch the object, 404-ing when it doesn't exist
    obj = get_object_or_404(module_name, object_type, object_id)
    # Let the object prepare itself for viewing
    obj.pre_view()
    # Render the module's own view template
    template_name = '{0}/view.html'.format(object_type)
    return render_or_jsonify(template_name, {
        'action': 'view',
    },
        module_name=module_name,
        object_type=object_type,
        object=obj,
    )
def view_edit_object(object_id, module_name, object_type):
    # Permission is checked manually (no decorator: object_type is only
    # known at call time)
    if not has_object_permission(module_name, object_type, object_id, 'edit'):
        return abort(403)
    g.module = module_name
    g.object = object_type
    # Fetch the object, 404-ing when it doesn't exist
    obj = get_object_or_404(module_name, object_type, object_id)
    # Let the object prepare itself for the edit view
    obj.pre_view_edit()
    # Build the object's form (respects .EDIT_FIELDS)
    edit_form = obj.build_form()
    template_data = {
        'action': 'edit',
        'edit_form': edit_form,
    }
    render_kwargs = {
        'module_name': module_name,
        'object_type': object_type,
        'object': obj,
    }
    # Prefer an object-specific template, falling back to the generic one
    try:
        return render_or_jsonify(
            '{0}/edit.html'.format(object_type), template_data, **render_kwargs
        )
    except TemplateNotFound:
        return render_or_jsonify('object/edit.html', template_data, **render_kwargs)
def edit_object(object_id, module_name, object_type):
    # Permission is checked manually (no decorator: object_type is only
    # known at call time)
    if not has_object_permission(module_name, object_type, object_id, 'edit'):
        return abort(403)
    g.module = module_name
    g.object = object_type
    # Fetch the object, 404-ing when it doesn't exist
    obj = get_object_or_404(module_name, object_type, object_id)
    request_data = get_request_data()
    # Apply the incoming request data, then validate & save
    try:
        obj.edit(request_data)
        obj.save()
    except (obj.EditRequestError, obj.ValidationError) as e:
        return redirect_or_jsonify(error=e.message)
    # Run the post-edit handler and any registered hooks
    obj.post_edit()
    for hook in obj.hooks['post_edit']:
        hook()
    return redirect_or_jsonify(
        obj.edit_url,
        success='{0} updated'.format(obj.TITLE),
    )
def delete_object(object_id, module_name, object_type):
    """Delete an object, running its pre/post delete handlers and hooks."""
    # Check permission (deletion requires the global objects permission)
    if not has_global_objects_permission(module_name, object_type, 'delete'):
        return abort(403)
    g.module = module_name
    g.object = object_type
    # Get object
    obj = get_object_or_404(module_name, object_type, object_id)
    # Give the object a chance to veto deletion
    try:
        obj.pre_delete()
    except obj.DeletionError as e:
        return redirect_or_jsonify(error=e.message)
    # Delete!
    obj.delete()
    # Post delete & hooks
    obj.post_delete()
    for hook in obj.hooks['post_delete']:
        hook()
    # Fix: this view previously returned None, which is not a valid Flask
    # response; respond like the sibling views do
    return redirect_or_jsonify(
        success='{0} deleted'.format(obj.TITLE),
    )
def view_owner_object(object_id, module_name, object_type):
    """Render the generic owner-selection page for an object."""
    # Check permission (can't use decorator as need object_type)
    if not has_global_objects_permission(module_name, object_type, 'owner'):
        return abort(403)
    g.module = module_name
    g.object = object_type
    # Get object
    obj = get_object_or_404(module_name, object_type, object_id)
    # Get all users (candidates for ownership)
    users = User.query.all()
    # Always the generic template: 'object/owner.html' has no placeholders,
    # so the previous .format(object_type) call was a no-op and is removed
    return render_or_jsonify('object/owner.html', {
        'action': 'owner',
        'users': users,
    },
        module_name=module_name,
        object_type=object_type,
        object=obj,
    )
def owner_object(object_id, module_name, object_type):
    """Change the owning user of an object from the posted user_id."""
    # Check permission
    if not has_global_objects_permission(module_name, object_type, 'owner'):
        return abort(403)
    g.module = module_name
    g.object = object_type
    # Parse the posted user ID. When the field is missing, form.get returns
    # None and int(None) raises TypeError (not ValueError), so catch both
    # rather than letting a missing field turn into a 500.
    user_id = request.form.get('user_id')
    try:
        user_id = int(user_id)
    except (TypeError, ValueError):
        return redirect_or_jsonify(error='Invalid user ID')
    # Get object
    obj = get_object_or_404(module_name, object_type, object_id)
    # user_id <= 0 means "no owner"
    obj.user_id = user_id if user_id > 0 else None
    # Save & redirect
    obj.save()
    return redirect_or_jsonify(success='{0} owner changed'.format(obj.TITLE))
def custom_function_object(object_id, view_func, permission, module_name, object_type):
    """Dispatch a custom object route to view_func after a permission check."""
    # Check permission *before* fetching the object: consistent with the
    # other object views, skips a needless DB fetch on denial, and avoids
    # leaking object existence (404 vs 403) to unauthorized users
    if not has_object_permission(module_name, object_type, object_id, permission):
        return abort(403)
    # Get object from module
    obj = get_object_or_404(module_name, object_type, object_id)
    return view_func(obj)
def create_object_views(app, api_app, cls):
    """Register view/edit/delete (and any custom/owner) routes for cls."""
    args = (cls.MODULE, cls.OBJECT)
    # Create view view
    html_api_route(
        '/{0}/<int:object_id>'.format(cls.OBJECT),
        methods=['GET'],
        endpoint='view_{0}'.format(cls.OBJECT),
        app=app, api_app=api_app,
    )(login_required(lambda object_id: view_object(object_id, *args)))
    # Create view edit view (HTML only, no API route)
    app.route(
        '/{0}/<int:object_id>/edit'.format(cls.OBJECT),
        methods=['GET'],
        endpoint='view_edit_{0}'.format(cls.OBJECT),
    )(login_required(lambda object_id: view_edit_object(object_id, *args)))
    # Create edit view
    html_api_route(
        '/{0}/<int:object_id>/edit'.format(cls.OBJECT),
        methods=['POST'],
        endpoint='edit_{0}'.format(cls.OBJECT),
        app=app, api_app=api_app,
    )(login_required(lambda object_id: edit_object(object_id, *args)))
    # Create delete view
    html_api_route(
        '/{0}/<int:object_id>/delete'.format(cls.OBJECT),
        methods=['POST'],
        endpoint='delete_{0}'.format(cls.OBJECT),
        app=app, api_app=api_app,
    )(login_required(lambda object_id: delete_object(object_id, *args)))
    # Add custom object routes
    for name, methods, view_func, permission in cls.ROUTES:
        html_api_route(
            '/{0}/<int:object_id>/{1}'.format(cls.OBJECT, name),
            methods=methods,
            endpoint=view_func.__name__,
            app=app, api_app=api_app,
        )(login_required(
            # Bind BOTH loop variables as defaults: lambdas close over
            # names, not values. The original bound only view_func, so
            # every custom route checked the *last* route's permission.
            lambda object_id, func=view_func, permission=permission:
            custom_function_object(object_id, func, permission, *args)
        ))
    # Create owner views if an owned object
    if cls.OWNABLE:
        # Create view owner view
        app.route(
            '/{0}/<int:object_id>/owner'.format(cls.OBJECT),
            methods=['GET'],
            endpoint='view_owner_{0}'.format(cls.OBJECT),
        )(login_required(lambda object_id: view_owner_object(object_id, *args)))
        # Create owner view
        html_api_route(
            '/{0}/<int:object_id>/owner'.format(cls.OBJECT),
            methods=['POST'],
            endpoint='owner_{0}'.format(cls.OBJECT),
            app=app, api_app=api_app,
        )(login_required(lambda object_id: owner_object(object_id, *args)))
|
|
import time, os
from store import HTree, HStore
from fnv1a import get_hash as fnv1a
import unittest
import pickle
TEST_KEY = 'test'  # key used by the single-item tests below
TEST_VER = 2       # version stored for TEST_KEY
# Expected 16-bit tree hash after adding TEST_KEY with value-hash 3
# (key hash * value hash, truncated to 16 bits -- assumed to match the
# HTree hashing scheme; verify against store.HTree)
TEST_HASH = (fnv1a(TEST_KEY)* 3) & 0xffff
class TestHTree(unittest.TestCase):
    """Tests for the on-disk hash tree (HTree) backed by a .tch file.

    Note several tests call other test methods directly (e.g. testRemove
    calls testEmpty/testAdd) to build up known tree states.
    """
    def setUp(self):
        # Fresh, empty tree of height 0 for every test
        self.htree = HTree("t.tch", 0)
        self.htree.clear()
    def tearDown(self):
        self.htree.close()
        os.unlink("t.tch")
    def testEmpty(self):
        # An empty tree has zero length, zero hash and an empty listing
        self.assertEqual(len(self.htree), 0)
        self.assertEqual(hash(self.htree), 0)
        self.assertEqual(self.htree.list(''), '')
    def testAdd(self):
        # Adding one key yields the precomputed single-entry hash
        self.htree.add(TEST_KEY, TEST_VER, 3)
        self.assertEqual(len(self.htree), 1)
        self.assertEqual(hash(self.htree), TEST_HASH)
    def testRemove(self):
        # Removing a missing key must be a harmless no-op...
        self.htree.remove(TEST_KEY)
        self.testEmpty()
        # ...and removing an existing key restores the empty state
        self.testAdd()
        self.htree.remove(TEST_KEY)
        self.testEmpty()
    def testSplit(self):
        # 200 extra keys force internal bucket splits; the expected hash
        # value is empirical for this key set
        self.testAdd()
        for i in range(200):
            self.htree.add('a%d'%i, i, i, 0)
        self.assertEqual(len(self.htree), 201)
        self.assertEqual(hash(self.htree), 53137)
    def testMerge(self):
        # Removing the 200 extra keys collapses back to the single-entry state
        self.testSplit()
        for i in range(200):
            self.htree.remove('a%d'%i)
        self.assertEqual(len(self.htree), 1)
        self.assertEqual(hash(self.htree), TEST_HASH)
    def testList(self):
        # Re-adding the same key must not create a duplicate entry
        self.testAdd()
        self.testAdd()
        l = "%s %d %d\n" % (TEST_KEY, 3, TEST_VER)
        self.assertEqual(self.htree.list(''), l)
        # After splitting, the root listing shows 16 buckets (+1 for the
        # trailing newline when split)
        self.testSplit()
        self.assertEqual(len(self.htree.list('').split('\n')), 17)
    def testPerformance(self):
        # 200k adds should finish within a second, both before and after flush
        st = time.time()
        for i in range(200000):
            self.htree.add('key%d'%i, i, 0, False)
        t = time.time() - st
        self.assertEqual(t<1, True)
        self.htree.flush()
        st = time.time()
        for i in range(200000):
            self.htree.add('key%d'%i, i, 0, False)
        t = time.time() - st
        self.assertEqual(t<1, True)
    def testClear(self):
        self.testSplit()
        self.htree.clear()
        self.testEmpty()
    def testSave(self):
        # Reopening the same file must restore length and hash
        self.testSplit()
        path = self.htree.path
        l = len(self.htree)
        h = hash(self.htree)
        self.htree.close()
        t = HTree(path, 0)
        self.assertEqual(len(t), l)
        self.assertEqual(hash(t), h)
        t.close()
    def testRestore(self):
        # Simulate corruption/crash by deleting internal marker records
        # directly from the tokyocabinet file, then verify HTree recovers
        self.testSplit()
        path = self.htree.path
        l = len(self.htree)
        h = hash(self.htree)
        self.htree.close()
        import pytc
        db = pytc.HDB()
        db.open(path, pytc.HDBOREADER|pytc.HDBOWRITER)
        try:
            db.out("__pool__")
        except:
            pass
        db.close()
        t = HTree(path, 0)
        self.assertEqual(len(t), l)
        self.assertEqual(hash(t), h)
        t.close()
        import pytc
        db = pytc.HDB()
        db.open(path, pytc.HDBOREADER|pytc.HDBOWRITER)
        #assert db["__sync__"] == "1"
        try:
            db.out("__sync__")
        except:
            pass
        db.close()
        t = HTree(path, 0)
        self.assertEqual(len(t), l)
        self.assertEqual(hash(t), h)
        t.close()
    def testGetHash(self):
        # "@" queries the whole-tree hash and count; a plain key returns
        # that entry's value hash and version
        self.testSplit()
        h, c = self.htree.get_hash("@")
        assert h == hash(self.htree)
        assert c == len(self.htree)
        h, ver = self.htree.get_hash(TEST_KEY)
        assert h == 3
        assert ver == TEST_VER
    def testDepth(self):
        # A subtree listing replayed into a height-1 tree must reproduce
        # the same count, hash and listing
        self.testSplit()
        h, c = self.htree.get_hash("@1")
        s = self.htree.list("1")
        t = HTree("tt.tch", 1)
        for key, ver, ha in [l.split(' ') for l in s.split("\n") if l]:
            t.add(key, int(ver), int(ha))
        self.assertEqual(len(t), c)
        self.assertEqual(hash(t), h)
        self.assertEqual(t.list(''), s)
        t.close()
        os.unlink('tt.tch')
class TestHStore(unittest.TestCase):
    """Tests for the HStore key/value store.

    Subclasses override *height* to re-run the whole suite against
    deeper store trees.
    """
    # Tree height used for the store under test (overridden by subclasses)
    height = 0
    def setUp(self):
        self.store = HStore("/tmp/tmpdb2", self.height)
        self.store.clear()
    def tearDown(self):
        self.store.clear()
        self.store.close()
    def testSetGet(self):
        # Basic set/get/delete round trip
        self.assertEqual(self.store.get('test'), None)
        self.store.set('test', 'value')
        self.assertEqual(self.store.get('test'), 'value')
        self.store.delete('test')
        self.assertEqual(self.store.get('test'), None)
    def testVersion(self):
        # A set with an older/equal version must NOT overwrite a newer one;
        # a strictly newer version must win
        self.store.delete('test')
        self.assertEqual(self.store.get('test'), None)
        self.store.set('test', 'value1', 0)
        self.assertEqual(self.store.get('test'), 'value1')
        #self.assertEqual(self.store.get('@'), 'test 1984411239 1\n')
        self.store.set('test', 'value2', 0)
        self.assertEqual(self.store.get('test'), 'value2')
        self.store.set('test', 'value3', 2)
        self.assertEqual(self.store.get('test'), 'value2')
        self.store.set('test', 'value4', 4)
        self.assertEqual(self.store.get('test'), 'value4')
        self.store.delete('test')
        self.assertEqual(self.store.get('test'), None)
    def testHash(self):
        # "@" listings carry per-bucket counts; totals must add up to 200
        for i in range(200):
            self.store.set('/test/test%d.jpg'%i, 'value%d'%i)
        s = self.store.get('@')
        n = sum(int(l.split(' ')[2]) for l in s.split('\n') if l)
        self.assertEqual(n, 200)
        s = self.store.get('@0')
        n = sum(1 for l in s.split('\n') if l)
        if n == 16:
            n = sum(int(l.split(' ')[2]) for l in s.split('\n') if l)
            self.assertEqual(n, 10)
    def testScan(self):
        # Deleting the index file must be recoverable via check()
        self.testHash()
        self.store.close()
        os.unlink(self.store.path + '/.0.index')
        t = HStore(self.store.path, self.store.height)
        t.check()
        try:
            s = t.get('@')
            n = sum(int(l.split(' ')[2]) for l in s.split('\n') if l)
            self.assertEqual(n, 200)
            s = t.get('@0')
            n = sum(1 for l in s.split('\n') if l)
            if n == 16:
                n = sum(int(l.split(' ')[2]) for l in s.split('\n') if l)
                self.assertEqual(n, 10)
        finally:
            t.close()
    def testRange(self):
        # A store restricted to a bucket range only keeps the keys that
        # fall inside it (110 of the 200 for range 0..8 here)
        self.store.close()
        t = HStore(self.store.path, 1, 0, 8)
        for i in range(200):
            t.set('/test/test%d.jpg'%i, 'value%d'%i)
        s = t.get('@')
        n = sum(int(l.split(' ')[2]) for l in s.split('\n') if l)
        t.close()
        self.assertEqual(n, 110)
class TestHStore1(TestHStore):
    def testFlag(self):
        # "?key" returns the metadata record: "ver hash flag modified"
        self.store.set("test_flag", "value", 2, 17)
        raw = self.store.get("?test_flag")
        ver, value_hash, flag, modified = raw.split(' ')
        self.assertEqual(int(ver), 2)
        self.assertEqual(int(flag), 17)
        expected = fnv1a("value") + len("value") * 97
        self.assertEqual(int(value_hash), expected)
class TestHStore2(TestHStore):
    # Re-run the whole TestHStore suite with a deeper store (height 1)
    height = 1
class TestHStore3(TestHStore):
    # Re-run the whole TestHStore suite with a deeper store (height 2)
    height = 2
|
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests to the conductor service."""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from nova import baserpc
from nova.conductor import manager
from nova.conductor import rpcapi
from nova.i18n import _LI, _LW
from nova import utils
# Configuration options registered under the [conductor] group
conductor_opts = [
    cfg.BoolOpt('use_local',
                default=False,
                help='Perform nova-conductor operations locally'),
    cfg.StrOpt('topic',
               default='conductor',
               help='The topic on which conductor nodes listen'),
    cfg.StrOpt('manager',
               default='nova.conductor.manager.ConductorManager',
               help='Full class name for the Manager for conductor'),
    cfg.IntOpt('workers',
               help='Number of workers for OpenStack Conductor service. '
                    'The default will be the number of CPUs available.')
]
conductor_group = cfg.OptGroup(name='conductor',
                               title='Conductor Options')
CONF = cfg.CONF
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
LOG = logging.getLogger(__name__)
class LocalAPI(object):
    """A local version of the conductor API that does database updates
    locally instead of via RPC.

    Almost every method is a thin delegation to the in-process
    ConductorManager (wrapped in ExceptionHelper); only the argument
    marshalling differs per call.
    """
    def __init__(self):
        # TODO(danms): This needs to be something more generic for
        # other/future users of this sort of functionality.
        self._manager = utils.ExceptionHelper(manager.ConductorManager())
    def wait_until_ready(self, context, *args, **kwargs):
        # nothing to wait for in the local case.
        pass
    def instance_update(self, context, instance_uuid, **updates):
        """Perform an instance update in the database."""
        return self._manager.instance_update(context, instance_uuid,
                                             updates, 'compute')
    def instance_get_all_by_host(self, context, host, columns_to_join=None):
        return self._manager.instance_get_all_by_host(
            context, host, None, columns_to_join=columns_to_join)
    def instance_get_all_by_host_and_node(self, context, host, node):
        return self._manager.instance_get_all_by_host(context, host, node,
                                                      None)
    def migration_get_in_progress_by_host_and_node(self, context, host, node):
        return self._manager.migration_get_in_progress_by_host_and_node(
            context, host, node)
    def aggregate_metadata_get_by_host(self, context, host,
                                       key='availability_zone'):
        return self._manager.aggregate_metadata_get_by_host(context,
                                                            host,
                                                            key)
    def bw_usage_get(self, context, uuid, start_period, mac):
        # A "get" is an update with no new counters (all None, no refresh)
        return self._manager.bw_usage_update(context, uuid, mac, start_period,
                                             None, None, None, None, None, False)
    def bw_usage_update(self, context, uuid, mac, start_period,
                        bw_in, bw_out, last_ctr_in, last_ctr_out,
                        last_refreshed=None, update_cells=True):
        return self._manager.bw_usage_update(context, uuid, mac, start_period,
                                             bw_in, bw_out,
                                             last_ctr_in, last_ctr_out,
                                             last_refreshed,
                                             update_cells=update_cells)
    def provider_fw_rule_get_all(self, context):
        return self._manager.provider_fw_rule_get_all(context)
    def block_device_mapping_create(self, context, values):
        return self._manager.block_device_mapping_update_or_create(context,
                                                                   values,
                                                                   create=True)
    def block_device_mapping_update(self, context, bdm_id, values):
        values = dict(values)
        values['id'] = bdm_id
        return self._manager.block_device_mapping_update_or_create(
            context, values, create=False)
    def block_device_mapping_update_or_create(self, context, values):
        # create=None lets the manager decide based on the values given
        return self._manager.block_device_mapping_update_or_create(context,
                                                                   values,
                                                                   create=None)
    def block_device_mapping_get_all_by_instance(self, context, instance,
                                                 legacy=True):
        return self._manager.block_device_mapping_get_all_by_instance(
            context, instance, legacy)
    def vol_get_usage_by_time(self, context, start_time):
        return self._manager.vol_get_usage_by_time(context, start_time)
    def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
                         wr_bytes, instance, last_refreshed=None,
                         update_totals=False):
        return self._manager.vol_usage_update(context, vol_id,
                                              rd_req, rd_bytes,
                                              wr_req, wr_bytes,
                                              instance, last_refreshed,
                                              update_totals)
    def service_get_all(self, context):
        return self._manager.service_get_all_by(context, host=None, topic=None,
                                                binary=None)
    def service_get_all_by_topic(self, context, topic):
        return self._manager.service_get_all_by(context, topic=topic,
                                                host=None, binary=None)
    def service_get_all_by_host(self, context, host):
        return self._manager.service_get_all_by(context, host=host, topic=None,
                                                binary=None)
    def service_get_by_host_and_topic(self, context, host, topic):
        return self._manager.service_get_all_by(context, topic, host,
                                                binary=None)
    def service_get_by_compute_host(self, context, host):
        result = self._manager.service_get_all_by(context, 'compute', host,
                                                  binary=None)
        # FIXME(comstud): A major revision bump to 2.0 should return a
        # single entry, so we should just return 'result' at that point.
        return result[0]
    def service_get_by_args(self, context, host, binary):
        return self._manager.service_get_all_by(context, host=host,
                                                binary=binary, topic=None)
    def service_create(self, context, values):
        return self._manager.service_create(context, values)
    def service_destroy(self, context, service_id):
        return self._manager.service_destroy(context, service_id)
    def compute_node_create(self, context, values):
        return self._manager.compute_node_create(context, values)
    def compute_node_update(self, context, node, values, prune_stats=False):
        # NOTE(belliott) ignore prune_stats param, it's no longer relevant
        return self._manager.compute_node_update(context, node, values)
    def compute_node_delete(self, context, node):
        return self._manager.compute_node_delete(context, node)
    def service_update(self, context, service, values):
        return self._manager.service_update(context, service, values)
    def task_log_get(self, context, task_name, begin, end, host, state=None):
        return self._manager.task_log_get(context, task_name, begin, end,
                                          host, state)
    def task_log_begin_task(self, context, task_name, begin, end, host,
                            task_items=None, message=None):
        return self._manager.task_log_begin_task(context, task_name,
                                                 begin, end, host,
                                                 task_items, message)
    def task_log_end_task(self, context, task_name, begin, end, host,
                          errors, message=None):
        return self._manager.task_log_end_task(context, task_name,
                                               begin, end, host,
                                               errors, message)
    def notify_usage_exists(self, context, instance, current_period=False,
                            ignore_missing_network_data=True,
                            system_metadata=None, extra_usage_info=None):
        return self._manager.notify_usage_exists(
            context, instance, current_period, ignore_missing_network_data,
            system_metadata, extra_usage_info)
    def security_groups_trigger_handler(self, context, event, *args):
        return self._manager.security_groups_trigger_handler(context,
                                                             event, args)
    def security_groups_trigger_members_refresh(self, context, group_ids):
        return self._manager.security_groups_trigger_members_refresh(context,
                                                                     group_ids)
    def get_ec2_ids(self, context, instance):
        return self._manager.get_ec2_ids(context, instance)
    def object_backport(self, context, objinst, target_version):
        return self._manager.object_backport(context, objinst, target_version)
class LocalComputeTaskAPI(object):
    """In-process version of the compute task API.

    Delegates to a local ComputeTaskManager; long-running operations are
    launched asynchronously via utils.spawn_n.
    """
    def __init__(self):
        # TODO(danms): This needs to be something more generic for
        # other/future users of this sort of functionality.
        self._manager = utils.ExceptionHelper(
                manager.ComputeTaskManager())
    def resize_instance(self, context, instance, extra_instance_updates,
                        scheduler_hint, flavor, reservations,
                        clean_shutdown=True):
        # NOTE(comstud): 'extra_instance_updates' is not used here but is
        # needed for compatibility with the cells_rpcapi version of this
        # method.
        self._manager.migrate_server(
            context, instance, scheduler_hint, live=False, rebuild=False,
            flavor=flavor, block_migration=None, disk_over_commit=None,
            reservations=reservations, clean_shutdown=clean_shutdown)
    def live_migrate_instance(self, context, instance, host_name,
                              block_migration, disk_over_commit):
        scheduler_hint = {'host': host_name}
        # live=True, rebuild=False, no flavor change for live migration
        self._manager.migrate_server(
            context, instance, scheduler_hint, True, False, None,
            block_migration, disk_over_commit, None)
    def build_instances(self, context, instances, image,
                        filter_properties, admin_password, injected_files,
                        requested_networks, security_groups, block_device_mapping,
                        legacy_bdm=True):
        # Spawned in the background; the caller does not wait for the build
        utils.spawn_n(self._manager.build_instances, context,
                instances=instances, image=image,
                filter_properties=filter_properties,
                admin_password=admin_password, injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=block_device_mapping,
                legacy_bdm=legacy_bdm)
    def unshelve_instance(self, context, instance):
        utils.spawn_n(self._manager.unshelve_instance, context,
                instance=instance)
    def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate=False, on_shared_storage=False,
                         preserve_ephemeral=False, host=None, kwargs=None):
        # kwargs unused but required for cell compatibility.
        utils.spawn_n(self._manager.rebuild_instance, context,
                instance=instance,
                new_pass=new_pass,
                injected_files=injected_files,
                image_ref=image_ref,
                orig_image_ref=orig_image_ref,
                orig_sys_metadata=orig_sys_metadata,
                bdms=bdms,
                recreate=recreate,
                on_shared_storage=on_shared_storage,
                host=host,
                preserve_ephemeral=preserve_ephemeral)
class API(LocalAPI):
    """Conductor API that does updates via RPC to the ConductorManager.

    Inherits all delegation methods from LocalAPI but swaps the manager
    for the RPC client, so every call goes over the message bus.
    """
    def __init__(self):
        self._manager = rpcapi.ConductorAPI()
        self.base_rpcapi = baserpc.BaseAPI(topic=CONF.conductor.topic)
    def wait_until_ready(self, context, early_timeout=10, early_attempts=10):
        '''Wait until a conductor service is up and running.
        This method calls the remote ping() method on the conductor topic until
        it gets a response. It starts with a shorter timeout in the loop
        (early_timeout) up to early_attempts number of tries. It then drops
        back to the globally configured timeout for rpc calls for each retry.
        '''
        attempt = 0
        timeout = early_timeout
        # if we show the timeout message, make sure we show a similar
        # message saying that everything is now working to avoid
        # confusion
        has_timedout = False
        while True:
            # NOTE(danms): Try ten times with a short timeout, and then punt
            # to the configured RPC timeout after that
            if attempt == early_attempts:
                timeout = None
            attempt += 1
            # NOTE(russellb): This is running during service startup. If we
            # allow an exception to be raised, the service will shut down.
            # This may fail the first time around if nova-conductor wasn't
            # running when this service started.
            try:
                self.base_rpcapi.ping(context, '1.21 GigaWatts',
                                      timeout=timeout)
                if has_timedout:
                    LOG.info(_LI('nova-conductor connection '
                                 'established successfully'))
                break
            except messaging.MessagingTimeout:
                has_timedout = True
                LOG.warning(_LW('Timed out waiting for nova-conductor. '
                                'Is it running? Or did this service start '
                                'before nova-conductor? '
                                'Reattempting establishment of '
                                'nova-conductor connection...'))
    def instance_update(self, context, instance_uuid, **updates):
        """Perform an instance update in the database."""
        # Same as LocalAPI.instance_update but tagged as coming from
        # the conductor service
        return self._manager.instance_update(context, instance_uuid,
                                             updates, 'conductor')
class ComputeTaskAPI(object):
    """ComputeTask API that queues up compute tasks for nova-conductor.

    RPC counterpart of LocalComputeTaskAPI: every method forwards to the
    conductor compute-task RPC API instead of an in-process manager.
    """
    def __init__(self):
        self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI()
    def resize_instance(self, context, instance, extra_instance_updates,
                        scheduler_hint, flavor, reservations,
                        clean_shutdown=True):
        # NOTE(comstud): 'extra_instance_updates' is not used here but is
        # needed for compatibility with the cells_rpcapi version of this
        # method.
        self.conductor_compute_rpcapi.migrate_server(
            context, instance, scheduler_hint, live=False, rebuild=False,
            flavor=flavor, block_migration=None, disk_over_commit=None,
            reservations=reservations, clean_shutdown=clean_shutdown)
    def live_migrate_instance(self, context, instance, host_name,
                              block_migration, disk_over_commit):
        scheduler_hint = {'host': host_name}
        # live=True, rebuild=False, no flavor change for live migration
        self.conductor_compute_rpcapi.migrate_server(
            context, instance, scheduler_hint, True, False, None,
            block_migration, disk_over_commit, None)
    def build_instances(self, context, instances, image, filter_properties,
                        admin_password, injected_files, requested_networks,
                        security_groups, block_device_mapping, legacy_bdm=True):
        self.conductor_compute_rpcapi.build_instances(context,
                instances=instances, image=image,
                filter_properties=filter_properties,
                admin_password=admin_password, injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=block_device_mapping,
                legacy_bdm=legacy_bdm)
    def unshelve_instance(self, context, instance):
        self.conductor_compute_rpcapi.unshelve_instance(context,
                instance=instance)
    def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate=False, on_shared_storage=False,
                         preserve_ephemeral=False, host=None, kwargs=None):
        # kwargs unused but required for cell compatibility
        self.conductor_compute_rpcapi.rebuild_instance(context,
                instance=instance,
                new_pass=new_pass,
                injected_files=injected_files,
                image_ref=image_ref,
                orig_image_ref=orig_image_ref,
                orig_sys_metadata=orig_sys_metadata,
                bdms=bdms,
                recreate=recreate,
                on_shared_storage=on_shared_storage,
                preserve_ephemeral=preserve_ephemeral,
                host=host)
|
|
# -*- coding: utf-8 -*-
__doc__ = """
WebSocket within CherryPy is a tricky bit since CherryPy is
a threaded server which would choke quickly if each thread
of the server were kept attached to a long living connection
that WebSocket expects.
In order to work around this constraint, we take some advantage
of some internals of CherryPy as well as the introspection
Python provides.
Basically, when the WebSocket upgrade is performed, we take over
the socket and let CherryPy take back the thread that was
associated with the upgrade request.
These operations require a bit of work at various levels of
the CherryPy framework but this module takes care of them
and from your application's perspective, this is abstracted.
Here are the various utilities provided by this module:
* WebSocketTool: The tool is in charge to perform the
HTTP upgrade and detach the socket from
CherryPy. It runs at various hook points of the
request's processing. Enable that tool at
any path you wish to handle as a WebSocket
handler.
* WebSocketPlugin: The plugin tracks the instantiated web socket
                   handlers. It also cleans out websocket handlers
                   whose connections have been closed down.
Simple usage example:
import cherrypy
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool, WebSocketHandler
from ws4py.server.handler.threadedhandler import EchoWebSocketHandler
cherrypy.config.update({'server.socket_port': 9000})
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
class Root(object):
@cherrypy.expose
def index(self):
return 'some HTML with a websocket javascript connection'
@cherrypy.expose
def ws(self):
pass
cherrypy.quickstart(Root(), '/', config={'/ws': {'tools.websocket.on': True,
'tools.websocket.handler_cls': EchoWebSocketHandler}})
Note that you can set the handler class on per-path basis,
meaning you could also dynamically change the class based
on other environmental settings (e.g. whether the user is authenticated).
The current implementation of the handler is based on a thread that will
constantly read bytes from the socket and feed the stream instance with them
until an error or a close condition arise. This might be a bit
suboptimal and one could implement the handler in a different fashion
using a poll based socket handling (select, poll, tornado, gevent, etc.)
"""
import base64
from hashlib import sha1
import inspect
import socket
import cherrypy
from cherrypy import Tool
from cherrypy.process import plugins
from cherrypy.wsgiserver import HTTPConnection, HTTPRequest
from ws4py import WS_KEY
from ws4py.exc import HandshakeError
from ws4py.server.handler.threadedhandler import WebSocketHandler
__all__ = ['WebSocketTool', 'WebSocketPlugin']
class WebSocketTool(Tool):
    def __init__(self):
        # Register the upgrade handshake at the 'before_request_body' hook
        # point, before CherryPy reads the request body
        Tool.__init__(self, 'before_request_body', self.upgrade)
    def _setup(self):
        """
        Hook this tool's callbacks into the current request.
        Besides the main upgrade callback, attaches completion, header
        cleanup and handler startup at later stages of request processing.
        """
        conf = self._merged_args()
        hooks = cherrypy.serving.request.hooks
        # Priority: per-config 'priority' wins, then the callable's own
        # 'priority' attribute, then the Tool default
        p = conf.pop("priority", getattr(self.callable, "priority",
                                         self._priority))
        hooks.attach(self._point, self.callable, priority=p, **conf)
        hooks.attach('before_finalize', self.complete,
                     priority=p)
        hooks.attach('on_end_resource', self.cleanup_headers,
                     priority=70)
        hooks.attach('on_end_request', self.start_handler,
                     priority=70)
def upgrade(self, protocols=None, extensions=None, version=8, handler_cls=WebSocketHandler):
"""
Performs the upgrade of the connection to the WebSocket
protocol.
The provided protocols may be a list of WebSocket
protocols supported by the instance of the tool.
When no list is provided and no protocol is either
during the upgrade, then the protocol parameter is
not taken into account. On the other hand,
if the protocol from the handshake isn't part
of the provided list, the upgrade fails immediatly.
"""
request = cherrypy.serving.request
request.process_request_body = False
ws_protocols = None
ws_location = None
ws_version = version
ws_key = None
ws_extensions = []
if request.method != 'GET':
raise HandshakeError('Method is not GET')
for key, expected_value in [('Upgrade', 'websocket'),
('Connection', 'Upgrade')]:
actual_value = request.headers.get(key)
if not actual_value:
raise HandshakeError('Header %s is not defined' % key)
if expected_value and expected_value not in actual_value:
raise HandshakeError('Illegal value for header %s: %s' %
(key, actual_value))
key = request.headers.get('Sec-WebSocket-Key')
if key:
ws_key = base64.b64decode(key)
if len(ws_key) != 16:
raise HandshakeError("WebSocket key's length is invalid")
version = request.headers.get('Sec-WebSocket-Version')
if version:
if version != str(ws_version):
raise HandshakeError('Unsupported WebSocket version')
else:
raise HandshakeError('WebSocket version required')
protocols = protocols or []
subprotocols = request.headers.get('Sec-WebSocket-Protocol')
if subprotocols:
ws_protocols = []
for s in subprotocols.split(','):
s = s.strip()
if s in protocols:
ws_protocols.append(s)
exts = extensions or []
extensions = request.headers.get('Sec-WebSocket-Extensions')
if extensions:
for ext in extensions.split(','):
ext = ext.strip()
if ext in exts:
ws_extensions.append(ext)
location = []
include_port = False
if request.scheme == "https":
location.append("wss://")
include_port = request.local.port != 443
else:
location.append("ws://")
include_port = request.local.port != 80
location.append('localhost')
if include_port:
location.append(":%d" % request.local.port)
location.append(request.path_info)
if request.query_string != "":
location.append("?%s" % request.query_string)
ws_location = ''.join(location)
response = cherrypy.serving.response
response.stream = True
response.status = '101 Switching Protocols'
response.headers['Content-Type'] = 'text/plain'
response.headers['Upgrade'] = 'websocket'
response.headers['Connection'] = 'Upgrade'
response.headers['Sec-WebSocket-Version'] = str(ws_version)
response.headers['Sec-WebSocket-Accept'] = base64.b64encode(sha1(key + WS_KEY).digest())
if ws_protocols:
response.headers['Sec-WebSocket-Protocol'] = ', '.join(ws_protocols)
if ws_extensions:
response.headers['Sec-WebSocket-Extensions'] = ','.join(ws_extensions)
addr = (request.remote.ip, request.remote.port)
ws_conn = request.rfile.rfile._sock
request.ws_handler = handler_cls(ws_conn, ws_protocols, ws_extensions)
# Start tracking the handler
cherrypy.engine.publish('handle-websocket', request.ws_handler, addr)
def complete(self):
"""
Sets some internal flags of CherryPy so that it
doesn't close the socket down.
"""
self._set_internal_flags()
def cleanup_headers(self):
"""
Some clients aren't that smart when it comes to
headers lookup.
"""
response = cherrypy.response
headers = response.header_list[:]
for (k, v) in headers:
if k.startswith('Sec-Web'):
response.header_list.remove((k, v))
response.header_list.append((k.replace('Sec-Websocket', 'Sec-WebSocket'), v))
def start_handler(self):
"""
Runs at the end of the request processing by calling
the opened method of the handler.
"""
request = cherrypy.request
request.ws_handler.opened()
request.ws_handler = None
# By doing this we detach the socket from
# the CherryPy stack avoiding memory leaks
request.rfile.rfile._sock = None
def _set_internal_flags(self):
"""
CherryPy has two internal flags that we are interested in
to enable WebSocket within the server. They can't be set via
a public API and considering I'd want to make this extension
as compatible as possible whilst refraining in exposing more
than should be within CherryPy, I prefer performing a bit
of introspection to set those flags. Even by Python standards
such introspection isn't the cleanest but it works well
enough in this case.
This also means that we do that only on WebSocket
connections rather than globally and therefore we do not
harm the rest of the HTTP server.
"""
current = inspect.currentframe()
while True:
if not current:
break
_locals = current.f_locals
if 'self' in _locals:
if type(_locals['self']) == HTTPRequest:
_locals['self'].close_connection = True
if type(_locals['self']) == HTTPConnection:
_locals['self'].linger = True
# HTTPConnection is more inner than
# HTTPRequest so we can leave once
# we're done here
return
_locals = None
current = current.f_back
class WebSocketPlugin(plugins.SimplePlugin):
    """CherryPy engine plugin that tracks live websocket handlers.

    Subscribes to the 'handle-websocket' channel to register new handlers
    and to 'websocket-broadcast' to push a message to every tracked peer.
    Terminated handlers are reaped periodically by cleanup() via the
    engine's 'main' channel.
    """
    def __init__(self, bus):
        plugins.SimplePlugin.__init__(self, bus)
        # List of (handler, (ip, port)) tuples, one per tracked connection.
        self.handlers = []

    def start(self):
        """Subscribe the plugin's channels when the engine starts."""
        cherrypy.log("Starting WebSocket processing")
        self.bus.subscribe('handle-websocket', self.handle)
        self.bus.subscribe('websocket-broadcast', self.broadcast)
        # 'main' fires on every engine loop iteration: reap dead handlers.
        self.bus.subscribe('main', self.cleanup)

    def stop(self):
        """Unsubscribe and close any remaining connections on shutdown."""
        cherrypy.log("Terminating WebSocket processing")
        self.bus.unsubscribe('main', self.cleanup)
        self.bus.unsubscribe('handle-websocket', self.handle)
        self.bus.unsubscribe('websocket-broadcast', self.broadcast)
        self.cleanup()

    def handle(self, ws_handler, peer_addr):
        """
        Tracks the provided handler.
        @param ws_handler: websocket handler instance
        @param peer_addr: remote peer address for tracing purpose
        """
        cherrypy.log("Managing WebSocket connection from %s:%d" % (peer_addr[0], peer_addr[1]))
        self.handlers.append((ws_handler, peer_addr))

    def cleanup(self):
        """
        Performs a bit of cleanup on tracked handlers
        by closing connection of terminated streams then
        removing them from the tracked list.
        """
        # Iterate over a snapshot so we can mutate self.handlers safely.
        handlers = self.handlers[:]
        for peer in handlers:
            handler, addr = peer
            if handler.terminated:
                cherrypy.log("Removing WebSocket connection from peer: %s:%d" % (addr[0], addr[1]))
                handler.close_connection()
                handler._th.join()
                self.handlers.remove(peer)

    def broadcast(self, message, binary=False):
        """
        Broadcasts a message to all connected clients known to
        the server.
        @param message: a message suitable to pass to the send() method
        of the connected handler.
        @param binary: whether or not the message is a binary one
        """
        handlers = self.handlers[:]
        for peer in handlers:
            try:
                handler, addr = peer
                handler.send(message, binary)
            except Exception:
                # Best-effort broadcast: log the failure and keep going.
                # Fixed from a bare 'except:' which also swallowed
                # SystemExit and KeyboardInterrupt.
                cherrypy.log(traceback=True)
if __name__ == '__main__':
    # Demo chat server: 'ws' serves the HTML/JS page (websocket tool
    # disabled for that path), while the websocket endpoint itself is
    # enabled for '/' through the quickstart config below.
    import random
    from ws4py.server.handler.threadedhandler import EchoWebSocketHandler
    cherrypy.config.update({'server.socket_host': '127.0.0.1',
                            'server.socket_port': 9000})
    WebSocketPlugin(cherrypy.engine).subscribe()
    cherrypy.tools.websocket = WebSocketTool()
    class Root(object):
        @cherrypy.expose
        @cherrypy.tools.websocket(on=False)
        def ws(self):
            # Serve the client page; the JS connects back over a websocket.
            # NOTE(review): the page points at ws://192.168.0.10:8888/ while
            # the server listens on 127.0.0.1:9000 -- confirm intended host.
            return """<html>
        <head>
          <script type='application/javascript' src='https://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js'> </script>
          <script type='application/javascript'>
            $(document).ready(function() {
              var ws = new WebSocket('ws://192.168.0.10:8888/');
              ws.onmessage = function (evt) {
                 $('#chat').val($('#chat').val() + evt.data + '\\n');
              };
              ws.onopen = function() {
                 ws.send("Hello there");
              };
              $('#chatform').submit(function() {
                 ws.send('%(username)s: ' + $('#message').val());
                 $('#message').val("");
                 return false;
              });
            });
          </script>
        </head>
        <body>
        <form action='/echo' id='chatform' method='get'>
          <textarea id='chat' cols='35' rows='10'></textarea>
          <br />
          <label for='message'>%(username)s: </label><input type='text' id='message' />
          <input type='submit' value='Send' />
          </form>
        </body>
        </html>
        """ % {'username': "User%d" % random.randint(0, 100)}
        @cherrypy.expose
        def index(self):
            # Websocket endpoint: by the time this runs the handshake has
            # already been performed by the tool.
            # NOTE(review): returns nothing (empty response body).
            cherrypy.log("Handler created: %s" % repr(cherrypy.request.ws_handler))
    cherrypy.quickstart(Root(), '/', config={'/': {'tools.websocket.on': True,
                                                   'tools.websocket.handler_cls': EchoWebSocketHandler}})
|
|
# Run the tests in Programs/_testembed.c (tests for the CPython embedding APIs)
from test import support
import unittest
from collections import namedtuple
import contextlib
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
# Platform flags used to select expected paths and executable names.
MS_WINDOWS = (os.name == 'nt')
MACOS = (sys.platform == 'darwin')
# Allocator identifiers as reported in the dumped pre-config.
# NOTE(review): values must stay in sync with the PyPreConfig allocator
# constants on the C side -- confirm against Include/cpython/initconfig.h.
PYMEM_ALLOCATOR_NOT_SET = 0
PYMEM_ALLOCATOR_DEBUG = 2
PYMEM_ALLOCATOR_MALLOC = 3
# Which C initialization API a test exercises:
# _PyCoreConfig_InitCompatConfig()
API_COMPAT = 1
# _PyCoreConfig_InitPythonConfig()
API_PYTHON = 2
# _PyCoreConfig_InitIsolatedConfig()
API_ISOLATED = 3
def debug_build(program):
    """Return True if *program* names a Windows debug build (``_d`` suffix).

    Only the basename matters; any extension (e.g. ``.exe``) is ignored.
    """
    stem, _ext = os.path.splitext(os.path.basename(program))
    return stem.endswith("_d")
def remove_python_envvars():
    """Return a copy of os.environ with every PYTHON* variable removed.

    Gives child interpreters a deterministic environment.
    """
    return {name: value
            for name, value in os.environ.items()
            if not name.startswith('PYTHON')}
class EmbeddingTestsMixin:
    """Shared machinery for running the Programs/_testembed test binary."""
    def setUp(self):
        # Locate the _testembed executable: next to the Python binary on
        # Windows (with a _d suffix for debug builds), under Programs/
        # in the source tree elsewhere.
        here = os.path.abspath(__file__)
        basepath = os.path.dirname(os.path.dirname(os.path.dirname(here)))
        exename = "_testembed"
        if MS_WINDOWS:
            ext = ("_d" if debug_build(sys.executable) else "") + ".exe"
            exename += ext
            exepath = os.path.dirname(sys.executable)
        else:
            exepath = os.path.join(basepath, "Programs")
        self.test_exe = exe = os.path.join(exepath, exename)
        if not os.path.exists(exe):
            self.skipTest("%r doesn't exist" % exe)
        # This is needed otherwise we get a fatal error:
        # "Py_Initialize: Unable to get the locale encoding
        # LookupError: no codec search functions registered: can't find encoding"
        self.oldcwd = os.getcwd()
        os.chdir(basepath)
    def tearDown(self):
        # Undo the chdir performed by setUp().
        os.chdir(self.oldcwd)
    def run_embedded_interpreter(self, *args, env=None,
                                 timeout=None, returncode=0, input=None,
                                 cwd=None):
        """Runs a test in the embedded interpreter"""
        cmd = [self.test_exe]
        cmd.extend(args)
        if env is not None and MS_WINDOWS:
            # Windows requires at least the SYSTEMROOT environment variable to
            # start Python.
            env = env.copy()
            env['SYSTEMROOT'] = os.environ['SYSTEMROOT']
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True,
                             env=env,
                             cwd=cwd)
        try:
            (out, err) = p.communicate(input=input, timeout=timeout)
        except:
            # Never leave the child running if communicate() fails
            # (e.g. on TimeoutExpired).
            p.terminate()
            p.wait()
            raise
        if p.returncode != returncode and support.verbose:
            print(f"--- {cmd} failed ---")
            print(f"stdout:\n{out}")
            print(f"stderr:\n{err}")
            print(f"------")
        self.assertEqual(p.returncode, returncode,
                         "bad returncode %d, stderr is %r" %
                         (p.returncode, err))
        return out, err
    def run_repeated_init_and_subinterpreters(self):
        """Yield one list of Interp records per init/finalize pass.

        Each yielded run holds 5 entries: the main interpreter, three
        subinterpreters, then the main interpreter again.
        """
        out, err = self.run_embedded_interpreter("test_repeated_init_and_subinterpreters")
        self.assertEqual(err, "")
        # The output from _testembed looks like this:
        # --- Pass 0 ---
        # interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728
        # interp 1 <0x1d4f690>, thread state <0x1d35350>: id(modules) = 139650431165784
        # interp 2 <0x1d5a690>, thread state <0x1d99ed0>: id(modules) = 139650413140368
        # interp 3 <0x1d4f690>, thread state <0x1dc3340>: id(modules) = 139650412862200
        # interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728
        # --- Pass 1 ---
        # ...
        interp_pat = (r"^interp (\d+) <(0x[\dA-F]+)>, "
                      r"thread state <(0x[\dA-F]+)>: "
                      r"id\(modules\) = ([\d]+)$")
        Interp = namedtuple("Interp", "id interp tstate modules")
        numloops = 0
        current_run = []
        for line in out.splitlines():
            if line == "--- Pass {} ---".format(numloops):
                self.assertEqual(len(current_run), 0)
                if support.verbose > 1:
                    print(line)
                numloops += 1
                continue
            self.assertLess(len(current_run), 5)
            match = re.match(interp_pat, line)
            if match is None:
                self.assertRegex(line, interp_pat)
            # Parse the line from the loop. The first line is the main
            # interpreter and the 3 afterward are subinterpreters.
            interp = Interp(*match.groups())
            if support.verbose > 1:
                print(interp)
            self.assertTrue(interp.interp)
            self.assertTrue(interp.tstate)
            self.assertTrue(interp.modules)
            current_run.append(interp)
            # The last line in the loop should be the same as the first.
            if len(current_run) == 5:
                main = current_run[0]
                self.assertEqual(interp, main)
                yield current_run
                current_run = []
class EmbeddingTests(EmbeddingTestsMixin, unittest.TestCase):
    """End-to-end tests of the embedding APIs driven through _testembed."""
    def test_subinterps_main(self):
        # The main interpreter always reports id 0.
        for run in self.run_repeated_init_and_subinterpreters():
            main = run[0]
            self.assertEqual(main.id, '0')
    def test_subinterps_different_ids(self):
        # Subinterpreter ids increase monotonically from the main id.
        for run in self.run_repeated_init_and_subinterpreters():
            main, *subs, _ = run
            mainid = int(main.id)
            for i, sub in enumerate(subs):
                self.assertEqual(sub.id, str(mainid + i + 1))
    def test_subinterps_distinct_state(self):
        # Each subinterpreter must get its own interp/tstate/modules.
        for run in self.run_repeated_init_and_subinterpreters():
            main, *subs, _ = run
            if '0x0' in main:
                # XXX Fix on Windows (and other platforms): something
                # is going on with the pointers in Programs/_testembed.c.
                # interp.interp is 0x0 and interp.modules is the same
                # between interpreters.
                raise unittest.SkipTest('platform prints pointers as 0x0')
            for sub in subs:
                # A new subinterpreter may have the same
                # PyInterpreterState pointer as a previous one if
                # the earlier one has already been destroyed. So
                # we compare with the main interpreter. The same
                # applies to tstate.
                self.assertNotEqual(sub.interp, main.interp)
                self.assertNotEqual(sub.tstate, main.tstate)
                self.assertNotEqual(sub.modules, main.modules)
    def test_forced_io_encoding(self):
        # Checks forced configuration of embedded interpreter IO streams
        env = dict(os.environ, PYTHONIOENCODING="utf-8:surrogateescape")
        out, err = self.run_embedded_interpreter("test_forced_io_encoding", env=env)
        if support.verbose > 1:
            print()
            print(out)
            print(err)
        expected_stream_encoding = "utf-8"
        expected_errors = "surrogateescape"
        # One section per combination of forced encoding/errors exercised
        # by the C test; placeholders are filled in below.
        expected_output = '\n'.join([
            "--- Use defaults ---",
            "Expected encoding: default",
            "Expected errors: default",
            "stdin: {in_encoding}:{errors}",
            "stdout: {out_encoding}:{errors}",
            "stderr: {out_encoding}:backslashreplace",
            "--- Set errors only ---",
            "Expected encoding: default",
            "Expected errors: ignore",
            "stdin: {in_encoding}:ignore",
            "stdout: {out_encoding}:ignore",
            "stderr: {out_encoding}:backslashreplace",
            "--- Set encoding only ---",
            "Expected encoding: iso8859-1",
            "Expected errors: default",
            "stdin: iso8859-1:{errors}",
            "stdout: iso8859-1:{errors}",
            "stderr: iso8859-1:backslashreplace",
            "--- Set encoding and errors ---",
            "Expected encoding: iso8859-1",
            "Expected errors: replace",
            "stdin: iso8859-1:replace",
            "stdout: iso8859-1:replace",
            "stderr: iso8859-1:backslashreplace"])
        expected_output = expected_output.format(
            in_encoding=expected_stream_encoding,
            out_encoding=expected_stream_encoding,
            errors=expected_errors)
        # This is useful if we ever trip over odd platform behaviour
        self.maxDiff = None
        self.assertEqual(out.strip(), expected_output)
    def test_pre_initialization_api(self):
        """
        Checks some key parts of the C-API that need to work before the runtime
        is initialized (via Py_Initialize()).
        """
        env = dict(os.environ, PYTHONPATH=os.pathsep.join(sys.path))
        out, err = self.run_embedded_interpreter("test_pre_initialization_api", env=env)
        if MS_WINDOWS:
            expected_path = self.test_exe
        else:
            expected_path = os.path.join(os.getcwd(), "spam")
        expected_output = f"sys.executable: {expected_path}\n"
        self.assertIn(expected_output, out)
        self.assertEqual(err, '')
    def test_pre_initialization_sys_options(self):
        """
        Checks that sys.warnoptions and sys._xoptions can be set before the
        runtime is initialized (otherwise they won't be effective).
        """
        env = remove_python_envvars()
        env['PYTHONPATH'] = os.pathsep.join(sys.path)
        out, err = self.run_embedded_interpreter(
            "test_pre_initialization_sys_options", env=env)
        expected_output = (
            "sys.warnoptions: ['once', 'module', 'default']\n"
            "sys._xoptions: {'not_an_option': '1', 'also_not_an_option': '2'}\n"
            "warnings.filters[:3]: ['default', 'module', 'once']\n"
        )
        self.assertIn(expected_output, out)
        self.assertEqual(err, '')
    def test_bpo20891(self):
        """
        bpo-20891: Calling PyGILState_Ensure in a non-Python thread before
        calling PyEval_InitThreads() must not crash. PyGILState_Ensure() must
        call PyEval_InitThreads() for us in this case.
        """
        out, err = self.run_embedded_interpreter("test_bpo20891")
        self.assertEqual(out, '')
        self.assertEqual(err, '')
    def test_initialize_twice(self):
        """
        bpo-33932: Calling Py_Initialize() twice should do nothing (and not
        crash!).
        """
        out, err = self.run_embedded_interpreter("test_initialize_twice")
        self.assertEqual(out, '')
        self.assertEqual(err, '')
    def test_initialize_pymain(self):
        """
        bpo-34008: Calling Py_Main() after Py_Initialize() must not fail.
        """
        out, err = self.run_embedded_interpreter("test_initialize_pymain")
        self.assertEqual(out.rstrip(), "Py_Main() after Py_Initialize: sys.argv=['-c', 'arg2']")
        self.assertEqual(err, '')
    def test_run_main(self):
        # Py_RunMain() must see the argv configured by the C side.
        out, err = self.run_embedded_interpreter("test_run_main")
        self.assertEqual(out.rstrip(), "Py_RunMain(): sys.argv=['-c', 'arg2']")
        self.assertEqual(err, '')
class InitConfigTests(EmbeddingTestsMixin, unittest.TestCase):
maxDiff = 4096
UTF8_MODE_ERRORS = ('surrogatepass' if MS_WINDOWS else 'surrogateescape')
# Marker to read the default configuration: get_default_config()
GET_DEFAULT_CONFIG = object()
# Marker to ignore a configuration parameter
IGNORE_CONFIG = object()
PRE_CONFIG_COMPAT = {
'_config_init': API_COMPAT,
'allocator': PYMEM_ALLOCATOR_NOT_SET,
'parse_argv': 0,
'configure_locale': 1,
'coerce_c_locale': 0,
'coerce_c_locale_warn': 0,
'utf8_mode': 0,
}
if MS_WINDOWS:
PRE_CONFIG_COMPAT.update({
'legacy_windows_fs_encoding': 0,
})
PRE_CONFIG_PYTHON = dict(PRE_CONFIG_COMPAT,
_config_init=API_PYTHON,
parse_argv=1,
coerce_c_locale=GET_DEFAULT_CONFIG,
utf8_mode=GET_DEFAULT_CONFIG,
)
PRE_CONFIG_ISOLATED = dict(PRE_CONFIG_COMPAT,
_config_init=API_ISOLATED,
configure_locale=0,
isolated=1,
use_environment=0,
utf8_mode=0,
dev_mode=0,
coerce_c_locale=0,
)
COPY_PRE_CONFIG = [
'dev_mode',
'isolated',
'use_environment',
]
CONFIG_COMPAT = {
'_config_init': API_COMPAT,
'isolated': 0,
'use_environment': 1,
'dev_mode': 0,
'install_signal_handlers': 1,
'use_hash_seed': 0,
'hash_seed': 0,
'faulthandler': 0,
'tracemalloc': 0,
'import_time': 0,
'show_ref_count': 0,
'show_alloc_count': 0,
'dump_refs': 0,
'malloc_stats': 0,
'filesystem_encoding': GET_DEFAULT_CONFIG,
'filesystem_errors': GET_DEFAULT_CONFIG,
'pycache_prefix': None,
'program_name': GET_DEFAULT_CONFIG,
'parse_argv': 0,
'argv': [""],
'xoptions': [],
'warnoptions': [],
'pythonpath_env': None,
'home': None,
'executable': GET_DEFAULT_CONFIG,
'base_executable': GET_DEFAULT_CONFIG,
'prefix': GET_DEFAULT_CONFIG,
'base_prefix': GET_DEFAULT_CONFIG,
'exec_prefix': GET_DEFAULT_CONFIG,
'base_exec_prefix': GET_DEFAULT_CONFIG,
'module_search_paths': GET_DEFAULT_CONFIG,
'site_import': 1,
'bytes_warning': 0,
'inspect': 0,
'interactive': 0,
'optimization_level': 0,
'parser_debug': 0,
'write_bytecode': 1,
'verbose': 0,
'quiet': 0,
'user_site_directory': 1,
'configure_c_stdio': 0,
'buffered_stdio': 1,
'stdio_encoding': GET_DEFAULT_CONFIG,
'stdio_errors': GET_DEFAULT_CONFIG,
'skip_source_first_line': 0,
'run_command': None,
'run_module': None,
'run_filename': None,
'_install_importlib': 1,
'check_hash_pycs_mode': 'default',
'pathconfig_warnings': 1,
'_init_main': 1,
}
if MS_WINDOWS:
CONFIG_COMPAT.update({
'legacy_windows_stdio': 0,
})
CONFIG_PYTHON = dict(CONFIG_COMPAT,
_config_init=API_PYTHON,
configure_c_stdio=1,
parse_argv=1,
)
CONFIG_ISOLATED = dict(CONFIG_COMPAT,
_config_init=API_ISOLATED,
isolated=1,
use_environment=0,
user_site_directory=0,
dev_mode=0,
install_signal_handlers=0,
use_hash_seed=0,
faulthandler=0,
tracemalloc=0,
pathconfig_warnings=0,
)
if MS_WINDOWS:
CONFIG_ISOLATED['legacy_windows_stdio'] = 0
# global config
DEFAULT_GLOBAL_CONFIG = {
'Py_HasFileSystemDefaultEncoding': 0,
'Py_HashRandomizationFlag': 1,
'_Py_HasFileSystemDefaultEncodeErrors': 0,
}
COPY_GLOBAL_PRE_CONFIG = [
('Py_UTF8Mode', 'utf8_mode'),
]
COPY_GLOBAL_CONFIG = [
# Copy core config to global config for expected values
# True means that the core config value is inverted (0 => 1 and 1 => 0)
('Py_BytesWarningFlag', 'bytes_warning'),
('Py_DebugFlag', 'parser_debug'),
('Py_DontWriteBytecodeFlag', 'write_bytecode', True),
('Py_FileSystemDefaultEncodeErrors', 'filesystem_errors'),
('Py_FileSystemDefaultEncoding', 'filesystem_encoding'),
('Py_FrozenFlag', 'pathconfig_warnings', True),
('Py_IgnoreEnvironmentFlag', 'use_environment', True),
('Py_InspectFlag', 'inspect'),
('Py_InteractiveFlag', 'interactive'),
('Py_IsolatedFlag', 'isolated'),
('Py_NoSiteFlag', 'site_import', True),
('Py_NoUserSiteDirectory', 'user_site_directory', True),
('Py_OptimizeFlag', 'optimization_level'),
('Py_QuietFlag', 'quiet'),
('Py_UnbufferedStdioFlag', 'buffered_stdio', True),
('Py_VerboseFlag', 'verbose'),
]
if MS_WINDOWS:
COPY_GLOBAL_PRE_CONFIG.extend((
('Py_LegacyWindowsFSEncodingFlag', 'legacy_windows_fs_encoding'),
))
COPY_GLOBAL_CONFIG.extend((
('Py_LegacyWindowsStdioFlag', 'legacy_windows_stdio'),
))
EXPECTED_CONFIG = None
    @classmethod
    def tearDownClass(cls):
        """Drop the cached reference configuration once this class is done."""
        # clear cache so a later run re-probes the interpreter
        cls.EXPECTED_CONFIG = None
def main_xoptions(self, xoptions_list):
xoptions = {}
for opt in xoptions_list:
if '=' in opt:
key, value = opt.split('=', 1)
xoptions[key] = value
else:
xoptions[opt] = True
return xoptions
    def _get_expected_config_impl(self):
        """Probe the running interpreter for its default configuration.

        Spawns sys.executable with a clean environment and asks
        _testinternalcapi for its config dump, returned as parsed JSON.
        """
        env = remove_python_envvars()
        code = textwrap.dedent('''
            import json
            import sys
            import _testinternalcapi
            configs = _testinternalcapi.get_configs()
            data = json.dumps(configs)
            data = data.encode('utf-8')
            sys.stdout.buffer.write(data)
            sys.stdout.buffer.flush()
        ''')
        # Use -S to not import the site module: get the proper configuration
        # when test_embed is run from a venv (bpo-35313)
        args = [sys.executable, '-S', '-c', code]
        proc = subprocess.run(args, env=env,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        if proc.returncode:
            raise Exception(f"failed to get the default config: "
                            f"stdout={proc.stdout!r} stderr={proc.stderr!r}")
        stdout = proc.stdout.decode('utf-8')
        # ignore stderr
        try:
            return json.loads(stdout)
        except json.JSONDecodeError:
            self.fail(f"fail to decode stdout: {stdout!r}")
def _get_expected_config(self):
cls = InitConfigTests
if cls.EXPECTED_CONFIG is None:
cls.EXPECTED_CONFIG = self._get_expected_config_impl()
# get a copy
configs = {}
for config_key, config_value in cls.EXPECTED_CONFIG.items():
config = {}
for key, value in config_value.items():
if isinstance(value, list):
value = value.copy()
config[key] = value
configs[config_key] = config
return configs
    def get_expected_config(self, expected_preconfig, expected, env, api,
                            modify_path_cb=None):
        """Resolve GET_DEFAULT_CONFIG markers in both expected dicts.

        Mutates *expected_preconfig* and *expected* in place, filling
        marker values from the reference configuration and applying the
        API-specific rules the C side is expected to follow.
        """
        # NOTE(review): 'cls' is assigned but never used here.
        cls = self.__class__
        configs = self._get_expected_config()
        pre_config = configs['pre_config']
        for key, value in expected_preconfig.items():
            if value is self.GET_DEFAULT_CONFIG:
                expected_preconfig[key] = pre_config[key]
        if not expected_preconfig['configure_locale'] or api == API_COMPAT:
            # there is no easy way to get the locale encoding before
            # setlocale(LC_CTYPE, "") is called: don't test encodings
            for key in ('filesystem_encoding', 'filesystem_errors',
                        'stdio_encoding', 'stdio_errors'):
                expected[key] = self.IGNORE_CONFIG
        if not expected_preconfig['configure_locale']:
            # UTF-8 Mode depends on the locale. There is no easy way
            # to guess if UTF-8 Mode will be enabled or not if the locale
            # is not configured.
            expected_preconfig['utf8_mode'] = self.IGNORE_CONFIG
        if expected_preconfig['utf8_mode'] == 1:
            # UTF-8 Mode forces utf-8 stream and filesystem encodings.
            if expected['filesystem_encoding'] is self.GET_DEFAULT_CONFIG:
                expected['filesystem_encoding'] = 'utf-8'
            if expected['filesystem_errors'] is self.GET_DEFAULT_CONFIG:
                expected['filesystem_errors'] = self.UTF8_MODE_ERRORS
            if expected['stdio_encoding'] is self.GET_DEFAULT_CONFIG:
                expected['stdio_encoding'] = 'utf-8'
            if expected['stdio_errors'] is self.GET_DEFAULT_CONFIG:
                expected['stdio_errors'] = 'surrogateescape'
        if sys.platform == 'win32':
            default_executable = self.test_exe
        elif expected['program_name'] is not self.GET_DEFAULT_CONFIG:
            default_executable = os.path.abspath(expected['program_name'])
        else:
            default_executable = os.path.join(os.getcwd(), '_testembed')
        if expected['executable'] is self.GET_DEFAULT_CONFIG:
            expected['executable'] = default_executable
        if expected['base_executable'] is self.GET_DEFAULT_CONFIG:
            expected['base_executable'] = default_executable
        if expected['program_name'] is self.GET_DEFAULT_CONFIG:
            expected['program_name'] = './_testembed'
        config = configs['config']
        for key, value in expected.items():
            if value is self.GET_DEFAULT_CONFIG:
                expected[key] = config[key]
        # PYTHONPATH entries are prepended to the module search path.
        pythonpath_env = expected['pythonpath_env']
        if pythonpath_env is not None:
            paths = pythonpath_env.split(os.path.pathsep)
            expected['module_search_paths'] = [*paths, *expected['module_search_paths']]
        if modify_path_cb is not None:
            expected['module_search_paths'] = expected['module_search_paths'].copy()
            modify_path_cb(expected['module_search_paths'])
        # Pre-config inherits these values from the core config by default.
        for key in self.COPY_PRE_CONFIG:
            if key not in expected_preconfig:
                expected_preconfig[key] = expected[key]
def check_pre_config(self, configs, expected):
pre_config = dict(configs['pre_config'])
for key, value in list(expected.items()):
if value is self.IGNORE_CONFIG:
del pre_config[key]
del expected[key]
self.assertEqual(pre_config, expected)
def check_config(self, configs, expected):
config = dict(configs['config'])
for key, value in list(expected.items()):
if value is self.IGNORE_CONFIG:
del config[key]
del expected[key]
self.assertEqual(config, expected)
def check_global_config(self, configs):
pre_config = configs['pre_config']
config = configs['config']
expected = dict(self.DEFAULT_GLOBAL_CONFIG)
for item in self.COPY_GLOBAL_CONFIG:
if len(item) == 3:
global_key, core_key, opposite = item
expected[global_key] = 0 if config[core_key] else 1
else:
global_key, core_key = item
expected[global_key] = config[core_key]
for item in self.COPY_GLOBAL_PRE_CONFIG:
if len(item) == 3:
global_key, core_key, opposite = item
expected[global_key] = 0 if pre_config[core_key] else 1
else:
global_key, core_key = item
expected[global_key] = pre_config[core_key]
self.assertEqual(configs['global_config'], expected)
    def check_all_configs(self, testname, expected_config=None,
                          expected_preconfig=None, modify_path_cb=None,
                          stderr=None, *, api, preconfig_api=None,
                          env=None, ignore_stderr=False, cwd=None):
        """Run *testname* in _testembed and verify its JSON config dump.

        *expected_config*/*expected_preconfig* are overrides merged on top
        of the defaults selected by *api* (and *preconfig_api*, which
        defaults to *api*): compat, python or isolated.
        """
        new_env = remove_python_envvars()
        if env is not None:
            new_env.update(env)
        env = new_env
        if preconfig_api is None:
            preconfig_api = api
        if preconfig_api == API_ISOLATED:
            default_preconfig = self.PRE_CONFIG_ISOLATED
        elif preconfig_api == API_PYTHON:
            default_preconfig = self.PRE_CONFIG_PYTHON
        else:
            default_preconfig = self.PRE_CONFIG_COMPAT
        if expected_preconfig is None:
            expected_preconfig = {}
        expected_preconfig = dict(default_preconfig, **expected_preconfig)
        if expected_config is None:
            expected_config = {}
        if api == API_PYTHON:
            default_config = self.CONFIG_PYTHON
        elif api == API_ISOLATED:
            default_config = self.CONFIG_ISOLATED
        else:
            default_config = self.CONFIG_COMPAT
        expected_config = dict(default_config, **expected_config)
        # Resolve GET_DEFAULT_CONFIG markers before comparing.
        self.get_expected_config(expected_preconfig,
                                 expected_config, env,
                                 api, modify_path_cb)
        out, err = self.run_embedded_interpreter(testname,
                                                 env=env, cwd=cwd)
        if stderr is None and not expected_config['verbose']:
            stderr = ""
        if stderr is not None and not ignore_stderr:
            self.assertEqual(err.rstrip(), stderr)
        try:
            configs = json.loads(out)
        except json.JSONDecodeError:
            self.fail(f"fail to decode stdout: {out!r}")
        self.check_pre_config(configs, expected_preconfig)
        self.check_config(configs, expected_config)
        self.check_global_config(configs)
    def test_init_default_config(self):
        # Configuration left entirely to the defaults.
        self.check_all_configs("test_init_initialize_config", api=API_COMPAT)
    def test_preinit_compat_config(self):
        self.check_all_configs("test_preinit_compat_config", api=API_COMPAT)
    def test_init_compat_config(self):
        self.check_all_configs("test_init_compat_config", api=API_COMPAT)
    def test_init_global_config(self):
        # Configuration driven by the legacy Py_*Flag global variables.
        preconfig = {
            'utf8_mode': 1,
        }
        config = {
            'program_name': './globalvar',
            'site_import': 0,
            'bytes_warning': 1,
            'warnoptions': ['default::BytesWarning'],
            'inspect': 1,
            'interactive': 1,
            'optimization_level': 2,
            'write_bytecode': 0,
            'verbose': 1,
            'quiet': 1,
            'buffered_stdio': 0,
            'user_site_directory': 0,
            'pathconfig_warnings': 0,
        }
        self.check_all_configs("test_init_global_config", config, preconfig,
                               api=API_COMPAT)
    def test_init_from_config(self):
        # Configuration set programmatically via the config structures
        # before initialization (see the matching C test in _testembed.c).
        preconfig = {
            'allocator': PYMEM_ALLOCATOR_MALLOC,
            'utf8_mode': 1,
        }
        config = {
            'install_signal_handlers': 0,
            'use_hash_seed': 1,
            'hash_seed': 123,
            'tracemalloc': 2,
            'import_time': 1,
            'show_ref_count': 1,
            'show_alloc_count': 1,
            'malloc_stats': 1,
            'stdio_encoding': 'iso8859-1',
            'stdio_errors': 'replace',
            'pycache_prefix': 'conf_pycache_prefix',
            'program_name': './conf_program_name',
            'argv': ['-c', 'arg2', ],
            'parse_argv': 1,
            'xoptions': [
                'config_xoption1=3',
                'config_xoption2=',
                'config_xoption3',
                'cmdline_xoption',
            ],
            'warnoptions': [
                'cmdline_warnoption',
                'default::BytesWarning',
                'config_warnoption',
            ],
            'run_command': 'pass\n',
            'site_import': 0,
            'bytes_warning': 1,
            'inspect': 1,
            'interactive': 1,
            'optimization_level': 2,
            'write_bytecode': 0,
            'verbose': 1,
            'quiet': 1,
            'configure_c_stdio': 1,
            'buffered_stdio': 0,
            'user_site_directory': 0,
            'faulthandler': 1,
            'check_hash_pycs_mode': 'always',
            'pathconfig_warnings': 0,
        }
        self.check_all_configs("test_init_from_config", config, preconfig,
                               api=API_COMPAT)
    def test_init_compat_env(self):
        # Configuration read from the environment (compat init API);
        # presumably driven by PYTHON* env vars set in _testembed.c -- confirm.
        preconfig = {
            'allocator': PYMEM_ALLOCATOR_MALLOC,
        }
        config = {
            'use_hash_seed': 1,
            'hash_seed': 42,
            'tracemalloc': 2,
            'import_time': 1,
            'malloc_stats': 1,
            'inspect': 1,
            'optimization_level': 2,
            'pythonpath_env': '/my/path',
            'pycache_prefix': 'env_pycache_prefix',
            'write_bytecode': 0,
            'verbose': 1,
            'buffered_stdio': 0,
            'stdio_encoding': 'iso8859-1',
            'stdio_errors': 'replace',
            'user_site_directory': 0,
            'faulthandler': 1,
            'warnoptions': ['EnvVar'],
        }
        self.check_all_configs("test_init_compat_env", config, preconfig,
                               api=API_COMPAT)
    def test_init_python_env(self):
        # Same environment-driven configuration as test_init_compat_env,
        # but through the Python init API (which also enables UTF-8 mode).
        preconfig = {
            'allocator': PYMEM_ALLOCATOR_MALLOC,
            'utf8_mode': 1,
        }
        config = {
            'use_hash_seed': 1,
            'hash_seed': 42,
            'tracemalloc': 2,
            'import_time': 1,
            'malloc_stats': 1,
            'inspect': 1,
            'optimization_level': 2,
            'pythonpath_env': '/my/path',
            'pycache_prefix': 'env_pycache_prefix',
            'write_bytecode': 0,
            'verbose': 1,
            'buffered_stdio': 0,
            'stdio_encoding': 'iso8859-1',
            'stdio_errors': 'replace',
            'user_site_directory': 0,
            'faulthandler': 1,
            'warnoptions': ['EnvVar'],
        }
        self.check_all_configs("test_init_python_env", config, preconfig,
                               api=API_PYTHON)
    def test_init_env_dev_mode(self):
        # Dev mode selects the debug allocator and 'default' warnings.
        preconfig = dict(allocator=PYMEM_ALLOCATOR_DEBUG)
        config = dict(dev_mode=1,
                      faulthandler=1,
                      warnoptions=['default'])
        self.check_all_configs("test_init_env_dev_mode", config, preconfig,
                               api=API_COMPAT)
    def test_init_env_dev_mode_alloc(self):
        # Dev mode with an explicit allocator override (malloc wins).
        preconfig = dict(allocator=PYMEM_ALLOCATOR_MALLOC)
        config = dict(dev_mode=1,
                      faulthandler=1,
                      warnoptions=['default'])
        self.check_all_configs("test_init_env_dev_mode_alloc", config, preconfig,
                               api=API_COMPAT)
    def test_init_dev_mode(self):
        # Dev mode enabled through the config rather than the environment.
        preconfig = {
            'allocator': PYMEM_ALLOCATOR_DEBUG,
        }
        config = {
            'faulthandler': 1,
            'dev_mode': 1,
            'warnoptions': ['default'],
        }
        self.check_all_configs("test_init_dev_mode", config, preconfig,
                               api=API_PYTHON)
    def test_preinit_parse_argv(self):
        # Pre-initialize implicitly using argv: make sure that -X dev
        # is used to configure the allocation in preinitialization
        preconfig = {
            'allocator': PYMEM_ALLOCATOR_DEBUG,
        }
        config = {
            'argv': ['script.py'],
            'run_filename': 'script.py',
            'dev_mode': 1,
            'faulthandler': 1,
            'warnoptions': ['default'],
            'xoptions': ['dev'],
        }
        self.check_all_configs("test_preinit_parse_argv", config, preconfig,
                               api=API_PYTHON)
    def test_preinit_dont_parse_argv(self):
        # -X dev must be ignored by isolated preconfiguration
        preconfig = {
            'isolated': 0,
        }
        config = {
            'argv': ["python3", "-E", "-I",
                     "-X", "dev", "-X", "utf8", "script.py"],
            'isolated': 0,
        }
        self.check_all_configs("test_preinit_dont_parse_argv", config, preconfig,
                               api=API_ISOLATED)
def test_init_isolated_flag(self):
config = {
'isolated': 1,
'use_environment': 0,
'user_site_directory': 0,
}
self.check_all_configs("test_init_isolated_flag", config, api=API_PYTHON)
def test_preinit_isolated1(self):
# _PyPreConfig.isolated=1, _PyCoreConfig.isolated not set
config = {
'isolated': 1,
'use_environment': 0,
'user_site_directory': 0,
}
self.check_all_configs("test_preinit_isolated1", config, api=API_COMPAT)
def test_preinit_isolated2(self):
# _PyPreConfig.isolated=0, _PyCoreConfig.isolated=1
config = {
'isolated': 1,
'use_environment': 0,
'user_site_directory': 0,
}
self.check_all_configs("test_preinit_isolated2", config, api=API_COMPAT)
    def test_preinit_isolated_config(self):
        # Default configuration through the isolated pre-initialization API.
        self.check_all_configs("test_preinit_isolated_config", api=API_ISOLATED)
    def test_init_isolated_config(self):
        # Default configuration through the isolated initialization API.
        self.check_all_configs("test_init_isolated_config", api=API_ISOLATED)
    def test_preinit_python_config(self):
        # Default configuration through the Python pre-initialization API.
        self.check_all_configs("test_preinit_python_config", api=API_PYTHON)
    def test_init_python_config(self):
        # Default configuration through the Python initialization API.
        self.check_all_configs("test_init_python_config", api=API_PYTHON)
def test_init_dont_configure_locale(self):
# _PyPreConfig.configure_locale=0
preconfig = {
'configure_locale': 0,
'coerce_c_locale': 0,
}
self.check_all_configs("test_init_dont_configure_locale", {}, preconfig,
api=API_PYTHON)
def test_init_read_set(self):
config = {
'program_name': './init_read_set',
'executable': 'my_executable',
}
def modify_path(path):
path.insert(1, "test_path_insert1")
path.append("test_path_append")
self.check_all_configs("test_init_read_set", config,
api=API_PYTHON,
modify_path_cb=modify_path)
def test_init_sys_add(self):
config = {
'faulthandler': 1,
'xoptions': [
'config_xoption',
'cmdline_xoption',
'sysadd_xoption',
'faulthandler',
],
'warnoptions': [
'ignore:::cmdline_warnoption',
'ignore:::sysadd_warnoption',
'ignore:::config_warnoption',
],
}
self.check_all_configs("test_init_sys_add", config, api=API_PYTHON)
def test_init_run_main(self):
code = ('import _testinternalcapi, json; '
'print(json.dumps(_testinternalcapi.get_configs()))')
config = {
'argv': ['-c', 'arg2'],
'program_name': './python3',
'run_command': code + '\n',
'parse_argv': 1,
}
self.check_all_configs("test_init_run_main", config, api=API_PYTHON)
def test_init_main(self):
code = ('import _testinternalcapi, json; '
'print(json.dumps(_testinternalcapi.get_configs()))')
config = {
'argv': ['-c', 'arg2'],
'program_name': './python3',
'run_command': code + '\n',
'parse_argv': 1,
'_init_main': 0,
}
self.check_all_configs("test_init_main", config,
api=API_PYTHON,
stderr="Run Python code before _Py_InitializeMain")
def test_init_parse_argv(self):
config = {
'parse_argv': 1,
'argv': ['-c', 'arg1', '-v', 'arg3'],
'program_name': './argv0',
'run_command': 'pass\n',
'use_environment': 0,
}
self.check_all_configs("test_init_parse_argv", config, api=API_PYTHON)
def test_init_dont_parse_argv(self):
pre_config = {
'parse_argv': 0,
}
config = {
'parse_argv': 0,
'argv': ['./argv0', '-E', '-c', 'pass', 'arg1', '-v', 'arg3'],
'program_name': './argv0',
}
self.check_all_configs("test_init_dont_parse_argv", config, pre_config,
api=API_PYTHON)
def default_program_name(self, config):
if MS_WINDOWS:
program_name = 'python'
executable = self.test_exe
else:
program_name = 'python3'
if MACOS:
executable = self.test_exe
else:
executable = shutil.which(program_name) or ''
config.update({
'program_name': program_name,
'base_executable': executable,
'executable': executable,
})
def test_init_setpath(self):
# Test Py_SetPath()
config = self._get_expected_config()
paths = config['config']['module_search_paths']
config = {
'module_search_paths': paths,
'prefix': '',
'base_prefix': '',
'exec_prefix': '',
'base_exec_prefix': '',
}
self.default_program_name(config)
env = {'TESTPATH': os.path.pathsep.join(paths)}
self.check_all_configs("test_init_setpath", config,
api=API_COMPAT, env=env,
ignore_stderr=True)
def test_init_setpath_config(self):
# Test Py_SetPath() with PyConfig
config = self._get_expected_config()
paths = config['config']['module_search_paths']
config = {
# set by Py_SetPath()
'module_search_paths': paths,
'prefix': '',
'base_prefix': '',
'exec_prefix': '',
'base_exec_prefix': '',
# overriden by PyConfig
'program_name': 'conf_program_name',
'base_executable': 'conf_executable',
'executable': 'conf_executable',
}
env = {'TESTPATH': os.path.pathsep.join(paths)}
self.check_all_configs("test_init_setpath_config", config,
api=API_PYTHON, env=env, ignore_stderr=True)
def module_search_paths(self, prefix=None, exec_prefix=None):
config = self._get_expected_config()
if prefix is None:
prefix = config['config']['prefix']
if exec_prefix is None:
exec_prefix = config['config']['prefix']
if MS_WINDOWS:
return config['config']['module_search_paths']
else:
ver = sys.version_info
return [
os.path.join(prefix, 'lib',
f'python{ver.major}{ver.minor}.zip'),
os.path.join(prefix, 'lib',
f'python{ver.major}.{ver.minor}'),
os.path.join(exec_prefix, 'lib',
f'python{ver.major}.{ver.minor}', 'lib-dynload'),
]
@contextlib.contextmanager
def tmpdir_with_python(self):
# Temporary directory with a copy of the Python program
with tempfile.TemporaryDirectory() as tmpdir:
# bpo-38234: On macOS and FreeBSD, the temporary directory
# can be symbolic link. For example, /tmp can be a symbolic link
# to /var/tmp. Call realpath() to resolve all symbolic links.
tmpdir = os.path.realpath(tmpdir)
if MS_WINDOWS:
# Copy pythonXY.dll (or pythonXY_d.dll)
ver = sys.version_info
dll = f'python{ver.major}{ver.minor}'
if debug_build(sys.executable):
dll += '_d'
dll += '.dll'
dll = os.path.join(os.path.dirname(self.test_exe), dll)
dll_copy = os.path.join(tmpdir, os.path.basename(dll))
shutil.copyfile(dll, dll_copy)
# Copy Python program
exec_copy = os.path.join(tmpdir, os.path.basename(self.test_exe))
shutil.copyfile(self.test_exe, exec_copy)
shutil.copystat(self.test_exe, exec_copy)
self.test_exe = exec_copy
yield tmpdir
def test_init_setpythonhome(self):
# Test Py_SetPythonHome(home) with PYTHONPATH env var
config = self._get_expected_config()
paths = config['config']['module_search_paths']
paths_str = os.path.pathsep.join(paths)
for path in paths:
if not os.path.isdir(path):
continue
if os.path.exists(os.path.join(path, 'os.py')):
home = os.path.dirname(path)
break
else:
self.fail(f"Unable to find home in {paths!r}")
prefix = exec_prefix = home
ver = sys.version_info
expected_paths = self.module_search_paths(prefix=home, exec_prefix=home)
config = {
'home': home,
'module_search_paths': expected_paths,
'prefix': prefix,
'base_prefix': prefix,
'exec_prefix': exec_prefix,
'base_exec_prefix': exec_prefix,
'pythonpath_env': paths_str,
}
self.default_program_name(config)
env = {'TESTHOME': home, 'PYTHONPATH': paths_str}
self.check_all_configs("test_init_setpythonhome", config,
api=API_COMPAT, env=env)
def copy_paths_by_env(self, config):
all_configs = self._get_expected_config()
paths = all_configs['config']['module_search_paths']
paths_str = os.path.pathsep.join(paths)
config['pythonpath_env'] = paths_str
env = {'PYTHONPATH': paths_str}
return env
@unittest.skipIf(MS_WINDOWS, 'Windows does not use pybuilddir.txt')
def test_init_pybuilddir(self):
# Test path configuration with pybuilddir.txt configuration file
with self.tmpdir_with_python() as tmpdir:
# pybuilddir.txt is a sub-directory relative to the current
# directory (tmpdir)
subdir = 'libdir'
libdir = os.path.join(tmpdir, subdir)
os.mkdir(libdir)
filename = os.path.join(tmpdir, 'pybuilddir.txt')
with open(filename, "w", encoding="utf8") as fp:
fp.write(subdir)
module_search_paths = self.module_search_paths()
module_search_paths[-1] = libdir
executable = self.test_exe
config = {
'base_executable': executable,
'executable': executable,
'module_search_paths': module_search_paths,
}
env = self.copy_paths_by_env(config)
self.check_all_configs("test_init_compat_config", config,
api=API_COMPAT, env=env,
ignore_stderr=True, cwd=tmpdir)
    def test_init_pyvenv_cfg(self):
        # Test path configuration with pyvenv.cfg configuration file.
        # Builds a fake venv layout: tmpdir holds the executable and
        # pyvenv.cfg, pyvenv_home plays the role of the base installation.
        with self.tmpdir_with_python() as tmpdir, \
             tempfile.TemporaryDirectory() as pyvenv_home:
            ver = sys.version_info

            # Create the lib-dynload directory the path computation expects.
            if not MS_WINDOWS:
                lib_dynload = os.path.join(pyvenv_home,
                                           'lib',
                                           f'python{ver.major}.{ver.minor}',
                                           'lib-dynload')
                os.makedirs(lib_dynload)
            else:
                lib_dynload = os.path.join(pyvenv_home, 'lib')
                os.makedirs(lib_dynload)
                # getpathp.c uses Lib\os.py as the LANDMARK
                shutil.copyfile(os.__file__, os.path.join(lib_dynload, 'os.py'))

            # pyvenv.cfg points the venv at pyvenv_home.
            filename = os.path.join(tmpdir, 'pyvenv.cfg')
            with open(filename, "w", encoding="utf8") as fp:
                print("home = %s" % pyvenv_home, file=fp)
                print("include-system-site-packages = false", file=fp)

            # Rewrite the expected search paths to match the venv layout.
            paths = self.module_search_paths()
            if not MS_WINDOWS:
                paths[-1] = lib_dynload
            else:
                for index, path in enumerate(paths):
                    if index == 0:
                        paths[index] = os.path.join(tmpdir, os.path.basename(path))
                    else:
                        paths[index] = os.path.join(pyvenv_home, os.path.basename(path))
                paths[-1] = pyvenv_home

            executable = self.test_exe
            exec_prefix = pyvenv_home
            config = {
                'base_exec_prefix': exec_prefix,
                'exec_prefix': exec_prefix,
                'base_executable': executable,
                'executable': executable,
                'module_search_paths': paths,
            }
            if MS_WINDOWS:
                config['base_prefix'] = pyvenv_home
                config['prefix'] = pyvenv_home
            env = self.copy_paths_by_env(config)
            self.check_all_configs("test_init_compat_config", config,
                                   api=API_COMPAT, env=env,
                                   ignore_stderr=True, cwd=tmpdir)
def test_global_pathconfig(self):
# Test C API functions getting the path configuration:
#
# - Py_GetExecPrefix()
# - Py_GetPath()
# - Py_GetPrefix()
# - Py_GetProgramFullPath()
# - Py_GetProgramName()
# - Py_GetPythonHome()
#
# The global path configuration (_Py_path_config) must be a copy
# of the path configuration of PyInterpreter.config (PyConfig).
ctypes = support.import_module('ctypes')
_testinternalcapi = support.import_module('_testinternalcapi')
def get_func(name):
func = getattr(ctypes.pythonapi, name)
func.argtypes = ()
func.restype = ctypes.c_wchar_p
return func
Py_GetPath = get_func('Py_GetPath')
Py_GetPrefix = get_func('Py_GetPrefix')
Py_GetExecPrefix = get_func('Py_GetExecPrefix')
Py_GetProgramName = get_func('Py_GetProgramName')
Py_GetProgramFullPath = get_func('Py_GetProgramFullPath')
Py_GetPythonHome = get_func('Py_GetPythonHome')
config = _testinternalcapi.get_configs()['config']
self.assertEqual(Py_GetPath().split(os.path.pathsep),
config['module_search_paths'])
self.assertEqual(Py_GetPrefix(), config['prefix'])
self.assertEqual(Py_GetExecPrefix(), config['exec_prefix'])
self.assertEqual(Py_GetProgramName(), config['program_name'])
self.assertEqual(Py_GetProgramFullPath(), config['executable'])
self.assertEqual(Py_GetPythonHome(), config['home'])
def test_init_warnoptions(self):
# lowest to highest priority
warnoptions = [
'ignore:::PyConfig_Insert0', # PyWideStringList_Insert(0)
'default', # PyConfig.dev_mode=1
'ignore:::env1', # PYTHONWARNINGS env var
'ignore:::env2', # PYTHONWARNINGS env var
'ignore:::cmdline1', # -W opt command line option
'ignore:::cmdline2', # -W opt command line option
'default::BytesWarning', # PyConfig.bytes_warnings=1
'ignore:::PySys_AddWarnOption1', # PySys_AddWarnOption()
'ignore:::PySys_AddWarnOption2', # PySys_AddWarnOption()
'ignore:::PyConfig_BeforeRead', # PyConfig.warnoptions
'ignore:::PyConfig_AfterRead'] # PyWideStringList_Append()
preconfig = dict(allocator=PYMEM_ALLOCATOR_DEBUG)
config = {
'dev_mode': 1,
'faulthandler': 1,
'bytes_warning': 1,
'warnoptions': warnoptions,
}
self.check_all_configs("test_init_warnoptions", config, preconfig,
api=API_PYTHON)
class AuditingTests(EmbeddingTestsMixin, unittest.TestCase):
    """Audit-hook scenarios executed inside an embedded interpreter."""
    def test_open_code_hook(self):
        self.run_embedded_interpreter("test_open_code_hook")
    def test_audit(self):
        self.run_embedded_interpreter("test_audit")
    def test_audit_subinterpreter(self):
        self.run_embedded_interpreter("test_audit_subinterpreter")
    def test_audit_run_command(self):
        # returncode=1: the embedded scenario is expected to abort the run
        self.run_embedded_interpreter("test_audit_run_command", timeout=3, returncode=1)
    def test_audit_run_file(self):
        self.run_embedded_interpreter("test_audit_run_file", timeout=3, returncode=1)
    def test_audit_run_interactivehook(self):
        # Install a temporary PYTHONSTARTUP file defining an interactive hook
        startup = os.path.join(self.oldcwd, support.TESTFN) + ".py"
        with open(startup, "w", encoding="utf-8") as f:
            print("import sys", file=f)
            print("sys.__interactivehook__ = lambda: None", file=f)
        try:
            env = {**remove_python_envvars(), "PYTHONSTARTUP": startup}
            self.run_embedded_interpreter("test_audit_run_interactivehook", timeout=5,
                                          returncode=10, env=env)
        finally:
            os.unlink(startup)
    def test_audit_run_startup(self):
        # Same shape as above but with a trivial PYTHONSTARTUP file
        startup = os.path.join(self.oldcwd, support.TESTFN) + ".py"
        with open(startup, "w", encoding="utf-8") as f:
            print("pass", file=f)
        try:
            env = {**remove_python_envvars(), "PYTHONSTARTUP": startup}
            self.run_embedded_interpreter("test_audit_run_startup", timeout=5,
                                          returncode=10, env=env)
        finally:
            os.unlink(startup)
    def test_audit_run_stdin(self):
        self.run_embedded_interpreter("test_audit_run_stdin", timeout=3, returncode=1)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
|
|
from __future__ import division, print_function, absolute_import
import numpy as np
from . import BPoly, PPoly
from .polyint import _isscalar
# Public API of this module.
__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip",
           "Akima1DInterpolator"]
class PchipInterpolator(object):
    """PCHIP 1-d monotonic cubic interpolation.

    x and y are arrays of values used to approximate some function f,
    with ``y = f(x)``. The interpolant uses monotonic cubic splines
    to find the value of new points. (PCHIP stands for Piecewise Cubic
    Hermite Interpolating Polynomial).

    Parameters
    ----------
    x : ndarray
        A 1-D array of monotonically increasing real values. `x` cannot
        include duplicate values (otherwise f is overspecified)
    y : ndarray
        A 1-D array of real values. `y`'s length along the interpolation
        axis must be equal to the length of `x`. If N-D array, use axis
        parameter to select correct axis.
    axis : int, optional
        Axis in the y array corresponding to the x-coordinate values.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs.

    Methods
    -------
    __call__
    derivative

    See Also
    --------
    Akima1DInterpolator

    Notes
    -----
    The first derivatives are guaranteed to be continuous, but the second
    derivatives may jump at x_k.

    Preserves monotonicity in the interpolation data and does not overshoot
    if the data is not smooth.

    Determines the derivatives at the points x_k, d_k, by using the PCHIP
    algorithm:

    Let m_k be the slope of the kth segment (between k and k+1)
    If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
    else use weighted harmonic mean:
       w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
       1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
    where h_k is the spacing between x_k and x_{k+1}.
    """
    def __init__(self, x, y, axis=0, extrapolate=None):
        # Promote integer (or other non-inexact) inputs to float so the
        # slope arithmetic below is done in floating point.
        x = np.asarray(x)
        if not np.issubdtype(x.dtype, np.inexact):
            x = x.astype(float)
        y = np.asarray(y)
        if not np.issubdtype(y.dtype, np.inexact):
            y = y.astype(float)
        # Normalize negative axis values.
        axis = axis % y.ndim
        # Reshape x so it broadcasts against y with the interpolation axis
        # rolled to the front.
        xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
        yp = np.rollaxis(y, axis)
        dk = self._find_derivatives(xp, yp)
        # Stack values and derivatives along a new axis as expected by
        # BPoly.from_derivatives.
        data = np.hstack((yp[:, None, ...], dk[:, None, ...]))
        self._bpoly = BPoly.from_derivatives(x, data, orders=None,
                extrapolate=extrapolate)
        self.axis = axis
    def __call__(self, x, der=0, extrapolate=None):
        """
        Evaluate the PCHIP interpolant or its derivative.

        Parameters
        ----------
        x : array-like
            Points to evaluate the interpolant at.
        der : int, optional
            Order of derivative to evaluate. Must be non-negative.
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        y : array-like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.
        """
        out = self._bpoly(x, der, extrapolate)
        return self._reshaper(x, out)
    def derivative(self, der=1):
        """
        Construct a piecewise polynomial representing the derivative.

        Parameters
        ----------
        der : int, optional
            Order of derivative to evaluate. (Default: 1)
            If negative, the antiderivative is returned.

        Returns
        -------
        Piecewise polynomial of order k2 = k - der representing the derivative
        of this polynomial.
        """
        # Build the result without running __init__ (no data to refit).
        t = object.__new__(self.__class__)
        t.axis = self.axis
        t._bpoly = self._bpoly.derivative(der)
        return t
    def roots(self):
        """
        Return the roots of the interpolated function.
        """
        return (PPoly.from_bernstein_basis(self._bpoly)).roots()
    def _reshaper(self, x, out):
        # Move the interpolation axis back to its original position in the
        # output array.
        x = np.asarray(x)
        l = x.ndim
        transp = (tuple(range(l, l+self.axis)) + tuple(range(l)) +
                tuple(range(l+self.axis, out.ndim)))
        return out.transpose(transp)
    @staticmethod
    def _edge_case(m0, d1, out):
        # End-point derivative: 1/d_0 = 1/m_0 + 1/d_1, except d_0 = 0 when
        # either m_0 or d_1 is 0 (out is pre-zeroed by the caller).
        m0 = np.atleast_1d(m0)
        d1 = np.atleast_1d(d1)
        mask = (d1 != 0) & (m0 != 0)
        out[mask] = 1.0/(1.0/m0[mask]+1.0/d1[mask])
    @staticmethod
    def _find_derivatives(x, y):
        # Determine the derivatives at the points y_k, d_k, using the
        # PCHIP algorithm:
        # We choose the derivatives at the point x_k by
        # Let m_k be the slope of the kth segment (between k and k+1)
        # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
        # else use weighted harmonic mean:
        #   w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
        #   1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
        #   where h_k is the spacing between x_k and x_{k+1}
        y_shape = y.shape
        if y.ndim == 1:
            # So that _edge_case doesn't end up assigning to scalars
            x = x[:,None]
            y = y[:,None]
        hk = x[1:] - x[:-1]
        mk = (y[1:] - y[:-1]) / hk
        smk = np.sign(mk)
        # True where the derivative must be forced to zero (sign change or
        # flat neighboring segment).
        condition = ((smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0))
        w1 = 2*hk[1:] + hk[:-1]
        w2 = hk[1:] + 2*hk[:-1]
        # values where division by zero occurs will be excluded
        # by 'condition' afterwards
        with np.errstate(divide='ignore'):
            whmean = 1.0/(w1+w2)*(w1/mk[1:] + w2/mk[:-1])
        dk = np.zeros_like(y)
        dk[1:-1][condition] = 0.0
        dk[1:-1][~condition] = 1.0/whmean[~condition]
        # For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless
        # one of d_1 or m_0 is 0, then choose d_0 = 0
        PchipInterpolator._edge_case(mk[0],dk[1], dk[0])
        PchipInterpolator._edge_case(mk[-1],dk[-2], dk[-1])
        return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
    """
    Convenience function for pchip interpolation.

    xi and yi are arrays of values used to approximate some function f,
    with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
    to find the value of new points x and the derivatives there.

    See `PchipInterpolator` for details.

    Parameters
    ----------
    xi : array_like
        A sorted list of x-coordinates, of length N.
    yi : array_like
        A 1-D array of real values. `yi`'s length along the interpolation
        axis must be equal to the length of `xi`. If N-D array, use axis
        parameter to select correct axis.
    x : scalar or array_like
        Of length M.
    der : integer or list
        How many derivatives to extract; None for all potentially
        nonzero derivatives (that is a number equal to the number
        of points), or a list of derivatives to extract. This number
        includes the function value as 0th derivative.
    axis : int, optional
        Axis in the yi array corresponding to the x-coordinate values.

    See Also
    --------
    PchipInterpolator

    Returns
    -------
    y : scalar or array_like
        The result, of length R or length M or M by R,
    """
    interp = PchipInterpolator(xi, yi, axis=axis)
    # Guard clauses: plain evaluation, a single derivative order, or a
    # list of derivative orders.
    if der == 0:
        return interp(x)
    if _isscalar(der):
        return interp(x, der=der)
    return [interp(x, nu) for nu in der]
# Backwards compatibility: `pchip` is the historical alias for the class.
pchip = PchipInterpolator
class Akima1DInterpolator(PPoly):
    """
    Akima interpolator

    Fit piecewise cubic polynomials, given vectors x and y. The interpolation
    method by Akima uses a continuously differentiable sub-spline built from
    piecewise cubic polynomials. The resultant curve passes through the given
    data points and will appear smooth and natural.

    Parameters
    ----------
    x : ndarray, shape (m, )
        1-D array of monotonically increasing real values.
    y : ndarray, shape (m, ...)
        N-D array of real values. The length of *y* along the first axis must
        be equal to the length of *x*.

    Methods
    -------
    __call__

    See Also
    --------
    PchipInterpolator

    Notes
    -----
    .. versionadded:: 0.14

    Use only for precise data, as the fitted curve passes through the given
    points exactly. This routine is useful for plotting a pleasingly smooth
    curve through a few given points for purposes of plotting.

    References
    ----------
    [1] A new method of interpolation and smooth curve fitting based
        on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
        589-602.
    """
    def __init__(self, x, y):
        # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
        # http://www.mathworks.de/matlabcentral/fileexchange/1814-akima-interpolation
        # Bugfix: use <= so that duplicate x values are rejected too; the
        # previous `< 0.` check let duplicates through, producing a silent
        # division by zero in `dx` below.
        if np.any(np.diff(x) <= 0.):
            raise ValueError("x must be strictly ascending")
        if x.ndim != 1:
            raise ValueError("x must be 1-dimensional")
        if x.size < 2:
            raise ValueError("at least 2 breakpoints are needed")
        if x.size != y.shape[0]:
            raise ValueError("x.shape must equal y.shape[0]")

        # determine slopes between breakpoints
        m = np.empty((x.size + 3, ) + y.shape[1:])
        dx = np.diff(x)
        dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
        m[2:-2] = np.diff(y, axis=0) / dx

        # add two additional (extrapolated) points on the left ...
        m[1] = 2. * m[2] - m[3]
        m[0] = 2. * m[1] - m[2]
        # ... and on the right
        m[-2] = 2. * m[-3] - m[-4]
        m[-1] = 2. * m[-2] - m[-3]

        # if m1 == m2 != m3 == m4, the slope at the breakpoint is not
        # defined. This is the fill value:
        t = .5 * (m[3:] + m[:-3])
        # get the denominator of the slope t
        dm = np.abs(np.diff(m, axis=0))
        f1 = dm[2:]
        f2 = dm[:-2]
        f12 = f1 + f2
        # These are the indices where the slope at breakpoint is defined:
        id_ = np.nonzero(f12 > 1e-9 * np.max(f12))[0]
        # set the slope at breakpoint
        t[id_] = (f1[id_] * m[id_ + 1] + f2[id_] * m[id_ + 2]) / f12[id_]

        # calculate the higher order coefficients
        c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx
        d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2

        coeff = np.zeros((4, x.size - 1) + y.shape[1:])
        coeff[3] = y[:-1]
        coeff[2] = t[:-1]
        coeff[1] = c
        coeff[0] = d

        super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False)

    def extend(self):
        raise NotImplementedError("Extending a 1D Akima interpolator is not "
                                  "yet implemented")

    # These are inherited from PPoly, but they do not produce an Akima
    # interpolator. Hence stub them out.
    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")

    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")
|
|
from __future__ import division
"""
Author: Emmett Butler
"""
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["BalancedConsumer"]
import itertools
import logging
import math
import socket
import time
from uuid import uuid4
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeException, NodeExistsError
from kazoo.recipe.watchers import ChildrenWatch
from .common import OffsetType
from .exceptions import (KafkaException, PartitionOwnedError,
ConsumerStoppedException)
from .simpleconsumer import SimpleConsumer
log = logging.getLogger(__name__)
class BalancedConsumer():
"""
A self-balancing consumer for Kafka that uses ZooKeeper to communicate
with other balancing consumers.
Maintains a single instance of SimpleConsumer, periodically using the
consumer rebalancing algorithm to reassign partitions to this
SimpleConsumer.
"""
    def __init__(self,
                 topic,
                 cluster,
                 consumer_group,
                 fetch_message_max_bytes=1024 * 1024,
                 num_consumer_fetchers=1,
                 auto_commit_enable=False,
                 auto_commit_interval_ms=60 * 1000,
                 queued_max_messages=2000,
                 fetch_min_bytes=1,
                 fetch_wait_max_ms=100,
                 offsets_channel_backoff_ms=1000,
                 offsets_commit_max_retries=5,
                 auto_offset_reset=OffsetType.LATEST,
                 consumer_timeout_ms=-1,
                 rebalance_max_retries=5,
                 rebalance_backoff_ms=2 * 1000,
                 zookeeper_connection_timeout_ms=6 * 1000,
                 zookeeper_connect='127.0.0.1:2181',
                 zookeeper=None,
                 auto_start=True,
                 reset_offset_on_start=False,
                 retry_backoff_ms=100,
                 max_retries=3):
        """Create a BalancedConsumer instance

        :param topic: The topic this consumer should consume
        :type topic: :class:`pykafka.topic.Topic`
        :param cluster: The cluster to which this consumer should connect
        :type cluster: :class:`pykafka.cluster.Cluster`
        :param consumer_group: The name of the consumer group this consumer
            should join.
        :type consumer_group: str
        :param fetch_message_max_bytes: The number of bytes of messages to
            attempt to fetch with each fetch request
        :type fetch_message_max_bytes: int
        :param num_consumer_fetchers: The number of workers used to make
            FetchRequests
        :type num_consumer_fetchers: int
        :param auto_commit_enable: If true, periodically commit to kafka the
            offset of messages already fetched by this consumer. This also
            requires that `consumer_group` is not `None`.
        :type auto_commit_enable: bool
        :param auto_commit_interval_ms: The frequency (in milliseconds) at which
            the consumer's offsets are committed to kafka. This setting is
            ignored if `auto_commit_enable` is `False`.
        :type auto_commit_interval_ms: int
        :param queued_max_messages: The maximum number of messages buffered for
            consumption in the internal
            :class:`pykafka.simpleconsumer.SimpleConsumer`
        :type queued_max_messages: int
        :param fetch_min_bytes: The minimum amount of data (in bytes) that the
            server should return for a fetch request. If insufficient data is
            available, the request will block until sufficient data is available.
        :type fetch_min_bytes: int
        :param fetch_wait_max_ms: The maximum amount of time (in milliseconds)
            that the server will block before answering a fetch request if
            there isn't sufficient data to immediately satisfy `fetch_min_bytes`.
        :type fetch_wait_max_ms: int
        :param offsets_channel_backoff_ms: Backoff time to retry failed offset
            commits and fetches.
        :type offsets_channel_backoff_ms: int
        :param offsets_commit_max_retries: The number of times the offset commit
            worker should retry before raising an error.
        :type offsets_commit_max_retries: int
        :param auto_offset_reset: What to do if an offset is out of range. This
            setting indicates how to reset the consumer's internal offset
            counter when an `OffsetOutOfRangeError` is encountered.
        :type auto_offset_reset: :class:`pykafka.common.OffsetType`
        :param consumer_timeout_ms: Amount of time (in milliseconds) the
            consumer may spend without messages available for consumption
            before returning None.
        :type consumer_timeout_ms: int
        :param rebalance_max_retries: The number of times the rebalance should
            retry before raising an error.
        :type rebalance_max_retries: int
        :param rebalance_backoff_ms: Backoff time (in milliseconds) between
            retries during rebalance.
        :type rebalance_backoff_ms: int
        :param zookeeper_connection_timeout_ms: The maximum time (in
            milliseconds) that the consumer waits while establishing a
            connection to zookeeper.
        :type zookeeper_connection_timeout_ms: int
        :param zookeeper_connect: Comma-separated (ip1:port1,ip2:port2) strings
            indicating the zookeeper nodes to which to connect.
        :type zookeeper_connect: str
        :param zookeeper: A KazooClient connected to a Zookeeper instance.
            If provided, `zookeeper_connect` is ignored.
        :type zookeeper: :class:`kazoo.client.KazooClient`
        :param auto_start: Whether the consumer should begin communicating
            with zookeeper after __init__ is complete. If false, communication
            can be started with `start()`.
        :type auto_start: bool
        :param reset_offset_on_start: Whether the consumer should reset its
            internal offset counter to `self._auto_offset_reset` and commit that
            offset immediately upon starting up
        :type reset_offset_on_start: bool
        :param max_retries: How many times to attempt to fetch messages
            before raising an error.
        :type max_retries: int
        :param retry_backoff_ms: The amount of time (in milliseconds) to
            back off during fetch request retries.
        :type retry_backoff_ms: int
        """
        # Plain storage of the constructor arguments; most are forwarded to
        # the internal SimpleConsumer in _setup_internal_consumer().
        self._cluster = cluster
        self._consumer_group = consumer_group
        self._topic = topic
        self._auto_commit_enable = auto_commit_enable
        self._auto_commit_interval_ms = auto_commit_interval_ms
        self._fetch_message_max_bytes = fetch_message_max_bytes
        self._fetch_min_bytes = fetch_min_bytes
        self._rebalance_max_retries = rebalance_max_retries
        self._num_consumer_fetchers = num_consumer_fetchers
        self._queued_max_messages = queued_max_messages
        self._fetch_wait_max_ms = fetch_wait_max_ms
        self._rebalance_backoff_ms = rebalance_backoff_ms
        self._consumer_timeout_ms = consumer_timeout_ms
        self._offsets_channel_backoff_ms = offsets_channel_backoff_ms
        self._offsets_commit_max_retries = offsets_commit_max_retries
        self._auto_offset_reset = auto_offset_reset
        self._zookeeper_connect = zookeeper_connect
        self._zookeeper_connection_timeout_ms = zookeeper_connection_timeout_ms
        self._reset_offset_on_start = reset_offset_on_start
        self._running = False
        # Serializes rebalance operations within this process
        self._rebalancing_lock = cluster.handler.Lock()
        self._consumer = None
        # Unique id identifying this consumer instance within the group
        self._consumer_id = "{hostname}:{uuid}".format(
            hostname=socket.gethostname(),
            uuid=uuid4()
        )
        self._partitions = set()
        self._setting_watches = True
        self._max_retries = max_retries
        self._retry_backoff_ms = retry_backoff_ms
        # Zookeeper paths used for partition ownership and consumer ids
        self._topic_path = '/consumers/{group}/owners/{topic}'.format(
            group=self._consumer_group,
            topic=self._topic.name)
        self._consumer_id_path = '/consumers/{group}/ids'.format(
            group=self._consumer_group)
        self._zookeeper = None
        if zookeeper is not None:
            self._zookeeper = zookeeper
        if auto_start is True:
            self.start()
def __repr__(self):
return "<{module}.{name} at {id_} (consumer_group={group})>".format(
module=self.__class__.__module__,
name=self.__class__.__name__,
id_=hex(id(self)),
group=self._consumer_group
)
    def _setup_checker_worker(self):
        """Start the zookeeper partition checker thread.

        The spawned worker wakes every 120 seconds, exits if the consumer
        has stopped, and otherwise re-checks held partitions.
        """
        def checker():
            while True:
                # Sleep first, then test the running flag, so the loop
                # exits promptly after stop() without doing a final check.
                time.sleep(120)
                if not self._running:
                    break
                self._check_held_partitions()
            log.debug("Checker thread exiting")
        log.debug("Starting checker thread")
        return self._cluster.handler.spawn(checker)
@property
def partitions(self):
return self._consumer.partitions if self._consumer else None
@property
def held_offsets(self):
"""Return a map from partition id to held offset for each partition"""
if not self._consumer:
return None
return dict((p.partition.id, p.last_offset_consumed)
for p in self._consumer._partitions_by_id.itervalues())
    def start(self):
        """Open connections and join a cluster."""
        if self._zookeeper is None:
            # Only create a client when one wasn't supplied to __init__
            self._setup_zookeeper(self._zookeeper_connect,
                                  self._zookeeper_connection_timeout_ms)
        self._zookeeper.ensure_path(self._topic_path)
        self._add_self()
        self._set_watches()
        self._rebalance()
        # Mark running before spawning the checker so its loop condition holds
        self._running = True
        self._setup_checker_worker()
def stop(self):
"""Close the zookeeper connection and stop consuming.
This method should be called as part of a graceful shutdown process.
"""
self._zookeeper.stop()
self._consumer.stop()
self._running = False
    def _setup_zookeeper(self, zookeeper_connect, timeout):
        """Open a connection to a ZooKeeper host.

        :param zookeeper_connect: The 'ip:port' address of the zookeeper node to
            which to connect.
        :type zookeeper_connect: str
        :param timeout: Connection timeout (in milliseconds)
        :type timeout: int
        """
        # KazooClient takes its timeout in seconds, hence the /1000
        self._zookeeper = KazooClient(zookeeper_connect, timeout=timeout / 1000)
        self._zookeeper.start()
    def _setup_internal_consumer(self, start=True):
        """Instantiate an internal SimpleConsumer.
        If there is already a SimpleConsumer instance held by this object,
        disable its workers and mark it for garbage collection before
        creating a new one.

        :param start: Whether the new SimpleConsumer should start its
            workers immediately (passed through as ``auto_start``).
        :type start: bool
        """
        reset_offset_on_start = self._reset_offset_on_start
        if self._consumer is not None:
            self._consumer.stop()
            # only use this setting for the first call to
            # _setup_internal_consumer. subsequent calls should not
            # reset the offsets, since they can happen at any time
            reset_offset_on_start = False
        # Forward every tuning knob captured on this object at construction
        # time; the partition set is snapshotted via list() so later
        # rebalances don't mutate the consumer's view mid-flight.
        self._consumer = SimpleConsumer(
            self._topic,
            self._cluster,
            consumer_group=self._consumer_group,
            partitions=list(self._partitions),
            auto_commit_enable=self._auto_commit_enable,
            auto_commit_interval_ms=self._auto_commit_interval_ms,
            fetch_message_max_bytes=self._fetch_message_max_bytes,
            fetch_min_bytes=self._fetch_min_bytes,
            num_consumer_fetchers=self._num_consumer_fetchers,
            queued_max_messages=self._queued_max_messages,
            fetch_wait_max_ms=self._fetch_wait_max_ms,
            consumer_timeout_ms=self._consumer_timeout_ms,
            offsets_channel_backoff_ms=self._offsets_channel_backoff_ms,
            offsets_commit_max_retries=self._offsets_commit_max_retries,
            auto_offset_reset=self._auto_offset_reset,
            reset_offset_on_start=reset_offset_on_start,
            auto_start=start,
            retry_backoff_ms=self._retry_backoff_ms,
            max_retries=self._max_retries
        )
def _decide_partitions(self, participants):
"""Decide which partitions belong to this consumer.
Uses the consumer rebalancing algorithm described here
http://kafka.apache.org/documentation.html
It is very important that the participants array is sorted,
since this algorithm runs on each consumer and indexes into the same
array. The same array index operation must return the same
result on each consumer.
:param participants: Sorted list of ids of all other consumers in this
consumer group.
:type participants: Iterable of str
"""
# Freeze and sort partitions so we always have the same results
p_to_str = lambda p: '-'.join([p.topic.name, str(p.leader.id), str(p.id)])
all_parts = self._topic.partitions.values()
all_parts.sort(key=p_to_str)
# get start point, # of partitions, and remainder
participants.sort() # just make sure it's sorted.
idx = participants.index(self._consumer_id)
parts_per_consumer = math.floor(len(all_parts) / len(participants))
remainder_ppc = len(all_parts) % len(participants)
start = parts_per_consumer * idx + min(idx, remainder_ppc)
num_parts = parts_per_consumer + (0 if (idx + 1 > remainder_ppc) else 1)
# assign partitions from i*N to (i+1)*N - 1 to consumer Ci
new_partitions = itertools.islice(all_parts, start, start + num_parts)
new_partitions = set(new_partitions)
log.info('Balancing %i participants for %i partitions.\nOwning %i partitions.',
len(participants), len(all_parts), len(new_partitions))
log.debug('My partitions: %s', [p_to_str(p) for p in new_partitions])
return new_partitions
    def _get_participants(self):
        """Use zookeeper to get the other consumers of this topic.
        :return: A sorted list of the ids of the other consumers of this
            consumer's topic
        """
        try:
            consumer_ids = self._zookeeper.get_children(self._consumer_id_path)
        except NoNodeException:
            # Nobody has registered under this group yet.
            log.debug("Consumer group doesn't exist. "
                      "No participants to find")
            return []
        participants = []
        for id_ in consumer_ids:
            try:
                # Each consumer znode's payload is the topic name it consumes;
                # only consumers of *this* topic count as participants.
                topic, stat = self._zookeeper.get("%s/%s" % (self._consumer_id_path, id_))
                if topic == self._topic.name:
                    participants.append(id_)
            except NoNodeException:
                pass  # disappeared between ``get_children`` and ``get``
        # Sorted order is required by the rebalancing algorithm
        # (see _decide_partitions).
        participants.sort()
        return participants
    def _set_watches(self):
        """Set watches in zookeeper that will trigger rebalances.
        Rebalances should be triggered whenever a broker, topic, or consumer
        znode is changed in zookeeper. This ensures that the balance of the
        consumer group remains up-to-date with the current state of the
        cluster.
        """
        # While this flag is True the change callbacks below are no-ops;
        # kazoo ChildrenWatch fires the callback immediately on registration
        # and we don't want those initial calls to trigger rebalances.
        self._setting_watches = True
        # Set all our watches and then rebalance
        broker_path = '/brokers/ids'
        try:
            self._broker_watcher = ChildrenWatch(
                self._zookeeper, broker_path,
                self._brokers_changed
            )
        except NoNodeException:
            raise Exception(
                'The broker_path "%s" does not exist in your '
                'ZooKeeper cluster -- is your Kafka cluster running?'
                % broker_path)
        self._topics_watcher = ChildrenWatch(
            self._zookeeper,
            '/brokers/topics',
            self._topics_changed
        )
        self._consumer_watcher = ChildrenWatch(
            self._zookeeper, self._consumer_id_path,
            self._consumers_changed
        )
        self._setting_watches = False
    def _add_self(self):
        """Register this consumer in zookeeper.
        This method ensures that the number of participants is at most the
        number of partitions.
        """
        # NOTE(review): this check is read-then-write and therefore racy --
        # another consumer may register between _get_participants() and
        # create(); zookeeper itself does not enforce the limit.
        participants = self._get_participants()
        if len(self._topic.partitions) <= len(participants):
            raise KafkaException("Cannot add consumer: more consumers than partitions")
        path = '{path}/{id_}'.format(
            path=self._consumer_id_path,
            id_=self._consumer_id
        )
        # Ephemeral znode: our registration disappears automatically if the
        # zookeeper session dies, so crashed consumers free their slot.
        self._zookeeper.create(
            path, self._topic.name, ephemeral=True, makepath=True)
    def _rebalance(self):
        """Claim partitions for this consumer.
        This method is called whenever a zookeeper watch is triggered.

        Commits current offsets first so another consumer picking up a
        released partition resumes from the right place, then retries the
        claim up to ``_rebalance_max_retries`` times with linear backoff.
        """
        if self._consumer is not None:
            self.commit_offsets()
        # Serialize rebalances: watches can fire concurrently.
        with self._rebalancing_lock:
            log.info('Rebalancing consumer %s for topic %s.' % (
                self._consumer_id, self._topic.name)
            )
            for i in xrange(self._rebalance_max_retries):
                try:
                    # If retrying, be sure to make sure the
                    # partition allocation is correct.
                    participants = self._get_participants()
                    partitions = self._decide_partitions(participants)
                    # Release partitions we no longer own before claiming
                    # new ones, so other consumers can acquire them.
                    old_partitions = self._partitions - partitions
                    self._remove_partitions(old_partitions)
                    new_partitions = partitions - self._partitions
                    self._add_partitions(new_partitions)
                    # Only re-create internal consumer if something changed.
                    if old_partitions or new_partitions:
                        self._setup_internal_consumer()
                    log.info('Rebalancing Complete.')
                    break
                except PartitionOwnedError as ex:
                    if i == self._rebalance_max_retries - 1:
                        log.warning('Failed to acquire partition %s after %d retries.',
                                    ex.partition, i)
                        raise
                    log.info('Unable to acquire partition %s. Retrying', ex.partition)
                    # NOTE(review): i == 0 on the first failure, so the first
                    # retry happens with no backoff at all.
                    time.sleep(i * (self._rebalance_backoff_ms / 1000))
def _path_from_partition(self, p):
"""Given a partition, return its path in zookeeper.
:type p: :class:`pykafka.partition.Partition`
"""
return "%s/%s-%s" % (self._topic_path, p.leader.id, p.id)
    def _remove_partitions(self, partitions):
        """Remove partitions from the zookeeper registry for this consumer.
        Also remove these partitions from the consumer's internal
        partition registry.
        :param partitions: The partitions to remove.
        :type partitions: Iterable of :class:`pykafka.partition.Partition`
        """
        for p in partitions:
            # Sanity check: we should only ever release partitions we hold.
            assert p in self._partitions
            self._zookeeper.delete(self._path_from_partition(p))
        self._partitions -= partitions
    def _add_partitions(self, partitions):
        """Add partitions to the zookeeper registry for this consumer.
        Also add these partitions to the consumer's internal partition registry.
        :param partitions: The partitions to add.
        :type partitions: Iterable of :class:`pykafka.partition.Partition`
        :raises PartitionOwnedError: if another consumer already holds the
            ownership znode for a partition
        """
        for p in partitions:
            try:
                # Ephemeral ownership marker; the payload records who owns it.
                self._zookeeper.create(
                    self._path_from_partition(p),
                    value=self._consumer_id,
                    ephemeral=True
                )
                self._partitions.add(p)
            except NodeExistsError:
                # Someone else created the znode first; surface to _rebalance
                # which retries with backoff.
                raise PartitionOwnedError(p)
    def _check_held_partitions(self):
        """Double-check held partitions against zookeeper
        Ensure that the partitions held by this consumer are the ones that
        zookeeper thinks it's holding. If not, rebalance.
        """
        log.info("Checking held partitions against ZooKeeper")
        # build a set of partition ids zookeeper says we own
        zk_partition_ids = set()
        all_partitions = self._zookeeper.get_children(self._topic_path)
        for partition_slug in all_partitions:
            owner_id, stat = self._zookeeper.get(
                '{path}/{slug}'.format(
                    path=self._topic_path, slug=partition_slug))
            if owner_id == self._consumer_id:
                # slug format is "<leader id>-<partition id>"
                # (see _path_from_partition)
                zk_partition_ids.add(int(partition_slug.split('-')[1]))
        # build a set of partition ids we think we own
        internal_partition_ids = set([p.id for p in self._partitions])
        # compare the two sets, rebalance if necessary
        if internal_partition_ids != zk_partition_ids:
            log.warning("Internal partition registry doesn't match ZooKeeper!")
            log.debug("Internal partition ids: %s\nZooKeeper partition ids: %s",
                      internal_partition_ids, zk_partition_ids)
            self._rebalance()
    def _brokers_changed(self, brokers):
        """ChildrenWatch callback for '/brokers/ids' changes."""
        # Ignore the initial callback fired while watches are being set up.
        if self._setting_watches:
            return
        log.debug("Rebalance triggered by broker change")
        self._rebalance()
    def _consumers_changed(self, consumers):
        """ChildrenWatch callback for consumer-group membership changes."""
        # Ignore the initial callback fired while watches are being set up.
        if self._setting_watches:
            return
        log.debug("Rebalance triggered by consumer change")
        self._rebalance()
    def _topics_changed(self, topics):
        """ChildrenWatch callback for '/brokers/topics' changes."""
        # Ignore the initial callback fired while watches are being set up.
        if self._setting_watches:
            return
        log.debug("Rebalance triggered by topic change")
        self._rebalance()
    def get_last_committed_offsets(self):
        """Get the offsets of the consumer
        :returns dict with partition id as key and offset of the partition as value
        :raises ConsumerStoppedException: if the internal consumer is stopped
        """
        if not self._consumer:
            raise ConsumerStoppedException("Internal consumer is stopped")
        return self._consumer.get_last_committed_offsets()
    def get_partition_lags(self):
        """Get the number of pending messages per partition for the consumer
        :returns dict with partition id as key and lag of the partition as value
        :raises ConsumerStoppedException: if the internal consumer is stopped
        """
        if not self._consumer:
            raise ConsumerStoppedException("Internal consumer is stopped")
        return self._consumer.get_partition_lags()
    def reset_offsets(self, partition_offsets=None):
        """Reset offsets for the specified partitions
        Issue an OffsetRequest for each partition and set the appropriate
        returned offset in the OwnedPartition
        :param partition_offsets: (`partition`, `offset`) pairs to reset
            where `partition` is the partition for which to reset the offset
            and `offset` is the new offset the partition should have
        :type partition_offsets: Iterable of
            (:class:`pykafka.partition.Partition`, int)
        :raises ConsumerStoppedException: if the internal consumer is stopped
        """
        if not self._consumer:
            raise ConsumerStoppedException("Internal consumer is stopped")
        # Delegates entirely to the internal SimpleConsumer.
        self._consumer.reset_offsets(partition_offsets=partition_offsets)
    def consume(self, block=True):
        """Get one message from the consumer

        Loops on the internal consumer until a message arrives or
        ``_consumer_timeout_ms`` elapses without one.

        :param block: Whether to block while waiting for a message
        :type block: bool
        :returns: a message, or None on timeout / non-blocking miss
        """
        def consumer_timed_out():
            """Indicates whether the consumer has received messages recently"""
            # -1 means "never time out".
            if self._consumer_timeout_ms == -1:
                return False
            disp = (time.time() - self._last_message_time) * 1000.0
            return disp > self._consumer_timeout_ms
        message = None
        self._last_message_time = time.time()
        while message is None and not consumer_timed_out():
            try:
                message = self._consumer.consume(block=block)
            except ConsumerStoppedException:
                # A rebalance may swap out the internal consumer beneath us;
                # retry unless this balanced consumer itself is shutting down.
                if not self._running:
                    return
                continue
            if message:
                self._last_message_time = time.time()
            if not block:
                return message
        return message
    def commit_offsets(self):
        """Commit offsets for this consumer's partitions
        Uses the offset commit/fetch API

        # NOTE(review): unlike the getters above, this does not guard
        # against ``self._consumer`` being None -- confirm callers only
        # invoke it after start().
        """
        return self._consumer.commit_offsets()
|
|
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
_default_compressor = "blosc"
pytestmark = [pytest.mark.single, td.skip_array_manager_not_yet_implemented]
def test_conv_read_write(setup_path):
    """Round-trip Series/DataFrame objects through to_hdf/read_hdf."""
    with tm.ensure_clean() as path:
        def roundtrip(key, obj, **kwargs):
            # Write under `key`, then read the same key back.
            obj.to_hdf(path, key, **kwargs)
            return read_hdf(path, key)
        o = tm.makeTimeSeries()
        tm.assert_series_equal(o, roundtrip("series", o))
        o = tm.makeStringSeries()
        tm.assert_series_equal(o, roundtrip("string_series", o))
        o = tm.makeDataFrame()
        tm.assert_frame_equal(o, roundtrip("frame", o))
        # table
        df = DataFrame({"A": range(5), "B": range(5)})
        df.to_hdf(path, "table", append=True)
        # Table format supports `where` queries on the index.
        result = read_hdf(path, "table", where=["index>2"])
        tm.assert_frame_equal(df[df.index > 2], result)
def test_long_strings(setup_path):
    """100-char strings survive an append/select round trip (GH6166)."""
    # GH6166
    df = DataFrame(
        {"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
    )
    with ensure_clean_store(setup_path) as store:
        store.append("df", df, data_columns=["a"])
        result = store.select("df")
        tm.assert_frame_equal(df, result)
def test_api(setup_path):
    """to_hdf/append accept `append` and `format` args in combination (GH4584)."""
    # GH4584
    # API issue when to_hdf doesn't accept append AND format args
    with ensure_clean_path(setup_path) as path:
        df = tm.makeDataFrame()
        # Two appends under the same key reassemble the full frame.
        df.iloc[:10].to_hdf(path, "df", append=True, format="table")
        df.iloc[10:].to_hdf(path, "df", append=True, format="table")
        tm.assert_frame_equal(read_hdf(path, "df"), df)
        # append to False
        df.iloc[:10].to_hdf(path, "df", append=False, format="table")
        df.iloc[10:].to_hdf(path, "df", append=True, format="table")
        tm.assert_frame_equal(read_hdf(path, "df"), df)
    with ensure_clean_path(setup_path) as path:
        df = tm.makeDataFrame()
        # format defaults to table when append=True
        df.iloc[:10].to_hdf(path, "df", append=True)
        df.iloc[10:].to_hdf(path, "df", append=True, format="table")
        tm.assert_frame_equal(read_hdf(path, "df"), df)
        # append to False
        df.iloc[:10].to_hdf(path, "df", append=False, format="table")
        df.iloc[10:].to_hdf(path, "df", append=True)
        tm.assert_frame_equal(read_hdf(path, "df"), df)
    with ensure_clean_path(setup_path) as path:
        df = tm.makeDataFrame()
        # "fixed", its "f" abbreviation, and the default are equivalent here
        df.to_hdf(path, "df", append=False, format="fixed")
        tm.assert_frame_equal(read_hdf(path, "df"), df)
        df.to_hdf(path, "df", append=False, format="f")
        tm.assert_frame_equal(read_hdf(path, "df"), df)
        df.to_hdf(path, "df", append=False)
        tm.assert_frame_equal(read_hdf(path, "df"), df)
        df.to_hdf(path, "df")
        tm.assert_frame_equal(read_hdf(path, "df"), df)
    with ensure_clean_store(setup_path) as store:
        df = tm.makeDataFrame()
        _maybe_remove(store, "df")
        store.append("df", df.iloc[:10], append=True, format="table")
        store.append("df", df.iloc[10:], append=True, format="table")
        tm.assert_frame_equal(store.select("df"), df)
        # append to False
        _maybe_remove(store, "df")
        store.append("df", df.iloc[:10], append=False, format="table")
        store.append("df", df.iloc[10:], append=True, format="table")
        tm.assert_frame_equal(store.select("df"), df)
        # formats
        _maybe_remove(store, "df")
        store.append("df", df.iloc[:10], append=False, format="table")
        store.append("df", df.iloc[10:], append=True, format="table")
        tm.assert_frame_equal(store.select("df"), df)
        _maybe_remove(store, "df")
        store.append("df", df.iloc[:10], append=False, format="table")
        # format=None falls back to the existing table format
        store.append("df", df.iloc[10:], append=True, format=None)
        tm.assert_frame_equal(store.select("df"), df)
    with ensure_clean_path(setup_path) as path:
        # Invalid.
        df = tm.makeDataFrame()
        msg = "Can only append to Tables"
        with pytest.raises(ValueError, match=msg):
            df.to_hdf(path, "df", append=True, format="f")
        with pytest.raises(ValueError, match=msg):
            df.to_hdf(path, "df", append=True, format="fixed")
        msg = r"invalid HDFStore format specified \[foo\]"
        with pytest.raises(TypeError, match=msg):
            df.to_hdf(path, "df", append=True, format="foo")
        with pytest.raises(TypeError, match=msg):
            df.to_hdf(path, "df", append=False, format="foo")
    # File path doesn't exist
    path = ""
    msg = f"File {path} does not exist"
    with pytest.raises(FileNotFoundError, match=msg):
        read_hdf(path, "df")
def test_get(setup_path):
    """store.get and __getitem__ agree, with and without leading slash."""
    with ensure_clean_store(setup_path) as store:
        store["a"] = tm.makeTimeSeries()
        left = store.get("a")
        right = store["a"]
        tm.assert_series_equal(left, right)
        left = store.get("/a")
        right = store["/a"]
        tm.assert_series_equal(left, right)
        # Missing keys raise KeyError with a descriptive message.
        with pytest.raises(KeyError, match="'No object named b in the file'"):
            store.get("b")
def test_put_integer(setup_path):
    """Frames with non-date, non-string (integer) axes round-trip."""
    # non-date, non-string index
    df = DataFrame(np.random.randn(50, 100))
    _check_roundtrip(df, tm.assert_frame_equal, setup_path)
def test_table_values_dtypes_roundtrip(setup_path):
    """dtypes are preserved through table append/select, and appending an
    incompatible dtype raises."""
    with ensure_clean_store(setup_path) as store:
        df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
        store.append("df_f8", df1)
        tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
        df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
        store.append("df_i8", df2)
        tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
        # incompatible dtype
        msg = re.escape(
            "invalid combination of [values_axes] on appending data "
            "[name->values_block_0,cname->values_block_0,"
            "dtype->float64,kind->float,shape->(1, 3)] vs "
            "current table [name->values_block_0,"
            "cname->values_block_0,dtype->int64,kind->integer,"
            "shape->None]"
        )
        with pytest.raises(ValueError, match=msg):
            store.append("df_i8", df1)
        # check creation/storage/retrieval of float32 (a bit hacky to
        # actually create them thought)
        df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
        store.append("df_f4", df1)
        tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
        assert df1.dtypes[0] == "float32"
        # check with mixed dtypes
        df1 = DataFrame(
            {
                c: Series(np.random.randint(5), dtype=c)
                for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
            }
        )
        df1["string"] = "foo"
        df1["float322"] = 1.0
        df1["float322"] = df1["float322"].astype("float32")
        df1["bool"] = df1["float32"] > 0
        df1["time1"] = Timestamp("20130101")
        df1["time2"] = Timestamp("20130102")
        store.append("df_mixed_dtypes1", df1)
        # Compare dtype frequency counts (index stringified for comparison).
        result = store.select("df_mixed_dtypes1").dtypes.value_counts()
        result.index = [str(i) for i in result.index]
        expected = Series(
            {
                "float32": 2,
                "float64": 1,
                "int32": 1,
                "bool": 1,
                "int16": 1,
                "int8": 1,
                "int64": 1,
                "object": 1,
                "datetime64[ns]": 2,
            }
        )
        result = result.sort_index()
        expected = expected.sort_index()
        tm.assert_series_equal(result, expected)
def test_series(setup_path):
    """Series with string, datetime, and object indexes round-trip."""
    s = tm.makeStringSeries()
    _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
    ts = tm.makeTimeSeries()
    _check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
    ts2 = Series(ts.index, Index(ts.index, dtype=object))
    _check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
    # Object-dtype index of datetimes: index type may change on reload.
    ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
    _check_roundtrip(
        ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
    )
def test_float_index(setup_path):
    """Series with a float index round-trips (GH #454)."""
    # GH #454
    index = np.random.randn(10)
    s = Series(np.random.randn(10), index=index)
    _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_tuple_index(setup_path):
    """Frames with a tuple index round-trip (GH #492)."""
    # GH #492
    col = np.arange(10)
    idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
    data = np.random.randn(30).reshape((3, 10))
    DF = DataFrame(data, index=idx, columns=col)
    # Tuple indexes are pickled, which emits a PerformanceWarning.
    with catch_warnings(record=True):
        simplefilter("ignore", pd.errors.PerformanceWarning)
        _check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(setup_path):
    """Series with heterogeneous (mixed-type) indexes round-trip with
    index type preserved."""
    with catch_warnings(record=True):
        values = np.random.randn(2)
        func = lambda l, r: tm.assert_series_equal(l, r, check_index_type=True)
    # NOTE(review): the first five cases below are repeated again outside
    # their own catch_warnings blocks -- looks like historical duplication.
    with catch_warnings(record=True):
        ser = Series(values, [0, "y"])
        _check_roundtrip(ser, func, path=setup_path)
    with catch_warnings(record=True):
        ser = Series(values, [datetime.datetime.today(), 0])
        _check_roundtrip(ser, func, path=setup_path)
    with catch_warnings(record=True):
        ser = Series(values, ["y", 0])
        _check_roundtrip(ser, func, path=setup_path)
    with catch_warnings(record=True):
        ser = Series(values, [datetime.date.today(), "a"])
        _check_roundtrip(ser, func, path=setup_path)
    with catch_warnings(record=True):
        ser = Series(values, [0, "y"])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, [datetime.datetime.today(), 0])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, ["y", 0])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, [datetime.date.today(), "a"])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, [1.23, "b"])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, [1, 1.53])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(values, [1, 5])
        _check_roundtrip(ser, func, path=setup_path)
        ser = Series(
            values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
        )
        _check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(setup_path):
    """Time series with pre-1970 (pre-epoch) dates round-trip."""
    dr = bdate_range("1/1/1940", "1/1/1960")
    ts = Series(np.random.randn(len(dr)), index=dr)
    try:
        _check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
    except OverflowError:
        # Pre-epoch timestamps can overflow on some Windows builds.
        pytest.skip("known failure on some windows platforms")
@pytest.mark.parametrize(
    "compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(compression, setup_path):
    """DataFrames round-trip in fixed and table formats, with and without
    compression, including frames containing NaNs and empty frames."""
    df = tm.makeDataFrame()
    # put in some random NAs
    df.values[0, 0] = np.nan
    df.values[5, 3] = np.nan
    _check_roundtrip_table(
        df, tm.assert_frame_equal, path=setup_path, compression=compression
    )
    _check_roundtrip(
        df, tm.assert_frame_equal, path=setup_path, compression=compression
    )
    tdf = tm.makeTimeDataFrame()
    _check_roundtrip(
        tdf, tm.assert_frame_equal, path=setup_path, compression=compression
    )
    with ensure_clean_store(setup_path) as store:
        # not consolidated
        df["foo"] = np.random.randn(len(df))
        store["df"] = df
        recons = store["df"]
        # Storage should hand back a consolidated frame.
        assert recons._mgr.is_consolidated()
    # empty
    _check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
def test_empty_series_frame(setup_path):
    """Empty Series/DataFrames (no data, index-only, columns-only) round-trip."""
    s0 = Series(dtype=object)
    s1 = Series(name="myseries", dtype=object)
    df0 = DataFrame()
    df1 = DataFrame(index=["a", "b", "c"])
    df2 = DataFrame(columns=["d", "e", "f"])
    _check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
    _check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
    _check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
    _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
    _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@pytest.mark.parametrize("dtype", [np.int64, np.float64, object, "m8[ns]", "M8[ns]"])
def test_empty_series(dtype, setup_path):
    """Empty Series of each dtype round-trip with dtype preserved."""
    s = Series(dtype=dtype)
    _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(setup_path):
    """Frames indexed by datetime.date objects round-trip."""
    rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
    frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
    _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(setup_path):
    """MultiIndex frames round-trip, including transposed frames and a
    column slice, with index names preserved."""
    index = MultiIndex(
        levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
        codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
        names=["foo", "bar"],
    )
    frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
    _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
    _check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
    _check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
    # check that the names are stored
    with ensure_clean_store(setup_path) as store:
        store["frame"] = frame
        recons = store["frame"]
        tm.assert_frame_equal(recons, frame)
@pytest.mark.parametrize(
    "compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(compression, setup_path):
    """Mixed-dtype frames (floats, strings, bools, ints) and their columns
    round-trip, and repeated assignment to the same key overwrites."""
    def _make_one():
        # Build a frame combining object, bool, and int columns.
        df = tm.makeDataFrame()
        df["obj1"] = "foo"
        df["obj2"] = "bar"
        df["bool1"] = df["A"] > 0
        df["bool2"] = df["B"] > 0
        df["int1"] = 1
        df["int2"] = 2
        return df._consolidate()
    df1 = _make_one()
    df2 = _make_one()
    _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
    _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
    with ensure_clean_store(setup_path) as store:
        store["obj"] = df1
        tm.assert_frame_equal(store["obj"], df1)
        store["obj"] = df2
        tm.assert_frame_equal(store["obj"], df2)
    # check that can store Series of all of these types
    _check_roundtrip(
        df1["obj1"],
        tm.assert_series_equal,
        path=setup_path,
        compression=compression,
    )
    _check_roundtrip(
        df1["bool1"],
        tm.assert_series_equal,
        path=setup_path,
        compression=compression,
    )
    _check_roundtrip(
        df1["int1"],
        tm.assert_series_equal,
        path=setup_path,
        compression=compression,
    )
def _check_roundtrip(obj, comparator, path, compression=False, **kwargs):
    """Write `obj` to a fresh fixed-format store, read it back, and assert
    equality via `comparator` (extra kwargs forwarded to the comparator)."""
    options = {}
    if compression:
        options["complib"] = _default_compressor
    # "w" truncates any existing file so each check starts clean.
    with ensure_clean_store(path, "w", **options) as store:
        store["obj"] = obj
        retrieved = store["obj"]
        comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(self, obj, comparator, path, compression=False, **kwargs):
    """Write, read back, re-write the retrieved object, and compare both
    reads against the original.

    # NOTE(review): the leading `self` parameter looks like a leftover from
    # a class-based test and is never used -- confirm no caller relies on it.
    # NOTE(review): unlike _check_roundtrip, this assigns
    # `compression or _default_compressor`; with compression=True that sets
    # complib to True rather than a compressor name -- verify intent.
    """
    options = {}
    if compression:
        options["complib"] = compression or _default_compressor
    with ensure_clean_store(path, "w", **options) as store:
        store["obj"] = obj
        retrieved = store["obj"]
        comparator(retrieved, obj, **kwargs)
        store["obj"] = retrieved
        again = store["obj"]
        comparator(again, obj, **kwargs)
def _check_roundtrip_table(obj, comparator, path, compression=False):
    """Like _check_roundtrip, but stores `obj` in table (queryable) format."""
    options = {}
    if compression:
        options["complib"] = _default_compressor
    with ensure_clean_store(path, "w", **options) as store:
        store.put("obj", obj, format="table")
        retrieved = store["obj"]
        comparator(retrieved, obj)
def test_unicode_index(setup_path):
    """Series with non-ASCII (Greek) index labels round-trip."""
    unicode_values = ["\u03c3", "\u03c3\u03c3"]
    # PerformanceWarning
    with catch_warnings(record=True):
        simplefilter("ignore", pd.errors.PerformanceWarning)
        s = Series(np.random.randn(len(unicode_values)), unicode_values)
        _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_unicode_longer_encoded(setup_path):
    """Values whose utf-8 encoding is longer than their character count
    round-trip in table format (GH 11234)."""
    # GH 11234
    char = "\u0394"
    df = DataFrame({"A": [char]})
    with ensure_clean_store(setup_path) as store:
        store.put("df", df, format="table", encoding="utf-8")
        result = store.get("df")
        tm.assert_frame_equal(result, df)
    df = DataFrame({"A": ["a", char], "B": ["b", "b"]})
    with ensure_clean_store(setup_path) as store:
        store.put("df", df, format="table", encoding="utf-8")
        result = store.get("df")
        tm.assert_frame_equal(result, df)
def test_store_datetime_mixed(setup_path):
    """Frames mixing numeric, string, and datetime columns round-trip."""
    df = DataFrame({"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["a", "b", "c"]})
    ts = tm.makeTimeSeries()
    df["d"] = ts.index[:3]
    _check_roundtrip(df, tm.assert_frame_equal, path=setup_path)
def test_round_trip_equals(setup_path):
    """A round-tripped frame compares equal in both directions (GH 9330)."""
    # GH 9330
    df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
    with ensure_clean_path(setup_path) as path:
        df.to_hdf(path, "df", format="table")
        other = read_hdf(path, "df")
        tm.assert_frame_equal(df, other)
        assert df.equals(other)
        assert other.equals(df)
|
|
# Copyright 2020 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import logging
from typing import Any
from typing import List
from typing import Optional
import tabview as t
from policyuniverse.arn import ARN
from tabulate import tabulate
from tqdm import tqdm
import repokid.hooks
from repokid.exceptions import MissingRepoableServices
from repokid.role import Role
from repokid.role import RoleList
from repokid.types import RepokidConfig
from repokid.types import RepokidHooks
from repokid.utils.dynamo import find_role_in_cache
from repokid.utils.dynamo import get_all_role_ids_for_account
from repokid.utils.dynamo import role_arns_for_all_accounts
from repokid.utils.iam import inline_policies_size_exceeds_maximum
from repokid.utils.permissions import get_permissions_in_policy
from repokid.utils.permissions import get_services_in_permissions
LOGGER = logging.getLogger("repokid")
def _display_roles(account_number: str, inactive: bool = False) -> None:
    """
    Display a table with data about all roles in an account and write a csv file with the data.
    Args:
        account_number (string)
        inactive (bool): show roles that have historically (but not currently) existed in the account if True
    Returns:
        None
    """
    headers = [
        "Name",
        "Refreshed",
        "Disqualified By",
        "Can be repoed",
        "Permissions",
        "Repoable",
        "Repoed",
        "Services",
    ]
    rows: List[List[Any]] = []
    role_ids = get_all_role_ids_for_account(account_number)
    roles = RoleList.from_ids(role_ids)
    if not inactive:
        roles = roles.get_active()
    for role in roles:
        rows.append(
            [
                role.role_name,
                role.refreshed,
                role.disqualified_by,
                len(role.disqualified_by) == 0,
                role.total_permissions,
                role.repoable_permissions,
                role.repoed,
                role.repoable_services,
            ]
        )
    # Sort by repoable permission count, then name, then total permissions.
    rows = sorted(rows, key=lambda x: (x[5], x[0], x[4]))
    rows.insert(0, headers)
    # print tabulate(rows, headers=headers)
    t.view(rows)
    # newline="" per the csv module docs, so the writer controls line
    # endings (avoids blank rows on Windows).
    with open("table.csv", "w", newline="") as csvfile:
        csv_writer = csv.writer(csvfile)
        # `rows` already starts with the header row (inserted above for
        # t.view); writing headers separately as well duplicated them
        # in the csv output.
        csv_writer.writerows(rows)
def _find_roles_with_permissions(permissions: List[str], output_file: str) -> None:
    """
    Search roles in all accounts for a policy with any of the provided permissions, log the ARN of each role.
    Args:
        permissions (list[string]): The name of the permissions to find
        output_file (string): filename to write the output
    Returns:
        None
    """
    # Normalize the requested permissions once, up front.
    wanted = {p.lower() for p in permissions}
    matching_arns: List[str] = []
    all_role_ids = role_arns_for_all_accounts()
    candidate_roles = RoleList.from_ids(
        all_role_ids, fields=["Policies", "RoleName", "Arn", "Active"]
    )
    for candidate in candidate_roles:
        candidate_permissions, _ = candidate.get_permissions_for_policy_version()
        found_permissions = wanted.intersection(candidate_permissions)
        if found_permissions and candidate.active:
            matching_arns.append(candidate.arn)
            LOGGER.info(
                "ARN {arn} has {permissions}".format(
                    arn=candidate.arn, permissions=list(found_permissions)
                )
            )
    if not output_file:
        return
    with open(output_file, "w") as fd:
        json.dump(matching_arns, fd)
    LOGGER.info(f"Output written to file {output_file}")
def _display_role(
    account_number: str,
    role_name: str,
    config: RepokidConfig,
) -> None:
    """
    Displays data about a role in a given account:
      1) Name, which filters are disqualifying it from repo, if it's repoable, total/repoable permissions,
         when it was last repoed, which services can be repoed
      2) The policy history: how discovered (repo, scan, etc), the length of the policy, and start of the contents
      3) Captured stats entry for the role
      4) A list of all services/actions currently allowed and whether they are repoable
      5) What the new policy would look like after repoing (if it is repoable)
    Args:
        account_number (string)
        role_name (string)
        config: repokid configuration (used for the unknown-permissions warning)
    Returns:
        None
    """
    role_id = find_role_in_cache(role_name, account_number)
    if not role_id:
        LOGGER.warning("Could not find role with name {}".format(role_name))
        return
    role = Role(role_id=role_id)
    role.fetch()
    print("\n\nRole repo data:")
    headers = [
        "Name",
        "Refreshed",
        "Disqualified By",
        "Can be repoed",
        "Permissions",
        "Repoable",
        "Repoed",
        "Services",
    ]
    rows = [
        [
            role.role_name,
            role.refreshed,
            role.disqualified_by,
            len(role.disqualified_by) == 0,
            role.total_permissions,
            role.repoable_permissions,
            role.repoed,
            role.repoable_services,
        ]
    ]
    print(tabulate(rows, headers=headers) + "\n\n")
    # Section 2: one row per stored policy version.
    print("Policy history:")
    headers = ["Number", "Source", "Discovered", "Permissions", "Services"]
    rows = []
    for index, policies_version in enumerate(role.policies):
        policy_permissions, _ = get_permissions_in_policy(policies_version["Policy"])
        rows.append(
            [
                index,
                policies_version["Source"],
                policies_version["Discovered"],
                len(policy_permissions),
                get_services_in_permissions(policy_permissions),
            ]
        )
    print(tabulate(rows, headers=headers) + "\n\n")
    # Section 3: recorded stats events for the role.
    print("Stats:")
    headers = ["Date", "Event Type", "Permissions Count", "Disqualified By"]
    rows = []
    for stats_entry in role.stats:
        rows.append(
            [
                stats_entry["Date"],
                stats_entry["Source"],
                stats_entry["PermissionsCount"],
                stats_entry.get("DisqualifiedBy", []),
            ]
        )
    print(tabulate(rows, headers=headers) + "\n\n")
    # can't do anymore if we don't have AA data
    if not role.aa_data:
        LOGGER.warning("ARN not found in Access Advisor: {}".format(role.arn))
        return
    warn_unknown_permissions = config.get("warnings", {}).get(
        "unknown_permissions", False
    )
    permissions, eligible_permissions = role.get_permissions_for_policy_version(
        warn_unknown_perms=warn_unknown_permissions
    )
    # Section 4: every allowed service:action, flagged repoable or not.
    print("Repoable services and permissions")
    headers = ["Service", "Action", "Repoable"]
    rows = []
    for permission in permissions:
        service = permission.split(":")[0]
        action = permission.split(":")[1]
        is_repoable_permission = permission in role.repoable_services
        is_repoable_service = permission.split(":")[0] in role.repoable_services
        # repoable is is True if the action (`permission`) is in the list of repoable
        # services OR if the service (`permission.split(":")[0]`) is in the list
        repoable = is_repoable_permission or is_repoable_service
        rows.append([service, action, repoable])
    rows = sorted(rows, key=lambda x: (x[2], x[0], x[1]))
    print(tabulate(rows, headers=headers) + "\n\n")
    # Section 5: show the post-repo policy documents, if any remain.
    try:
        repoed_policies, _ = role.get_repoed_policy()
        print(
            "Repo'd Policies: \n{}".format(
                json.dumps(repoed_policies, indent=2, sort_keys=True)
            )
        )
    except MissingRepoableServices:
        print("All Policies Removed")
        repoed_policies = {}
    # need to check if all policies would be too large
    if inline_policies_size_exceeds_maximum(repoed_policies):
        LOGGER.warning(
            "Policies would exceed the AWS size limit after repo for role: {}. "
            "Please manually minify.".format(role_name)
        )
def _remove_permissions_from_roles(
    permissions: List[str],
    role_filename: str,
    config: Optional[RepokidConfig],
    hooks: RepokidHooks,
    commit: bool = False,
) -> None:
    """Load role ARNs from a JSON file and strip the given permissions from each role.

    Args:
        permissions (list<string>): permission names to remove from every role
        role_filename (string): path to a JSON file containing a list of role ARNs
        config (RepokidConfig): configuration passed through to each Role
        hooks (RepokidHooks): hook registry; AFTER_REPO is fired once per role
        commit (bool): passed through to Role.remove_permissions
    Returns:
        None
    """
    with open(role_filename, "r") as fd:
        roles = json.load(fd)
    for role_arn in tqdm(roles):
        arn = ARN(role_arn)
        if arn.error:
            LOGGER.error("INVALID ARN: {arn}".format(arn=role_arn))
            # NOTE(review): returning here aborts the entire batch on the first
            # bad ARN instead of skipping that entry -- confirm this is intended.
            return
        account_number = arn.account_number
        # Role name is the last path component of the ARN resource.
        role_name = arn.name.split("/")[-1]
        role_id = find_role_in_cache(role_name, account_number)
        role = Role(role_id=role_id, config=config)
        role.fetch()
        role.remove_permissions(permissions, hooks, commit=commit)
        repokid.hooks.call_hooks(hooks, "AFTER_REPO", {"role": role})
|
|
from itertools import product
import os
import os.path as op
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose, assert_array_equal
from mne.channels import make_standard_montage
from mne.datasets import testing
from mne.io import read_raw_fif, read_raw_kit, read_raw_bti, read_info
from mne.io.constants import FIFF
from mne import (read_forward_solution, write_forward_solution,
make_forward_solution, convert_forward_solution,
setup_volume_source_space, read_source_spaces, create_info,
make_sphere_model, pick_types_forward, pick_info, pick_types,
read_evokeds, read_cov, read_dipole)
from mne.utils import (requires_mne, requires_nibabel,
run_tests_if_main, run_subprocess)
from mne.forward._make_forward import _create_meg_coils, make_forward_dipole
from mne.forward._compute_forward import _magnetic_dipole_field_vec
from mne.forward import Forward, _do_forward_solution
from mne.dipole import Dipole, fit_dipole
from mne.simulation import simulate_evoked
from mne.source_estimate import VolSourceEstimate
from mne.source_space import (get_volume_labels_from_aseg, write_source_spaces,
_compare_source_spaces, setup_source_space)
from mne.forward.tests.test_forward import assert_forward_allclose
# Paths into the optional MNE testing dataset.  download=False means they may
# not exist locally; tests guard with @testing.requires_testing_data.
data_path = testing.data_path(download=False)
fname_meeg = op.join(data_path, 'MEG', 'sample',
                     'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
# This raw file ships with the package itself, not the testing dataset.
fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                    'test_raw.fif')
fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_dip = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
fname_trans = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis_trunc-trans.fif')
subjects_dir = os.path.join(data_path, 'subjects')
fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem',
                    'sample-1280-1280-1280-bem-sol.fif')
fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
# Single-layer BEM (used for the MEG-only / homogeneous-model checks)
fname_bem_meg = op.join(subjects_dir, 'sample', 'bem',
                        'sample-1280-bem-sol.fif')
def _compare_forwards(fwd, fwd_py, n_sensors, n_src,
                      meg_rtol=1e-4, meg_atol=1e-9,
                      eeg_rtol=1e-3, eeg_atol=1e-3):
    """Test forwards.

    Compare two forward solutions (typically MNE-C vs. mne-python) in all
    four surf_ori/force_fixed conversion combinations, checking metadata,
    source geometry, and the MEG/EEG gain matrices.
    """
    # check source spaces
    assert_equal(len(fwd['src']), len(fwd_py['src']))
    _compare_source_spaces(fwd['src'], fwd_py['src'], mode='approx')
    for surf_ori, force_fixed in product([False, True], [False, True]):
        # use copy here to leave our originals unmodified
        fwd = convert_forward_solution(fwd, surf_ori, force_fixed, copy=True,
                                       use_cps=True)
        fwd_py = convert_forward_solution(fwd_py, surf_ori, force_fixed,
                                          copy=True, use_cps=True)
        # fixed orientation collapses 3 orientations per source down to 1
        check_src = n_src // 3 if force_fixed else n_src
        for key in ('nchan', 'source_rr', 'source_ori',
                    'surf_ori', 'coord_frame', 'nsource'):
            assert_allclose(fwd_py[key], fwd[key], rtol=1e-4, atol=1e-7,
                            err_msg=key)
        # In surf_ori=True only Z matters for source_nn
        if surf_ori and not force_fixed:
            ori_sl = slice(2, None, 3)
        else:
            ori_sl = slice(None)
        assert_allclose(fwd_py['source_nn'][ori_sl], fwd['source_nn'][ori_sl],
                        rtol=1e-4, atol=1e-6)
        assert_allclose(fwd_py['mri_head_t']['trans'],
                        fwd['mri_head_t']['trans'], rtol=1e-5, atol=1e-8)
        assert_equal(fwd_py['sol']['data'].shape, (n_sensors, check_src))
        assert_equal(len(fwd['sol']['row_names']), n_sensors)
        assert_equal(len(fwd_py['sol']['row_names']), n_sensors)
        # check MEG (rows [:306] of the gain matrix)
        assert_allclose(fwd['sol']['data'][:306, ori_sl],
                        fwd_py['sol']['data'][:306, ori_sl],
                        rtol=meg_rtol, atol=meg_atol,
                        err_msg='MEG mismatch')
        # check EEG (any rows beyond 306, if present)
        if fwd['sol']['data'].shape[0] > 306:
            assert_allclose(fwd['sol']['data'][306:, ori_sl],
                            fwd_py['sol']['data'][306:, ori_sl],
                            rtol=eeg_rtol, atol=eeg_atol,
                            err_msg='EEG mismatch')
def test_magnetic_dipole():
    """Test basic magnetic dipole forward calculation."""
    info = read_info(fname_raw)
    picks = pick_types(info, meg=True, eeg=False, exclude=[])
    info = pick_info(info, picks[:12])  # a few MEG channels is enough
    coils = _create_meg_coils(info['chs'], 'normal', None)
    # magnetic dipole far (meters!) from device origin
    r0 = np.array([0., 13., -6.])
    for ch, coil in zip(info['chs'], coils):
        rr = (ch['loc'][:3] + r0) / 2.  # get halfway closer
        far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil])
        near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil])
        # halving the distance scales the field by ~2**3 or ~2**4
        # depending on sensor type (grad vs mag)
        ratio = 8. if ch['ch_name'][-1] == '1' else 16.  # grad vs mag
        assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1)
    # degenerate case: dipole sitting on one of the coil integration points
    r0 = coils[0]['rmag'][[0]]
    with pytest.raises(RuntimeError, match='Coil too close'):
        _magnetic_dipole_field_vec(r0, coils[:1])
    # too_close='warning' should warn and produce non-finite output
    with pytest.warns(RuntimeWarning, match='Coil too close'):
        fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='warning')
    assert not np.isfinite(fwd).any()
    with np.errstate(invalid='ignore'):
        fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='info')
    assert not np.isfinite(fwd).any()
@pytest.mark.slowtest  # slow-ish on Travis OSX
@pytest.mark.timeout(60)  # can take longer than 30 sec on Travis
@testing.requires_testing_data
@requires_mne
def test_make_forward_solution_kit(tmpdir):
    """Test making fwd using KIT, BTI, and CTF (compensated) files."""
    # Paths to the small KIT/BTi/CTF recordings shipped with the package
    kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
                      'tests', 'data')
    sqd_path = op.join(kit_dir, 'test.sqd')
    mrk_path = op.join(kit_dir, 'test_mrk.sqd')
    elp_path = op.join(kit_dir, 'test_elp.txt')
    hsp_path = op.join(kit_dir, 'test_hsp.txt')
    trans_path = op.join(kit_dir, 'trans-sample.fif')
    fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif')
    bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
                      'tests', 'data')
    bti_pdf = op.join(bti_dir, 'test_pdf_linux')
    bti_config = op.join(bti_dir, 'test_config_linux')
    bti_hs = op.join(bti_dir, 'test_hs_linux')
    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
    fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                            'data', 'test_ctf_comp_raw.fif')
    # first set up a small testing source space
    fname_src_small = tmpdir.join('sample-oct-2-src.fif')
    src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
                             add_dist=False)
    write_source_spaces(fname_src_small, src)  # to enable working with MNE-C
    n_src = 108  # this is the resulting # of verts in fwd
    # first use mne-C: convert file, make forward solution
    fwd = _do_forward_solution('sample', fname_kit_raw, src=fname_src_small,
                               bem=fname_bem_meg, mri=trans_path,
                               eeg=False, meg=True, subjects_dir=subjects_dir)
    assert (isinstance(fwd, Forward))
    # now let's use python with the same raw file
    fwd_py = make_forward_solution(fname_kit_raw, trans_path, src,
                                   fname_bem_meg, eeg=False, meg=True)
    _compare_forwards(fwd, fwd_py, 157, n_src)  # 157 sensors in this file
    assert (isinstance(fwd_py, Forward))
    # now let's use mne-python all the way
    raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
    # without ignore_ref=True, this should throw an error:
    with pytest.raises(NotImplementedError, match='Cannot.*KIT reference'):
        make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
                              bem=fname_bem_meg, trans=trans_path)
    # check that asking for eeg channels (even if they don't exist) is handled
    meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True,
                                                      eeg=False))
    fwd_py = make_forward_solution(meg_only_info, src=src, meg=True, eeg=True,
                                   bem=fname_bem_meg, trans=trans_path,
                                   ignore_ref=True)
    _compare_forwards(fwd, fwd_py, 157, n_src,
                      meg_rtol=1e-3, meg_atol=1e-7)
    # BTI python end-to-end versus C
    fwd = _do_forward_solution('sample', fname_bti_raw, src=fname_src_small,
                               bem=fname_bem_meg, mri=trans_path,
                               eeg=False, meg=True, subjects_dir=subjects_dir)
    raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
    fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
                                   bem=fname_bem_meg, trans=trans_path)
    _compare_forwards(fwd, fwd_py, 248, n_src)  # 248 sensors in this file
    # now let's test CTF w/compensation
    fwd_py = make_forward_solution(fname_ctf_raw, fname_trans, src,
                                   fname_bem_meg, eeg=False, meg=True)
    fwd = _do_forward_solution('sample', fname_ctf_raw, mri=fname_trans,
                               src=fname_src_small, bem=fname_bem_meg,
                               eeg=False, meg=True, subjects_dir=subjects_dir)
    _compare_forwards(fwd, fwd_py, 274, n_src)
    # CTF with compensation changed in python
    ctf_raw = read_raw_fif(fname_ctf_raw)
    ctf_raw.info['bads'] = ['MRO24-2908']  # test that it works with some bads
    ctf_raw.apply_gradient_compensation(2)
    fwd_py = make_forward_solution(ctf_raw.info, fname_trans, src,
                                   fname_bem_meg, eeg=False, meg=True)
    fwd = _do_forward_solution('sample', ctf_raw, mri=fname_trans,
                               src=fname_src_small, bem=fname_bem_meg,
                               eeg=False, meg=True,
                               subjects_dir=subjects_dir)
    _compare_forwards(fwd, fwd_py, 274, n_src)
    # round-trip the Python CTF solution through disk I/O
    fname_temp = tmpdir.join('test-ctf-fwd.fif')
    write_forward_solution(fname_temp, fwd_py)
    fwd_py2 = read_forward_solution(fname_temp)
    _compare_forwards(fwd_py, fwd_py2, 274, n_src)
    repr(fwd_py)  # smoke test for __repr__
@pytest.mark.slowtest
@testing.requires_testing_data
def test_make_forward_solution():
    """Test making M-EEG forward solution from python."""
    fwd_py = make_forward_solution(fname_raw, fname_trans, fname_src,
                                   fname_bem, mindist=5.)
    assert (isinstance(fwd_py, Forward))
    # compare against the pre-computed solution shipped in the test dataset
    fwd = read_forward_solution(fname_meeg)
    assert (isinstance(fwd, Forward))
    _compare_forwards(fwd, fwd_py, 366, 1494, meg_rtol=1e-3)
    # Homogeneous model: a 1-layer BEM must be rejected when EEG is requested
    with pytest.raises(RuntimeError, match='homogeneous.*1-layer.*EEG'):
        make_forward_solution(fname_raw, fname_trans, fname_src,
                              fname_bem_meg)
@testing.requires_testing_data
def test_make_forward_solution_discrete(tmpdir):
    """Test making and converting a forward solution with discrete src."""
    # smoke test for depth weighting and discrete source spaces
    src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
                             add_dist=False)
    # append a discrete (pos=dict) volume source space built from the
    # first three surface vertices
    src = src + setup_volume_source_space(
        pos=dict(rr=src[0]['rr'][src[0]['vertno'][:3]].copy(),
                 nn=src[0]['nn'][src[0]['vertno'][:3]].copy()))
    sphere = make_sphere_model()
    fwd = make_forward_solution(fname_raw, fname_trans, src, sphere,
                                meg=True, eeg=False)
    convert_forward_solution(fwd, surf_ori=True)
@testing.requires_testing_data
@requires_mne
@pytest.mark.timeout(90)  # can take longer than 60 sec on Travis
def test_make_forward_solution_sphere(tmpdir):
    """Test making a forward solution with a sphere model.

    Compares mne-python against MNE-C on a small oct-2 source space, then
    checks that the number of sphere layers is irrelevant for MEG.
    """
    fname_src_small = tmpdir.join('sample-oct-2-src.fif')
    src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
                             add_dist=False)
    write_source_spaces(fname_src_small, src)  # to enable working with MNE-C
    out_name = tmpdir.join('tmp-fwd.fif')
    run_subprocess(['mne_forward_solution', '--meg', '--eeg',
                    '--meas', fname_raw, '--src', fname_src_small,
                    '--mri', fname_trans, '--fwd', out_name])
    fwd = read_forward_solution(out_name)
    sphere = make_sphere_model(verbose=True)
    fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere,
                                   meg=True, eeg=True, verbose=True)
    _compare_forwards(fwd, fwd_py, 366, 108,
                      meg_rtol=5e-1, meg_atol=1e-6,
                      eeg_rtol=5e-1, eeg_atol=5e-1)
    # Since the above is pretty lax, let's check a different way
    for meg, eeg in zip([True, False], [False, True]):
        fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg)
        # BUG FIX: previously picked from `fwd` twice, so the correlation
        # below compared the C solution with itself and was vacuously 1.0.
        fwd_py_ = pick_types_forward(fwd_py, meg=meg, eeg=eeg)
        assert_allclose(np.corrcoef(fwd_['sol']['data'].ravel(),
                                    fwd_py_['sol']['data'].ravel())[0, 1],
                        1.0, rtol=1e-3)
    # Number of layers in the sphere model doesn't matter for MEG
    # (as long as no sources are omitted due to distance)
    assert len(sphere['layers']) == 4
    fwd = make_forward_solution(fname_raw, fname_trans, src, sphere,
                                meg=True, eeg=False)
    sphere_1 = make_sphere_model(head_radius=None)
    assert len(sphere_1['layers']) == 0
    assert_array_equal(sphere['r0'], sphere_1['r0'])
    # BUG FIX: use the 0-layer model here; passing `sphere` again made the
    # layer-count-invariance comparison below trivially true.
    fwd_1 = make_forward_solution(fname_raw, fname_trans, src, sphere_1,
                                  meg=True, eeg=False)
    _compare_forwards(fwd, fwd_1, 306, 108, meg_rtol=1e-12, meg_atol=1e-12)
    # Homogeneous model: zero-shell sphere must be rejected for EEG
    sphere = make_sphere_model(head_radius=None)
    with pytest.raises(RuntimeError, match='zero shells.*EEG'):
        make_forward_solution(fname_raw, fname_trans, src, sphere)
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_nibabel()
def test_forward_mixed_source_space(tmpdir):
    """Test making the forward solution for a mixed source space."""
    # get the surface source space
    rng = np.random.RandomState(0)
    surf = read_source_spaces(fname_src)
    # setup two volume source spaces from two randomly chosen aseg labels
    label_names = get_volume_labels_from_aseg(fname_aseg)
    vol_labels = rng.choice(label_names, 2)
    vol1 = setup_volume_source_space('sample', pos=20., mri=fname_aseg,
                                     volume_label=vol_labels[0],
                                     add_interpolator=False)
    vol2 = setup_volume_source_space('sample', pos=20., mri=fname_aseg,
                                     volume_label=vol_labels[1],
                                     add_interpolator=False)
    # merge surfaces and volume
    src = surf + vol1 + vol2
    # calculate forward solution
    fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem)
    assert (repr(fwd))
    # extract source spaces
    src_from_fwd = fwd['src']
    # get the coordinate frame of each source space
    coord_frames = np.array([s['coord_frame'] for s in src_from_fwd])
    # assert that all source spaces are in head coordinates
    assert ((coord_frames == FIFF.FIFFV_COORD_HEAD).all())
    # run tests for SourceSpaces.export_volume
    fname_img = tmpdir.join('temp-image.mgz')
    # head coordinates and mri_resolution, but missing trans file
    with pytest.raises(ValueError, match='trans containing mri to head'):
        src_from_fwd.export_volume(fname_img, mri_resolution=True, trans=None)
    # head coordinates and mri_resolution, but wrong trans file
    vox_mri_t = vol1[0]['vox_mri_t']
    with pytest.raises(ValueError, match='head<->mri, got mri_voxel->mri'):
        src_from_fwd.export_volume(fname_img, mri_resolution=True,
                                   trans=vox_mri_t)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_make_forward_dipole(tmpdir):
    """Test forward-projecting dipoles."""
    rng = np.random.RandomState(0)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    cov['projs'] = []  # avoid proj warning
    dip_c = read_dipole(fname_dip)
    # Only use magnetometers for speed!
    picks = pick_types(evoked.info, meg='mag', eeg=False)[::8]
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.info.normalize_proj()
    info = evoked.info
    # Make new Dipole object with n_test_dipoles picked from the dipoles
    # in the test dataset.
    n_test_dipoles = 3  # minimum 3 needed to get uneven sampling in time
    dipsel = np.sort(rng.permutation(np.arange(len(dip_c)))[:n_test_dipoles])
    dip_test = Dipole(times=dip_c.times[dipsel],
                      pos=dip_c.pos[dipsel],
                      amplitude=dip_c.amplitude[dipsel],
                      ori=dip_c.ori[dipsel],
                      gof=dip_c.gof[dipsel])
    sphere = make_sphere_model(head_radius=0.1)
    # Warning emitted due to uneven sampling in time
    with pytest.warns(RuntimeWarning, match='unevenly spaced'):
        fwd, stc = make_forward_dipole(dip_test, sphere, info,
                                       trans=fname_trans)
    # stc is list of VolSourceEstimate's
    assert isinstance(stc, list)
    for n_dip in range(n_test_dipoles):
        assert isinstance(stc[n_dip], VolSourceEstimate)
    # Now simulate evoked responses for each of the test dipoles,
    # and fit dipoles to them (sphere model, MEG and EEG)
    times, pos, amplitude, ori, gof = [], [], [], [], []
    nave = 200  # add a tiny amount of noise to the simulated evokeds
    for s in stc:
        evo_test = simulate_evoked(fwd, s, info, cov,
                                   nave=nave, random_state=rng)
        # evo_test.add_proj(make_eeg_average_ref_proj(evo_test.info))
        dfit, resid = fit_dipole(evo_test, cov, sphere, None)
        times += dfit.times.tolist()
        pos += dfit.pos.tolist()
        amplitude += dfit.amplitude.tolist()
        ori += dfit.ori.tolist()
        gof += dfit.gof.tolist()
    # Create a new Dipole object with the dipole fits
    dip_fit = Dipole(times, pos, amplitude, ori, gof)
    # check that true (test) dipoles and fits are "close"
    # cf. mne/tests/test_dipole.py
    diff = dip_test.pos - dip_fit.pos
    corr = np.corrcoef(dip_test.pos.ravel(), dip_fit.pos.ravel())[0, 1]
    dist = np.sqrt(np.mean(np.sum(diff * diff, axis=1)))  # RMS position error
    # mean angle (degrees) between true and fitted orientation vectors
    gc_dist = 180 / np.pi * \
        np.mean(np.arccos(np.sum(dip_test.ori * dip_fit.ori, axis=1)))
    amp_err = np.sqrt(np.mean((dip_test.amplitude - dip_fit.amplitude) ** 2))
    # Make sure each coordinate is close to reference
    # NB tolerance should be set relative to snr of simulated evoked!
    assert_allclose(dip_fit.pos, dip_test.pos, rtol=0, atol=1e-2,
                    err_msg='position mismatch')
    assert dist < 1e-2  # within 1 cm
    assert corr > 0.985
    assert gc_dist < 20  # less than 20 degrees
    assert amp_err < 10e-9  # within 10 nAm
    # Make sure rejection works with BEM: one dipole at z=1m
    # NB _make_forward.py:_prepare_for_forward will raise a RuntimeError
    # if no points are left after min_dist exclusions, hence 2 dips here!
    dip_outside = Dipole(times=[0., 0.001],
                         pos=[[0., 0., 1.0], [0., 0., 0.040]],
                         amplitude=[100e-9, 100e-9],
                         ori=[[1., 0., 0.], [1., 0., 0.]], gof=1)
    with pytest.raises(ValueError, match='outside the inner skull'):
        make_forward_dipole(dip_outside, fname_bem, info, fname_trans)
    # if we get this far, can safely assume the code works with BEMs too
    # -> use sphere again below for speed
    # Now make an evenly sampled set of dipoles, some simultaneous,
    # should return a VolSourceEstimate regardless
    times = [0., 0., 0., 0.001, 0.001, 0.002]
    # NOTE(review): these draws use the global np.random state rather than
    # the seeded `rng` above -- confirm determinism is not needed here.
    pos = np.random.rand(6, 3) * 0.020 + \
        np.array([0., 0., 0.040])[np.newaxis, :]
    amplitude = np.random.rand(6) * 100e-9
    ori = np.eye(6, 3) + np.eye(6, 3, -3)
    gof = np.arange(len(times)) / len(times)  # arbitrary
    dip_even_samp = Dipole(times, pos, amplitude, ori, gof)
    # I/O round-trip
    fname = str(tmpdir.join('test-fwd.fif'))
    with pytest.warns(RuntimeWarning, match='free orientation'):
        write_forward_solution(fname, fwd)
    fwd_read = convert_forward_solution(
        read_forward_solution(fname), force_fixed=True)
    assert_forward_allclose(fwd, fwd_read, rtol=1e-6)
    fwd, stc = make_forward_dipole(dip_even_samp, sphere, info,
                                   trans=fname_trans)
    assert isinstance(stc, VolSourceEstimate)
    assert_allclose(stc.times, np.arange(0., 0.003, 0.001))
@testing.requires_testing_data
def test_make_forward_no_meg(tmpdir):
    """Test that we can make and I/O forward solution with no MEG channels."""
    # single EEG electrode (Cz) with a one-dipole discrete source space
    pos = dict(rr=[[0.05, 0, 0]], nn=[[0, 0, 1.]])
    src = setup_volume_source_space(pos=pos)
    bem = make_sphere_model()
    trans = None
    montage = make_standard_montage('standard_1020')
    info = create_info(['Cz'], 1000., 'eeg').set_montage(montage)
    fwd = make_forward_solution(info, trans, src, bem)
    # round-trip through disk and compare gain matrices
    fname = tmpdir.join('test-fwd.fif')
    write_forward_solution(fname, fwd)
    fwd_read = read_forward_solution(fname)
    assert_allclose(fwd['sol']['data'], fwd_read['sol']['data'])
# support running this test module directly as a script
run_tests_if_main()
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/quaternion/blob/main/LICENSE>
from __future__ import division, print_function, absolute_import
import numpy as np
from quaternion.numba_wrapper import njit, jit, xrange
def fd_derivative(f, t):
    """Fourth-order finite-differencing with non-uniform time steps

    The finite-difference formula comes from Eq. (A 5b) of "Derivative formulas and errors for
    non-uniformly spaced points" by M. K. Bowen and Ronald Smith.  Their Eqs. (B 9b) and (B 10b)
    show this is a fourth-order formula -- though that notion is squishy for non-uniform steps.

    TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas.

    """
    dfdt = np.empty_like(f)
    # Dispatch to the numba-compiled worker for the array's dimensionality.
    workers = {1: _derivative, 2: _derivative_2d, 3: _derivative_3d}
    worker = workers.get(f.ndim)
    if worker is None:
        raise NotImplementedError("Taking derivatives of {0}-dimensional arrays is not yet implemented".format(f.ndim))
    worker(f, t, dfdt)
    return dfdt
@njit
def _derivative(f, t, dfdt):
    # 1-D worker: fill `dfdt` in place with the five-point non-uniform
    # finite-difference derivative of `f` (Bowen & Smith, Eq. (A 5b)).
    # Assumes len(t) >= 5, since t[4] / t[-5] are indexed directly.
    # First two samples: one-sided stencil built on the first five points.
    for i in xrange(2):
        t_i = t[i]
        t1 = t[0]
        t2 = t[1]
        t3 = t[2]
        t4 = t[3]
        t5 = t[4]
        # h* are node offsets from the evaluation point; h_jk are node spacings
        h1 = t1 - t_i
        h2 = t2 - t_i
        h3 = t3 - t_i
        h4 = t4 - t_i
        h5 = t5 - t_i
        h12 = t1 - t2
        h13 = t1 - t3
        h14 = t1 - t4
        h15 = t1 - t5
        h23 = t2 - t3
        h24 = t2 - t4
        h25 = t2 - t5
        h34 = t3 - t4
        h35 = t3 - t5
        h45 = t4 - t5
        dfdt[i] = (-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[0]
                   + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[1]
                   - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[2]
                   + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[3]
                   - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[4])
    # Interior samples: stencil centered on t[i] (offsets measured from t[i],
    # so the terms containing the center offset drop out).
    for i in xrange(2, len(t) - 2):
        t1 = t[i - 2]
        t2 = t[i - 1]
        t3 = t[i]
        t4 = t[i + 1]
        t5 = t[i + 2]
        h1 = t1 - t3
        h2 = t2 - t3
        h4 = t4 - t3
        h5 = t5 - t3
        h12 = t1 - t2
        h13 = t1 - t3
        h14 = t1 - t4
        h15 = t1 - t5
        h23 = t2 - t3
        h24 = t2 - t4
        h25 = t2 - t5
        h34 = t3 - t4
        h35 = t3 - t5
        h45 = t4 - t5
        dfdt[i] = (-((h2 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[i - 2]
                   + ((h1 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[i - 1]
                   - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[i]
                   + ((h1 * h2 * h5) / (h14 * h24 * h34 * h45)) * f[i + 1]
                   - ((h1 * h2 * h4) / (h15 * h25 * h35 * h45)) * f[i + 2])
    # Last two samples: one-sided stencil built on the final five points.
    for i in xrange(len(t) - 2, len(t)):
        t_i = t[i]
        t1 = t[-5]
        t2 = t[-4]
        t3 = t[-3]
        t4 = t[-2]
        t5 = t[-1]
        h1 = t1 - t_i
        h2 = t2 - t_i
        h3 = t3 - t_i
        h4 = t4 - t_i
        h5 = t5 - t_i
        h12 = t1 - t2
        h13 = t1 - t3
        h14 = t1 - t4
        h15 = t1 - t5
        h23 = t2 - t3
        h24 = t2 - t4
        h25 = t2 - t5
        h34 = t3 - t4
        h35 = t3 - t5
        h45 = t4 - t5
        dfdt[i] = (-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[-5]
                   + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[-4]
                   - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[-3]
                   + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[-2]
                   - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[-1])
    return
@njit
def _derivative_2d(f, t, dfdt):
    # 2-D worker: apply the same five-point non-uniform stencils as
    # _derivative independently to every column f[:, k].
    # Assumes len(t) >= 5, since t[4] / t[-5] are indexed directly.
    # First two samples: one-sided stencil built on the first five points.
    for i in xrange(2):
        t_i = t[i]
        t1 = t[0]
        t2 = t[1]
        t3 = t[2]
        t4 = t[3]
        t5 = t[4]
        # h* are node offsets from the evaluation point; h_jk are node spacings
        h1 = t1 - t_i
        h2 = t2 - t_i
        h3 = t3 - t_i
        h4 = t4 - t_i
        h5 = t5 - t_i
        h12 = t1 - t2
        h13 = t1 - t3
        h14 = t1 - t4
        h15 = t1 - t5
        h23 = t2 - t3
        h24 = t2 - t4
        h25 = t2 - t5
        h34 = t3 - t4
        h35 = t3 - t5
        h45 = t4 - t5
        for k in xrange(f.shape[1]):
            dfdt[i, k] = (
                -((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[0, k]
                + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[1, k]
                - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[2, k]
                + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[3, k]
                - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[4, k])
    # Interior samples: stencil centered on t[i].
    for i in xrange(2, len(t) - 2):
        t1 = t[i - 2]
        t2 = t[i - 1]
        t3 = t[i]
        t4 = t[i + 1]
        t5 = t[i + 2]
        h1 = t1 - t3
        h2 = t2 - t3
        h4 = t4 - t3
        h5 = t5 - t3
        h12 = t1 - t2
        h13 = t1 - t3
        h14 = t1 - t4
        h15 = t1 - t5
        h23 = t2 - t3
        h24 = t2 - t4
        h25 = t2 - t5
        h34 = t3 - t4
        h35 = t3 - t5
        h45 = t4 - t5
        for k in xrange(f.shape[1]):
            dfdt[i, k] = (-((h2 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[i - 2, k]
                          + ((h1 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[i - 1, k]
                          - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35))
                          * f[i, k]
                          + ((h1 * h2 * h5) / (h14 * h24 * h34 * h45)) * f[i + 1, k]
                          - ((h1 * h2 * h4) / (h15 * h25 * h35 * h45)) * f[i + 2, k])
    # Last two samples: one-sided stencil built on the final five points.
    for i in xrange(len(t) - 2, len(t)):
        t_i = t[i]
        t1 = t[-5]
        t2 = t[-4]
        t3 = t[-3]
        t4 = t[-2]
        t5 = t[-1]
        h1 = t1 - t_i
        h2 = t2 - t_i
        h3 = t3 - t_i
        h4 = t4 - t_i
        h5 = t5 - t_i
        h12 = t1 - t2
        h13 = t1 - t3
        h14 = t1 - t4
        h15 = t1 - t5
        h23 = t2 - t3
        h24 = t2 - t4
        h25 = t2 - t5
        h34 = t3 - t4
        h35 = t3 - t5
        h45 = t4 - t5
        for k in xrange(f.shape[1]):
            dfdt[i, k] = (
                -((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[-5, k]
                + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[-4, k]
                - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[-3, k]
                + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[-2, k]
                - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[-1, k])
    return
@njit
def _derivative_3d(f, t, dfdt):
    # 3-D worker: apply the same five-point non-uniform stencils as
    # _derivative independently to every trailing slice f[:, k, m].
    # Assumes len(t) >= 5, since t[4] / t[-5] are indexed directly.
    # BUG FIX: the inner m-loops previously ranged over f.shape[1]; `m`
    # indexes the third axis, so they must range over f.shape[2].  The old
    # code mis-covered (or over-ran) arrays whose trailing axes differ.
    # First two samples: one-sided stencil built on the first five points.
    for i in xrange(2):
        t_i = t[i]
        t1 = t[0]
        t2 = t[1]
        t3 = t[2]
        t4 = t[3]
        t5 = t[4]
        # h* are node offsets from the evaluation point; h_jk are node spacings
        h1 = t1 - t_i
        h2 = t2 - t_i
        h3 = t3 - t_i
        h4 = t4 - t_i
        h5 = t5 - t_i
        h12 = t1 - t2
        h13 = t1 - t3
        h14 = t1 - t4
        h15 = t1 - t5
        h23 = t2 - t3
        h24 = t2 - t4
        h25 = t2 - t5
        h34 = t3 - t4
        h35 = t3 - t5
        h45 = t4 - t5
        for k in xrange(f.shape[1]):
            for m in xrange(f.shape[2]):
                dfdt[i, k, m] = (
                    -((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[0, k, m]
                    + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[1, k, m]
                    - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[2, k, m]
                    + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[3, k, m]
                    - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[4, k, m])
    # Interior samples: stencil centered on t[i].
    for i in xrange(2, len(t) - 2):
        t1 = t[i - 2]
        t2 = t[i - 1]
        t3 = t[i]
        t4 = t[i + 1]
        t5 = t[i + 2]
        h1 = t1 - t3
        h2 = t2 - t3
        h4 = t4 - t3
        h5 = t5 - t3
        h12 = t1 - t2
        h13 = t1 - t3
        h14 = t1 - t4
        h15 = t1 - t5
        h23 = t2 - t3
        h24 = t2 - t4
        h25 = t2 - t5
        h34 = t3 - t4
        h35 = t3 - t5
        h45 = t4 - t5
        for k in xrange(f.shape[1]):
            for m in xrange(f.shape[2]):
                dfdt[i, k, m] = (-((h2 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[i - 2, k, m]
                                 + ((h1 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[i - 1, k, m]
                                 - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35))
                                 * f[i, k, m]
                                 + ((h1 * h2 * h5) / (h14 * h24 * h34 * h45)) * f[i + 1, k, m]
                                 - ((h1 * h2 * h4) / (h15 * h25 * h35 * h45)) * f[i + 2, k, m])
    # Last two samples: one-sided stencil built on the final five points.
    for i in xrange(len(t) - 2, len(t)):
        t_i = t[i]
        t1 = t[-5]
        t2 = t[-4]
        t3 = t[-3]
        t4 = t[-2]
        t5 = t[-1]
        h1 = t1 - t_i
        h2 = t2 - t_i
        h3 = t3 - t_i
        h4 = t4 - t_i
        h5 = t5 - t_i
        h12 = t1 - t2
        h13 = t1 - t3
        h14 = t1 - t4
        h15 = t1 - t5
        h23 = t2 - t3
        h24 = t2 - t4
        h25 = t2 - t5
        h34 = t3 - t4
        h35 = t3 - t5
        h45 = t4 - t5
        for k in xrange(f.shape[1]):
            for m in xrange(f.shape[2]):
                dfdt[i, k, m] = (
                    -((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[-5, k, m]
                    + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[-4, k, m]
                    - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[-3, k, m]
                    + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[-2, k, m]
                    - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[-1, k, m])
    return
@jit
def fd_indefinite_integral(f, t):
    """Cumulative trapezoid-rule antiderivative of `f` sampled at `t`.

    Sfdt[i] holds the running integral from t[0] through t[i]; Sfdt[0] is 0.
    NOTE(review): iterates over f.shape[1], so this assumes `f` is 2-D
    (unlike fd_derivative, which dispatches on ndim) -- confirm callers.
    """
    Sfdt = np.empty_like(f)
    Sfdt[0] = 0.0
    for i in xrange(1, len(t)):
        for j in xrange(f.shape[1]):
            # trapezoid rule on the interval [t[i-1], t[i]]
            Sfdt[i, j] = Sfdt[i - 1, j] + (f[i, j] + f[i - 1, j]) * ((t[i] - t[i - 1]) / 2.0)
    return Sfdt
def fd_definite_integral(f, t):
    """Trapezoid-rule definite integral of `f` over the full range of `t`.

    Integration runs along the first axis; any remaining axes of `f` are
    carried through to the result.
    """
    half_dt = ((t[1:] - t[:-1]) / 2.0).reshape((-1,) + (1,) * (f.ndim - 1))
    contributions = np.zeros_like(f)
    contributions[1:, ...] = (f[1:, ...] + f[:-1, ...]) * half_dt
    return np.sum(contributions, axis=0)
def spline_evaluation(f, t, t_out=None, axis=None, spline_degree=3,
                      derivative_order=0, definite_integral_bounds=None):
    """Approximate input data using a spline and evaluate

    Note that this function is somewhat more general than it needs to be, so that it can be reused
    for closely related functions involving derivatives, antiderivatives, and integrals.

    Parameters
    ==========
    f : (..., N, ...) array_like
        Real or complex function values to be interpolated.
    t : (N,) array_like
        An N-D array of increasing real values. The length of f along the interpolation axis must be
        equal to the length of t. The number of data points must be larger than the spline degree.
    t_out : None or (M,) array_like [defaults to None]
        The new values of `t` on which to evaluate the result. If None, it is assumed that some
        other feature of the data is needed, like a derivative or antiderivative, which are then
        output using the same `t` values as the input.
    axis : None or int [defaults to None]
        The axis of `f` with length equal to the length of `t`. If None, this function searches for
        an axis of equal length in reverse order -- that is, starting from the last axis of `f`.
        Note that this feature is helpful when `f` is one-dimensional or will always satisfy that
        criterion, but is dangerous otherwise. Caveat emptor.
    spline_degree : int [defaults to 3]
        Degree of the interpolating spline. Must be 1 <= spline_degree <= 5.
    derivative_order : int [defaults to 0]
        The order of the derivative to apply to the data. Note that this may be negative, in which
        case the corresponding antiderivative is returned.
    definite_integral_bounds : None or (2,) array_like [defaults to None]
        If this is not None, the `t_out` and `derivative_order` parameters are ignored, and the
        returned values are just the (first) definite integrals of the splines between these limits,
        along each remaining axis.

    """
    import numpy as np
    from scipy.interpolate import InterpolatedUnivariateSpline
    # Process input arguments and get data into correct shape
    if not 1 <= spline_degree <= 5:
        raise ValueError('The spline degree must be between 1 and 5 (inclusive); it is {0}.'.format(spline_degree))
    t = np.asarray(t, dtype=float, order='C')
    if t.ndim != 1:
        raise ValueError('Input t values must be a one-dimensional array; this input has {0}.'.format(t.ndim))
    n = t.size
    if spline_degree >= n:
        raise ValueError('The spline degree ({0}) must be less than the number of data points ({1}).'.format(spline_degree, n))
    f = np.asanyarray(f)
    if axis is None:
        # Search the axes in reverse order for one whose length matches t
        try:
            axis = f.ndim - 1 - list(reversed(f.shape)).index(n)
        except ValueError:
            axis = None
    if axis is None or f.shape[axis] != n:
        raise ValueError((
            "Input function values `f` [shape {0}] should have at least one "
            "axis with the same length as input `t` [{1}], or bad axis input."
        ).format(f.shape, n))
    shape = list(f.shape)
    if definite_integral_bounds is not None:
        shape[axis] = 1  # We'll keep this axis for now (set to length 1) for uniform treatment, and remove it before returning
        definite_integral_bounds = np.array(definite_integral_bounds, dtype=float)
        if definite_integral_bounds.shape != (2,):
            raise ValueError("Expected exactly two bounds for the definite integral; got {0}.".format(definite_integral_bounds.shape))
        f_out = np.empty(shape, dtype=f.dtype)
        t_a, t_b = definite_integral_bounds
        def evaluator(s):
            return s.integral(t_a, t_b)
        # Only fit the spline to samples near the integration interval
        axis_slice = slice(max(0, np.argmin(np.abs(t-t_a))-10), min(n, np.argmin(np.abs(t-t_b))+11))
    else:
        if t_out is None:
            t_out = t
            axis_slice = slice(None)
        else:
            # Only fit the spline to samples near the requested output range
            axis_slice = slice(max(0, np.argmin(np.abs(t-t_out[0]))-10), min(n, np.argmin(np.abs(t-t_out[-1]))+11))
        shape[axis] = t_out.size
        if derivative_order != 0 and derivative_order > spline_degree:
            raise ValueError("Order of derivative ({0}) must not be greater than degree of spline ({1})".format(derivative_order, spline_degree))
        f_out = np.empty(shape, dtype=f.dtype)
        if derivative_order < 0:
            def evaluator(s):
                return s.antiderivative(n=-derivative_order)(t_out)
        elif derivative_order > 0:
            def evaluator(s):
                return s.derivative(n=derivative_order)(t_out)
        else:
            def evaluator(s):
                return s(t_out)
    def spline(f, t):
        return InterpolatedUnivariateSpline(t[axis_slice], f[axis_slice], k=spline_degree)
    # Move the axis to the end so that we can just iterate over all but the last index.
    # BUG FIX: this comparison previously used `n-1` (number of data points minus
    # one) instead of `f.ndim - 1` (index of the last axis), so the move could be
    # silently skipped whenever `axis` happened to equal `n-1`.
    if axis != -1 and axis != f.ndim - 1:
        f = np.moveaxis(f, axis, -1)
        f_out = np.moveaxis(f_out, axis, -1)
    # Iterate over all extra axes and evaluate
    complex_valued = np.iscomplexobj(f)
    for index in np.ndindex(f.shape[:-1]):
        if complex_valued:
            # the spline is real-valued, so fit real and imaginary parts separately
            f_out[index] = evaluator(spline(f[index].real, t)) + 1j * evaluator(spline(f[index].imag, t))
        else:
            f_out[index] = evaluator(spline(f[index], t))
    # Undo the axis move we did previously to the output (input doesn't matter any more)
    if axis != -1 and axis != f.ndim - 1:
        f_out = np.moveaxis(f_out, -1, axis)
    # If this is a definite integral, remove that extraneous axis
    if definite_integral_bounds is not None:
        f_out = np.squeeze(f_out, axis=axis)
    return f_out
def spline_derivative(f, t, derivative_order=1, axis=0):
    """Differentiate `f` with respect to `t` using spline interpolation.

    Thin convenience wrapper around `spline_evaluation`; see that function
    for the meaning of the arguments and the return value.
    """
    options = {"axis": axis, "derivative_order": derivative_order}
    return spline_evaluation(f, t, **options)
def spline_indefinite_integral(f, t, integral_order=1, axis=0):
    """Antidifferentiate `f` with respect to `t` via spline interpolation.

    Wrapper around `spline_evaluation`: a negative `derivative_order` there
    means repeated antidifferentiation, so the order is simply negated.
    """
    order = -integral_order  # negative derivative order == antiderivative
    return spline_evaluation(f, t, axis=axis, derivative_order=order)
def spline_definite_integral(f, t, t1=None, t2=None, axis=0):
    """Definite integral of `f` over [t1, t2] via spline interpolation.

    Bounds default to the full extent of `t`.  Delegates the actual work to
    `spline_evaluation` with `definite_integral_bounds`.
    """
    lower = t[0] if t1 is None else t1
    upper = t[-1] if t2 is None else t2
    return spline_evaluation(f, t, axis=axis, definite_integral_bounds=(lower, upper))
# Prefer the scipy spline-based routines when available; otherwise fall back
# to the finite-difference implementations defined earlier in this module.
try:
    from scipy.interpolate import InterpolatedUnivariateSpline
    spline = spline_evaluation
    derivative = spline_derivative
    antiderivative = spline_indefinite_integral
    indefinite_integral = spline_indefinite_integral
    definite_integral = spline_definite_integral
except ImportError:
    import warnings
    # Banner-style warning so the accuracy downgrade is hard to miss.
    warning_text = (
        "\n\n" + "!" * 57 + "\n" +
        "Could not import from scipy, which means that derivatives\n" +
        "and integrals will use less accurate finite-differencing\n" +
        "techniques. You may want to install scipy." +
        "\n" + "!" * 57 + "\n"
    )
    warnings.warn(warning_text)
    # NOTE(review): no `spline` alias is assigned on this fallback path —
    # presumably callers are expected to use only the names below; confirm.
    derivative = fd_derivative
    antiderivative = fd_indefinite_integral
    indefinite_integral = fd_indefinite_integral
    definite_integral = fd_definite_integral
|
|
import numpy as np
from itertools import combinations
import pickle
from multiprocessing import Pool
import sys
class DataManager(object):
    """Load the raw txt data matrix and slice it into explanans/explanandum pieces.

    The text file is expected to hold one row per individual, with the
    explanans (input) columns first and the explanandum (output) columns last.
    """

    def __init__(self, explanans_size, random_size, file_name=None, explanandum_size=3, add_random=True):
        """Load ../../data/<file_name>.txt into self.data.

        :param explanans_size: number of genuine input columns in the file.
        :param random_size: number of random columns to insert (see import_txt).
        :param file_name: base name of the txt file (without extension); when
            None it is taken from sys.argv[1].  Resolving it lazily here fixes
            the previous default `file_name=sys.argv[1]`, which was evaluated
            at class-definition time and raised IndexError on import whenever
            the script had no CLI argument — even for callers that passed
            `file_name` explicitly.
        :param explanandum_size: number of output columns at the end of the file.
        :param add_random: whether to insert the random benchmark columns.
        """
        if file_name is None:
            file_name = sys.argv[1]
        self.folder_path = "data"
        self.file_path = "../../{}/{}.txt".format(self.folder_path, file_name)
        self.explanans_size = explanans_size
        self.explanandum_size = explanandum_size
        self.random_size = random_size
        # Total number of input columns once the random ones are added.
        self.data_size = self.explanans_size + self.random_size
        self.data = self.import_txt(add_random=add_random)

    def import_txt(self, add_random):
        """Read the txt file; optionally insert random explanans columns.

        The random columns let downstream analyses benchmark network
        performance on pure noise.
        """
        print("Import txt file.")
        data = np.loadtxt(self.file_path)
        # Add X random columns to analyze performance of random data
        if add_random is True:
            for i in np.arange(self.random_size):
                to_insert = np.random.random(data.shape[0])
                # Insert just before the explanandum columns (generalized from
                # the hard-coded -3, which assumed explanandum_size == 3).
                data = np.insert(data, -self.explanandum_size, to_insert, axis=1)
        return data

    def format_data(self):
        """Center-reduce the explanans columns and normalize the explanandum ones.

        Replaces self.data with a structured array holding the 'x' (inputs)
        and 'y' (outputs) fields.
        """
        data = np.zeros(self.data.shape[0], dtype=[('x', float, self.data_size),
                                                   ('y', float, self.explanandum_size)])
        data["x"] = Format.center_reduce(self.data[:, :self.data_size])
        data["y"] = Format.normalize(self.data[:, self.data_size:])
        self.data = data

    def import_data(self, explanans=None, explanandum=None, individuals=None):
        """Return a structured array restricted to the requested columns/rows.

        :param explanans: indexes of input columns to keep (default: all
            genuine explanans columns).
            NOTE(review): the default covers only the first explanans_size of
            the data_size input columns stored in 'x' — confirm intended.
        :param explanandum: indexes of output columns to keep (default: all).
        :param individuals: row indexes to keep (default: all rows).
        """
        if explanans is None:
            explanans = np.arange(self.explanans_size)
        if explanandum is None:
            explanandum = np.arange(self.explanandum_size)
        if individuals is None:
            individuals = np.arange(self.data.shape[0])
        data = np.zeros(len(individuals), dtype=[('x', float, len(explanans)),
                                                 ('y', float, len(explanandum))])
        data["x"] = self.data['x'][np.asarray(individuals)][:, explanans]
        if len(explanandum) == 1:
            # Transpose so the single selected column matches the field shape.
            data["y"] = self.data['y'][np.asarray(individuals)][:, np.asarray(explanandum)].T
        else:
            data["y"] = self.data['y'][np.asarray(individuals)][:, np.asarray(explanandum)]
        return data
class SamplesCreator(object):
    """Build learning/testing index splits from all possible combinations."""

    @classmethod
    def combinations_samples(cls, n, split_value):
        """Return every way of picking `split_value` learning rows out of `n`.

        Each entry maps "learning" to one combination of row indexes and
        "testing" to the complementary indexes.  Beware: the result has
        C(n, split_value) entries and grows combinatorially with n.
        """
        print("Compute combinations for samples...")
        print("Number of individuals: {}.".format(n))
        print("Split value: {}".format(split_value))
        ind = np.arange(n)
        indexes_list = [
            {"learning": chosen, "testing": np.setdiff1d(ind, chosen)}
            for chosen in combinations(ind, split_value)
        ]
        print("Done.")
        return indexes_list
class Format(object):
    """Column-wise rescaling helpers for the raw data matrix."""

    @classmethod
    def normalize(cls, data, new_range=1, new_min=-0.5):
        """Affinely map each column (or a 1-D array) onto [new_min, new_min + new_range]."""
        if data.ndim == 1:
            lo, hi = data.min(), data.max()
            return new_range * (data - lo) / (hi - lo) + new_min
        out = data.copy()
        for col in range(data.shape[1]):
            lo, hi = data[:, col].min(), data[:, col].max()
            out[:, col] = new_range * (data[:, col] - lo) / (hi - lo) + new_min
        return out

    @classmethod
    def center_reduce(cls, data):
        """Center each column (or a 1-D array) on its mean and divide by its std.

        Columns with zero std are only centered, never divided.
        NOTE(review): the 2-D branch applies an extra factor of 2 that the
        1-D branch does not — this looks inconsistent; confirm it is intended.
        """
        if data.ndim == 1:
            mean, std = np.mean(data), np.std(data)
            centered = data - mean
            return centered / std if std != 0 else centered
        out = np.zeros(data.shape)
        for col in range(data.shape[1]):
            mean, std = np.mean(data[:, col]), np.std(data[:, col])
            centered = 2 * (data[:, col] - mean)
            out[:, col] = centered / std if std != 0 else centered
        return out

    @staticmethod
    def format_random(data):
        """Z-score a 1-D array (center on mean, divide by std unless std is 0)."""
        mean, std = np.mean(data), np.std(data)
        centered = data - mean
        return centered / std if std != 0 else centered
def job_definition(total_comb):
    """Split `total_comb` combinations into per-job (start, end) ranges and pickle them.

    The list is written to ../../kwargs/list_job, which is the path that
    `combination_var` reads back (the previous "job_list.p" file name did not
    match the reader, so the job list could never be loaded).

    :param total_comb: total number of variable combinations to distribute.
    """
    # At least one job: the plain floor division produced n_job == 0 (and a
    # ZeroDivisionError just below) whenever total_comb < 100000.
    n_job = max(1, 100 * (total_comb // 100000))
    step = total_comb // n_job
    print('\ntotal number of jobs: {}'.format(n_job))
    # We know that 4hours (240 minutes) for 100,000 networks is ok
    # So we choose the number of jobs according to that
    # NOTE(review): the 2000 divisor does not obviously match the 100,000
    # figure above — confirm the estimate formula.
    mean_time = step * 240 / 2000
    print("mean job time should be : {}h{}\n".format(int(mean_time // 60), int(mean_time % 60)))
    args = list()
    for i in range(n_job):
        start = i + i * step
        if i != n_job - 1:
            end = start + step + 1
        else:
            # The last job absorbs the remainder of the integer division.
            end = start + step + total_comb % step + 1 - i
        args.append((start, end))
    # Context manager guarantees the file is closed even if pickling fails.
    path_to_file = "../../kwargs/list_job"
    with open(path_to_file, "wb") as job_file:
        pickle.dump(args, job_file)
def prepare_comb_list(explanans_size=130, combination_size=3):
    """Enumerate all index combinations, save them to comb_list.npy, and return them.

    :param explanans_size: number of candidate variable indexes.
    :param combination_size: how many variables per combination.
    """
    comb_list = list(combinations(np.arange(explanans_size), combination_size))
    np.save("comb_list.npy", comb_list)
    return comb_list
class Supervisor:
    """Orchestrate kwargs preparation: load data, build splits, emit per-job files."""

    def __init__(self, add_random, explanans_size, combination_size, random_size):
        # Number of learning/testing resamplings (networks) per variable combination.
        self.n_network = 50
        self.data_size = explanans_size + random_size
        self.combination_list = prepare_comb_list(explanans_size=self.data_size, combination_size=combination_size)
        # Import txt file
        self.data_manager = DataManager(add_random=add_random,
                                        explanans_size=explanans_size,
                                        random_size=random_size,
                                        explanandum_size=3)
        self.data_manager.format_data()  # Center-reduce input variables and normalize output variables
        self.indexes_list = SamplesCreator.combinations_samples(n=self.data_manager.data.shape[0],
                                                                split_value=int(0.8 * self.data_manager.data.shape[0]))

    @staticmethod
    def convert_seconds_to_h_m_s(seconds):
        """Format a duration in seconds as H:MM:SS."""
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        return "%d:%02d:%02d" % (h, m, s)

    def prepare_kwargs_list(self, param):
        """Build and pickle the network kwargs for one job's slice of combinations.

        :param param: dict with keys "id_job", "start" and "end" delimiting the
            slice of self.combination_list this job must cover.
        """
        # Use the instance-wide setting; the previous local `n_network = 50`
        # silently shadowed (and ignored) self.n_network.
        n_network = self.n_network
        hidden_layer = 3
        id_job = param['id_job']
        start = param['start']
        end = param['end']
        learning_rate = 0.05
        presentation_number = 1000
        kwargs_list = []
        for selected_variables in self.combination_list[start:end]:
            for selected_ind in self.indexes_list[0:n_network]:
                samples_learning = self.data_manager.import_data(explanandum=[0, 1, 2],
                                                                 explanans=selected_variables,
                                                                 individuals=selected_ind['learning'])
                samples_testing = self.data_manager.import_data(explanandum=[0, 1, 2],
                                                                explanans=selected_variables,
                                                                individuals=selected_ind['testing'])
                # NOTE(review): momentum is set equal to learning_rate — confirm intended.
                kwargs = {"dataset": samples_learning,
                          "test_dataset": samples_testing,
                          "hidden_layer": [hidden_layer],
                          "presentation_number": presentation_number,
                          "learning_rate": learning_rate,
                          "momentum": learning_rate,
                          'ind_learning': selected_ind['learning'],
                          'ind_testing': selected_ind['testing'],
                          'selected_var': selected_variables
                          }
                kwargs_list.append(kwargs)
        filename = "perceptromic_job{}".format(id_job)
        path_to_file = "../../kwargs/{}.p".format(filename)
        # Context manager guarantees the file is closed even if pickling fails.
        with open(path_to_file, "wb") as job_file:
            pickle.dump(kwargs_list, job_file)
def combination_var(start_job=None, end_job=None, list_job_generated=False):
    """Prepare kwargs pickles for every job, fanning the work over a process pool.

    :param start_job: first job index to prepare (None together with end_job = all jobs).
    :param end_job: one past the last job index to prepare.
    :param list_job_generated: set True to skip regenerating the job list file.
    """
    print("\n*************************")
    print('Preparing kwarg list...')
    print("**************************\n")
    print("Random size is equal to explanans size \n")
    s = Supervisor(add_random=True, explanans_size=107, random_size=107, combination_size=3)
    print("\n")
    if list_job_generated is not True:
        job_definition(total_comb=len(s.combination_list))
    with open('../../kwargs/list_job', 'rb') as file:
        all_jobs = pickle.load(file)
    if start_job is None and end_job is None:
        list_jobs = all_jobs
    else:
        # Slice, not tuple-index: `all_jobs[start_job, end_job]` raised
        # TypeError because lists only accept integer or slice indexes.
        list_jobs = all_jobs[start_job:end_job]
    list_dict = []
    for id_job, job in enumerate(list_jobs):
        dict_jobs = {"id_job": id_job, "start": job[0], "end": job[1]}
        list_dict.append(dict_jobs)
    print("\nList jobs ready - preparing kwargs \n")
    pool = Pool(processes=12)
    pool.map(s.prepare_kwargs_list, list_dict)
    print("\n**************************")
    print('Kwarg list ready.')
    print("\n*************************")
# Script entry point: prepare the kwargs files for every job.
if __name__ == '__main__':
    combination_var()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# AutoRest-generated operations class — do not hand-edit the logic; changes
# will be lost on regeneration (see the file header).
class LocalNetworkGatewaysOperations(object):
    """LocalNetworkGatewaysOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        local_network_gateway_name,  # type: str
        parameters,  # type: "_models.LocalNetworkGateway"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.LocalNetworkGateway"
        # Internal helper: issues the initial PUT for the long-running
        # operation; polling is handled by begin_create_or_update.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (updated) and 201 (created) carry the gateway in the body.
        if response.status_code == 200:
            deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore

    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        local_network_gateway_name,  # type: str
        parameters,  # type: "_models.LocalNetworkGateway"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.LocalNetworkGateway"]
        """Creates or updates a local network gateway in the specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :param parameters: Parameters supplied to the create or update local network gateway operation.
        :type parameters: ~azure.mgmt.network.v2020_04_01.models.LocalNetworkGateway
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either LocalNetworkGateway or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_04_01.models.LocalNetworkGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick the operation off with the initial PUT.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                local_network_gateway_name=local_network_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the model type.
            deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore

    def get(
        self,
        resource_group_name,  # type: str
        local_network_gateway_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.LocalNetworkGateway"
        """Gets the specified local network gateway in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LocalNetworkGateway, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_04_01.models.LocalNetworkGateway
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore

    def _delete_initial(
        self,
        resource_group_name,  # type: str
        local_network_gateway_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Internal helper: issues the initial DELETE for the long-running
        # operation; polling is handled by begin_delete.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore

    def begin_delete(
        self,
        resource_group_name,  # type: str
        local_network_gateway_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified local network gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick the operation off with the initial DELETE.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                local_network_gateway_name=local_network_gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore

    def update_tags(
        self,
        resource_group_name,  # type: str
        local_network_gateway_name,  # type: str
        parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.LocalNetworkGateway"
        """Updates a local network gateway tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param local_network_gateway_name: The name of the local network gateway.
        :type local_network_gateway_name: str
        :param parameters: Parameters supplied to update local network gateway tags.
        :type parameters: ~azure.mgmt.network.v2020_04_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LocalNetworkGateway, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_04_01.models.LocalNetworkGateway
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        # Tags update is a plain PATCH (no long-running operation).
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}  # type: ignore

    def list(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.LocalNetworkGatewayListResult"]
        """Gets all the local network gateways in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.LocalNetworkGatewayListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocalNetworkGatewayListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the first-page request from the operation URL, or follow
            # the server-provided next_link for subsequent pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'}  # type: ignore
|
|
"""
@package mi.instrument.seabird.sbe54tps.ooicore.driver
@file /Users/unwin/OOI/Workspace/code/marine-integrations/mi/instrument/seabird/sbe54tps/ooicore/driver.py
@author Roger Unwin
@brief Driver for the ooicore
Release notes:
"""
import re
import time
from mi.core.log import get_logger
from mi.core.util import dict_equal
from mi.core.common import BaseEnum, Units
from mi.core.time_tools import get_timestamp_delayed
from mi.core.time_tools import timegm_to_float
from mi.core.instrument.instrument_fsm import InstrumentFSM
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.data_particle import DataParticle, DataParticleKey, CommonDataParticleType
from mi.core.instrument.chunker import StringChunker
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentException
from mi.instrument.seabird.driver import SeaBirdInstrumentDriver
from mi.instrument.seabird.driver import SeaBirdProtocol
from mi.instrument.seabird.driver import NEWLINE
from mi.instrument.seabird.driver import TIMEOUT
__author__ = 'Roger Unwin'
__license__ = 'Apache 2.0'

# Module-level logger for this driver.
log = get_logger()

# Instrument command prompt string.
GENERIC_PROMPT = r"S>"
# Extended timeout for slow operations — presumably seconds; TODO confirm
# against how the protocol passes it to do_cmd calls.
LONG_TIMEOUT = 200
# Maximum expected duration of a single sample — presumably seconds; confirm.
MAX_SAMPLE_DURATION = 240
class ScheduledJob(BaseEnum):
    """
    Names of the background jobs this protocol registers with the driver
    scheduler (status/configuration polling and clock synchronization).
    """
    ACQUIRE_STATUS = 'acquire_status'
    CONFIGURATION_DATA = "configuration_data"
    STATUS_DATA = "status_data"
    EVENT_COUNTER_DATA = "event_counter"
    HARDWARE_DATA = "hardware_data"
    CLOCK_SYNC = 'clock_sync'
class DataParticleType(BaseEnum):
    """
    Stream names for the data particles published by this driver.
    RAW is the common raw stream; the prest_* streams correspond to the
    particle classes defined below.
    """
    RAW = CommonDataParticleType.RAW
    PREST_REAL_TIME = 'prest_real_time'
    PREST_REFERENCE_OSCILLATOR = 'prest_reference_oscillator'
    PREST_CONFIGURATION_DATA = 'prest_configuration_data'
    PREST_DEVICE_STATUS = 'prest_device_status'
    PREST_EVENT_COUNTER = 'prest_event_counter'
    PREST_HARDWARE_DATA = 'prest_hardware_data'
# Device specific commands.
class InstrumentCmds(BaseEnum):
    """
    Instrument Commands
    These are the commands that according to the science profile must be supported.
    Except for SET, each value is the literal command string sent to the SBE54.
    """
    # Artificial Constructed Commands for Driver
    # (not sent verbatim -- presumably expanded by a set-command builder;
    # confirm against the protocol's _build_*_command methods)
    SET = "set"
    # Status
    GET_CONFIGURATION_DATA = "GetCD"
    GET_STATUS_DATA = "GetSD"
    GET_EVENT_COUNTER_DATA = "GetEC"
    GET_HARDWARE_DATA = "GetHD"
    # Sampling
    START_LOGGING = "Start"
    STOP_LOGGING = "Stop"
    SAMPLE_REFERENCE_OSCILLATOR = "SampleRefOsc"
    # Diagnostic
    TEST_EEPROM = "TestEeprom"
class ProtocolState(BaseEnum):
    """
    Protocol state enum.
    Mostly aliases of the generic DriverProtocolState values; OSCILLATOR is
    a driver-specific state for reference-oscillator sampling.
    """
    UNKNOWN = DriverProtocolState.UNKNOWN
    COMMAND = DriverProtocolState.COMMAND
    AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
    OSCILLATOR = "DRIVER_STATE_OSCILLATOR"
    DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
class ProtocolEvent(BaseEnum):
    """
    Protocol events
    Generic DriverEvent aliases plus driver-specific events (the
    PROTOCOL_EVENT_* strings) handled by this protocol's state machine.
    """
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    DISCOVER = DriverEvent.DISCOVER
    START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
    # Driver-specific: re-enter autosample after the instrument's own
    # command-mode timeout kicks it back to acquisition mode.
    RECOVER_AUTOSAMPLE = 'PROTOCOL_EVENT_RECOVER_AUTOSAMPLE'
    STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
    EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
    SAMPLE_REFERENCE_OSCILLATOR = 'PROTOCOL_EVENT_SAMPLE_REFERENCE_OSCILLATOR'
    TEST_EEPROM = 'PROTOCOL_EVENT_TEST_EEPROM'
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
    CLOCK_SYNC = DriverEvent.CLOCK_SYNC
    ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
    INIT_PARAMS = DriverEvent.INIT_PARAMS
    SCHEDULED_CLOCK_SYNC = DriverEvent.SCHEDULED_CLOCK_SYNC
    SCHEDULED_ACQUIRE_STATUS = 'PROTOCOL_EVENT_SCHEDULED_ACQUIRE_STATUS'
    ACQUIRE_OSCILLATOR_SAMPLE = 'PROTOCOL_EVENT_ACQUIRE_OSCILLATOR_SAMPLE'
class Capability(BaseEnum):
    """
    Protocol events that should be exposed to users (subset of ProtocolEvent).

    Every entry aliases a ProtocolEvent member so the user-facing capability
    strings always agree with the FSM event names.
    """
    START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
    CLOCK_SYNC = ProtocolEvent.CLOCK_SYNC
    ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
    SAMPLE_REFERENCE_OSCILLATOR = ProtocolEvent.SAMPLE_REFERENCE_OSCILLATOR
    TEST_EEPROM = ProtocolEvent.TEST_EEPROM
    # The entries below previously aliased DriverEvent directly; they now go
    # through ProtocolEvent for consistency with the rest of this enum. The
    # values are identical because ProtocolEvent re-exports these DriverEvent
    # constants unchanged.
    GET = ProtocolEvent.GET
    SET = ProtocolEvent.SET
    START_DIRECT = ProtocolEvent.START_DIRECT
    STOP_DIRECT = ProtocolEvent.STOP_DIRECT
    DISCOVER = ProtocolEvent.DISCOVER
# Device specific parameters.
class Parameter(DriverParameter):
    """
    Device-specific parameters. Values are the lower-case names used when
    setting the parameter on the instrument.
    """
    TIME = "time"
    SAMPLE_PERIOD = "sampleperiod"
    ENABLE_ALERTS = "enablealerts"
    BATTERY_TYPE = "batterytype"
# Device prompts.
class Prompt(BaseEnum):
    """
    Device prompt/terminator patterns. Command-mode responses end with the
    "S>" prompt; autosample-mode responses do not. The BAD_COMMAND variants
    are regex fragments matching an <Error...> element before the terminator.
    """
    COMMAND = "<Executed/>\r\nS>"
    AUTOSAMPLE = "<Executed/>\r\n"
    BAD_COMMAND_AUTOSAMPLE = "<Error.*?\r\n<Executed/>\r\n"
    BAD_COMMAND = "<Error.*?\r\n<Executed/>\r\nS>"
######################### PARTICLES #############################
# Regexes used by the chunker/particle extraction to delimit complete
# instrument responses. All are DOTALL because responses span multiple lines.
STATUS_DATA_REGEX = r"(<StatusData DeviceType='.*?</StatusData>)"
STATUS_DATA_REGEX_MATCHER = re.compile(STATUS_DATA_REGEX, re.DOTALL)
CONFIGURATION_DATA_REGEX = r"(<ConfigurationData DeviceType=.*?</ConfigurationData>)"
CONFIGURATION_DATA_REGEX_MATCHER = re.compile(CONFIGURATION_DATA_REGEX, re.DOTALL)
EVENT_COUNTER_DATA_REGEX = r"(<EventSummary numEvents='.*?</EventList>)"
EVENT_COUNTER_DATA_REGEX_MATCHER = re.compile(EVENT_COUNTER_DATA_REGEX, re.DOTALL)
HARDWARE_DATA_REGEX = r"(<HardwareData DeviceType='.*?</HardwareData>)"
HARDWARE_DATA_REGEX_MATCHER = re.compile(HARDWARE_DATA_REGEX, re.DOTALL)
SAMPLE_DATA_REGEX = r"<Sample Num='[0-9]+' Type='Pressure'>.*?</Sample>"
SAMPLE_DATA_REGEX_MATCHER = re.compile(SAMPLE_DATA_REGEX, re.DOTALL)
SAMPLE_REF_OSC_REGEX = r"<SetTimeout>.*?</Sample>"
SAMPLE_REF_OSC_MATCHER = re.compile(SAMPLE_REF_OSC_REGEX, re.DOTALL)
ENGINEERING_DATA_REGEX = r"<MainSupplyVoltage>(.*?)</MainSupplyVoltage>"
# BUG FIX: this matcher was previously compiled from SAMPLE_REF_OSC_REGEX
# (copy/paste error), so it could never match the engineering-data line.
ENGINEERING_DATA_MATCHER = re.compile(ENGINEERING_DATA_REGEX, re.DOTALL)
RECOVER_AUTOSAMPLE_REGEX = "CMD Mode 2 min timeout, returning to ACQ Mode"
RECOVER_AUTOSAMPLE_MATCHER = re.compile(RECOVER_AUTOSAMPLE_REGEX, re.DOTALL)
class SBE54tpsStatusDataParticleKey(BaseEnum):
    """
    Value IDs for the PREST_DEVICE_STATUS particle (GetSD response fields).
    """
    DEVICE_TYPE = "device_type"
    SERIAL_NUMBER = "serial_number"
    TIME = "date_time_str"
    EVENT_COUNT = "event_count"
    MAIN_SUPPLY_VOLTAGE = "battery_voltage_main"
    NUMBER_OF_SAMPLES = "sample_number"
    BYTES_USED = "bytes_used"
    BYTES_FREE = "bytes_free"
class SBE54tpsStatusDataParticle(DataParticle):
    """
    Parses the XML response of the GetSD (status data) command into a
    PREST_DEVICE_STATUS particle. Each LINEn regex matches one line of the
    response; _build_parsed_values matches them line-by-line.
    """
    _data_particle_type = DataParticleType.PREST_DEVICE_STATUS
    # One regex per line of the <StatusData> XML block.
    LINE1 = r"<StatusData DeviceType='([^']+)' SerialNumber='(\d+)'>"
    LINE2 = r"<DateTime>([^<]+)</DateTime>"
    LINE3 = r"<EventSummary numEvents='(\d+)'/>"
    LINE4 = r"<MainSupplyVoltage>([.\d]+)</MainSupplyVoltage>"
    LINE5 = r"<Samples>(\d+)</Samples>"
    LINE6 = r"<Bytes>(\d+)</Bytes>"
    LINE7 = r"<BytesFree>(\d+)</BytesFree>"
    def _build_parsed_values(self):
        """
        Take something in the StatusData format and split it into
        values with appropriate tags.
        @retval list of {value_id, value} dicts; fields never seen stay None.
        Side effect: sets the particle's internal timestamp from <DateTime>.
        @throws SampleException If there is a problem with sample creation
        """
        # Pre-seed every expected field so missing lines yield None values.
        values = {
            SBE54tpsStatusDataParticleKey.DEVICE_TYPE: None,
            SBE54tpsStatusDataParticleKey.SERIAL_NUMBER: None,
            SBE54tpsStatusDataParticleKey.TIME: None,
            SBE54tpsStatusDataParticleKey.EVENT_COUNT: None,
            SBE54tpsStatusDataParticleKey.MAIN_SUPPLY_VOLTAGE: None,
            SBE54tpsStatusDataParticleKey.NUMBER_OF_SAMPLES: None,
            SBE54tpsStatusDataParticleKey.BYTES_USED: None,
            SBE54tpsStatusDataParticleKey.BYTES_FREE: None
        }
        # Map each line pattern to the key(s) filled from its capture groups.
        matchers = {
            re.compile(self.LINE1): [SBE54tpsStatusDataParticleKey.DEVICE_TYPE,
                                     SBE54tpsStatusDataParticleKey.SERIAL_NUMBER],
            re.compile(self.LINE2): [SBE54tpsStatusDataParticleKey.TIME],
            re.compile(self.LINE3): [SBE54tpsStatusDataParticleKey.EVENT_COUNT],
            re.compile(self.LINE4): [SBE54tpsStatusDataParticleKey.MAIN_SUPPLY_VOLTAGE],
            re.compile(self.LINE5): [SBE54tpsStatusDataParticleKey.NUMBER_OF_SAMPLES],
            re.compile(self.LINE6): [SBE54tpsStatusDataParticleKey.BYTES_USED],
            re.compile(self.LINE7): [SBE54tpsStatusDataParticleKey.BYTES_FREE]
        }
        for line in self.raw_data.split(NEWLINE):
            for matcher, keys in matchers.iteritems():
                match = matcher.match(line)
                if match:
                    # Capture group i+1 corresponds to keys[i].
                    for index, key in enumerate(keys):
                        val = match.group(index + 1)
                        if key in [SBE54tpsStatusDataParticleKey.DEVICE_TYPE,
                                   SBE54tpsStatusDataParticleKey.SERIAL_NUMBER]:
                            values[key] = val
                        elif key in [SBE54tpsStatusDataParticleKey.EVENT_COUNT,
                                     SBE54tpsStatusDataParticleKey.NUMBER_OF_SAMPLES,
                                     SBE54tpsStatusDataParticleKey.BYTES_USED,
                                     SBE54tpsStatusDataParticleKey.BYTES_FREE]:
                            values[key] = int(val)
                        elif key in [SBE54tpsStatusDataParticleKey.MAIN_SUPPLY_VOLTAGE]:
                            values[key] = float(val)
                        elif key in [SBE54tpsStatusDataParticleKey.TIME]:
                            # Keep the raw string AND use it (format
                            # yyyy-mm-ddThh:mm:ss, treated as UTC) for the
                            # particle's internal timestamp.
                            values[key] = val
                            py_timestamp = time.strptime(val, "%Y-%m-%dT%H:%M:%S")
                            self.set_internal_timestamp(unix_time=timegm_to_float(py_timestamp))
        result = []
        for key, value in values.iteritems():
            result.append({DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: value})
        return result
class SBE54tpsConfigurationDataParticleKey(BaseEnum):
    """
    Value IDs for the PREST_CONFIGURATION_DATA particle (GetCD response).
    FRA* are acquisition-crystal coefficients; the press_coeff_* entries are
    pressure-sensor calibration coefficients.
    """
    DEVICE_TYPE = "device_type"
    SERIAL_NUMBER = "serial_number"
    ACQ_OSC_CAL_DATE = "calibration_date_acq_crystal"
    FRA0 = "acq_crystal_coeff_fra0"
    FRA1 = "acq_crystal_coeff_fra1"
    FRA2 = "acq_crystal_coeff_fra2"
    FRA3 = "acq_crystal_coeff_fra3"
    PRESSURE_SERIAL_NUM = "pressure_sensor_serial_number"
    PRESSURE_CAL_DATE = "calibration_date_pressure"
    PU0 = "press_coeff_pu0"
    PY1 = "press_coeff_py1"
    PY2 = "press_coeff_py2"
    PY3 = "press_coeff_py3"
    PC1 = "press_coeff_pc1"
    PC2 = "press_coeff_pc2"
    PC3 = "press_coeff_pc3"
    PD1 = "press_coeff_pd1"
    PD2 = "press_coeff_pd2"
    PT1 = "press_coeff_pt1"
    PT2 = "press_coeff_pt2"
    PT3 = "press_coeff_pt3"
    PT4 = "press_coeff_pt4"
    PRESSURE_OFFSET = "press_coeff_poffset"
    PRESSURE_RANGE = "pressure_sensor_range"
    BATTERY_TYPE = "battery_type"
    BAUD_RATE = "baud_rate"
    UPLOAD_TYPE = "upload_type"
    ENABLE_ALERTS = "enable_alerts"
    SAMPLE_PERIOD = "sample_period"
class SBE54tpsConfigurationDataParticle(DataParticle):
    """
    Parses the XML response of the GetCD (configuration data) command into a
    PREST_CONFIGURATION_DATA particle. Each LINEn regex matches one line (or
    attribute fragment) of the response.
    """
    _data_particle_type = DataParticleType.PREST_CONFIGURATION_DATA
    # One regex per line of the <ConfigurationData> XML block. LINE24-LINE28
    # match attribute fragments rather than whole elements.
    LINE1 = r"<ConfigurationData DeviceType='([^']+)' SerialNumber='(\d+)'>"
    LINE2 = r"<AcqOscCalDate>([0-9\-]+)</AcqOscCalDate>"
    LINE3 = r"<FRA0>([0-9E+-.]+)</FRA0>"
    LINE4 = r"<FRA1>([0-9E+-.]+)</FRA1>"
    LINE5 = r"<FRA2>([0-9E+-.]+)</FRA2>"
    LINE6 = r"<FRA3>([0-9E+-.]+)</FRA3>"
    LINE7 = r"<PressureSerialNum>(\d+)</PressureSerialNum>"
    LINE8 = r"<PressureCalDate>([0-9\-]+)</PressureCalDate>"
    LINE9 = r"<pu0>([0-9E+-.]+)</pu0>"
    LINE10 = r"<py1>([0-9E+-.]+)</py1>"
    LINE11 = r"<py2>([0-9E+-.]+)</py2>"
    LINE12 = r"<py3>([0-9E+-.]+)</py3>"
    LINE13 = r"<pc1>([0-9E+-.]+)</pc1>"
    LINE14 = r"<pc2>([0-9E+-.]+)</pc2>"
    LINE15 = r"<pc3>([0-9E+-.]+)</pc3>"
    LINE16 = r"<pd1>([0-9E+-.]+)</pd1>"
    LINE17 = r"<pd2>([0-9E+-.]+)</pd2>"
    LINE18 = r"<pt1>([0-9E+-.]+)</pt1>"
    LINE19 = r"<pt2>([0-9E+-.]+)</pt2>"
    LINE20 = r"<pt3>([0-9E+-.]+)</pt3>"
    LINE21 = r"<pt4>([0-9E+-.]+)</pt4>"
    LINE22 = r"<poffset>([0-9E+-.]+)</poffset>"
    LINE23 = r"<prange>([0-9E+-.]+)</prange>"
    LINE24 = r"batteryType='(\d+)'"
    LINE25 = r"baudRate='(\d+)'"
    LINE26 = r"enableAlerts='(\d+)'"
    LINE27 = r"uploadType='(\d+)'"
    LINE28 = r"samplePeriod='(\d+)'"
    def _build_parsed_values(self):
        """
        Take something in the ConfigurationData format and split it into
        values with appropriate tags.
        @retval list of {value_id, value} dicts; fields never seen stay None.
        @throws SampleException If there is a problem with sample creation
        """
        # Pre-seed every expected field so missing lines yield None values.
        values = {
            SBE54tpsConfigurationDataParticleKey.DEVICE_TYPE: None,
            SBE54tpsConfigurationDataParticleKey.SERIAL_NUMBER: None,
            SBE54tpsConfigurationDataParticleKey.ACQ_OSC_CAL_DATE: None,
            SBE54tpsConfigurationDataParticleKey.FRA0: None,
            SBE54tpsConfigurationDataParticleKey.FRA1: None,
            SBE54tpsConfigurationDataParticleKey.FRA2: None,
            SBE54tpsConfigurationDataParticleKey.FRA3: None,
            SBE54tpsConfigurationDataParticleKey.PRESSURE_SERIAL_NUM: None,
            SBE54tpsConfigurationDataParticleKey.PRESSURE_CAL_DATE: None,
            SBE54tpsConfigurationDataParticleKey.PU0: None,
            SBE54tpsConfigurationDataParticleKey.PY1: None,
            SBE54tpsConfigurationDataParticleKey.PY2: None,
            SBE54tpsConfigurationDataParticleKey.PY3: None,
            SBE54tpsConfigurationDataParticleKey.PC1: None,
            SBE54tpsConfigurationDataParticleKey.PC2: None,
            SBE54tpsConfigurationDataParticleKey.PC3: None,
            SBE54tpsConfigurationDataParticleKey.PD1: None,
            SBE54tpsConfigurationDataParticleKey.PD2: None,
            SBE54tpsConfigurationDataParticleKey.PT1: None,
            SBE54tpsConfigurationDataParticleKey.PT2: None,
            SBE54tpsConfigurationDataParticleKey.PT3: None,
            SBE54tpsConfigurationDataParticleKey.PT4: None,
            SBE54tpsConfigurationDataParticleKey.PRESSURE_OFFSET: None,
            SBE54tpsConfigurationDataParticleKey.PRESSURE_RANGE: None,
            SBE54tpsConfigurationDataParticleKey.BATTERY_TYPE: None,
            SBE54tpsConfigurationDataParticleKey.BAUD_RATE: None,
            SBE54tpsConfigurationDataParticleKey.ENABLE_ALERTS: None,
            SBE54tpsConfigurationDataParticleKey.UPLOAD_TYPE: None,
            SBE54tpsConfigurationDataParticleKey.SAMPLE_PERIOD: None
        }
        # Map each line pattern to the key(s) filled from its capture groups.
        matchers = {
            re.compile(self.LINE1): [SBE54tpsConfigurationDataParticleKey.DEVICE_TYPE,
                                     SBE54tpsConfigurationDataParticleKey.SERIAL_NUMBER],
            re.compile(self.LINE2): [SBE54tpsConfigurationDataParticleKey.ACQ_OSC_CAL_DATE],
            re.compile(self.LINE3): [SBE54tpsConfigurationDataParticleKey.FRA0],
            re.compile(self.LINE4): [SBE54tpsConfigurationDataParticleKey.FRA1],
            re.compile(self.LINE5): [SBE54tpsConfigurationDataParticleKey.FRA2],
            re.compile(self.LINE6): [SBE54tpsConfigurationDataParticleKey.FRA3],
            re.compile(self.LINE7): [SBE54tpsConfigurationDataParticleKey.PRESSURE_SERIAL_NUM],
            re.compile(self.LINE8): [SBE54tpsConfigurationDataParticleKey.PRESSURE_CAL_DATE],
            re.compile(self.LINE9): [SBE54tpsConfigurationDataParticleKey.PU0],
            re.compile(self.LINE10): [SBE54tpsConfigurationDataParticleKey.PY1],
            re.compile(self.LINE11): [SBE54tpsConfigurationDataParticleKey.PY2],
            re.compile(self.LINE12): [SBE54tpsConfigurationDataParticleKey.PY3],
            re.compile(self.LINE13): [SBE54tpsConfigurationDataParticleKey.PC1],
            re.compile(self.LINE14): [SBE54tpsConfigurationDataParticleKey.PC2],
            re.compile(self.LINE15): [SBE54tpsConfigurationDataParticleKey.PC3],
            re.compile(self.LINE16): [SBE54tpsConfigurationDataParticleKey.PD1],
            re.compile(self.LINE17): [SBE54tpsConfigurationDataParticleKey.PD2],
            re.compile(self.LINE18): [SBE54tpsConfigurationDataParticleKey.PT1],
            re.compile(self.LINE19): [SBE54tpsConfigurationDataParticleKey.PT2],
            re.compile(self.LINE20): [SBE54tpsConfigurationDataParticleKey.PT3],
            re.compile(self.LINE21): [SBE54tpsConfigurationDataParticleKey.PT4],
            re.compile(self.LINE22): [SBE54tpsConfigurationDataParticleKey.PRESSURE_OFFSET],
            re.compile(self.LINE23): [SBE54tpsConfigurationDataParticleKey.PRESSURE_RANGE],
            re.compile(self.LINE24): [SBE54tpsConfigurationDataParticleKey.BATTERY_TYPE],
            re.compile(self.LINE25): [SBE54tpsConfigurationDataParticleKey.BAUD_RATE],
            re.compile(self.LINE26): [SBE54tpsConfigurationDataParticleKey.ENABLE_ALERTS],
            re.compile(self.LINE27): [SBE54tpsConfigurationDataParticleKey.UPLOAD_TYPE],
            re.compile(self.LINE28): [SBE54tpsConfigurationDataParticleKey.SAMPLE_PERIOD]
        }
        for line in self.raw_data.split(NEWLINE):
            for matcher, keys in matchers.iteritems():
                match = matcher.match(line)
                if match:
                    # Capture group i+1 corresponds to keys[i].
                    for index, key in enumerate(keys):
                        val = match.group(index + 1)
                        # Strings: identifiers and calibration dates.
                        if key in [SBE54tpsConfigurationDataParticleKey.DEVICE_TYPE,
                                   SBE54tpsConfigurationDataParticleKey.PRESSURE_CAL_DATE,
                                   SBE54tpsConfigurationDataParticleKey.ACQ_OSC_CAL_DATE,
                                   SBE54tpsConfigurationDataParticleKey.PRESSURE_SERIAL_NUM,
                                   SBE54tpsConfigurationDataParticleKey.SERIAL_NUMBER,]:
                            values[key] = val
                        # Integers: settings attributes.
                        elif key in [SBE54tpsConfigurationDataParticleKey.BATTERY_TYPE,
                                     SBE54tpsConfigurationDataParticleKey.UPLOAD_TYPE,
                                     SBE54tpsConfigurationDataParticleKey.SAMPLE_PERIOD,
                                     SBE54tpsConfigurationDataParticleKey.BAUD_RATE,
                                     SBE54tpsConfigurationDataParticleKey.ENABLE_ALERTS]:
                            values[key] = int(val)
                        # Floats: calibration coefficients.
                        elif key in [SBE54tpsConfigurationDataParticleKey.FRA0,
                                     SBE54tpsConfigurationDataParticleKey.FRA1,
                                     SBE54tpsConfigurationDataParticleKey.FRA2,
                                     SBE54tpsConfigurationDataParticleKey.FRA3,
                                     SBE54tpsConfigurationDataParticleKey.PU0,
                                     SBE54tpsConfigurationDataParticleKey.PY1,
                                     SBE54tpsConfigurationDataParticleKey.PY2,
                                     SBE54tpsConfigurationDataParticleKey.PY3,
                                     SBE54tpsConfigurationDataParticleKey.PC1,
                                     SBE54tpsConfigurationDataParticleKey.PC2,
                                     SBE54tpsConfigurationDataParticleKey.PC3,
                                     SBE54tpsConfigurationDataParticleKey.PD1,
                                     SBE54tpsConfigurationDataParticleKey.PD2,
                                     SBE54tpsConfigurationDataParticleKey.PT1,
                                     SBE54tpsConfigurationDataParticleKey.PT2,
                                     SBE54tpsConfigurationDataParticleKey.PT3,
                                     SBE54tpsConfigurationDataParticleKey.PT4,
                                     SBE54tpsConfigurationDataParticleKey.PRESSURE_OFFSET,
                                     SBE54tpsConfigurationDataParticleKey.PRESSURE_RANGE]:
                            values[key] = float(val)
        result = []
        for key, value in values.iteritems():
            result.append({DataParticleKey.VALUE_ID: key,
                           DataParticleKey.VALUE: value})
        return result
class SBE54tpsEventCounterDataParticleKey(BaseEnum):
    """
    Value IDs for the PREST_EVENT_COUNTER particle (GetEC response fields).
    ERROR_10 / ERROR_12 mirror the instrument's 'Error10'/'Error12' event
    types -- semantics not documented here; see the SBE54 manual.
    """
    NUMBER_EVENTS = "number_events"
    MAX_STACK = "max_stack"
    DEVICE_TYPE = "device_type"
    SERIAL_NUMBER = "serial_number"
    POWER_ON_RESET = "power_on_reset"
    POWER_FAIL_RESET = "power_fail_reset"
    SERIAL_BYTE_ERROR = "serial_byte_error"
    COMMAND_BUFFER_OVERFLOW = "command_buffer_overflow"
    SERIAL_RECEIVE_OVERFLOW = "serial_receive_overflow"
    LOW_BATTERY = "low_battery"
    SIGNAL_ERROR = "signal_error"
    ERROR_10 = "error_10"
    ERROR_12 = "error_12"
class SBE54tpsEventCounterDataParticle(DataParticle):
    """
    Parses the XML response of the GetEC (event counter) command into a
    PREST_EVENT_COUNTER particle. Each LINEn regex matches one line of the
    response.
    """
    _data_particle_type = DataParticleType.PREST_EVENT_COUNTER
    # One regex per line of the <EventSummary>/<EventList> XML block.
    LINE1 = r"<EventSummary numEvents='(\d+)' maxStack='(\d+)'/>"
    LINE2 = r"<EventList DeviceType='([^']+)' SerialNumber='(\d+)'>"
    LINE3 = r"<Event type='PowerOnReset' count='(\d+)'/>"
    LINE4 = r"<Event type='PowerFailReset' count='(\d+)'/>"
    LINE5 = r"<Event type='SerialByteErr' count='(\d+)'/>"
    LINE6 = r"<Event type='CMDBuffOflow' count='(\d+)'/>"
    LINE7 = r"<Event type='SerialRxOflow' count='(\d+)'/>"
    LINE8 = r"<Event type='LowBattery' count='(\d+)'/>"
    LINE9 = r"<Event type='SignalErr' count='(\d+)'/>"
    LINE10 = r"<Event type='Error10' count='(\d+)'/>"
    LINE11 = r"<Event type='Error12' count='(\d+)'/>"
    def _build_parsed_values(self):
        """
        Take something in the EventCounter format and split it into
        values with appropriate tags.
        @retval list of {value_id, value} dicts; fields never seen stay None.
        @throws SampleException If there is a problem with sample creation
        """
        # Pre-seed every expected field so missing lines yield None values.
        values = {
            SBE54tpsEventCounterDataParticleKey.NUMBER_EVENTS: None,
            SBE54tpsEventCounterDataParticleKey.MAX_STACK: None,
            SBE54tpsEventCounterDataParticleKey.DEVICE_TYPE: None,
            SBE54tpsEventCounterDataParticleKey.SERIAL_NUMBER: None,
            SBE54tpsEventCounterDataParticleKey.POWER_ON_RESET: None,
            SBE54tpsEventCounterDataParticleKey.POWER_FAIL_RESET: None,
            SBE54tpsEventCounterDataParticleKey.SERIAL_BYTE_ERROR: None,
            SBE54tpsEventCounterDataParticleKey.COMMAND_BUFFER_OVERFLOW: None,
            SBE54tpsEventCounterDataParticleKey.SERIAL_RECEIVE_OVERFLOW: None,
            SBE54tpsEventCounterDataParticleKey.LOW_BATTERY: None,
            SBE54tpsEventCounterDataParticleKey.SIGNAL_ERROR: None,
            SBE54tpsEventCounterDataParticleKey.ERROR_10: None,
            SBE54tpsEventCounterDataParticleKey.ERROR_12: None
        }
        # Map each line pattern to the key(s) filled from its capture groups.
        matchers = {
            re.compile(self.LINE1): [SBE54tpsEventCounterDataParticleKey.NUMBER_EVENTS,
                                     SBE54tpsEventCounterDataParticleKey.MAX_STACK],
            re.compile(self.LINE2): [SBE54tpsEventCounterDataParticleKey.DEVICE_TYPE,
                                     SBE54tpsEventCounterDataParticleKey.SERIAL_NUMBER],
            re.compile(self.LINE3): [SBE54tpsEventCounterDataParticleKey.POWER_ON_RESET],
            re.compile(self.LINE4): [SBE54tpsEventCounterDataParticleKey.POWER_FAIL_RESET],
            re.compile(self.LINE5): [SBE54tpsEventCounterDataParticleKey.SERIAL_BYTE_ERROR],
            re.compile(self.LINE6): [SBE54tpsEventCounterDataParticleKey.COMMAND_BUFFER_OVERFLOW],
            re.compile(self.LINE7): [SBE54tpsEventCounterDataParticleKey.SERIAL_RECEIVE_OVERFLOW],
            re.compile(self.LINE8): [SBE54tpsEventCounterDataParticleKey.LOW_BATTERY],
            re.compile(self.LINE9): [SBE54tpsEventCounterDataParticleKey.SIGNAL_ERROR],
            re.compile(self.LINE10): [SBE54tpsEventCounterDataParticleKey.ERROR_10],
            re.compile(self.LINE11): [SBE54tpsEventCounterDataParticleKey.ERROR_12]
        }
        for line in self.raw_data.split(NEWLINE):
            for matcher, keys in matchers.iteritems():
                match = matcher.match(line)
                if match:
                    # Capture group i+1 corresponds to keys[i].
                    for index, key in enumerate(keys):
                        val = match.group(index + 1)
                        if key in [SBE54tpsEventCounterDataParticleKey.DEVICE_TYPE,
                                   SBE54tpsEventCounterDataParticleKey.SERIAL_NUMBER]:
                            values[key] = val
                        elif key in [SBE54tpsEventCounterDataParticleKey.NUMBER_EVENTS,
                                     SBE54tpsEventCounterDataParticleKey.MAX_STACK,
                                     SBE54tpsEventCounterDataParticleKey.POWER_ON_RESET,
                                     SBE54tpsEventCounterDataParticleKey.POWER_FAIL_RESET,
                                     SBE54tpsEventCounterDataParticleKey.SERIAL_BYTE_ERROR,
                                     SBE54tpsEventCounterDataParticleKey.COMMAND_BUFFER_OVERFLOW,
                                     SBE54tpsEventCounterDataParticleKey.SERIAL_RECEIVE_OVERFLOW,
                                     SBE54tpsEventCounterDataParticleKey.LOW_BATTERY,
                                     SBE54tpsEventCounterDataParticleKey.SIGNAL_ERROR,
                                     SBE54tpsEventCounterDataParticleKey.ERROR_10,
                                     SBE54tpsEventCounterDataParticleKey.ERROR_12]:
                            values[key] = int(val)
        result = []
        for key, value in values.iteritems():
            result.append({DataParticleKey.VALUE_ID: key,
                           DataParticleKey.VALUE: value})
        return result
class SBE54tpsHardwareDataParticleKey(BaseEnum):
    """
    Value IDs for the PREST_HARDWARE_DATA particle (GetHD response fields).
    """
    DEVICE_TYPE = "device_type"
    SERIAL_NUMBER = "serial_number"
    MANUFACTURER = "manufacturer"
    FIRMWARE_VERSION = "firmware_version"
    FIRMWARE_DATE = "firmware_date"
    HARDWARE_VERSION = "hardware_version"
    PCB_SERIAL_NUMBER = "pcb_serial_number"
    PCB_TYPE = "pcb_type"
    MANUFACTURE_DATE = "manufacture_date"
class SBE54tpsHardwareDataParticle(DataParticle):
    """
    Builds a PREST_HARDWARE_DATA particle from the GetHD command response.

    Each LINEn pattern matches one line of the <HardwareData> XML block.
    Hardware version and PCB serial number may appear on more than one
    line, so those two fields are accumulated into lists.
    """
    _data_particle_type = DataParticleType.PREST_HARDWARE_DATA
    LINE1 = r"<HardwareData DeviceType='([^']+)' SerialNumber='(\d+)'>"
    LINE2 = r"<Manufacturer>([^<]+)</Manufacturer>"
    LINE3 = r"<FirmwareVersion>([^<]+)</FirmwareVersion>"
    LINE4 = r"<FirmwareDate>([^<]+)</FirmwareDate>"
    LINE5 = r"<HardwareVersion>([^<]+)</HardwareVersion>"
    LINE6 = r"<PCBSerialNum>([^<]+)</PCBSerialNum>"
    LINE7 = r"<PCBType>([^<]+)</PCBType>"
    LINE8 = r"<MfgDate>([^<]+)</MfgDate>"
    def _build_parsed_values(self):
        """
        Split the raw GetHD response into tagged values.
        @retval list of {value_id, value} dicts (lists for repeated fields).
        @throws SampleException If there is a problem with sample creation
        """
        # Fields that may repeat across lines and are reported as lists.
        repeated_fields = [SBE54tpsHardwareDataParticleKey.HARDWARE_VERSION,
                           SBE54tpsHardwareDataParticleKey.PCB_SERIAL_NUMBER]
        # Map each line pattern to the key(s) filled from its capture groups.
        pattern_map = {
            re.compile(self.LINE1): [SBE54tpsHardwareDataParticleKey.DEVICE_TYPE,
                                     SBE54tpsHardwareDataParticleKey.SERIAL_NUMBER],
            re.compile(self.LINE2): [SBE54tpsHardwareDataParticleKey.MANUFACTURER],
            re.compile(self.LINE3): [SBE54tpsHardwareDataParticleKey.FIRMWARE_VERSION],
            re.compile(self.LINE4): [SBE54tpsHardwareDataParticleKey.FIRMWARE_DATE],
            re.compile(self.LINE5): [SBE54tpsHardwareDataParticleKey.HARDWARE_VERSION],
            re.compile(self.LINE6): [SBE54tpsHardwareDataParticleKey.PCB_SERIAL_NUMBER],
            re.compile(self.LINE7): [SBE54tpsHardwareDataParticleKey.PCB_TYPE],
            re.compile(self.LINE8): [SBE54tpsHardwareDataParticleKey.MANUFACTURE_DATE]
        }
        collected = {}
        for raw_line in self.raw_data.split(NEWLINE):
            for pattern, field_keys in pattern_map.items():
                hit = pattern.match(raw_line)
                if not hit:
                    continue
                # Capture group i+1 corresponds to field_keys[i].
                for group_index, field_key in enumerate(field_keys):
                    group_val = hit.group(group_index + 1)
                    if field_key in repeated_fields:
                        collected.setdefault(field_key, []).append(group_val)
                    else:
                        collected[field_key] = group_val
        return [{DataParticleKey.VALUE_ID: field_key,
                 DataParticleKey.VALUE: field_val}
                for field_key, field_val in collected.items()]
class SBE54tpsSampleDataParticleKey(BaseEnum):
    """
    Value IDs for the PREST_REAL_TIME pressure-sample particle.
    """
    SAMPLE_NUMBER = "sample_number"
    SAMPLE_TYPE = "sample_type"
    INST_TIME = "date_time_string"
    PRESSURE = "absolute_pressure" # psi
    PRESSURE_TEMP = "pressure_temp"
class SBE54tpsSampleDataParticle(DataParticle):
    """
    Parses one autosample pressure <Sample> XML block into a PREST_REAL_TIME
    particle. Each LINEn regex matches one line of the block.
    """
    _data_particle_type = DataParticleType.PREST_REAL_TIME
    # One regex per line of the <Sample Type='Pressure'> XML block.
    LINE1 = r"<Sample Num='(\d+)' Type='([^']+)'>"
    LINE2 = r"<Time>([^<]+)</Time>"
    LINE3 = r"<PressurePSI>([0-9.+-]+)</PressurePSI>"
    LINE4 = r"<PTemp>([0-9.+-]+)</PTemp>"
    def _build_parsed_values(self):
        """
        Take something in the Sample format and split it into
        values with appropriate tags.
        @retval list of {value_id, value} dicts; fields never seen stay None.
        Side effect: sets the particle's internal timestamp from <Time>.
        @throws SampleException If there is a problem with sample creation
        """
        # Pre-seed every expected field so missing lines yield None values.
        values = {
            SBE54tpsSampleDataParticleKey.SAMPLE_NUMBER: None,
            SBE54tpsSampleDataParticleKey.SAMPLE_TYPE: None,
            SBE54tpsSampleDataParticleKey.INST_TIME: None,
            SBE54tpsSampleDataParticleKey.PRESSURE: None,
            SBE54tpsSampleDataParticleKey.PRESSURE_TEMP: None
        }
        # Map each line pattern to the key(s) filled from its capture groups.
        matchers = {
            re.compile(self.LINE1): [SBE54tpsSampleDataParticleKey.SAMPLE_NUMBER,
                                     SBE54tpsSampleDataParticleKey.SAMPLE_TYPE],
            re.compile(self.LINE2): [SBE54tpsSampleDataParticleKey.INST_TIME],
            re.compile(self.LINE3): [SBE54tpsSampleDataParticleKey.PRESSURE],
            re.compile(self.LINE4): [SBE54tpsSampleDataParticleKey.PRESSURE_TEMP]
        }
        for line in self.raw_data.split(NEWLINE):
            for matcher, keys in matchers.iteritems():
                match = matcher.match(line)
                if match:
                    # Capture group i+1 corresponds to keys[i].
                    for index, key in enumerate(keys):
                        val = match.group(index + 1)
                        if key in [SBE54tpsSampleDataParticleKey.SAMPLE_TYPE]:
                            values[key] = val
                        elif key in [SBE54tpsSampleDataParticleKey.SAMPLE_NUMBER]:
                            values[key] = int(val)
                        elif key in [SBE54tpsSampleDataParticleKey.PRESSURE,
                                     SBE54tpsSampleDataParticleKey.PRESSURE_TEMP]:
                            values[key] = float(val)
                        elif key in [SBE54tpsSampleDataParticleKey.INST_TIME]:
                            # <Time>2012-11-07T12:21:25</Time>
                            # yyyy-mm-ddThh:mm:ss -- parsed for the internal
                            # timestamp AND kept verbatim as the field value.
                            py_timestamp = time.strptime(val, "%Y-%m-%dT%H:%M:%S")
                            self.set_internal_timestamp(unix_time=timegm_to_float(py_timestamp))
                            values[key] = val
        result = []
        for key, value in values.iteritems():
            result.append({DataParticleKey.VALUE_ID: key,
                           DataParticleKey.VALUE: value})
        return result
class SBE54tpsSampleRefOscDataParticleKey(BaseEnum):
    """
    Value IDs for the PREST_REFERENCE_OSCILLATOR particle
    (SampleRefOsc command response fields).
    """
    SET_TIMEOUT = "set_timeout"
    SET_TIMEOUT_MAX = "set_timeout_max"
    SET_TIMEOUT_ICD = "set_timeout_icd"
    SAMPLE_NUMBER = "sample_number"
    SAMPLE_TYPE = "sample_type"
    SAMPLE_TIMESTAMP = "date_time_string"
    REF_OSC_FREQ = "reference_oscillator_freq"
    PCB_TEMP_RAW = "pcb_thermistor_value"
    REF_ERROR_PPM = "reference_error"
class SBE54tpsSampleRefOscDataParticle(DataParticle):
    """
    Parses the SampleRefOsc (reference oscillator) command response into a
    PREST_REFERENCE_OSCILLATOR particle. Each LINEn regex matches one line
    of the response.
    """
    _data_particle_type = DataParticleType.PREST_REFERENCE_OSCILLATOR
    # One regex per line of the SetTimeout/<Sample> XML block.
    LINE1 = r"<SetTimeout>([^<]+)</SetTimeout>"
    LINE2 = r"<SetTimeoutMax>([^<]+)</SetTimeoutMax>"
    LINE3 = r"<SetTimeoutICD>([^<]+)</SetTimeoutICD>"
    LINE4 = r"<Sample Num='([^']+)' Type='([^']+)'>"
    LINE5 = r"<Time>([^<]+)</Time>"
    LINE6 = r"<RefOscFreq>([0-9.+-]+)</RefOscFreq>"
    LINE7 = r"<PCBTempRaw>([0-9.+-]+)</PCBTempRaw>"
    LINE8 = r"<RefErrorPPM>([0-9.+-]+)</RefErrorPPM>"
    def _build_parsed_values(self):
        """
        Take something in the SampleRefOsc format and split it into
        values with appropriate tags.
        @retval list of {value_id, value} dicts; fields never seen stay None.
        Side effect: sets the particle's internal timestamp from <Time>.
        @throws SampleException If there is a problem with sample creation
        """
        # Pre-seed every expected field so missing lines yield None values.
        values = {
            SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT: None,
            SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT_MAX: None,
            SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT_ICD: None,
            SBE54tpsSampleRefOscDataParticleKey.SAMPLE_NUMBER: None,
            SBE54tpsSampleRefOscDataParticleKey.SAMPLE_TYPE: None,
            SBE54tpsSampleRefOscDataParticleKey.SAMPLE_TIMESTAMP: None,
            SBE54tpsSampleRefOscDataParticleKey.REF_OSC_FREQ: None,
            SBE54tpsSampleRefOscDataParticleKey.PCB_TEMP_RAW: None,
            SBE54tpsSampleRefOscDataParticleKey.REF_ERROR_PPM: None
        }
        # Map each line pattern to the key(s) filled from its capture groups.
        matchers = {
            re.compile(self.LINE1): [SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT],
            re.compile(self.LINE2): [SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT_MAX],
            re.compile(self.LINE3): [SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT_ICD],
            re.compile(self.LINE4): [SBE54tpsSampleRefOscDataParticleKey.SAMPLE_NUMBER,
                                     SBE54tpsSampleRefOscDataParticleKey.SAMPLE_TYPE],
            re.compile(self.LINE5): [SBE54tpsSampleRefOscDataParticleKey.SAMPLE_TIMESTAMP],
            re.compile(self.LINE6): [SBE54tpsSampleRefOscDataParticleKey.REF_OSC_FREQ],
            re.compile(self.LINE7): [SBE54tpsSampleRefOscDataParticleKey.PCB_TEMP_RAW],
            re.compile(self.LINE8): [SBE54tpsSampleRefOscDataParticleKey.REF_ERROR_PPM]
        }
        for line in self.raw_data.split(NEWLINE):
            for matcher, keys in matchers.iteritems():
                match = matcher.match(line)
                if match:
                    # Capture group i+1 corresponds to keys[i].
                    for index, key in enumerate(keys):
                        val = match.group(index + 1)
                        if key in [SBE54tpsSampleRefOscDataParticleKey.SAMPLE_TYPE]:
                            values[key] = val
                        elif key in [SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT,
                                     SBE54tpsSampleRefOscDataParticleKey.SAMPLE_NUMBER,
                                     SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT_MAX,
                                     SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT_ICD,
                                     SBE54tpsSampleRefOscDataParticleKey.PCB_TEMP_RAW]:
                            # The instrument may report 'off' for the max
                            # timeout; normalize that to 0 before int().
                            if key == SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT_MAX and val.lower() == 'off':
                                val = 0
                            values[key] = int(val)
                        elif key in [SBE54tpsSampleRefOscDataParticleKey.REF_OSC_FREQ,
                                     SBE54tpsSampleRefOscDataParticleKey.REF_ERROR_PPM]:
                            values[key] = float(val)
                        elif key in [SBE54tpsSampleRefOscDataParticleKey.SAMPLE_TIMESTAMP]:
                            # <Time>2012-11-07T12:21:25</Time>
                            # yyyy-mm-ddThh:mm:ss -- kept verbatim AND parsed
                            # for the particle's internal timestamp.
                            values[key] = val
                            py_timestamp = time.strptime(val, "%Y-%m-%dT%H:%M:%S")
                            self.set_internal_timestamp(unix_time=timegm_to_float(py_timestamp))
        result = []
        for key, value in values.iteritems():
            result.append({DataParticleKey.VALUE_ID: key,
                           DataParticleKey.VALUE: value})
        return result
######################################### /PARTICLES #############################
###############################################################################
# Driver
###############################################################################
class SBE54PlusInstrumentDriver(SeaBirdInstrumentDriver):
    """
    SBEInstrumentDriver subclass for the SBE54tps.
    Subclasses the Seabird driver with connection state machine and wires in
    this module's Protocol.
    """
    ########################################################################
    # Superclass overrides for resource query.
    ########################################################################
    def get_resource_params(self):
        """
        Return list of device parameters available.
        @retval list of Parameter enum values.
        """
        return Parameter.list()
    ########################################################################
    # Protocol builder.
    ########################################################################
    def _build_protocol(self):
        """
        Construct the driver protocol state machine.
        Instantiates Protocol with this module's prompts and newline and the
        driver's event callback.
        """
        self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
###############################################################################
# Protocol
################################################################################
class Protocol(SeaBirdProtocol):
    """
    Instrument protocol class.
    Subclasses CommandResponseInstrumentProtocol (via SeaBirdProtocol) and
    drives the instrument through an InstrumentFSM with UNKNOWN, COMMAND,
    AUTOSAMPLE, OSCILLATOR and DIRECT_ACCESS states.
    """
    def __init__(self, prompts, newline, driver_event):
        """
        Protocol constructor.
        @param prompts A BaseEnum class containing instrument prompts.
        @param newline The newline.
        @param driver_event Driver process event callback.
        """
        # Construct protocol superclass.
        SeaBirdProtocol.__init__(self, prompts, newline, driver_event)
        # Build protocol state machine.
        self._protocol_fsm = InstrumentFSM(ProtocolState, ProtocolEvent,
                                           ProtocolEvent.ENTER, ProtocolEvent.EXIT)
        # Add event handlers for protocol state machine.
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.START_DIRECT, self._handler_command_start_direct)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.RECOVER_AUTOSAMPLE, self._handler_command_recover_autosample)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_get)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.CLOCK_SYNC, self._handler_command_clock_sync)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SCHEDULED_CLOCK_SYNC, self._handler_command_clock_sync)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_STATUS, self._handler_command_acquire_status)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SCHEDULED_ACQUIRE_STATUS, self._handler_command_acquire_status)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_DIRECT, self._handler_command_start_direct)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SAMPLE_REFERENCE_OSCILLATOR, self._handler_command_sample_ref_osc)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.TEST_EEPROM, self._handler_command_test_eeprom)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.INIT_PARAMS, self._handler_command_init_params)
        self._protocol_fsm.add_handler(ProtocolState.OSCILLATOR, ProtocolEvent.ENTER, self._handler_oscillator_enter)
        self._protocol_fsm.add_handler(ProtocolState.OSCILLATOR, ProtocolEvent.ACQUIRE_OSCILLATOR_SAMPLE, self._handler_oscillator_acquire_sample)
        self._protocol_fsm.add_handler(ProtocolState.OSCILLATOR, ProtocolEvent.EXIT, self._handler_oscillator_exit)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.SCHEDULED_ACQUIRE_STATUS, self._handler_autosample_acquire_status)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.SCHEDULED_CLOCK_SYNC, self._handler_autosample_clock_sync)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.GET, self._handler_get)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.INIT_PARAMS, self._handler_autosample_init_params)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER, self._handler_direct_access_enter)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT, self._handler_direct_access_exit)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct)
        # Build dictionaries for driver schema
        self._build_param_dict()
        self._build_command_dict()
        self._build_driver_dict()
        # Add build handlers for device commands.
        self._add_build_handler(InstrumentCmds.SET, self._build_set_command)
        self._add_build_handler(InstrumentCmds.GET_CONFIGURATION_DATA, self._build_simple_command)
        self._add_build_handler(InstrumentCmds.GET_STATUS_DATA, self._build_simple_command)
        self._add_build_handler(InstrumentCmds.GET_EVENT_COUNTER_DATA, self._build_simple_command)
        self._add_build_handler(InstrumentCmds.GET_HARDWARE_DATA, self._build_simple_command)
        self._add_build_handler(InstrumentCmds.START_LOGGING, self._build_simple_command)
        self._add_build_handler(InstrumentCmds.STOP_LOGGING, self._build_simple_command)
        self._add_build_handler(InstrumentCmds.SAMPLE_REFERENCE_OSCILLATOR, self._build_simple_command)
        self._add_build_handler(InstrumentCmds.TEST_EEPROM, self._build_simple_command)
        # Add response handlers for device commands.
        self._add_response_handler(InstrumentCmds.SET, self._parse_set_response)
        self._add_response_handler(InstrumentCmds.GET_CONFIGURATION_DATA, self._parse_generic_response)
        self._add_response_handler(InstrumentCmds.GET_STATUS_DATA,self._parse_generic_response)
        self._add_response_handler(InstrumentCmds.GET_EVENT_COUNTER_DATA, self._parse_generic_response)
        self._add_response_handler(InstrumentCmds.GET_HARDWARE_DATA, self._parse_generic_response)
        self._add_response_handler(InstrumentCmds.SAMPLE_REFERENCE_OSCILLATOR, self._parse_sample_ref_osc)
        self._add_response_handler(InstrumentCmds.TEST_EEPROM, self._parse_test_eeprom)
        # Start the state machine in the UNKNOWN state.
        self._protocol_fsm.start(ProtocolState.UNKNOWN)
        self._add_scheduler_event(ScheduledJob.ACQUIRE_STATUS, ProtocolEvent.ACQUIRE_STATUS)
        self._add_scheduler_event(ScheduledJob.CLOCK_SYNC, ProtocolEvent.SCHEDULED_CLOCK_SYNC)
        # commands sent to device, retained to be filtered out of responses for telnet DA
        self._sent_cmds = []
        self._chunker = StringChunker(Protocol.sieve_function)
    @staticmethod
    def sieve_function(raw_data):
        """
        The method that splits samples.
        Returns (start, end) index pairs for every span of raw_data that
        matches one of the known particle/engineering regexes.
        """
        return_list = []
        sieve_matchers = [STATUS_DATA_REGEX_MATCHER,
                          CONFIGURATION_DATA_REGEX_MATCHER,
                          EVENT_COUNTER_DATA_REGEX_MATCHER,
                          HARDWARE_DATA_REGEX_MATCHER,
                          SAMPLE_DATA_REGEX_MATCHER,
                          ENGINEERING_DATA_MATCHER,
                          RECOVER_AUTOSAMPLE_MATCHER]
        for matcher in sieve_matchers:
            for match in matcher.finditer(raw_data):
                return_list.append((match.start(), match.end()))
        return return_list
    def _got_chunk(self, chunk, timestamp):
        """
        The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
        with the appropriate particle objects and REGEXes.
        """
        # This instrument will automatically put itself back into autosample mode after a couple minutes idle
        # in command mode. If a message is seen, figure out if an event needs to be raised to adjust
        # the state machine.
        if RECOVER_AUTOSAMPLE_MATCHER.match(chunk) and self._protocol_fsm.get_current_state() == ProtocolState.COMMAND:
            log.debug("FSM state out of date. Recovering to autosample!")
            self._async_raise_fsm_event(ProtocolEvent.RECOVER_AUTOSAMPLE)
        # First matching particle type wins; remaining checks are skipped.
        if self._extract_sample(SBE54tpsSampleDataParticle, SAMPLE_DATA_REGEX_MATCHER, chunk, timestamp): return
        if self._extract_sample(SBE54tpsStatusDataParticle, STATUS_DATA_REGEX_MATCHER, chunk, timestamp): return
        if self._extract_sample(SBE54tpsConfigurationDataParticle, CONFIGURATION_DATA_REGEX_MATCHER, chunk, timestamp): return
        if self._extract_sample(SBE54tpsEventCounterDataParticle, EVENT_COUNTER_DATA_REGEX_MATCHER, chunk, timestamp): return
        if self._extract_sample(SBE54tpsHardwareDataParticle, HARDWARE_DATA_REGEX_MATCHER, chunk, timestamp): return
        if self._extract_sample(SBE54tpsSampleRefOscDataParticle, SAMPLE_REF_OSC_MATCHER, chunk, timestamp): return
    def _filter_capabilities(self, events):
        """
        Return a list of currently available capabilities.
        @param events list of candidate event names.
        """
        events_out = [x for x in events if Capability.has(x)]
        return events_out
    def _build_driver_dict(self):
        """
        Populate the driver dictionary with options
        """
        self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, True)
    def _build_command_dict(self):
        """
        Populate the command dictionary with command.
        """
        self._cmd_dict.add(Capability.ACQUIRE_STATUS, timeout=TIMEOUT, display_name="Acquire Status")
        self._cmd_dict.add(Capability.CLOCK_SYNC, display_name="Synchronize Clock")
        self._cmd_dict.add(Capability.SAMPLE_REFERENCE_OSCILLATOR, display_name="Sample Reference Oscillator")
        self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
        self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
        self._cmd_dict.add(Capability.TEST_EEPROM, display_name="Test EEPROM")
        self._cmd_dict.add(Capability.DISCOVER, display_name='Discover')
    def _send_wakeup(self):
        # Deliberate no-op: this instrument needs no wakeup characters.
        pass
    def _wakeup(self, timeout, delay=1):
        # Deliberate no-op: see _send_wakeup.
        pass
    ########################################################################
    # Unknown handlers.
    ########################################################################
    def _handler_unknown_enter(self, *args, **kwargs):
        """
        Enter unknown state.
        """
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    def _handler_unknown_discover(self, *args, **kwargs):
        """
        Discover current state
        @retval next_state, (next_state, result)
        """
        next_state = self._discover()
        result = []
        if next_state is ProtocolState.UNKNOWN:
            result = 'Failure to connect to instrument'
        return next_state, (next_state, result)
    def _handler_unknown_exit(self, *args, **kwargs):
        """
        Exit unknown state.
        """
        pass
    ########################################################################
    # Command handlers.
    ########################################################################
    def _handler_command_acquire_status(self, *args, **kwargs):
        """
        Run all Get?? commands. Concat command results and return
        @param args:
        @param kwargs:
        @return: next_state, (next_state, result)
        """
        timeout = kwargs.get('timeout', TIMEOUT)
        next_state = None
        self._do_cmd_resp(InstrumentCmds.GET_CONFIGURATION_DATA, timeout=timeout)
        self._do_cmd_resp(InstrumentCmds.GET_STATUS_DATA, timeout=timeout)
        self._do_cmd_resp(InstrumentCmds.GET_EVENT_COUNTER_DATA, timeout=timeout)
        self._do_cmd_resp(InstrumentCmds.GET_HARDWARE_DATA, timeout=timeout)
        result = self.wait_for_particles([DataParticleType.PREST_CONFIGURATION_DATA,
                                          DataParticleType.PREST_DEVICE_STATUS,
                                          DataParticleType.PREST_EVENT_COUNTER,
                                          DataParticleType.PREST_HARDWARE_DATA])
        return next_state, (next_state, result)
    def _handler_command_start_direct(self, *args, **kwargs):
        """
        Transition into direct access mode.
        @retval next_state, (next_state, result)
        """
        return ProtocolState.DIRECT_ACCESS, (ProtocolState.DIRECT_ACCESS, [])
    def _handler_command_recover_autosample(self, *args, **kwargs):
        """
        Reenter autosample mode. Used when our data handler detects
        as data sample.
        @retval next_state, (next_state, result)
        """
        next_state = ProtocolState.AUTOSAMPLE
        result = []
        self._async_agent_state_change(ResourceAgentState.STREAMING)
        return next_state, (next_state, result)
    def _handler_command_exit(self, *args, **kwargs):
        """
        Exit command state.
        """
        pass
    def _handler_command_test_eeprom(self, *args, **kwargs):
        """
        Run the instrument EEPROM self-test (long-running command).
        @retval next_state, (next_state, [result]) where result is True/False.
        """
        next_state = None
        kwargs['expected_prompt'] = GENERIC_PROMPT
        kwargs['timeout'] = LONG_TIMEOUT
        result = self._do_cmd_resp(InstrumentCmds.TEST_EEPROM, *args, **kwargs)
        return next_state, (next_state, [result])
    def _handler_command_sample_ref_osc(self, *args, **kwargs):
        """
        Transition to a separate state to allow the instrument enough time to acquire a sample
        """
        next_state = ProtocolState.OSCILLATOR
        result = []
        return next_state, (next_state, result)
    def _handler_oscillator_enter(self, *args, **kwargs):
        """
        Enter the Oscillator state
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        self._async_raise_fsm_event(ProtocolEvent.ACQUIRE_OSCILLATOR_SAMPLE)
    def _handler_oscillator_acquire_sample(self, *args, **kwargs):
        """
        Acquire a reference oscillator sample, then return to COMMAND.
        Exceptions from the command are logged rather than propagated so the
        FSM always gets back to COMMAND state.
        """
        result = None
        kwargs['expected_prompt'] = "</Sample>"
        kwargs['timeout'] = LONG_TIMEOUT
        try:
            result = self._do_cmd_resp(InstrumentCmds.SAMPLE_REFERENCE_OSCILLATOR, *args, **kwargs)
        except InstrumentException as e:
            log.error("Exception occurred when trying to acquire Reference Oscillator Sample: %s" % e)
        return ProtocolState.COMMAND, (ProtocolState.COMMAND, result)
    def _handler_oscillator_exit(self, *args, **kwargs):
        """
        Exit the Oscillator state
        """
        pass
    def _handler_command_clock_sync(self, *args, **kwargs):
        """
        execute a clock sync on the leading edge of a second change
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        next_state = None
        result = self._do_cmd_resp(InstrumentCmds.SET, Parameter.TIME, get_timestamp_delayed("%Y-%m-%dT%H:%M:%S"), **kwargs)
        return next_state, (next_state, [result])
    def _handler_command_start_autosample(self, *args, **kwargs):
        """
        Switch into autosample mode.
        @retval next_state, (next_state, result)
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        next_state = ProtocolState.AUTOSAMPLE
        result = []
        kwargs['expected_prompt'] = Prompt.COMMAND
        kwargs['timeout'] = 30
        log.info("SYNCING TIME WITH SENSOR")
        self._do_cmd_resp(InstrumentCmds.SET, Parameter.TIME, get_timestamp_delayed("%Y-%m-%dT%H:%M:%S"), **kwargs)
        # Issue start command and switch to autosample if successful.
        self._do_cmd_no_resp(InstrumentCmds.START_LOGGING, *args, **kwargs)
        return next_state, (next_state, result)
    ########################################################################
    # Autosample handlers.
    ########################################################################
    def _handler_autosample_clock_sync(self, *args, **kwargs):
        """
        execute a clock sync on the leading edge of a second change from
        autosample mode. For this command we have to move the instrument
        into command mode, do the clock sync, then switch back. If an
        exception is thrown we will try to get ourselves back into
        streaming and then raise that exception.
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        next_state = None
        try:
            # Switch to command mode,
            self._stop_logging()
            # Sync the clock
            result = self._do_cmd_resp(InstrumentCmds.SET, Parameter.TIME, get_timestamp_delayed("%Y-%m-%dT%H:%M:%S"), **kwargs)
        finally:
            # Switch back to streaming
            self._start_logging()
        return next_state, (next_state, result)
    def _handler_autosample_acquire_status(self, *args, **kwargs):
        """
        Run all status commands. Concat command results and return
        @param args:
        @param kwargs:
        """
        next_state = None
        try:
            # Switch to command mode
            self._stop_logging()
            timeout = kwargs.get('timeout', TIMEOUT)
            self._do_cmd_resp(InstrumentCmds.GET_CONFIGURATION_DATA, timeout=timeout)
            self._do_cmd_resp(InstrumentCmds.GET_STATUS_DATA, timeout=timeout)
            self._do_cmd_resp(InstrumentCmds.GET_EVENT_COUNTER_DATA, timeout=timeout)
            self._do_cmd_resp(InstrumentCmds.GET_HARDWARE_DATA, timeout=timeout)
            result = self.wait_for_particles([DataParticleType.PREST_CONFIGURATION_DATA,
                                              DataParticleType.PREST_DEVICE_STATUS,
                                              DataParticleType.PREST_EVENT_COUNTER,
                                              DataParticleType.PREST_HARDWARE_DATA])
        finally:
            # Switch back to streaming
            self._start_logging()
        return next_state, (next_state, result)
    def _handler_autosample_stop_autosample(self, *args, **kwargs):
        """
        Stop autosample and switch back to command mode.
        """
        next_state = ProtocolState.COMMAND
        result = []
        self._do_cmd_resp(InstrumentCmds.STOP_LOGGING, *args, **kwargs)
        return next_state, (next_state, result)
    def _handler_autosample_exit(self, *args, **kwargs):
        """
        Exit autosample state.
        """
        pass
    ########################################################################
    # Direct access handlers.
    ########################################################################
    def _handler_direct_access_enter(self, *args, **kwargs):
        """
        Enter direct access state.
        """
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        self._sent_cmds = []
    def _handler_direct_access_execute_direct(self, data):
        """
        Forward raw data to the instrument unmodified.
        @param data raw bytes/string from the direct-access client.
        """
        next_state = None
        result = []
        self._do_cmd_direct(data)
        # add sent command to list for 'echo' filtering in callback
        self._sent_cmds.append(data)
        return next_state, (next_state, result)
    def _handler_direct_access_stop_direct(self):
        """
        Leave direct access; the instrument resumes autosampling on its own.
        """
        next_state = ProtocolState.AUTOSAMPLE
        result = []
        return next_state, (next_state, result)
    def _handler_direct_access_exit(self, *args, **kwargs):
        """
        Exit direct access state.
        """
        pass
    ########################################################################
    # Response Parsers
    ########################################################################
    def _parse_set_response(self, response, prompt):
        """
        Parse handler for set command.
        @param response command response string.
        @param prompt prompt following command response.
        @throws InstrumentProtocolException if set command misunderstood.
        """
        if 'Error' in response:
            raise InstrumentParameterException('Protocol._parse_set_response : Set command not recognized: %s' % response)
    def _parse_generic_response(self, response, prompt):
        """
        Strip prompts and the <Executed/> tag from a Get?? response.
        @retval the cleaned response string.
        """
        response = response.replace("S>" + NEWLINE, "")
        response = response.replace("<Executed/>" + NEWLINE, "")
        response = response.replace("S>", "")
        return response
    def _parse_test_eeprom(self, response, prompt):
        """
        @return: True or False
        """
        if prompt != GENERIC_PROMPT:
            raise InstrumentProtocolException('TEST_EEPROM command not recognized: %s' % response)
        if "PASSED" in response:
            return True
        return False
    def _parse_sample_ref_osc(self, response, prompt):
        """
        Log (but do not raise) if the response does not look like a
        reference oscillator sample, then hand the response back unmodified.
        """
        if not SAMPLE_REF_OSC_MATCHER.search(response):
            log.error("Unexpected reply received from instrument in response to sample reference oscillator command.")
        return response
    ########################################################################
    # Command Builders
    ########################################################################
    def _build_set_command(self, cmd, param, val):
        """
        Build handler for set commands. SETparam=val followed by newline.
        String val constructed by param dict formatting function. <--- needs a better/clearer way
        @param param the parameter key to set.
        @param val the parameter value to set.
        @ retval The set command to be sent to the device.
        @throws InstrumentProtocolException if the parameter is not valid or
        if the formatting function could not accept the value passed.
        """
        try:
            str_val = self._param_dict.format(param, val)
            if str_val is None:
                raise InstrumentParameterException("Driver PARAM was None!!!!")
            set_cmd = 'set%s=%s%s' % (param, str_val, NEWLINE)
        except KeyError:
            raise InstrumentParameterException('Unknown driver parameter %s' % param)
        return set_cmd
    def _set_params(self, *args, **kwargs):
        """
        Issue commands to the instrument to set various parameters
        @throws InstrumentParameterException if no parameter dict supplied.
        """
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')
        self._verify_not_readonly(*args, **kwargs)
        for (key, val) in params.iteritems():
            log.debug("KEY = " + str(key) + " VALUE = " + str(val))
            self._do_cmd_resp(InstrumentCmds.SET, key, val, **kwargs)
        self._update_params()
    def apply_startup_params(self):
        """
        Apply startup parameters if the cached instrument configuration
        differs from the expected one.
        @throws InstrumentProtocolException if not in COMMAND or AUTOSAMPLE.
        """
        log.debug("CURRENT STATE: %s", self.get_current_state())
        if (self.get_current_state() != DriverProtocolState.COMMAND and
                self.get_current_state() != DriverProtocolState.AUTOSAMPLE):
            raise InstrumentProtocolException("Not in command or autosample state. Unable to apply startup params")
        # If we are in streaming mode and our configuration on the
        # instrument matches what we think it should be then we
        # don't need to do anything.
        if self._instrument_config_dirty():
            self._apply_params()
    def _update_params(self, *args, **kwargs):
        """
        Update the parameter dictionary. Wake the device then issue
        display status and display calibration commands. The parameter
        dict will match line output and update itself.
        @throws InstrumentTimeoutException if device cannot be timely woken.
        @throws InstrumentProtocolException if ds/dc misunderstood.
        """
        # Get old param dict config.
        old_config = self._param_dict.get_config()
        # Issue display commands and parse results.
        timeout = kwargs.get('timeout', TIMEOUT)
        log.debug("Run status command: %s" % InstrumentCmds.GET_STATUS_DATA)
        response = self._do_cmd_resp(InstrumentCmds.GET_STATUS_DATA, timeout=timeout)
        for line in response.split(NEWLINE):
            self._param_dict.update(line)
        log.debug("status command response: %r" % response)
        log.debug("Run configure command: %s" % InstrumentCmds.GET_CONFIGURATION_DATA)
        response = self._do_cmd_resp(InstrumentCmds.GET_CONFIGURATION_DATA, timeout=timeout)
        for line in response.split(NEWLINE):
            self._param_dict.update(line)
        log.debug("configure command response: %r" % response)
        # Get new param dict config. If it differs from the old config,
        # tell driver superclass to publish a config change event.
        new_config = self._param_dict.get_config()
        log.debug("new_config: %s == old_config: %s" % (new_config, old_config))
        if not dict_equal(old_config, new_config, ignore_keys=Parameter.TIME):
            log.debug("configuration has changed. Send driver event")
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
    ########################################################################
    # Private helpers.
    ########################################################################
    def _discover(self):
        """
        Determine instrument state. PREST is always sampling, so if we haven't received a particle within the last
        max sample period, then we've lost connection to the instrument.
        """
        state = DriverProtocolState.AUTOSAMPLE
        sample_period = self._param_dict.get(Parameter.SAMPLE_PERIOD)
        if not sample_period:
            sample_period = MAX_SAMPLE_DURATION
        particles = self.wait_for_particles([DataParticleType.PREST_REAL_TIME], time.time()+sample_period+1)
        if not particles:
            state = DriverProtocolState.UNKNOWN
        return state
    def _start_logging(self, timeout=TIMEOUT):
        """
        Command the instrument to start logging
        @param timeout: how long to wait for a prompt
        @return: True if successful
        @raise: InstrumentProtocolException if failed to start logging
        """
        self._do_cmd_no_resp(InstrumentCmds.START_LOGGING)
        return True
    def _stop_logging(self):
        """
        Command the instrument to stop logging.
        @return: True if successful
        """
        self._do_cmd_resp(InstrumentCmds.STOP_LOGGING)
        return True
    def _is_logging(self, *args, **kwargs):
        """
        Determine if we are in autosample
        @return: True - PREST is always in a logging state or will return to logging after inactivity
        """
        return True
    @staticmethod
    def _bool_to_int_string(v):
        # return a string of 1 or 0 to indicate true/false
        if v:
            return "1"
        return "0"
    def _build_param_dict(self):
        """
        Populate the parameter dictionary with parameters.
        For each parameter key, add match string, match lambda function,
        and value formatting function for set commands.
        """
        self._param_dict.add(Parameter.TIME,
                             SBE54tpsStatusDataParticle.LINE2,
                             lambda match: match.group(1),
                             str,
                             type=ParameterDictType.STRING,
                             expiration=0,
                             visibility=ParameterDictVisibility.READ_ONLY,
                             display_name="Instrument Time",
                             description="Timestamp of last clock sync.",
                             units="Y-M-DTH:M:S")
        self._param_dict.add(Parameter.SAMPLE_PERIOD,
                             SBE54tpsConfigurationDataParticle.LINE28,
                             lambda match: int(match.group(1)),
                             self._int_to_string,
                             type=ParameterDictType.INT,
                             display_name="Sample Period",
                             range=(1, MAX_SAMPLE_DURATION),
                             description="Duration of each pressure measurement (1-240).",
                             units=Units.SECOND,
                             default_value=15,
                             startup_param=True,
                             direct_access=True)
        self._param_dict.add(Parameter.BATTERY_TYPE,
                             SBE54tpsConfigurationDataParticle.LINE24,
                             lambda match: int(match.group(1)),
                             self._int_to_string,
                             type=ParameterDictType.INT,
                             display_name="Battery Type",
                             range={'lithium': 0, 'alkaline': 1},
                             description="Battery type: (0:lithium | 1:alkaline) ",
                             default_value=1,
                             visibility=ParameterDictVisibility.IMMUTABLE,
                             startup_param=True,
                             direct_access=True)
        self._param_dict.add(Parameter.ENABLE_ALERTS,
                             SBE54tpsConfigurationDataParticle.LINE26,
                             lambda match: bool(int(match.group(1))),
                             self._bool_to_int_string,
                             type=ParameterDictType.BOOL,
                             display_name="Enable Alerts",
                             range={'True': True, 'False': False},
                             description="Enable output of alerts (true | false)",
                             default_value=1,
                             visibility=ParameterDictVisibility.IMMUTABLE,
                             startup_param=True,
                             direct_access=True)
def create_playback_protocol(callback):
    """Return a Protocol wired only to *callback*, for playback (no prompts/newline)."""
    return Protocol(None, None, callback)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ops used with embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
def _AsLong(array):
"""Casts arrays elements to long type. Used to convert from numpy tf."""
return [int(x) for x in array]
class ScatterAddSubTest(tf.test.TestCase):
  """Tests tf.scatter_add / tf.scatter_sub against numpy reference math."""
  def _TestCase(self, shape, indices, scatter_op=tf.scatter_add):
    """Run a random test case with the given shape and indices.
    Args:
      shape: Shape of the parameters array.
      indices: One-dimensional array of ints, the indices of the last dimension
               of the parameters to update.
      scatter_op: ScatterAdd or ScatterSub.
    """
    # NOTE(review): setUp is invoked here per-case rather than via an
    # overridden setUp(); presumably intentional since _TestCase is called
    # several times per test method -- confirm.
    super(ScatterAddSubTest, self).setUp()
    with self.test_session(use_gpu=False):
      # Create a random parameter array of given shape
      p_init = np.random.rand(*shape).astype("f")
      # Create the shape of the update array. All dimensions except the last
      # match the parameter array, the last dimension equals the # of indices.
      vals_shape = [len(indices)] + shape[1:]
      vals_init = np.random.rand(*vals_shape).astype("f")
      v_i = [float(x) for x in vals_init.ravel()]
      p = tf.Variable(p_init)
      vals = tf.constant(v_i, shape=vals_shape, name="vals")
      ind = tf.constant(indices, dtype=tf.int32)
      p2 = scatter_op(p, ind, vals, name="updated_p")
      # p = init
      tf.initialize_all_variables().run()
      # p += vals
      result = p2.eval()
      # Compute the expected 'p' using numpy operations.
      # NOTE: the loop variable below shadows the `ind` tensor, which is no
      # longer needed at this point.
      for i, ind in enumerate(indices):
        if scatter_op == tf.scatter_add:
          p_init.reshape(shape[0], -1)[ind, :] += (
              vals_init.reshape(vals_shape[0], -1)[i, :])
        else:
          p_init.reshape(shape[0], -1)[ind, :] -= (
              vals_init.reshape(vals_shape[0], -1)[i, :])
      self.assertTrue(all((p_init == result).ravel()))
  def testNoRepetitions(self):
    """Scatter-add with all-distinct indices."""
    self._TestCase([2, 2], [1])
    self._TestCase([4, 4, 4], [2, 0])
    self._TestCase([43, 20, 10, 10], [42, 5, 6, 1, 3, 5, 7, 9])
  def testWithRepetitions(self):
    """Scatter-add with repeated indices (updates must accumulate)."""
    self._TestCase([2, 2], [1, 1])
    self._TestCase([5, 3, 9, 5], [2, 0, 4, 1, 3, 1, 4, 0, 4, 3])
    self._TestCase([32, 4, 4], [31] * 8)
  def testRandom(self):
    """Scatter-add on random rank-4 shapes and random indices."""
    # Random shapes of rank 4, random indices
    for _ in range(5):
      shape = np.random.randint(1, 20, size=4)
      indices = np.random.randint(shape[0], size=2 * shape[0])
      self._TestCase(_AsLong(list(shape)), list(indices))
  def testSubRandom(self):
    """Scatter-sub on random rank-4 shapes and random indices."""
    # Random shapes of rank 4, random indices
    for _ in range(5):
      shape = np.random.randint(1, 20, size=4)
      indices = np.random.randint(shape[0], size=2 * shape[0])
      self._TestCase(_AsLong(list(shape)), list(indices),
                     tf.scatter_sub)
  def testWrongShape(self):
    """Mismatched indices/values or var/values shapes must raise ValueError."""
    # Indices and values mismatch.
    var = tf.Variable(tf.zeros(shape=[1024, 64, 64], dtype=tf.float32))
    indices = tf.placeholder(tf.int32, shape=[32])
    values = tf.placeholder(tf.float32, shape=[33, 64, 64])
    with self.assertRaises(ValueError):
      tf.scatter_add(var, indices, values)
    # Var and values mismatch.
    values = tf.placeholder(tf.float32, shape=[32, 64, 63])
    with self.assertRaises(ValueError):
      tf.scatter_add(var, indices, values)
def _PName(param_id):
return "p" + str(param_id)
def _EmbeddingParams(num_shards, vocab_size,
                     dtype=tf.float32,
                     shape=None,
                     use_shapeless_placeholder=False):
  """Builds sharded embedding parameters and matching random numpy values.

  Returns a tuple (tensors, np_values, feed_dict) where `tensors` holds one
  tf tensor per shard, `np_values` maps "<name>:0" to that shard's numpy
  array, and `feed_dict` maps each tensor's name to the same array.
  """
  if not shape:
    shape = [10]
  # Rows are split as evenly as possible; the first `excess` shards get one
  # extra row each.
  base_rows, excess = divmod(vocab_size, num_shards)
  np_type = "f" if dtype == tf.float32 else "d"
  tensors = []
  np_values = {}
  feed_dict = {}
  for shard in range(num_shards):
    rows = base_rows + 1 if shard < excess else base_rows
    shard_shape = [rows] + shape
    name = _PName(shard)
    if use_shapeless_placeholder:
      tensor = tf.placeholder(dtype, shape=None, name=name)
    else:
      tensor = tf.constant(1.0, shape=shard_shape, dtype=dtype, name=name)
    tensors.append(tensor)
    value = np.random.rand(*shard_shape).astype(np_type) + 1
    np_values[name + ":0"] = value
    feed_dict[tensor.name] = value
  return tensors, np_values, feed_dict
def _EmbeddingResult(params, id_vals, num_shards, vocab_size,
                     partition_strategy="mod",
                     weight_vals=None):
  """Numpy reference implementation of a (weighted) embedding lookup.

  Args:
    params: dict mapping "<shard name>:0" to that shard's numpy array,
        as produced by _EmbeddingParams.
    id_vals: sequence of ids, or sequence of id-lists for aggregated lookup.
    num_shards: number of parameter shards.
    vocab_size: total number of embedding rows across shards.
    partition_strategy: "mod" or "div", mirroring tf.nn.embedding_lookup.
    weight_vals: optional per-id weights; defaults to all ones.

  Returns:
    (values, weights) float32 arrays: the weighted-sum embedding per entry
    and the corresponding weight sums.
  """
  if weight_vals is None:
    weight_vals = np.copy(id_vals)
    weight_vals.fill(1)
  values = []
  weights = []
  for ids, wts in zip(id_vals, weight_vals):
    val_aggr = None
    wt_aggr = None
    # A bare integer id is treated as a singleton list so both the scalar
    # and aggregated cases share one code path.
    if isinstance(ids, tf.compat.integral_types):
      ids = [ids]
      wts = [wts]
    for i, wt_val in zip(ids, wts):
      if partition_strategy == "mod":
        # Shard by id modulo num_shards; row index is id // num_shards.
        val = np.copy(params[_PName(i % num_shards) + ":0"][
            i // num_shards, :]) * wt_val
      elif partition_strategy == "div":
        # Contiguous ranges of ids per shard; the first `extras` shards
        # hold one extra id each.
        ids_per_partition, extras = divmod(vocab_size, num_shards)
        threshold = extras * (ids_per_partition + 1)
        if i < threshold:
          partition = i // (ids_per_partition + 1)
          offset = i % (ids_per_partition + 1)
        else:
          partition = extras + (i - threshold) // ids_per_partition
          offset = (i - threshold) % ids_per_partition
        val = np.copy(
            params[_PName(partition) + ":0"][offset, :]) * wt_val
      else:
        assert False
      if val_aggr is None:
        assert wt_aggr is None
        val_aggr = val
        wt_aggr = wt_val
      else:
        assert wt_aggr is not None
        val_aggr += val
        wt_aggr += wt_val
    values.append(val_aggr)
    weights.append(wt_aggr)
  values = np.array(values).astype(np.float32)
  weights = np.array(weights).astype(np.float32)
  return values, weights
class EmbeddingLookupTest(tf.test.TestCase):
# This test looks up [0, 0] in a parameter matrix sharded 2 ways. Since
# both the ids are in the first shard, one of the resulting lookup
# vector is going to be empty. The subsequent DivOp fails because of that.
# TODO(keveman): Disabling the test until the underlying problem is fixed.
  def testSimpleSharded(self):
    """Looks up [0, 0] in a 2-way sharded matrix; compares against numpy."""
    with self.test_session():
      num_shards = 2
      vocab_size = 4
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
      id_vals = np.array([0, 0])
      ids = tf.constant(list(id_vals), dtype=tf.int32)
      print("Construct ids", ids.get_shape())
      embedding = tf.nn.embedding_lookup(p, ids)
      tf_result = embedding.eval(feed_dict=feed_dict)
      np_result, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
      self.assertAllEqual(np_result, tf_result)
      self.assertShapeEqual(np_result, embedding)
  def testShardedModPartitioningInt32Ids(self):
    """Lookup with "mod" partitioning and int32 ids matches numpy reference."""
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = tf.constant(list(id_vals), dtype=tf.int32)
      embedding = tf.nn.embedding_lookup(p, ids)
      tf_result = embedding.eval(feed_dict=feed_dict)
      np_result, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
      self.assertAllEqual(np_result, tf_result)
      self.assertShapeEqual(np_result, embedding)
def testShardedModPartitioningInt64Ids(self):
with self.test_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = tf.constant(list(id_vals), dtype=tf.int64)
embedding = tf.nn.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
def testShardedDivPartitioningInt32Ids(self):
with self.test_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = tf.constant(list(id_vals), dtype=tf.int32)
embedding = tf.nn.embedding_lookup(p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
def testShardedDivPartitioningInt64Ids(self):
with self.test_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = tf.constant(list(id_vals), dtype=tf.int64)
embedding = tf.nn.embedding_lookup(p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
def testShardedDivPartitioningUnknownParamShape(self):
with self.test_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
# We clear parameter shapes, to test when shape is not statically known.
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, use_shapeless_placeholder=True)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = tf.constant(list(id_vals), dtype=tf.int64)
embedding = tf.nn.embedding_lookup(p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
def testGradientsEmbeddingLookup(self):
vocab_size = 9
num_ids = 10
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf.logging.vlog(1, id_vals)
for ids_shape in [(10,), (2, 5)]:
for num_shards in [1, 3]:
with self.test_session():
ids = tf.constant(id_vals, shape=ids_shape, dtype=tf.int32)
x, params, _ = _EmbeddingParams(
num_shards, vocab_size, shape=[2])
y = tf.nn.embedding_lookup(x, ids)
y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = tf.test.compute_gradient_error(x,
x_shape,
y,
y_shape,
x_init_value=x_init_value)
self.assertLess(err, 1e-4)
def testGradientsEmbeddingLookupWithComputedParams(self):
vocab_size = 9
num_ids = 5
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf.logging.vlog(1, id_vals)
for num_shards in [1, 3]:
with self.test_session():
ids = tf.constant(id_vals, dtype=tf.int32)
x, params, _ = _EmbeddingParams(
num_shards, vocab_size, shape=[2])
# This will force a conversion from IndexedSlices to Tensor.
x_squared = [tf.square(elem) for elem in x]
y = tf.nn.embedding_lookup(x_squared, ids)
y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = tf.test.compute_gradient_error(x,
x_shape,
y,
y_shape,
x_init_value=x_init_value)
self.assertLess(err, 1e-3)
def testConstructionNonSharded(self):
with tf.Graph().as_default():
p = tf.Variable(tf.zeros(shape=[100, 100], dtype=tf.float32))
ids = tf.constant([0, 1, 1, 7], dtype=tf.int32)
tf.nn.embedding_lookup([p], ids)
def testConstructionSharded(self):
with tf.Graph().as_default():
p = []
for _ in range(2):
p += [tf.Variable(tf.zeros(shape=[100, 100], dtype=tf.float32))]
ids = tf.constant([0, 1, 1, 17], dtype=tf.int32)
tf.nn.embedding_lookup(p, ids)
def testHigherRank(self):
np.random.seed(8)
with self.test_session():
for params_shape in (12,), (6, 3):
params = np.random.randn(*params_shape)
for ids_shape in (3, 2), (4, 3):
ids = np.random.randint(params.shape[0],
size=np.prod(ids_shape)).reshape(ids_shape)
# Compare nonsharded to gather
simple = tf.nn.embedding_lookup(params, ids).eval()
self.assertAllEqual(simple, tf.gather(params, ids).eval())
# Run a few random sharded versions
for procs in 1, 2, 3:
stride = procs * tf.range(params.shape[0] // procs)
split_params = [tf.gather(params, stride + p)
for p in xrange(procs)]
sharded = tf.nn.embedding_lookup(split_params, ids).eval()
self.assertAllEqual(simple, sharded)
class EmbeddingLookupSparseTest(tf.test.TestCase):
  """Tests tf.nn.embedding_lookup_sparse against a NumPy reference."""

  def _RandomIdsAndWeights(self, batch_size, vocab_size):
    """Build random SparseTensor ids/weights plus their dense originals.

    Returns (sp_ids, sp_weights, ids, weights, vals_per_batch_entry)
    where the sparse tensors have shape [batch_size, max_val_per_entry]
    and each batch entry holds between 1 and 5 values.
    """
    max_val_per_entry = 6
    vals_per_batch_entry = np.random.randint(
        1, max_val_per_entry, size=batch_size)
    num_vals = np.sum(vals_per_batch_entry)
    ids = np.random.randint(vocab_size, size=num_vals)
    # Weights in [1, 2) so they are never zero.
    weights = 1 + np.random.rand(num_vals)
    indices = []
    for batch_entry, num_val in enumerate(vals_per_batch_entry):
      for val_index in range(num_val):
        indices.append([batch_entry, val_index])
    shape = [batch_size, max_val_per_entry]
    sp_ids = tf.SparseTensor(
        tf.constant(indices, tf.int64),
        tf.constant(ids, tf.int32),
        tf.constant(shape, tf.int64))
    sp_weights = tf.SparseTensor(
        tf.constant(indices, tf.int64),
        tf.constant(weights, tf.float32),
        tf.constant(shape, tf.int64))
    return sp_ids, sp_weights, ids, weights, vals_per_batch_entry

  def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
    """Split the flat `vals` into per-batch-entry lists."""
    grouped_vals = []
    index = 0
    for num_val in vals_per_batch_entry:
      grouped_vals.append(list(vals[index: (index + num_val)]))
      index += num_val
    return grouped_vals

  # Exercises all combinations of shard count, combiner, dtype and
  # presence/absence of weights.
  def testEmbeddingLookupSparse(self):
    vocab_size = 13
    batch_size = 10
    param_shape = [2, 5]
    expected_lookup_result_shape = [None] + param_shape
    sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
        self._RandomIdsAndWeights(batch_size, vocab_size))
    grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
    grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
    grouped_ignored_weights = self._GroupByBatchEntry(
        np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)
    for num_shards, combiner, dtype, ignore_weights in itertools.product(
        [1, 5],
        ["sum", "mean"],
        [tf.float32, tf.float64],
        [True, False]):
      with self.test_session():
        p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size,
                                                shape=param_shape,
                                                dtype=dtype)
        embedding_sum = tf.nn.embedding_lookup_sparse(
            p, sp_ids, None if ignore_weights else sp_weights,
            combiner=combiner)
        self.assertEqual(embedding_sum.get_shape().as_list(),
                         expected_lookup_result_shape)
        tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)
        np_embedding_sum, np_weight_sum = _EmbeddingResult(
            params, grouped_ids, num_shards, vocab_size,
            weight_vals=grouped_ignored_weights
            if ignore_weights else grouped_weights)
        if combiner == "mean":
          # The reference computes a weighted sum; divide by the weight
          # totals to get the mean combiner's result.
          np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
        self.assertAllClose(np_embedding_sum, tf_embedding_sum)

  # Numeric gradient check over the same parameter grid.
  def testGradientsEmbeddingLookupSparse(self):
    vocab_size = 12
    batch_size = 4
    param_shape = [2, 3]
    sp_ids, sp_weights, _, _, _ = (
        self._RandomIdsAndWeights(batch_size, vocab_size))
    for num_shards, combiner, dtype, ignore_weights in itertools.product(
        [1, 3],
        ["sum", "mean"],
        [tf.float32, tf.float64],
        [True, False]):
      with self.test_session():
        x, params, _ = _EmbeddingParams(num_shards, vocab_size,
                                        shape=param_shape,
                                        dtype=dtype)
        y = tf.nn.embedding_lookup_sparse(
            x, sp_ids, None if ignore_weights else sp_weights,
            combiner=combiner)
        x_name = [_PName(i) for i in range(num_shards)]
        x_init_value = [params[x_n + ":0"] for x_n in x_name]
        x_shape = [i.shape for i in x_init_value]
        y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
        err = tf.test.compute_gradient_error(x,
                                             x_shape,
                                             y,
                                             y_shape,
                                             x_init_value=x_init_value)
        # Looser tolerance for float32.
        self.assertLess(err, 1e-5 if dtype == tf.float64 else 2e-3)

  # Mismatched dense_shape between ids and weights must be rejected at
  # graph-construction time.
  def testIncompatibleShapes(self):
    with self.test_session():
      x, _, _ = _EmbeddingParams(1, 10, dtype=tf.float32)
      sp_ids = tf.SparseTensor(
          tf.constant([[0, 0], [0, 1], [1, 0]], tf.int64),
          tf.constant([0, 1, 2], tf.int32),
          tf.constant([2, 2], tf.int64))
      sp_weights = tf.SparseTensor(
          tf.constant([[0, 0], [0, 1]], tf.int64),
          tf.constant([12.0, 5.0], tf.float32),
          tf.constant([1, 2], tf.int64))
      with self.assertRaises(ValueError):
        tf.nn.embedding_lookup_sparse(x, sp_ids, sp_weights, combiner="mean")
class DynamicStitchOpTest(tf.test.TestCase):
  """Tests tf.dynamic_stitch on CPU and GPU.

  NOTE(review): the testCint32*/testInt32* pairs have byte-identical
  bodies; the C-prefixed variants presumably intended a different dtype
  or device configuration — confirm against the op's kernel registry.
  """

  def testCint32Cpu(self):
    with self.test_session(use_gpu=False):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      # Index 2 appears in both pieces; the later piece's value (1) wins.
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])

  def testCint32Gpu(self):
    with self.test_session(use_gpu=True):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])

  def testInt32Cpu(self):
    with self.test_session(use_gpu=False):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])

  def testInt32Gpu(self):
    with self.test_session(use_gpu=True):
      indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])

  def testSumGradArgs(self):
    with self.test_session(use_gpu=False):
      indices = [tf.convert_to_tensor([0, 1, 2, 3]),
                 tf.convert_to_tensor([2, 3])]
      values = [tf.convert_to_tensor([2, 3, 5, 7]), tf.convert_to_tensor([1, 1])]
      self.assertAllEqual(
          tf.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])

  # We expect that the values are merged in order.
  def testStitchOrder(self):
    with self.test_session():
      indices = []
      np_values = []
      values = []
      # All ten pieces cover the same indices 0..99; the last piece
      # provided must be the one that survives.
      for _ in range(10):
        indices.extend([tf.convert_to_tensor(np.arange(100).astype(np.int32))])
        np_values.extend([np.random.uniform(size=100)])
        values.extend([tf.convert_to_tensor(np_values[-1])])
      stitched = tf.dynamic_stitch(indices, values).eval()
      self.assertAllEqual(np_values[-1], stitched)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
|
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from scipy._lib._numpy_compat import broadcast_to
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
    """A binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `binom` is::

        binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)

    for ``k`` in ``{0, 1,..., n}``.

    `binom` takes ``n`` and ``p`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, n, p):
        # Sampling is delegated to numpy's binomial generator.
        return self._random_state.binomial(n, p, self._size)
    def _argcheck(self, n, p):
        # Side effect: the upper support bound depends on the shape
        # parameter n, so it is set here.
        self.b = n
        return (n >= 0) & (p >= 0) & (p <= 1)
    def _logpmf(self, x, n, p):
        k = floor(x)
        # log of the binomial coefficient via gammaln for stability;
        # xlogy/xlog1py return 0 (not nan) when k or n-k is 0.
        combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
        return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
    def _pmf(self, x, n, p):
        return exp(self._logpmf(x, n, p))
    def _cdf(self, x, n, p):
        k = floor(x)
        # Regularized binomial CDF from scipy.special.
        vals = special.bdtr(k, n, p)
        return vals
    def _sf(self, x, n, p):
        k = floor(x)
        return special.bdtrc(k, n, p)
    def _ppf(self, q, n, p):
        # Invert the CDF with bdtrik, then step back one if the smaller
        # value already covers q.
        vals = ceil(special.bdtrik(q, n, p))
        vals1 = np.maximum(vals - 1, 0)
        temp = special.bdtr(vals1, n, p)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, n, p, moments='mv'):
        # Skew/kurtosis are only computed when requested via `moments`.
        q = 1.0 - p
        mu = n * p
        var = n * p * q
        g1, g2 = None, None
        if 's' in moments:
            g1 = (q - p) / sqrt(var)
        if 'k' in moments:
            g2 = (1.0 - 6*p*q) / var
        return mu, var, g1, g2
    def _entropy(self, n, p):
        # Direct summation of entr(pmf) over the finite support 0..n.
        k = np.r_[0:n + 1]
        vals = self._pmf(k, n, p)
        return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
class bernoulli_gen(binom_gen):
    """A Bernoulli discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `bernoulli` is::

        bernoulli.pmf(k) = 1-p if k = 0
                         = p   if k = 1

    for ``k`` in ``{0, 1}``.

    `bernoulli` takes ``p`` as shape parameter.

    %(after_notes)s

    %(example)s

    """
    # A Bernoulli variable is a binomial with n fixed to 1; every method
    # below delegates to the binom implementation with n=1.
    def _rvs(self, p):
        return binom_gen._rvs(self, 1, p)
    def _argcheck(self, p):
        # Unlike binom_gen._argcheck, no support bound needs updating.
        return (p >= 0) & (p <= 1)
    def _logpmf(self, x, p):
        return binom._logpmf(x, 1, p)
    def _pmf(self, x, p):
        return binom._pmf(x, 1, p)
    def _cdf(self, x, p):
        return binom._cdf(x, 1, p)
    def _sf(self, x, p):
        return binom._sf(x, 1, p)
    def _ppf(self, q, p):
        return binom._ppf(q, 1, p)
    def _stats(self, p):
        return binom._stats(1, p)
    def _entropy(self, p):
        # Closed form: H = -p*log(p) - (1-p)*log(1-p).
        return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
    """A negative binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `nbinom` is::

        nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k

    for ``k >= 0``.

    `nbinom` takes ``n`` and ``p`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, n, p):
        return self._random_state.negative_binomial(n, p, self._size)
    def _argcheck(self, n, p):
        # Only positivity of n is required, not integrality — see
        # _sf_skip's note about 0 < n < 1.
        return (n > 0) & (p >= 0) & (p <= 1)
    def _pmf(self, x, n, p):
        return exp(self._logpmf(x, n, p))
    def _logpmf(self, x, n, p):
        # log choose(x+n-1, x) via gammaln; xlog1py handles p == 1.
        coeff = gamln(n+x) - gamln(x+1) - gamln(n)
        return coeff + n*log(p) + special.xlog1py(x, -p)
    def _cdf(self, x, n, p):
        k = floor(x)
        # CDF expressed through the regularized incomplete beta function.
        return special.betainc(n, k+1, p)
    def _sf_skip(self, x, n, p):
        # skip because special.nbdtrc doesn't work for 0<n<1
        k = floor(x)
        return special.nbdtrc(k, n, p)
    def _ppf(self, q, n, p):
        # Invert via nbdtrik, stepping back one if the smaller value
        # already covers q.
        vals = ceil(special.nbdtrik(q, n, p))
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, n, p)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, n, p):
        Q = 1.0 / p
        P = Q - 1.0
        mu = n*P
        var = n*P*Q
        g1 = (Q+P)/sqrt(n*P*Q)
        g2 = (1.0 + 6*P*Q) / (n*P*Q)
        return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
    """A geometric discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `geom` is::

        geom.pmf(k) = (1-p)**(k-1)*p

    for ``k >= 1``.

    `geom` takes ``p`` as shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, p):
        # Sampling is delegated to numpy's geometric generator.
        return self._random_state.geometric(p, size=self._size)

    def _argcheck(self, p):
        # p must be a valid probability.
        return (p >= 0) & (p <= 1)

    def _pmf(self, k, p):
        # P(K = k) = p * (1-p)**(k-1)
        return p * np.power(1 - p, k - 1)

    def _logpmf(self, k, p):
        # Written with xlog1py for accuracy when p is small.
        return log(p) + special.xlog1py(k - 1, -p)

    def _cdf(self, x, p):
        k = floor(x)
        # CDF = 1 - (1-p)**k, evaluated through log space.
        return -expm1(k * log1p(-p))

    def _sf(self, x, p):
        return np.exp(self._logsf(x, p))

    def _logsf(self, x, p):
        return floor(x) * log1p(-p)

    def _ppf(self, q, p):
        # Invert the CDF analytically, then correct a possible
        # off-by-one introduced by the ceil.
        candidate = ceil(log(1.0 - q) / log(1 - p))
        previous = candidate - 1
        use_previous = (self._cdf(previous, p) >= q) & (candidate > 0)
        return np.where(use_previous, previous, candidate)

    def _stats(self, p):
        mean = 1.0 / p
        q = 1.0 - p
        variance = q / p / p
        skew = (2.0 - p) / sqrt(q)
        kurt = np.polyval([1, -6, 6], p) / (1.0 - p)
        return mean, variance, skew, kurt
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
    """A hypergeometric discrete random variable.

    The hypergeometric distribution models drawing objects from a bin.
    M is the total number of objects, n is total number of Type I objects.
    The random variate represents the number of Type I objects in N drawn
    without replacement from the total population.

    %(before_notes)s

    Notes
    -----
    The probability mass function is defined as::

        pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),

    for max(0, N - (M-n)) <= k <= min(n, N)

    %(after_notes)s

    Examples
    --------
    >>> from scipy.stats import hypergeom
    >>> import matplotlib.pyplot as plt

    Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
    we want to know the probability of finding a given number of dogs if we
    choose at random 12 of the 20 animals, we can initialize a frozen
    distribution and plot the probability mass function:

    >>> [M, n, N] = [20, 7, 12]
    >>> rv = hypergeom(M, n, N)
    >>> x = np.arange(0, n+1)
    >>> pmf_dogs = rv.pmf(x)

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, pmf_dogs, 'bo')
    >>> ax.vlines(x, 0, pmf_dogs, lw=2)
    >>> ax.set_xlabel('# of dogs in our group of chosen animals')
    >>> ax.set_ylabel('hypergeom PMF')
    >>> plt.show()

    Instead of using a frozen distribution we can also use `hypergeom`
    methods directly. To for example obtain the cumulative distribution
    function, use:

    >>> prb = hypergeom.cdf(x, M, n, N)

    And to generate random numbers:

    >>> R = hypergeom.rvs(M, n, N, size=10)

    """
    def _rvs(self, M, n, N):
        # numpy parameterizes as (ngood, nbad, nsample).
        return self._random_state.hypergeometric(n, M-n, N, size=self._size)
    def _argcheck(self, M, n, N):
        cond = (M > 0) & (n >= 0) & (N >= 0)
        cond &= (n <= M) & (N <= M)
        # Side effect: the support depends on the shape parameters.
        self.a = np.maximum(N-(M-n), 0)
        self.b = np.minimum(n, N)
        return cond
    def _logpmf(self, k, M, n, N):
        # log of choose(good,k)*choose(bad,N-k)/choose(tot,N), expanded
        # into gammaln terms for numerical stability.
        tot, good = M, n
        bad = tot - good
        return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
            - gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
            + gamln(N+1)
    def _pmf(self, k, M, n, N):
        # same as the following but numerically more precise
        # return comb(good, k) * comb(bad, N-k) / comb(tot, N)
        return exp(self._logpmf(k, M, n, N))
    def _stats(self, M, n, N):
        # tot, good, sample_size = M, n, N
        # "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
        M, n, N = 1.*M, 1.*n, 1.*N
        m = M - n
        p = n/M
        mu = N*p
        var = m*n*N*(M - N)*1.0/(M*M*(M-1))
        g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
        g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
        g2 *= (M-1)*M*M
        g2 += 6.*n*N*(M-N)*m*(5.*M-6)
        g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
        return mu, var, g1, g2
    def _entropy(self, M, n, N):
        # Direct summation of entr(pmf) over the finite support.
        k = np.r_[N - (M - n):min(n, N) + 1]
        vals = self.pmf(k, M, n, N)
        return np.sum(entr(vals), axis=0)
    def _sf(self, k, M, n, N):
        """More precise calculation, 1 - cdf doesn't cut it."""
        # This for loop is needed because `k` can be an array. If that's the
        # case, the sf() method makes M, n and N arrays of the same shape. We
        # therefore unpack all inputs args, so we can do the manual
        # integration.
        res = []
        for quant, tot, good, draw in zip(k, M, n, N):
            # Manual integration over probability mass function. More accurate
            # than integrate.quad.
            k2 = np.arange(quant + 1, draw + 1)
            res.append(np.sum(self._pmf(k2, tot, good, draw)))
        return np.asarray(res)
    def _logsf(self, k, M, n, N):
        """
        More precise calculation than log(sf)
        """
        res = []
        for quant, tot, good, draw in zip(k, M, n, N):
            # Integration over probability mass function using logsumexp
            k2 = np.arange(quant + 1, draw + 1)
            res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
    """A Logarithmic (Log-Series, Series) discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `logser` is::

        logser.pmf(k) = - p**k / (k*log(1-p))

    for ``k >= 1``.

    `logser` takes ``p`` as shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, p):
        # looks wrong for p>0.5, too few k=1
        # trying to use generic is worse, no k=1 at all
        return self._random_state.logseries(p, size=self._size)
    def _argcheck(self, p):
        return (p > 0) & (p < 1)
    def _pmf(self, k, p):
        # -p**k / (k * log(1-p)); log(1-p) < 0, so the result is positive.
        return -np.power(p, k) * 1.0 / k / log(1 - p)
    def _stats(self, p):
        # Moments from the raw moments mu_np = E[X^n]; r = log(1-p) < 0.
        r = log(1 - p)
        mu = p / (p - 1.0) / r
        mu2p = -p / r / (p - 1.0)**2
        var = mu2p - mu*mu
        mu3p = -p / r * (1.0+p) / (1.0 - p)**3
        mu3 = mu3p - 3*mu*mu2p + 2*mu**3
        g1 = mu3 / np.power(var, 1.5)
        mu4p = -p / r * (
            1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
        mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
        g2 = mu4 / var**2 - 3.0
        return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
    """A Poisson discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `poisson` is::

        poisson.pmf(k) = exp(-mu) * mu**k / k!

    for ``k >= 0``.

    `poisson` takes ``mu`` as shape parameter.

    %(after_notes)s

    %(example)s

    """

    # Override rv_discrete._argcheck to allow mu=0.
    def _argcheck(self, mu):
        return mu >= 0
    def _rvs(self, mu):
        return self._random_state.poisson(mu, self._size)
    def _logpmf(self, k, mu):
        # xlogy returns 0 for k=0, mu=0 instead of nan.
        Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
        return Pk
    def _pmf(self, k, mu):
        return exp(self._logpmf(k, mu))
    def _cdf(self, x, mu):
        k = floor(x)
        # Regularized Poisson CDF from scipy.special.
        return special.pdtr(k, mu)
    def _sf(self, x, mu):
        k = floor(x)
        return special.pdtrc(k, mu)
    def _ppf(self, q, mu):
        # Invert via pdtrik, stepping back one if the smaller value
        # already covers q.
        vals = ceil(special.pdtrik(q, mu))
        vals1 = np.maximum(vals - 1, 0)
        temp = special.pdtr(vals1, mu)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, mu):
        var = mu
        tmp = np.asarray(mu)
        # Skew and kurtosis diverge as mu -> 0; _lazywhere avoids
        # evaluating 1/mu there and returns inf instead.
        mu_nonzero = tmp > 0
        g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
        g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
        return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
    """A Planck discrete exponential random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `planck` is::

        planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)

    for ``k*lambda_ >= 0``.

    `planck` takes ``lambda_`` as shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, lambda_):
        # The support flips with the sign of lambda_: non-negative
        # integers when lambda_ > 0, non-positive integers otherwise.
        self.a = np.where(lambda_ > 0, 0, -np.inf)
        self.b = np.where(lambda_ > 0, np.inf, 0)
        return lambda_ != 0

    def _pmf(self, k, lambda_):
        # Normalization constant times the exponential decay term.
        norm = 1 - exp(-lambda_)
        return norm * exp(-lambda_ * k)

    def _cdf(self, x, lambda_):
        k = floor(x)
        return 1 - exp(-lambda_ * (k + 1))

    def _ppf(self, q, lambda_):
        # Analytic inverse of the CDF, then correct a possible
        # off-by-one introduced by the ceil.
        candidate = ceil(-1.0 / lambda_ * log1p(-q) - 1)
        previous = (candidate - 1).clip(self.a, np.inf)
        use_previous = self._cdf(previous, lambda_) >= q
        return np.where(use_previous, previous, candidate)

    def _stats(self, lambda_):
        mean = 1 / (exp(lambda_) - 1)
        variance = exp(-lambda_) / (expm1(-lambda_))**2
        skew = 2 * cosh(lambda_ / 2.0)
        kurt = 4 + 2 * cosh(lambda_)
        return mean, variance, skew, kurt

    def _entropy(self, lambda_):
        # Closed form: H = lam*exp(-lam)/C - log(C) with C = 1 - exp(-lam).
        norm = 1 - exp(-lambda_)
        return lambda_ * exp(-lambda_) / norm - log(norm)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
    """A Boltzmann (Truncated Discrete Exponential) random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `boltzmann` is::

        boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))

    for ``k = 0,..., N-1``.

    `boltzmann` takes ``lambda_`` and ``N`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _pmf(self, k, lambda_, N):
        # Planck pmf renormalized over the truncated support 0..N-1.
        fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
        return fact*exp(-lambda_*k)
    def _cdf(self, x, lambda_, N):
        k = floor(x)
        return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
    def _ppf(self, q, lambda_, N):
        # Rescale q to the untruncated quantile, invert analytically,
        # then step back one if the smaller value already covers q.
        qnew = q*(1-exp(-lambda_*N))
        vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, lambda_, N)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, lambda_, N):
        # z = exp(-lambda_) is the decay ratio; zN its N-th power.
        z = exp(-lambda_)
        zN = exp(-lambda_*N)
        mu = z/(1.0-z)-N*zN/(1-zN)
        var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
        trm = (1-zN)/(1-z)
        trm2 = (z*trm**2 - N*N*zN)
        g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
        g1 = g1 / trm2**(1.5)
        g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
        g2 = g2 / trm2 / trm2
        return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
                          longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
    """A uniform discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `randint` is::

        randint.pmf(k) = 1./(high - low)

    for ``k = low, ..., high - 1``.

    `randint` takes ``low`` and ``high`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, low, high):
        # Side effect: the support is [low, high - 1] (high exclusive).
        self.a = low
        self.b = high - 1
        return (high > low)
    def _pmf(self, k, low, high):
        # Constant density inside the support, zero outside.
        p = np.ones_like(k) / (high - low)
        return np.where((k >= low) & (k < high), p, 0.)
    def _cdf(self, x, low, high):
        k = floor(x)
        return (k - low + 1.) / (high - low)
    def _ppf(self, q, low, high):
        # Analytic inverse, then step back one if the smaller value
        # already covers q.
        vals = ceil(q * (high - low) + low) - 1
        vals1 = (vals - 1).clip(low, high)
        temp = self._cdf(vals1, low, high)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, low, high):
        m2, m1 = np.asarray(high), np.asarray(low)
        mu = (m2 + m1 - 1.0) / 2
        d = m2 - m1
        var = (d*d - 1) / 12.0
        g1 = 0.0
        g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
        return mu, var, g1, g2
    def _rvs(self, low, high):
        """An array of *size* random integers >= ``low`` and < ``high``."""
        if self._size is not None:
            # Numpy's RandomState.randint() doesn't broadcast its arguments.
            # Use `broadcast_to()` to extend the shapes of low and high
            # up to self._size. Then we can use the numpy.vectorize'd
            # randint without needing to pass it a `size` argument.
            low = broadcast_to(low, self._size)
            high = broadcast_to(high, self._size)
        randint = np.vectorize(self._random_state.randint, otypes=[np.int_])
        return randint(low, high)
    def _entropy(self, low, high):
        # Uniform over (high - low) points: H = log(high - low).
        return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
                      '(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
    """A Zipf discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `zipf` is::

        zipf.pmf(k, a) = 1/(zeta(a) * k**a)

    for ``k >= 1``.

    `zipf` takes ``a`` as shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, a):
        # numpy provides Zipf sampling directly.
        return self._random_state.zipf(a, size=self._size)

    def _argcheck(self, a):
        # zeta(a) diverges for a <= 1, so the exponent must exceed 1.
        return a > 1

    def _pmf(self, k, a):
        # 1 / (zeta(a) * k**a); special.zeta(a, 1) is the Riemann zeta.
        return 1.0 / special.zeta(a, 1) / k**a

    def _munp(self, n, a):
        # E[X^n] = zeta(a - n) / zeta(a), finite only when a > n + 1.
        return _lazywhere(
            a > n + 1, (a, n),
            lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
            np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
    """A  Laplacian discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `dlaplace` is::

        dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))

    for ``a > 0``.

    `dlaplace` takes ``a`` as shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _pmf(self, k, a):
        return tanh(a/2.0) * exp(-a * abs(k))
    def _cdf(self, x, a):
        k = floor(x)
        # Two branches: geometric sum from the left tail (k < 0) and its
        # complement for k >= 0; _lazywhere evaluates only the needed one.
        f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
        f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
        return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
    def _ppf(self, q, a):
        # Invert each CDF branch analytically; the threshold
        # 1/(1 + exp(-a)) is the CDF value at k = 0.
        const = 1 + exp(a)
        vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
                             -log((1-q) * const) / a))
        vals1 = vals - 1
        return np.where(self._cdf(vals1, a) >= q, vals1, vals)
    def _stats(self, a):
        # Symmetric distribution: mean and skew are exactly 0.
        ea = exp(a)
        mu2 = 2.*ea/(ea-1.)**2
        mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
        return 0., mu2, 0., mu4/mu2**2 - 3.
    def _entropy(self, a):
        return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
                        name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
    """A  Skellam discrete random variable.

    %(before_notes)s

    Notes
    -----
    Probability distribution of the difference of two correlated or
    uncorrelated Poisson random variables.

    Let k1 and k2 be two Poisson-distributed r.v. with expected values
    lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
    parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
    ``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
    coefficient between k1 and k2. If the two Poisson-distributed r.v.
    are independent then ``rho = 0``.

    Parameters mu1 and mu2 must be strictly positive.

    For details see: http://en.wikipedia.org/wiki/Skellam_distribution

    `skellam` takes ``mu1`` and ``mu2`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, mu1, mu2):
        # Difference of two independent Poisson draws.
        n = self._size
        return (self._random_state.poisson(mu1, n) -
                self._random_state.poisson(mu2, n))
    def _pmf(self, x, mu1, mu2):
        # Expressed via the noncentral chi-square pdf, split by the sign
        # of x to keep the noncentrality parameter positive.
        px = np.where(x < 0,
                      _ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
                      _ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
        # ncx2.pdf() returns nan's for extremely low probabilities
        return px
    def _cdf(self, x, mu1, mu2):
        x = floor(x)
        px = np.where(x < 0,
                      _ncx2_cdf(2*mu2, -2*x, 2*mu1),
                      1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
        return px
    def _stats(self, mu1, mu2):
        mean = mu1 - mu2
        var = mu1 + mu2
        g1 = mean / sqrt((var)**3)
        g2 = 1 / var
        return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
# Collect names of classes and objects in this module.
# get_distribution_names splits them into frozen instances (e.g. `binom`)
# and their generator classes (e.g. `binom_gen`); both are exported.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
|
|
from __future__ import division
from future.utils import raise_from
import math
import time
from datetime import datetime
from operator import itemgetter
from pocket import (
Pocket,
PocketException,
PocketAutException
)
from progress.spinner import Spinner
from .config import Configs
from .exceptions import AppException, AppNotConfigured
from .storage import Storage
class PocketApp:
    """Application-level wrapper around the Pocket API client.

    Combines remote access (`Pocket`), persisted settings (`Configs`)
    and a locally cached article index (`Storage`).
    """

    # Fallback reading speed used to estimate reading times when the
    # user has not configured one.
    DEFAULT_WORDS_PER_MINUTE = 180
    # Redirect target required by Pocket's OAuth flow.
    REDIRECT_URL = 'http://www.google.com'
    def __init__(self):
        """Wire up configuration, local storage and the Pocket client.

        Credentials are read from the persisted configuration; they may
        still be unset until `configure()` has been run.
        """
        self._configs = Configs()
        self._storage = Storage()
        self._pocket = Pocket(
            self._configs.get('consumer_key'),
            self._configs.get('access_token')
        )
def configure(self, consumer_key, access_token,
words_per_minute, sort_field):
self._configs.set('consumer_key', consumer_key)
self._configs.set('access_token', access_token)
self._configs.set('words_per_minute', words_per_minute)
self._configs.set('sort_field', sort_field)
self._configs.set('last_fetch', 0)
self._configs.write()
self._storage.clear()
self._pocket = Pocket(
consumer_key,
access_token
)
    def init_consumer_key(self, consumer_key):
        # Recreate the client with only a consumer key — needed for the
        # OAuth request-token step before an access token exists.
        self._pocket = Pocket(consumer_key)
    def get_request_token(self):
        """Start the OAuth flow: obtain a request token from Pocket."""
        return self._pocket.get_request_token(
            self.REDIRECT_URL
        )
    def get_access_token(self, request_token):
        """Exchange an authorized request token for an access token."""
        return self._pocket.get_access_token(
            request_token
        )
def add_article(self, url, title=None, tags=None):
if isinstance(tags, tuple):
tags = ','.join(list(tags))
try:
return self._pocket.add(url, title, tags)
except PocketException as e:
raise_from(self._check_exception(e), e)
def get_articles(self, limit=None, order=None):
if self._storage.is_empty():
self.fetch_articles(True)
articles = self._storage.read(limit, order)
sort_field = self._configs.get('sort_field')
if not sort_field:
sort_field = 'reading_time'
articles = sorted(articles,
key=itemgetter(sort_field))
return articles
    def search(self, search, state, tag, sort):
        """Query the Pocket API and return matches as local index dicts.

        The parameters mirror the Pocket `retrieve` endpoint: free-text
        search, article state, tag filter and sort order.
        """
        try:
            articles = self._pocket.retrieve(search=search,
                                             state=state,
                                             tag=tag,
                                             sort=sort)
            return self._get_articles_index(articles)
        except PocketException as e:
            raise_from(self._check_exception(e), e)
def archive_article(self, item_id):
try:
self._pocket.archive(int(item_id)).commit()
except PocketException as e:
raise_from(self._check_exception(e), e)
def find_article(self, item_id):
index = self._storage.read()
for article in index:
if str(article['id']) == str(item_id):
return article
return None
def fetch_articles(self, output_progress=False):
spinner = None
if output_progress:
spinner = Spinner('Loading articles ')
articles_index = []
last_fetch = self._configs.get('last_fetch')
offset = 0
count = 20
while(True):
try:
articles = self._pocket.retrieve(
state='unread',
count=count,
offset=offset,
since=last_fetch
)
except PocketException as e:
spinner.finish()
raise_from(self._check_exception(e), e)
if not articles['list']:
break
articles_index.extend(self._get_articles_index(articles))
offset += count
if spinner:
spinner.next()
if spinner:
spinner.finish()
sort_field = self._configs.get('sort_field')
if not sort_field:
sort_field = 'reading_time'
articles_index = sorted(articles_index,
key=itemgetter(sort_field))
self._storage.write(articles_index)
self._configs.set('last_fetch', self._get_timestamp(datetime.now()))
self._configs.write()
def _get_articles_index(self, articles):
wpm = self._configs.get('words_per_minute')
if not wpm:
wpm = self.DEFAULT_WORDS_PER_MINUTE
wpm = int(wpm)
articles_index = []
articles_list = articles['list']
if isinstance(articles_list, list) and len(articles_list) == 0:
return articles_index
for article in articles_list.values():
word_count = int(article.get('word_count', 0))
if word_count == 0:
reading_time = -1
else:
reading_time = int(math.ceil(word_count / wpm))
title = article.get('resolved_title', None)
if not title:
title = article['given_title']
url = article.get('resolved_url', None)
if not url:
url = article['given_url']
index = {
'id': article['item_id'],
'title': title,
'url': url,
'word_count': word_count,
'reading_time': reading_time
}
articles_index.append(index)
return articles_index
def _get_timestamp(self, date):
return int(time.mktime(date.timetuple()))
def _check_exception(self, e):
if isinstance(e, PocketAutException):
raise AppNotConfigured('Application is not configured')
raise AppException(e.message)
|
|
"""Target model."""
import enum
from gps_position import GpsPosition
from django.conf import settings
from django.db import models
class Choices(enum.IntEnum):
    """Base class for enums used to limit Django field choices,
    plus other helper methods.

    Item names should be lowercase to work properly with lookup().
    """

    @classmethod
    def choices(cls):
        """Provide choices for Django's IntField.choices.

        Returns:
            A list of (value, name) pairs suitable for IntField.choices:
            the enum value is the stored key, the member name the label.
        """
        return [(int(member), name)
                for name, member in cls.__members__.items()]

    @classmethod
    def lookup(cls, s):
        """Lookup value from name.

        Args:
            s: name to lookup; case insensitive

        Returns:
            Value associated with name

        Raises:
            KeyError: name not valid
        """
        key = str(s).lower()
        return cls.__members__[key]

    @classmethod
    def names(cls):
        """Names of choices

        Returns:
            List of names of values
        """
        return cls.__members__.keys()
@enum.unique
class TargetType(Choices):
    """Valid target types.

    Warning: DO NOT change/reuse values, or compatibility will be lost with
    old data sets. Only add new values to the end.
    """

    # Values are persisted as ints in Target.target_type; append-only.
    standard = 1
    qrc = 2
    off_axis = 3
    emergent = 4
    ir = 5
@enum.unique
class Orientation(Choices):
    """Valid target orientations.

    Warning: DO NOT change/reuse values, or compatibility will be lost with
    old data sets. Only add new values to the end.
    """

    # Eight compass points, clockwise from north; persisted as ints in
    # Target.orientation.
    n = 1
    ne = 2
    e = 3
    se = 4
    s = 5
    sw = 6
    w = 7
    nw = 8
@enum.unique
class Shape(Choices):
    """Valid target shapes.

    Warning: DO NOT change/reuse values, or compatibility will be lost with
    old data sets. Only add new values to the end.
    """

    # Persisted as ints in Target.shape; append-only.
    circle = 1
    semicircle = 2
    quarter_circle = 3
    triangle = 4
    square = 5
    rectangle = 6
    trapezoid = 7
    pentagon = 8
    hexagon = 9
    heptagon = 10
    octagon = 11
    star = 12
    cross = 13
@enum.unique
class Color(Choices):
    """Valid target colors.

    Warning: DO NOT change/reuse values, or compatibility will be lost with
    old data sets. Only add new values to the end.
    """

    # Persisted as ints in Target.background_color / alphanumeric_color.
    white = 1
    black = 2
    gray = 3
    red = 4
    blue = 5
    green = 6
    yellow = 7
    purple = 8
    brown = 9
    orange = 10
class Target(models.Model):
    """Target represents a single target submission for a team."""

    # The user which submitted and owns this target.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, db_index=True)
    # Target type (TargetType enum value stored as int).
    target_type = models.IntegerField(choices=TargetType.choices())
    # Target location (optional).
    location = models.ForeignKey(GpsPosition, null=True, blank=True)
    # Target orientation (Orientation enum value, optional).
    orientation = models.IntegerField(choices=Orientation.choices(),
                                      null=True,
                                      blank=True)
    # Target shape (Shape enum value, optional).
    shape = models.IntegerField(choices=Shape.choices(), null=True, blank=True)
    # Target background color (Color enum value, optional).
    background_color = models.IntegerField(choices=Color.choices(),
                                           null=True,
                                           blank=True)
    # Target alphanumeric; empty string means "not provided".
    alphanumeric = models.TextField(default='', blank=True)
    # Target alphanumeric color (Color enum value, optional).
    alphanumeric_color = models.IntegerField(choices=Color.choices(),
                                             null=True,
                                             blank=True)
    # Free-form target description; empty string means "not provided".
    description = models.TextField(default='', blank=True)
    # Uploaded target image thumbnail.
    thumbnail = models.ImageField(upload_to='targets', blank=True)

    def __unicode__(self):
        """Descriptive text for use in displays (Python 2 API)."""
        # Reuse the JSON representation so names (not raw ints) are shown.
        d = self.json()
        return unicode(
            "{name}(pk={id}, user={user}, "
            "target_type={type}, "
            "latitude={latitude}, "
            "longitude={longitude}, "
            "orientation={orientation}, shape={shape} "
            "background_color={background_color}, "
            "alphanumeric='{alphanumeric}', "
            "alphanumeric_color={alphanumeric_color}, "
            "description='{description}', "
            "thumbnail='{thumbnail}')".format(
                name=self.__class__.__name__,
                thumbnail=self.thumbnail, **d))

    def json(self):
        """Target as dict, for JSON.

        Enum-backed int fields are converted to their enum member names;
        unset fields (None / empty string) are reported as None.
        """
        target_type = None
        if self.target_type is not None:
            target_type = TargetType(self.target_type).name
        latitude = None
        longitude = None
        if self.location is not None:
            latitude = self.location.latitude
            longitude = self.location.longitude
        orientation = None
        if self.orientation is not None:
            orientation = Orientation(self.orientation).name
        shape = None
        if self.shape is not None:
            shape = Shape(self.shape).name
        background_color = None
        if self.background_color is not None:
            background_color = Color(self.background_color).name
        alphanumeric = None
        if self.alphanumeric != '':
            alphanumeric = self.alphanumeric
        alphanumeric_color = None
        if self.alphanumeric_color is not None:
            alphanumeric_color = Color(self.alphanumeric_color).name
        description = None
        if self.description != '':
            description = self.description
        return {
            'id': self.pk,
            'user': self.user.pk,
            'type': target_type,
            'latitude': latitude,
            'longitude': longitude,
            'orientation': orientation,
            'shape': shape,
            'background_color': background_color,
            'alphanumeric': alphanumeric,
            'alphanumeric_color': alphanumeric_color,
            'description': description,
        }
|
|
import copy
from crispy_forms.utils import TEMPLATE_PACK
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied, FieldError
from django.db import models, transaction
from django.forms.models import modelform_factory, modelform_defines_fields
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.template import loader
from django.utils.translation import ugettext as _
from xadmin import widgets
from xadmin.layout import FormHelper, Layout, Fieldset, TabHolder, Container, Column, Col, Field
from xadmin.util import unquote
from xadmin.views.detail import DetailAdminUtil
from base import ModelAdminView, filter_hook, csrf_protect_m
# Default form-field construction kwargs, keyed by model-field class.
# ModelFormAdminView.get_field_attrs walks the field class's MRO and
# passes the matching entry to db_field.formfield(); instance-level
# `formfield_overrides` are merged on top of this in __init__.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
    models.DateTimeField: {
        'form_class': forms.SplitDateTimeField,
        'widget': widgets.AdminSplitDateTime
    },
    models.DateField: {'widget': widgets.AdminDateWidget},
    models.TimeField: {'widget': widgets.AdminTimeWidget},
    models.TextField: {'widget': widgets.AdminTextareaWidget},
    models.URLField: {'widget': widgets.AdminURLFieldWidget},
    models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
    models.BigIntegerField: {'widget': widgets.AdminIntegerFieldWidget},
    models.CharField: {'widget': widgets.AdminTextInputWidget},
    models.IPAddressField: {'widget': widgets.AdminTextInputWidget},
    models.ImageField: {'widget': widgets.AdminFileWidget},
    models.FileField: {'widget': widgets.AdminFileWidget},
    models.ForeignKey: {'widget': widgets.AdminSelectWidget},
    models.OneToOneField: {'widget': widgets.AdminSelectWidget},
    models.ManyToManyField: {'widget': widgets.AdminSelectMultiple},
}
class ReadOnlyField(Field):
    """Crispy-forms layout field that renders read-only values.

    Instead of a form widget, each wrapped field is rendered through the
    detail view's get_field_result() with a value-display template.
    """

    template = "xadmin/layout/field_value.html"

    def __init__(self, *args, **kwargs):
        # The detail-view helper used to resolve each field's display value.
        self.detail = kwargs.pop('detail')
        super(ReadOnlyField, self).__init__(*args, **kwargs)

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK):
        rendered = []
        for field_name in self.fields:
            result = self.detail.get_field_result(field_name)
            context_field = {'auto_id': field_name}
            rendered.append(loader.render_to_string(
                self.template, {'field': context_field, 'result': result}))
        return ''.join(rendered)
class ModelFormAdminView(ModelAdminView):
    """Base view for the admin add/change pages.

    Builds a ModelForm class (get_model_form), instantiates it
    (instance_forms), lays it out with crispy-forms (get_form_layout /
    get_form_helper) and drives the save cycle in post().
    """

    form = forms.ModelForm          # base form class to derive from
    formfield_overrides = {}        # per-field-class kwargs, merged over defaults
    readonly_fields = ()            # fields rendered via ReadOnlyField
    style_fields = {}               # field name -> style key (see get_field_style)
    exclude = None
    relfield_style = None           # style used when this model is a relation target
    save_as = False
    save_on_top = False
    add_form_template = None
    change_form_template = None
    form_layout = None

    def __init__(self, request, *args, **kwargs):
        # Merge instance overrides on top of the module-level defaults.
        overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
        overrides.update(self.formfield_overrides)
        self.formfield_overrides = overrides
        super(ModelFormAdminView, self).__init__(request, *args, **kwargs)

    @filter_hook
    def formfield_for_dbfield(self, db_field, **kwargs):
        """Build the form field for a model field (modelform_factory callback)."""
        # If it uses an intermediary model that isn't auto created, don't show
        # a field in admin.
        if isinstance(db_field, models.ManyToManyField) and not db_field.rel.through._meta.auto_created:
            return None
        attrs = self.get_field_attrs(db_field, **kwargs)
        return db_field.formfield(**dict(attrs, **kwargs))

    @filter_hook
    def get_field_style(self, db_field, style, **kwargs):
        """Translate a style key ('radio', 'checkbox', ...) into formfield kwargs.

        Returns None (implicitly) when the style does not apply to this field.
        """
        if style in ('radio', 'radio-inline') and (db_field.choices or isinstance(db_field, models.ForeignKey)):
            attrs = {'widget': widgets.AdminRadioSelect(
                attrs={'inline': 'inline' if style == 'radio-inline' else ''})}
            if db_field.choices:
                attrs['choices'] = db_field.get_choices(
                    include_blank=db_field.blank,
                    blank_choice=[('', _('Null'))]
                )
            return attrs

        if style in ('checkbox', 'checkbox-inline') and isinstance(db_field, models.ManyToManyField):
            return {'widget': widgets.AdminCheckboxSelect(attrs={'inline': style == 'checkbox-inline'}),
                    'help_text': None}

    @filter_hook
    def get_field_attrs(self, db_field, **kwargs):
        """Resolve formfield kwargs: style_fields, related admin's
        relfield_style, choices, then the per-class override table (by MRO)."""
        if db_field.name in self.style_fields:
            attrs = self.get_field_style(
                db_field, self.style_fields[db_field.name], **kwargs)
            if attrs:
                return attrs
        if hasattr(db_field, "rel") and db_field.rel:
            related_modeladmin = self.admin_site._registry.get(db_field.rel.to)
            if related_modeladmin and hasattr(related_modeladmin, 'relfield_style'):
                attrs = self.get_field_style(
                    db_field, related_modeladmin.relfield_style, **kwargs)
                if attrs:
                    return attrs
        if db_field.choices:
            return {'widget': widgets.AdminSelectWidget}
        # Walk the MRO so subclasses pick up their base class's entry.
        for klass in db_field.__class__.mro():
            if klass in self.formfield_overrides:
                return self.formfield_overrides[klass].copy()
        return {}

    @filter_hook
    def prepare_form(self):
        # Build (but do not instantiate) the ModelForm class.
        self.model_form = self.get_model_form()

    @filter_hook
    def instance_forms(self):
        # Instantiate the form with request/instance data from get_form_datas().
        self.form_obj = self.model_form(**self.get_form_datas())
        # print '++' * 30 + '\n', type(self.form_obj.media)

    def setup_forms(self):
        helper = self.get_form_helper()
        if helper:
            self.form_obj.helper = helper

    @filter_hook
    def valid_forms(self):
        return self.form_obj.is_valid()

    @filter_hook
    def get_model_form(self, **kwargs):
        """
        Returns a Form class for use in the admin add view. This is used by
        add_view and change_view.
        """
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        # Readonly fields are excluded from the editable form; they are
        # rendered separately via ReadOnlyField in get_form_helper().
        exclude.extend(self.get_readonly_fields())
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # ModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # if exclude is an empty list we pass None to be consistant with the
        # default on modelform_factory
        exclude = exclude or None
        defaults = {
            "form": self.form,
            "fields": self.fields and list(self.fields) or None,
            "exclude": exclude,
            "formfield_callback": self.formfield_for_dbfield,
        }
        defaults.update(kwargs)
        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            defaults['fields'] = forms.ALL_FIELDS
        # return modelform_factory(self.model, **defaults)
        try:
            return modelform_factory(self.model, **defaults)
        except FieldError as e:
            raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
                             % (e, self.__class__.__name__))

    @filter_hook
    def get_form_layout(self):
        """Normalize form_layout into a crispy Layout; fields not mentioned
        by the layout are appended in an 'Other Fields' fieldset."""
        layout = copy.deepcopy(self.form_layout)
        # Python 2: dict.keys() returns a list here, so + concatenates.
        fields = self.form_obj.fields.keys() + list(self.get_readonly_fields())

        if layout is None:
            # No layout declared: one full-width fieldset with every field.
            layout = Layout(Container(Col('full',
                Fieldset("", *fields, css_class="unsort no_title"), horizontal=True, span=12)
            ))
        elif type(layout) in (list, tuple) and len(layout) > 0:
            # Wrap bare lists/tuples depending on what they contain.
            if isinstance(layout[0], Column):
                fs = layout
            elif isinstance(layout[0], (Fieldset, TabHolder)):
                fs = (Col('full', *layout, horizontal=True, span=12),)
            else:
                fs = (Col('full', Fieldset("", *layout, css_class="unsort no_title"), horizontal=True, span=12),)

            layout = Layout(Container(*fs))

            # Append any field the declared layout did not mention.
            rendered_fields = [i[1] for i in layout.get_field_names()]
            container = layout[0].fields
            other_fieldset = Fieldset(_(u'Other Fields'), *[f for f in fields if f not in rendered_fields])

            if len(other_fieldset.fields):
                if len(container) and isinstance(container[0], Column):
                    container[0].fields.append(other_fieldset)
                else:
                    container.append(other_fieldset)

        return layout

    @filter_hook
    def get_form_helper(self):
        """Build the crispy FormHelper and wrap readonly fields."""
        helper = FormHelper()
        helper.form_tag = False
        helper.add_layout(self.get_form_layout())

        # deal with readonly fields
        readonly_fields = self.get_readonly_fields()
        if readonly_fields:
            detail = self.get_model_view(
                DetailAdminUtil, self.model, self.form_obj.instance)
            for field in readonly_fields:
                helper[field].wrap(ReadOnlyField, detail=detail)

        return helper

    @filter_hook
    def get_readonly_fields(self):
        """
        Hook for specifying custom readonly fields.
        """
        return self.readonly_fields

    @filter_hook
    def save_forms(self):
        # Create the instance without hitting the DB yet.
        self.new_obj = self.form_obj.save(commit=False)

    @filter_hook
    def save_models(self):
        self.new_obj.save()

    @filter_hook
    def save_related(self):
        # M2M relations need the PK, so they are saved after save_models().
        self.form_obj.save_m2m()

    @csrf_protect_m
    @filter_hook
    def get(self, request, *args, **kwargs):
        self.instance_forms()
        self.setup_forms()
        return self.get_response()

    @csrf_protect_m
    @transaction.atomic
    @filter_hook
    def post(self, request, *args, **kwargs):
        """Validate and save; re-render the form when validation fails."""
        self.instance_forms()
        self.setup_forms()

        if self.valid_forms():
            self.save_forms()
            self.save_models()
            self.save_related()
            response = self.post_response()
            # post_response() may return a redirect URL (Python 2 basestring)
            # or a ready HttpResponse.
            if isinstance(response, basestring):
                return HttpResponseRedirect(response)
            else:
                return response
        return self.get_response()

    @filter_hook
    def get_context(self):
        """Template context shared by the add and change pages."""
        add = self.org_obj is None
        change = self.org_obj is not None

        new_context = {
            'form': self.form_obj,
            'original': self.org_obj,
            'show_delete': self.org_obj is not None,
            'add': add,
            'change': change,
            'errors': self.get_error_list(),

            'has_add_permission': self.has_add_permission(),
            'has_view_permission': self.has_view_permission(),
            'has_change_permission': self.has_change_permission(self.org_obj),
            'has_delete_permission': self.has_delete_permission(self.org_obj),

            'has_file_field': True,  # FIXME - this should check if form or formsets have a FileField,
            'has_absolute_url': hasattr(self.model, 'get_absolute_url'),
            'form_url': '',
            'content_type_id': ContentType.objects.get_for_model(self.model).id,
            'save_as': self.save_as,
            'save_on_top': self.save_on_top,
        }

        # for submit line
        new_context.update({
            'onclick_attrib': '',
            'show_delete_link': (new_context['has_delete_permission']
                                 and (change or new_context['show_delete'])),
            'show_save_as_new': change and self.save_as,
            'show_save_and_add_another': new_context['has_add_permission'] and
            (not self.save_as or add),
            'show_save_and_continue': new_context['has_change_permission'],
            'show_save': True
        })

        if self.org_obj and new_context['show_delete_link']:
            new_context['delete_url'] = self.model_admin_url(
                'delete', self.org_obj.pk)

        context = super(ModelFormAdminView, self).get_context()
        context.update(new_context)
        return context

    @filter_hook
    def get_error_list(self):
        """Flatten bound-form errors into a single ErrorList."""
        errors = forms.utils.ErrorList()
        if self.form_obj.is_bound:
            errors.extend(self.form_obj.errors.values())
        return errors

    @filter_hook
    def get_media(self):
        # return super(ModelFormAdminView, self).get_media() + self.form_obj.media + \
        return super(ModelFormAdminView, self).get_media() + \
            self.vendor('xadmin.page.form.js', 'xadmin.form.css')
class CreateAdminView(ModelFormAdminView):
    """Admin view handling the "add object" page (GET form / POST create)."""

    def init_request(self, *args, **kwargs):
        """Check add permission and build the form class (used by GET and POST)."""
        self.org_obj = None

        if not self.has_add_permission():
            raise PermissionDenied

        # comm method for both get and post
        self.prepare_form()

    @filter_hook
    def get_form_datas(self):
        # Prepare the dict of initial data from the request.
        # We have to special-case M2Ms as a list of comma-separated PKs.
        if self.request_method == 'get':
            initial = dict(self.request.GET.items())
            for k in initial:
                try:
                    f = self.opts.get_field(k)
                except models.FieldDoesNotExist:
                    continue
                if isinstance(f, models.ManyToManyField):
                    initial[k] = initial[k].split(",")
            return {'initial': initial}
        else:
            return {'data': self.request.POST, 'files': self.request.FILES}

    @filter_hook
    def get_context(self):
        new_context = {
            'title': _('Add %s') % force_unicode(self.opts.verbose_name),
        }
        context = super(CreateAdminView, self).get_context()
        context.update(new_context)
        return context

    @filter_hook
    def get_breadcrumb(self):
        bcs = super(ModelFormAdminView, self).get_breadcrumb()
        item = {'title': _('Add %s') % force_unicode(self.opts.verbose_name)}
        if self.has_add_permission():
            item['url'] = self.model_admin_url('add')
        bcs.append(item)
        return bcs

    @filter_hook
    def get_response(self):
        context = self.get_context()
        context.update(self.kwargs or {})

        return TemplateResponse(
            self.request, self.add_form_template or self.get_template_list(
                'views/model_form.html'),
            context, current_app=self.admin_site.name)

    @filter_hook
    def post_response(self):
        """
        Determines the HttpResponse for the add_view stage.
        """
        request = self.request

        msg = _(
            'The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(self.opts.verbose_name),
                                                                 'obj': "<a class='alert-link' href='%s'>%s</a>" % (self.model_admin_url('change', self.new_obj._get_pk_val()), force_unicode(self.new_obj))}

        # BUG FIX: the original tested `"_continue" in [request.GET, request.POST]`,
        # which compares the string against the QueryDict objects themselves and
        # is therefore always False; membership must be tested on each QueryDict.
        if "_continue" in request.POST or "_continue" in request.GET:
            self.message_user(
                msg + ' ' + _("You may edit it again below."), 'success')
            return self.model_admin_url('change', self.new_obj._get_pk_val())

        if "_addanother" in request.POST or "_addanother" in request.GET:
            self.message_user(msg + ' ' + (_("You may add another %s below.") % force_unicode(self.opts.verbose_name)), 'success')
            return request.path
        else:
            self.message_user(msg, 'success')

            # Figure out where to redirect. If the user has change permission,
            # redirect to the change-list page for this object. Otherwise,
            # redirect to the admin index.
            if "_redirect" in request.POST or "_redirect" in request.GET:
                # .get() instead of indexing: the key may be present in only
                # one of the two QueryDicts.
                return request.POST.get("_redirect") or request.GET.get("_redirect")
            elif self.has_view_permission():
                return self.model_admin_url('changelist')
            else:
                return self.get_admin_url('index')
class UpdateAdminView(ModelFormAdminView):
    """Admin view handling the "change object" page."""

    def init_request(self, object_id, *args, **kwargs):
        """Load the target object, check permission, build the form class."""
        self.org_obj = self.get_object(unquote(object_id))

        if not self.has_change_permission(self.org_obj):
            raise PermissionDenied
        if self.org_obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
                          {'name': force_unicode(self.opts.verbose_name), 'key': escape(object_id)})

        # comm method for both get and post
        self.prepare_form()

    @filter_hook
    def get_form_datas(self):
        params = {'instance': self.org_obj}
        if self.request_method == 'post':
            params.update(
                {'data': self.request.POST, 'files': self.request.FILES})
        return params

    @filter_hook
    def get_context(self):
        new_context = {
            'title': _('Change %s') % force_unicode(self.org_obj),
            'object_id': str(self.org_obj.pk),
        }
        context = super(UpdateAdminView, self).get_context()
        context.update(new_context)
        return context

    @filter_hook
    def get_breadcrumb(self):
        bcs = super(ModelFormAdminView, self).get_breadcrumb()
        item = {'title': force_unicode(self.org_obj)}
        if self.has_change_permission():
            item['url'] = self.model_admin_url('change', self.org_obj.pk)
        bcs.append(item)
        return bcs

    @filter_hook
    def get_response(self, *args, **kwargs):
        context = self.get_context()
        context.update(kwargs or {})

        return TemplateResponse(
            self.request, self.change_form_template or self.get_template_list(
                'views/model_form.html'),
            context, current_app=self.admin_site.name)

    def post(self, request, *args, **kwargs):
        # BUG FIX: the original tested `"_saveasnew" in [self.request.POST,
        # self.request.GET]`, which compares the string against the QueryDict
        # objects (always False); test membership on each QueryDict.
        if "_saveasnew" in self.request.POST or "_saveasnew" in self.request.GET:
            return self.get_model_view(CreateAdminView, self.model).post(request)
        return super(UpdateAdminView, self).post(request, *args, **kwargs)

    @filter_hook
    def post_response(self):
        """
        Determines the HttpResponse for the change_view stage.
        """
        opts = self.new_obj._meta
        obj = self.new_obj
        request = self.request
        verbose_name = opts.verbose_name

        pk_value = obj._get_pk_val()

        msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name':
                                                                       force_unicode(verbose_name), 'obj': force_unicode(obj)}
        # BUG FIX (as in post()): membership must be tested on the QueryDicts,
        # not on a list containing them.
        if "_continue" in request.POST or "_continue" in request.GET:
            self.message_user(
                msg + ' ' + _("You may edit it again below."), 'success')
            return request.path
        elif "_addanother" in request.POST or "_addanother" in request.GET:
            self.message_user(msg + ' ' + (_("You may add another %s below.")
                                           % force_unicode(verbose_name)), 'success')
            return self.model_admin_url('add')
        else:
            self.message_user(msg, 'success')

            # Figure out where to redirect. If the user has change permission,
            # redirect to the change-list page for this object. Otherwise,
            # redirect to the admin index.
            if "_redirect" in request.POST or "_redirect" in request.GET:
                # .get() instead of indexing: the key may be present in only
                # one of the two QueryDicts.
                return request.POST.get("_redirect") or request.GET.get("_redirect")
            elif self.has_view_permission():
                change_list_url = self.model_admin_url('changelist')
                if 'LIST_QUERY' in self.request.session \
                        and self.request.session['LIST_QUERY'][0] == self.model_info:
                    change_list_url += '?' + self.request.session['LIST_QUERY'][1]
                return change_list_url
            else:
                return self.get_admin_url('index')
class ModelFormAdminUtil(ModelFormAdminView):
    """Helper view that builds a bound model form for an object.

    Unlike Create/UpdateAdminView it performs no permission checks and
    binds only the instance (no POST data).
    """

    def init_request(self, obj=None):
        self.org_obj = obj
        self.prepare_form()
        self.instance_forms()

    @filter_hook
    def get_form_datas(self):
        # Instance only: the form is used for rendering, not submission.
        return {'instance': self.org_obj}
|
|
import cgi
import urllib
import time
import random
import urlparse
import hmac
import base64
VERSION = '1.0'  # Hi Blaine!  -- OAuth protocol version sent as oauth_version
HTTP_METHOD = 'GET'  # default HTTP method for OAuthRequest
SIGNATURE_METHOD = 'PLAINTEXT'  # default oauth_signature_method name
# Generic exception class
class OAuthError(RuntimeError):
    """Generic OAuth failure; the text is available both as e.message
    (historical attribute) and via str(e)/e.args."""

    def __init__(self, message='OAuth error occured.'):
        # BUG FIX: forward the message to RuntimeError so str(e) and
        # e.args carry it (the original stored it only on .message,
        # leaving str(e) empty).
        RuntimeError.__init__(self, message)
        self.message = message
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
    """Build the WWW-Authenticate header dict for a 401 response."""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
# url escape
def escape(s):
    """Percent-encode *s* for OAuth.

    Only '~' is passed as an extra safe character, so '/' (which
    urllib.quote would normally leave alone) gets escaped too.
    """
    safe_chars = '~'
    return urllib.quote(s, safe=safe_chars)
# util function: current timestamp
# seconds since epoch (UTC)
def generate_timestamp():
    """Return the current time as whole seconds since the epoch."""
    now = time.time()
    return int(now)
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
    """Return a pseudorandom string of *length* decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
    """Consumer identity: an application key plus its shared secret."""

    key = None
    secret = None

    def __init__(self, key, secret):
        self.secret = secret
        self.key = key
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
    """An End User token (request or access token).

    key -- the token string
    secret -- the token secret
    """

    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

    def to_string(self):
        """Serialize as a URL-encoded key/secret pair."""
        payload = {'oauth_token': self.key, 'oauth_token_secret': self.secret}
        return urllib.urlencode(payload)

    # return a token from something like:
    # oauth_token_secret=digg&oauth_token=digg
    @staticmethod
    def from_string(s):
        """Parse a token out of a query-string style serialization."""
        params = cgi.parse_qs(s, keep_blank_values=False)
        token_key = params['oauth_token'][0]
        token_secret = params['oauth_token_secret'][0]
        return OAuthToken(token_key, token_secret)

    def __str__(self):
        return self.to_string()
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
    '''
    OAuth parameters:
        - oauth_consumer_key
        - oauth_token
        - oauth_signature_method
        - oauth_signature
        - oauth_timestamp
        - oauth_nonce
        - oauth_version
        ... any additional parameters, as defined by the Service Provider.
    '''
    parameters = None  # oauth parameters
    http_method = HTTP_METHOD
    http_url = None
    version = VERSION

    def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
        self.http_method = http_method
        self.http_url = http_url
        self.parameters = parameters or {}

    def set_parameter(self, parameter, value):
        self.parameters[parameter] = value

    def get_parameter(self, parameter):
        """Return a parameter value; raises OAuthError when absent."""
        try:
            return self.parameters[parameter]
        except:
            raise OAuthError('Parameter not found: %s' % parameter)

    def _get_timestamp_nonce(self):
        # Both are required for signature verification.
        return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')

    # get any non-oauth parameters
    def get_nonoauth_parameters(self):
        parameters = {}
        for k, v in self.parameters.iteritems():
            # ignore oauth parameters
            if k.find('oauth_') < 0:
                parameters[k] = v
        return parameters

    # serialize as a header for an HTTPAuth request
    def to_header(self, realm=''):
        """Render all parameters as an Authorization: OAuth header."""
        auth_header = 'OAuth realm="%s"' % realm
        # add the oauth parameters
        if self.parameters:
            for k, v in self.parameters.iteritems():
                auth_header += ', %s="%s"' % (k, escape(str(v)))
        return {'Authorization': auth_header}

    # serialize as post data for a POST request
    def to_postdata(self):
        return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems())

    # serialize as a url for a GET request
    def to_url(self):
        # NOTE: get_normalized_http_url() drops any query string that was
        # part of http_url; only self.parameters end up in the URL.
        return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())

    # return a string that consists of all the parameters that need to be signed
    def get_normalized_parameters(self):
        """Build the signature base parameter string.

        NOTE: mutates self.parameters — any existing oauth_signature is
        removed before the string is built.
        """
        params = self.parameters
        try:
            # exclude the signature if it exists
            del params['oauth_signature']
        except:
            pass
        key_values = params.items()
        # sort lexicographically, first after key, then after value
        key_values.sort()
        # combine key value pairs in string and escape
        return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values)

    # just uppercases the http method
    def get_normalized_http_method(self):
        return self.http_method.upper()

    # parses the url and rebuilds it to be scheme://host/path
    def get_normalized_http_url(self):
        parts = urlparse.urlparse(self.http_url)
        url_string = '%s://%s%s' % (parts[0], parts[1], parts[2])  # scheme, netloc, path
        return url_string

    # set the signature parameter to the result of build_signature
    def sign_request(self, signature_method, consumer, token):
        """Compute and attach oauth_signature_method and oauth_signature."""
        # set the signature method
        self.set_parameter('oauth_signature_method', signature_method.get_name())
        # set the signature
        self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))

    def build_signature(self, signature_method, consumer, token):
        # call the build signature method within the signature method
        return signature_method.build_signature(self, consumer, token)

    @staticmethod
    def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
        """Build an OAuthRequest by merging parameters from the
        Authorization header, the query string, and the URL itself.
        Returns None when no parameters were found."""
        # combine multiple parameter sources
        if parameters is None:
            parameters = {}

        # headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # check that the authorization header is OAuth
            if auth_header.index('OAuth') > -1:
                try:
                    # get the parameters from the header
                    header_params = OAuthRequest._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise OAuthError('Unable to parse OAuth parameters from Authorization header.')

        # GET or POST query string
        if query_string:
            query_params = OAuthRequest._split_url_string(query_string)
            parameters.update(query_params)

        # URL parameters
        param_str = urlparse.urlparse(http_url)[4]  # query
        url_params = OAuthRequest._split_url_string(param_str)
        parameters.update(url_params)

        if parameters:
            return OAuthRequest(http_method, http_url, parameters)

        return None

    @staticmethod
    def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a client-side request, filling in the standard oauth_*
        defaults (caller-supplied parameters win)."""
        if not parameters:
            parameters = {}

        defaults = {
            'oauth_consumer_key': oauth_consumer.key,
            'oauth_timestamp': generate_timestamp(),
            'oauth_nonce': generate_nonce(),
            'oauth_version': OAuthRequest.version,
        }

        defaults.update(parameters)
        parameters = defaults

        if token:
            parameters['oauth_token'] = token.key

        return OAuthRequest(http_method, http_url, parameters)

    @staticmethod
    def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build an authorization-redirect request for a token."""
        if not parameters:
            parameters = {}

        parameters['oauth_token'] = token.key

        if callback:
            parameters['oauth_callback'] = escape(callback)

        return OAuthRequest(http_method, http_url, parameters)

    # util function: turn Authorization: header into parameters, has to do some unescaping
    @staticmethod
    def _split_header(header):
        params = {}
        parts = header.split(',')
        for param in parts:
            # ignore realm parameter
            if param.find('OAuth realm') > -1:
                continue
            # remove whitespace
            param = param.strip()
            # split key-value
            param_parts = param.split('=', 1)
            # remove quotes and unescape the value
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params

    # util function: turn url string into parameters, has to do some unescaping
    @staticmethod
    def _split_url_string(param_str):
        parameters = cgi.parse_qs(param_str, keep_blank_values=True)
        for k, v in parameters.iteritems():
            # parse_qs wraps each value in a list; keep the first one only.
            parameters[k] = urllib.unquote(v[0])
        return parameters
# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
timestamp_threshold = 300 # in seconds, five minutes
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
    """Create a server backed by *data_store*.

    signature_methods maps method names (e.g. 'PLAINTEXT') to
    signature-method objects; defaults to an empty registry.
    """
    self.data_store = data_store
    self.signature_methods = signature_methods or {}
def set_data_store(self, oauth_data_store):
    """Replace the backing data store."""
    # BUG FIX: the original assigned the undefined name `data_store`
    # (NameError on every call); the parameter is `oauth_data_store`.
    self.data_store = oauth_data_store
def get_data_store(self):
    """Return the backing data store."""
    return self.data_store
def add_signature_method(self, signature_method):
    """Register a signature method under its own name; returns the registry."""
    self.signature_methods[signature_method.get_name()] = signature_method
    return self.signature_methods
# process a request_token request
# returns the request token on success
def fetch_request_token(self, oauth_request):
    """Handle the initial request-token leg of the OAuth flow."""
    try:
        # get the request token for authorization
        token = self._get_token(oauth_request, 'request')
    except OAuthError:
        # no token required for the initial token request
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # Signature is checked with no token on this first leg.
        self._check_signature(oauth_request, consumer, None)
        # fetch a new token
        token = self.data_store.fetch_request_token(consumer)
    return token
# process an access_token request
# returns the access token on success
def fetch_access_token(self, oauth_request):
    """Exchange an authorized request token for an access token."""
    version = self._get_version(oauth_request)
    consumer = self._get_consumer(oauth_request)
    # get the request token
    token = self._get_token(oauth_request, 'request')
    self._check_signature(oauth_request, consumer, token)
    new_token = self.data_store.fetch_access_token(consumer, token)
    return new_token
# verify an api call, checks all the parameters
def verify_request(self, oauth_request):
    """Validate a protected-resource call.

    Returns (consumer, access_token, non-oauth parameters); raises
    OAuthError when any check fails.
    """
    # -> consumer and token
    version = self._get_version(oauth_request)
    consumer = self._get_consumer(oauth_request)
    # get the access token
    token = self._get_token(oauth_request, 'access')
    self._check_signature(oauth_request, consumer, token)
    parameters = oauth_request.get_nonoauth_parameters()
    return consumer, token, parameters
# authorize a request token
def authorize_token(self, token, user):
    """Mark a request token as authorized by *user* (delegates to the store)."""
    return self.data_store.authorize_request_token(token, user)
# get the callback url
def get_callback(self, oauth_request):
return oauth_request.get_parameter('oauth_callback')
# optional support for the authenticate header
def build_authenticate_header(self, realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# verify the correct version request for this server
def _get_version(self, oauth_request):
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
# figure out the signature with some defaults
def _get_signature_method(self, oauth_request):
try:
signature_method = oauth_request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# get the signature method object
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
if not consumer_key:
raise OAuthError('Invalid consumer key.')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
# try to find the token for the provided request token key
def _get_token(self, oauth_request, token_type='access'):
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# validate the signature
valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
raise OAuthError('Invalid signature. Expected signature base string: %s' % base)
built = signature_method.build_signature(oauth_request, consumer, token)
def _check_timestamp(self, timestamp):
# verify that timestamp is recentish
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
# verify that the nonce is uniqueish
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
# OAuthClient is a worker to attempt to execute a request
class OAuthClient(object):
    """Base class for clients that execute OAuth requests.

    Holds a consumer/token pair; subclasses supply the transport that
    actually fetches tokens and accesses protected resources.
    """
    consumer = None
    token = None

    def __init__(self, oauth_consumer, oauth_token):
        self.consumer = oauth_consumer
        self.token = oauth_token

    def get_consumer(self):
        """Return the consumer this client acts for."""
        return self.consumer

    def get_token(self):
        """Return the token this client acts with."""
        return self.token

    def fetch_request_token(self, oauth_request):
        """Obtain a new request token. -> OAuthToken"""
        raise NotImplementedError

    def fetch_access_token(self, oauth_request):
        """Exchange a request token for an access token. -> OAuthToken"""
        raise NotImplementedError

    def access_resource(self, oauth_request):
        """Access some protected resource."""
        raise NotImplementedError
# OAuthDataStore is a database abstraction used to lookup consumers and tokens
class OAuthDataStore(object):
    """Abstract persistence layer for consumers, tokens and nonces.

    Concrete stores implement every lookup/fetch below; each method here
    is a stub that raises NotImplementedError.
    """

    def lookup_consumer(self, key):
        """Find a consumer by its key. -> OAuthConsumer"""
        raise NotImplementedError

    def lookup_token(self, oauth_consumer, token_type, token_token):
        """Find a token of the given type. -> OAuthToken"""
        raise NotImplementedError

    def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
        """Return a previously-seen nonce, if any. -> OAuthToken"""
        raise NotImplementedError

    def fetch_request_token(self, oauth_consumer):
        """Create a new request token for the consumer. -> OAuthToken"""
        raise NotImplementedError

    def fetch_access_token(self, oauth_consumer, oauth_token):
        """Exchange a request token for an access token. -> OAuthToken"""
        raise NotImplementedError

    def authorize_request_token(self, oauth_token, user):
        """Mark a request token as authorized by *user*. -> OAuthToken"""
        raise NotImplementedError
# OAuthSignatureMethod is a strategy class that implements a signature method
class OAuthSignatureMethod(object):
    """Strategy base class for OAuth signature algorithms."""

    def get_name(self):
        """Return the method name as sent on the wire. -> str"""
        raise NotImplementedError

    def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
        """Return the signing inputs. -> str key, str raw"""
        raise NotImplementedError

    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        """Return the computed signature. -> str"""
        raise NotImplementedError

    def check_signature(self, oauth_request, consumer, token, signature):
        """Recompute the signature and compare it with the supplied one."""
        return self.build_signature(oauth_request, consumer, token) == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
# build the base signature string
key, raw = self.build_signature_base_string(oauth_request, consumer, token)
# hmac object
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # deprecated
hashed = hmac.new(key, raw, sha)
# calculate the digest base 64
return base64.b64encode(hashed.digest())
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    """PLAINTEXT signature method: the secrets themselves are the signature."""

    def get_name(self):
        return 'PLAINTEXT'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Concatenate the escaped consumer secret and (optional) token secret."""
        sig = escape(consumer.secret) + '&'
        if token:
            sig += escape(token.secret)
        return sig

    def build_signature(self, oauth_request, consumer, token):
        """For PLAINTEXT the signature is just the base string itself."""
        return self.build_signature_base_string(oauth_request, consumer, token)
|
|
import unittest
import saspy
from saspy.tests.util import Utilities
class TestSASstat(unittest.TestCase):
    """Smoke tests for the saspy sasstat (SAS/STAT) procedure wrappers.

    Each test runs a procedure against SASHELP data (or data created by
    defineData) and compares the set of returned result objects.

    NOTE(review): the methods without a 'test_' prefix (regResult*,
    phregResult1, factorResult*, ttestResult*, strdset*) are never
    collected by unittest; several call stat.reg() where their names
    suggest phreg/factor/ttest, and regResult2/3 reference pandas and
    IPython which are not imported here -- presumably disabled
    work-in-progress; confirm intent before enabling them.
    """
    @classmethod
    def setUpClass(cls):
        cls.sas = saspy.SASsession()
        util = Utilities(cls.sas)
        procNeeded=['reg', 'mixed', 'hpsplit', 'hplogistic', 'hpreg', 'glm', 'logistic', 'tpspline',
                    'hplogistic', 'hpreg', 'phreg', 'ttest', 'factor']
        if not util.procFound(procNeeded):
            # BUG FIX: cls.skipTest() is an instance method; calling it on
            # the class raised TypeError instead of skipping.  Raising
            # unittest.SkipTest is the documented way to skip in setUpClass.
            raise unittest.SkipTest("Not all of these procedures were found: %s" % str(procNeeded))
    @classmethod
    def tearDownClass(cls):
        # close the SAS session if one was established
        if cls.sas:
            cls.sas._endsas()
    def defineData(self):
        """Create the WORK datasets used by the (currently disabled) result tests."""
        self.sas.submit("""
data Myeloma;
input Time VStatus LogBUN HGB Platelet Age LogWBC Frac
LogPBM Protein SCalc;
label Time='Survival Time'
VStatus='0=Alive 1=Dead';
datalines;
1.25 1 2.2175 9.4 1 67 3.6628 1 1.9542 12 10
1.25 1 1.9395 12.0 1 38 3.9868 1 1.9542 20 18
2.00 1 1.5185 9.8 1 81 3.8751 1 2.0000 2 15
2.00 1 1.7482 11.3 0 75 3.8062 1 1.2553 0 12
2.00 1 1.3010 5.1 0 57 3.7243 1 2.0000 3 9
3.00 1 1.5441 6.7 1 46 4.4757 0 1.9345 12 10
5.00 1 2.2355 10.1 1 50 4.9542 1 1.6628 4 9
5.00 1 1.6812 6.5 1 74 3.7324 0 1.7324 5 9
6.00 1 1.3617 9.0 1 77 3.5441 0 1.4624 1 8
6.00 1 2.1139 10.2 0 70 3.5441 1 1.3617 1 8
6.00 1 1.1139 9.7 1 60 3.5185 1 1.3979 0 10
6.00 1 1.4150 10.4 1 67 3.9294 1 1.6902 0 8
7.00 1 1.9777 9.5 1 48 3.3617 1 1.5682 5 10
7.00 1 1.0414 5.1 0 61 3.7324 1 2.0000 1 10
7.00 1 1.1761 11.4 1 53 3.7243 1 1.5185 1 13
9.00 1 1.7243 8.2 1 55 3.7993 1 1.7404 0 12
11.00 1 1.1139 14.0 1 61 3.8808 1 1.2788 0 10
11.00 1 1.2304 12.0 1 43 3.7709 1 1.1761 1 9
11.00 1 1.3010 13.2 1 65 3.7993 1 1.8195 1 10
11.00 1 1.5682 7.5 1 70 3.8865 0 1.6721 0 12
11.00 1 1.0792 9.6 1 51 3.5051 1 1.9031 0 9
13.00 1 0.7782 5.5 0 60 3.5798 1 1.3979 2 10
14.00 1 1.3979 14.6 1 66 3.7243 1 1.2553 2 10
15.00 1 1.6021 10.6 1 70 3.6902 1 1.4314 0 11
16.00 1 1.3424 9.0 1 48 3.9345 1 2.0000 0 10
16.00 1 1.3222 8.8 1 62 3.6990 1 0.6990 17 10
17.00 1 1.2304 10.0 1 53 3.8808 1 1.4472 4 9
17.00 1 1.5911 11.2 1 68 3.4314 0 1.6128 1 10
18.00 1 1.4472 7.5 1 65 3.5682 0 0.9031 7 8
19.00 1 1.0792 14.4 1 51 3.9191 1 2.0000 6 15
19.00 1 1.2553 7.5 0 60 3.7924 1 1.9294 5 9
24.00 1 1.3010 14.6 1 56 4.0899 1 0.4771 0 9
25.00 1 1.0000 12.4 1 67 3.8195 1 1.6435 0 10
26.00 1 1.2304 11.2 1 49 3.6021 1 2.0000 27 11
32.00 1 1.3222 10.6 1 46 3.6990 1 1.6335 1 9
35.00 1 1.1139 7.0 0 48 3.6532 1 1.1761 4 10
37.00 1 1.6021 11.0 1 63 3.9542 0 1.2041 7 9
41.00 1 1.0000 10.2 1 69 3.4771 1 1.4771 6 10
41.00 1 1.1461 5.0 1 70 3.5185 1 1.3424 0 9
51.00 1 1.5682 7.7 0 74 3.4150 1 1.0414 4 13
52.00 1 1.0000 10.1 1 60 3.8573 1 1.6532 4 10
54.00 1 1.2553 9.0 1 49 3.7243 1 1.6990 2 10
58.00 1 1.2041 12.1 1 42 3.6990 1 1.5798 22 10
66.00 1 1.4472 6.6 1 59 3.7853 1 1.8195 0 9
67.00 1 1.3222 12.8 1 52 3.6435 1 1.0414 1 10
88.00 1 1.1761 10.6 1 47 3.5563 0 1.7559 21 9
89.00 1 1.3222 14.0 1 63 3.6532 1 1.6232 1 9
92.00 1 1.4314 11.0 1 58 4.0755 1 1.4150 4 11
4.00 0 1.9542 10.2 1 59 4.0453 0 0.7782 12 10
4.00 0 1.9243 10.0 1 49 3.9590 0 1.6232 0 13
7.00 0 1.1139 12.4 1 48 3.7993 1 1.8573 0 10
7.00 0 1.5315 10.2 1 81 3.5911 0 1.8808 0 11
8.00 0 1.0792 9.9 1 57 3.8325 1 1.6532 0 8
12.00 0 1.1461 11.6 1 46 3.6435 0 1.1461 0 7
11.00 0 1.6128 14.0 1 60 3.7324 1 1.8451 3 9
12.00 0 1.3979 8.8 1 66 3.8388 1 1.3617 0 9
13.00 0 1.6628 4.9 0 71 3.6435 0 1.7924 0 9
16.00 0 1.1461 13.0 1 55 3.8573 0 0.9031 0 9
19.00 0 1.3222 13.0 1 59 3.7709 1 2.0000 1 10
19.00 0 1.3222 10.8 1 69 3.8808 1 1.5185 0 10
28.00 0 1.2304 7.3 1 82 3.7482 1 1.6721 0 9
41.00 0 1.7559 12.8 1 72 3.7243 1 1.4472 1 9
53.00 0 1.1139 12.0 1 66 3.6128 1 2.0000 1 11
57.00 0 1.2553 12.5 1 66 3.9685 0 1.9542 0 11
77.00 0 1.0792 14.0 1 60 3.6812 0 0.9542 0 12
;;
run;
data SocioEconomics;
input Population School Employment Services HouseValue;
datalines;
5700 12.8 2500 270 25000
1000 10.9 600 10 10000
3400 8.8 1000 10 9000
3800 13.6 1700 140 25000
4000 12.8 1600 140 25000
8200 8.3 2600 60 12000
1200 11.4 400 10 16000
9100 11.5 3300 60 14000
9900 12.5 3400 180 18000
9600 13.7 3600 390 25000
9600 9.6 3300 80 12000
9400 11.4 4000 100 13000
;;
run;
data time;
input time @@;
datalines;
43 90 84 87 116 95 86 99 93 92
121 71 66 98 79 102 60 112 105 98
;;
run;
data pressure;
input SBPbefore SBPafter @@;
datalines;
120 128 124 131 130 131 118 127
140 132 128 125 140 141 135 137
126 118 130 132 126 129 127 135
;;
run;
""")
    def test_smokeReg(self):
        # Basic model returns objects
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        # REG
        b = stat.reg(data=tr, model='weight=height')
        a = ['ANOVA', 'COOKSDPLOT', 'DFBETASPANEL', 'DFFITSPLOT', 'DIAGNOSTICSPANEL', 'FITPLOT', 'FITSTATISTICS',
             'LOG', 'NOBS', 'OBSERVEDBYPREDICTED', 'PARAMETERESTIMATES', 'QQPLOT', 'RESIDUALBOXPLOT',
             'RESIDUALBYPREDICTED',
             'RESIDUALHISTOGRAM', 'RESIDUALPLOT', 'RFPLOT', 'RSTUDENTBYLEVERAGE', 'RSTUDENTBYPREDICTED']
        self.assertEqual(sorted(a), sorted(b.__dir__()),
                         msg=u"Simple Regession (reg) model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                             str(a), str(b)))
    def regResult1(self):
        # disabled (no test_ prefix): result-type check for reg
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        b = stat.reg(data=tr, model='weight=height')
        self.assertIsInstance(b, saspy.sasresults.SASresults, msg="correct return type")
    def regResult2(self):
        # disabled: needs 'import pandas' to run
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        tr.set_results('PANDAS')
        b = stat.reg(data=tr, model='weight=height')
        self.assertIsInstance(b.ANOVA, pandas.core.frame.DataFrame, msg="correct return type")
    def regResult3(self):
        # disabled: needs 'import IPython' to run
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        tr.set_results('PANDAS')
        b = stat.reg(data=tr, model='weight=height')
        self.assertIsInstance(b.LOG, IPython.core.display.HTML, msg="correct return type")
    def regResult4(self):
        # disabled: needs 'import IPython' to run
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        tr.set_results('PANDAS')
        b = stat.reg(data=tr, model='weight=height')
        self.assertIsInstance(b.RESIDUALHISTOGRAM, IPython.core.display.HTML, msg="correct return type")
    def test_smokeMixed(self):
        # Basic model returns objects
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        b = stat.mixed(data=tr, model='weight=height')
        a = ['COVPARMS', 'DIMENSIONS', 'FITSTATISTICS', 'LOG', 'MODELINFO', 'NOBS', 'PEARSONPANEL',
             'RESIDUALPANEL', 'STUDENTPANEL', 'TESTS3']
        self.assertEqual(sorted(a), sorted(b.__dir__()),
                         msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                             str(a), str(b)))
    def test_smokeGLM(self):
        # Basic model returns objects
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        b = stat.glm(data=tr, model='weight=height')
        a = ['DIAGNOSTICSPANEL', 'FITPLOT', 'FITSTATISTICS', 'LOG', 'MODELANOVA', 'NOBS', 'OVERALLANOVA',
             'PARAMETERESTIMATES', 'RESIDUALPLOTS']
        self.assertEqual(sorted(a), sorted(b.__dir__()),
                         msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                             str(a), str(b)))
    def test_smokeLogistic(self):
        # Basic model returns objects
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        b = stat.logistic(data=tr, model='sex=height weight')
        self.assertFalse('ERROR_LOG' in b.__dir__(), msg=u"logistic had errors in the log")
    def test_smokeTpspline(self):
        # Basic model returns objects
        stat = self.sas.sasstat()
        self.sas.submit("""
data work.melanoma;
input year incidences @@;
datalines;
1936 0.9 1937 0.8 1938 0.8 1939 1.3
1940 1.4 1941 1.2 1942 1.7 1943 1.8
1944 1.6 1945 1.5 1946 1.5 1947 2.0
1948 2.5 1949 2.7 1950 2.9 1951 2.5
1952 3.1 1953 2.4 1954 2.2 1955 2.9
1956 2.5 1957 2.6 1958 3.2 1959 3.8
1960 4.2 1961 3.9 1962 3.7 1963 3.3
1964 3.7 1965 3.9 1966 4.1 1967 3.8
1968 4.7 1969 4.4 1970 4.8 1971 4.8
1972 4.8
;;
run;
""")
        tr = self.sas.sasdata("melanoma", "work")
        b = stat.tpspline(data=tr, model='incidences = (year) /alpha = 0.1', output='out = result pred uclm lclm')
        a = ['CRITERIONPLOT', 'DATASUMMARY', 'DIAGNOSTICSPANEL', 'FITPLOT', 'FITSTATISTICS', 'FITSUMMARY', 'LOG',
             'OBSERVEDBYPREDICTED', 'QQPLOT', 'RESIDPANEL', 'RESIDUALBYPREDICTED', 'RESIDUALHISTOGRAM', 'RFPLOT']
        self.assertEqual(sorted(a), sorted(b.__dir__()),
                         msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                             str(a), str(b)))
    def test_tpspline2(self):
        # Basic model returns objects
        stat = self.sas.sasstat()
        self.sas.submit("""
data work.melanoma;
input year incidences @@;
datalines;
1936 0.9 1937 0.8 1938 0.8 1939 1.3
1940 1.4 1941 1.2 1942 1.7 1943 1.8
1944 1.6 1945 1.5 1946 1.5 1947 2.0
1948 2.5 1949 2.7 1950 2.9 1951 2.5
1952 3.1 1953 2.4 1954 2.2 1955 2.9
1956 2.5 1957 2.6 1958 3.2 1959 3.8
1960 4.2 1961 3.9 1962 3.7 1963 3.3
1964 3.7 1965 3.9 1966 4.1 1967 3.8
1968 4.7 1969 4.4 1970 4.8 1971 4.8
1972 4.8
;;
run;
""")
        tr = self.sas.sasdata("melanoma", "work")
        ds = self.sas.sasdata("result", "work")
        b = stat.tpspline(data=tr, model='incidences = (year) /alpha = 0.1', score=ds)
        a = ['CRITERIONPLOT', 'DATASUMMARY', 'DIAGNOSTICSPANEL', 'FITPLOT', 'FITSTATISTICS', 'FITSUMMARY', 'LOG',
             'OBSERVEDBYPREDICTED', 'QQPLOT', 'RESIDPANEL', 'RESIDUALBYPREDICTED', 'RESIDUALHISTOGRAM', 'RFPLOT',
             'SCOREPLOT']
        self.assertEqual(sorted(a), sorted(b.__dir__()),
                         msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                             str(a), str(b)))
    def test_smokeHPLogistic(self):
        # Basic model returns objects
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        b = stat.hplogistic(data=tr, model='sex=height weight')
        self.assertFalse('ERROR_LOG' in b.__dir__(), msg=u"hplogistic had errors in the log")
    def test_smokeHPReg(self):
        # Basic model returns objects
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        # REG
        b = stat.hpreg(data=tr, model='weight=height')
        a = ['ANOVA', 'DATAACCESSINFO', 'DIMENSIONS', 'FITSTATISTICS', 'LOG', 'MODELINFO', 'NOBS',
             'PARAMETERESTIMATES', 'PERFORMANCEINFO']
        self.assertEqual(sorted(a), sorted(b.__dir__()),
                         msg=u"Simple Regession (reg) model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                             str(a), str(b)))
    def test_selectionDict(self):
        # Basic model returns objects
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        selDict = {'method':'stepwise'}
        b = stat.hpreg(data=tr, model='weight=height', selection= selDict)
        a = ['ANOVA', 'DATAACCESSINFO', 'DIMENSIONS', 'FITSTATISTICS', 'LOG', 'MODELINFO', 'NOBS',
             'PARAMETERESTIMATES', 'PERFORMANCEINFO', 'SELECTEDEFFECTS', 'SELECTIONINFO', 'SELECTIONREASON',
             'SELECTIONSUMMARY', 'STOPREASON']
        self.assertEqual(sorted(a), sorted(b.__dir__()),
                         msg=u"Simple Regession (HPREG) model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                             str(a), str(b)))
    def test_selectionDict2(self):
        # Basic model returns objects
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        # DETAILS=NONE | SUMMARY | ALL
        selDict = {'method':'forward', 'details':'ALL', 'maxeffects':'0'}
        b = stat.hpreg(data=tr, model='weight=height', selection= selDict)
        a = ['ANOVA', 'DATAACCESSINFO', 'DIMENSIONS', 'ENTRYCANDIDATES', 'FITSTATISTICS', 'LOG',
             'MODELINFO', 'NOBS', 'PARAMETERESTIMATES', 'PERFORMANCEINFO', 'SELECTEDEFFECTS',
             'SELECTIONINFO', 'SELECTIONREASON', 'SELECTIONSUMMARY', 'STOPREASON']
        self.assertEqual(sorted(a), sorted(b.__dir__()),
                         msg=u"Simple Regession (HPREG) model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                             str(a), str(b)))
    def test_selectionDict3(self):
        # Basic model returns objects
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        # DETAILS=NONE | SUMMARY | ALL
        selDict = {'stop': 'aic', 'method': 'backward', 'select': 'aic', 'choose': 'aic', 'maxeffects':'3'}
        b = stat.hpreg(data=tr, model='weight=height', selection= selDict)
        a = ['ANOVA', 'DATAACCESSINFO', 'DIMENSIONS', 'FITSTATISTICS', 'LOG', 'MODELINFO', 'NOBS',
             'PARAMETERESTIMATES', 'PERFORMANCEINFO', 'SELECTEDEFFECTS', 'SELECTIONINFO', 'SELECTIONREASON',
             'SELECTIONSUMMARY', 'STOPREASON']
        self.assertEqual(sorted(a), sorted(b.__dir__()),
                         msg=u"Simple Regession (HPREG) model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                             str(a), str(b)))
    def test_selectionDictError(self):
        # An invalid selection option should surface as an ERROR_LOG object
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        # DETAILS=NONE | SUMMARY | ALL
        selDict = {'method': 'stepwise', 'sl': '0.05'}
        b = stat.hpreg(data=tr, model='weight=height', selection=selDict)
        a = ['ERROR_LOG']
        self.assertEqual(sorted(a), sorted(b.__dir__()),
                         msg=u"Simple Regession (HPREG) model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                             str(a), str(b)))
    def test_missingVar(self):
        # Referencing a nonexistent variable should produce only ERROR_LOG
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        b = stat.mixed(data=tr, weight='novar', model='weight=height')
        a = ['ERROR_LOG']
        self.assertEqual(a, b.__dir__(),
                         msg=u"Simple Regession (mixed) model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                             str(a), str(b)))
    """
    def test_extraStmt(self):
        # Extra Statements are ignored
        stat = self.sas.sasstat()
        d = self.sas.sasdata('cars', 'sashelp')
        b = stat.hpsplit(data=d, target='MSRP / level=interval', architecture='MLP', hidden=100, input='enginesize--length', train='', procopts='maxdepth=3')
        a = stat.hpsplit(data=d, target='MSRP / level=interval', input='enginesize--length', procopts='maxdepth=3' )
        self.assertEqual(a.__dir__(), b.__dir__(), msg=u"Extra Statements not being ignored expected:{0:s} returned:{1:s}".format(str(a), str(b)))
    """
    def test_multiTarget(self):
        # multiple target variables
        stat = self.sas.sasstat()
        nnin = self.sas.sasdata('cars', 'sashelp')
        # Need to change this assert; the exception isn't raised - I think the code changed
        x = stat.hpsplit(data=nnin, target='MSRP origin', input='enginesize--length')
        a = ['ERROR_LOG']
        self.assertEqual(a, x.__dir__(), msg=u"Multiple target variables didn't fail in stat.hpsplit")
    def test_outputDset(self):
        # out= should create a SASdata object for the output dataset
        stat = self.sas.sasstat()
        tsave = self.sas.sasdata('tsave')
        tr = self.sas.sasdata("class", "sashelp")
        stat.mixed(data=tr, weight='novar', model='weight=height', out=tsave)
        self.assertIsInstance(tsave, saspy.sasdata.SASdata, msg="out= dataset not created properly")
    def test_target_input_syntax1(self):
        # target=/input= (str, list, dict forms) must match an explicit model=
        stat = self.sas.sasstat()
        c = self.sas.sasdata("class", "sashelp")
        t1 = 'weight'
        t2 = {'interval': 'weight'}
        t3 = ['weight']
        i1 = {'interval': ['height'],
              'nominal' : ['sex']}
        i2 = {'interval': ['height']}
        i3 = ['height']
        m = stat.glm(data=c, cls='sex', model='weight = height sex');
        ti1 = stat.glm(data=c, target=t1, input=i1)
        self.assertEqual(m.__dir__(), ti1.__dir__())
        ti2 = stat.glm(data=c, target=t2, input=i1)
        self.assertEqual(m.__dir__(), ti2.__dir__())
        ti3 = stat.glm(data=c, target=t3, input=i1)
        self.assertEqual(m.__dir__(), ti3.__dir__())
        m2 = stat.glm(data=c, model='weight = height');
        ti4 = stat.glm(data=c, target=t2, input=i2)
        self.assertEqual(m2.__dir__(), ti4.__dir__())
        ti5 = stat.glm(data=c, target=t1, input=i3)
        self.assertEqual(m2.__dir__(), ti5.__dir__())
    def phregResult1(self):
        # disabled; NOTE(review): calls stat.reg -- presumably should be stat.phreg
        stat = self.sas.sasstat()
        self.defineData()
        tr = self.sas.sasdata("melanoma", "work")
        b = stat.reg(data=tr, model="""Time*VStatus(0)=LogBUN HGB Platelet Age LogWBC
Frac LogPBM Protein SCalc / selection=stepwise slentry=0.25 slstay=0.15 details""")
        self.assertIsInstance(b, saspy.SASresults, msg="correct return type")
    def factorResult1(self):
        # disabled; NOTE(review): calls stat.reg -- presumably should be stat.factor
        stat = self.sas.sasstat()
        self.defineData()
        tr = self.sas.sasdata("SocioEconomics", "work")
        b = stat.reg(data=tr, procopts='simple corr')
        self.assertIsInstance(b, saspy.SASresults, msg="correct return type")
    def factorResult2(self):
        # disabled; NOTE(review): calls stat.reg -- presumably should be stat.factor
        stat = self.sas.sasstat()
        self.defineData()
        tr = self.sas.sasdata("SocioEconomics", "work")
        b = stat.reg(data=tr,
                     procopts='priors=smc msa residual rotate=promax reorder outstat=fact_all',
                     var = ['population', 'school']
                     )
        self.assertIsInstance(b, saspy.SASresults, msg="correct return type")
    def ttestResult1(self):
        # disabled; NOTE(review): calls stat.reg -- presumably should be stat.ttest
        stat = self.sas.sasstat()
        self.defineData()
        tr = self.sas.sasdata("time", "work")
        b = stat.reg(data=tr, var='time', procopts='h0=80 alpha=0.1')
        self.assertIsInstance(b, saspy.SASresults, msg="correct return type")
    def ttestResult2(self):
        # disabled; NOTE(review): calls stat.reg -- presumably should be stat.ttest
        stat = self.sas.sasstat()
        self.defineData()
        tr = self.sas.sasdata("pressure", "work")
        b = stat.reg(data=tr, paired="SBPbefore*SBPafter")
        self.assertIsInstance(b, saspy.SASresults, msg="correct return type")
    def strdset1(self):
        # disabled: string data= and SASdata data= should give equal results
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        s = stat.reg(data='sashelp.class', model='weight=height')
        ds = stat.reg(data=tr, model='weight=height')
        self.assertEqual(s, ds, msg="string sasdata mismatch")
    def strdset2(self):
        # disabled; NOTE(review): assertRaises is given a result object, not a
        # callable -- this assertion likely does not test what it intends to
        stat = self.sas.sasstat()
        tr = self.sas.sasdata("class", "sashelp")
        s = stat.reg(data='sashelp.class', model='weight=height')
        self.assertRaises(AssertionError,s, msg="bad dataset fails")
|
|
# GENERATED FILE - DO NOT EDIT THIS FILE UNLESS YOU ARE A WIZZARD
#pylint: skip-file
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
from options import *
class GeoLocation(object):
    """(Generated) Heat property schemas for the Avi 'GeoLocation' message.

    Declares latitude/longitude (degrees.minutes), a Country/State/City
    name and a free-form tag, plus the PROPERTIES tuple and
    properties_schema mapping consumed by the Heat resources.
    """
    # all schemas
    latitude_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("(Introduced in: 17.1.1) Latitude of the location. This is represented as degrees.minutes. The range is from -90.0 (south) to +90.0 (north)."),
        required=False,
        update_allowed=True,
    )
    longitude_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("(Introduced in: 17.1.1) Longitude of the location. This is represented as degrees.minutes. The range is from -180.0 (west) to +180.0 (east)."),
        required=False,
        update_allowed=True,
    )
    name_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) Location name in the format Country/State/City."),
        required=False,
        update_allowed=True,
    )
    tag_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) Location tag string - example: USEast."),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'latitude',
        'longitude',
        'name',
        'tag',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'latitude': latitude_schema,
        'longitude': longitude_schema,
        'name': name_schema,
        'tag': tag_schema,
    }
class IpAddrRange(object):
    """(Generated) Heat property schemas for an inclusive IP address range.

    'begin' and 'end' are nested IpAddr maps (IpAddr is defined elsewhere
    in this generated module); both endpoints are required.
    """
    # all schemas
    begin_schema = properties.Schema(
        properties.Schema.MAP,
        _("Starting IP address of the range"),
        schema=IpAddr.properties_schema,
        required=True,
        update_allowed=True,
    )
    end_schema = properties.Schema(
        properties.Schema.MAP,
        _("Ending IP address of the range"),
        schema=IpAddr.properties_schema,
        required=True,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'begin',
        'end',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'begin': begin_schema,
        'end': end_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'begin': getattr(IpAddr, 'field_references', {}),
        'end': getattr(IpAddr, 'field_references', {}),
    }
    unique_keys = {
        'end': getattr(IpAddr, 'unique_keys', {}),
        'begin': getattr(IpAddr, 'unique_keys', {}),
        'my_key': 'begin,end',
    }
class CustomParams(object):
    """(Generated) Heat property schemas for a named custom parameter.

    A required name with an optional value, plus booleans flagging the
    value as sensitive and/or dynamic (both default False per the
    generated descriptions).
    """
    # all schemas
    name_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=True,
        update_allowed=True,
    )
    value_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=True,
    )
    is_sensitive_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: False)"),
        required=False,
        update_allowed=True,
    )
    is_dynamic_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: False)"),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'name',
        'value',
        'is_sensitive',
        'is_dynamic',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'name': name_schema,
        'value': value_schema,
        'is_sensitive': is_sensitive_schema,
        'is_dynamic': is_dynamic_schema,
    }
class PortRange(object):
    """(Generated) Heat property schemas for an inclusive TCP/UDP port range.

    Both 'start' and 'end' are required numbers.
    """
    # all schemas
    start_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("TCP/UDP port range start (inclusive)."),
        required=True,
        update_allowed=True,
    )
    end_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("TCP/UDP port range end (inclusive)."),
        required=True,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'start',
        'end',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'start': start_schema,
        'end': end_schema,
    }
class CustomTag(object):
    """(Generated) Heat property schemas for a key/value custom tag.

    'tag_key' is required; 'tag_val' is optional.
    """
    # all schemas
    tag_key_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=True,
        update_allowed=True,
    )
    tag_val_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'tag_key',
        'tag_val',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'tag_key': tag_key_schema,
        'tag_val': tag_val_schema,
    }
class TenantConfiguration(object):
    """(Generated) Heat property schemas for per-tenant configuration.

    Boolean flags controlling tenant IP-routing-domain isolation, Service
    Engine ownership context, and tenant access to provider-owned SEs.
    """
    # all schemas
    tenant_vrf_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("When 'Per Tenant IP Domain' is selected, each tenant gets its own routing domain that is not shared with any other tenant. When 'Share IP Domain across all tenants' is selected, all tenants share the same routing domain. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    se_in_provider_context_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Controls the ownership of ServiceEngines. Service Engines can either be exclusively owned by each tenant or owned by the administrator and shared by all tenants. When ServiceEngines are owned by the administrator, each tenant can have either read access or no access to their Service Engines. (Default: True)"),
        required=False,
        update_allowed=True,
    )
    tenant_access_to_provider_se_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: True)"),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'tenant_vrf',
        'se_in_provider_context',
        'tenant_access_to_provider_se',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'tenant_vrf': tenant_vrf_schema,
        'se_in_provider_context': se_in_provider_context_schema,
        'tenant_access_to_provider_se': tenant_access_to_provider_se_schema,
    }
class Tag(object):
    """(Generated) Heat property schemas for a typed tag value.

    'value' is required; 'type' is constrained to AVI_DEFINED,
    USER_DEFINED or VCENTER_DEFINED (default USER_DEFINED per the
    generated description).
    """
    # all schemas
    value_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=True,
        update_allowed=True,
    )
    type_schema = properties.Schema(
        properties.Schema.STRING,
        _(" (Default: USER_DEFINED)"),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['AVI_DEFINED', 'USER_DEFINED', 'VCENTER_DEFINED']),
        ],
    )
    # properties list
    PROPERTIES = (
        'value',
        'type',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'value': value_schema,
        'type': type_schema,
    }
    unique_keys = {
        'my_key': 'value',
    }
class GcpInfo(object):
    """(Generated) Heat property schemas for a Service Engine's GCP placement.

    project/zone/network are required and immutable (update_allowed=False);
    subnet and hostname are optional and likewise immutable.
    """
    # all schemas
    project_schema = properties.Schema(
        properties.Schema.STRING,
        _("Project this SE belongs to"),
        required=True,
        update_allowed=False,
    )
    zone_schema = properties.Schema(
        properties.Schema.STRING,
        _("Zone this SE is part of"),
        required=True,
        update_allowed=False,
    )
    network_schema = properties.Schema(
        properties.Schema.STRING,
        _("Network this SE is assigned"),
        required=True,
        update_allowed=False,
    )
    subnet_schema = properties.Schema(
        properties.Schema.STRING,
        _("Subnet assigned to this SE"),
        required=False,
        update_allowed=False,
    )
    hostname_schema = properties.Schema(
        properties.Schema.STRING,
        _("Hostname of this SE"),
        required=False,
        update_allowed=False,
    )
    # properties list
    PROPERTIES = (
        'project',
        'zone',
        'network',
        'subnet',
        'hostname',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'project': project_schema,
        'zone': zone_schema,
        'network': network_schema,
        'subnet': subnet_schema,
        'hostname': hostname_schema,
    }
class TimeStamp(object):
    """(Generated) Heat property schemas for a seconds/microseconds timestamp.

    Both 'secs' and 'usecs' are required numbers.
    """
    # all schemas
    secs_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=True,
        update_allowed=True,
    )
    usecs_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=True,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'secs',
        'usecs',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'secs': secs_schema,
        'usecs': usecs_schema,
    }
class IpAddrPort(object):
    """(Generated) Heat property schemas for a server endpoint.

    One of 'ip' (nested IpAddr map) or 'hostname' should be set; 'port'
    is required and 'name' is an optional label.
    """
    # all schemas
    ip_schema = properties.Schema(
        properties.Schema.MAP,
        _("IP Address of host. One of IP address or hostname should be set"),
        schema=IpAddr.properties_schema,
        required=False,
        update_allowed=True,
    )
    port_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Port number of server"),
        required=True,
        update_allowed=True,
    )
    hostname_schema = properties.Schema(
        properties.Schema.STRING,
        _("Hostname of server. One of IP address or hostname should be set"),
        required=False,
        update_allowed=True,
    )
    name_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'ip',
        'port',
        'hostname',
        'name',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'ip': ip_schema,
        'port': port_schema,
        'hostname': hostname_schema,
        'name': name_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'ip': getattr(IpAddr, 'field_references', {}),
    }
    unique_keys = {
        'ip': getattr(IpAddr, 'unique_keys', {}),
    }
class HTTPLocalFile(object):
    """(Generated) Heat property schemas for a locally-served HTTP response.

    Both the mime-type and the response body content are required.
    """
    # all schemas
    content_type_schema = properties.Schema(
        properties.Schema.STRING,
        _("Mime-type of the content in the file."),
        required=True,
        update_allowed=True,
    )
    file_content_schema = properties.Schema(
        properties.Schema.STRING,
        _("File content to used in the local HTTP response body."),
        required=True,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'content_type',
        'file_content',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'content_type': content_type_schema,
        'file_content': file_content_schema,
    }
class Property(object):
    """Schema container for a generic name/value property pair.

    ``unique_keys`` declares 'name' as the uniqueness key for entries of
    this type inside list-valued parent properties.
    """
    # all schemas
    name_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.2.1) Property name."),
        required=True,
        update_allowed=True,
    )
    value_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.2.1) Property value."),
        required=False,
        update_allowed=True,
    )

    # properties list
    PROPERTIES = (
        'name',
        'value',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'name': name_schema,
        'value': value_schema,
    }

    unique_keys = {
        'my_key': 'name',
    }
class SeNetworkSubnet(object):
    """Schema container pairing a network UUID with a subnet prefix.

    Both fields are optional; nested metadata is forwarded from the
    ``IpAddrPrefix`` container.
    """
    # all schemas
    network_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.2.4) "),
        required=False,
        update_allowed=True,
    )
    subnet_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.2.4) "),
        schema=IpAddrPrefix.properties_schema,
        required=False,
        update_allowed=True,
    )

    # properties list
    PROPERTIES = (
        'network_uuid',
        'subnet',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'network_uuid': network_uuid_schema,
        'subnet': subnet_schema,
    }

    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'subnet': getattr(IpAddrPrefix, 'field_references', {}),
    }

    unique_keys = {
        'subnet': getattr(IpAddrPrefix, 'unique_keys', {}),
    }
class PlacementNetwork(object):
    """Schema container for a placement network (mandatory UUID + subnet).

    Unlike ``SeNetworkSubnet``, 'network_uuid' is required here and is
    registered in ``field_references`` as a 'network' reference so it can
    be resolved via the get_avi_uuid_by_name mechanism.
    """
    # all schemas
    network_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=True,
        update_allowed=True,
    )
    subnet_schema = properties.Schema(
        properties.Schema.MAP,
        _(""),
        schema=IpAddrPrefix.properties_schema,
        required=False,
        update_allowed=True,
    )

    # properties list
    PROPERTIES = (
        'network_uuid',
        'subnet',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'network_uuid': network_uuid_schema,
        'subnet': subnet_schema,
    }

    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'subnet': getattr(IpAddrPrefix, 'field_references', {}),
        'network_uuid': 'network',
    }

    unique_keys = {
        'subnet': getattr(IpAddrPrefix, 'unique_keys', {}),
    }
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with
"--update_goldens" flag set to "True" to update goldens when making changes to
the public TF python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from collections import defaultdict
from operator import attrgetter
import os
import re
import subprocess
import sys
import unittest
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import api_def_pb2
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom (populated by argparse under __main__):
FLAGS = None

# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""

# DEFINE_boolean, verbose_diffs, default False:
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""

# Locations (relative to the resource root) of the golden API snapshots
# and the helper/readme files shown to users when the test fails.
_API_GOLDEN_FOLDER = 'tensorflow/tools/api/golden'
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'

# ApiDefs are sharded into one api_def_<letter>.pbtxt file per initial
# letter of the op name (see ApiDefTest.testAPIDefCompatibility).
_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
_CONVERT_FROM_MULTILINE_SCRIPT = 'tensorflow/tools/api/tests/convert_from_multiline'
_BASE_API_DIR = 'tensorflow/core/api_def/base_api'
_PYTHON_API_DIR = 'tensorflow/core/api_def/python_api'
def _KeyToFilePath(key):
    """Map an api object key to the path of its golden .pbtxt file."""

    def _caps_to_dash(matchobj):
        # 'ApiDef' -> '-api-def': every capital becomes '-' + lowercase.
        return '-%s' % (matchobj.group(0).lower())

    case_insensitive_key = re.sub('([A-Z]{1})', _caps_to_dash, key)
    return os.path.join(_API_GOLDEN_FOLDER, '%s.pbtxt' % case_insensitive_key)
def _FileNameToKey(filename):
"""From a given filename, construct a key we use for api objects."""
def _ReplaceDashWithCaps(matchobj):
match = matchobj.group(0)
return match[1].upper()
base_filename = os.path.basename(filename)
base_filename_without_ext = os.path.splitext(base_filename)[0]
api_object_key = re.sub(
'((-[a-z]){1})', _ReplaceDashWithCaps, base_filename_without_ext)
return api_object_key
def _GetSymbol(symbol_id):
    """Get TensorFlow symbol based on the given identifier.

    Args:
      symbol_id: Symbol identifier in the form module1.module2. ... .sym.

    Returns:
      Symbol corresponding to the given id.
    """
    symbol = tf
    # Skip the leading component, which should be 'tensorflow' itself.
    for name in symbol_id.split('.')[1:]:
        symbol = getattr(symbol, name)
    return symbol
def _IsGenModule(module_name):
if not module_name:
return False
module_name_split = module_name.split('.')
return module_name_split[-1].startswith('gen_')
class ApiCompatibilityTest(test.TestCase):
    """Compares the live TensorFlow public API against golden snapshots."""

    def __init__(self, *args, **kwargs):
        super(ApiCompatibilityTest, self).__init__(*args, **kwargs)

        # Messages shown to the user when diffs are found; loaded once here.
        golden_update_warning_filename = os.path.join(
            resource_loader.get_root_dir_with_all_resources(),
            _UPDATE_WARNING_FILE)
        self._update_golden_warning = file_io.read_file_to_string(
            golden_update_warning_filename)

        test_readme_filename = os.path.join(
            resource_loader.get_root_dir_with_all_resources(),
            _TEST_README_FILE)
        self._test_readme_message = file_io.read_file_to_string(
            test_readme_filename)

    def _AssertProtoDictEquals(self,
                               expected_dict,
                               actual_dict,
                               verbose=False,
                               update_goldens=False):
        """Diff given dicts of protobufs and report differences a readable way.

        Args:
          expected_dict: a dict of TFAPIObject protos constructed from golden
            files.
          actual_dict: a dict of TFAPIObject protos constructed by reading from
            the TF package linked to the test.
          verbose: Whether to log the full diffs, or simply report which files
            were different.
          update_goldens: Whether to update goldens when there are diffs found.
        """
        diffs = []
        verbose_diffs = []

        expected_keys = set(expected_dict.keys())
        actual_keys = set(actual_dict.keys())
        only_in_expected = expected_keys - actual_keys
        only_in_actual = actual_keys - expected_keys
        all_keys = expected_keys | actual_keys

        # This will be populated below.
        updated_keys = []

        for key in all_keys:
            diff_message = ''
            verbose_diff_message = ''
            # First check if the key is not found in one or the other.
            if key in only_in_expected:
                diff_message = 'Object %s expected but not found (removed).' % key
                verbose_diff_message = diff_message
            elif key in only_in_actual:
                diff_message = 'New object %s found (added).' % key
                verbose_diff_message = diff_message
            else:
                # Now we can run an actual proto diff.
                try:
                    self.assertProtoEquals(expected_dict[key], actual_dict[key])
                except AssertionError as e:
                    updated_keys.append(key)
                    diff_message = 'Change detected in python object: %s.' % key
                    verbose_diff_message = str(e)

            # All difference cases covered above. If any difference found,
            # add to the list.
            if diff_message:
                diffs.append(diff_message)
                verbose_diffs.append(verbose_diff_message)

        # If diffs are found, handle them based on flags.
        if diffs:
            diff_count = len(diffs)
            logging.error(self._test_readme_message)
            logging.error('%d differences found between API and golden.', diff_count)
            messages = verbose_diffs if verbose else diffs
            for i in range(diff_count):
                logging.error('Issue %d\t: %s', i + 1, messages[i])

            if update_goldens:
                # Write files if requested.
                logging.warning(self._update_golden_warning)

                # If the keys are only in expected, some objects are deleted.
                # Remove files.
                for key in only_in_expected:
                    filepath = _KeyToFilePath(key)
                    file_io.delete_file(filepath)

                # If the files are only in actual (current library), these are
                # new modules. Write them to files. Also record all updates in
                # files.
                for key in only_in_actual | set(updated_keys):
                    filepath = _KeyToFilePath(key)
                    file_io.write_string_to_file(
                        filepath, text_format.MessageToString(actual_dict[key]))
            else:
                # Fail if we cannot fix the test by updating goldens.
                self.fail('%d differences found between API and golden.' % diff_count)
        else:
            logging.info('No differences found between API and golden.')

    @unittest.skipUnless(
        sys.version_info.major == 2 and os.uname()[0] == 'Linux',
        'API compabitility test goldens are generated using python2 on Linux.')
    def testAPIBackwardsCompatibility(self):
        # Extract all API stuff.
        visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()

        public_api_visitor = public_api.PublicAPIVisitor(visitor)
        # tf.contrib is not covered by the compatibility guarantee.
        public_api_visitor.do_not_descend_map['tf'].append('contrib')
        traverse.traverse(tf, public_api_visitor)

        proto_dict = visitor.GetProtos()

        # Read all golden files.
        expression = os.path.join(
            resource_loader.get_root_dir_with_all_resources(),
            _KeyToFilePath('*'))
        golden_file_list = file_io.get_matching_files(expression)

        def _ReadFileToProto(filename):
            """Read a filename, create a protobuf from its contents."""
            ret_val = api_objects_pb2.TFAPIObject()
            text_format.Merge(file_io.read_file_to_string(filename), ret_val)
            return ret_val

        golden_proto_dict = {
            _FileNameToKey(filename): _ReadFileToProto(filename)
            for filename in golden_file_list
        }

        # Diff them. Do not fail if called with update.
        # If the test is run to update goldens, only report diffs but do not
        # fail.
        self._AssertProtoDictEquals(
            golden_proto_dict,
            proto_dict,
            verbose=FLAGS.verbose_diffs,
            update_goldens=FLAGS.update_goldens)
class ApiDefTest(test.TestCase):
    """Checks that the Python ApiDef override files match the Python API."""

    def __init__(self, *args, **kwargs):
        super(ApiDefTest, self).__init__(*args, **kwargs)
        # Regexes used to convert CamelCase op names to snake_case.
        self._first_cap_pattern = re.compile('(.)([A-Z][a-z]+)')
        self._all_cap_pattern = re.compile('([a-z0-9])([A-Z])')

    def _GenerateLowerCaseOpName(self, op_name):
        """Convert a CamelCase op name to its snake_case equivalent."""
        lower_case_name = self._first_cap_pattern.sub(r'\1_\2', op_name)
        return self._all_cap_pattern.sub(r'\1_\2', lower_case_name).lower()

    def _CreatePythonApiDef(self, base_api_def, endpoint_names):
        """Creates Python ApiDef that overrides base_api_def if needed.

        Args:
          base_api_def: (api_def_pb2.ApiDef) base ApiDef instance.
          endpoint_names: List of Python endpoint names.

        Returns:
          api_def_pb2.ApiDef instance with overrides for base_api_def
          if module.name endpoint is different from any existing
          endpoints in base_api_def. Otherwise, returns None.
        """
        endpoint_names_set = set(endpoint_names)

        # Compare against the base endpoints normalized to snake_case.
        base_endpoint_names_set = {
            self._GenerateLowerCaseOpName(endpoint.name)
            for endpoint in base_api_def.endpoint}

        if endpoint_names_set == base_endpoint_names_set:
            return None  # All endpoints are the same

        api_def = api_def_pb2.ApiDef()
        api_def.graph_op_name = base_api_def.graph_op_name

        for endpoint_name in sorted(endpoint_names):
            new_endpoint = api_def.endpoint.add()
            new_endpoint.name = endpoint_name

        return api_def

    def _GetBaseApiMap(self):
        """Get a map from graph op name to its base ApiDef.

        Returns:
          Dictionary mapping graph op name to corresponding ApiDef.
        """
        # Convert base ApiDef in Multiline format to Proto format.
        converted_base_api_dir = os.path.join(
            test.get_temp_dir(), 'temp_base_api_defs')
        subprocess.check_call(
            [os.path.join(resource_loader.get_root_dir_with_all_resources(),
                          _CONVERT_FROM_MULTILINE_SCRIPT),
             _BASE_API_DIR, converted_base_api_dir])

        name_to_base_api_def = {}
        base_api_files = file_io.get_matching_files(
            os.path.join(converted_base_api_dir, 'api_def_*.pbtxt'))
        for base_api_file in base_api_files:
            if file_io.file_exists(base_api_file):
                api_defs = api_def_pb2.ApiDefs()
                text_format.Merge(
                    file_io.read_file_to_string(base_api_file), api_defs)
                for api_def in api_defs.op:
                    # Keys are snake_case so they can be matched against
                    # generated Python op names.
                    lower_case_name = self._GenerateLowerCaseOpName(api_def.graph_op_name)
                    name_to_base_api_def[lower_case_name] = api_def
        return name_to_base_api_def

    @unittest.skipUnless(
        sys.version_info.major == 2 and os.uname()[0] == 'Linux',
        'API compabitility test goldens are generated using python2 on Linux.')
    def testAPIDefCompatibility(self):
        # Get base ApiDef
        name_to_base_api_def = self._GetBaseApiMap()

        # Extract Python API
        visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
        public_api_visitor = public_api.PublicAPIVisitor(visitor)
        public_api_visitor.do_not_descend_map['tf'].append('contrib')
        traverse.traverse(tf, public_api_visitor)
        proto_dict = visitor.GetProtos()

        # Map from first character of op name to Python ApiDefs.
        api_def_map = defaultdict(api_def_pb2.ApiDefs)
        # We need to override all endpoints even if 1 endpoint differs from base
        # ApiDef. So, we first create a map from an op to all its endpoints.
        op_to_endpoint_name = defaultdict(list)

        # Generate map from generated python op to endpoint names.
        for public_module, value in proto_dict.items():
            module_obj = _GetSymbol(public_module)
            for sym in value.tf_module.member_method:
                obj = getattr(module_obj, sym.name)

                # Check if object is defined in gen_* module. That is,
                # the object has been generated from OpDef.
                if hasattr(obj, '__module__') and _IsGenModule(obj.__module__):
                    if obj.__name__ not in name_to_base_api_def:
                        # Symbol might be defined only in Python and not generated from
                        # C++ api.
                        continue
                    relative_public_module = public_module[len('tensorflow.'):]
                    full_name = (relative_public_module + '.' + sym.name
                                 if relative_public_module else sym.name)
                    op_to_endpoint_name[obj].append(full_name)

        # Generate Python ApiDef overrides.
        for op, endpoint_names in op_to_endpoint_name.items():
            api_def = self._CreatePythonApiDef(
                name_to_base_api_def[op.__name__], endpoint_names)
            if api_def:
                # ApiDef files are sharded by the first letter of the op name.
                api_defs = api_def_map[op.__name__[0].upper()]
                api_defs.op.extend([api_def])

        for key in _ALPHABET:
            # Get new ApiDef for the given key.
            new_api_defs_str = ''
            if key in api_def_map:
                new_api_defs = api_def_map[key]
                new_api_defs.op.sort(key=attrgetter('graph_op_name'))
                new_api_defs_str = str(new_api_defs)

            # Get current ApiDef for the given key.
            api_defs_file_path = os.path.join(
                _PYTHON_API_DIR, 'api_def_%s.pbtxt' % key)
            old_api_defs_str = ''
            if file_io.file_exists(api_defs_file_path):
                old_api_defs_str = file_io.read_file_to_string(api_defs_file_path)

            if old_api_defs_str == new_api_defs_str:
                continue

            if FLAGS.update_goldens:
                if not new_api_defs_str:
                    logging.info('Deleting %s...' % api_defs_file_path)
                    file_io.delete_file(api_defs_file_path)
                else:
                    logging.info('Updating %s...' % api_defs_file_path)
                    file_io.write_string_to_file(api_defs_file_path, new_api_defs_str)
            else:
                self.assertMultiLineEqual(
                    old_api_defs_str, new_api_defs_str,
                    'To update golden API files, run api_compatibility_test locally '
                    'with --update_goldens=True flag.')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    def _str_to_bool(value):
        """Parse a boolean flag value from the command line.

        argparse's ``type=bool`` is a well-known pitfall: ``bool('False')``
        is True since any non-empty string is truthy, so
        ``--update_goldens=False`` would silently enable golden updates.
        Parse the string explicitly instead. Documented usage
        (``--update_goldens=True``) keeps working unchanged.
        """
        return value.lower() in ('true', 't', '1', 'yes')

    parser.add_argument(
        '--update_goldens', type=_str_to_bool, default=False,
        help=_UPDATE_GOLDENS_HELP)
    parser.add_argument(
        '--verbose_diffs', type=_str_to_bool, default=False,
        help=_VERBOSE_DIFFS_HELP)
    FLAGS, unparsed = parser.parse_known_args()

    # Now update argv, so that unittest library does not get confused.
    sys.argv = [sys.argv[0]] + unparsed
    test.main()
|
|
from sympy import Matrix
from sympy.tensor.tensor import tensor_indices, TensorHead, tensor_heads, \
TensExpr, canon_bp
from sympy import eye
from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex, \
kahane_simplify, gamma_trace, _simplify_single_line, simplify_gamma_expression
def _is_tensor_eq(arg1, arg2):
    """Compare two expressions after canonicalization, using TensExpr.equals
    when either side is a tensor expression and plain == otherwise."""
    arg1, arg2 = canon_bp(arg1), canon_bp(arg2)
    if isinstance(arg1, TensExpr):
        return arg1.equals(arg2)
    if isinstance(arg2, TensExpr):
        return arg2.equals(arg1)
    return arg1 == arg2
def execute_gamma_simplify_tests_for_function(tfunc, D):
    """
    Perform tests to check if tfunc is able to simplify gamma matrix expressions.

    Parameters
    ==========

    `tfunc` a function to simplify a `TIDS`, shall return the simplified `TIDS`.
    `D` the number of dimension (in most cases `D=4`).

    """
    mu, nu, rho, sigma = tensor_indices("mu, nu, rho, sigma", LorentzIndex)
    a1, a2, a3, a4, a5, a6 = tensor_indices("a1:7", LorentzIndex)
    mu11, mu12, mu21, mu31, mu32, mu41, mu51, mu52 = tensor_indices("mu11, mu12, mu21, mu31, mu32, mu41, mu51, mu52", LorentzIndex)
    mu61, mu71, mu72 = tensor_indices("mu61, mu71, mu72", LorentzIndex)
    m0, m1, m2, m3, m4, m5, m6 = tensor_indices("m0:7", LorentzIndex)

    # Symmetrized product: g(x, y) is the metric times the identity matrix.
    def g(xx, yy):
        return (G(xx)*G(yy) + G(yy)*G(xx))/2

    # Some examples taken from Kahane's paper, 4 dim only:
    if D == 4:
        t = (G(a1)*G(mu11)*G(a2)*G(mu21)*G(-a1)*G(mu31)*G(-a2))
        assert _is_tensor_eq(tfunc(t), -4*G(mu11)*G(mu31)*G(mu21) - 4*G(mu31)*G(mu11)*G(mu21))

        t = (G(a1)*G(mu11)*G(mu12)*\
             G(a2)*G(mu21)*\
             G(a3)*G(mu31)*G(mu32)*\
             G(a4)*G(mu41)*\
             G(-a2)*G(mu51)*G(mu52)*\
             G(-a1)*G(mu61)*\
             G(-a3)*G(mu71)*G(mu72)*\
             G(-a4))
        assert _is_tensor_eq(tfunc(t), \
            16*G(mu31)*G(mu32)*G(mu72)*G(mu71)*G(mu11)*G(mu52)*G(mu51)*G(mu12)*G(mu61)*G(mu21)*G(mu41) + 16*G(mu31)*G(mu32)*G(mu72)*G(mu71)*G(mu12)*G(mu51)*G(mu52)*G(mu11)*G(mu61)*G(mu21)*G(mu41) + 16*G(mu71)*G(mu72)*G(mu32)*G(mu31)*G(mu11)*G(mu52)*G(mu51)*G(mu12)*G(mu61)*G(mu21)*G(mu41) + 16*G(mu71)*G(mu72)*G(mu32)*G(mu31)*G(mu12)*G(mu51)*G(mu52)*G(mu11)*G(mu61)*G(mu21)*G(mu41))

    # Fully Lorentz-contracted expressions, these return scalars:

    def add_delta(ne):
        return ne * eye(4)  # DiracSpinorIndex.delta(DiracSpinorIndex.auto_left, -DiracSpinorIndex.auto_right)

    t = (G(mu)*G(-mu))
    ts = add_delta(D)
    assert _is_tensor_eq(tfunc(t), ts)

    t = (G(mu)*G(nu)*G(-mu)*G(-nu))
    ts = add_delta(2*D - D**2)  # -8
    assert _is_tensor_eq(tfunc(t), ts)

    t = (G(mu)*G(nu)*G(-nu)*G(-mu))
    ts = add_delta(D**2)  # 16
    assert _is_tensor_eq(tfunc(t), ts)

    t = (G(mu)*G(nu)*G(-rho)*G(-nu)*G(-mu)*G(rho))
    ts = add_delta(4*D - 4*D**2 + D**3)  # 16
    assert _is_tensor_eq(tfunc(t), ts)

    t = (G(mu)*G(nu)*G(rho)*G(-rho)*G(-nu)*G(-mu))
    ts = add_delta(D**3)  # 64
    assert _is_tensor_eq(tfunc(t), ts)

    t = (G(a1)*G(a2)*G(a3)*G(a4)*G(-a3)*G(-a1)*G(-a2)*G(-a4))
    ts = add_delta(-8*D + 16*D**2 - 8*D**3 + D**4)  # -32
    assert _is_tensor_eq(tfunc(t), ts)

    t = (G(-mu)*G(-nu)*G(-rho)*G(-sigma)*G(nu)*G(mu)*G(sigma)*G(rho))
    ts = add_delta(-16*D + 24*D**2 - 8*D**3 + D**4)  # 64
    assert _is_tensor_eq(tfunc(t), ts)

    t = (G(-mu)*G(nu)*G(-rho)*G(sigma)*G(rho)*G(-nu)*G(mu)*G(-sigma))
    ts = add_delta(8*D - 12*D**2 + 6*D**3 - D**4)  # -32
    assert _is_tensor_eq(tfunc(t), ts)

    t = (G(a1)*G(a2)*G(a3)*G(a4)*G(a5)*G(-a3)*G(-a2)*G(-a1)*G(-a5)*G(-a4))
    ts = add_delta(64*D - 112*D**2 + 60*D**3 - 12*D**4 + D**5)  # 256
    assert _is_tensor_eq(tfunc(t), ts)

    t = (G(a1)*G(a2)*G(a3)*G(a4)*G(a5)*G(-a3)*G(-a1)*G(-a2)*G(-a4)*G(-a5))
    ts = add_delta(64*D - 120*D**2 + 72*D**3 - 16*D**4 + D**5)  # -128
    assert _is_tensor_eq(tfunc(t), ts)

    t = (G(a1)*G(a2)*G(a3)*G(a4)*G(a5)*G(a6)*G(-a3)*G(-a2)*G(-a1)*G(-a6)*G(-a5)*G(-a4))
    ts = add_delta(416*D - 816*D**2 + 528*D**3 - 144*D**4 + 18*D**5 - D**6)  # -128
    assert _is_tensor_eq(tfunc(t), ts)

    t = (G(a1)*G(a2)*G(a3)*G(a4)*G(a5)*G(a6)*G(-a2)*G(-a3)*G(-a1)*G(-a6)*G(-a4)*G(-a5))
    ts = add_delta(416*D - 848*D**2 + 584*D**3 - 172*D**4 + 22*D**5 - D**6)  # -128
    assert _is_tensor_eq(tfunc(t), ts)

    # Expressions with free indices:

    t = (G(mu)*G(nu)*G(rho)*G(sigma)*G(-mu))
    assert _is_tensor_eq(tfunc(t), (-2*G(sigma)*G(rho)*G(nu) + (4-D)*G(nu)*G(rho)*G(sigma)))

    t = (G(mu)*G(nu)*G(-mu))
    assert _is_tensor_eq(tfunc(t), (2-D)*G(nu))

    t = (G(mu)*G(nu)*G(rho)*G(-mu))
    assert _is_tensor_eq(tfunc(t), 2*G(nu)*G(rho) + 2*G(rho)*G(nu) - (4-D)*G(nu)*G(rho))

    t = 2*G(m2)*G(m0)*G(m1)*G(-m0)*G(-m1)
    st = tfunc(t)
    assert _is_tensor_eq(st, (D*(-2*D + 4))*G(m2))

    t = G(m2)*G(m0)*G(m1)*G(-m0)*G(-m2)
    st = tfunc(t)
    assert _is_tensor_eq(st, ((-D + 2)**2)*G(m1))

    t = G(m0)*G(m1)*G(m2)*G(m3)*G(-m1)
    st = tfunc(t)
    assert _is_tensor_eq(st, (D - 4)*G(m0)*G(m2)*G(m3) + 4*G(m0)*g(m2, m3))

    t = G(m0)*G(m1)*G(m2)*G(m3)*G(-m1)*G(-m0)
    st = tfunc(t)
    assert _is_tensor_eq(st, ((D - 4)**2)*G(m2)*G(m3) + (8*D - 16)*g(m2, m3))

    t = G(m2)*G(m0)*G(m1)*G(-m2)*G(-m0)
    st = tfunc(t)
    assert _is_tensor_eq(st, ((-D + 2)*(D - 4) + 4)*G(m1))

    t = G(m3)*G(m1)*G(m0)*G(m2)*G(-m3)*G(-m0)*G(-m2)
    st = tfunc(t)
    assert _is_tensor_eq(st, (-4*D + (-D + 2)**2*(D - 4) + 8)*G(m1))

    t = 2*G(m0)*G(m1)*G(m2)*G(m3)*G(-m0)
    st = tfunc(t)
    assert _is_tensor_eq(st, ((-2*D + 8)*G(m1)*G(m2)*G(m3) - 4*G(m3)*G(m2)*G(m1)))

    t = G(m5)*G(m0)*G(m1)*G(m4)*G(m2)*G(-m4)*G(m3)*G(-m0)
    st = tfunc(t)
    assert _is_tensor_eq(st, (((-D + 2)*(-D + 4))*G(m5)*G(m1)*G(m2)*G(m3) + (2*D - 4)*G(m5)*G(m3)*G(m2)*G(m1)))

    t = -G(m0)*G(m1)*G(m2)*G(m3)*G(-m0)*G(m4)
    st = tfunc(t)
    assert _is_tensor_eq(st, ((D - 4)*G(m1)*G(m2)*G(m3)*G(m4) + 2*G(m3)*G(m2)*G(m1)*G(m4)))

    t = G(-m5)*G(m0)*G(m1)*G(m2)*G(m3)*G(m4)*G(-m0)*G(m5)
    st = tfunc(t)
    result1 = ((-D + 4)**2 + 4)*G(m1)*G(m2)*G(m3)*G(m4) +\
        (4*D - 16)*G(m3)*G(m2)*G(m1)*G(m4) + (4*D - 16)*G(m4)*G(m1)*G(m2)*G(m3)\
        + 4*G(m2)*G(m1)*G(m4)*G(m3) + 4*G(m3)*G(m4)*G(m1)*G(m2) +\
        4*G(m4)*G(m3)*G(m2)*G(m1)

    # Kahane's algorithm yields this result, which is equivalent to `result1`
    # in four dimensions, but is not automatically recognized as equal:
    result2 = 8*G(m1)*G(m2)*G(m3)*G(m4) + 8*G(m4)*G(m3)*G(m2)*G(m1)

    if D == 4:
        assert _is_tensor_eq(st, (result1)) or _is_tensor_eq(st, (result2))
    else:
        assert _is_tensor_eq(st, (result1))

    # and a few very simple cases, with no contracted indices:

    t = G(m0)
    st = tfunc(t)
    assert _is_tensor_eq(st, t)

    t = -7*G(m0)
    st = tfunc(t)
    assert _is_tensor_eq(st, t)

    t = 224*G(m0)*G(m1)*G(-m2)*G(m3)
    st = tfunc(t)
    assert _is_tensor_eq(st, t)
def test_kahane_algorithm():
    """Run the shared gamma-matrix simplification suite against
    _simplify_single_line in D=4 dimensions."""

    # Wrap this function to convert to and from TIDS:
    def wrapped(expr):
        return _simplify_single_line(expr)

    execute_gamma_simplify_tests_for_function(wrapped, D=4)
def test_kahane_simplify1():
    """Direct checks of kahane_simplify on contracted gamma-matrix strings.

    Scalar results come back as multiples of the 4x4 identity (eye(4)).
    """
    i0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15 = tensor_indices('i0:16', LorentzIndex)
    mu, nu, rho, sigma = tensor_indices("mu, nu, rho, sigma", LorentzIndex)
    D = 4

    # No contractions: expression is returned unchanged.
    t = G(i0)*G(i1)
    r = kahane_simplify(t)
    assert r.equals(t)

    t = G(i0)*G(i1)*G(-i0)
    r = kahane_simplify(t)
    assert r.equals(-2*G(i1))
    t = G(i0)*G(i1)*G(-i0)
    r = kahane_simplify(t)
    assert r.equals(-2*G(i1))
    t = G(i0)*G(i1)
    r = kahane_simplify(t)
    assert r.equals(t)
    t = G(i0)*G(i1)
    r = kahane_simplify(t)
    assert r.equals(t)
    t = G(i0)*G(-i0)
    r = kahane_simplify(t)
    assert r.equals(4*eye(4))
    t = G(i0)*G(-i0)
    r = kahane_simplify(t)
    assert r.equals(4*eye(4))
    t = G(i0)*G(-i0)
    r = kahane_simplify(t)
    assert r.equals(4*eye(4))
    t = G(i0)*G(i1)*G(-i0)
    r = kahane_simplify(t)
    assert r.equals(-2*G(i1))
    t = G(i0)*G(i1)*G(-i0)*G(-i1)
    r = kahane_simplify(t)
    assert r.equals((2*D - D**2)*eye(4))
    t = G(i0)*G(i1)*G(-i0)*G(-i1)
    r = kahane_simplify(t)
    assert r.equals((2*D - D**2)*eye(4))
    t = G(i0)*G(-i0)*G(i1)*G(-i1)
    r = kahane_simplify(t)
    assert r.equals(16*eye(4))
    t = (G(mu)*G(nu)*G(-nu)*G(-mu))
    r = kahane_simplify(t)
    assert r.equals(D**2*eye(4))
    t = (G(mu)*G(nu)*G(-nu)*G(-mu))
    r = kahane_simplify(t)
    assert r.equals(D**2*eye(4))
    t = (G(mu)*G(nu)*G(-nu)*G(-mu))
    r = kahane_simplify(t)
    assert r.equals(D**2*eye(4))
    t = (G(mu)*G(nu)*G(-rho)*G(-nu)*G(-mu)*G(rho))
    r = kahane_simplify(t)
    assert r.equals((4*D - 4*D**2 + D**3)*eye(4))
    t = (G(-mu)*G(-nu)*G(-rho)*G(-sigma)*G(nu)*G(mu)*G(sigma)*G(rho))
    r = kahane_simplify(t)
    assert r.equals((-16*D + 24*D**2 - 8*D**3 + D**4)*eye(4))
    t = (G(-mu)*G(nu)*G(-rho)*G(sigma)*G(rho)*G(-nu)*G(mu)*G(-sigma))
    r = kahane_simplify(t)
    assert r.equals((8*D - 12*D**2 + 6*D**3 - D**4)*eye(4))

    # Expressions with free indices:
    t = (G(mu)*G(nu)*G(rho)*G(sigma)*G(-mu))
    r = kahane_simplify(t)
    assert r.equals(-2*G(sigma)*G(rho)*G(nu))
    t = (G(mu)*G(nu)*G(rho)*G(sigma)*G(-mu))
    r = kahane_simplify(t)
    assert r.equals(-2*G(sigma)*G(rho)*G(nu))
def test_gamma_matrix_class():
    """Check simplify_gamma_expression on products that mix gamma matrices
    with an unrelated tensor head."""
    i, j, k = tensor_indices('i,j,k', LorentzIndex)

    # define another type of TensorHead to see if exprs are correctly handled:
    A = TensorHead('A', [LorentzIndex])

    # G(i)*G(-i) collapses to 4*Identity; the A(k) factor is untouched.
    t = A(k)*G(i)*G(-i)
    ts = simplify_gamma_expression(t)
    assert _is_tensor_eq(ts, Matrix([
        [4, 0, 0, 0],
        [0, 4, 0, 0],
        [0, 0, 4, 0],
        [0, 0, 0, 4]])*A(k))

    t = G(i)*A(k)*G(j)
    ts = simplify_gamma_expression(t)
    assert _is_tensor_eq(ts, A(k)*G(i)*G(j))

    execute_gamma_simplify_tests_for_function(simplify_gamma_expression, D=4)
def test_gamma_matrix_trace():
    """Check gamma_trace on odd/even products, with and without internal
    contractions, and on slash-contracted momenta p, q."""
    g = LorentzIndex.metric

    m0, m1, m2, m3, m4, m5, m6 = tensor_indices('m0:7', LorentzIndex)
    n0, n1, n2, n3, n4, n5 = tensor_indices('n0:6', LorentzIndex)

    # working in D=4 dimensions
    D = 4

    # traces of odd number of gamma matrices are zero:
    t = G(m0)
    t1 = gamma_trace(t)
    assert t1.equals(0)

    t = G(m0)*G(m1)*G(m2)
    t1 = gamma_trace(t)
    assert t1.equals(0)

    t = G(m0)*G(m1)*G(-m0)
    t1 = gamma_trace(t)
    assert t1.equals(0)

    t = G(m0)*G(m1)*G(m2)*G(m3)*G(m4)
    t1 = gamma_trace(t)
    assert t1.equals(0)

    # traces without internal contractions:
    t = G(m0)*G(m1)
    t1 = gamma_trace(t)
    assert _is_tensor_eq(t1, 4*g(m0, m1))

    t = G(m0)*G(m1)*G(m2)*G(m3)
    t1 = gamma_trace(t)
    t2 = -4*g(m0, m2)*g(m1, m3) + 4*g(m0, m1)*g(m2, m3) + 4*g(m0, m3)*g(m1, m2)
    assert _is_tensor_eq(t1, t2)

    # Contracting a six-matrix trace with the metric reduces it to D times
    # the four-matrix trace.
    t = G(m0)*G(m1)*G(m2)*G(m3)*G(m4)*G(m5)
    t1 = gamma_trace(t)
    t2 = t1*g(-m0, -m5)
    t2 = t2.contract_metric(g)
    assert _is_tensor_eq(t2, D*gamma_trace(G(m1)*G(m2)*G(m3)*G(m4)))

    # traces of expressions with internal contractions:
    t = G(m0)*G(-m0)
    t1 = gamma_trace(t)
    assert t1.equals(4*D)

    t = G(m0)*G(m1)*G(-m0)*G(-m1)
    t1 = gamma_trace(t)
    assert t1.equals(8*D - 4*D**2)

    t = G(m0)*G(m1)*G(m2)*G(m3)*G(m4)*G(-m0)
    t1 = gamma_trace(t)
    t2 = (-4*D)*g(m1, m3)*g(m2, m4) + (4*D)*g(m1, m2)*g(m3, m4) + \
        (4*D)*g(m1, m4)*g(m2, m3)
    assert _is_tensor_eq(t1, t2)

    t = G(-m5)*G(m0)*G(m1)*G(m2)*G(m3)*G(m4)*G(-m0)*G(m5)
    t1 = gamma_trace(t)
    t2 = (32*D + 4*(-D + 4)**2 - 64)*(g(m1, m2)*g(m3, m4) - \
        g(m1, m3)*g(m2, m4) + g(m1, m4)*g(m2, m3))
    assert _is_tensor_eq(t1, t2)

    t = G(m0)*G(m1)*G(-m0)*G(m3)
    t1 = gamma_trace(t)
    assert t1.equals((-4*D + 8)*g(m1, m3))

    # p, q = S1('p,q')
    # ps = p(m0)*G(-m0)
    # qs = q(m0)*G(-m0)
    # t = ps*qs*ps*qs
    # t1 = gamma_trace(t)
    # assert t1 == 8*p(m0)*q(-m0)*p(m1)*q(-m1) - 4*p(m0)*p(-m0)*q(m1)*q(-m1)

    t = G(m0)*G(m1)*G(m2)*G(m3)*G(m4)*G(m5)*G(-m0)*G(-m1)*G(-m2)*G(-m3)*G(-m4)*G(-m5)
    t1 = gamma_trace(t)
    assert t1.equals(-4*D**6 + 120*D**5 - 1040*D**4 + 3360*D**3 - 4480*D**2 + 2048*D)

    t = G(m0)*G(m1)*G(n1)*G(m2)*G(n2)*G(m3)*G(m4)*G(-n2)*G(-n1)*G(-m0)*G(-m1)*G(-m2)*G(-m3)*G(-m4)
    t1 = gamma_trace(t)
    tresu = -7168*D + 16768*D**2 - 14400*D**3 + 5920*D**4 - 1232*D**5 + 120*D**6 - 4*D**7
    assert t1.equals(tresu)

    # checked with Mathematica
    # In[1]:= <<Tracer.m
    # In[2]:= Spur[l];
    # In[3]:= GammaTrace[l, {m0},{m1},{n1},{m2},{n2},{m3},{m4},{n3},{n4},{m0},{m1},{m2},{m3},{m4}]
    t = G(m0)*G(m1)*G(n1)*G(m2)*G(n2)*G(m3)*G(m4)*G(n3)*G(n4)*G(-m0)*G(-m1)*G(-m2)*G(-m3)*G(-m4)
    t1 = gamma_trace(t)
    # t1 = t1.expand_coeff()
    c1 = -4*D**5 + 120*D**4 - 1200*D**3 + 5280*D**2 - 10560*D + 7808
    c2 = -4*D**5 + 88*D**4 - 560*D**3 + 1440*D**2 - 1600*D + 640
    assert _is_tensor_eq(t1, c1*g(n1, n4)*g(n2, n3) + c2*g(n1, n2)*g(n3, n4) + \
        (-c1)*g(n1, n3)*g(n2, n4))

    # Traces of slash-contracted momenta ps = p-slash, qs = q-slash:
    p, q = tensor_heads('p,q', [LorentzIndex])
    ps = p(m0)*G(-m0)
    qs = q(m0)*G(-m0)
    p2 = p(m0)*p(-m0)
    q2 = q(m0)*q(-m0)
    pq = p(m0)*q(-m0)
    t = ps*qs*ps*qs
    r = gamma_trace(t)
    assert _is_tensor_eq(r, 8*pq*pq - 4*p2*q2)
    t = ps*qs*ps*qs*ps*qs
    r = gamma_trace(t)
    assert _is_tensor_eq(r, -12*p2*pq*q2 + 16*pq*pq*pq)
    t = ps*qs*ps*qs*ps*qs*ps*qs
    r = gamma_trace(t)
    assert _is_tensor_eq(r, -32*pq*pq*p2*q2 + 32*pq*pq*pq*pq + 4*p2*p2*q2*q2)

    # Expressions with no gamma matrices are returned unchanged:
    t = 4*p(m1)*p(m0)*p(-m0)*q(-m1)*q(m2)*q(-m2)
    assert _is_tensor_eq(gamma_trace(t), t)

    t = ps*ps*ps*ps*ps*ps*ps*ps
    r = gamma_trace(t)
    assert r.equals(4*p2*p2*p2*p2)
|
|
import os
from pymacaron.log import pymlogger
import json
import imp
import importlib.util

# The 'imp' module is deprecated since Python 3.4 and removed in 3.12;
# load the shared test helpers with importlib instead. (The 'imp' import
# itself is kept in case later parts of this file still use it.)
_utils_spec = importlib.util.spec_from_file_location(
    'utils', os.path.join(os.path.dirname(__file__), 'utils.py'))
utils = importlib.util.module_from_spec(_utils_spec)
_utils_spec.loader.exec_module(utils)

log = pymlogger(__name__)
# Scratch directory where the server under test drops its error reports.
tmpdir = '/tmp/test-pym-microservice'
reportpath = os.path.join(tmpdir, "error_report.json")

# Replace the racy stat/except/mkdir dance with the idiomatic one-liner:
# makedirs(exist_ok=True) creates the directory only if missing and does
# not swallow unrelated errors the way a bare 'except Exception' did.
os.makedirs(tmpdir, exist_ok=True)
class Tests(utils.PyMacaronTests):
def assertNoErrorReport(self):
self.assertFalse(os.path.isfile(reportpath))
def load_report(self):
with open(reportpath) as f:
s = f.read()
log.info("GOT\n%s\n" % s)
j = json.loads(s)
log.debug("Report is %s" % json.dumps(j, indent=4))
title = j['title']
body = j['body']
log.info("Loaded error report [%s]" % title)
return title, body
    def assertBaseReportOk(self, path=None, user_id=None):
        """Load the error report and check the fields common to all reports.

        Checks the stack, user, endpoint and server sections; 'user_id', if
        given, is the expected authenticated caller. Returns (title, body)
        so callers can run further checks.
        """
        title, body = self.load_report()
        self.assertTrue(title)

        # Urg. This is doomed to break for everyone else than me on my computer...
        self.assertTrue('/home/erwan/pnt/pymacaron/test/testserver.py' in body['stack'][0])

        self.assertTrue('call_id' in body)
        self.assertEqual(body['call_path'], 'crash')
        self.assertEqual(body['is_ec2_instance'], False)

        # User section: authenticated caller vs anonymous one.
        if user_id:
            self.assertEqual(body['user']['is_auth'], 1)
            self.assertEqual(body['user']['id'], user_id)
        else:
            self.assertEqual(body['user']['is_auth'], 0)
            self.assertEqual(body['user']['id'], '')
        self.assertTrue('python-requests' in body['user']['user_agent'])
        self.assertEqual(body['user']['ip'], '127.0.0.1')

        # Endpoint section.
        self.assertEqual(body['endpoint']['method'], 'GET')
        self.assertEqual(body['endpoint']['base_url'], 'http://127.0.0.1:8765/%s' % path)
        self.assertEqual(body['endpoint']['url'], 'http://127.0.0.1:8765/%s' % path)
        self.assertEqual(body['endpoint']['path'], '/%s' % path)
        self.assertTrue(body['endpoint']['id'].endswith(' GET /%s' % path), "%s ends with GET /%s" % (body['endpoint']['id'], path))

        # Server section.
        self.assertEqual(body['server']['port'], '8765')
        self.assertTrue(body['server']['api_name'] in ['ping', 'crash'])
        self.assertEqual(body['server']['fqdn'], '127.0.0.1')
        self.assertEqual(body['server']['api_version'], '0.0.1')

        return title, body
def assertServerErrorReportOk(self, path=None, fatal=True, user_id=None):
title, body = self.assertBaseReportOk(user_id=user_id, path=path)
self.assertTrue('error_id' in body)
self.assertEqual(body['is_fatal_error'], fatal)
self.assertTrue(body['time']['end'] >= body['time']['start'])
self.assertTrue(body['time']['microsecs'] >= 1000)
self.assertTrue(body['time']['microsecs'] <= 10000000)
self.assertEqual(body['call_path'], 'crash')
return title, body
    def setUp(self):
        """Enable error reporting, clear leftover reports and restart the server."""
        super().setUp()
        # Make sure error reporting is turned on for every test in this class.
        if 'NO_ERROR_REPORTING' in os.environ:
            del os.environ['NO_ERROR_REPORTING']
        os.environ['DO_REPORT_ERROR'] = '1'
        # Start from a clean slate: drop any report left by a previous test.
        if os.path.isfile(reportpath):
            os.remove(reportpath)
        self.kill_server()
        self.start_server()
        self.port = 8765
#
# And the tests!
#
    def test_internal_exception(self):
        """An unhandled exception yields a 500 and a full fatal error report."""
        self.assertGetReturnError(
            'crash/internalexception',
            500,
            'UNHANDLED_SERVER_ERROR'
        )
        title, body = self.assertServerErrorReportOk(
            path='crash/internalexception',
        )
        self.assertEqual(title, 'FATAL ERROR %s 500 UNHANDLED_SERVER_ERROR: pymacaron.api.do_crash_internal_exception(): Raising an internal exception' % body['server']['api_name'])
        # The response section mirrors the 500 error returned to the caller.
        self.assertEqual(body['response']['user_message'], '')
        self.assertEqual(body['response']['type'], 'Response')
        self.assertEqual(body['response']['status'], '500')
        self.assertEqual(body['response']['is_error'], 1)
        self.assertEqual(body['response']['error_code'], 'UNHANDLED_SERVER_ERROR')
        self.assertEqual(body['response']['error_description'], 'Raising an internal exception')
        self.assertEqual(body['trace'][0], 'Traceback (most recent call last):\n')
        self.assertEqual(body['request']['params'], '[]')
def test_pymacaron_exception(self):
self.assertGetReturnError(
'crash/pymacaronexception',
401,
'NON_FATAL_CUSTOM_ERROR'
)
self.assertNoErrorReport()
def test_report_error(self):
self.assertGetReturnOk(
'crash/reporterror'
)
title, body = self.assertBaseReportOk(
path='crash/reporterror',
)
self.assertEqual(title, 'NON-FATAL ERROR %s do_crash_report_error(): called crash/reporterror to test error reporting' % body['server']['api_name'])
self.assertTrue('time' not in body)
self.assertTrue('error_id' not in body)
self.assertTrue('response' not in body)
self.assertTrue('trace' not in body)
self.assertTrue('request' not in body)
self.assertEqual(body['is_fatal_error'], False)
self.assertEqual(body['title'], 'called crash/reporterror to test error reporting')
def test_report_fatal_error_response(self):
self.assertGetReturnError(
'crash/returnfatalerrorresponse',
543,
'FATAL_CUSTOM_ERROR'
)
title, body = self.assertServerErrorReportOk(
path='crash/returnfatalerrorresponse',
fatal=True,
)
self.assertEqual(title, 'FATAL ERROR %s 543 FATAL_CUSTOM_ERROR: pymacaron.api.do_crash_return_fatal_error_response(): endpoint returns an Error response' % body['server']['api_name'])
def test_report_non_fatal_error_response(self):
self.assertGetReturnError(
'crash/returnnonfatalerrorresponse',
401,
'NON_FATAL_CUSTOM_ERROR'
)
self.assertNoErrorReport()
def test_report_error_model(self):
j = self.assertGetReturnError(
'crash/returnerrormodel',
543,
'ANOTHER_CUSTOM_ERROR'
)
self.assertEqual(j['error_description'], 'Testing error model')
self.assertEqual(j['status'], 543)
self.assertEqual(j['error'], 'ANOTHER_CUSTOM_ERROR')
title, body = self.assertServerErrorReportOk(
path='crash/returnerrormodel',
fatal=True,
)
self.assertEqual(title, 'FATAL ERROR %s 543 ANOTHER_CUSTOM_ERROR: pymacaron.api.do_crash_return_error_model(): Testing error model' % body['server']['api_name'])
def test_report_error_instance(self):
self.assertGetReturnError(
'crash/returnerrorinstance',
543,
'FATAL_CUSTOM_ERROR'
)
title, body = self.assertServerErrorReportOk(
path='crash/returnerrorinstance',
fatal=True,
)
self.assertEqual(title, 'FATAL ERROR %s 543 FATAL_CUSTOM_ERROR: pymacaron.api.do_crash_return_error_instance(): endpoint returns an Error instance' % body['server']['api_name'])
def test_report_slow_call(self):
self.assertGetReturnOk(
'crash/slowcall'
)
title, body = self.assertServerErrorReportOk(
path='crash/slowcall',
fatal=False,
)
self.assertEqual(title, 'NON-FATAL ERROR %s 200 : pymacaron.api.do_crash_slow_call() calltime exceeded 1000 millisec!' % body['server']['api_name'])
self.assertEqual(body['response']['user_message'], '')
self.assertEqual(body['response']['type'], 'Response')
self.assertEqual(body['response']['status'], '200')
self.assertEqual(body['response']['is_error'], 0)
self.assertEqual(body['response']['error_code'], '')
self.assertEqual(body['response']['error_description'], '')
self.assertTrue('trace' not in body)
self.assertEqual(body['request']['params'], '[]')
self.assertEqual(body['title'], 'pymacaron.api.do_crash_slow_call() calltime exceeded 1000 millisec!')
|
|
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Views for Surveys.
"""
__authors__ = [
'"Daniel Diniz" <ajaksu@gmail.com>',
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"James Levy" <jamesalexanderlevy@gmail.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
import datetime
import re
import string
from google.appengine.ext import db
from django import forms
from django import http
from django.utils import simplejson
from django.utils.translation import ugettext
from soc.logic import cleaning
from soc.logic import dicts
from soc.logic.helper import timeline
from soc.logic.models.survey import logic as survey_logic
from soc.logic.models.user import logic as user_logic
from soc.models.survey_record import SurveyRecord
from soc.models.user import User
from soc.views import out_of_band
from soc.views.helper import access
from soc.views.helper import decorators
from soc.views.helper import forms as forms_helper
from soc.views.helper import lists
from soc.views.helper import redirects
from soc.views.helper import requests
from soc.views.helper import responses
from soc.views.helper import surveys
from soc.views.helper import widgets
from soc.views.models import base
from soc.models.survey import Survey
# question types whose answers are picked from a set of options
DEF_CHOICE_TYPES = set(('selection', 'pick_multi', 'choice', 'pick_quant'))
# question types whose answers are free-form text
DEF_TEXT_TYPES = set(('long_answer', 'short_answer'))
# all property types a survey question may have
DEF_PROPERTY_TYPES = tuple(DEF_CHOICE_TYPES) + tuple(DEF_TEXT_TYPES)

# used in View.getSchemaOptions to map POST values
DEF_BOOL = {'True': True, 'False': False}

# (label, help text) pairs shown in the survey editor UI
DEF_SHORT_ANSWER = ("Short Answer",
                    "Less than 40 characters. Rendered as a text input. "
                    "It's possible to add a free form question (Content) "
                    "and a in-input prompt/example text.")

DEF_CHOICE = (
    "Selection",
    "Can be set as a single choice (selection) or multiple choice "
    "(pick_multi) question. Rendered as a select (single choice) "
    "or a group of checkboxes (multiple choice). It's possible to "
    "add a free form question (Content) and as many free form options "
    "as wanted. Each option can be edited (double-click), deleted "
    "(click on (-) button) or reordered (drag and drop).")

DEF_LONG_ANSWER = (
    "Long Answer",
    "Unlimited length, auto-growing field. Rendered as a textarea. "
    "It's possible to add a free form question (Content) and an in-input "
    "prompt/example text.")

# question type name -> (label, help text) shown to survey editors
DEF_QUESTION_TYPES = dict(short_answer=DEF_SHORT_ANSWER,
                          long_answer=DEF_LONG_ANSWER, choice=DEF_CHOICE)

# for toCSV and View.exportSerialized
DEF_FIELDS = 'author modified_by'
DEF_PLAIN = 'is_featured content created modified'
class View(base.View):
"""View methods for the Survey model.
"""
  def __init__(self, params=None):
    """Defines the fields and methods required for the base View class
    to provide the user with list, public, create, edit and delete views.

    Params:
      params: a dict with params for this View
    """

    # per-view access control rules; survey_logic-based checkers receive
    # the logic instance so they can fetch the entity being checked
    rights = access.Checker(params)
    rights['any_access'] = ['allow']
    rights['show'] = [('checkIsSurveyWritable', survey_logic)]
    rights['create'] = ['checkIsUser']
    rights['edit'] = [('checkIsSurveyWritable', survey_logic)]
    rights['delete'] = ['checkIsDeveloper'] # TODO: fix deletion of Surveys
    rights['list'] = ['checkDocumentList']
    rights['pick'] = ['checkDocumentPick']
    rights['record'] = [('checkHasAny', [
        [('checkIsSurveyReadable', [survey_logic]),
        ('checkIsMySurveyRecord', [survey_logic, 'id'])]
        ])]
    rights['results'] = ['checkIsUser']
    rights['take'] = [('checkIsSurveyTakeable', survey_logic)]

    new_params = {}
    new_params['logic'] = survey_logic
    new_params['rights'] = rights
    new_params['name'] = 'Survey'
    # sidebar menu grouping label
    new_params['sidebar_grouping'] = "Surveys"

    # extra URL patterns: take, JSON export, record and results views
    new_params['extra_django_patterns'] = [
        (r'^%(url_name)s/(?P<access_type>take)/%(key_fields)s$',
        '%(module_package)s.%(module_name)s.take',
        'Take %(name)s'),
        (r'^%(url_name)s/(?P<access_type>json)/%(scope)s$',
        '%(module_package)s.%(module_name)s.json',
        'Export %(name)s as JSON'),
        (r'^%(url_name)s/(?P<access_type>record)/%(key_fields)s$',
        '%(module_package)s.%(module_name)s.record',
        'View survey record for %(name)s'),
        (r'^%(url_name)s/(?P<access_type>results)/%(key_fields)s$',
        '%(module_package)s.%(module_name)s.results',
        'View survey results for %(name)s'),
        (r'^%(url_name)s/(?P<access_type>show)/user/(?P<link_id>)\w+$',
        '%(module_package)s.%(module_name)s.results',
        'View survey results for user'),
        ]

    # CSV export configuration (see surveys.toCSV)
    new_params['export_content_type'] = 'text/text'
    new_params['export_extension'] = '.csv'
    new_params['export_function'] = surveys.toCSV(self)
    new_params['delete_redirect'] = '/'

    # templates used by the survey views
    new_params['edit_template'] = 'soc/survey/edit.html'
    new_params['create_template'] = 'soc/survey/edit.html'
    new_params['public_template'] = 'soc/survey/public.html'
    new_params['record_template'] = 'soc/survey/view_record.html'
    new_params['take_template'] = 'soc/survey/take.html'

    # surveys are created/listed without a raw link_id in the URL
    new_params['no_create_raw'] = True
    new_params['no_create_with_scope'] = True
    new_params['no_create_with_key_fields'] = True
    new_params['no_list_raw'] = True
    new_params['sans_link_id_create'] = True
    new_params['sans_link_id_list'] = True

    new_params['create_dynafields'] = [
        {'name': 'link_id',
         'base': forms.fields.CharField,
         'label': 'Survey Link ID',
         },
        ]

    # extra fields and cleaning rules for the create form
    new_params['create_extra_dynaproperties'] = {
        'content': forms.fields.CharField(required=False, label='Description',
            widget=widgets.FullTinyMCE(attrs={'rows': 25, 'cols': 100})),
        'survey_html': forms.fields.CharField(widget=forms.HiddenInput,
                                              required=False),
        'scope_path': forms.fields.CharField(widget=forms.HiddenInput,
                                             required=True),
        'prefix': forms.fields.CharField(widget=widgets.ReadOnlyInput(),
                                        required=True),
        'clean_content': cleaning.clean_html_content('content'),
        'clean_link_id': cleaning.clean_link_id('link_id'),
        'clean_scope_path': cleaning.clean_scope_path('scope_path'),
        'clean': cleaning.validate_document_acl(self, True),
        }

    # model properties managed by this view itself rather than the form
    new_params['extra_dynaexclude'] = ['author', 'created',
                                       'home_for', 'modified_by', 'modified',
                                       'take_survey', 'survey_content']

    new_params['edit_extra_dynaproperties'] = {
        'doc_key_name': forms.fields.CharField(widget=forms.HiddenInput),
        'created_by': forms.fields.CharField(widget=widgets.ReadOnlyInput(),
                                             required=False),
        'last_modified_by': forms.fields.CharField(
            widget=widgets.ReadOnlyInput(), required=False),
        'clean': cleaning.validate_document_acl(self),
        }

    # forms used to take a survey and to view a single record read-only
    new_params['survey_take_form'] = surveys.SurveyTakeForm
    new_params['survey_record_form'] = surveys.SurveyRecordForm

    # list view configuration for the public survey listing
    new_params['public_field_prefetch'] = ['author']
    new_params['public_field_extra'] = lambda entity: {
        "path": entity.scope_path + '/' + entity.link_id,
        "created_by": entity.author.link_id,
    }
    new_params['public_field_keys'] = [
        "path", "title", "link_id","is_featured",
        "created_by", "created", "modified"
    ]
    new_params['public_field_names'] = [
        "Path", "Title", "Link ID", "Featured",
        "Created By", "Created On", "Modified",
    ]

    # list view configuration for the per-survey records listing
    new_params['records_field_keys'] = [
        'taken_by', 'modified'
    ]
    new_params['records_field_names'] = [
        'Taken By', 'Taken On',
    ]
    new_params['records_field_prefetch'] = ['user']
    new_params['records_field_extra'] = lambda entity: {
        'taken_by': '%s (%s)' %(entity.user.name, entity.user.link_id),
    }

    # 's=0' is appended to the URL after a successful take so the
    # success notice can be shown (see _setSurveyTakeContext)
    new_params['take_params'] = {'s': '0'}
    new_params['successful_take_message'] = ugettext(
        'Survey record submitted.')

    params = dicts.merge(params, new_params, sub_merge=True)

    super(View, self).__init__(params=params)
def list(self, request, access_type, page_name=None, params=None,
filter=None, order=None, **kwargs):
"""See base.View.list.
"""
if not filter:
filter=kwargs
return super(View, self).list(request, access_type, page_name=page_name,
params=params, filter=filter, **kwargs)
def _public(self, request, entity, context):
"""Add a preview version of the Survey to the page's context.
Args:
request: the django request object
entity: the entity to make public
context: the context object
"""
# construct the form to be shown on the page
# TODO(ljvderijk) Generate SurveyForm without passing along the logic
survey_form = self._params['survey_take_form'](
survey=entity, survey_logic=self._params['logic'])
survey_form.getFields()
context['survey_form'] = survey_form
context['page_name'] = "%s titled '%s'" %(
context['page_name'], entity.title)
# return True to signal that the page may be displayed
return True
  def _editPost(self, request, entity, fields):
    """See base.View._editPost().

    Processes POST request items to add new dynamic field names,
    question types, and default prompt values to SurveyContent model.
    """

    user = user_logic.getForCurrentAccount()
    schema = {}
    survey_fields = {}

    if not entity:
      # new Survey
      if 'serialized' in request.POST:
        # creation via a serialized (JSON) import
        fields, schema, survey_fields = self.importSerialized(request, fields,
                                                              user)
      fields['author'] = user
    else:
      # existing Survey: keep the original author and start from the
      # stored SurveyContent
      fields['author'] = entity.author
      schema = self.loadSurveyContent(schema, survey_fields, entity)

    # remove deleted properties from the model
    self.deleteQuestions(schema, survey_fields, request.POST)

    # add new text questions and re-build choice questions
    self.getRequestQuestions(schema, survey_fields, request.POST)

    # get schema options for choice questions
    self.getSchemaOptions(schema, survey_fields, request.POST)

    survey_content = getattr(entity,'survey_content', None)
    # create or update a SurveyContent for this Survey
    survey_content = survey_logic.createSurvey(survey_fields, schema,
                                               survey_content=survey_content)

    # save survey_content for existent survey or pass for creating a new one
    if entity:
      entity.modified_by = user
      entity.survey_content = survey_content
      db.put(entity)
    else:
      fields['survey_content'] = survey_content
      fields['modified_by'] = user

    super(View, self)._editPost(request, entity, fields)
  def loadSurveyContent(self, schema, survey_fields, entity):
    """Populate the schema dict and get text survey questions.
    """

    if hasattr(entity, 'survey_content'):

      # there is a SurveyContent already
      survey_content = entity.survey_content
      # NOTE(review): eval() of the stored schema string is unsafe if
      # datastore contents are not fully trusted; consider json instead.
      schema = eval(survey_content.schema)

      for question_name in survey_content.dynamic_properties():

        # get the current questions from the SurveyContent
        if question_name not in schema:
          continue

        if schema[question_name]['type'] not in DEF_CHOICE_TYPES:
          # Choice questions are always regenerated from request, see
          # self.get_request_questions()
          question = getattr(survey_content, question_name)
          survey_fields[question_name] = question

    return schema
def deleteQuestions(self, schema, survey_fields, POST):
"""Process the list of questions to delete, from a hidden input.
"""
deleted = POST.get('__deleted__', '')
if deleted:
deleted = deleted.split(',')
for field in deleted:
if field in schema:
del schema[field]
if field in survey_fields:
del survey_fields[field]
  def getRequestQuestions(self, schema, survey_fields, POST):
    """Get fields from request.

    We use two field/question naming and processing schemes:
      - Choice questions consist of <input/>s with a common name, being rebuilt
        anew on every edit POST so we can gather ordering, text changes,
        deletions and additions.
      - Text questions only have special survey__* names on creation, afterwards
        they are loaded from the SurveyContent dynamic properties.
    """

    for key, value in POST.items():

      if key.startswith('id_'):
        # Choice question fields, they are always generated from POST contents,
        # as their 'content' is editable and they're reorderable. Also get
        # its field index for handling reordering fields later.
        # 'id_<name>__field_<number>' -> (<name>, <number>)
        name, number = key[3:].replace('__field', '').rsplit('_', 1)

        if name not in schema:
          if 'NEW_' + name in POST:
            # new Choice question, set generic type and get its index
            schema[name] = {'type': 'choice'}

        if name in schema and schema[name]['type'] in DEF_CHOICE_TYPES:
          # build an index:content dictionary
          if name in survey_fields:
            # skip duplicate option values for the same question
            if value not in survey_fields[name]:
              survey_fields[name][int(number)] = value
          else:
            survey_fields[name] = {int(number): value}

      elif key.startswith('survey__'): # Text question
        # this is super ugly but unless data is serialized the regex is needed
        prefix = re.compile('survey__([0-9]{1,3})__')
        prefix_match = re.match(prefix, key)

        # numeric index embedded in the 'survey__<n>__' prefix
        index = prefix_match.group(0).replace('survey', '').replace('__','')
        index = int(index)

        field_name = prefix.sub('', key)
        field = 'id_' + key

        for ptype in DEF_PROPERTY_TYPES:
          # should only match one
          if ptype + "__" in field_name:
            field_name = field_name.replace(ptype + "__", "")

            if field_name not in schema:
              schema[field_name]= {}

            schema[field_name]["index"] = index
            schema[field_name]["type"] = ptype

        # store text question tooltip from the input/textarea value
        schema[field_name]["tip"] = value

        # add the question as a dynamic property to survey_content
        survey_fields[field_name] = value
  def getSchemaOptions(self, schema, survey_fields, POST):
    """Get question, type, rendering and option order for choice questions.
    """

    # maps the POSTed widget choice to render style and concrete type
    RENDER = {'checkboxes': 'multi_checkbox', 'select': 'single_select',
              'radio_buttons': 'quant_radio'}
    RENDER_TYPES = {'select': 'selection',
                    'checkboxes': 'pick_multi',
                    'radio_buttons': 'pick_quant' }

    for key in schema:
      if schema[key]['type'] in DEF_CHOICE_TYPES and key in survey_fields:

        # refine the generic 'choice' type using the selected widget
        render_for = 'render_for_' + key
        if render_for in POST:
          schema[key]['render'] = RENDER[POST[render_for]]
          schema[key]['type'] = RENDER_TYPES[POST[render_for]]

        # set the choice question's tooltip
        tip_for = 'tip_for_' + key
        schema[key]['tip'] = POST.get(tip_for)

        # handle reordering fields
        ordered = False
        order = 'order_for_' + key
        if order in POST and isinstance(survey_fields[key], dict):
          order = POST[order]

          # 'order_for_name' is jquery serialized from a sortable, so it's in
          # a 'name[]=1&name[]=2&name[]=0' format ('id-li-' is set in our JS)
          order = order.replace('id-li-%s[]=' % key, '')
          order = order.split('&')

          # only apply the ordering if it covers every option exactly once
          if len(order) == len(survey_fields[key]) and order[0]:
            order = [int(number) for number in order]
            if set(order) == set(survey_fields[key]):
              survey_fields[key] = [survey_fields[key][i] for i in order]
              ordered = True

        if not ordered:
          # we don't have a good ordering to use; fall back to index order
          ordered = sorted(survey_fields[key].items())
          survey_fields[key] = [value for index, value in ordered]

      # set 'question' entry (free text label for question) in schema
      question_for = 'NEW_' + key
      if question_for in POST and POST[question_for]:
        schema[key]["question"] = POST[question_for]

      # set whether the question is required
      required_for = 'required_for_' + key
      schema[key]['required'] = DEF_BOOL[POST[required_for]]

      # set whether the question allows comments
      comment_for = 'comment_for_' + key
      schema[key]['has_comment'] = DEF_BOOL[POST[comment_for]]

      # set the question index from JS-calculated value
      index_for = 'index_for_' + key
      if index_for in POST:
        schema[key]['index'] = int(POST[index_for].replace('__', ''))
def createGet(self, request, context, params, seed):
"""Pass the question types for the survey creation template.
"""
context['question_types'] = DEF_QUESTION_TYPES
# avoid spurious results from showing on creation
context['new_survey'] = True
return super(View, self).createGet(request, context, params, seed)
def editGet(self, request, entity, context, params=None):
"""Process GET requests for the specified entity.
Builds the SurveyForm that represents the Survey question contents.
"""
self._entity = entity
survey_content = entity.survey_content
survey_form = surveys.SurveyEditForm(survey_content=survey_content,
survey_logic=params['logic'])
survey_form.getFields()
local = dict(survey_form=survey_form, question_types=DEF_QUESTION_TYPES,
survey_h=entity.survey_content)
context.update(local)
params['edit_form'] = surveys.HelperForm(params['edit_form'])
if entity.survey_end and datetime.datetime.now() > entity.survey_end:
# are we already passed the survey_end?
context["passed_survey_end"] = True
return super(View, self).editGet(request, entity, context, params=params)
  @decorators.merge_params
  @decorators.check_access
  def take(self, request, access_type, page_name=None,
           params=None, **kwargs):
    """View for taking a Survey.

    For Args see base.View().public().
    """

    survey_logic = params['logic']

    try:
      entity = survey_logic.getFromKeyFieldsOr404(kwargs)
    except out_of_band.Error, error:
      return responses.errorResponse(
          error, request, template=params['error_public'])

    template = params['take_template']

    # get the context for this webpage
    context = responses.getUniversalContext(request)
    responses.useJavaScript(context, params['js_uses_all'])
    context['page_name'] = "%s titled '%s'" % (page_name, entity.title)
    context['entity'] = entity

    # try to get an existing SurveyRecord for the current user
    survey_record = self._getSurveyRecordFor(entity, request, params)

    post_dict = request.POST

    # get an instance of SurveyTakeForm to use, bound to POST data if any
    survey_form = params['survey_take_form'](
        survey=entity,
        survey_record=survey_record,
        survey_logic=params['logic'],
        data=post_dict)

    # fill context with the survey_form and additional information
    context['survey_form'] = survey_form
    self._setSurveyTakeContext(request, params, context, entity, survey_record)

    # dispatch to the POST (submit) or GET (render) handler
    if request.POST:
      return self.takePost(request, template, context, params, survey_form,
                           entity, survey_record, **kwargs)
    else: #request.GET
      return self.takeGet(request, template, context, params, survey_form,
                          entity, survey_record, **kwargs)
def _getSurveyRecordFor(self, survey, request, params):
"""Returns the SurveyRecord for the given Survey and request.
Args:
survey: a Survey entity
request: a Django HTTPRequest object
params: params for the requesting view
Returns:
An existing SurveyRecord iff any exists for the given Survey, request
and any other conditions that must apply.
"""
survey_logic = params['logic']
record_logic = survey_logic.getRecordLogic()
user_entity = user_logic.getForCurrentAccount()
filter = {'survey': survey,
'user': user_entity}
return record_logic.getForFields(filter, unique=True)
  def takeGet(self, request, template, context, params, survey_form, entity,
              record, **kwargs):
    """Handles the GET request for the Survey's take page.

    Args:
        template: the template used for this view
        survey_form: instance of SurveyTakeForm
        entity: the Survey entity
        record: a SurveyRecord entity iff any exists
        rest: see base.View.public()
    """

    # call the hook method
    self._takeGet(request, template, context, params, entity, record, **kwargs)

    # render the page using the context prepared by take()
    return responses.respond(request, template, context)
  def _takeGet(self, request, template, context, params, entity, record,
               **kwargs):
    """Hook for the GET request for the Survey's take page.

    This method is called just before the GET page is shown.
    Subclasses may override it to tweak the context; the base
    implementation intentionally does nothing.

    Args:
        template: the template used for this view
        entity: the Survey entity
        record: a SurveyRecord entity
        rest: see base.View.public()
    """
    pass
  def takePost(self, request, template, context, params, survey_form, entity,
               record, **kwargs):
    """Handles the POST request for the Survey's take page.

    Args:
        template: the template used for this view
        survey_form: instance of SurveyTakeForm
        entity: the Survey entity
        record: a SurveyRecord entity
        rest: see base.View.public()
    """

    survey_logic = params['logic']
    record_logic = survey_logic.getRecordLogic()

    if not survey_form.is_valid():
      # show the form errors
      return self._constructResponse(request, entity=entity, context=context,
                                     form=survey_form, params=params,
                                     template=template)

    # retrieve the data from the form
    _, properties = forms_helper.collectCleanedFields(survey_form)

    # add the required SurveyRecord properties
    properties['user'] = user_logic.getForCurrentAccount()
    properties['survey'] = entity

    # call the hook method before updating the SurveyRecord
    self._takePost(request, params, entity, record, properties)

    # update the record entity if any and clear all dynamic properties
    record = record_logic.updateOrCreateFromFields(record, properties,
                                                   clear_dynamic=True)

    # get the path to redirect the user to
    path = self._getRedirectOnSuccessfulTake(request, params, entity,
                                             record)

    return http.HttpResponseRedirect(path)
  def _takePost(self, request, params, entity, record, properties):
    """Hook for the POST request for the Survey's take page.

    This method is called just before the SurveyRecord is stored.
    Subclasses may override it to adjust the properties; the base
    implementation intentionally does nothing.

    Args:
      request: Django Request object
      params: the params for the current view
      entity: a Survey entity
      record: a SurveyRecord entity
      properties: properties to be stored in the SurveyRecord entity
    """
    pass
def _setSurveyTakeContext(self, request, params, context, survey,
survey_record):
"""Sets the help_text and status for take template use.
Args:
request: HTTP request object
params: the params for the current View
context: the context for the view to update
survey: a Survey entity
survey_record: a SurveyRecordEntity iff exists
"""
if not survey.survey_end:
survey_end_text = ""
else:
survey_end_text = " by " + str(
survey.survey_end.strftime("%A, %d. %B %Y %I:%M%p"))
if survey_record:
help_text = "You may edit and re-submit this survey %s." %(
survey_end_text)
status = "edit"
else:
help_text = "Please complete this survey %s." %(
survey_end_text)
status = "create"
notice = params['successful_take_message'] if 's' in request.GET else None
# update the context with the help_text and status
context_update = dict(status=status, help_text=help_text, notice=notice)
context.update(context_update)
  def _getRedirectOnSuccessfulTake(self, request, params, survey, record):
    """Returns a path to which the user should be redirected after successfully
    taking a Survey.

    Args:
      request: current HTTPRequest
      params: the params of the View
      survey: Survey entity that was successfully taken
      record: SurveyRecord entity that has been stored/updated
    """

    # redirect back to the same path with the 'take_params' (s=0) query
    # string appended so that the success notice is shown
    return requests.replaceSuffix(request.path, None,
                                  params=params['take_params'])
  @decorators.merge_params
  @decorators.check_access
  def viewResults(self, request, access_type, page_name=None,
                  params=None, **kwargs):
    """View that lists all SurveyRecords which are of interest to the user.

    For params see base.View.public().
    """

    # TODO: this view could also contain statistics for the Survey

    survey_logic = params['logic']
    record_logic = survey_logic.getRecordLogic()

    try:
      entity = survey_logic.getFromKeyFieldsOr404(kwargs)
    except out_of_band.Error, error:
      return responses.errorResponse(
          error, request, template=params['error_public'])

    # get the context for this webpage
    context = responses.getUniversalContext(request)
    responses.useJavaScript(context, params['js_uses_all'])
    context['page_name'] = "%s titled '%s'" % (page_name, entity.title)
    context['entity'] = entity

    # add the first question to the context show a preview can be shown
    context['first_question'] = entity.survey_content.orderedProperties()[0]

    # get the rights checker
    user_entity = user_logic.getForCurrentAccount()
    rights = self._params['rights']
    rights.setCurrentUser(user_entity.account, user_entity)

    # check if the current user is allowed to visit the read the survey
    allowed_to_read = False

    try:
      rights.checkIsSurveyReadable(
          {'key_name': entity.key().name(),
           'prefix': entity.prefix,
           'scope_path': entity.scope_path,
           'link_id': entity.link_id,
           'user': user_entity},
          survey_logic)
      allowed_to_read = True
    except:
      # NOTE(review): bare except also swallows programming errors and
      # treats any failure as "not readable" — consider narrowing it.
      pass

    # get the filter for the SurveyRecords
    fields = self._getResultsViewRecordFields(entity, allowed_to_read)

    list_params = params.copy()
    list_params['logic'] = record_logic
    list_params['list_description'] = \
        "List of Records for the %s titled '%s'." %(list_params['name'],
                                                    entity.title)
    list_params['records_row_extra'] = lambda entity: {
        'link': redirects.getViewSurveyRecordRedirect(entity, list_params)
    }
    list_params['records_row_action'] = {
        'type': 'redirect_custom',
        'parameters': dict(new_window=False),
    }

    return self.list(request, 'allow', page_name=page_name,
                     params=list_params, filter=fields, visibility='records',
                     context=context)
def _getResultsViewRecordFields(self, survey, allowed_to_read):
"""Retrieves the Results View filter for SurveyRecords.
Args:
survey: Survey instance for which the Records need to be shown
allowed_to_read: specifies if the current User has read access
Returns:
Returns the dictionary containing the fields to filter on
"""
# only show records for the retrieved survey
fields = {'survey': survey}
if not allowed_to_read:
# this user is not allowed to view all the Records so only show their own
fields['user'] = user_logic.getForCurrentAccount()
return fields
  @decorators.merge_params
  @decorators.check_access
  def viewRecord(self, request, access_type, page_name=None,
                 params=None, **kwargs):
    """View that allows the user to see the contents of a single SurveyRecord.

    For params see base.View.public()
    """

    survey_logic = params['logic']
    record_logic = survey_logic.getRecordLogic()

    try:
      survey_entity = survey_logic.getFromKeyFieldsOr404(kwargs)
    except out_of_band.Error, error:
      return responses.errorResponse(
          error, request, template=params['error_public'])

    # the record to show is selected via the 'id' query parameter
    get_dict = request.GET
    record_id = get_dict.get('id')

    if record_id and record_id.isdigit():
      record_id = int(record_id)
      record_entity = record_logic.getFromIDOr404(record_id)
    else:
      raise out_of_band.Error('No valid Record ID given')

    if record_entity.survey.key() != survey_entity.key():
      # record does not match the retrieved survey
      raise out_of_band.Error('Record ID does not match the given survey')

    # get the context for this webpage
    context = responses.getUniversalContext(request)
    responses.useJavaScript(context, params['js_uses_all'])
    context['page_name'] = "%s titled '%s'" %(page_name, survey_entity.title)
    context['entity'] = survey_entity
    context['record'] = record_entity

    # store the read only survey form in the context
    survey_form = params['survey_record_form'](
        survey=survey_entity,
        survey_record=record_entity,
        survey_logic=self._params['logic'],
        read_only=True)
    survey_form.getFields()
    context['survey_form'] = survey_form

    template = params['record_template']

    return responses.respond(request, template, context)
@decorators.merge_params
@decorators.check_access
def exportSerialized(self, request, access_type, page_name=None,
params=None, **kwargs):
"""Exports Surveys in JSON format.
For args see base.View.public().
"""
survey_logic = params['logic']
try:
sur = survey_logic.getFromKeyFieldsOr404(kwargs)
except out_of_band.Error, error:
return responses.errorResponse(
error, request, template=params['error_public'])
json = sur.toDict()
json.update(dict((f, str(getattr(sur, f))) for f in DEF_PLAIN.split()))
static = ((f, str(getattr(sur, f).link_id)) for f in DEF_FIELDS.split())
json.update(dict(static))
dynamic = sur.survey_content.dynamic_properties()
content = ((prop, getattr(sur.survey_content, prop)) for prop in dynamic)
json['survey_content'] = dict(content)
schema = sur.survey_content.schema
json['survey_content']['schema'] = eval(sur.survey_content.schema)
data = simplejson.dumps(json, indent=2)
return self.json(request, data=json)
def importSerialized(self, request, fields, user):
"""Import Surveys in JSON format.
TODO: have this method do a proper import
Args:
request: Django Requset object
fields: ???
user: ???
Returns:
Keywords, the survey's schema and the survey content.
"""
json = request.POST['serialized']
json = simplejson.loads(json)['data']
survey_content = json.pop('survey_content')
schema = survey_content.pop('schema')
del json['author']
del json['created']
del json['modified']
# keywords can't be unicode
keywords = {}
for key, val in json.items():
keywords[str(key)] = val
if 'is_featured' in keywords:
keywords['is_featured'] = eval(keywords['is_featured'])
return keywords, schema, survey_content
def getMenusForScope(self, entity, params, id, user):
"""List featured surveys if after the survey_start date
and before survey_end an iff the current user has the right taking access.
Args:
entity: entity which is the scope for a Survey
params: params from the requesting View
id: GAE user instance for the current user
user: User entity from the current user
"""
# only list surveys for registered users
if not user:
return []
survey_params = self.getParams().copy()
survey_logic = survey_params['logic']
record_logic = survey_logic.getRecordLogic()
# filter all featured surveys for the given entity
filter = {
'prefix' : params['document_prefix'],
'scope_path': entity.key().id_or_name(),
'is_featured': True,
}
survey_entities = survey_logic.getForFields(filter)
submenus = []
# get the rights checker
rights = self._params['rights']
rights.setCurrentUser(id, user)
# cache ACL
survey_rights = {}
# add a link to all featured active surveys the user can take
for survey_entity in survey_entities:
if survey_entity.taking_access not in survey_rights:
# we have not determined if this user has the given type of access
# check if the current user is allowed to visit the take Survey page
allowed_to_take = False
try:
rights.checkIsSurveyTakeable(
{'key_name': survey_entity.key().name(),
'prefix': survey_entity.prefix,
'scope_path': survey_entity.scope_path,
'link_id': survey_entity.link_id,
'user': user},
survey_logic,
check_time=False)
allowed_to_take = True
except:
pass
# cache ACL for a given entity.taking_access
survey_rights[survey_entity.taking_access] = allowed_to_take
if not allowed_to_take:
# not allowed to take this survey
continue
elif not survey_rights[survey_entity.taking_access]:
# we already determined that the user doens't have access to this type
continue
if not timeline.isActivePeriod(survey_entity, 'survey'):
# this survey is not active right now
continue
# check if any SurveyRecord is available for this survey
filter = {'survey': survey_entity,
'user': user}
survey_record = record_logic.getForFields(filter, unique=True)
if survey_record:
taken_status = ""
else:
# no SurveyRecord available so we mark the survey as new
taken_status = "(new)"
submenu = (redirects.getTakeSurveyRedirect(survey_entity, survey_params),
'Survey ' + taken_status + ': ' + survey_entity.short_name,
'show')
submenus.append(submenu)
return submenus
# singleton View instance backing all the exported Django view callables
view = View()

# Django view callables exposed by this module's URL patterns
admin = decorators.view(view.admin)
create = decorators.view(view.create)
edit = decorators.view(view.edit)
export = decorators.view(view.export)
delete = decorators.view(view.delete)
json = decorators.view(view.exportSerialized)
list = decorators.view(view.list)
public = decorators.view(view.public)
record = decorators.view(view.viewRecord)
results = decorators.view(view.viewResults)
take = decorators.view(view.take)
|
|
'''tzinfo timezone information for America/Godthab.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Godthab(DstTzInfo):
    '''America/Godthab timezone definition. See datetime.tzinfo for details'''

    zone = 'America/Godthab'

    # UTC instants at which the zone's offset changes.  The first entry is
    # the conventional epoch placeholder; 1916-07-28 is the switch from
    # local mean time to WGT; from 1980 onwards the entries alternate
    # spring (DST on) / autumn (DST off) transitions.
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1916,7,28,3,26,56),
        d(1980,4,6,5,0,0),
        d(1980,9,28,1,0,0),
        d(1981,3,29,1,0,0),
        d(1981,9,27,1,0,0),
        d(1982,3,28,1,0,0),
        d(1982,9,26,1,0,0),
        d(1983,3,27,1,0,0),
        d(1983,9,25,1,0,0),
        d(1984,3,25,1,0,0),
        d(1984,9,30,1,0,0),
        d(1985,3,31,1,0,0),
        d(1985,9,29,1,0,0),
        d(1986,3,30,1,0,0),
        d(1986,9,28,1,0,0),
        d(1987,3,29,1,0,0),
        d(1987,9,27,1,0,0),
        d(1988,3,27,1,0,0),
        d(1988,9,25,1,0,0),
        d(1989,3,26,1,0,0),
        d(1989,9,24,1,0,0),
        d(1990,3,25,1,0,0),
        d(1990,9,30,1,0,0),
        d(1991,3,31,1,0,0),
        d(1991,9,29,1,0,0),
        d(1992,3,29,1,0,0),
        d(1992,9,27,1,0,0),
        d(1993,3,28,1,0,0),
        d(1993,9,26,1,0,0),
        d(1994,3,27,1,0,0),
        d(1994,9,25,1,0,0),
        d(1995,3,26,1,0,0),
        d(1995,9,24,1,0,0),
        d(1996,3,31,1,0,0),
        d(1996,10,27,1,0,0),
        d(1997,3,30,1,0,0),
        d(1997,10,26,1,0,0),
        d(1998,3,29,1,0,0),
        d(1998,10,25,1,0,0),
        d(1999,3,28,1,0,0),
        d(1999,10,31,1,0,0),
        d(2000,3,26,1,0,0),
        d(2000,10,29,1,0,0),
        d(2001,3,25,1,0,0),
        d(2001,10,28,1,0,0),
        d(2002,3,31,1,0,0),
        d(2002,10,27,1,0,0),
        d(2003,3,30,1,0,0),
        d(2003,10,26,1,0,0),
        d(2004,3,28,1,0,0),
        d(2004,10,31,1,0,0),
        d(2005,3,27,1,0,0),
        d(2005,10,30,1,0,0),
        d(2006,3,26,1,0,0),
        d(2006,10,29,1,0,0),
        d(2007,3,25,1,0,0),
        d(2007,10,28,1,0,0),
        d(2008,3,30,1,0,0),
        d(2008,10,26,1,0,0),
        d(2009,3,29,1,0,0),
        d(2009,10,25,1,0,0),
        d(2010,3,28,1,0,0),
        d(2010,10,31,1,0,0),
        d(2011,3,27,1,0,0),
        d(2011,10,30,1,0,0),
        d(2012,3,25,1,0,0),
        d(2012,10,28,1,0,0),
        d(2013,3,31,1,0,0),
        d(2013,10,27,1,0,0),
        d(2014,3,30,1,0,0),
        d(2014,10,26,1,0,0),
        d(2015,3,29,1,0,0),
        d(2015,10,25,1,0,0),
        d(2016,3,27,1,0,0),
        d(2016,10,30,1,0,0),
        d(2017,3,26,1,0,0),
        d(2017,10,29,1,0,0),
        d(2018,3,25,1,0,0),
        d(2018,10,28,1,0,0),
        d(2019,3,31,1,0,0),
        d(2019,10,27,1,0,0),
        d(2020,3,29,1,0,0),
        d(2020,10,25,1,0,0),
        d(2021,3,28,1,0,0),
        d(2021,10,31,1,0,0),
        d(2022,3,27,1,0,0),
        d(2022,10,30,1,0,0),
        d(2023,3,26,1,0,0),
        d(2023,10,29,1,0,0),
        d(2024,3,31,1,0,0),
        d(2024,10,27,1,0,0),
        d(2025,3,30,1,0,0),
        d(2025,10,26,1,0,0),
        d(2026,3,29,1,0,0),
        d(2026,10,25,1,0,0),
        d(2027,3,28,1,0,0),
        d(2027,10,31,1,0,0),
        d(2028,3,26,1,0,0),
        d(2028,10,29,1,0,0),
        d(2029,3,25,1,0,0),
        d(2029,10,28,1,0,0),
        d(2030,3,31,1,0,0),
        d(2030,10,27,1,0,0),
        d(2031,3,30,1,0,0),
        d(2031,10,26,1,0,0),
        d(2032,3,28,1,0,0),
        d(2032,10,31,1,0,0),
        d(2033,3,27,1,0,0),
        d(2033,10,30,1,0,0),
        d(2034,3,26,1,0,0),
        d(2034,10,29,1,0,0),
        d(2035,3,25,1,0,0),
        d(2035,10,28,1,0,0),
        d(2036,3,30,1,0,0),
        d(2036,10,26,1,0,0),
        d(2037,3,29,1,0,0),
        d(2037,10,25,1,0,0),
        ]

    # Offset info matching _utc_transition_times entry for entry.  After the
    # initial LMT entry the zone strictly alternates WGT (UTC-3, standard)
    # and WGST (UTC-2, DST) and ends on a final WGT entry, so the 118-tuple
    # list is built programmatically instead of being spelled out:
    # 1 (LMT) + 58 * 2 (WGT/WGST pairs) + 1 (WGT) = 118 entries, identical
    # to the generated original.
    _transition_info = (
        [i(-12420,0,'LMT')]
        + [i(-10800,0,'WGT'), i(-7200,3600,'WGST')] * 58
        + [i(-10800,0,'WGT')]
        )

Godthab = Godthab()
|
|
"""
Construct dicts containing identifying information and DataFrames
for train, val, test splits of data, for use with classifiers.
See docs/datasets.md for more information on Vislab datasets.
"""
import numpy as np
import pandas as pd
import requests
import os
import subprocess
import csv
import vislab
import vislab.utils.cmdline
import vislab.utils.distributed
import vislab.datasets
def download_images(df, dirname, num_cpus=6):
    """
    Download images from a dataset to a directory.

    Writes dirname/image_url.csv (index + url per line) and a
    dirname/download_cmd.sh script that fans the downloads out with GNU
    parallel, then runs the script.

    Parameters
    ----------
    df: pandas.DataFrame
        Must have column 'image_url', and a unique index that can be
        used as filenames.
    dirname: str
        Images will be downloaded to dirname/original.
    num_cpus: int
        Downloading will be parallelized with this many processes.
    """
    vislab.util.makedirs(dirname + '/original')
    df[['image_url']].to_csv(
        dirname + '/image_url.csv',
        sep=' ', header=None, quoting=csv.QUOTE_ALL
    )
    cmd = 'parallel --gnu -a image_url.csv --colsep=" " -j {} '.format(
        num_cpus)
    # BUGFIX: wget's -P flag names a directory *prefix* and keeps the
    # server-side filename; -O writes the download to the exact path we
    # want ("original/<index>.jpg").
    cmd += '"wget {{2}} -O original/{{1}}.jpg"\n'
    with open(dirname + '/download_cmd.sh', 'w') as f:
        f.write(cmd)
    # BUGFIX: the original passed a list *and* shell=True; on POSIX that
    # runs bare "sh" with no script argument, so nothing was downloaded.
    # With an argument list, shell=False is the correct invocation.
    subprocess.call(['sh', dirname + '/download_cmd.sh'], cwd=dirname)
def resize_images(dirname, size, num_cpus=6):
    """
    Prepare the '<size>px' output directory for resized images.

    NOTE(review): the shell commands below are assembled but never
    executed -- this function is still a TODO stub; only the makedirs
    side effect currently happens.
    """
    # BUGFIX: the original concatenated the *unbound* str.format method
    # ("'/{}px'.format" without calling it), which raises TypeError at
    # runtime.  Call it with `size`.
    vislab.util.makedirs(dirname + '/{}px'.format(size))
    # TODO clean up below: run these (cf. download_images) instead of
    # discarding them.
    cmds = [
        'mkdir {}px'.format(size),
        'parallel -a _temp_photo_names.txt -j {0} "convert original/{{}} -resize {1}\> {1}px/{{}}"'.format(num_cpus, size),
        'parallel -a _temp_photo_names.txt -j {} "identify original/{{}}" > _sizes.txt'.format(num_cpus),
    ]
    return cmds
def get_image_sizes(dirname, num_cpus=6):
    """Placeholder: gather image sizes for all images under dirname.

    Currently unimplemented; see the `identify`-based command assembled in
    resize_images for the intended approach.
    """
    # TODO: borrow code from above, or write job-queue based approach
    pass
def dl_and_resize_images(df, dirname, size, num_cpus=6):
    """
    Parse dirname/_sizes.txt into a DataFrame of image dimensions.

    NOTE(review): despite the name (and the original copy-pasted
    docstring), this function does not download or resize anything, and
    the `df`, `size` and `num_cpus` arguments are unused.  Presumably the
    download/resize steps were meant to run first and populate
    `_sizes.txt` -- confirm against callers.

    Parameters
    ----------
    df: pandas.DataFrame
        Unused.
    dirname: str
        Directory containing a previously generated `_sizes.txt`.
    size: int
        Unused.
    num_cpus: int
        Unused.

    Returns
    -------
    size_df: pandas.DataFrame
        Columns 'photo_id', 'width', 'height'.
    """
    # Load in the image sizes
    size_df = pd.read_csv(dirname + '/_sizes.txt', sep=' ', header=None)
    # Column 0 is presumably "<photo_id>.jpg" (the 4-char extension is
    # stripped) and column 2 is "<width>x<height>" as printed by
    # ImageMagick's `identify` -- TODO confirm file format.
    size_df = pd.DataFrame({
        'photo_id': [_[:-4] for _ in size_df[0]],
        'width': [int(_.split('x')[0]) for _ in size_df[2]],
        'height': [int(_.split('x')[1]) for _ in size_df[2]]
    })
    return size_df
def get_train_test_split(df_, test_frac=0.2, random_seed=42):
    """
    Split DataFrame into train and test subsets, by preserving label
    ratios.

    Expects df_ to be a boolean label DataFrame (one column per label, as
    produced by get_bool_df): the test set first takes an equal number of
    rows per label, then is padded (or subsampled) to test_frac of the
    total.

    Returns a pandas.Series named '_split' indexed like df_, with values
    'train'/'test'.

    NOTE(review): uses legacy pandas APIs (`Series.argmin` returning a
    label, `Index.diff`, `.ix`, `iteritems`) that were removed in modern
    pandas -- verify against the pinned pandas version before upgrading.
    """
    np.random.seed(random_seed)
    N = df_.shape[0]
    # Shuffle rows so per-label slices below are random samples.
    df_ = df_.iloc[np.random.permutation(N)]
    # Get equal amount of test_frac of each label
    counts = df_.sum(0).astype(int)
    min_count = int(round(counts[counts.argmin()] * test_frac))
    test_balanced_set = np.concatenate([
        df_.index[np.where(df_[l])[0][:min_count]]
        for l, count in counts.iteritems()
    ]).tolist()
    # Then add enough of the rest to get to test_frac of total.
    remaining_ind = df_.index.diff(test_balanced_set).tolist()
    np.random.shuffle(remaining_ind)
    num_test = int(round(N * test_frac))
    num_to_add = num_test - len(test_balanced_set)
    if num_to_add > 0:
        test_balanced_set += remaining_ind[:num_to_add]
    else:
        # Balanced set overshot the budget: subsample it back down.
        test_balanced_set = np.random.choice(
            test_balanced_set, num_test, replace=False)
    split = pd.Series('train', index=df_.index, name='_split')
    split.ix[test_balanced_set] = 'test'
    return split
def get_bool_df(df, column_name, min_positive_examples=-1):
    """
    Return a boolean DataFrame whose columns consist of unique
    values of df[column_name] that have more than the required
    number of positive examples.

    Rows with NaN in column_name are dropped first.  Each output column is
    named '<column_name>_<value>' (spaces replaced by underscores, value
    ASCII-encoded).

    Parameters
    ----------
    df: pandas.DataFrame
    column_name: string
    min_positive_examples: int [-1]
        Only take those labels with more examples than this.
    """
    assert(column_name in df.columns)
    df = df.dropna(subset=[column_name])
    freqs = df[column_name].value_counts()
    # Filter out vals with less than min_pos examples.
    if min_positive_examples > 0:
        freqs = freqs[freqs >= min_positive_examples]
    vals = freqs.index.tolist()
    # Keep only rows whose value survived the frequency filter.
    df = df[df[column_name].apply(lambda x: x in vals)]
    # Expand values into own boolean DataFrame.
    bool_df = pd.DataFrame(index=df.index)
    for val in vals:
        # NOTE(review): .encode() yields bytes on Python 3, making the
        # str concatenation below fail -- this code path assumes Python 2.
        ascii_name = val.replace(' ', '_').encode('ascii', 'ignore')
        if len(column_name) > 0:
            ascii_name = column_name + '_' + ascii_name
        bool_df[ascii_name] = (df[column_name] == val)
    return bool_df
def subsample_dataset(df, num_images=-1, random_seed=42):
    """
    Return a random num_images-row subset of the dataset.

    Rows are chosen by a seeded numpy permutation.  When num_images is
    negative or at least as large as df, df itself is returned unchanged
    (and, in particular, NOT permuted).
    """
    total_rows = df.shape[0]
    if num_images < 0 or num_images >= total_rows:
        return df
    np.random.seed(random_seed)
    chosen = np.random.permutation(total_rows)[:num_images]
    return df.iloc[chosen]
def fetch_image_filenames_for_ids(image_ids, dataset_name):
    """
    Return list of image filenames for given image_ids in dataset_name.

    If the images are not already present on disk, downloads them to
    cache location.

    Parameters
    ----------
    image_ids: list of string
    dataset_name: string

    Returns
    -------
    good_filenames: list of string
        Only filenames of images that actually exist on disk.
    """
    image_dirname = vislab.util.makedirs(os.path.join(
        vislab.config['paths']['images'], dataset_name))

    df = load_dataset_df(dataset_name)
    if 'image_filename' in df.columns:
        # Dataset ships explicit per-image paths; use them directly.
        filenames = df['image_filename'].loc[image_ids]
    else:
        # Otherwise derive cache paths from the ids and fetch by URL.
        assert 'image_url' in df.columns
        filenames = [
            os.path.join(image_dirname, '{}.jpg'.format(image_id))
            for image_id in image_ids
        ]
        urls = df['image_url'].loc[image_ids]

    good_filenames = []
    for i in range(len(filenames)):
        filename = filenames[i]
        if os.path.exists(filename):
            good_filenames.append(filename)
            continue
        # NOTE(review): urls is a Series indexed by image_ids, so
        # integer indexing urls[i] relies on positional fallback of older
        # pandas -- confirm before upgrading pandas.
        try:
            print("Download image for {}: {}".format(dataset_name, filename))
            r = requests.get(urls[i])
            with open(filename, 'wb') as f:
                f.write(r.content)
            good_filenames.append(filename)
        except Exception as e:
            # Best-effort: a failed download simply omits the file.
            print("Exception: {}".format(e))
    return good_filenames
def load_dataset_df(dataset_name, force=False):
    """Load the DataFrame for dataset_name via its registered loader.

    Raises Exception when dataset_name is not a key of
    vislab.datasets.DATASETS.
    """
    registry = vislab.datasets.DATASETS
    if dataset_name not in registry:
        raise Exception('Unknown dataset.')
    loader = registry[dataset_name]['fn']
    return loader(force=force)
def get_df_with_args(args=None):
    """
    Use the parsed command line arguments to load the correct dataset.

    Assumes the relevant datasets have already been fetched.
    If this is not true, refer to the individual dataset code on info
    about how to load.

    Parameters
    ----------
    args: argparse.Namespace or None
        Needs .dataset, .force, .num_images, .random_seed; when None, the
        arguments are parsed from the command line.
    """
    # Parse arguments.
    if args is None:
        args = vislab.utils.cmdline.get_args(
            'dataset', 'get_df', ['dataset', 'processing'])
    df = load_dataset_df(args.dataset, args.force)
    df = subsample_dataset(df, args.num_images, args.random_seed)
    return df
if __name__ == '__main__':
    # Command-line entry point: dispatch to one of the listed functions
    # based on argv (handled by the shared cmdline helper).
    possible_functions = {
        'get_df': get_df_with_args,
    }
    vislab.utils.cmdline.run_function_in_file(__file__, possible_functions)
|
|
"""
Record Arrays
=============
Record arrays expose the fields of structured arrays as properties.
Most commonly, ndarrays contain elements of a single type, e.g. floats, integers,
bools etc. However, it is possible for elements to be combinations of these,
such as::
>>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])
>>> a
array([(1, 2.0), (1, 2.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Here, each element consists of two fields: x (an int), and y (a float).
This is known as a structured array. The different fields are analogous
to columns in a spread-sheet. The different fields can be accessed as
one would a dictionary::
>>> a['x']
array([1, 1])
>>> a['y']
array([ 2., 2.])
Record arrays allow us to access fields as properties::
>>> ar = a.view(np.recarray)
>>> ar.x
array([1, 1])
>>> ar.y
array([ 2., 2.])
"""
# All of the functions allow formats to be a dtype
__all__ = ['record', 'recarray', 'format_parser']
import numeric as sb
from defchararray import chararray
import numerictypes as nt
import types
import os
import sys
# Local alias for the ndarray base class from the numeric module.
ndarray = sb.ndarray

# Map one-character byteorder specifiers (as accepted by the `byteorder`
# arguments throughout this module) to numpy's canonical byteorder codes.
_byteorderconv = {'b':'>',
                  'l':'<',
                  'n':'=',
                  'B':'>',
                  'L':'<',
                  'N':'=',
                  'S':'s',
                  's':'s',
                  '>':'>',
                  '<':'<',
                  '=':'=',
                  '|':'|',
                  'I':'|',
                  'i':'|'}

# formats regular expression
# allows multidimension spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
numfmt = nt.typeDict
_typestr = nt._typestr
def find_duplicate(list):
    """Find duplication in a list, return a list of duplicated elements.

    The result lists each duplicated element once, in order of its first
    appearance (same order as before).  This version is O(n) instead of
    the previous O(n**2) scan; elements must be hashable (field names are
    strings here).  The parameter keeps its historical name `list` for
    interface compatibility.
    """
    counts = {}
    order = []
    for item in list:
        if item in counts:
            counts[item] += 1
        else:
            counts[item] = 1
            order.append(item)
    return [item for item in order if counts[item] > 1]
class format_parser:
    """Class to convert formats, names, titles description to a dtype

    After constructing the format_parser object, the dtype attribute is
    the converted data-type.

    dtype = format_parser(formats, names, titles).dtype

    Parameters
    ----------
    formats : string or list
        comma-separated format descriptions --- 'f8, i4, a5'
        list of format description strings --- ['f8', 'i4', 'a5']
    names : string or (list or tuple of strings)
        comma-separated field names --- 'col1, col2, col3'
        list or tuple of field names
    titles : sequence
        sequence of title strings or unicode
    aligned : bool
        align the fields by padding as the C-compiler would
    byteorder :
        If specified, all the fields will be changed to the
        provided byteorder.  Otherwise, the default byteorder is
        used.

    Returns
    -------
    object
        A Python object whose dtype attribute is a data-type.
    """

    def __init__(self, formats, names, titles, aligned=False, byteorder=None):
        # Build the descriptor in three steps, then expose it as .dtype.
        self._parseFormats(formats, aligned)
        self._setfieldnames(names, titles)
        self._createdescr(byteorder)
        self.dtype = self._descr

    def _parseFormats(self, formats, aligned=0):
        """ Parse the field formats """
        if formats is None:
            raise ValueError, "Need formats argument"
        if isinstance(formats, list):
            if len(formats) < 2:
                # A single-item list would parse as a scalar dtype; pad with
                # an empty entry so the join yields a structured spec.
                formats.append('')
            formats = ','.join(formats)
        dtype = sb.dtype(formats, aligned)
        fields = dtype.fields
        if fields is None:
            # The spec described a plain scalar: wrap it as a 1-field struct.
            dtype = sb.dtype([('f1', dtype)], aligned)
            fields = dtype.fields
        keys = dtype.names
        self._f_formats = [fields[key][0] for key in keys]
        self._offsets = [fields[key][1] for key in keys]
        self._nfields = len(keys)

    def _setfieldnames(self, names, titles):
        """convert input field names into a list and assign to the _names
        attribute """

        if (names):
            if (type(names) in [types.ListType, types.TupleType]):
                pass
            elif (type(names) == types.StringType):
                names = names.split(',')
            else:
                raise NameError, "illegal input names %s" % `names`

            self._names = [n.strip() for n in names[:self._nfields]]
        else:
            self._names = []

        # if the names are not specified, they will be assigned as
        #  "f0, f1, f2,..."
        # if not enough names are specified, they will be assigned as "f[n],
        # f[n+1],..." etc. where n is the number of specified names..."
        self._names += ['f%d' % i for i in range(len(self._names),
                                                 self._nfields)]
        # check for redundant names
        _dup = find_duplicate(self._names)
        if _dup:
            raise ValueError, "Duplicate field names: %s" % _dup

        if (titles):
            self._titles = [n.strip() for n in titles[:self._nfields]]
        else:
            self._titles = []
            titles = []

        # Pad the titles list with None for fields without a title.
        if (self._nfields > len(titles)):
            self._titles += [None]*(self._nfields-len(titles))

    def _createdescr(self, byteorder):
        descr = sb.dtype({'names':self._names,
                          'formats':self._f_formats,
                          'offsets':self._offsets,
                          'titles':self._titles})
        if (byteorder is not None):
            # Only the first character of the byteorder spec is significant.
            byteorder = _byteorderconv[byteorder[0]]
            descr = descr.newbyteorder(byteorder)

        self._descr = descr
class record(nt.void):
    """A data-type scalar that allows field access as attribute lookup.
    """

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return str(self.item())

    def __getattribute__(self, attr):
        # These three must always resolve to the real void attributes,
        # never be shadowed by same-named fields.
        if attr in ['setfield', 'getfield', 'dtype']:
            return nt.void.__getattribute__(self, attr)
        # Prefer a genuine attribute; fall back to field lookup.
        try:
            return nt.void.__getattribute__(self, attr)
        except AttributeError:
            pass
        fielddict = nt.void.__getattribute__(self, 'dtype').fields
        res = fielddict.get(attr, None)
        if res:
            obj = self.getfield(*res[:2])
            # if it has fields return a recarray,
            # if it's a string ('SU') return a chararray
            # otherwise return the object
            try:
                dt = obj.dtype
            except AttributeError:
                return obj
            if dt.fields:
                return obj.view(obj.__class__)
            if dt.char in 'SU':
                return obj.view(chararray)
            return obj
        else:
            raise AttributeError, "'record' object has no "\
                    "attribute '%s'" % attr

    def __setattr__(self, attr, val):
        if attr in ['setfield', 'getfield', 'dtype']:
            raise AttributeError, "Cannot set '%s' attribute" % attr
        fielddict = nt.void.__getattribute__(self, 'dtype').fields
        res = fielddict.get(attr, None)
        if res:
            # Attribute names a field: write through setfield.
            return self.setfield(val, *res[:2])
        else:
            # Only allow setting attributes that already exist.
            if getattr(self, attr, None):
                return nt.void.__setattr__(self, attr, val)
            else:
                raise AttributeError, "'record' object has no "\
                        "attribute '%s'" % attr

    def pprint(self):
        # pretty-print all fields, one "name: value" row per field,
        # right-aligning names to the longest one.
        names = self.dtype.names
        maxlen = max([len(name) for name in names])
        rows = []
        fmt = '%% %ds: %%s' % maxlen
        for name in names:
            rows.append(fmt % (name, getattr(self, name)))
        return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
# named fields already) The biggest difference is that it can use
# attribute-lookup to find the fields and it is constructed using
# a record.
# If byteorder is given it forces a particular byteorder on all
# the fields (and any subfields)
class recarray(ndarray):
    """
    Construct an ndarray that allows field access using attributes.

    Arrays may have a data-types containing fields, analogous
    to columns in a spread sheet.  An example is ``[(x, int), (y, float)]``,
    where each entry in the array is a pair of ``(int, float)``.  Normally,
    these attributes are accessed using dictionary lookups such as ``arr['x']``
    and ``arr['y']``.  Record arrays allow the fields to be accessed as members
    of the array, using ``arr.x`` and ``arr.y``.

    Parameters
    ----------
    shape : tuple
        Shape of output array.
    dtype : data-type, optional
        The desired data-type.  By default, the data-type is determined
        from `formats`, `names`, `titles`, `aligned` and `byteorder`.
    formats : list of data-types, optional
        A list containing the data-types for the different columns, e.g.
        ``['i4', 'f8', 'i4']``.  `formats` does *not* support the new
        convention of using types directly, i.e. ``(int, float, int)``.
        Note that `formats` must be a list, not a tuple.
        Given that `formats` is somewhat limited, we recommend specifying
        `dtype` instead.
    names : tuple of strings, optional
        The name of each column, e.g. ``('x', 'y', 'z')``.
    buf : buffer, optional
        By default, a new array is created of the given shape and data-type.
        If `buf` is specified and is an object exposing the buffer interface,
        the array will use the memory from the existing buffer.  In this case,
        the `offset` and `strides` keywords are available.

    Other Parameters
    ----------------
    titles : tuple of strings, optional
        Aliases for column names.  For example, if `names` were
        ``('x', 'y', 'z')`` and `titles` is
        ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
        ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
    byteorder : {'<', '>', '='}, optional
        Byte-order for all fields.
    aligned : {True, False}, optional
        Align the fields in memory as the C-compiler would.
    strides : tuple of ints, optional
        Buffer (`buf`) is interpreted according to these strides (strides
        define how many bytes each array element, row, column, etc.
        occupy in memory).
    offset : int, optional
        Start reading buffer (`buf`) from this offset onwards.

    Returns
    -------
    rec : recarray
        Empty array of the given shape and type.

    See Also
    --------
    rec.fromrecords : Construct a record array from data.
    record : fundamental data-type for recarray
    format_parser : determine a data-type from formats, names, titles

    Notes
    -----
    This constructor can be compared to ``empty``: it creates a new record
    array but does not fill it with data.  To create a record array from data,
    use one of the following methods:

    1. Create a standard ndarray and convert it to a record array,
       using ``arr.view(np.recarray)``
    2. Use the `buf` keyword.
    3. Use `np.rec.fromrecords`.

    Examples
    --------
    Create an array with two fields, ``x`` and ``y``:

    >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
    >>> x
    array([(1.0, 2), (3.0, 4)],
          dtype=[('x', '<f8'), ('y', '<i4')])

    >>> x['x']
    array([ 1.,  3.])

    View the array as a record array:

    >>> x = x.view(np.recarray)

    >>> x.x
    array([ 1.,  3.])

    >>> x.y
    array([2, 4])

    Create a new, empty record array:

    >>> np.recarray((2,),
    ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
    rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
           (3471280, 1.2134086255804012e-316, 0)],
          dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])

    """

    def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
                formats=None, names=None, titles=None,
                byteorder=None, aligned=False):
        # An explicit dtype wins over the formats/names/titles description.
        if dtype is not None:
            descr = sb.dtype(dtype)
        else:
            descr = format_parser(formats, names, titles, aligned, byteorder)._descr

        if buf is None:
            self = ndarray.__new__(subtype, shape, (record, descr))
        else:
            # Reuse the caller-provided buffer instead of allocating.
            self = ndarray.__new__(subtype, shape, (record, descr),
                                   buffer=buf, offset=offset,
                                   strides=strides)
        return self

    def __getattribute__(self, attr):
        try:
            return object.__getattribute__(self, attr)
        except AttributeError:  # attr must be a fieldname
            pass
        fielddict = ndarray.__getattribute__(self, 'dtype').fields
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError, "record array has no attribute %s" % attr
        obj = self.getfield(*res)
        # if it has fields return a recarray, otherwise return
        # normal array
        if obj.dtype.fields:
            return obj
        if obj.dtype.char in 'SU':
            return obj.view(chararray)
        return obj.view(ndarray)

    # Save the dictionary
    #  If the attr is a field name and not in the saved dictionary
    #  Undo any "setting" of the attribute and do a setfield
    # Thus, you can't create attributes on-the-fly that are field names.
    def __setattr__(self, attr, val):
        newattr = attr not in self.__dict__
        try:
            ret = object.__setattr__(self, attr, val)
        except:
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            if attr not in fielddict:
                exctype, value = sys.exc_info()[:2]
                raise exctype, value
        else:
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            if attr not in fielddict:
                return ret
            if newattr:         # We just added this one
                try:            #  or this setattr worked on an internal
                                #  attribute.
                    object.__delattr__(self, attr)
                except:
                    return ret
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError, "record array has no attribute %s" % attr
        return self.setfield(val, *res)

    def __getitem__(self, indx):
        obj = ndarray.__getitem__(self, indx)
        # Plain (builtin-dtype) results drop down to a normal ndarray view.
        if (isinstance(obj, ndarray) and obj.dtype.isbuiltin):
            return obj.view(ndarray)
        return obj

    def __repr__(self):
        ret = ndarray.__repr__(self)
        return ret.replace("recarray", "rec.array", 1)

    def field(self, attr, val=None):
        # Accept either a field index or a field name.
        if isinstance(attr, int):
            names = ndarray.__getattribute__(self, 'dtype').names
            attr = names[attr]

        fielddict = ndarray.__getattribute__(self, 'dtype').fields

        res = fielddict[attr][:2]

        if val is None:
            obj = self.getfield(*res)
            if obj.dtype.fields:
                return obj
            if obj.dtype.char in 'SU':
                return obj.view(chararray)
            return obj.view(ndarray)
        else:
            return self.setfield(val, *res)

    def view(self, dtype=None, type=None):
        if dtype is None:
            return ndarray.view(self, type)
        elif type is None:
            # dtype may actually be an ndarray subclass here.
            try:
                if issubclass(dtype, ndarray):
                    return ndarray.view(self, dtype)
            except TypeError:
                pass
            dtype = sb.dtype(dtype)
            if dtype.fields is None:
                # Viewing as an unstructured dtype loses record semantics.
                return self.__array__().view(dtype)
            return ndarray.view(self, dtype)
        else:
            return ndarray.view(self, dtype, type)
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
               names=None, titles=None, aligned=False, byteorder=None):
    """ create a record array from a (flat) list of arrays

    Each input array becomes one field of the result; all arrays must
    share the record shape (after stripping per-field sub-shapes).

    >>> x1=np.array([1,2,3,4])
    >>> x2=np.array(['a','dd','xyz','12'])
    >>> x3=np.array([1.1,2,3,4])
    >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
    >>> print r[1]
    (2, 'dd', 2.0)
    >>> x1[1]=34
    >>> r.a
    array([1, 2, 3, 4])
    """

    arrayList = [sb.asarray(x) for x in arrayList]

    if shape is None or shape == 0:
        shape = arrayList[0].shape

    if isinstance(shape, int):
        shape = (shape,)

    if formats is None and dtype is None:
        # go through each object in the list to see if it is an ndarray
        # and determine the formats.
        formats = ''
        for obj in arrayList:
            if not isinstance(obj, ndarray):
                raise ValueError, "item in the array list must be an ndarray."
            formats += _typestr[obj.dtype.type]
            if issubclass(obj.dtype.type, nt.flexible):
                # Flexible types (strings etc.) need an explicit itemsize.
                formats += `obj.itemsize`
            formats += ','
        formats = formats[:-1]

    if dtype is not None:
        descr = sb.dtype(dtype)
        _names = descr.names
    else:
        parsed = format_parser(formats, names, titles, aligned, byteorder)
        _names = parsed._names
        descr = parsed._descr

    # Determine shape from data-type.
    if len(descr) != len(arrayList):
        raise ValueError, "mismatch between the number of fields "\
              "and the number of arrays"

    d0 = descr[0].shape
    nn = len(d0)
    if nn > 0:
        # First field carries a sub-shape: the trailing dims belong to the
        # field, not to the record shape.
        shape = shape[:-nn]

    for k, obj in enumerate(arrayList):
        nn = len(descr[k].shape)
        testshape = obj.shape[:len(obj.shape)-nn]
        if testshape != shape:
            raise ValueError, "array-shape mismatch in array %d" % k

    _array = recarray(shape, descr)

    # populate the record array (makes a copy)
    for i in range(len(arrayList)):
        _array[_names[i]] = arrayList[i]

    return _array
# shape must be 1-d if you use list of lists...
def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
                titles=None, aligned=False, byteorder=None):
    """ create a recarray from a list of records in text form

        The data in the same field can be heterogeneous, they will be promoted
        to the highest data type.  This method is intended for creating
        smaller record arrays.  If used to create large array without formats
        defined

        r=fromrecords([(2,3.,'abc')]*100000)

        it can be slow.

        If formats is None, then this will auto-detect formats. Use list of
        tuples rather than list of lists for faster processing.

    >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
    ... names='col1,col2,col3')
    >>> print r[0]
    (456, 'dbe', 1.2)
    >>> r.col1
    array([456,   2])
    >>> r.col2
    chararray(['dbe', 'de'],
          dtype='|S3')
    >>> import cPickle
    >>> print cPickle.loads(cPickle.dumps(r))
    [(456, 'dbe', 1.2) (2, 'de', 1.3)]
    """

    nfields = len(recList[0])
    if formats is None and dtype is None:  # slower
        # Auto-detect: transpose the records into per-field columns and let
        # fromarrays infer each field's dtype.
        obj = sb.array(recList, dtype=object)
        arrlist = [sb.array(obj[..., i].tolist()) for i in xrange(nfields)]
        return fromarrays(arrlist, formats=formats, shape=shape, names=names,
                          titles=titles, aligned=aligned, byteorder=byteorder)

    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr

    try:
        retval = sb.array(recList, dtype=descr)
    except TypeError:  # list of lists instead of list of tuples
        if (shape is None or shape == 0):
            shape = len(recList)
        if isinstance(shape, (int, long)):
            shape = (shape,)
        if len(shape) > 1:
            raise ValueError, "Can only deal with 1-d array."
        _array = recarray(shape, descr)
        # Assign record by record, coercing each inner list to a tuple.
        for k in xrange(_array.size):
            _array[k] = tuple(recList[k])
        return _array
    else:
        if shape is not None and retval.shape != shape:
            retval.shape = shape

    res = retval.view(recarray)

    # Ensure scalar elements come back as `record`, not plain void.
    res.dtype = sb.dtype((record, res.dtype))
    return res
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
               names=None, titles=None, aligned=False, byteorder=None):
    """ create a (read-only) record array from binary data contained in
    a string

    The string's buffer is used directly (no copy); either dtype or
    formats must be supplied to describe the record layout.
    """

    if dtype is None and formats is None:
        raise ValueError, "Must have dtype= or formats="

    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr

    itemsize = descr.itemsize
    if (shape is None or shape == 0 or shape == -1):
        # Infer the record count from the remaining bytes.
        shape = (len(datastring) - offset) / itemsize

    _array = recarray(shape, descr, buf=datastring, offset=offset)
    return _array
def get_remaining_size(fd):
    """Return the number of bytes between fd's current position and EOF.

    Uses os.fstat on the underlying descriptor when fd has a fileno();
    file-like objects without one fall back to os.path.getsize of
    fd.name.
    """
    position = fd.tell()
    try:
        descriptor = fd.fileno()
    except AttributeError:
        return os.path.getsize(fd.name) - position
    return os.fstat(descriptor).st_size - position
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
             names=None, titles=None, aligned=False, byteorder=None):
    """Create an array from binary file data

    If file is a string then that file is opened, else it is assumed
    to be a file object.

    >>> from tempfile import TemporaryFile
    >>> a = np.empty(10,dtype='f8,i4,a5')
    >>> a[5] = (0.5,10,'abcde')
    >>>
    >>> fd=TemporaryFile()
    >>> a = a.newbyteorder('<')
    >>> a.tofile(fd)
    >>>
    >>> fd.seek(0)
    >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
    ... byteorder='<')
    >>> print r[5]
    (0.5, 10, 'abcde')
    >>> r.shape
    (10,)
    """

    if (shape is None or shape == 0):
        shape = (-1,)
    elif isinstance(shape, (int, long)):
        shape = (shape,)

    # name flags that we opened the file ourselves and must close it.
    name = 0
    if isinstance(fd, str):
        name = 1
        fd = open(fd, 'rb')
    if (offset > 0):
        # Seek relative to the current position.
        fd.seek(offset, 1)
    size = get_remaining_size(fd)

    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr

    itemsize = descr.itemsize

    shapeprod = sb.array(shape).prod()
    shapesize = shapeprod * itemsize
    if shapesize < 0:
        # A -1 entry in shape: infer that dimension from the bytes left.
        shape = list(shape)
        shape[shape.index(-1)] = size / -shapesize
        shape = tuple(shape)
        shapeprod = sb.array(shape).prod()

    nbytes = shapeprod * itemsize

    if nbytes > size:
        raise ValueError(
                "Not enough bytes left in file for specified shape and type")

    # create the array
    _array = recarray(shape, descr)
    nbytesread = fd.readinto(_array.data)
    if nbytesread != nbytes:
        raise IOError("Didn't read as many bytes as expected")
    if name:
        fd.close()

    return _array
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
          names=None, titles=None, aligned=False, byteorder=None, copy=True):
    """Construct a record array from a wide-variety of objects.

    Dispatches on the type of obj: None (empty/buffer-backed), string
    (binary data), list/tuple (records or arrays), recarray, open file,
    ndarray, or anything exposing __array_interface__.
    """

    if isinstance(obj, (type(None), str, file)) and (formats is None) \
           and (dtype is None):
        raise ValueError("Must define formats (or dtype) if object is "\
                         "None, string, or an open file")

    kwds = {}
    if dtype is not None:
        dtype = sb.dtype(dtype)
    elif formats is not None:
        dtype = format_parser(formats, names, titles,
                              aligned, byteorder)._descr
    else:
        # No dtype/formats: forward the description args to the helper
        # constructors, which will auto-detect.
        kwds = {'formats': formats,
                'names': names,
                'titles': titles,
                'aligned': aligned,
                'byteorder': byteorder
                }

    if obj is None:
        if shape is None:
            raise ValueError("Must define a shape if obj is None")
        return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)

    elif isinstance(obj, str):
        return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)

    elif isinstance(obj, (list, tuple)):
        # A nested sequence means a list of records; a flat one means a
        # list of per-field arrays.
        if isinstance(obj[0], (tuple, list)):
            return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
        else:
            return fromarrays(obj, dtype=dtype, shape=shape, **kwds)

    elif isinstance(obj, recarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        return new

    elif isinstance(obj, file):
        return fromfile(obj, dtype=dtype, shape=shape, offset=offset)

    elif isinstance(obj, ndarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        res = new.view(recarray)
        if issubclass(res.dtype.type, nt.void):
            res.dtype = sb.dtype((record, res.dtype))
        return res

    else:
        # Last resort: anything exposing the array interface protocol.
        interface = getattr(obj, "__array_interface__", None)
        if interface is None or not isinstance(interface, dict):
            raise ValueError("Unknown input type")
        obj = sb.array(obj)
        if dtype is not None and (obj.dtype != dtype):
            obj = obj.view(dtype)
        res = obj.view(recarray)
        if issubclass(res.dtype.type, nt.void):
            res.dtype = sb.dtype((record, res.dtype))
        return res
|
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# Copyright 2009-2015 by Peter Cock. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code for more fancy file handles.
Classes:
- UndoHandle File object decorator with support for undo-like operations.
Additional private classes used in Bio.SeqIO and Bio.SearchIO for indexing
files are also defined under Bio.File but these are not intended for direct
use.
"""
from __future__ import print_function
import codecs
import os
import sys
import contextlib
import itertools
from Bio._py3k import basestring
try:
from collections import UserDict as _dict_base
except ImportError:
from UserDict import DictMixin as _dict_base
try:
from sqlite3 import dbapi2 as _sqlite
from sqlite3 import IntegrityError as _IntegrityError
from sqlite3 import OperationalError as _OperationalError
except ImportError:
# Not present on Jython, but should be included in Python 2.5
# or later (unless compiled from source without its dependencies)
# Still want to offer in-memory indexing.
_sqlite = None
pass
__docformat__ = "restructuredtext en"
@contextlib.contextmanager
def as_handle(handleish, mode='r', **kwargs):
    r"""Context manager to ensure we are using a handle.

    Context manager for arguments that can be passed to SeqIO and AlignIO
    read, write, and parse methods: either file objects or strings.

    When given a string, returns a file handle open to handleish with the
    provided mode which will be closed when the manager exits. All other
    inputs are returned, and are *not* closed.

    - handleish - Either a string or file handle
    - mode - Mode to open handleish (used only if handleish is a string)
    - kwargs - Further arguments to pass to open(...)

    Example:

    >>> with as_handle('seqs.fasta', 'w') as fp:
    ...     fp.write('>test\nACGT')
    >>> fp.closed
    True

    >>> handle = open('seqs.fasta', 'w')
    >>> with as_handle(handle) as fp:
    ...     fp.write('>test\nACGT')
    >>> fp.closed
    False
    >>> fp.close()

    Note that if the mode argument includes U (for universal new lines)
    this will be removed under Python 3 where it is redundant and has
    been deprecated (this happens automatically in text mode).
    """
    if not isinstance(handleish, basestring):
        # Already a handle (or handle-like object): hand it back untouched.
        yield handleish
        return
    if sys.version_info[0] >= 3 and "U" in mode:
        # Universal newlines is deprecated/redundant on Python 3.
        mode = mode.replace("U", "")
    # codecs.open is only needed when an explicit encoding was requested.
    opener = codecs.open if 'encoding' in kwargs else open
    with opener(handleish, mode, **kwargs) as fp:
        yield fp
def _open_for_random_access(filename):
    """Open a file in binary mode, spot if it is BGZF format etc (PRIVATE).

    This functionality is used by the Bio.SeqIO and Bio.SearchIO index
    and index_db functions.
    """
    fh = open(filename, "rb")
    from . import bgzf
    try:
        # BgzfReader raises ValueError when the file lacks a BGZF header.
        return bgzf.BgzfReader(mode="rb", fileobj=fh)
    except ValueError as err:
        assert "BGZF" in str(err)
        # Not a BGZF file after all - rewind and hand back the plain handle:
        fh.seek(0)
        return fh
class UndoHandle(object):
    """A Python handle that adds functionality for saving lines.

    Saves lines in a LIFO fashion.

    Added methods:
     - saveline    Save a line to be returned next time.
     - peekline    Peek at the next line without consuming it.
    """

    def __init__(self, handle):
        self._handle = handle
        self._saved = []  # pushed-back text, consumed before the handle
        try:
            # When wrapping an online handle, expose its URL too:
            self.url = handle.url
        except AttributeError:
            pass

    def __iter__(self):
        return self

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    if sys.version_info[0] < 3:
        def next(self):
            """Python 2 style alias for Python 3 style __next__ method."""
            return self.__next__()

    def readlines(self, *args, **keywds):
        lines = self._saved + self._handle.readlines(*args, **keywds)
        del self._saved[:]
        return lines

    def readline(self, *args, **keywds):
        if self._saved:
            return self._saved.pop(0)
        return self._handle.readline(*args, **keywds)

    def read(self, size=-1):
        if size == -1:
            # Everything: all saved text plus the rest of the handle.
            prefix = "".join(self._saved)
            del self._saved[:]
        else:
            # Drain at most ``size`` characters from the saved text first.
            pieces = []
            while size > 0 and self._saved:
                head = self._saved[0]
                if len(head) <= size:
                    size -= len(head)
                    pieces.append(self._saved.pop(0))
                else:
                    pieces.append(head[:size])
                    self._saved[0] = head[size:]
                    size = 0
            prefix = "".join(pieces)
        return prefix + self._handle.read(size)

    def saveline(self, line):
        """Push back a line; it is returned by the next read/readline."""
        if line:
            self._saved.insert(0, line)

    def peekline(self):
        """Return the next line without consuming it."""
        if self._saved:
            return self._saved[0]
        line = self._handle.readline()
        self.saveline(line)
        return line

    def tell(self):
        # Position as the caller sees it: the wrapped handle's position
        # minus whatever has been pushed back.
        return self._handle.tell() - sum(len(line) for line in self._saved)

    def seek(self, *args):
        # Seeking invalidates any pushed-back text.
        del self._saved[:]
        self._handle.seek(*args)

    def __getattr__(self, attr):
        # Delegate everything else to the wrapped handle.
        return getattr(self._handle, attr)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self._handle.close()
# The rest of this file defines code used in Bio.SeqIO and Bio.SearchIO
# for indexing
class _IndexedSeqFileProxy(object):
    """Base class for file format specific random access (PRIVATE).

    This is subclassed in both Bio.SeqIO for indexing as SeqRecord
    objects, and in Bio.SearchIO for indexing QueryResult objects.

    Subclasses for each file format should define '__iter__', 'get'
    and optionally 'get_raw' methods.
    """
    def __iter__(self):
        """Returns (identifier, offset, length in bytes) tuples.

        The length can be zero where it is not implemented or not
        possible for a particular file format.
        """
        raise NotImplementedError("Subclass should implement this")
    def get(self, offset):
        """Returns parsed object for this entry."""
        # Most file formats with self contained records can be handled by
        # parsing StringIO(_bytes_to_string(self.get_raw(offset)))
        raise NotImplementedError("Subclass should implement this")
    def get_raw(self, offset):
        """Returns bytes string (if implemented for this file format)."""
        # Should be done by each sub-class (if possible)
        raise NotImplementedError("Not available for this file format.")
class _IndexedSeqFileDict(_dict_base):
    """Read only dictionary interface to a sequential record file.

    This code is used in both Bio.SeqIO for indexing as SeqRecord
    objects, and in Bio.SearchIO for indexing QueryResult objects.

    Keeps the keys and associated file offsets in memory, reads the file
    to access entries as objects parsing them on demand. This approach
    is memory limited, but will work even with millions of records.

    Note duplicate keys are not allowed. If this happens, a ValueError
    exception is raised.

    As used in Bio.SeqIO, by default the SeqRecord's id string is used
    as the dictionary key. In Bio.SearchIO, the query's id string is
    used. This can be changed by supplying an optional key_function,
    a callback function which will be given the record id and must
    return the desired key. For example, this allows you to parse
    NCBI style FASTA identifiers, and extract the GI number to use
    as the dictionary key.

    Note that this dictionary is essentially read only. You cannot
    add or change values, pop values, nor clear the dictionary.
    """
    def __init__(self, random_access_proxy, key_function,
                 repr, obj_repr):
        # Use key_function=None for default value
        self._proxy = random_access_proxy
        self._key_function = key_function
        self._repr = repr
        self._obj_repr = obj_repr
        # Build the key -> offset lookup table up front, via one linear
        # scan of the file through the proxy's __iter__.
        if key_function:
            offset_iter = (
                (key_function(k), o, l) for (k, o, l) in random_access_proxy)
        else:
            offset_iter = random_access_proxy
        offsets = {}
        for key, offset, length in offset_iter:
            # Note - we don't store the length because I want to minimise the
            # memory requirements. With the SQLite backend the length is kept
            # and is used to speed up the get_raw method (by about 3 times).
            # The length should be provided by all the current backends except
            # SFF where there is an existing Roche index we can reuse (very fast
            # but lacks the record lengths)
            # assert length or format in ["sff", "sff-trim"], \
            #     "%s at offset %i given length %r (%s format %s)" \
            #     % (key, offset, length, filename, format)
            if key in offsets:
                # Close the handle before raising, so callers don't leak it.
                self._proxy._handle.close()
                raise ValueError("Duplicate key '%s'" % key)
            else:
                offsets[key] = offset
        self._offsets = offsets
    def __repr__(self):
        return self._repr
    def __str__(self):
        # TODO - How best to handle the __str__ for SeqIO and SearchIO?
        if self:
            return "{%r : %s(...), ...}" % (list(self.keys())[0], self._obj_repr)
        else:
            return "{}"
    def __contains__(self, key):
        return key in self._offsets
    def __len__(self):
        """How many records are there?"""
        return len(self._offsets)
    def items(self):
        """Iterate over the (key, SeqRecord) items.

        This tries to act like a Python 3 dictionary, and does not return
        a list of (key, value) pairs due to memory concerns.
        """
        for key in self.__iter__():
            yield key, self.__getitem__(key)
    def values(self):
        """Iterate over the SeqRecord items.

        This tries to act like a Python 3 dictionary, and does not return
        a list of value due to memory concerns.
        """
        for key in self.__iter__():
            yield self.__getitem__(key)
    def keys(self):
        """Iterate over the keys.

        This tries to act like a Python 3 dictionary, and does not return
        a list of keys due to memory concerns.
        """
        return self.__iter__()
    if hasattr(dict, "iteritems"):
        # Python 2, also define iteritems etc
        def itervalues(self):
            """Iterate over the SeqRecord items."""
            for key in self.__iter__():
                yield self.__getitem__(key)
        def iteritems(self):
            """Iterate over the (key, SeqRecord) items."""
            for key in self.__iter__():
                yield key, self.__getitem__(key)
        def iterkeys(self):
            """Iterate over the keys."""
            return self.__iter__()
    def __iter__(self):
        """Iterate over the keys."""
        return iter(self._offsets)
    def __getitem__(self, key):
        """x.__getitem__(y) <==> x[y]"""
        # Pass the offset to the proxy
        record = self._proxy.get(self._offsets[key])
        # Sanity check: the record parsed at that offset must map back to
        # the key we looked up (catches a stale or corrupted index).
        if self._key_function:
            key2 = self._key_function(record.id)
        else:
            key2 = record.id
        if key != key2:
            raise ValueError("Key did not match (%s vs %s)" % (key, key2))
        return record
    def get(self, k, d=None):
        """D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
        try:
            return self.__getitem__(k)
        except KeyError:
            return d
    def get_raw(self, key):
        """Similar to the get method, but returns the record as a raw string.

        If the key is not found, a KeyError exception is raised.

        Note that on Python 3 a bytes string is returned, not a typical
        unicode string.

        NOTE - This functionality is not supported for every file format.
        """
        # Pass the offset to the proxy
        return self._proxy.get_raw(self._offsets[key])
    def __setitem__(self, key, value):
        """Would allow setting or replacing records, but not implemented."""
        raise NotImplementedError("An indexed a sequence file is read only.")
    def update(self, *args, **kwargs):
        """Would allow adding more values, but not implemented."""
        raise NotImplementedError("An indexed a sequence file is read only.")
    def pop(self, key, default=None):
        """Would remove specified record, but not implemented."""
        raise NotImplementedError("An indexed a sequence file is read only.")
    def popitem(self):
        """Would remove and return a SeqRecord, but not implemented."""
        raise NotImplementedError("An indexed a sequence file is read only.")
    def clear(self):
        """Would clear dictionary, but not implemented."""
        raise NotImplementedError("An indexed a sequence file is read only.")
    def fromkeys(self, keys, value=None):
        """A dictionary method which we don't implement."""
        raise NotImplementedError("An indexed a sequence file doesn't "
                                  "support this.")
    def copy(self):
        """A dictionary method which we don't implement."""
        raise NotImplementedError("An indexed a sequence file doesn't "
                                  "support this.")
    def close(self):
        """Close the file handle being used to read the data.

        Once called, further use of the index won't work. The sole purpose
        of this method is to allow explicit handle closure - for example
        if you wish to delete the file, on Windows you must first close
        all open handles to that file.
        """
        self._proxy._handle.close()
class _SQLiteManySeqFilesDict(_IndexedSeqFileDict):
    """Read only dictionary interface to many sequential record files.

    This code is used in both Bio.SeqIO for indexing as SeqRecord
    objects, and in Bio.SearchIO for indexing QueryResult objects.

    Keeps the keys, file-numbers and offsets in an SQLite database. To access
    a record by key, reads from the offset in the appropriate file and then
    parses the record into an object.

    There are OS limits on the number of files that can be open at once,
    so a pool are kept. If a record is required from a closed file, then
    one of the open handles is closed first.
    """
    def __init__(self, index_filename, filenames,
                 proxy_factory, format,
                 key_function, repr, max_open=10):
        """Loads or creates an SQLite based index."""
        # TODO? - Don't keep filename list in memory (just in DB)?
        # Should save a chunk of memory if dealing with 1000s of files.
        # Furthermore could compare a generator to the DB on reloading
        # (no need to turn it into a list)
        if not _sqlite:
            # Hack for Jython (of if Python is compiled without it)
            from Bio import MissingPythonDependencyError
            raise MissingPythonDependencyError("Requires sqlite3, which is "
                                               "included Python 2.5+")
        if filenames is not None:
            filenames = list(filenames)  # In case it was a generator
        # Cache the arguments as private variables
        self._index_filename = index_filename
        self._filenames = filenames
        self._format = format
        self._key_function = key_function
        self._proxy_factory = proxy_factory
        self._repr = repr
        self._max_open = max_open
        # Pool of open proxies, keyed by file number (bounded by max_open).
        self._proxies = {}
        # Note if using SQLite :memory: trick index filename, this will
        # give $PWD as the relative path (which is fine).
        self._relative_path = os.path.abspath(os.path.dirname(index_filename))
        if os.path.isfile(index_filename):
            self._load_index()
        else:
            self._build_index()
    def _load_index(self):
        """Called from __init__ to re-use an existing index (PRIVATE)."""
        index_filename = self._index_filename
        relative_path = self._relative_path
        filenames = self._filenames
        format = self._format
        proxy_factory = self._proxy_factory
        con = _sqlite.connect(index_filename)
        self._con = con
        # Check the count...
        try:
            count, = con.execute(
                "SELECT value FROM meta_data WHERE key=?;",
                ("count",)).fetchone()
            self._length = int(count)
            # A count of -1 is written at build time and only updated to the
            # real value once indexing finished - see _build_index.
            if self._length == -1:
                con.close()
                raise ValueError("Unfinished/partial database")
            count, = con.execute(
                "SELECT COUNT(key) FROM offset_data;").fetchone()
            if self._length != int(count):
                con.close()
                raise ValueError("Corrupt database? %i entries not %i"
                                 % (int(count), self._length))
            self._format, = con.execute(
                "SELECT value FROM meta_data WHERE key=?;",
                ("format",)).fetchone()
            if format and format != self._format:
                con.close()
                raise ValueError("Index file says format %s, not %s"
                                 % (self._format, format))
            try:
                filenames_relative_to_index, = con.execute(
                    "SELECT value FROM meta_data WHERE key=?;",
                    ("filenames_relative_to_index",)).fetchone()
                filenames_relative_to_index = (filenames_relative_to_index.upper() == "TRUE")
            except TypeError:
                # fetchone() returned None, i.e. the meta_data row is absent.
                # Original behaviour, assume if meta_data missing
                filenames_relative_to_index = False
            self._filenames = [row[0] for row in
                               con.execute("SELECT name FROM file_data "
                                           "ORDER BY file_number;").fetchall()]
            if filenames_relative_to_index:
                # Not implicitly relative to $PWD, explicitly relative to index file
                relative_path = os.path.abspath(os.path.dirname(index_filename))
                tmp = []
                for f in self._filenames:
                    if os.path.isabs(f):
                        tmp.append(f)
                    else:
                        # Would be stored with Unix / path separator, so convert
                        # it to the local OS path separator here:
                        tmp.append(os.path.join(relative_path, f.replace("/", os.path.sep)))
                self._filenames = tmp
                del tmp
            if filenames and len(filenames) != len(self._filenames):
                con.close()
                raise ValueError("Index file says %i files, not %i"
                                 % (len(self._filenames), len(filenames)))
            if filenames and filenames != self._filenames:
                for old, new in zip(self._filenames, filenames):
                    # Want exact match (after making relative to the index above)
                    if os.path.abspath(old) != os.path.abspath(new):
                        con.close()
                        if filenames_relative_to_index:
                            raise ValueError("Index file has different filenames, e.g. %r != %r"
                                             % (os.path.abspath(old), os.path.abspath(new)))
                        else:
                            raise ValueError("Index file has different filenames "
                                             "[This is an old index where any relative paths "
                                             "were relative to the original working directory]. "
                                             "e.g. %r != %r"
                                             % (os.path.abspath(old), os.path.abspath(new)))
                # Filenames are equal (after imposing abspath)
        except _OperationalError as err:
            con.close()
            raise ValueError("Not a Biopython index database? %s" % err)
        # Now we have the format (from the DB if not given to us),
        if not proxy_factory(self._format):
            con.close()
            raise ValueError("Unsupported format '%s'" % self._format)
    def _build_index(self):
        """Called from __init__ to create a new index (PRIVATE)."""
        index_filename = self._index_filename
        relative_path = self._relative_path
        filenames = self._filenames
        format = self._format
        key_function = self._key_function
        proxy_factory = self._proxy_factory
        max_open = self._max_open
        random_access_proxies = self._proxies
        if not format or not filenames:
            raise ValueError("Filenames to index and format required to build %r" % index_filename)
        if not proxy_factory(format):
            raise ValueError("Unsupported format '%s'" % format)
        # Create the index
        con = _sqlite.connect(index_filename)
        self._con = con
        # print("Creating index")
        # Sqlite PRAGMA settings for speed
        con.execute("PRAGMA synchronous=OFF")
        con.execute("PRAGMA locking_mode=EXCLUSIVE")
        # Don't index the key column until the end (faster)
        # con.execute("CREATE TABLE offset_data (key TEXT PRIMARY KEY, "
        # "offset INTEGER);")
        con.execute("CREATE TABLE meta_data (key TEXT, value TEXT);")
        # "count" of -1 marks the index as unfinished; the real value is
        # written once indexing completes (checked in _load_index).
        con.execute("INSERT INTO meta_data (key, value) VALUES (?,?);",
                    ("count", -1))
        con.execute("INSERT INTO meta_data (key, value) VALUES (?,?);",
                    ("format", format))
        con.execute("INSERT INTO meta_data (key, value) VALUES (?,?);",
                    ("filenames_relative_to_index", "True"))
        # TODO - Record the alphabet?
        # TODO - Record the file size and modified date?
        con.execute(
            "CREATE TABLE file_data (file_number INTEGER, name TEXT);")
        con.execute("CREATE TABLE offset_data (key TEXT, file_number INTEGER, offset INTEGER, length INTEGER);")
        count = 0
        for i, filename in enumerate(filenames):
            # Default to storing as an absolute path,
            f = os.path.abspath(filename)
            if not os.path.isabs(filename) and not os.path.isabs(index_filename):
                # Since user gave BOTH filename & index as relative paths,
                # we will store this relative to the index file even though
                # if it may now start ../ (meaning up a level)
                # Note for cross platfrom use (e.g. shared data drive over SAMBA),
                # convert any Windows slash into Unix style / for relative paths.
                f = os.path.relpath(filename, relative_path).replace(os.path.sep, "/")
            elif (os.path.dirname(os.path.abspath(filename)) + os.path.sep).startswith(relative_path + os.path.sep):
                # Since sequence file is in same directory or sub directory,
                # might as well make this into a relative path:
                f = os.path.relpath(filename, relative_path).replace(os.path.sep, "/")
                assert not f.startswith("../"), f
            # print("DEBUG - storing %r as [%r] %r" % (filename, relative_path, f))
            con.execute(
                "INSERT INTO file_data (file_number, name) VALUES (?,?);",
                (i, f))
            random_access_proxy = proxy_factory(format, filename)
            if key_function:
                offset_iter = ((key_function(k), i, o, l)
                               for (k, o, l) in random_access_proxy)
            else:
                offset_iter = ((k, i, o, l)
                               for (k, o, l) in random_access_proxy)
            # Insert offsets in batches of 100 to bound memory use.
            while True:
                batch = list(itertools.islice(offset_iter, 100))
                if not batch:
                    break
                # print("Inserting batch of %i offsets, %s ... %s"
                # % (len(batch), batch[0][0], batch[-1][0]))
                con.executemany(
                    "INSERT INTO offset_data (key,file_number,offset,length) VALUES (?,?,?,?);",
                    batch)
                con.commit()
                count += len(batch)
            if len(random_access_proxies) < max_open:
                random_access_proxies[i] = random_access_proxy
            else:
                random_access_proxy._handle.close()
        self._length = count
        # print("About to index %i entries" % count)
        try:
            # The UNIQUE index doubles as the duplicate-key check.
            con.execute("CREATE UNIQUE INDEX IF NOT EXISTS "
                        "key_index ON offset_data(key);")
        except _IntegrityError as err:
            self._proxies = random_access_proxies
            self.close()
            con.close()
            raise ValueError("Duplicate key? %s" % err)
        con.execute("PRAGMA locking_mode=NORMAL")
        con.execute("UPDATE meta_data SET value = ? WHERE key = ?;",
                    (count, "count"))
        con.commit()
        # print("Index created")
    def __repr__(self):
        return self._repr
    def __contains__(self, key):
        return bool(
            self._con.execute("SELECT key FROM offset_data WHERE key=?;",
                              (key,)).fetchone())
    def __len__(self):
        """How many records are there?"""
        return self._length
        # return self._con.execute("SELECT COUNT(key) FROM offset_data;").fetchone()[0]
    def __iter__(self):
        """Iterate over the keys."""
        for row in self._con.execute("SELECT key FROM offset_data;"):
            yield str(row[0])
    if hasattr(dict, "iteritems"):
        # Python 2, use iteritems but not items etc
        # Just need to override this...
        def keys(self):
            """Return a list of all the keys (SeqRecord identifiers)."""
            return [str(row[0]) for row in
                    self._con.execute("SELECT key FROM offset_data;").fetchall()]
    def __getitem__(self, key):
        """x.__getitem__(y) <==> x[y]"""
        # Pass the offset to the proxy
        row = self._con.execute(
            "SELECT file_number, offset FROM offset_data WHERE key=?;",
            (key,)).fetchone()
        if not row:
            raise KeyError
        file_number, offset = row
        proxies = self._proxies
        if file_number in proxies:
            record = proxies[file_number].get(offset)
        else:
            if len(proxies) >= self._max_open:
                # Close an old handle...
                proxies.popitem()[1]._handle.close()
            # Open a new handle...
            proxy = self._proxy_factory(self._format, self._filenames[file_number])
            record = proxy.get(offset)
            proxies[file_number] = proxy
        # Sanity check: the record at that offset must map back to the key.
        if self._key_function:
            key2 = self._key_function(record.id)
        else:
            key2 = record.id
        if key != key2:
            raise ValueError("Key did not match (%s vs %s)" % (key, key2))
        return record
    def get(self, k, d=None):
        """D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
        try:
            return self.__getitem__(k)
        except KeyError:
            return d
    def get_raw(self, key):
        """Similar to the get method, but returns the record as a raw string.

        If the key is not found, a KeyError exception is raised.

        Note that on Python 3 a bytes string is returned, not a typical
        unicode string.

        **NOTE** - This functionality is not supported for every file format.
        """
        # Pass the offset to the proxy
        row = self._con.execute(
            "SELECT file_number, offset, length FROM offset_data WHERE key=?;",
            (key,)).fetchone()
        if not row:
            raise KeyError
        file_number, offset, length = row
        proxies = self._proxies
        if file_number in proxies:
            if length:
                # Shortcut if we have the length
                h = proxies[file_number]._handle
                h.seek(offset)
                return h.read(length)
            else:
                return proxies[file_number].get_raw(offset)
        else:
            # This code is duplicated from __getitem__ to avoid a function call
            if len(proxies) >= self._max_open:
                # Close an old handle...
                proxies.popitem()[1]._handle.close()
            # Open a new handle...
            proxy = self._proxy_factory(self._format, self._filenames[file_number])
            proxies[file_number] = proxy
            if length:
                # Shortcut if we have the length
                h = proxy._handle
                h.seek(offset)
                return h.read(length)
            else:
                return proxy.get_raw(offset)
    def close(self):
        """Close any open file handles."""
        proxies = self._proxies
        while proxies:
            proxies.popitem()[1]._handle.close()
|
|
#!/usr/bin/env python3
import datetime
import logging
import math
import os
import random
import re
import signal
import subprocess
import sys
import telnetlib
import time
import IPy
import vrnetlab
def handle_SIGCHLD(signal, frame):
    """Reap dead child processes so they don't linger as zombies.

    SIGCHLD deliveries are not queued, so several children may have
    exited by the time the handler runs: keep reaping until no more
    zombies are waiting.
    """
    try:
        # waitpid(-1, WNOHANG) returns (0, 0) once no child is ready.
        while os.waitpid(-1, os.WNOHANG)[0] > 0:
            pass
    except ChildProcessError:
        # No child processes at all (was an unhandled exception before).
        pass
def handle_SIGTERM(signal, frame):
    """Exit cleanly (status 0) when asked to terminate."""
    # Equivalent to sys.exit(0): raises SystemExit with code 0.
    raise SystemExit(0)
# Exit cleanly on Ctrl-C or termination, and reap children as they die.
signal.signal(signal.SIGINT, handle_SIGTERM)
signal.signal(signal.SIGTERM, handle_SIGTERM)
signal.signal(signal.SIGCHLD, handle_SIGCHLD)
# Custom TRACE log level, numerically below DEBUG (10).
TRACE_LEVEL_NUM = 9
logging.addLevelName(TRACE_LEVEL_NUM, "TRACE")

def trace(self, message, *args, **kws):
    """Log *message* at the custom TRACE level."""
    if not self.isEnabledFor(TRACE_LEVEL_NUM):
        return
    # Yes, logger takes its '*args' as 'args'.
    self._log(TRACE_LEVEL_NUM, message, args, **kws)

# Make the method available on every Logger instance.
logging.Logger.trace = trace
def mangle_uuid(uuid):
    """ Mangle the UUID to fix endianness mismatch on first part
    """
    part1, part2, part3, part4, part5 = uuid.split("-")
    # Reverse the byte order of the first three fields (endianness fix);
    # the last two fields are left as-is.
    return "-".join([
        uuid_rev_part(part1),
        uuid_rev_part(part2),
        uuid_rev_part(part3),
        part4,
        part5,
    ])
def uuid_rev_part(part):
    """Return *part* (a hex string) with its byte order reversed.

    The string is treated as a sequence of two-character bytes,
    e.g. "12345678" -> "78563412".
    """
    byte_pairs = [part[i:i + 2] for i in range(0, len(part), 2)]
    return "".join(reversed(byte_pairs))
class SROS_vm(vrnetlab.VM):
    """ Base class for the SROS qemu VMs (integrated, CP and LC variants).
    """

    def __init__(self, username, password, num=0):
        super(SROS_vm, self).__init__(username, password, disk_image = "/sros.qcow2", num=num)
        # All-zero UUID means "no license"; read_license() overrides these
        # when /tftpboot/license.txt is present and parseable.
        self.uuid = "00000000-0000-0000-0000-000000000000"
        self.fake_start_date = None
        self.read_license()

    def bootstrap_spin(self):
        """ This function should be called periodically to do work.
        """
        if self.spins > 60:
            # too many spins with no result, probably means SROS hasn't started
            # successfully, so we restart it
            self.logger.warning("no output from serial console, restarting VM")
            self.stop()
            self.start()
            self.spins = 0
            return

        (ridx, match, res) = self.tn.expect([b"Login:", b"^[^ ]+#"], 1)
        if match:  # got a match!
            if ridx == 0:  # matched login prompt, so should login
                self.logger.debug("matched login prompt")
                self.wait_write("admin", wait=None)
                self.wait_write("admin", wait="Password:")
            # run main config!
            self.bootstrap_config()
            # close telnet connection
            self.tn.close()
            # calc startup time
            startup_time = datetime.datetime.now() - self.start_time
            self.logger.info("Startup complete in: %s" % startup_time)
            self.running = True
            return

        # no match, if we saw some output from the router it's probably
        # booting, so let's give it some more time
        if res != b'':
            self.logger.trace("OUTPUT: %s" % res.decode())
            # reset spins if we saw some output
            self.spins = 0

        self.spins += 1
        return

    def read_license(self):
        """ Read the license file, if it exists, and extract the UUID and start
            time of the license
        """
        if not os.path.isfile("/tftpboot/license.txt"):
            self.logger.info("No license file found")
            return

        # was: manual open/read/close - use a context manager so the
        # handle is closed even if read() raises
        with open("/tftpboot/license.txt", "r") as lic_file:
            license = lic_file.read()
        try:
            uuid_input = license.split(" ")[0]
            self.uuid = mangle_uuid(uuid_input)
            m = re.search("([0-9]{4}-[0-9]{2}-)([0-9]{2})", license)
            if m:
                # fake hardware clock one day past the license start date
                self.fake_start_date = "%s%02d" % (m.group(1), int(m.group(2))+1)
        except (IndexError, ValueError) as err:
            # was: bare 'except:', which also swallowed SystemExit etc.
            raise ValueError("Unable to parse license file") from err
        # fake_start_date stays None when the date regex did not match
        # (previously this log line raised AttributeError in that case)
        self.logger.info("License file found for UUID %s with start date %s" % (self.uuid, self.fake_start_date))
class SROS_integrated(SROS_vm):
    """ Integrated VSR-SIM
    """

    def __init__(self, username, password):
        super(SROS_integrated, self).__init__(username, password)
        self.num_nics = 5
        self.smbios = ["type=1,product=TIMOS:address=10.0.0.15/24@active license-file=tftp://10.0.0.2/license.txt slot=A chassis=SR-c12 card=cfm-xp-b mda/1=m20-1gb-xp-sfp"]

    def gen_mgmt(self):
        """ Generate mgmt interface(s)

            We override the default function since we want a fake NIC in there
        """
        # the first mgmt interface (e1000) comes from the parent class
        res = super(SROS_integrated, self).gen_mgmt()
        # extra e1000 NIC on a dummy tap for the internal control plane
        res.extend(["-device",
                    "e1000,netdev=dummy0,mac=%s" % vrnetlab.gen_mac(1)])
        res.extend(["-netdev",
                    "tap,ifname=dummy0,id=dummy0,script=no,downscript=no"])
        return res

    def bootstrap_config(self):
        """ Do the actual bootstrap config
        """
        if self.username and self.password:
            self.wait_write("configure system security user \"%s\" password %s" % (self.username, self.password))
            self.wait_write("configure system security user \"%s\" access console netconf" % (self.username))
            self.wait_write("configure system security user \"%s\" console member \"administrative\" \"default\"" % (self.username))
        self.wait_write("configure system netconf no shutdown")
        self.wait_write("configure system security profile \"administrative\" netconf base-op-authorization lock")
        # wipe the default card config, then set up IOM-XP with one MDA
        for cmd in ("configure card 1 mda 1 shutdown",
                    "configure card 1 mda 1 no mda-type",
                    "configure card 1 shutdown",
                    "configure card 1 no card-type",
                    "configure card 1 card-type iom-xp-b",
                    "configure card 1 mcm 1 mcm-type mcm-xp",
                    "configure card 1 mda 1 mda-type m20-1gb-xp-sfp",
                    "configure card 1 no shutdown"):
            self.wait_write(cmd)
        self.wait_write("admin save")
        self.wait_write("logout")
class SROS_cp(SROS_vm):
    """ Control plane for distributed VSR-SIM
    """

    def __init__(self, username, password, num_lc=1):
        super(SROS_cp, self).__init__(username, password)
        self.num_lc = num_lc
        self.num_nics = 0
        self.smbios = ["type=1,product=TIMOS:address=10.0.0.15/24@active license-file=tftp://10.0.0.2/license.txt chassis=XRS-20 slot=A card=cpm-x20"]

    def start(self):
        # parent class does the heavy lifting
        super(SROS_cp, self).start()
        # hook the control plane NIC up to the internal CP bridge
        vrnetlab.run_command(["brctl", "addif", "int_cp", "vcp-int"])
        vrnetlab.run_command(["ip", "link", "set", "vcp-int", "up"])

    def gen_mgmt(self):
        """ Generate mgmt interface(s)

            We override the default function since we want a NIC to the vFPC
        """
        # the first mgmt interface (e1000) comes from the parent class
        res = super(SROS_cp, self).gen_mgmt()
        # e1000 NIC for the internal control plane interface
        res.extend(["-device",
                    "e1000,netdev=vcp-int,mac=%s" % vrnetlab.gen_mac(1)])
        res.extend(["-netdev",
                    "tap,ifname=vcp-int,id=vcp-int,script=no,downscript=no"])
        return res

    def bootstrap_config(self):
        """ Do the actual bootstrap config
        """
        if self.username and self.password:
            self.wait_write("configure system security user \"%s\" password %s" % (self.username, self.password))
            self.wait_write("configure system security user \"%s\" access console netconf" % (self.username))
            self.wait_write("configure system security user \"%s\" console member \"administrative\" \"default\"" % (self.username))
        self.wait_write("configure system netconf no shutdown")
        self.wait_write("configure system security profile \"administrative\" netconf base-op-authorization lock")
        # configure SFMs 1..15
        for sfm in range(1, 16):
            self.wait_write("configure sfm {} sfm-type sfm-x20-b".format(sfm))
        # configure line cards & their MDAs
        for slot in range(1, self.num_lc + 1):
            self.wait_write("configure card {} card-type xcm-x20".format(slot))
            self.wait_write("configure card {} mda 1 mda-type cx20-10g-sfp".format(slot))
        self.wait_write("admin save")
        self.wait_write("logout")
class SROS_lc(SROS_vm):
    """ Line card for distributed VSR-SIM
    """

    def __init__(self, slot=1):
        # line cards have no login of their own, hence username/password None
        super(SROS_lc, self).__init__(None, None, num=slot)
        self.slot = slot
        self.num_nics = 6
        self.smbios = ["type=1,product=TIMOS:chassis=XRS-20 slot={} card=xcm-x20 mda/1=cx20-10g-sfp".format(slot)]

    def start(self):
        # parent class does the heavy lifting
        super(SROS_lc, self).start()
        # hook this line card up to the internal CP bridge
        vrnetlab.run_command(["brctl", "addif", "int_cp", "vfpc{}-int".format(self.slot)])
        vrnetlab.run_command(["ip", "link", "set", "vfpc{}-int".format(self.slot), "up"])

    def gen_mgmt(self):
        """ Generate mgmt interface
        """
        res = []
        # mgmt interface
        res.extend(["-device", "e1000,netdev=mgmt,mac=%s" % vrnetlab.gen_mac(0)])
        res.extend(["-netdev", "user,id=mgmt,net=10.0.0.0/24"])
        # internal control plane interface to vFPC
        res.extend(["-device", "e1000,netdev=vfpc-int,mac=%s" %
                    vrnetlab.gen_mac(0)])
        res.extend(["-netdev",
                    "tap,ifname=vfpc{}-int,id=vfpc-int,script=no,downscript=no".format(self.slot)])
        return res

    def gen_nics(self):
        """ Generate qemu args for the normal traffic carrying interface(s)
        """
        # TODO: should this offset business be put in the common vrnetlab?
        base = 6 * (self.slot - 1)
        res = []
        for local_idx in range(self.num_nics):
            nic = base + local_idx + 1
            res.extend(["-device",
                        self.nic_type + ",netdev=p%(i)02d,mac=%(mac)s"
                        % {'i': nic, 'mac': vrnetlab.gen_mac(nic)}])
            res.extend(["-netdev",
                        "socket,id=p%(i)02d,listen=:100%(i)02d"
                        % {'i': nic}])
        return res

    def bootstrap_spin(self):
        """ We have nothing to do for VSR-SIM line cards
        """
        self.running = True
        self.tn.close()
        return
class SROS(vrnetlab.VR):
    """Top-level VR that picks integrated vs distributed VSR-SIM.

    More than 5 NICs triggers the distributed layout (one control-plane VM
    plus one line-card VM per 6 ports), which requires a license file.
    """

    def __init__(self, username, password, num_nics):
        super(SROS, self).__init__(username, password)

        # move files into place; raw strings so the regex escapes are
        # interpreted by re, not mangled by the Python string parser
        for e in os.listdir("/"):
            if re.search(r"\.qcow2$", e):
                os.rename("/" + e, "/sros.qcow2")
            if re.search(r"\.license$", e):
                os.rename("/" + e, "/tftpboot/license.txt")

        self.license = False
        if os.path.isfile("/tftpboot/license.txt"):
            self.logger.info("License found")
            self.license = True

        self.logger.info("Number of NICS: " + str(num_nics))
        # if we have more than 5 NICs we use distributed VSR-SIM
        if num_nics > 5:
            if not self.license:
                self.logger.error("More than 5 NICs require distributed VSR which requires a license but no license is found")
                sys.exit(1)
            # each line card carries 6 ports; round up so every NIC is covered.
            # int() guards against math.ceil returning a float (true division)
            num_lc = int(math.ceil(num_nics / 6))
            self.logger.info("Number of linecards: " + str(num_lc))
            self.vms = [SROS_cp(username, password, num_lc=num_lc)]
            for i in range(1, num_lc + 1):
                self.vms.append(SROS_lc(i))
        else:  # 5 ports or less means integrated VSR-SIM
            self.vms = [SROS_integrated(username, password)]

        # set up bridge for connecting CP with LCs
        vrnetlab.run_command(["brctl", "addbr", "int_cp"])
        vrnetlab.run_command(["ip", "link", "set", "int_cp", "up"])
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--trace', action='store_true', help='enable trace level logging')
    parser.add_argument('--username', default='vrnetlab', help='Username')
    parser.add_argument('--password', default='VR-netlab9', help='Password')
    # type=int: argparse validates the value and reports a clear error,
    # instead of a ValueError from a later int() cast
    parser.add_argument('--num-nics', type=int, default=5, help='Number of NICs')
    args = parser.parse_args()

    LOG_FORMAT = "%(asctime)s: %(module)-10s %(levelname)-8s %(message)s"
    logging.basicConfig(format=LOG_FORMAT)
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    if args.trace:
        # level 1 is below DEBUG, so trace-level records are emitted too
        logger.setLevel(1)

    ia = SROS(args.username, args.password, num_nics=args.num_nics)
    ia.start()
|
|
import pytest
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index)
from pandas._libs.tslib import iNaT
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
    """Reductions, arithmetic, repr/summary formatting, sorting, indexing
    and NaT handling on TimedeltaIndex (and Series wrapping it)."""
    def setup_method(self, method):
        # restrict the Ops fixtures to TimedeltaIndex objects only
        super(TestTimedeltaIndexOps, self).setup_method(method)
        mask = lambda x: isinstance(x, TimedeltaIndex)
        self.is_valid_objs = [o for o in self.objs if mask(o)]
        self.not_valid_objs = []
    def test_ops_properties(self):
        f = lambda x: isinstance(x, TimedeltaIndex)
        self.check_ops_properties(TimedeltaIndex._field_ops, f)
        self.check_ops_properties(TimedeltaIndex._object_ops, f)
    def test_asobject_tolist(self):
        # .asobject yields an object-dtype Index with Timedelta/NaT scalars
        idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
        expected_list = [Timedelta('1 days'), Timedelta('2 days'),
                         Timedelta('3 days'), Timedelta('4 days')]
        expected = pd.Index(expected_list, dtype=object, name='idx')
        result = idx.asobject
        assert isinstance(result, Index)
        assert result.dtype == object
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name
        assert idx.tolist() == expected_list
        idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
                              timedelta(days=4)], name='idx')
        expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
                         Timedelta('4 days')]
        expected = pd.Index(expected_list, dtype=object, name='idx')
        result = idx.asobject
        assert isinstance(result, Index)
        assert result.dtype == object
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name
        assert idx.tolist() == expected_list
    def test_minmax(self):
        # monotonic
        idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
        assert idx1.is_monotonic
        # non-monotonic
        idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
        assert not idx2.is_monotonic
        # min/max/argmin/argmax skip NaT
        for idx in [idx1, idx2]:
            assert idx.min() == Timedelta('1 days')
            assert idx.max() == Timedelta('3 days')
            assert idx.argmin() == 0
            assert idx.argmax() == 2
        for op in ['min', 'max']:
            # Return NaT
            obj = TimedeltaIndex([])
            assert pd.isna(getattr(obj, op)())
            obj = TimedeltaIndex([pd.NaT])
            assert pd.isna(getattr(obj, op)())
            obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
            assert pd.isna(getattr(obj, op)())
    def test_numpy_minmax(self):
        # datetime64 values reinterpreted as timedeltas since the epoch
        dr = pd.date_range(start='2016-01-15', end='2016-01-20')
        td = TimedeltaIndex(np.asarray(dr))
        assert np.min(td) == Timedelta('16815 days')
        assert np.max(td) == Timedelta('16820 days')
        errmsg = "the 'out' parameter is not supported"
        tm.assert_raises_regex(ValueError, errmsg, np.min, td, out=0)
        tm.assert_raises_regex(ValueError, errmsg, np.max, td, out=0)
        assert np.argmin(td) == 0
        assert np.argmax(td) == 5
        if not _np_version_under1p10:
            errmsg = "the 'out' parameter is not supported"
            tm.assert_raises_regex(
                ValueError, errmsg, np.argmin, td, out=0)
            tm.assert_raises_regex(
                ValueError, errmsg, np.argmax, td, out=0)
    def test_round(self):
        td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
        elt = td[1]
        expected_rng = TimedeltaIndex([
            Timedelta('16801 days 00:00:00'),
            Timedelta('16801 days 00:00:00'),
            Timedelta('16801 days 01:00:00'),
            Timedelta('16801 days 02:00:00'),
            Timedelta('16801 days 02:00:00'),
        ])
        expected_elt = expected_rng[1]
        tm.assert_index_equal(td.round(freq='H'), expected_rng)
        assert elt.round(freq='H') == expected_elt
        # invalid frequency string
        msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
        with tm.assert_raises_regex(ValueError, msg):
            td.round(freq='foo')
        with tm.assert_raises_regex(ValueError, msg):
            elt.round(freq='foo')
        # non-fixed frequencies cannot be used for rounding
        msg = "<MonthEnd> is a non-fixed frequency"
        tm.assert_raises_regex(ValueError, msg, td.round, freq='M')
        tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
    def test_representation(self):
        idx1 = TimedeltaIndex([], freq='D')
        idx2 = TimedeltaIndex(['1 days'], freq='D')
        idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
        idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
        idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
        exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
        exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
                "freq='D')")
        exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
                "dtype='timedelta64[ns]', freq='D')")
        exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
                "dtype='timedelta64[ns]', freq='D')")
        exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
                "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
        with pd.option_context('display.width', 300):
            for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                     [exp1, exp2, exp3, exp4, exp5]):
                for func in ['__repr__', '__unicode__', '__str__']:
                    result = getattr(idx, func)()
                    assert result == expected
    def test_representation_to_series(self):
        idx1 = TimedeltaIndex([], freq='D')
        idx2 = TimedeltaIndex(['1 days'], freq='D')
        idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
        idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
        idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
        exp1 = """Series([], dtype: timedelta64[ns])"""
        exp2 = """0 1 days
dtype: timedelta64[ns]"""
        exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
        exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
        exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
        with pd.option_context('display.width', 300):
            for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                     [exp1, exp2, exp3, exp4, exp5]):
                result = repr(pd.Series(idx))
                assert result == expected
    def test_summary(self):
        # GH9116
        idx1 = TimedeltaIndex([], freq='D')
        idx2 = TimedeltaIndex(['1 days'], freq='D')
        idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
        idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
        idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
        exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
        exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
        exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
        exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
        exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
                "00:00:00")
        for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                 [exp1, exp2, exp3, exp4, exp5]):
            result = idx.summary()
            assert result == expected
    def test_add_iadd(self):
        # only test adding/sub offsets as + is now numeric
        # offset
        offsets = [pd.offsets.Hour(2), timedelta(hours=2),
                   np.timedelta64(2, 'h'), Timedelta(hours=2)]
        for delta in offsets:
            rng = timedelta_range('1 days', '10 days')
            result = rng + delta
            expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
                                       freq='D')
            tm.assert_index_equal(result, expected)
            rng += delta
            tm.assert_index_equal(rng, expected)
        # int
        rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
        result = rng + 1
        expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
        tm.assert_index_equal(result, expected)
        rng += 1
        tm.assert_index_equal(rng, expected)
    def test_sub_isub(self):
        # only test adding/sub offsets as - is now numeric
        # offset
        offsets = [pd.offsets.Hour(2), timedelta(hours=2),
                   np.timedelta64(2, 'h'), Timedelta(hours=2)]
        for delta in offsets:
            rng = timedelta_range('1 days', '10 days')
            result = rng - delta
            expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
            tm.assert_index_equal(result, expected)
            rng -= delta
            tm.assert_index_equal(rng, expected)
        # int
        rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
        result = rng - 1
        expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
        tm.assert_index_equal(result, expected)
        rng -= 1
        tm.assert_index_equal(rng, expected)
        # tdi - Timestamp is invalid, but Timestamp + tdi is a DatetimeIndex
        idx = TimedeltaIndex(['1 day', '2 day'])
        msg = "cannot subtract a datelike from a TimedeltaIndex"
        with tm.assert_raises_regex(TypeError, msg):
            idx - Timestamp('2011-01-01')
        result = Timestamp('2011-01-01') + idx
        expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
        tm.assert_index_equal(result, expected)
    def test_ops_compat(self):
        offsets = [pd.offsets.Hour(2), timedelta(hours=2),
                   np.timedelta64(2, 'h'), Timedelta(hours=2)]
        rng = timedelta_range('1 days', '10 days', name='foo')
        # multiply
        for offset in offsets:
            pytest.raises(TypeError, lambda: rng * offset)
        # divide
        expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
        for offset in offsets:
            result = rng / offset
            tm.assert_index_equal(result, expected, exact=False)
        # floor divide
        expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
        for offset in offsets:
            result = rng // offset
            tm.assert_index_equal(result, expected, exact=False)
        # divide with nats
        rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        expected = Float64Index([12, np.nan, 24], name='foo')
        for offset in offsets:
            result = rng / offset
            tm.assert_index_equal(result, expected)
        # don't allow division by NaT (make could in the future)
        pytest.raises(TypeError, lambda: rng / pd.NaT)
    def test_subtraction_ops(self):
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        dti = date_range('20130101', periods=3, name='bar')
        td = Timedelta('1 days')
        dt = Timestamp('20130101')
        pytest.raises(TypeError, lambda: tdi - dt)
        pytest.raises(TypeError, lambda: tdi - dti)
        pytest.raises(TypeError, lambda: td - dt)
        pytest.raises(TypeError, lambda: td - dti)
        result = dt - dti
        expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
        tm.assert_index_equal(result, expected)
        result = dti - dt
        expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
        tm.assert_index_equal(result, expected)
        result = tdi - td
        expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
        tm.assert_index_equal(result, expected, check_names=False)
        result = td - tdi
        expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
        tm.assert_index_equal(result, expected, check_names=False)
        result = dti - td
        expected = DatetimeIndex(
            ['20121231', '20130101', '20130102'], name='bar')
        tm.assert_index_equal(result, expected, check_names=False)
        result = dt - tdi
        expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
        tm.assert_index_equal(result, expected)
    def test_subtraction_ops_with_tz(self):
        # check that dt/dti subtraction ops with tz are validated
        dti = date_range('20130101', periods=3)
        ts = Timestamp('20130101')
        dt = ts.to_pydatetime()
        dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
        ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
        ts_tz2 = Timestamp('20130101').tz_localize('CET')
        dt_tz = ts_tz.to_pydatetime()
        td = Timedelta('1 days')
        def _check(result, expected):
            # subtraction of like-tz datetimes yields a Timedelta scalar
            assert result == expected
            assert isinstance(result, Timedelta)
        # scalars
        result = ts - ts
        expected = Timedelta('0 days')
        _check(result, expected)
        result = dt_tz - ts_tz
        expected = Timedelta('0 days')
        _check(result, expected)
        result = ts_tz - dt_tz
        expected = Timedelta('0 days')
        _check(result, expected)
        # tz mismatches
        pytest.raises(TypeError, lambda: dt_tz - ts)
        pytest.raises(TypeError, lambda: dt_tz - dt)
        pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
        pytest.raises(TypeError, lambda: dt - dt_tz)
        pytest.raises(TypeError, lambda: ts - dt_tz)
        pytest.raises(TypeError, lambda: ts_tz2 - ts)
        pytest.raises(TypeError, lambda: ts_tz2 - dt)
        pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
        # with dti
        pytest.raises(TypeError, lambda: dti - ts_tz)
        pytest.raises(TypeError, lambda: dti_tz - ts)
        pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
        result = dti_tz - dt_tz
        expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
        tm.assert_index_equal(result, expected)
        result = dt_tz - dti_tz
        expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
        tm.assert_index_equal(result, expected)
        result = dti_tz - ts_tz
        expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
        tm.assert_index_equal(result, expected)
        result = ts_tz - dti_tz
        expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
        tm.assert_index_equal(result, expected)
        result = td - td
        expected = Timedelta('0 days')
        _check(result, expected)
        result = dti_tz - td
        expected = DatetimeIndex(
            ['20121231', '20130101', '20130102'], tz='US/Eastern')
        tm.assert_index_equal(result, expected)
    def test_dti_tdi_numeric_ops(self):
        # These are normally union/diff set-like ops
        tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        dti = date_range('20130101', periods=3, name='bar')
        # TODO(wesm): unused?
        # td = Timedelta('1 days')
        # dt = Timestamp('20130101')
        result = tdi - tdi
        expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
        tm.assert_index_equal(result, expected)
        result = tdi + tdi
        expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
        tm.assert_index_equal(result, expected)
        result = dti - tdi  # name will be reset
        expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
        tm.assert_index_equal(result, expected)
    def test_sub_period(self):
        # GH 13078
        # not supported, check TypeError
        p = pd.Period('2011-01-01', freq='D')
        for freq in [None, 'H']:
            idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
            with pytest.raises(TypeError):
                idx - p
            with pytest.raises(TypeError):
                p - idx
    def test_addition_ops(self):
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        dti = date_range('20130101', periods=3, name='bar')
        td = Timedelta('1 days')
        dt = Timestamp('20130101')
        result = tdi + dt
        expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
        tm.assert_index_equal(result, expected)
        result = dt + tdi
        expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
        tm.assert_index_equal(result, expected)
        result = td + tdi
        expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
        tm.assert_index_equal(result, expected)
        result = tdi + td
        expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
        tm.assert_index_equal(result, expected)
        # unequal length
        pytest.raises(ValueError, lambda: tdi + dti[0:1])
        pytest.raises(ValueError, lambda: tdi[0:1] + dti)
        # random indexes
        pytest.raises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
        # this is a union!
        # pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
        result = tdi + dti  # name will be reset
        expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
        tm.assert_index_equal(result, expected)
        result = dti + tdi  # name will be reset
        expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
        tm.assert_index_equal(result, expected)
        result = dt + td
        expected = Timestamp('20130102')
        assert result == expected
        result = td + dt
        expected = Timestamp('20130102')
        assert result == expected
    def test_comp_nat(self):
        # comparisons against NaT are elementwise False (==, <) / True (!=)
        left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
                                  pd.Timedelta('3 days')])
        right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
        for l, r in [(left, right), (left.asobject, right.asobject)]:
            result = l == r
            expected = np.array([False, False, True])
            tm.assert_numpy_array_equal(result, expected)
            result = l != r
            expected = np.array([True, True, False])
            tm.assert_numpy_array_equal(result, expected)
            expected = np.array([False, False, False])
            tm.assert_numpy_array_equal(l == pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT == r, expected)
            expected = np.array([True, True, True])
            tm.assert_numpy_array_equal(l != pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT != l, expected)
            expected = np.array([False, False, False])
            tm.assert_numpy_array_equal(l < pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT > l, expected)
    def test_value_counts_unique(self):
        # GH 7735
        idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
        # create repeated values, 'n'th element is repeated by n+1 times
        idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
        exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
        expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
        for obj in [idx, Series(idx)]:
            tm.assert_series_equal(obj.value_counts(), expected)
        expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
        tm.assert_index_equal(idx.unique(), expected)
        idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
                              '1 days 09:00:00', '1 days 08:00:00',
                              '1 days 08:00:00', pd.NaT])
        exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
        expected = Series([3, 2], index=exp_idx)
        for obj in [idx, Series(idx)]:
            tm.assert_series_equal(obj.value_counts(), expected)
        # dropna=False also counts the NaT bucket
        exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
                                  pd.NaT])
        expected = Series([3, 2, 1], index=exp_idx)
        for obj in [idx, Series(idx)]:
            tm.assert_series_equal(obj.value_counts(dropna=False), expected)
        tm.assert_index_equal(idx.unique(), exp_idx)
    def test_nonunique_contains(self):
        # GH 9512
        for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
                                        ['00:01:00', '00:01:00', '00:02:00'],
                                        ['00:01:00', '00:01:00', '00:00:01'])):
            assert idx[0] in idx
    def test_unknown_attribute(self):
        # see gh-9680
        tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
        ts = pd.Series(np.random.normal(size=10), index=tdi)
        assert 'foo' not in ts.__dict__.keys()
        pytest.raises(AttributeError, lambda: ts.foo)
    def test_order(self):
        # GH 10295
        idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
                              name='idx')
        idx2 = TimedeltaIndex(
            ['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
        for idx in [idx1, idx2]:
            ordered = idx.sort_values()
            tm.assert_index_equal(ordered, idx)
            assert ordered.freq == idx.freq
            ordered = idx.sort_values(ascending=False)
            expected = idx[::-1]
            tm.assert_index_equal(ordered, expected)
            assert ordered.freq == expected.freq
            assert ordered.freq.n == -1
            ordered, indexer = idx.sort_values(return_indexer=True)
            tm.assert_index_equal(ordered, idx)
            tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
                                        check_dtype=False)
            assert ordered.freq == idx.freq
            ordered, indexer = idx.sort_values(return_indexer=True,
                                               ascending=False)
            tm.assert_index_equal(ordered, idx[::-1])
            assert ordered.freq == expected.freq
            assert ordered.freq.n == -1
        idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
                               '2 hour ', '1 hour'], name='idx1')
        exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
                               '3 hour', '5 hour'], name='idx1')
        idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
                               '2 day', '1 day'], name='idx2')
        # TODO(wesm): unused?
        # exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
        #                        '3 day', '5 day'], name='idx2')
        # idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
        #                        '2 minute', pd.NaT], name='idx3')
        # exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
        #                        '5 minute'], name='idx3')
        # NOTE(review): the loop repeats (idx1, exp1) three times; idx2 is
        # built but never exercised here -- confirm whether (idx2, exp2)
        # pairs were intended
        for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
            ordered = idx.sort_values()
            tm.assert_index_equal(ordered, expected)
            assert ordered.freq is None
            ordered = idx.sort_values(ascending=False)
            tm.assert_index_equal(ordered, expected[::-1])
            assert ordered.freq is None
            ordered, indexer = idx.sort_values(return_indexer=True)
            tm.assert_index_equal(ordered, expected)
            exp = np.array([0, 4, 3, 1, 2])
            tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
            assert ordered.freq is None
            ordered, indexer = idx.sort_values(return_indexer=True,
                                               ascending=False)
            tm.assert_index_equal(ordered, expected[::-1])
            exp = np.array([2, 1, 3, 4, 0])
            tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
            assert ordered.freq is None
    def test_getitem(self):
        # slicing preserves (or negates) the freq where possible
        idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
        for idx in [idx1]:
            result = idx[0]
            assert result == pd.Timedelta('1 day')
            result = idx[0:5]
            expected = pd.timedelta_range('1 day', '5 day', freq='D',
                                          name='idx')
            tm.assert_index_equal(result, expected)
            assert result.freq == expected.freq
            result = idx[0:10:2]
            expected = pd.timedelta_range('1 day', '9 day', freq='2D',
                                          name='idx')
            tm.assert_index_equal(result, expected)
            assert result.freq == expected.freq
            result = idx[-20:-5:3]
            expected = pd.timedelta_range('12 day', '24 day', freq='3D',
                                          name='idx')
            tm.assert_index_equal(result, expected)
            assert result.freq == expected.freq
            result = idx[4::-1]
            expected = TimedeltaIndex(['5 day', '4 day', '3 day',
                                       '2 day', '1 day'],
                                      freq='-1D', name='idx')
            tm.assert_index_equal(result, expected)
            assert result.freq == expected.freq
    def test_drop_duplicates_metadata(self):
        # GH 10115
        idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
        result = idx.drop_duplicates()
        tm.assert_index_equal(idx, result)
        assert idx.freq == result.freq
        idx_dup = idx.append(idx)
        assert idx_dup.freq is None  # freq is reset
        result = idx_dup.drop_duplicates()
        tm.assert_index_equal(idx, result)
        assert result.freq is None
    def test_drop_duplicates(self):
        # to check Index/Series compat
        base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
        idx = base.append(base[:5])
        res = idx.drop_duplicates()
        tm.assert_index_equal(res, base)
        res = Series(idx).drop_duplicates()
        tm.assert_series_equal(res, Series(base))
        res = idx.drop_duplicates(keep='last')
        exp = base[5:].append(base[:5])
        tm.assert_index_equal(res, exp)
        res = Series(idx).drop_duplicates(keep='last')
        tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
        res = idx.drop_duplicates(keep=False)
        tm.assert_index_equal(res, base[5:])
        res = Series(idx).drop_duplicates(keep=False)
        tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
    def test_take(self):
        # GH 10295
        idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
        for idx in [idx1]:
            result = idx.take([0])
            assert result == pd.Timedelta('1 day')
            result = idx.take([-1])
            assert result == pd.Timedelta('31 day')
            result = idx.take([0, 1, 2])
            expected = pd.timedelta_range('1 day', '3 day', freq='D',
                                          name='idx')
            tm.assert_index_equal(result, expected)
            assert result.freq == expected.freq
            result = idx.take([0, 2, 4])
            expected = pd.timedelta_range('1 day', '5 day', freq='2D',
                                          name='idx')
            tm.assert_index_equal(result, expected)
            assert result.freq == expected.freq
            result = idx.take([7, 4, 1])
            expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
                                          name='idx')
            tm.assert_index_equal(result, expected)
            assert result.freq == expected.freq
            # non-equidistant takes lose the freq
            result = idx.take([3, 2, 5])
            expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
            tm.assert_index_equal(result, expected)
            assert result.freq is None
            result = idx.take([-3, 2, 5])
            expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
            tm.assert_index_equal(result, expected)
            assert result.freq is None
    def test_take_invalid_kwargs(self):
        idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
        indices = [1, 6, 5, 9, 10, 13, 15, 3]
        msg = r"take\(\) got an unexpected keyword argument 'foo'"
        tm.assert_raises_regex(TypeError, msg, idx.take,
                               indices, foo=2)
        msg = "the 'out' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, idx.take,
                               indices, out=indices)
        msg = "the 'mode' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, idx.take,
                               indices, mode='clip')
    def test_infer_freq(self):
        # GH 11018
        for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
                     ]:
            idx = pd.timedelta_range('1', freq=freq, periods=10)
            result = pd.TimedeltaIndex(idx.asi8, freq='infer')
            tm.assert_index_equal(idx, result)
            assert result.freq == freq
    def test_nat_new(self):
        idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
        result = idx._nat_new()
        exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
        tm.assert_index_equal(result, exp)
        result = idx._nat_new(box=False)
        exp = np.array([iNaT] * 5, dtype=np.int64)
        tm.assert_numpy_array_equal(result, exp)
    def test_shift(self):
        # GH 9903
        idx = pd.TimedeltaIndex([], name='xxx')
        tm.assert_index_equal(idx.shift(0, freq='H'), idx)
        tm.assert_index_equal(idx.shift(3, freq='H'), idx)
        idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
        tm.assert_index_equal(idx.shift(0, freq='H'), idx)
        exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
        tm.assert_index_equal(idx.shift(3, freq='H'), exp)
        exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
        tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
        tm.assert_index_equal(idx.shift(0, freq='T'), idx)
        exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
                                name='xxx')
        tm.assert_index_equal(idx.shift(3, freq='T'), exp)
        exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
                                name='xxx')
        tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
    def test_repeat(self):
        index = pd.timedelta_range('1 days', periods=2, freq='D')
        exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
        for res in [index.repeat(2), np.repeat(index, 2)]:
            tm.assert_index_equal(res, exp)
            assert res.freq is None
        index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
        exp = TimedeltaIndex(['1 days', '1 days', '1 days',
                              'NaT', 'NaT', 'NaT',
                              '3 days', '3 days', '3 days'])
        for res in [index.repeat(3), np.repeat(index, 3)]:
            tm.assert_index_equal(res, exp)
            assert res.freq is None
    def test_nat(self):
        assert pd.TimedeltaIndex._na_value is pd.NaT
        assert pd.TimedeltaIndex([])._na_value is pd.NaT
        idx = pd.TimedeltaIndex(['1 days', '2 days'])
        assert idx._can_hold_na
        tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
        assert not idx.hasnans
        tm.assert_numpy_array_equal(idx._nan_idxs,
                                    np.array([], dtype=np.intp))
        idx = pd.TimedeltaIndex(['1 days', 'NaT'])
        assert idx._can_hold_na
        tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
        assert idx.hasnans
        tm.assert_numpy_array_equal(idx._nan_idxs,
                                    np.array([1], dtype=np.intp))
    def test_equals(self):
        # GH 13107
        idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
        assert idx.equals(idx)
        assert idx.equals(idx.copy())
        assert idx.equals(idx.asobject)
        assert idx.asobject.equals(idx)
        assert idx.asobject.equals(idx.asobject)
        assert not idx.equals(list(idx))
        assert not idx.equals(pd.Series(idx))
        idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
        assert not idx.equals(idx2)
        assert not idx.equals(idx2.copy())
        assert not idx.equals(idx2.asobject)
        assert not idx.asobject.equals(idx2)
        assert not idx.asobject.equals(idx2.asobject)
        assert not idx.equals(list(idx2))
        assert not idx.equals(pd.Series(idx2))
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
assert -td == Timedelta(-10, unit='d')
assert +td == Timedelta(10, unit='d')
assert td - td == Timedelta(0, unit='ns')
assert (td - pd.NaT) is pd.NaT
assert td + td == Timedelta(20, unit='d')
assert (td + pd.NaT) is pd.NaT
assert td * 2 == Timedelta(20, unit='d')
assert (td * pd.NaT) is pd.NaT
assert td / 2 == Timedelta(5, unit='d')
assert td // 2 == Timedelta(5, unit='d')
assert abs(td) == td
assert abs(-td) == td
assert td / td == 1
assert (td / pd.NaT) is np.nan
assert (td // pd.NaT) is np.nan
# invert
assert -td == Timedelta('-10d')
assert td * -1 == Timedelta('-10d')
assert -1 * td == Timedelta('-10d')
assert abs(-td) == Timedelta('10d')
# invalid multiply with another timedelta
pytest.raises(TypeError, lambda: td * td)
# can't operate with integers
pytest.raises(TypeError, lambda: td + 2)
pytest.raises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1)
assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td
assert 240 == td / pd.offsets.Hour(1)
assert 1 / 240.0 == pd.offsets.Hour(1) / td
assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1)
assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other + td, expected)
pytest.raises(TypeError, lambda: td + np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
tm.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(-other + td, expected)
pytest.raises(TypeError, lambda: td - np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
pytest.raises(TypeError, lambda: td * other)
pytest.raises(TypeError, lambda: other * td)
tm.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
tm.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
assert s.dtype == object
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
assert s2.dtype == object
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
assert s.dtype == object
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
assert td.__add__(other) is NotImplemented
assert td.__sub__(other) is NotImplemented
assert td.__truediv__(other) is NotImplemented
assert td.__mul__(other) is NotImplemented
assert td.__floordiv__(other) is NotImplemented
def test_ops_error_str(self):
    # GH 13624: mixing a TimedeltaIndex with a plain string must raise
    # TypeError for addition and for every ordering/equality comparison,
    # in either operand order.
    tdi = TimedeltaIndex(['1 day', '2 days'])
    for left, right in [(tdi, 'a'), ('a', tdi)]:
        with pytest.raises(TypeError):
            left + right
        with pytest.raises(TypeError):
            left > right
        with pytest.raises(TypeError):
            left == right
        with pytest.raises(TypeError):
            left != right
def test_timedelta_ops(self):
    """Reductions on a timedelta64 Series (and its one-column DataFrame)
    must return Timedelta-typed results (GH4984), and ``median()`` must
    ignore NaT entries (GH 10040)."""
    # GH4984
    # make sure ops return Timedelta
    s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
                for i in range(10)])
    td = s.diff()

    result = td.mean()
    expected = to_timedelta(timedelta(seconds=9))
    assert result == expected

    result = td.to_frame().mean()
    assert result[0] == expected

    result = td.quantile(.1)
    expected = Timedelta(np.timedelta64(2600, 'ms'))
    assert result == expected

    result = td.median()
    expected = to_timedelta('00:00:09')
    assert result == expected

    result = td.to_frame().median()
    assert result[0] == expected

    # GH 6462
    # consistency in returned values for sum
    result = td.sum()
    expected = to_timedelta('00:01:21')
    assert result == expected

    result = td.to_frame().sum()
    assert result[0] == expected

    # std
    result = td.std()
    expected = to_timedelta(Series(td.dropna().values).std())
    assert result == expected

    result = td.to_frame().std()
    assert result[0] == expected

    # invalid ops: these reductions are not defined for timedeltas
    for op in ['skew', 'kurt', 'sem', 'prod']:
        pytest.raises(TypeError, getattr(td, op))

    # GH 10040
    # make sure NaT is properly handled by median()
    s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
    assert s.diff().median() == timedelta(days=4)

    s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
                Timestamp('2015-02-15')])
    assert s.diff().median() == timedelta(days=6)
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
assert result == expected_add
result = base - offset
assert result == expected_sub
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
assert result == expected_add
result = base - offset
assert result == expected_sub
def test_timedelta_ops_with_missing_values(self):
    """Timedelta arithmetic must propagate missing values (NaT / np.nan)
    consistently across scalar, Series and DataFrame operands."""
    # setup
    s1 = pd.to_timedelta(Series(['00:00:01']))
    s2 = pd.to_timedelta(Series(['00:00:02']))
    sn = pd.to_timedelta(Series([pd.NaT]))

    df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
    df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
    dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)

    scalar1 = pd.to_timedelta('00:00:01')
    scalar2 = pd.to_timedelta('00:00:02')
    timedelta_NaT = pd.to_timedelta('NaT')
    NA = np.nan

    # scalar <op> scalar
    actual = scalar1 + scalar1
    assert actual == scalar2
    actual = scalar2 - scalar1
    assert actual == scalar1

    # series <op> series
    actual = s1 + s1
    assert_series_equal(actual, s2)
    actual = s2 - s1
    assert_series_equal(actual, s1)

    # series <op> scalar, both operand orders
    actual = s1 + scalar1
    assert_series_equal(actual, s2)
    actual = scalar1 + s1
    assert_series_equal(actual, s2)
    actual = s2 - scalar1
    assert_series_equal(actual, s1)
    actual = -scalar1 + s2
    assert_series_equal(actual, s1)

    # a NaT timedelta operand turns every element missing
    actual = s1 + timedelta_NaT
    assert_series_equal(actual, sn)
    actual = timedelta_NaT + s1
    assert_series_equal(actual, sn)
    actual = s1 - timedelta_NaT
    assert_series_equal(actual, sn)
    actual = -timedelta_NaT + s1
    assert_series_equal(actual, sn)

    # np.nan behaves the same as NaT here
    actual = s1 + NA
    assert_series_equal(actual, sn)
    actual = NA + s1
    assert_series_equal(actual, sn)
    actual = s1 - NA
    assert_series_equal(actual, sn)
    actual = -NA + s1
    assert_series_equal(actual, sn)

    actual = s1 + pd.NaT
    assert_series_equal(actual, sn)
    actual = s2 - pd.NaT
    assert_series_equal(actual, sn)

    # series <op> frame and frame <op> series
    actual = s1 + df1
    assert_frame_equal(actual, df2)
    actual = s2 - df1
    assert_frame_equal(actual, df1)
    actual = df1 + s1
    assert_frame_equal(actual, df2)
    actual = df2 - s1
    assert_frame_equal(actual, df1)

    # frame <op> frame
    actual = df1 + df1
    assert_frame_equal(actual, df2)
    actual = df2 - df1
    assert_frame_equal(actual, df1)

    # frame <op> scalar
    actual = df1 + scalar1
    assert_frame_equal(actual, df2)
    actual = df2 - scalar1
    assert_frame_equal(actual, df1)

    actual = df1 + timedelta_NaT
    assert_frame_equal(actual, dfn)
    actual = df1 - timedelta_NaT
    assert_frame_equal(actual, dfn)
    actual = df1 + NA
    assert_frame_equal(actual, dfn)
    actual = df1 - NA
    assert_frame_equal(actual, dfn)

    actual = df1 + pd.NaT  # NaT is datetime, not timedelta
    assert_frame_equal(actual, dfn)
    actual = df1 - pd.NaT
    assert_frame_equal(actual, dfn)
def test_compare_timedelta_series(self):
# regresssion test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
class TestSlicing(object):
    """Tests for arithmetic on TimedeltaIndex objects."""

    def test_tdi_ops_attributes(self):
        # Arithmetic on a regular TimedeltaIndex must produce the expected
        # values AND keep (or correctly derive) the ``freq`` attribute.
        rng = timedelta_range('2 days', periods=5, freq='2D', name='x')

        result = rng + 1
        exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
        tm.assert_index_equal(result, exp)
        assert result.freq == '2D'

        result = rng - 2
        exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
        tm.assert_index_equal(result, exp)
        assert result.freq == '2D'

        result = rng * 2
        exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
        tm.assert_index_equal(result, exp)
        assert result.freq == '4D'

        result = rng / 2
        exp = timedelta_range('1 days', periods=5, freq='D', name='x')
        tm.assert_index_equal(result, exp)
        assert result.freq == 'D'

        result = -rng
        exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
        tm.assert_index_equal(result, exp)
        assert result.freq == '-2D'

        # abs() yields non-monotonic values here, so no freq can be kept
        rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
        result = abs(rng)
        exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
                              '2 days'], name='x')
        tm.assert_index_equal(result, exp)
        assert result.freq is None

    def test_add_overflow(self):
        # see gh-14068
        msg = "too (big|large) to convert"
        with tm.assert_raises_regex(OverflowError, msg):
            to_timedelta(106580, 'D') + Timestamp('2000')
        with tm.assert_raises_regex(OverflowError, msg):
            Timestamp('2000') + to_timedelta(106580, 'D')

        # one past the NaT sentinel: a real (extreme) int64 value
        _NaT = int(pd.NaT) + 1
        msg = "Overflow in int64 addition"
        with tm.assert_raises_regex(OverflowError, msg):
            to_timedelta([106580], 'D') + Timestamp('2000')
        with tm.assert_raises_regex(OverflowError, msg):
            Timestamp('2000') + to_timedelta([106580], 'D')
        with tm.assert_raises_regex(OverflowError, msg):
            to_timedelta([_NaT]) - Timedelta('1 days')
        with tm.assert_raises_regex(OverflowError, msg):
            to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
        with tm.assert_raises_regex(OverflowError, msg):
            (to_timedelta([_NaT, '5 days', '1 hours']) -
             to_timedelta(['7 seconds', _NaT, '4 hours']))

        # These should not overflow!  NaT entries must stay NaT instead
        # of being treated as ordinary int64 values in the addition.
        exp = TimedeltaIndex([pd.NaT])
        result = to_timedelta([pd.NaT]) - Timedelta('1 days')
        tm.assert_index_equal(result, exp)

        exp = TimedeltaIndex(['4 days', pd.NaT])
        result = to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
        tm.assert_index_equal(result, exp)

        exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
        result = (to_timedelta([pd.NaT, '5 days', '1 hours']) +
                  to_timedelta(['7 seconds', pd.NaT, '4 hours']))
        tm.assert_index_equal(result, exp)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic response type used by the operation signatures below.
T = TypeVar('T')
# Alias for JSON-compatible request bodies passed to the request builders.
JSONType = Any
# Signature of the optional ``cls`` response-transformation callback that
# every operation accepts via kwargs.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-level msrest serializer shared by all request builders; client-side
# validation is switched off for the generated builders.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for the default config server resource.

    :param subscription_id: The subscription id to substitute into the URL.
    :param resource_group_name: The resource group that contains the service.
    :param service_name: The name of the Service resource.
    :return: An :class:`~azure.core.rest.HttpRequest` ready to be sent.
    """
    api_version = "2021-09-01-preview"
    accept = "application/json"

    # Substitute the serialized path parameters into the ARM URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
    })

    # Query string: only the api-version is added to any caller-supplied params.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: advertise the JSON content we accept.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_update_put_request_initial(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PUT request for updating the config server.

    :param subscription_id: The subscription id to substitute into the URL.
    :param resource_group_name: The resource group that contains the service.
    :param service_name: The name of the Service resource.
    :keyword json: JSON-serializable request body.
    :keyword content: Pre-serialized request body (mutually exclusive with json).
    :return: An :class:`~azure.core.rest.HttpRequest` ready to be sent.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-09-01-preview"
    accept = "application/json"

    # Substitute the serialized path parameters into the ARM URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
    })

    # Query string: only the api-version is added to any caller-supplied params.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type is only sent when a body serialization was chosen.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_update_patch_request_initial(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PATCH request for updating the config server.

    :param subscription_id: The subscription id to substitute into the URL.
    :param resource_group_name: The resource group that contains the service.
    :param service_name: The name of the Service resource.
    :keyword json: JSON-serializable request body.
    :keyword content: Pre-serialized request body (mutually exclusive with json).
    :return: An :class:`~azure.core.rest.HttpRequest` ready to be sent.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-09-01-preview"
    accept = "application/json"

    # Substitute the serialized path parameters into the ARM URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
    })

    # Query string: only the api-version is added to any caller-supplied params.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type is only sent when a body serialization was chosen.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_validate_request_initial(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial POST request for validating config server settings.

    :param subscription_id: The subscription id to substitute into the URL.
    :param resource_group_name: The resource group that contains the service.
    :param service_name: The name of the Service resource.
    :keyword json: JSON-serializable request body.
    :keyword content: Pre-serialized request body (mutually exclusive with json).
    :return: An :class:`~azure.core.rest.HttpRequest` ready to be sent.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-09-01-preview"
    accept = "application/json"

    # Substitute the serialized path parameters into the ARM URL template.
    # Note this endpoint ends in /validate rather than /default.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/validate')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
    })

    # Query string: only the api-version is added to any caller-supplied params.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type is only sent when a body serialization was chosen.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
class ConfigServersOperations(object):
    """ConfigServersOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.appplatform.v2021_09_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        service_name: str,
        **kwargs: Any
    ) -> "_models.ConfigServerResource":
        """Get the config server and its properties.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ConfigServerResource, or the result of cls(response)
        :rtype: ~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConfigServerResource"]
        # Map well-known status codes to typed exceptions; callers may extend
        # or override the mapping via the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ConfigServerResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default'}  # type: ignore

    def _update_put_initial(
        self,
        resource_group_name: str,
        service_name: str,
        config_server_resource: "_models.ConfigServerResource",
        **kwargs: Any
    ) -> "_models.ConfigServerResource":
        # Initial PUT call of the long-running update; polled by
        # ``begin_update_put`` below.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConfigServerResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(config_server_resource, 'ConfigServerResource')

        request = build_update_put_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_put_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (completed) and 202 (accepted) responses carry a resource body.
        if response.status_code == 200:
            deserialized = self._deserialize('ConfigServerResource', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('ConfigServerResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _update_put_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default'}  # type: ignore

    @distributed_trace
    def begin_update_put(
        self,
        resource_group_name: str,
        service_name: str,
        config_server_resource: "_models.ConfigServerResource",
        **kwargs: Any
    ) -> LROPoller["_models.ConfigServerResource"]:
        """Update the config server.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param config_server_resource: Parameters for the update operation.
        :type config_server_resource:
         ~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either ConfigServerResource or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConfigServerResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick off the operation with the initial PUT.
            # ``cls=lambda x,y,z: x`` keeps the raw pipeline response for the poller.
            raw_result = self._update_put_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                config_server_resource=config_server_resource,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the model type.
            response = pipeline_response.http_response
            deserialized = self._deserialize('ConfigServerResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Choose the polling strategy: default ARM polling, no polling, or
        # a caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default'}  # type: ignore

    def _update_patch_initial(
        self,
        resource_group_name: str,
        service_name: str,
        config_server_resource: "_models.ConfigServerResource",
        **kwargs: Any
    ) -> "_models.ConfigServerResource":
        # Initial PATCH call of the long-running update; polled by
        # ``begin_update_patch`` below.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConfigServerResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(config_server_resource, 'ConfigServerResource')

        request = build_update_patch_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_patch_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (completed) and 202 (accepted) responses carry a resource body.
        if response.status_code == 200:
            deserialized = self._deserialize('ConfigServerResource', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('ConfigServerResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _update_patch_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default'}  # type: ignore

    @distributed_trace
    def begin_update_patch(
        self,
        resource_group_name: str,
        service_name: str,
        config_server_resource: "_models.ConfigServerResource",
        **kwargs: Any
    ) -> LROPoller["_models.ConfigServerResource"]:
        """Update the config server.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param config_server_resource: Parameters for the update operation.
        :type config_server_resource:
         ~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either ConfigServerResource or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConfigServerResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick off the operation with the initial PATCH.
            # ``cls=lambda x,y,z: x`` keeps the raw pipeline response for the poller.
            raw_result = self._update_patch_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                config_server_resource=config_server_resource,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the model type.
            response = pipeline_response.http_response
            deserialized = self._deserialize('ConfigServerResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Choose the polling strategy: default ARM polling, no polling, or
        # a caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default'}  # type: ignore

    def _validate_initial(
        self,
        resource_group_name: str,
        service_name: str,
        config_server_settings: "_models.ConfigServerSettings",
        **kwargs: Any
    ) -> "_models.ConfigServerSettingsValidateResult":
        # Initial POST call of the long-running validate; polled by
        # ``begin_validate`` below.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConfigServerSettingsValidateResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(config_server_settings, 'ConfigServerSettings')

        request = build_validate_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            content_type=content_type,
            json=_json,
            template_url=self._validate_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (completed) and 202 (accepted) responses carry a result body.
        if response.status_code == 200:
            deserialized = self._deserialize('ConfigServerSettingsValidateResult', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('ConfigServerSettingsValidateResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _validate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/validate'}  # type: ignore

    @distributed_trace
    def begin_validate(
        self,
        resource_group_name: str,
        service_name: str,
        config_server_settings: "_models.ConfigServerSettings",
        **kwargs: Any
    ) -> LROPoller["_models.ConfigServerSettingsValidateResult"]:
        """Check if the config server settings are valid.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param config_server_settings: Config server settings to be validated.
        :type config_server_settings:
         ~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerSettings
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either ConfigServerSettingsValidateResult or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerSettingsValidateResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConfigServerSettingsValidateResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick off the operation with the initial POST.
            # ``cls=lambda x,y,z: x`` keeps the raw pipeline response for the poller.
            raw_result = self._validate_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                config_server_settings=config_server_settings,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the model type.
            response = pipeline_response.http_response
            deserialized = self._deserialize('ConfigServerSettingsValidateResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Choose the polling strategy; note this operation tracks final
        # state via the Location header rather than Azure-AsyncOperation.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_validate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/validate'}  # type: ignore
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance test for the Oppia reader view.
Before running this script, exploration 0 should be loaded in the target
server.
Run this script from the Oppia root directory:
python core/tests/reader_view_load_test.py --thread_count=5 --start_uid=1 \
https://my-oppia-instance.appspot.com
"""
# Pylint has issues with import order of argparse.
#pylint: disable=wrong-import-order
import argparse
import cookielib
import json
import logging
import sys
import threading
import time
import urllib
import urllib2
#pylint: enable=wrong-import-order
# Anti-XSSI token prefixed to every JSON response by the server; it must be
# stripped before the remainder can be parsed as JSON (see _get_json below).
XSSI_PREFIX = ')]}\'\n'

# command line arguments parser
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    'base_url', help=('Base URL of the Oppia installation to test'), type=str)
PARSER.add_argument(
    '--start_uid',
    help='Initial value for unique thread identifier.', default=1, type=int)
PARSER.add_argument(
    '--thread_count',
    help='Number of concurrent threads for executing the test.',
    default=1, type=int)
PARSER.add_argument(
    '--iteration_count',
    help='Number of iterations for executing the test. Each thread of each '
    'iteration acts as a unique user with the uid equal to:'
    'start_uid + thread_count * iteration_index.',
    default=1, type=int)
def assert_contains(needle, haystack):
    """Raises an Exception if `needle` does not occur inside `haystack`."""
    if needle not in haystack:
        # Use % interpolation: Exception('%s', x) would store the args as a
        # tuple instead of formatting them into the message.
        raise Exception('Expected to find term: %s\n%s' % (needle, haystack))
def assert_equals(expected, actual):
    """Raises an Exception if `expected` != `actual`."""
    if expected != actual:
        # Use % interpolation: Exception('%s', x) would store the args as a
        # tuple instead of formatting them into the message.
        raise Exception('Expected equality of %s and %s.' % (expected, actual))
class WebSession(object):
    """A class that allows navigation of web pages keeping cookie session."""

    # Guards every class-level counter below; they are shared by all
    # sessions across all worker threads.
    PROGRESS_LOCK = threading.Lock()

    # Retry policy for "soft" HTTP 500 errors (frontend instance starvation).
    MAX_RETRIES = 3
    RETRY_SLEEP_SEC = 3

    # Aggregate request statistics across all WebSession instances.
    GET_COUNT = 0
    POST_COUNT = 0
    RETRY_COUNT = 0
    # A progress line is logged every PROGRESS_BATCH requests; run_all()
    # overrides this with the configured thread count.
    PROGRESS_BATCH = 10
    # Response-time buckets, slowest first: [>30s, >15s, >7s, >3s, >1s, <=1s].
    RESPONSE_TIME_HISTOGRAM = [0, 0, 0, 0, 0, 0]

    def __init__(self, uid, common_headers=None):
        """Initializes a cookie-preserving HTTP session.

        Args:
            uid: unique identifier for the simulated user; used in log lines.
            common_headers: dict of headers added to every GET request.
        """
        if common_headers is None:
            common_headers = {}
        self.uid = uid
        self.common_headers = common_headers
        self.cookie_jar = cookielib.CookieJar()
        self.opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(self.cookie_jar))

    @classmethod
    def increment_duration_bucket(cls, index):
        # Callers must hold PROGRESS_LOCK (see open()).
        cls.RESPONSE_TIME_HISTOGRAM[index] += 1

    @classmethod
    def update_duration(cls, duration):
        """Files `duration` (seconds) into the response-time histogram."""
        if duration > 30:
            cls.increment_duration_bucket(0)
        elif duration > 15:
            cls.increment_duration_bucket(1)
        elif duration > 7:
            cls.increment_duration_bucket(2)
        elif duration > 3:
            cls.increment_duration_bucket(3)
        elif duration > 1:
            cls.increment_duration_bucket(4)
        else:
            cls.increment_duration_bucket(5)

    @classmethod
    def log_progress(cls, force=False):
        """Logs aggregate counters every PROGRESS_BATCH requests, or if forced."""
        update = ((cls.GET_COUNT + cls.POST_COUNT) % (
            cls.PROGRESS_BATCH) == 0)
        if update or force:
            logging.info(
                'GET/POST:[%s, %s], RETRIES:[%s], SLA:%s',
                cls.GET_COUNT, cls.POST_COUNT, cls.RETRY_COUNT,
                cls.RESPONSE_TIME_HISTOGRAM)

    def get_cookie_value(self, name):
        """Returns the value of cookie `name`, or None if it is not set."""
        for cookie in self.cookie_jar:
            if cookie.name == name:
                return cookie.value
        return None

    def is_soft_error(self, http_error):
        """Checks if HTTPError is due to starvation of frontend instances."""
        body = http_error.fp.read()
        # this is the text specific to the front end instance starvation, which
        # is a retriable error for both GET and POST; normal HTTP error 500 has
        # this specific text '<h1>500 Internal Server Error</h1>'
        if http_error.code == 500 and '<h1>Error: Server Error</h1>' in body:
            return True
        logging.error(
            'Non-retriable HTTP %s error:\n%s', http_error.code, body)
        return False

    def open(self, request, hint):
        """Executes any HTTP request.

        Retries soft 500 errors up to MAX_RETRIES times, sleeping
        RETRY_SLEEP_SEC between attempts. The total elapsed time (retries
        included) is always recorded in the histogram via the finally clause.

        Args:
            request: a urllib2.Request to execute.
            hint: human-readable description used in the error log.
        """
        start_time = time.time()
        try:
            try_count = 0
            while True:
                try:
                    return self.opener.open(request)
                except urllib2.HTTPError as http_error:
                    if (try_count < WebSession.MAX_RETRIES and
                            self.is_soft_error(http_error)):
                        try_count += 1
                        with WebSession.PROGRESS_LOCK:
                            WebSession.RETRY_COUNT += 1
                        time.sleep(WebSession.RETRY_SLEEP_SEC)
                        continue
                    raise http_error
        except Exception as e:
            logging.info(
                'Error in session %s executing: %s', self.uid, hint)
            raise e
        finally:
            with WebSession.PROGRESS_LOCK:
                self.update_duration(time.time() - start_time)

    def get(self, url, expected_code=200):
        """HTTP GET."""
        with WebSession.PROGRESS_LOCK:
            WebSession.GET_COUNT += 1
            self.log_progress()
        request = urllib2.Request(url)
        # Common headers are only attached to GETs (POST below does not).
        for key, value in self.common_headers.items():
            request.add_header(key, value)
        response = self.open(request, 'GET %s' % url)
        assert_equals(expected_code, response.code)
        return response.read()

    def post(self, url, args_dict, expected_code=200):
        """HTTP POST."""
        with WebSession.PROGRESS_LOCK:
            WebSession.POST_COUNT += 1
            self.log_progress()
        data = urllib.urlencode(args_dict)
        request = urllib2.Request(url, data)
        response = self.open(request, 'POST %s' % url)
        assert_equals(expected_code, response.code)
        return response.read()
class TaskThread(threading.Thread):
    """Runs a task in a separate thread, recording any exception it raises."""

    def __init__(self, func, name=None):
        super(TaskThread, self).__init__()
        self.func = func
        # Populated by run() when func raises; inspected by check_all_tasks().
        self.exception = None
        self.name = name

    @classmethod
    def start_all_tasks(cls, tasks):
        """Starts all tasks."""
        for task in tasks:
            task.start()

    @classmethod
    def check_all_tasks(cls, tasks):
        """Checks results of all tasks; fails on the first exception found."""
        failed_count = 0
        for task in tasks:
            while True:
                # Timeouts should happen after 30 seconds.
                task.join(30)
                # is_alive() instead of the removed camelCase isAlive()
                # (gone in Python 3.9; is_alive() also exists in Python 2).
                if task.is_alive():
                    logging.info('Still waiting for: %s.', task.name)
                    continue
                else:
                    break
            if task.exception:
                failed_count += 1
        if failed_count:
            # Interpolate the count; Exception('%s', n) would keep the args
            # as a tuple instead of formatting the message.
            raise Exception('Tasks failed: %s' % failed_count)

    @classmethod
    def execute_task_list(cls, tasks):
        """Starts all tasks and checks the results."""
        cls.start_all_tasks(tasks)
        cls.check_all_tasks(tasks)

    def run(self):
        try:
            self.func()
        except Exception as e:
            logging.error('Error in %s: %s', self.name, e)
            # Record the failure so check_all_tasks() can detect it; the
            # original never assigned self.exception, so failed tasks were
            # silently treated as successes.
            self.exception = e
            # Bare raise re-raises with the original traceback; this replaces
            # the Python-2-only three-argument raise and is equivalent here.
            raise
class ReaderViewLoadTest(object):
    """A reader view load test."""

    def __init__(self, base_url, uid):
        """Creates one simulated reader.

        Args:
            base_url: base URL of the Oppia installation under test.
            uid: unique identifier for this simulated user.
        """
        self.uid = uid
        self.host = base_url
        # State below is populated by init_player() and advanced by
        # submit_and_compare().
        self.exp_id = None
        self.last_state_name = None
        self.last_params = None
        self.state_history = None
        self.session = WebSession(uid=uid)

    def run(self):
        """Plays through exploration 0 ('Welcome to Oppia!') end to end."""
        self.init_player(
            '0', 'Welcome to Oppia!', 'do you know where the name \'Oppia\'')
        self.submit_and_compare(
            '0', 'In fact, the word Oppia means \'learn\'.')
        self.submit_and_compare('Finish', 'Check your spelling!')
        self.submit_and_compare(
            'Finnish', 'Yes! Oppia is the Finnish word for learn.')

    def _get(self, url):
        return self.session.get(url)

    def _get_json(self, url):
        """Get a JSON response, transformed to a Python object."""
        json_body = self.session.get(url)
        if not json_body.startswith(XSSI_PREFIX):
            raise Exception('Expected an XSSI prefix; found none.')
        return json.loads(json_body[len(XSSI_PREFIX):])

    def _post(self, url, data):
        return self.session.post(url, data)

    def _post_json(self, url, data):
        """Post a JSON request, returning the response as a Python object."""
        json_body = self.session.post(str(url), {'payload': json.dumps(data)})
        if not json_body.startswith(XSSI_PREFIX):
            raise Exception('Expected an XSSI prefix; found none.')
        return json.loads(json_body[len(XSSI_PREFIX):])

    def init_player(self, exploration_id, expected_title, expected_response):
        """Loads the exploration page and its init handler, checking both.

        Records the starting state name and params for later use by
        submit_and_compare().
        """
        self.exp_id = exploration_id
        body = self._get('%s/explore/%s' % (self.host, self.exp_id))
        assert_contains('Learn', body)
        assert_contains('Return to Library', body)
        body = self._get_json(
            '%s/explorehandler/init/%s' % (self.host, self.exp_id))
        assert_equals(body['title'], expected_title)
        assert_contains(expected_response, body['init_html'])
        self.last_state_name = body['state_name']
        self.last_params = body['params']
        self.state_history = [self.last_state_name]

    def submit_and_compare(self, answer, expected_response):
        """Submits `answer` for the current state and checks the response.

        Advances last_state_name/last_params and appends the new state to
        state_history.
        """
        url = '%s/explorehandler/transition/%s/%s' % (
            self.host, self.exp_id, urllib.quote(self.last_state_name))
        body = self._post_json(url, {
            'answer': answer, 'params': self.last_params,
            'state_history': self.state_history,
        })
        assert_contains(expected_response, body['oppia_html'])
        self.last_state_name = body['state_name']
        self.last_params = body['params']
        self.state_history += [self.last_state_name]
def run_all(args):
    """Runs test scenario in multiple threads.

    Each iteration spawns args.thread_count ReaderViewLoadTest workers, each
    acting as a distinct user (uid derived from start_uid, the iteration
    index and the thread index), and fails fast on the first bad iteration.
    """
    # Validate the configuration before doing any work.
    if args.thread_count < 1 or args.thread_count > 256:
        raise Exception('Please use between 1 and 256 threads.')
    if not args.base_url:
        raise Exception('Please specify a base URL to load-test against.')

    start_time = time.time()
    logging.info('Started testing: %s', args.base_url)
    logging.info('base_url: %s', args.base_url)
    logging.info('start_uid: %s', args.start_uid)
    logging.info('thread_count: %s', args.thread_count)
    logging.info('iteration_count: %s', args.iteration_count)
    logging.info('SLAs are [>30s, >15s, >7s, >3s, >1s, <1s]')

    try:
        for iteration_index in range(args.iteration_count):
            logging.info('Started iteration: %s', iteration_index)
            # Log a progress line once per batch of thread_count requests.
            WebSession.PROGRESS_BATCH = args.thread_count
            tasks = []
            for thread_index in range(args.thread_count):
                uid = (args.start_uid +
                       iteration_index * args.thread_count + thread_index)
                worker = ReaderViewLoadTest(args.base_url, uid)
                tasks.append(TaskThread(
                    worker.run, name='ReaderViewLoadTest-%s' % thread_index))
            try:
                TaskThread.execute_task_list(tasks)
            except Exception:
                logging.info('Failed iteration: %s', iteration_index)
                raise
    finally:
        WebSession.log_progress(force=True)
        logging.info('Done! Duration (s): %s', time.time() - start_time)
if __name__ == '__main__':
    # Configure INFO-level logging so per-batch progress lines are visible,
    # then run with the parsed command-line configuration.
    logging.basicConfig(level=logging.INFO)
    run_all(PARSER.parse_args())
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Google Cloud Build service."""
from typing import Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.devtools.cloudbuild import CloudBuildClient
from google.cloud.devtools.cloudbuild_v1.types import Build, BuildTrigger, RepoSource
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
# Time to sleep between active checks of the operation results
# NOTE(review): not referenced anywhere within this hook — confirm it is
# used by external callers before removing.
TIME_TO_SLEEP_IN_SECONDS = 5
class CloudBuildHook(GoogleBaseHook):
    """
    Hook for the Google Cloud Build Service.

    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account.
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    def __init__(
        self,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
    ) -> None:
        super().__init__(
            gcp_conn_id=gcp_conn_id, delegate_to=delegate_to, impersonation_chain=impersonation_chain
        )
        # Lazily initialized by get_conn().
        self._client: Optional[CloudBuildClient] = None

    def _get_build_id_from_operation(self, operation: Operation) -> str:
        """
        Retrieve Cloud Build ID from Operation Object.

        :param operation: The proto to append resource_label airflow
            version to
        :type operation: google.api_core.operation.Operation
        :return: Cloud Build ID
        :rtype: str
        """
        try:
            return operation.metadata.build.id
        except Exception as e:
            # Chain the original error so the root cause is not lost.
            raise AirflowException("Could not retrieve Build ID from Operation.") from e

    def get_conn(self) -> CloudBuildClient:
        """
        Retrieves the connection to Google Cloud Build.

        :return: Google Cloud Build client object.
        :rtype: `google.cloud.devtools.cloudbuild_v1.CloudBuildClient`
        """
        if not self._client:
            self._client = CloudBuildClient(credentials=self._get_credentials(), client_info=self.client_info)
        return self._client

    @GoogleBaseHook.fallback_to_default_project_id
    def cancel_build(
        self,
        id_: str,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> Build:
        """
        Cancels a build in progress.

        :param id_: The ID of the build.
        :type id_: str
        :param project_id: Optional, Google Cloud Project project_id where the function belongs.
            If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: Optional[str]
        :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
            will not be retried.
        :type retry: Optional[Retry]
        :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
            Note that if `retry` is specified, the timeout applies to each individual attempt.
        :type timeout: Optional[float]
        :param metadata: Optional, additional metadata that is provided to the method.
        :type metadata: Optional[Sequence[Tuple[str, str]]]

        :rtype: `google.cloud.devtools.cloudbuild_v1.types.Build`
        """
        client = self.get_conn()

        self.log.info("Start cancelling build: %s.", id_)

        build = client.cancel_build(
            request={'project_id': project_id, 'id': id_},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        self.log.info("Build has been cancelled: %s.", id_)

        return build

    @GoogleBaseHook.fallback_to_default_project_id
    def create_build(
        self,
        build: Union[Dict, Build],
        project_id: str = PROVIDE_PROJECT_ID,
        wait: bool = True,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> Build:
        """
        Starts a build with the specified configuration.

        :param build: The build resource to create. If a dict is provided, it must be of the same form
            as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.Build`
        :type build: Union[dict, `google.cloud.devtools.cloudbuild_v1.types.Build`]
        :param project_id: Optional, Google Cloud Project project_id where the function belongs.
            If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: Optional[str]
        :param wait: Optional, wait for operation to finish.
        :type wait: Optional[bool]
        :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
            will not be retried.
        :type retry: Optional[Retry]
        :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
            Note that if `retry` is specified, the timeout applies to each individual attempt.
        :type timeout: Optional[float]
        :param metadata: Optional, additional metadata that is provided to the method.
        :type metadata: Optional[Sequence[Tuple[str, str]]]

        :rtype: `google.cloud.devtools.cloudbuild_v1.types.Build`
        """
        client = self.get_conn()

        self.log.info("Start creating build.")

        operation = client.create_build(
            request={'project_id': project_id, 'build': build},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Bug fix: pass the returned `operation` instance, not the `Operation`
        # class, otherwise metadata access always raises AirflowException.
        id_ = self._get_build_id_from_operation(operation)

        if not wait:
            return self.get_build(id_=id_, project_id=project_id)

        operation.result()

        self.log.info("Build has been created: %s.", id_)

        return self.get_build(id_=id_, project_id=project_id)

    @GoogleBaseHook.fallback_to_default_project_id
    def create_build_trigger(
        self,
        trigger: Union[dict, BuildTrigger],
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> BuildTrigger:
        """
        Creates a new BuildTrigger.

        :param trigger: The BuildTrigger to create. If a dict is provided, it must be of the same form
            as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
        :type trigger: Union[dict, `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`]
        :param project_id: Optional, Google Cloud Project project_id where the function belongs.
            If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: Optional[str]
        :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
            will not be retried.
        :type retry: Optional[Retry]
        :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
            Note that if `retry` is specified, the timeout applies to each individual attempt.
        :type timeout: Optional[float]
        :param metadata: Optional, additional metadata that is provided to the method.
        :type metadata: Optional[Sequence[Tuple[str, str]]]

        :rtype: `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
        """
        client = self.get_conn()

        self.log.info("Start creating build trigger.")

        trigger = client.create_build_trigger(
            request={'project_id': project_id, 'trigger': trigger},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        self.log.info("Build trigger has been created.")

        return trigger

    @GoogleBaseHook.fallback_to_default_project_id
    def delete_build_trigger(
        self,
        trigger_id: str,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        """
        Deletes a BuildTrigger by its project ID and trigger ID.

        :param trigger_id: The ID of the BuildTrigger to delete.
        :type trigger_id: str
        :param project_id: Optional, Google Cloud Project project_id where the function belongs.
            If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: Optional[str]
        :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
            will not be retried.
        :type retry: Optional[Retry]
        :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
            Note that if `retry` is specified, the timeout applies to each individual attempt.
        :type timeout: Optional[float]
        :param metadata: Optional, additional metadata that is provided to the method.
        :type metadata: Optional[Sequence[Tuple[str, str]]]
        """
        client = self.get_conn()

        self.log.info("Start deleting build trigger: %s.", trigger_id)

        client.delete_build_trigger(
            request={'project_id': project_id, 'trigger_id': trigger_id},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        self.log.info("Build trigger has been deleted: %s.", trigger_id)

    @GoogleBaseHook.fallback_to_default_project_id
    def get_build(
        self,
        id_: str,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> Build:
        """
        Returns information about a previously requested build.

        :param id_: The ID of the build.
        :type id_: str
        :param project_id: Optional, Google Cloud Project project_id where the function belongs.
            If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: Optional[str]
        :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
            will not be retried.
        :type retry: Optional[Retry]
        :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
            Note that if `retry` is specified, the timeout applies to each individual attempt.
        :type timeout: Optional[float]
        :param metadata: Optional, additional metadata that is provided to the method.
        :type metadata: Optional[Sequence[Tuple[str, str]]]

        :rtype: `google.cloud.devtools.cloudbuild_v1.types.Build`
        """
        client = self.get_conn()

        self.log.info("Start retrieving build: %s.", id_)

        build = client.get_build(
            request={'project_id': project_id, 'id': id_},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        self.log.info("Build has been retrieved: %s.", id_)

        return build

    @GoogleBaseHook.fallback_to_default_project_id
    def get_build_trigger(
        self,
        trigger_id: str,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> BuildTrigger:
        """
        Returns information about a BuildTrigger.

        :param trigger_id: The ID of the BuildTrigger to get.
        :type trigger_id: str
        :param project_id: Optional, Google Cloud Project project_id where the function belongs.
            If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: Optional[str]
        :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
            will not be retried.
        :type retry: Optional[Retry]
        :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
            Note that if `retry` is specified, the timeout applies to each individual attempt.
        :type timeout: Optional[float]
        :param metadata: Optional, additional metadata that is provided to the method.
        :type metadata: Optional[Sequence[Tuple[str, str]]]

        :rtype: `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
        """
        client = self.get_conn()

        self.log.info("Start retrieving build trigger: %s.", trigger_id)

        trigger = client.get_build_trigger(
            request={'project_id': project_id, 'trigger_id': trigger_id},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        self.log.info("Build trigger has been retrieved: %s.", trigger_id)

        return trigger

    @GoogleBaseHook.fallback_to_default_project_id
    def list_build_triggers(
        self,
        location: str,
        project_id: str = PROVIDE_PROJECT_ID,
        page_size: Optional[int] = None,
        page_token: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> List[BuildTrigger]:
        """
        Lists existing BuildTriggers.

        :param project_id: Google Cloud Project project_id where the function belongs.
            If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: str
        :param location: The location of the project.
        :type location: string
        :param page_size: Optional, number of results to return in the list.
        :type page_size: Optional[int]
        :param page_token: Optional, token to provide to skip to a particular spot in the list.
        :type page_token: Optional[str]
        :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
            will not be retried.
        :type retry: Optional[Retry]
        :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
            Note that if `retry` is specified, the timeout applies to each individual attempt.
        :type timeout: Optional[float]
        :param metadata: Optional, additional metadata that is provided to the method.
        :type metadata: Optional[Sequence[Tuple[str, str]]]

        :rtype: `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
        """
        client = self.get_conn()

        parent = f"projects/{project_id}/locations/{location}"

        self.log.info("Start retrieving build triggers.")

        response = client.list_build_triggers(
            request={
                'parent': parent,
                'project_id': project_id,
                'page_size': page_size,
                'page_token': page_token,
            },
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        self.log.info("Build triggers have been retrieved.")

        return list(response.triggers)

    @GoogleBaseHook.fallback_to_default_project_id
    def list_builds(
        self,
        location: str,
        project_id: str = PROVIDE_PROJECT_ID,
        page_size: Optional[int] = None,
        # Annotation fixed from Optional[int]: page tokens are opaque strings
        # (see the docstring below and list_build_triggers above).
        page_token: Optional[str] = None,
        filter_: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> List[Build]:
        """
        Lists previously requested builds.

        :param project_id: Google Cloud Project project_id where the function belongs.
            If set to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :param location: The location of the project.
        :type location: string
        :param page_size: Optional, number of results to return in the list.
        :type page_size: Optional[int]
        :param page_token: Optional, token to provide to skip to a particular spot in the list.
        :type page_token: Optional[str]
        :param filter_: Optional, the raw filter text to constrain the results.
        :type filter_: Optional[str]
        :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
            will not be retried.
        :type retry: Optional[Retry]
        :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
            Note that if `retry` is specified, the timeout applies to each individual attempt.
        :type timeout: Optional[float]
        :param metadata: Optional, additional metadata that is provided to the method.
        :type metadata: Optional[Sequence[Tuple[str, str]]]

        :rtype: List[`google.cloud.devtools.cloudbuild_v1.types.Build`]
        """
        client = self.get_conn()

        parent = f"projects/{project_id}/locations/{location}"

        self.log.info("Start retrieving builds.")

        response = client.list_builds(
            request={
                'parent': parent,
                'project_id': project_id,
                'page_size': page_size,
                'page_token': page_token,
                'filter': filter_,
            },
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        self.log.info("Builds have been retrieved.")

        return list(response.builds)

    @GoogleBaseHook.fallback_to_default_project_id
    def retry_build(
        self,
        id_: str,
        project_id: str = PROVIDE_PROJECT_ID,
        wait: bool = True,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> Build:
        """
        Creates a new build based on the specified build. This method creates a new build
        using the original build request, which may or may not result in an identical build.

        :param id_: Build ID of the original build.
        :type id_: str
        :param project_id: Optional, Google Cloud Project project_id where the function belongs.
            If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: str
        :param wait: Optional, wait for operation to finish.
        :type wait: Optional[bool]
        :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
            will not be retried.
        :type retry: Optional[Retry]
        :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
            Note that if `retry` is specified, the timeout applies to each individual attempt.
        :type timeout: Optional[float]
        :param metadata: Optional, additional metadata that is provided to the method.
        :type metadata: Optional[Sequence[Tuple[str, str]]]

        :rtype: `google.cloud.devtools.cloudbuild_v1.types.Build`
        """
        client = self.get_conn()

        self.log.info("Start retrying build: %s.", id_)

        operation = client.retry_build(
            request={'project_id': project_id, 'id': id_},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Bug fix: pass the returned `operation` instance, not the `Operation`
        # class, otherwise metadata access always raises AirflowException.
        id_ = self._get_build_id_from_operation(operation)

        if not wait:
            return self.get_build(id_=id_, project_id=project_id)

        operation.result()

        self.log.info("Build has been retried: %s.", id_)

        return self.get_build(id_=id_, project_id=project_id)

    @GoogleBaseHook.fallback_to_default_project_id
    def run_build_trigger(
        self,
        trigger_id: str,
        source: Union[dict, RepoSource],
        project_id: str = PROVIDE_PROJECT_ID,
        wait: bool = True,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> Build:
        """
        Runs a BuildTrigger at a particular source revision.

        :param trigger_id: The ID of the trigger.
        :type trigger_id: str
        :param source: Source to build against this trigger. If a dict is provided, it must be of the
            same form as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.RepoSource`
        :type source: Union[dict, `google.cloud.devtools.cloudbuild_v1.types.RepoSource`]
        :param project_id: Optional, Google Cloud Project project_id where the function belongs.
            If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: str
        :param wait: Optional, wait for operation to finish.
        :type wait: Optional[bool]
        :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
            will not be retried.
        :type retry: Optional[Retry]
        :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
            Note that if `retry` is specified, the timeout applies to each individual attempt.
        :type timeout: Optional[float]
        :param metadata: Optional, additional metadata that is provided to the method.
        :type metadata: Optional[Sequence[Tuple[str, str]]]

        :rtype: `google.cloud.devtools.cloudbuild_v1.types.Build`
        """
        client = self.get_conn()

        self.log.info("Start running build trigger: %s.", trigger_id)

        operation = client.run_build_trigger(
            request={'project_id': project_id, 'trigger_id': trigger_id, 'source': source},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Bug fix: pass the returned `operation` instance, not the `Operation`
        # class, otherwise metadata access always raises AirflowException.
        id_ = self._get_build_id_from_operation(operation)

        if not wait:
            return self.get_build(id_=id_, project_id=project_id)

        operation.result()

        self.log.info("Build trigger has been run: %s.", trigger_id)

        return self.get_build(id_=id_, project_id=project_id)

    @GoogleBaseHook.fallback_to_default_project_id
    def update_build_trigger(
        self,
        trigger_id: str,
        trigger: Union[dict, BuildTrigger],
        project_id: str,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> BuildTrigger:
        """
        Updates a BuildTrigger by its project ID and trigger ID.

        :param trigger_id: The ID of the trigger.
        :type trigger_id: str
        :param trigger: The BuildTrigger to create. If a dict is provided, it must be of the same form
            as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
        :type trigger: Union[dict, `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`]
        :param project_id: Optional, Google Cloud Project project_id where the function belongs.
            If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: Optional[str]
        :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
            will not be retried.
        :type retry: Optional[Retry]
        :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
            Note that if `retry` is specified, the timeout applies to each individual attempt.
        :type timeout: Optional[float]
        :param metadata: Optional, additional metadata that is provided to the method.
        :type metadata: Optional[Sequence[Tuple[str, str]]]

        :rtype: `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
        """
        client = self.get_conn()

        self.log.info("Start updating build trigger: %s.", trigger_id)

        trigger = client.update_build_trigger(
            request={'project_id': project_id, 'trigger_id': trigger_id, 'trigger': trigger},
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        self.log.info("Build trigger has been updated: %s.", trigger_id)

        return trigger
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class ToughSchedulingCasesPage(page_module.Page):
  """Base page for the tough-scheduling latency test cases."""

  def __init__(self, url, page_set):
    super(ToughSchedulingCasesPage, self).__init__(url=url, page_set=page_set)

  def RunSmoothness(self, action_runner):
    # Scroll the page inside a synthetic-gesture interaction record so the
    # smoothness metric can attribute frames to this gesture.
    scroll = action_runner.BeginGestureInteraction(
        'ScrollAction', is_smooth=True)
    action_runner.ScrollPage()
    scroll.End()
class Page1(ToughSchedulingCasesPage):
  """Why: Simulate oversubscribed main thread."""

  def __init__(self, page_set):
    super(Page1, self).__init__(
        url='file://tough_scheduling_cases/simple_text_page.html?main_busy',
        page_set=page_set)
    # Pad every BeginMainFrame by 8ms to oversubscribe the main thread.
    main_frame_delay = {'target_duration': 0.008}
    self.synthetic_delays = {'cc.BeginMainFrame': main_frame_delay}
class Page2(ToughSchedulingCasesPage):
  """Why: Simulate oversubscribed main thread."""

  def __init__(self, page_set):
    super(Page2, self).__init__(
        # pylint: disable=C0301
        url='file://tough_scheduling_cases/simple_text_page.html?main_very_busy',
        page_set=page_set)
    # Pad every BeginMainFrame by 24ms — a heavily oversubscribed main thread.
    main_frame_delay = {'target_duration': 0.024}
    self.synthetic_delays = {'cc.BeginMainFrame': main_frame_delay}
class Page3(ToughSchedulingCasesPage):
  """Why: Simulate a page with a few graphics layers."""

  def __init__(self, page_set):
    super(Page3, self).__init__(
        # pylint: disable=C0301
        url='file://tough_scheduling_cases/simple_text_page.html?medium_layers',
        page_set=page_set)
    # 4ms synthetic delay on draw/swap, present and BeginMainFrame.
    self.synthetic_delays = dict(
        (event, {'target_duration': 0.004})
        for event in ('cc.DrawAndSwap', 'gpu.PresentingFrame',
                      'cc.BeginMainFrame'))
class Page4(ToughSchedulingCasesPage):
  """Why: Simulate a page with many graphics layers."""

  def __init__(self, page_set):
    super(Page4, self).__init__(
        # pylint: disable=C0301
        url='file://tough_scheduling_cases/simple_text_page.html?many_layers',
        page_set=page_set)
    # 12ms synthetic delay on draw/swap, present and BeginMainFrame.
    self.synthetic_delays = dict(
        (event, {'target_duration': 0.012})
        for event in ('cc.DrawAndSwap', 'gpu.PresentingFrame',
                      'cc.BeginMainFrame'))
class Page5(ToughSchedulingCasesPage):
  """Why: Simulate a page with expensive recording and rasterization."""

  def __init__(self, page_set):
    super(Page5, self).__init__(
        # pylint: disable=C0301
        url='file://tough_scheduling_cases/simple_text_page.html?medium_raster',
        page_set=page_set)
    # 4ms synthetic delay on raster, BeginMainFrame and texture upload.
    self.synthetic_delays = dict(
        (event, {'target_duration': 0.004})
        for event in ('cc.RasterRequiredForActivation', 'cc.BeginMainFrame',
                      'gpu.AsyncTexImage'))
class Page6(ToughSchedulingCasesPage):
  """Why: Simulate a page with expensive recording and rasterization."""

  def __init__(self, page_set):
    super(Page6, self).__init__(
        # pylint: disable=C0301
        url='file://tough_scheduling_cases/simple_text_page.html?heavy_raster',
        page_set=page_set)
    # 24ms synthetic delay on raster, BeginMainFrame and texture upload.
    self.synthetic_delays = dict(
        (event, {'target_duration': 0.024})
        for event in ('cc.RasterRequiredForActivation', 'cc.BeginMainFrame',
                      'gpu.AsyncTexImage'))
class Page7(ToughSchedulingCasesPage):
  """Why: Medium cost touch handler."""

  def __init__(self, page_set):
    super(Page7, self).__init__(
        # pylint: disable=C0301
        url='file://tough_scheduling_cases/touch_handler_scrolling.html?medium_handler',
        page_set=page_set)
    handler_delay = {'target_duration': 0.008}
    self.synthetic_delays = {'blink.HandleInputEvent': handler_delay}
class Page8(ToughSchedulingCasesPage):
  """Why: Slow touch handler."""

  def __init__(self, page_set):
    super(Page8, self).__init__(
        # pylint: disable=C0301
        url='file://tough_scheduling_cases/touch_handler_scrolling.html?slow_handler',
        page_set=page_set)
    handler_delay = {'target_duration': 0.024}
    self.synthetic_delays = {'blink.HandleInputEvent': handler_delay}
class Page9(ToughSchedulingCasesPage):
  """Why: Touch handler that often takes a long time."""

  def __init__(self, page_set):
    super(Page9, self).__init__(
        # pylint: disable=C0301
        url='file://tough_scheduling_cases/touch_handler_scrolling.html?janky_handler',
        page_set=page_set)
    # Alternating mode: the delay fires on every other input event.
    handler_delay = {'target_duration': 0.024, 'mode': 'alternating'}
    self.synthetic_delays = {'blink.HandleInputEvent': handler_delay}
class Page10(ToughSchedulingCasesPage):
  """Why: Touch handler that occasionally takes a long time."""

  def __init__(self, page_set):
    super(Page10, self).__init__(
        # pylint: disable=C0301
        url='file://tough_scheduling_cases/touch_handler_scrolling.html?occasionally_janky_handler',
        page_set=page_set)
    # Oneshot mode: the delay fires only once.
    handler_delay = {'target_duration': 0.024, 'mode': 'oneshot'}
    self.synthetic_delays = {'blink.HandleInputEvent': handler_delay}
class Page11(ToughSchedulingCasesPage):
  """Why: Super expensive touch handler causes browser to scroll after a
  timeout.
  """

  def __init__(self, page_set):
    super(Page11, self).__init__(
        # pylint: disable=C0301
        url='file://tough_scheduling_cases/touch_handler_scrolling.html?super_slow_handler',
        page_set=page_set)
    handler_delay = {'target_duration': 0.2}
    self.synthetic_delays = {'blink.HandleInputEvent': handler_delay}
class Page12(ToughSchedulingCasesPage):
  """Why: Super expensive touch handler that only occupies a part of the page.
  """

  def __init__(self, page_set):
    super(Page12, self).__init__(
        url='file://tough_scheduling_cases/div_touch_handler.html',
        page_set=page_set)
    handler_delay = {'target_duration': 0.2}
    self.synthetic_delays = {'blink.HandleInputEvent': handler_delay}
class Page13(ToughSchedulingCasesPage):
  """Why: Test a moderately heavy requestAnimationFrame handler."""

  def __init__(self, page_set):
    super(Page13, self).__init__(
        url='file://tough_scheduling_cases/raf.html?medium_handler',
        page_set=page_set)
    # 4ms synthetic delay on raster, BeginMainFrame and texture upload.
    self.synthetic_delays = dict(
        (event, {'target_duration': 0.004})
        for event in ('cc.RasterRequiredForActivation', 'cc.BeginMainFrame',
                      'gpu.AsyncTexImage'))
class Page14(ToughSchedulingCasesPage):
  """Why: Test a moderately heavy requestAnimationFrame handler."""

  def __init__(self, page_set):
    super(Page14, self).__init__(
        url='file://tough_scheduling_cases/raf.html?heavy_handler',
        page_set=page_set)
    # 24ms synthetic delay on raster, BeginMainFrame and texture upload.
    self.synthetic_delays = dict(
        (event, {'target_duration': 0.024})
        for event in ('cc.RasterRequiredForActivation', 'cc.BeginMainFrame',
                      'gpu.AsyncTexImage'))
class Page15(ToughSchedulingCasesPage):
  """Why: Simulate a heavily GPU bound page."""

  def __init__(self, page_set):
    super(Page15, self).__init__(
        url='file://tough_scheduling_cases/raf.html?gpu_bound',
        page_set=page_set)
    # 100ms synthetic delay on frame presentation.
    present_delay = {'target_duration': 0.1}
    self.synthetic_delays = {'gpu.PresentingFrame': present_delay}
class Page16(ToughSchedulingCasesPage):
  """Why: Test a requestAnimationFrame handler with a heavy first frame."""

  def __init__(self, page_set):
    super(Page16, self).__init__(
        url='file://tough_scheduling_cases/raf.html?heavy_first_frame',
        page_set=page_set)
    # 150ms synthetic delay on BeginMainFrame.
    first_frame_delay = {'target_duration': 0.15}
    self.synthetic_delays = {'cc.BeginMainFrame': first_frame_delay}
class Page17(ToughSchedulingCasesPage):
  """Why: Medium stress test for the scheduler."""

  def __init__(self, page_set):
    super(Page17, self).__init__(
        url='file://tough_scheduling_cases/raf_touch_animation.html?medium',
        page_set=page_set)
    # 4ms synthetic delay on draw/swap and BeginMainFrame.
    self.synthetic_delays = dict(
        (event, {'target_duration': 0.004})
        for event in ('cc.DrawAndSwap', 'cc.BeginMainFrame'))
class Page18(ToughSchedulingCasesPage):
  """Why: Heavy stress test for the scheduler."""

  def __init__(self, page_set):
    super(Page18, self).__init__(
        url='file://tough_scheduling_cases/raf_touch_animation.html?heavy',
        page_set=page_set)
    # 12ms synthetic delay on draw/swap and BeginMainFrame.
    self.synthetic_delays = dict(
        (event, {'target_duration': 0.012})
        for event in ('cc.DrawAndSwap', 'cc.BeginMainFrame'))
class Page19(ToughSchedulingCasesPage):
  """Why: Both main and impl thread animating concurrently."""

  def __init__(self, page_set):
    super(Page19, self).__init__(
        url='file://tough_scheduling_cases/split_animation.html',
        page_set=page_set)

  def RunSmoothness(self, action_runner):
    # The page animates on its own; simply observe it for three seconds.
    action_runner.Wait(3)
class Page20(ToughSchedulingCasesPage):
  """Why: Simple JS touch dragging."""

  def __init__(self, page_set):
    super(Page20, self).__init__(
        url='file://tough_scheduling_cases/simple_touch_drag.html',
        page_set=page_set)

  def RunSmoothness(self, action_runner):
    # Drag the #card element upward with a slow touch scroll.
    drag = action_runner.BeginGestureInteraction(
        'ScrollAction', is_smooth=True)
    action_runner.ScrollElement(
        selector='#card',
        use_touch=True,
        direction='up',
        speed_in_pixels_per_second=150,
        distance=400)
    drag.End()
class EmptyTouchHandlerPage(ToughSchedulingCasesPage):
  """Why: Scrolling on a page with a touch handler that consumes no events
  but may be slow.
  """

  def __init__(self, name, desktop, slow_handler, bounce, page_set):
    suffix = '_desktop' if desktop else ''
    super(EmptyTouchHandlerPage, self).__init__(
        url='file://tough_scheduling_cases/empty_touch_handler%s.html?%s' % (
            suffix, name),
        page_set=page_set)
    if slow_handler:
      # A 200ms input-event delay makes the no-op handler "slow".
      self.synthetic_delays = {
          'blink.HandleInputEvent': {'target_duration': 0.2}
      }
    self.bounce = bounce

  def RunSmoothness(self, action_runner):
    if self.bounce:
      interaction = action_runner.BeginGestureInteraction(
          'ScrollBounceAction', is_smooth=True)
      action_runner.ScrollBouncePage()
      interaction.End()
      return
    interaction = action_runner.BeginGestureInteraction(
        'ScrollAction', is_smooth=True)
    # Speed and distance are tuned to run exactly as long as a scroll
    # bounce.
    action_runner.ScrollPage(use_touch=True, speed_in_pixels_per_second=400,
                             distance=2100)
    interaction.End()
class SynchronizedScrollOffsetPage(ToughSchedulingCasesPage):
  """Why: For measuring the latency of scroll-synchronized effects."""

  def __init__(self, page_set):
    super(SynchronizedScrollOffsetPage, self).__init__(
        url='file://tough_scheduling_cases/sync_scroll_offset.html',
        page_set=page_set)

  def RunSmoothness(self, action_runner):
    bounce = action_runner.BeginGestureInteraction(
        'ScrollBounceAction', is_smooth=True)
    action_runner.ScrollBouncePage()
    bounce.End()
class ToughSchedulingCasesPageSet(page_set_module.PageSet):
  """Tough scheduler latency test cases."""

  def __init__(self):
    super(ToughSchedulingCasesPageSet, self).__init__()
    prefix = 'file://tough_scheduling_cases/'
    # Pages are listed (and therefore added) in the same order the metrics
    # expect; disabled pages are noted inline.
    pages = [
        # Why: Simple scrolling baseline
        ToughSchedulingCasesPage(prefix + 'simple_text_page.html', self),
        Page1(self),
        Page2(self),
        Page3(self),
        Page4(self),
        Page5(self),
        # Page6 disabled: flaky, crbug.com/368532
        # Why: Touch handler scrolling baseline
        ToughSchedulingCasesPage(prefix + 'touch_handler_scrolling.html',
                                 self),
        Page7(self),
        Page8(self),
        Page9(self),
        Page10(self),
        Page11(self),
        Page12(self),
        # Why: requestAnimationFrame scrolling baseline
        ToughSchedulingCasesPage(prefix + 'raf.html', self),
        # Why: Test canvas blocking behavior
        ToughSchedulingCasesPage(prefix + 'raf_canvas.html', self),
        Page13(self),
        # Page14 disabled: flaky, crbug.com/368532
        Page15(self),
        Page16(self),
        # Why: Test a requestAnimationFrame handler with concurrent CSS
        # animation
        ToughSchedulingCasesPage(prefix + 'raf_animation.html', self),
        # Why: Stress test for the scheduler
        ToughSchedulingCasesPage(prefix + 'raf_touch_animation.html', self),
        Page17(self),
        Page18(self),
        Page19(self),
        Page20(self),
        # Why: Baseline for scrolling in the presence of a no-op touch
        # handler
        EmptyTouchHandlerPage(name='baseline', desktop=False,
                              slow_handler=False, bounce=False,
                              page_set=self),
        # Why: Slow handler blocks scroll start
        EmptyTouchHandlerPage(name='slow_handler', desktop=False,
                              slow_handler=True, bounce=False,
                              page_set=self),
        # Why: Slow handler blocks scroll start until touch ACK timeout
        EmptyTouchHandlerPage(name='desktop_slow_handler', desktop=True,
                              slow_handler=True, bounce=False,
                              page_set=self),
        # Why: Scroll bounce showing repeated transitions between scrolling
        # and sending synchronous touchmove events. Should be nearly as
        # fast as scroll baseline.
        EmptyTouchHandlerPage(name='bounce', desktop=False,
                              slow_handler=False, bounce=True,
                              page_set=self),
        # Why: Scroll bounce with slow handler, repeated blocking.
        EmptyTouchHandlerPage(name='bounce_slow_handler', desktop=False,
                              slow_handler=True, bounce=True,
                              page_set=self),
        # Why: Scroll bounce with slow handler on desktop, blocks only once
        # until ACK timeout.
        EmptyTouchHandlerPage(name='bounce_desktop_slow_handler',
                              desktop=True, slow_handler=True, bounce=True,
                              page_set=self),
        # Why: For measuring the latency of scroll-synchronized effects.
        SynchronizedScrollOffsetPage(page_set=self),
    ]
    for page in pages:
      self.AddPage(page)
|
|
# -*- coding: utf-8 -*-
# Copyright 2017, 2021 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Register the Z-Y decomposition for an arbitrary one qubit gate.
See paper "Elementary gates for quantum computing" by Adriano Barenco et al.,
arXiv:quant-ph/9503016v1. (Note: They use different gate definitions!)
Or see theorem 4.1 in Nielsen and Chuang.
Decompose an arbitrary one qubit gate U into
U = e^(i alpha) Rz(beta) Ry(gamma) Rz(delta). If a gate V is element of SU(2),
i.e., determinant == 1, then
V = Rz(beta) Ry(gamma) Rz(delta)
"""
import cmath
import itertools
import math
import numpy
from projectq.cengines import DecompositionRule
from projectq.meta import Control, get_control_count
from projectq.ops import BasicGate, Ph, Ry, Rz
TOLERANCE = 1e-12  # numerical tolerance used when comparing matrix entries
def _recognize_arb1qubit(cmd):
    """
    Recognize an arbitrary one qubit gate which has a matrix property.

    Gates with control qubits are rejected, as otherwise the AutoReplacer
    might go into an infinite loop; use carb1qubit2cnotrzandry instead.
    """
    try:
        has_2x2_matrix = len(cmd.gate.matrix) == 2
        return has_2x2_matrix and get_control_count(cmd) == 0
    except AttributeError:
        # Gate has no matrix attribute -> not decomposable here.
        return False
def _test_parameters(matrix, a, b_half, c_half, d_half):  # pylint: disable=invalid-name
    """
    Check whether U(a, b/2, c/2, d/2) reproduces `matrix`.

        U = [[exp(j*(a-b/2-d/2))*cos(c/2), -exp(j*(a-b/2+d/2))*sin(c/2)],
             [exp(j*(a+b/2-d/2))*sin(c/2),  exp(j*(a+b/2+d/2))*cos(c/2)]]

    Args:
        matrix(list): 2x2 matrix
        a: parameter of U
        b_half: b/2. parameter of U
        c_half: c/2. parameter of U
        d_half: d/2. parameter of U

    Returns:
        True if all entries of U and `matrix` are TOLERANCE close.
    """
    # Hoist the trigonometric factors shared by the matrix entries.
    cos_c = math.cos(c_half)
    sin_c = math.sin(c_half)
    candidate = [
        [
            cmath.exp(1j * (a - b_half - d_half)) * cos_c,
            -cmath.exp(1j * (a - b_half + d_half)) * sin_c,
        ],
        [
            cmath.exp(1j * (a + b_half - d_half)) * sin_c,
            cmath.exp(1j * (a + b_half + d_half)) * cos_c,
        ],
    ]
    return numpy.allclose(candidate, matrix, rtol=10 * TOLERANCE, atol=TOLERANCE)
def _a_from_two_a(two_a):
    """Choose the global phase a given 2a (mod 2pi).

    When 2a == 0 (mod 2pi), a is either 0 or pi; we can pick a == 0 w.l.o.g.
    because c/2 -> c/2 + pi (see U in _find_parameters) has the same effect
    as a == 0 -> a == pi.
    """
    if abs(two_a) < TOLERANCE or abs(two_a) > 2 * math.pi - TOLERANCE:
        return 0
    return two_a / 2.0


def _first_matching_parameters(matrix, a, b_halves, c_halves, d_halves):
    """Return the first (b/2, c/2, d/2) candidate that reproduces `matrix`.

    Raises:
        ValueError: if no candidate combination matches; the matrix is then
            most likely not unitary. (ValueError subclasses Exception, so
            callers catching the old `Exception` still work.)
    """
    for b_half, c_half, d_half in itertools.product(b_halves, c_halves, d_halves):
        if _test_parameters(matrix, a, b_half, c_half, d_half):
            return b_half, c_half, d_half
    raise ValueError(
        "Couldn't find parameters for matrix {}. This shouldn't happen. "
        "Maybe the matrix is not unitary?".format(matrix)
    )


def _find_parameters(matrix):
    """
    Find decomposition parameters.

    Given a 2x2 unitary matrix, find the parameters a, b/2, c/2, and d/2 such that
    matrix == [[exp(j*(a-b/2-d/2))*cos(c/2), -exp(j*(a-b/2+d/2))*sin(c/2)],
               [exp(j*(a+b/2-d/2))*sin(c/2), exp(j*(a+b/2+d/2))*cos(c/2)]]

    Note:
        If the matrix is element of SU(2) (determinant == 1), then we can choose a = 0.

    Args:
        matrix(list): 2x2 unitary matrix

    Returns:
        parameters of the matrix: (a, b/2, c/2, d/2)

    Raises:
        ValueError: if no parameter combination reproduces the matrix
            (the matrix is probably not unitary).
    """
    # Determine a, b/2, c/2 and d/2 (3 different cases).
    # Note: everything is modulo 2pi.
    two_pi = 2 * math.pi
    if abs(matrix[0][1]) < TOLERANCE:
        # Case 1: sin(c/2) == 0.
        a = _a_from_two_a(cmath.phase(matrix[0][0] * matrix[1][1]) % two_pi)  # pylint: disable=invalid-name
        b = cmath.phase(matrix[1][1]) - cmath.phase(matrix[0][0])  # pylint: disable=invalid-name
        possible_b_half = [
            (b / 2.0) % two_pi,
            (b / 2.0 + math.pi) % two_pi,
        ]
        # As we have fixed a, we need to find the correct sign for cos(c/2);
        # d/2 == 0 w.l.o.g.
        possible_c_half = [0.0, math.pi]
        b_half, c_half, d_half = _first_matching_parameters(
            matrix, a, possible_b_half, possible_c_half, [0]
        )
    elif abs(matrix[0][0]) < TOLERANCE:
        # Case 2: cos(c/2) == 0.
        a = _a_from_two_a(cmath.phase(-matrix[0][1] * matrix[1][0]) % two_pi)  # pylint: disable=invalid-name
        b = cmath.phase(matrix[1][0]) - cmath.phase(matrix[0][1]) + math.pi  # pylint: disable=invalid-name
        possible_b_half = [
            (b / 2.0) % two_pi,
            (b / 2.0 + math.pi) % two_pi,
        ]
        # As we have fixed a, we need to find the correct sign for sin(c/2);
        # d/2 == 0 w.l.o.g.
        possible_c_half = [math.pi / 2.0, 3.0 / 2.0 * math.pi]
        b_half, c_half, d_half = _first_matching_parameters(
            matrix, a, possible_b_half, possible_c_half, [0]
        )
    else:
        # Case 3: sin(c/2) != 0 and cos(c/2) != 0.
        a = _a_from_two_a(cmath.phase(matrix[0][0] * matrix[1][1]) % two_pi)  # pylint: disable=invalid-name
        two_d = 2.0 * cmath.phase(matrix[0][1]) - 2.0 * cmath.phase(matrix[0][0])
        possible_d_half = [(two_d / 4.0 + k * math.pi / 2.0) % two_pi for k in range(4)]
        two_b = 2.0 * cmath.phase(matrix[1][0]) - 2.0 * cmath.phase(matrix[0][0])
        possible_b_half = [(two_b / 4.0 + k * math.pi / 2.0) % two_pi for k in range(4)]
        tmp = math.acos(abs(matrix[1][1]))
        possible_c_half = [
            tmp % two_pi,
            (tmp + math.pi) % two_pi,
            (-1.0 * tmp) % two_pi,
            (-1.0 * tmp + math.pi) % two_pi,
        ]
        b_half, c_half, d_half = _first_matching_parameters(
            matrix, a, possible_b_half, possible_c_half, possible_d_half
        )
    return (a, b_half, c_half, d_half)
def _decompose_arb1qubit(cmd):
    """
    Use Z-Y decomposition of Nielsen and Chuang (Theorem 4.1).

    An arbitrary one qubit gate matrix can be written as
        U = [[exp(j*(a-b/2-d/2))*cos(c/2), -exp(j*(a-b/2+d/2))*sin(c/2)],
             [exp(j*(a+b/2-d/2))*sin(c/2), exp(j*(a+b/2+d/2))*cos(c/2)]]
    where a,b,c,d are real numbers.  Then U = exp(j*a) Rz(b) Ry(c) Rz(d).
    If the matrix is element of SU(2) (determinant == 1), then we can
    choose a = 0.
    """
    a, b_half, c_half, d_half = _find_parameters(cmd.gate.matrix.tolist())  # pylint: disable=invalid-name
    qb = cmd.qubits
    with Control(cmd.engine, cmd.control_qubits):
        # Apply Rz(d) Ry(c) Rz(b), skipping rotations equal to the
        # zero-angle gate.
        for rotation, identity in (
            (Rz(2 * d_half), Rz(0)),
            (Ry(2 * c_half), Ry(0)),
            (Rz(2 * b_half), Rz(0)),
        ):
            if rotation != identity:
                rotation | qb
        if a != 0:
            Ph(a) | qb
#: Decomposition rules
# Registers the Z-Y decomposition for any BasicGate that _recognize_arb1qubit
# accepts (uncontrolled gates exposing a 2x2 matrix).
all_defined_decomposition_rules = [DecompositionRule(BasicGate, _decompose_arb1qubit, _recognize_arb1qubit)]
|
|
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import os
import sys
import weakref
import threading
import array
import Queue
from traceback import format_exc
from multiprocessing import Process, current_process, active_children, Pool, util, connection
from multiprocessing.process import AuthenticationString
from multiprocessing.forking import exit, Popen, assert_spawning, ForkingPickler
from multiprocessing.util import Finalize, info
try:
from cPickle import PicklingError
except ImportError:
from pickle import PicklingError
#
# Register some things for pickling
#
def reduce_array(a):
    # Pickle helper: rebuild an array.array from its typecode and raw byte
    # string (Python 2 array API; tostring() is the pre-3.2 name of tobytes()).
    return array.array, (a.typecode, a.tostring())
# Teach ForkingPickler how to serialize array.array instances.
ForkingPickler.register(array.array, reduce_array)

# Concrete result types of dict.items()/keys()/values() on this interpreter;
# used so proxies can recognize these result objects.
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
#
# Type for identifying shared objects
#
class Token(object):
    '''
    Type to uniquely identify a shared object
    '''
    __slots__ = ('typeid', 'address', 'id')

    def __init__(self, typeid, address, id):
        self.typeid = typeid
        self.address = address
        self.id = id

    def __getstate__(self):
        # __slots__ classes have no __dict__, so pickling goes through here.
        return (self.typeid, self.address, self.id)

    def __setstate__(self, state):
        (self.typeid, self.address, self.id) = state

    def __repr__(self):
        return 'Token(typeid={0!r}, address={1!r}, id={2!r})'.format(
            self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds=None):
    '''
    Send a message to manager using connection `c` and return response.

    `args`/`kwds` are forwarded verbatim in the request tuple.  Raises the
    exception built by convert_to_error() when the server replies with
    anything other than '#RETURN'.
    '''
    # Use a None sentinel instead of a shared mutable {} default so that a
    # receiver mutating the mapping cannot leak state between calls; the
    # payload sent on the wire is unchanged ({} when kwds is omitted).
    if kwds is None:
        kwds = {}
    c.send((id, methodname, args, kwds))
    kind, result = c.recv()
    if kind == '#RETURN':
        return result
    raise convert_to_error(kind, result)
def convert_to_error(kind, result):
    # Translate a (kind, result) server reply into an exception instance;
    # the caller raises the returned object.
    if kind == '#ERROR':
        # The server already sent a pickled exception instance.
        return result
    if kind == '#TRACEBACK':
        assert type(result) is str
        return RemoteError(result)
    if kind == '#UNSERIALIZABLE':
        assert type(result) is str
        return RemoteError('Unserializable message: %s\n' % result)
    return ValueError('Unrecognized message type')
class RemoteError(Exception):
    # Raised locally to report a traceback string captured on the server
    # side; __str__ frames that text with separator bars.
    def __str__(self):
        bar = '-' * 75
        return '\n%s\n%s%s' % (bar, str(self.args[0]), bar)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
    '''
    Return a list of names of methods of `obj`
    '''
    # hasattr(x, '__call__') rather than callable() -- the latter was
    # removed in early Python 3 versions and this keeps 2/3 parity.
    return [name for name in dir(obj)
            if hasattr(getattr(obj, name), '__call__')]
def public_methods(obj):
    '''
    Return a list of names of methods of `obj` which do not start with '_'
    '''
    return [name for name in all_methods(obj) if not name.startswith('_')]
#
# Server which is run in a process controlled by a manager
#
class Server(object):
    '''
    Server class which runs in a process controlled by a manager object
    '''
    # Administrative methods that remote clients may invoke directly;
    # handle_request() rejects anything not listed here.
    public = ['shutdown', 'create', 'accept_connection', 'get_methods',
              'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']

    def __init__(self, registry, address, authkey, serializer):
        # registry maps typeid -> (callable, exposed, method_to_typeid,
        # proxytype); see create() for how each entry is used.
        assert isinstance(authkey, bytes)
        self.registry = registry
        self.authkey = AuthenticationString(authkey)
        Listener, Client = listener_client[serializer]

        # do authentication later
        self.listener = Listener(address=address, backlog=16)
        self.address = self.listener.address

        # ident '0' is a reserved dummy entry (no object, nothing exposed).
        self.id_to_obj = {'0': (None, ())}
        self.id_to_refcount = {}
        # RLock: create() calls incref() while already holding the mutex.
        self.mutex = threading.RLock()
        self.stop = 0

    def serve_forever(self):
        '''
        Run the server forever
        '''
        current_process()._manager_server = self
        try:
            try:
                while 1:
                    try:
                        c = self.listener.accept()
                    except (OSError, IOError):
                        # Transient accept failure -- keep listening.
                        continue
                    # One daemon thread per incoming connection.
                    t = threading.Thread(target=self.handle_request, args=(c,))
                    t.daemon = True
                    t.start()
            except (KeyboardInterrupt, SystemExit):
                pass
        finally:
            # Non-zero `stop` makes serve_client() loops exit.
            self.stop = 999
            self.listener.close()

    def handle_request(self, c):
        '''
        Handle a new connection
        '''
        funcname = result = request = None
        try:
            # Mutual challenge/response authentication using self.authkey.
            connection.deliver_challenge(c, self.authkey)
            connection.answer_challenge(c, self.authkey)
            request = c.recv()
            ignore, funcname, args, kwds = request
            assert funcname in self.public, '%r unrecognized' % funcname
            func = getattr(self, funcname)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            try:
                result = func(c, *args, **kwds)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            else:
                msg = ('#RETURN', result)
        try:
            c.send(msg)
        except Exception, e:
            # Reply itself failed (e.g. unpicklable result); try to report
            # the failure, then just log it.
            try:
                c.send(('#TRACEBACK', format_exc()))
            except Exception:
                pass
            util.info('Failure to send message: %r', msg)
            util.info(' ... request was %r', request)
            util.info(' ... exception was %r', e)
        c.close()

    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)
        # Bind hot-path lookups to locals for the request loop.
        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj

        while not self.stop:
            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                obj, exposed, gettypeid = id_to_obj[ident]

                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed)
                        )
                function = getattr(obj, methodname)

                try:
                    res = function(*args, **kwds)
                except Exception, e:
                    # Exceptions from the shared object itself are returned
                    # as values for the proxy to re-raise.
                    msg = ('#ERROR', e)
                else:
                    # If this method's result has a registered typeid, wrap
                    # it in a new shared object and return a proxy token.
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)

            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    # Unknown/unexposed method: fall back to the special
                    # __str__/__repr__/#GETVALUE handlers.
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(
                            self, conn, ident, obj, *args, **kwds
                            )
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())

            except EOFError:
                # Client went away -- terminate this serving thread.
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)

            except Exception:
                msg = ('#TRACEBACK', format_exc())

            try:
                try:
                    send(msg)
                except Exception, e:
                    # Result not serializable -- send the traceback instead.
                    send(('#UNSERIALIZABLE', format_exc()))
            except Exception, e:
                util.info('exception in thread serving %r',
                          threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)

    def fallback_getvalue(self, conn, ident, obj):
        # Fallback: return the underlying shared object itself.
        return obj

    def fallback_str(self, conn, ident, obj):
        # Fallback implementation of str() for proxies.
        return str(obj)

    def fallback_repr(self, conn, ident, obj):
        # Fallback implementation of repr() for proxies.
        return repr(obj)

    # Methods served even when not in a type's exposed list; see the
    # AttributeError branch of serve_client().
    fallback_mapping = {
        '__str__':fallback_str,
        '__repr__':fallback_repr,
        '#GETVALUE':fallback_getvalue
        }

    def dummy(self, c):
        # No-op used by BaseManager.connect() to verify the connection.
        pass

    def debug_info(self, c):
        '''
        Return some info --- useful to spot problems with refcounting
        '''
        self.mutex.acquire()
        try:
            result = []
            keys = self.id_to_obj.keys()
            keys.sort()
            for ident in keys:
                if ident != '0':
                    result.append('  %s:       refcount=%s\n    %s' %
                                  (ident, self.id_to_refcount[ident],
                                   str(self.id_to_obj[ident][0])[:75]))
            return '\n'.join(result)
        finally:
            self.mutex.release()

    def number_of_objects(self, c):
        '''
        Number of shared objects
        '''
        return len(self.id_to_obj) - 1      # don't count ident='0'

    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            try:
                util.debug('manager received shutdown message')
                # Acknowledge before tearing anything down so the client's
                # dispatch() call completes.
                c.send(('#RETURN', None))

                if sys.stdout != sys.__stdout__:
                    util.debug('resetting stdout, stderr')
                    sys.stdout = sys.__stdout__
                    sys.stderr = sys.__stderr__

                util._run_finalizers(0)

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.terminate()

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.join()

                util._run_finalizers()
                util.info('manager exiting with exitcode 0')
            except:
                import traceback
                traceback.print_exc()
        finally:
            exit(0)

    def create(self, c, typeid, *args, **kwds):
        '''
        Create a new shared object and return its id
        '''
        self.mutex.acquire()
        try:
            callable, exposed, method_to_typeid, proxytype = \
                      self.registry[typeid]

            if callable is None:
                # Registered without a factory: caller supplies the object.
                assert len(args) == 1 and not kwds
                obj = args[0]
            else:
                obj = callable(*args, **kwds)

            if exposed is None:
                exposed = public_methods(obj)
            if method_to_typeid is not None:
                assert type(method_to_typeid) is dict
                exposed = list(exposed) + list(method_to_typeid)

            ident = '%x' % id(obj)      # convert to string because xmlrpclib
                                        # only has 32 bit signed integers
            util.debug('%r callable returned object with id %r', typeid, ident)

            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
            if ident not in self.id_to_refcount:
                self.id_to_refcount[ident] = 0
            # increment the reference count immediately, to avoid
            # this object being garbage collected before a Proxy
            # object for it can be created.  The caller of create()
            # is responsible for doing a decref once the Proxy object
            # has been created.
            self.incref(c, ident)
            return ident, tuple(exposed)
        finally:
            self.mutex.release()

    def get_methods(self, c, token):
        '''
        Return the methods of the shared object indicated by token
        '''
        return tuple(self.id_to_obj[token.id][1])

    def accept_connection(self, c, name):
        '''
        Spawn a new thread to serve this connection
        '''
        threading.current_thread().name = name
        c.send(('#RETURN', None))
        # Blocks, serving this client until EOF or stop.
        self.serve_client(c)

    def incref(self, c, ident):
        self.mutex.acquire()
        try:
            self.id_to_refcount[ident] += 1
        finally:
            self.mutex.release()

    def decref(self, c, ident):
        self.mutex.acquire()
        try:
            assert self.id_to_refcount[ident] >= 1
            self.id_to_refcount[ident] -= 1
            # Drop the object entirely once no proxy references remain.
            if self.id_to_refcount[ident] == 0:
                del self.id_to_obj[ident], self.id_to_refcount[ident]
                util.debug('disposing of obj with id %r', ident)
        finally:
            self.mutex.release()
#
# Class to represent state of a manager
#
class State(object):
    # Mutable holder for a manager's lifecycle stage; kept as a separate
    # slotted object so it can be shared with finalizer callbacks.
    __slots__ = ['value']
    # Lifecycle stages: value is one of the following constants.
    INITIAL = 0
    STARTED = 1
    SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
# Maps a serializer name to the (Listener, Client) connection classes used
# for that wire format.
listener_client = {
    'pickle' : (connection.Listener, connection.Client),
    'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
    }
#
# Definition of BaseManager
#
class BaseManager(object):
    '''
    Base class for managers
    '''
    # typeid -> (callable, exposed, method_to_typeid, proxytype); shared by
    # subclasses until `register()` gives the subclass its own copy
    _registry = {}
    _Server = Server

    def __init__(self, address=None, authkey=None, serializer='pickle'):
        if authkey is None:
            authkey = current_process().authkey
        self._address = address     # XXX not final address if eg ('', 0)
        self._authkey = AuthenticationString(authkey)
        self._state = State()
        self._state.value = State.INITIAL
        self._serializer = serializer
        self._Listener, self._Client = listener_client[serializer]

    def __reduce__(self):
        # pickle as a reconnect-by-address stub
        return type(self).from_address, \
               (self._address, self._authkey, self._serializer)

    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute
        '''
        assert self._state.value == State.INITIAL
        return Server(self._registry, self._address,
                      self._authkey, self._serializer)

    def connect(self):
        '''
        Connect manager object to the server process
        '''
        Listener, Client = listener_client[self._serializer]
        conn = Client(self._address, authkey=self._authkey)
        # 'dummy' round-trip verifies the server is reachable/authenticated
        dispatch(conn, None, 'dummy')
        self._state.value = State.STARTED

    def start(self, initializer=None, initargs=()):
        '''
        Spawn a server process for this manager object
        '''
        assert self._state.value == State.INITIAL
        if initializer is not None and not hasattr(initializer, '__call__'):
            raise TypeError('initializer must be a callable')

        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)

        # spawn process which runs a server
        self._process = Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, self._authkey,
                  self._serializer, writer, initializer, initargs),
            )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__ + '-' + ident
        self._process.start()

        # get address of server
        writer.close()
        self._address = reader.recv()
        reader.close()

        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, self._Client),
            exitpriority=0
            )

    @classmethod
    def _run_server(cls, registry, address, authkey, serializer, writer,
                    initializer=None, initargs=()):
        '''
        Create a server, report its address and run it
        '''
        if initializer is not None:
            initializer(*initargs)

        # create server
        server = cls._Server(registry, address, authkey, serializer)

        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()

        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()

    def _create(self, typeid, *args, **kwds):
        '''
        Create a new shared object; return the token and exposed tuple
        '''
        assert self._state.value == State.STARTED, 'server not yet started'
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
        finally:
            conn.close()
        return Token(typeid, self._address, id), exposed

    def join(self, timeout=None):
        '''
        Join the manager process (if it has been spawned)
        '''
        self._process.join(timeout)

    def _debug_info(self):
        '''
        Return some info about the servers shared objects and connections
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'debug_info')
        finally:
            conn.close()

    def _number_of_objects(self):
        '''
        Return the number of shared objects
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'number_of_objects')
        finally:
            conn.close()

    def __enter__(self):
        # NOTE(review): does not start() the manager -- caller must have
        # called start()/connect() first; confirm this matches callers.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    @staticmethod
    def _finalize_manager(process, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                # best-effort: the server may already be gone
                pass

            process.join(timeout=0.2)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=0.1)
                    if process.is_alive():
                        util.info('manager still alive after terminate')

        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass

    # read-only view of the server address
    address = property(lambda self: self._address)

    @classmethod
    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
                 method_to_typeid=None, create_method=True):
        '''
        Register a typeid with the manager type
        '''
        # copy-on-write: give this subclass its own registry so the
        # registration does not leak into siblings/parents
        if '_registry' not in cls.__dict__:
            cls._registry = cls._registry.copy()

        if proxytype is None:
            proxytype = AutoProxy

        exposed = exposed or getattr(proxytype, '_exposed_', None)

        method_to_typeid = method_to_typeid or \
                           getattr(proxytype, '_method_to_typeid_', None)

        if method_to_typeid:
            for key, value in method_to_typeid.items():
                assert type(key) is str, '%r is not a string' % key
                assert type(value) is str, '%r is not a string' % value

        cls._registry[typeid] = (
            callable, exposed, method_to_typeid, proxytype
            )

        if create_method:
            def temp(self, *args, **kwds):
                util.debug('requesting creation of a shared %r object', typeid)
                token, exp = self._create(typeid, *args, **kwds)
                proxy = proxytype(
                    token, self._serializer, manager=self,
                    authkey=self._authkey, exposed=exp
                    )
                # drop the extra reference taken by Server.create(); the
                # proxy's own incref keeps the object alive
                conn = self._Client(token.address, authkey=self._authkey)
                dispatch(conn, None, 'decref', (token.id,))
                return proxy
            temp.__name__ = typeid
            setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
    '''A ``set`` that empties itself in the child process after a fork.'''

    def __init__(self):
        # the child must not inherit the parent's proxy identities
        util.register_after_fork(self, lambda obj: obj.clear())

    def __reduce__(self):
        # pickle as a fresh, empty instance
        return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
    '''
    A base for proxies of shared objects
    '''
    # address -> (thread-local connection record, set of owned ids),
    # shared by every proxy in this process
    _address_to_local = {}
    _mutex = util.ForkAwareThreadLock()

    def __init__(self, token, serializer, manager=None,
                 authkey=None, exposed=None, incref=True):
        BaseProxy._mutex.acquire()
        try:
            tls_idset = BaseProxy._address_to_local.get(token.address, None)
            if tls_idset is None:
                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
                BaseProxy._address_to_local[token.address] = tls_idset
        finally:
            BaseProxy._mutex.release()

        # self._tls is used to record the connection used by this
        # thread to communicate with the manager at token.address
        self._tls = tls_idset[0]

        # self._idset is used to record the identities of all shared
        # objects for which the current process owns references and
        # which are in the manager at token.address
        self._idset = tls_idset[1]

        self._token = token
        self._id = self._token.id
        self._manager = manager
        self._serializer = serializer
        self._Client = listener_client[serializer][1]

        # authkey resolution order: explicit arg, then manager, then process
        if authkey is not None:
            self._authkey = AuthenticationString(authkey)
        elif self._manager is not None:
            self._authkey = self._manager._authkey
        else:
            self._authkey = current_process().authkey

        if incref:
            self._incref()

        util.register_after_fork(self, BaseProxy._after_fork)

    def _connect(self):
        '''Open this thread's private connection to the manager.'''
        util.debug('making connection to manager')
        name = current_process().name
        if threading.current_thread().name != 'MainThread':
            name += '|' + threading.current_thread().name
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'accept_connection', (name,))
        self._tls.connection = conn

    def _callmethod(self, methodname, args=(), kwds={}):
        '''
        Try to call a method of the referent and return a copy of the result
        '''
        try:
            conn = self._tls.connection
        except AttributeError:
            # first call from this thread: establish a connection lazily
            util.debug('thread %r does not own a connection',
                       threading.current_thread().name)
            self._connect()
            conn = self._tls.connection

        conn.send((self._id, methodname, args, kwds))
        kind, result = conn.recv()

        if kind == '#RETURN':
            return result
        elif kind == '#PROXY':
            # server returned a token for a new shared object: wrap it
            exposed, token = result
            proxytype = self._manager._registry[token.typeid][-1]
            token.address = self._token.address
            proxy = proxytype(
                token, self._serializer, manager=self._manager,
                authkey=self._authkey, exposed=exposed
                )
            # drop the creation-time extra reference (see register())
            conn = self._Client(token.address, authkey=self._authkey)
            dispatch(conn, None, 'decref', (token.id,))
            return proxy
        raise convert_to_error(kind, result)

    def _getvalue(self):
        '''
        Get a copy of the value of the referent
        '''
        return self._callmethod('#GETVALUE')

    def _incref(self):
        '''Register one process-side reference with the manager server.'''
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'incref', (self._id,))
        util.debug('INCREF %r', self._token.id)

        self._idset.add(self._id)

        state = self._manager and self._manager._state

        # matching decref runs when the proxy is garbage collected
        self._close = util.Finalize(
            self, BaseProxy._decref,
            args=(self._token, self._authkey, state,
                  self._tls, self._idset, self._Client),
            exitpriority=10
            )

    @staticmethod
    def _decref(token, authkey, state, tls, idset, _Client):
        idset.discard(token.id)

        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = _Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id,))
            except Exception, e:
                util.debug('... decref failed %s', e)

        else:
            util.debug('DECREF %r -- manager already shutdown', token.id)

        # check whether we can close this thread's connection because
        # the process owns no more references to objects for this manager
        if not idset and hasattr(tls, 'connection'):
            util.debug('thread %r has no more proxies so closing conn',
                       threading.current_thread().name)
            tls.connection.close()
            del tls.connection

    def _after_fork(self):
        # the child starts with its own reference; the manager link is reset
        self._manager = None
        try:
            self._incref()
        except Exception, e:
            # the proxy may just be for a manager which has shutdown
            util.info('incref failed: %s' % e)

    def __reduce__(self):
        kwds = {}
        if Popen.thread_is_spawning():
            kwds['authkey'] = self._authkey

        if getattr(self, '_isauto', False):
            # auto-generated proxy classes cannot be pickled directly;
            # rebuild through the AutoProxy factory instead
            kwds['exposed'] = self._exposed_
            return (RebuildProxy,
                    (AutoProxy, self._token, self._serializer, kwds))
        else:
            return (RebuildProxy,
                    (type(self), self._token, self._serializer, kwds))

    def __deepcopy__(self, memo):
        # a deep copy of a proxy is a local copy of the referent
        return self._getvalue()

    def __repr__(self):
        return '<%s object, typeid %r at %s>' % \
               (type(self).__name__, self._token.typeid, '0x%x' % id(self))

    def __str__(self):
        '''
        Return representation of the referent (or a fall-back if that fails)
        '''
        try:
            return self._callmethod('__repr__')
        except Exception:
            return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
    '''
    Unpickle a proxy object.

    When unpickling happens inside the manager server that owns the
    referent, the referent itself is returned instead of a new proxy.
    '''
    server = getattr(current_process(), '_manager_server', None)
    if server and server.address == token.address:
        return server.id_to_obj[token.id][0]

    # a process that inherited the proxy already holds a reference,
    # so it must not increment the count again
    incref = (
        kwds.pop('incref', True) and
        not getattr(current_process(), '_inheriting', False)
    )
    return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
    '''
    Return a proxy type whose methods are given by `exposed`
    '''
    # NOTE: _cache is a deliberate mutable default -- it memoizes the
    # generated classes so equal (name, exposed) pairs share one type.
    exposed = tuple(exposed)
    try:
        return _cache[(name, exposed)]
    except KeyError:
        pass

    dic = {}

    # generate one forwarding method per exposed name (Py2 exec statement)
    for meth in exposed:
        exec '''def %s(self, *args, **kwds):
        return self._callmethod(%r, args, kwds)''' % (meth, meth) in dic

    ProxyType = type(name, (BaseProxy,), dic)
    ProxyType._exposed_ = exposed
    _cache[(name, exposed)] = ProxyType
    return ProxyType
def AutoProxy(token, serializer, manager=None, authkey=None,
              exposed=None, incref=True):
    '''
    Build a proxy class on the fly for `token` and return an instance of it.
    '''
    make_client = listener_client[serializer][1]

    # ask the server which methods the referent exposes, unless told;
    # note this deliberately uses the *caller-supplied* authkey (may be None)
    if exposed is None:
        conn = make_client(token.address, authkey=authkey)
        try:
            exposed = dispatch(conn, None, 'get_methods', (token,))
        finally:
            conn.close()

    # resolve the authkey only after the get_methods round-trip
    if authkey is None and manager is not None:
        authkey = manager._authkey
    if authkey is None:
        authkey = current_process().authkey

    proxy_cls = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
    instance = proxy_cls(token, serializer, manager=manager, authkey=authkey,
                         incref=incref)
    instance._isauto = True
    return instance
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
    '''Simple attribute bag; repr hides names starting with an underscore.'''

    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        shown = sorted(
            '%s=%r' % (key, val)
            for key, val in self.__dict__.items()
            if not key.startswith('_')
        )
        return 'Namespace(%s)' % str.join(', ', shown)
class Value(object):
    '''Mutable holder for a single value tagged with a typecode.

    The `lock` argument is accepted for API compatibility and ignored.
    '''

    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value

    def get(self):
        return self._value

    def set(self, value):
        self._value = value

    def __repr__(self):
        return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)

    value = property(get, set)
def Array(typecode, sequence, lock=True):
    '''Return a plain ``array.array``; `lock` is ignored (compatibility only).'''
    return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
    '''Proxy for iterator objects living in the manager process.'''
    # XXX remove methods for Py3.0 and Py2.6
    _exposed_ = ('__next__', 'next', 'send', 'throw', 'close')
    def __iter__(self):
        # iteration happens locally; each step is a remote call
        return self
    def __next__(self, *args):
        return self._callmethod('__next__', args)
    def next(self, *args):
        return self._callmethod('next', args)
    def send(self, *args):
        return self._callmethod('send', args)
    def throw(self, *args):
        return self._callmethod('throw', args)
    def close(self, *args):
        return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
    '''Proxy for lock-like referents; also usable as a context manager.'''
    _exposed_ = ('acquire', 'release')
    def acquire(self, blocking=True):
        return self._callmethod('acquire', (blocking,))
    def release(self):
        return self._callmethod('release')
    def __enter__(self):
        return self._callmethod('acquire')
    def __exit__(self, exc_type, exc_val, exc_tb):
        return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
    '''Proxy for threading.Condition referents.'''
    # XXX will Condition.notifyAll() name be available in Py3.0?
    _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def notify(self):
        return self._callmethod('notify')
    def notify_all(self):
        return self._callmethod('notify_all')
class EventProxy(BaseProxy):
    '''Proxy for threading.Event referents.'''
    _exposed_ = ('is_set', 'set', 'clear', 'wait')
    def is_set(self):
        return self._callmethod('is_set')
    def set(self):
        return self._callmethod('set')
    def clear(self):
        return self._callmethod('clear')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
class NamespaceProxy(BaseProxy):
    '''Proxy for Namespace referents; plain attribute access is forwarded.'''
    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
    def __getattr__(self, key):
        # names starting with '_' are local proxy state, never forwarded
        if key[0] == '_':
            return object.__getattribute__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__getattribute__', (key,))
    def __setattr__(self, key, value):
        if key[0] == '_':
            return object.__setattr__(self, key, value)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__setattr__', (key, value))
    def __delattr__(self, key):
        if key[0] == '_':
            return object.__delattr__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
    '''Proxy for Value referents; exposes a read/write ``value`` property.'''
    _exposed_ = ('get', 'set')
    def get(self):
        return self._callmethod('get')
    def set(self, value):
        return self._callmethod('set', (value,))
    value = property(get, set)
# Generated proxy type exposing the (Py2) list interface.
BaseListProxy = MakeProxyType('BaseListProxy', (
    '__add__', '__contains__', '__delitem__', '__delslice__',
    '__getitem__', '__getslice__', '__len__', '__mul__',
    '__reversed__', '__rmul__', '__setitem__', '__setslice__',
    'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
    'reverse', 'sort', '__imul__'
    ))  # XXX __getslice__ and __setslice__ unneeded in Py3.0
class ListProxy(BaseListProxy):
    '''List proxy with working in-place operators.

    ``+=``/``*=`` must return the proxy itself (not the remote result) so
    that augmented assignment rebinds the name to the same proxy object.
    '''
    def __iadd__(self, value):
        self._callmethod('extend', (value,))
        return self
    def __imul__(self, value):
        self._callmethod('__imul__', (value,))
        return self
# Generated proxy type exposing the (Py2) dict interface.
DictProxy = MakeProxyType('DictProxy', (
    '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
    '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
    ))
# iterating a DictProxy yields an IteratorProxy rather than a local copy
DictProxy._method_to_typeid_ = {
    '__iter__': 'Iterator',
    }
# Generated proxy type for array.array referents (element access only).
ArrayProxy = MakeProxyType('ArrayProxy', (
    '__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__'
    ))  # XXX __getslice__ and __setslice__ unneeded in Py3.0
# Generated proxy type for multiprocessing.Pool referents.
PoolProxy = MakeProxyType('PoolProxy', (
    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
    'map', 'map_async', 'terminate'
    ))
# async/iterator-returning Pool methods hand back proxies of the
# corresponding registered typeids rather than local copies
PoolProxy._method_to_typeid_ = {
    'apply_async': 'AsyncResult',
    'map_async': 'AsyncResult',
    'imap': 'Iterator',
    'imap_unordered': 'Iterator'
    }
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
    '''
    Subclass of `BaseManager` which supports a number of shared object types.

    The types registered are those intended for the synchronization
    of threads, plus `dict`, `list` and `Namespace`.

    The `multiprocessing.Manager()` function creates started instances of
    this class.
    '''
    # all shared types are added via SyncManager.register() calls below
# Register the standard shared types.  Entries without an explicit proxy
# type get an AutoProxy generated from the referent's public methods.
SyncManager.register('Queue', Queue.Queue)
SyncManager.register('JoinableQueue', Queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
                     AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Pool', Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)

# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import conversion_custom_variable
from google.ads.googleads.v9.services.types import (
conversion_custom_variable_service,
)
from google.rpc import status_pb2 # type: ignore
from .transports.base import (
ConversionCustomVariableServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import ConversionCustomVariableServiceGrpcTransport
class ConversionCustomVariableServiceClientMeta(type):
    """Metaclass for the ConversionCustomVariableService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # transport label -> transport class; insertion order defines the default
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[ConversionCustomVariableServiceTransport]]
    _transport_registry["grpc"] = ConversionCustomVariableServiceGrpcTransport

    def get_transport_class(
        cls, label: Optional[str] = None,
    ) -> Type[ConversionCustomVariableServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class ConversionCustomVariableServiceClient(
metaclass=ConversionCustomVariableServiceClientMeta
):
"""Service to manage conversion custom variables."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
    """Derive the mTLS variant of *api_endpoint*.

    "*.googleapis.com" and "*.sandbox.googleapis.com" hosts gain an
    ".mtls" component; any other endpoint (or one that already carries
    ".mtls") is returned unchanged.

    Args:
        api_endpoint (Optional[str]): the api endpoint to convert.
    Returns:
        str: converted mTLS api endpoint.
    """
    if not api_endpoint:
        return api_endpoint

    pattern = re.compile(
        r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
    )
    parts = pattern.match(api_endpoint).groupdict()

    # already mTLS, or not a googleapis.com host: nothing to do
    if parts["mtls"] or not parts["googledomain"]:
        return api_endpoint

    if parts["sandbox"]:
        return api_endpoint.replace(
            "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
        )
    return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
# Production endpoint, plus its mTLS variant derived at class-body time
# (the staticmethod's raw function is reached through __func__).
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
    DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
    """Creates an instance of this client using the provided credentials info.

    Args:
        info (dict): The service account private key info.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        ConversionCustomVariableServiceClient: The constructed client.
    """
    # inject the derived credentials and defer to the normal constructor
    kwargs["credentials"] = service_account.Credentials.from_service_account_info(
        info
    )
    return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
    file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        ConversionCustomVariableServiceClient: The constructed client.
    """
    # inject the derived credentials and defer to the normal constructor
    kwargs["credentials"] = service_account.Credentials.from_service_account_file(
        filename
    )
    return cls(*args, **kwargs)

from_service_account_json = from_service_account_file
@property
def transport(self) -> ConversionCustomVariableServiceTransport:
    """The transport instance backing this client.

    Returns:
        ConversionCustomVariableServiceTransport: The transport used by the client instance.
    """
    return self._transport
def __enter__(self):
    # Support "with client: ..." -- the client itself is the managed resource.
    return self
def __exit__(self, type, value, traceback):
    """Releases underlying transport's resources.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the transport
        and may cause errors in other clients!
    """
    # unconditionally close the transport, even if an exception is in flight
    self.transport.close()
@staticmethod
def conversion_custom_variable_path(
    customer_id: str, conversion_custom_variable_id: str,
) -> str:
    """Return a fully-qualified conversion_custom_variable string."""
    template = "customers/{customer_id}/conversionCustomVariables/{conversion_custom_variable_id}"
    return template.format(
        customer_id=customer_id,
        conversion_custom_variable_id=conversion_custom_variable_id,
    )
@staticmethod
def parse_conversion_custom_variable_path(path: str) -> Dict[str, str]:
    """Parse a conversion_custom_variable path into its component segments."""
    match = re.match(
        r"^customers/(?P<customer_id>.+?)/conversionCustomVariables/(?P<conversion_custom_variable_id>.+?)$",
        path,
    )
    if match:
        return match.groupdict()
    return {}
@staticmethod
def customer_path(customer_id: str,) -> str:
    """Return a fully-qualified customer string."""
    template = "customers/{customer_id}"
    return template.format(customer_id=customer_id)
@staticmethod
def parse_customer_path(path: str) -> Dict[str, str]:
    """Parse a customer path into its component segments."""
    match = re.match(r"^customers/(?P<customer_id>.+?)$", path)
    if match:
        return match.groupdict()
    return {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
    """Return a fully-qualified billing_account string."""
    template = "billingAccounts/{billing_account}"
    return template.format(billing_account=billing_account)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
    """Parse a billing_account path into its component segments."""
    match = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
    if match:
        return match.groupdict()
    return {}
@staticmethod
def common_folder_path(folder: str,) -> str:
    """Return a fully-qualified folder string."""
    template = "folders/{folder}"
    return template.format(folder=folder)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
    """Parse a folder path into its component segments."""
    match = re.match(r"^folders/(?P<folder>.+?)$", path)
    if match:
        return match.groupdict()
    return {}
@staticmethod
def common_organization_path(organization: str,) -> str:
    """Return a fully-qualified organization string."""
    template = "organizations/{organization}"
    return template.format(organization=organization)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
    """Parse a organization path into its component segments."""
    match = re.match(r"^organizations/(?P<organization>.+?)$", path)
    if match:
        return match.groupdict()
    return {}
@staticmethod
def common_project_path(project: str,) -> str:
    """Return a fully-qualified project string."""
    template = "projects/{project}"
    return template.format(project=project)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
    """Parse a project path into its component segments."""
    match = re.match(r"^projects/(?P<project>.+?)$", path)
    if match:
        return match.groupdict()
    return {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
    """Return a fully-qualified location string."""
    template = "projects/{project}/locations/{location}"
    return template.format(project=project, location=location)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
    """Parse a location path into its component segments."""
    match = re.match(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
    )
    if match:
        return match.groupdict()
    return {}
def __init__(
    self,
    *,
    credentials: Optional[ga_credentials.Credentials] = None,
    transport: Union[
        str, ConversionCustomVariableServiceTransport, None
    ] = None,
    client_options: Optional[client_options_lib.ClientOptions] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
    """Instantiate the conversion custom variable service client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Union[str, ~.ConversionCustomVariableServiceTransport]): The
            transport to use. If set to None, a transport is chosen
            automatically.
        client_options (google.api_core.client_options.ClientOptions): Custom options for the
            client. It won't take effect if a ``transport`` instance is provided.
            (1) The ``api_endpoint`` property can be used to override the
            default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
            environment variable can also be used to override the endpoint:
            "always" (always use the default mTLS endpoint), "never" (always
            use the default regular endpoint) and "auto" (auto switch to the
            default mTLS endpoint if client certificate is present, this is
            the default value). However, the ``api_endpoint`` property takes
            precedence if provided.
            (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
            is "true", then the ``client_cert_source`` property can be used
            to provide client certificate for mutual TLS transport. If
            not provided, the default SSL client certificate will be used if
            present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
            set, no client certificate will be used.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
    """
    # normalize client_options to a ClientOptions instance
    if isinstance(client_options, dict):
        client_options = client_options_lib.from_dict(client_options)
    if client_options is None:
        client_options = client_options_lib.ClientOptions()

    # Create SSL credentials for mutual TLS if needed.
    if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
        "true",
        "false",
    ):
        raise ValueError(
            "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
        )
    use_client_cert = (
        os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
    )

    ssl_credentials = None
    is_mtls = False
    if use_client_cert:
        if client_options.client_cert_source:
            # caller supplied a certificate source explicitly
            import grpc  # type: ignore

            cert, key = client_options.client_cert_source()
            ssl_credentials = grpc.ssl_channel_credentials(
                certificate_chain=cert, private_key=key
            )
            is_mtls = True
        else:
            # fall back to the default client certificate, if any
            creds = SslCredentials()
            is_mtls = creds.is_mtls
            ssl_credentials = creds.ssl_credentials if is_mtls else None

    # Figure out which api endpoint to use.
    if client_options.api_endpoint is not None:
        api_endpoint = client_options.api_endpoint
    else:
        use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_mtls_env == "never":
            api_endpoint = self.DEFAULT_ENDPOINT
        elif use_mtls_env == "always":
            api_endpoint = self.DEFAULT_MTLS_ENDPOINT
        elif use_mtls_env == "auto":
            api_endpoint = (
                self.DEFAULT_MTLS_ENDPOINT
                if is_mtls
                else self.DEFAULT_ENDPOINT
            )
        else:
            raise MutualTLSChannelError(
                "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
            )

    # Save or instantiate the transport.
    # Ordinarily, we provide the transport, but allowing a custom transport
    # instance provides an extensibility point for unusual situations.
    if isinstance(transport, ConversionCustomVariableServiceTransport):
        # transport is a ConversionCustomVariableServiceTransport instance.
        if credentials:
            raise ValueError(
                "When providing a transport instance, "
                "provide its credentials directly."
            )
        self._transport = transport
    elif isinstance(transport, str):
        # NOTE(review): the string-transport path ignores api_endpoint and
        # ssl_credentials computed above -- confirm this matches the
        # generator's intended behavior.
        Transport = type(self).get_transport_class(transport)
        self._transport = Transport(
            credentials=credentials, host=self.DEFAULT_ENDPOINT
        )
    else:
        self._transport = ConversionCustomVariableServiceGrpcTransport(
            credentials=credentials,
            host=api_endpoint,
            ssl_channel_credentials=ssl_credentials,
            client_info=client_info,
        )
def get_conversion_custom_variable(
    self,
    request: Union[
        conversion_custom_variable_service.GetConversionCustomVariableRequest,
        dict,
    ] = None,
    *,
    resource_name: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> conversion_custom_variable.ConversionCustomVariable:
    r"""Returns the requested conversion custom variable.

    List of thrown errors: `AuthenticationError <>`__
    `AuthorizationError <>`__ `HeaderError <>`__
    `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__

    Args:
        request (Union[google.ads.googleads.v9.services.types.GetConversionCustomVariableRequest, dict]):
            The request object. Request message for
            [ConversionCustomVariableService.GetConversionCustomVariable][google.ads.googleads.v9.services.ConversionCustomVariableService.GetConversionCustomVariable].
        resource_name (:class:`str`):
            Required. The resource name of the
            conversion custom variable to fetch.

            This corresponds to the ``resource_name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.ads.googleads.v9.resources.types.ConversionCustomVariable:
            A conversion custom variable
            See "About custom variables for
            conversions" at
            https://support.google.com/google-
            ads/answer/9964350
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    if request is not None and any([resource_name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Minor optimization to avoid making a copy if the user passes
    # in a conversion_custom_variable_service.GetConversionCustomVariableRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(
        request,
        conversion_custom_variable_service.GetConversionCustomVariableRequest,
    ):
        request = conversion_custom_variable_service.GetConversionCustomVariableRequest(
            request
        )
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if resource_name is not None:
            request.resource_name = resource_name

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[
        self._transport.get_conversion_custom_variable
    ]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("resource_name", request.resource_name),)
        ),
    )

    # Send the request.
    response = rpc(
        request, retry=retry, timeout=timeout, metadata=metadata,
    )

    # Done; return the response.
    return response
def mutate_conversion_custom_variables(
self,
request: Union[
conversion_custom_variable_service.MutateConversionCustomVariablesRequest,
dict,
] = None,
*,
customer_id: str = None,
operations: Sequence[
conversion_custom_variable_service.ConversionCustomVariableOperation
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> conversion_custom_variable_service.MutateConversionCustomVariablesResponse:
r"""Creates or updates conversion custom variables. Operation
statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `ConversionCustomVariableError <>`__
`DatabaseError <>`__ `HeaderError <>`__ `InternalError <>`__
`QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.MutateConversionCustomVariablesRequest, dict]):
The request object. Request message for
[ConversionCustomVariableService.MutateConversionCustomVariables][google.ads.googleads.v9.services.ConversionCustomVariableService.MutateConversionCustomVariables].
customer_id (:class:`str`):
Required. The ID of the customer
whose conversion custom variables are
being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v9.services.types.ConversionCustomVariableOperation]`):
Required. The list of operations to
perform on individual conversion custom
variables.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.services.types.MutateConversionCustomVariablesResponse:
Response message for
[ConversionCustomVariableService.MutateConversionCustomVariables][google.ads.googleads.v9.services.ConversionCustomVariableService.MutateConversionCustomVariables].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a conversion_custom_variable_service.MutateConversionCustomVariablesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
conversion_custom_variable_service.MutateConversionCustomVariablesRequest,
):
request = conversion_custom_variable_service.MutateConversionCustomVariablesRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_conversion_custom_variables
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
# Public API surface of this generated module.
__all__ = ("ConversionCustomVariableServiceClient",)
|
|
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from oslo_concurrency import processutils as putils
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi.hnas_backend import HNASSSHBackend
from cinder.volume.drivers.hitachi import hnas_iscsi as iscsi
from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume import volume_types
# The following information is passed on to tests, when creating a volume
_VOLUME = {'name': 'volume-cinder',
'id': fake.VOLUME_ID,
'size': 128,
'host': 'host1@hnas-iscsi-backend#default',
'provider_location': '83-68-96-AA-DA-5D.volume-2dfe280e-470a-'
'4182-afb8-1755025c35b8'}
_VOLUME2 = {'name': 'volume-clone',
'id': fake.VOLUME2_ID,
'size': 150,
'host': 'host1@hnas-iscsi-backend#default',
'provider_location': '83-68-96-AA-DA-5D.volume-8fe1802a-316b-'
'5237-1c57-c35b81755025'}
_SNAPSHOT = {
'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
'id': fake.SNAPSHOT_ID,
'size': 128,
'volume_type': None,
'provider_location': None,
'volume_size': 128,
'volume': _VOLUME,
'volume_name': _VOLUME['name'],
'host': 'host1@hnas-iscsi-backend#silver',
'volume_type_id': fake.VOLUME_TYPE_ID,
}
class HNASiSCSIDriverTest(test.TestCase):
    """Test HNAS iSCSI volume driver."""
    def setUp(self):
        """Create the driver under test against a fully mocked backend.

        ``hnas_utils.read_cinder_conf`` is patched to return the in-memory
        ``parsed_xml`` dict, so no real XML file or HNAS appliance is
        touched when the driver is constructed.
        """
        super(HNASiSCSIDriverTest, self).setUp()
        self.context = context.get_admin_context()
        self.volume = fake_volume.fake_volume_obj(
            self.context, **_VOLUME)
        self.volume_clone = fake_volume.fake_volume_obj(
            self.context, **_VOLUME2)
        self.snapshot = self.instantiate_snapshot(_SNAPSHOT)
        self.volume_type = fake_volume.fake_volume_type_obj(
            None,
            **{'name': 'silver',
               'id': fake.VOLUME_TYPE_ID}
        )
        # Stand-in for the parsed HNAS configuration: two services
        # ('default' on fs2, 'silver' on fs3) with CHAP enabled.
        self.parsed_xml = {
            'username': 'supervisor',
            'password': 'supervisor',
            'hnas_cmd': 'ssc',
            'fs': {'fs2': 'fs2'},
            'ssh_port': '22',
            'port': '3260',
            'services': {
                'default': {
                    'hdp': 'fs2',
                    'iscsi_ip': '172.17.39.132',
                    'iscsi_port': '3260',
                    'port': '22',
                    'volume_type': 'default',
                    'label': 'svc_0',
                    'evs': '1',
                    'tgt': {
                        'alias': 'test',
                        'secret': 'itEpgB5gPefGhW2'
                    }
                },
                'silver': {
                    'hdp': 'fs3',
                    'iscsi_ip': '172.17.39.133',
                    'iscsi_port': '3260',
                    'port': '22',
                    'volume_type': 'silver',
                    'label': 'svc_1',
                    'evs': '2',
                    'tgt': {
                        'alias': 'iscsi-test',
                        'secret': 'itEpgB5gPefGhW2'
                    }
                }
            },
            'cluster_admin_ip0': None,
            'ssh_private_key': None,
            'chap_enabled': True,
            'mgmt_ip0': '172.17.44.15',
            'ssh_enabled': None
        }
        self.configuration = mock.Mock(spec=conf.Configuration)
        self.configuration.hds_hnas_iscsi_config_file = 'fake.xml'
        self.mock_object(hnas_utils, 'read_cinder_conf',
                         mock.Mock(return_value=self.parsed_xml))
        self.driver = iscsi.HNASISCSIDriver(configuration=self.configuration)
    @staticmethod
    def instantiate_snapshot(snap):
        """Build a fake Snapshot object from a dict template.

        The template's 'volume' sub-dict is first converted into a fake
        volume object; the input dict is copied so the module-level
        _SNAPSHOT constant is never mutated.
        """
        snap = snap.copy()
        snap['volume'] = fake_volume.fake_volume_obj(
            None, **snap['volume'])
        snapshot = fake_snapshot.fake_snapshot_obj(
            None, expected_attrs=['volume'], **snap)
        return snapshot
    def test_get_service_target_chap_enabled(self):
        """_get_service_target succeeds when CHAP is on and target exists."""
        lu_info = {'mapped': False,
                   'id': 1,
                   'tgt': {'alias': 'iscsi-test',
                           'secret': 'itEpgB5gPefGhW2'}}
        tgt = {'found': True,
               'tgt': {
                   'alias': 'cinder-default',
                   'secret': 'pxr6U37LZZJBoMc',
                   'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
                   'lus': [
                       {'id': '0',
                        'name': 'cinder-lu'},
                       {'id': '1',
                        'name': 'volume-99da7ae7-1e7f-4d57-8bf...'}
                   ],
                   'auth': 'Enabled'}}
        iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default'
        self.mock_object(HNASSSHBackend, 'get_evs',
                         mock.Mock(return_value='1'))
        self.mock_object(HNASSSHBackend, 'check_lu',
                         mock.Mock(return_value=lu_info))
        self.mock_object(HNASSSHBackend, 'check_target',
                         mock.Mock(return_value=tgt))
        # Empty secret forces the driver down the set_target_secret path.
        self.mock_object(HNASSSHBackend, 'get_target_secret',
                         mock.Mock(return_value=''))
        self.mock_object(HNASSSHBackend, 'set_target_secret')
        self.mock_object(HNASSSHBackend, 'get_target_iqn',
                         mock.Mock(return_value=iqn))
        self.driver._get_service_target(self.volume)
    def test_get_service_target_chap_disabled(self):
        """_get_service_target creates a target when CHAP is disabled."""
        lu_info = {'mapped': False,
                   'id': 1,
                   'tgt': {'alias': 'iscsi-test',
                           'secret': 'itEpgB5gPefGhW2'}}
        # 'found': False makes the driver create the target.
        tgt = {'found': False,
               'tgt': {
                   'alias': 'cinder-default',
                   'secret': 'pxr6U37LZZJBoMc',
                   'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
                   'lus': [
                       {'id': '0',
                        'name': 'cinder-lu'},
                       {'id': '1',
                        'name': 'volume-99da7ae7-1e7f-4d57-8bf...'}
                   ],
                   'auth': 'Enabled'}}
        iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default'
        self.driver.config['chap_enabled'] = False
        self.mock_object(HNASSSHBackend, 'get_evs',
                         mock.Mock(return_value='1'))
        self.mock_object(HNASSSHBackend, 'check_lu',
                         mock.Mock(return_value=lu_info))
        self.mock_object(HNASSSHBackend, 'check_target',
                         mock.Mock(return_value=tgt))
        self.mock_object(HNASSSHBackend, 'get_target_iqn',
                         mock.Mock(return_value=iqn))
        self.mock_object(HNASSSHBackend, 'create_target')
        self.driver._get_service_target(self.volume)
    def test_get_service_target_no_more_targets_exception(self):
        """NoMoreTargets is raised when the target's LU slots are full."""
        # NOTE(review): this rebinds a module-level constant and is not
        # restored after the test -- it leaks into later tests in the run.
        iscsi.MAX_HNAS_LUS_PER_TARGET = 4
        lu_info = {'mapped': False, 'id': 1,
                   'tgt': {'alias': 'iscsi-test', 'secret': 'itEpgB5gPefGhW2'}}
        # Four LUs already present == the (lowered) per-target maximum.
        tgt = {'found': True,
               'tgt': {
                   'alias': 'cinder-default', 'secret': 'pxr6U37LZZJBoMc',
                   'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
                   'lus': [
                       {'id': '0', 'name': 'volume-0'},
                       {'id': '1', 'name': 'volume-1'},
                       {'id': '2', 'name': 'volume-2'},
                       {'id': '3', 'name': 'volume-3'}, ],
                   'auth': 'Enabled'}}
        self.mock_object(HNASSSHBackend, 'get_evs',
                         mock.Mock(return_value='1'))
        self.mock_object(HNASSSHBackend, 'check_lu',
                         mock.Mock(return_value=lu_info))
        self.mock_object(HNASSSHBackend, 'check_target',
                         mock.Mock(return_value=tgt))
        self.assertRaises(exception.NoMoreTargets,
                          self.driver._get_service_target, self.volume)
    def test_check_pool_and_fs(self):
        """Pool/filesystem pair matching the volume's host passes."""
        self.mock_object(hnas_utils, 'get_pool',
                         mock.Mock(return_value='default'))
        self.driver._check_pool_and_fs(self.volume, 'fs2')
    def test_check_pool_and_fs_no_default_configured(self):
        """Mismatch is raised when no 'default' service is configured."""
        self.volume.volume_type = self.volume_type
        self.mock_object(hnas_utils, 'get_pool',
                         mock.Mock(return_value='default'))
        # Replace the service map with one that lacks a 'default' entry.
        self.driver.config['services'] = {
            'silver': {
                'hdp': 'fs3',
                'iscsi_ip': '172.17.39.133',
                'iscsi_port': '3260',
                'port': '22',
                'volume_type': 'silver',
                'label': 'svc_1',
                'evs': '2',
                'tgt': {
                    'alias': 'iscsi-test',
                    'secret': 'itEpgB5gPefGhW2'
                }
            }
        }
        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.driver._check_pool_and_fs, self.volume,
                          'fs-cinder')
    def test_check_pool_and_fs_mismatch(self):
        """Mismatch is raised when the FS does not belong to the pool."""
        self.mock_object(hnas_utils, 'get_pool',
                         mock.Mock(return_value='default'))
        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.driver._check_pool_and_fs, self.volume,
                          'fs-cinder')
    def test_check_pool_and_fs_host_mismatch(self):
        """Mismatch is raised when the pool differs from the volume host."""
        self.mock_object(hnas_utils, 'get_pool',
                         mock.Mock(return_value='silver'))
        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.driver._check_pool_and_fs, self.volume,
                          'fs3')
    def test_do_setup(self):
        """do_setup resolves EVS info for each configured iSCSI portal."""
        evs_info = {'172.17.39.132': {'evs_number': 1},
                    '172.17.39.133': {'evs_number': 2},
                    '172.17.39.134': {'evs_number': 3}}
        version_info = {
            'mac': '83-68-96-AA-DA-5D',
            'model': 'HNAS 4040',
            'version': '12.4.3924.11',
            'hardware': 'NAS Platform',
            'serial': 'B1339109',
        }
        self.mock_object(HNASSSHBackend, 'get_fs_info',
                         mock.Mock(return_value=True))
        self.mock_object(HNASSSHBackend, 'get_evs_info',
                         mock.Mock(return_value=evs_info))
        self.mock_object(HNASSSHBackend, 'get_version',
                         mock.Mock(return_value=version_info))
        self.driver.do_setup(None)
        HNASSSHBackend.get_fs_info.assert_called_with('fs2')
        self.assertTrue(HNASSSHBackend.get_evs_info.called)
    def test_do_setup_portal_not_found(self):
        """do_setup fails when a configured portal IP is not on the array."""
        # 172.17.39.132 (the 'default' service portal) is missing here.
        evs_info = {'172.17.48.132': {'evs_number': 1},
                    '172.17.39.133': {'evs_number': 2},
                    '172.17.39.134': {'evs_number': 3}}
        version_info = {
            'mac': '83-68-96-AA-DA-5D',
            'model': 'HNAS 4040',
            'version': '12.4.3924.11',
            'hardware': 'NAS Platform',
            'serial': 'B1339109',
        }
        self.mock_object(HNASSSHBackend, 'get_fs_info',
                         mock.Mock(return_value=True))
        self.mock_object(HNASSSHBackend, 'get_evs_info',
                         mock.Mock(return_value=evs_info))
        self.mock_object(HNASSSHBackend, 'get_version',
                         mock.Mock(return_value=version_info))
        self.assertRaises(exception.InvalidParameterValue,
                          self.driver.do_setup, None)
    def test_do_setup_umounted_filesystem(self):
        """do_setup fails when the configured filesystem is not mounted."""
        self.mock_object(HNASSSHBackend, 'get_fs_info',
                         mock.Mock(return_value=False))
        self.assertRaises(exception.ParameterNotFound, self.driver.do_setup,
                          None)
    def test_initialize_connection(self):
        """initialize_connection maps the LU via add_iscsi_conn."""
        lu_info = {'mapped': True,
                   'id': 1,
                   'tgt': {'alias': 'iscsi-test',
                           'secret': 'itEpgB5gPefGhW2'}}
        conn = {'lun_name': 'cinder-lu',
                'initiator': 'initiator',
                'hdp': 'fs-cinder',
                'lu_id': '0',
                'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
                'port': 3260}
        connector = {'initiator': 'fake_initiator'}
        self.mock_object(HNASSSHBackend, 'get_evs',
                         mock.Mock(return_value=2))
        self.mock_object(HNASSSHBackend, 'check_lu',
                         mock.Mock(return_value=lu_info))
        self.mock_object(HNASSSHBackend, 'add_iscsi_conn',
                         mock.Mock(return_value=conn))
        self.driver.initialize_connection(self.volume, connector)
        HNASSSHBackend.add_iscsi_conn.assert_called_with(self.volume.name,
                                                         'fs2', '22',
                                                         'iscsi-test',
                                                         connector[
                                                             'initiator'])
    def test_initialize_connection_command_error(self):
        """Backend command failure surfaces as ISCSITargetAttachFailed."""
        lu_info = {'mapped': True,
                   'id': 1,
                   'tgt': {'alias': 'iscsi-test',
                           'secret': 'itEpgB5gPefGhW2'}}
        connector = {'initiator': 'fake_initiator'}
        self.mock_object(HNASSSHBackend, 'get_evs',
                         mock.Mock(return_value=2))
        self.mock_object(HNASSSHBackend, 'check_lu',
                         mock.Mock(return_value=lu_info))
        self.mock_object(HNASSSHBackend, 'add_iscsi_conn',
                         mock.Mock(side_effect=putils.ProcessExecutionError))
        self.assertRaises(exception.ISCSITargetAttachFailed,
                          self.driver.initialize_connection, self.volume,
                          connector)
    def test_terminate_connection(self):
        """terminate_connection unmaps the LU via del_iscsi_conn."""
        connector = {}
        lu_info = {'mapped': True,
                   'id': 1,
                   'tgt': {'alias': 'iscsi-test',
                           'secret': 'itEpgB5gPefGhW2'}}
        self.mock_object(HNASSSHBackend, 'get_evs',
                         mock.Mock(return_value=2))
        self.mock_object(HNASSSHBackend, 'check_lu',
                         mock.Mock(return_value=lu_info))
        self.mock_object(HNASSSHBackend, 'del_iscsi_conn')
        self.driver.terminate_connection(self.volume, connector)
        HNASSSHBackend.del_iscsi_conn.assert_called_with('1',
                                                         'iscsi-test',
                                                         lu_info['id'])
    def test_get_volume_stats(self):
        """get_volume_stats reports driver version/vendor/protocol."""
        self.driver.pools = [{'pool_name': 'default',
                              'service_label': 'svc_0',
                              'fs': '172.17.39.132:/fs2'},
                             {'pool_name': 'silver',
                              'service_label': 'svc_1',
                              'fs': '172.17.39.133:/fs3'}]
        fs_cinder = {
            'evs_id': '2',
            'total_size': '250',
            'label': 'fs-cinder',
            'available_size': '228',
            'used_size': '21.4',
            'id': '1025',
            'provisioned_capacity': 0.0
        }
        self.mock_object(HNASSSHBackend, 'get_fs_info',
                         mock.Mock(return_value=fs_cinder))
        stats = self.driver.get_volume_stats(refresh=True)
        self.assertEqual('5.0.0', stats['driver_version'])
        self.assertEqual('Hitachi', stats['vendor_name'])
        self.assertEqual('iSCSI', stats['storage_protocol'])
    def test_create_volume(self):
        """create_volume creates the LU and returns provider_location."""
        version_info = {'mac': '83-68-96-AA-DA-5D'}
        # provider_location format: "<array MAC>.<volume name>".
        expected_out = {
            'provider_location': version_info['mac'] + '.' + self.volume.name
        }
        self.mock_object(HNASSSHBackend, 'create_lu')
        self.mock_object(HNASSSHBackend, 'get_version',
                         mock.Mock(return_value=version_info))
        out = self.driver.create_volume(self.volume)
        self.assertEqual(expected_out, out)
        HNASSSHBackend.create_lu.assert_called_with('fs2', u'128',
                                                    self.volume.name)
    def test_create_volume_missing_fs(self):
        """create_volume fails when the host's pool is not configured."""
        self.volume.host = 'host1@hnas-iscsi-backend#missing'
        self.assertRaises(exception.ParameterNotFound,
                          self.driver.create_volume, self.volume)
    def test_delete_volume(self):
        """delete_volume deletes the LU on the configured filesystem."""
        self.mock_object(HNASSSHBackend, 'delete_lu')
        self.driver.delete_volume(self.volume)
        HNASSSHBackend.delete_lu.assert_called_once_with(
            self.parsed_xml['fs']['fs2'], self.volume.name)
    def test_extend_volume(self):
        """extend_volume grows the LU to the requested size."""
        new_size = 200
        self.mock_object(HNASSSHBackend, 'extend_lu')
        self.driver.extend_volume(self.volume, new_size)
        HNASSSHBackend.extend_lu.assert_called_once_with(
            self.parsed_xml['fs']['fs2'], new_size,
            self.volume.name)
    def test_create_cloned_volume(self):
        """create_cloned_volume clones the source LU under the clone name."""
        clone_name = self.volume_clone.name
        version_info = {'mac': '83-68-96-AA-DA-5D'}
        expected_out = {
            'provider_location':
                version_info['mac'] + '.' + self.volume_clone.name
        }
        self.mock_object(HNASSSHBackend, 'create_cloned_lu')
        self.mock_object(HNASSSHBackend, 'get_version',
                         mock.Mock(return_value=version_info))
        # extend_lu is mocked because the clone (150 GB) is larger than
        # the source (128 GB).
        self.mock_object(HNASSSHBackend, 'extend_lu')
        out = self.driver.create_cloned_volume(self.volume_clone, self.volume)
        self.assertEqual(expected_out, out)
        HNASSSHBackend.create_cloned_lu.assert_called_with(self.volume.name,
                                                           'fs2',
                                                           clone_name)
    def test_functions_with_pass(self):
        """No-op driver hooks run without raising."""
        self.driver.check_for_setup_error()
        self.driver.ensure_export(None, self.volume)
        self.driver.create_export(None, self.volume, 'connector')
        self.driver.remove_export(None, self.volume)
    def test_create_snapshot(self):
        """create_snapshot clones the source LU and returns its location."""
        lu_info = {'lu_mounted': 'No',
                   'name': 'cinder-lu',
                   'fs_mounted': 'YES',
                   'filesystem': 'FS-Cinder',
                   'path': '/.cinder/cinder-lu.iscsi',
                   'size': 2.0}
        version_info = {'mac': '83-68-96-AA-DA-5D'}
        expected_out = {
            'provider_location': version_info['mac'] + '.' + self.snapshot.name
        }
        self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
                         mock.Mock(return_value=lu_info))
        self.mock_object(volume_types, 'get_volume_type',
                         mock.Mock(return_value=self.volume_type))
        self.mock_object(HNASSSHBackend, 'create_cloned_lu')
        self.mock_object(HNASSSHBackend, 'get_version',
                         mock.Mock(return_value=version_info))
        out = self.driver.create_snapshot(self.snapshot)
        self.assertEqual(expected_out, out)
    def test_delete_snapshot(self):
        """delete_snapshot deletes the backing LU."""
        lu_info = {'filesystem': 'FS-Cinder'}
        self.mock_object(volume_types, 'get_volume_type',
                         mock.Mock(return_value=self.volume_type))
        self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
                         mock.Mock(return_value=lu_info))
        self.mock_object(HNASSSHBackend, 'delete_lu')
        self.driver.delete_snapshot(self.snapshot)
    def test_create_volume_from_snapshot(self):
        """create_volume_from_snapshot clones the snapshot LU."""
        version_info = {'mac': '83-68-96-AA-DA-5D'}
        expected_out = {
            'provider_location': version_info['mac'] + '.' + self.snapshot.name
        }
        self.mock_object(HNASSSHBackend, 'create_cloned_lu')
        self.mock_object(HNASSSHBackend, 'get_version',
                         mock.Mock(return_value=version_info))
        out = self.driver.create_volume_from_snapshot(self.volume,
                                                      self.snapshot)
        self.assertEqual(expected_out, out)
        HNASSSHBackend.create_cloned_lu.assert_called_with(self.snapshot.name,
                                                           'fs2',
                                                           self.volume.name)
    def test_manage_existing_get_size(self):
        """manage_existing_get_size returns the existing LU's size."""
        # Reference format: "<filesystem>/<lu name>".
        existing_vol_ref = {'source-name': 'fs-cinder/volume-cinder'}
        lu_info = {
            'name': 'volume-cinder',
            'comment': None,
            'path': ' /.cinder/volume-cinder',
            'size': 128,
            'filesystem': 'fs-cinder',
            'fs_mounted': 'Yes',
            'lu_mounted': 'Yes'
        }
        self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
                         mock.Mock(return_value=lu_info))
        out = self.driver.manage_existing_get_size(self.volume,
                                                   existing_vol_ref)
        self.assertEqual(lu_info['size'], out)
        HNASSSHBackend.get_existing_lu_info.assert_called_with(
            'volume-cinder', lu_info['filesystem'])
    def test_manage_existing_get_size_no_source_name(self):
        """Missing 'source-name' in the reference is rejected."""
        existing_vol_ref = {}
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self.volume,
                          existing_vol_ref)
    def test_manage_existing_get_size_wrong_source_name(self):
        """Malformed 'source-name' (extra '/') is rejected."""
        existing_vol_ref = {'source-name': 'fs-cinder/volume/cinder'}
        self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
                         mock.Mock(return_value={}))
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self.volume,
                          existing_vol_ref)
    def test_manage_existing_get_size_volume_not_found(self):
        """A reference to a non-existent LU is rejected."""
        existing_vol_ref = {'source-name': 'fs-cinder/volume-cinder'}
        self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
                         mock.Mock(return_value={}))
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self.volume,
                          existing_vol_ref)
    def test_manage_existing(self):
        """manage_existing renames the LU to the cinder volume name."""
        self.volume.volume_type = self.volume_type
        existing_vol_ref = {'source-name': 'fs2/volume-cinder'}
        metadata = {'service_label': 'default'}
        version_info = {'mac': '83-68-96-AA-DA-5D'}
        expected_out = {
            'provider_location': version_info['mac'] + '.' + self.volume.name
        }
        self.mock_object(HNASSSHBackend, 'rename_existing_lu')
        self.mock_object(volume_types, 'get_volume_type_extra_specs',
                         mock.Mock(return_value=metadata))
        self.mock_object(HNASSSHBackend, 'get_version',
                         mock.Mock(return_value=version_info))
        out = self.driver.manage_existing(self.volume, existing_vol_ref)
        self.assertEqual(expected_out, out)
        HNASSSHBackend.rename_existing_lu.assert_called_with('fs2',
                                                             'volume-cinder',
                                                             self.volume.name)
    def test_unmanage(self):
        """unmanage renames the LU with an 'unmanage-' prefix."""
        self.mock_object(HNASSSHBackend, 'rename_existing_lu')
        self.driver.unmanage(self.volume)
        HNASSSHBackend.rename_existing_lu.assert_called_with(
            self.parsed_xml['fs']['fs2'],
            self.volume.name, 'unmanage-' + self.volume.name)
|
|
"""This demo program solves the incompressible Navier-Stokes equations
on an L-shaped domain using Chorin's splitting method."""
# Copyright (C) 2010-2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Mikael Mortensen 2011
#
# First added: 2010-08-30
# Last changed: 2011-06-30
#
# SC14 Paraview's Catalyst tutorial
#
# Step 6 : Add field data arrays to VTK grid
#
# [SC14-Catalyst] we need a python environment that enables import of both Dolfin and ParaView
# NOTE: Python 2 script (execfile / print statements).
# Pull in paths/environment that expose both Dolfin and ParaView.
execfile("simulation-env.py")
# [SC14-Catalyst] import paraview, vtk and paraview's simple API
import sys
import paraview
import paraview.vtk as vtk
import paraview.simple as pvsimple
# [SC14-Catalyst] check for command line arguments
# Usage: python <this script> <coprocessing script> <number of time steps>
if len(sys.argv) != 3:
    print "command is 'python",sys.argv[0],"<script name> <number of time steps>'"
    sys.exit(1)
# [SC14-Catalyst] initialize and read input parameters
paraview.options.batch = True
paraview.options.symmetric = True
# [SC14-Catalyst] import user co-processing script
import vtkPVCatalystPython
import os
# Make the user script importable from its own directory.
scriptpath, scriptname = os.path.split(sys.argv[1])
sys.path.append(scriptpath)
if scriptname.endswith(".py"):
    print 'script name is ', scriptname
    # Strip the ".py" suffix so __import__ gets a module name.
    scriptname = scriptname[0:len(scriptname)-3]
try:
    cpscript = __import__(scriptname)
# NOTE(review): bare except swallows all errors (including SystemExit on
# py2); it at least prints exc_info before exiting.
except:
    print sys.exc_info()
    print 'Cannot find ', scriptname, ' -- no coprocessing will be performed.'
    sys.exit(1)
# [SC14-Catalyst] Co-Processing routine to be called at the end of each simulation time step
def coProcess(grid, time, step):
    """Run one Catalyst co-processing pass for the current simulation state.

    Parameters:
        grid: vtkUnstructuredGrid carrying the solution fields, or None.
        time: simulation time of this step.
        step: integer time-step index.

    Asks the user script (``cpscript``) whether output is wanted for this
    step; if so, attaches ``grid`` and triggers the coprocessing pipeline.
    """
    # initialize data description for this time step
    datadescription = vtkPVCatalystPython.vtkCPDataDescription()
    datadescription.SetTimeData(time, step)
    datadescription.AddInput("input")
    # let the user script flag whether it needs the grid at this step
    cpscript.RequestDataDescription(datadescription)
    inputdescription = datadescription.GetInputDescriptionByName("input")
    # was `== False`: PEP 8 says never compare to booleans with ==
    if not inputdescription.GetIfGridIsNecessary():
        return
    # was `grid != None`: identity check is the correct None comparison
    if grid is not None:
        # attach VTK data set to pipeline input
        inputdescription.SetGrid(grid)
        # execute catalyst processing
        cpscript.DoCoProcessing(datadescription)
# [SC14-Catalyst] convert dolfin mesh to a VTK unstructured grid
def Mesh2VTKUGrid(mesh):
    """Convert a DOLFIN mesh into a vtkUnstructuredGrid.

    Copies all vertex coordinates and cell connectivity; the VTK cell type
    is looked up by (topological dimension, points per cell).
    """
    # vtkcelltypes[dim][npoints-per-cell] -> VTK cell type id, dim 0..3.
    vtkcelltypes=((),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE,vtk.VTK_TRIANGLE,vtk.VTK_QUAD,vtk.VTK_POLYGON,vtk.VTK_POLYGON),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE,vtk.VTK_TRIANGLE,vtk.VTK_TETRA,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_HEXAHEDRON))
    # Copy vertex coordinates.
    npoints=mesh.num_vertices()
    geom=mesh.geometry()
    pts=vtk.vtkPoints()
    pts.SetNumberOfPoints(npoints)
    for i in xrange(npoints):
        p=geom.point(i)
        pts.SetPoint(i,p.x(),p.y(),p.z())
    # Copy cell connectivity plus the per-cell type/offset bookkeeping
    # required by vtkUnstructuredGrid.SetCells.
    dim = mesh.topology().dim()
    ncells=mesh.num_cells()
    cells=vtk.vtkCellArray()
    cellTypes=vtk.vtkUnsignedCharArray()
    cellTypes.SetNumberOfTuples(ncells)
    cellLocations=vtk.vtkIdTypeArray()
    cellLocations.SetNumberOfTuples(ncells)
    loc=0
    for (cell,i) in zip(mesh.cells(),xrange(ncells)) :
        ncellpoints=len(cell)
        cells.InsertNextCell(ncellpoints)
        for cpoint in cell:
            cells.InsertCellPoint(cpoint)
        cellTypes.SetTuple1(i,vtkcelltypes[dim][ncellpoints])
        cellLocations.SetTuple1(i,loc)
        # Each legacy cell record is: point count followed by the point ids.
        loc+=1+ncellpoints
    ugrid = vtk.vtkUnstructuredGrid()
    ugrid.SetPoints(pts)
    ugrid.SetCells(cellTypes,cellLocations,cells)
    return ugrid
# [SC14-Catalyst] convert a flattened sequence of values to VTK double array
def Values2VTKArray(values, n, name):
    """Pack a flat, component-major value sequence into a named VTK array.

    `values` holds n tuples stored component-by-component: the j-th
    component of tuple i lives at values[i + j*n].
    """
    ncomps = len(values) / n  # py2 integer division: components per tuple
    arr = vtk.vtkDoubleArray()
    arr.SetNumberOfComponents(ncomps)
    arr.SetNumberOfTuples(n)
    for i in range(n):
        # gather tuple i's components from the strided layout
        arr.SetTupleValue(i, [values[i + j * n] for j in range(ncomps)])
    arr.SetName(name)
    return arr
def AddFieldData(ugrid, pointArrays, cellArrays):
    """Attach named point- and cell-centred data arrays to a VTK grid.

    Both array arguments are sequences of (name, flat values) pairs; the
    flat sequences are packed by Values2VTKArray.
    """
    npoints = ugrid.GetNumberOfPoints()
    ncells = ugrid.GetNumberOfCells()
    # point-centred fields
    for name, values in pointArrays:
        ugrid.GetPointData().AddArray(Values2VTKArray(values, npoints, name))
    # cell-centred fields
    for name, values in cellArrays:
        ugrid.GetCellData().AddArray(Values2VTKArray(values, ncells, name))
# Begin demo
# Incompressible Navier-Stokes on an L-shaped domain, solved with
# Chorin's splitting scheme; each step hands the solution to Catalyst.
from dolfin import *
# Print log messages only from the root process in parallel
parameters["std_out_all_processes"] = False;
# Load mesh from file
mesh = Mesh(DOLFIN_EXAMPLE_DATA_DIR+"/lshape.xml.gz")
# Define function spaces (P2-P1)
V = VectorFunctionSpace(mesh, "Lagrange", 2)
Q = FunctionSpace(mesh, "Lagrange", 1)
# Define trial and test functions
u = TrialFunction(V)
p = TrialFunction(Q)
v = TestFunction(V)
q = TestFunction(Q)
# Set parameter values
dt = 0.01
T = 3
nu = 0.01
# Define time-dependent pressure boundary condition
p_in = Expression("sin(3.0*t)", t=0.0)
# Define boundary conditions
noslip  = DirichletBC(V, (0, 0),
                      "on_boundary && \
                       (x[0] < DOLFIN_EPS | x[1] < DOLFIN_EPS | \
                       (x[0] > 0.5 - DOLFIN_EPS && x[1] > 0.5 - DOLFIN_EPS))")
inflow  = DirichletBC(Q, p_in, "x[1] > 1.0 - DOLFIN_EPS")
outflow = DirichletBC(Q, 0, "x[0] > 1.0 - DOLFIN_EPS")
bcu = [noslip]
bcp = [inflow, outflow]
# Create functions: u0 previous velocity, u1/p1 current solution
u0 = Function(V)
u1 = Function(V)
p1 = Function(Q)
# Define coefficients
k = Constant(dt)
f = Constant((0, 0))
# Tentative velocity step
F1 = (1/k)*inner(u - u0, v)*dx + inner(grad(u0)*u0, v)*dx + \
     nu*inner(grad(u), grad(v))*dx - inner(f, v)*dx
a1 = lhs(F1)
L1 = rhs(F1)
# Pressure update
a2 = inner(grad(p), grad(q))*dx
L2 = -(1/k)*div(u1)*q*dx
# Velocity update
a3 = inner(u, v)*dx
L3 = inner(u1, v)*dx - k*inner(grad(p1), v)*dx
# Assemble matrices (bilinear forms are time-independent)
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)
# Use amg preconditioner if available
prec = "amg" if has_krylov_solver_preconditioner("amg") else "default"
# Create files for storing solution
# NOTE(review): these File objects are created but never written below --
# output now goes through Catalyst instead.
ufile = File("results/velocity.pvd")
pfile = File("results/pressure.pvd")
# Time-stepping: the step count from the command line bounds the loop,
# not the end time T defined above.
maxtimestep = int(sys.argv[2])
tstep = 0
t = dt
while tstep < maxtimestep:
    # Update pressure boundary condition
    p_in.t = t
    # Compute tentative velocity step
    begin("Computing tentative velocity")
    b1 = assemble(L1)
    [bc.apply(A1, b1) for bc in bcu]
    solve(A1, u1.vector(), b1, "gmres", "default")
    end()
    # Pressure correction
    begin("Computing pressure correction")
    b2 = assemble(L2)
    [bc.apply(A2, b2) for bc in bcp]
    solve(A2, p1.vector(), b2, "gmres", prec)
    end()
    # Velocity correction
    begin("Computing velocity correction")
    b3 = assemble(L3)
    [bc.apply(A3, b3) for bc in bcu]
    solve(A3, u1.vector(), b3, "gmres", "default")
    end()
    # Plot solution [SC14-Catalyst] Not anymore
    # plot(p1, title="Pressure", rescale=True)
    # plot(u1, title="Velocity", rescale=True)
    # Save to file [SC14-Catalyst] Not anymore
    # ufile << u1
    # pfile << p1
    # [SC14-Catalyst] convert solution to VTK grid
    ugrid = Mesh2VTKUGrid( u1.function_space().mesh() )
    # [SC14-Catalyst] add field data to the VTK grid
    velocity = u1.compute_vertex_values()
    pressure = p1.compute_vertex_values()
    AddFieldData( ugrid, [ ("Velocity",velocity) , ("Pressure",pressure) ] , [] )
    # [SC14-Catalyst] trigger catalyst execution
    coProcess(ugrid,t,tstep)
    # Move to next time step
    u0.assign(u1)
    t += dt
    tstep += 1
    print "t =", t, "step =",tstep
# Hold plot [SC14-Catalyst] Not anymore
# interactive()
|
|
import cPickle as pickle
import copy
import os
import menpo.io as mio
import numpy as np
from menpo.feature import no_op
from menpo.landmark import labeller, face_ibug_68_to_face_ibug_66_trimesh
from menpo.math import as_matrix
from menpo.model import PCAModel
from menpo.transform import PiecewiseAffine
from menpofit.aam import HolisticAAM, LucasKanadeAAMFitter
from menpofit.builder import (build_reference_frame, warp_images, align_shapes, rescale_images_to_reference_shape)
from menpofit.transform import DifferentiableAlignmentSimilarity
from pathlib import Path
#=======================================================================================================================
### Function for Reading {LFPW-AFW-Helen-Ibug} Images ###
#=======================================================================================================================
def load_image(i):
    """Prepare one imported menpo image: crop, greyscale, relabel.

    Crops to 50% around the landmarks, converts RGB to greyscale, converts
    the 68-point 'PTS' annotation to the 66-point trimesh labelling, and
    drops the original 'PTS' group.
    """
    img = i.crop_to_landmarks_proportion(0.5)
    if img.n_channels == 3:
        img = img.as_greyscale()
    labeller(img, 'PTS', face_ibug_68_to_face_ibug_66_trimesh)
    del img.landmarks['PTS']
    return img
#=======================================================================================================================
### Function for Reading Extended-Cohn-Kanade Images ###
#=======================================================================================================================
def Read_CK(land_tmp, image_path):
    """Load one CK+ image with its landmark file as a 66-point annotation.

    The landmark file is ``<image stem>_landmarks.txt`` next to the image;
    NOTE: ``land_tmp`` is mutated in place (its points are overwritten)
    and then attached to the image.
    """
    img = mio.import_image(image_path)
    lm_path = image_path[:-4] + "_landmarks.txt"
    with open(lm_path) as fh:
        raw = np.array([[float(v) for v in line.split()] for line in fh])
    # Drop the two inner lip-corner points: 68-point -> 66-point scheme.
    raw = np.delete(raw, (60), axis=0)
    raw = np.delete(raw, (63), axis=0)
    # The file stores (Y, X); swap into (X, Y) order.
    swapped = np.zeros((66, 2))
    swapped[:, 0] = raw[:, 1]
    swapped[:, 1] = raw[:, 0]
    land_tmp.lms.points = swapped
    img.landmarks['face_ibug_66_trimesh'] = land_tmp
    # Crop around the landmarks and convert to greyscale if needed.
    img = img.crop_to_landmarks_proportion(0.5)
    if img.n_channels == 3:
        img = img.as_greyscale()
    return img
#=======================================================================================================================
### Function for Reading UNBC-McMaster Images ###
#=======================================================================================================================
def Read_UNBC(land_tmp, image_path):
    """Load one UNBC-McMaster image with its 66-point landmark file.

    Same pipeline as Read_CK but without the lip-corner deletions (UNBC
    files already contain 66 points). NOTE: ``land_tmp`` is mutated in
    place and then attached to the image.
    """
    img = mio.import_image(image_path)
    lm_path = image_path[:-4] + "_landmarks.txt"
    with open(lm_path) as fh:
        raw = np.array([[float(v) for v in line.split()] for line in fh])
    # The file stores (Y, X); swap into (X, Y) order.
    swapped = np.zeros((66, 2))
    swapped[:, 0] = raw[:, 1]
    swapped[:, 1] = raw[:, 0]
    land_tmp.lms.points = swapped
    img.landmarks['face_ibug_66_trimesh'] = land_tmp
    # Crop around the landmarks and convert to greyscale if needed.
    img = img.crop_to_landmarks_proportion(0.5)
    if img.n_channels == 3:
        img = img.as_greyscale()
    return img
#=======================================================================================================================
### Function for finding effective Source Eigen-Vectors ###
#=======================================================================================================================
def Compute_Effective_Space(T_Data, T_mean, S_Comp):
    """Rank source eigenvectors by the target variance each one captures.

    For every source component c, the captured variance is
    (1/N) * ||(T_Data - T_mean) . c||^2, i.e. the mean squared projection of
    the mean-centred target data onto that component.

    Args:
        T_Data: (N_Samples, D) target data matrix.
        T_mean: (D,) mean used to centre the target data.
        S_Comp: (N_Comp, D) source component (eigenvector) matrix.

    Returns:
        Tuple of (New_Comp, Captured_variance):
        New_Comp: S_Comp rows reordered by decreasing captured variance.
        Captured_variance: the corresponding variances, descending.
    """
    N_Samples = T_Data.shape[0]
    # Mean-centre the target data.
    data = T_Data - T_mean
    # Vectorized form of the original per-component loop:
    # (c . data^T)(data . c) == ||data . c||^2, so project all components at
    # once and take mean squared projections. One matmul instead of an
    # O(N_Comp) Python loop of dense dot products.
    proj = np.dot(data, S_Comp.T)                      # (N_Samples, N_Comp)
    Captured_variance = np.sum(proj ** 2, axis=0) / N_Samples
    # Sort components (and their variances) by decreasing captured variance.
    indexes = np.argsort(Captured_variance)[::-1]
    New_Comp = S_Comp[indexes, :]
    Captured_variance = Captured_variance[indexes]
    return New_Comp, Captured_variance
#=======================================================================================================================
### Loading Data ###
#=======================================================================================================================
# ---------------------------------------------------------------------------
# Data loading: source images (LFPW/AFW/Helen/iBUG + CK+), target and test
# images (UNBC-McMaster). All paths are hard-coded to a local machine.
# ---------------------------------------------------------------------------
# Load the pickled face_ibug_66_trimesh landmark-group template that
# Read_CK/Read_UNBC mutate and attach to every image.
# NOTE(review): `input` shadows the builtin -- rename when next touching this.
with open('/Users/azinasgarian/Documents/Research/face_ibug_66_trimesh_temp.pkl', 'rb') as input:
    land_tmp = pickle.load(input)
# Load the in-the-wild source images (LFPW-AFW-Helen-iBUG).
# NOTE(review): `load_image` is defined elsewhere in this file/project.
source_path = Path('/Users/azinasgarian/Documents/Research/Source_Small')
source_images = [load_image(i) for i in mio.import_images(source_path, verbose=True)]
# Walk the CK+ tree and append every .png (skipping macOS .DS_Store files)
# to the source set.
CK_root = "/Users/azinasgarian/Documents/Research/Sample_CK+_Small"
for root, dirs, filenames in os.walk(CK_root):
    for filename in filenames:
        if (".png" in filename) and (".DS_Store" not in filename) :
            tmp_image = Read_CK(land_tmp,root+"/"+filename)
            source_images.append(tmp_image)
# UNBC images used as the adaptation TARGET set.
UNBC_root = "/Users/azinasgarian/Documents/Research/Sample_UNBC_Small_Target"
target_images = []
for root, dirs, filenames in os.walk(UNBC_root):
    for filename in filenames:
        if (".png" in filename) and (".DS_Store" not in filename) :
            target_images.append(Read_UNBC(land_tmp,root+"/"+filename))
# UNBC images held out for TESTING the fitted models.
UNBC_root = "/Users/azinasgarian/Documents/Research/Sample_UNBC_Small_Test_2"
test_images = []
for root, dirs, filenames in os.walk(UNBC_root):
    for filename in filenames:
        if (".png" in filename) and (".DS_Store" not in filename):
            test_images.append(Read_UNBC(land_tmp, root + "/" + filename))
#=======================================================================================================================
### Data Split Into Test, Target ###
#=======================================================================================================================
# Combined target+source set used to build the SUT (source-union-target) AAM.
# Deep copy so the later in-place model surgery cannot alias the originals.
all_images = copy.deepcopy(target_images+source_images)
#=======================================================================================================================
### Pre-Computation ###
#=======================================================================================================================
# Three holistic AAMs with identical hyper-parameters (no holistic features,
# single scale, 150px diagonal normalisation, 200 appearance / 100 shape
# components): one per image set. The SUT model is the one whose shape and
# appearance subspaces get replaced further down.
# Source-only AAM (in-the-wild + CK+).
source_aam = HolisticAAM(
    source_images,
    group='face_ibug_66_trimesh',
    holistic_features=no_op,
    scales=1,
    diagonal=150,
    max_appearance_components=200,
    max_shape_components=100,
    verbose=True
)
# Target-only AAM (UNBC target split).
target_aam = HolisticAAM(
    target_images,
    group='face_ibug_66_trimesh',
    holistic_features=no_op,
    scales=1,
    diagonal=150,
    max_appearance_components=200,
    max_shape_components=100,
    verbose=True
)
# Source-union-target AAM; only its reference shape/frame and mean are kept,
# its subspaces are overwritten below.
SUT_aam = HolisticAAM(
    all_images,
    group='face_ibug_66_trimesh',
    holistic_features=no_op,
    scales=1,
    diagonal=150,
    max_appearance_components=200,
    max_shape_components=100,
    verbose=True
)
#=======================================================================================================================
### Main Body ###
#=======================================================================================================================
### Rescaling Target Images to Target reference shape ###
#=======================================================================================================================
ST = rescale_images_to_reference_shape(target_images, 'face_ibug_66_trimesh', target_aam.reference_shape, verbose=True)
#=======================================================================================================================
### Building Shape Variables ###
#=======================================================================================================================
# Build the target shape data matrix: Procrustes-align the rescaled target
# shapes and flatten them into rows.
ST_scaled_shapes = [i.landmarks['face_ibug_66_trimesh'].lms for i in ST]
ST_aligned_shapes = align_shapes(ST_scaled_shapes)
ST_data = as_matrix(ST_aligned_shapes, return_template=False, verbose=True)
ST_N = ST_data.shape[0]
ST_mean = np.mean(ST_data, axis=0)
# Reorder the SOURCE shape eigenvectors by how much TARGET variance each
# captures (see Compute_Effective_Space).
# NOTE(review): this section reads/writes menpo's private `_components` /
# `_eigenvalues` attributes -- fragile across menpo versions.
New_Shape_Components, New_Shape_Eigen_Values = Compute_Effective_Space(ST_data, ST_mean,
                                                                       source_aam.shape_models[0].model._components)
# Orthonormalize the reordered source shape model against the target shape
# model (removes the part of the source space already spanned by the target).
source_aam.shape_models[0].model._components = New_Shape_Components
source_aam.shape_models[0].model.orthonormalize_against_inplace(target_aam.shape_models[0].model)
#=======================================================================================================================
### Concatenating Target and Discarded Source Shape Space ###
#=======================================================================================================================
# Stack target components on top of the orthonormalized source remainder.
Shape_e_vectors = np.vstack((target_aam.shape_models[0].model._components,source_aam.shape_models[0].model._components))
Shape_e_values = np.concatenate((target_aam.shape_models[0].model._eigenvalues,New_Shape_Eigen_Values))
n_new_samples = ST_N + len(source_aam.shape_models[0].model._eigenvalues)
#=======================================================================================================================
### Updating Shape Components ###
#=======================================================================================================================
# Rebuild a PCA shape model from the concatenated basis, keeping the SUT mean.
Shape_tmp = PCAModel.init_from_components(components=Shape_e_vectors, eigenvalues=Shape_e_values,
                                          mean=SUT_aam.shape_models[0].model.mean(),
                                          n_samples = n_new_samples, centred=True,
                                          max_n_components=SUT_aam.max_shape_components[0])
# Splice the new model into the SUT AAM and reset the OrthoPDM internal state
# (target, weights, similarity transform) so it is consistent with it.
SUT_aam.shape_models[0].model = Shape_tmp
SUT_aam.shape_models[0]._target = None
SUT_aam.shape_models[0]._weights = np.zeros(SUT_aam.shape_models[0].model.n_active_components)
SUT_aam.shape_models[0]._target = SUT_aam.shape_models[0].model.mean()
shape_mean = SUT_aam.shape_models[0].model.mean()
SUT_aam.shape_models[0].global_transform = DifferentiableAlignmentSimilarity(shape_mean,shape_mean)
# Re-orthonormalize components against the similarity transform.
SUT_aam.shape_models[0]._construct_similarity_model()
# Reset the target given the new model.
SUT_aam.shape_models[0]._sync_target_from_state()
#=======================================================================================================================
### Building Appearance Model ###
#=======================================================================================================================
# Reference frames (masked mean-shape templates) for each of the three AAMs;
# appearance vectors live in these frames.
SUT_reference_frame = build_reference_frame(SUT_aam.reference_shape)
S_reference_frame = build_reference_frame(source_aam.reference_shape)
T_reference_frame = build_reference_frame(target_aam.reference_shape)
# Warp the rescaled target images into the target reference frame.
ST_warped = warp_images(ST, ST_scaled_shapes, T_reference_frame, target_aam.transform,verbose=True)
# Flatten the warped images into the target appearance data matrix.
ST_App_data = as_matrix(ST_warped, return_template=False, verbose=True)
ST_App_N = ST_App_data.shape[0]
# Piecewise-affine warp from the source reference frame into the target one.
pwa_s_t = PiecewiseAffine(T_reference_frame.landmarks['source'].lms, S_reference_frame.landmarks['source'].lms)
# Warp every SOURCE appearance eigenvector into the TARGET reference frame
# so the two appearance spaces are comparable.
Warped_S_to_T = []
for i in range(0, source_aam.appearance_models[0]._components.shape[0]):
    img = S_reference_frame.from_vector(source_aam.appearance_models[0]._components[i, :])
    warped = img.as_unmasked(copy=False).warp_to_mask(T_reference_frame.mask, pwa_s_t)
    Warped_S_to_T.append(warped)
comp_S = as_matrix(Warped_S_to_T, return_template=False, verbose=True)
# Reorder the warped source appearance vectors by captured target variance.
New_Appearance_Components, New_Appearance_Eigen_Values = Compute_Effective_Space(ST_App_data,
                                                                                 target_aam.appearance_models[0]._mean,
                                                                                 comp_S)
# Orthonormalize the source appearance model against the target appearance
# model (mirrors the shape-model step above).
source_aam.appearance_models[0]._components = New_Appearance_Components
source_aam.appearance_models[0].orthonormalize_against_inplace(target_aam.appearance_models[0])
#=======================================================================================================================
### Building Appearance Model ###
#=======================================================================================================================
# Concatenate the target appearance space with the discarded source remainder.
App_e_vectors = np.vstack((target_aam.appearance_models[0]._components,source_aam.appearance_models[0]._components))
App_e_values = np.concatenate((target_aam.appearance_models[0]._eigenvalues, New_Appearance_Eigen_Values))
App_n_new_samples = ST_App_N + len(New_Appearance_Eigen_Values)
# Piecewise-affine warp from the target reference frame into the SUT one.
pwa_t_sut = PiecewiseAffine(SUT_reference_frame.landmarks['source'].lms, T_reference_frame.landmarks['source'].lms)
# Warp the concatenated eigenvectors into the SUT reference frame, where the
# final model lives.
Warped_T_to_SUT = []
for i in range(0, App_e_vectors.shape[0]):
    img = T_reference_frame.from_vector(App_e_vectors[i, :])
    warped = img.as_unmasked(copy=False).warp_to_mask(SUT_reference_frame.mask, pwa_t_sut)
    Warped_T_to_SUT.append(warped)
App_e_vectors = as_matrix(Warped_T_to_SUT, return_template=False, verbose=True)
#=======================================================================================================================
### Updating Appearance Components and ###
#=======================================================================================================================
# Rebuild the appearance PCA model from the warped basis, keep the SUT mean,
# and swap it into the SUT AAM in place of the original one.
App_tmp = PCAModel.init_from_components(components=App_e_vectors, eigenvalues=App_e_values,
                                        mean=SUT_aam.appearance_models[0].mean(),
                                        n_samples = App_n_new_samples,centred=True,
                                        max_n_components=SUT_aam.max_appearance_components[0])
del SUT_aam.appearance_models[0]
SUT_aam.appearance_models.append(App_tmp)
#=======================================================================================================================
### Looping on Different Values of Alpha ###
#=======================================================================================================================
# alpha[x]/beta[x] are the number of active shape/appearance components for
# run x; the two lists are paired element-wise.
alpha = [4, 6, 8, 10, 12, 14, 15, 16, 18, 20, 22, 24, 26, 28, 30]
beta = [4, 21, 39, 56, 74, 91, 100, 109, 126, 144, 161, 178, 196, 200, 200]
for x in range(len(alpha)):
    #===================================================================================================================
    ### Building the Fitter ###
    #===================================================================================================================
    fitter = LucasKanadeAAMFitter(
        SUT_aam,
        n_shape=alpha[x],
        n_appearance=beta[x]
    )
    #===================================================================================================================
    ### Fitting Images ###
    #===================================================================================================================
    # Per-run accumulators: per-iteration errors, iteration counts, and final
    # errors over all (image, perturbation) pairs.
    errors = []
    n_iters = []
    final_errors = []
    # Fit every test image from each of its 10 precomputed initialisations.
    for k, i in enumerate(test_images):
        # Ground-truth shape for error computation.
        gt_s = i.landmarks['face_ibug_66_trimesh'].lms
        # Load the 10 precomputed initial-shape perturbations for this image.
        # NOTE(review): these paths use a different root (/u/azinasg/...) than
        # the /Users/... loading paths above -- confirm this is intentional.
        # NOTE(review): `input` shadows the builtin.
        with open('/u/azinasg/Research/Sample_UNBC_Small_Init_2/' + i.path.name[:-4] + '.pkl',
                  'rb') as input:
            perturbations = pickle.load(input)
        for j in range(0, 10):
            initial_s = perturbations[j]
            # Fit from the perturbed initial shape, capped at 300 iterations.
            fr = fitter.fit_from_shape(i, initial_s, gt_shape=gt_s, max_iters=300)
            errors.append(fr.errors())
            n_iters.append(fr.n_iters)
            final_errors.append(fr.final_error())
            print "SUT_Dis : A=" + str(alpha[x]) + " B=" + str(beta[x]) + " k=" + str(k) + " j=" + str(j) + \
                  " initial err: " + str(fr.initial_error()) + " final err: " + str(fr.final_error())
    # Persist this run's results; readers must unpickle in the same order.
    with open(r'/u/azinasg/res16/SUT_Discarded_Source_V2_x=' + str(x) + '.pkl', 'wb') as f:
        pickle.dump(errors, f)
        pickle.dump(n_iters, f)
        pickle.dump(final_errors, f)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for miscellaneous functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.ops.nn_impl import _compute_sampled_logits
from tensorflow.python.platform import test as test_lib
class ZeroFractionTest(test_lib.TestCase):
  """Tests for nn_impl.zero_fraction."""

  def _ZeroFraction(self, x):
    """NumPy reference: fraction of entries of `x` that are exactly zero."""
    assert x.shape
    num_elements = np.prod(x.shape)
    num_nonzero = np.count_nonzero(x.flatten())
    return 1.0 - num_nonzero / num_elements

  @test_util.run_deprecated_v1
  def testZeroFraction(self):
    shape = [5, 17]
    x_np = np.random.randint(0, 2, size=shape).astype(np.float32)
    expected = self._ZeroFraction(x_np)
    x_tf = constant_op.constant(x_np)
    x_tf.set_shape(shape)
    actual = self.evaluate(nn_impl.zero_fraction(x_tf))
    self.assertAllClose(actual, expected, 1e-8)

  @test_util.run_deprecated_v1
  def testZeroFractionEmpty(self):
    # An empty input has no elements, so the fraction is NaN.
    result = self.evaluate(nn_impl.zero_fraction(np.zeros(0)))
    self.assertTrue(np.isnan(result))

  @test_util.run_deprecated_v1
  def testZeroFraction2_27Zeros(self):
    # Exercise the large-tensor (> 2**27 elements) path with all zeros.
    big_zeros = array_ops.zeros([int(2**27 * 1.01)], dtype=dtypes.int8)
    self.assertAllClose(1.0, self.evaluate(nn_impl.zero_fraction(big_zeros)))

  @test_util.run_deprecated_v1
  def testZeroFraction2_27Ones(self):
    # Exercise the large-tensor (> 2**27 elements) path with all ones.
    big_ones = array_ops.ones([int(2**27 * 1.01)], dtype=dtypes.int8)
    self.assertAllClose(0.0, self.evaluate(nn_impl.zero_fraction(big_ones)))

  @test_util.run_deprecated_v1
  def testUnknownSize(self):
    value = array_ops.placeholder(dtype=dtypes.float32)
    sparsity = nn_impl.zero_fraction(value)
    with self.cached_session() as sess:
      feed = {value: [[0., 1.], [0.3, 2.]]}
      self.assertAllClose(0.25, sess.run(sparsity, feed))
class SoftmaxTest(test_lib.TestCase, parameterized.TestCase):
  """Tests for nn_ops.softmax_v2."""

  def _softmax(self, x):
    """NumPy reference softmax along the last axis of a 2-D array."""
    assert len(x.shape) == 2
    shifted = x - x.max(1)[:, np.newaxis]
    e = np.exp(shifted)
    return e / e.sum(1)[:, np.newaxis]

  @test_util.run_in_graph_and_eager_modes
  def testSoftmax(self):
    x_np = np.random.randn(5, 10).astype(np.float32)
    expected = self._softmax(x_np)
    x_tf = constant_op.constant(x_np)
    # Default axis and explicit last axis must agree with the reference.
    default_axis = self.evaluate(nn_ops.softmax_v2(x_tf))
    explicit_axis = self.evaluate(nn_ops.softmax_v2(x_tf, 1))
    self.assertAllClose(default_axis, expected, 1e-3)
    self.assertAllClose(explicit_axis, expected, 1e-3)

  def testSoftmaxAxes(self):
    arr = np.linspace(0., 1, 12).reshape(3, 4)
    # For a 2-D array, axis=-2 and axis=0 address the same dimension.
    neg_axis = self.evaluate(nn_ops.softmax_v2(arr, axis=-2))
    pos_axis = self.evaluate(nn_ops.softmax_v2(arr, axis=0))
    gt_axis = self.evaluate(nn_ops.softmax_v2(arr, axis=0))
    self.assertAllClose(neg_axis, pos_axis, 1e-3)
    self.assertAllClose(pos_axis, gt_axis, 1e-3)

  @parameterized.parameters(((5, 10),), ((2, 3, 4),))
  @test_util.run_deprecated_v1
  def testGradient(self, x_shape):
    x_np = np.random.randn(*x_shape).astype(np.float64)
    with self.cached_session():
      x_tf = constant_op.constant(x_np)
      y_tf = nn_ops.softmax_v2(x_tf)
      err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
                                                    x_shape)
      self.assertLess(err, 2e-8)
class LogPoissonLossTest(test_lib.TestCase):
  """Tests for nn_impl.log_poisson_loss."""

  def _log_poisson_loss(self, x, z, compute_full_loss=False):
    """NumPy reference for the (optionally Stirling-corrected) loss."""
    loss = np.exp(x) - z * x
    if compute_full_loss:
      # The Stirling approximation term is applied only where z > 1.
      stirling = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z)
      loss += np.ma.masked_array(stirling, mask=(z <= 1)).filled(0.)
    return loss

  @test_util.run_in_graph_and_eager_modes
  def testLogPoissonLoss(self):
    x_np = np.random.randn(5, 10).astype(np.float32)
    z_np = np.random.randint(0, 5, size=(5, 10)).astype(np.float32)
    expected = self._log_poisson_loss(x_np, z_np, compute_full_loss=False)
    expected_full = self._log_poisson_loss(x_np, z_np, compute_full_loss=True)
    actual = self.evaluate(
        nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=False))
    actual_full = self.evaluate(
        nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=True))
    self.assertAllClose(actual, expected, 1e-3)
    self.assertAllClose(actual_full, expected_full, 1e-3)

  @test_util.run_deprecated_v1
  def testGradient(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float64)
    z_np = np.random.randint(0, 5, size=x_shape).astype(np.float64)
    with self.cached_session():
      x_tf = constant_op.constant(x_np)
      y_tf = nn_impl.log_poisson_loss(z_np, x_tf, compute_full_loss=False)
      y_tf_stirling = nn_impl.log_poisson_loss(
          z_np, x_tf, compute_full_loss=True)
      err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
                                                    x_shape)
      err_stirling = gradient_checker.compute_gradient_error(
          x_tf, x_shape, y_tf_stirling, x_shape)
      self.assertLess(err, 1e-6)
      self.assertLess(err_stirling, 1e-6)
class LogSoftmaxTest(test_lib.TestCase, parameterized.TestCase):
  """Tests for nn_ops.log_softmax_v2."""

  def _log_softmax(self, x):
    """NumPy reference log-softmax along the last axis of a 2-D array."""
    assert len(x.shape) == 2
    shifted = x - x.max(1)[:, np.newaxis]
    return shifted - np.log(np.sum(np.exp(shifted), 1, keepdims=True))

  @test_util.run_in_graph_and_eager_modes
  def testLogSoftmax(self):
    x_np = np.random.randn(5, 10).astype(np.float32)
    expected = self._log_softmax(x_np)
    x_tf = constant_op.constant(x_np)
    actual = self.evaluate(nn_ops.log_softmax_v2(x_tf))
    self.assertAllClose(actual, expected, 1e-3)

  def testLogSoftmaxAxes(self):
    arr = np.linspace(0., 1, 12).reshape(3, 4)
    # For a 2-D array, axis=-2 and axis=0 address the same dimension.
    neg_axis = self.evaluate(nn_ops.log_softmax_v2(arr, axis=-2))
    pos_axis = self.evaluate(nn_ops.log_softmax_v2(arr, axis=0))
    gt_axis = self.evaluate(nn_ops.log_softmax_v2(arr, axis=0))
    self.assertAllClose(neg_axis, pos_axis, 1e-3)
    self.assertAllClose(pos_axis, gt_axis, 1e-3)

  @parameterized.parameters(((5, 10),), ((2, 3, 4),))
  @test_util.run_deprecated_v1
  def testGradient(self, x_shape):
    x_np = np.random.randn(*x_shape).astype(np.float64)
    with self.cached_session():
      x_tf = constant_op.constant(x_np)
      y_tf = nn_ops.log_softmax_v2(x_tf)
      err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
                                                    x_shape)
      self.assertLess(err, 1e-7)
class L2LossTest(test_lib.TestCase):
  """Tests for nn_ops.l2_loss."""

  @test_util.run_in_graph_and_eager_modes
  def testL2Loss(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      x = constant_op.constant(
          [1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x", dtype=dtype)
      # l2_loss = sum(x**2) / 2 = (1 + 0 + 9 + 4) / 2 = 7.
      self.assertAllClose(7.0, self.evaluate(nn_ops.l2_loss(x)))

  @test_util.run_deprecated_v1
  def testGradient(self):
    shape = [20, 7, 3]
    np.random.seed(1)  # Make it reproducible.
    x_val = np.random.random_sample(shape).astype(np.float64)
    with self.cached_session():
      x = constant_op.constant(x_val, name="x")
      output = nn_ops.l2_loss(x)
      err = gradient_checker.compute_gradient_error(x, shape, output, [1])
      print("L2Loss gradient err = %g " % err)
      err_tolerance = 1e-10
      self.assertLess(err, err_tolerance)
class L2NormalizeTest(test_lib.TestCase):
  """Tests for nn_impl.l2_normalize_v2."""

  def _l2Normalize(self, x, dim):
    """NumPy reference: x / ||x||_2 along `dim` (an int or a list of ints)."""
    if isinstance(dim, list):
      norm = np.linalg.norm(x, axis=tuple(dim))
      for d in dim:
        norm = np.expand_dims(norm, d)
      return x / norm
    norm = np.apply_along_axis(np.linalg.norm, dim, x)
    return x / np.expand_dims(norm, dim)

  @test_util.run_in_graph_and_eager_modes
  def testL2Normalize(self):
    shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(shape).astype(np.float32)
    for axis in range(len(shape)):
      expected = self._l2Normalize(x_np, axis)
      x_tf = constant_op.constant(x_np, name="x")
      actual = nn_impl.l2_normalize_v2(x_tf, axis)
      self.assertAllClose(expected, self.evaluate(actual))

  @test_util.run_in_graph_and_eager_modes
  def testL2NormalizeDimArray(self):
    shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(shape).astype(np.float32)
    axes = [1, 2]
    expected = self._l2Normalize(x_np, axes)
    x_tf = constant_op.constant(x_np, name="x")
    actual = nn_impl.l2_normalize_v2(x_tf, axes)
    self.assertAllClose(expected, self.evaluate(actual))

  @test_util.run_deprecated_v1
  def testL2NormalizeGradient(self):
    shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(shape).astype(np.float64)
    for axis in range(len(shape)):
      with self.cached_session():
        x_tf = constant_op.constant(x_np, name="x")
        y_tf = nn_impl.l2_normalize_v2(x_tf, axis)
        err = gradient_checker.compute_gradient_error(x_tf, shape, y_tf,
                                                      shape)
        print("L2Normalize gradient err = %g " % err)
        self.assertLess(err, 1e-4)
class DropoutTest(test_lib.TestCase):
  """Statistical, shape and validation tests for nn_ops.dropout/dropout_v2."""

  def testDropout(self):
    # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate
    # that it is producing approximately the right number of ones over a large
    # number of samples, based on the keep probability.
    x_dim = 40
    y_dim = 30
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
      dropout = nn_ops.dropout(t, keep_prob)
      final_count = 0
      self.assertEqual([x_dim, y_dim], dropout.get_shape())
      for _ in xrange(0, num_iter):
        value = self.evaluate(dropout)
        final_count += np.count_nonzero(value)
        # Verifies that there are only two values: 0 and 1/keep_prob.
        sorted_value = np.unique(np.sort(value))
        self.assertEqual(0, sorted_value[0])
        self.assertAllClose(1 / keep_prob, sorted_value[1])
      # Check that we are in the 15% error range
      expected_count = x_dim * y_dim * keep_prob * num_iter
      rel_error = math.fabs(final_count - expected_count) / expected_count
      print(rel_error)
      self.assertTrue(rel_error < 0.15)

  def testShapedDropout(self):
    # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate
    # that it is producing approximately the right number of ones over a large
    # number of samples, based on the keep probability. This time with shaped
    # noise.
    x_dim = 40 * 30
    y_dim = 3
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
      dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
      self.assertEqual([x_dim, y_dim], dropout.get_shape())
      final_count = 0
      for _ in xrange(0, num_iter):
        value = self.evaluate(dropout)
        final_count += np.count_nonzero(value)
        # Verifies that there are only two values: 0 and 1/keep_prob.
        sorted_value = np.unique(np.sort(value))
        self.assertEqual(0, sorted_value[0])
        self.assertAllClose(1 / keep_prob, sorted_value[1])
      # Check that we are in the 15% error range
      expected_count = x_dim * y_dim * keep_prob * num_iter
      rel_error = math.fabs(final_count - expected_count) / expected_count
      print(rel_error)
      self.assertTrue(rel_error < 0.15)

  def testShapedDropoutCorrelation(self):
    # Runs a shaped dropout and tests that the correlations are correct.
    x_dim = 40
    y_dim = 30
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
      dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
      self.assertEqual([x_dim, y_dim], dropout.get_shape())
      for _ in xrange(0, num_iter):
        value = self.evaluate(dropout)
        # Verifies that each row has only one type of activation, since the
        # noise is broadcast along the row.
        for i in xrange(x_dim):
          sorted_value = np.unique(np.sort(value[i, :]))
          self.assertEqual(sorted_value.size, 1)

  @test_util.run_deprecated_v1
  def testDropoutPlaceholderKeepProb(self):
    # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate
    # that it is producing approximately the right number of ones over a large
    # number of samples, based on the keep probability.
    x_dim = 40
    y_dim = 30
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      with self.cached_session():
        t = constant_op.constant(
            1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
        # keep_prob is fed at run time instead of being a graph constant.
        keep_prob_placeholder = array_ops.placeholder(dtypes.float32)
        dropout = nn_ops.dropout(t, keep_prob_placeholder)
        final_count = 0
        self.assertEqual([x_dim, y_dim], dropout.get_shape())
        for _ in xrange(0, num_iter):
          value = dropout.eval(feed_dict={keep_prob_placeholder: keep_prob})
          final_count += np.count_nonzero(value)
          # Verifies that there are only two values: 0 and 1/keep_prob.
          sorted_value = np.unique(np.sort(value))
          self.assertEqual(0, sorted_value[0])
          self.assertAllClose(1 / keep_prob, sorted_value[1])
        # Check that we are in the 15% error range
        expected_count = x_dim * y_dim * keep_prob * num_iter
        rel_error = math.fabs(final_count - expected_count) / expected_count
        print(rel_error)
        self.assertTrue(rel_error < 0.15)

  @test_util.run_deprecated_v1
  def testShapedDropoutUnknownShape(self):
    # A fully-unknown noise_shape must not destroy the input's static shape.
    x_dim = 40
    y_dim = 30
    keep_prob = 0.5
    x = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    dropout_x = nn_ops.dropout(
        x, keep_prob, noise_shape=array_ops.placeholder(dtypes.int32))
    self.assertEqual(x.get_shape(), dropout_x.get_shape())

  def testPartialShapedDropout(self):
    x_dim = 40 * 30
    y_dim = 3
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
      # Set noise_shape=[None, 1] which means [x_dim, 1].
      dropout = nn_ops.dropout(t, keep_prob, noise_shape=[None, 1])
      self.assertEqual([x_dim, y_dim], dropout.get_shape())
      final_count = 0
      for _ in xrange(0, num_iter):
        value = self.evaluate(dropout)
        final_count += np.count_nonzero(value)
        # Verifies that there are only two values: 0 and 1/keep_prob.
        sorted_value = np.unique(np.sort(value))
        self.assertEqual(0, sorted_value[0])
        self.assertAllClose(1 / keep_prob, sorted_value[1])
      # Check that we are in the 15% error range
      expected_count = x_dim * y_dim * keep_prob * num_iter
      rel_error = math.fabs(final_count - expected_count) / expected_count
      print(rel_error)
      self.assertTrue(rel_error < 0.15)

  @test_util.run_deprecated_v1
  def testInvalidKeepProb(self):
    # keep_prob outside (0, 1], non-scalar, or of the wrong dtype must raise.
    x_dim = 40
    y_dim = 30
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, -1.0)
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, 1.1)
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, [0.0, 1.0])
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, array_ops.placeholder(dtypes.float64))
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, array_ops.placeholder(dtypes.float32, shape=[2]))

  @test_util.run_deprecated_v1
  def testInvalidRate(self):
    # dropout_v2 takes a rate in [0, 1); invalid rates must raise.
    x_dim = 40
    y_dim = 30
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      nn_ops.dropout_v2(t, -1.0)
    with self.assertRaises(ValueError):
      nn_ops.dropout_v2(t, 1.1)
    with self.assertRaises(ValueError):
      nn_ops.dropout_v2(t, [0.0, 1.0])

  @test_util.run_deprecated_v1
  def testShapedDropoutShapeError(self):
    # Runs shaped dropout and verifies an error is thrown on misshapen noise.
    x_dim = 40
    y_dim = 30
    keep_prob = 0.5
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10])
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5])
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim + 3])
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim])
    # test that broadcasting proceeds
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[y_dim])
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, y_dim])
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, 1])

  def testNoDropoutFast(self):
    # With keep_prob=1 / rate=0, dropout is a no-op returning its input.
    x = array_ops.zeros((5,))
    y = nn_ops.dropout(x, keep_prob=1)
    self.assertTrue(x is y)

    y = nn_ops.dropout_v2(x, rate=0)
    self.assertTrue(x is y)

  def testDropoutWithIntegerInputs(self):
    # Integer tensors are rejected -- dropout scales by 1/keep_prob.
    x = constant_op.constant([1, 1, 1, 1, 1])
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(x, 0.5)
class ComputeSampledLogitsTest(test_lib.TestCase):
  def setUp(self):
    # Tolerance shared by the assertAllClose checks in this test class.
    self._eps = 1e-3
def _GenerateTestData(self, num_classes, dim, batch_size, num_true, labels,
sampled, subtract_log_q):
"""Randomly generates input/output data for a single test case.
This function returns numpy constants for use in a test case.
Args:
num_classes: An int. The number of embedding classes in the test case.
dim: An int. The dimension of the embedding.
batch_size: An int. The batch size.
num_true: An int. The number of target classes per training example.
labels: A list of batch_size * num_true ints. The target classes.
sampled: A list of indices in [0, num_classes).
subtract_log_q: A bool corresponding to the parameter in
_compute_sampled_logits().
Returns:
weights: Embedding weights to use as test input. It is a numpy array
of shape [num_classes, dim]
biases: Embedding biases to use as test input. It is a numpy array
of shape [num_classes].
hidden_acts: Forward activations of the network to use as test input.
It is a numpy array of shape [batch_size, dim].
sampled_vals: A tuple based on `sampled` to use as test input in the
format returned by a *_candidate_sampler function.
exp_logits: The output logits expected from _compute_sampled_logits().
It is a numpy array of shape [batch_size, num_true + len(sampled)].
exp_labels: The output labels expected from _compute_sampled_logits().
It is a numpy array of shape [batch_size, num_true + len(sampled)].
"""
weights = np.random.randn(num_classes, dim).astype(np.float32)
biases = np.random.randn(num_classes).astype(np.float32)
hidden_acts = np.random.randn(batch_size, dim).astype(np.float32)
true_exp = np.full([batch_size, 1], fill_value=0.5, dtype=np.float32)
sampled_exp = np.full([len(sampled)], fill_value=0.5, dtype=np.float32)
sampled_vals = (sampled, true_exp, sampled_exp)
sampled_w, sampled_b = weights[sampled], biases[sampled]
true_w, true_b = weights[labels], biases[labels]
true_logits = np.sum(
hidden_acts.reshape((batch_size, 1, dim)) * true_w.reshape(
(batch_size, num_true, dim)),
axis=2)
true_b = true_b.reshape((batch_size, num_true))
true_logits += true_b
sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b
if subtract_log_q:
true_logits -= np.log(true_exp)
sampled_logits -= np.log(sampled_exp[np.newaxis, :])
exp_logits = np.concatenate([true_logits, sampled_logits], axis=1)
exp_labels = np.hstack((np.ones_like(true_logits) / num_true,
np.zeros_like(sampled_logits)))
return weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels
def _ShardTestEmbeddings(self, weights, biases, num_shards):
"""Shards the weights and biases returned by _GenerateTestData.
Args:
weights: The weights returned by _GenerateTestData.
biases: The biases returned by _GenerateTestData.
num_shards: The number of shards to create.
Returns:
sharded_weights: A list of size `num_shards` containing all the weights.
sharded_biases: A list of size `num_shards` containing all the biases.
"""
with ops.Graph().as_default() as g:
sharded_weights = variable_scope.get_variable(
"w",
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=constant_op.constant(weights))
sharded_biases = variable_scope.get_variable(
"b",
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=constant_op.constant(biases))
with self.session(graph=g) as sess:
variables.global_variables_initializer().run()
return self.evaluate([list(sharded_weights), list(sharded_biases)])
def testShapes(self):
np.random.seed(0)
num_classes = 5
batch_size = 3
for num_true in range(1, 5):
labels = np.random.randint(
low=0, high=num_classes, size=batch_size * num_true)
(weights, biases, hidden_acts, sampled_vals, exp_logits,
exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=num_true,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=False)
logits_tensor, labels_tensor = _compute_sampled_logits(
weights=constant_op.constant(weights),
biases=constant_op.constant(biases),
labels=constant_op.constant(
labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_vals,
subtract_log_q=False,
remove_accidental_hits=False,
partition_strategy="div",
name="sampled_logits_basic_num_true_%d" % num_true)
got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
self.assertEqual(exp_logits.shape, got_logits.shape, self._eps)
self.assertEqual(exp_labels.shape, got_labels.shape, self._eps)
def testBasic(self):
"""Without accidental hit removal or subtract_log_q."""
np.random.seed(0)
num_classes = 5
batch_size = 3
for num_true in range(1, 5):
labels = np.random.randint(
low=0, high=num_classes, size=batch_size * num_true)
(weights, biases, hidden_acts, sampled_vals, exp_logits,
exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=num_true,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=False)
logits_tensor, labels_tensor = _compute_sampled_logits(
weights=constant_op.constant(weights),
biases=constant_op.constant(biases),
labels=constant_op.constant(
labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_vals,
subtract_log_q=False,
remove_accidental_hits=False,
partition_strategy="div",
name="sampled_logits_basic_num_true_%d" % num_true)
got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
self.assertAllClose(exp_logits, got_logits, self._eps)
self.assertAllClose(exp_labels, got_labels, self._eps)
def testAccidentalHitRemoval(self):
"""With accidental hit removal, no subtract_log_q."""
np.random.seed(0)
num_classes = 5
batch_size = 3
sampled = [1, 0, 2, 3]
for num_true in range(1, 5):
labels = np.random.randint(
low=0, high=num_classes, size=batch_size * num_true)
(weights, biases, hidden_acts, sampled_vals, _,
_) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=num_true,
labels=labels,
sampled=sampled,
subtract_log_q=False)
logits_tensor, _ = _compute_sampled_logits(
weights=constant_op.constant(weights),
biases=constant_op.constant(biases),
labels=constant_op.constant(
labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
inputs=constant_op.constant(hidden_acts),
num_sampled=len(sampled),
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_vals,
subtract_log_q=False,
remove_accidental_hits=True,
partition_strategy="div",
name="sampled_logits_accidental_hit_removal_num_true_%d" % num_true)
# Test that the exponentiated logits of accidental hits are near 0.
# First we need to find the hits in this random test run:
labels_reshape = labels.reshape((batch_size, num_true))
got_logits = self.evaluate(logits_tensor)
for row in xrange(batch_size):
row_labels = labels_reshape[row, :]
for col in xrange(len(sampled)):
if sampled[col] in row_labels:
# We need to add the num_true_test offset into logits_*
self.assertNear(
np.exp(got_logits[row, col + num_true]), 0., self._eps)
def testSubtractLogQ(self):
"""With subtract_log_q, no accidental hit removal."""
np.random.seed(0)
num_classes = 5
batch_size = 3
for num_true in range(1, 5):
labels = np.random.randint(
low=0, high=num_classes, size=batch_size * num_true)
(weights, biases, hidden_acts, sampled_vals, exp_logits,
exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=num_true,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=True)
logits_tensor, labels_tensor = _compute_sampled_logits(
weights=constant_op.constant(weights),
biases=constant_op.constant(biases),
labels=constant_op.constant(
labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_vals,
subtract_log_q=True,
remove_accidental_hits=False,
partition_strategy="div",
name="sampled_logits_subtract_log_q_num_true_%d" % num_true)
got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
self.assertAllClose(exp_logits, got_logits, self._eps)
self.assertAllClose(exp_labels, got_labels, self._eps)
def testSharded(self):
"""With sharded weights and sharded biases."""
np.random.seed(0)
num_classes = 5
batch_size = 3
for num_true in range(1, 5):
labels = np.random.randint(
low=0, high=num_classes, size=batch_size * num_true)
(weights, biases, hidden_acts, sampled_vals, exp_logits,
exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=num_true,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=False)
weight_shards, bias_shards = self._ShardTestEmbeddings(
weights, biases, num_shards=3)
logits_tensor, labels_tensor = _compute_sampled_logits(
weights=[constant_op.constant(shard) for shard in weight_shards],
biases=[constant_op.constant(shard) for shard in bias_shards],
labels=constant_op.constant(
labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_vals,
subtract_log_q=False,
remove_accidental_hits=False,
partition_strategy="div",
name="sampled_logits_sharded_num_true_%d" % num_true)
got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
self.assertAllClose(exp_logits, got_logits, self._eps)
self.assertAllClose(exp_labels, got_labels, self._eps)
def testNCELoss(self):
# A simple test to verify the numerics.
def _SigmoidCrossEntropyWithLogits(logits, targets):
# logits, targets: float arrays of the same shape.
assert logits.shape == targets.shape
pred = 1. / (1. + np.exp(-logits))
eps = 0.0001
pred = np.minimum(np.maximum(pred, eps), 1 - eps)
return -targets * np.log(pred) - (1. - targets) * np.log(1. - pred)
np.random.seed(0)
num_classes = 5
batch_size = 3
labels = [0, 1, 2]
(weights, biases, hidden_acts, sampled_vals, exp_logits,
exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=1,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=True)
exp_nce_loss = np.sum(
_SigmoidCrossEntropyWithLogits(exp_logits, exp_labels), 1)
got_nce_loss = nn_impl.nce_loss_v2(
weights=constant_op.constant(weights),
biases=constant_op.constant(biases),
labels=constant_op.constant(labels, shape=(batch_size, 1)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=1,
sampled_values=sampled_vals)
self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4)
# Test with sharded weights and sharded biases.
weight_shards, bias_shards = self._ShardTestEmbeddings(
weights, biases, num_shards=3)
got_nce_loss = nn_impl.nce_loss_v2(
weights=[constant_op.constant(shard) for shard in weight_shards],
biases=[constant_op.constant(shard) for shard in bias_shards],
labels=constant_op.constant(labels, shape=(batch_size, 1)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=1,
sampled_values=sampled_vals)
self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4)
def testSampledSoftmaxLoss(self):
# A simple test to verify the numerics.
def _SoftmaxCrossEntropyWithLogits(logits, targets):
# logits, targets: float arrays of the same shape.
assert logits.shape == targets.shape
stable_exp_logits = np.exp(
logits - np.amax(logits, axis=1, keepdims=True))
pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)
return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)
np.random.seed(0)
num_classes = 5
batch_size = 3
labels = [0, 1, 2]
(weights, biases, hidden_acts, sampled_vals, exp_logits,
exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=1,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=True)
exp_sampled_softmax_loss = _SoftmaxCrossEntropyWithLogits(
exp_logits, exp_labels)
got_sampled_softmax_loss = nn_impl.sampled_softmax_loss_v2(
weights=constant_op.constant(weights),
biases=constant_op.constant(biases),
labels=constant_op.constant(labels, shape=(batch_size, 1)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=1,
sampled_values=sampled_vals,
remove_accidental_hits=False)
self.assertAllClose(exp_sampled_softmax_loss,
self.evaluate(got_sampled_softmax_loss), 1e-4)
# Test with sharded weights and sharded biases.
weight_shards, bias_shards = self._ShardTestEmbeddings(
weights, biases, num_shards=3)
got_sampled_softmax_loss = nn_impl.sampled_softmax_loss_v2(
weights=[constant_op.constant(shard) for shard in weight_shards],
biases=[constant_op.constant(shard) for shard in bias_shards],
labels=constant_op.constant(labels, shape=(batch_size, 1)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=1,
sampled_values=sampled_vals,
remove_accidental_hits=False)
self.assertAllClose(exp_sampled_softmax_loss,
self.evaluate(got_sampled_softmax_loss), 1e-4)
  def testSampledSoftmaxLossBf16(self):
    # A simple test to verify the numerics for bfloat16.
    def _SoftmaxCrossEntropyWithLogits(logits, targets):
      # logits, targets: float arrays of the same shape.
      assert logits.shape == targets.shape
      # Subtract the row max before exponentiating for numerical stability.
      stable_exp_logits = np.exp(
          logits - np.amax(logits, axis=1, keepdims=True))
      pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)
      return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)

    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    labels = [0, 1, 2]
    sampled = [1, 0, 2, 3]
    # The expected loss is computed from the float32 reference data; only the
    # op inputs below are cast to bfloat16.
    (weights, biases, hidden_acts, _, exp_logits,
     exp_labels) = self._GenerateTestData(
         num_classes=num_classes,
         dim=10,
         batch_size=batch_size,
         num_true=1,
         labels=labels,
         sampled=sampled,
         subtract_log_q=True)
    exp_sampled_softmax_loss = _SoftmaxCrossEntropyWithLogits(
        exp_logits, exp_labels)

    # Rebuild the sampler outputs in bfloat16 so every op input shares the
    # same dtype (the sampled_vals from _GenerateTestData are float32).
    true_exp_bf16 = np.full([batch_size, 1],
                            fill_value=0.5,
                            dtype=dtypes.bfloat16.as_numpy_dtype)
    sampled_exp_bf16 = np.full([len(sampled)],
                               fill_value=0.5,
                               dtype=dtypes.bfloat16.as_numpy_dtype)
    sampled_vals_bf16 = (sampled, true_exp_bf16, sampled_exp_bf16)

    got_sampled_softmax_loss = math_ops.cast(
        nn_impl.sampled_softmax_loss_v2(
            weights=constant_op.constant(weights, dtype=dtypes.bfloat16),
            biases=constant_op.constant(biases, dtype=dtypes.bfloat16),
            labels=constant_op.constant(
                labels, shape=(batch_size, 1), dtype=dtypes.bfloat16),
            inputs=constant_op.constant(hidden_acts, dtype=dtypes.bfloat16),
            num_sampled=4,
            num_classes=num_classes,
            num_true=1,
            sampled_values=sampled_vals_bf16,
            remove_accidental_hits=False), dtypes.float32)
    # bfloat16 carries far less precision than float32, hence the loose 1e-1
    # tolerance compared to the float32 test above.
    self.assertAllClose(exp_sampled_softmax_loss,
                        self.evaluate(got_sampled_softmax_loss), 1e-1)
class CReluTest(test_lib.TestCase):

  def test(self):
    """crelu(x) concatenates the positive and negated-negative parts of x."""
    np.random.seed(1)  # Make it reproducible.
    x = np.random.randn(3, 4).astype(np.float32)
    expected = np.concatenate([x * (x > 0), -x * (x < 0)], axis=1)
    actual = self.evaluate(nn_ops.crelu(constant_op.constant(x)))
    self.assertAllClose(expected, actual, 1e-4)
class ReluTest(test_lib.TestCase):

  def test(self):
    """relu(x) equals elementwise max(x, 0)."""
    np.random.seed(1)  # Make it reproducible.
    x = np.random.randn(3, 4).astype(np.float32)
    expected = np.maximum(x, 0.0)
    actual = self.evaluate(nn_ops.relu(constant_op.constant(x)))
    self.assertAllEqual(expected, actual)

  @test_util.run_deprecated_v1
  def testNaNs(self):
    # Test that relu(nan) = nan for various sizes.
    for size in range(18):
      x = np.full(size, np.nan)
      with self.cached_session():
        z = nn_ops.relu(constant_op.constant(x)).eval()
        self.assertTrue(np.isnan(z).all())
class LeakyReluTest(test_lib.TestCase):

  def testRange(self):
    """leaky_relu on inputs in [0, 1] must act as the identity."""
    batch_size = 3
    height, width = 4, 4
    np.random.seed(1)  # Make it reproducible.
    inputs = np.random.uniform(size=(batch_size, height, width, 3)).astype(
        np.float32)
    inputs = constant_op.constant(inputs)
    outputs = nn_ops.leaky_relu(inputs)
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(inputs.shape, outputs.shape)
    inputs, outputs = self.evaluate([inputs, outputs])
    self.assertGreaterEqual(outputs.min(), 0.0)
    self.assertLessEqual(outputs.max(), 1.0)
    self.assertAllClose(inputs, outputs)

  @test_util.run_deprecated_v1
  def testValues(self):
    """Negative inputs are scaled by the default alpha; others pass through."""
    for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
      np_values = np.array([-2, -1, 0, 1, 2], dtype=dtype)
      outputs = nn_ops.leaky_relu(constant_op.constant(np_values))
      outputs = self.evaluate(outputs)
      # float16 needs a looser tolerance than the other dtypes.
      tol = 2e-3 if dtype == np.float16 else 1e-6
      self.assertAllClose(
          outputs, [-0.4, -0.2, 0.0, 1.0, 2.0], rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testName(self):
    """The op name is honored when given and defaults to LeakyRelu."""
    np_values = np.array([-2, -1, 0, 1, 2], dtype=np.float64)
    outputs_with_name_set = nn_ops.leaky_relu(
        constant_op.constant(np_values),
        name='test_relu_op')
    self.assertEqual(outputs_with_name_set.name, 'test_relu_op:0')
    outputs_without_name_set = nn_ops.leaky_relu(
        constant_op.constant(np_values))
    self.assertEqual(outputs_without_name_set.name, 'LeakyRelu:0')
class SwishTest(test_lib.TestCase):

  @test_util.run_deprecated_v1
  def testValues(self):
    """swish(x) must equal x * sigmoid(x)."""
    np_values = np.array(
        [np.linspace(-10.0, 0.0, 100),
         np.linspace(0.0, 10.0, 100)],
        dtype=np.float32)
    tf_values = constant_op.constant(np_values)
    actual_tf = nn_impl.swish(tf_values)
    expected_tf = tf_values * math_ops.sigmoid(tf_values)
    actual, expected = self.evaluate([actual_tf, expected_tf])
    self.assertAllClose(actual, expected)

  @test_util.run_deprecated_v1
  def testGradients(self):
    """The analytic gradient of swish must match numeric differentiation."""
    shape = [5, 3, 4]
    sigma = 5
    x_np = np.random.randn(*shape) * sigma
    x_tf = constant_op.constant(x_np)
    y_tf = nn_impl.swish(x_tf)
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(x_tf, shape, y_tf, shape)
      self.assertLess(err, 1e-4)
class MomentsTest(test_lib.TestCase):

  def doOutputTest(self,
                   input_shape,
                   moments_axes,
                   tol=1e-4,
                   check_gradients=False):
    """Compares nn_impl.moments_v2 against NumPy over a grid of settings.

    Args:
      input_shape: Shape of the random input tensor.
      moments_axes: Axes over which mean and variance are computed.
      tol: Relative/absolute tolerance for comparing against NumPy.
      check_gradients: If True, also numerically check the gradients of both
        outputs.
    """
    for center in [0.0, 1.0, 1e3]:
      for scale in [1.0, 0.1]:
        for keep_dims in [True, False]:
          input_values = np.random.rand(*input_shape) * scale + center
          expected_mean = np.mean(
              input_values, axis=moments_axes, keepdims=keep_dims)
          expected_var = np.var(
              input_values, axis=moments_axes, keepdims=keep_dims)
          with ops.Graph().as_default() as graph:
            with self.session(graph=graph):
              inputs = constant_op.constant(
                  input_values, shape=input_shape, dtype=dtypes.float32)
              mean, variance = nn_impl.moments_v2(
                  inputs, moments_axes, keepdims=keep_dims)

              if check_gradients:
                # Mean first, then variance (matches the reference order).
                for output in (mean, variance):
                  err = gradient_checker.compute_gradient_error(
                      inputs, input_shape, output, output.shape.as_list())
                  self.assertLess(err, 1e-3)

              # Evaluate and verify: finite values that match NumPy.
              mean_val, var_val = self.evaluate([mean, variance])
              self.assertFalse(np.isnan(mean_val).any())
              self.assertFalse(np.isnan(var_val).any())
              self.assertAllClose(mean_val, expected_mean, rtol=tol, atol=tol)
              self.assertAllClose(var_val, expected_var, rtol=tol, atol=tol)

  def testOutputAndGradient2DInput0(self):
    self.doOutputTest((10, 10), (0,), check_gradients=True)

  def testOutputAndGradient2DInput01(self):
    self.doOutputTest((10, 10), (0, 1), check_gradients=True)

  def testOutput2DInput0(self):
    self.doOutputTest((10, 300), (0,))

  def testOutput2DInput1(self):
    self.doOutputTest((10, 300), (1,))

  def testOutput2DInput01(self):
    self.doOutputTest((10, 300), (0, 1))

  def testOutput4DInput0(self):
    self.doOutputTest((10, 10, 10, 30), (0,))

  def testOutput4DInput1(self):
    self.doOutputTest((10, 10, 10, 30), (1,))

  def testOutput4DInput3(self):
    self.doOutputTest((10, 10, 10, 30), (3,))

  def testOutput4DInput012(self):
    self.doOutputTest((10, 10, 10, 30), (0, 1, 2))

  def testOutput4DInput123(self):
    self.doOutputTest((10, 10, 10, 30), (1, 2, 3))
class DataFormatDimMapTest(test_lib.TestCase):

  def _test(self, x_val, y_val_expected):
    """Maps dims with the default formats (NHWC -> NCHW) and verifies."""
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_dim_map(x)
    self.assertAllEqual(self.evaluate(y), y_val_expected)

  def _test_formats(self, x_val, y_val_expected, src_format, dst_format):
    """Maps dims between explicit formats, running on GPU when available."""
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_dim_map(
        x, src_format=src_format, dst_format=dst_format)
    with test_util.use_gpu():
      self.assertAllEqual(self.evaluate(y), y_val_expected)

  def test(self):
    # Scalars, including negative (Python-style) axis indices.
    self._test(0, 0)
    self._test(1, 2)
    self._test(2, 3)
    self._test(3, 1)
    self._test(-1, 1)
    self._test(-2, 3)
    self._test(-3, 2)
    self._test(-4, 0)
    # Vectors and higher-rank index tensors.
    self._test([1, 3], [2, 1])
    self._test([1, 3, -2], [2, 1, 3])
    self._test([1, -3, -2], [2, 2, 3])
    self._test([[1, -3], [1, -1]], [[2, 2], [2, 1]])

  def testNHWCtoNCHW(self):
    self._test_formats([1, -3, -2], [2, 2, 3], "NHWC", "NCHW")

  def testNHWCtoHWNC(self):
    self._test_formats([-4, -3, -2, -1, 0, 1, 2, 3],
                       [2, 0, 1, 3, 2, 0, 1, 3], "NHWC", "HWNC")

  def testNHWCtoWHCN(self):
    self._test_formats([-4, -3, -2, -1, 0, 1, 2, 3],
                       [3, 1, 0, 2, 3, 1, 0, 2], "NHWC", "WHCN")

  def testArbitraryASCII(self):
    # Formats are arbitrary character strings; only the permutation matters.
    self._test_formats([-4, -3, -2, -1, 0, 1, 2, 3],
                       [3, 2, 1, 0, 3, 2, 1, 0], "qwer", "rewq")
class DataFormatVectorPermuteTest(test_lib.TestCase):

  def _check(self, x_val, expected, **permute_kwargs):
    """Permutes x_val with data_format_vec_permute and verifies the result."""
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, **permute_kwargs)
    with test_util.use_gpu():
      self.assertAllEqual(self.evaluate(y), expected)

  def testNHWCToNCHW(self):
    # Default formats are NHWC -> NCHW.
    self._check([7, 4, 9, 3], [7, 3, 4, 9])

  def testNCHWToNHWC(self):
    self._check([7, 4, 9, 3], [7, 9, 3, 4],
                src_format="NCHW", dst_format="NHWC")

  def testNHWCToHWNC(self):
    self._check([7, 4, 9, 3], [4, 9, 7, 3],
                src_format="NHWC", dst_format="HWNC")

  def testHWNCToNHWC(self):
    self._check([7, 4, 9, 3], [9, 7, 4, 3],
                src_format="HWNC", dst_format="NHWC")

  def testNHWCToNCHW2D(self):
    # 2-D inputs permute whole rows.
    self._check([[7, 4], [9, 3], [4, 5], [5, 1]],
                [[7, 4], [5, 1], [9, 3], [4, 5]])

  def testNHWCToHWNC2D(self):
    self._check([[7, 4], [9, 3], [4, 5], [5, 1]],
                [[9, 3], [4, 5], [7, 4], [5, 1]],
                src_format="NHWC", dst_format="HWNC")

  def testHWNCToNHWC2D(self):
    self._check([[7, 4], [9, 3], [4, 5], [5, 1]],
                [[4, 5], [7, 4], [9, 3], [5, 1]],
                src_format="HWNC", dst_format="NHWC")

  def testNCHWToNHWC2D(self):
    self._check([[7, 4], [9, 3], [4, 5], [5, 1]],
                [[7, 4], [4, 5], [5, 1], [9, 3]],
                src_format="NCHW", dst_format="NHWC")
if __name__ == "__main__":
  # Run every test case defined in this module.
  test_lib.main()
|
|
#!/usr/bin/env python
"""This file contains utility classes related to maintenance used by GRR."""
import hashlib
import os
import logging
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import build
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
DIGEST_ALGORITHM = hashlib.sha256 # pylint: disable=invalid-name
DIGEST_ALGORITHM_STR = "sha256"
SUPPORTED_PLATFORMS = ["windows", "linux", "darwin"]
SUPPORTED_ARCHITECTURES = ["i386", "amd64"]
def UploadSignedConfigBlob(content, aff4_path, client_context=None,
                           limit=None, token=None):
  """Upload a signed blob into the datastore.

  Args:
    content: File content to upload.
    aff4_path: aff4 path to upload to.
    client_context: The configuration contexts to use.
    limit: The maximum size of the chunk to use.
    token: A security token.

  Raises:
    IOError: On failure to write.
  """
  if limit is None:
    limit = config_lib.CONFIG["Datastore.maximum_blob_size"]

  # Keys are selected for the client platform the blob targets; default to
  # the Windows client context.
  if client_context is None:
    client_context = ["Platform:Windows", "Client Context"]

  config_lib.CONFIG.Validate(
      parameters="PrivateKeys.executable_signing_private_key")
  signing_key = config_lib.CONFIG.Get(
      "PrivateKeys.executable_signing_private_key", context=client_context)
  verification_key = config_lib.CONFIG.Get(
      "Client.executable_signing_public_key", context=client_context)

  with aff4.FACTORY.Create(
      aff4_path, "GRRSignedBlob", mode="w", token=token) as fd:
    # Sign and store the content one chunk at a time so no single blob
    # exceeds the datastore limit.
    for offset in xrange(0, len(content), limit):
      signed_blob = rdf_crypto.SignedBlob()
      signed_blob.Sign(content[offset:offset + limit], signing_key,
                       verification_key, prompt=True)
      fd.Add(signed_blob)

  logging.info("Uploaded to %s", fd.urn)
def UploadSignedDriverBlob(content, aff4_path=None, client_context=None,
                           install_request=None, token=None):
  """Upload a signed blob into the datastore.

  Args:
    content: Content of the driver file to upload.
    aff4_path: aff4 path to upload to. If not specified, we use the config to
      figure out where it goes.
    client_context: The configuration contexts to use.
    install_request: A DriverInstallRequest rdfvalue describing the installation
      parameters for this driver. If None these are read from the config.
    token: A security token.

  Returns:
    String containing path the file was written to.

  Raises:
    IOError: On failure to write.
  """
  sig_key = config_lib.CONFIG.Get("PrivateKeys.driver_signing_private_key",
                                  context=client_context)
  ver_key = config_lib.CONFIG.Get("Client.driver_signing_public_key",
                                  context=client_context)

  if aff4_path is None:
    # No destination given: derive it from the configuration. The configured
    # location must be unambiguous (exactly one path), otherwise we refuse.
    aff4_paths = config_lib.CONFIG.Get("MemoryDriver.aff4_paths",
                                       context=client_context)
    if not aff4_paths:
      raise IOError("Could not determine driver location.")
    if len(aff4_paths) > 1:
      logging.info("Possible driver locations: %s", aff4_paths)
      raise IOError("Ambiguous driver location, please specify.")
    aff4_path = aff4_paths[0]

  # Sign the whole driver as a single blob.
  blob_rdf = rdf_crypto.SignedBlob()
  blob_rdf.Sign(content, sig_key, ver_key, prompt=True)

  with aff4.FACTORY.Create(
      aff4_path, "GRRMemoryDriver", mode="w", token=token) as fd:
    fd.Add(blob_rdf)

    if install_request is None:
      # Create install_request from the configuration.
      install_request = rdf_client.DriverInstallTemplate(
          device_path=config_lib.CONFIG.Get(
              "MemoryDriver.device_path", context=client_context),
          driver_display_name=config_lib.CONFIG.Get(
              "MemoryDriver.driver_display_name", context=client_context),
          driver_name=config_lib.CONFIG.Get(
              "MemoryDriver.driver_service_name", context=client_context))

    # Attach the installation parameters to the stored driver object.
    fd.Set(fd.Schema.INSTALLATION(install_request))

  logging.info("Uploaded to %s", fd.urn)
  return fd.urn
def GetConfigBinaryPathType(aff4_path):
  """Take an aff4_path and return type or None.

  Args:
    aff4_path: An RDFURN containing the path to the binary.

  Returns:
    None if the path is not supported for binary upload, otherwise the AFF4
    type name to use ("GRRMemoryDriver" or "GRRSignedBlob").
  """
  if not aff4_path.Path().startswith("/config"):
    return None
  components = aff4_path.RelativeName("aff4:/config").split("/")
  if not components:
    return None
  # Guard the platform lookup with a length check: a bare "/config/drivers"
  # or "/config/executables" path has no platform component and previously
  # raised IndexError.
  if components[0] == "drivers":
    if len(components) > 1 and components[1] in SUPPORTED_PLATFORMS:
      return "GRRMemoryDriver"
    return None
  if components[0] == "executables":
    if len(components) > 1 and components[1] in SUPPORTED_PLATFORMS:
      return "GRRSignedBlob"
    return None
  if components[0] == "python_hacks":
    return "GRRSignedBlob"
  return None
def CreateBinaryConfigPaths(token=None):
  """Create the paths required for binary configs."""
  required_urns = set()

  try:
    # We weren't already initialized, create all directories we will need.
    for platform in SUPPORTED_PLATFORMS:
      for arch in SUPPORTED_ARCHITECTURES:
        client_context = ["Platform:%s" % platform.title(),
                          "Arch:%s" % arch]
        driver_paths = config_lib.CONFIG.Get("MemoryDriver.aff4_paths",
                                             context=client_context)
        for driver_path in driver_paths:
          required_urns.add(rdfvalue.RDFURN(driver_path).Dirname())

      required_urns.add("aff4:/config/executables/%s/agentupdates" % platform)
      required_urns.add("aff4:/config/executables/%s/installers" % platform)

    existing_urns = set(
        stat_entry["urn"]
        for stat_entry in aff4.FACTORY.Stat(list(required_urns), token=token))

    # One by one is not optimal but we have to do it only once per urn.
    for urn in required_urns - existing_urns:
      aff4.FACTORY.Create(urn, "AFF4Volume", token=token).Flush()

  except access_control.UnauthorizedAccess:
    logging.info("User is not admin, cannot check configuration tree.")
    return
def _RepackBinary(context, builder_cls):
# Check for the presence of the template.
template_path = config_lib.CONFIG.Get("ClientBuilder.template_path",
context=context)
if os.path.exists(template_path):
builder_obj = builder_cls(context=context)
try:
return builder_obj.MakeDeployableBinary(template_path)
except Exception as e: # pylint: disable=broad-except
print "Repacking template %s failed: %s" % (template_path, e)
else:
print "Template %s missing - will not repack." % template_path
def RepackAllBinaries(upload=False, debug_build=False, token=None):
  """Repack binaries based on the configuration.

  NOTE: The configuration file specifies the location of the binaries
  templates. These usually depend on the client version which is also specified
  in the configuration file. This simple function simply runs through all the
  supported architectures looking for available templates for the configured
  client version, architecture and operating system.

  We do not repack all the binaries that are found in the template directories,
  only the ones that are valid for the current configuration. It is not an
  error to have a template missing, we simply ignore it and move on.

  Args:
    upload: If specified we also upload the repacked binary into the datastore.
    debug_build: Repack as a debug build.
    token: Token to use when uploading.

  Returns:
    A list of output installers generated.
  """
  built = []

  base_context = ["ClientBuilder Context"]
  if debug_build:
    base_context += ["DebugClientBuild Context"]

  # (context, deployer class) pairs for every supported target platform.
  clients_to_repack = [
      (["Target:Windows", "Platform:Windows", "Arch:amd64"],
       build.WindowsClientDeployer),
      (["Target:Windows", "Platform:Windows", "Arch:i386"],
       build.WindowsClientDeployer),
      (["Target:Linux", "Platform:Linux", "Arch:amd64"],
       build.LinuxClientDeployer),
      (["Target:Linux", "Platform:Linux", "Arch:i386"],
       build.LinuxClientDeployer),
      (["Target:Linux", "Target:LinuxRpm", "Platform:Linux", "Arch:amd64"],
       build.CentosClientDeployer),
      (["Target:Linux", "Target:LinuxRpm", "Platform:Linux", "Arch:i386"],
       build.CentosClientDeployer),
      (["Target:Darwin", "Platform:Darwin", "Arch:amd64"],
       build.DarwinClientDeployer)]

  msg = "Will repack the following clients "
  if debug_build:
    msg += "(debug build)"
  print msg + ":"
  print

  # First pass: only report what will be repacked and whether each template
  # file is readable; no repacking work happens yet.
  for context, deployer in clients_to_repack:
    context = base_context + context
    template_path = config_lib.CONFIG.Get("ClientBuilder.template_path",
                                          context=context)
    output_path = config_lib.CONFIG.Get("ClientBuilder.output_path",
                                        context=context)
    readable = (os.path.isfile(template_path) and
                os.access(template_path, os.R_OK))
    if not readable:
      readable_str = " (NOT READABLE)"
    else:
      readable_str = ""
    print "Repacking : " + template_path + readable_str
    print "To : " + output_path
    print

  # Second pass: actually repack each binary and optionally upload it.
  for context, deployer in clients_to_repack:
    context = base_context + context
    template_path = config_lib.CONFIG.Get("ClientBuilder.template_path",
                                          context=context)
    output_path = _RepackBinary(context, deployer)
    if output_path:
      print "%s repacked ok." % template_path
      built.append(output_path)
      if upload:
        dest = config_lib.CONFIG.Get("Executables.installer",
                                     context=context)
        # Read is capped at 100 MiB to bound memory use for the upload.
        UploadSignedConfigBlob(open(output_path).read(100 * 1024 * 1024),
                               dest, client_context=context, token=token)
    else:
      print "Failed to repack %s." % template_path

  return built
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ast_util module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import textwrap
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.platform import test
class AstUtilTest(test.TestCase):
    """Unit tests for ast_util: symbol renaming, AST copying, keyword
    conversion, pattern matching, parallel tree walks and lambda lookup."""

    def setUp(self):
        super(AstUtilTest, self).setUp()
        # Records how many times _mock_apply_fn saw each (target, source) pair.
        self._invocation_counts = collections.defaultdict(lambda: 0)

    def test_rename_symbols_basic(self):
        node = parser.parse_str('a + b')
        node = qual_names.resolve(node)
        node = ast_util.rename_symbols(
            node, {qual_names.QN('a'): qual_names.QN('renamed_a')})
        # Renaming must produce plain-str identifiers, not QN objects.
        self.assertIsInstance(node.value.left.id, str)
        source = compiler.ast_to_source(node, include_encoding_marker=False)
        self.assertEqual(source.strip(), 'renamed_a + b')

    def test_rename_symbols_attributes(self):
        node = parser.parse_str('b.c = b.c.d')
        node = qual_names.resolve(node)
        # Composite qualified names (attribute chains) can be renamed too.
        node = ast_util.rename_symbols(
            node, {qual_names.from_str('b.c'): qual_names.QN('renamed_b_c')})
        source = compiler.ast_to_source(node, include_encoding_marker=False)
        self.assertEqual(source.strip(), 'renamed_b_c = renamed_b_c.d')

    def test_rename_symbols_annotations(self):
        # Renaming must preserve (not copy) existing anno annotations.
        node = parser.parse_str('a[i]')
        node = qual_names.resolve(node)
        anno.setanno(node, 'foo', 'bar')
        orig_anno = anno.getanno(node, 'foo')
        node = ast_util.rename_symbols(node,
                                       {qual_names.QN('a'): qual_names.QN('b')})
        self.assertIs(anno.getanno(node, 'foo'), orig_anno)

    def test_copy_clean(self):
        node = parser.parse_str(
            textwrap.dedent("""
              def f(a):
                return a + 1
            """))
        setattr(node, '__foo', 'bar')
        new_node = ast_util.copy_clean(node)
        # The copy is a distinct object and ad-hoc attributes are dropped.
        self.assertIsNot(new_node, node)
        self.assertFalse(hasattr(new_node, '__foo'))

    def test_copy_clean_preserves_annotations(self):
        node = parser.parse_str(
            textwrap.dedent("""
              def f(a):
                return a + 1
            """))
        anno.setanno(node, 'foo', 'bar')
        anno.setanno(node, 'baz', 1)
        # Only the explicitly whitelisted annotations survive the copy.
        new_node = ast_util.copy_clean(node, preserve_annos={'foo'})
        self.assertEqual(anno.getanno(new_node, 'foo'), 'bar')
        self.assertFalse(anno.hasanno(new_node, 'baz'))

    def test_keywords_to_dict(self):
        keywords = parser.parse_expression('f(a=b, c=1, d=\'e\')').keywords
        d = ast_util.keywords_to_dict(keywords)
        # Make sure we generate a usable dict node by attaching it to a variable and
        # compiling everything.
        node = parser.parse_str('def f(b): pass')
        node.body.append(ast.Return(d))
        result, _, _ = compiler.ast_to_object(node)
        self.assertDictEqual(result.f(3), {'a': 3, 'c': 1, 'd': 'e'})

    def assertMatch(self, target_str, pattern_str):
        # Helper: parse both sides and assert ast_util.matches is True.
        node = parser.parse_expression(target_str)
        pattern = parser.parse_expression(pattern_str)
        self.assertTrue(ast_util.matches(node, pattern))

    def assertNoMatch(self, target_str, pattern_str):
        # Helper: parse both sides and assert ast_util.matches is False.
        node = parser.parse_expression(target_str)
        pattern = parser.parse_expression(pattern_str)
        self.assertFalse(ast_util.matches(node, pattern))

    def test_matches_symbols(self):
        # '_' is the wildcard: it matches a bare symbol but not a call.
        self.assertMatch('foo', '_')
        self.assertNoMatch('foo()', '_')
        self.assertMatch('foo + bar', 'foo + _')
        self.assertNoMatch('bar + bar', 'foo + _')
        self.assertNoMatch('foo - bar', 'foo + _')

    def test_matches_function_args(self):
        self.assertMatch('super(Foo, self).__init__(arg1, arg2)',
                         'super(_).__init__(_)')
        self.assertMatch('super().__init__()', 'super(_).__init__(_)')
        self.assertNoMatch('super(Foo, self).bar(arg1, arg2)',
                           'super(_).__init__(_)')
        self.assertMatch('super(Foo, self).__init__()', 'super(Foo, _).__init__(_)')
        self.assertNoMatch('super(Foo, self).__init__()',
                           'super(Bar, _).__init__(_)')

    def _mock_apply_fn(self, target, source):
        # Stand-in apply_fn: record the rendered (target, source) pair.
        target = compiler.ast_to_source(target, include_encoding_marker=False)
        source = compiler.ast_to_source(source, include_encoding_marker=False)
        self._invocation_counts[(target.strip(), source.strip())] += 1

    def test_apply_to_single_assignments_dynamic_unpack(self):
        # Unpacking an opaque value is lowered to indexed reads d[0], d[1], ...
        node = parser.parse_str('a, b, c = d')
        ast_util.apply_to_single_assignments(node.targets, node.value,
                                             self._mock_apply_fn)
        self.assertDictEqual(self._invocation_counts, {
            ('a', 'd[0]'): 1,
            ('b', 'd[1]'): 1,
            ('c', 'd[2]'): 1,
        })

    def test_apply_to_single_assignments_static_unpack(self):
        # A literal tuple on the right is paired element-wise.
        node = parser.parse_str('a, b, c = d, e, f')
        ast_util.apply_to_single_assignments(node.targets, node.value,
                                             self._mock_apply_fn)
        self.assertDictEqual(self._invocation_counts, {
            ('a', 'd'): 1,
            ('b', 'e'): 1,
            ('c', 'f'): 1,
        })

    def test_parallel_walk(self):
        src = """
          def f(a):
            return a + 1
        """
        node = parser.parse_str(textwrap.dedent(src))
        # Walking a tree against itself must yield pairwise-equal nodes.
        for child_a, child_b in ast_util.parallel_walk(node, node):
            self.assertEqual(child_a, child_b)

    def test_parallel_walk_string_leaves(self):
        src = """
          def f(a):
            global g
        """
        node = parser.parse_str(textwrap.dedent(src))
        for child_a, child_b in ast_util.parallel_walk(node, node):
            self.assertEqual(child_a, child_b)

    def test_parallel_walk_inconsistent_trees(self):
        node_1 = parser.parse_str(
            textwrap.dedent("""
              def f(a):
                return a + 1
            """))
        node_2 = parser.parse_str(
            textwrap.dedent("""
              def f(a):
                return a + (a * 2)
            """))
        node_3 = parser.parse_str(
            textwrap.dedent("""
              def f(a):
                return a + 2
            """))
        with self.assertRaises(ValueError):
            for _ in ast_util.parallel_walk(node_1, node_2):
                pass
        # There is not particular reason to reject trees that differ only in the
        # value of a constant.
        # TODO(mdan): This should probably be allowed.
        with self.assertRaises(ValueError):
            for _ in ast_util.parallel_walk(node_1, node_3):
                pass

    def assertLambdaNodes(self, matching_nodes, expected_bodies):
        # Helper: every match is a gast.Lambda whose rendered body is expected.
        self.assertEqual(len(matching_nodes), len(expected_bodies))
        for node in matching_nodes:
            self.assertIsInstance(node, gast.Lambda)
            self.assertIn(
                compiler.ast_to_source(node.body,
                                       include_encoding_marker=False).strip(),
                expected_bodies)

    def test_find_matching_definitions_lambda(self):
        node = parser.parse_str(
            textwrap.dedent("""
              f = lambda x: 1
            """))
        f = lambda x: x
        nodes = ast_util.find_matching_definitions(node, f)
        self.assertLambdaNodes(nodes, ('(1)',))

    def test_find_matching_definitions_lambda_multiple_matches(self):
        node = parser.parse_str(
            textwrap.dedent("""
              f = lambda x: 1, lambda x: 2
            """))
        f = lambda x: x
        nodes = ast_util.find_matching_definitions(node, f)
        self.assertLambdaNodes(nodes, ('(1)', '(2)'))

    def test_find_matching_definitions_lambda_uses_arg_names(self):
        # Lambdas are matched by their argument names, not just arity.
        node = parser.parse_str(
            textwrap.dedent("""
              f = lambda x: 1, lambda y: 2
            """))
        f = lambda x: x
        nodes = ast_util.find_matching_definitions(node, f)
        self.assertLambdaNodes(nodes, ('(1)',))
        f = lambda y: y
        nodes = ast_util.find_matching_definitions(node, f)
        self.assertLambdaNodes(nodes, ('(2)',))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    test.main()
|
|
from django.conf import settings
from django.contrib import messages
from django.core.signing import BadSignature, Signer
from django.utils.functional import SimpleLazyObject, empty
from django.utils.translation import ugettext_lazy as _
from oscar.core.compat import MiddlewareMixin, user_is_authenticated
from oscar.core.loading import get_class, get_model
# Resolve Oscar's overridable classes/models once at import time, so that
# forked ("overridden") apps are honoured throughout this middleware.
Applicator = get_class('offer.applicator', 'Applicator')
Basket = get_model('basket', 'basket')
Selector = get_class('partner.strategy', 'Selector')
# A single strategy selector instance is shared by all requests.
selector = Selector()
class BasketMiddleware(MiddlewareMixin):
    """Middleware that attaches a lazily-loaded basket to every request.

    ``request.basket`` and ``request.basket_hash`` are wrapped in
    SimpleLazyObject so no database work happens unless a view or template
    actually accesses them.  Anonymous baskets are tracked via a signed
    cookie; on sign-in the cookie basket is merged into the user basket.
    """

    # Middleware interface methods

    def process_request(self, request):
        # Keep track of cookies that need to be deleted (which can only be done
        # when we're processing the response instance).
        request.cookies_to_delete = []
        # Load stock/price strategy and assign to request (it will later be
        # assigned to the basket too).
        strategy = selector.strategy(request=request, user=request.user)
        request.strategy = strategy
        # We lazily load the basket so use a private variable to hold the
        # cached instance.
        request._basket_cache = None

        def load_full_basket():
            """
            Return the basket after applying offers.
            """
            basket = self.get_basket(request)
            basket.strategy = request.strategy
            self.apply_offers_to_basket(request, basket)
            return basket

        def load_basket_hash():
            """
            Load the basket and return the basket hash

            Note that we don't apply offers or check that every line has a
            stockrecord here.
            """
            basket = self.get_basket(request)
            if basket.id:
                return self.get_basket_hash(basket.id)
            # Implicitly returns None for a basket that was never saved.

        # Use Django's SimpleLazyObject to only perform the loading work
        # when the attribute is accessed.
        request.basket = SimpleLazyObject(load_full_basket)
        request.basket_hash = SimpleLazyObject(load_basket_hash)

    def process_response(self, request, response):
        # Delete any surplus cookies
        cookies_to_delete = getattr(request, 'cookies_to_delete', [])
        for cookie_key in cookies_to_delete:
            response.delete_cookie(cookie_key)
        if not hasattr(request, 'basket'):
            return response
        # If the basket was never initialized we can safely return
        if (isinstance(request.basket, SimpleLazyObject)
                and request.basket._wrapped is empty):
            return response
        cookie_key = self.get_cookie_key(request)
        # Check if we need to set a cookie. If the cookie is already available
        # but is set in the cookies_to_delete list then we need to re-set it.
        has_basket_cookie = (
            cookie_key in request.COOKIES
            and cookie_key not in cookies_to_delete)
        # If a basket has had products added to it, but the user is anonymous
        # then we need to assign it to a cookie
        if (request.basket.id and not user_is_authenticated(request.user)
                and not has_basket_cookie):
            cookie = self.get_basket_hash(request.basket.id)
            response.set_cookie(
                cookie_key, cookie,
                max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
                secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True)
        return response

    def get_cookie_key(self, request):
        """
        Returns the cookie name to use for storing a cookie basket.

        The method serves as a useful hook in multi-site scenarios where
        different baskets might be needed.
        """
        return settings.OSCAR_BASKET_COOKIE_OPEN

    def process_template_response(self, request, response):
        # Inject the basket into every template context, unless the view
        # already supplied one of its own.
        if hasattr(response, 'context_data'):
            if response.context_data is None:
                response.context_data = {}
            if 'basket' not in response.context_data:
                response.context_data['basket'] = request.basket
            else:
                # Occasionally, a view will want to pass an alternative basket
                # to be rendered. This can happen as part of checkout
                # processes where the submitted basket is frozen when the
                # customer is redirected to another site (eg PayPal). When the
                # customer returns and we want to show the order preview
                # template, we need to ensure that the frozen basket gets
                # rendered (not request.basket). We still keep a reference to
                # the request basket (just in case).
                response.context_data['request_basket'] = request.basket
        return response

    # Helper methods

    def get_basket(self, request):
        """
        Return the open basket for this request
        """
        if request._basket_cache is not None:
            return request._basket_cache
        num_baskets_merged = 0
        manager = Basket.open
        cookie_key = self.get_cookie_key(request)
        # Check if there is a basket referenced by a (signed) cookie.
        cookie_basket = self.get_cookie_basket(cookie_key, request, manager)
        if hasattr(request, 'user') and user_is_authenticated(request.user):
            # Signed-in user: if they have a cookie basket too, it means
            # that they have just signed in and we need to merge their cookie
            # basket into their user basket, then delete the cookie.
            try:
                basket, __ = manager.get_or_create(owner=request.user)
            except Basket.MultipleObjectsReturned:
                # Not sure quite how we end up here with multiple baskets.
                # We merge them and create a fresh one
                old_baskets = list(manager.filter(owner=request.user))
                basket = old_baskets[0]
                for other_basket in old_baskets[1:]:
                    self.merge_baskets(basket, other_basket)
                    num_baskets_merged += 1
            # Assign user onto basket to prevent further SQL queries when
            # basket.owner is accessed.
            basket.owner = request.user
            if cookie_basket:
                self.merge_baskets(basket, cookie_basket)
                num_baskets_merged += 1
                request.cookies_to_delete.append(cookie_key)
        elif cookie_basket:
            # Anonymous user with a basket tied to the cookie
            basket = cookie_basket
        else:
            # Anonymous user with no basket - instantiate a new basket
            # instance. No need to save yet.
            basket = Basket()
        # Cache basket instance for the duration of this request
        request._basket_cache = basket
        if num_baskets_merged > 0:
            messages.add_message(request, messages.WARNING,
                                 _("We have merged a basket from a previous session. Its contents "
                                   "might have changed."))
        return basket

    def merge_baskets(self, master, slave):
        """
        Merge one basket into another.

        This is its own method to allow it to be overridden
        """
        master.merge(slave, add_quantities=False)

    def get_cookie_basket(self, cookie_key, request, manager):
        """
        Looks for a basket which is referenced by a cookie.

        If a cookie key is found with no matching basket, then we add
        it to the list to be deleted.
        """
        basket = None
        if cookie_key in request.COOKIES:
            basket_hash = request.COOKIES[cookie_key]
            try:
                basket_id = Signer().unsign(basket_hash)
                # Only open, ownerless baskets may be claimed via cookie.
                basket = Basket.objects.get(pk=basket_id, owner=None,
                                            status=Basket.OPEN)
            except (BadSignature, Basket.DoesNotExist):
                # Tampered signature or stale id: schedule cookie removal.
                request.cookies_to_delete.append(cookie_key)
        return basket

    def apply_offers_to_basket(self, request, basket):
        # Offers are only relevant for non-empty baskets.
        if not basket.is_empty:
            Applicator().apply(basket, request.user, request)

    def get_basket_hash(self, basket_id):
        # Sign the id so clients cannot claim arbitrary baskets.
        return Signer().sign(basket_id)
|
|
#!/usr/bin/env python
import sys
from os import path, mkdir
import shutil
from glob import glob
import subprocess
import random
def write_script_header(cluster, script, event_id, walltime, working_folder):
    """Write the batch-system preamble for `cluster` to the open file `script`.

    Supported clusters: "nersc" (SLURM), "guillimin" and "McGill" (PBS).
    `event_id` and `walltime` are interpolated into the job name and time
    limit; `working_folder` becomes the PBS working directory where used.
    Terminates the program with exit status 1 on an unknown cluster name.
    """
    if cluster == "nersc":
        script.write(
"""#!/bin/bash -l
#SBATCH -p shared
#SBATCH -n 1
#SBATCH -J UrQMD_%s
#SBATCH -t %s
#SBATCH -L SCRATCH
#SBATCH -C haswell
""" % (event_id, walltime))
    elif cluster == "guillimin":
        script.write(
"""#!/usr/bin/env bash
#PBS -N UrQMD_%s
#PBS -l nodes=1:ppn=1
#PBS -l walltime=%s
#PBS -S /bin/bash
#PBS -e test.err
#PBS -o test.log
#PBS -A cqn-654-ad
#PBS -q sw
#PBS -d %s
""" % (event_id, walltime, working_folder))
    elif cluster == "McGill":
        script.write(
"""#!/usr/bin/env bash
#PBS -N UrQMD_%s
#PBS -l nodes=1:ppn=1:irulan
#PBS -l walltime=%s
#PBS -S /bin/bash
#PBS -e test.err
#PBS -o test.log
#PBS -d %s
""" % (event_id, walltime, working_folder))
    else:
        # Fixed typo in the message ("unrecoginzed") and use sys.exit()
        # instead of the site-provided exit() builtin, which is not
        # guaranteed to be available (e.g. under python -S).
        print("Error: unrecognized cluster name :", cluster)
        print("Available options: nersc, guillimin, McGill")
        sys.exit(1)
def write_analysis_spectra_and_vn_commands(script, after_burner_type):
    """Append pT-spectra and anisotropic-flow (vn) analysis commands.

    One batch of hadronic_afterburner_tools invocations is written per
    particle species.  The read_in_mode flag is derived from the
    after-burner type: JAM phase files (5), OSCAR files (0), or the
    default UrQMD binary format (2).
    """
    identified_hadrons = ['211', '-211', '321', '-321', '2212', '-2212',
                          '3122', '-3122', '3312', '-3312', '3334', '-3334',
                          '333']
    charged_hadrons = ['9999', '9998', '-9998']
    read_in_mode = {"JAM": 5, "OSCAR": 0}.get(after_burner_type, 2)
    for monval in charged_hadrons:
        script.write(
"""
# charged hadrons
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-1.0 rap_max=-0.1 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=0.1 rap_max=1.0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=0.5 rap_max=2.0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-2.0 rap_max=-0.5 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-1.0 rap_max=1.0 compute_correlation=1 flag_charge_dependence=1 pT_min=0.2 pT_max=2.0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-2.0 rap_max=2.0 compute_correlation=1 flag_charge_dependence=1 pT_min=0.2 pT_max=2.0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-1.0 rap_max=1.0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-2.0 rap_max=2.0 >> ../output.log
""".format(read_in_mode, monval))
    for monval in identified_hadrons:
        script.write(
"""
#./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=1 rap_type=0 >> ../output.log
./hadronic_afterburner_tools.e run_mode=0 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=1 rap_type=1 >> ../output.log
""".format(read_in_mode, monval))
def write_analysis_particle_distrubtion_commands(script, after_burner_type):
    """Append event-by-event particle yield distribution commands.

    For positive species ("-" not in the Monte-Carlo id) an extra pass with
    net_particle_flag=1 is added.  read_in_mode follows the after-burner
    type: JAM (5), OSCAR (0), otherwise UrQMD binary (2).
    """
    identified_hadrons = ['211', '-211', '321', '-321', '2212', '-2212',
                          '3122', '-3122']
    charged_hadrons = ['9997', '-9997', '9998', '-9998']
    read_in_mode = {"JAM": 5, "OSCAR": 0}.get(after_burner_type, 2)
    for monval in identified_hadrons:
        script.write(
"""
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=1 rap_type=0 >> output.log
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=1 rap_type=1 >> output.log
""".format(read_in_mode, monval))
        if "-" not in monval:
            script.write(
"""
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} distinguish_isospin=1 rap_type=0 net_particle_flag=1 >> output.log
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} distinguish_isospin=1 rap_type=1 net_particle_flag=1 >> output.log
""".format(read_in_mode, monval))
    for monval in charged_hadrons:
        script.write(
"""
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 >> output.log
""".format(read_in_mode, monval))
        if "-" not in monval:
            script.write(
"""
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval={1} resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 net_particle_flag=1 >> output.log
""".format(read_in_mode, monval))
    # All charged hadrons (monval 9999) in a single final pass.
    script.write(
"""
./hadronic_afterburner_tools.e run_mode=2 read_in_mode={0} particle_monval=9999 resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 >> output.log
""".format(read_in_mode))
def generate_script(cluster_name, folder_name):
    """Write submit_job.pbs running osc2u + UrQMD over every OSCAR event.

    `folder_name` is the per-core event folder (relative to the current
    directory); `cluster_name` selects the batch-system header.
    """
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '10:00:00'
    # 'with' guarantees the file is closed even if header writing fails
    # (the original open()/close() pair leaked the handle on error).
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
mkdir UrQMD_results
for iev in `ls OSCAR_events`
do
cd osc2u
./osc2u.e < ../OSCAR_events/$iev
mv fort.14 ../urqmd/OSCAR.input
cd ../urqmd
./runqmd.sh
mv particle_list.dat ../UrQMD_results/particle_list_`echo $iev | cut -f 2 -d _`
cd ..
done
""")
def generate_script_JAM(cluster_name, folder_name):
    """Write submit_job.pbs running the JAM after-burner over OSCAR events."""
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '10:00:00'
    # 'with' replaces the open()/close() pair so the file is closed on error.
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/scratch/irulan/chun/JAM/JAM_lib/lib
mkdir JAM_results
for iev in `ls OSCAR_events`
do
eventid=`echo $iev | cut -f 2 -d "_" | cut -f 1 -d "."`
cd JAM
mv ../OSCAR_events/$iev ./OSCAR.DAT
rm -fr phase.dat
./jamgo
mv phase.dat ../JAM_results/particle_list_$eventid.dat
mv OSCAR.DAT ../OSCAR_events/OSCAR_$eventid.dat
cd ..
done
""")
def generate_script_iSS(cluster_name, folder_name):
    """Write submit_job.pbs for the full iSS -> osc2u -> UrQMD -> spvn chain.

    For every hydro surface file, the script samples particles with iSS,
    applies global momentum conservation, runs UrQMD, converts the output to
    binary and analyses spectra/vn with the afterburner toolkit.
    """
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '35:00:00'
    # 'with' replaces the open()/close() pair so the file is closed on error.
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
mkdir UrQMD_results
mkdir spvn_results
for iev in `ls hydro_events --color=none | grep "surface"`
do
event_id=`echo $iev | cut -f 3 -d _ | cut -f 1 -d .`
cd iSS
if [ -d "results" ]; then
rm -fr results
fi
mkdir results
mv ../hydro_events/$iev results/surface.dat
cp ../hydro_events/music_input_event_$event_id results/music_input
./iSS.e >> ../output.log
mv results/surface.dat ../hydro_events/$iev
#rm -fr results/sample*
# turn on global momentum conservation
./correct_momentum_conservation.py OSCAR.DAT
mv OSCAR_w_GMC.DAT OSCAR.DAT
cd ../osc2u
./osc2u.e < ../iSS/OSCAR.DAT >> ../output.log
mv fort.14 ../urqmd/OSCAR.input
cd ../urqmd
./runqmd.sh >> ../output.log
mv particle_list.dat ../UrQMD_results/particle_list_$event_id.dat
#mv ../iSS/OSCAR.DAT ../UrQMD_results/OSCAR_$event_id.dat
rm -fr ../iSS/OSCAR.DAT
rm -fr OSCAR.input
cd ..
./hadronic_afterburner_toolkit/convert_to_binary.e UrQMD_results/particle_list_$event_id.dat
rm -fr UrQMD_results/particle_list_$event_id.dat
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../UrQMD_results/particle_list_$event_id.gz results/particle_list.dat
""")
        write_analysis_spectra_and_vn_commands(script, "UrQMD")
        script.write(
"""
mv results/particle_list.dat ../UrQMD_results/particle_list_$event_id.gz
mv results ../spvn_results/event_$event_id
cd ..
done
""")
def generate_script_iS(cluster_name, folder_name):
    """Write submit_job.pbs running iS (Cooper-Frye + resonance decays)
    over every hydro surface file."""
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '3:00:00'
    # 'with' replaces the open()/close() pair so the file is closed on error.
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
mkdir spvn_results
for iev in `ls hydro_events --color=none | grep "surface"`
do
event_id=`echo $iev | cut -f 3 -d _ | cut -f 1 -d .`
cd iS
if [ -d "results" ]; then
rm -fr results
fi
mkdir results
mv ../hydro_events/$iev results/surface.dat
cp ../hydro_events/music_input_event_$event_id results/music_input
./iS_withResonance.sh >> ../output.log
mv results/surface.dat ../hydro_events/$iev
mv results/ ../spvn_results/event_$event_id
cd ..
done
""")
def generate_script_HBT(cluster_name, folder_name):
    """Write submit_job.pbs computing HBT correlations from UrQMD events,
    pairing each event with its pre-assigned mixed event."""
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '20:00:00'
    # 'with' replaces the open()/close() pair so the file is closed on error.
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
mkdir HBT_results
for iev in `ls UrQMD_events | grep "particle_list"`
do
eventid=`echo $iev | cut -f 3 -d _ | cut -f 1 -d .`
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../UrQMD_events/$iev results/particle_list.dat
mv ../UrQMD_events/mixed_event_$eventid.dat results/particle_list_mixed_event.dat
./hadronic_afterburner_tools.e read_in_mode=2 run_mode=1 resonance_feed_down_flag=0 > output.log
mv results/particle_list.dat ../UrQMD_events/$iev
mv results/particle_list_mixed_event.dat ../UrQMD_events/mixed_event_$eventid.dat
mv results ../HBT_results/event_$eventid
cd ..
done
""")
def generate_script_HBT_with_JAM(cluster_name, folder_name):
    """Write submit_job.pbs computing HBT correlations from JAM events
    (read_in_mode=5), using pre-assigned mixed events."""
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '30:00:00'
    # 'with' replaces the open()/close() pair so the file is closed on error.
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
mkdir HBT_results
for iev in `ls JAM_events | grep "particle_list"`
do
eventid=`echo $iev | cut -f 3 -d _ | cut -f 1 -d .`
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../JAM_events/$iev results/particle_list.dat
mv ../JAM_events/mixed_event_$eventid.dat results/particle_list_mixed_event.dat
./hadronic_afterburner_tools.e run_mode=1 read_in_mode=5 resonance_feed_down_flag=0 > output.log
mv results/particle_list.dat ../JAM_events/$iev
mv results/particle_list_mixed_event.dat ../JAM_events/mixed_event_$eventid.dat
mv results ../HBT_results/event_$eventid
cd ..
done
""")
def generate_script_balance_function(cluster_name, folder_name):
    """Write submit_job.pbs computing charge balance functions from UrQMD
    events for each (particle_alpha, particle_beta) pair."""
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '01:00:00'
    # Species pairs analysed together; kept index-aligned via zip below.
    particle_a_list = ['9998']
    particle_b_list = ['-9998']
    # 'with' replaces the open()/close() pair so the file is closed on error.
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
mkdir BalanceFunction_results
for iev in `ls UrQMD_events | grep "particle_list"`
do
eventid=`echo $iev | cut -f 3 -d _ | cut -f 1 -d .`
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../UrQMD_events/$iev results/particle_list.dat
mv ../UrQMD_events/mixed_event_$eventid.dat results/particle_list_mixed_event.dat
""")
        # zip replaces the old range(len(...)) index loop.
        for part_a, part_b in zip(particle_a_list, particle_b_list):
            script.write(
"""
./hadronic_afterburner_tools.e read_in_mode=2 run_mode=3 resonance_feed_down_flag=0 distinguish_isospin=0 rap_type=0 rap_min=-1.0 rap_max=1.0 particle_alpha={0} particle_beta={1} BpT_min=0.2 BpT_max=3.0 > output.log
""".format(part_a, part_b))
        script.write(
"""
mv results/particle_list.dat ../UrQMD_events/$iev
mv results/particle_list_mixed_event.dat ../UrQMD_events/mixed_event_$eventid.dat
mv results ../BalanceFunction_results/event_$eventid
cd ..
done
""")
def generate_script_spectra_and_vn(cluster_name, folder_name):
    """Write submit_job.pbs analysing spectra and vn from UrQMD events."""
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '1:00:00'
    # 'with' replaces the open()/close() pair so the file is closed on error.
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
mkdir spvn_results
for iev in `ls UrQMD_events | grep "particle_list"`
do
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../UrQMD_events/$iev results/particle_list.dat
""")
        write_analysis_spectra_and_vn_commands(script, "UrQMD")
        script.write(
"""
mv results/particle_list.dat ../UrQMD_events/$iev
mv results ../spvn_results/event_`echo $iev | cut -f 3 -d _ | cut -f 1 -d .`
cd ..
done
""")
def generate_script_particle_yield_distribution(cluster_name, folder_name):
    """Write submit_job.pbs collecting event-by-event particle yield
    distributions from UrQMD events."""
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '1:00:00'
    # 'with' replaces the open()/close() pair so the file is closed on error.
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
mkdir spvn_results
for iev in `ls UrQMD_events | grep "particle_list"`
do
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../UrQMD_events/$iev results/particle_list.dat
""")
        write_analysis_particle_distrubtion_commands(script, "UrQMD")
        script.write(
"""
mv results/particle_list.dat ../UrQMD_events/$iev
mv results ../spvn_results/event_`echo $iev | cut -f 3 -d _ | cut -f 1 -d .`
cd ..
done
""")
def generate_script_particle_yield_distribution_with_OSCAR(cluster_name,
                                                           folder_name):
    """Write submit_job.pbs collecting event-by-event particle yield
    distributions directly from OSCAR events (read_in_mode=0)."""
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '1:00:00'
    # 'with' replaces the open()/close() pair so the file is closed on error.
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
mkdir spvn_results
for iev in `ls OSCAR_events`
do
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../OSCAR_events/$iev results/OSCAR.DAT
""")
        write_analysis_particle_distrubtion_commands(script, "OSCAR")
        script.write(
"""
mv results/OSCAR.DAT ../OSCAR_events/$iev
mv results ../spvn_results/event_`echo $iev | cut -f 2 -d _ | cut -f 1 -d .`
cd ..
done
""")
def generate_script_spectra_and_vn_with_JAM(cluster_name, folder_name):
    """Write submit_job.pbs analysing spectra and vn from JAM events."""
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '3:00:00'
    # 'with' replaces the open()/close() pair so the file is closed on error.
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
mkdir spvn_results
for iev in `ls JAM_events | grep "particle_list"`
do
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../JAM_events/$iev results/particle_list.dat
""")
        write_analysis_spectra_and_vn_commands(script, "JAM")
        script.write(
"""
mv results/particle_list.dat ../JAM_events/$iev
mv results ../spvn_results/event_`echo $iev | cut -f 3 -d _ | cut -f 1 -d .`
cd ..
done
""")
def generate_script_HBT_with_OSCAR(cluster_name, folder_name):
    """Write submit_job.pbs computing HBT correlations from OSCAR events
    (read_in_mode=0, with resonance feed-down)."""
    working_folder = path.join(path.abspath('./'), folder_name)
    event_id = working_folder.split('/')[-1]
    walltime = '35:00:00'
    # 'with' replaces the open()/close() pair so the file is closed on error.
    with open(path.join(working_folder, "submit_job.pbs"), "w") as script:
        write_script_header(cluster_name, script, event_id, walltime,
                            working_folder)
        script.write(
"""
mkdir HBT_results
for iev in `ls OSCAR_events | grep "OSCAR"`
do
eventid=`echo $iev | cut -f 2 -d _ | cut -f 1 -d .`
cd hadronic_afterburner_toolkit
rm -fr results
mkdir results
mv ../OSCAR_events/$iev results/OSCAR.DAT
mv ../OSCAR_events/mixed_event_$eventid.dat results/OSCAR_mixed_event.DAT
./hadronic_afterburner_tools.e read_in_mode=0 run_mode=1 resonance_feed_down_flag=1 > output.log
mv results/OSCAR.DAT ../OSCAR_events/$iev
mv results/OSCAR_mixed_event.DAT ../OSCAR_events/mixed_event_$eventid.dat
mv results ../HBT_results/event_$eventid
cd ..
done
""")
def copy_UrQMD_events(number_of_cores, input_folder, working_folder):
    """Symlink UrQMD event files into the per-core event folders.

    Events are distributed round-robin over `number_of_cores` folders; each
    event also gets a randomly chosen *different* partner linked as its
    mixed event for correlation analyses.
    """
    events_list = glob('%s/particle_list_*.dat' % input_folder)
    if events_list == []:
        # Fall back to gzipped binary events.
        events_list = glob('%s/particle_list_*.gz' % input_folder)
        if events_list == []:
            print("Error: can not find UrQMD events, events_list is empty! ",
                  events_list)
        else:
            print("Linking zipped binary UrQMD events, ",
                  "make sure read_in_mode is set to 2~")
    for iev in range(len(events_list)):
        folder_id = iev % number_of_cores
        filename = events_list[iev].split('/')[-1].split('.')[0]
        event_id = filename.split('_')[-1]
        folder_path = path.join(working_folder, 'event_%d' % folder_id,
                                'UrQMD_events', '%s.dat' % filename)
        # NOTE(review): Popen is fire-and-forget; the links may still be in
        # flight when this function returns — confirm jobs are submitted
        # afterwards, not concurrently.
        bashCommand = "ln -s %s %s" % (
            path.abspath(events_list[iev]), folder_path)
        subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        # Pick a random partner event for the mixed-event analysis.
        mixed_id = random.randint(0, len(events_list)-1)
        filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
        mixed_event_id = filename_mixed.split('_')[-1]
        # BUG FIX: the original looped `while mixed_event_id == iev`, which
        # compares a string with the int loop index — always False in
        # Python 3, so an event could be mixed with itself. Compare the
        # event-id strings instead.
        while mixed_event_id == event_id:
            mixed_id = random.randint(0, len(events_list)-1)
            filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
            mixed_event_id = filename_mixed.split('_')[-1]
        folder_path = path.join(
            working_folder, 'event_%d' % folder_id,
            'UrQMD_events', 'mixed_event_%s.dat' % event_id)
        bashCommand = "ln -s %s %s" % (
            path.abspath(events_list[mixed_id]), folder_path)
        subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
def copy_JAM_events(number_of_cores, input_folder, working_folder):
    """Symlink JAM event files into the per-core event folders.

    Same round-robin distribution and mixed-event pairing as
    copy_UrQMD_events, but for JAM_events folders.
    """
    events_list = glob('%s/particle_list_*.dat' % input_folder)
    for iev in range(len(events_list)):
        folder_id = iev % number_of_cores
        filename = events_list[iev].split('/')[-1].split('.')[0]
        event_id = filename.split('_')[-1]
        folder_path = path.join(working_folder, 'event_%d' % folder_id,
                                'JAM_events', '%s.dat' % filename)
        bashCommand = "ln -s %s %s" % (
            path.abspath(events_list[iev]), folder_path)
        subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
        # Pick a random partner event for the mixed-event analysis.
        mixed_id = random.randint(0, len(events_list)-1)
        filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
        mixed_event_id = filename_mixed.split('_')[-1]
        # BUG FIX: the original compared mixed_event_id (a string) with the
        # int loop index iev — always False in Python 3, so an event could
        # be mixed with itself. Compare the event-id strings instead.
        while mixed_event_id == event_id:
            mixed_id = random.randint(0, len(events_list)-1)
            filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
            mixed_event_id = filename_mixed.split('_')[-1]
        folder_path = path.join(working_folder, 'event_%d' % folder_id,
                                'JAM_events', 'mixed_event_%s.dat' % event_id)
        bashCommand = "ln -s %s %s" % (path.abspath(events_list[mixed_id]),
                                       folder_path)
        subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
def generate_event_folder_UrQMD(cluster_name, working_folder, event_id, mode):
    # Create one per-core working directory ("event_<id>") and populate it
    # with the input-event folder and the job script matching `mode`.
    event_folder = path.join(working_folder, 'event_%d' % event_id)
    mkdir(event_folder)
    if mode == 2:
        # calculate HBT correlation with OSCAR outputs
        mkdir(path.join(event_folder, 'OSCAR_events'))
        generate_script_HBT_with_OSCAR(cluster_name, event_folder)
    elif mode == 3:
        # calculate HBT correlation with UrQMD outputs
        mkdir(path.join(event_folder, 'UrQMD_events'))
        generate_script_HBT(cluster_name, event_folder)
    elif mode == 4:
        # collect particle spectra and vn with UrQMD outputs
        # (original comment said "HBT correlation" — stale copy/paste)
        mkdir(path.join(event_folder, 'UrQMD_events'))
        generate_script_spectra_and_vn(cluster_name, event_folder)
    elif mode == 8:
        # collect event-by-event particle distribution
        mkdir(path.join(event_folder, 'UrQMD_events'))
        generate_script_particle_yield_distribution(cluster_name, event_folder)
    elif mode == 9:
        # calculate event-by-event particle distribution with OSCAR outputs
        mkdir(path.join(event_folder, 'OSCAR_events'))
        generate_script_particle_yield_distribution_with_OSCAR(cluster_name,
                                                               event_folder)
    elif mode == 10:
        # calculate balance function correlation with UrQMD outputs
        mkdir(path.join(event_folder, 'UrQMD_events'))
        generate_script_balance_function(cluster_name, event_folder)
    # Every mode needs a private copy of the afterburner analysis code.
    shutil.copytree('codes/hadronic_afterburner_toolkit',
                    path.join(path.abspath(event_folder),
                              'hadronic_afterburner_toolkit'))
def generate_event_folder_JAM(cluster_name, working_folder, event_id, mode):
    """Set up one worker folder for a JAM-based analysis mode.

    Creates ``working_folder/event_<event_id>`` plus the input subfolder the
    chosen mode reads from, writes the matching submission script, and copies
    the required code folder (JAM itself for mode 5, the afterburner toolkit
    for modes 6 and 7).

    Args:
        cluster_name: Name of the cluster, forwarded to the script generator.
        working_folder: Root folder under which the event folder is created.
        event_id: Integer id of this worker folder.
        mode: Analysis mode (5, 6 or 7); other values only create the bare
            event folder.
    """
    # mode -> (input subfolder, script generator, code folder to copy)
    dispatch = {
        5: ('OSCAR_events', generate_script_JAM, 'JAM'),
        6: ('JAM_events', generate_script_spectra_and_vn_with_JAM,
            'hadronic_afterburner_toolkit'),
        7: ('JAM_events', generate_script_HBT_with_JAM,
            'hadronic_afterburner_toolkit'),
    }
    event_folder = path.join(working_folder, 'event_%d' % event_id)
    mkdir(event_folder)
    if mode in dispatch:
        sub_folder, make_script, code_folder = dispatch[mode]
        mkdir(path.join(event_folder, sub_folder))
        make_script(cluster_name, event_folder)
        shutil.copytree('codes/' + code_folder,
                        path.join(path.abspath(event_folder), code_folder))
def generate_event_folder(cluster_name, working_folder, event_id):
    """Set up one worker folder for running osc2u + UrQMD on OSCAR events.

    Creates ``working_folder/event_<event_id>`` with an ``OSCAR_events``
    input subfolder, writes the submission script, and copies the osc2u and
    urqmd codes into the folder.
    """
    folder = path.join(working_folder, 'event_%d' % event_id)
    mkdir(folder)
    mkdir(path.join(folder, 'OSCAR_events'))
    generate_script(cluster_name, folder)
    for code_name in ('osc2u', 'urqmd'):
        shutil.copytree('codes/' + code_name,
                        path.join(path.abspath(folder), code_name))
def copy_OSCAR_events(number_of_cores, input_folder, working_folder):
    """Distribute OSCAR event files over the worker folders via symlinks.

    For every ``*.dat`` file in `input_folder`, a symlink is created inside
    ``working_folder/event_<iev % number_of_cores>/OSCAR_events``.  A second
    symlink named ``mixed_event_<id>.dat`` points to a randomly chosen
    *different* event, used as the mixed-event sample.

    Args:
        number_of_cores: Number of worker folders to spread events over.
        input_folder: Directory containing the OSCAR ``*.dat`` event files.
        working_folder: Root folder containing pre-created
            ``event_<n>/OSCAR_events`` subfolders.
    """
    events_list = glob('%s/*.dat' % input_folder)
    for iev in range(len(events_list)):
        folder_id = iev % number_of_cores
        filename = events_list[iev].split('/')[-1].split('.')[0]
        event_id = filename.split('_')[-1]
        folder_path = path.join(
            working_folder, 'event_%d' % folder_id,
            'OSCAR_events', events_list[iev].split('/')[-1])
        bashCommand = "ln -s %s %s" % (
            path.abspath(events_list[iev]), folder_path)
        # communicate() waits for completion and closes the PIPE; the
        # original fire-and-forget Popen leaked pipes and left zombies.
        subprocess.Popen(bashCommand, stdout=subprocess.PIPE,
                         shell=True).communicate()
        # Pick a different event to pair with this one for the mixed-event
        # sample.  The original compared the string `mixed_event_id` with
        # the int loop index `iev` (never equal), so an event could end up
        # mixed with itself; compare event-id strings instead.  The
        # len() guard avoids an infinite loop when only one event exists.
        mixed_id = random.randint(0, len(events_list)-1)
        filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
        mixed_event_id = filename_mixed.split('_')[-1]
        while len(events_list) > 1 and mixed_event_id == event_id:
            mixed_id = random.randint(0, len(events_list)-1)
            filename_mixed = events_list[mixed_id].split('/')[-1].split('.')[0]
            mixed_event_id = filename_mixed.split('_')[-1]
        folder_path = path.join(
            working_folder, 'event_%d' % folder_id,
            'OSCAR_events', 'mixed_event_%s.dat' % event_id)
        bashCommand = "ln -s %s %s" % (
            path.abspath(events_list[mixed_id]), folder_path)
        subprocess.Popen(bashCommand, stdout=subprocess.PIPE,
                         shell=True).communicate()
def generate_event_folder_iSS(cluster_name, working_folder, event_id):
    """Set up one worker folder for iSS + osc2u + UrQMD from hydro surfaces.

    Creates ``working_folder/event_<event_id>`` with a ``hydro_events``
    input subfolder, writes the submission script, and copies all codes
    needed for the full particlization + afterburner chain.
    """
    folder = path.join(working_folder, 'event_%d' % event_id)
    mkdir(folder)
    mkdir(path.join(folder, 'hydro_events'))
    generate_script_iSS(cluster_name, folder)
    for code_name in ('iSS', 'osc2u', 'urqmd',
                      'hadronic_afterburner_toolkit'):
        shutil.copytree('codes/' + code_name,
                        path.join(path.abspath(folder), code_name))
def generate_event_folder_iS(cluster_name, working_folder, event_id):
    """Set up one worker folder for running iS + resonance decay.

    Creates ``working_folder/event_<event_id>`` with a ``hydro_events``
    input subfolder, writes the submission script, and copies the iS code.
    """
    folder = path.join(working_folder, 'event_%d' % event_id)
    mkdir(folder)
    mkdir(path.join(folder, 'hydro_events'))
    generate_script_iS(cluster_name, folder)
    shutil.copytree('codes/iS', path.join(path.abspath(folder), 'iS'))
def copy_hydro_events(number_of_cores, input_folder, working_folder):
    """Distribute hydro surface files over the worker folders.

    For every ``surface*.dat`` file in `input_folder`, a symlink is created
    inside ``working_folder/event_<iev % number_of_cores>/hydro_events`` and
    the matching ``music_input_event_<id>`` file is copied alongside it.

    Args:
        number_of_cores: Number of worker folders to spread events over.
        input_folder: Directory with ``surface*.dat`` and
            ``music_input_event_*`` files.
        working_folder: Root folder containing pre-created
            ``event_<n>/hydro_events`` subfolders.
    """
    events_list = glob('%s/surface*.dat' % input_folder)
    for iev in range(len(events_list)):
        # e.g. 'surface_3.dat' -> event_id '3'
        event_id = events_list[iev].split('/')[-1].split('_')[-1].split('.')[0]
        folder_id = iev % number_of_cores
        working_path = path.join(working_folder, 'event_%d' % folder_id,
                                 'hydro_events')
        folder_path = path.join(working_path, events_list[iev].split('/')[-1])
        bashCommand = "ln -s %s %s" % (
            path.abspath(events_list[iev]), folder_path)
        # communicate() waits for the link to be created and closes the
        # PIPE; the original fire-and-forget Popen leaked pipes/zombies.
        subprocess.Popen(bashCommand, stdout=subprocess.PIPE,
                         shell=True).communicate()
        shutil.copy(path.join(input_folder,
                              'music_input_event_%s' % event_id),
                    working_path)
def copy_job_scripts(working_folder):
    """Copy the MPI driver and job-submission scripts into `working_folder`.

    The three scripts are read from the current working directory.
    """
    for script_name in ("job_MPI_wrapper.py",
                        "submit_MPI_job_for_all.pbs",
                        "run_job.sh"):
        shutil.copy(script_name, working_folder)
def print_mode_cheat_sheet():
    """Print a one-line description of every supported `mode` value."""
    lines = [
        "Here is a cheat sheet for mode option:",
        "mode -1: run iS + resonance decay",
        "mode 0: run iSS + osc2u + UrQMD from hydro hypersurface",
        "mode 1: run UrQMD with OSCAR events",
        "mode 2: calculate HBT correlation with OSCAR events",
        "mode 3: calculate HBT correlation with UrQMD events",
        "mode 4: collect spectra and flow observables from UrQMD events",
        "mode 5: run JAM with OSCAR events",
        "mode 6: collect spectra and vn with JAM events",
        "mode 7: calculate HBT correlation with JAM events",
        "mode 8: collect particle yield distribution with UrQMD events",
        "mode 9: collect particle yield distribution with OSCAR events",
        # mode 10 is dispatched in __main__ but was missing from this list.
        "mode 10: calculate balance function correlation with UrQMD events",
    ]
    for line in lines:
        print(line)
if __name__ == "__main__":
    # Command-line driver: read the input/working folders, cluster name,
    # number of worker cores and analysis mode, then build one folder per
    # core and distribute the input events among them.
    try:
        from_folder = str(sys.argv[1])
        folder_name = str(sys.argv[2])
        cluster_name = str(sys.argv[3])
        ncore = int(sys.argv[4])
        mode = int(sys.argv[5])
    except (IndexError, ValueError):
        # ValueError is caught as well so that a non-integer num_of_cores
        # or mode prints the usage message instead of a raw traceback.
        print("Usage:")
        print(" %s input_folder working_folder cluster_name num_of_cores mode"
              % str(sys.argv[0]))
        print("")
        print_mode_cheat_sheet()
        # sys.exit instead of the interactive-only `exit` builtin.
        sys.exit(0)
    if mode == 0: # run iSS + osc2u + UrQMD from hydro hypersurface
        for icore in range(ncore):
            generate_event_folder_iSS(cluster_name, folder_name, icore)
        copy_hydro_events(ncore, from_folder, folder_name)
        copy_job_scripts(folder_name)
    elif mode == -1: # run iS + resonance decay
        for icore in range(ncore):
            generate_event_folder_iS(cluster_name, folder_name, icore)
        copy_hydro_events(ncore, from_folder, folder_name)
    elif mode == 1: # run UrQMD with OSCAR events
        for icore in range(ncore):
            generate_event_folder(cluster_name, folder_name, icore)
        copy_OSCAR_events(ncore, from_folder, folder_name)
    elif mode == 2: # calculate HBT correlation with OSCAR events
        for icore in range(ncore):
            generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
        copy_OSCAR_events(ncore, from_folder, folder_name)
    elif mode == 3: # calculate HBT correlation with UrQMD events
        for icore in range(ncore):
            generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
        copy_UrQMD_events(ncore, from_folder, folder_name)
        copy_job_scripts(folder_name)
    elif mode == 4: # collect spectra and flow observables from UrQMD events
        for icore in range(ncore):
            generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
        copy_UrQMD_events(ncore, from_folder, folder_name)
        copy_job_scripts(folder_name)
    elif mode == 5: # run JAM with OSCAR events
        for icore in range(ncore):
            generate_event_folder_JAM(cluster_name, folder_name, icore, mode)
        copy_OSCAR_events(ncore, from_folder, folder_name)
    elif mode == 6: # collect spectra and vn with JAM events
        for icore in range(ncore):
            generate_event_folder_JAM(cluster_name, folder_name, icore, mode)
        copy_JAM_events(ncore, from_folder, folder_name)
    elif mode == 7: # calculate HBT correlation with JAM events
        for icore in range(ncore):
            generate_event_folder_JAM(cluster_name, folder_name, icore, mode)
        copy_JAM_events(ncore, from_folder, folder_name)
    elif mode == 8: # collect particle yield distribution with UrQMD events
        for icore in range(ncore):
            generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
        copy_UrQMD_events(ncore, from_folder, folder_name)
    elif mode == 9: # collect particle yield distribution with OSCAR events
        for icore in range(ncore):
            generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
        copy_OSCAR_events(ncore, from_folder, folder_name)
    elif mode == 10: # calculate balance function correlation with UrQMD events
        for icore in range(ncore):
            generate_event_folder_UrQMD(cluster_name, folder_name, icore, mode)
        copy_UrQMD_events(ncore, from_folder, folder_name)
        copy_job_scripts(folder_name)
    else:
        # Unknown mode previously fell through silently; fail loudly.
        print("Error: unsupported mode %d" % mode)
        print_mode_cheat_sheet()
        sys.exit(1)
|
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Nodes that define analyzers.
`OperationNode`s are objects that describe how to perform a full pass analysis
over some input tensors. They are described by an `OperationDef`. This module
contains the `OperationDef` subclasses that define specific operations such as
computing a mean or vocabulary. It also contains a special `OperationDef`,
`ExtractTensors` which represents the operation of extracting the values of a
tuple of `Tensor`s into a `PCollection`.
"""
import abc
import json
import os
import struct
from typing import Any, Optional, Sequence, Type
import uuid
import numpy as np
import tensorflow as tf
from tensorflow_transform import common_types
from tensorflow_transform import nodes
from tensorflow_transform import tf2_utils
from tensorflow_transform import tf_utils
from tensorflow_transform.graph_context import TFGraphContext
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
# pylint: disable=g-enable-tensorflow-import
# Key for graph collection containing `TensorSink` objects representing TFT
# analyzers.
TENSOR_REPLACEMENTS = 'tft_tensor_replacements'
# Key for graph collection containing `TensorSink` objects representing TFT
# analyzers irrespective of whether they have been evaluated or not.
ALL_REPLACEMENTS = 'tft_all_replacements'
def sanitize_label(label: str) -> str:
  """Returns `label` with every '/' character replaced by '#'."""
  return '#'.join(label.split('/'))
def _make_label(cls: Type[nodes.OperationDef],
                label: Optional[str] = None) -> str:
  """Returns a sanitized label for `cls`.

  If `label` is not given, one is derived from the class name and the current
  TF graph name scope.
  """
  chosen = label
  if chosen is None:
    scope = tf.compat.v1.get_default_graph().get_name_scope()
    chosen = '{}[{}]'.format(cls.__name__, scope)
  return sanitize_label(chosen)
# Value and file format for a temporary asset file written out while tracing
# the TF graph; `file_format` is 'text' or 'tfrecord_gzip' (see
# _write_to_temporary_asset_file).
TemporaryAssetInfo = tfx_namedtuple.namedtuple('TemporaryAssetInfo',
                                               ['value', 'file_format'])
class TensorInfo(
    tfx_namedtuple.namedtuple('TensorInfo',
                              ['dtype', 'shape', 'temporary_asset_info'])):
  """A container for attributes of output tensors from analyzers.

  Fields:
    dtype: The TensorFlow dtype.
    shape: The shape of the tensor.
    temporary_asset_info: A named tuple containing information about the
      temporary asset file to write out while tracing the TF graph.
  """

  def __new__(
      cls: Type['TensorInfo'], dtype: tf.dtypes.DType,
      shape: Sequence[Optional[int]],
      temporary_asset_info: Optional[TemporaryAssetInfo]) -> 'TensorInfo':
    """Validates the fields and constructs the namedtuple.

    Raises:
      TypeError: If `dtype` is not a `tf.DType`, or `temporary_asset_info`
        is neither None nor a `TemporaryAssetInfo`.
    """
    if not isinstance(dtype, tf.DType):
      raise TypeError('dtype must be a TensorFlow dtype, got {}'.format(dtype))
    if temporary_asset_info is not None and not isinstance(
        temporary_asset_info, TemporaryAssetInfo):
      raise TypeError(
          'temporary_asset_info should be an instance of TemporaryAssetInfo or '
          f'None, got {temporary_asset_info}')
    return super(TensorInfo, cls).__new__(
        cls,
        dtype=dtype,
        shape=shape,
        temporary_asset_info=temporary_asset_info)
class TensorSource(
    tfx_namedtuple.namedtuple('TensorSource', ['tensors', 'label']),
    nodes.OperationDef):
  """An `OperationDef` that defines extracting a tuple of tensor values.

  This `OperationDef` defines an operation that extracts the values of the given
  tensors into a PCollection of tuples of values. It is used as a source for
  analyzers, which further transform it.

  This OperationDef accepts zero inputs and return a single output representing
  the PCollection of tuples of values. It will be converted in
  tensorflow_transform.beam.analysis_graph_builder.build to an operation that
  extracts the tensors for a dictionary of tensors, after running a beam.ParDo
  to produce tensor values by running the graph on its inputs.

  Fields:
    tensors: The tensors whose values should be extracted.
    label: A unique label for this operation.
  """

  def __new__(cls, tensors):
    # Validate eagerly so that misuse fails at construction time rather than
    # when the analysis graph is executed.
    for tensor in tensors:
      if not isinstance(tensor, tf.Tensor):
        raise TypeError('tensor must be a Tensor, got {} of type {}'.format(
            tensor, type(tensor)))
    return super(TensorSource, cls).__new__(
        cls, tensors=tensors, label=_make_label(cls))
def get_input_tensors_value_nodes(tensor_inputs):
  """Returns a `ValueNode` wrapping `tensor_inputs` in a `TensorSource` op."""
  source_node = nodes.apply_operation(TensorSource, tensors=tensor_inputs)
  return source_node
# A graph tensor paired with the `future` (ValueNode) whose evaluated result
# will replace it; `is_asset_filepath` marks sinks whose value is the path of
# an asset file.
TensorSink = tfx_namedtuple.namedtuple(
    'TensorSink', ['tensor', 'future', 'is_asset_filepath'])
def _bind_future_as_tensor_v1(future: nodes.ValueNode,
                              tensor_info: TensorInfo,
                              name: Optional[str] = None) -> tf.Tensor:
  """Bind a future value as a tensor to a TF1 graph.

  A placeholder with the future's dtype/shape is inserted into the graph and
  registered (with the future) in the TENSOR_REPLACEMENTS collection so it can
  be swapped for the analyzed value later.
  """
  is_asset_filepath = tensor_info.temporary_asset_info is not None
  placeholder = tf.compat.v1.placeholder(
      tensor_info.dtype, tensor_info.shape, name)
  tf.compat.v1.add_to_collection(
      TENSOR_REPLACEMENTS, TensorSink(placeholder, future, is_asset_filepath))
  return placeholder
# Pair of (eager tensor holding a temporary asset file path, or None) and the
# temporary graph tensor standing in for an analyzer output during tracing.
_TemporaryAnalyzerOutputWrapper = tfx_namedtuple.namedtuple(
    '_TemporaryAnalyzerOutputWrapper', ['eager_asset_path', 'graph_tensor'])
def _write_to_temporary_asset_file(
    temp_dir: str, temporary_asset_info: TemporaryAssetInfo) -> str:
  """Returns path to temporary asset file created during tracing.

  The file is written under `temp_dir` with a fresh uuid-based name; its
  format ('text' or 'tfrecord_gzip') is taken from `temporary_asset_info`.
  """
  # TODO(b/170111921): This temporary file should have a unique name to
  # avoid namespace collisions between temporary files that contain data
  # of different dtypes.
  base_filename = uuid.uuid4().hex
  file_format = temporary_asset_info.file_format
  if file_format == 'text':
    result = os.path.join(temp_dir, base_filename)
    with tf.io.gfile.GFile(result, 'w') as f:
      f.write(temporary_asset_info.value)
    return result
  if file_format == 'tfrecord_gzip':
    result = os.path.join(temp_dir, '{}.tfrecord.gz'.format(base_filename))
    with tf.io.TFRecordWriter(result, 'GZIP') as f:
      f.write(temporary_asset_info.value)
    return result
  raise ValueError(
      'File format should be one of \'text\' or \'tfrecord_gzip\'. Received '
      f'{temporary_asset_info.file_format}')
def _get_temporary_analyzer_output(
    temp_dir: str,
    tensor_info: TensorInfo,
    name: Optional[str] = None) -> _TemporaryAnalyzerOutputWrapper:
  """Create a temporary graph tensor using attributes in `tensor_info`.

  Args:
    temp_dir: Path to a directory to write out any temporary asset files to.
    tensor_info: A `TensorInfo` object containing attributes to create the graph
      tensor.
    name: A string (or None). The created graph tensor uses this name.

  Returns:
    A named tuple `_TemporaryAnalyzerOutputWrapper` with:
      eager_asset_path: If the analyzer output is an asset file, an eager tensor
        pointing to the file path. Else, None.
      graph_tensor: The graph tensor representing the analyzer output.
  """
  asset = None
  with tf.name_scope('temporary_analyzer_output'):
    temporary_asset_info = tensor_info.temporary_asset_info
    is_asset_filepath = temporary_asset_info is not None
    if is_asset_filepath:
      # Placeholders cannot be used for assets, if this graph will be serialized
      # to a SavedModel, as they will be initialized with the init op. If a
      # `temp_dir` is provided, it is assumed that this graph will be
      # serialized and a temporary asset file is written out. Else, a
      # placeholder is returned.
      # TODO(b/149997088): Reduce number of temporary files written out.
      if temp_dir:
        # init_scope lifts the file write out of any enclosing FuncGraph so
        # it happens eagerly, at trace time.
        with tf.init_scope():
          temporary_asset_filepath = _write_to_temporary_asset_file(
              temp_dir, temporary_asset_info)
          asset = tf.constant(temporary_asset_filepath)
        graph_tensor = tf.constant(
            temporary_asset_filepath,
            dtype=tensor_info.dtype,
            shape=tensor_info.shape,
            name=name)
      else:
        graph_tensor = tf.raw_ops.Placeholder(
            dtype=tensor_info.dtype, shape=tensor_info.shape, name=name)
    else:
      # Using a placeholder with no default value causes tracing to fail if
      # there is any control flow dependent on a child tensor of this
      # placeholder. Hence, provide a temporary default value for it.
      # If dtype is string, we want a tensor that contains '0's instead of b'[]
      # to allow string to numeric conversion ops to trace successfully.
      temporary_dtype = (
          tf.int64 if tensor_info.dtype == tf.string else tensor_info.dtype)
      temporary_tensor = tf2_utils.supply_missing_tensor(
          1, tf.TensorShape(tensor_info.shape), temporary_dtype)
      if tensor_info.dtype == tf.string:
        temporary_tensor = tf.strings.as_string(temporary_tensor)
      graph_tensor = tf.raw_ops.PlaceholderWithDefault(
          input=temporary_tensor, shape=tensor_info.shape, name=name)
    return _TemporaryAnalyzerOutputWrapper(asset, graph_tensor)
def _bind_future_as_tensor_v2(
    future: nodes.ValueNode,
    tensor_info: TensorInfo,
    name: Optional[str] = None) -> common_types.TemporaryAnalyzerOutputType:
  """Bind a future value as a tensor to a TF2 FuncGraph.

  If the future is expected to write out an asset file and this method is
  invoked within a `TFGraphContext` that was provided a temporary directory,
  a temporary file is written out by this method.

  This could write out a significant number of temporary files depending on
  number of times the `preprocessing_fn` is traced and number of asset files
  in each tracing.

  Args:
    future: Future whose result should replace the graph tensor to which its
      bound.
    tensor_info: A `TensorInfo` object containing attributes to create the graph
      tensor.
    name: (Optional) If provided, the graph tensor created uses this name.

  Returns:
    A graph tensor or `tf.saved_model.Asset` that this future is bound to. If
    this future has already been evaluated in a previous TFT phase, it is
    directly returned.
  """
  graph = ops.get_default_graph()
  temp_dir = TFGraphContext.get_or_create_temp_dir()
  temporary_analyzer_info = _get_temporary_analyzer_output(
      temp_dir, tensor_info, name)
  is_asset_filepath = tensor_info.temporary_asset_info is not None

  # TODO(b/149997088): Switch to using a counter instead of tensor names.
  # Check if an evaluated value exists for this analyzer node.
  evaluated_replacements = TFGraphContext.get_evaluated_replacements()
  # evaluated_replacements is a dictionary from placeholder name to evaluated
  # tensor.
  # If `preprocessing_fn` was traced previously and this future was then
  # evaluated in a TFT phase, the result will be present in this dictionary.
  analyzer_name = temporary_analyzer_info.graph_tensor.name
  tensor_sink = TensorSink(temporary_analyzer_info.graph_tensor, future,
                           is_asset_filepath)
  # Recorded unconditionally, whether or not the future has been evaluated.
  graph.add_to_collection(ALL_REPLACEMENTS, tensor_sink)
  if (evaluated_replacements is not None and
      analyzer_name in evaluated_replacements):
    replaced_result = evaluated_replacements[analyzer_name]
    if is_asset_filepath:
      graph.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS,
                              replaced_result)
      return replaced_result
    else:
      # Without the identity wrapper some V2 tests fail with AttributeError:
      # Tensor.name is meaningless when eager execution is enabled.
      # TODO(b/149997088): Remove the identity wrapper once we no longer rely on
      # tensor names.
      return tf.identity(replaced_result)
  else:
    # Not yet evaluated: register the sink so a later TFT phase can replace
    # the temporary graph tensor with the analyzed value.
    graph.add_to_collection(TENSOR_REPLACEMENTS, tensor_sink)
    eager_asset_path = temporary_analyzer_info.eager_asset_path
    if is_asset_filepath and eager_asset_path is not None:
      tf_utils.track_asset_analyzer_output(eager_asset_path,
                                           temporary_analyzer_info.graph_tensor)
      graph.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS,
                              eager_asset_path)
    return temporary_analyzer_info.graph_tensor
def bind_future_as_tensor(
    future: nodes.ValueNode,
    tensor_info: TensorInfo,
    name: Optional[str] = None) -> common_types.TemporaryAnalyzerOutputType:
  """Bind a future value as a tensor."""
  # TODO(b/165884902): Use tf.inside_function after dropping TF 2.3 support.
  # Tracing under tf.function yields a `FuncGraph`; everything else is
  # treated as a classic TF1 graph.
  tracing_v2 = isinstance(ops.get_default_graph(), func_graph.FuncGraph)
  binder = _bind_future_as_tensor_v2 if tracing_v2 else _bind_future_as_tensor_v1
  return binder(future, tensor_info, name)
def wrap_as_tensor(
    output_value_node: nodes.ValueNode
) -> common_types.TemporaryAnalyzerOutputType:
  """Wraps one output of an `AnalyzerDef` node as a graph tensor."""
  operation_def = output_value_node.parent_operation.operation_def
  assert isinstance(operation_def, AnalyzerDef)
  tensor_info = operation_def.output_tensor_infos[output_value_node.value_index]
  return bind_future_as_tensor(output_value_node, tensor_info)
class Combiner:
  """Analyze using combiner function.

  This object mirrors a beam.CombineFn, that will receive a beam PCollection
  representing the batched input tensors.
  """

  def __repr__(self):
    return '<{}>'.format(self.__class__.__name__)

  def create_accumulator(self):
    """Return a fresh, empty accumulator.

    Returns: An empty accumulator.  This can be any Python value.
    """
    raise NotImplementedError

  def add_input(self, accumulator, batch_values):
    """Return result of folding a batch of inputs into accumulator.

    Args:
      accumulator: the current accumulator
      batch_values: A list of ndarrays representing the values of the inputs for
          a batch, which should be added to the accumulator.

    Returns: An accumulator that includes the batch of inputs.
    """
    raise NotImplementedError

  def merge_accumulators(self, accumulators):
    """Merges several accumulators to a single accumulator value.

    Args:
      accumulators: the accumulators to merge

    Returns: The sole merged accumulator.
    """
    raise NotImplementedError

  def compact(self, accumulator):
    """Returns an equivalent but more compact representation of the accumulator.

    Args:
      accumulator: the current accumulator.

    Returns: A more compact accumulator.
    """
    # Identity by default; subclasses may override to shrink the accumulator.
    return accumulator

  def extract_output(self, accumulator):
    """Return result of converting accumulator into the output value.

    Args:
      accumulator: the final accumulator value.

    Returns: A list of ndarrays representing the result of this combiner.
    """
    raise NotImplementedError

  def output_tensor_infos(self):
    """Return the number / types of outputs that are produced by extract_output.

    Returns: An iterable of `TensorInfo` describing how the outputs that
      extract_output will produce should be wrapped as `Tensor`s.

    Types are required to be TensorFlow dtypes.
    """
    raise NotImplementedError

  @property
  def accumulator_coder(self):
    # Default cache coder; handles accumulators made of (lists of) numpy
    # values or JSON-serializable Python values.
    return JsonNumpyCacheCoder()
class CacheCoder(metaclass=abc.ABCMeta):
  """A coder interface for encoding and decoding cache items."""

  def __repr__(self):
    return '<{}>'.format(self.__class__.__name__)

  @abc.abstractmethod
  def encode_cache(self, cache):
    """Encodes a cache item into bytes suitable for storage."""
    pass

  @abc.abstractmethod
  def decode_cache(self, encoded_cache):
    """Inverse of `encode_cache`: reconstructs the cache item."""
    pass
class JsonNumpyCacheCoder(CacheCoder):
  """An accumulator cache coder that can handle lists."""

  def _convert_numpy_dtype(self, x):
    # Numpy arrays and scalars expose tolist(), which yields plain Python
    # values that json can serialize; everything else passes through.
    return x.tolist() if hasattr(x, 'tolist') else x

  def encode_cache(self, accumulator):
    if isinstance(accumulator, (list, tuple)):
      primitive = [self._convert_numpy_dtype(item) for item in accumulator]
    else:
      primitive = self._convert_numpy_dtype(accumulator)
    return tf.compat.as_bytes(json.dumps(primitive))

  def decode_cache(self, encoded_accumulator):
    return np.array(json.loads(tf.compat.as_text(encoded_accumulator)))
class AnalyzerDef(nodes.OperationDef, metaclass=abc.ABCMeta):
  """A subclass of OperationDef whose outputs can be constant tensors.

  An AnalyzerDef is an OperationDef that also provides enough information to
  wrap each of its outputs as constant `Tensor`s in the graph. By inserting
  the output of the AnalyzerDef back into the graph, the user can define
  multiple levels of analysis and transformation.

  All `OperationDef`s are placeholders for operations that will be implemented
  as `beam.PTransform`s. This is done by a registration system. The subclasses
  defined below that inherit from `AnalyzerDef` have their implementations
  registered in the module `tensorflow_transform.beam.analyzer_impls`.
  """

  @property
  @abc.abstractmethod
  def output_tensor_infos(self):
    """A description on how to wrap the outputs of this AnalyzerDef.

    An `OperationDef` defines the number of outputs it creates. An
    `AnalyzerDef` must implement this property that defines not only the
    number of outputs but how to wrap each output as a tensor.
    """
    pass

  @property
  def num_outputs(self):
    """The number of outputs returned by this operation."""
    # Derived directly from the per-output `TensorInfo` list.
    return len(self.output_tensor_infos)
# We do the packing of combiners after the caching optimization. Hence, we don't
# name the packed operations as cacheable. The rationale behind doing the
# combiner packing after the cache optimization is that this optimization is
# more of a Beam execution level optimization and we want to keep it towards the
# end. So that, once Beam can automatically pack combines, we can remove this.
class PackedCombineAccumulate(
    tfx_namedtuple.namedtuple('PackedCombineAccumulate',
                              ['combiners', 'label']), nodes.OperationDef):
  """An analyzer that packs a list of combiners into a single beam CombineFn.

  Fields:
    combiners: A list of `analysis_graph_builder._CombinerOpWrapper` objects.
    label: A unique label for this operation.
  """
  __slots__ = ()

  def __new__(cls, combiners, label):
    return super(PackedCombineAccumulate, cls).__new__(
        cls, combiners=combiners, label=_make_label(cls, label))

  @property
  def num_outputs(self):
    # Single output: the packed accumulator.
    return 1

  # Note that this will not have any effect as packing of combiners is done
  # after the caching optimization.
  @property
  def is_partitionable(self):
    return True
class PackedCombineMerge(
    tfx_namedtuple.namedtuple('PackedCombineMerge', ['combiners', 'label']),
    nodes.OperationDef):
  """An analyzer that packs a list of combiners into a single beam CombineFn.

  Fields:
    combiners: A list of `analysis_graph_builder._CombinerOpWrapper` objects.
    label: A unique label for this operation.
  """
  __slots__ = ()

  def __new__(cls, combiners, label):
    return super(PackedCombineMerge, cls).__new__(
        cls, combiners=combiners, label=_make_label(cls, label))

  @property
  def num_outputs(self):
    # Single output: the merged result of the packed combiners.
    return 1
class CacheableCombineAccumulate(
    tfx_namedtuple.namedtuple('CacheableCombineAccumulate',
                              ['combiner', 'label']), nodes.OperationDef):
  """An analyzer that runs a beam CombineFn to accumulate without merging.

  This analyzer reduces the values that it accepts as inputs, using the
  provided `Combiner`. The `Combiner` is applied to the data by wrapping it as
  a `beam.CombineFn` and applying `beam.Combine`.

  Fields:
    combiner: The Combiner to be applied to the inputs.
    label: A unique label for this operation.
  """
  __slots__ = ()

  def __new__(cls, combiner):
    return super(CacheableCombineAccumulate, cls).__new__(
        cls, combiner=combiner, label=_make_label(cls))

  @property
  def num_outputs(self):
    # Single output: the accumulator.
    return 1

  @property
  def is_partitionable(self):
    # Accumulation (without merge) can run independently per data partition.
    return True

  @property
  def cache_coder(self):
    # Accumulators are cached with the combiner's own coder.
    return self.combiner.accumulator_coder
class CacheableCombineMerge(
    tfx_namedtuple.namedtuple('CacheableCombineMerge', ['combiner', 'label']),
    nodes.OperationDef):
  """An analyzer that runs a beam CombineFn to only merge computed accumulators.

  This analyzer reduces the values that it accepts as inputs, using the
  provided `Combiner`. The `Combiner` is applied to the data by wrapping it as
  a `beam.CombineFn` and applying `beam.Combine`.

  Fields:
    combiner: The Combiner to be applied to the inputs.
    label: A unique label for this operation.
  """
  __slots__ = ()

  def __new__(cls, combiner):
    return super(CacheableCombineMerge, cls).__new__(
        cls, combiner=combiner, label=_make_label(cls))

  @property
  def num_outputs(self):
    # Single output: the merged accumulator.
    return 1
class _CombinerPerKeyAccumulatorCoder(CacheCoder):
  """Coder for per-key combiner accumulators.

  A (key, value) accumulator is encoded by first encoding the value with the
  wrapped combiner coder and then encoding the resulting pair with a KV coder;
  decoding reverses the two steps.
  """

  def __init__(self, value_coder):
    self._combiner_coder = value_coder
    self._vocabulary_coder = _BaseKVCoder()
    super().__init__()

  def __repr__(self):
    return '<{}[{}[{}]]>'.format(self.__class__.__name__,
                                 repr(self._vocabulary_coder),
                                 repr(self._combiner_coder))

  def encode_cache(self, accumulator):
    key, value = accumulator
    return self._vocabulary_coder.encode_cache(
        (key, self._combiner_coder.encode_cache(value)))

  def decode_cache(self, encoded_accumulator):
    key, encoded_value = self._vocabulary_coder.decode_cache(
        encoded_accumulator)
    return (key, self._combiner_coder.decode_cache(encoded_value))
class CacheableCombinePerKeyAccumulate(
    tfx_namedtuple.namedtuple('CacheableCombinePerKeyAccumulate',
                              ['combiner', 'label']), AnalyzerDef):
  """An analyzer that runs `beam.CombinePerKey` to accumulate without merging.

  This analyzer reduces the values that it accepts as inputs, using the
  provided `Combiner`. The `Combiner` is applied to the data by wrapping it as
  a `beam.CombineFn` and applying `beam.CombinePerKey`.

  This analyzer is implemented by
  `tensorflow_transform.beam.analyzer_impls._IntermediateAccumulateCombineImpl`.

  Fields:
    combiner: The Combiner to be applied to the inputs.
    label: A unique label for this operation.
  """
  __slots__ = ()

  def __new__(cls, combiner):
    return super(CacheableCombinePerKeyAccumulate, cls).__new__(
        cls, combiner=combiner, label=_make_label(cls))

  @property
  def num_outputs(self):
    # Single output: the per-key accumulators.
    return 1

  @property
  def is_partitionable(self):
    # Per-key accumulation (without merge) can run per data partition.
    return True

  @property
  def cache_coder(self):
    # Each cached item is a (key, accumulator) pair.
    return _CombinerPerKeyAccumulatorCoder(self.combiner.accumulator_coder)
class CacheableCombinePerKeyMerge(
    tfx_namedtuple.namedtuple('CacheableCombinePerKeyMerge',
                              ['combiner', 'label']), nodes.OperationDef):
  """An analyzer that runs `beam.CombinePerKey` to only merge accumulators.

  This analyzer reduces the values that it accepts as inputs, using the
  provided `Combiner`. The `Combiner` is applied to the data by wrapping it as
  a `beam.CombineFn` and applying `beam.CombinePerKey`.

  This analyzer is implemented by
  `tensorflow_transform.beam.analyzer_impls._MergeAccumulatorsCombinePerKeyImpl`

  Fields:
    combiner: The Combiner to use for merging and extracting outputs.
    label: A unique label for this operation.
  """
  __slots__ = ()

  def __new__(cls, combiner):
    return super(CacheableCombinePerKeyMerge, cls).__new__(
        cls, combiner=combiner, label=_make_label(cls))
class CacheableCombinePerKeyFormatKeys(
    tfx_namedtuple.namedtuple('CacheableCombinePerKeyFormatKeys',
                              ['combiner', 'label']), AnalyzerDef):
  """An analyzer that formats output for the non-stored per-key case.

  This analyzer converts the (key, output) pairs into a tuple of keys (of type
  string) and outputs.

  This analyzer is implemented by
  `tensorflow_transform.beam.analyzer_impls._CombinePerKeyFormatKeysImpl`

  Fields:
    combiner: The Combiner to use for extracting outputs.
    label: A unique label for this operation.
  """
  __slots__ = ()

  def __new__(cls, combiner):
    return super(CacheableCombinePerKeyFormatKeys, cls).__new__(
        cls, combiner=combiner, label=_make_label(cls))

  @property
  def output_tensor_infos(self):
    # Returns a key vocab and one output per combiner output.
    # Each combiner output gains a leading (None,) key dimension.
    return [TensorInfo(tf.string, (None,), None)] + [
        TensorInfo(info.dtype, (None,) + info.shape, info.temporary_asset_info)
        for info in self.combiner.output_tensor_infos()
    ]
class CacheableCombinePerKeyFormatLarge(
    tfx_namedtuple.namedtuple('CacheableCombinePerKeyFormatLarge', ['label']),
    nodes.OperationDef):
  """An analyzer that formats output prior to writing to file for per-key case.

  This operation operates on the output of CacheableCombinePerKeyAccumulate and
  is implemented by `tensorflow_transform.beam.analyzer_impls.
  _CombinePerKeyFormatLargeImpl`.
  """
  __slots__ = ()

  def __new__(cls):
    return super(CacheableCombinePerKeyFormatLarge, cls).__new__(
        cls, label=_make_label(cls))

  @property
  def num_outputs(self):
    # Single output: the formatted per-key records.
    return 1
class ScaleAndFlattenPerKeyBucketBouandaries(
    tfx_namedtuple.namedtuple('PostProcessPerKeyBucketBoundaries',
                              ['output_tensor_dtype', 'label']), AnalyzerDef):
  """Combines per-key quantile boundaries into a single flattened array.

  Receives a 2-d array of boundaries (one row per key), computes a scale
  and shift for each row, and produces one combined 1-d boundaries array
  along with the number of buckets defined per key.

  Outputs: boundaries, scale_factor_per_key, shift_per_key, num_buckets.

  Example: for input boundaries [[0, 1, 2], [0, 1, 2]] it returns:
    boundaries: [0, 0.5, 1, 1.5, 2]
    scale_factor_per_key: [0.5, 0.5]
    shift_per_key: [0, 1]
    num_buckets: 4
  Each input x is then transformed before bucket lookup as:
    F(x, key) = x * scale_factor_per_key[key] + shift_per_key[key]

  NOTE: the misspelling in the class name ('Bouandaries') is kept for
  compatibility with existing references.
  """
  __slots__ = ()

  def __new__(cls, output_tensor_dtype):
    return super().__new__(
        cls, output_tensor_dtype=output_tensor_dtype, label=_make_label(cls))

  @property
  def output_tensor_infos(self):
    # Three 1-d outputs (boundaries, scale_factor_per_key, shift_per_key)
    # followed by the scalar num_buckets.
    vector_info = TensorInfo(self.output_tensor_dtype, (None,), None)
    return [vector_info] * 3 + [TensorInfo(tf.int64, (), None)]
class VocabularyAccumulate(
    tfx_namedtuple.namedtuple('VocabularyAccumulate',
                              ['vocab_ordering_type', 'input_dtype', 'label']),
    nodes.OperationDef):
  """Accumulates unique tokens together with their frequency or weight.

  Implemented by
  `tensorflow_transform.beam.analyzer_impls._VocabularyAccumulateImpl`.
  """
  __slots__ = ()

  def __new__(cls, vocab_ordering_type, input_dtype=tf.string.name):
    return super().__new__(
        cls,
        vocab_ordering_type=vocab_ordering_type,
        input_dtype=input_dtype,
        label=_make_label(cls))

  @property
  def num_outputs(self):
    return 1

  @property
  def is_partitionable(self):
    # Accumulation can run independently over data partitions.
    return True

  @property
  def cache_coder(self):
    return _VocabularyAccumulatorCoder(input_dtype=self.input_dtype)
class _BaseKVCoder(CacheCoder):
  """Coder for (key, value) based accumulators.

  Serializes both members as length-prefixed byte strings.
  """

  def __init__(self):
    # Two signed 64-bit lengths ('qq') prefix the token/value payload.
    self._lengths_prefix_format = 'qq'
    self._lengths_prefix_length = struct.calcsize(self._lengths_prefix_format)
    super().__init__()

  def encode_cache(self, accumulator):
    token, value = accumulator
    payload_format = '{}{}s{}s'.format(
        self._lengths_prefix_format, len(token), len(value))
    return struct.pack(payload_format, len(token), len(value), token, value)

  def decode_cache(self, encoded_accumulator):
    # Read the two lengths, then unpack the payload that follows them.
    (len_token, len_value) = struct.unpack_from(
        self._lengths_prefix_format,
        encoded_accumulator[:self._lengths_prefix_length])
    return struct.unpack_from(
        '{}s{}s'.format(len_token, len_value),
        encoded_accumulator[self._lengths_prefix_length:])
class _VocabularyAccumulatorCoder(_BaseKVCoder):
  """Coder for vocabulary accumulators."""

  def __init__(self, input_dtype=tf.string.name):
    self._input_dtype = tf.dtypes.as_dtype(input_dtype)
    super().__init__()

  def encode_cache(self, accumulator):
    token, value = accumulator
    if self._input_dtype is not tf.string:
      # Non-string tokens are serialized through JSON.
      token = tf.compat.as_bytes(json.dumps(token))
    if isinstance(value, tuple):
      # A _WeightedMeanAndVarAccumulator: convert each field to a plain
      # list so it is JSON-serializable.
      value = [
          field.tolist()
          for field in (value.count, value.mean, value.variance, value.weight)
      ]
    value = tf.compat.as_bytes(json.dumps(value))
    return super().encode_cache((token, value))

  def decode_cache(self, encoded_accumulator):
    token, value = super().decode_cache(encoded_accumulator)
    if self._input_dtype is not tf.string:
      token = json.loads(tf.compat.as_text(token))
    value = json.loads(tf.compat.as_text(value))
    if isinstance(value, list):
      # A _WeightedMeanAndVarAccumulator serialized as a list: restore
      # each field back to an np.array.
      (count, mean, variance, weight) = value
      value = (np.array(count), np.array(mean), np.array(variance),
               np.array(weight))
    return token, value
class VocabularyCount(
    tfx_namedtuple.namedtuple('VocabularyCount', ['label']),
    nodes.OperationDef):
  """Counts the total number of tokens in a vocabulary.

  Consumes the output of VocabularyAccumulate; implemented by
  `tensorflow_transform.beam.analyzer_impls._VocabularyCountImpl`.
  The single output is an integer count.

  Fields:
    label: A unique label for this operation.
  """
  __slots__ = ()

  def __new__(cls, label):
    # Unlike most nodes here, the caller supplies a label suffix.
    return super().__new__(cls, label=_make_label(cls, label))

  @property
  def num_outputs(self):
    return 1
class VocabularyMerge(
    tfx_namedtuple.namedtuple('VocabularyMerge', [
        'vocab_ordering_type', 'use_adjusted_mutual_info', 'min_diff_from_avg',
        'label'
    ]), nodes.OperationDef):
  """Merges the accumulators produced by VocabularyAccumulate.

  Implemented by
  `tensorflow_transform.beam.analyzer_impls._VocabularyMergeImpl`.

  See `tft.vocabulary` for a description of the parameters.
  """
  __slots__ = ()

  def __new__(cls, vocab_ordering_type, use_adjusted_mutual_info,
              min_diff_from_avg):
    return super().__new__(
        cls,
        vocab_ordering_type=vocab_ordering_type,
        use_adjusted_mutual_info=use_adjusted_mutual_info,
        min_diff_from_avg=min_diff_from_avg,
        label=_make_label(cls))

  @property
  def num_outputs(self):
    return 1
class VocabularyPrune(
    tfx_namedtuple.namedtuple('VocabularyPrune', [
        'top_k', 'frequency_threshold', 'informativeness_threshold',
        'coverage_top_k', 'coverage_frequency_threshold',
        'coverage_informativeness_threshold', 'key_fn', 'input_dtype', 'label'
    ]), nodes.OperationDef):
  """Filters and orders a computed vocabulary.

  Consumes the output of VocabularyMerge; implemented by
  `tensorflow_transform.beam.analyzer_impls._VocabularyPruneImpl`.

  See `tft.vocabulary` for a description of the parameters.
  """
  __slots__ = ()

  def __new__(cls,
              top_k,
              frequency_threshold,
              input_dtype,
              informativeness_threshold=float('-inf'),
              coverage_top_k=None,
              coverage_frequency_threshold=0,
              coverage_informativeness_threshold=float('-inf'),
              key_fn=None):
    return super().__new__(
        cls,
        top_k=top_k,
        frequency_threshold=frequency_threshold,
        informativeness_threshold=informativeness_threshold,
        coverage_top_k=coverage_top_k,
        coverage_frequency_threshold=coverage_frequency_threshold,
        coverage_informativeness_threshold=coverage_informativeness_threshold,
        key_fn=key_fn,
        input_dtype=input_dtype,
        label=_make_label(cls))

  @property
  def num_outputs(self):
    return 1
class VocabularyOrderAndWrite(
    tfx_namedtuple.namedtuple('VocabularyOrderAndWrite', [
        'vocab_filename', 'store_frequency', 'input_dtype', 'label',
        'fingerprint_shuffle', 'file_format', 'input_is_sorted'
    ]), AnalyzerDef):
  """Writes vocabulary files from an accumulator.

  Consumes the output of VocabularyPrune; implemented by
  `tensorflow_transform.beam.analyzer_impls._VocabularyOrderAndWriteImpl`.

  See `tft.vocabulary` for a description of the parameters.
  """
  __slots__ = ()

  def __new__(cls,
              vocab_filename,
              store_frequency,
              fingerprint_shuffle,
              file_format,
              input_dtype=tf.string.name,
              input_is_sorted=False):
    return super().__new__(
        cls,
        vocab_filename=vocab_filename,
        store_frequency=store_frequency,
        fingerprint_shuffle=fingerprint_shuffle,
        file_format=file_format,
        input_dtype=input_dtype,
        input_is_sorted=input_is_sorted,
        label=_make_label(cls))

  @property
  def output_tensor_infos(self):
    # Placeholder content for this node's asset file, written before the
    # real vocab file is evaluated and written out.
    if tf.dtypes.as_dtype(self.input_dtype) == tf.string:
      temporary_asset_value = b'TEMPORARY_ASSET_VALUE'
    else:
      temporary_asset_value = b'-777777'
    if self.store_frequency:
      temporary_asset_value = b'1 %s' % temporary_asset_value
    return [
        TensorInfo(tf.string, [],
                   TemporaryAssetInfo(temporary_asset_value, self.file_format))
    ]
class PTransform(
    tfx_namedtuple.namedtuple('PTransform', [
        'ptransform', 'output_tensor_info_list', 'is_partitionable',
        'cache_coder', 'label'
    ]), AnalyzerDef):
  """(Experimental) OperationDef for a PTransform analyzer.

  Implemented by
  `tensorflow_transform.beam.analyzer_impls._PTransformImpl`.

  Fields:
    ptransform: The `beam.PTransform` to be applied to the inputs.
    output_tensor_info_list: A list of `TensorInfo`s describing the
      outputs of this `PTransform`.
    is_partitionable: Whether or not this PTransform is partitionable.
    cache_coder: (optional) A `CacheCoder` instance.
    label: A unique label for this operation.
  """
  __slots__ = ()

  def __new__(cls,
              ptransform: Any,
              output_tensor_info_list: Sequence[TensorInfo],
              is_partitionable: bool,
              cache_coder: Optional[CacheCoder] = None):
    return super().__new__(
        cls,
        ptransform=ptransform,
        output_tensor_info_list=output_tensor_info_list,
        is_partitionable=is_partitionable,
        cache_coder=cache_coder,
        label=_make_label(cls))

  @property
  def output_tensor_infos(self):
    return self.output_tensor_info_list
class EncodeCache(
    tfx_namedtuple.namedtuple('EncodeCache', ['coder', 'label']),
    nodes.OperationDef):
  """OperationDef that encodes a cache instance.

  Fields:
    coder: A CacheCoder instance used to encode cache entries.
    label: A unique label for this operation.
  """
  __slots__ = ()

  @property
  def is_partitionable(self):
    # Encoding is element-wise, so it may run per data partition.
    return True
class DecodeCache(
    tfx_namedtuple.namedtuple('DecodeCache',
                              ['dataset_key', 'cache_key', 'coder', 'label']),
    nodes.OperationDef):
  """OperationDef that decodes a cache instance.

  Fields:
    coder: A CacheCoder instance used to decode cache entries.
    label: A unique label for this operation.
  """
  __slots__ = ()

  def get_field_str(self, field_name):
    # Avoid rendering the raw cache-key bytes in debug strings.
    if field_name == 'cache_key':
      return '<bytes>'
    return super().get_field_str(field_name)

  @property
  def is_partitionable(self):
    return True
class AddKey(
    tfx_namedtuple.namedtuple('AddKey', ['key', 'label']), nodes.OperationDef):
  """Represents pairing each element of a PCollection with a fixed key.

  This corresponds to a `beam.Map` whose output elements are
  (key, value) tuples.

  Attributes:
    key: The key added to each element of the input PCollection.
    label: A unique label for this operation.
  """
  __slots__ = ()

  @property
  def is_partitionable(self):
    return True
class FlattenLists(
    tfx_namedtuple.namedtuple('FlattenLists', ['label']), nodes.OperationDef):
  """An operation that represents flattening a PCollection of lists.

  Attributes:
    label: A unique label for this operation.
  """
  # Fix: every other namedtuple-based OperationDef in this module declares
  # empty __slots__ (avoiding a per-instance __dict__); this class was the
  # only one missing it.
  __slots__ = ()

  def __new__(cls):
    return super(FlattenLists, cls).__new__(cls, label=_make_label(cls))

  @property
  def is_partitionable(self):
    return True
class ExtractCombineMergeOutputs(
    tfx_namedtuple.namedtuple('ExtractOutputs',
                              ['output_tensor_info_list', 'label']),
    AnalyzerDef):
  """Extracts the outputs of a combine merge.

  This corresponds to a `beam.Map` that turns each element of the input
  PCollection into a tuple of outputs.

  Attributes:
    output_tensor_info_list: A list of `TensorInfo`s describing the
      outputs of this operation.
    label: A unique label for this operation.
  """
  __slots__ = ()

  def __new__(cls, output_tensor_info_list):
    return super().__new__(
        cls,
        output_tensor_info_list=output_tensor_info_list,
        label=_make_label(cls))

  @property
  def output_tensor_infos(self):
    return self.output_tensor_info_list
class ExtractPackedCombineMergeOutputs(
    tfx_namedtuple.namedtuple('ExtractOutputs',
                              ['output_tensor_info_list', 'label']),
    AnalyzerDef):
  """Extracts the outputs of a packed combine merge.

  This corresponds to a `beam.Map` that turns each element of the input
  PCollection into a tuple of outputs.

  Attributes:
    output_tensor_info_list: A list of `TensorInfo`s describing the
      outputs of this operation.
    label: A unique label for this operation.
  """
  __slots__ = ()

  @property
  def output_tensor_infos(self):
    return self.output_tensor_info_list
|
|
from captcha.fields import ReCaptchaField
from django import forms
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.urls import reverse
from allauth.account import app_settings as account_settings
from allauth.account.forms import (
ChangePasswordForm as AllauthChangePasswordForm,
LoginForm as AllauthLoginForm,
ResetPasswordForm as AllauthResetPasswordForm,
SignupForm as AllauthSignupForm,
SetPasswordForm as AllauthSetPasswordForm,
)
from allauth.account.models import EmailAddress
from allauth.account.utils import filter_users_by_email
from allauth.socialaccount.forms import SignupForm as AllauthSocialSignupForm
from .models import Member
# The project's active user model (honors a custom AUTH_USER_MODEL).
User = get_user_model()
def _clean_password(child_class, self_instance, password_field_name):
    """
    Validate a password field, enforcing the configured minimum length.

    Runs the parent class's own clean_<field> hook first (if one exists),
    then checks the cleaned value against
    ``settings.ACCOUNT_PASSWORD_MIN_LENGTH``.
    """
    min_length = settings.ACCOUNT_PASSWORD_MIN_LENGTH
    # Also use parent method if django-user-accounts ever implements it.
    parent_cleaner = getattr(
        super(child_class, self_instance), "clean_" + password_field_name, None
    )
    if parent_cleaner is not None:
        parent_cleaner()
    password = self_instance.cleaned_data[password_field_name]
    if len(password) < min_length:
        raise forms.ValidationError(
            "Password should be at least " + "%d characters long." % min_length
        )
    return password
class MemberLoginForm(AllauthLoginForm):
    """
    A login form that additionally checks the user has a Member role.
    """

    authentication_fail_message = (
        "Your password didn't match the " + "username or email you provided."
    )

    def clean(self):
        """Run allauth's checks, then require a Member record for the user."""
        cleaned_data = super().clean()
        # Bail out early if authentication itself already failed.
        if self._errors:
            return
        if self.user:
            try:
                Member.objects.get(user=self.user)
            except Member.DoesNotExist:
                raise forms.ValidationError("This account doesn't have a Member role.")
        return cleaned_data
class MemberSignupForm(AllauthSignupForm):
    """
    Extend django-allauth's SignupForm with Open Humans additions.

    Adds a `name` field storing a Member's username and a required
    `terms` checkbox for the Terms of Use.
    """

    name = forms.CharField(max_length=30)
    terms = forms.BooleanField()

    class Meta:  # noqa: D101
        fields = "__all__"
class MemberProfileEditForm(forms.ModelForm):
    """
    Edit a member's public profile information (image and bio).
    """

    class Meta:  # noqa: D101
        model = Member
        fields = ("profile_image", "about_me")
class MemberContactSettingsEditForm(forms.ModelForm):
    """
    Edit a member's contact preferences (newsletter, user messages).
    """

    class Meta:  # noqa: D101
        model = Member
        fields = ("newsletter", "allow_user_messages")
class MemberChangeNameForm(forms.ModelForm):
    """
    Edit a member's displayed name.
    """

    class Meta:  # noqa: D101
        model = Member
        fields = ("name",)
class MemberChangeUsernameForm(forms.ModelForm):
    """
    A form for editing a member's username.
    """

    def clean_username(self):
        """Run allauth's configured username validators on the new value."""
        username = self.cleaned_data["username"]
        for validator in account_settings.USERNAME_VALIDATORS:
            validator(username)
        return username

    class Meta:  # noqa: D101
        model = User
        fields = ("username",)
        # Fix: corrected grammar of the user-facing help text
        # ("number" -> "numbers", "underscore" -> "underscores").
        help_texts = {"username": "Letters, numbers, or underscores only."}
class ActivityMessageForm(forms.Form):
    """
    A form that allows a user to send a message to a project.
    """

    message = forms.CharField(widget=forms.Textarea)

    # Skip the CAPTCHA field during local development.
    if not settings.DEBUG:
        captcha = ReCaptchaField()

    def send_mail(self, project_member_id, project):
        """Render the message templates and deliver to the project contact."""
        context = {
            "message": self.cleaned_data["message"],
            "project_member_id": project_member_id,
            "project": project,
        }
        plain = render_to_string("email/activity-message.txt", context)
        html = render_to_string("email/activity-message.html", context)
        subject = "Open Humans: message from project member {}".format(
            project_member_id
        )
        # `send_mail` here resolves to django.core.mail.send_mail.
        send_mail(
            subject,
            plain,
            "no-reply@example.com",
            [project.contact_email],
            html_message=html,
        )
class EmailUserForm(forms.Form):
    """
    A form that allows one user to email another user.
    """

    message = forms.CharField(widget=forms.Textarea)
    captcha = ReCaptchaField()

    def send_mail(self, sender, receiver):
        """Render the user-to-user message templates and deliver them."""
        context = {
            "message": self.cleaned_data["message"],
            "sender": sender,
            "receiver": receiver,
        }
        plain = render_to_string("email/user-message.txt", context)
        html = render_to_string("email/user-message.html", context)
        subject = "Open Humans: message from {} ({})".format(
            sender.member.name, sender.username
        )
        # `send_mail` here resolves to django.core.mail.send_mail.
        send_mail(
            subject,
            plain,
            sender.member.primary_email.email,
            [receiver.member.primary_email.email],
            html_message=html,
        )
class ResetPasswordForm(AllauthResetPasswordForm):
    """
    Subclass django-allauth's ResetPasswordForm to record, per member,
    the URI the user should return to after resetting.
    """

    def save(self, request, **kwargs):
        """Save the reset and stash the post-reset redirect on each member."""
        next_url = request.session.pop("next_url", reverse(settings.LOGIN_REDIRECT_URL))
        ret = super().save(request, **kwargs)
        # Use the lookup method allauth uses to get relevant members.
        for user in filter_users_by_email(ret):
            member = Member.objects.get(user=user)
            member.password_reset_redirect = next_url
            member.save()
        return ret
class SocialSignupForm(AllauthSocialSignupForm):
    """
    Social-account signup form with the extra fields our flow requires
    that allauth's default form does not provide.
    """

    name = forms.CharField(
        max_length=60,
        widget=forms.TextInput(attrs={"placeholder": "Write your name here"}),
    )
    newsletter = forms.BooleanField(required=False)
    allow_contact = forms.BooleanField(required=False)
    terms = forms.BooleanField()

    def save(self, request):
        """
        Create the user, populate the Member record, and mark the
        social-provider email address as verified.
        """
        user = super().save(request)
        member = Member(
            user=user,
            name=self.cleaned_data["name"],
            newsletter=self.cleaned_data["newsletter"],
            allow_user_messages=self.cleaned_data["allow_contact"],
        )
        member.save()
        # And, populate the email field in the user table
        account_emailaddress = EmailAddress.objects.get(
            email=self.cleaned_data["email"]
        )
        user.email = account_emailaddress.email
        user.save()
        # We are trusting emails provided by Facebook and Google
        account_emailaddress.verified = True
        account_emailaddress.save()
        return user
|
|
#
# Copyright 2005 OpenHosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id: RedHat.py,v 1.16 2008/08/29 22:15:52 grisha Exp $
# This is the base class for RedHat (or RedHat-like?) distros.
from Distro import Distro, Bundle
import os
import time
import commands
import urllib
import sys
import shutil
import rpm
import tempfile
from openvps.common import util
from openvps.host import cfg, vsutil
# Module-level cache of the remote header.info listing, shared across all
# RedHatBundle instances so the file is fetched at most once per run.
header_info_cache = None
class RedHatBundle(Bundle):
    """Abstract bundle of RPM packages for a RedHat-like distro.

    Knows how to locate RPMs (local directory or HTTP mirror) and install
    them into a VPS root with `rpm --root`. Concrete subclasses presumably
    supply `self.packages` and `self.desc` — confirm against Bundle.
    """

    # when rpm's are pulled from a local disk, they would be stored in
    # distroot/DISTRO_DIR
    DISTRO_DIR = 'RedHat/RPMS'

    # this is an abstract class
    def __init__(self, distroot, vpsroot):
        # distroot: where the distribution's RPMs live (path or URL).
        # vpsroot: the VPS filesystem root to install into.
        if not distroot.startswith('http://') and not distroot.startswith('https://'):
            # this is a local file, the rpms are in DISTRO_DIR
            # directory
            distroot = os.path.join(distroot, self.DISTRO_DIR)
        # call super
        Bundle.__init__(self, distroot, vpsroot)

    def install(self):
        """Install this bundle's packages into the VPS root via `rpm -Uvh`."""
        # mount dev and proc
        os.chdir(self.vpsroot) # this calms some warnings from following mounts (?)
        cmd = 'mount -t proc none %s' % os.path.join(self.vpsroot, 'proc')
        commands.getoutput(cmd)
        cmd = 'mount -t devpts none %s' % os.path.join(self.vpsroot, 'dev', 'pts')
        commands.getoutput(cmd)
        try:
            print "Installing %s from %s to %s" % (self.desc, self.distroot, self.vpsroot)
            cmd = 'rpm --root %s -Uvh %s' % (self.vpsroot, ' '.join(self.resolve_packages()))
            print cmd
            # Unbuffered (bufsize 0) pipe so rpm's progress output is echoed
            # byte-by-byte as it arrives.
            pipe = os.popen('{ ' + cmd + '; } ', 'r', 0)
            s = pipe.read(1)
            while s:
                sys.stdout.write(s); sys.stdout.flush()
                s = pipe.read(1)
            pipe.close()
        finally:
            # unmount dev and proc (even if the install failed)
            cmd = 'umount %s' % os.path.join(self.vpsroot, 'proc')
            commands.getoutput(cmd)
            cmd = 'umount %s' % os.path.join(self.vpsroot, 'dev', 'pts')
            commands.getoutput(cmd)
        print "DONE"

    def resolve_packages(self):
        """Resolve self.packages to concrete local RPM paths, downloading
        remote packages into cfg.RPM_CACHE as needed."""
        # XXX for whatever reason we were having a difficult time with
        # passing urls to rpm -i (as if its http implementation is
        # buggy - in some setups with proxy it just wouldn't work)
        # This walks through the list, looking for entries beginning
        # with 'http:', downloads them to a temporary location
        # (cfg.RPM_CACHE). For other packages it finds the matching
        # version of an rpm in self.distroot
        if not os.path.exists(cfg.RPM_CACHE):
            print 'Creating directory', cfg.RPM_CACHE
            os.mkdir(cfg.RPM_CACHE)
        ## read current dir or headers.info into a dict keyed by the
        ## beginning of a file
        pkgdict = {}
        if self.distroot.startswith('http://') or self.distroot.startswith('https://'):
            ### the distroot is a url
            # we rely on header.info file
            global header_info_cache
            if header_info_cache:
                hi = header_info_cache
            else:
                hi_url = os.path.join(self.distroot, 'headers/header.info')
                print 'Getting '+hi_url
                hi = urllib.urlopen(hi_url).readlines()
                # cache it
                header_info_cache = hi
            for line in hi:
                # header.info lines look like "N:name-ver-rel=path/to.rpm";
                # strip the epoch before the first ':' and split on '='.
                rpm_name, rpm_path = line.strip().split(':')[1].split('=')
                # package name is everything but the trailing version-release
                name = '-'.join(rpm_name.split('-')[:-2])
                pkgdict[name] = os.path.join(self.distroot, rpm_path)
        else:
            ### the distroot is a local directory
            files = os.listdir(self.distroot)
            files.sort()
            pkgdict = {}
            for f in files:
                # everything but the last two dash separated parts
                name = '-'.join(f.split('-')[:-2])
                pkgdict[name] = f
        ## go throught the list and pull the files as needed
        result = []
        for pkg in self.packages:
            if self.distroot.startswith('http://') or self.distroot.startswith('https://'):
                # if distroot is a url,
                if not (pkg.startswith('http://') or pkg.startswith('https://')):
                    # and this package is not a url, then replace a package name with its url
                    pkg = pkgdict[pkg]
            if pkg.startswith('http://') or pkg.startswith('https://'):
                # remote package
                basename = os.path.split(pkg)[1]
                cache_file = os.path.join(cfg.RPM_CACHE, basename)
                if not os.path.exists(cache_file):
                    print 'Retrieveing %s -> %s' % (pkg, cache_file)
                    f = urllib.urlopen(pkg)
                    s = f.read()
                    open(os.path.join(cfg.RPM_CACHE, basename), 'wb').write(s)
                else:
                    print 'Cached copy of %s exists as %s, not retrieving' % (basename, cache_file)
                result.append(cache_file)
            else:
                # non-specific package, resolve it
                result.append(os.path.join(self.distroot, pkgdict[pkg]))
        return result
class RedHat_Bundle_base(RedHatBundle):
    """Base-system bundle: installs the packages, then adapts the result
    for life inside a vserver (stub fstab/mtab, trimmed services, a
    simplified halt script, no klogd, no mingetty)."""

    desc = 'RedHat Base Abstract Bundle'

    # these are not actually services
    NOT_SERVICES = ['functions', 'killall', 'halt', 'single']

    # services left enabled; everything else is chkconfig'd off
    SERVICES = ['crond', 'atd', 'httpd', 'sendmail', 'sshd',
                'syslog', 'webmin', 'dovecot']

    def install(self):
        # call our super
        self.make_devs()
        RedHatBundle.install(self)
        self.make_devs() # yes, again
        self.make_tabs()
        self.fix_services()
        self.fix_halt()
        self.fix_syslog()
        self.make_i18n()
        self.fix_inittab()
        self.make_libexec_openvps()

    def fix_services(self):
        """ Disable certain services not necessary in vservers """
        print 'Turning off some services...'
        os.chdir(os.path.join(self.vpsroot, 'etc', 'init.d'))
        services = os.listdir('.')
        for service in services:
            if service in self.NOT_SERVICES:
                # helper scripts like 'functions', not real services
                continue
            else:
                onoff = ['off', 'on'][service in self.SERVICES]
                cmd = '%s %s /sbin/chkconfig --level 2345 %s %s' % (cfg.CHROOT, self.vpsroot, service, onoff)
                print ' ', cmd
                # Unbuffered pipe so chkconfig output is echoed as it arrives.
                pipe = os.popen('{ ' + cmd + '; } ', 'r', 0)
                s = pipe.read(1)
                while s:
                    sys.stdout.write(s); sys.stdout.flush()
                    s = pipe.read(1)
                pipe.close()

    def make_tabs(self):
        """ Make and /etc/fstab and an /etc/mtab """
        fname = os.path.join(self.vpsroot, 'etc', 'fstab')
        print 'Writing %s' % fname
        f = open(fname, 'w')
        f.write(cfg.FSTAB)
        f.close()
        os.chmod(fname, 0644)
        # this is only cosmetic, since the reference server never actually
        # "runs"
        fname = os.path.join(self.vpsroot, 'etc', 'mtab')
        print 'Writing %s' % fname
        f = open(fname, 'w')
        f.write('/dev/hdv1 / ext2 rw 1 1\n')
        f.close()
        os.chmod(fname, 0644)

    def fix_halt(self):
        """ Replace halt with a simpler version so the
        server stops cleanly"""
        fname = os.path.join(self.vpsroot, 'etc', 'init.d', 'halt')
        print 'Writing %s' % fname
        f = open(fname, 'w')
        # The replacement script: TERM then KILL everything, unmount all
        # non-essential filesystems, then reboot.
        f.write('#!/bin/bash\n'
                '#\n'
                '# halt This file is executed by init when it goes into runlevel\n'
                '# 0 (halt) or runlevel 6 (reboot). It kills all processes,\n'
                '# unmounts file systems and then either halts or reboots.\n'
                '#\n'
                '# This is an OpenHosting version of this file\n'
                'NOLOCALE=1\n'
                '. /etc/init.d/functions\n'
                'echo "Sending all processes the TERM signal..."\n'
                '/sbin/killall5 -15\n'
                'sleep 5\n'
                'echo "Sending all processes the KILL signal..."\n'
                '/sbin/killall5 -9\n\n'
                r"mount | awk '!/( \/ |^\/dev\/root|^\/dev\/ram| \/proc )/ { print $3 }' | \ "
                '\nwhile read line; do\n'
                ' umount -f $line\n'
                'done\n'
                '\n/sbin/reboot -n\n')
        f.close()

    def fix_syslog(self):
        """ Remove references to klogd in syslog service """
        fname = os.path.join(self.vpsroot, 'etc', 'init.d', 'syslog')
        print 'Removing klogd from %s' % fname
        result = []
        for line in open(fname):
            # a VPS has no kernel of its own, so drop kernel-logger lines
            if 'klogd' in line or 'kernel' in line:
                continue
            result.append(line)
        open(fname, 'w').writelines(result)

    def make_i18n(self):
        """Write etc/sysconfig/i18n and generate the en_US.UTF-8 locale."""
        print 'Creating etc/sysconfig/i18n.'
        open(os.path.join(self.vpsroot, 'etc/sysconfig/i18n'), 'w').write(
            'LANG="en_US.UTF-8"\n'
            'SUPPORTED="en_US.UTF-8:en_US:en"\n'
            'SYSFONT="latarcyrheb-sun16"\n')
        s = 'localedef -i en_US -c -f UTF-8 en_US.UTF-8'
        print 'Running', s
        cmd = '%s %s %s' % (cfg.CHROOT, self.vpsroot, s)
        commands.getoutput(cmd)

    def fix_inittab(self):
        # we do not want mingetty in the inittab
        file = os.path.join(self.vpsroot, 'etc/inittab')
        print 'Commenting out mingetty lines in', file
        lines = open(file).readlines()
        for n in range(len(lines)):
            if lines[n].find('mingetty') != -1:
                if not lines[n].strip().startswith('#'):
                    lines[n] ='#' + lines[n]
        open(file, 'w').writelines(lines)

    def make_libexec_openvps(self):
        """Move selected binaries (traceroute) aside into usr/libexec/openvps
        and replace them with our setuid wrappers."""
        libexec_dir = os.path.join(self.vpsroot, 'usr/libexec/openvps')
        print 'Making %s' % libexec_dir
        os.mkdir(libexec_dir)
        print 'Copying traceroute there'
        for path, short_name in [('bin/traceroute', 'traceroute'),]:
            # move the originals into libexec/oh
            dest_path = os.path.join(libexec_dir, short_name)
            shutil.move(os.path.join(self.vpsroot, path), dest_path)
            vsutil.set_file_immutable_unlink(dest_path)
            # now place our custom in their path
            dest_path = os.path.join(self.vpsroot, path)
            shutil.copy(os.path.join(cfg.OV_MISC, short_name), dest_path)
            # why can't I do setuid with os.chmod?
            cmd = 'chmod 04755 %s' % dest_path
            commands.getoutput(cmd)
            vsutil.set_file_immutable_unlink(dest_path)
class RedHat(Distro):
# these are not actually services
NOT_SERVICES = ['functions', 'killall', 'halt', 'single']
SERVICES = ['crond', 'atd', 'httpd', 'sendmail', 'sshd',
'syslog', 'webmin', 'dovecot']
def distro_version(self):
# is this a redhat distribution?
discinfo = self.read_from_distro('.discinfo')
if not discinfo:
return None
lines = discinfo.splitlines()[:7]
if len(lines) < 7:
# wrong file
return None
result = {}
try:
result['buildtime'] = time.localtime(float(lines[0].strip()))
result['name'] = lines[1].strip()
result['platform'] = lines[2]
# this is a comma-separated list of cd's provided here
result['volumes'] = lines[3]
result['base'] = lines[4]
result['RPMS'] = lines[5]
result['pixmaps'] = lines[6]
except "BLAH":
return None
return result
def vps_version(self):
try:
return open(os.path.join(self.vpsroot, 'etc/redhat-release')).read()
except:
return None
def vps_arch(self):
cmd = "/usr/bin/file -b --no-dereference %s/sbin/init" % self.vpsroot
s = commands.getoutput(cmd)
if 'x86-64' in s:
return 'x86_64'
elif '80386' in s:
return 'i386'
else:
return None
def fixflags(self):
# This routine sets immutable-unlink flags on all files,
# except those that are marked as config (or mentioned at all)
# in rpms
print 'Fixing flags in %s ... (this will take a while)' % self.vpsroot
# progress indicator
prog_size = 60
sys.stdout.write('[%s]' % (' '*prog_size)); sys.stdout.flush()
p = 0
# list all rpms
# (rpmlint is a good place to look at Python code when it comes
# to completely undocumented rpm-python)
ts = rpm.TransactionSet(self.vpsroot)
rpms = [item[1][rpm.RPMTAG_NAME] for item in ts.IDTXload()]
# a stupid trick. makes the progress indicator move slow at first
# then faster (probably because small rpms are towards the end).
rpms.reverse()
# this will prevent some warnings related to chroot
os.chdir(cfg.VSERVERS_ROOT)
for name in rpms:
# list files in the rpm
it = ts.dbMatch('name', name)
hdr = it.next()
# this creates a list of file in an rpm. the implementation
# is borrowed from rpmlint package, i don't really understand
# how it works, but it does.
files = hdr[rpm.RPMTAG_OLDFILENAMES]
if files == None:
basenames = hdr[rpm.RPMTAG_BASENAMES]
if basenames:
dirnames = hdr[rpm.RPMTAG_DIRNAMES]
dirindexes = hdr[rpm.RPMTAG_DIRINDEXES]
files=[]
if type(dirindexes) == types.IntType:
files.append(dirnames[dirindexes] + basenames[0])
else:
for idx in range(0, len(dirindexes)):
files.append(dirnames[dirindexes[idx]] + basenames[idx])
# now step through those files
for idx in xrange(len(files)):
# do we need a pacing sleep?
if p >= 1000:
# instead of writing a dot, write something meaningful
prog = int(rpms.index(name)/float(len(rpms))*prog_size)
sys.stdout.write('\b'*(prog_size+2))
sys.stdout.write('[%s%s]' % ('='*prog, ' '*(prog_size-prog)))
sys.stdout.flush()
p = 0
else:
p += 1
flags = hdr[rpm.RPMTAG_FILEFLAGS][idx]
if not flags & rpm.RPMFILE_CONFIG:
# (if not a config file)
file = files[idx]
# check against our cloning rules
c, t, s = self.match_path(file)
if c or t or s:
# skip it
continue
else:
abspath = os.path.join(self.vpsroot, file[1:])
if (os.path.exists(abspath)
and (not os.path.islink(abspath))
and (not os.path.isdir(abspath))):
# (do not make symlinks and dirs immutable)
vsutil.set_file_immutable_unlink(abspath)
vsutil.set_file_xid(abspath, 0)
# NOTE that under no circumstances we *unset* the flag. This
# is because e.g. usr/libexec/oh stuff must be iunlink, but
# is not in an rpm.
# reldst is the way it would look relative to self.vpsroot
sys.stdout.write('\b'*(prog_size+2))
sys.stdout.write('[%s]' % ('='*prog_size)); sys.stdout.flush()
print 'Done.'
    def clone(self, dest, pace=cfg.PACE[0]):
        """Clone the reference server at self.vpsroot into dest.

        pace: number of filesystem entries processed between throttling
        sleeps (cfg.PACE[1] seconds); 0/None disables throttling.
        """
        # pace counter
        p = 0
        # this will also strip trailing slashes
        source, dest = self.vpsroot, os.path.abspath(dest)
        print 'Cloning %s -> %s ... (this will take a while)' % (source, dest)
        # this will prevent some warnings
        os.chdir(cfg.VSERVERS_ROOT)
        self.copy(source, dest)
        for root, dirs, files in os.walk(source):
            for file in files + dirs:
                if pace and p >= pace:
                    # throttle: emit a progress dot and sleep briefly
                    sys.stdout.write('.'); sys.stdout.flush()
                    time.sleep(cfg.PACE[1])
                    p = 0
                else:
                    p += 1
                src = os.path.join(root, file)
                # reldst is they way it would look inside vserver
                reldst = os.path.join(max(root[len(source):], '/'), file)
                dst = os.path.join(dest, reldst[1:])
                # c, t, s: presumably copy/touch/skip flags from the cloning
                # rules — confirm against match_path.
                c, t, s = self.match_path(reldst)
                if not s:
                    # hardlink only files that are neither copy-rule matches
                    # nor rpm config files
                    link = not c and not self.is_config(source, reldst)
                    self.copy(src, dst, link=link, touch=t)
        print 'Done.'
        print 'Bytes copied:'.ljust(20), self.counter['bytes']
        print 'Links created:'.ljust(20), self.counter['lins']
        print 'Dirs copied:'.ljust(20), self.counter['drs']
        print 'Symlinks copied:'.ljust(20), self.counter['syms']
        print 'Touched files:'.ljust(20), self.counter['touchs']
        print 'Copied files:'.ljust(20), self.counter['copys']
        print 'Devices:'.ljust(20), self.counter['devs']
    # Cache of file -> {'isconfig': flag} lookups.
    # NOTE(review): as a class attribute this dict is shared by all
    # instances and mutated via self.rpm_cache — presumably intentional
    # (same rpm database); confirm before changing.
    rpm_cache = {}

    def is_config(self, root, file):
        """Return rpm's 'config file' flag for file; truthy when the file
        is a config file or unknown to rpm (so it gets copied, not linked)."""
        ts = rpm.TransactionSet(root)
        if not self.rpm_cache.has_key(file):
            hdr = self.rpm_which_package(ts, root, file)
            if not hdr:
                # assume it's config if not found, this will
                # make sure it is copied, not linked
                self.rpm_cache[file] = {'isconfig':1}
            else:
                # cache all files of the owning rpm in one shot
                self.rpm_cache.update(self.rpm_list_files(hdr))
            # it's possible that which_package thinks a package is of an rpm
            # but then it's not actually there
            if file not in self.rpm_cache:
                self.rpm_cache[file] = {'isconfig':1}
        # drop the transaction-set reference before returning
        ts = None
        return self.rpm_cache[file]['isconfig']
def rpm_which_package(self, ts, root, file):
# just like rpm -qf file
it = ts.dbMatch('basenames', file)
try:
hdr = it.next()
except StopIteration:
return None
#return hdr[rpm.RPMTAG_NAME]
return hdr
    def rpm_list_files(self, hdr):
        """Return {path: {'isconfig': flag}} for every file in header `hdr`."""
        # list files in an RPM.
        files=hdr[rpm.RPMTAG_OLDFILENAMES]
        if files == None:
            # newer rpms store paths as dirnames + basenames; join them up
            basenames = hdr[rpm.RPMTAG_BASENAMES]
            if basenames:
                dirnames = hdr[rpm.RPMTAG_DIRNAMES]
                dirindexes = hdr[rpm.RPMTAG_DIRINDEXES]
                files=[]
                # dirindexes is a bare int when the package has one file
                if type(dirindexes) == types.IntType:
                    files.append(dirnames[dirindexes] + basenames[0])
                else:
                    for idx in range(0, len(dirindexes)):
                        files.append(dirnames[dirindexes[idx]] + basenames[idx])
        # NOTE(review): if OLDFILENAMES is None and BASENAMES is empty,
        # `files` is still None here and len(files) below would raise --
        # presumably every header provides one of the two; confirm.
        # now stick in a dict
        result = {}
        for idx in xrange(len(files)):
            flags = hdr[rpm.RPMTAG_FILEFLAGS][idx]
            result[files[idx]] = {'isconfig': flags & rpm.RPMFILE_CONFIG}
        return result
    def add_user(self, userid, passwd):
        """ Add a user. This method will guess whether
        the password is already md5 hashed or not (in which
        case it will hash it) """
        print 'Adding user %s' % userid
        comment = 'User %s' % userid
        if passwd[0:3] == '$1$' and len(passwd) > 30:
            # this is a password hash (most likely)
            pass
        else:
            # plaintext: md5-hash it before handing it to adduser -p
            passwd = util.hash_passwd(passwd, md5=1)
        # run adduser chrooted inside the vserver; -G wheel for admin access
        cmd = "%s %s /usr/sbin/adduser -c '%s' -G wheel -p '%s' %s" % \
              (cfg.CHROOT, self.vpsroot, comment, passwd, userid)
        s = commands.getoutput(cmd)
    def set_user_passwd(self, userid, passwd):
        """ Sets password for userid. This method will guess whether
        the password is already md5 hashed or not (in which case it
        will hash it) """
        print 'Setting password for %s' % userid
        if passwd[0:3] == '$1$' and len(passwd) > 30:
            # this is a password hash (most likely)
            pass
        else:
            # plaintext: md5-hash it before handing it to usermod -p
            passwd = util.hash_passwd(passwd, md5=1)
        # run usermod chrooted inside the vserver
        cmd = "%s %s /usr/sbin/usermod -p '%s' %s" % \
              (cfg.CHROOT, self.vpsroot, passwd, userid)
        s = commands.getoutput(cmd)
def make_hosts(self, hostname, ip):
# call super
fqdn = Distro.make_hosts(self, hostname, ip)
# /etc/sysconfig/network. at least xinetd service looks at it
fname = os.path.join(self.vpsroot, 'etc', 'sysconfig', 'network')
open(fname, 'w').write('NETWORKING=yes\nHOSTNAME=%s\n' % fqdn)
    def fixup_rc(self):
        """Ensure etc/rc.d/rc ends with `true` so the runlevel script
        always exits successfully inside the vserver."""
        # /etc/rc.d/rc needs to end with true
        rc = os.path.join(self.vpsroot, 'etc/rc.d/rc')
        lines = open(rc).readlines()
        if not lines[-1] == 'true\n':
            print 'Appending true to %s' % rc
            lines.append('\ntrue\n')
            # rewrite the whole script with the trailing `true` added
            open(rc, 'w').writelines(lines)
        else:
            print 'Not appending true to %s as it is already there' % rc
    def stub_www_index_page(self):
        """ Create a stub default www page """
        # writes cfg.INDEX_HTML as the vserver's default Apache index page
        fname = os.path.join(self.vpsroot, 'var', 'www', 'html', 'index.html')
        print 'Writing %s' % fname
        f = open(fname, 'w')
        f.write(cfg.INDEX_HTML)
        f.close()
    def fix_services(self):
        """ Disable certain services not necessary in vservers """
        print 'Turning off some services...'
        os.chdir(os.path.join(self.vpsroot, 'etc', 'init.d'))
        services = os.listdir('.')
        for service in services:
            if service in self.NOT_SERVICES:
                # entries listed in NOT_SERVICES are left untouched
                continue
            else:
                # on if whitelisted in self.SERVICES, otherwise off
                onoff = ['off', 'on'][service in self.SERVICES]
                cmd = '%s %s /sbin/chkconfig --level 2345 %s %s' % (cfg.CHROOT, self.vpsroot, service, onoff)
                print ' ', cmd
                # unbuffered popen so chkconfig output streams through live
                pipe = os.popen('{ ' + cmd + '; } ', 'r', 0)
                s = pipe.read(1)
                while s:
                    sys.stdout.write(s); sys.stdout.flush()
                    s = pipe.read(1)
                pipe.close()
    def make_ssl_cert(self, hostname):
        """Generate a self-signed SSL certificate for `hostname` and install
        it for httpd, imapd/ipop3d, dovecot and webmin inside the vserver.

        A marker file (.ohcert) records that a certificate was already
        generated so repeated runs do not clobber an existing one.
        """
        if os.path.exists(os.path.join(self.vpsroot, 'etc/httpd/conf/ssl.crt/.ohcert')):
            print 'NOT generating an SSL certificate, it appears to be there already.'
            return
        print 'Generating an SSL certificate...'
        # now make a cert
        ssl_conf = cfg.SSL_CONFIG.replace('@SSL_HOSTNAME@', hostname)
        d = tempfile.mkdtemp()
        f = open(os.path.join(d, "ssl.cfg"), 'w')
        f.write(ssl_conf)
        f.close()
        # self-signed, valid ~10 years, key without passphrase (-nodes)
        s = commands.getoutput('openssl req -new -x509 -days 3650 -nodes -config %s '
                               '-out %s/server.crt -keyout %s/server.key' % (os.path.join(d, 'ssl.cfg'), d, d))
        print s
        s = commands.getoutput('openssl x509 -subject -dates -fingerprint -noout -in %s/server.crt' %d)
        print s
        shutil.copy(os.path.join(d, 'server.crt'), os.path.join(self.vpsroot, 'etc/httpd/conf/ssl.crt/server.crt'))
        shutil.copy(os.path.join(d, 'server.key'), os.path.join(self.vpsroot, 'etc/httpd/conf/ssl.key/server.key'))
        os.chmod(os.path.join(self.vpsroot, 'etc/httpd/conf/ssl.crt/server.crt'), 0700)
        os.chmod(os.path.join(self.vpsroot, 'etc/httpd/conf/ssl.key/server.key'), 0700)
        # several services expect cert+key concatenated into a single pem
        commands.getoutput('cat %s %s > %s' % (os.path.join(d, 'server.crt'), os.path.join(d, 'server.key'),
                                               os.path.join(self.vpsroot, 'usr/share/ssl/certs/imapd.pem')))
        commands.getoutput('cat %s %s > %s' % (os.path.join(d, 'server.crt'), os.path.join(d, 'server.key'),
                                               os.path.join(self.vpsroot, 'usr/share/ssl/certs/ipop3d.pem')))
        commands.getoutput('cat %s %s > %s' % (os.path.join(d, 'server.crt'), os.path.join(d, 'server.key'),
                                               os.path.join(self.vpsroot, 'etc/webmin/miniserv.pem')))
        commands.getoutput('cat %s %s > %s' % (os.path.join(d, 'server.crt'), os.path.join(d, 'server.key'),
                                               os.path.join(self.vpsroot, 'usr/share/ssl/certs/dovecot.pem')))
        commands.getoutput('cat %s %s > %s' % (os.path.join(d, 'server.crt'), os.path.join(d, 'server.key'),
                                               os.path.join(self.vpsroot, 'usr/share/ssl/private/dovecot.pem')))
        # clean up the temp working dir
        s = commands.getoutput('rm -rf %s' % d)
        print s
        # drop the marker so the next run skips generation
        open(os.path.join(self.vpsroot, 'etc/httpd/conf/ssl.crt/.ohcert'), 'w').write('')
    def fixup_crontab(self):
        """Install the rndsleep helper and a randomized etc/crontab so
        periodic jobs on many vservers do not all fire simultaneously."""
        print 'Adding rndsleep and randomized crontab'
        fname = os.path.join(self.vpsroot, 'usr/local/bin/rndsleep')
        open(fname, 'w').write(cfg.RNDSLEEP)
        # executable by everyone
        os.chmod(fname, 0755)
        open(os.path.join(self.vpsroot, 'etc/crontab'), 'w').write(cfg.CRONTAB)
    def webmin_passwd(self):
        """Copy the vserver's root password hash into webmin's user file
        (no-op when webmin is not installed)."""
        # copy root password to webmin
        if not os.path.exists(os.path.join(self.vpsroot, 'etc/webmin')):
            print 'webmin not installed, skipping'
            return
        else:
            print 'Setting webmin password'
            shadow = os.path.join(self.vpsroot, 'etc/shadow')
            root_hash = ''
            # pull root's hash (second field) out of /etc/shadow
            for line in open(shadow):
                if line.startswith('root:'):
                    root_hash = line.split(':')[1]
                    break
            musers = os.path.join(self.vpsroot, 'etc/webmin/miniserv.users')
            open(musers, 'w').write('root:%s:0' % root_hash)
            # owner-only: the file holds a password hash
            os.chmod(musers, 0600)
    def fixup_libexec_openvps(self):
        """Set the immutable-unlink flag (via vsutil) on helper binaries
        under usr/libexec/openvps."""
        # This sets the right permissions for the files in
        # usr/libexec/oh
        print 'Setting flags in usr/libexec/openvps'
        for file in ['traceroute',]:
            path = os.path.join(self.vpsroot, 'usr/libexec/openvps/', file)
            vsutil.set_file_immutable_unlink(path)
    def customize(self, name, xid, ip, userid, passwd, disklim, dns=cfg.PRIMARY_IP,
                  vpn_ip=None, vpn_mask='255.255.255.0'):
        """Run the generic Distro customization, then this distro's
        extra fixups (rc script, webmin password, libexec flags)."""
        # call super
        Distro.customize(self, name, xid, ip, userid, passwd, disklim, dns,
                         vpn_ip, vpn_mask)
        self.fixup_rc()
        self.webmin_passwd()
        self.fixup_libexec_openvps()
    def custcopy(self, source, name, userid, data={}, dns=cfg.PRIMARY_IP):
        """Post-clone customization; returns the xid from Distro.custcopy.

        NOTE(review): `data={}` is a shared mutable default -- safe only
        if Distro.custcopy never mutates it; verify.
        """
        xid = Distro.custcopy(self, source, name, userid, data, dns)
        self.fixup_rc()
        self.webmin_passwd()
        self.fixup_libexec_openvps()
        return xid
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from tensorflow.contrib.image.ops import gen_image_ops
from tensorflow.contrib.image.python.ops import image_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import googletest
# DTypes every op under test must support; a set literal replaces the
# redundant set([...]) wrapper around a list literal.
_DTYPES = {
    dtypes.uint8, dtypes.int32, dtypes.int64,
    dtypes.float16, dtypes.float32, dtypes.float64,
}
class ImageOpsTest(test_util.TensorFlowTestCase):
  """Functional tests for contrib.image rotate/translate/transform ops."""
  def test_zeros(self):
    # Rotating an all-zero image by any angle stays all-zero, for every
    # supported dtype and for 2-D as well as batched 4-D shapes.
    for dtype in _DTYPES:
      with self.cached_session():
        for shape in [(5, 5), (24, 24), (2, 24, 24, 3)]:
          for angle in [0, 1, np.pi / 2.0]:
            image = array_ops.zeros(shape, dtype)
            self.assertAllEqual(
                image_ops.rotate(image, angle).eval(),
                np.zeros(shape, dtype.as_numpy_dtype()))
  # TODO(b/133773834) Re-enable these tests.
  @unittest.skip("Skipping because of b/133773834.")
  def test_rotate_even(self):
    # Per-image angles on a batch of 6x6 ramps: identity, pi/4 and pi/2.
    for dtype in _DTYPES:
      with self.cached_session():
        image = array_ops.reshape(
            math_ops.cast(math_ops.range(36), dtype), (6, 6))
        image_rep = array_ops.tile(image[None, :, :, None], [3, 1, 1, 1])
        angles = constant_op.constant([0.0, np.pi / 4.0, np.pi / 2.0],
                                      dtypes.float32)
        image_rotated = image_ops.rotate(image_rep, angles)
        self.assertAllEqual(image_rotated[:, :, :, 0].eval(),
                            [[[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11],
                              [12, 13, 14, 15, 16, 17],
                              [18, 19, 20, 21, 22, 23],
                              [24, 25, 26, 27, 28, 29],
                              [30, 31, 32, 33, 34, 35]],
                             [[0, 3, 4, 11, 17, 0], [2, 3, 9, 16, 23, 23],
                              [1, 8, 15, 21, 22, 29], [6, 13, 20, 21, 27, 34],
                              [12, 18, 19, 26, 33, 33], [0, 18, 24, 31, 32, 0]],
                             [[5, 11, 17, 23, 29, 35], [4, 10, 16, 22, 28, 34],
                              [3, 9, 15, 21, 27, 33], [2, 8, 14, 20, 26, 32],
                              [1, 7, 13, 19, 25, 31], [0, 6, 12, 18, 24, 30]]])
  def test_rotate_odd(self):
    # Same idea with an odd-sized (5x5) image and angles pi/4, 1, -pi/2.
    for dtype in _DTYPES:
      with self.cached_session():
        image = array_ops.reshape(
            math_ops.cast(math_ops.range(25), dtype), (5, 5))
        image_rep = array_ops.tile(image[None, :, :, None], [3, 1, 1, 1])
        angles = constant_op.constant([np.pi / 4.0, 1.0, -np.pi / 2.0],
                                      dtypes.float32)
        image_rotated = image_ops.rotate(image_rep, angles)
        self.assertAllEqual(image_rotated[:, :, :, 0].eval(),
                            [[[0, 3, 8, 9, 0], [1, 7, 8, 13, 19],
                              [6, 6, 12, 18, 18], [5, 11, 16, 17, 23],
                              [0, 15, 16, 21, 0]],
                             [[0, 3, 9, 14, 0], [2, 7, 8, 13, 19],
                              [1, 6, 12, 18, 23], [5, 11, 16, 17, 22],
                              [0, 10, 15, 21, 0]],
                             [[20, 15, 10, 5, 0], [21, 16, 11, 6, 1],
                              [22, 17, 12, 7, 2], [23, 18, 13, 8, 3],
                              [24, 19, 14, 9, 4]]])
  def test_translate(self):
    # Shifting by (-1, -1) moves content up-left and zero-fills the edges.
    for dtype in _DTYPES:
      with self.cached_session():
        image = constant_op.constant(
            [[1, 0, 1, 0],
             [0, 1, 0, 1],
             [1, 0, 1, 0],
             [0, 1, 0, 1]], dtype=dtype)
        translation = constant_op.constant([-1, -1], dtypes.float32)
        image_translated = image_ops.translate(image, translation)
        self.assertAllEqual(image_translated.eval(),
                            [[1, 0, 1, 0],
                             [0, 1, 0, 0],
                             [1, 0, 1, 0],
                             [0, 0, 0, 0]])
  def test_compose(self):
    # A rotation composed with a translation applied as one transform.
    for dtype in _DTYPES:
      with self.cached_session():
        image = constant_op.constant(
            [[1, 1, 1, 0],
             [1, 0, 0, 0],
             [1, 1, 1, 0],
             [0, 0, 0, 0]], dtype=dtype)
        # Rotate counter-clockwise by pi / 2.
        rotation = image_ops.angles_to_projective_transforms(np.pi / 2, 4, 4)
        # Translate right by 1 (the transformation matrix is always inverted,
        # hence the -1).
        translation = constant_op.constant([1, 0, -1,
                                            0, 1, 0,
                                            0, 0],
                                           dtype=dtypes.float32)
        composed = image_ops.compose_transforms(rotation, translation)
        image_transformed = image_ops.transform(image, composed)
        self.assertAllEqual(image_transformed.eval(),
                            [[0, 0, 0, 0],
                             [0, 1, 0, 1],
                             [0, 1, 0, 1],
                             [0, 1, 1, 1]])
  def test_extreme_projective_transform(self):
    # A strongly projective transform (non-zero bottom row) must not crash
    # and maps most pixels outside the output (zero-filled).
    for dtype in _DTYPES:
      with self.cached_session():
        image = constant_op.constant(
            [[1, 0, 1, 0],
             [0, 1, 0, 1],
             [1, 0, 1, 0],
             [0, 1, 0, 1]], dtype=dtype)
        transformation = constant_op.constant([1, 0, 0, 0, 1, 0, -1, 0],
                                              dtypes.float32)
        image_transformed = image_ops.transform(image, transformation)
        self.assertAllEqual(image_transformed.eval(),
                            [[1, 0, 0, 0],
                             [0, 0, 0, 0],
                             [1, 0, 0, 0],
                             [0, 0, 0, 0]])
  def test_bilinear(self):
    # Compares BILINEAR and NEAREST interpolation on a pi/4 rotation.
    with self.cached_session():
      image = constant_op.constant(
          [[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 0, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]],
          dtypes.float32)
      # The following result matches:
      # >>> scipy.ndimage.rotate(image, 45, order=1, reshape=False)
      # which uses spline interpolation of order 1, equivalent to bilinear
      # interpolation.
      self.assertAllClose(
          image_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR").eval(),
          [[0.000, 0.000, 0.343, 0.000, 0.000],
           [0.000, 0.586, 0.914, 0.586, 0.000],
           [0.343, 0.914, 0.000, 0.914, 0.343],
           [0.000, 0.586, 0.914, 0.586, 0.000],
           [0.000, 0.000, 0.343, 0.000, 0.000]],
          atol=0.001)
      self.assertAllClose(
          image_ops.rotate(image, np.pi / 4.0, interpolation="NEAREST").eval(),
          [[0, 0, 1, 0, 0],
           [0, 1, 1, 1, 0],
           [1, 1, 0, 1, 1],
           [0, 1, 1, 1, 0],
           [0, 0, 1, 0, 0]])
  def test_bilinear_uint8(self):
    # Same bilinear rotation but on uint8 input (values rounded, not clipped).
    with self.cached_session():
      image = constant_op.constant(
          np.asarray(
              [[0.0, 0.0, 0.0, 0.0, 0.0],
               [0.0, 255, 255, 255, 0.0],
               [0.0, 255, 0.0, 255, 0.0],
               [0.0, 255, 255, 255, 0.0],
               [0.0, 0.0, 0.0, 0.0, 0.0]],
              np.uint8),
          dtypes.uint8)
      # == np.rint((expected image above) * 255)
      self.assertAllEqual(
          image_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR").eval(),
          [[0.0, 0.0, 87., 0.0, 0.0],
           [0.0, 149, 233, 149, 0.0],
           [87., 233, 0.0, 233, 87.],
           [0.0, 149, 233, 149, 0.0],
           [0.0, 0.0, 87., 0.0, 0.0]])
  def test_rotate_static_shape(self):
    # rotate() must preserve a fully-known static shape even for a
    # dynamically chosen angle.
    image = array_ops.diag([1., 2., 3.])
    result = image_ops.rotate(
        image, random_ops.random_uniform((), -1, 1), interpolation="BILINEAR")
    self.assertEqual(image.get_shape(), result.get_shape())
  def test_transform_static_output_shape(self):
    # An explicit constant output_shape must become the static result shape.
    image = constant_op.constant([[1., 2.], [3., 4.]])
    result = image_ops.transform(
        image, random_ops.random_uniform([8], -1, 1),
        output_shape=constant_op.constant([3, 5]))
    self.assertAllEqual([3, 5], result.get_shape())
  def _test_grad(self, shape_to_test):
    # Numeric-vs-analytic gradient check for transform() at a fixed rotation.
    with self.cached_session():
      test_image_shape = shape_to_test
      test_image = np.random.randn(*test_image_shape)
      test_image_tensor = constant_op.constant(
          test_image, shape=test_image_shape)
      test_transform = image_ops.angles_to_projective_transforms(
          np.pi / 2, 4, 4)
      output_shape = test_image_shape
      output = image_ops.transform(test_image_tensor, test_transform)
      left_err = gradient_checker.compute_gradient_error(
          test_image_tensor,
          test_image_shape,
          output,
          output_shape,
          x_init_value=test_image)
      self.assertLess(left_err, 1e-10)
  def _test_grad_different_shape(self, input_shape, output_shape):
    # Gradient check when transform() also resizes to output_shape.
    with self.cached_session():
      test_image_shape = input_shape
      test_image = np.random.randn(*test_image_shape)
      test_image_tensor = constant_op.constant(
          test_image, shape=test_image_shape)
      test_transform = image_ops.angles_to_projective_transforms(
          np.pi / 2, 4, 4)
      # transform() takes the spatial (H, W) part of the requested shape.
      if len(output_shape) == 2:
        resize_shape = output_shape
      elif len(output_shape) == 3:
        resize_shape = output_shape[0:2]
      elif len(output_shape) == 4:
        resize_shape = output_shape[1:3]
      output = image_ops.transform(
          images=test_image_tensor,
          transforms=test_transform,
          output_shape=resize_shape)
      left_err = gradient_checker.compute_gradient_error(
          test_image_tensor,
          test_image_shape,
          output,
          output_shape,
          x_init_value=test_image)
      self.assertLess(left_err, 1e-10)
  def test_grad(self):
    # Exercises 2-D, 3-D and 4-D inputs, both same-shape and resizing.
    self._test_grad([16, 16])
    self._test_grad([4, 12, 12])
    self._test_grad([3, 4, 12, 12])
    self._test_grad_different_shape([16, 16], [8, 8])
    self._test_grad_different_shape([4, 12, 3], [8, 24, 3])
    self._test_grad_different_shape([3, 4, 12, 3], [3, 8, 24, 3])
  def test_projective_transform_v1(self):
    """The original ImageProjectiveTransform op should take 2 arguments."""
    image = constant_op.constant([[[[1], [0]], [[0], [1]]]])
    transform = constant_op.constant([[1., 0., 0., 0., 1., 0., 0., 0.]])
    result = gen_image_ops.image_projective_transform(
        image, transform, interpolation="NEAREST")
    with self.cached_session():
      self.assertAllEqual([[[[1], [0]], [[0], [1]]]], result.eval())
  def test_transform_data_types(self):
    # The same constant transform must work for every supported dtype.
    for dtype in _DTYPES:
      image = constant_op.constant([[1, 2], [3, 4]], dtype=dtype)
      value = image_ops.transform(image, [1] * 8)
      with self.test_session(use_gpu=True):
        self.assertAllEqual(
            value.eval(),
            np.array([[4, 4], [4, 4]]).astype(dtype.as_numpy_dtype()))
  @test_util.run_in_graph_and_eager_modes
  def test_transform_eager(self):
    # transform() must also work under eager execution.
    image = constant_op.constant([[1., 2.], [3., 4.]])
    value = image_ops.transform(image, [1] * 8)
    with self.test_session(use_gpu=True):
      self.assertAllEqual(self.evaluate(value), np.array([[4, 4], [4, 4]]))
class BipartiteMatchTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.bipartite_match (-1 encodes "unmatched")."""
  def _BipartiteMatchTest(self, distance_mat, distance_mat_shape,
                          num_valid_rows,
                          expected_row_to_col_match,
                          expected_col_to_row_match):
    # Helper: run bipartite_match on distance_mat and compare both
    # returned assignments against the expected index vectors.
    distance_mat_np = np.array(distance_mat, dtype=np.float32).reshape(
        distance_mat_shape)
    expected_row_to_col_match_np = np.array(expected_row_to_col_match,
                                            dtype=np.int32)
    expected_col_to_row_match_np = np.array(expected_col_to_row_match,
                                            dtype=np.int32)
    with self.cached_session():
      distance_mat_tf = constant_op.constant(distance_mat_np,
                                             shape=distance_mat_shape)
      location_to_prior, prior_to_location = image_ops.bipartite_match(
          distance_mat_tf, num_valid_rows)
      location_to_prior_np = location_to_prior.eval()
      prior_to_location_np = prior_to_location.eval()
      self.assertAllEqual(location_to_prior_np, expected_row_to_col_match_np)
      self.assertAllEqual(prior_to_location_np, expected_col_to_row_match_np)
  def testBipartiteMatch(self):
    distance_mat = [0.5, 0.8, 0.1,
                    0.3, 0.2, 0.15]
    num_valid_rows = 2
    expected_row_to_col_match = [2, 1]
    expected_col_to_row_match = [-1, 1, 0]
    self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
                             expected_row_to_col_match,
                             expected_col_to_row_match)
    # The case of num_valid_rows less than num-of-rows-in-distance-mat.
    num_valid_rows = 1
    expected_row_to_col_match = [2, -1]
    expected_col_to_row_match = [-1, -1, 0]
    self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
                             expected_row_to_col_match,
                             expected_col_to_row_match)
    # The case of num_valid_rows being 0.
    num_valid_rows = 0
    expected_row_to_col_match = [-1, -1]
    expected_col_to_row_match = [-1, -1, -1]
    self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
                             expected_row_to_col_match,
                             expected_col_to_row_match)
    # The case of num_valid_rows less being -1.
    num_valid_rows = -1
    # The expected results are the same as num_valid_rows being 2.
    expected_row_to_col_match = [2, 1]
    expected_col_to_row_match = [-1, 1, 0]
    self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
                             expected_row_to_col_match,
                             expected_col_to_row_match)
# Allow running this test file directly as a script.
if __name__ == "__main__":
  googletest.main()
|
|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
# QingCloud API action names (wire-protocol "Action" parameter values) and
# a few resource-related constants, grouped by service area.
# --------- API Actions ---------
# Access Key
ACTION_DESCRIBE_ACCESS_KEYS = "DescribeAccessKeys"
# User
ACTION_DESCRIBE_SUB_USERS = "DescribeSubUsers"
ACTION_CREATE_SUB_USER = "CreateSubUser"
ACTION_MODIFY_SUB_USER_ATTRIBUTES = "ModifySubUserAttributes"
ACTION_DELETE_SUB_USERS = "DeleteSubUsers"
ACTION_RESTORE_SUB_USERS = "RestoreSubUsers"
# Notification Center
ACTION_DESCRIBE_NOTIFICATION_CENTER_USER_POSTS = "DescribeNotificationCenterUserPosts"
ACTION_CREATE_NOTIFICATION_LIST = "CreateNotificationList"
ACTION_DESCRIBE_NOTIFICATION_LISTS = "DescribeNotificationLists"
ACTION_MODIFY_NOTIFICATION_LIST_ATTRIBUTES = "ModifyNotificationListAttributes"
ACTION_DELETE_NOTIFICATION_LISTS = "DeleteNotificationLists"
ACTION_CREATE_NOTIFICATION_ITEMS = "CreateNotificationItems"
ACTION_DESCRIBE_NOTIFICATION_ITEMS = "DescribeNotificationItems"
ACTION_DELETE_NOTIFICATION_ITEMS = "DeleteNotificationItems"
ACTION_VERIFY_NOTIFICATION_ITEM = "VerifyNotificationItem"
# zones
ACTION_DESCRIBE_ZONES = "DescribeZones"
# jobs
ACTION_DESCRIBE_JOBS = "DescribeJobs"
# images
ACTION_DESCRIBE_IMAGES = "DescribeImages"
ACTION_CAPTURE_INSTANCE = "CaptureInstance"
ACTION_DELETE_IMAGES = "DeleteImages"
ACTION_MODIFY_IMAGE_ATTRIBUTES = "ModifyImageAttributes"
# instances
ACTION_DESCRIBE_INSTANCES = "DescribeInstances"
ACTION_RUN_INSTANCES = "RunInstances"
ACTION_RUN_INSTANCES_BY_CONFIGURATION = "RunInstancesByConfiguration"
ACTION_TERMINATE_INSTANCES = "TerminateInstances"
ACTION_START_INSTANCES = "StartInstances"
ACTION_RESTART_INSTANCES = "RestartInstances"
ACTION_STOP_INSTANCES = "StopInstances"
ACTION_RESIZE_INSTANCES = "ResizeInstances"
ACTION_RESET_INSTANCES = "ResetInstances"
ACTION_MODIFY_INSTANCE_ATTRIBUTES = "ModifyInstanceAttributes"
ACTION_CLONE_INSTANCES = "CloneInstances"
# instance groups
ACTION_CREATE_INSTANCE_GROUPS = "CreateInstanceGroups"
ACTION_DELETE_INSTANCE_GROUPS = "DeleteInstanceGroups"
ACTION_JOIN_INSTANCE_GROUP = "JoinInstanceGroup"
ACTION_LEAVE_INSTANCE_GROUP = "LeaveInstanceGroup"
ACTION_DESCRIBE_INSTANCE_GROUPS = "DescribeInstanceGroups"
# user data
ACTION_UPLOAD_USERDATA_ATTACHMENT = "UploadUserDataAttachment"
# volumes
ACTION_CLONE_VOLUMES = "CloneVolumes"
ACTION_DESCRIBE_VOLUMES = "DescribeVolumes"
ACTION_CREATE_VOLUMES = "CreateVolumes"
ACTION_DELETE_VOLUMES = "DeleteVolumes"
ACTION_ATTACH_VOLUMES = "AttachVolumes"
ACTION_DETACH_VOLUMES = "DetachVolumes"
ACTION_RESIZE_VOLUMES = "ResizeVolumes"
ACTION_MODIFY_VOLUME_ATTRIBUTES = "ModifyVolumeAttributes"
# key pair
ACTION_DESCRIBE_KEY_PAIRS = "DescribeKeyPairs"
ACTION_CREATE_KEY_PAIR = "CreateKeyPair"
ACTION_DELETE_KEY_PAIRS = "DeleteKeyPairs"
ACTION_ATTACH_KEY_PAIRS = "AttachKeyPairs"
ACTION_DETACH_KEY_PAIRS = "DetachKeyPairs"
ACTION_MODIFY_KEYPAIR_ATTRIBUTES = "ModifyKeyPairAttributes"
# security group
ACTION_DESCRIBE_SECURITY_GROUPS = "DescribeSecurityGroups"
ACTION_CREATE_SECURITY_GROUP = "CreateSecurityGroup"
ACTION_MODIFY_SECURITY_GROUP_ATTRIBUTES = "ModifySecurityGroupAttributes"
ACTION_APPLY_SECURITY_GROUP = "ApplySecurityGroup"
ACTION_REMOVE_SECURITY_GROUP = "RemoveSecurityGroup"
ACTION_DELETE_SECURITY_GROUPS = "DeleteSecurityGroups"
ACTION_DESCRIBE_SECURITY_GROUP_RULES = "DescribeSecurityGroupRules"
ACTION_ADD_SECURITY_GROUP_RULES = "AddSecurityGroupRules"
ACTION_DELETE_SECURITY_GROUP_RULES = "DeleteSecurityGroupRules"
ACTION_MODIFY_SECURITY_GROUP_RULE_ATTRIBUTES = "ModifySecurityGroupRuleAttributes"
ACTION_DESCRIBE_SECURITY_GROUP_IPSETS = "DescribeSecurityGroupIPSets"
ACTION_CREATE_SECURITY_GROUP_IPSET = "CreateSecurityGroupIPSet"
ACTION_DELETE_SECURITY_GROUP_IPSETS = "DeleteSecurityGroupIPSets"
ACTION_MODIFY_SECURITY_GROUP_IPSET_ATTRIBUTES = "ModifySecurityGroupIPSetAttributes"
# vxnets
ACTION_DESCRIBE_VXNETS = "DescribeVxnets"
ACTION_CREATE_VXNETS = "CreateVxnets"
ACTION_DELETE_VXNETS = "DeleteVxnets"
ACTION_JOIN_VXNET = "JoinVxnet"
ACTION_LEAVE_VXNET = "LeaveVxnet"
ACTION_MODIFY_VXNET_ATTRIBUTES = "ModifyVxnetAttributes"
ACTION_DESCRIBE_VXNET_INSTANCES = "DescribeVxnetInstances"
# router
ACTION_CREATE_ROUTERS = "CreateRouters"
ACTION_UPDATE_ROUTERS = "UpdateRouters"
ACTION_DELETE_ROUTERS = "DeleteRouters"
ACTION_JOIN_ROUTER = "JoinRouter"
ACTION_LEAVE_ROUTER = "LeaveRouter"
ACTION_POWEROFF_ROUTERS = "PowerOffRouters"
ACTION_POWERON_ROUTERS = "PowerOnRouters"
ACTION_DESCRIBE_ROUTERS = "DescribeRouters"
ACTION_DESCRIBE_ROUTER_VXNETS = "DescribeRouterVxnets"
ACTION_MODIFY_ROUTER_ATTRIBUTES = "ModifyRouterAttributes"
ACTION_MODIFY_ROUTER_STATIC_ATTRIBUTES = "ModifyRouterStaticAttributes"
ACTION_DESCRIBE_ROUTER_STATICS = "DescribeRouterStatics"
ACTION_ADD_ROUTER_STATICS = "AddRouterStatics"
ACTION_DELETE_ROUTER_STATICS = "DeleteRouterStatics"
ACTION_MODIFY_ROUTER_STATIC_ENTRY_ATTRIBUTES = "ModifyRouterStaticEntryAttributes"
ACTION_DESCRIBE_ROUTER_STATIC_ENTRIES = "DescribeRouterStaticEntries"
ACTION_ADD_ROUTER_STATIC_ENTRIES = "AddRouterStaticEntries"
ACTION_DELETE_ROUTER_STATIC_ENTRIES = "DeleteRouterStaticEntries"
# eip
ACTION_ASSOCIATE_EIP = "AssociateEip"
ACTION_DISSOCIATE_EIPS = "DissociateEips"
ACTION_ALLOCATE_EIPS = "AllocateEips"
ACTION_RELEASE_EIPS = "ReleaseEips"
ACTION_DESCRIBE_EIPS = "DescribeEips"
ACTION_MODIFY_EIP_ATTRIBUTES = "ModifyEipAttributes"
ACTION_CHANGE_EIPS_BANDWIDTH = "ChangeEipsBandwidth"
ACTION_CHANGE_EIPS_BILLING_MODE = "ChangeEipsBillingMode"
# dns alias
ACTION_DESCRIBE_DNS_ALIASES = "DescribeDNSAliases"
ACTION_ASSOCIATE_DNS_ALIAS = "AssociateDNSAlias"
ACTION_DISSOCIATE_DNS_ALIASES = "DissociateDNSAliases"
ACTION_GET_DNS_LABEL = "GetDNSLabel"
# lb
ACTION_DESCRIBE_LOADBALANCERS = "DescribeLoadBalancers"
ACTION_CREATE_LOADBALANCER = "CreateLoadBalancer"
ACTION_DELETE_LOADBALANCERS = "DeleteLoadBalancers"
ACTION_ASSOCIATE_EIPS_TO_LOADBALANCER = "AssociateEipsToLoadBalancer"
ACTION_DISSOCIATE_EIPS_FROM_LOADBALANCER = "DissociateEipsFromLoadBalancer"
ACTION_UPDATE_LOADBALANCERS = "UpdateLoadBalancers"
ACTION_STOP_LOADBALANCERS = "StopLoadBalancers"
ACTION_START_LOADBALANCERS = "StartLoadBalancers"
ACTION_MODIFY_LOADBALANCER_ATTRIBUTES = "ModifyLoadBalancerAttributes"
ACTION_DESCRIBE_LOADBALANCER_LISTENERS = "DescribeLoadBalancerListeners"
ACTION_ADD_LOADBALANCER_LISTENERS = "AddLoadBalancerListeners"
ACTION_DELETE_LOADBALANCER_LISTENERS = "DeleteLoadBalancerListeners"
ACTION_MODIFY_LOADBALANCER_LISTENER_ATTRIBUTES = "ModifyLoadBalancerListenerAttributes"
ACTION_ADD_LOADBALANCER_BACKENDS = "AddLoadBalancerBackends"
ACTION_DELETE_LOADBALANCER_BACKENDS = "DeleteLoadBalancerBackends"
ACTION_MODIFY_LOADBALANCER_BACKEND_ATTRIBUTES = "ModifyLoadBalancerBackendAttributes"
ACTION_DESCRIBE_LOADBALANCER_BACKENDS = "DescribeLoadBalancerBackends"
ACTION_CREATE_LOADBALANCER_POLICY = "CreateLoadBalancerPolicy"
ACTION_MODIFY_LOADBALANCER_POLICY_ATTRIBUTES = "ModifyLoadBalancerPolicyAttributes"
ACTION_DESCRIBE_LOADBALANCER_POLICIES = "DescribeLoadBalancerPolicies"
ACTION_DELETE_LOADBALANCER_POLICIES = "DeleteLoadBalancerPolicies"
ACTION_APPLY_LOADBALANCER_POLICY = "ApplyLoadBalancerPolicy"
ACTION_DESCRIBE_LOADBALANCER_POLICY_RULES = "DescribeLoadBalancerPolicyRules"
ACTION_ADD_LOADBALANCER_POLICY_RULES = "AddLoadBalancerPolicyRules"
ACTION_MODIFY_LOADBALANCER_POLICY_RULE_ATTRIBUTES = "ModifyLoadBalancerPolicyRuleAttributes"
ACTION_DELETE_LOADBALANCER_POLICY_RULES = "DeleteLoadBalancerPolicyRules"
ACTION_CREATE_SERVER_CERTIFICATE = "CreateServerCertificate"
ACTION_DESCRIBE_SERVER_CERTIFICATES = "DescribeServerCertificates"
ACTION_MODIFY_SERVER_CERTIFICATE_ATTRIBUTES = "ModifyServerCertificateAttributes"
ACTION_DELETE_SERVER_CERTIFICATES = "DeleteServerCertificates"
# monitor
ACTION_GET_MONITOR = "GetMonitor"
ACTION_GET_LOADBALANCER_MONITOR = "GetLoadBalancerMonitor"
# snapshot
ACTION_CREATE_SNAPSHOTS = "CreateSnapshots"
ACTION_DELETE_SNAPSHOTS = "DeleteSnapshots"
ACTION_APPLY_SNAPSHOTS = "ApplySnapshots"
ACTION_DESCRIBE_SNAPSHOTS = "DescribeSnapshots"
ACTION_MODIFY_SNAPSHOT_ATTRIBUTES = "ModifySnapshotAttributes"
ACTION_CAPTURE_INSTANCE_FROM_SNAPSHOT = "CaptureInstanceFromSnapshot"
ACTION_CREATE_VOLUME_FROM_SNAPSHOT = "CreateVolumeFromSnapshot"
# rdb
ACTION_DESCRIBE_RDBS = "DescribeRDBs"
ACTION_CREATE_RDB = "CreateRDB"
ACTION_RESIZE_RDBS = "ResizeRDBs"
ACTION_START_RDBS = "StartRDBs"
ACTION_STOP_RDBS = "StopRDBs"
# mongo
ACTION_DESCRIBE_MONGOS = "DescribeMongos"
ACTION_RESIZE_MONGOS = "ResizeMongos"
ACTION_START_MONGOS = "StartMongos"
ACTION_STOP_MONGOS = "StopMongos"
# cache
ACTION_DESCRIBE_CACHES = "DescribeCaches"
ACTION_CREATE_CACHE = "CreateCache"
ACTION_RESIZE_CACHES = "ResizeCaches"
ACTION_START_CACHES = "StartCaches"
ACTION_STOP_CACHES = "StopCaches"
# spark
ACTION_DESCRIBE_SPARKS = "DescribeSparks"
ACTION_START_SPARKS = "StartSparks"
ACTION_STOP_SPARKS = "StopSparks"
ACTION_ADD_SPARK_NODES = "AddSparkNodes"
ACTION_DELETE_SPARK_NODES = "DeleteSparkNodes"
ACTION_CREATE_SPARK = "CreateSpark"
ACTION_DELETE_SPARKS = "DeleteSparks"
# hadoop
ACTION_DESCRIBE_HADOOPS = "DescribeHadoops"
ACTION_START_HADOOPS = "StartHadoops"
ACTION_STOP_HADOOPS = "StopHadoops"
ACTION_ADD_HADOOP_NODES = "AddHadoopNodes"
ACTION_DELETE_HADOOP_NODES = "DeleteHadoopNodes"
ACTION_CREATE_HADOOP = "CreateHadoop"
ACTION_DELETE_HADOOPS = "DeleteHadoops"
# zk
ACTION_DESCRIBE_ZOOKEEPERS = "DescribeZookeepers"
ACTION_START_ZOOKEEPERS = "StartZookeepers"
ACTION_STOP_ZOOKEEPERS = "StopZookeepers"
# elasticsearch
ACTION_DESCRIBE_ELASTICSEARCHS = "DescribeElasticsearchs"
ACTION_START_ELASTICSEARCHS = "StartElasticsearchs"
ACTION_STOP_ELASTICSEARCHS = "StopElasticsearchs"
# queue
ACTION_DESCRIBE_QUEUES = "DescribeQueues"
ACTION_START_QUEUES = "StartQueues"
ACTION_STOP_QUEUES = "StopQueues"
# tag
ACTION_DESCRIBE_TAGS = "DescribeTags"
ACTION_CREATE_TAG = "CreateTag"
ACTION_DELETE_TAGS = "DeleteTags"
ACTION_MODIFY_TAG_ATTRIBUTES = "ModifyTagAttributes"
ACTION_ATTACH_TAGS = "AttachTags"
ACTION_DETACH_TAGS = "DetachTags"
# nic
ACTION_DESCRIBE_NICS = "DescribeNics"
ACTION_CREATE_NICS = "CreateNics"
ACTION_ATTACH_NICS = "AttachNics"
ACTION_DETACH_NICS = "DetachNics"
ACTION_MODIFY_NIC_ATTRIBUTES = "ModifyNicAttributes"
ACTION_DELETE_NICS = "DeleteNics"
# S2
ACTION_CREATE_S2_SERVER = "CreateS2Server"
ACTION_DESCRIBE_S2_SERVERS = "DescribeS2Servers"
ACTION_MODIFY_S2_SERVER = "ModifyS2ServerAttributes"
ACTION_RESIZE_S2_SERVERS = "ResizeS2Servers"
ACTION_DELETE_S2_SERVERS = "DeleteS2Servers"
ACTION_POWERON_S2_SERVERS = "PowerOnS2Servers"
ACTION_POWEROFF_S2_SERVERS = "PowerOffS2Servers"
ACTION_UPDATE_S2_SERVERS = "UpdateS2Servers"
ACTION_CHANGE_S2_SERVER_VXNET = "ChangeS2ServerVxnet"
ACTION_CREATE_S2_SHARED_TARGET = "CreateS2SharedTarget"
ACTION_DESCRIBE_S2_SHARED_TARGETS = "DescribeS2SharedTargets"
ACTION_DELETE_S2_SHARED_TARGETS = "DeleteS2SharedTargets"
ACTION_ENABLE_S2_SHARED_TARGETS = "EnableS2SharedTargets"
ACTION_DISABLE_S2_SHARED_TARGETS = "DisableS2SharedTargets"
ACTION_MODIFY_S2_SHARED_TARGET = "ModifyS2SharedTargetAttributes"
ACTION_ATTACH_TO_S2_SHARED_TARGET = "AttachToS2SharedTarget"
ACTION_DETACH_FROM_S2_SHARED_TARGET = "DetachFromS2SharedTarget"
ACTION_DESCRIBE_S2_DEFAULT_PARAMETERS = "DescribeS2DefaultParameters"
ACTION_CREATE_S2_GROUP = "CreateS2Group"
ACTION_DESCRIBE_S2_GROUPS = "DescribeS2Groups"
ACTION_MODIFY_S2_GROUP = "ModifyS2Group"
ACTION_DELETE_S2_GROUPS = "DeleteS2Groups"
ACTION_CREATE_S2_ACCOUNT = "CreateS2Account"
ACTION_DESCRIBE_S2_ACCOUNTS = "DescribeS2Accounts"
ACTION_MODIFY_S2_ACCOUNT = "ModifyS2Account"
ACTION_DELETE_S2_ACCOUNTS = "DeleteS2Accounts"
ACTION_ASSOCIATE_S2_ACCOUNT_GROUP = "AssociateS2AccountGroup"
ACTION_DISSOCIATE_S2_ACCOUNT_GROUP = "DissociateS2AccountGroup"
# Alarm
ACTION_DESCRIBE_ALARM_POLICIES = "DescribeAlarmPolicies"
ACTION_CREATE_ALARM_POLICY = "CreateAlarmPolicy"
ACTION_MODIFY_ALARM_POLICY_ATTRIBUTES = "ModifyAlarmPolicyAttributes"
ACTION_DELETE_ALARM_POLICIES = "DeleteAlarmPolicies"
ACTION_DESCRIBE_ALARM_POLICY_RULES = "DescribeAlarmPolicyRules"
ACTION_ADD_ALARM_POLICY_RULES = "AddAlarmPolicyRules"
ACTION_MODIFY_ALARM_POLICY_RULE_ATTRIBUTES = "ModifyAlarmPolicyRuleAttributes"
ACTION_DELETE_ALARM_POLICY_RULES = "DeleteAlarmPolicyRules"
ACTION_DESCRIBE_ALARM_POLICY_ACTIONS = "DescribeAlarmPolicyActions"
ACTION_ADD_ALARM_POLICY_ACTIONS = "AddAlarmPolicyActions"
ACTION_MODIFY_ALARM_POLICY_ACTION_ATTRIBUTES = "ModifyAlarmPolicyActionAttributes"
ACTION_DELETE_ALARM_POLICY_ACTIONS = "DeleteAlarmPolicyActions"
ACTION_ASSOCIATE_ALARM_POLICY = "AssociateAlarmPolicy"
ACTION_DISSOCIATE_ALARM_POLICY = "DissociateAlarmPolicy"
ACTION_APPLY_ALARM_POLICY = "ApplyAlarmPolicy"
ACTION_DESCRIBE_ALARMS = "DescribeAlarms"
ACTION_DESCRIBE_ALARM_HISTORY = "DescribeAlarmHistory"
# Billing
ACTION_GET_BALANCE = "GetBalance"
ACTION_GET_LEASE_INFO = "GetLeaseInfo"
# Collaboration
ACTION_DESCRIBE_SHARED_RESOURCE_GROUPS = "DescribeSharedResourceGroups"
ACTION_DESCRIBE_RESOURCE_GROUPS = "DescribeResourceGroups"
ACTION_CREATE_RESOURCE_GROUPS = "CreateResourceGroups"
ACTION_MODIFY_RESOURCE_GROUP_ATTRIBUTES = "ModifyResourceGroupAttributes"
ACTION_DELETE_RESOURCE_GROUPS = "DeleteResourceGroups"
ACTION_DESCRIBE_RESOURCE_GROUP_ITEMS = "DescribeResourceGroupItems"
ACTION_ADD_RESOURCE_GROUP_ITEMS = "AddResourceGroupItems"
ACTION_DELETE_RESOURCE_GROUP_ITEMS = "DeleteResourceGroupItems"
ACTION_DESCRIBE_USER_GROUPS = "DescribeUserGroups"
ACTION_CREATE_USER_GROUPS = "CreateUserGroups"
ACTION_MODIFY_USER_GROUP_ATTRIBUTES = "ModifyUserGroupAttributes"
ACTION_DELETE_USER_GROUPS = "DeleteUserGroups"
ACTION_DESCRIBE_USER_GROUP_MEMBERS = "DescribeUserGroupMembers"
ACTION_ADD_USER_GROUP_MEMBERS = "AddUserGroupMembers"
ACTION_MODIFY_USER_GROUP_MEMBER_ATTRIBUTES = "ModifyUserGroupMemberAttributes"
ACTION_DELETE_USER_GROUP_MEMBERS = "DeleteUserGroupMembers"
ACTION_DESCRIBE_GROUP_ROLES = "DescribeGroupRoles"
ACTION_CREATE_GROUP_ROLES = "CreateGroupRoles"
ACTION_MODIFY_GROUP_ROLE_ATTRIBUTES = "ModifyGroupRoleAttributes"
ACTION_DELETE_GROUP_ROLES = "DeleteGroupRoles"
ACTION_DESCRIBE_GROUP_ROLE_RULES = "DescribeGroupRoleRules"
ACTION_ADD_GROUP_ROLE_RULES = "AddGroupRoleRules"
ACTION_MODIFY_GROUP_ROLE_RULE_ATTRIBUTES = "ModifyGroupRoleRuleAttributes"
ACTION_DELETE_GROUP_ROLE_RULES = "DeleteGroupRoleRules"
ACTION_GRANT_RESOURCE_GROUPS_TO_USER_GROUPS = "GrantResourceGroupsToUserGroups"
ACTION_REVOKE_RESOURCE_GROUPS_FROM_USER_GROUPS = "RevokeResourceGroupsFromUserGroups"
ACTION_DESCRIBE_RESOURCE_USER_GROUPS = "DescribeResourceUserGroups"
# sdwan
# NOTE(review): "DescribeWanAccesss" (triple 's') looks like a typo but
# presumably matches the wire API name -- confirm before changing.
ACTION_DESCRIBE_WAN_ACCESS = "DescribeWanAccesss"
ACTION_CHANGE_WAN_ACCESS_BANDWIDTH = "ChangeWanAccessBandwidth"
ACTION_UPGRADE_WAN_ACCESS = "UpgradeWanAccess"
ACTION_GET_WAN_MONITOR = "GetWanMonitor"
ACTION_GET_WAN_INFO = "GetWanInfo"
# migrate
ACTION_MIGRATE_RESOURCES = "MigrateResources"
# VPC Border
ACTION_CREATE_VPC_BORDERS = "CreateVpcBorders"
ACTION_DELETE_VPC_BORDERS = "DeleteVpcBorders"
ACTION_DESCRIBE_VPC_BORDERS = "DescribeVpcBorders"
ACTION_JOIN_BORDER = "JoinBorder"
ACTION_LEAVE_BORDER = "LeaveBorder"
ACTION_CONFIG_BORDER = "ConfigBorder"
ACTION_MODIFY_BORDER_ATTRIBUTES = "ModifyBorderAttributes"
ACTION_DESCRIBE_BORDER_VXNETS = "DescribeBorderVxnets"
ACTION_ASSOCIATE_BORDER = "AssociateBorder"
ACTION_DISSOCIATE_BORDER = "DissociateBorder"
ACTION_DESCRIBE_BORDER_STATICS = "DescribeBorderStatics"
ACTION_ADD_BORDER_STATICS = "AddBorderStatics"
ACTION_DELETE_BORDER_STATICS = "DeleteBorderStatics"
ACTION_MODIFY_BORDER_STATIC_ATTRIBUTES = "ModifyBorderStaticAttributes"
ACTION_CANCEL_BORDER_STATIC_CHANGES = "CancelBorderStaticChanges"
# --------- Constants for resource ---------
# sg (security-group rule direction)
DIRECTION_EGRESS = 1
DIRECTION_INGRESS = 0
# vxnet
VXNET_TYPE_MANAGED = 1
VXNET_TYPE_UNMANAGED = 0
# lb (balance modes, forwarded-header bit flags, max-connection tiers)
BALANCE_ROUNDROBIN = "roundrobin"
BALANCE_LEASTCONN = "leastconn"
HEADER_X_FORWARD_FOR = 1
HEADER_QC_LBID = 2
HEADER_QC_LBIP = 4
LB_TYPE_MAXCONN_5k = 0
LB_TYPE_MAXCONN_20k = 1
LB_TYPE_MAXCONN_40k = 2
LB_TYPE_MAXCONN_100k = 3
LB_TYPE_MAXCONN_200k = 4
LB_TYPE_MAXCONN_500k = 5
# eip
EIP_BILLING_MODE_BANDWIDTH = "bandwidth"
EIP_BILLING_MODE_TRAFFIC = "traffic"
# cluster
ACTION_START_CLUSTERS = "StartClusters"
ACTION_STOP_CLUSTERS = "StopClusters"
ACTION_RESIZE_CLUSTER = "ResizeCluster"
ACTION_DESCRIBE_CLUSTERS = "DescribeClusters"
ACTION_DESCRIBE_CLUSTER_JOBS = "DescribeClusterJobs"
ACTION_ADD_CLUSTER_NODES = "AddClusterNodes"
ACTION_DELETE_CLUSTER_NODES = "DeleteClusterNodes"
ACTION_DELETE_CLUSTERS = "DeleteClusters"
ACTION_DEPLOY_APP_VERSION = "DeployAppVersion"
|
|
#!/usr/local/bin/python3
"""
containers
Python 3 version
This is a simple implementation of Stack, Queue, and Deque
Under the covers these are all derived from a doubly
linked list class
Clive Darke QA
"""
class _Node:
def __init__(self, Data):
self.NextNode = None
self.PrevNode = None
self.Data = Data
def SetNext(self, Next):
self.NextNode = Next
def SetPrev(self, Prev):
self.PrevNode = Prev
def __str__(self):
return str(self.Data)
def GetNext(self, current):
return current.NextNode
def GetPrev(self, current):
return current.PrevNode
################################################################################
class _DLinkList:
    """
    A minimal doubly linked list; base class for Stack, Queue and Deque.

    `__head` is the left end, `__tail` the right end; nodes are only
    added and removed at the ends.
    """
    # Separator used by __str__ when joining item strings.
    __delim = ','

    @classmethod
    def SetDelim(cls, delim):
        """Set the separator used by str() for all lists."""
        _DLinkList.__delim = delim

    def __init__(self):
        self.__head = None
        self.__tail = None

    def __str__(self):
        StrList = []
        node = self.__head
        while node:
            StrList.append(str(node))
            node = node.NextNode
        return _DLinkList.__delim.join(StrList)

    def __len__(self):
        # Walks the whole list on every call; maintaining a counter
        # attribute would make this O(1).
        count = 0
        node = self.__head
        while node:
            count += 1
            node = node.NextNode
        return count

    def _AddToLeft(self, Data):
        """Insert Data as the new head (left end)."""
        NewNode = _Node(Data)
        if self.__head:
            NewNode.SetNext(self.__head)
            self.__head.SetPrev(NewNode)
        else:
            # List was empty: the new node is also the tail.
            self.__tail = NewNode
        self.__head = NewNode
        NewNode.SetPrev(None)

    def _AddToRight(self, Data):
        """Insert Data as the new tail (right end)."""
        NewNode = _Node(Data)
        if self.__tail:
            NewNode.SetPrev(self.__tail)
            self.__tail.SetNext(NewNode)
        else:
            # List was empty: the new node is also the head.
            self.__head = NewNode
        self.__tail = NewNode
        self.__tail.SetNext(None)

    def _GetNextFromLeft(self, from_node):
        """Return the node right of from_node (the head when None given)."""
        if from_node is None:
            from_node = self.__head
        return from_node.NextNode

    def _GetNextFromRight(self, from_node):
        """Return the node left of from_node (the tail when None given)."""
        if from_node is None:
            from_node = self.__tail
        return from_node.PrevNode

    def _GetHead(self):
        return self.__head

    def _GetTail(self):
        return self.__tail

    def _RemoveFromLeft(self):
        """Detach the head node and return its Data."""
        if self.__head is None:
            raise IndexError("Attempt to remove from an empty container")
        OldHead = self.__head
        if self.__head is self.__tail:
            # BUG FIX: removing the only element must also clear the tail,
            # otherwise a later _AddToRight chains off a stale node.
            self.__head = None
            self.__tail = None
        else:
            self.__head = OldHead.NextNode
            if self.__head:
                # BUG FIX: the new head no longer has a left neighbour.
                self.__head.SetPrev(None)
        return OldHead.Data

    def _RemoveFromRight(self):
        """Detach the tail node and return its Data."""
        if self.__tail is None:
            raise IndexError("Attempt to remove from an empty container")
        OldTail = self.__tail
        if self.__tail is self.__head:
            self.__head = None
            self.__tail = None
        else:
            self.__tail = OldTail.PrevNode
            if self.__tail:
                self.__tail.SetNext(None)
        return OldTail.Data
################################################################################
class Stack(_DLinkList):
    """
    A First-In Last-Out (FILO) container.

    Items are pushed onto, and popped from, the left-hand end of the
    underlying doubly linked list.
    """

    def __init__(self):
        super(Stack, self).__init__()

    def __iter__(self):
        """Yield items from the top of the stack downwards."""
        node = self._GetHead()
        while node is not None:
            yield node.Data
            node = self._GetNextFromLeft(node)

    def push(self, data):
        """Place *data* on top of the stack."""
        self._AddToLeft(data)

    def pop(self):
        """Remove and return the item on top of the stack."""
        return self._RemoveFromLeft()
###############################################################################
class Queue(_DLinkList):
    """
    A First-In First-Out (FIFO) container.

    Items enter at the left end of the underlying doubly linked list
    and leave from the right end.
    """

    def __init__(self):
        super(Queue, self).__init__()

    def __iter__(self):
        """Yield items in the order they would be dequeued."""
        node = self._GetTail()
        while node is not None:
            yield node.Data
            node = self._GetNextFromRight(node)

    def enqueue(self, data):
        """Place *data* at the back of the queue."""
        self._AddToLeft(data)

    def dequeue(self):
        """Remove and return the item at the front of the queue."""
        return self._RemoveFromRight()
###############################################################################
class Deque(_DLinkList):
    """
    A double-ended queue: items may be added or removed at either end.
    """

    def __init__(self):
        super(Deque, self).__init__()

    def unshift(self, data):
        """Add *data* at the left end."""
        self._AddToLeft(data)

    def shift(self):
        """Remove and return the item at the left end."""
        return self._RemoveFromLeft()

    def push(self, data):
        """Add *data* at the right end."""
        self._AddToRight(data)

    def pop(self):
        """Remove and return the item at the right end."""
        return self._RemoveFromRight()

    def from_right(self):
        """Yield items right-to-left."""
        node = self._GetTail()
        while node is not None:
            yield node.Data
            node = self._GetNextFromRight(node)

    def from_left(self):
        """Yield items left-to-right."""
        node = self._GetHead()
        while node is not None:
            yield node.Data
            node = self._GetNextFromLeft(node)
###############################################################################
if __name__ == "__main__":
    # Demonstration / smoke test for the three container classes.
    import sys
    print("STACK:")
    astack = Stack()
    astack.push(42)
    astack.push(37)
    astack.push(99)
    astack.push(12)
    print("Number of items in stack:", len(astack))
    print("stack iterator")
    print("Should print 12, 99, 37, 42")
    try:
        for item in astack:
            print(item)
    except TypeError as err:
        print("****", err, file=sys.stderr)
    print("\nQUEUE:")
    aqueue = Queue()
    aqueue.enqueue(12)
    aqueue.enqueue(99)
    aqueue.enqueue(37)
    aqueue.enqueue(42)
    print("Number of items in queue:", len(aqueue))
    print("queue iterator")
    print("Should print 12, 99, 37, 42")
    try:
        for item in aqueue:
            print(item)
    except TypeError as err:
        print("****", err, file=sys.stderr)
    print("\nDEQUE:")
    adeque = Deque()
    adeque.push(42)
    adeque.push(37)
    adeque.push(99)
    adeque.push(12)
    # BUG FIX: this line reported len(aqueue) instead of the deque's
    # own length (copy-paste error).
    print("Number of items in deque:", len(adeque))
    try:
        print("deque from_right iterator")
        for item in adeque.from_right():
            print(item)
    except AttributeError as err:
        print("****", err, file=sys.stderr)
    try:
        print("deque from_left iterator")
        for item in adeque.from_left():
            print(item)
    except AttributeError as err:
        print("****", err, file=sys.stderr)
    # List comprehensions
    print("\nLists")
    print("stack: ", [element for element in astack])
    print("queue: ", [element for element in aqueue])
    print("deque (RTL): ", [element for element in adeque.from_right()])
    print("deque (LTR): ", [element for element in adeque.from_left()])
|
|
import re
from collections import namedtuple
from contextlib import contextmanager
import warnings
# Metadata record for a deprecated option: replacement key and removal version.
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
# BUG FIX: the field list had been copy-pasted from DeprecatedOption, but
# the rest of this module reads o.defval, o.doc, o.validator and o.cb.
RegisteredOption = namedtuple(
    'RegisteredOption',
    'key defval doc validator cb'
)
_deprecated_options = {}  # holds deprecated option metadata
_registered_options = {}  # holds registered option metadata
_global_config = {}  # holds the current values for registered options
_reserved_keys = ['all']  # keys which have a special meaning
class OptionError(AttributeError, KeyError):
    """
    Exception raised for option-related errors.

    Derives from both AttributeError and KeyError so existing
    ``except KeyError`` checks keep working.
    """
def _select_options(pat):
    """Return a list of registered keys matching `pat`.

    An exact key match short-circuits; the reserved pattern "all"
    returns every registered key.
    """
    if pat in _registered_options:
        return [pat]
    keys = sorted(_registered_options)
    if pat == 'all':  # reserved key
        return keys
    matcher = re.compile(pat, re.I)
    return [key for key in keys if matcher.search(key)]
def _get_deprecated_option(key):
    """
    Retrieve the metadata for a deprecated option, if `key` is deprecated.

    Returns
    -------
    DeprecatedOption (namedtuple) if key is deprecated, None otherwise
    """
    return _deprecated_options.get(key)
def _warn_if_deprecated(key):
    """
    Check if `key` is a deprecated option and, if so, issue a warning.

    Returns
    -------
    bool - True if `key` is deprecated, False otherwise.
    """
    d = _get_deprecated_option(key)
    if not d:
        return False
    if d.msg:
        # BUG FIX: a stray print() emitted the message twice -- once on
        # stdout and once through the warnings machinery; warn only.
        warnings.warn(d.msg, DeprecationWarning)
    else:
        msg = "'%s' is deprecated" % key
        if d.removal_ver:
            msg += ' and will be removed in %s' % d.removal_ver
        if d.rkey:
            msg += ", please use '%s' instead." % d.rkey
        else:
            msg += ', please refrain from using it.'
        warnings.warn(msg, DeprecationWarning)
    return True
def _translate_key(key):
    """
    If `key` is deprecated and a replacement key is defined, return the
    replacement key; otherwise return `key` as-is.
    """
    d = _get_deprecated_option(key)
    if d is None:
        return key
    return d.rkey or key
def _get_single_key(pat, silent):
    """Resolve `pat` to exactly one registered key.

    Raises OptionError when the pattern matches zero or multiple keys.
    Deprecation warnings are suppressed when `silent` is true.
    """
    matches = _select_options(pat)
    if not matches:
        if not silent:
            _warn_if_deprecated(pat)
        raise OptionError('No such keys(s): %r' % pat)
    if len(matches) > 1:
        raise OptionError('Pattern matched multiple keys')
    key = matches[0]
    if not silent:
        _warn_if_deprecated(key)
    return _translate_key(key)
def _get_root(key):
    """Walk the nested config dict along the dotted `key`.

    Returns the dict holding the final component and that component's name.
    """
    parts = key.split('.')
    cursor = _global_config
    for part in parts[:-1]:
        cursor = cursor[part]
    return cursor, parts[-1]
def _get_option(pat, silent=False):
    """Return the current value of the single option matching `pat`."""
    key = _get_single_key(pat, silent)
    # walk the nested dict down to the leaf holding the value
    parent, leaf = _get_root(key)
    return parent[leaf]
def _get_registered_option(key):
    """
    Retrieve the option metadata if `key` is a registered option.

    Returns
    -------
    RegisteredOption (namedtuple) if `key` is registered, None otherwise
    """
    try:
        return _registered_options[key]
    except KeyError:
        return None
def _set_option(*args, **kwargs):
    """
    Set one or more option values.

    Positional arguments form (pattern, value) pairs; the optional
    `silent` keyword suppresses deprecation warnings and callbacks'
    warnings.
    """
    # must at least 1 arg deal with constraints later
    nargs = len(args)
    if not nargs or nargs % 2 != 0:
        raise ValueError("Must provide an even number of non-keyword "
                         "arguments")
    # default to false
    silent = kwargs.pop('silent', False)
    if kwargs:
        raise TypeError('_set_option() got an unexpected keyword '
                        'argument "{0}"'.format(list(kwargs.keys())[0]))
    for k, v in zip(args[::2], args[1::2]):
        key = _get_single_key(k, silent)
        o = _get_registered_option(key)
        if o and o.validator:
            o.validator(v)
        # walk the nested dict
        root, k = _get_root(key)
        root[k] = v
        # BUG FIX: `o` may be None (the validator guard above shows this is
        # expected), so it must be checked before accessing o.cb.
        if o and o.cb:
            if silent:
                with warnings.catch_warnings(record=True):
                    o.cb(key)
            else:
                o.cb(key)
def _build_option_description(k):
    """ Builds a formatted description of a registered option """
    o = _get_registered_option(k)
    d = _get_deprecated_option(k)
    s = '%s ' % k
    # BUG FIX: `o` was dereferenced (o.doc) before the `if o:` None check
    # below -- a deprecated-but-unregistered key would crash here.
    if o and o.doc:
        s += '\n'.join(o.doc.strip().split('\n'))
    else:
        s += 'No description available.'
    if o:
        s += '\n [default: %s] [currently: %s]' % (
            o.defval,
            _get_option(k, True)
        )
    if d:
        s += '\n (Deprecated'
        s += (', use `%s` instead.' % d.rkey if d.rkey else '')
        s += ')'
    s += '\n\n'
    return s
def _describe_option(pat='', _print_desc=True):
    """
    Print (or return, when `_print_desc` is false) the description of
    every option matching `pat`.
    """
    keys = _select_options(pat)
    if len(keys) == 0:
        raise OptionError('No such keys(s)')
    # BUG FIX: `u('')` referenced an undefined helper (NameError); a plain
    # string literal is what was intended.
    s = ''
    for k in keys:  # filter by pat
        s += _build_option_description(k)
    if _print_desc:
        print(s)
    else:
        return s
|
|
from __future__ import unicode_literals, division, absolute_import
from datetime import datetime
from functools import partial
import json
import logging
from flexget.entry import Entry
from flexget.event import event
from flexget.plugin import PluginError
from flexget.utils import qualities
from flexget.utils.database import with_session
from requests.auth import AuthBase
from sqlalchemy import (Table, Column, Integer, String, ForeignKey, DateTime, Boolean)
from sqlalchemy.orm import relation, backref
from flexget import db_schema
from flexget.utils.requests import Session
from sqlalchemy.orm.exc import NoResultFound
log = logging.getLogger('t411_api')
# region ORM definitions
# Schema version handed to flexget's versioned database machinery.
SCHEMA_VER = 0
Base = db_schema.versioned_base('t411', SCHEMA_VER)
# Association table: many-to-many between categories and term types.
category_term_types = Table('category_term_types', Base.metadata,
                            Column('category_id', Integer, ForeignKey('categories.id')),
                            Column('term_type_id', Integer, ForeignKey('term_types.id')))
Base.register_table(category_term_types)
# Association table: many-to-many between torrents and terms.
torrent_terms = Table('torrent_terms', Base.metadata,
                      Column('torrent_id', Integer, ForeignKey('torrent.id')),
                      Column('term_id', Integer, ForeignKey('term.id')))
Base.register_table(torrent_terms)
@db_schema.upgrade('t411')
def upgrade(ver, session):
    """No migrations defined yet; report the current schema version."""
    return SCHEMA_VER
class Category(Base):
    """T411 category; forms a tree via the self-referential parent_id."""
    __tablename__ = 'categories'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    # NULL for main (top-level) categories.
    parent_id = Column(Integer, ForeignKey('categories.id'))
    sub_categories = relation('Category',
                              backref=backref('parent', remote_side=[id]),
                              cascade='all, delete, delete-orphan')
    # Term types applicable to this category (see category_term_types table).
    term_types = relation('TermType',
                          secondary=category_term_types,
                          backref='categories')
    torrents = relation('Torrent',
                        backref='category',
                        cascade='all, delete, delete-orphan')
class TermType(Base):
    """A kind of search criterion; owns a set of Term values."""
    __tablename__ = 'term_types'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    # Selection mode string as reported by the API -- TODO confirm values.
    mode = Column(String)
    terms = relation('Term',
                     backref='type',
                     cascade='all, delete, delete-orphan')
class Term(Base):
    """A concrete value belonging to a TermType."""
    __tablename__ = 'term'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    type_id = Column(Integer, ForeignKey('term_types.id'))
class Torrent(Base):
    """
    Immutable torrent information cached from the T411 API.
    """
    __tablename__ = 'torrent'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    rewrite_name = Column(String)
    category_id = Column(Integer, ForeignKey('categories.id'))
    terms = relation('Term',
                     secondary='torrent_terms',
                     backref='torrents')
    # Uploader's numeric id and display name on T411.
    owner = Column(Integer)
    username = Column(String)
class TorrentStatus(Base):
    """Timestamped record linked to a torrent -- presumably a status
    snapshot; TODO confirm intended use (no other columns defined)."""
    __tablename__ = 'torrent_status'
    id = Column(Integer, primary_key=True)
    torrent_id = Column(Integer, ForeignKey('torrent.id'))
    timestamp = Column(DateTime)
class Credential(Base):
    """Stored T411 account credentials and the cached API token."""
    __tablename__ = 'credential'
    username = Column(String, primary_key=True)
    password = Column(String, nullable=False)
    # Token obtained from /auth; None until first authentication.
    api_token = Column(String)
    default = Column(Boolean, nullable=False, default=False)
# endregion ORM definition
class FriendlySearchQuery(object):
    """
    A human-friendly search request expressed with names rather than
    T411 numeric ids; resolved to ids by T411Proxy before querying.
    """

    def __init__(self):
        # Free-text search expression (None means no keyword filter).
        self.expression = None
        # Name of the category to restrict the search to, if any.
        self.category_name = None
        # Names of the terms (criteria) to filter on.
        self.term_names = []
        # Maximum number of results to request.
        self.max_results = 10
# REST endpoint host and paths on the T411 API server.
T411API_DOMAIN_URL = "api.t411.in"
T411API_CATEGORY_TREE_PATH = "/categories/tree/"
T411API_AUTH_PATH = "/auth"
T411API_TERMS_PATH = "/terms/tree/"
T411API_SEARCH_PATH = "/torrents/search/"
T411API_DOWNLOAD_PATH = "/torrents/download/"
T411API_DETAILS_PATH = "/torrents/details/"
# Term type id used by T411 for the video-quality criterion.
T411_TERM_TYPE_ID_VIDEO_QUALITY = 7
# Maps T411 video-quality term ids onto flexget Quality objects.
T411_VIDEO_QUALITY_MAP = {
    8: qualities.get("bluray"),
    1171: qualities.get("bluray"),
    17: qualities.get("bluray 1080p"),
    1220: qualities.get("remux"),
    13: qualities.get("dvdrip"),
    14: qualities.get("dvdrip"),
    10: qualities.get("dvdrip"),
    1208: qualities.get("bluray 1080p"),
    1218: qualities.get("bluray 720p"),
    16: qualities.get("bluray 1080p"),
    1219: qualities.get("bluray"),
    15: qualities.get("bluray 720p"),
    11: qualities.get("tvrip"),
    1162: qualities.get("hdtv 1080p"),
    12: qualities.get("hdtv 720p"),
    18: qualities.get("ppvrip"),
    1233: qualities.get("webdl"),
    1174: qualities.get("webdl 1080p"),
    1182: qualities.get("webdl"),
    1175: qualities.get("webdl 720p"),
    19: qualities.get("webrip")
}
def auth_required(func):
    """
    Decorator ensuring the REST client holds an API token before the
    wrapped method executes, authenticating first when necessary.
    :param func: bound-style method of an object exposing
                 is_authenticated()/auth()
    :return: the wrapped method
    """
    def authenticated_call(self, *args, **kwargs):
        if not self.is_authenticated():
            log.debug('None API token. Authenticating with "%s" account...', self.credentials.get('username'))
            self.auth()
            assert self.is_authenticated()
        return func(self, *args, **kwargs)
    return authenticated_call
class ApiError(Exception):
    """
    Raised when the T411 server answers with a business error payload.
    """

    def __init__(self, code, description):
        # Parameter order is (code, description); callers rely on it.
        self.code = code
        self.description = description
class T411RestClient(object):
    """A REST client for T411 API"""

    @staticmethod
    def template_url(url_scheme='http'):
        """Return a '%s'-style URL template for the API server."""
        return url_scheme + '://' + T411API_DOMAIN_URL + '%s'

    @staticmethod
    def download_url(torrent_id, url_scheme='http'):
        """Return the download URL for the given torrent id."""
        return (T411RestClient.template_url(url_scheme) % T411API_DOWNLOAD_PATH) + str(torrent_id)

    def __init__(self, username=None, password=None, url_scheme='http'):
        self.credentials = {'username': username, 'password': password}
        self.api_token = None
        self.api_template_url = url_scheme + '://' + T411API_DOMAIN_URL + '%s'
        self.web_session = Session()

    def auth(self):
        """
        Request server to obtain a api token. Obtained
        token will be set for future usage of the client instance
        :return:
        """
        auth_url = self.api_template_url % T411API_AUTH_PATH
        response = self.web_session.post(auth_url, self.credentials)
        json_response = response.json()
        error_description = json_response.get('error', None)
        if error_description:
            log.error('%d - %s', json_response.get('code'), error_description)
        else:
            self.set_api_token(json_response.get('token'))

    def set_api_token(self, api_token):
        """
        Set the client for use an api token.
        :param api_token:
        :return:
        """
        self.api_token = api_token
        self.web_session.headers.update({'Authorization': self.api_token})

    def is_authenticated(self):
        """
        :return: True if an api token is set. Note that the client
        doesn't check if the token is valid (expired or wrong).
        """
        return self.api_token is not None

    @staticmethod
    def raise_on_fail_response(json_response):
        """
        Raise an ApiError if the server response carries an error message.
        :param json_response: decoded JSON payload, possibly None
        :return:
        """
        if json_response is None:
            # BUG FIX: was `pass`, which fell through to the .get() calls
            # below and crashed with AttributeError on None.
            return
        error_name = json_response.get('error', None)
        error_code = json_response.get('code', None)
        if error_name is not None:
            raise ApiError(error_code, error_name)

    def get_json(self, path, params=None):
        """
        Common method for requesting JSON response
        :param path:
        :return:
        """
        url = self.api_template_url % path
        request = self.web_session.get(url, params=params)
        try:
            result = request.json()
        except ValueError:
            # Some T411 responses prepend garbage; the JSON payload may
            # still be present on the last line of the body.
            log.debug("Response from %s was not JSON encoded. Attempting deep inspection...", path)
            try:
                last_line = request.text.splitlines()[-1]
                result = json.loads(last_line)
            except (ValueError, IndexError):
                log.warning("Server response doesn't contains any JSON encoded response.")
                raise
        T411RestClient.raise_on_fail_response(result)
        return result

    @auth_required
    def retrieve_category_tree(self):
        """
        Request T411 API for retrieving categories and their
        subcategories.
        :return: decoded JSON category tree
        """
        return self.get_json(T411API_CATEGORY_TREE_PATH)

    @auth_required
    def retrieve_terms_tree(self):
        """
        Request T411 API for retrieving term types and terms.
        :return: decoded JSON term tree
        """
        return self.get_json(T411API_TERMS_PATH)

    @auth_required
    def search(self, query):
        """
        Search torrent
        :param query: dict
        :param query['category_id']: Int optional
        :param query['result_per_page']: Int optional
        :param query['page_index']: Int optional
        :param query['terms']: (Term type id, Term id,)
        :return dict
        """
        url = T411API_SEARCH_PATH
        if query.get('expression') is not None:
            url += query['expression']
        url_params = {}
        if query.get('category_id') is not None:
            # using cat or cid will do the same result
            # but using cid without query expression will not broke
            # results
            url_params['cid'] = query['category_id']
        if query.get('result_per_page') is not None:
            url_params['limit'] = query['result_per_page']
        if query.get('page_index') is not None:
            url_params['offset'] = query['page_index']
        if query.get('terms') is not None:
            # Terms are sent as repeated term[<type_id>][] parameters.
            for (term_type_id, term_id) in query['terms']:
                term_type_key_param = 'term[' + str(term_type_id) + '][]'
                if url_params.get(term_type_key_param) is None:
                    url_params[term_type_key_param] = []
                url_params[term_type_key_param].append(term_id)
        return self.get_json(url, params=url_params)

    @auth_required
    def details(self, torrent_id):
        """Retrieve the detail JSON object for a torrent."""
        url = T411API_DETAILS_PATH + str(torrent_id)
        return self.get_json(url)
class T411ObjectMapper(object):
    """
    Tool class to convert JSON object from the REST client
    into object for ORM
    """
    # Timestamp format used by the T411 API ('added' field).
    date_format = "%Y-%m-%d %H:%M:%S"

    def map_category(self, json_category):
        """
        Parse one JSON object of a category (and its subcategories) to Category
        :param json_category: dict
        :return: Category, or None when the JSON entry is empty/invalid
        """
        # Some categories are empty, so we reject them
        if json_category.get('id') is None \
                or json_category.get('pid') is None \
                or json_category.get('name') is None:
            return None
        mapped_category = Category()
        mapped_category.id = int(json_category.get(u'id'))
        pid = int(json_category.get(u'pid'))
        if pid == 0:
            # pid == 0 marks a main (top-level) category.
            mapped_category.parent_id = None
        else:
            mapped_category.parent_id = pid
        mapped_category.name = json_category.get(u'name')
        json_sub_categories = json_category.get(u'cats')
        if json_sub_categories is not None:
            for json_sub_category in json_sub_categories.itervalues():
                mapped_sub_category = self.map_category(json_sub_category)
                # BUG FIX: empty sub-categories map to None and must not
                # be appended to the relation collection.
                if mapped_sub_category is not None:
                    mapped_category.sub_categories.append(mapped_sub_category)
        return mapped_category

    def map_category_tree(self, json_category_tree):
        """
        :param json_category_tree: dict
        :return array of main Category, dict of [Integer, Category]
        """
        indexed_categories = {}
        main_categories = []
        for json_main_category in json_category_tree.itervalues():
            main_category = self.map_category(json_main_category)
            if main_category is not None:
                main_categories.append(main_category)
                indexed_categories[main_category.id] = main_category
                for sub_category in main_category.sub_categories:
                    indexed_categories[sub_category.id] = sub_category
        return main_categories, indexed_categories

    @staticmethod
    def map_term_type_tree(json_tree):
        """
        :param json_tree: dict
        :return: (array of tupple, dict of TermType)
        """
        # term type definition can appears multiple times
        category_to_term_type = []  # relations category-term type
        term_types = {}  # term types, indexed by termtype id
        terms = {}  # terms, indexed by id
        for category_key, json_term_types in json_tree.iteritems():
            for term_type_key, term_type_content in json_term_types.iteritems():
                term_type_id = int(term_type_key)
                category_to_term_type.append((int(category_key), term_type_id))
                # if a term type has already parsed
                # then we just record the category-term type relation
                if term_type_id not in term_types:
                    term_type = TermType()
                    term_type.id = term_type_id
                    term_type.name = term_type_content.get('type')
                    term_type.mode = term_type_content.get('mode')
                    term_types[term_type.id] = term_type  # index term type
                    for term_id, term_name in term_type_content.get('terms').iteritems():
                        # Parsing & indexing terms
                        if term_id not in terms:
                            term = Term(id=int(term_id), name=term_name)
                            term_type.terms.append(term)
        return category_to_term_type, term_types

    @staticmethod
    def map_search_result_entry(json_entry, download_auth=None):
        """
        Parse json object of a torrent entry to flexget Entry
        :param download_auth: Requests authenticator
        """
        result = Entry()
        result['t411_torrent_id'] = int(json_entry['id'])
        result['title'] = json_entry['name']
        result['url'] = T411RestClient.download_url(json_entry['id'])
        result['t411_category'] = int(json_entry['category'])
        result['seeders'] = int(json_entry['seeders'])
        result['leechers'] = int(json_entry['leechers'])
        result['t411_comments'] = int(json_entry['comments'])
        # BUG FIX: was `is '1'` -- identity comparison of strings is
        # implementation-dependent; value equality is what is intended.
        result['t411_verified'] = json_entry['isVerified'] == '1'
        result['t411_pubdate'] = datetime.strptime(json_entry['added'], T411ObjectMapper.date_format)
        # API reports size in bytes; convert to MiB.
        result['content_size'] = int(json_entry['size']) / (1024 ** 2)
        result['t411_times_completed'] = int(json_entry['times_completed'])
        result['t411_category_name'] = json_entry['categoryname']
        result['t411_category_image'] = json_entry['categoryimage']
        result['t411_privacy'] = json_entry['privacy']
        result['t411_owner_id'] = int(json_entry['owner'])
        result['t411_owner_username'] = json_entry['username']
        result['download_auth'] = download_auth
        return result

    @staticmethod
    def map_details(json_details, resolver):
        """
        Parse json entry of details of a torrent entry
        to Torrent object.
        """
        result = Torrent()
        result.id = json_details.get('id')
        result.name = json_details.get('name')
        result.category_id = json_details.get('category')
        # Parse collection of termtype-termvalue
        for (term_type_name, terms_candidat) in json_details.get('terms').iteritems():
            if isinstance(terms_candidat, list):
                # Some terms type are multi-valuable, eg. Genres
                for term_name in terms_candidat:
                    term_entity = resolver(result.category_id, term_type_name, term_name)
                    if term_entity is not None:
                        result.terms.append(term_entity)
            else:
                term_entity = resolver(result.category_id, term_type_name, terms_candidat)
                if term_entity is not None:
                    result.terms.append(term_entity)
        return result
def cache_required(func):
    """
    Decorator ensuring the criteria cache is present in the database,
    triggering a synchronization first when it is not.
    :param func: bound-style method of an object exposing
                 has_cached_criterias()/synchronize_database()
    :return: the wrapped method
    """
    def cached_call(self, *args, **kwargs):
        if not self.has_cached_criterias():
            log.debug('None cached data. Synchronizing...')
            self.synchronize_database()
        return func(self, *args, **kwargs)
    return cached_call
class T411Proxy(object):
    """
    A T411 proxy service. This proxy interact both with
    T411 Rest Client and T411 local database.
    """

    def __init__(self, session=None):
        """
        :param session: flexget.manager.Session
        """
        self.rest_client = T411RestClient()
        self.mapper = T411ObjectMapper()
        # Lazily computed flag; None means "not checked yet".
        self.__has_cached_criterias = None

    def __set_credential(self, username=None, password=None, api_token=None):
        """Apply the given account/token to the underlying REST client."""
        self.rest_client.api_token = api_token
        self.rest_client.credentials = {
            'username': username,
            'password': password
        }

    @with_session
    def set_credential(self, username=None, session=None):
        """
        Set REST client credential from database
        :param username: if set, account's credential will be used.
        :return:
        """
        query = session.query(Credential)
        if username:
            query = query.filter(Credential.username == username)
        credential = query.first()
        if credential is None:
            raise PluginError('You cannot use t411 plugin without credentials. '
                              'Please set credential with "flexget t411 add-auth <username> <password>".')
        self.__set_credential(credential.username, credential.password, credential.api_token)

    @with_session
    def has_cached_criterias(self, session=None):
        """
        :return: True if database contains data of a previous synchronization
        """
        if self.__has_cached_criterias is None:
            self.__has_cached_criterias = session.query(Category).count() > 0
        return self.__has_cached_criterias

    @with_session
    def synchronize_database(self, session=None):
        """
        If database has been cleaned, this method
        will update it.
        :return:
        """
        log.debug('T411Proxy start database synchronization with T411')
        category_tree = self.rest_client.retrieve_category_tree()
        term_tree = self.rest_client.retrieve_terms_tree()
        main_categories, indexed_categories = self.mapper.map_category_tree(category_tree)
        category_to_term_type, term_types = self.mapper.map_term_type_tree(term_tree)
        log.debug('%d categories (%d are main categories) and %d term types retrieved',
                  len(indexed_categories),
                  len(main_categories),
                  len(term_types))
        for (category_id, term_type_id) in category_to_term_type:
            category = indexed_categories.get(category_id)
            term_type = term_types.get(term_type_id)
            category.term_types.append(term_type)
        session.add_all(main_categories)
        session.commit()
        # Force re-evaluation on the next has_cached_criterias() call.
        self.__has_cached_criterias = None

    @cache_required
    @with_session
    def find_categories(self, category_name=None, is_sub_category=False, session=None):
        """Return cached categories, optionally filtered by name and/or
        restricted to sub-categories."""
        query = session.query(Category)
        if category_name is not None:
            query = query.filter(Category.name == category_name)
        if is_sub_category:
            query = query.filter(Category.parent_id.isnot(None))
        return query.all()

    @cache_required
    @with_session
    def find_term_types(self, category_id=None, term_type_name=None, session=None):
        """Return the single term type with this name valid for the category."""
        query = session.query(TermType) \
            .filter(TermType.name == term_type_name) \
            .filter(TermType.categories.any(Category.id == category_id))
        return query.one()

    @cache_required
    @with_session
    def find_term_by_name(self, term_type_id, term_name, session=None):
        """Return the single term with this name belonging to the term type."""
        return session.query(Term) \
            .filter(Term.type_id == term_type_id) \
            .filter(Term.name == term_name) \
            .one()

    @cache_required
    @with_session
    def find_term(self, category_id, term_type_name, term_name, session=None):
        """Resolve a term by category id, term-type name and term name;
        returns None when no match is cached."""
        result = session.query(Term) \
            .filter(Term.type.has(TermType.categories.any(Category.id == category_id))) \
            .filter(Term.type.has(TermType.name == term_type_name)) \
            .filter(Term.name == term_name) \
            .first()
        return result

    @cache_required
    @with_session
    def main_categories(self, session=None):
        """Return all top-level (parentless) categories."""
        query = session.query(Category).filter(Category.parent_id.is_(None))
        return query.all()

    @cache_required
    @with_session
    def all_category_names(self, categories_filter='all', session=None):
        """
        Return category names.
        :param categories_filter: 'all', 'main' (top-level only) or 'sub'
        """
        name_query = session.query(Category.name)
        # BUG FIX: Query.filter() returns a NEW query (the result was being
        # discarded), and `is`/`is not` against None is plain Python identity,
        # which does not build a SQL expression -- use is_()/isnot() and
        # rebind the query.
        if categories_filter == 'sub':
            name_query = name_query.filter(Category.parent_id.isnot(None))
        elif categories_filter == 'main':
            name_query = name_query.filter(Category.parent_id.is_(None))
        return [name for (name,) in name_query.all()]

    @cache_required
    @with_session
    def all_term_names(self, session=None):
        """Return the names of every cached term."""
        name_query = session.query(Term.name).all()
        return [name for (name,) in name_query]

    @cache_required
    @with_session
    def friendly_query_to_client_query(self, friendly_query, session=None):
        """
        :param FriendlySearchQuery query:
        :return (,)[]: T411RestClient.search compatible
        """
        client_query = {'expression': friendly_query.expression}
        if friendly_query.category_name is not None:
            try:
                (category_id,) = session \
                    .query(Category.id) \
                    .filter(Category.name == friendly_query.category_name) \
                    .one()
                client_query['category_id'] = category_id
                log.debug('Category named "%s" resolved by id %d', friendly_query.category_name, category_id)
                if len(friendly_query.term_names) > 0:
                    # Build an OR of prefix matches over the requested names.
                    or_like = (Term.name.like(friendly_query.term_names[0] + '%'))
                    for term_name in friendly_query.term_names[1:]:
                        or_like |= (Term.name.like(term_name + '%'))
                    client_query['terms'] = session \
                        .query(Term.type_id, Term.id) \
                        .filter(or_like) \
                        .filter(TermType.categories.any(Category.id == category_id)) \
                        .filter(Term.type_id == TermType.id).all()
            except NoResultFound:
                log.warning('Unable to resolve category named %s', friendly_query.category_name)
                log.warning('Terms filter will be passed')
        if friendly_query.max_results is not None:
            client_query['result_per_page'] = friendly_query.max_results
            client_query['page_index'] = 0
        return client_query

    def search(self, query):
        """
        :param FriendlySearchQuery query:
        :return:
        """
        client_query = self.friendly_query_to_client_query(query)
        json_results = self.rest_client.search(client_query)
        json_torrents = json_results.get('torrents', [])
        # NOTE(review): filter()/map() and the len() calls below assume
        # Python 2 semantics (lists); on Python 3 these would be iterators.
        json_not_pending_torrents = filter(lambda x: not isinstance(x, int), json_torrents)
        log.debug("Search produces %d results including %d 'on pending' (the latter will not produces entries)",
                  len(json_torrents),
                  len(json_torrents) - len(json_not_pending_torrents))
        download_auth = T411BindAuth(self.rest_client.api_token)
        map_function = partial(T411ObjectMapper.map_search_result_entry, download_auth=download_auth)
        return map(map_function, json_not_pending_torrents)

    @cache_required
    @with_session
    def details(self, torrent_id, session=None):
        """
        WIP
        Download and store torrent details
        :param torrent_id:
        :return:
        """
        details = session \
            .query(Torrent) \
            .filter(Torrent.id == torrent_id) \
            .first()
        if details:
            return details
        else:
            log.debug('Torrent %d cache miss. Online retrieving...', torrent_id)
            # Cache dismiss, retrieve details via online way
            json_details = self.rest_client.details(torrent_id)

            def resolver(category_id, term_type_name, term_name):
                return self.find_term(category_id, term_type_name, term_name, session=session)

            details = self.mapper.map_details(json_details, resolver)
            session.add(details)
            session.commit()
            return details

    @with_session
    def add_credential(self, username, password, session=None):
        """
        Add a credential
        :param username: T411 username
        :param password: T411 password
        :return: False if username still has an entry (password has been updated)
        """
        credential = session.query(Credential).filter(Credential.username == username).first()
        if credential:
            credential.password = password
            # Invalidate the cached token; it belonged to the old password.
            credential.api_token = None
            result = False
        else:
            credential = Credential(username=username, password=password)
            session.add(credential)
            result = True
        session.commit()
        return result

    @cache_required
    @with_session
    def parse_terms_to_quality(self, terms, session=None):
        """
        If terms contains a term with the termtype 'video quality'
        then this function convert it into a flexget Quality
        else it return None
        :param terms: Array of Term
        :param session:
        :return: flexget.utils.Quality
        """
        video_quality_description = next((
            term for term in terms
            if term.get('term_type_id') == T411_TERM_TYPE_ID_VIDEO_QUALITY), None)
        if video_quality_description is not None:
            video_quality = T411_VIDEO_QUALITY_MAP.get(video_quality_description.get('term_id'))
            return video_quality
        else:
            return None
class T411BindAuth(AuthBase):
    """Requests auth hook injecting the T411 API token into each request."""

    def __init__(self, api_token):
        self.api_token = api_token

    def __call__(self, request):
        # T411 expects the raw token in the 'authorization' header.
        request.headers.update({'authorization': self.api_token})
        return request
@event('manager.db_cleanup')
def db_cleanup(manager, session):
    # Drop all cached T411 criteria; they are re-synchronized on demand
    # by T411Proxy.synchronize_database.
    session.query(Category).delete(synchronize_session=False)
    session.query(TermType).delete(synchronize_session=False)
    session.query(Term).delete(synchronize_session=False)
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to get random numbers, strings, etc.
The values returned by the various functions can be replaced in
templates to generate test cases.
"""
import math
import random
import sys
import uuid
# This script needs the utils.py and fuzzy_types.py modules in order
# to work. These files are copied by the setup.py script and not checked in
# next to this code, so we need to disable the style warning.
# pylint: disable=F0401
from resources import utils
from resources import fuzzy_types
import gatt_aliases
import wbt_fakes
# Strings that are used to generate the beginning of a test. The replacement
# fields are replaced by Get*Base() functions below to generate valid test
# cases.
# Note: doubled braces ({{ / }}) survive the later str.format() pass as
# literal JS braces; single-brace fields are the substitution points.
# Sets the fake adapter under test.
BASIC_BASE = \
    ' return setBluetoothFakeAdapter({fake_adapter_name})\n'\
    ' .then(() => {{\n'
# ...then requests a device filtered by one service UUID.
DEVICE_DISCOVERY_BASE = BASIC_BASE + \
    ' return requestDeviceWithKeyDown({{\n'\
    ' filters: [{{services: [{service_uuid}]}}]}});\n'\
    ' }})\n'\
    ' .then(device => {{\n'
# ...then opens a GATT connection to the discovered device.
CONNECTABLE_BASE = DEVICE_DISCOVERY_BASE + \
    ' return device.gatt.connect();\n'\
    ' }})\n'\
    ' .then(gatt => {{\n'
# ...then retrieves a single primary service.
SERVICE_RETRIEVED_BASE = CONNECTABLE_BASE + \
    ' return gatt.getPrimaryService({service_uuid});\n'\
    ' }})\n'\
    ' .then(services => {{\n'
# ...or retrieves all (optionally filtered) primary services.
SERVICES_RETRIEVED_BASE = CONNECTABLE_BASE + \
    ' return gatt.getPrimaryServices({optional_service_uuid});\n'\
    ' }})\n'\
    ' .then(services => {{\n'
# Continuations appended after a *_RETRIEVED_BASE; the TRANSFORM_PICK_A_SERVICE
# token is later replaced (see get_pick_a_service below).
CHARACTERISTIC_RETRIEVED_BASE = \
    ' TRANSFORM_PICK_A_SERVICE;\n'\
    ' return service.getCharacteristic({characteristic_uuid});\n'\
    ' }})\n'\
    ' .then(characteristics => {{\n'
CHARACTERISTICS_RETRIEVED_BASE = \
    ' TRANSFORM_PICK_A_SERVICE;\n'\
    ' return service.getCharacteristics({optional_characteristic_uuid});\n'\
    ' }})\n'\
    ' .then(characteristics => {{\n'
def _ToJsStr(s):
return u'\'{}\''.format(s)
def _get_random_number():
  """Returns a random non-negative integer on an exponential scale.

  The exponent range covers the full native integer width; presumably
  UniformExpoInteger picks an exponent uniformly in that range -- confirm
  against the utils implementation.
  """
  return utils.UniformExpoInteger(0, sys.maxsize.bit_length() + 1)
def _GetFuzzedJsString(s):
  """Returns a fuzzed string based on |s|.

  Args:
    s: The base string to fuzz.
  Returns:
    A single line string surrounded by quotes.
  """
  # Retry until FuzzyString's mutated bytes happen to decode as UTF-8.
  while True:
    fuzzed_string = fuzzy_types.FuzzyString(s)
    try:
      fuzzed_string = fuzzed_string.decode('utf8')
    except UnicodeDecodeError:
      print 'Can\'t decode fuzzed string. Trying again.'
    else:
      # Escape 'escape' characters.
      fuzzed_string = fuzzed_string.replace('\\', r'\\')
      # Escape quote characters.
      fuzzed_string = fuzzed_string.replace('\'', r'\'')
      # Put everything in a single line.
      fuzzed_string = '\\n'.join(fuzzed_string.split())
      return _ToJsStr(fuzzed_string)
def _get_array_of_random_ints(max_length, max_value):
  """Returns a string holding a JS array of random integers.

  Both the array length and each element value are sampled on an
  exponential scale (up to max_length and max_value respectively).
  """
  length = utils.UniformExpoInteger(0, math.log(max_length, 2))
  exp_max_value = math.log(max_value, 2)
  return '[{}]'.format(', '.join(
      str(utils.UniformExpoInteger(0, exp_max_value)) for _ in xrange(length))
  )
def _get_typed_array():
  """Generates a TypedArray constructor.

  There are nine types of TypedArrays and TypedArray has four constructors.
  Types:
    * Int8Array
    * Int16Array
    * Int32Array
    * Uint8Array
    * Uint16Array
    * Uint32Array
    * Uint8ClampedArray
    * Float32Array
    * Float64Array
  Constructors:
    * new TypedArray(length)
    * new TypedArray(typedArray)
    * new TypedArray(object)
    * new TypedArray(buffer)
  Returns:
    A string made up of a randomly chosen type and argument type from the
    lists above.
  """
  array_type = random.choice(['Int8Array', 'Int16Array', 'Int32Array',
                              'Uint8Array', 'Uint16Array', 'Uint32Array',
                              'Uint8ClampedArray', 'Float32Array',
                              'Float64Array'])
  # Choose an argument type at random. Note the typedArray/buffer choices
  # recurse into this function to build a nested constructor expression.
  arguments = random.choice([
      # length e.g. 293
      # We choose 2**10 as the upper boundry because the max length allowed
      # by WebBluetooth is 2**10.
      lambda: utils.UniformExpoInteger(0, 10),
      # typedArray e.g. new Uint8Array([1,2,3])
      _get_typed_array,
      # object e.g. [1,2,3]
      lambda: _get_array_of_random_ints(max_length=1000, max_value=2 ** 64),
      # buffer e.g. new Uint8Array(10).buffer
      lambda: _get_typed_array() + '.buffer',
  ])
  return 'new {array_type}({arguments})'.format(array_type=array_type,
                                                arguments=arguments())
def GetAdvertisedServiceUUIDFromFakes():
  """Picks one of the fake advertised services as a JS string literal."""
  advertised = random.choice(wbt_fakes.ADVERTISED_SERVICES)
  return _ToJsStr(advertised)
def get_service_uuid_from_fakes():
  """Picks one of the fake services and returns it as a JS string literal."""
  fake_service = random.choice(wbt_fakes.SERVICES)
  return _ToJsStr(fake_service)
def get_characteristic_uuid_from_fakes():
  """Picks one of the fake characteristics as a JS string literal."""
  fake_characteristic = random.choice(wbt_fakes.CHARACTERISTICS)
  return _ToJsStr(fake_characteristic)
def GetValidServiceAlias():
  """Picks a valid service alias and returns it as a JS string literal."""
  alias = random.choice(gatt_aliases.SERVICES)
  return _ToJsStr(alias)
def get_valid_characteristic_alias():
  """Returns a valid characteristic alias from the characteristic aliases list.

  (Docstring fix: this previously claimed to return a *service* alias, but
  the value is drawn from gatt_aliases.CHARACTERISTICS.)
  """
  return _ToJsStr(random.choice(gatt_aliases.CHARACTERISTICS))
def GetRandomUUID():
  """Returns a random UUID, a random number, or a fuzzed uuid/alias."""
  kind = random.choice(['uuid', 'number', 'fuzzed string'])
  if kind == 'number':
    return _get_random_number()
  if kind == 'uuid':
    return _ToJsStr(uuid.uuid4())
  # kind == 'fuzzed string': fuzz either a fresh uuid or a known alias.
  if random.choice(['uuid', 'alias']) == 'uuid':
    return _GetFuzzedJsString(str(uuid.uuid4()))
  return _GetFuzzedJsString(random.choice(gatt_aliases.SERVICES))
def GetAdvertisedServiceUUID():
  """Generates a random advertised Service UUID.

  80% of the time this returns a UUID from the fake adapters (so generated
  tests can actually interact with them), 10% a valid alias, and 10% a
  completely random value. See GetAdvertisedServiceUUIDFromFakes(),
  GetValidServiceAlias() and GetRandomUUID().

  Returns:
    A string or a number usable as a Service UUID by Web Bluetooth.
  """
  roll = random.random()
  if roll >= 0.9:
    return GetRandomUUID()
  if roll >= 0.8:
    return GetValidServiceAlias()
  return GetAdvertisedServiceUUIDFromFakes()
def get_service_uuid():
  """Generates a random Service UUID.

  Weighted like GetAdvertisedServiceUUID() but draws the common case from
  get_service_uuid_from_fakes(): 80% fake service, 10% valid alias, 10%
  random value.

  Returns:
    A string or a number usable as a Service UUID by Web Bluetooth.
  """
  roll = random.random()
  if roll >= 0.9:
    return GetRandomUUID()
  if roll >= 0.8:
    return GetValidServiceAlias()
  return get_service_uuid_from_fakes()
def get_characteristic_uuid():
  """Generates a random Characteristic UUID.

  Weighted like get_service_uuid(): 80% a characteristic from the fakes,
  10% a valid characteristic alias, 10% a random value. See
  get_characteristic_uuid_from_fakes(), get_valid_characteristic_alias()
  and GetRandomUUID().

  Returns:
    A string or a number usable as a Characteristic UUID by Web Bluetooth.
  """
  roll = random.random()
  if roll >= 0.9:
    return GetRandomUUID()
  if roll >= 0.8:
    return get_valid_characteristic_alias()
  return get_characteristic_uuid_from_fakes()
def GetRequestDeviceOptions():
  """Returns an options object for navigator.bluetooth.requestDevice."""
  # TODO(ortuno): Randomize the members, number of filters, services, etc.
  service = GetAdvertisedServiceUUID()
  return '{filters: [{services: [ ' + str(service) + ' ]}]}'
def GetBasicBase():
  """Returns a test prologue that sets a random fake adapter."""
  fake_adapter = random.choice(wbt_fakes.ALL_ADAPTERS)
  return BASIC_BASE.format(fake_adapter_name=_ToJsStr(fake_adapter))
def GetDeviceDiscoveryBase():
  """Returns a test prologue with all steps needed to find a device."""
  fake_adapter, fake_services = random.choice(wbt_fakes.ADAPTERS_WITH_DEVICES)
  service = random.choice(fake_services)
  return DEVICE_DISCOVERY_BASE.format(
      fake_adapter_name=_ToJsStr(fake_adapter),
      service_uuid=_ToJsStr(service))
def GetConnectableBase():
  """Generates a string that contains all steps to connect to a device.

  Returns: A string that:
    1. Sets an adapter to a fake adapter with a connectable device.
    2. Looks for the connectable device.
    3. Connects to it.
  """
  adapter, services = random.choice(wbt_fakes.ADAPTERS_WITH_DEVICES)
  # Bug fix: this previously formatted DEVICE_DISCOVERY_BASE, which stops
  # after requestDevice and never calls device.gatt.connect(), so step 3
  # of the docstring never happened. CONNECTABLE_BASE adds the connect step.
  return CONNECTABLE_BASE.format(
      fake_adapter_name=_ToJsStr(adapter),
      service_uuid=_ToJsStr(random.choice(services)))
def get_services_retrieved_base():
  """Returns a string that contains all steps to retrieve a service.

  Returns: A string that:
    1. Sets an adapter to a fake adapter with a connectable device with
       services.
    2. Use one of the device's services to look for that device.
    3. Connects to it.
    4. Retrieve the device's service used in 2.
  """
  adapter, services = random.choice(wbt_fakes.ADAPTERS_WITH_SERVICES)
  service_uuid = _ToJsStr(random.choice(services))
  # Randomly exercise both the singular and plural retrieval APIs.
  base = random.choice([SERVICE_RETRIEVED_BASE, SERVICES_RETRIEVED_BASE])
  # getPrimaryServices() accepts an optional filter, so the optional field
  # is randomly either empty or the same service uuid.
  return base.format(
      fake_adapter_name=_ToJsStr(adapter),
      service_uuid=service_uuid,
      optional_service_uuid=random.choice(['', service_uuid]))
def get_characteristics_retrieved_base():
  """Returns a string that contains all steps to retrieve a characteristic.

  Returns: A string that:
    1. Sets an adapter to a fake adapter with a connectable device with
       services.
    2. Use one of the device's services to look for that device.
    3. Connects to it.
    4. Retrieve the device's service used in 2.
    5. Retrieve a characteristic from that service.
  """
  adapter, services = random.choice(wbt_fakes.ADAPTERS_WITH_CHARACTERISTICS)
  service_uuid, characteristics = random.choice(services)
  service_uuid = _ToJsStr(service_uuid)
  characteristic_uuid = _ToJsStr(random.choice(characteristics))
  # The plural APIs take optional filters: randomly empty or the same uuid.
  optional_service_uuid = random.choice(['', service_uuid])
  optional_characteristic_uuid = random.choice(['', characteristic_uuid])
  # Randomly combine singular/plural service retrieval with singular/plural
  # characteristic retrieval.
  services_base = random.choice([SERVICE_RETRIEVED_BASE,
                                 SERVICES_RETRIEVED_BASE])
  characteristics_base = services_base + random.choice([
      CHARACTERISTIC_RETRIEVED_BASE,
      CHARACTERISTICS_RETRIEVED_BASE,
  ])
  return characteristics_base.format(
      fake_adapter_name=_ToJsStr(adapter),
      service_uuid=service_uuid,
      optional_service_uuid=optional_service_uuid,
      characteristic_uuid=characteristic_uuid,
      optional_characteristic_uuid=optional_characteristic_uuid)
def get_get_primary_services_call():
  """Returns a random getPrimaryService(s) call with random service UUIDs."""
  singular = u'getPrimaryService({service_uuid})'
  plural = u'getPrimaryServices({optional_service_uuid})'
  template = random.choice([singular, plural])
  # Both fields are always generated so the RNG stream is independent of
  # which template was chosen; format() ignores the unused one.
  return template.format(
      service_uuid=get_service_uuid(),
      optional_service_uuid=random.choice(['', get_service_uuid()]))
def get_characteristics_call():
  """Returns a random getCharacteristic(s) call with random UUIDs."""
  singular = u'getCharacteristic({characteristic_uuid})'
  plural = u'getCharacteristics({optional_characteristic_uuid})'
  template = random.choice([singular, plural])
  # Both fields are always generated; format() ignores the unused one.
  return template.format(
      characteristic_uuid=get_characteristic_uuid(),
      optional_characteristic_uuid=random.choice(
          ['', get_characteristic_uuid()]))
def get_pick_a_service():
  """Returns a string that picks a service from 'services'."""
  # 'services' may be defined by the GetPrimaryService(s) tokens.
  # The emitted JS handles both the singular case (a single service object)
  # and the plural case (an array), indexing the array with a fixed random
  # number mod its length. NOTE: sys.maxint is Python-2 only.
  string = \
      'var service; '\
      'if (typeof services !== \'undefined\') '\
      '  service = Array.isArray(services)'\
      '    ? services[{} % services.length]'\
      '    : services'
  return string.format(random.randint(0, sys.maxint))
def get_pick_a_characteristic():
  """Returns a string that picks a characteristic from 'characteristics'."""
  # 'characteristics' may be defined by the GetCharacteristic(s) tokens.
  # Mirrors get_pick_a_service(): handles both a single object and an
  # array, indexing with a fixed random number mod the array length.
  # NOTE: sys.maxint is Python-2 only.
  string = \
      'var characteristic; '\
      'if (typeof characteristics !== \'undefined\') '\
      '  characteristic = Array.isArray(characteristics)'\
      '    ? characteristics[{} % characteristics.length]'\
      '    : characteristics'
  return string.format(random.randint(0, sys.maxint))
def get_reload_id():
  """Returns a random numeric id, quoted as a JS string literal."""
  return _ToJsStr(_get_random_number())
def get_buffer_source():
  """Returns a new BufferSource.

  https://heycam.github.io/webidl/#BufferSource
  A BufferSource is either an ArrayBuffer, a DataView over one, or a
  TypedArray; one of the three is chosen at random.
  """
  choice = random.choice(['ArrayBuffer', 'DataView', 'TypedArray'])
  if choice == 'ArrayBuffer':
    # We choose 2**10 as the upper boundry because the max length allowed
    # by WebBluetooth is 2**10.
    return 'new ArrayBuffer({length})'.format(
        length=utils.UniformExpoInteger(0, 10))
  if choice == 'DataView':
    return 'new DataView({typed_array}.buffer)'.format(
        typed_array=_get_typed_array())
  if choice == 'TypedArray':
    return _get_typed_array()
|
|
from time import time, sleep
import sys
import os
import marshal
from multiprocessing import Process, JoinableQueue, cpu_count, current_process
def tanimoto_similarity(set1, set2):
    """
    Compute the Tanimoto (Jaccard) similarity of two sets.

    :param set1: first set to compare
    :param set2: second set to compare
    :return: |intersection| / |union| as a float, or 0 when the sets
        share no elements (including when either set is empty)
    """
    common = set1 & set2
    if not common:
        return 0
    return float(len(common)) / len(set1 | set2)
def calculate_similarity(v1, v2):
    """
    Decide whether two author records look like duplicates.

    Two authors must share at least one name token; given that, any overlap
    in affiliation, paper ids, or title tokens -- or missing paper/affiliation
    evidence on either side -- counts as a match.

    :param v1: dictionary values for author #1 (keys: names_set,
        affiliation_set, paperid_set, title_set)
    :param v2: dictionary values for author #2 (same keys)
    :return: 1 when the records match, 0 otherwise
    """
    if v1['names_set'] & v2['names_set']:
        if v1['affiliation_set'] & v2['affiliation_set']:
            return 1
        if v1['paperid_set'] & v2['paperid_set']:
            return 1
        if v1['title_set'] & v2['title_set']:
            return 1
        # No positive evidence, but one side lacks paper/affiliation data
        # entirely -- treat a bare name match as sufficient.
        if not (v1['paperid_set'] and v1['affiliation_set']) or not (v2['paperid_set'] and v2['affiliation_set']):
            return 1
    # Fix: previously fell off the end and returned None implicitly.
    # Callers compare the result with `> 0.5`, which silently worked on
    # Python 2 but raises TypeError on Python 3; return 0 explicitly.
    return 0
def create_submission_csv(dataset, output_file='submission_track2.csv'):
    """ Function creates submission csv file for kdd2013 track2

    Each row is "<author_id>,<author_id> <dup1> <dup2> ..." -- the author id
    itself is always the first entry of the duplicates column.

    :param dataset: mapping of author id -> iterable of duplicate author ids
    :param output_file: name of csv file where to store the results
    """
    # Fixes: the original leaked the file handle (no close/flush guarantee)
    # and used the Python-2-only dict.iteritems(); 'with' + .items() work
    # on both interpreters.
    with open(output_file, 'w') as f:
        f.write('AuthorId,DuplicateAuthorIds\n')
        #walk through the data set and create a string for each authorid
        for author_id, duplicates in dataset.items():
            dup_authors_string = ''.join(' ' + str(elem) for elem in duplicates)
            f.write(str(author_id) + ',' + str(author_id) + dup_authors_string + '\n')
def divide_for_cpu_units(num_authors, cpu_units):
    """ Function divides interval [0, number_of_authorid] into subintervals
    so that every cpu unit will get equal number of computations

    The pairwise loop over interval (b, e) does roughly sum(b..e) work, so
    equal-work intervals are found by accumulating an arithmetic progression
    until each slice reaches 1/cpu_units of the total.

    :param num_authors: the number of the authors
    :param cpu_units: number of cpu units (actually processes to run)
    :return: sorted list of (begin, end) index pairs
    """
    lst = []
    # k intentionally persists across iterations: each pass extends the
    # accumulated progression further, so successive intervals start lower.
    k = 0
    for i in range(cpu_units):
        sum_intermediate = 0
        #sum of arithmetic progression divided by number of the cpu units
        while sum_intermediate <= (num_authors - 1) * num_authors / (2 * cpu_units):
            sum_intermediate += k
            k += 1
        #if at least one interval has been calculated, use its boundary
        # and take measure to avoid negative boundaries
        if lst:
            z = lst[-1][0]
            lst.append((0 if (num_authors - k + 1) < 0 else (num_authors - k + 1), z - 1))
        #calculate the first interval
        else:
            lst.append((num_authors - k + 2, num_authors))
    return sorted(lst)
def find_similar_authors(extended_dataset, begin, end, maxrows, queue):
    """
    Function calculates similarity between each pair of authorid.
    If similarity is greater than threshold, we add corresponding
    authorid's to resulting dictionary.
    This function is executed in multiprocessing way
    :param extended_dataset: data set from dump with extended information on authors
    :param begin: lower boundary of authorid interval
    :param end: upper boundary of authorid interval
    :param maxrows: maximal number of rows in data set to handle
    :param queue: queue for IPC
    """
    print('Beginning handling: process %d' % current_process().pid)
    t0 = time()
    #we need to access authorid in order
    author_ids = sorted(extended_dataset.keys())
    #resulting dictionary
    # Every author id gets an entry up front, even outside [begin, end),
    # so the per-process dicts all share the same key set (the merge step
    # in __main__ relies on this).
    authorids_dict = {}
    for authorid in extended_dataset.keys():
        if not (authorid in authorids_dict):
            authorids_dict[authorid] = []
    # Pairwise scan of the assigned slice against all later rows; matches
    # are recorded symmetrically on both author ids.
    for i in range(begin, end):
        for j in range(i + 1, maxrows):
            sim = calculate_similarity(extended_dataset[author_ids[i]], extended_dataset[author_ids[j]])
            if sim > 0.5:
                authorids_dict[author_ids[i]].append(author_ids[j])
                authorids_dict[author_ids[j]].append(author_ids[i])
    print('pid:%d, %.3f, begin:%d, end:%d' % (os.getpid(), time() - t0, begin, end))
    t0 = time()
    #enqueue the result
    queue.put(authorids_dict)
    print('Put in queue in %.3f' % (time() - t0))
    print('Finishing handling: process %d' % current_process().pid)
def multi_process(dataset, cpu_units):
    """
    Multiprocessing is here
    :param dataset: data set from dump with extended information on authors
    :param cpu_units: number of cpu units (actually processes to run)
    :return: list of per-process result dictionaries (one per worker)
    """
    intervals = divide_for_cpu_units(len(dataset), cpu_units)
    process_list = []
    similarities = []
    #queue for results
    q = JoinableQueue()
    for i in range(cpu_units):
        p = Process(target=find_similar_authors, args=(dataset, intervals[i][0], intervals[i][1], len(dataset), q))
        process_list.append(p)
    for p in process_list:
        p.start()
    # Busy-wait until every worker has enqueued its result.
    # NOTE(review): Queue.qsize() is documented as unreliable and raises
    # NotImplementedError on macOS -- confirm target platform.
    while q.qsize() < cpu_units:
        sleep(1)
    #get the results from queue
    while q.qsize() > 0:
        try:
            queue_data = q.get()
            if len(queue_data) > 0:
                print('Getting data from queue: process %d' % current_process().pid)
                similarities.append(queue_data)
                print('Results gathered %d' % len(similarities))
            else:
                break
        except:
            print 'Queue is empty'
            break
    # Results are drained before join() on purpose: joining a process that
    # still holds queued data can deadlock.
    for p in process_list:
        print('Joining process %d' % p.pid)
        p.join()
    return similarities
if __name__ == '__main__':
    # Entry point: load the marshalled author dataset, find duplicate
    # authors in parallel, merge the per-process results and write the
    # submission csv.
    cur_f = __file__.split('/')[-1]
    if len(sys.argv) != 2:
        print >> sys.stderr, 'usage: ' + cur_f + ' <dump>'
        sys.exit(1)
    else:
        #read prepared dictionary from dump
        try:
            cpu_units = cpu_count()
            t0 = time()
            t_start = t0
            dump = open(sys.argv[1], 'rb')
            dataset = marshal.load(dump)
            print('Dump loaded in %.3f seconds' % (time() - t0))
            #calculate pairwise similarities in parallel, use several processes
            t0 = time()
            sim = []
            print('Main process: %d' % current_process().pid)
            sim = multi_process(dataset, cpu_units)
            print('Similar authors found in %.3f seconds' % (time() - t0))
            #merging dictionaries together
            # Assumes every worker dict contains every author id (ensured
            # by find_similar_authors) and that len(sim) == cpu_units.
            t0 = time()
            for d in range(1, cpu_units):
                for m in sim[0].keys():
                    #two lists are added
                    sim[0][m] += sim[d][m]
            #get rid of repeating id's
            for m in sim[0].keys():
                sim[0][m] = list(set(sim[0][m]))
            print('Resulting dictionaries united in %.3f seconds' % (time() - t0))
            t0 = time()
            create_submission_csv(sim[0])
            print('Submission csv-file created in %.3f seconds' % (time() - t0))
            print('TOTAL TIME: %.3f seconds' % (time() - t_start))
        except Exception, e:
            # Catch-all keeps the script from dying with a bare traceback;
            # the original error is printed and swallowed.
            print e
|
|
# Copyright (c) 2022, Manfred Moitzi
# License: MIT License
# Immutable spatial search tree based on the SsTree implementation of the book
# "Advanced Algorithms and Data Structures"
# - SsTree JavaScript source code:
# (c) 2019, Marcello La Rocca, released under the GNU Affero General Public License v3.0
# https://github.com/mlarocca/AlgorithmsAndDataStructuresInAction/tree/master/JavaScript/src/ss_tree
# - Research paper of Antonin Guttman:
# http://www-db.deis.unibo.it/courses/SI-LS/papers/Gut84.pdf
import statistics
from typing import List, Iterator, Tuple, Callable, Sequence, Iterable
import abc
import math
from ezdxf.math import BoundingBox, AnyVec, Vec3, spherical_envelope
__all__ = ["RTree"]
INF = float("inf")
class Node:
    """Base class of the search tree nodes; stores the node's bounding box.

    NOTE(review): abstract methods are declared but the class does not
    inherit abc.ABC, so instantiation of incomplete subclasses is not
    enforced at runtime -- confirm this is intentional.
    """
    __slots__ = ("bbox",)
    def __init__(self, bbox: BoundingBox):
        # 3D bounding box enclosing everything stored below this node.
        self.bbox = bbox
    @abc.abstractmethod
    def __len__(self):
        ...
    @abc.abstractmethod
    def __iter__(self) -> Iterator[AnyVec]:
        ...
    @abc.abstractmethod
    def contains(self, point: AnyVec) -> bool:
        ...
    @abc.abstractmethod
    def _nearest_neighbor(
        self, target: AnyVec, nn: AnyVec = None, nn_dist: float = INF
    ) -> Tuple[AnyVec, float]:
        ...
    @abc.abstractmethod
    def points_in_sphere(
        self, center: AnyVec, radius: float
    ) -> Iterator[AnyVec]:
        ...
    @abc.abstractmethod
    def points_in_bbox(self, bbox: BoundingBox) -> Iterator[AnyVec]:
        ...
    def nearest_neighbor(self, target: AnyVec) -> Tuple[AnyVec, float]:
        # Public entry point; starts the recursive search with no candidate
        # and an infinite best distance.
        return self._nearest_neighbor(target)
class LeafNode(Node):
    """Terminal node that stores the actual points of one subdivision."""

    __slots__ = ("points", "bbox")

    def __init__(self, points: List[AnyVec]):
        self.points = tuple(points)
        super().__init__(BoundingBox(self.points))

    def __len__(self):
        return len(self.points)

    def __iter__(self) -> Iterator[AnyVec]:
        return iter(self.points)

    def contains(self, point: AnyVec) -> bool:
        # Comparison by isclose(), not identity.
        return any(point.isclose(p) for p in self.points)

    def _nearest_neighbor(
        self, target: AnyVec, nn: AnyVec = None, nn_dist: float = INF
    ) -> Tuple[AnyVec, float]:
        """Return the closest stored point (and its distance) if it beats
        the incoming candidate `nn`/`nn_dist`, else pass the candidate on.
        """
        # Bug fix: the previous `min((target.distance(p), p) ...)` compared
        # the point objects themselves whenever two distances were equal,
        # which raises TypeError because vectors define no ordering.
        # A key function never compares the points.
        point = min(self.points, key=target.distance)
        distance = target.distance(point)
        if distance < nn_dist:
            nn, nn_dist = point, distance
        return nn, nn_dist

    def points_in_sphere(
        self, center: AnyVec, radius: float
    ) -> Iterator[AnyVec]:
        # Boundary points (distance == radius) are included.
        return (p for p in self.points if center.distance(p) <= radius)

    def points_in_bbox(self, bbox: BoundingBox) -> Iterator[AnyVec]:
        return (p for p in self.points if bbox.inside(p))
class InnerNode(Node):
    """Interior node holding child nodes; its bbox is the union of theirs."""
    __slots__ = ("children", "bbox")
    def __init__(self, children: Sequence[Node]):
        super().__init__(BoundingBox())
        self.children = tuple(children)
        for child in self.children:
            # build union of all child bounding boxes
            self.bbox.extend([child.bbox.extmin, child.bbox.extmax])
    def __len__(self):
        # Total point count across the whole subtree.
        return sum(len(c) for c in self.children)
    def __iter__(self) -> Iterator[AnyVec]:
        for child in self.children:
            yield from iter(child)
    def contains(self, point: AnyVec) -> bool:
        # Bounding-box test first to prune subtrees cheaply.
        for child in self.children:
            if child.bbox.inside(point) and child.contains(point):
                return True
        return False
    def _nearest_neighbor(
        self, target: AnyVec, nn: AnyVec = None, nn_dist: float = INF
    ) -> Tuple[AnyVec, float]:
        # Descend into the most promising child first to tighten nn_dist,
        # which lets the loop below prune more siblings.
        closest_child = find_closest_child(self.children, target)
        nn, nn_dist = closest_child._nearest_neighbor(target, nn, nn_dist)
        for child in self.children:
            if child is closest_child:
                continue
            # is target inside the child bounding box + nn_dist in all directions
            if grow_box(child.bbox, nn_dist).inside(target):
                point, distance = child._nearest_neighbor(target, nn, nn_dist)
                if distance < nn_dist:
                    nn = point
                    nn_dist = distance
        return nn, nn_dist
    def points_in_sphere(
        self, center: AnyVec, radius: float
    ) -> Iterator[AnyVec]:
        # Only recurse into children whose box can intersect the sphere.
        for child in self.children:
            if is_sphere_intersecting_bbox(
                Vec3(center), radius, child.bbox.center, child.bbox.size
            ):
                yield from child.points_in_sphere(center, radius)
    def points_in_bbox(self, bbox: BoundingBox) -> Iterator[AnyVec]:
        for child in self.children:
            if bbox.has_overlap(child.bbox):
                yield from child.points_in_bbox(bbox)
class RTree:
    """Immutable spatial search tree loosely based on `R-trees`_.

    The search tree is buildup once at initialization and immutable afterwards,
    because rebuilding the tree after inserting or deleting nodes is very costly
    and also keeps the implementation very simple. Without the ability to
    alter the content the restrictions which forces the tree balance at growing
    and shrinking of the original `R-trees`_, could be ignored, like the fixed
    minimum and maximum node size.

    This class uses internally only 3D bounding boxes, but also supports
    :class:`Vec2` as well as :class:`Vec3` objects as input data, but point
    types should not be mixed in a single search tree.

    The point objects keep their type and identity and the returned points of
    queries can be compared by the ``is`` operator for identity to the input
    points.

    The implementation requires a maximum node size of at least 2 and
    does not support empty trees!

    Raises:
        ValueError: max. node size too small or no data given

    .. versionadded:: 0.18

    .. _R-trees: https://en.wikipedia.org/wiki/R-tree

    """
    __slots__ = ("_root",)
    def __init__(self, points: Iterable[AnyVec], max_node_size: int = 5):
        if max_node_size < 2:
            raise ValueError("max node size must be > 1")
        _points = list(points)
        if len(_points) == 0:
            raise ValueError("no points given")
        # The whole tree is built eagerly here; it is never mutated again.
        self._root = make_node(_points, max_node_size, box_split)
    def __len__(self):
        """Returns the count of points in the search tree."""
        return len(self._root)
    def __iter__(self) -> Iterator[AnyVec]:
        """Yields all points in the search tree."""
        yield from iter(self._root)
    def contains(self, point: AnyVec) -> bool:
        """Returns ``True`` if `point` exists, the comparison is done by the
        :meth:`isclose` method and not by the identity operator ``is``.
        """
        return self._root.contains(point)
    def nearest_neighbor(self, target: AnyVec) -> Tuple[AnyVec, float]:
        """Returns the closest point to the `target` point and the distance
        between this points.
        """
        return self._root.nearest_neighbor(target)
    def points_in_sphere(
        self, center: AnyVec, radius: float
    ) -> Iterator[AnyVec]:
        """Returns all points in the range of the given sphere including the
        points at the boundary.
        """
        return self._root.points_in_sphere(center, radius)
    def points_in_bbox(self, bbox: BoundingBox) -> Iterator[AnyVec]:
        """Returns all points in the range of the given bounding box including
        the points at the boundary.
        """
        return self._root.points_in_bbox(bbox)
    def avg_leaf_size(self, spread: float = 1.0) -> float:
        """Returns the average size of the leaf bounding boxes.
        The size of a leaf bounding box is the maximum size in all dimensions.
        Excludes outliers of sizes beyond mean + standard deviation * spread.
        Returns 0.0 if less than two points in tree.
        """
        # max(...xyz) picks the largest extent of the box in any dimension.
        sizes: List[float] = [
            max(leaf.bbox.size.xyz) for leaf in collect_leafs(self._root)
        ]
        return average_exclusive_outliers(sizes, spread)
    def avg_spherical_envelope_radius(self, spread: float = 1.0) -> float:
        """Returns the average radius of spherical envelopes of the leaf nodes.
        Excludes outliers with radius beyond mean + standard deviation * spread.
        Returns 0.0 if less than two points in tree.
        """
        # spherical_envelope() returns (center, radius); only radius is used.
        radii: List[float] = [
            spherical_envelope(leaf.points)[1]
            for leaf in collect_leafs(self._root)
        ]
        return average_exclusive_outliers(radii, spread)
    def avg_nn_distance(self, spread: float = 1.0) -> float:
        """Returns the average of the nearest neighbor distances inside (!)
        leaf nodes. Excludes outliers with a distance beyond the overall
        mean + standard deviation * spread. Returns 0.0 if less than two points
        in tree.

        .. warning::

            This is a brute force check with O(n!) for each leaf node, where n
            is the point count of the leaf node.

        """
        distances: List[float] = []
        for leaf in collect_leafs(self._root):
            distances.extend(nearest_neighbor_distances(leaf.points))
        return average_exclusive_outliers(distances, spread)
def make_node(
    points: List[AnyVec],
    max_size: int,
    split_strategy: Callable[[List[AnyVec], int], Sequence[Node]],
) -> Node:
    """Build a leaf for small point sets, otherwise split recursively."""
    if len(points) <= max_size:
        return LeafNode(points)
    return InnerNode(split_strategy(points, max_size))
def box_split(points: List[AnyVec], max_size: int) -> Sequence[Node]:
    """Split `points` along the dimension with the largest extent into at
    most `max_size` contiguous chunks and build a node for each chunk.
    """
    n = len(points)
    size = BoundingBox(points).size.xyz
    # Index of the dominant dimension (0=x, 1=y, 2=z).
    dim = size.index(max(size))
    # NOTE: sorts the caller's list in place.
    points.sort(key=lambda vec: vec[dim])
    # Chunk length: ceil(n / max_size) points per chunk yields <= max_size
    # children; oversized chunks are split again by the recursive make_node.
    k = math.ceil(n / max_size)
    return tuple(
        make_node(points[i : i + k], max_size, box_split)
        for i in range(0, n, k)
    )
def is_sphere_intersecting_bbox(
    centroid: Vec3, radius: float, center: Vec3, size: Vec3
) -> bool:
    """True if the sphere around `centroid` overlaps the box given by its
    `center` and `size` (touching counts as intersecting).
    """
    offset = centroid - center
    # Per-axis reach: half the box extent plus the sphere radius.
    reach = size * 0.5 + Vec3(radius, radius, radius)
    # Separating-axis test: a gap on any single axis proves non-intersection.
    return (
        abs(offset.x) <= reach.x
        and abs(offset.y) <= reach.y
        and abs(offset.z) <= reach.z
    )
def find_closest_child(children: Sequence[Node], point: AnyVec) -> Node:
    """Return the child whose bounding-box center is closest to `point`.

    Bug fix: the previous tuple-based ``min((distance, child) ...)`` fell
    back to comparing the Node objects whenever two children were equally
    far away, raising TypeError because nodes define no ordering. Using a
    key function never compares the nodes; ties resolve to the first child.
    """
    assert len(children) > 0
    return min(children, key=lambda child: point.distance(child.bbox.center))
def grow_box(box: BoundingBox, dist: float) -> BoundingBox:
    """Return a copy of `box` expanded by `dist` in all directions; the
    input box is not modified.
    """
    bbox = box.copy()
    bbox.grow(dist)
    return bbox
def average_exclusive_outliers(values: List[float], spread: float) -> float:
    """Mean of `values` after dropping entries above mean + stdev * spread.

    Returns 0.0 for fewer than two input values, and also when the filter
    removes everything.
    """
    if len(values) < 2:
        return 0.0
    mean = sum(values) / len(values)
    cutoff = mean + statistics.stdev(values) * spread
    kept = [value for value in values if value <= cutoff]
    if not kept:
        return 0.0
    return sum(kept) / len(kept)
def collect_leafs(node: Node) -> Iterable[LeafNode]:
    """Depth-first generator over all leaf nodes below the given node."""
    if isinstance(node, InnerNode):
        for child in node.children:
            yield from collect_leafs(child)
    elif isinstance(node, LeafNode):
        yield node
def nearest_neighbor_distances(points: Sequence[AnyVec]) -> List[float]:
    """Brute force nearest-neighbor distances with O(n^2) pairwise scans.

    Entry i is the distance from points[i] to its closest *successor* in
    the sequence; the last point gets no entry.
    """
    result: List[float] = []
    for index in range(len(points) - 1):
        current = points[index]
        result.append(min(current.distance(p) for p in points[index + 1 :]))
    return result
|
|
__author__ = 'stonerri'
'''
This module acts as the application's primary controller: it defines the
HTTP request handlers, the websocket message hub, and the (redis/rq)
background-job plumbing.
'''
import tornado.escape
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import tornado.gen
import tornado.auth
import tornado.autoreload
from tornado.options import define, options
#import oauth2
from redis import Redis
from rq import Queue
import requests
import sys, pprint, time, ConfigParser, os
from ConfigParser import SafeConfigParser
import json
import os.path
import uuid
import datetime
import logging
import random
import time
import string
from time import mktime, sleep
import github
# constants
TARGET_APLOG = 'aplog'
TARGET_CONSOLE = 'console'
TARGET_REMOTELOG = 'remotelog'
TARGET_ANGULAR = 'angular'
TARGET_NOTICE = 'notice'
TARGET_INIT = 'init'
define("port", default=8000, help="run on the given port", type=int)
root = os.path.dirname(__file__)
# needed to serialize any non-standard objects
class GenericEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetimes as integer unix timestamps."""

    def default(self, obj):
        # Only datetimes get special treatment; everything else defers to
        # the base class, which raises TypeError for unknown types.
        if not isinstance(obj, datetime.datetime):
            return json.JSONEncoder.default(self, obj)
        return int(mktime(obj.timetuple()))
class FaviconHandler(tornado.web.RequestHandler):
    """Redirects /favicon.ico requests to the static asset."""
    def get(self):
        self.redirect('/static/favicon.ico')
# generic object
class Object(object):
    """Empty class, presumably used as an ad-hoc attribute bag by callers
    elsewhere -- no usage is visible in this module.
    """
    pass
class BaseHandler(tornado.web.RequestHandler):
    """Shared base handler providing cookie-based user identification."""
    # NOTE(review): @removeslash is normally applied to HTTP verb methods
    # (get/post), not to get_current_user -- confirm this is intentional.
    @tornado.web.removeslash
    def get_current_user(self):
        # Signed "user" cookie value, or None when absent/invalid.
        return self.get_secure_cookie("user")
class LogoutHandler(BaseHandler):
    """Clears the session cookie and redirects the user onward."""
    def get(self):
        self.clear_cookie("user")
        # Redirect target comes straight from the "next" query parameter
        # (default "/"). NOTE(review): the target is not validated --
        # confirm the open-redirect risk is acceptable.
        self.redirect(self.get_argument("next","/"))
class WebHandler(BaseHandler):
    """Serves the app shell: index.html for logged-in users, the landing
    page otherwise.
    """
    def get(self):
        user = self.get_current_user()
        # Python 2 print statement; debug-logs the cookie value.
        print user
        try:
            if user:
                with open(os.path.join(root, 'templates/index.html')) as f:
                    self.write(f.read())
            else:
                with open(os.path.join(root, 'templates/landing.html')) as f:
                    self.write(f.read())
        except IOError as e:
            # Template missing/unreadable: writes a 404 body but leaves the
            # HTTP status at 200.
            self.write("404: Not Found")
    #try:
    # with open(os.path.join(root, 'templates/index.html')) as f:
    # self.write(f.read())
    #@tornado.web.authenticated
from enginetasks import simpleVirtuosoPostExample
# websockets
class WSHandler(tornado.websocket.WebSocketHandler):
ws_open = False
def build_message(self, target, data, callback_id):
return json.dumps(dict(target=target, data=data, callback_id=callback_id))
    def allow_draft76(self):
        # for iOS 5.0 Safari
        # Permits the legacy hixie-76 websocket handshake for old clients.
        return True
    def open(self):
        # Called by Tornado when a websocket connection is established;
        # currently only logs to stdout (Python 2 print statement).
        print 'open'
        #self.application.log.debug('Websocket connection opened')
        #self.application.firefly.ws_callback = self.send_message
    def send_message(self, target, data):
        """Push a fire-and-forget message (callback_id -1) to the client."""
        #self.application.log.debug('Client::%s: %s' % (target, data))
        self.write_message(self.build_message(target=target, data=data, callback_id=-1))
    def send_message_with_callback(self, target, data, callback_id):
        """Push a message echoing the client's callback_id so the frontend
        can resolve its pending callback.
        """
        #self.application.log.debug('Client::%s: %s' % (target, data))
        self.write_message(self.build_message(target=target, data=data, callback_id=callback_id))
    def on_close(self):
        """Mark the socket closed when the client disconnects."""
        #self.application.log.debug('Websocket connection closed.')
        #self.application.firefly.ws_callback = None
        self.ws_open = False
    # messages from the frontend
    def on_message(self, message):
        """Dispatch an incoming websocket frame by its 'target' field."""
        self.ws_open = True
        #self.application.log.debug('Websocket message received.')
        # assume all messaging is done with json-encoded strings
        message_dict = json.loads(message)
        # verify we have a message target
        if 'target' in message_dict.keys():
            if message_dict['target'] == 'tornado':
                self.handle_tornado_message(message_dict)
            elif message_dict['target'] == 'init':
                self.handle_init_message(message_dict)
            else:
                # Unknown target: dump it to stdout (Python 2 print stmt).
                print message_dict
    # control main tornado methods here
    def handle_tornado_message(self, message):
        """Handle a message addressed to the server itself.

        Supported functions: 'save' (acknowledges with True) and
        'getStatus' (replies with an empty status payload); both respond
        via the client's callback_id. `args` is currently unused.
        """
        # tornado message data
        tornado_function = message['data']['function']
        args = message['data']['args']
        if tornado_function == 'save':
            print 'saving current state to file'
            # self.send_message('console', 'Saved application state')
            self.send_message_with_callback(TARGET_CONSOLE, True, message['callback_id'])
        if tornado_function == 'getStatus':
            # print 'getting current application status'
            d = ''
            #d = self.application.firefly.getAccessPointStatus()
            self.send_message_with_callback(TARGET_CONSOLE, d, message['callback_id'])
        # print tornado_function, args
def handle_init_message(self, message):
return_message = self.build_message('init', 'Welcome client!', -1)
self.write_message(return_message)
#
# def on_message(self, message):
#
# print 'message received %s' % message
# messagedict = json.loads(message)
#
# return_data = {}
# return_data['result'] = False
# return_data['callback_id'] = messagedict['callback_id']
#
# if messagedict['type'] == 'kickoff_queue':
#
# print 'test'
#
##simpleVirtuosoPostExample
#
# job = self.application.q.enqueue(simpleVirtuosoPostExample, 'some query string')
#
# #job = self.application.q.enqueue(count_words_at_url, 'http://nvie.com')
#
# #self.shared.js.append(job)
# self.application.jobs.append(job)
#
# while not job.result:
# time.sleep(1)
#
# return_data['data'] = job.result
# return_data['result'] = True
#
# print 'job complete'
# print job.result
#
# self.write_message(json.dumps(return_data))
#elif messagedict['type'] == 'dropbox':
#
# job = self.application.q.enqueue(processDropboxImage, messagedict['files'])
# self.application.jobs.append(job)
#elif messagedict['type'] == 't1':
#
# job = self.application.q.enqueue(processT1, messagedict)
# self.application.jobs.append(job)
#while not job.result:
# time.sleep(1)
#
#return_data['data'] = job.result
#return_data['result'] = True
#
#print 'job complete'
#print job.result
#self.write_message(json.dumps(return_data))
#else:
#
# self.write_message(json.dumps(return_data))
#tornado.ioloop.IOLoop.instance().add_timeout(datetime.timedelta(seconds=1), self.check_queue)
    def check_queue(self):
        """Poll in-flight RQ jobs and push progress/results to the client.

        For each job in self.application.jobs: refresh its state, send an
        'update_image' message when the result is ready (and mark the job
        for removal) or an 'update_meta' message while it is still running.
        Reschedules itself every second while jobs remain.
        """
        #print '.'
        list_to_remove = []
        for i in range(len(self.application.jobs)):
            j = self.application.jobs[i]
            j.refresh()
            print '\n***************************\n'
            print j.meta
            #from pprint import pprint
            #pprint (vars(j))
            if j.result:
                #print j.result
                print 'now removing it'
                list_to_remove.append(i)
                return_data = {}
                return_data['result'] = True
                return_data['func'] = 'update_image'
                return_data['contents'] = j.result
                self.write_message(json.dumps(return_data))
            elif j.meta:
                #todo add support for multiple jobs
                return_data = {}
                return_data['result'] = True
                return_data['func'] = 'update_meta'
                return_data['contents'] = j.meta
                self.write_message(json.dumps(return_data))
        # Delete finished jobs from highest index down so earlier indices
        # recorded in list_to_remove stay valid while we delete.
        for index in sorted(list_to_remove, reverse=True):
            print 'removing job at index %d' % index
            del self.application.jobs[index]
        #jobs_list = self.shared.q.get_jobs()
        #print self.shared.q.job_ids
        #print jobs_list
        #for n,j in enumerate(jobs_list):
        #    print n, j.result
        if len(self.application.jobs) > 0:
            tornado.ioloop.IOLoop.instance().add_timeout(datetime.timedelta(seconds=1), self.check_queue)
        #tornado.ioloop.IOLoop.instance().add_timeout(datetime.timedelta(seconds=5), self.pollForAccessPoint)
#class UploadHandler(tornado.web.RequestHandler):
#
# def post(self):
# #print self.request.files.keys()
# file1 = self.request.files['file'][0]
# original_fname = file1['filename']
# extension = os.path.splitext(original_fname)[1]
# fname = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(6))
# final_filename= fname+extension
# full_path = os.path.abspath(root) + "/static/uploads/" + final_filename
# output_file = open(os.path.abspath(root) + "/static/uploads/" + final_filename, 'w')
# output_file.write(file1['body'])
# output_file.close()
#
# from rqtasks import processImage
# job = self.application.q.enqueue(processImage,full_path)
# self.application.jobs.append(job)
#
# self.finish("file" + final_filename + " is uploaded")
class GithubLoginHandler(BaseHandler, github.GithubMixin):
    """OAuth2 login flow against GitHub.

    First request redirects the browser to GitHub's authorize page; GitHub
    redirects back here with a ``code`` query argument, which is exchanged
    for an authenticated user via the mixin.
    """
    #_OAUTH_REDIRECT_URL = 'http://localhost:8888/auth/github'
    _OAUTH_REDIRECT_URL = 'http://frontend.incfcloud.org/auth/github'
    @tornado.web.asynchronous
    def get(self):
        # we can append next to the redirect uri, so the user gets the
        # correct URL on login
        redirect_uri = tornado.httputil.url_concat(
            self._OAUTH_REDIRECT_URL, {'next': self.get_argument('next', '/')})
        # if we have a code, we have been authorized so we can log in
        if self.get_argument("code", False):
            print 'we have code'
            self.get_authenticated_user(
                redirect_uri=redirect_uri,
                client_id=self.settings["github_client_id"],
                client_secret=self.settings["github_secret"],
                code=self.get_argument("code"),
                callback=self.async_callback(self._on_login)
            )
            return
        # otherwise we need to request an authorization code
        self.authorize_redirect(
            redirect_uri=redirect_uri,
            client_id=self.settings["github_client_id"],
            extra_params={"scope": self.settings['github_scope'], "foo":1})
    def _on_login(self, user):
        """ This handles the user object from the login request """
        # Stores the GitHub user as a secure cookie on success, clears it on
        # failure, then sends the browser on to its original destination.
        print('authenticated callback')
        #print user
        if user:
            logging.info('logged in user from github: ' + str(user))
            self.set_secure_cookie("user", tornado.escape.json_encode(user))
        else:
            self.clear_cookie("user")
        self.redirect(self.get_argument("next","/"))
class Application(tornado.web.Application):
def __init__(self):
parser = SafeConfigParser()
parser.read('../settings.ini')
gen_key = parser.get('admin', 'admin_generate_key')
admin_key = ''
if gen_key==1:
import uuid
admin_key = uuid.uuid4()
else:
admin_key = parser.get('admin', 'admin_key')
self.jobs = []
self.admin_key = str(admin_key)
self.q = Queue(connection=Redis())
print 'to access the admin path, visit /admin/%s' % (admin_key)
handlers = [
(r'/', WebHandler),
(r'/ws', WSHandler),
(r"/auth/github", GithubLoginHandler),
(r"/auth/logout", LogoutHandler)
#(r'/upload', UploadHandler)
]
settings = dict(
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
github_client_id="47663846f4ff92fbe8a3",
github_secret="3ac166be872348f12c848c7dbe8e92b32e5803e9",
github_scope = ['user','public_repo'],
login_url="http://frontend.incfcloud.org/auth/github",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
)
tornado.web.Application.__init__(self, handlers, **settings)
# Watch templates and static path directory
# for (path, dirs, files) in os.walk(settings["template_path"]):
# for item in files:
# tornado.autoreload.watch(os.path.join(path, item))
#
# for (path, dirs, files) in os.walk(settings["static_path"]):
# for item in files:
# tornado.autoreload.watch(os.path.join(path, item))
def main():
    """Parse command-line options, bind the app to its port, and serve forever."""
    tornado.options.parse_command_line()
    application = Application()
    application.listen(options.port)
    # Block on the event loop until interrupted.
    #tornado.autoreload.start(tornado.ioloop.IOLoop.instance())
    tornado.ioloop.IOLoop.instance().start()
# Script entry point: start the Tornado server when executed directly.
if __name__ == "__main__":
    main()
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Django settings for Hue.
#
# Local customizations are done by symlinking a file
# as local_settings.py.
import logging
import os
import sys
import desktop.conf
import desktop.log
from desktop.lib.paths import get_desktop_root
import pkg_resources
# Version string reported in logs/UI; "Unknown" when package metadata lacks one.
HUE_DESKTOP_VERSION = pkg_resources.get_distribution("desktop").version or "Unknown"
NICE_NAME = "Hue"
# Environment variable names consulted below: the per-process log name and
# the toggle for extra HTML-validation middleware.
ENV_HUE_PROCESS_NAME = "HUE_PROCESS_NAME"
ENV_DESKTOP_DEBUG = "DESKTOP_DEBUG"
############################################################
# Part 1: Logging and imports.
############################################################
# Configure debug mode
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Start basic logging as soon as possible.
# Derive the process name from argv (prefer the subcommand in argv[1]) so
# logging is per-process, unless the wrapper script already set it.
if ENV_HUE_PROCESS_NAME not in os.environ:
  _proc = os.path.basename(len(sys.argv) > 1 and sys.argv[1] or sys.argv[0])
  os.environ[ENV_HUE_PROCESS_NAME] = _proc
desktop.log.basic_logging(os.environ[ENV_HUE_PROCESS_NAME])
logging.info("Welcome to Hue " + HUE_DESKTOP_VERSION)
# Then we can safely import some more stuff
from desktop import appmanager
from desktop.lib import conf
# Add fancy logging
desktop.log.fancy_logging()
############################################################
# Part 2: Generic Configuration
############################################################
# Standard Django i18n/media settings; see the Django settings reference.
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
#LANGUAGE_CODE = 'it'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
USE_L10N = True
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
############################################################
# Part 3: Django configuration
############################################################
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'desktop.lib.template_loader.load_template_source',
)
# NOTE: middleware order matters -- Django applies these top-down on the
# request and bottom-up on the response.
MIDDLEWARE_CLASSES = [
    'desktop.middleware.DatabaseLoggingMiddleware',
    'django.middleware.common.CommonMiddleware',
    'desktop.middleware.SessionOverPostMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'desktop.middleware.SpnegoMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'babeldjango.middleware.LocaleMiddleware',
    'desktop.middleware.AjaxMiddleware',
    # Must be after Session, Auth, and Ajax. Before everything else.
    'desktop.middleware.LoginAndPermissionMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'desktop.middleware.NotificationMiddleware',
    'desktop.middleware.JFrameMiddleware',
    'desktop.middleware.ExceptionMiddleware',
    'desktop.middleware.ClusterMiddleware',
    'desktop.middleware.AppSpecificMiddleware',
    'django.middleware.transaction.TransactionMiddleware',
    # 'debug_toolbar.middleware.DebugToolbarMiddleware'
]
# Optional HTML validation of every generated page, enabled via env var.
if os.environ.get(ENV_DESKTOP_DEBUG):
  MIDDLEWARE_CLASSES.append('desktop.middleware.HtmlValidationMiddleware')
  logging.debug("Will try to validate generated HTML.")
ROOT_URLCONF = 'desktop.urls'
# BUG FIX: without the trailing comma the parentheses were just grouping and
# TEMPLATE_DIRS was a plain string. Django iterates TEMPLATE_DIRS, so a bare
# string would be walked character-by-character instead of used as one path.
TEMPLATE_DIRS = (
  get_desktop_root("core/templates"),
)
# Base app list; desktop apps discovered at startup are appended below in
# Part 4.
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'django_extensions',
    # 'debug_toolbar',
    'south', # database migration tool
    # i18n support
    'babeldjango',
    # Desktop injects all the other installed apps into here magically.
    'desktop'
]
# Desktop doesn't use an auth profile module, because
# because it doesn't mesh very well with the notion
# of having multiple apps. If your app needs
# to store data related to users, it should
# manage its own table with an appropriate foreign key.
AUTH_PROFILE_MODULE=None
LOGIN_REDIRECT_URL = "/"
PYLINTRC = get_desktop_root('.pylintrc')
# Insert our HDFS upload handler
# NOTE: handler order matters -- the HDFS handler gets first chance at each
# uploaded file before Django's default memory/temp-file handlers.
FILE_UPLOAD_HANDLERS = (
  'hadoop.fs.upload.HDFSfileUploadHandler',
  'django.core.files.uploadhandler.MemoryFileUploadHandler',
  'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
############################################################
# Part 4: Installation of apps
############################################################
_config_dir = os.getenv("HUE_CONF_DIR", get_desktop_root("conf"))
# Libraries are loaded and configured before the apps
appmanager.load_libs()
_lib_conf_modules = [dict(module=app.conf, config_key=None) for app in appmanager.DESKTOP_LIBS if app.conf is not None]
appmanager.load_apps()
# Each discovered desktop app contributes its own Django apps.
for app in appmanager.DESKTOP_APPS:
  INSTALLED_APPS.extend(app.django_apps)
logging.debug("Installed Django modules: %s" % ",".join(map(str, appmanager.DESKTOP_MODULES)))
# Load app configuration
_app_conf_modules = [dict(module=app.conf, config_key=app.config_key) for app in appmanager.DESKTOP_APPS if app.conf is not None]
_app_conf_modules.append(dict(module=desktop.conf, config_key=None))
# Bind config modules to the on-disk config directory (libs first, then apps).
conf.initialize(_lib_conf_modules, _config_dir)
conf.initialize(_app_conf_modules, _config_dir)
# Now that we've loaded the desktop conf, set the django DEBUG mode based on the conf.
DEBUG = desktop.conf.DJANGO_DEBUG_MODE.get()
TEMPLATE_DEBUG = DEBUG
############################################################
# Part 4a: Django configuration that requires bound Desktop
# configs.
############################################################
# Configure hue admins
# Only admins with both a name and an email bound in the config are kept.
ADMINS = []
for admin in desktop.conf.DJANGO_ADMINS.get():
  admin_conf = desktop.conf.DJANGO_ADMINS[admin]
  if 'name' in admin_conf.bind_to and 'email' in admin_conf.bind_to:
    ADMINS.append(((admin_conf.NAME.get(), admin_conf.EMAIL.get())))
ADMINS = tuple(ADMINS)
MANAGERS = ADMINS
# Server Email Address
SERVER_EMAIL = desktop.conf.DJANGO_SERVER_EMAIL.get()
# Email backend
EMAIL_BACKEND = desktop.conf.DJANGO_EMAIL_BACKEND.get()
# Configure database
# The DESKTOP_DB_CONFIG env var overrides the config file with a
# colon-separated connection string:
#   engine:name:test_name:user:password:host:port
if os.getenv('DESKTOP_DB_CONFIG'):
  conn_string = os.getenv('DESKTOP_DB_CONFIG')
  logging.debug("DESKTOP_DB_CONFIG SET: %s" % (conn_string))
  # BUG FIX: the key was previously misspelled "TEST__NAME" (double
  # underscore), so the test-database name supplied via DESKTOP_DB_CONFIG was
  # silently ignored. "TEST_NAME" matches the config-file branch below.
  default_db = dict(zip(
    ["ENGINE", "NAME", "TEST_NAME", "USER", "PASSWORD", "HOST", "PORT"],
    conn_string.split(':')))
else:
  default_db = {
    "ENGINE" : desktop.conf.DATABASE.ENGINE.get(),
    "NAME" : desktop.conf.DATABASE.NAME.get(),
    "USER" : desktop.conf.DATABASE.USER.get(),
    "PASSWORD" : desktop.conf.DATABASE.PASSWORD.get(),
    "HOST" : desktop.conf.DATABASE.HOST.get(),
    "PORT" : desktop.conf.DATABASE.PORT.get(),
    # DB used for tests
    "TEST_NAME" : get_desktop_root('desktop-test.db')
  }
DATABASES = {
  'default': default_db
}
TIME_ZONE = desktop.conf.TIME_ZONE.get()
# Desktop supports only one authentication backend.
AUTHENTICATION_BACKENDS = (desktop.conf.AUTH.BACKEND.get(),)
# SMTP settings for outgoing mail, all sourced from the bound desktop conf.
EMAIL_HOST = desktop.conf.SMTP.HOST.get()
EMAIL_PORT = desktop.conf.SMTP.PORT.get()
EMAIL_HOST_USER = desktop.conf.SMTP.USER.get()
EMAIL_HOST_PASSWORD = desktop.conf.SMTP.PASSWORD.get()
EMAIL_USE_TLS = desktop.conf.SMTP.USE_TLS.get()
DEFAULT_FROM_EMAIL = desktop.conf.SMTP.DEFAULT_FROM.get()
# Used for securely creating sessions. Should be unique and not shared with anybody.
SECRET_KEY = desktop.conf.SECRET_KEY.get()
if SECRET_KEY == "":
  logging.warning("secret_key should be configured")
############################################################
# Necessary for South to not futz with tests. Fixed in South 0.7.1
SKIP_SOUTH_TESTS = True
# Set up environment variable so Kerberos libraries look at our private
# ticket cache
os.environ['KRB5CCNAME'] = desktop.conf.KERBEROS.CCACHE_PATH.get()
|
|
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
# Public API of this module.
# NOTE(review): only the single-chromosome operators are exported; the
# multiple_chromosome_* variators defined below are not listed -- confirm
# that omission is intentional.
__all__ = ['set_mutation', 'set_indel']
from six.moves import range
from inspyred.ec.variators import mutator, crossover
from ordered_set import OrderedSet
from cameo.strain_design.heuristic.genomes import MultipleChromosomeGenome
from numpy import float32 as float
def _do_set_n_point_crossover(representation, mom, dad, points, random, candidate_size):
    """Deal alternating chunks of the parents' genes into two children.

    The representation is split at *points*; within each chunk, genes present
    in mom go to one child and genes present in dad to the other, swapping
    destinations chunk by chunk. Children larger than *candidate_size* are
    down-sampled.

    NOTE(review): *points* are sampled gene values used directly as slice
    indices -- this assumes genes are in-range ints; confirm upstream.
    """
    # Partition the representation into segments delimited by the cut points.
    segments = []
    start = 0
    for cut in points:
        segments.append(representation[start:cut])
        start = cut
    segments.append(representation[start:])
    bro = OrderedSet()
    sis = OrderedSet()
    take_from_mom = True
    for segment in segments:
        for gene in segment:
            if gene in mom:
                (bro if take_from_mom else sis).append(gene)
            if gene in dad:
                (sis if take_from_mom else bro).append(gene)
        # Swap which child inherits from which parent for the next segment.
        take_from_mom = not take_from_mom
    # Trim oversized children down to the allowed candidate size.
    if len(bro) > candidate_size:
        bro = random.sample(bro, candidate_size)
    if len(sis) > candidate_size:
        sis = random.sample(sis, candidate_size)
    return bro, sis
@crossover
def set_n_point_crossover(random, mom, dad, args):
    """n-point crossover over the union of both parents' gene sets.

    With probability ``crossover_rate`` the parents are recombined via
    ``_do_set_n_point_crossover``; otherwise they pass through unchanged.
    Children with zero genes are discarded.
    """
    pool = list(set(mom).union(set(dad)))
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    num_crossover_points = args.setdefault('num_crossover_points', 1)
    candidate_size = args.setdefault('candidate_size', 9)
    offspring = []
    if random.random() > crossover_rate:
        # No crossover this time: parents survive as-is.
        offspring.append(mom)
        offspring.append(dad)
    else:
        cuts = random.sample(pool, num_crossover_points)
        bro, sis = _do_set_n_point_crossover(pool, mom, dad, cuts, random, candidate_size)
        # ensure number of knockouts > 0 or do not add individual
        for child in (bro, sis):
            if len(child) > 0:
                offspring.append(child)
    return offspring
@mutator
def set_mutation(random, individual, args):
    """Mutate a set-encoded individual gene by gene.

    Each gene is independently replaced, with probability ``mutation_rate``,
    by a random index into the representation.

    Parameters
    ----------
    random: Random
    individual: list
        with unique integers
    args: dict
        must contain the representation

    Returns
    -------
    list
        created based on an ordered set
    """
    representation = args.get('representation')
    mutation_rate = float(args.get('mutation_rate', .1))
    mutated = [
        random.randint(0, len(representation) - 1)
        if random.random() < mutation_rate else gene
        for gene in individual
    ]
    return list(OrderedSet(mutated))
@mutator
def set_indel(random, individual, args):
    """Apply a single random insertion or deletion to the individual.

    With probability ``indel_rate`` a coin flip chooses: deletion (drop one
    random gene, never emptying the individual) or insertion (append a
    random index into the representation).

    Parameters
    ----------
    random: Random
    individual: list
        with unique integers
    args: dict
        must contain the representation

    Returns
    -------
    list
        created based on an ordered set
    """
    representation = args.get('representation')
    indel_rate = float(args.get('indel_rate', .1))
    mutant = list(individual)
    if random.random() < indel_rate:
        if random.random() > 0.5:
            # Deletion: keep all but one randomly chosen gene.
            if len(individual) > 1:
                mutant = random.sample(mutant, len(mutant) - 1)
        else:
            # Insertion: add one random index from the representation.
            mutant.append(random.sample(range(len(representation)), 1)[0])
    return list(OrderedSet(mutant))
@mutator
def multiple_chromosome_set_mutation(random, individual, args):
    """Mutate each chromosome of a multi-chromosome genome independently.

    Per chromosome *key*, each gene is replaced (with probability
    ``<key>_mutation_rate``) by a random index into ``<key>_representation``.

    Parameters
    ----------
    random: Random
    individual: MultipleChromosomeGenome
        with unique integers in each chromosome
    args: dict
        must contain the representation of each chromosome

    Returns
    -------
    MultipleChromosomeGenome
        A mutated individual
    """
    mutant = MultipleChromosomeGenome(keys=individual.keys)
    for key in individual.keys:
        representation = args.get('%s_representation' % key)
        rate = args.get('%s_mutation_rate' % key, .1)
        for gene in individual[key]:
            replace = random.random() < rate
            mutant[key].append(
                random.randint(0, len(representation) - 1) if replace else gene)
    return mutant
@mutator
def multiple_chromosome_set_indel(random, individual, args):
    """Apply a random insertion or deletion to each chromosome independently.

    Per chromosome *key*, with probability ``<key>_indel_rate`` a coin flip
    chooses deletion (drop one gene, never emptying the chromosome) or
    insertion (append a random index from ``<key>_representation``).

    Parameters
    ----------
    random: Random
    individual: MultipleChromosomeGenome
        with unique integers in each chromosome
    args: dict
        must contain the representation of each chromosome

    Returns
    -------
    MultipleChromosomeGenome
        A mutated individual
    """
    mutant = individual.copy()
    for key in individual.keys:
        representation = args.get('%s_representation' % key)
        rate = args.get('%s_indel_rate' % key, .1)
        if random.random() >= rate:
            continue
        if random.random() > 0.5:
            # Deletion: keep all but one randomly chosen gene.
            if len(individual[key]) > 1:
                mutant[key] = random.sample(mutant[key], len(mutant[key]) - 1)
        else:
            # Insertion: add one random index from this chromosome's representation.
            mutant[key].append(random.sample(range(len(representation)), 1)[0])
    return mutant
@crossover
def multiple_chromosome_n_point_crossover(random, mom, dad, args):
    """Apply set_n_point_crossover chromosome-by-chromosome.

    NOTE(review): set_n_point_crossover is wrapped by @crossover, so the
    call here passes a candidate *list* and receives a *list* of children,
    which is then assigned to a single chromosome slot; confirm this matches
    inspyred's variator contract before relying on it.
    """
    children = MultipleChromosomeGenome(keys=mom.keys)
    for key in children.keys:
        children[key] = set_n_point_crossover(random, [mom[key], dad[key]], args)
    return children
|
|
import sys
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.core.exceptions import PermissionDenied
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.models import Text
from menus.menu_pool import menu_pool
from cms.api import (
create_page,
_verify_plugin_type,
assign_user_to_page,
publish_page,
)
from cms.apphook_pool import apphook_pool
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.models.pagemodel import Page
from cms.models.permissionmodels import GlobalPagePermission
from cms.plugin_base import CMSPluginBase
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.menu_extender import TestMenu
from cms.tests.test_apphooks import APP_MODULE, APP_NAME
def _grant_page_permission(user, codename):
    """Grant *user* the cms Page model permission named '<codename>_page'."""
    page_content_type = ContentType.objects.get_by_natural_key('cms', 'page')
    permission, _created = Permission.objects.get_or_create(
        codename='%s_page' % codename, content_type=page_content_type)
    user.user_permissions.add(permission)
class PythonAPITests(CMSTestCase):
    """Tests for the public python API in ``cms.api``: create_page,
    assign_user_to_page, publish_page, and _verify_plugin_type."""
    def _get_default_create_page_arguments(self):
        # Minimal valid kwargs shared by most create_page tests below.
        return {
            'title': 'Test',
            'template': 'nav_playground.html',
            'language': 'en'
        }
    def test_invalid_apphook_type(self):
        self.assertRaises(TypeError, create_page, apphook=1,
                          **self._get_default_create_page_arguments())
    def test_invalid_template(self):
        kwargs = self._get_default_create_page_arguments()
        kwargs['template'] = "not_valid.htm"
        with self.settings(CMS_TEMPLATES=[("not_valid.htm", "notvalid")]):
            self.assertRaises(TemplateDoesNotExist, create_page, **kwargs)
            kwargs['template'] = TEMPLATE_INHERITANCE_MAGIC
            create_page(**kwargs)
    def test_apphook_by_class(self):
        # Force a fresh import of the apphook module so the pool picks it up.
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        apphooks = (
            '%s.%s' % (APP_MODULE, APP_NAME),
        )
        with self.settings(CMS_APPHOOKS=apphooks):
            apphook_pool.clear()
            apphook = apphook_pool.get_apphook(APP_NAME)
            page = create_page(apphook=apphook,
                               **self._get_default_create_page_arguments())
            self.assertEqual(page.get_application_urls('en'), APP_NAME)
    def test_invalid_dates(self):
        self.assertRaises(AssertionError, create_page, publication_date=1,
                          **self._get_default_create_page_arguments())
        self.assertRaises(AssertionError, create_page, publication_end_date=1,
                          **self._get_default_create_page_arguments())
    def test_nav_extenders_invalid_type(self):
        if not menu_pool.discovered:
            menu_pool.discover_menus()
        # Swap in a known menu registry for the duration of the test.
        self.old_menu = menu_pool.menus
        menu_pool.menus = {'TestMenu': TestMenu}
        self.assertRaises(AssertionError, create_page, navigation_extenders=1,
                          **self._get_default_create_page_arguments())
        menu_pool.menus = self.old_menu
    def test_nav_extenders_invalid_menu(self):
        if not menu_pool.discovered:
            menu_pool.discover_menus()
        self.old_menu = menu_pool.menus
        menu_pool.menus = {}
        self.assertRaises(AssertionError, create_page,
                          navigation_extenders=TestMenu,
                          **self._get_default_create_page_arguments())
        menu_pool.menus = self.old_menu
    def test_nav_extenders_valid(self):
        if not menu_pool.discovered:
            menu_pool.discover_menus()
        self.old_menu = menu_pool.menus
        menu_pool.menus = {'TestMenu': TestMenu}
        page = create_page(navigation_extenders='TestMenu',
                           **self._get_default_create_page_arguments())
        self.assertEqual(page.navigation_extenders, 'TestMenu')
        menu_pool.menus = self.old_menu
    def test_verify_plugin_type_invalid_type(self):
        self.assertRaises(TypeError, _verify_plugin_type, 1)
    def test_verify_plugin_type_string(self):
        plugin_model, plugin_type = _verify_plugin_type("TextPlugin")
        self.assertEqual(plugin_model, Text)
        self.assertEqual(plugin_type, 'TextPlugin')
    def test_verify_plugin_type_string_invalid(self):
        self.assertRaises(TypeError, _verify_plugin_type, "InvalidPlugin")
    def test_verify_plugin_type_plugin_class(self):
        plugin_model, plugin_type = _verify_plugin_type(TextPlugin)
        self.assertEqual(plugin_model, Text)
        self.assertEqual(plugin_type, 'TextPlugin')
    def test_verify_plugin_type_invalid_plugin_class(self):
        # A plugin class not registered with the plugin pool must be rejected.
        class InvalidPlugin(CMSPluginBase):
            model = Text
        self.assertRaises(AssertionError, _verify_plugin_type, InvalidPlugin)
    def test_assign_user_to_page_nothing(self):
        page = create_page(**self._get_default_create_page_arguments())
        user = get_user_model().objects.create_user(username='user', email='user@django-cms.org',
                                                    password='user')
        # NOTE(review): is_staff is set on the in-memory instance only (no
        # save()) -- confirm that is intentional for this permission check.
        user.is_staff = True
        self.assertFalse(page.has_change_permission(user))
    def test_assign_user_to_page_single(self):
        page = create_page(**self._get_default_create_page_arguments())
        user = get_user_model().objects.create_user(username='user', email='user@django-cms.org',
                                                    password='user')
        user.is_staff = True
        user.save()
        assign_user_to_page(page, user, can_change=True)
        # Page-level assignment alone is not enough without the model perm.
        self.assertFalse(page.has_change_permission(user))
        self.assertFalse(page.has_add_permission(user))
        _grant_page_permission(user, 'change')
        # Re-fetch both objects: permissions are cached on the instances.
        page = Page.objects.get(pk=page.pk)
        user = get_user_model().objects.get(pk=user.pk)
        self.assertTrue(page.has_change_permission(user))
        self.assertFalse(page.has_add_permission(user))
    def test_assign_user_to_page_all(self):
        page = create_page(**self._get_default_create_page_arguments())
        user = get_user_model().objects.create_user(username='user', email='user@django-cms.org',
                                                    password='user')
        user.is_staff = True
        user.save()
        assign_user_to_page(page, user, grant_all=True)
        self.assertFalse(page.has_change_permission(user))
        self.assertFalse(page.has_add_permission(user))
        _grant_page_permission(user, 'change')
        _grant_page_permission(user, 'add')
        # Re-fetch both objects: permissions are cached on the instances.
        page = Page.objects.get(pk=page.pk)
        user = get_user_model().objects.get(pk=user.pk)
        self.assertTrue(page.has_change_permission(user))
        self.assertTrue(page.has_add_permission(user))
    def test_page_overwrite_url_default(self):
        self.assertEqual(Page.objects.all().count(), 0)
        home = create_page('root', 'nav_playground.html', 'en', published=True)
        self.assertTrue(home.is_published('en', True))
        self.assertFalse(home.is_home)
        page = create_page(**self._get_default_create_page_arguments())
        self.assertFalse(page.is_home)
        self.assertFalse(page.get_title_obj_attribute('has_url_overwrite'))
        self.assertEqual(page.get_title_obj_attribute('path'), 'test')
    def test_create_page_can_overwrite_url(self):
        page_attrs = self._get_default_create_page_arguments()
        page_attrs["overwrite_url"] = 'test/home'
        page = create_page(**page_attrs)
        self.assertTrue(page.get_title_obj_attribute('has_url_overwrite'))
        self.assertEqual(page.get_title_obj_attribute('path'), 'test/home')
    def test_create_page_atomic(self):
        # Ref: https://github.com/divio/django-cms/issues/5652
        # We'll simulate a scenario where a user creates a page with an
        # invalid template which causes Django to throw an error when the
        # template is scanned for placeholders and thus short circuits the
        # creation mechanism.
        page_attrs = self._get_default_create_page_arguments()
        # It's important to use TEMPLATE_INHERITANCE_MAGIC to avoid the cms
        # from loading the template before saving and triggering the template error
        # Instead, we delay the loading of the template until after the save is executed.
        page_attrs["template"] = TEMPLATE_INHERITANCE_MAGIC
        self.assertFalse(Page.objects.filter(template=TEMPLATE_INHERITANCE_MAGIC).exists())
        with self.settings(CMS_TEMPLATES=[("col_invalid.html", "notvalid")]):
            self.assertRaises(TemplateSyntaxError, create_page, **page_attrs)
            # The template raised an exception which should cause the database to roll back
            # instead of committing a page in a partial state.
            self.assertFalse(Page.objects.filter(template=TEMPLATE_INHERITANCE_MAGIC).exists())
    def test_create_reverse_id_collision(self):
        create_page('home', 'nav_playground.html', 'en', published=True, reverse_id="foo")
        self.assertRaises(FieldError, create_page, 'foo', 'nav_playground.html', 'en', published=True, reverse_id="foo")
        # NOTE(review): assertTrue(x, msg) treats the second argument as a
        # message, so this assertion can never fail -- assertEqual was
        # probably intended; confirm the expected count before changing it.
        self.assertTrue(Page.objects.count(), 2)
    def test_publish_page(self):
        page_attrs = self._get_default_create_page_arguments()
        page_attrs['language'] = 'en'
        page_attrs['published'] = False
        page = create_page(**page_attrs)
        self.assertFalse(page.is_published('en'))
        self.assertEqual(page.changed_by, 'script')
        user = get_user_model().objects.create_user(username='user', email='user@django-cms.org',
                                                    password='user')
        # Initially no permission
        self.assertRaises(PermissionDenied, publish_page, page, user, 'en')
        user.is_staff = True
        user.save()
        # Permissions are cached on user instances, so create a new one.
        user = get_user_model().objects.get(pk=user.pk)
        self.add_permission(user, 'change_page')
        self.add_permission(user, 'publish_page')
        gpp = GlobalPagePermission.objects.create(user=user, can_change=True, can_publish=True)
        gpp.sites.add(page.node.site)
        publish_page(page, user, 'en')
        # Reload the page to get updates.
        page = page.reload()
        self.assertTrue(page.is_published('en'))
        self.assertEqual(page.changed_by, user.get_username())
    def test_create_page_assert_parent_is_draft(self):
        page_attrs = self._get_default_create_page_arguments()
        page_attrs['published'] = True
        parent_page = create_page(**page_attrs)
        parent_page_public = parent_page.get_public_object()
        self.assertRaises(AssertionError, create_page, parent=parent_page_public, **page_attrs)
    def test_create_page_page_title(self):
        page = create_page(**dict(self._get_default_create_page_arguments(), page_title='page title'))
        self.assertEqual(page.get_title_obj_attribute('page_title'), 'page title')
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import sys
from abc import abstractmethod
from collections import defaultdict
from pants.backend.core.tasks.group_task import GroupMember
from pants.backend.jvm.tasks.jvm_compile.jvm_compile_global_strategy import JvmCompileGlobalStrategy
from pants.backend.jvm.tasks.jvm_compile.jvm_compile_isolated_strategy import \
JvmCompileIsolatedStrategy
from pants.backend.jvm.tasks.jvm_compile.jvm_dependency_analyzer import JvmDependencyAnalyzer
from pants.backend.jvm.tasks.jvm_compile.jvm_fingerprint_strategy import JvmFingerprintStrategy
from pants.backend.jvm.tasks.nailgun_task import NailgunTaskBase
from pants.goal.products import MultipleRootedProducts
from pants.option.options import Options
from pants.reporting.reporting_utils import items_to_report_element
class JvmCompile(NailgunTaskBase, GroupMember):
"""A common framework for JVM compilation.
To subclass for a specific JVM language, implement the static values and methods
mentioned below under "Subclasses must implement".
"""
  @classmethod
  def register_options(cls, register):
    """Register the command-line/config options shared by all JVM compiles.

    Also delegates to the global and isolated strategy classes so their
    options are registered alongside. NOTE: cls._language and
    cls._supports_concurrent_execution are not defined in this base class --
    presumably supplied by language subclasses (see class docstring);
    confirm when subclassing.
    """
    super(JvmCompile, cls).register_options(register)
    register('--partition-size-hint', type=int, default=sys.maxint, metavar='<# source files>',
             help='Roughly how many source files to attempt to compile together. Set to a large '
                  'number to compile all sources together. Set to 0 to compile target-by-target.')
    register('--jvm-options', type=Options.list,
             help='Run the compiler with these JVM options.')
    register('--args', action='append', default=list(cls.get_args_default(register.bootstrap)),
             help='Pass these args to the compiler.')
    register('--confs', type=Options.list, default=['default'],
             help='Compile for these Ivy confs.')
    # TODO: Stale analysis should be automatically ignored via Task identities:
    # https://github.com/pantsbuild/pants/issues/1351
    register('--clear-invalid-analysis', default=False, action='store_true',
             advanced=True,
             help='When set, any invalid/incompatible analysis files will be deleted '
                  'automatically.  When unset, an error is raised instead.')
    register('--warnings', default=True, action='store_true',
             help='Compile with all configured warnings enabled.')
    register('--warning-args', action='append', default=list(cls.get_warning_args_default()),
             advanced=True,
             help='Extra compiler args to use when warnings are enabled.')
    register('--no-warning-args', action='append', default=list(cls.get_no_warning_args_default()),
             advanced=True,
             help='Extra compiler args to use when warnings are disabled.')
    register('--strategy', choices=['global', 'isolated'], default='global',
             help='Selects the compilation strategy to use. The "global" strategy uses a shared '
                  'global classpath for all compiled classes, and the "isolated" strategy uses '
                  'per-target classpaths.')
    JvmCompileGlobalStrategy.register_options(register, cls._language, cls._supports_concurrent_execution)
    JvmCompileIsolatedStrategy.register_options(register, cls._language, cls._supports_concurrent_execution)
@classmethod
def product_types(cls):
return ['classes_by_target', 'classes_by_source', 'resources_by_target']
@classmethod
def prepare(cls, options, round_manager):
super(JvmCompile, cls).prepare(options, round_manager)
# This task uses JvmDependencyAnalyzer as a helper, get its product needs
JvmDependencyAnalyzer.prepare(options, round_manager)
round_manager.require_data('compile_classpath')
round_manager.require_data('ivy_resolve_symlink_map')
# Require codegen we care about
# TODO(John Sirois): roll this up in Task - if the list of labels we care about for a target
# predicate to filter the full build graph is exposed, the requirement can be made automatic
# and in turn codegen tasks could denote the labels they produce automating wiring of the
# produce side
round_manager.require_data('java')
round_manager.require_data('scala')
# Allow the deferred_sources_mapping to take place first
round_manager.require_data('deferred_sources')
# Subclasses must implement.
# --------------------------
_language = None
_file_suffix = None
_supports_concurrent_execution = None
@classmethod
def name(cls):
return cls._language
@classmethod
def get_args_default(cls, bootstrap_option_values):
"""Override to set default for --args option.
:param bootstrap_option_values: The values of the "bootstrap options" (e.g., pants_workdir).
Implementations can use these when generating the default.
See src/python/pants/options/options_bootstrapper.py for
details.
"""
return ()
@classmethod
def get_warning_args_default(cls):
"""Override to set default for --warning-args option."""
return ()
@classmethod
def get_no_warning_args_default(cls):
"""Override to set default for --no-warning-args option."""
return ()
@property
def config_section(self):
return self.options_scope
def select(self, target):
return target.has_sources(self._file_suffix)
def create_analysis_tools(self):
"""Returns an AnalysisTools implementation.
Subclasses must implement.
"""
raise NotImplementedError()
def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file):
"""Invoke the compiler.
Must raise TaskError on compile failure.
Subclasses must implement."""
raise NotImplementedError()
# Subclasses may override.
# ------------------------
def extra_compile_time_classpath_elements(self):
"""Extra classpath elements common to all compiler invocations.
E.g., jars for compiler plugins.
These are added at the end of the classpath, after any dependencies, so that if they
overlap with any explicit dependencies, the compiler sees those first. This makes
missing dependency accounting much simpler.
"""
return []
def extra_products(self, target):
"""Any extra, out-of-band resources created for a target.
E.g., targets that produce scala compiler plugins or annotation processor files
produce an info file. The resources will be added to the compile_classpath, and
made available in resources_by_target.
Returns a list of pairs (root, [absolute paths of files under root]).
"""
return []
def __init__(self, *args, **kwargs):
super(JvmCompile, self).__init__(*args, **kwargs)
# JVM options for running the compiler.
self._jvm_options = self.get_options().jvm_options
self._args = list(self.get_options().args)
if self.get_options().warnings:
self._args.extend(self.get_options().warning_args)
else:
self._args.extend(self.get_options().no_warning_args)
self.setup_artifact_cache()
# The ivy confs for which we're building.
self._confs = self.get_options().confs
# The compile strategy to use for analysis and classfile placement.
if self.get_options().strategy == 'global':
strategy_constructor = JvmCompileGlobalStrategy
else:
assert self.get_options().strategy == 'isolated'
strategy_constructor = JvmCompileIsolatedStrategy
self._strategy = strategy_constructor(self.context,
self.get_options(),
self.workdir,
self.create_analysis_tools(),
self._language,
lambda s: s.endswith(self._file_suffix))
def _jvm_fingerprint_strategy(self):
# Use a fingerprint strategy that allows us to also include java/scala versions.
return JvmFingerprintStrategy(self._platform_version_info())
def _platform_version_info(self):
return [self._strategy.name()] + self._language_platform_version_info()
@abstractmethod
def _language_platform_version_info(self):
"""
Provides extra platform information such as java version that will be used
in the fingerprinter. This in turn ensures different platform versions create different
cache artifacts.
Subclasses must override this and return a list of version info.
"""
pass
def pre_execute(self):
# Only create these working dirs during execution phase, otherwise, they
# would be wiped out by clean-all goal/task if it's specified.
self._strategy.pre_compile()
# TODO(John Sirois): Ensuring requested product maps are available - if empty - should probably
# be lifted to Task infra.
# In case we have no relevant targets and return early create the requested product maps.
self._create_empty_products()
def prepare_execute(self, chunks):
targets_in_chunks = list(itertools.chain(*chunks))
# Invoke the strategy's prepare_compile to prune analysis.
cache_manager = self.create_cache_manager(invalidate_dependents=True,
fingerprint_strategy=self._jvm_fingerprint_strategy())
self._strategy.prepare_compile(cache_manager, self.context.targets(), targets_in_chunks)
def execute_chunk(self, relevant_targets):
if not relevant_targets:
return
# Invalidation check. Everything inside the with block must succeed for the
# invalid targets to become valid.
partition_size_hint, locally_changed_targets = self._strategy.invalidation_hints(relevant_targets)
with self.invalidated(relevant_targets,
invalidate_dependents=True,
partition_size_hint=partition_size_hint,
locally_changed_targets=locally_changed_targets,
fingerprint_strategy=self._jvm_fingerprint_strategy(),
topological_order=True) as invalidation_check:
if invalidation_check.invalid_vts:
# Find the invalid targets for this chunk.
invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
# Register products for all the valid targets.
# We register as we go, so dependency checking code can use this data.
valid_targets = list(set(relevant_targets) - set(invalid_targets))
valid_compile_contexts = [self._strategy.compile_context(t) for t in valid_targets]
self._register_vts(valid_compile_contexts)
# Invoke the strategy to execute compilations for invalid targets.
update_artifact_cache_vts_work = (self.get_update_artifact_cache_work
if self.artifact_cache_writes_enabled() else None)
self._strategy.compile_chunk(invalidation_check,
self.context.targets(),
relevant_targets,
invalid_targets,
self.extra_compile_time_classpath_elements(),
self._compile_vts,
self._register_vts,
update_artifact_cache_vts_work)
else:
# Nothing to build. Register products for all the targets in one go.
self._register_vts([self._strategy.compile_context(t) for t in relevant_targets])
def _compile_vts(self, vts, sources, analysis_file, upstream_analysis, classpath, outdir, progress_message):
"""Compiles sources for the given vts into the given output dir.
vts - versioned target set
sources - sources for this target set
analysis_file - the analysis file to manipulate
classpath - a list of classpath entries
outdir - the output dir to send classes to
May be invoked concurrently on independent target sets.
Postcondition: The individual targets in vts are up-to-date, as if each were
compiled individually.
"""
if not sources:
self.context.log.warn('Skipping {} compile for targets with no sources:\n {}'
.format(self.name(), vts.targets))
else:
# Do some reporting.
self.context.log.info(
'Compiling ',
items_to_report_element(sources, '{} source'.format(self.name())),
' in ',
items_to_report_element([t.address.reference() for t in vts.targets], 'target'),
' (',
progress_message,
').')
with self.context.new_workunit('compile'):
# The compiler may delete classfiles, then later exit on a compilation error. Then if the
# change triggering the error is reverted, we won't rebuild to restore the missing
# classfiles. So we force-invalidate here, to be on the safe side.
vts.force_invalidate()
self.compile(self._args, classpath, sources, outdir, upstream_analysis, analysis_file)
def check_artifact_cache(self, vts):
post_process_cached_vts = lambda vts: self._strategy.post_process_cached_vts(vts)
return self.do_check_artifact_cache(vts, post_process_cached_vts=post_process_cached_vts)
def _create_empty_products(self):
make_products = lambda: defaultdict(MultipleRootedProducts)
if self.context.products.is_required_data('classes_by_source'):
self.context.products.safe_create_data('classes_by_source', make_products)
# Whether or not anything else requires resources_by_target, this task
# uses it internally.
self.context.products.safe_create_data('resources_by_target', make_products)
# JvmDependencyAnalyzer uses classes_by_target within this run
self.context.products.safe_create_data('classes_by_target', make_products)
def _register_vts(self, compile_contexts):
classes_by_source = self.context.products.get_data('classes_by_source')
classes_by_target = self.context.products.get_data('classes_by_target')
compile_classpath = self.context.products.get_data('compile_classpath')
resources_by_target = self.context.products.get_data('resources_by_target')
# Register class products.
if classes_by_source is not None or classes_by_target is not None:
computed_classes_by_source_by_context = self._strategy.compute_classes_by_source(
compile_contexts)
resource_mapping = self._strategy.compute_resource_mapping(compile_contexts)
for compile_context in compile_contexts:
computed_classes_by_source = computed_classes_by_source_by_context[compile_context]
target = compile_context.target
classes_dir = compile_context.classes_dir
target_products = classes_by_target[target] if classes_by_target is not None else None
for source in compile_context.sources: # Sources are relative to buildroot.
classes = computed_classes_by_source.get(source, []) # Classes are absolute paths.
for cls in classes:
clsname = self._strategy.class_name_for_class_file(compile_context, cls)
resources = resource_mapping.get(clsname, [])
resources_by_target[target].add_abs_paths(classes_dir, resources)
if classes_by_target is not None:
target_products.add_abs_paths(classes_dir, classes)
if classes_by_source is not None:
classes_by_source[source].add_abs_paths(classes_dir, classes)
# Register resource products.
for compile_context in compile_contexts:
extra_resources = self.extra_products(compile_context.target)
# Add to resources_by_target (if it was requested).
if resources_by_target is not None:
target_resources = resources_by_target[compile_context.target]
for root, abs_paths in extra_resources:
target_resources.add_abs_paths(root, abs_paths)
# And to the compile_classpath, to make them available within the next round.
# TODO(stuhood): This is redundant with resources_by_target, but resources_by_target
# are not available during compilation. https://github.com/pantsbuild/pants/issues/206
entries = [(conf, root) for conf in self._confs for root, _ in extra_resources]
compile_classpath.add_for_target(compile_context.target, entries)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and helper functions for creating Stochastic Tensors.
`StochasticTensor` objects wrap `Distribution` objects. Their
values may be samples from the underlying distribution, or the distribution
mean (as governed by `value_type`). These objects provide a `loss`
method for use when sampling from a non-reparameterized distribution.
The `loss`method is used in conjunction with `stochastic_graph.surrogate_loss`
to produce a single differentiable loss in stochastic graphs having
both continuous and discrete stochastic nodes.
## Stochastic Tensor Classes
@@BaseStochasticTensor
@@StochasticTensor
## Stochastic Tensor Value Types
@@MeanValue
@@SampleValue
@@SampleAndReshapeValue
@@value_type
@@get_current_value_type
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import threading
import six
from tensorflow.contrib import distributions
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators as sge
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
STOCHASTIC_TENSOR_COLLECTION = "_stochastic_tensor_collection_"
@six.add_metaclass(abc.ABCMeta)
class BaseStochasticTensor(object):
  """Base Class for Tensor-like objects that emit stochastic values."""
  def __init__(self):
    # Add self to this graph's Stochastic Tensor collection for
    # purposes of later performing correct surrogate loss calculation.
    ops.add_to_collection(STOCHASTIC_TENSOR_COLLECTION, self)
  @abc.abstractproperty
  def name(self):
    # Name of this stochastic tensor; subclasses must provide.
    pass
  @abc.abstractproperty
  def dtype(self):
    # Element dtype of the emitted value; subclasses must provide.
    pass
  @abc.abstractproperty
  def graph(self):
    # Graph the emitted value belongs to; subclasses must provide.
    pass
  @abc.abstractmethod
  def value(self, name=None):
    # Return the (stochastic) value Tensor; subclasses must provide.
    pass
  @abc.abstractmethod
  def loss(self, sample_loss):
    """Returns the term to add to the surrogate loss.
    This method is called by `surrogate_loss`. The input `sample_loss` should
    have already had `stop_gradient` applied to it. This is because the
    surrogate_loss usually provides a Monte Carlo sample term of the form
    `differentiable_surrogate * sample_loss` where `sample_loss` is considered
    constant with respect to the input for purposes of the gradient.
    Args:
      sample_loss: `Tensor`, sample loss downstream of this `StochasticTensor`.
    Returns:
      Either `None` or a `Tensor`.
    """
    raise NotImplementedError("surrogate_loss not implemented")
  @staticmethod
  def _tensor_conversion_function(v, dtype=None, name=None, as_ref=False):
    # Conversion hook so ops can consume a BaseStochasticTensor as a Tensor;
    # registered below via ops.register_tensor_conversion_function.
    _ = name
    if dtype and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      raise ValueError("%s: Ref type is not supported." % v)
    return v.value()
# Let TF ops accept BaseStochasticTensor instances directly: conversion
# delegates to the instance's value() via _tensor_conversion_function.
# pylint: disable=protected-access
ops.register_tensor_conversion_function(
    BaseStochasticTensor, BaseStochasticTensor._tensor_conversion_function)
# pylint: enable=protected-access
class _StochasticValueType(object):
  """Interface for the ValueType classes.
  This is the base class for MeanValue, SampleValue, SampleAndReshapeValue,
  and their descendants.
  """
  def pushed_above(self, unused_value_type):
    # Hook: called by value_type() when another value type is pushed above
    # this one on the per-thread stack. Default is a no-op.
    pass
  def popped_above(self, unused_value_type):
    # Hook: called by value_type() when the value type above this one is
    # popped off the per-thread stack. Default is a no-op.
    pass
  def declare_inputs(self, unused_stochastic_tensor, unused_inputs_dict):
    # Hook for subclasses; not invoked anywhere in this module.
    pass
  @abc.abstractproperty
  def stop_gradient(self):
    """Whether the value should be wrapped in stop_gradient.
    StochasticTensors must respect this property.
    """
    pass
class MeanValue(_StochasticValueType):
  """Use the distribution's mean as the StochasticTensor's value."""
  def __init__(self, stop_gradient=False):
    # stop_gradient: if True, the value is wrapped in stop_gradient to
    # block backpropagation through it.
    self._stop_gradient = stop_gradient
  @property
  def stop_gradient(self):
    return self._stop_gradient
class SampleValue(_StochasticValueType):
  """Draw n samples along a new outer dimension.
  This ValueType draws `n` samples from StochasticTensors run within its
  context, increasing the rank by one along a new outer dimension.
  Example:
  ```python
  mu = tf.zeros((2,3))
  sigma = tf.ones((2, 3))
  with sg.value_type(sg.SampleValue(n=4)):
    st = sg.StochasticTensor(
      distributions.Normal, mu=mu, sigma=sigma)
  # draws 4 samples each with shape (2, 3) and concatenates
  assertEqual(st.value().get_shape(), (4, 2, 3))
  ```
  """
  def __init__(self, n=1, stop_gradient=False):
    """Sample `n` times and concatenate along a new outer dimension.
    Args:
      n: A python integer or int32 tensor. The number of samples to take.
      stop_gradient: If `True`, StochasticTensors' values are wrapped in
        `stop_gradient`, to avoid backpropagation through.
    """
    self._n = n
    self._stop_gradient = stop_gradient
  @property
  def n(self):
    # Number of samples to draw (python int or int32 tensor).
    return self._n
  @property
  def stop_gradient(self):
    return self._stop_gradient
class SampleAndReshapeValue(_StochasticValueType):
  """Ask the StochasticTensor for n samples and reshape the result.
  Sampling from a StochasticTensor increases the rank of the value by 1
  (because each sample represents a new outer dimension).
  This ValueType requests `n` samples from StochasticTensors run within its
  context that the outer two dimensions are reshaped to intermix the samples
  with the outermost (usually batch) dimension.
  Example:
  ```python
  # mu and sigma are both shaped (2, 3)
  mu = [[0.0, -1.0, 1.0], [0.0, -1.0, 1.0]]
  sigma = tf.constant([[1.1, 1.2, 1.3], [1.1, 1.2, 1.3]])
  with sg.value_type(sg.SampleAndReshapeValue(n=2)):
    st = sg.StochasticTensor(
        distributions.Normal, mu=mu, sigma=sigma)
  # sample(2) creates a (2, 2, 3) tensor, and the two outermost dimensions
  # are reshaped into one: the final value is a (4, 3) tensor.
  st_value = st.value()
  assertEqual(st_value.get_shape(), (4, 3))
  st_value_val = sess.run([st_value])[0]  # or e.g. run([tf.identity(st)])[0]
  assertEqual(st_value_val.shape, (4, 3))
  ```
  """
  def __init__(self, n=1, stop_gradient=False):
    """Sample `n` times and reshape the outer 2 axes so rank does not change.
    Args:
      n: A python integer or int32 tensor. The number of samples to take.
      stop_gradient: If `True`, StochasticTensors' values are wrapped in
        `stop_gradient`, to avoid backpropagation through.
    """
    self._n = n
    self._stop_gradient = stop_gradient
  @property
  def n(self):
    # Number of samples to draw before reshaping (python int or int32 tensor).
    return self._n
  @property
  def stop_gradient(self):
    return self._stop_gradient
# Keeps track of how a StochasticTensor's value should be accessed.
# Used by value_type and get_current_value_type below.
# Maps thread ident -> stack of _StochasticValueType (innermost context last).
_STOCHASTIC_VALUE_STACK = collections.defaultdict(list)
@contextlib.contextmanager
def value_type(dist_value_type):
  """Creates a value type context for any StochasticTensor created within.
  Typical usage:
  ```
  with sg.value_type(sg.MeanValue(stop_gradient=True)):
    st = sg.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)
  ```
  In the example above, `st.value()` (or equivalently, `tf.identity(st)`) will
  be the mean value of the Normal distribution, i.e., `mu` (possibly
  broadcasted to the shape of `sigma`). Furthermore, because the `MeanValue`
  was marked with `stop_gradient=True`, this value will have been wrapped
  in a `stop_gradients` call to disable any possible backpropagation.
  Args:
    dist_value_type: An instance of `MeanValue`, `SampleAndReshapeValue`, or
      any other stochastic value type.
  Yields:
    A context for `StochasticTensor` objects that controls the
    value created when they are initialized.
  Raises:
    TypeError: if `dist_value_type` is not an instance of a stochastic value
      type.
  """
  if not isinstance(dist_value_type, _StochasticValueType):
    raise TypeError("dist_value_type must be a Distribution Value Type")
  thread_id = threading.current_thread().ident
  stack = _STOCHASTIC_VALUE_STACK[thread_id]
  if stack:
    # Notify the enclosing value type that a new one is being pushed above it.
    stack[-1].pushed_above(dist_value_type)
  stack.append(dist_value_type)
  try:
    yield
  finally:
    # Unwind even if the body raised. Previously an exception in the body
    # skipped the pop, leaving a stale value type on the per-thread stack and
    # corrupting every subsequent get_current_value_type() on this thread.
    stack.pop()
    if stack:
      stack[-1].popped_above(dist_value_type)
class NoValueTypeSetError(ValueError):
  # Raised by get_current_value_type() when no value_type() context is active
  # on the calling thread.
  pass
def get_current_value_type():
  """Return the innermost value type pushed via `value_type` on this thread.
  Raises:
    NoValueTypeSetError: if no `value_type` context is active for the
      calling thread.
  """
  thread_id = threading.current_thread().ident
  stack = _STOCHASTIC_VALUE_STACK[thread_id]
  if not stack:
    raise NoValueTypeSetError(
        "No value type currently set for this thread (%s). Did you forget to "
        "wrap 'with stochastic_graph.value_type(...)'?" % thread_id)
  return stack[-1]
class StochasticTensor(BaseStochasticTensor):
  """StochasticTensor is a BaseStochasticTensor backed by a distribution."""
  def __init__(self,
               dist,
               name="StochasticTensor",
               dist_value_type=None,
               loss_fn=sge.score_function):
    """Construct a `StochasticTensor`.
    `StochasticTensor` is backed by the `dist` distribution and its `value`
    method will return the same value each time it is called. What `value` is
    returned is controlled by the `dist_value_type` (defaults to
    `SampleAndReshapeValue`).
    Some distributions' sample functions are not differentiable (e.g. a sample
    from a discrete distribution like a Bernoulli) and so to differentiate
    wrt parameters upstream of the sample requires a gradient estimator like
    the score function estimator. This is accomplished by passing a
    differentiable `loss_fn` to the `StochasticTensor`, which
    defaults to a function whose derivative is the score function estimator.
    Calling `stochastic_graph.surrogate_loss(final_losses)` will call
    `loss()` on every `StochasticTensor` upstream of final losses.
    `loss()` will return None for `StochasticTensor`s backed by
    reparameterized distributions; it will also return None if the value type is
    `MeanValueType` or if `loss_fn=None`.
    Args:
      dist: an instance of `Distribution`.
      name: a name for this `StochasticTensor` and its ops.
      dist_value_type: a `_StochasticValueType`, which will determine what the
          `value` of this `StochasticTensor` will be. If not provided, the
          value type set with the `value_type` context manager will be used.
      loss_fn: callable that takes
          `(st, st.value(), influenced_loss)`, where
          `st` is this `StochasticTensor`, and returns a `Tensor` loss. By
          default, `loss_fn` is the `score_function`, or more precisely, the
          integral of the score function, such that when the gradient is taken,
          the score function results. See the `stochastic_gradient_estimators`
          module for additional loss functions and baselines.
    Raises:
      TypeError: if `dist` is not an instance of `Distribution`.
      TypeError: if `loss_fn` is not `callable`.
    """
    if not isinstance(dist, distributions.Distribution):
      raise TypeError("dist must be an instance of Distribution")
    if dist_value_type is None:
      # No explicit value type: use the innermost value_type() context, or
      # fall back to SampleAndReshapeValue when none is active.
      try:
        self._value_type = get_current_value_type()
      except NoValueTypeSetError:
        self._value_type = SampleAndReshapeValue()
    else:
      # We want to enforce a value type here, but use the value_type()
      # context manager to enforce some error checking.
      with value_type(dist_value_type):
        self._value_type = get_current_value_type()
    if loss_fn is not None and not callable(loss_fn):
      raise TypeError("loss_fn must be callable")
    self._loss_fn = loss_fn
    with ops.name_scope(name) as scope:
      self._name = scope
      self._dist = dist
      # The value is created once, here; value() returns this same Tensor.
      self._value = self._create_value()
    # Registers self in the graph's stochastic-tensor collection.
    super(StochasticTensor, self).__init__()
  @property
  def value_type(self):
    # The _StochasticValueType governing how the value was drawn.
    return self._value_type
  @property
  def distribution(self):
    # The backing Distribution instance.
    return self._dist
  def _create_value(self):
    """Create the value Tensor based on the value type, store as self._value."""
    if isinstance(self._value_type, MeanValue):
      value_tensor = self._dist.mean()
    elif isinstance(self._value_type, SampleValue):
      value_tensor = self._dist.sample(self._value_type.n)
    elif isinstance(self._value_type, SampleAndReshapeValue):
      if self._value_type.n == 1:
        value_tensor = self._dist.sample()
      else:
        # Draw n samples (new outer axis), then merge the sample axis into
        # the batch axis so the rank matches a single sample.
        samples = self._dist.sample(self._value_type.n)
        samples_shape = array_ops.shape(samples)
        samples_static_shape = samples.get_shape()
        new_batch_size = samples_shape[0] * samples_shape[1]
        value_tensor = array_ops.reshape(
            samples, array_ops.concat(0, ([new_batch_size], samples_shape[2:])))
        if samples_static_shape.ndims is not None:
          # Update the static shape for shape inference purposes
          shape_list = samples_static_shape.as_list()
          new_shape = tensor_shape.vector(
              shape_list[0] * shape_list[1]
              if shape_list[0] is not None and shape_list[1] is not None
              else None)
          new_shape = new_shape.concatenate(samples_static_shape[2:])
          value_tensor.set_shape(new_shape)
    else:
      raise TypeError(
          "Unrecognized Distribution Value Type: %s", self._value_type)
    if self._value_type.stop_gradient:
      # stop_gradient is being enforced by the value type
      return array_ops.stop_gradient(value_tensor)
    if isinstance(self._value_type, MeanValue):
      return value_tensor  # Using pathwise-derivative for this one.
    if self._dist.is_continuous and self._dist.is_reparameterized:
      return value_tensor  # Using pathwise-derivative for this one.
    else:
      # Will have to perform some variant of score function
      # estimation. Call stop_gradient on the sampler just in case we
      # may accidentally leak some gradient from it.
      return array_ops.stop_gradient(value_tensor)
  @property
  def name(self):
    return self._name
  @property
  def graph(self):
    return self._value.graph
  @property
  def dtype(self):
    return self._dist.dtype
  def entropy(self, name="entropy"):
    # Delegates to the backing distribution.
    return self._dist.entropy(name=name)
  def mean(self, name="mean"):
    # Delegates to the backing distribution.
    return self._dist.mean(name=name)
  def value(self, name="value"):
    # Returns the Tensor created once in __init__; `name` is unused here.
    return self._value
  def loss(self, final_loss, name="Loss"):
    # Return a loss based on final_loss and the distribution. Returns
    # None if pathwise derivatives are supported, if the loss_fn
    # was explicitly set to None, or if the value type is MeanValue.
    if self._loss_fn is None:
      return None
    if (self._dist.is_continuous and self._dist.is_reparameterized and
        not self._value_type.stop_gradient):
      # Can perform pathwise-derivative on this one; no additional loss needed.
      return None
    with ops.name_scope(self.name, values=[final_loss]):
      with ops.name_scope(name):
        if (self._value_type.stop_gradient or
            isinstance(self._value_type, SampleAndReshapeValue) or
            isinstance(self._value_type, SampleValue)):
          return self._loss_fn(self, self._value, final_loss)
        elif isinstance(self._value_type, MeanValue):
          return None  # MeanValue generally provides its own gradient
        else:
          raise TypeError("Unrecognized Distribution Value Type: %s",
                          self._value_type)
class ObservedStochasticTensor(StochasticTensor):
  """A StochasticTensor with an observed value."""
  # pylint: disable=super-init-not-called
  def __init__(self, dist, value, name=None):
    """Construct an `ObservedStochasticTensor`.
    `ObservedStochasticTensor` is backed by distribution `dist` and uses the
    provided value instead of using the current value type to draw a value from
    the distribution. The provided value argument must be appropriately shaped
    to have come from the distribution.
    Args:
      dist: an instance of `Distribution`.
      value: a Tensor containing the observed value
      name: a name for this `ObservedStochasticTensor` and its ops.
    Raises:
      TypeError: if `dist` is not an instance of `Distribution`.
      ValueError: if `value` is not compatible with the distribution.
    """
    if not isinstance(dist, distributions.Distribution):
      raise TypeError("dist must be an instance of Distribution")
    with ops.name_scope(name, "ObservedStochasticTensor", [value]) as scope:
      self._name = scope
      self._dist = dist
      dist_shape = self._dist.get_batch_shape().concatenate(
          self._dist.get_event_shape())
      value = ops.convert_to_tensor(value)
      value_shape = value.get_shape()
      if not value_shape.is_compatible_with(dist_shape):
        if value_shape.ndims < dist_shape.ndims:
          raise ValueError(
              "Rank of observed value (%d) must be >= rank of a sample from the"
              " distribution (%d)." % (value_shape.ndims, dist_shape.ndims))
        # Allow extra leading (sample) dimensions; only the trailing dims must
        # match the distribution's batch+event shape.
        sample_shape = value_shape[(value_shape.ndims - dist_shape.ndims):]
        if not sample_shape.is_compatible_with(dist_shape):
          raise ValueError(
              "Shape of observed value %s is incompatible with the shape of a "
              "sample from the distribution %s." % (value_shape, dist_shape))
      if value.dtype != self._dist.dtype:
        raise ValueError("Type of observed value (%s) does not match type of "
                         "distribution (%s)." % (value.dtype, self._dist.dtype))
      self._value = array_ops.identity(value)
    # Skip StochasticTensor.__init__ (no value needs to be drawn); register
    # directly in the graph collection via the base class.
    # pylint: disable=non-parent-init-called
    BaseStochasticTensor.__init__(self)
  def loss(self, final_loss, name=None):
    # Observed values contribute no surrogate-loss term.
    return None
# Public API of this module.
__all__ = [
    "BaseStochasticTensor",
    "StochasticTensor",
    "ObservedStochasticTensor",
    "MeanValue",
    "SampleValue",
    "SampleAndReshapeValue",
    "value_type",
    "get_current_value_type",
]
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
logger = logging.getLogger(__name__)
# Databases holding the device's lock-screen configuration; the alternate
# path is a fallback location (varies by Android version).
_LOCK_SCREEN_SETTINGS_PATH = '/data/system/locksettings.db'
_ALTERNATE_LOCK_SCREEN_SETTINGS_PATH = (
    '/data/data/com.android.providers.settings/databases/settings.db')
PASSWORD_QUALITY_UNSPECIFIED = '0'
_COMPATIBLE_BUILD_TYPES = ['userdebug', 'eng']
# Each settings list below is a sequence of (content-provider table,
# [(key, value), ...]) pairs, applied in order via `adb shell content`.
ENABLE_LOCATION_SETTINGS = [
    # Note that setting these in this order is required in order for all of
    # them to take and stick through a reboot.
    ('com.google.settings/partner', [
        ('use_location_for_services', 1),
    ]),
    (
        'settings/secure',
        [
            # Ensure Geolocation is enabled and allowed for tests.
            ('location_providers_allowed', 'gps,network'),
        ]),
    ('com.google.settings/partner', [
        ('network_location_opt_in', 1),
    ])
]
DISABLE_LOCATION_SETTINGS = [
    ('com.google.settings/partner', [
        ('use_location_for_services', 0),
    ]),
    (
        'settings/secure',
        [
            # Ensure Geolocation is disabled.
            ('location_providers_allowed', ''),
        ]),
]
ENABLE_MOCK_LOCATION_SETTINGS = [
    ('settings/secure', [
        ('mock_location', 1),
    ]),
]
DISABLE_MOCK_LOCATION_SETTINGS = [
    ('settings/secure', [
        ('mock_location', 0),
    ]),
]
DETERMINISTIC_DEVICE_SETTINGS = [
    (
        'settings/global',
        [
            ('assisted_gps_enabled', 0),
            # Disable "auto time" and "auto time zone" to avoid network-provided
            # time to overwrite the device's datetime and timezone synchronized
            # from host when running tests later. See b/6569849.
            ('auto_time', 0),
            ('auto_time_zone', 0),
            ('development_settings_enabled', 1),
            # Flag for allowing ActivityManagerService to send ACTION_APP_ERROR
            # intents on application crashes and ANRs. If this is disabled, the
            # crash/ANR dialog will never display the "Report" button.
            # Type: int ( 0 = disallow, 1 = allow )
            ('send_action_app_error', 0),
            ('stay_on_while_plugged_in', 3),
            ('verifier_verify_adb_installs', 0),
            ('window_animation_scale', 0),
        ]),
    (
        'settings/secure',
        [
            ('allowed_geolocation_origins',
             'http://www.google.co.uk http://www.google.com'),
            # Ensure that we never get random dialogs like "Unfortunately the
            # process android.process.acore has stopped", which steal the focus,
            # and make our automation fail (because the dialog steals the focus
            # then mistakenly receives the injected user input events).
            ('anr_show_background', 0),
            ('lockscreen.disabled', 1),
            ('screensaver_enabled', 0),
            ('skip_first_use_hints', 1),
        ]),
    (
        'settings/system',
        [
            # Don't want devices to accidentally rotate the screen as that could
            # affect performance measurements.
            ('accelerometer_rotation', 0),
            ('lockscreen.disabled', 1),
            # Turn down brightness and disable auto-adjust so that devices run
            # cooler.
            ('screen_brightness', 5),
            ('screen_brightness_mode', 0),
            ('user_rotation', 0),
            ('window_animation_scale', 0),
        ]),
]
NETWORK_DISABLED_SETTINGS = [
    ('settings/global', [
        ('airplane_mode_on', 1),
        ('wifi_on', 0),
    ]),
]
class ContentSettings(dict):

  """A dict interface to interact with device content settings.

  System properties are key/value pairs as exposed by adb shell content.
  """

  def __init__(self, table, device):
    """Args:
      table: Content-provider table to operate on, e.g. 'settings/secure'.
      device: Object exposing RunShellCommand (presumably a DeviceUtils
        instance — confirm against callers).
    """
    super(ContentSettings, self).__init__()
    self._table = table
    self._device = device

  @staticmethod
  def _GetTypeBinding(value):
    """Maps a Python value to the type code used by `content --bind`.

    Raises:
      ValueError: for types that cannot be bound.
    """
    # NOTE: the `long` check makes this Python 2 only.
    if isinstance(value, bool):
      return 'b'
    if isinstance(value, float):
      return 'f'
    if isinstance(value, int):
      return 'i'
    if isinstance(value, long):
      return 'l'
    if isinstance(value, str):
      return 's'
    raise ValueError('Unsupported type %s' % type(value))

  def iteritems(self):
    """Yields (name, value) pairs by querying every row of the table."""
    for row in self._device.RunShellCommand(
        ['content', 'query', '--uri',
         'content://%s' % self._table],
        check_return=True,
        as_root=True):
      key, value = _ParseContentRow(row)
      if not key:
        # Skip output lines that are not setting rows (e.g. status text).
        continue
      yield key, value

  def __getitem__(self, key):
    """Queries a single setting by name; raises KeyError if missing."""
    query_row = self._device.RunShellCommand([
        'content', 'query', '--uri',
        'content://%s' % self._table, '--where',
        "name='%s'" % key
    ],
                                             check_return=True,
                                             as_root=True,
                                             single_line=True)
    parsed_key, parsed_value = _ParseContentRow(query_row)
    if parsed_key is None:
      raise KeyError('key=%s not found' % key)
    if parsed_key != key:
      raise KeyError('Expected key=%s, but got key=%s' % (key, parsed_key))
    return parsed_value

  def __setitem__(self, key, value):
    """Updates an existing setting or inserts a new one.

    NOTE(review): `key in self` resolves to dict.__contains__, i.e. the
    built-in dict storage — which this class never populates — rather than
    __getitem__. It therefore looks like the insert branch is always taken;
    confirm whether `content insert` overwrites existing settings on the
    targeted providers.
    """
    if key in self:
      self._device.RunShellCommand([
          'content', 'update', '--uri',
          'content://%s' % self._table, '--bind',
          'value:%s:%s' % (self._GetTypeBinding(value), value), '--where',
          "name='%s'" % key
      ],
                                   check_return=True,
                                   as_root=True)
    else:
      self._device.RunShellCommand([
          'content', 'insert', '--uri',
          'content://%s' % self._table, '--bind',
          'name:%s:%s' % (self._GetTypeBinding(key), key), '--bind',
          'value:%s:%s' % (self._GetTypeBinding(value), value)
      ],
                                   check_return=True,
                                   as_root=True)

  def __delitem__(self, key):
    """Deletes a setting row by name."""
    self._device.RunShellCommand([
        'content', 'delete', '--uri',
        'content://%s' % self._table, '--bind',
        'name:%s:%s' % (self._GetTypeBinding(key), key)
    ],
                                 check_return=True,
                                 as_root=True)
def ConfigureContentSettings(device, desired_settings):
  """Configures device content settings from a list.

  Many settings are documented at:
    http://developer.android.com/reference/android/provider/Settings.Global.html
    http://developer.android.com/reference/android/provider/Settings.Secure.html
    http://developer.android.com/reference/android/provider/Settings.System.html
  Many others are undocumented.

  Args:
    device: A DeviceUtils instance for the device to configure.
    desired_settings: A list of (table, [(key: value), ...]) for all
        settings to configure.
  """
  for table, entries in desired_settings:
    table_settings = ContentSettings(table, device)
    # Write each desired value into the provider table.
    for name, new_value in entries:
      table_settings[name] = new_value
    # Log the resulting state of the whole table for debugging.
    divider = '-' * (80 - len(table))
    logger.info('\n%s %s', table, divider)
    for name, stored_value in sorted(table_settings.iteritems()):
      logger.info('\t%s: %s', name, stored_value)
def SetLockScreenSettings(device):
  """Sets lock screen settings on the device.

  On certain device/Android configurations we need to disable the lock screen in
  a different database. Additionally, the password type must be set to
  DevicePolicyManager.PASSWORD_QUALITY_UNSPECIFIED.
  Lock screen settings are stored in sqlite on the device in:
      /data/system/locksettings.db

  IMPORTANT: The first column is used as a primary key so that all rows with the
  same value for that column are removed from the table prior to inserting the
  new values.

  Args:
    device: A DeviceUtils instance for the device to configure.

  Raises:
    Exception if the setting was not properly set.
  """
  # Writing the settings database requires root, which is only available on
  # compatible (debuggable) builds.
  if device.build_type not in _COMPATIBLE_BUILD_TYPES:
    logger.warning('Unable to disable lockscreen on %s builds.',
                   device.build_type)
    return

  def get_lock_settings(table):
    # (table, key, value) triples that disable the lockscreen and clear the
    # required password quality in the given table.
    return [(table, 'lockscreen.disabled', '1'),
            (table, 'lockscreen.password_type', PASSWORD_QUALITY_UNSPECIFIED),
            (table, 'lockscreen.password_type_alternate',
             PASSWORD_QUALITY_UNSPECIFIED)]

  if device.FileExists(_LOCK_SCREEN_SETTINGS_PATH):
    # Layout with a single 'locksettings' table keyed by (name, user).
    db = _LOCK_SCREEN_SETTINGS_PATH
    locksettings = get_lock_settings('locksettings')
    columns = ['name', 'user', 'value']
    # User '0' is the default (primary) Android user.
    generate_values = lambda k, v: [k, '0', v]
  elif device.FileExists(_ALTERNATE_LOCK_SCREEN_SETTINGS_PATH):
    # Alternate layout: settings split across 'secure' and 'system' tables.
    db = _ALTERNATE_LOCK_SCREEN_SETTINGS_PATH
    locksettings = get_lock_settings('secure') + get_lock_settings('system')
    columns = ['name', 'value']
    generate_values = lambda k, v: [k, v]
  else:
    logger.warning('Unable to find database file to set lock screen settings.')
    return

  for table, key, value in locksettings:
    # Set the lockscreen setting for default user '0'
    values = generate_values(key, value)

    # Delete-then-insert inside one transaction so the first column behaves
    # as a primary key (see the IMPORTANT note in the docstring).
    cmd = """begin transaction;
delete from '%(table)s' where %(primary_key)s='%(primary_value)s';
insert into '%(table)s' (%(columns)s) values (%(values)s);
commit transaction;""" % {
        'table': table,
        'primary_key': columns[0],
        'primary_value': values[0],
        'columns': ', '.join(columns),
        'values': ', '.join(["'%s'" % value for value in values])
    }
    output_msg = device.RunShellCommand(['sqlite3', db, cmd],
                                        check_return=True,
                                        as_root=True)
    if output_msg:
      logger.info(' '.join(output_msg))
def _ParseContentRow(row):
"""Parse key, value entries from a row string."""
# Example row:
# 'Row: 0 _id=13, name=logging_id2, value=-1fccbaa546705b05'
fields = row.split(', ')
key = None
value = ''
for field in fields:
k, _, v = field.partition('=')
if k == 'name':
key = v
elif k == 'value':
value = v
return key, value
|
|
"""
The MIT License
Copyright (c) 2009 Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import unittest
import oauth2 as oauth
import random
import time
import urllib
import urlparse
from types import ListType
import mox
import httplib2
# Fix for python2.5 compatibility
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), ".."),]
class TestError(unittest.TestCase):
    """Tests for the default and custom messages of oauth.Error."""

    def test_message(self):
        """Error carries a default message unless one is supplied."""
        try:
            raise oauth.Error
        except oauth.Error, e:  # Python 2 except syntax.
            self.assertEqual(e.message, 'OAuth error occurred.')
        msg = 'OMG THINGS BROKE!!!!'
        try:
            raise oauth.Error(msg)
        except oauth.Error, e:
            self.assertEqual(e.message, msg)

    def test_str(self):
        """str() of an Error yields its message."""
        try:
            raise oauth.Error
        except oauth.Error, e:
            self.assertEquals(str(e), 'OAuth error occurred.')
class TestGenerateFunctions(unittest.TestCase):
    """Tests for the module-level helper functions in oauth2."""

    def test_build_auth_header(self):
        """build_authenticate_header() yields exactly one WWW-Authenticate
        entry, embedding the realm when one is given."""
        header = oauth.build_authenticate_header()
        self.assertEqual(header['WWW-Authenticate'], 'OAuth realm=""')
        self.assertEqual(len(header), 1)
        realm = 'http://example.myrealm.com/'
        header = oauth.build_authenticate_header(realm)
        self.assertEqual(header['WWW-Authenticate'], 'OAuth realm="%s"' %
                         realm)
        self.assertEqual(len(header), 1)

    def test_build_xoauth_string(self):
        """build_xoauth_string() produces 'GET <url> <comma-joined params>'
        including the token, consumer key and a signature."""
        consumer = oauth.Consumer('consumer_token', 'consumer_secret')
        token = oauth.Token('user_token', 'user_secret')
        url = "https://mail.google.com/mail/b/joe@example.com/imap/"
        xoauth_string = oauth.build_xoauth_string(url, consumer, token)
        method, oauth_url, oauth_string = xoauth_string.split(' ')
        self.assertEqual("GET", method)
        self.assertEqual(url, oauth_url)
        returned = {}
        parts = oauth_string.split(',')
        for part in parts:
            var, val = part.split('=')
            returned[var] = val.strip('"')
        self.assertEquals('HMAC-SHA1', returned['oauth_signature_method'])
        self.assertEquals('user_token', returned['oauth_token'])
        self.assertEquals('consumer_token', returned['oauth_consumer_key'])
        self.assertTrue('oauth_signature' in returned, 'oauth_signature')

    def test_escape(self):
        """escape() keeps '~' unescaped but percent-encodes '/'."""
        string = 'http://whatever.com/~someuser/?test=test&other=other'
        self.assert_('~' in oauth.escape(string))
        string = '../../../../../../../etc/passwd'
        self.assert_('../' not in oauth.escape(string))

    def test_gen_nonce(self):
        """generate_nonce() honors the default (8) and explicit lengths."""
        nonce = oauth.generate_nonce()
        self.assertEqual(len(nonce), 8)
        nonce = oauth.generate_nonce(20)
        self.assertEqual(len(nonce), 20)

    def test_gen_verifier(self):
        """generate_verifier() honors the default (8) and explicit lengths."""
        verifier = oauth.generate_verifier()
        self.assertEqual(len(verifier), 8)
        verifier = oauth.generate_verifier(16)
        self.assertEqual(len(verifier), 16)

    def test_gen_timestamp(self):
        """generate_timestamp() returns the current UNIX time as an int."""
        exp = int(time.time())
        now = oauth.generate_timestamp()
        self.assertEqual(exp, now)
class TestConsumer(unittest.TestCase):
    """Tests for oauth.Consumer construction and serialization."""

    def setUp(self):
        self.key = 'my-key'
        self.secret = 'my-secret'
        self.consumer = oauth.Consumer(key=self.key, secret=self.secret)

    def test_init(self):
        """The key/secret passed in are stored unchanged."""
        self.assertEqual(self.consumer.key, self.key)
        self.assertEqual(self.consumer.secret, self.secret)

    def test_basic(self):
        """Consumer requires both a key and a secret."""
        self.assertRaises(ValueError, lambda: oauth.Consumer(None, None))
        self.assertRaises(ValueError, lambda: oauth.Consumer('asf', None))
        self.assertRaises(ValueError, lambda: oauth.Consumer(None, 'dasf'))

    def test_str(self):
        """str(Consumer) is a query string holding both key and secret."""
        res = dict(parse_qsl(str(self.consumer)))
        self.assertTrue('oauth_consumer_key' in res)
        self.assertTrue('oauth_consumer_secret' in res)
        self.assertEquals(res['oauth_consumer_key'], self.consumer.key)
        self.assertEquals(res['oauth_consumer_secret'], self.consumer.secret)
class TestToken(unittest.TestCase):
    """Tests for oauth.Token: construction, callback/verifier handling and
    (de)serialization via to_string()/from_string()."""

    def setUp(self):
        self.key = 'my-key'
        self.secret = 'my-secret'
        self.token = oauth.Token(self.key, self.secret)

    def test_basic(self):
        """Token requires both a key and a secret."""
        self.assertRaises(ValueError, lambda: oauth.Token(None, None))
        self.assertRaises(ValueError, lambda: oauth.Token('asf', None))
        self.assertRaises(ValueError, lambda: oauth.Token(None, 'dasf'))

    def test_init(self):
        """A fresh token stores key/secret and has no callback/verifier."""
        self.assertEqual(self.token.key, self.key)
        self.assertEqual(self.token.secret, self.secret)
        self.assertEqual(self.token.callback, None)
        self.assertEqual(self.token.callback_confirmed, None)
        self.assertEqual(self.token.verifier, None)

    def test_set_callback(self):
        """set_callback() stores the URL and marks it confirmed."""
        self.assertEqual(self.token.callback, None)
        self.assertEqual(self.token.callback_confirmed, None)
        cb = 'http://www.example.com/my-callback'
        self.token.set_callback(cb)
        self.assertEqual(self.token.callback, cb)
        self.assertEqual(self.token.callback_confirmed, 'true')
        self.token.set_callback(None)
        self.assertEqual(self.token.callback, None)
        # TODO: The following test should probably not pass, but it does
        # To fix this, check for None and unset 'true' in set_callback
        # Additionally, should a confirmation truly be done of the callback?
        self.assertEqual(self.token.callback_confirmed, 'true')

    def test_set_verifier(self):
        """set_verifier() stores an explicit value, or generates one when
        called with no argument."""
        self.assertEqual(self.token.verifier, None)
        v = oauth.generate_verifier()
        self.token.set_verifier(v)
        self.assertEqual(self.token.verifier, v)
        self.token.set_verifier()
        self.assertNotEqual(self.token.verifier, v)
        self.token.set_verifier('')
        self.assertEqual(self.token.verifier, '')

    def test_get_callback_url(self):
        """The verifier is appended with '&' when the callback already has a
        query string and with '?' otherwise."""
        self.assertEqual(self.token.get_callback_url(), None)
        self.token.set_verifier()
        self.assertEqual(self.token.get_callback_url(), None)
        cb = 'http://www.example.com/my-callback?save=1&return=true'
        v = oauth.generate_verifier()
        self.token.set_callback(cb)
        self.token.set_verifier(v)
        url = self.token.get_callback_url()
        verifier_str = '&oauth_verifier=%s' % v
        self.assertEqual(url, '%s%s' % (cb, verifier_str))
        cb = 'http://www.example.com/my-callback-no-query'
        v = oauth.generate_verifier()
        self.token.set_callback(cb)
        self.token.set_verifier(v)
        url = self.token.get_callback_url()
        verifier_str = '?oauth_verifier=%s' % v
        self.assertEqual(url, '%s%s' % (cb, verifier_str))

    def test_to_string_with_callback(self):
        """to_string() gains oauth_callback_confirmed after set_callback().

        Renamed from test_to_string: it previously shared its name with the
        later test_to_string below, so this definition was shadowed by the
        class body and never ran.
        """
        string = 'oauth_token_secret=%s&oauth_token=%s' % (self.secret,
                                                           self.key)
        self.assertEqual(self.token.to_string(), string)
        self.token.set_callback('http://www.example.com/my-callback')
        string += '&oauth_callback_confirmed=true'
        self.assertEqual(self.token.to_string(), string)

    def _compare_tokens(self, new):
        # Helper: checks that a deserialized token matches self.token.
        self.assertEqual(self.token.key, new.key)
        self.assertEqual(self.token.secret, new.secret)
        # TODO: What about copying the callback to the new token?
        # self.assertEqual(self.token.callback, new.callback)
        self.assertEqual(self.token.callback_confirmed,
                         new.callback_confirmed)
        # TODO: What about copying the verifier to the new token?
        # self.assertEqual(self.token.verifier, new.verifier)

    def test_to_string(self):
        """str()/to_string() serialize the secret and key as a query string."""
        tok = oauth.Token('tooken', 'seecret')
        self.assertEqual(str(tok), 'oauth_token_secret=seecret&oauth_token=tooken')

    def test_from_string(self):
        """from_string() round-trips to_string() output and rejects malformed
        or incomplete strings with ValueError."""
        self.assertRaises(ValueError, lambda: oauth.Token.from_string(''))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('blahblahblah'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('blah=blah'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret=asfdasf'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret='))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=asfdasf'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token='))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=&oauth_token_secret='))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=tooken%26oauth_token_secret=seecret'))
        string = self.token.to_string()
        new = oauth.Token.from_string(string)
        self._compare_tokens(new)
        self.token.set_callback('http://www.example.com/my-callback')
        string = self.token.to_string()
        new = oauth.Token.from_string(string)
        self._compare_tokens(new)
class TestRequest(unittest.TestCase):
    """Tests for oauth.Request: URL normalization, parameter handling,
    serialization (header/postdata/url) and signing."""

    def test_setter(self):
        # NOTE(review): `url` and `method` below are partly unused and no
        # setter is exercised; this only checks the initial unset state.
        url = "http://example.com"
        method = "GET"
        req = oauth.Request(method)
        self.assertTrue(req.url is None)
        self.assertTrue(req.normalized_url is None)

    def test_deleter(self):
        """Deleting req.url makes subsequent access raise AttributeError."""
        url = "http://example.com"
        method = "GET"
        req = oauth.Request(method, url)
        try:
            del req.url
            url = req.url
            self.fail("AttributeError should have been raised on empty url.")
        except AttributeError:
            pass
        except Exception, e:  # Python 2 except syntax.
            self.fail(str(e))

    def test_url(self):
        """normalized_url drops default ports (80/443); url keeps them."""
        url1 = "http://example.com:80/foo.php"
        url2 = "https://example.com:443/foo.php"
        exp1 = "http://example.com/foo.php"
        exp2 = "https://example.com/foo.php"
        method = "GET"
        req = oauth.Request(method, url1)
        self.assertEquals(req.normalized_url, exp1)
        self.assertEquals(req.url, url1)
        req = oauth.Request(method, url2)
        self.assertEquals(req.normalized_url, exp2)
        self.assertEquals(req.url, url2)

    def test_bad_url(self):
        """Only http/https URL schemes are accepted."""
        request = oauth.Request()
        try:
            request.url = "ftp://example.com"
            self.fail("Invalid URL scheme was accepted.")
        except ValueError:
            pass

    def test_unset_consumer_and_token(self):
        """sign_request() fills in oauth_consumer_key/oauth_token itself."""
        consumer = oauth.Consumer('my_consumer_key', 'my_consumer_secret')
        token = oauth.Token('my_key', 'my_secret')
        request = oauth.Request("GET", "http://example.com/fetch.php")
        request.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer,
                             token)
        self.assertEquals(consumer.key, request['oauth_consumer_key'])
        self.assertEquals(token.key, request['oauth_token'])

    def test_no_url_set(self):
        """Signing without a URL raises ValueError, not TypeError."""
        consumer = oauth.Consumer('my_consumer_key', 'my_consumer_secret')
        token = oauth.Token('my_key', 'my_secret')
        request = oauth.Request()
        try:
            try:
                request.sign_request(oauth.SignatureMethod_HMAC_SHA1(),
                                     consumer, token)
            except TypeError:
                self.fail("Signature method didn't check for a normalized URL.")
        except ValueError:
            pass

    def test_url_query(self):
        """The query string is kept in url but stripped in normalized_url."""
        url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
        normalized_url = urlparse.urlunparse(urlparse.urlparse(url)[:3] + (None, None, None))
        method = "GET"
        req = oauth.Request(method, url)
        self.assertEquals(req.url, url)
        self.assertEquals(req.normalized_url, normalized_url)

    def test_get_parameter(self):
        """get_parameter() returns a value or raises oauth.Error if absent."""
        url = "http://example.com"
        method = "GET"
        params = {'oauth_consumer' : 'asdf'}
        req = oauth.Request(method, url, parameters=params)
        self.assertEquals(req.get_parameter('oauth_consumer'), 'asdf')
        self.assertRaises(oauth.Error, req.get_parameter, 'blah')

    def test_get_nonoauth_parameters(self):
        """get_nonoauth_parameters() strips the oauth_* keys."""
        oauth_params = {
            'oauth_consumer': 'asdfasdfasdf'
        }
        other_params = {
            'foo': 'baz',
            'bar': 'foo',
            'multi': ['FOO','BAR']
        }
        params = oauth_params
        params.update(other_params)
        req = oauth.Request("GET", "http://example.com", params)
        self.assertEquals(other_params, req.get_nonoauth_parameters())

    def test_to_header(self):
        """to_header() emits an Authorization header carrying the realm plus
        every parameter, quoted and URL-encoded."""
        realm = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        req = oauth.Request("GET", realm, params)
        header, value = req.to_header(realm).items()[0]
        parts = value.split('OAuth ')
        vars = parts[1].split(', ')
        self.assertTrue(len(vars), (len(params) + 1))
        res = {}
        for v in vars:
            var, val = v.split('=')
            res[var] = urllib.unquote(val.strip('"'))
        self.assertEquals(realm, res['realm'])
        del res['realm']
        self.assertTrue(len(res), len(params))
        for key, val in res.items():
            self.assertEquals(val, params.get(key))

    def test_to_postdata(self):
        """to_postdata() form-encodes every parameter, repeating multi-valued
        ones once per value."""
        realm = "http://sp.example.com/"
        params = {
            'multi': ['FOO','BAR'],
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        req = oauth.Request("GET", realm, params)
        flat = [('multi','FOO'),('multi','BAR')]
        del params['multi']
        flat.extend(params.items())
        kf = lambda x: x[0]
        self.assertEquals(sorted(flat, key=kf), sorted(parse_qsl(req.to_postdata()), key=kf))

    def test_to_url(self):
        """to_url() reproduces the base URL with parameters in the query."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        req = oauth.Request("GET", url, params)
        exp = urlparse.urlparse("%s?%s" % (url, urllib.urlencode(params)))
        res = urlparse.urlparse(req.to_url())
        self.assertEquals(exp.scheme, res.scheme)
        self.assertEquals(exp.netloc, res.netloc)
        self.assertEquals(exp.path, res.path)
        a = parse_qs(exp.query)
        b = parse_qs(res.query)
        self.assertEquals(a, b)

    def test_to_url_with_query(self):
        """Pre-existing query parameters survive to_url() alongside the
        OAuth ones."""
        url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        req = oauth.Request("GET", url, params)
        # Note: the url above already has query parameters, so append new ones with &
        exp = urlparse.urlparse("%s&%s" % (url, urllib.urlencode(params)))
        res = urlparse.urlparse(req.to_url())
        self.assertEquals(exp.scheme, res.scheme)
        self.assertEquals(exp.netloc, res.netloc)
        self.assertEquals(exp.path, res.path)
        a = parse_qs(exp.query)
        b = parse_qs(res.query)
        self.assertTrue('alt' in b)
        self.assertTrue('max-contacts' in b)
        self.assertEquals(b['alt'], ['json'])
        self.assertEquals(b['max-contacts'], ['10'])
        self.assertEquals(a, b)

    def test_signature_base_string_with_query(self):
        """URL query parameters participate in the normalized parameter set
        used for the signature base string."""
        url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        req = oauth.Request("GET", url, params)
        self.assertEquals(req.normalized_url, 'https://www.google.com/m8/feeds/contacts/default/full/')
        self.assertEquals(req.url, 'https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10')
        normalized_params = parse_qsl(req.get_normalized_parameters())
        self.assertTrue(len(normalized_params), len(params) + 2)
        normalized_params = dict(normalized_params)
        for key, value in params.iteritems():
            if key == 'oauth_signature':
                continue
            self.assertEquals(value, normalized_params[key])
        self.assertEquals(normalized_params['alt'], 'json')
        self.assertEquals(normalized_params['max-contacts'], '10')

    def test_get_normalized_parameters(self):
        """Parameters are sorted by key; multi-values are sorted as well."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'multi': ['FOO','BAR'],
        }
        req = oauth.Request("GET", url, params)
        res = req.get_normalized_parameters()
        srtd = [(k, v if type(v) != ListType else sorted(v)) for k,v in sorted(params.items())]
        self.assertEquals(urllib.urlencode(srtd, True), res)

    def test_get_normalized_parameters_ignores_auth_signature(self):
        """oauth_signature is excluded from the normalized parameters."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_signature': "some-random-signature-%d" % random.randint(1000, 2000),
            'oauth_token': "ad180jjd733klru7",
        }
        req = oauth.Request("GET", url, params)
        res = req.get_normalized_parameters()
        self.assertNotEquals(urllib.urlencode(sorted(params.items())), res)
        foo = params.copy()
        del foo["oauth_signature"]
        self.assertEqual(urllib.urlencode(sorted(foo.items())), res)

    def test_set_signature_method(self):
        """set_signature_method() only accepts SignatureMethod instances."""
        consumer = oauth.Consumer('key', 'secret')
        client = oauth.Client(consumer)
        class Blah:
            pass
        try:
            client.set_signature_method(Blah())
            self.fail("Client.set_signature_method() accepted invalid method.")
        except ValueError:
            pass
        m = oauth.SignatureMethod_HMAC_SHA1()
        client.set_signature_method(m)
        self.assertEquals(m, client.method)

    def test_get_normalized_string_escapes_spaces_properly(self):
        """Spaces are encoded as %20, never '+'."""
        url = "http://sp.example.com/"
        params = {
            "some_random_data": random.randint(100, 1000),
            "data": "This data with a random number (%d) has spaces!" % random.randint(1000, 2000),
        }
        req = oauth.Request("GET", url, params)
        res = req.get_normalized_parameters()
        expected = urllib.urlencode(sorted(params.items())).replace('+', '%20')
        self.assertEqual(expected, res)

    def test_sign_request(self):
        """HMAC-SHA1 and PLAINTEXT signatures match known-good values."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200"
        }
        tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
        con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
        params['oauth_token'] = tok.key
        params['oauth_consumer_key'] = con.key
        req = oauth.Request(method="GET", url=url, parameters=params)
        # Maps the expected signature to the method producing it.
        methods = {
            'TQ6vGQ5A6IZn8dmeGB4+/Jl3EMI=': oauth.SignatureMethod_HMAC_SHA1(),
            'con-test-secret&tok-test-secret': oauth.SignatureMethod_PLAINTEXT()
        }
        for exp, method in methods.items():
            req.sign_request(method, con, tok)
            self.assertEquals(req['oauth_signature_method'], method.name)
            self.assertEquals(req['oauth_signature'], exp)

    def test_from_request(self):
        """from_request() parses Authorization headers and query strings, and
        returns None when no OAuth data is present."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        req = oauth.Request("GET", url, params)
        headers = req.to_header()
        # Test from the headers
        req = oauth.Request.from_request("GET", url, headers)
        self.assertEquals(req.method, "GET")
        self.assertEquals(req.url, url)
        self.assertEquals(params, req.copy())
        # Test with bad OAuth headers
        bad_headers = {
            'Authorization' : 'OAuth this is a bad header'
        }
        self.assertRaises(oauth.Error, oauth.Request.from_request, "GET",
                          url, bad_headers)
        # Test getting from query string
        qs = urllib.urlencode(params)
        req = oauth.Request.from_request("GET", url, query_string=qs)
        exp = parse_qs(qs, keep_blank_values=False)
        for k, v in exp.iteritems():
            exp[k] = urllib.unquote(v[0])
        self.assertEquals(exp, req.copy())
        # Test that a boned from_request() call returns None
        req = oauth.Request.from_request("GET", url)
        self.assertEquals(None, req)

    def test_from_token_and_callback(self):
        """oauth_callback appears in the request only when one is passed."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
        req = oauth.Request.from_token_and_callback(tok)
        self.assertFalse('oauth_callback' in req)
        self.assertEquals(req['oauth_token'], tok.key)
        req = oauth.Request.from_token_and_callback(tok, callback=url)
        self.assertTrue('oauth_callback' in req)
        self.assertEquals(req['oauth_callback'], url)

    def test_from_consumer_and_token(self):
        """The consumer key, token key and verifier are all copied into the
        resulting request."""
        url = "http://sp.example.com/"
        tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
        tok.set_verifier('this_is_a_test_verifier')
        con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
        req = oauth.Request.from_consumer_and_token(con, token=tok,
            http_method="GET", http_url=url)
        self.assertEquals(req['oauth_token'], tok.key)
        self.assertEquals(req['oauth_consumer_key'], con.key)
        self.assertEquals(tok.verifier, req['oauth_verifier'])
class SignatureMethod_Bad(oauth.SignatureMethod):
    """A deliberately broken signature method.

    Used by the server tests to exercise rejection of requests signed with a
    method the server does not recognize.
    """

    name = "BAD"

    def signing_base(self, request, consumer, token):
        """Return an empty signing base; its content is irrelevant here."""
        return ""

    def sign(self, request, consumer, token):
        """Always produce a signature that can never verify."""
        return "invalid-signature"
class TestServer(unittest.TestCase):
    """Tests for oauth.Server request verification and error handling."""

    def setUp(self):
        """Builds an HMAC-SHA1-signed request shared by the happy-path tests."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        self.consumer = oauth.Consumer(key="consumer-key",
                                       secret="consumer-secret")
        self.token = oauth.Token(key="token-key", secret="token-secret")
        params['oauth_token'] = self.token.key
        params['oauth_consumer_key'] = self.consumer.key
        self.request = oauth.Request(method="GET", url=url, parameters=params)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        self.request.sign_request(signature_method, self.consumer, self.token)

    def test_init(self):
        """Server stores the signature_methods mapping; default is empty."""
        server = oauth.Server(signature_methods={'HMAC-SHA1' : oauth.SignatureMethod_HMAC_SHA1()})
        self.assertTrue('HMAC-SHA1' in server.signature_methods)
        self.assertTrue(isinstance(server.signature_methods['HMAC-SHA1'],
                                   oauth.SignatureMethod_HMAC_SHA1))
        server = oauth.Server()
        self.assertEquals(server.signature_methods, {})

    def test_add_signature_method(self):
        """add_signature_method() registers methods keyed by their name."""
        server = oauth.Server()
        res = server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        self.assertTrue(len(res) == 1)
        self.assertTrue('HMAC-SHA1' in res)
        self.assertTrue(isinstance(res['HMAC-SHA1'],
                                   oauth.SignatureMethod_HMAC_SHA1))
        res = server.add_signature_method(oauth.SignatureMethod_PLAINTEXT())
        self.assertTrue(len(res) == 2)
        self.assertTrue('PLAINTEXT' in res)
        self.assertTrue(isinstance(res['PLAINTEXT'],
                                   oauth.SignatureMethod_PLAINTEXT))

    def test_verify_request(self):
        """A correctly signed request verifies and yields the non-OAuth
        parameters."""
        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        parameters = server.verify_request(self.request, self.consumer,
                                           self.token)
        self.assertTrue('bar' in parameters)
        self.assertTrue('foo' in parameters)
        self.assertTrue('multi' in parameters)
        self.assertEquals(parameters['bar'], 'blerg')
        self.assertEquals(parameters['foo'], 59)
        self.assertEquals(parameters['multi'], ['FOO','BAR'])

    def test_build_authenticate_header(self):
        """build_authenticate_header() embeds the realm in WWW-Authenticate."""
        server = oauth.Server()
        headers = server.build_authenticate_header('example.com')
        self.assertTrue('WWW-Authenticate' in headers)
        self.assertEquals('OAuth realm="example.com"',
                          headers['WWW-Authenticate'])

    def test_no_version(self):
        """A request without oauth_version must still verify.

        verify_request() not raising is the implicit assertion here.
        """
        url = "http://sp.example.com/"
        params = {
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        self.consumer = oauth.Consumer(key="consumer-key",
                                       secret="consumer-secret")
        self.token = oauth.Token(key="token-key", secret="token-secret")
        params['oauth_token'] = self.token.key
        params['oauth_consumer_key'] = self.consumer.key
        self.request = oauth.Request(method="GET", url=url, parameters=params)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        self.request.sign_request(signature_method, self.consumer, self.token)
        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        parameters = server.verify_request(self.request, self.consumer,
                                           self.token)

    def test_invalid_version(self):
        """An unsupported oauth_version is rejected with oauth.Error."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': '222.9922',
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['foo','bar'],
            'foo': 59
        }
        consumer = oauth.Consumer(key="consumer-key",
                                  secret="consumer-secret")
        token = oauth.Token(key="token-key", secret="token-secret")
        params['oauth_token'] = token.key
        params['oauth_consumer_key'] = consumer.key
        request = oauth.Request(method="GET", url=url, parameters=params)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        request.sign_request(signature_method, consumer, token)
        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        self.assertRaises(oauth.Error, server.verify_request, request,
                          consumer, token)

    def test_invalid_signature_method(self):
        """A signature method unknown to the server raises oauth.Error."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': '1.0',
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        consumer = oauth.Consumer(key="consumer-key",
                                  secret="consumer-secret")
        token = oauth.Token(key="token-key", secret="token-secret")
        params['oauth_token'] = token.key
        params['oauth_consumer_key'] = consumer.key
        request = oauth.Request(method="GET", url=url, parameters=params)
        # Sign with the bogus method defined above.
        signature_method = SignatureMethod_Bad()
        request.sign_request(signature_method, consumer, token)
        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        self.assertRaises(oauth.Error, server.verify_request, request,
                          consumer, token)

    def test_missing_signature(self):
        """Removing oauth_signature triggers oauth.MissingSignature."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': '1.0',
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        consumer = oauth.Consumer(key="consumer-key",
                                  secret="consumer-secret")
        token = oauth.Token(key="token-key", secret="token-secret")
        params['oauth_token'] = token.key
        params['oauth_consumer_key'] = consumer.key
        request = oauth.Request(method="GET", url=url, parameters=params)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        request.sign_request(signature_method, consumer, token)
        del request['oauth_signature']
        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        self.assertRaises(oauth.MissingSignature, server.verify_request,
                          request, consumer, token)
# Request Token: http://oauth-sandbox.sevengoslings.net/request_token
# Auth: http://oauth-sandbox.sevengoslings.net/authorize
# Access Token: http://oauth-sandbox.sevengoslings.net/access_token
# Two-legged: http://oauth-sandbox.sevengoslings.net/two_legged
# Three-legged: http://oauth-sandbox.sevengoslings.net/three_legged
# Key: bd37aed57e15df53
# Secret: 0e9e6413a9ef49510a4f68ed02cd
class TestClient(unittest.TestCase):
    """Tests for oauth.Client.

    The *_token and *_legged tests exercise the live sevengoslings OAuth
    sandbox (credentials/URLs in the module comments above); the mox-based
    tests stub out httplib2 entirely and only verify request construction.
    """
    oauth_uris = {
        'request_token': '/request_token',
        'authorize': '/authorize',
        'access_token': '/access_token',
        'two_legged': '/two_legged',
        'three_legged': '/three_legged'
    }
    consumer_key = 'bd37aed57e15df53'
    consumer_secret = '0e9e6413a9ef49510a4f68ed02cd'
    host = 'http://oauth-sandbox.sevengoslings.net'

    def setUp(self):
        """Create a mox instance, the sandbox consumer and a sample body."""
        self.mox = mox.Mox()
        self.consumer = oauth.Consumer(key=self.consumer_key,
                                       secret=self.consumer_secret)
        self.body = {
            'foo': 'bar',
            'bar': 'foo',
            'multi': ['FOO', 'BAR'],
            'blah': 599999
        }

    def tearDown(self):
        self.mox.UnsetStubs()

    def _uri(self, uri_type):
        """Return the full sandbox URL for *uri_type*.

        Raises KeyError for an unknown type.  (Parameter renamed from
        ``type`` to avoid shadowing the builtin.)
        """
        uri = self.oauth_uris.get(uri_type)
        if uri is None:
            raise KeyError("%s is not a valid OAuth URI type." % uri_type)
        return "%s%s" % (self.host, uri)

    def create_simple_multipart_data(self, data):
        """Encode *data* as multipart/form-data with a random boundary.

        Returns a ``(content_type, body)`` tuple.
        """
        boundary = '---Boundary-%d' % random.randint(1, 1000)
        crlf = '\r\n'
        items = []
        for key, value in data.iteritems():
            items += [
                '--' + boundary,
                'Content-Disposition: form-data; name="%s"' % str(key),
                '',
                str(value),
            ]
        items += ['', '--' + boundary + '--', '']
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return content_type, crlf.join(items)

    def test_init(self):
        """Client() must reject invalid consumer and token objects."""
        class Blah():
            pass
        try:
            client = oauth.Client(Blah())
            self.fail("Client.__init__() accepted invalid Consumer.")
        except ValueError:
            pass
        consumer = oauth.Consumer('token', 'secret')
        try:
            client = oauth.Client(consumer, Blah())
            self.fail("Client.__init__() accepted invalid Token.")
        except ValueError:
            pass

    def test_access_token_get(self):
        """Test getting an access token via GET."""
        client = oauth.Client(self.consumer, None)
        resp, content = client.request(self._uri('request_token'), "GET")
        self.assertEquals(int(resp['status']), 200)

    def test_access_token_post(self):
        """Test getting an access token via POST."""
        client = oauth.Client(self.consumer, None)
        resp, content = client.request(self._uri('request_token'), "POST")
        self.assertEquals(int(resp['status']), 200)
        res = dict(parse_qsl(content))
        self.assertTrue('oauth_token' in res)
        self.assertTrue('oauth_token_secret' in res)

    def _two_legged(self, method):
        """Issue a two-legged request with *method*; return (resp, content)."""
        client = oauth.Client(self.consumer, None)
        return client.request(self._uri('two_legged'), method,
                              body=urllib.urlencode(self.body))

    def test_two_legged_post(self):
        """A test of a two-legged OAuth POST request."""
        resp, content = self._two_legged("POST")
        self.assertEquals(int(resp['status']), 200)

    def test_two_legged_get(self):
        """A test of a two-legged OAuth GET request."""
        resp, content = self._two_legged("GET")
        self.assertEquals(int(resp['status']), 200)

    def test_multipart_post_does_not_alter_body(self):
        """The multipart body must be handed to httplib2 byte-identical."""
        self.mox.StubOutWithMock(httplib2.Http, 'request')
        random_result = random.randint(1, 100)
        data = {
            'rand-%d' % random.randint(1, 100): random.randint(1, 100),
        }
        content_type, body = self.create_simple_multipart_data(data)
        client = oauth.Client(self.consumer, None)
        uri = self._uri('two_legged')
        expected_kwargs = {
            'method': 'POST',
            'body': body,
            'redirections': httplib2.DEFAULT_MAX_REDIRECTS,
            'connection_type': None,
            'headers': mox.IsA(dict),
        }
        httplib2.Http.request(client, uri, **expected_kwargs).AndReturn(random_result)
        self.mox.ReplayAll()
        result = client.request(uri, 'POST', headers={'Content-Type': content_type}, body=body)
        self.assertEqual(result, random_result)
        self.mox.VerifyAll()

    def test_url_with_query_string(self):
        """Query-string parameters must survive signing unchanged."""
        self.mox.StubOutWithMock(httplib2.Http, 'request')
        uri = 'http://example.com/foo/bar/?show=thundercats&character=snarf'
        client = oauth.Client(self.consumer, None)
        expected_kwargs = {
            'method': 'GET',
            'body': None,
            'redirections': httplib2.DEFAULT_MAX_REDIRECTS,
            'connection_type': None,
            'headers': mox.IsA(dict),
        }

        def oauth_verifier(url):
            # Rebuild and re-sign an equivalent request, then compare query
            # parameters, ignoring the ones that vary with each signing.
            req = oauth.Request.from_consumer_and_token(self.consumer, None,
                    http_method='GET', http_url=uri, parameters={})
            req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), self.consumer, None)
            expected = parse_qsl(urlparse.urlparse(req.to_url()).query)
            actual = parse_qsl(urlparse.urlparse(url).query)
            if len(expected) != len(actual):
                return False
            actual = dict(actual)
            for key, value in expected:
                if key not in ('oauth_signature', 'oauth_nonce', 'oauth_timestamp'):
                    if actual[key] != value:
                        return False
            return True
        httplib2.Http.request(client, mox.Func(oauth_verifier), **expected_kwargs)
        self.mox.ReplayAll()
        client.request(uri, 'GET')
        self.mox.VerifyAll()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
|
import os
import pytest
from astropy import units as u
from astropy.time import Time
from pocs.observatory import Observatory
from pocs.scheduler.dispatch import Scheduler
from pocs.scheduler.observation import Observation
from pocs.utils import error
# Marker that skips hardware-dependent tests unless pytest was invoked with
# --camera.  NOTE(review): module-level `pytest.config` only works on older
# pytest versions (removed in pytest 5.0) — confirm the pinned version.
has_camera = pytest.mark.skipif(
    not pytest.config.getoption("--camera"),
    reason="need --camera to observe"
)
@pytest.fixture
def simulator(request):
    """Names of subsystems to simulate: each one whose --<name> option is absent."""
    return [name for name in ('camera', 'mount', 'weather')
            if not request.config.getoption('--' + name)]
@pytest.fixture
def observatory(simulator):
    """Provide a valid Observatory wired to the simulated subsystems."""
    return Observatory(simulator=simulator, ignore_local_config=True)
@pytest.fixture(scope='module')
def images_dir(tmpdir_factory):
    """Module-scoped temporary directory for camera images."""
    return str(tmpdir_factory.mktemp('images'))
def test_error_exit(config):
    """With every simulator explicitly disabled and no hardware, construction exits."""
    with pytest.raises(SystemExit):
        Observatory(simulator=['none'], ignore_local_config=True)
def test_bad_site(simulator, config):
    """An empty location config raises PanError."""
    bad_config = config.copy()
    bad_config['location'] = {}
    with pytest.raises(error.PanError):
        Observatory(simulator=simulator, config=bad_config,
                    ignore_local_config=True)
def test_bad_mount(config):
    """A bogus mount port/driver combination raises NotFound."""
    bad_config = config.copy()
    bad_config['mount']['port'] = '/dev/'
    bad_config['mount']['driver'] = 'foobar'
    with pytest.raises(error.NotFound):
        Observatory(simulator=['weather', 'camera', 'night'],
                    config=bad_config, ignore_local_config=True)
def test_bad_scheduler(config):
    """An unknown scheduler type raises NotFound."""
    bad_config = config.copy()
    bad_config['scheduler']['type'] = 'foobar'
    with pytest.raises(error.NotFound):
        Observatory(simulator=['all'], config=bad_config,
                    ignore_local_config=True)
def test_bad_scheduler_fields_file(config):
    """A non-existent scheduler fields file raises NotFound."""
    bad_config = config.copy()
    bad_config['scheduler']['fields_file'] = 'foobar'
    with pytest.raises(error.NotFound):
        Observatory(simulator=['all'], config=bad_config,
                    ignore_local_config=True)
def test_bad_camera(config):
    """Auto-detecting cameras with none attached raises PanError."""
    with pytest.raises(error.PanError):
        Observatory(simulator=['weather', 'mount', 'night'],
                    config=config.copy(), auto_detect=True,
                    ignore_local_config=True)
def test_camera_not_found(config):
    """With no simulated camera and no auto-detect, construction raises PanError."""
    with pytest.raises(error.PanError):
        Observatory(simulator=['weather', 'mount', 'night'],
                    config=config.copy(), ignore_local_config=True)
def test_camera_port_error(config):
    # NOTE(review): despite the name, this alters the *model* (exactly like
    # test_camera_import_error below, which additionally sets a port) and
    # expects CameraNotFound — confirm the intent is that an unknown model
    # with no port fails port resolution.
    conf = config.copy()
    conf['cameras']['devices'][0]['model'] = 'foobar'
    simulator = ['weather', 'mount', 'night']
    with pytest.raises(error.CameraNotFound):
        Observatory(simulator=simulator, config=conf, auto_detect=False, ignore_local_config=True)
def test_camera_import_error(config):
    """An unknown camera model with an explicit port raises NotFound."""
    bad_config = config.copy()
    bad_config['cameras']['devices'][0]['model'] = 'foobar'
    bad_config['cameras']['devices'][0]['port'] = 'usb:001,002'
    with pytest.raises(error.NotFound):
        Observatory(simulator=['weather', 'mount', 'night'],
                    config=bad_config, auto_detect=False,
                    ignore_local_config=True)
def test_status(observatory):
    """Status gains sections as the mount initializes and an observation is set."""
    os.environ['POCSTIME'] = '2016-08-13 10:00:00'
    status = observatory.status()
    assert 'mount' not in status
    assert 'observation' not in status
    assert 'observer' in status
    # Initializing (and unparking) the mount adds the 'mount' section.
    observatory.mount.initialize(unpark=True)
    status2 = observatory.status()
    assert status != status2
    assert 'mount' in status2
    # Selecting an observation adds the 'observation' section.
    observatory.get_observation()
    status3 = observatory.status()
    assert status3 != status
    assert status3 != status2
    assert 'mount' in status3
    assert 'observation' in status3
def test_default_config(observatory):
    """ Creates a default Observatory and tests some of the basic parameters """
    assert observatory.location is not None
    # Use abs() so this is a real tolerance check — without it a large
    # negative difference would pass trivially.
    assert abs(observatory.location.get('elevation') -
               observatory.config['location']['elevation']) < 1. * u.meter
    assert observatory.location.get('horizon') == observatory.config['location']['horizon']
    assert hasattr(observatory, 'scheduler')
    assert isinstance(observatory.scheduler, Scheduler)
def test_is_dark(observatory):
    """is_dark flips with the mocked POCSTIME environment time."""
    for pocs_time, expected in (('2016-08-13 10:00:00', True),
                                ('2016-08-13 22:00:00', False)):
        os.environ['POCSTIME'] = pocs_time
        assert observatory.is_dark is expected
def test_standard_headers(observatory):
    """Computed headers match known reference values for a fixed time/field.

    Floating-point quantities are compared within a small tolerance; exact
    fields are compared directly.
    """
    os.environ['POCSTIME'] = '2016-08-13 22:00:00'
    observatory.scheduler.fields_list = [
        {'name': 'HAT-P-20',
         'priority': '100',
         'position': '07h27m39.89s +24d20m14.7s',
         },
    ]
    observatory.get_observation()
    headers = observatory.get_standard_headers()
    test_headers = {
        'airmass': 1.091778,
        'creator': 'POCSv0.1.2',
        'elevation': 3400.0,
        'ha_mnt': 1.6844671878927793,
        'latitude': 19.54,
        'longitude': -155.58,
        'moon_fraction': 0.7880103086091879,
        'moon_separation': 156.1607340087774,
        'observer': 'Generic PANOPTES Unit',
        'origin': 'Project PANOPTES'}
    # abs() makes these genuine tolerance checks; previously a large negative
    # difference passed trivially (matches the style of test_sidereal_time).
    assert abs(headers['airmass'] - test_headers['airmass']) < 1e-4
    assert abs(headers['ha_mnt'] - test_headers['ha_mnt']) < 1e-4
    assert abs(headers['moon_fraction'] - test_headers['moon_fraction']) < 1e-4
    assert abs(headers['moon_separation'] - test_headers['moon_separation']) < 1e-4
    assert headers['creator'] == test_headers['creator']
    assert headers['elevation'] == test_headers['elevation']
    assert headers['latitude'] == test_headers['latitude']
    assert headers['longitude'] == test_headers['longitude']
def test_sidereal_time(observatory):
    """Sidereal time tracks the mocked POCSTIME environment time."""
    for pocs_time, expected in (('2016-08-13 10:00:00', 21.11269263733713),
                                ('2016-08-13 22:00:00', 9.145547849536634)):
        os.environ['POCSTIME'] = pocs_time
        assert abs(observatory.sidereal_time.value - expected) < 1e-4
def test_primary_camera(observatory):
    """A primary camera is always designated."""
    assert observatory.primary_camera is not None
def test_get_observation(observatory):
    """get_observation returns an Observation and records it as current."""
    obs = observatory.get_observation()
    assert isinstance(obs, Observation)
    assert observatory.current_observation == obs
@has_camera
def test_observe(observatory):
    """Full observe cycle: schedule a field, take one exposure, clean up."""
    assert observatory.current_observation is None
    assert len(observatory.scheduler.observed_list) == 0
    time = Time('2016-08-13 10:00:00')
    observatory.scheduler.fields_list = [
        {'name': 'Kepler 1100',
         'priority': '100',
         'position': '19h27m29.10s +44d05m15.00s',
         'exp_time': 10,
         },
    ]
    observatory.get_observation(time=time)
    assert observatory.current_observation is not None
    assert len(observatory.scheduler.observed_list) == 1
    assert observatory.current_observation.current_exp == 0
    # One observe() call should take exactly one exposure.
    observatory.observe()
    assert observatory.current_observation.current_exp == 1
    # Cleanup empties the observed list again.
    observatory.cleanup_observations()
    assert len(observatory.scheduler.observed_list) == 0
def test_autofocus_disconnected(observatory):
    """Autofocus returns no events when every camera is disconnected."""
    # 'Disconnect' the simulated cameras so autofocus fails with errors.
    for camera in observatory.cameras.values():
        camera._connected = False
    assert observatory.autofocus_cameras() == {}
def test_autofocus_all(observatory, images_dir):
    """Autofocus starts one event per simulated camera and they all finish."""
    observatory.config['directories']['images'] = images_dir
    events = observatory.autofocus_cameras()
    assert len(events) == 2  # one event per simulated camera
    # Block until every autofocus run has completed.
    for event in events.values():
        event.wait()
def test_autofocus_coarse(observatory, images_dir):
    """Coarse autofocus also yields one completed event per camera."""
    observatory.config['directories']['images'] = images_dir
    events = observatory.autofocus_cameras(coarse=True)
    assert len(events) == 2
    for event in events.values():
        event.wait()
def test_autofocus_named(observatory, images_dir):
    """Autofocus can be restricted to an explicit subset of cameras."""
    observatory.config['directories']['images'] = images_dir
    first_camera = list(observatory.cameras.keys())[0]
    # Call autofocus on just one camera.
    events = observatory.autofocus_cameras(camera_list=[first_camera])
    assert len(events) == 1
    assert list(events.keys()) == [first_camera]
    for event in events.values():
        event.wait()
def test_autofocus_bad_name(observatory):
    """Unknown camera names yield a warning and an empty event dict."""
    events = observatory.autofocus_cameras(
        camera_list=['NOTAREALCAMERA', 'ALSONOTACAMERA'])
    assert events == {}
def test_autofocus_focusers_disconnected(observatory):
    """Disconnected focusers mean no autofocus events are returned."""
    for camera in observatory.cameras.values():
        camera.focuser._connected = False
    assert observatory.autofocus_cameras() == {}
def test_autofocus_no_focusers(observatory):
    """Cameras without focusers produce no autofocus events."""
    for camera in observatory.cameras.values():
        camera.focuser = None
    assert observatory.autofocus_cameras() == {}
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from textwrap import dedent
import mock
import errno
from swift.common.utils import Timestamp
from test.unit import debug_logger
from swift.container import sync
from swift.common.db import DatabaseConnectionError
from swift.common import utils
from swift.common.wsgi import ConfigString
from swift.common.exceptions import ClientException
from swift.common.storage_policy import StoragePolicy
import test
from test.unit import patch_policies, with_tempdir
# Pin the hash path prefix/suffix so path hashes are deterministic in tests.
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'endcap'
class FakeRing(object):
    """Minimal three-device stand-in for a swift ring."""

    def __init__(self):
        self.devs = []
        for x in range(3):
            self.devs.append({'ip': '10.0.0.%s' % x,
                              'port': 1000 + x,
                              'device': 'sda'})

    def get_nodes(self, account, container=None, obj=None):
        # Fixed partition 1 plus a fresh copy of the device list.
        return 1, list(self.devs)
class FakeContainerBroker(object):
    """Stand-in for swift.container.backend.ContainerBroker.

    Records the sync points it is given and serves canned info/items.
    """

    def __init__(self, path, metadata=None, info=None, deleted=False,
                 items_since=None):
        self.db_file = path
        self.db_dir = os.path.dirname(path)
        self.metadata = metadata or {}
        self.info = info or {}
        self.deleted = deleted
        self.items_since = items_since or []
        self.sync_point1 = self.sync_point2 = -1

    def get_max_row(self):
        # The fakes only ever carry a single row.
        return 1

    def get_info(self):
        return self.info

    def is_deleted(self):
        return self.deleted

    def get_items_since(self, sync_point, limit):
        start = max(sync_point, 0)
        return self.items_since[start:start + limit]

    def set_x_container_sync_points(self, sync_point1, sync_point2):
        self.sync_point1 = sync_point1
        self.sync_point2 = sync_point2
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestContainerSync(unittest.TestCase):
    def setUp(self):
        # Capture log lines so tests can assert on them (see assertLogMessage).
        self.logger = debug_logger('test-container-sync')
def test_FileLikeIter(self):
# Retained test to show new FileLikeIter acts just like the removed
# _Iter2FileLikeObject did.
flo = sync.FileLikeIter(iter(['123', '4567', '89', '0']))
expect = '1234567890'
got = flo.read(2)
self.assertTrue(len(got) <= 2)
self.assertEqual(got, expect[:len(got)])
expect = expect[len(got):]
got = flo.read(5)
self.assertTrue(len(got) <= 5)
self.assertEqual(got, expect[:len(got)])
expect = expect[len(got):]
self.assertEqual(flo.read(), expect)
self.assertEqual(flo.read(), '')
self.assertEqual(flo.read(2), '')
flo = sync.FileLikeIter(iter(['123', '4567', '89', '0']))
self.assertEqual(flo.read(), '1234567890')
self.assertEqual(flo.read(), '')
self.assertEqual(flo.read(2), '')
def assertLogMessage(self, msg_level, expected, skip=0):
for line in self.logger.get_lines_for_level(msg_level)[skip:]:
msg = 'expected %r not in %r' % (expected, line)
self.assertTrue(expected in line, msg)
    @with_tempdir
    def test_init(self, tempdir):
        """ContainerSync start-up: explicit, missing, and default
        internal-client configurations."""
        ic_conf_path = os.path.join(tempdir, 'internal-client.conf')
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        self.assertTrue(cs.container_ring is cring)
        # specified but not exists will not start
        conf = {'internal_client_conf_path': ic_conf_path}
        self.assertRaises(SystemExit, sync.ContainerSync, conf,
                          container_ring=cring, logger=self.logger)
        # not specified will use default conf
        with mock.patch('swift.container.sync.InternalClient') as mock_ic:
            cs = sync.ContainerSync({}, container_ring=cring,
                                    logger=self.logger)
            self.assertTrue(cs.container_ring is cring)
            self.assertTrue(mock_ic.called)
            conf_path, name, retry = mock_ic.call_args[0]
            self.assertTrue(isinstance(conf_path, ConfigString))
            self.assertEqual(conf_path.contents.getvalue(),
                             dedent(sync.ic_conf_body))
            self.assertLogMessage('warning', 'internal_client_conf_path')
            self.assertLogMessage('warning', 'internal-client.conf-sample')
        # correct
        contents = dedent(sync.ic_conf_body)
        with open(ic_conf_path, 'w') as f:
            f.write(contents)
        with mock.patch('swift.container.sync.InternalClient') as mock_ic:
            cs = sync.ContainerSync(conf, container_ring=cring)
            self.assertTrue(cs.container_ring is cring)
            self.assertTrue(mock_ic.called)
            conf_path, name, retry = mock_ic.call_args[0]
            self.assertEqual(conf_path, ic_conf_path)
        # The built-in default conf body must match the shipped sample file.
        sample_conf_filename = os.path.join(
            os.path.dirname(test.__file__),
            '../etc/internal-client.conf-sample')
        with open(sample_conf_filename) as sample_conf_file:
            sample_conf = sample_conf_file.read()
        self.assertEqual(contents, sample_conf)
    def test_run_forever(self):
        # This runs runs_forever with fakes to succeed for two loops, the first
        # causing a report but no interval sleep, the second no report but an
        # interval sleep.
        # A final extra fake_time() call raises to break out of the otherwise
        # infinite loop.
        time_calls = [0]
        sleep_calls = []
        def fake_time():
            time_calls[0] += 1
            returns = [1,     # Initialized reported time
                       1,     # Start time
                       3602,  # Is it report time (yes)
                       3602,  # Report time
                       3602,  # Elapsed time for "under interval" (no)
                       3602,  # Start time
                       3603,  # Is it report time (no)
                       3603]  # Elapsed time for "under interval" (yes)
            if time_calls[0] == len(returns) + 1:
                raise Exception('we are now done')
            return returns[time_calls[0] - 1]
        def fake_sleep(amount):
            sleep_calls.append(amount)
        gen_func = ('swift.container.sync_store.'
                    'ContainerSyncStore.synced_containers_generator')
        with mock.patch('swift.container.sync.InternalClient'), \
                mock.patch('swift.container.sync.time', fake_time), \
                mock.patch('swift.container.sync.sleep', fake_sleep), \
                mock.patch(gen_func) as fake_generator, \
                mock.patch('swift.container.sync.ContainerBroker',
                           lambda p: FakeContainerBroker(p, info={
                               'account': 'a', 'container': 'c',
                               'storage_policy_index': 0})):
            fake_generator.side_effect = [iter(['container.db']),
                                          iter(['container.db'])]
            cs = sync.ContainerSync({}, container_ring=FakeRing())
            try:
                cs.run_forever()
            except Exception as err:
                if str(err) != 'we are now done':
                    raise
            self.assertEqual(time_calls, [9])
            self.assertEqual(len(sleep_calls), 2)
            self.assertLessEqual(sleep_calls[0], cs.interval)
            self.assertEqual(cs.interval - 1, sleep_calls[1])
            self.assertEqual(2, fake_generator.call_count)
            self.assertEqual(cs.reported, 3602)
    def test_run_once(self):
        # This runs runs_once with fakes twice, the first causing an interim
        # report, the second with no interim report.
        # A final extra fake_time() call raises to end the second pass.
        time_calls = [0]
        def fake_time():
            time_calls[0] += 1
            returns = [1,     # Initialized reported time
                       1,     # Start time
                       3602,  # Is it report time (yes)
                       3602,  # Report time
                       3602,  # End report time
                       3602,  # For elapsed
                       3602,  # Start time
                       3603,  # Is it report time (no)
                       3604,  # End report time
                       3605]  # For elapsed
            if time_calls[0] == len(returns) + 1:
                raise Exception('we are now done')
            return returns[time_calls[0] - 1]
        gen_func = ('swift.container.sync_store.'
                    'ContainerSyncStore.synced_containers_generator')
        with mock.patch('swift.container.sync.InternalClient'), \
                mock.patch('swift.container.sync.time', fake_time), \
                mock.patch(gen_func) as fake_generator, \
                mock.patch('swift.container.sync.ContainerBroker',
                           lambda p: FakeContainerBroker(p, info={
                               'account': 'a', 'container': 'c',
                               'storage_policy_index': 0})):
            fake_generator.side_effect = [iter(['container.db']),
                                          iter(['container.db'])]
            cs = sync.ContainerSync({}, container_ring=FakeRing())
            try:
                cs.run_once()
                self.assertEqual(time_calls, [6])
                self.assertEqual(1, fake_generator.call_count)
                self.assertEqual(cs.reported, 3602)
                cs.run_once()
            except Exception as err:
                if str(err) != 'we are now done':
                    raise
            self.assertEqual(time_calls, [10])
            self.assertEqual(2, fake_generator.call_count)
            self.assertEqual(cs.reported, 3604)
def test_container_sync_not_db(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
self.assertEqual(cs.container_failures, 0)
    def test_container_sync_missing_db(self):
        """Missing-DB handling: removal from the sync store may itself fail
        in several ways; each call still counts as one container failure."""
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        broker = 'swift.container.backend.ContainerBroker'
        store = 'swift.container.sync_store.ContainerSyncStore'
        # In this test we call the container_sync instance several
        # times with a missing db in various combinations.
        # Since we use the same ContainerSync instance for all tests
        # its failures counter increases by one with each call.
        # Test the case where get_info returns DatabaseConnectionError
        # with DB does not exist, and we succeed in deleting it.
        with mock.patch(broker + '.get_info') as fake_get_info:
            with mock.patch(store + '.remove_synced_container') as fake_remove:
                fake_get_info.side_effect = DatabaseConnectionError(
                    'a',
                    "DB doesn't exist")
                cs.container_sync('isa.db')
        self.assertEqual(cs.container_failures, 1)
        self.assertEqual(cs.container_skips, 0)
        self.assertEqual(1, fake_remove.call_count)
        self.assertEqual('isa.db', fake_remove.call_args[0][0].db_file)
        # Test the case where get_info returns DatabaseConnectionError
        # with DB does not exist, and we fail to delete it.
        with mock.patch(broker + '.get_info') as fake_get_info:
            with mock.patch(store + '.remove_synced_container') as fake_remove:
                fake_get_info.side_effect = DatabaseConnectionError(
                    'a',
                    "DB doesn't exist")
                fake_remove.side_effect = OSError('1')
                cs.container_sync('isa.db')
        self.assertEqual(cs.container_failures, 2)
        self.assertEqual(cs.container_skips, 0)
        self.assertEqual(1, fake_remove.call_count)
        self.assertEqual('isa.db', fake_remove.call_args[0][0].db_file)
        # Test the case where get_info returns DatabaseConnectionError
        # with DB does not exist, and it returns an error != ENOENT.
        with mock.patch(broker + '.get_info') as fake_get_info:
            with mock.patch(store + '.remove_synced_container') as fake_remove:
                fake_get_info.side_effect = DatabaseConnectionError(
                    'a',
                    "DB doesn't exist")
                fake_remove.side_effect = OSError(errno.EPERM, 'a')
                cs.container_sync('isa.db')
        self.assertEqual(cs.container_failures, 3)
        self.assertEqual(cs.container_skips, 0)
        self.assertEqual(1, fake_remove.call_count)
        self.assertEqual('isa.db', fake_remove.call_args[0][0].db_file)
        # Test the case where get_info returns DatabaseConnectionError
        # error different than DB does not exist
        with mock.patch(broker + '.get_info') as fake_get_info:
            with mock.patch(store + '.remove_synced_container') as fake_remove:
                fake_get_info.side_effect = DatabaseConnectionError('a', 'a')
                cs.container_sync('isa.db')
        self.assertEqual(cs.container_failures, 4)
        self.assertEqual(cs.container_skips, 0)
        self.assertEqual(0, fake_remove.call_count)
    def test_container_sync_not_my_db(self):
        """Only containers whose ring node matches our ip AND port are synced."""
        # Db could be there due to handoff replication so test that we ignore
        # those.
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({
                'bind_ip': '10.0.0.0',
            }, container_ring=cring)
        # Plumbing test for bind_ip and whataremyips()
        self.assertEqual(['10.0.0.0'], cs._myips)
        orig_ContainerBroker = sync.ContainerBroker
        try:
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0})
            cs._myips = ['127.0.0.1']  # No match
            cs._myport = 1  # No match
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 0)
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1  # No match
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 0)
            cs._myips = ['127.0.0.1']  # No match
            cs._myport = 1000  # Match
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 0)
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            # This complete match will cause the 1 container failure since the
            # broker's info doesn't contain sync point keys
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 1)
        finally:
            sync.ContainerBroker = orig_ContainerBroker
    def test_container_sync_deleted(self):
        """A broker reporting deleted short-circuits without a new failure."""
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        orig_ContainerBroker = sync.ContainerBroker
        try:
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0}, deleted=False)
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            # This complete match will cause the 1 container failure since the
            # broker's info doesn't contain sync point keys
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 1)
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0}, deleted=True)
            # This complete match will not cause any more container failures
            # since the broker indicates deletion
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 1)
        finally:
            sync.ContainerBroker = orig_ContainerBroker
    def test_container_sync_no_to_or_key(self):
        """Containers missing x-container-sync-to and/or -key are skipped;
        a disallowed sync-to host counts as a failure."""
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        orig_ContainerBroker = sync.ContainerBroker
        try:
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1})
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            # This complete match will be skipped since the broker's metadata
            # has no x-container-sync-to or x-container-sync-key
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 0)
            self.assertEqual(cs.container_skips, 1)
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1)})
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            # This complete match will be skipped since the broker's metadata
            # has no x-container-sync-key
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 0)
            self.assertEqual(cs.container_skips, 2)
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1},
                metadata={'x-container-sync-key': ('key', 1)})
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            # This complete match will be skipped since the broker's metadata
            # has no x-container-sync-to
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 0)
            self.assertEqual(cs.container_skips, 3)
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)})
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            cs.allowed_sync_hosts = []
            # This complete match will cause a container failure since the
            # sync-to won't validate as allowed.
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 1)
            self.assertEqual(cs.container_skips, 3)
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)})
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            # This complete match will succeed completely since the broker
            # get_items_since will return no new rows.
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 1)
            self.assertEqual(cs.container_skips, 3)
        finally:
            sync.ContainerBroker = orig_ContainerBroker
    def test_container_stop_at(self):
        """container_sync gives up early once its time budget is exhausted."""
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        orig_ContainerBroker = sync.ContainerBroker
        orig_time = sync.time
        try:
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)},
                items_since=['erroneous data'])
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            # This sync will fail since the items_since data is bad.
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 1)
            self.assertEqual(cs.container_skips, 0)
            # Set up fake times to make the sync short-circuit as having taken
            # too long
            fake_times = [
                1.0,        # Compute the time to move on
                100000.0,   # Compute if it's time to move on from first loop
                100000.0]   # Compute if it's time to move on from second loop
            def fake_time():
                return fake_times.pop(0)
            sync.time = fake_time
            # This same sync won't fail since it will look like it took so long
            # as to be time to move on (before it ever actually tries to do
            # anything).
            cs.container_sync('isa.db')
            self.assertEqual(cs.container_failures, 1)
            self.assertEqual(cs.container_skips, 0)
        finally:
            sync.ContainerBroker = orig_ContainerBroker
            sync.time = orig_time
    def test_container_first_loop(self):
        """First-loop (sync_point1) behavior: row ownership via hash_path,
        delete handling, and how the sync points are advanced."""
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        def fake_hash_path(account, container, obj, raw_digest=False):
            # Ensures that no rows match for full syncing, ordinal is 0 and
            # all hashes are 0
            return '\x00' * 16
        fcb = FakeContainerBroker(
            'path',
            info={'account': 'a', 'container': 'c',
                  'storage_policy_index': 0,
                  'x_container_sync_point1': 2,
                  'x_container_sync_point2': -1},
            metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                      'x-container-sync-key': ('key', 1)},
            items_since=[{'ROWID': 1, 'name': 'o'}])
        with mock.patch('swift.container.sync.ContainerBroker',
                        lambda p: fcb), \
                mock.patch('swift.container.sync.hash_path', fake_hash_path):
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Succeeds because no rows match
            self.assertEqual(cs.container_failures, 1)
            self.assertEqual(cs.container_skips, 0)
            self.assertEqual(fcb.sync_point1, None)
            self.assertEqual(fcb.sync_point2, -1)
        def fake_hash_path(account, container, obj, raw_digest=False):
            # Ensures that all rows match for full syncing, ordinal is 0
            # and all hashes are 1
            return '\x01' * 16
        fcb = FakeContainerBroker('path', info={'account': 'a',
                                                'container': 'c',
                                                'storage_policy_index': 0,
                                                'x_container_sync_point1': 1,
                                                'x_container_sync_point2': 1},
                                  metadata={'x-container-sync-to':
                                            ('http://127.0.0.1/a/c', 1),
                                            'x-container-sync-key':
                                            ('key', 1)},
                                  items_since=[{'ROWID': 1, 'name': 'o'}])
        with mock.patch('swift.container.sync.ContainerBroker',
                        lambda p: fcb), \
                mock.patch('swift.container.sync.hash_path', fake_hash_path):
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Succeeds because the two sync points haven't deviated yet
            self.assertEqual(cs.container_failures, 1)
            self.assertEqual(cs.container_skips, 0)
            self.assertEqual(fcb.sync_point1, -1)
            self.assertEqual(fcb.sync_point2, -1)
        fcb = FakeContainerBroker(
            'path',
            info={'account': 'a', 'container': 'c',
                  'storage_policy_index': 0,
                  'x_container_sync_point1': 2,
                  'x_container_sync_point2': -1},
            metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                      'x-container-sync-key': ('key', 1)},
            items_since=[{'ROWID': 1, 'name': 'o'}])
        with mock.patch('swift.container.sync.ContainerBroker', lambda p: fcb):
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Fails because container_sync_row will fail since the row has no
            # 'deleted' key
            self.assertEqual(cs.container_failures, 2)
            self.assertEqual(cs.container_skips, 0)
            self.assertEqual(fcb.sync_point1, None)
            self.assertEqual(fcb.sync_point2, -1)
        def fake_delete_object(*args, **kwargs):
            raise ClientException
        fcb = FakeContainerBroker(
            'path',
            info={'account': 'a', 'container': 'c',
                  'storage_policy_index': 0,
                  'x_container_sync_point1': 2,
                  'x_container_sync_point2': -1},
            metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                      'x-container-sync-key': ('key', 1)},
            items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
                          'deleted': True}])
        with mock.patch('swift.container.sync.ContainerBroker',
                        lambda p: fcb), \
                mock.patch('swift.container.sync.delete_object',
                           fake_delete_object):
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Fails because delete_object fails
            self.assertEqual(cs.container_failures, 3)
            self.assertEqual(cs.container_skips, 0)
            self.assertEqual(fcb.sync_point1, None)
            self.assertEqual(fcb.sync_point2, -1)
        fcb = FakeContainerBroker(
            'path',
            info={'account': 'a', 'container': 'c',
                  'storage_policy_index': 0,
                  'x_container_sync_point1': 2,
                  'x_container_sync_point2': -1},
            metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                      'x-container-sync-key': ('key', 1)},
            items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
                          'deleted': True}])
        with mock.patch('swift.container.sync.ContainerBroker',
                        lambda p: fcb), \
                mock.patch('swift.container.sync.delete_object',
                           lambda *x, **y: None):
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Succeeds because delete_object succeeds
            self.assertEqual(cs.container_failures, 3)
            self.assertEqual(cs.container_skips, 0)
            self.assertEqual(fcb.sync_point1, None)
            self.assertEqual(fcb.sync_point2, 1)
    def test_container_second_loop(self):
        """Exercise the second sync loop (rows this node is not primary for)
        by keeping both sync points equal so the first loop never runs.

        NOTE(review): this test monkeypatches ``sync`` module attributes
        directly and restores them in ``finally`` instead of using
        ``mock.patch`` context managers like the neighboring tests.
        """
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring,
                                    logger=self.logger)
        # Remember originals so the finally block can undo the monkeypatching.
        orig_ContainerBroker = sync.ContainerBroker
        orig_hash_path = sync.hash_path
        orig_delete_object = sync.delete_object
        try:
            # We'll ensure the first loop is always skipped by keeping the two
            # sync points equal
            def fake_hash_path(account, container, obj, raw_digest=False):
                # Ensures that no rows match for second loop, ordinal is 0 and
                # all hashes are 1
                return '\x01' * 16
            sync.hash_path = fake_hash_path
            fcb = FakeContainerBroker(
                'path',
                info={'account': 'a', 'container': 'c',
                      'storage_policy_index': 0,
                      'x_container_sync_point1': -1,
                      'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)},
                items_since=[{'ROWID': 1, 'name': 'o'}])
            sync.ContainerBroker = lambda p: fcb
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Succeeds because no rows match
            self.assertEqual(cs.container_failures, 0)
            self.assertEqual(cs.container_skips, 0)
            self.assertEqual(fcb.sync_point1, 1)
            self.assertEqual(fcb.sync_point2, None)

            def fake_hash_path(account, container, obj, raw_digest=False):
                # Ensures that all rows match for second loop, ordinal is 0 and
                # all hashes are 0
                return '\x00' * 16

            def fake_delete_object(*args, **kwargs):
                pass
            sync.hash_path = fake_hash_path
            sync.delete_object = fake_delete_object
            fcb = FakeContainerBroker(
                'path',
                info={'account': 'a', 'container': 'c',
                      'storage_policy_index': 0,
                      'x_container_sync_point1': -1,
                      'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)},
                items_since=[{'ROWID': 1, 'name': 'o'}])
            sync.ContainerBroker = lambda p: fcb
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Fails because row is missing 'deleted' key
            # Nevertheless the fault is skipped
            self.assertEqual(cs.container_failures, 1)
            self.assertEqual(cs.container_skips, 0)
            self.assertEqual(fcb.sync_point1, 1)
            self.assertEqual(fcb.sync_point2, None)
            fcb = FakeContainerBroker(
                'path',
                info={'account': 'a', 'container': 'c',
                      'storage_policy_index': 0,
                      'x_container_sync_point1': -1,
                      'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)},
                items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
                              'deleted': True}])
            sync.ContainerBroker = lambda p: fcb
            cs._myips = ['10.0.0.0']  # Match
            cs._myport = 1000  # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Succeeds because row now has 'deleted' key and delete_object
            # succeeds
            self.assertEqual(cs.container_failures, 1)
            self.assertEqual(cs.container_skips, 0)
            self.assertEqual(fcb.sync_point1, 1)
            self.assertEqual(fcb.sync_point2, None)
        finally:
            sync.ContainerBroker = orig_ContainerBroker
            sync.hash_path = orig_hash_path
            sync.delete_object = orig_delete_object
def test_container_report(self):
container_stats = {'puts': 0,
'deletes': 0,
'bytes': 0}
def fake_container_sync_row(self, row, sync_to,
user_key, broker, info, realm, realm_key):
if 'deleted' in row:
container_stats['deletes'] += 1
return True
container_stats['puts'] += 1
container_stats['bytes'] += row['size']
return True
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for second loop, ordinal is 0 and
# all hashes are 1
return '\x01' * 16
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 5,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o1', 'size': 0,
'deleted': True},
{'ROWID': 2, 'name': 'o2', 'size': 1010},
{'ROWID': 3, 'name': 'o3', 'size': 0,
'deleted': True},
{'ROWID': 4, 'name': 'o4', 'size': 90},
{'ROWID': 5, 'name': 'o5', 'size': 0}])
with mock.patch('swift.container.sync.InternalClient'), \
mock.patch('swift.container.sync.hash_path',
fake_hash_path), \
mock.patch('swift.container.sync.ContainerBroker',
lambda p: fcb):
cring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring,
logger=self.logger)
cs.container_stats = container_stats
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
funcType = type(sync.ContainerSync.container_sync_row)
cs.container_sync_row = funcType(fake_container_sync_row,
cs, sync.ContainerSync)
cs.container_sync('isa.db')
# Succeeds because no rows match
log_line = cs.logger.get_lines_for_level('info')[0]
lines = log_line.split(',')
self.assertTrue('sync_point2: 5', lines.pop().strip())
self.assertTrue('sync_point1: 5', lines.pop().strip())
self.assertTrue('bytes: 1100', lines.pop().strip())
self.assertTrue('deletes: 2', lines.pop().strip())
self.assertTrue('puts: 3', lines.pop().strip())
def test_container_sync_row_delete(self):
self._test_container_sync_row_delete(None, None)
def test_container_sync_row_delete_using_realms(self):
self._test_container_sync_row_delete('US', 'realm_key')
    def _test_container_sync_row_delete(self, realm, realm_key):
        """Shared DELETE-row assertions for ``container_sync_row``.

        Checks the headers passed to ``delete_object`` (including the
        x-container-sync-auth header when a realm is used) and the
        delete/failure accounting for several delete_object outcomes.
        """
        orig_uuid = sync.uuid
        orig_delete_object = sync.delete_object
        try:
            class FakeUUID(object):
                class uuid4(object):
                    hex = 'abcdef'
            # Pin uuid4().hex so the signed auth header is deterministic.
            sync.uuid = FakeUUID
            ts_data = Timestamp(1.1)

            def fake_delete_object(path, name=None, headers=None, proxy=None,
                                   logger=None, timeout=None):
                self.assertEqual(path, 'http://sync/to/path')
                self.assertEqual(name, 'object')
                if realm:
                    self.assertEqual(headers, {
                        'x-container-sync-auth':
                        'US abcdef a2401ecb1256f469494a0abcb0eb62ffa73eca63',
                        'x-timestamp': ts_data.internal})
                else:
                    self.assertEqual(
                        headers,
                        {'x-container-sync-key': 'key',
                         'x-timestamp': ts_data.internal})
                self.assertEqual(proxy, 'http://proxy')
                self.assertEqual(timeout, 5.0)
                self.assertEqual(logger, self.logger)
            sync.delete_object = fake_delete_object
            with mock.patch('swift.container.sync.InternalClient'):
                cs = sync.ContainerSync({}, container_ring=FakeRing(),
                                        logger=self.logger)
            cs.http_proxies = ['http://proxy']
            # Success.
            # simulate a row with tombstone at 1.1 and later ctype, meta times
            created_at = ts_data.internal + '+1388+1388'  # last modified = 1.2
            self.assertTrue(cs.container_sync_row(
                {'deleted': True,
                 'name': 'object',
                 'created_at': created_at,
                 'size': '1000'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEqual(cs.container_deletes, 1)
            # Collect exceptions raised by the fakes to assert on them later.
            exc = []

            def fake_delete_object(*args, **kwargs):
                exc.append(Exception('test exception'))
                raise exc[-1]
            sync.delete_object = fake_delete_object
            # Failure because of delete_object exception
            self.assertFalse(cs.container_sync_row(
                {'deleted': True,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEqual(cs.container_deletes, 1)
            self.assertEqual(len(exc), 1)
            self.assertEqual(str(exc[-1]), 'test exception')

            def fake_delete_object(*args, **kwargs):
                exc.append(ClientException('test client exception'))
                raise exc[-1]
            sync.delete_object = fake_delete_object
            # Failure because of delete_object exception
            self.assertFalse(cs.container_sync_row(
                {'deleted': True,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEqual(cs.container_deletes, 1)
            self.assertEqual(len(exc), 2)
            self.assertEqual(str(exc[-1]), 'test client exception')

            def fake_delete_object(*args, **kwargs):
                exc.append(ClientException('test client exception',
                                           http_status=404))
                raise exc[-1]
            sync.delete_object = fake_delete_object
            # Success because the object wasn't even found
            self.assertTrue(cs.container_sync_row(
                {'deleted': True,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEqual(cs.container_deletes, 2)
            self.assertEqual(len(exc), 3)
            self.assertEqual(str(exc[-1]), 'test client exception: 404')
        finally:
            sync.uuid = orig_uuid
            sync.delete_object = orig_delete_object
def test_container_sync_row_put(self):
self._test_container_sync_row_put(None, None)
def test_container_sync_row_put_using_realms(self):
self._test_container_sync_row_put('US', 'realm_key')
    def _test_container_sync_row_put(self, realm, realm_key):
        """Shared PUT-row assertions for ``container_sync_row``.

        Covers: successful puts (header construction, etag quote
        stripping, removal of 'date'/'last-modified'), get_object and
        put_object failure paths (unexpected exception, ClientException,
        401/404/503), and the remote-HEAD short-circuit cases at the end.
        The counters ``expected_put_count`` / ``excepted_failure_count``
        are advanced in lockstep with the scenarios, so statement order
        matters throughout.
        """
        orig_uuid = sync.uuid
        orig_put_object = sync.put_object
        orig_head_object = sync.head_object
        try:
            class FakeUUID(object):
                class uuid4(object):
                    hex = 'abcdef'
            # Pin uuid4().hex so the signed auth header is deterministic.
            sync.uuid = FakeUUID
            ts_data = Timestamp(1.1)
            timestamp = Timestamp(1.2)

            def fake_put_object(sync_to, name=None, headers=None,
                                contents=None, proxy=None, logger=None,
                                timeout=None):
                self.assertEqual(sync_to, 'http://sync/to/path')
                self.assertEqual(name, 'object')
                if realm:
                    self.assertEqual(headers, {
                        'x-container-sync-auth':
                        'US abcdef a5fb3cf950738e6e3b364190e246bd7dd21dad3c',
                        'x-timestamp': timestamp.internal,
                        'etag': 'etagvalue',
                        'other-header': 'other header value',
                        'content-type': 'text/plain'})
                else:
                    self.assertEqual(headers, {
                        'x-container-sync-key': 'key',
                        'x-timestamp': timestamp.internal,
                        'other-header': 'other header value',
                        'etag': 'etagvalue',
                        'content-type': 'text/plain'})
                self.assertEqual(contents.read(), 'contents')
                self.assertEqual(proxy, 'http://proxy')
                self.assertEqual(timeout, 5.0)
                self.assertEqual(logger, self.logger)
            sync.put_object = fake_put_object
            expected_put_count = 0
            excepted_failure_count = 0  # NOTE(review): sic; means "expected"
            with mock.patch('swift.container.sync.InternalClient'):
                cs = sync.ContainerSync({}, container_ring=FakeRing(),
                                        logger=self.logger)
            cs.http_proxies = ['http://proxy']

            def fake_get_object(acct, con, obj, headers, acceptable_statuses):
                self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                                 '0')
                return (200,
                        {'other-header': 'other header value',
                         'etag': '"etagvalue"',
                         'x-timestamp': timestamp.internal,
                         'content-type': 'text/plain; swift_bytes=123'},
                        iter('contents'))
            cs.swift.get_object = fake_get_object
            # Success as everything says it worked.
            # simulate a row with data at 1.1 and later ctype, meta times
            created_at = ts_data.internal + '+1388+1388'  # last modified = 1.2

            def fake_object_in_rcontainer(row, sync_to, user_key,
                                          broker, realm, realm_key):
                # Pretend the object is absent remotely so a PUT is attempted.
                return False
            orig_object_in_rcontainer = cs._object_in_remote_container
            cs._object_in_remote_container = fake_object_in_rcontainer
            self.assertTrue(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': created_at,
                 'size': 50}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            expected_put_count += 1
            self.assertEqual(cs.container_puts, expected_put_count)

            def fake_get_object(acct, con, obj, headers, acceptable_statuses):
                self.assertEqual(headers['X-Newest'], True)
                self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                                 '0')
                return (200,
                        {'date': 'date value',
                         'last-modified': 'last modified value',
                         'x-timestamp': timestamp.internal,
                         'other-header': 'other header value',
                         'etag': '"etagvalue"',
                         'content-type': 'text/plain; swift_bytes=123'},
                        iter('contents'))
            cs.swift.get_object = fake_get_object
            # Success as everything says it worked, also checks 'date' and
            # 'last-modified' headers are removed and that 'etag' header is
            # stripped of double quotes.
            self.assertTrue(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': timestamp.internal,
                 'size': 60}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            expected_put_count += 1
            self.assertEqual(cs.container_puts, expected_put_count)
            # Success as everything says it worked, also check that PUT
            # timestamp equals GET timestamp when it is newer than created_at
            # value.
            self.assertTrue(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': '1.1',
                 'size': 60}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            expected_put_count += 1
            self.assertEqual(cs.container_puts, expected_put_count)
            # Collect exceptions raised by the fakes to assert on them later.
            exc = []

            def fake_get_object(acct, con, obj, headers, acceptable_statuses):
                self.assertEqual(headers['X-Newest'], True)
                self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                                 '0')
                exc.append(Exception('test exception'))
                raise exc[-1]
            cs.swift.get_object = fake_get_object
            # Fail due to completely unexpected exception
            self.assertFalse(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': timestamp.internal,
                 'size': 70}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEqual(cs.container_puts, expected_put_count)
            excepted_failure_count += 1
            self.assertEqual(len(exc), 1)
            self.assertEqual(str(exc[-1]), 'test exception')
            exc = []

            def fake_get_object(acct, con, obj, headers, acceptable_statuses):
                self.assertEqual(headers['X-Newest'], True)
                self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                                 '0')
                exc.append(ClientException('test client exception'))
                raise exc[-1]
            cs.swift.get_object = fake_get_object
            # Fail due to all direct_get_object calls failing
            self.assertFalse(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': timestamp.internal,
                 'size': 80}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEqual(cs.container_puts, expected_put_count)
            excepted_failure_count += 1
            self.assertEqual(len(exc), 1)
            self.assertEqual(str(exc[-1]), 'test client exception')

            def fake_get_object(acct, con, obj, headers, acceptable_statuses):
                self.assertEqual(headers['X-Newest'], True)
                self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                                 '0')
                return (200, {'other-header': 'other header value',
                              'x-timestamp': timestamp.internal,
                              'etag': '"etagvalue"'},
                        iter('contents'))

            def fake_put_object(*args, **kwargs):
                raise ClientException('test client exception', http_status=401)
            cs.swift.get_object = fake_get_object
            sync.put_object = fake_put_object
            # Fail due to 401
            self.assertFalse(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': timestamp.internal,
                 'size': 90}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEqual(cs.container_puts, expected_put_count)
            excepted_failure_count += 1
            self.assertEqual(cs.container_failures, excepted_failure_count)
            self.assertLogMessage('info', 'Unauth')

            def fake_put_object(*args, **kwargs):
                raise ClientException('test client exception', http_status=404)
            sync.put_object = fake_put_object
            # Fail due to 404
            self.assertFalse(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': timestamp.internal,
                 'size': 50}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEqual(cs.container_puts, expected_put_count)
            excepted_failure_count += 1
            self.assertEqual(cs.container_failures, excepted_failure_count)
            self.assertLogMessage('info', 'Not found', 1)

            def fake_put_object(*args, **kwargs):
                raise ClientException('test client exception', http_status=503)
            sync.put_object = fake_put_object
            # Fail due to 503
            self.assertFalse(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': timestamp.internal,
                 'size': 50}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEqual(cs.container_puts, expected_put_count)
            excepted_failure_count += 1
            self.assertEqual(cs.container_failures, excepted_failure_count)
            self.assertLogMessage('error', 'ERROR Syncing')
            # Test the following cases:
            # remote has the same date and a put doesn't take place
            # remote has more up to date copy and a put doesn't take place
            # head_object returns ClientException(404) and a put takes place
            # head_object returns other ClientException put doesn't take place
            # and we get failure
            # head_object returns other Exception put does not take place
            # and we get failure
            # remote returns old copy and a put takes place
            test_row = {'deleted': False,
                        'name': 'object',
                        'created_at': timestamp.internal,
                        'etag': '1111',
                        'size': 10}
            test_info = {'account': 'a',
                         'container': 'c',
                         'storage_policy_index': 0}
            actual_puts = []

            def fake_put_object(*args, **kwargs):
                actual_puts.append((args, kwargs))

            def fake_head_object(*args, **kwargs):
                return ({'x-timestamp': '1.2'}, '')
            sync.put_object = fake_put_object
            sync.head_object = fake_head_object
            # Restore the real remote-container check for the cases below.
            cs._object_in_remote_container = orig_object_in_rcontainer
            self.assertTrue(cs.container_sync_row(
                test_row, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                test_info,
                realm, realm_key))
            # No additional put has taken place
            self.assertEqual(len(actual_puts), 0)
            # No additional errors
            self.assertEqual(cs.container_failures, excepted_failure_count)

            def fake_head_object(*args, **kwargs):
                return ({'x-timestamp': '1.3'}, '')
            sync.head_object = fake_head_object
            self.assertTrue(cs.container_sync_row(
                test_row, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                test_info,
                realm, realm_key))
            # No additional put has taken place
            self.assertEqual(len(actual_puts), 0)
            # No additional errors
            self.assertEqual(cs.container_failures, excepted_failure_count)
            actual_puts = []

            def fake_head_object(*args, **kwargs):
                raise ClientException('test client exception', http_status=404)
            sync.head_object = fake_head_object
            self.assertTrue(cs.container_sync_row(
                test_row, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                test_info, realm, realm_key))
            # Additional put has taken place
            self.assertEqual(len(actual_puts), 1)
            # No additional errors
            self.assertEqual(cs.container_failures, excepted_failure_count)

            def fake_head_object(*args, **kwargs):
                raise ClientException('test client exception', http_status=401)
            sync.head_object = fake_head_object
            self.assertFalse(cs.container_sync_row(
                test_row, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                test_info, realm, realm_key))
            # No additional put has taken place, failures increased
            self.assertEqual(len(actual_puts), 1)
            excepted_failure_count += 1
            self.assertEqual(cs.container_failures, excepted_failure_count)

            def fake_head_object(*args, **kwargs):
                raise Exception()
            sync.head_object = fake_head_object
            self.assertFalse(cs.container_sync_row(
                test_row,
                'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                test_info, realm, realm_key))
            # No additional put has taken place, failures increased
            self.assertEqual(len(actual_puts), 1)
            excepted_failure_count += 1
            self.assertEqual(cs.container_failures, excepted_failure_count)

            def fake_head_object(*args, **kwargs):
                return ({'x-timestamp': '1.1'}, '')
            sync.head_object = fake_head_object
            self.assertTrue(cs.container_sync_row(
                test_row, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                test_info, realm, realm_key))
            # Additional put has taken place
            self.assertEqual(len(actual_puts), 2)
            # No additional errors
            self.assertEqual(cs.container_failures, excepted_failure_count)
        finally:
            sync.uuid = orig_uuid
            sync.put_object = orig_put_object
            sync.head_object = orig_head_object
def test_select_http_proxy_None(self):
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync(
{'sync_proxy': ''}, container_ring=FakeRing())
self.assertEqual(cs.select_http_proxy(), None)
def test_select_http_proxy_one(self):
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync(
{'sync_proxy': 'http://one'}, container_ring=FakeRing())
self.assertEqual(cs.select_http_proxy(), 'http://one')
def test_select_http_proxy_multiple(self):
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync(
{'sync_proxy': 'http://one,http://two,http://three'},
container_ring=FakeRing())
self.assertEqual(
set(cs.http_proxies),
set(['http://one', 'http://two', 'http://three']))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.workflows_v1.types import workflows
from google.longrunning import operations_pb2 # type: ignore
# Build the client-info (user-agent) metadata sent with API requests.
# Fall back to a bare ClientInfo when the distribution metadata is not
# installed (e.g. when running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-workflows",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class WorkflowsTransport(abc.ABC):
    """Abstract transport class for Workflows.

    Concrete subclasses (gRPC, gRPC-asyncio) implement the RPC
    properties; this base class only resolves credentials and the
    target host.
    """

    # OAuth scopes requested for every Workflows RPC.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    DEFAULT_HOST: str = "workflows.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        # Annotation fixed to Optional[...] to match the None default.
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are given.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        # Save the scopes.
        self._scopes = scopes
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
        # If the credentials are service account credentials, then always try to use self signed JWT.
        # (hasattr guards against older google-auth versions without the method.)
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)
        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.list_workflows: gapic_v1.method.wrap_method(
                self.list_workflows, default_timeout=None, client_info=client_info,
            ),
            self.get_workflow: gapic_v1.method.wrap_method(
                self.get_workflow, default_timeout=None, client_info=client_info,
            ),
            self.create_workflow: gapic_v1.method.wrap_method(
                self.create_workflow, default_timeout=None, client_info=client_info,
            ),
            self.delete_workflow: gapic_v1.method.wrap_method(
                self.delete_workflow, default_timeout=None, client_info=client_info,
            ),
            self.update_workflow: gapic_v1.method.wrap_method(
                self.update_workflow, default_timeout=None, client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    # The RPC properties below are implemented by concrete transports;
    # sync transports return the response type, async ones an Awaitable.
    @property
    def list_workflows(
        self,
    ) -> Callable[
        [workflows.ListWorkflowsRequest],
        Union[
            workflows.ListWorkflowsResponse, Awaitable[workflows.ListWorkflowsResponse]
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_workflow(
        self,
    ) -> Callable[
        [workflows.GetWorkflowRequest],
        Union[workflows.Workflow, Awaitable[workflows.Workflow]],
    ]:
        raise NotImplementedError()

    @property
    def create_workflow(
        self,
    ) -> Callable[
        [workflows.CreateWorkflowRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def delete_workflow(
        self,
    ) -> Callable[
        [workflows.DeleteWorkflowRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def update_workflow(
        self,
    ) -> Callable[
        [workflows.UpdateWorkflowRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()
__all__ = ("WorkflowsTransport",)
|
|
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
import unittest
import dimod.testing as dtest
from dimod import ExactSolver, ScaleComposite, HigherOrderComposite, \
BinaryQuadraticModel, Sampler, PolySampler
from dimod.reference.composites.scalecomposite import _check_params, _scaled_bqm, _calc_norm_coeff
from numbers import Number
def _scaled_hubo(h, j, offset, scalar, bias_range,
quadratic_range,
ignored_variables,
ignored_interactions,
ignore_offset):
"""Helper function of sample_ising for scaling"""
if scalar is None:
scalar = _calc_norm_coeff(h, j, bias_range, quadratic_range,
ignored_variables, ignored_interactions)
h_sc = dict(h)
j_sc = dict(j)
offset_sc = offset
if not isinstance(scalar, Number):
raise TypeError("expected scalar to be a Number")
if scalar != 1:
if ignored_variables is None or ignored_interactions is None:
raise ValueError('ignored interactions or variables cannot be None')
j_sc = {}
for u, v in j.items():
if u in ignored_interactions:
j_sc[u] = v
else:
j_sc[u] = v * scalar
if not ignore_offset:
offset_sc = offset * scalar
h_sc = {}
for k, v in h.items():
if k in ignored_variables:
h_sc[k] = v
else:
h_sc[k] = v * scalar
return h_sc, j_sc, offset_sc
class ScalingChecker(Sampler, PolySampler):
    """Test double that wraps a sampler and asserts the problem it is
    handed matches the scaled problem computed up front from the same
    scaling options the composite under test uses.
    """
    def __init__(self, child_sampler, bqm=None, h=None, J=None, offset=0,
                 scalar=None, bias_range=1, quadratic_range=None,
                 ignored_variables=None, ignored_interactions=None,
                 ignore_offset=False, **other_params):
        scale_options = dict(scalar=scalar,
                             bias_range=bias_range,
                             quadratic_range=quadratic_range,
                             ignored_variables=ignored_variables,
                             ignored_interactions=ignored_interactions,
                             ignore_offset=ignore_offset)
        self.child = child_sampler
        if bqm is not None:
            # Quadratic problem given directly as a BQM.
            self.bqm = _scaled_bqm(bqm, **scale_options)
        elif h is not None and J is not None:
            if max(map(len, J.keys())) == 2:
                # Purely quadratic Ising problem: scale via a BQM.
                bqm = BinaryQuadraticModel.from_ising(h, J, offset=offset)
                self.bqm = _scaled_bqm(bqm, **scale_options)
            else:
                # Higher-order terms present: scale h/J/offset directly.
                h_sc, J_sc, offset_sc = _scaled_hubo(h, J, offset=offset,
                                                     **scale_options)
                self.h = h_sc
                self.J = J_sc
                self.offset = offset_sc
    def sample(self, bqm, **parameters):
        # The composite must deliver exactly the pre-computed scaled bqm.
        assert self.bqm == bqm
        return self.child.sample(bqm, **parameters)
    def sample_ising(self, h, J, offset=0, **parameters):
        assert self.h == h
        assert self.J == J
        assert self.offset == offset
        return self.child.sample_ising(h, J, offset=offset, **parameters)
    def sample_poly(self, poly, **parameters):
        h, J, offset = poly.to_hising()
        assert self.h == h
        # Compare interactions term-by-term ignoring variable order.
        selfJ = {frozenset(term): bias for term, bias in self.J.items()}
        J = {frozenset(term): bias for term, bias in J.items()}
        assert selfJ == J
        assert self.offset == offset
        return self.child.sample_poly(poly, **parameters)
    def parameters(self):
        return self.child.parameters()
    def properties(self):
        return self.child.properties()
class TestScaleComposite(unittest.TestCase):
    """Tests for ScaleComposite and the module-level scaling helpers.

    Most sampling tests wrap the real solver in ScalingChecker (defined
    above) so that the child sampler itself asserts it was handed the
    correctly scaled problem; the test then only checks the final energy.
    """
    def test_instantiation_smoketest(self):
        """ScaleComposite satisfies the standard dimod sampler API."""
        sampler = ScaleComposite(ExactSolver())
        dtest.assert_sampler_api(sampler)
    def test_scaling_bqm(self):
        """_scaled_bqm: scalar/bias_range/quadratic_range and ignore options
        on a quadratic model, compared against hand-computed scaling."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        ignored_variables, ignored_interactions = _check_params(None, None)
        bqm = BinaryQuadraticModel.from_ising(linear, quadratic, offset=5.0)
        scalar = None
        quadratic_range = None
        ignore_offset = False
        # bias_range=2: linear biases of magnitude 4 force scale factor 2
        bqm_new = _scaled_bqm(bqm, scalar, 2, quadratic_range,
                              ignored_variables, ignored_interactions,
                              ignore_offset)
        sc = 2.
        hsc = {k: v / sc if k not in ignored_variables else v for
               k, v in linear.items()}
        Jsc = {k: v / sc if k not in ignored_interactions else v for
               k, v in quadratic.items()}
        bqm_scaled = BinaryQuadraticModel.from_ising(hsc, Jsc, offset=5.0 / 2.)
        self.assertEqual(bqm_scaled, bqm_new)
        # same scaling but with ignore_offset=True: offset stays at 5.0
        ignored_variables, ignored_interactions = _check_params(None, None)
        bqm = BinaryQuadraticModel.from_ising(linear, quadratic, offset=5.0)
        bqm_new = _scaled_bqm(bqm, scalar, 2, quadratic_range,
                              ignored_variables, ignored_interactions,
                              True)
        sc = 2.
        hsc = {k: v / sc if k not in ignored_variables else v for
               k, v in linear.items()}
        Jsc = {k: v / sc if k not in ignored_interactions else v for
               k, v in quadratic.items()}
        bqm_scaled = BinaryQuadraticModel.from_ising(hsc, Jsc, offset=5.0)
        self.assertEqual(bqm_scaled, bqm_new)
        # quadratic_range=(-1, 0.4): quadratic bias 3.2 drives the scaling
        bqm_new = _scaled_bqm(bqm, scalar, 1, (-1, 0.4),
                              ignored_variables, ignored_interactions,
                              ignore_offset)
        sc = 3.2 / 0.4
        hsc = {k: v / sc if k not in ignored_variables else v for
               k, v in linear.items()}
        Jsc = {k: v / sc if k not in ignored_interactions else v for
               k, v in quadratic.items()}
        bqm_scaled = BinaryQuadraticModel.from_ising(hsc, Jsc, offset=5.0 / sc)
        self.assertEqual(bqm_scaled, bqm_new)
        # ignored variables keep their original linear biases
        ignored_variables = ['a', 'b']
        ignored_interactions = None
        ignored_variables, ignored_interactions = _check_params(
            ignored_variables, ignored_interactions)
        bqm = BinaryQuadraticModel.from_ising(linear, quadratic)
        bqm_new = _scaled_bqm(bqm, scalar, (2, 2), quadratic_range,
                              ignored_variables, ignored_interactions,
                              ignore_offset)
        sc = 3.2 / 2.
        hsc = {k: v / sc if k not in ignored_variables else v for
               k, v in linear.items()}
        Jsc = {k: v / sc if k not in ignored_interactions else v for
               k, v in quadratic.items()}
        bqm_scaled = BinaryQuadraticModel.from_ising(hsc, Jsc, offset=0)
        self.assertEqual(bqm_scaled, bqm_new)
        # ignored interactions keep their original quadratic biases
        ignored_variables = None
        ignored_interactions = [('a', 'b')]
        ignored_variables, ignored_interactions = _check_params(
            ignored_variables, ignored_interactions)
        bqm = BinaryQuadraticModel.from_ising(linear, quadratic)
        bqm_new = _scaled_bqm(bqm, scalar, 1, 0.5,
                              ignored_variables, ignored_interactions,
                              ignore_offset)
        sc = 4.
        hsc = {k: v / sc if k not in ignored_variables else v for
               k, v in linear.items()}
        Jsc = {k: v / sc if k not in ignored_interactions else v for
               k, v in quadratic.items()}
        bqm_scaled = BinaryQuadraticModel.from_ising(hsc, Jsc, offset=0)
        self.assertEqual(bqm_scaled, bqm_new)
    def test_scaling_hubo(self):
        """_scaled_hubo: same option matrix as the BQM test but on a
        higher-order (3-variable interaction) problem."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b', 'c'): 3.2}
        offset = 5
        ignored_variables = None
        ignored_interactions = None
        ignored_variables, ignored_interactions = _check_params(
            ignored_variables, ignored_interactions)
        # bias_range=2 scales everything (including offset) by 2
        hnew, jnew, offsetnew = _scaled_hubo(linear, quadratic, offset,
                                             None, 2, None, ignored_variables,
                                             ignored_interactions, False)
        sc = 2.
        hsc = {k: v / sc if k not in ignored_variables else v for
               k, v in linear.items()}
        self.assertEqual(hsc, hnew)
        Jsc = {k: v / sc if k not in ignored_interactions else v for
               k, v in quadratic.items()}
        self.assertEqual(Jsc, jnew)
        self.assertEqual(offsetnew, offset / sc)
        # ignore_offset=True leaves the offset untouched
        hnew, jnew, offsetnew = _scaled_hubo(linear, quadratic, offset,
                                             None, 2, None, ignored_variables,
                                             ignored_interactions, True)
        sc = 2.
        hsc = {k: v / sc if k not in ignored_variables else v for
               k, v in linear.items()}
        self.assertEqual(hsc, hnew)
        Jsc = {k: v / sc if k not in ignored_interactions else v for
               k, v in quadratic.items()}
        self.assertEqual(Jsc, jnew)
        self.assertEqual(offsetnew, offset)
        # quadratic_range=(-1, 0.4): interaction bias drives the scale
        hnew, jnew, offsetnew = _scaled_hubo(linear, quadratic, offset,
                                             None, 1, (-1, 0.4),
                                             ignored_variables,
                                             ignored_interactions, False)
        sc = 3.2 / 0.4
        hsc = {k: v / sc if k not in ignored_variables else v for
               k, v in linear.items()}
        self.assertEqual(hsc, hnew)
        Jsc = {k: v / sc if k not in ignored_interactions else v for
               k, v in quadratic.items()}
        self.assertEqual(Jsc, jnew)
        self.assertEqual(offsetnew, offset / sc)
        # ignored variables retain their original linear biases
        ignored_variables = ['a', 'b']
        ignored_interactions = None
        ignored_variables, ignored_interactions = _check_params(
            ignored_variables, ignored_interactions)
        hnew, jnew, offsetnew = _scaled_hubo(linear, quadratic, offset,
                                             None, (-2, 2), None,
                                             ignored_variables,
                                             ignored_interactions, False)
        sc = 3.2 / 2.
        hsc = {k: v / sc if k not in ignored_variables else v for
               k, v in linear.items()}
        self.assertEqual(hsc, hnew)
        Jsc = {k: v / sc if k not in ignored_interactions else v for
               k, v in quadratic.items()}
        self.assertEqual(Jsc, jnew)
        self.assertEqual(offsetnew, offset / sc)
        # ignored interactions retain their original biases
        ignored_variables = None
        ignored_interactions = [('a', 'b', 'c')]
        ignored_variables, ignored_interactions = _check_params(
            ignored_variables, ignored_interactions)
        hnew, jnew, offsetnew = _scaled_hubo(linear, quadratic, offset,
                                             None, 1, 0.5, ignored_variables,
                                             ignored_interactions, False)
        sc = 4.
        hsc = {k: v / sc if k not in ignored_variables else v for
               k, v in linear.items()}
        self.assertEqual(hsc, hnew)
        Jsc = {k: v / sc if k not in ignored_interactions else v for
               k, v in quadratic.items()}
        self.assertEqual(Jsc, jnew)
        self.assertEqual(offsetnew, offset / sc)
    def test_sample_hising_nonescale(self):
        """sample_ising on a higher-order problem with default scaling."""
        linear = {'a': -4.0, 'b': -4.0, 'c': -4.0}
        quadratic = {('a', 'b', 'c'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(None, None)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               penalty_strength=5.
                               )
        sampler = ScaleComposite(
            ScalingChecker(HigherOrderComposite(ExactSolver()),
                           h=linear,
                           J=quadratic, offset=offset,
                           **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, -3.8)
    def test_sample_hising_bias_range(self):
        """sample_ising on a higher-order problem scaled by bias_range."""
        linear = {'a': -4.0, 'b': -4.0, 'c': -4.0}
        quadratic = {('a', 'b', 'c'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(None, None)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               penalty_strength=5.,
                               bias_range=2)
        sampler = ScaleComposite(
            ScalingChecker(HigherOrderComposite(ExactSolver()),
                           h=linear,
                           J=quadratic, offset=offset,
                           **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, -3.8)
    def test_sample_hising_quadratic_range(self):
        """sample_ising on a higher-order problem scaled by quadratic_range,
        once where it is a no-op and once where it forces rescaling."""
        linear = {'a': -4.0, 'b': -4.0, 'c': -4.0}
        quadratic = {('a', 'b', 'c'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(None, None)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               penalty_strength=5.,
                               quadratic_range=(-1, 2)
                               )
        sampler = ScaleComposite(
            ScalingChecker(HigherOrderComposite(ExactSolver()),
                           h=linear,
                           J=quadratic, offset=offset,
                           **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, -3.8)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               penalty_strength=5.,
                               quadratic_range=(-1, 0.4)
                               )
        sampler = ScaleComposite(
            ScalingChecker(HigherOrderComposite(ExactSolver()),
                           h=linear,
                           J=quadratic, offset=offset,
                           **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, -3.8)
    def test_sample_hising_ranges(self):
        """sample_ising with both bias_range and quadratic_range supplied."""
        linear = {'a': -4.0, 'b': -4.0, 'c': -4.0}
        quadratic = {('a', 'b', 'c'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(None, None)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               penalty_strength=5.,
                               quadratic_range=(-1, 10),
                               bias_range=(-8.0, 5)
                               )
        sampler = ScaleComposite(
            ScalingChecker(HigherOrderComposite(ExactSolver()),
                           h=linear,
                           J=quadratic, offset=offset,
                           **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, -3.8)
    def test_sample_nonescale(self):
        """sample() on a quadratic BQM with default scaling options."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(None, None)
        bqm = BinaryQuadraticModel.from_ising(linear, quadratic, offset=offset)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables)
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), bqm=bqm,
                                                **comp_parameters))
        response = sampler.sample(bqm, **comp_parameters)
        self.assertAlmostEqual(response.first.energy, 0.2)
    def test_sample_bias_range(self):
        """sample() on a quadratic BQM (no offset) scaled by bias_range."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        ignored_variables, ignored_interactions = _check_params(None, None)
        bqm = BinaryQuadraticModel.from_ising(linear, quadratic)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               bias_range=2.
                               )
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), bqm=bqm,
                                                **comp_parameters))
        response = sampler.sample(bqm, **comp_parameters)
        self.assertAlmostEqual(response.first.energy, -4.8)
    def test_sample_quadratic_range(self):
        """sample() scaled by quadratic_range, both no-op and active."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(None, None)
        bqm = BinaryQuadraticModel.from_ising(linear, quadratic, offset=offset)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               quadratic_range=(-1, 2)
                               )
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), bqm=bqm,
                                                **comp_parameters))
        response = sampler.sample(bqm, **comp_parameters)
        self.assertAlmostEqual(response.first.energy, 0.2)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               quadratic_range=(-1, 0.4)
                               )
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), bqm=bqm,
                                                **comp_parameters))
        response = sampler.sample(bqm, **comp_parameters)
        self.assertAlmostEqual(response.first.energy, 0.2)
    def test_sample_ranges(self):
        """sample() with both bias_range and quadratic_range supplied."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(None, None)
        bqm = BinaryQuadraticModel.from_ising(linear, quadratic, offset=offset)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               quadratic_range=(-1, 10),
                               bias_range=(-8.0, 5)
                               )
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), bqm=bqm,
                                                **comp_parameters))
        response = sampler.sample(bqm, **comp_parameters)
        self.assertAlmostEqual(response.first.energy, 0.2)
    def test_sample_ising_quadratic(self):
        """sample_ising on a purely quadratic problem, default options."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(None, None)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables)
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), h=linear,
                                                J=quadratic, offset=offset,
                                                **comp_parameters))
        response = sampler.sample_ising(linear, quadratic,
                                        offset=offset)
        self.assertAlmostEqual(response.first.energy, 0.2)
    def test_sample_ising_ignore_interaction(self):
        """sample_ising with an explicit scalar and an ignored interaction."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(
            None, [('a', 'b')])
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               scalar=0.5
                               )
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), h=linear,
                                                J=quadratic, offset=offset,
                                                **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, 0.2)
    def test_sample_ising_ignore_offset(self):
        """sample_ising with explicit scalar and ignore_offset=True."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(None, None)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               ignore_offset=True,
                               scalar=0.5)
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), h=linear,
                                                J=quadratic, offset=offset,
                                                **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, 0.2)
    def test_sample_ignore_offset(self):
        """sample() with explicit scalar and ignore_offset=True."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        offset = 5
        bqm = BinaryQuadraticModel.from_ising(linear, quadratic, offset=offset)
        ignored_variables, ignored_interactions = _check_params(None, None)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               ignore_offset=True,
                               scalar=0.5)
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), h=linear,
                                                J=quadratic, offset=offset,
                                                **comp_parameters))
        response = sampler.sample(bqm, **comp_parameters)
        self.assertAlmostEqual(response.first.energy, 0.2)
    def test_sample_hising_ignore_offset(self):
        """Higher-order sample_ising with scalar and ignore_offset=True."""
        linear = {'a': -4.0, 'b': -4.0, 'c': -4.0}
        quadratic = {('a', 'b', 'c'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(None, None)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               ignore_offset=True,
                               scalar=0.5)
        sampler = ScaleComposite(ScalingChecker(HigherOrderComposite(
            ExactSolver()), h=linear,
            J=quadratic, offset=offset,
            **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, -3.8)
    def test_sample_ising_ignore_interactions(self):
        """sample_ising with ignored interaction plus ignore_offset."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(
            None, [('a', 'b')])
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               ignore_offset=True,
                               scalar=0.5)
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), h=linear,
                                                J=quadratic, offset=offset,
                                                **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, 0.2)
    def test_sample_ignore_interactions(self):
        """sample() with ignored interaction plus ignore_offset."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        offset = 5
        bqm = BinaryQuadraticModel.from_ising(linear, quadratic, offset=offset)
        ignored_variables, ignored_interactions = _check_params(
            None, [('a', 'b')])
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               ignore_offset=True,
                               scalar=0.5)
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), h=linear,
                                                J=quadratic, offset=offset,
                                                **comp_parameters))
        response = sampler.sample(bqm, **comp_parameters)
        self.assertAlmostEqual(response.first.energy, 0.2)
    def test_sample_hising_ignore_interactions(self):
        """Higher-order sample_ising with ignored interaction and offset."""
        linear = {'a': -4.0, 'b': -4.0, 'c': -4.0}
        quadratic = {('a', 'b', 'c'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(
            None, [('a', 'b', 'c')])
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               ignore_offset=True,
                               scalar=0.5)
        sampler = ScaleComposite(ScalingChecker(HigherOrderComposite(
            ExactSolver()), h=linear,
            J=quadratic, offset=offset,
            **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, -3.8)
    def test_sample_ising_ignore_variables(self):
        """sample_ising with an ignored variable plus ignore_offset."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(
            ['a'], None)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               ignore_offset=True,
                               scalar=0.5)
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), h=linear,
                                                J=quadratic, offset=offset,
                                                **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, 0.2)
    def test_sample_ignore_variables(self):
        """sample() with an ignored variable plus ignore_offset."""
        linear = {'a': -4.0, 'b': -4.0}
        quadratic = {('a', 'b'): 3.2}
        offset = 5
        bqm = BinaryQuadraticModel.from_ising(linear, quadratic, offset=offset)
        ignored_variables, ignored_interactions = _check_params(
            ['a'], None)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               ignore_offset=True,
                               scalar=0.5)
        sampler = ScaleComposite(ScalingChecker(ExactSolver(), h=linear,
                                                J=quadratic, offset=offset,
                                                **comp_parameters))
        response = sampler.sample(bqm, **comp_parameters)
        self.assertAlmostEqual(response.first.energy, 0.2)
    def test_sample_hising_ignore_variables(self):
        """Higher-order sample_ising with an ignored variable and offset."""
        linear = {'a': -4.0, 'b': -4.0, 'c': -4.0}
        quadratic = {('a', 'b', 'c'): 3.2}
        offset = 5
        ignored_variables, ignored_interactions = _check_params(
            ['a'], None)
        comp_parameters = dict(ignored_interactions=ignored_interactions,
                               ignored_variables=ignored_variables,
                               ignore_offset=True,
                               scalar=0.5)
        sampler = ScaleComposite(ScalingChecker(HigherOrderComposite(
            ExactSolver()), h=linear,
            J=quadratic, offset=offset,
            **comp_parameters))
        response = sampler.sample_ising(linear, quadratic, offset=offset,
                                        **comp_parameters)
        self.assertAlmostEqual(response.first.energy, -3.8)
|
|
# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
from neutron.common import constants as l3_const
from neutron import context
from neutron.db import l3_dvr_db
from neutron.extensions import l3
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.tests.unit import testlib_api
_uuid = uuidutils.generate_uuid
class L3DvrTestCase(testlib_api.SqlTestCase):
    """Unit tests for the Neutron DVR L3 DB mixin (L3_NAT_with_dvr_db_mixin).

    Uses a real admin context backed by the SqlTestCase database; most
    plugin/agent interactions are replaced with mock objects.
    NOTE: written for Python 2 — relies on contextlib.nested and the
    external ``mock`` package.
    """
    def setUp(self):
        super(L3DvrTestCase, self).setUp()
        self.ctx = context.get_admin_context()
        self.mixin = l3_dvr_db.L3_NAT_with_dvr_db_mixin()
    def _create_router(self, router):
        """Persist a router DB object for tenant 'foo_tenant'."""
        with self.ctx.session.begin(subtransactions=True):
            return self.mixin._create_router_db(self.ctx, router, 'foo_tenant')
    def _test__create_router_db(self, expected=False, distributed=None):
        """Create a router with the given 'distributed' flag (or omitted)
        and check the resulting extra_attributes value."""
        router = {'name': 'foo_router', 'admin_state_up': True}
        if distributed is not None:
            router['distributed'] = distributed
        result = self._create_router(router)
        self.assertEqual(expected, result.extra_attributes['distributed'])
    def test_create_router_db_default(self):
        """Routers default to centralized (distributed=False)."""
        self._test__create_router_db(expected=False)
    def test_create_router_db_centralized(self):
        self._test__create_router_db(expected=False, distributed=False)
    def test_create_router_db_distributed(self):
        self._test__create_router_db(expected=True, distributed=True)
    def test__validate_router_migration_on_router_update(self):
        """Updating unrelated fields on a distributed router is allowed."""
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True
        }
        router_db = self._create_router(router)
        self.assertIsNone(self.mixin._validate_router_migration(
            self.ctx, router_db, {'name': 'foo_router_2'}))
    def test__validate_router_migration_raise_error(self):
        """Migrating distributed -> centralized is not implemented."""
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True
        }
        router_db = self._create_router(router)
        self.assertRaises(NotImplementedError,
                          self.mixin._validate_router_migration,
                          self.ctx, router_db, {'distributed': False})
    def test_update_router_db_centralized_to_distributed(self):
        """Migrating centralized -> distributed flips the DB flag and runs
        the distributed-attribute update exactly once."""
        router = {'name': 'foo_router', 'admin_state_up': True}
        agent = {'id': _uuid()}
        distributed = {'distributed': True}
        router_db = self._create_router(router)
        router_id = router_db['id']
        self.assertFalse(router_db.extra_attributes.distributed)
        self.mixin._get_router = mock.Mock(return_value=router_db)
        self.mixin._validate_router_migration = mock.Mock()
        self.mixin._update_distributed_attr = mock.Mock()
        self.mixin.list_l3_agents_hosting_router = mock.Mock(
            return_value={'agents': [agent]})
        self.mixin._unbind_router = mock.Mock()
        router_db = self.mixin._update_router_db(
            self.ctx, router_id, distributed, mock.ANY)
        # Assert that the DB value has changed
        self.assertTrue(router_db.extra_attributes.distributed)
        self.assertEqual(1,
                         self.mixin._update_distributed_attr.call_count)
    def _test_get_device_owner(self, is_distributed=False,
                               expected=l3_const.DEVICE_OWNER_ROUTER_INTF,
                               pass_router_id=True):
        """Check _get_device_owner for a router passed either by id or as
        a DB object, distributed or centralized."""
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': is_distributed
        }
        router_db = self._create_router(router)
        router_pass = router_db['id'] if pass_router_id else router_db
        with mock.patch.object(self.mixin, '_get_router') as f:
            f.return_value = router_db
            result = self.mixin._get_device_owner(self.ctx, router_pass)
            self.assertEqual(expected, result)
    def test_get_device_owner_by_router_id(self):
        self._test_get_device_owner()
    def test__get_device_owner_centralized(self):
        self._test_get_device_owner(pass_router_id=False)
    def test__get_device_owner_distributed(self):
        self._test_get_device_owner(
            is_distributed=True,
            expected=l3_dvr_db.DEVICE_OWNER_DVR_INTERFACE,
            pass_router_id=False)
    def _test__is_distributed_router(self, router, expected):
        result = l3_dvr_db.is_distributed_router(router)
        self.assertEqual(expected, result)
    def test__is_distributed_router_by_db_object(self):
        # NOTE(review): this test only checks that the call does not raise;
        # it makes no assertion on the result.
        router = {'name': 'foo_router', 'admin_state_up': True}
        router_db = self._create_router(router)
        self.mixin._get_device_owner(mock.ANY, router_db)
    def test__is_distributed_router_default(self):
        router = {'id': 'foo_router_id'}
        self._test__is_distributed_router(router, False)
    def test__is_distributed_router_centralized(self):
        router = {'id': 'foo_router_id', 'distributed': False}
        self._test__is_distributed_router(router, False)
    def test__is_distributed_router_distributed(self):
        router = {'id': 'foo_router_id', 'distributed': True}
        self._test__is_distributed_router(router, True)
    def test_get_agent_gw_ports_exist_for_network(self):
        """The agent-gateway-port lookup queries ports with the expected
        network/device filters."""
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = []
            self.mixin.get_agent_gw_ports_exist_for_network(
                self.ctx, 'network_id', 'host', 'agent_id')
        plugin.get_ports.assert_called_with(self.ctx, {
            'network_id': ['network_id'],
            'device_id': ['agent_id'],
            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})
    def test__create_gw_port_with_no_gateway(self):
        """No SNAT interface ports are created when the router has no
        gateway port."""
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True,
        }
        router_db = self._create_router(router)
        router_id = router_db['id']
        self.assertTrue(router_db.extra_attributes.distributed)
        with contextlib.nested(
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              '_create_gw_port'),
            mock.patch.object(self.mixin,
                              'create_snat_intf_ports_if_not_exists')
                              ) as (cw, cs):
            self.mixin._create_gw_port(
                self.ctx, router_id, router_db, mock.ANY,
                mock.ANY, mock.ANY)
            self.assertFalse(cs.call_count)
    def test_build_routers_list_with_gw_port_mismatch(self):
        """A router whose gw_port_id has no matching port gets no
        'gw_port' entry."""
        routers = [{'gw_port_id': 'foo_gw_port_id', 'id': 'foo_router_id'}]
        gw_ports = {}
        routers = self.mixin._build_routers_list(self.ctx, routers, gw_ports)
        self.assertIsNone(routers[0].get('gw_port'))
    def test_clear_unused_fip_agent_gw_port(self):
        """When no other FIPs need it, the agent gateway port for the
        floating IP's host/network is deleted."""
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': _uuid(),
            'floating_network_id': _uuid()
        }
        with contextlib.nested(
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              '_get_floatingip'),
            mock.patch.object(self.mixin,
                              'get_vm_port_hostid'),
            mock.patch.object(self.mixin,
                              'check_fips_availability_on_host_ext_net'),
            mock.patch.object(self.mixin,
                              'delete_floatingip_agent_gateway_port')
                              ) as (gfips, gvm, cfips, dfips):
            gfips.return_value = floatingip
            gvm.return_value = 'my-host'
            cfips.return_value = True
            self.mixin.clear_unused_fip_agent_gw_port(
                self.ctx, floatingip)
            self.assertTrue(dfips.called)
            self.assertTrue(cfips.called)
            self.assertTrue(gvm.called)
    def test_delete_floatingip_agent_gateway_port(self):
        """An agent gateway port on the matching host is removed via the
        core plugin's _delete_port."""
        port = {
            'id': 'my_port_id',
            'binding:host_id': 'foo_host',
            'network_id': 'ext_network_id',
            'device_owner': l3_const.DEVICE_OWNER_AGENT_GW
        }
        with contextlib.nested(
            mock.patch.object(manager.NeutronManager, 'get_plugin'),
            mock.patch.object(self.mixin,
                              'get_vm_port_hostid')) as (gp, vm_host):
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = [port]
            vm_host.return_value = 'foo_host'
            self.mixin.delete_floatingip_agent_gateway_port(
                self.ctx, 'foo_host', 'network_id')
        plugin.get_ports.assert_called_with(self.ctx, filters={
            'network_id': ['network_id'],
            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})
        plugin._delete_port.assert_called_with(self.ctx, 'my_port_id')
    def _delete_floatingip_test_setup(self, floatingip):
        """Delete the given FIP with the DB lookups mocked; return the
        clear_unused_fip_agent_gw_port mock for assertions."""
        fip_id = floatingip['id']
        with contextlib.nested(
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              '_get_floatingip'),
            mock.patch.object(self.mixin,
                              'clear_unused_fip_agent_gw_port'),
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              'delete_floatingip')) as (gf, vf, df):
            gf.return_value = floatingip
            self.mixin.delete_floatingip(self.ctx, fip_id)
            return vf
    def _disassociate_floatingip_setup(self, port_id=None, floatingip=None):
        """Disassociate FIPs for port_id with lookups mocked; return the
        clear_unused_fip_agent_gw_port mock for assertions."""
        with contextlib.nested(
            mock.patch.object(self.mixin, '_get_floatingip_on_port'),
            mock.patch.object(self.mixin,
                              'clear_unused_fip_agent_gw_port'),
                              ) as (gf, vf):
            gf.return_value = floatingip
            self.mixin.disassociate_floatingips(
                self.ctx, port_id, do_notify=False)
            return vf
    def test_disassociate_floatingip_with_vm_port(self):
        """Disassociating a FIP bound to a VM port clears the gw port."""
        port_id = '1234'
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': 1234,
            'floating_network_id': _uuid()
        }
        mock_disassociate_fip = self._disassociate_floatingip_setup(
            port_id=port_id, floatingip=floatingip)
        self.assertTrue(mock_disassociate_fip.called)
    def test_disassociate_floatingip_with_no_vm_port(self):
        """Nothing to clear when no FIP is associated with the port."""
        mock_disassociate_fip = self._disassociate_floatingip_setup()
        self.assertFalse(mock_disassociate_fip.called)
    def test_delete_floatingip_without_internal_port(self):
        """Deleting an unassociated FIP does not touch the gw port."""
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': None,
            'floating_network_id': _uuid()
        }
        mock_fip_clear = self._delete_floatingip_test_setup(floatingip)
        self.assertFalse(mock_fip_clear.call_count)
    def test_delete_floatingip_with_internal_port(self):
        """Deleting an associated FIP clears the unused gw port."""
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': _uuid(),
            'floating_network_id': _uuid()
        }
        mock_fip_clear = self._delete_floatingip_test_setup(floatingip)
        self.assertTrue(mock_fip_clear.called)
    def _floatingip_on_port_test_setup(self, hostid):
        """Run _process_floating_ips with get_vm_port_hostid returning
        *hostid*; return the router and floating IP for assertions."""
        router = {'id': 'foo_router_id', 'distributed': True}
        floatingip = {
            'id': _uuid(),
            'port_id': _uuid(),
            'router_id': 'foo_router_id'
        }
        routers = {
            'foo_router_id': router
        }
        fipagent = {
            'id': _uuid()
        }
        # NOTE: mock.patch is not needed here since self.mixin is created fresh
        # for each test. It doesn't work with some methods since the mixin is
        # tested in isolation (e.g. _get_agent_by_type_and_host).
        self.mixin.get_vm_port_hostid = mock.Mock(return_value=hostid)
        self.mixin._get_agent_by_type_and_host = mock.Mock(
            return_value=fipagent)
        self.mixin.get_fip_sync_interfaces = mock.Mock(
            return_value='fip_interface')
        self.mixin._process_floating_ips(self.ctx, routers, [floatingip])
        return (router, floatingip)
    def test_floatingip_on_port_no_host(self):
        """No host for the VM port: router gets no FIP/agent-intf keys."""
        router, fip = self._floatingip_on_port_test_setup(None)
        self.assertTrue(self.mixin.get_vm_port_hostid.called)
        self.assertFalse(self.mixin._get_agent_by_type_and_host.called)
        self.assertFalse(self.mixin.get_fip_sync_interfaces.called)
        self.assertNotIn(l3_const.FLOATINGIP_KEY, router)
        self.assertNotIn(l3_const.FLOATINGIP_AGENT_INTF_KEY, router)
    def test_floatingip_on_port_with_host(self):
        """Known host: FIP and agent interfaces are attached to the router."""
        router, fip = self._floatingip_on_port_test_setup(_uuid())
        self.assertTrue(self.mixin.get_vm_port_hostid.called)
        self.assertTrue(self.mixin._get_agent_by_type_and_host.called)
        self.assertTrue(self.mixin.get_fip_sync_interfaces.called)
        self.assertIn(l3_const.FLOATINGIP_KEY, router)
        self.assertIn(l3_const.FLOATINGIP_AGENT_INTF_KEY, router)
        self.assertIn(fip, router[l3_const.FLOATINGIP_KEY])
        self.assertIn('fip_interface',
                      router[l3_const.FLOATINGIP_AGENT_INTF_KEY])
    def test_delete_disassociated_floatingip_agent_port(self):
        """Disassociating (port_id=None) on a DVR router clears the gw
        port during _update_fip_assoc."""
        fip = {
            'id': _uuid(),
            'port_id': None
        }
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': 1234,
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': True}
        with contextlib.nested(
            mock.patch.object(self.mixin,
                              'get_router'),
            mock.patch.object(self.mixin,
                              'clear_unused_fip_agent_gw_port'),
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              '_update_fip_assoc'),
                              ) as (grtr, vf, cf):
            grtr.return_value = router
            self.mixin._update_fip_assoc(
                self.ctx, fip, floatingip, mock.ANY)
            self.assertTrue(vf.called)
    def _setup_test_create_delete_floatingip(
        self, fip, floatingip_db, router_db):
        """Run _update_fip_assoc with routers/hosts mocked; return the
        (clear_gw_port, create_gw_port) mocks for assertions."""
        port = {
            'id': '1234',
            'binding:host_id': 'myhost',
            'network_id': 'external_net'
        }
        with contextlib.nested(
            mock.patch.object(self.mixin,
                              'get_router'),
            mock.patch.object(self.mixin,
                              'get_vm_port_hostid'),
            mock.patch.object(self.mixin,
                              'clear_unused_fip_agent_gw_port'),
            mock.patch.object(self.mixin,
                              'create_fip_agent_gw_port_if_not_exists'),
            mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                              '_update_fip_assoc'),
                              ) as (grtr, vmp, d_fip, c_fip, up_fip):
            grtr.return_value = router_db
            vmp.return_value = 'my-host'
            self.mixin._update_fip_assoc(
                self.ctx, fip, floatingip_db, port)
            return d_fip, c_fip
    def test_create_floatingip_agent_gw_port_with_dvr_router(self):
        """Associating a FIP on a DVR router creates the agent gw port."""
        floatingip = {
            'id': _uuid(),
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': True}
        fip = {
            'id': _uuid(),
            'port_id': _uuid()
        }
        delete_fip, create_fip = (
            self._setup_test_create_delete_floatingip(
                fip, floatingip, router))
        self.assertTrue(create_fip.called)
        self.assertFalse(delete_fip.called)
    def test_create_floatingip_agent_gw_port_with_non_dvr_router(self):
        """Centralized routers never touch agent gw ports on associate."""
        floatingip = {
            'id': _uuid(),
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': False}
        fip = {
            'id': _uuid(),
            'port_id': _uuid()
        }
        delete_fip, create_fip = (
            self._setup_test_create_delete_floatingip(
                fip, floatingip, router))
        self.assertFalse(create_fip.called)
        self.assertFalse(delete_fip.called)
    def test_delete_floatingip_agent_gw_port_with_dvr_router(self):
        """Disassociating a FIP on a DVR router clears the agent gw port."""
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': 1234,
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': True}
        fip = {
            'id': _uuid(),
            'port_id': None
        }
        delete_fip, create_fip = (
            self._setup_test_create_delete_floatingip(
                fip, floatingip, router))
        self.assertTrue(delete_fip.called)
        self.assertFalse(create_fip.called)
    def test_delete_floatingip_agent_gw_port_with_non_dvr_router(self):
        """Centralized routers never touch agent gw ports on disassociate."""
        floatingip = {
            'id': _uuid(),
            'fixed_port_id': 1234,
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': False}
        fip = {
            'id': _uuid(),
            'port_id': None
        }
        delete_fip, create_fip = (
            self._setup_test_create_delete_floatingip(
                fip, floatingip, router))
        self.assertFalse(create_fip.called)
        self.assertFalse(delete_fip.called)
    def test__validate_router_migration_prevent_check_advanced_svc(self):
        """Migration validation consults the FWaaS and VPNaaS checks."""
        router = {'name': 'foo_router', 'admin_state_up': True}
        router_db = self._create_router(router)
        # make sure the check are invoked, whether they pass or
        # raise, it does not matter in the context of this test
        with contextlib.nested(
            mock.patch.object(self.mixin, 'check_router_has_no_firewall'),
            mock.patch.object(self.mixin, 'check_router_has_no_vpnaas')
                              ) as (check_fw, check_vpn):
            self.mixin._validate_router_migration(
                self.ctx, router_db, {'distributed': True})
            check_fw.assert_called_once_with(self.ctx, router_db)
            check_vpn.assert_called_once_with(self.ctx, router_db)
    def test_check_router_has_no_firewall_raises(self):
        """A tenant firewall on the router blocks migration."""
        with mock.patch.object(
            manager.NeutronManager, 'get_service_plugins') as sp:
            fw_plugin = mock.Mock()
            sp.return_value = {'FIREWALL': fw_plugin}
            fw_plugin.get_firewalls.return_value = [mock.ANY]
            self.assertRaises(
                l3.RouterInUse,
                self.mixin.check_router_has_no_firewall,
                self.ctx, {'id': 'foo_id', 'tenant_id': 'foo_tenant'})
    def test_check_router_has_no_firewall_passes(self):
        """No FWaaS plugin loaded: the check trivially passes."""
        with mock.patch.object(manager.NeutronManager,
                               'get_service_plugins',
                               return_value={}):
            self.assertTrue(
                self.mixin.check_router_has_no_firewall(mock.ANY, mock.ANY))
    def test_check_router_has_no_vpn(self):
        """VPNaaS plugin is asked whether the router is in use."""
        with mock.patch.object(
            manager.NeutronManager, 'get_service_plugins') as sp:
            vpn_plugin = mock.Mock()
            sp.return_value = {'VPN': vpn_plugin}
            self.mixin.check_router_has_no_vpnaas(mock.ANY, {'id': 'foo_id'})
            vpn_plugin.check_router_in_use.assert_called_once_with(
                mock.ANY, 'foo_id')
|
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from enable.component_editor import ComponentEditor
from pyface.tasks.traits_dock_pane import TraitsDockPane
from pyface.tasks.traits_task_pane import TraitsTaskPane
from traits.api import Property
from traitsui.api import (
View,
UItem,
Group,
InstanceEditor,
HGroup,
EnumEditor,
Item,
spring,
Spring,
ButtonEditor,
VGroup,
RangeEditor,
Handler,
)
from pychron.core.helpers.traitsui_shortcuts import rfloatitem
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.core.ui.image_editor import ImageEditor
from pychron.core.ui.led_editor import LEDEditor
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.experiment.utilities.identifier import pretty_extract_device
class BaseLaserPane(TraitsTaskPane):
    """Central task pane showing the stage manager's canvas."""

    def trait_context(self):
        # Expose the stage manager as the view's root object.
        ctx = {"object": self.model.stage_manager}
        return ctx

    def traits_view(self):
        canvas_editor = self.model.stage_manager.canvas_editor_factory()
        canvas_item = UItem("canvas", style="custom", editor=canvas_editor)
        return View(canvas_item)
class AxesPane(TraitsDockPane):
    """Dock pane exposing the stage controller's axis controls."""

    name = "Axes"

    def traits_view(self):
        controller = UItem("stage_controller", style="custom")
        return View(controller)
class StageControlPane(TraitsDockPane):
    """Dock pane bundling stage-related controls.

    Always shows Axes and Canvas tabs; video stage managers additionally
    get Camera and Degas tabs, and non-client mode adds Points and tray
    Calibration tabs.
    """

    name = "Stage"

    def trait_context(self):
        # Make the canvas, stage manager, degasser and tray calibration
        # manager addressable by name from the view definitions below.
        return {
            "canvas": self.model.stage_manager.canvas,
            "stage_manager": self.model.stage_manager,
            "degasser": self.model.degasser,
            "tray_calibration": self.model.stage_manager.tray_calibration_manager,
            "object": self.model,
        }

    def _get_tabs(self):
        """Assemble and return the tabbed Group shown in this pane."""
        # Primary crosshairs display options.
        cg = VGroup(
            HGroup(
                Item("canvas.show_laser_position", label="Display Current"),
                UItem("canvas.crosshairs_color"),
                Item("canvas.crosshairs_line_width", label="Line Wt."),
            ),
            Item("canvas.show_hole_label", label="Display Hole Label"),
            HGroup(
                Item("canvas.show_desired_position", label="Show Desired"),
                UItem("canvas.desired_position_color"),
            ),
            HGroup(
                Item("canvas.crosshairs_kind", label="Kind"),
                Item("canvas.crosshairs_radius", label="Radius"),
            ),
            HGroup(
                Item("canvas.crosshairs_offsetx", label="Offset (mm)"),
                UItem("canvas.crosshairs_offsety"),
                Item("canvas.crosshairs_offset_color"),
            ),
            label="Crosshairs",
            show_border=True,
        )
        # Auxiliary crosshairs options (same shape as above, aux_ traits).
        acg = VGroup(
            HGroup(
                Item("canvas.aux_show_laser_position", label="Display Current"),
                UItem("canvas.aux_crosshairs_color"),
                Item("canvas.aux_crosshairs_line_width", label="Line Wt."),
            ),
            HGroup(
                Item("canvas.aux_crosshairs_kind", label="Kind"),
                Item("canvas.aux_crosshairs_radius", label="Radius"),
            ),
            HGroup(
                Item("canvas.aux_crosshairs_offsetx", label="Offset (mm)"),
                UItem("canvas.aux_crosshairs_offsety"),
                # Item('canvas.aux_crosshairs_offset_color')
            ),
            label="Aux Crosshairs",
            show_border=True,
        )
        canvas_grp = VGroup(
            Item("canvas.show_bounds_rect", label="Show Bounds Rectangle"),
            Item("canvas.show_grids", label="Show Grids"),
            cg,
            acg,
            label="Canvas",
        )

        # Base tab set; further tabs are appended conditionally below.
        tabs = Group(
            UItem("stage_manager.stage_controller", style="custom", label="Axes"),
            canvas_grp,
            layout="tabbed",
        )

        if self.model.stage_manager.__class__.__name__ == "VideoStageManager":
            # Degas controls: test button, preprocess threshold and PID gains.
            degasser_grp = VGroup(
                HGroup(
                    VGroup(
                        UItem("degas_test_button"), show_border=True, label="Testing"
                    ),
                    VGroup(
                        Item("degasser.threshold"), show_border=True, label="Preprocess"
                    ),
                    icon_button_editor("degasser.edit_pid_button", "cog"),
                    icon_button_editor("degasser.save_button", "save"),
                    VGroup(
                        Item("degasser.pid.kp"),
                        Item("degasser.pid.ki"),
                        Item("degasser.pid.kd"),
                    ),
                    show_border=True,
                    label="PID",
                ),
                UItem(
                    "degasser.plot_container", style="custom", editor=ComponentEditor()
                ),
                label="Degas",
                show_border=True,
            )
            # Live autocenter image from the machine-vision manager.
            mvgrp = VGroup(
                VGroup(
                    UItem(
                        "stage_manager.autocenter_manager.display_image",
                        width=240,
                        height=240,
                        editor=ImageEditor(
                            refresh="stage_manager.autocenter_manager."
                            "display_image.refresh_needed"
                        ),
                    )
                ),
                label="Machine Vision",
                show_border=True,
            )
            # Snapshot/video recording controls.
            recgrp = VGroup(
                HGroup(
                    icon_button_editor(
                        "stage_manager.snapshot_button",
                        "camera",
                        tooltip="Take a snapshot",
                    ),
                    Item("stage_manager.snapshot_mode", label="Mode"),
                    icon_button_editor(
                        "stage_manager.record", "media-record", tooltip="Record video"
                    ),
                    CustomLabel("stage_manager.record_label", color="red"),
                ),
                HGroup(
                    Item("stage_manager.auto_save_snapshot", label="Auto Save"),
                    Item("stage_manager.render_with_markup", label="Add Markup"),
                ),
                show_border=True,
                label="Recording",
            )
            # Camera zoom calibration / device configuration.
            cfggrp = VGroup(
                Item("stage_manager.camera_zoom_coefficients", label="Coeff."),
                icon_button_editor(
                    "stage_manager.configure_camera_device_button",
                    "cog",
                    tooltip="Reload camera configuration file",
                ),
                show_border=True,
                label="Zoom",
            )
            # camera_grp.content.extend((HGroup(cfggrp, recgrp), mvgrp))
            camera_grp = VGroup(
                HGroup(cfggrp, recgrp), mvgrp, visible_when="use_video", label="Camera"
            )

            tabs.content.append(camera_grp)
            tabs.content.append(degasser_grp)

        mode = self.model.mode
        if mode != "client":
            # Point programming tab.
            pp_grp = UItem(
                "stage_manager.points_programmer", label="Points", style="custom"
            )

            tabs.content.append(pp_grp)

            cal_help_grp = VGroup(
                CustomLabel("tray_calibration.calibration_help", color="green"),
                label="Help",
                show_border=True,
            )

            # Calibration result readouts (center, rotation, scale, error).
            cal_results_grp = VGroup(
                HGroup(
                    rfloatitem("tray_calibration.cx"), rfloatitem("tray_calibration.cy")
                ),
                rfloatitem("tray_calibration.rotation"),
                rfloatitem("tray_calibration.scale", sigfigs=4),
                rfloatitem("tray_calibration.error", sigfigs=2),
                label="Results",
                show_border=True,
            )

            # holes_grp = VGroup(HGroup(UItem('tray_calibration.add_holes_button',
            #                                 tooltip='Add Holes'),
            #                           UItem('tray_calibration.reset_holes_button',
            #                                 tooltip='Reset Holes')),
            #                    UItem('tray_calibration.holes_list',
            #                          editor=ListStrEditor()))
            cal_grp = HGroup(
                UItem(
                    "tray_calibration.style",
                    enabled_when="not tray_calibration.isCalibrating()",
                ),
                UItem(
                    "stage_manager.stage_map_name",
                    editor=EnumEditor(name="stage_manager.stage_map_names"),
                ),
                UItem(
                    "tray_calibration.calibrate",
                    enabled_when="tray_calibration.calibration_enabled",
                    editor=ButtonEditor(
                        label_value="tray_calibration.calibration_step"
                    ),
                    width=-125,
                ),
                UItem(
                    "tray_calibration.cancel_button",
                    enabled_when="tray_calibration.isCalibrating()",
                ),
                UItem("tray_calibration.set_center_button"),
            )
            tc_grp = VGroup(
                cal_grp,
                UItem("tray_calibration.calibrator", style="custom"),
                HGroup(cal_results_grp, cal_help_grp),
                label="Calibration",
            )

            tabs.content.append(tc_grp)

        return tabs

    def traits_view(self):
        """Top-level layout: stop/home row, calibrated-position entry, then
        the tab group from _get_tabs().
        """
        # Video stage managers additionally get an autocenter button.
        if self.model.stage_manager.__class__.__name__ == "VideoStageManager":
            pgrp = HGroup(
                UItem(
                    "stage_manager.calibrated_position_entry",
                    tooltip="Enter a position e.g 1 for a hole, " "or 3,4 for X,Y",
                ),
                icon_button_editor(
                    "stage_manager.autocenter_button",
                    "find",
                    tooltip="Do an autocenter at the current location",
                    enabled_when="stage_manager.autocenter_manager.use_autocenter",
                ),
                icon_button_editor(
                    "stage_manager.manual_override_position_button",
                    "edit-move",
                    tooltip="Manual define the X,Y coordinates for current position",
                    enabled_when="stage_manager.calibrated_position_entry",
                ),
                label="Calibrated Position",
                show_border=True,
            )
        else:
            pgrp = HGroup(
                UItem(
                    "stage_manager.calibrated_position_entry",
                    tooltip="Enter a position e.g 1 for a hole, " "or 3,4 for X,Y",
                ),
                icon_button_editor(
                    "stage_manager.manual_override_position_button",
                    "edit-move",
                    tooltip="Manual define the X,Y coordinates for current position",
                    enabled_when="stage_manager.calibrated_position_entry",
                ),
                label="Calibrated Position",
                show_border=True,
            )

        # Stop / home controls.
        hgrp = HGroup(
            UItem("stage_manager.stop_button"),
            UItem("stage_manager.home"),
            UItem(
                "stage_manager.home_option",
                editor=EnumEditor(name="stage_manager.home_options"),
            ),
        )

        tabs = self._get_tabs()
        v = View(VGroup(hgrp, pgrp, tabs))
        return v
class ControlPane(TraitsDockPane):
    """Dock pane with the laser enable control, status text and the
    currently requested output power.
    """

    name = "Control"

    # Keep the pane anchored: it may be closed but not moved or floated.
    movable = False
    closable = True
    floatable = False

    def traits_view(self):
        led = UItem(
            "enabled",
            editor=LEDEditor(colors=["red", "green"]),
            style="custom",
            height=-35,
        )
        enable_button = UItem(
            "enable", editor=ButtonEditor(label_value="enable_label")
        )
        led_grp = HGroup(led, enable_button)

        status = CustomLabel(
            "status_text",
            weight="bold",
            use_color_background=False,
            color="orange",
            size=40,
        )
        status_grp = HGroup(spring, status, spring)

        requested = Item(
            "requested_power", style="readonly", format_str="%0.2f", width=100
        )
        request_grp = HGroup(
            requested,
            Spring(springy=False, width=50),
            UItem("units", style="readonly"),
            spring,
        )

        content = VGroup(
            led_grp, spring, status_grp, spring, request_grp, show_border=True
        )
        return View(content)
class SupplementalPane(TraitsDockPane):
    """Placeholder dock pane for laser-specific supplemental controls."""

    pass
# ===============================================================================
# generic
# ===============================================================================
class PulseHandler(Handler):
    """TraitsUI handler that persists pulse settings when the view closes."""

    def close(self, info, ok):
        # Save the pulse configuration before the window is dismissed.
        info.object.dump_pulse()
        return ok
class PulsePane(TraitsDockPane):
    """Dock pane for firing single timed laser pulses.

    The view binds to the model's ``pulse`` object (see trait_context) and
    uses PulseHandler to persist settings when the pane closes.
    """

    id = "pychron.lasers.pulse"
    name = "Pulse"

    def trait_context(self):
        # Expose the pulse object as the view's root ("object").
        ctx = super(PulsePane, self).trait_context()
        ctx["object"] = self.model.pulse
        return ctx

    def traits_view(self):
        # Pulse parameters: power, duration and the fire button.
        agrp = VGroup(
            HGroup(
                Item("power", tooltip="Hit Enter for change to take effect"),
                Item("units", style="readonly", show_label=False),
                spring,
                Item(
                    "duration",
                    label="Duration (s)",
                    tooltip="Set the laser pulse duration in seconds",
                ),
                Item(
                    "pulse_button",
                    editor=ButtonEditor(label_value="pulse_label"),
                    show_label=False,
                    enabled_when="enabled",
                ),
            )
        )
        # Wait-control display: max-seconds entry plus elapsed-time slider.
        mgrp = VGroup(
            HGroup(
                Spring(width=-5, springy=False),
                Item("object.wait_control.high", label="Set Max. Seconds"),
                spring,
                UItem("object.wait_control.continue_button"),
            ),
            HGroup(
                Spring(width=-5, springy=False),
                Item(
                    "object.wait_control.current_time",
                    show_label=False,
                    editor=RangeEditor(
                        mode="slider",
                        low=1,
                        # low_name='low_name',
                        high_name="object.wait_control.duration",
                    ),
                ),
                CustomLabel("object.wait_control.current_time", size=14, weight="bold"),
            ),
            show_border=True,
        )
        v = View(
            VGroup(agrp, mgrp, show_border=True), id="pulse", handler=PulseHandler()
        )
        return v
class OpticsPane(TraitsDockPane):
    """Dock pane embedding the laser controller's own optics UI."""

    id = "pychron.lasers.optics"
    name = "Optics"

    def traits_view(self):
        controller = UItem(
            "laser_controller", editor=InstanceEditor(), style="custom"
        )
        return View(Group(controller, show_border=True))
class ClientMixin(object):
    """Shared UI for laser *client* panes: enable/fire controls plus
    positioning sliders bounded by the class-level axis limits.
    """

    # Pane title, derived from the model's device name (see _get_name).
    name = Property(depends_on="model")
    id = "pychron.lasers.client"

    # Axis travel limits used for the x/y/z range sliders.
    # NOTE(review): presumably millimeters — confirm against the stage.
    xmax = 25
    xmin = -25
    ymax = 25
    ymin = -25
    zmax = 25
    zmin = -25

    def _get_name(self):
        # Property getter for ``name``.
        n = "Laser Client"
        if self.model:
            n = pretty_extract_device(self.model.name)
        return n

    def traits_view(self):
        # Positioning controls; only active once "move enabled" is toggled.
        pos_grp = VGroup(
            UItem("move_enabled_button"),
            VGroup(
                HGroup(
                    Item("position"),
                    UItem(
                        "object.stage_manager.stage_map_name",
                        editor=EnumEditor(name="object.stage_manager.stage_map_names"),
                    ),
                    UItem("stage_stop_button"),
                ),
                Item("x", editor=RangeEditor(low=self.xmin, high=self.xmax)),
                Item("y", editor=RangeEditor(low=self.ymin, high=self.ymax)),
                Item("z", editor=RangeEditor(low=self.zmin, high=self.zmax)),
                enabled_when="_move_enabled",
            ),
            label="Positioning",
        )

        # ogrp = Group(UItem('optics_client', style='custom'),
        #              label='Optics')
        # cgrp = Group(UItem('controls_client', style='custom'),
        #              defined_when='controls_client',
        #              label='Controls')
        tgrp = Group(
            # cgrp,
            # ogrp,
            pos_grp,
            layout="tabbed",
        )

        # Enable/fire row with power readout and utility buttons.
        egrp = HGroup(
            UItem("enabled", editor=LEDEditor(colors=["red", "green"])),
            UItem("enable", editor=ButtonEditor(label_value="enable_label")),
            UItem("fire_laser_button", enabled_when="enabled"),
            Item("output_power", label="Power"),
            UItem("units"),
            spring,
            icon_button_editor("snapshot_button", "camera"),
            icon_button_editor(
                "test_connection_button", "connect", tooltip="Test Connection"
            ),
        )
        v = View(VGroup(egrp, tgrp))
        return v
class ClientPane(TraitsTaskPane, ClientMixin):
    """Task-pane flavor of the laser client UI (see ClientMixin)."""

    pass
# def traits_view(self):
# v = View(
# Item('test_connection_button', show_label=False),
# HGroup(
# UItem('enabled_led', editor=LEDEditor()),
# UItem('enable', editor=ButtonEditor(label_value='enable_label'))),
# Item('position'),
# UItem('snapshot_button'),
# Item('x', editor=RangeEditor(low=-25.0, high=25.0)),
# Item('y', editor=RangeEditor(low=-25.0, high=25.0)),
# Item('z', editor=RangeEditor(low=-25.0, high=25.0)))
# return v
class ClientDockPane(TraitsDockPane, ClientMixin):
    """Dock-pane flavor of the laser client UI (see ClientMixin)."""

    pass
class AuxilaryGraphPane(TraitsDockPane):
    """Dock pane rendering the auxiliary graph component.

    NOTE(review): "Auxilary" is a misspelling of "Auxiliary", but the class
    name and trait name are part of the public interface and kept as-is.
    """

    name = "Auxilary Graph"

    def traits_view(self):
        graph = UItem("auxilary_graph", editor=ComponentEditor())
        return View(graph)
# ============= EOF =============================================
|
|
#!/usr/bin/env python
"""
Sentry
======
Sentry is a realtime event logging and aggregation platform. It specializes
in monitoring errors and extracting all the information needed to do a proper
post-mortem without any of the hassle of the standard user feedback loop.
Sentry is a Server
------------------
The Sentry package, at its core, is just a simple server and web UI. It will
handle authentication clients (such as `Raven <https://github.com/getsentry/raven-python>`_)
and all of the logic behind storage and aggregation.
That said, Sentry is not limited to Python. The primary implementation is in
Python, but it contains a full API for sending events from any language, in
any application.
:copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
import datetime
import json
import os.path
from distutils import log
from distutils.core import Command
from setuptools.command.install import install
from setuptools.command.develop import develop
from setuptools.command.sdist import sdist
from setuptools import setup, find_packages
from subprocess import check_output
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
# Import these (when installed) so their atexit handlers register before
# setuptools runs; see the linked note above for the TypeError this avoids.
for m in ('multiprocessing', 'billiard'):
    try:
        __import__(m)
    except ImportError:
        pass
# Repository root; paths below (indicator file, npm/gulp/webpack working
# directory) are resolved relative to this.
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))

# SENTRY_LIGHT_BUILD=1 skips the expensive static-asset (JS/CSS) build.
IS_LIGHT_BUILD = os.environ.get('SENTRY_LIGHT_BUILD') == '1'

# Extra requirements for development checkouts (`pip install sentry[dev]`).
dev_requires = [
    'flake8>=2.0,<2.1',
    'Click',
]

# Extra requirements for the test suite (`pip install sentry[tests]`).
tests_require = [
    'blist',  # used by cassandra
    'casscache',
    'cqlsh',
    'datadog',
    'elasticsearch',
    'httpretty',
    'pytest-cov>=1.4',
    'pytest-timeout',
    'python-coveralls',
    'responses',
]

# Core runtime requirements.
install_requires = [
    'BeautifulSoup>=3.2.1,<3.3.0',
    'celery>=3.1.8,<3.2.0',
    'cssutils>=0.9.9,<0.10.0',
    'Django>=1.6.0,<1.7',
    'django-bitfield>=1.7.0,<1.8.0',
    'django-crispy-forms>=1.4.0,<1.5.0',
    'django-debug-toolbar>=1.3.2,<1.4.0',
    'django-paging>=0.2.5,<0.3.0',
    'django-jsonfield>=0.9.13,<0.9.14',
    'django-picklefield>=0.3.0,<0.4.0',
    'django-recaptcha>=1.0.4,<1.1.0',
    'django-social-auth>=0.7.28,<0.8.0',
    'django-sudo>=1.1.3,<1.2.0',
    'django-templatetag-sugar>=0.1.0',
    'djangorestframework>=2.3.8,<2.4.0',
    'email-reply-parser>=0.2.0,<0.3.0',
    'enum34>=0.9.18,<0.10.0',
    'exam>=0.5.1',
    'gunicorn>=19.2.1,<20.0.0',
    'ipaddr>=2.1.11,<2.2.0',
    'logan>=0.7.1,<0.8.0',
    'lxml>=3.4.1',
    'mock>=0.8.0,<1.1',
    'markdown>=2.4.1,<2.5.0',
    'petname>=1.7,<1.8',
    'progressbar>=2.2,<2.4',
    'pytest',
    'pytest-django',
    'python-dateutil>=2.0.0,<3.0.0',
    'python-memcached>=1.53,<2.0.0',
    'PyYAML>=3.11,<4.0',
    'raven>=5.3.0',
    'redis>=2.10.3,<2.11.0',
    # The [security] extra (TLS/SNI backports) is dropped in light builds.
    'requests%s>=2.7.0,<2.8.0' % (not IS_LIGHT_BUILD and '[security]' or ''),
    'simplejson>=3.2.0,<3.9.0',
    'six>=1.6.0,<2.0.0',
    'setproctitle>=1.1.7,<1.2.0',
    'statsd>=3.1.0,<3.2.0',
    'South==1.0.1',
    'toronado>=0.0.4,<0.1.0',
    'urllib3>=1.11,<1.12',
    'rb>=1.1.0,<2.0.0',
]

# Database-backend extras (selected via `sentry[postgres]`, etc.).
postgres_requires = [
    'psycopg2>=2.5.0,<2.6.0',
]

postgres_pypy_requires = [
    'psycopg2cffi',
]

mysql_requires = [
    'MySQL-python>=1.2.0,<1.3.0',
]
class DevelopWithBuildStatic(develop):
    """``develop`` command that also builds static assets, unless a light
    build was requested via SENTRY_LIGHT_BUILD=1.
    """

    def install_for_development(self):
        if not IS_LIGHT_BUILD:
            self.run_command('build_static')
        return develop.install_for_development(self)
class SdistWithBuildStatic(sdist):
    """``sdist`` command that builds static assets inside the release tree
    and stamps it with a creation timestamp.
    """

    def make_release_tree(self, *a, **kw):
        dist_path = self.distribution.get_fullname()
        sdist.make_release_tree(self, *a, **kw)

        # Run the asset build directly inside the release tree.
        self.reinitialize_command('build_static', work_path=dist_path)
        self.run_command('build_static')

        # Write the indicator file; presumably consumed by
        # SmartInstall._needs_static to skip rebuilding — confirm.
        with open(os.path.join(dist_path, 'sentry-package.json'), 'w') as fp:
            json.dump({
                'createdAt': datetime.datetime.utcnow().isoformat() + 'Z',
            }, fp)
class BuildStatic(Command):
    """Custom distutils command building Sentry's static assets.

    Runs git submodule init/update, ``npm install``, a gulp CSS build and
    a production webpack build, all in the configured working directory.
    """

    user_options = [
        ('work-path=', 'w',
         "The working directory for source files. Defaults to ."),
    ]

    def initialize_options(self):
        self.work_path = None

    def finalize_options(self):
        if self.work_path is None:
            self.work_path = ROOT

    def run(self):
        work_path = self.work_path

        log.info("initializing git submodules")
        check_output(['git', 'submodule', 'init'], cwd=work_path)
        check_output(['git', 'submodule', 'update'], cwd=work_path)

        log.info("running [npm install --quiet]")
        check_output(['npm', 'install', '--quiet'], cwd=work_path)

        log.info("running [gulp dist]")
        check_output([os.path.join('node_modules', '.bin', 'gulp'), 'dist:css'],
                     cwd=work_path)

        # Enable React production optimization
        os.environ['NODE_ENV'] = 'production'

        log.info("running [webpack]")
        check_output([os.path.join('node_modules', '.bin', 'webpack'), '-p'],
                     cwd=work_path)
class SmartInstall(install):
    """
    Installs Sentry into the Python environment.

    If the package indicator is missing, this will also force a run of
    `build_static` which is required for JavaScript assets and other things.
    """

    def _needs_static(self):
        # Absence of the indicator file means static assets were never
        # built for this tree.
        return not os.path.exists(os.path.join(ROOT, 'sentry-package.json'))

    def run(self):
        if self._needs_static():
            self.run_command('build_static')
        install.run(self)
# Distribution metadata. The database extras bundle install_requires so
# e.g. `pip install sentry[postgres]` is self-contained.
setup(
    name='sentry',
    version='8.0.0.dev0',
    author='David Cramer',
    author_email='dcramer@gmail.com',
    url='https://www.getsentry.com',
    description='A realtime logging and aggregation server.',
    long_description=open('README.rst').read(),
    package_dir={'': 'src'},
    packages=find_packages('src'),
    zip_safe=False,
    install_requires=install_requires,
    extras_require={
        'tests': tests_require,
        'dev': dev_requires,
        'postgres': install_requires + postgres_requires,
        'postgres_pypy': install_requires + postgres_pypy_requires,
        'mysql': install_requires + mysql_requires,
    },
    # Wire in the asset-building command variants defined above.
    cmdclass={
        'build_static': BuildStatic,
        'develop': DevelopWithBuildStatic,
        'sdist': SdistWithBuildStatic,
        'install': SmartInstall,
    },
    license='BSD',
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'sentry = sentry.utils.runner:main',
        ],
    },
    classifiers=[
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: POSIX :: Linux',
        'Topic :: Software Development'
    ],
)
|
|
"""Utility functions for environment.py"""
import os
import cv2
import math
import random
import numpy as np
import carla
import pygame
import threading
import datetime
from typing import Union
from tensorforce.agents import Agent
# constants:
# float32 machine epsilon; added to distances so results are never exactly 0.
epsilon = np.finfo(np.float32).eps

# Use this dict to convert lanes objects to integers:
# (stable integer encodings of CARLA enum values, e.g. for NN state vectors)
WAYPOINT_DICT = dict(lane_change={carla.LaneChange.NONE: 0,
                                  carla.LaneChange.Both: 1,
                                  carla.LaneChange.Left: 2,
                                  carla.LaneChange.Right: 3},
                     lane_type={carla.LaneType.NONE: 0,
                                carla.LaneType.Bidirectional: 1,
                                carla.LaneType.Biking: 2,
                                carla.LaneType.Border: 3,
                                carla.LaneType.Driving: 4,
                                carla.LaneType.Entry: 5,
                                carla.LaneType.Exit: 6,
                                carla.LaneType.Median: 7,
                                carla.LaneType.OffRamp: 8,
                                carla.LaneType.OnRamp: 9,
                                carla.LaneType.Parking: 10,
                                carla.LaneType.Rail: 11,
                                carla.LaneType.Restricted: 12,
                                carla.LaneType.RoadWorks: 13,
                                carla.LaneType.Shoulder: 14,
                                carla.LaneType.Sidewalk: 15,
                                carla.LaneType.Special1: 16,
                                carla.LaneType.Special2: 17,
                                carla.LaneType.Special3: 18,
                                carla.LaneType.Stop: 19,
                                carla.LaneType.Tram: 20,
                                carla.LaneType.Any: 21},
                     lane_marking_type={carla.LaneMarkingType.NONE: 0,
                                        carla.LaneMarkingType.BottsDots: 1,
                                        carla.LaneMarkingType.Broken: 2,
                                        carla.LaneMarkingType.BrokenBroken: 3,
                                        carla.LaneMarkingType.BrokenSolid: 4,
                                        carla.LaneMarkingType.Curb: 5,
                                        carla.LaneMarkingType.Grass: 6,
                                        carla.LaneMarkingType.Solid: 7,
                                        carla.LaneMarkingType.SolidBroken: 8,
                                        carla.LaneMarkingType.SolidSolid: 9,
                                        carla.LaneMarkingType.Other: 10},
                     traffic_light={carla.TrafficLightState.Green: 0,
                                    carla.TrafficLightState.Red: 1,
                                    carla.TrafficLightState.Yellow: 2,
                                    carla.TrafficLightState.Off: 3,
                                    carla.TrafficLightState.Unknown: 4}
                     )
# -------------------------------------------------------------------------------------------------
# -- PyGame
# -------------------------------------------------------------------------------------------------
def init_pygame():
    """Initialise pygame and its font module if not already initialised."""
    modules = ((pygame.get_init, pygame.init),
               (pygame.font.get_init, pygame.font.init))
    for is_initialised, initialise in modules:
        if not is_initialised():
            initialise()
def get_display(window_size, mode=pygame.HWSURFACE | pygame.DOUBLEBUF):
    """Create and return the pygame display surface.

    :param window_size: a tuple (width: int, height: int)
    :param mode: pygame rendering mode. Default: pygame.HWSURFACE | pygame.DOUBLEBUF
    :return: a pygame.display instance.
    """
    display = pygame.display.set_mode(window_size, mode)
    return display
def get_font(size=14):
    """Return the default pygame font at the given point size."""
    default_name = pygame.font.get_default_font()
    return pygame.font.Font(default_name, size)
def display_image(display, image, window_size=(800, 600), blend=False):
    """Render a numpy image onto a pygame display.

    :param display: pygame.display surface to draw on.
    :param image: the image (numpy.array) to display/render on.
    :param window_size: the size of the pygame's window. Default is (800, 600)
    :param blend: whether to blend or not the given image.
    """
    # Resize only when the image does not already match the window.
    width_height = (image.shape[1], image.shape[0])
    if width_height != window_size:
        image = resize(image, size=window_size)

    # pygame surfaces are (width, height)-major while numpy images are
    # (height, width)-major, hence the axis swap.
    surface = pygame.surfarray.make_surface(image.swapaxes(0, 1))
    if blend:
        surface.set_alpha(100)

    display.blit(surface, (0, 0))
def display_text(display, font, text: list, color=(255, 255, 255), origin=(0, 0), offset=(0, 2)):
    """Render a list of text lines onto the display, one after another.

    Each element may be a plain string, or a dict with keys 'text' and
    (optionally) 'color' to override the default color for that line.

    :param display: pygame display surface.
    :param font: pygame font used to render each line.
    :param text: list of str or dict entries to draw.
    :param color: default RGB color for plain-string lines.
    :param offset: per-line (dx, dy) advance. NOTE(review): dx accumulates
        too, so a non-zero x offset indents each successive line further —
        confirm this is intended.
    """
    position = origin
    for line in text:
        if isinstance(line, dict):
            display.blit(font.render(line.get('text'), True, line.get('color', color)), position)
        else:
            display.blit(font.render(line, True, color), position)

        position = (position[0] + offset[0], position[1] + offset[1])
def pygame_save(display, path: str, name: str = None):
    """Save a screenshot of `display` under `path` on a background thread.

    :param name: file name; defaults to a timestamped 'image-<now>.jpg'.
    """
    if name is None:
        timestamp = str(datetime.datetime.now())
        name = f'image-{timestamp}.jpg'

    target = os.path.join(path, name)
    # Saving can be slow; do it off the main (render) thread.
    threading.Thread(target=lambda: pygame.image.save(display, target)).start()
# -------------------------------------------------------------------------------------------------
# -- CARLA
# -------------------------------------------------------------------------------------------------
def get_client(address, port, timeout=2.0) -> carla.Client:
    """Connect to the CARLA simulator.

    :param address: host the simulator runs on.
    :param port: port the simulator listens on.
    :param timeout: connection timeout in seconds.
    :return: a carla.Client instance if the CARLA simulator accepts the connection.
    """
    client = carla.Client(address, port)
    client.set_timeout(timeout)
    return client
def random_blueprint(world: carla.World, actor_filter='vehicle.*', role_name='agent') -> carla.ActorBlueprint:
    """Retrieve a random blueprint, randomising its recommended attributes.

    :param world: a carla.World instance.
    :param actor_filter: a string used to filter (select) blueprints. Default: 'vehicle.*'
    :param role_name: blueprint's role_name, Default: 'agent'.
    :return: a carla.ActorBlueprint instance.
    """
    blueprints = world.get_blueprint_library().filter(actor_filter)
    blueprint: carla.ActorBlueprint = random.choice(blueprints)
    blueprint.set_attribute('role_name', role_name)

    if blueprint.has_attribute('color'):
        color = random.choice(blueprint.get_attribute('color').recommended_values)
        blueprint.set_attribute('color', color)

    if blueprint.has_attribute('driver_id'):
        driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
        blueprint.set_attribute('driver_id', driver_id)

    if blueprint.has_attribute('is_invincible'):
        blueprint.set_attribute('is_invincible', 'true')

    # Fix: the original converted recommended 'speed' values [1] and [2] to
    # float and discarded the results — dead code that could also raise
    # IndexError when fewer than three recommended values exist. Only the
    # missing-attribute warning is kept.
    if not blueprint.has_attribute('speed'):
        print("No recommended values for 'speed' attribute")

    return blueprint
def random_spawn_point(world_map: carla.Map, different_from: carla.Location = None) -> carla.Transform:
    """Return a random spawning location.

    :param world_map: a carla.Map instance obtained by calling world.get_map()
    :param different_from: ensures that the location of the random spawn point is different from the one specified here.
    :return: a carla.Transform instance.
    :raise ValueError: if `different_from` excludes every available spawn
        point (the original retry loop would spin forever in that case).
    """
    available_spawn_points = world_map.get_spawn_points()

    if different_from is None:
        return random.choice(available_spawn_points)

    # Fix: filter once instead of retrying random picks — this terminates
    # even when few (or no) alternative spawn points exist.
    candidates = [p for p in available_spawn_points
                  if p.location != different_from]
    if not candidates:
        raise ValueError('No spawn point differs from the given location.')

    return random.choice(candidates)
def spawn_actor(world: carla.World, blueprint: carla.ActorBlueprint, spawn_point: carla.Transform,
                attach_to: carla.Actor = None, attachment_type=carla.AttachmentType.Rigid) -> carla.Actor:
    """Try to spawn an actor in a CARLA simulator.

    :param world: a carla.World instance.
    :param blueprint: specifies which actor has to be spawned.
    :param spawn_point: where to spawn the actor. A transform specifies the location and rotation.
    :param attach_to: whether the spawned actor has to be attached (linked) to another one.
    :param attachment_type: the kind of the attachment. Can be 'Rigid' or 'SpringArm'.
    :return: a carla.Actor instance.
    :raise ValueError: if the simulator could not spawn the actor there.
    """
    spawned = world.try_spawn_actor(blueprint, spawn_point, attach_to, attachment_type)

    if spawned is None:
        raise ValueError(f'Cannot spawn actor. Try changing the spawn_point ({spawn_point}) to something else.')

    return spawned
def get_blueprint(world: carla.World, actor_id: str) -> carla.ActorBlueprint:
    """Look up the blueprint with the given actor id in the world's library."""
    library = world.get_blueprint_library()
    return library.find(actor_id)
def global_to_local(point: carla.Location, reference: Union[carla.Transform, carla.Location, carla.Rotation]):
    """Translate a 3D point *in place* from global to local coordinates,
    using `reference` as the transformation frame.
    """
    if isinstance(reference, carla.Transform):
        transform = reference
    elif isinstance(reference, carla.Location):
        transform = carla.Transform(reference, carla.Rotation())
    elif isinstance(reference, carla.Rotation):
        transform = carla.Transform(carla.Location(), reference)
    else:
        raise ValueError('Argument "reference" is none of carla.Transform or carla.Location or carla.Rotation!')

    transform.transform(point)
# -------------------------------------------------------------------------------------------------
# -- Other
# -------------------------------------------------------------------------------------------------
def resize(image, size: (int, int), interpolation=cv2.INTER_CUBIC):
    """Resize `image` to `size` using OpenCV.

    :param image: a numpy array with shape (height, width, channels).
    :param size: (width, height) to resize the image to.
    :param interpolation: OpenCV interpolation flag. Default: cv2.INTER_CUBIC.
    :return: the reshaped image.
    """
    resized = cv2.resize(image, dsize=size, interpolation=interpolation)
    return resized
def scale(num, from_interval=(-1.0, +1.0), to_interval=(0.0, 7.0)) -> float:
    """Linearly map `num` from one interval onto another, then round it.

    :param num: a number assumed to lie in `from_interval`.
    :param from_interval: the source interval.
    :param to_interval: the target interval.
    :return: the interpolated value, rounded to the nearest integer (as float).
    """
    interpolated = np.interp(num, from_interval, to_interval)
    return float(round(interpolated))
def cv2_grayscale(image, is_bgr=True, depth=1):
    """Convert an RGB or BGR image to grayscale using OpenCV (cv2).

    :param image: input image, a numpy.ndarray.
    :param is_bgr: tells whether the image is in BGR format. If False, RGB format is assumed.
    :param depth: replicates the gray depth channel multiple times. E.g. useful to display grayscale images as rgb.
    """
    assert depth >= 1

    conversion = cv2.COLOR_BGR2GRAY if is_bgr else cv2.COLOR_RGB2GRAY
    grayscale = cv2.cvtColor(image, conversion)

    if depth == 1:
        return grayscale

    # Replicate the single channel along a new last axis.
    return np.stack((grayscale,) * depth, axis=-1)
def save_agent(agent: Agent, agent_name: str, directory: str, separate_dir=True) -> str:
    """Save a tensorforce agent and return the checkpoint path.

    :param separate_dir: when True, save under `directory/agent_name/`
        (created if missing) instead of directly inside `directory`.
    """
    if not separate_dir:
        save_path = directory
    else:
        save_path = os.path.join(directory, agent_name)
        os.makedirs(save_path, exist_ok=True)

    return agent.save(directory=save_path, filename=agent_name)
def get_record_path(base_dir: str, prefix='ep', pattern='-'):
    """Create and return a new uniquely-numbered record directory.

    Directories are named '<prefix><pattern><count>' (e.g. 'ep-3'); the new
    count is one past the highest existing count, or 0 if none exist.

    Fixes two defects of the original implementation:
    - `sorted()` compared names lexicographically, so 'ep-9' sorted after
      'ep-10' and the "next" directory could collide with an existing one
      (os.mkdir then raised FileExistsError);
    - any entry in `base_dir` not matching the naming scheme crashed the
      int() parse.

    :param base_dir: directory that holds the record directories.
    :param prefix: name prefix of each record directory.
    :param pattern: separator between prefix and count.
    :return: path of the newly created directory.
    """
    counts = []
    for entry in os.listdir(base_dir):
        head, sep, tail = entry.partition(pattern)
        # Only consider entries that match '<prefix><pattern><int>'.
        if sep and head == prefix and tail.isdigit():
            counts.append(int(tail))

    count = max(counts) + 1 if counts else 0

    record_path = os.path.join(base_dir, f'{prefix}{pattern}{count}')
    os.mkdir(record_path)
    return record_path
def replace_nans(data: dict, nan=0.0, pos_inf=0.0, neg_inf=0.0):
    """In-place replacement of non-numerical values, i.e. NaNs and +/- infinity.

    :param data: dict whose values are numbers or numpy arrays.
    :return: the same dict, for convenience.
    """
    for key in data:
        value = data[key]
        has_bad_values = np.isnan(value).any() or np.isinf(value).any()
        if has_bad_values:
            data[key] = np.nan_to_num(value, nan=nan, posinf=pos_inf, neginf=neg_inf)

    return data
# -------------------------------------------------------------------------------------------------
# -- Debug
# -------------------------------------------------------------------------------------------------
class Colors(object):
    """Wraps some carla.Color instances."""

    # Common colors as carla.Color objects (RGB components, 0-255).
    red = carla.Color(255, 0, 0)
    green = carla.Color(0, 255, 0)
    blue = carla.Color(47, 210, 231)  # a light, cyan-ish blue
    cyan = carla.Color(0, 255, 255)
    yellow = carla.Color(255, 255, 0)
    orange = carla.Color(255, 162, 0)
    white = carla.Color(255, 255, 255)
    black = carla.Color(0, 0, 0)
def draw_transform(debug, trans, col=Colors.red, lt=-1):
    """Draw a unit arrow at `trans`, pointing along its yaw/pitch direction.

    :param debug: carla debug helper (e.g. world.debug).
    :param trans: carla.Transform to visualise.
    :param col: arrow color. Default: red.
    :param lt: life time of the drawing in seconds (-1 keeps it forever).
    """
    yaw = math.radians(trans.rotation.yaw)
    pitch = math.radians(trans.rotation.pitch)

    # Unit direction vector from spherical angles (yaw about z, pitch up).
    dx = math.cos(pitch) * math.cos(yaw)
    dy = math.cos(pitch) * math.sin(yaw)
    dz = math.sin(pitch)

    start = trans.location
    end = carla.Location(x=start.x + dx, y=start.y + dy, z=start.z + dz)

    debug.draw_arrow(start, end, thickness=0.05, arrow_size=0.1, color=col, life_time=lt)
def draw_radar_measurement(debug_helper: carla.DebugHelper, data: carla.RadarMeasurement, velocity_range=7.5,
                           size=0.075, life_time=0.06):
    """Code adapted from carla/PythonAPI/examples/manual_control.py:
        - White: means static points.
        - Red: indicates points moving towards the object.
        - Blue: denoted points moving away.

    NOTE(review): unlike the manual_control.py original, this version draws
    every point white and never reads `velocity_range` — the velocity-based
    coloring described above appears to have been dropped; confirm intent.
    """
    radar_rotation = data.transform.rotation
    for detection in data:
        # Detection angles are relative to the radar; add the radar's own
        # rotation to obtain world-frame angles (in degrees).
        azimuth = math.degrees(detection.azimuth) + radar_rotation.yaw
        altitude = math.degrees(detection.altitude) + radar_rotation.pitch

        # move to local coordinates:
        forward_vec = carla.Vector3D(x=detection.depth - 0.25)
        global_to_local(forward_vec,
                        reference=carla.Rotation(pitch=altitude, yaw=azimuth, roll=radar_rotation.roll))

        # draw:
        debug_helper.draw_point(data.transform.location + forward_vec, size=size, life_time=life_time,
                                persistent_lines=False, color=carla.Color(255, 255, 255))
# -------------------------------------------------------------------------------------------------
# -- Math
# -------------------------------------------------------------------------------------------------
def l2_norm(location1, location2):
    """Computes the Euclidean distance between two carla.Location objects.

    The module-level `epsilon` is added to the result, keeping it strictly
    positive so callers may divide by it safely.
    """
    deltas = (location1.x - location2.x,
              location1.y - location2.y,
              location1.z - location2.z)
    return math.sqrt(sum(d * d for d in deltas)) + epsilon
def vector_norm(vec: carla.Vector3D) -> float:
    """Returns the norm/magnitude (a scalar) of the given 3D vector."""
    squared_magnitude = vec.x * vec.x + vec.y * vec.y + vec.z * vec.z
    return math.sqrt(squared_magnitude)
def speed(actor: carla.Actor) -> float:
    """Returns the speed of the given actor in km/h."""
    velocity = actor.get_velocity()
    # 3.6 converts m/s to km/h.
    return vector_norm(velocity) * 3.6
def dot_product(a: carla.Vector3D, b: carla.Vector3D) -> float:
    """Returns the scalar (dot) product of the two given 3D vectors."""
    paired = ((a.x, b.x), (a.y, b.y), (a.z, b.z))
    return sum(p * q for p, q in paired)
def cosine_similarity(a: carla.Vector3D, b: carla.Vector3D) -> float:
    """Cosine of the angle between the two vectors.

    -1: opposite vectors (pointing in the opposite direction),
     0: orthogonal,
     1: exactly the same (pointing in the same direction)
    """
    denominator = vector_norm(a) * vector_norm(b)
    return dot_product(a, b) / denominator
|
|
# -*- coding: utf-8 -*-
"""
tests.views
~~~~~~~~~~~
Pluggable views.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
import flask.views
from werkzeug.http import parse_set_header
def common_test(app):
    # Shared assertions: the registered view answers GET and POST, rejects
    # PUT (405), and advertises exactly GET/HEAD/OPTIONS/POST in `Allow`.
    c = app.test_client()
    assert c.get('/').data == b'GET'
    assert c.post('/').data == b'POST'
    assert c.put('/').status_code == 405
    meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow'])
    assert sorted(meths) == ['GET', 'HEAD', 'OPTIONS', 'POST']
# A plain flask.views.View routes every allowed method through dispatch_request.
def test_basic_view(app):
    class Index(flask.views.View):
        methods = ['GET', 'POST']
        def dispatch_request(self):
            return flask.request.method
    app.add_url_rule('/', view_func=Index.as_view('index'))
    common_test(app)
# MethodView maps HTTP verbs to same-named lowercase methods.
def test_method_based_view(app):
    class Index(flask.views.MethodView):
        def get(self):
            return 'GET'
        def post(self):
            return 'POST'
    app.add_url_rule('/', view_func=Index.as_view('index'))
    common_test(app)
# Swapping view_class after as_view() redirects dispatch to the new class,
# so Index's failing handlers (1 // 0 would raise ZeroDivisionError) never run.
def test_view_patching(app):
    class Index(flask.views.MethodView):
        def get(self):
            1 // 0
        def post(self):
            1 // 0
    class Other(Index):
        def get(self):
            return 'GET'
        def post(self):
            return 'POST'
    view = Index.as_view('index')
    view.view_class = Other
    app.add_url_rule('/', view_func=view)
    common_test(app)
# Subclassing a MethodView adds the new verb while keeping the parent's.
def test_view_inheritance(app, client):
    class Index(flask.views.MethodView):
        def get(self):
            return 'GET'
        def post(self):
            return 'POST'
    class BetterIndex(Index):
        def delete(self):
            return 'DELETE'
    app.add_url_rule('/', view_func=BetterIndex.as_view('index'))
    meths = parse_set_header(client.open('/', method='OPTIONS').headers['Allow'])
    assert sorted(meths) == ['DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST']
# Entries in the `decorators` class attribute wrap the generated view function.
def test_view_decorators(app, client):
    def add_x_parachute(f):
        def new_function(*args, **kwargs):
            resp = flask.make_response(f(*args, **kwargs))
            resp.headers['X-Parachute'] = 'awesome'
            return resp
        return new_function
    class Index(flask.views.View):
        decorators = [add_x_parachute]
        def dispatch_request(self):
            return 'Awesome'
    app.add_url_rule('/', view_func=Index.as_view('index'))
    rv = client.get('/')
    assert rv.headers['X-Parachute'] == 'awesome'
    assert rv.data == b'Awesome'
# provide_automatic_options toggles Flask's implicit OPTIONS handling:
# False -> OPTIONS is 405; True with methods=['OPTIONS'] -> only OPTIONS
# is allowed; unset -> OPTIONS is provided automatically.
def test_view_provide_automatic_options_attr():
    app = flask.Flask(__name__)
    class Index1(flask.views.View):
        provide_automatic_options = False
        def dispatch_request(self):
            return 'Hello World!'
    app.add_url_rule('/', view_func=Index1.as_view('index'))
    c = app.test_client()
    rv = c.open('/', method='OPTIONS')
    assert rv.status_code == 405
    app = flask.Flask(__name__)
    class Index2(flask.views.View):
        methods = ['OPTIONS']
        provide_automatic_options = True
        def dispatch_request(self):
            return 'Hello World!'
    app.add_url_rule('/', view_func=Index2.as_view('index'))
    c = app.test_client()
    rv = c.open('/', method='OPTIONS')
    assert sorted(rv.allow) == ['OPTIONS']
    app = flask.Flask(__name__)
    class Index3(flask.views.View):
        def dispatch_request(self):
            return 'Hello World!'
    app.add_url_rule('/', view_func=Index3.as_view('index'))
    c = app.test_client()
    rv = c.open('/', method='OPTIONS')
    assert 'OPTIONS' in rv.allow
# Without an explicit head(), HEAD falls back to get() with an empty body
# but preserved headers.
def test_implicit_head(app, client):
    class Index(flask.views.MethodView):
        def get(self):
            return flask.Response('Blub', headers={
                'X-Method': flask.request.method
            })
    app.add_url_rule('/', view_func=Index.as_view('index'))
    rv = client.get('/')
    assert rv.data == b'Blub'
    assert rv.headers['X-Method'] == 'GET'
    rv = client.head('/')
    assert rv.data == b''
    assert rv.headers['X-Method'] == 'HEAD'
# An explicit head() takes precedence over the GET fallback.
def test_explicit_head(app, client):
    class Index(flask.views.MethodView):
        def get(self):
            return 'GET'
        def head(self):
            return flask.Response('', headers={'X-Method': 'HEAD'})
    app.add_url_rule('/', view_func=Index.as_view('index'))
    rv = client.get('/')
    assert rv.data == b'GET'
    rv = client.head('/')
    assert rv.data == b''
    assert rv.headers['X-Method'] == 'HEAD'
# Registering the same endpoint name twice raises AssertionError in debug
# mode, and the first registration keeps working.
def test_endpoint_override(app):
    app.debug = True
    class Index(flask.views.View):
        methods = ['GET', 'POST']
        def dispatch_request(self):
            return flask.request.method
    app.add_url_rule('/', view_func=Index.as_view('index'))
    with pytest.raises(AssertionError):
        app.add_url_rule('/', view_func=Index.as_view('index'))
    # But these tests should still pass. We just log a warning.
    common_test(app)
# Verb methods from multiple MethodView bases are merged into `methods`.
def test_multiple_inheritance(app, client):
    class GetView(flask.views.MethodView):
        def get(self):
            return 'GET'
    class DeleteView(flask.views.MethodView):
        def delete(self):
            return 'DELETE'
    class GetDeleteView(GetView, DeleteView):
        pass
    app.add_url_rule('/', view_func=GetDeleteView.as_view('index'))
    assert client.get('/').data == b'GET'
    assert client.delete('/').data == b'DELETE'
    assert sorted(GetDeleteView.methods) == ['DELETE', 'GET']
# An explicit `methods` list overrides the auto-detected inherited set.
def test_remove_method_from_parent(app, client):
    class GetView(flask.views.MethodView):
        def get(self):
            return 'GET'
    class OtherView(flask.views.MethodView):
        def post(self):
            return 'POST'
    class View(GetView, OtherView):
        methods = ['GET']
    app.add_url_rule('/', view_func=View.as_view('index'))
    assert client.get('/').data == b'GET'
    assert client.post('/').status_code == 405
    assert sorted(View.methods) == ['GET']
|
|
import morepath
from morepath.traject import (
TrajectRegistry,
Node,
Step,
TrajectError,
is_identifier,
parse_variables,
Path,
create_path,
parse_path,
normalize_path,
ParameterFactory,
)
from morepath.converter import Converter, IDENTITY_CONVERTER
import pytest
from webob.exc import HTTPBadRequest
# NOTE(review): unused placeholder -- never called in this module.
def traject_consume():
    pass
# Minimal model classes used as traject pattern targets in the tests below.
class Root:
    pass
class Model:
    pass
class Special:
    pass
# A literal step matches only its own text and binds no variables.
def test_name_step():
    step = Step("foo")
    assert step.s == "foo"
    assert step.generalized == "foo"
    assert step.parts == ("foo",)
    assert step.names == []
    assert step.converters == {}
    assert step.discriminator_info() == "foo"
    assert not step.has_variables()
    variables = {}
    assert step.match("foo", variables)
    assert variables == {}
    assert not step.match("bar", variables)
    assert variables == {}
# A pure-variable step matches any segment and binds the captured text.
def test_variable_step():
    step = Step("{foo}")
    assert step.s == "{foo}"
    assert step.generalized == "{}"
    assert step.parts == ("", "")
    assert step.names == ["foo"]
    assert step.converters == {}
    assert step.has_variables()
    assert step.discriminator_info() == "{}"
    variables = {}
    assert step.match("bar", variables)
    assert variables == {"foo": "bar"}
# Literal prefix/postfix around a variable must match exactly; a failed
# match must not bind any variables.
def test_mixed_step():
    step = Step("a{foo}b")
    assert step.s == "a{foo}b"
    assert step.generalized == "a{}b"
    assert step.parts == ("a", "b")
    assert step.names == ["foo"]
    assert step.converters == {}
    assert step.has_variables()
    assert step.discriminator_info() == "a{}b"
    variables = {}
    assert step.match("abarb", variables)
    assert variables == {"foo": "bar"}
    variables = {}
    assert not step.match("ab", variables)
    assert not variables
    variables = {}
    assert not step.match("xbary", variables)
    assert not variables
    variables = {}
    assert not step.match("yabarbx", variables)
    assert not variables
    variables = {}
    assert not step.match("afoo", variables)
    assert not variables
# Multiple variables separated by a literal infix.
def test_multi_mixed_step():
    step = Step("{foo}a{bar}")
    assert step.s == "{foo}a{bar}"
    assert step.generalized == "{}a{}"
    assert step.parts == ("", "a", "")
    assert step.names == ["foo", "bar"]
    assert step.converters == {}
    assert step.has_variables()
    assert step.discriminator_info() == "{}a{}"
# A converter decodes the captured text; undecodable text fails the match.
def test_converter():
    step = Step("{foo}", converters=dict(foo=Converter(int)))
    assert step.discriminator_info() == "{}"
    variables = {}
    assert step.match("1", variables)
    assert variables == {"foo": 1}
    variables = {}
    assert not step.match("x", variables)
    assert not variables
# Helper: sort the given step patterns by Step ordering, return their texts.
def sorted_steps(input_list):
    steps = [Step(s) for s in input_list]
    return [step.s for step in sorted(steps)]
# Equal steps compare equal; neither orders before the other.
def test_steps_the_same():
    step1 = Step("{foo}")
    step2 = Step("{foo}")
    assert step1 == step2
    assert not step1 != step2
    assert not step1 < step2
    assert not step1 > step2
    assert step1 >= step2
    assert step1 <= step2
def test_step_different():
    step1 = Step("{foo}")
    step2 = Step("bar")
    assert step1 != step2
    assert not step1 == step2
    assert not step1 < step2
    assert step1 > step2
    assert step1 >= step2
    assert not step1 <= step2
# The ordering tests below pin the resolution priority: more specific
# patterns (longer literals, prefixes before postfixes, more variables)
# sort before more general ones.
def test_order_prefix_earlier():
    assert sorted_steps(["{foo}", "prefix{foo}"]) == ["prefix{foo}", "{foo}"]
def test_order_postfix_earlier():
    assert sorted_steps(["{foo}", "{foo}postfix"]) == ["{foo}postfix", "{foo}"]
def test_order_prefix_before_postfix():
    assert sorted_steps(["{foo}", "a{foo}", "{foo}a"]) == [
        "a{foo}",
        "{foo}a",
        "{foo}",
    ]
def test_order_prefix_before_postfix2():
    assert sorted_steps(["{foo}", "a{foo}", "{foo}b"]) == [
        "a{foo}",
        "{foo}b",
        "{foo}",
    ]
def test_order_longer_prefix_before_shorter():
    assert sorted_steps(["ab{f}", "a{f}"]) == ["ab{f}", "a{f}"]
def test_order_longer_postfix_before_shorter():
    assert sorted_steps(["{f}ab", "{f}b"]) == ["{f}ab", "{f}b"]
def test_order_dont_care_variable_names():
    assert sorted_steps(["a{f}", "ab{g}"]) == ["ab{g}", "a{f}"]
def test_order_two_variables_before_one():
    assert sorted_steps(["{a}x{b}", "{a}"]) == ["{a}x{b}", "{a}"]
def test_order_two_variables_before_with_postfix():
    assert sorted_steps(["{a}x{b}x", "{a}x"]) == ["{a}x{b}x", "{a}x"]
def test_order_two_variables_before_with_prefix():
    assert sorted_steps(["x{a}x{b}", "x{a}"]) == ["x{a}x{b}", "x{a}"]
def test_order_two_variables_infix():
    assert sorted_steps(
        ["{a}xyz{b}", "{a}xy{b}", "{a}yz{b}", "{a}x{b}", "{a}z{b}", "{a}y{b}"]
    ) == ["{a}xyz{b}", "{a}yz{b}", "{a}z{b}", "{a}xy{b}", "{a}y{b}", "{a}x{b}"]
def test_order_alphabetical():
    # reverse alphabetical
    assert sorted_steps(["a{f}", "b{f}"]) == ["b{f}", "a{f}"]
    assert sorted_steps(["{f}a", "{f}b"]) == ["{f}b", "{f}a"]
# Malformed patterns must raise TrajectError at Step construction time.
def test_invalid_step():
    with pytest.raises(TrajectError):
        Step("{foo")
def test_illegal_consecutive_variables():
    with pytest.raises(TrajectError):
        Step("{a}{b}")
def test_illegal_variable():
    with pytest.raises(TrajectError):
        Step("{a:int:int}")
def test_illegal_identifier():
    with pytest.raises(TrajectError):
        Step("{1}")
def test_unknown_converter():
    with pytest.raises(TrajectError):
        Step("{foo:blurb}")
# Node.resolve walks one path segment: literal children match exactly.
def test_name_node():
    node = Node()
    step_node = node.add(Step("foo"))
    variables = {}
    assert node.resolve("foo", variables) is step_node
    assert not variables
    assert node.resolve("bar", variables) is None
    assert not variables
# Variable children match any segment and record the binding.
def test_variable_node():
    node = Node()
    step_node = node.add(Step("{x}"))
    variables = {}
    assert node.resolve("foo", variables) is step_node
    assert variables == {"x": "foo"}
    variables = {}
    assert node.resolve("bar", variables) is step_node
    assert variables == {"x": "bar"}
# Mixed children strip the literal prefix/postfix before binding.
def test_mixed_node():
    node = Node()
    step_node = node.add(Step("prefix{x}postfix"))
    variables = {}
    assert node.resolve("prefixfoopostfix", variables) is step_node
    assert variables == {"x": "foo"}
    variables = {}
    assert node.resolve("prefixbarpostfix", variables) is step_node
    assert variables == {"x": "bar"}
    variables = {}
    assert node.resolve("prefixwhat", variables) is None
    assert variables == {}
# When several children could match a segment, the more specific one wins.
def test_variable_node_specific_first():
    node = Node()
    x_node = node.add(Step("{x}"))
    prefix_node = node.add(Step("prefix{x}"))
    variables = {}
    assert node.resolve("what", variables) is x_node
    assert variables == {"x": "what"}
    variables = {}
    assert node.resolve("prefixwhat", variables) is prefix_node
    assert variables == {"x": "what"}
def test_variable_node_more_specific_first():
    node = Node()
    xy_node = node.add(Step("x{x}y"))
    xay_node = node.add(Step("xa{x}y"))
    ay_node = node.add(Step("a{x}y"))
    variables = {}
    assert node.resolve("xwhaty", variables) is xy_node
    assert variables == {"x": "what"}
    variables = {}
    assert node.resolve("xawhaty", variables) is xay_node
    assert variables == {"x": "what"}
    variables = {}
    assert node.resolve("awhaty", variables) is ay_node
    assert variables == {"x": "what"}
# A ':'-separated second variable is optional in the matched segment.
def test_variable_node_optional_colon():
    node = Node()
    x_node = node.add(Step("{x}"))
    xy_node = node.add(Step("{x}:{y}"))
    variables = {}
    assert node.resolve("a", variables) is x_node
    assert variables == {"x": "a"}
    variables = {}
    assert node.resolve("a:b", variables) is xy_node
    assert variables == {"x": "a", "y": "b"}
# Helper: build a morepath Request for `path` against a fresh App instance.
def req(path):
    return morepath.Request.blank(path, app=morepath.App())
# consume() resolves as much of the path as it can; leftover segments end
# up in request.unconsumed (innermost-first, i.e. reversed order).
def test_traject_simple():
    traject = TrajectRegistry()
    class abc:
        pass
    class abd:
        pass
    class xy:
        pass
    class xz:
        pass
    traject.add_pattern("a/b/c", abc)
    traject.add_pattern("a/b/d", abd)
    traject.add_pattern("x/y", xy)
    traject.add_pattern("x/z", xz)
    r = req("a/b/c")
    assert isinstance(traject.consume(r), abc)
    assert r.unconsumed == []
    assert isinstance(traject.consume(req("a/b/d")), abd)
    assert isinstance(traject.consume(req("x/y")), xy)
    assert isinstance(traject.consume(req("x/z")), xz)
    r = req("a/b/c/d")
    assert isinstance(traject.consume(r), abc)
    assert r.unconsumed == ["d"]
    r = req("a/b/d/d")
    assert isinstance(traject.consume(r), abd)
    assert r.unconsumed == ["d"]
    r = req("x/y/1/2/3")
    assert isinstance(traject.consume(r), xy)
    assert r.unconsumed == ["3", "2", "1"]
    r = req("1/2/3")
    assert traject.consume(r) is None
    assert r.unconsumed == ["3", "2", "1"]
    r = req("a/b")
    assert traject.consume(r) is None
    assert r.unconsumed == []
# A prefixed variable pattern wins over a plain variable in the same slot.
def test_traject_variable_specific_first():
    traject = TrajectRegistry()
    class axb:
        def __init__(self, x):
            self.x = x
    class aprefixxb:
        def __init__(self, x):
            self.x = x
    traject.add_pattern("a/{x}/b", axb)
    traject.add_pattern("a/prefix{x}/b", aprefixxb)
    obj = traject.consume(req("a/lah/b"))
    assert isinstance(obj, axb)
    assert obj.x == "lah"
    obj = traject.consume(req("a/prefixlah/b"))
    assert isinstance(obj, aprefixxb)
    assert obj.x == "lah"
# Variables from every step are passed to the model factory by name.
def test_traject_multiple_steps_with_variables():
    traject = TrajectRegistry()
    class xy:
        def __init__(self, x, y):
            self.x = x
            self.y = y
    traject.add_pattern("{x}/{y}", xy)
    obj = traject.consume(req("x/y"))
    assert obj.x == "x"
    assert obj.y == "y"
# A failed conversion makes the whole consume miss (returns None).
def test_traject_with_converter():
    traject = TrajectRegistry()
    class found:
        def __init__(self, x):
            self.x = x
    traject.add_pattern("{x}", found, converters=dict(x=Converter(int)))
    obj = traject.consume(req("1"))
    assert obj.x == 1
    assert traject.consume(req("foo")) is None
# Registering the same variable slot with conflicting converters is an error.
def test_traject_type_conflict():
    traject = TrajectRegistry()
    class found_int:
        def __init__(self, x):
            self.x = x
    class found_str:
        def __init__(self, x):
            self.x = x
    traject.add_pattern("{x}", found_int, converters=dict(x=Converter(int)))
    with pytest.raises(TrajectError):
        traject.add_pattern("{x}", found_str, converters=dict(x=Converter(str)))
def test_traject_type_conflict_default_type():
    traject = TrajectRegistry()
    class found_str:
        def __init__(self, x):
            self.x = x
    class found_int:
        def __init__(self, x):
            self.x = x
    traject.add_pattern("{x}", found_str)
    with pytest.raises(TrajectError):
        traject.add_pattern("{x}", found_int, converters=dict(x=Converter(int)))
# An explicit IDENTITY_CONVERTER is equivalent to omitting the converter.
def test_traject_type_conflict_explicit_default():
    traject = TrajectRegistry()
    class found_explicit:
        def __init__(self, x):
            self.x = x
    class found_implicit:
        def __init__(self, x):
            self.x = x
    traject.add_pattern(
        "{x}", found_explicit, converters=dict(x=IDENTITY_CONVERTER)
    )
    traject.add_pattern("{x}", found_implicit)
    # these add_pattern calls are equivalent so will not result in an error
    assert True
# Converter conflicts are detected for variables in the middle of paths too.
def test_traject_type_conflict_middle():
    traject = TrajectRegistry()
    class int_f:
        def __init__(self, x):
            self.x = x
    class str_f:
        def __init__(self, x):
            self.x = x
    traject.add_pattern("a/{x}/y", int_f, converters=dict(x=Converter(int)))
    with pytest.raises(TrajectError):
        traject.add_pattern("a/{x}/z", str_f)
def test_traject_no_type_conflict_middle():
    traject = TrajectRegistry()
    class int_f:
        def __init__(self, x):
            self.x = x
    class int_f2:
        def __init__(self, x):
            self.x = x
    traject.add_pattern("a/{x}/y", int_f, converters=dict(x=Converter(int)))
    traject.add_pattern("a/{x}/z", int_f2, converters=dict(x=Converter(int)))
# Once the more specific prefixed branch matches a segment, the plain
# variable branch is not retried for the remaining path.
def test_traject_greedy_middle_prefix():
    traject = TrajectRegistry()
    class prefix:
        def __init__(self, x):
            self.x = x
    class no_prefix:
        def __init__(self, x):
            self.x = x
    traject.add_pattern("a/prefix{x}/y", prefix)
    traject.add_pattern("a/{x}/z", no_prefix)
    obj = traject.consume(req("a/prefixX/y"))
    assert obj.x == "X"
    assert isinstance(obj, prefix)
    assert traject.consume(req("a/prefixX/z")) is None
    obj = traject.consume(req("a/blah/z"))
    assert obj.x == "blah"
    assert isinstance(obj, no_prefix)
def test_traject_type_conflict_middle_end():
    traject = TrajectRegistry()
    class int_f:
        def __init__(self, x):
            self.x = x
    class str_f:
        def __init__(self, x):
            self.x = x
    traject.add_pattern("a/{x}/y", int_f, converters=dict(x=Converter(int)))
    with pytest.raises(TrajectError):
        traject.add_pattern("a/{x}", str_f)
def test_traject_no_type_conflict_middle_end():
    traject = TrajectRegistry()
    class int_f:
        def __init__(self, x):
            self.x = x
    class int_f2:
        def __init__(self, x):
            self.x = x
    traject.add_pattern("a/{x}/y", int_f, converters=dict(x=Converter(int)))
    traject.add_pattern("a/{x}", int_f2, converters=dict(x=Converter(int)))
    assert True
# parse_path splits a URL path into segments, tolerating missing/extra
# slashes and '.'/'..' components.
def test_parse_path():
    assert parse_path("/a/b/c") == ["a", "b", "c"]
def test_parse_path_empty():
    assert parse_path("") == []
def test_parse_path_slash():
    assert parse_path("/") == []
def test_parse_path_no_slash():
    assert parse_path("a/b/c") == ["a", "b", "c"]
def test_parse_path_end_slash():
    assert parse_path("a/b/c/") == ["a", "b", "c"]
def test_parse_path_multi_slash():
    assert parse_path("/a/b/c") == parse_path("/a//b/c")
    assert parse_path("/a/b/c") == parse_path("/a///b/c")
def test_parse_path_dots():
    assert parse_path("/a/b/../c") == parse_path("/a/c")
def test_parse_path_single_dots():
    assert parse_path("/a/./b") == parse_path("/a/b")
    assert parse_path("./a/b") == parse_path("/a/b")
# '..' above the root is clamped rather than treated as an error.
def test_parse_path_dots_start():
    assert parse_path("/../a/b") == parse_path("/a/b")
def test_create_path():
    assert create_path(["a", "b", "c"]) == "/a/b/c"
    assert create_path([]) == "/"
# normalize_path collapses '.'/'..' segments and redundant slashes into a
# canonical absolute path.
def test_normalize_path():
    assert normalize_path("/a/..") == "/"
    assert normalize_path("/a/../../../../b") == "/b"
    assert normalize_path("/a/../c") == "/c"
    assert normalize_path("/a/../../a/") == "/a"
    assert normalize_path("/") == "/"
    assert normalize_path("") == "/"
    assert normalize_path("../../") == "/"
    assert normalize_path("../static//../app.py") == "/app.py"
    assert normalize_path("../a//b/") == "/a/b"
    assert normalize_path("/////a/////../b") == "/b"
    assert normalize_path("//foo") == "/foo"
    assert normalize_path("/a/b/c/../..") == "/a"
    assert normalize_path("/a/b/c/../../d") == "/a/d"
def test_identifier():
    """is_identifier accepts identifier-like names, rejects everything else."""
    assert is_identifier("a")
    # BUG FIX: the original line was `not is_identifier("")` -- a bare
    # expression with no effect; the empty-string case was never checked.
    assert not is_identifier("")
    assert is_identifier("a1")
    assert not is_identifier("1")
    assert is_identifier("_")
    assert is_identifier("_foo")
    assert is_identifier("foo")
    assert not is_identifier(".")
# parse_variables extracts {name} placeholders in order; empty or
# non-identifier names raise TrajectError.
def test_parse_variables():
    assert parse_variables("No variables") == []
    assert parse_variables("The {foo} is the {bar}.") == ["foo", "bar"]
    with pytest.raises(TrajectError):
        parse_variables("{}")
    with pytest.raises(TrajectError):
        parse_variables("{1illegal}")
# The tests below exercise the path registry of a committed morepath App.
def test_traject_consume():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    traject.add_pattern("sub", Model)
    r = req("sub")
    obj = traject.consume(r)
    assert isinstance(obj, Model)
    assert r.unconsumed == []
# Query parameters are converted, defaulted and handed to the factory.
def test_traject_consume_parameter():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    class Model:
        def __init__(self, a):
            self.a = a
    traject.add_pattern(
        "sub",
        Model,
        defaults={"a": 0},
        converters={"a": Converter(int)},
        required=[],
    )
    r = req("sub?a=1")
    obj = traject.consume(r)
    assert isinstance(obj, Model)
    assert obj.a == 1
    assert r.unconsumed == []
    r = req("sub")
    obj = traject.consume(r)
    assert isinstance(obj, Model)
    assert obj.a == 0
    assert r.unconsumed == []
# A factory parameter named `request` receives the request object itself.
def test_traject_consume_model_factory_gets_request():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    class Model:
        def __init__(self, info):
            self.info = info
    def get_model(request):
        return Model(request.method)
    traject.add_pattern("sub", get_model)
    r = req("sub")
    obj = traject.consume(r)
    assert r.unconsumed == []
    assert obj.info == "GET"
def test_traject_consume_not_found():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    r = req("sub")
    assert traject.consume(r) is None
    assert r.unconsumed == ["sub"]
# A factory may return None; the matched path still counts as consumed.
def test_traject_consume_factory_returns_none():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    def get_model():
        return None
    traject.add_pattern("sub", get_model)
    r = req("sub")
    assert traject.consume(r) is None
    assert r.unconsumed == []
def test_traject_consume_variable():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    def get_model(foo):
        result = Model()
        result.foo = foo
        return result
    traject.add_pattern("{foo}", get_model)
    r = req("something")
    obj = traject.consume(r)
    assert isinstance(obj, Model)
    assert obj.foo == "something"
    assert r.unconsumed == []
# A '+view' segment is not matched by a variable pattern; it stays
# unconsumed and resolution stops at the root.
def test_traject_consume_view():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    def get_model(foo):
        result = Model()
        result.foo = foo
        return result
    traject.add_pattern("", Root)
    traject.add_pattern("{foo}", get_model)
    r = req("+something")
    obj = traject.consume(r)
    assert isinstance(obj, Root)
    assert r.unconsumed == ["+something"]
# The empty pattern registers the root model.
def test_traject_root():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    traject.add_pattern("", Root)
    r = req("")
    obj = traject.consume(r)
    assert isinstance(obj, Root)
    assert r.unconsumed == []
# Literal patterns still win over variable patterns at the same depth.
def test_traject_consume_combination():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    def get_model(foo):
        result = Model()
        result.foo = foo
        return result
    traject.add_pattern("special", Special)
    traject.add_pattern("{foo}", get_model)
    r = req("something")
    obj = traject.consume(r)
    assert isinstance(obj, Model)
    assert r.unconsumed == []
    assert obj.foo == "something"
    r = req("special")
    obj = traject.consume(r)
    assert isinstance(obj, Special)
    assert r.unconsumed == []
# A factory missing a parameter for a path variable fails at call time.
def test_traject_consume_extra_path_variable():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    def get_model(foo):
        result = Model()
        result.foo = foo
        return result
    traject.add_pattern("{bar}/{foo}", get_model)
    r = req("bar/foo")
    # we get a TypeError. ``register_path`` actually checks for
    # this case and prevents it from happening
    with pytest.raises(TypeError):
        traject.consume(r)
def test_traject_nested():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    traject.add_pattern("a", Model)
    traject.add_pattern("a/b", Special)
    r = req("a")
    obj = traject.consume(r)
    assert isinstance(obj, Model)
    assert r.unconsumed == []
    r = req("a/b")
    obj = traject.consume(r)
    assert isinstance(obj, Special)
    assert r.unconsumed == []
# Consuming stops at the deepest registered pattern; the rest is left over.
def test_traject_nested_not_resolved_entirely_by_consumer():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    traject.add_pattern("a", Model)
    r = req("a")
    obj = traject.consume(r)
    assert isinstance(obj, Model)
    assert r.unconsumed == []
    r = req("a/b")
    obj = traject.consume(r)
    assert isinstance(obj, Model)
    assert r.unconsumed == ["b"]
def test_traject_nested_with_variable():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    def get_model(id):
        result = Model()
        result.id = id
        return result
    def get_special(id):
        result = Special()
        result.id = id
        return result
    traject.add_pattern("{id}", get_model)
    traject.add_pattern("{id}/sub", get_special)
    r = req("a")
    obj = traject.consume(r)
    assert isinstance(obj, Model)
    assert r.unconsumed == []
    r = req("b")
    obj = traject.consume(r)
    assert isinstance(obj, Model)
    assert r.unconsumed == []
    r = req("a/sub")
    obj = traject.consume(r)
    assert isinstance(obj, Special)
    assert r.unconsumed == []
# Only the variables on the matched route are passed to its factory.
def test_traject_with_multiple_variables():
    class App(morepath.App):
        pass
    App.commit()
    traject = App.config.path_registry
    def get_model(first_id):
        result = Model()
        result.first_id = first_id
        return result
    def get_special(first_id, second_id):
        result = Special()
        result.first_id = first_id
        result.second_id = second_id
        return result
    traject.add_pattern("{first_id}", get_model)
    traject.add_pattern("{first_id}/{second_id}", get_special)
    r = req("a")
    obj = traject.consume(r)
    assert isinstance(obj, Model)
    assert obj.first_id == "a"
    assert not hasattr(obj, "second_id")
    assert r.unconsumed == []
    r = req("a/b")
    obj = traject.consume(r)
    assert isinstance(obj, Special)
    assert obj.first_id == "a"
    assert obj.second_id == "b"
    assert r.unconsumed == []
# Consecutive or duplicate variables in one pattern are rejected.
def test_traject_no_concecutive_variables():
    traject = TrajectRegistry()
    def f():
        pass
    with pytest.raises(TrajectError):
        traject.add_pattern("{foo}{bar}", f)
def test_traject_no_duplicate_variables():
    traject = TrajectRegistry()
    def f():
        pass
    with pytest.raises(TrajectError):
        traject.add_pattern("{foo}-{foo}", f)
    with pytest.raises(TrajectError):
        traject.add_pattern("{foo}/{foo}", f)
# Path helpers: %-style interpolation string and registry discriminator.
def test_interpolation_str():
    assert Path("{foo} is {bar}").interpolation_str() == "%(foo)s is %(bar)s"
def test_path_discriminator():
    p = Path("/foo/{x}/bar/{y}")
    assert p.discriminator() == "foo/{}/bar/{}"
# ParameterFactory(defaults, converters, required) builds the
# query-parameter dict for a request.
def test_empty_parameter_factory():
    get_parameters = ParameterFactory({}, {}, [])
    assert get_parameters(req("")) == {}
    # unexpected parameter is ignored
    assert get_parameters(req("?a=A")) == {}
def test_single_parameter():
    get_parameters = ParameterFactory({"a": None}, {"a": Converter(str)}, [])
    assert get_parameters(req("?a=A")) == {"a": "A"}
    assert get_parameters(req("")) == {"a": None}
# A failing conversion surfaces as HTTP 400.
def test_single_parameter_int():
    get_parameters = ParameterFactory({"a": None}, {"a": Converter(int)}, [])
    assert get_parameters(req("?a=1")) == {"a": 1}
    assert get_parameters(req("")) == {"a": None}
    with pytest.raises(HTTPBadRequest):
        get_parameters(req("?a=A"))
def test_single_parameter_default():
    get_parameters = ParameterFactory({"a": "default"}, {}, [])
    assert get_parameters(req("?a=A")) == {"a": "A"}
    assert get_parameters(req("")) == {"a": "default"}
def test_single_parameter_int_default():
    get_parameters = ParameterFactory({"a": 0}, {"a": Converter(int)}, [])
    assert get_parameters(req("?a=1")) == {"a": 1}
    assert get_parameters(req("")) == {"a": 0}
    with pytest.raises(HTTPBadRequest):
        get_parameters(req("?a=A"))
# A missing required parameter is also a 400.
def test_parameter_required():
    get_parameters = ParameterFactory({"a": None}, {}, ["a"])
    assert get_parameters(req("?a=foo")) == {"a": "foo"}
    with pytest.raises(HTTPBadRequest):
        get_parameters(req(""))
# A truthy fourth argument collects unknown query args under
# 'extra_parameters' instead of dropping them.
def test_extra_parameters():
    get_parameters = ParameterFactory({"a": None}, {}, [], True)
    assert get_parameters(req("?a=foo")) == {"a": "foo", "extra_parameters": {}}
    assert get_parameters(req("?b=foo")) == {
        "a": None,
        "extra_parameters": {"b": "foo"},
    }
    assert get_parameters(req("?a=foo&b=bar")) == {
        "a": "foo",
        "extra_parameters": {"b": "bar"},
    }
|
|
import os
import json
import random
import time
import logging
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
from boto.s3.key import Key
from .constants import *
from .docManager import DocManager
from .cluster import Cluster
from .clusterTableManager import ClusterTableManager
from .processedClusterStore import ProcessedClusterStore
from .clusterJobManager import ClusterJobManager
from .workerJob import WorkerJob
# Ranking-score cutoffs for notifications: clusters above the main threshold
# are always notifiable; the LOW threshold defines a fallback pool.
NOTIFICATION_IMPORTANCE_THRESHOLD = 1.85
NOTIFICATION_IMPORTANCE_THRESHOLD_LOW = 1.1
# Lower/upper bounds on how many clusters to notify per locale per run.
NOTIFICATION_MIN_NUMBER_THRESHOLD = 3
NOTIFICATION_MAX_NUMBER_THRESHOLD = 10
class ClusterManager:
"""
Manage clusters stored in cloud.
"""
def __init__(self):
"""
Instantiates a new instance of ClusterManager class
"""
self.clusterTableManager = ClusterTableManager()
self.docManager = DocManager()
self.processedClusterStore = ProcessedClusterStore()
def getProcessedCluster(self, cluster):
return self.processedClusterStore.getProcessedCluster(cluster)
def processNewCluster(self, cluster):
cluster.isCurrent = 'true'
cluster = self.processedClusterStore.processAndSaveCluster(cluster)
self.clusterTableManager.addCluster(cluster)
def __getProcessedClusterArticles(self, cluster):
cluster = self.getProcessedCluster(cluster)
return cluster.articles
def __getClusterResponse(self, cluster, filters = None):
articles = self.__getProcessedClusterArticles(
self.__filterDocsInCluster(cluster, filters))
title = articles[0]['title'];
description = articles[0]['title'] + " - " + \
articles[0]['publisher'][PUBLISHER_DETAILS_NAME] + ".";
if len(articles) > 1:
description += " " + articles[1]['title'] + " - " + \
articles[1]['publisher'][PUBLISHER_DETAILS_NAME] + ".";
return {
"articles": articles,
"title": title,
"description": description,
"locales": cluster.locales,
"languages": cluster.languages,
"importance": self.__computeClusterRankingScore(cluster)
}
def __computeClusterRankingScore(self, cluster):
return (0.3 * (len(cluster) - len(cluster.duplicates))) + \
(0.7 * len(cluster.publishers))
def __sortClustersByImportance(self, clusters):
clusterList = list(clusters)
clusterList.sort(key = self.__computeClusterRankingScore, reverse=True)
return clusterList;
def __filterClusters(self, clusterList, filters):
if not filters:
return clusterList
if CLUSTERS_FILTER_LANGUAGES in filters:
clusterList = [cluster for cluster in clusterList if not \
set(filters[CLUSTERS_FILTER_LANGUAGES]).isdisjoint(cluster.languages)]
return clusterList;
def __filterDocsInCluster(self, cluster, filters):
if not filters:
return cluster
filteredDocs = []
for docKey in cluster:
isDocAllowed = True;
doc = self.docManager.get(docKey)
if CLUSTERS_FILTER_LANGUAGES in filters:
if doc.tags[FEEDTAG_LANG] not in filters[CLUSTERS_FILTER_LANGUAGES]:
isDocAllowed = False
if isDocAllowed:
filteredDocs.append(docKey)
return Cluster(filteredDocs)
def __constructQueryResponse(self, clusters, skip, top, filters=None):
response = []
clusterList = list(clusters)
clusterList = self.__filterClusters(clusterList, filters)
clusterList = self.__sortClustersByImportance(clusterList)
for cluster in clusterList[skip:(skip + top)]:
try:
response.append(self.__getClusterResponse(cluster, filters))
except Exception as e:
logging.exception(
"Could not construct query response for cluster id %s",
cluster.id);
continue
return response;
def queryByCategoryAndCountry(self, category, country, skip, top, filters=None):
clusters = self.clusterTableManager.queryByCategoryAndCountry(
category,
country)
return self.__constructQueryResponse(clusters, skip, top, filters)
def queryByLocale(self, locale, skip, top, filters=None):
clusters = self.clusterTableManager.queryByLocale(locale)
response = []
return self.__constructQueryResponse(clusters, skip, top, filters)
def queryByDocId(self, docId, filters=None):
cluster = self.clusterTableManager.queryByDocId(docId)
if not cluster:
return None
else:
return self.__constructQueryResponse([cluster], 0, 1, filters)[0]
def putCurrentClusters(self, clusters):
jobManager = ClusterJobManager()
existingClusters = list(self.getCurrentClusters())
newClusters = [cluster for cluster in clusters
if cluster not in existingClusters]
expiredClusters = [cluster for cluster in existingClusters
if cluster not in clusters]
for cluster in newClusters:
job = WorkerJob(
JOB_PROCESSNEWCLUSTER,
{ JOBARG_PROCESSNEWCLUSTER_CLUSTER : list(cluster)})
jobManager.enqueueJob(job)
logging.info(
"Put process new cluster job. Cluster id: %s.",
cluster.id)
logging.info("Number of clusters to delete are: %i", len(expiredClusters))
self.clusterTableManager.deleteClusters(expiredClusters)
self.processedClusterStore.deleteClusters(expiredClusters)
    def getCurrentClusters(self):
        """Return all clusters currently stored in the cluster table."""
        return self.clusterTableManager.getCurrentClusters()
def getCurrentDocs(self):
currentClusters = self.getCurrentClusters()
return (doc for cluster in currentClusters for doc in cluster)
def archiveOldClusters(self):
return self.clusterTableManager.archiveOldClusters();
    def reprocessCurrentClusters(self):
        """
        Re-run processing for every current cluster and re-add them to the
        cluster table.
        """
        currentClusters = self.getCurrentClusters()
        for cluster in currentClusters:
            # NOTE(review): the processed result is bound to the loop variable
            # and then discarded; presumably processAndSaveCluster persists its
            # result itself -- confirm, otherwise this rebinding is a bug.
            cluster = self.processedClusterStore.processAndSaveCluster(cluster)
        # NOTE(review): if getCurrentClusters() returns a generator it is
        # exhausted by the loop above and addClusters would receive an empty
        # iterable -- confirm it returns a list.
        self.clusterTableManager.addClusters(currentClusters)
def getNotfiableClustersForLocale(self, jobId, locale):
logging.info("Fetching clusters for locale %s. %s", locale, jobId)
clusters = list(self.clusterTableManager.queryByLocale(locale))
importantClusters = [cluster for cluster in clusters if \
self.__computeClusterRankingScore(cluster) > NOTIFICATION_IMPORTANCE_THRESHOLD]
importantClusters = self.__sortClustersByImportance(importantClusters)
lessImportantClusters = [cluster for cluster in clusters if \
self.__computeClusterRankingScore(cluster) > NOTIFICATION_IMPORTANCE_THRESHOLD_LOW]
lessImportantClusters = self.__sortClustersByImportance(lessImportantClusters)
notifiableClusters = importantClusters
# if we have too less notifiable clusters, send notification for less important clusters.
if len(notifiableClusters) < NOTIFICATION_MIN_NUMBER_THRESHOLD:
notifiableClusters = lessImportantClusters[:NOTIFICATION_MIN_NUMBER_THRESHOLD]
# if we have too many notifiable clusters, limit the number to avoid spamming.
if len(notifiableClusters) > NOTIFICATION_MAX_NUMBER_THRESHOLD:
notifiableClusters = notifiableClusters[:NOTIFICATION_MAX_NUMBER_THRESHOLD]
logging.info("Number of notfiable clusters are: %i. %s", len(notifiableClusters), jobId)
return notifiableClusters
|
|
"""
Module where admin tools dashboard modules classes are defined.
"""
from django.apps import apps as django_apps
from django.core.urlresolvers import reverse
from django.forms.utils import flatatt
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from admin_tools.utils import AppListElementMixin, uniquify
class DashboardModule(object):
    """
    Base class for all dashboard modules.
    Dashboard modules have the following properties:
    ``enabled``
        Boolean that determines whether the module should be enabled in
        the dashboard by default or not. Default value: ``True``.
    ``draggable``
        Boolean that determines whether the module can be draggable or not.
        Draggable modules can be re-arranged by users. Default value: ``True``.
    ``collapsible``
        Boolean that determines whether the module is collapsible, this
        allows users to show/hide module content. Default: ``True``.
    ``deletable``
        Boolean that determines whether the module can be removed from the
        dashboard by users or not. Default: ``True``.
    ``title``
        String that contains the module title, make sure you use the django
        gettext functions if your application is multilingual.
        Default value: ''.
    ``title_url``
        String that contains the module title URL. If given the module
        title will be a link to this URL. Default value: ``None``.
    ``css_classes``
        A list of css classes to be added to the module ``div`` class
        attribute. Default value: ``None``.
    ``pre_content``
        Text or HTML content to display above the module content.
        Default value: ``None``.
    ``content``
        The module text or HTML content. Default value: ``None``.
    ``post_content``
        Text or HTML content to display under the module content.
        Default value: ``None``.
    ``template``
        The template to use to render the module.
        Default value: 'admin_tools/dashboard/module.html'.
    """
    # Class-level defaults for the properties documented in the docstring.
    template = 'admin_tools/dashboard/module.html'
    enabled = True
    draggable = True
    collapsible = True
    deletable = True
    show_title = True
    title = ''
    title_url = None
    css_classes = None
    pre_content = None
    post_content = None
    children = None
    id = None
    def __init__(self, title=None, **kwargs):
        """
        Store ``title`` and copy onto the instance any keyword argument
        whose name matches a declared class attribute.
        """
        if title is not None:
            self.title = title
        for key in kwargs:
            # Only accept kwargs that correspond to declared class attributes.
            if hasattr(self.__class__, key):
                setattr(self, key, kwargs[key])
        # Use per-instance containers so instances never share the
        # class-level ``children``/``css_classes`` lists.
        self.children = self.children or []
        self.css_classes = self.css_classes or []
        # boolean flag to ensure that the module is initialized only once
        self._initialized = False
    def init_with_context(self, context):
        """
        Like for the :class:`~admin_tools.dashboard.Dashboard` class, dashboard
        modules have a ``init_with_context`` method that is called with a
        ``django.template.RequestContext`` instance as unique argument.
        This gives you enough flexibility to build complex modules, for
        example, let's build a "history" dashboard module, that will list the
        last ten visited pages::
            from admin_tools.dashboard import modules
            class HistoryDashboardModule(modules.LinkList):
                title = 'History'
                def init_with_context(self, context):
                    request = context['request']
                    # we use sessions to store the visited pages stack
                    history = request.session.get('history', [])
                    for item in history:
                        self.children.append(item)
                    # add the current page to the history
                    history.insert(0, {
                        'title': context['title'],
                        'url': request.META['PATH_INFO']
                    })
                    if len(history) > 10:
                        history = history[:10]
                    request.session['history'] = history
        Here's a screenshot of our history item:
        .. image:: images/history_dashboard_module.png
        """
        # Base implementation is a no-op; subclasses populate ``children``.
        pass
    def is_empty(self):
        """
        Return True if the module has no content and False otherwise.
        >>> mod = DashboardModule()
        >>> mod.is_empty()
        True
        >>> mod.pre_content = 'foo'
        >>> mod.is_empty()
        False
        >>> mod.pre_content = None
        >>> mod.is_empty()
        True
        >>> mod.children.append('foo')
        >>> mod.is_empty()
        False
        >>> mod.children = []
        >>> mod.is_empty()
        True
        """
        return self.pre_content is None and \
               self.post_content is None and \
               len(self.children) == 0
    def render_css_classes(self):
        """
        Return a string containing the css classes for the module.
        >>> mod = DashboardModule(enabled=False, draggable=True,
        ...                       collapsible=True, deletable=True)
        >>> mod.render_css_classes()
        'dashboard-module disabled draggable collapsible deletable'
        >>> mod.css_classes.append('foo')
        >>> mod.render_css_classes()
        'dashboard-module disabled draggable collapsible deletable foo'
        >>> mod.enabled = True
        >>> mod.render_css_classes()
        'dashboard-module draggable collapsible deletable foo'
        """
        ret = ['dashboard-module']
        if not self.enabled:
            ret.append('disabled')
        if self.draggable:
            ret.append('draggable')
        if self.collapsible:
            ret.append('collapsible')
        if self.deletable:
            ret.append('deletable')
        ret += self.css_classes
        return ' '.join(ret)
    def _prepare_children(self):
        # Hook for container modules (e.g. Group) that assign ids to children.
        pass
class Group(DashboardModule):
    """
    Represents a group of modules, the group can be displayed in tabs,
    accordion, or just stacked (default).
    As well as the :class:`~admin_tools.dashboard.modules.DashboardModule`
    properties, the :class:`~admin_tools.dashboard.modules.Group`
    has two extra properties:
    ``display``
        A string determining how the group should be rendered, this can be one
        of the following values: 'tabs' (default), 'accordion' or 'stacked'.
    ``force_show_title``
        Default behaviour for Group module is to force children to always show
        the title if Group has ``display`` = ``stacked``. If this flag is set
        to ``False``, children title is shown according to their``show_title``
        property. Note that in this case is children responsibility to have
        meaningful content if no title is shown.
    Here's an example of modules group::
        from admin_tools.dashboard import modules, Dashboard
        class MyDashboard(Dashboard):
            def __init__(self, **kwargs):
                Dashboard.__init__(self, **kwargs)
                self.children.append(modules.Group(
                    title="My group",
                    display="tabs",
                    children=[
                        modules.AppList(
                            title='Administration',
                            models=('django.contrib.*',)
                        ),
                        modules.AppList(
                            title='Applications',
                            exclude=('django.contrib.*',)
                        )
                    ]
                ))
    The screenshot of what this code produces:
    .. image:: images/dashboard_module_group.png
    """
    force_show_title = True
    template = 'admin_tools/dashboard/modules/group.html'
    display = 'tabs'
    def init_with_context(self, context):
        """Initialize each child module with the given template context."""
        if self._initialized:
            return
        for module in self.children:
            # to simplify the whole stuff, modules have some limitations,
            # they cannot be dragged, collapsed or closed
            module.collapsible = False
            module.draggable = False
            module.deletable = False
            if self.force_show_title:
                # Children only show their own titles in 'stacked' display.
                module.show_title = (self.display == 'stacked')
            module.init_with_context(context)
        self._initialized = True
    def is_empty(self):
        """
        A group of modules is considered empty if it has no children or if
        all its children are empty.
        >>> from admin_tools.dashboard.modules import DashboardModule, LinkList
        >>> mod = Group()
        >>> mod.is_empty()
        True
        >>> mod.children.append(DashboardModule())
        >>> mod.is_empty()
        True
        >>> mod.children.append(LinkList('links', children=[
        ...    {'title': 'example1', 'url': 'http://example.com'},
        ...    {'title': 'example2', 'url': 'http://example.com'},
        ... ]))
        >>> mod.is_empty()
        False
        """
        if super(Group, self).is_empty():
            return True
        for child in self.children:
            if not child.is_empty():
                return False
        return True
    def _prepare_children(self):
        # computes ids for children: generates them if they are not set
        # and then prepends them with this group's id
        seen = set()
        for id, module in enumerate(self.children):
            proposed_id = "%s_%s" % (self.id, module.id or id+1)
            module.id = uniquify(proposed_id, seen)
            module._prepare_children()
class LinkList(DashboardModule):
    """
    A module that displays a list of links.
    As well as the :class:`~admin_tools.dashboard.modules.DashboardModule`
    properties, the :class:`~admin_tools.dashboard.modules.LinkList` takes
    an extra keyword argument:
    ``layout``
        The layout of the list, possible values are ``stacked`` and ``inline``.
        The default value is ``stacked``.
    Link list modules children are simple python dictionaries that can have the
    following keys:
    ``title``
        The link title.
    ``url``
        The link URL.
    ``external``
        Boolean that indicates whether the link is an external one or not.
    ``description``
        A string describing the link, it will be the ``title`` attribute of
        the html ``a`` tag.
    ``attrs``
        Hash comprising attributes of the html ``a`` tag.
    Children can also be iterables (lists or tuples) of length 2, 3, 4 or 5.
    Here's a small example of building a link list module::
        from admin_tools.dashboard import modules, Dashboard
        class MyDashboard(Dashboard):
            def __init__(self, **kwargs):
                Dashboard.__init__(self, **kwargs)
                self.children.append(modules.LinkList(
                    layout='inline',
                    children=(
                        {
                            'title': 'Python website',
                            'url': 'http://www.python.org',
                            'external': True,
                            'description': 'Python programming language rocks !',
                            'attrs': {'target': '_blank'},
                        },
                        ['Django website', 'http://www.djangoproject.com', True],
                        ['Some internal link', '/some/internal/link/'],
                    )
                ))
    The screenshot of what this code produces:
    .. image:: images/linklist_dashboard_module.png
    """
    title = _('Links')
    template = 'admin_tools/dashboard/modules/link_list.html'
    layout = 'stacked'
    def init_with_context(self, context):
        """Normalize children into dicts with pre-rendered ``a``-tag attributes."""
        if self._initialized:
            return
        new_children = []
        for link in self.children:
            if isinstance(link, (tuple, list,)):
                # Positional form: (title, url[, external[, description[, attrs]]]).
                link_dict = {'title': link[0], 'url': link[1]}
                if len(link) >= 3:
                    link_dict['external'] = link[2]
                if len(link) >= 4:
                    link_dict['description'] = link[3]
                if len(link) >= 5:
                    link_dict['attrs'] = link[4]
                link = link_dict
            if 'attrs' not in link:
                link['attrs'] = {}
            link['attrs']['href'] = link['url']
            if link.get('description', ''):
                link['attrs']['title'] = link['description']
            if link.get('external', False):
                # Prepend the external-link marker class, keeping any
                # user-supplied classes.
                link['attrs']['class'] = ' '.join(['external-link']
                    + link['attrs'].get('class', '').split(' ')).strip()
            # Flatten the attrs dict into an html attribute string.
            link['attrs'] = flatatt(link['attrs'])
            new_children.append(link)
        self.children = new_children
        self._initialized = True
class AppList(DashboardModule, AppListElementMixin):
    """
    Module that lists installed apps and their models.
    As well as the :class:`~admin_tools.dashboard.modules.DashboardModule`
    properties, the :class:`~admin_tools.dashboard.modules.AppList`
    has two extra properties:
    ``models``
        A list of models to include, only models whose name (e.g.
        "blog.comments.models.Comment") match one of the strings (e.g. "blog.*")
        in the models list will appear in the dashboard module.
    ``exclude``
        A list of models to exclude, if a model name (e.g.
        "blog.comments.models.Comment") match an element of this list (e.g.
        "blog.comments.*") it won't appear in the dashboard module.
    If no models/exclude list is provided, **all apps** are shown.
    Here's a small example of building an app list module::
        from admin_tools.dashboard import modules, Dashboard
        class MyDashboard(Dashboard):
            def __init__(self, **kwargs):
                Dashboard.__init__(self, **kwargs)
                # will only list the django.contrib apps
                self.children.append(modules.AppList(
                    title='Administration',
                    models=('django.contrib.*',)
                ))
                # will list all apps except the django.contrib ones
                self.children.append(modules.AppList(
                    title='Applications',
                    exclude=('django.contrib.*',)
                ))
    The screenshot of what this code produces:
    .. image:: images/applist_dashboard_module.png
    .. note::
        Note that this module takes into account user permissions, for
        example, if a user has no rights to change or add a ``Group``, then
        the django.contrib.auth.Group model line will not be displayed.
    """
    title = _('Applications')
    template = 'admin_tools/dashboard/modules/app_list.html'
    models = None
    exclude = None
    include_list = None
    exclude_list = None
    def __init__(self, title=None, **kwargs):
        """Pop the model filtering options before delegating to the base class."""
        self.models = list(kwargs.pop('models', []))
        self.exclude = list(kwargs.pop('exclude', []))
        self.include_list = kwargs.pop('include_list', []) # deprecated
        self.exclude_list = kwargs.pop('exclude_list', []) # deprecated
        super(AppList, self).__init__(title, **kwargs)
    def init_with_context(self, context):
        """Group the user's visible models by app and store them as children."""
        if self._initialized:
            return
        items = self._visible_models(context['request'])
        apps = {}
        for model, perms in items:
            app_label = model._meta.app_label
            if app_label not in apps:
                apps[app_label] = {
                    'title': django_apps.get_app_config(app_label).verbose_name,
                    'url': self._get_admin_app_list_url(model, context),
                    'models': []
                }
            model_dict = {}
            model_dict['title'] = model._meta.verbose_name_plural
            # Only expose the urls the current user has permission to use.
            if perms['change']:
                model_dict['change_url'] = self._get_admin_change_url(model, context)
            if perms['add']:
                model_dict['add_url'] = self._get_admin_add_url(model, context)
            apps[app_label]['models'].append(model_dict)
        for app in sorted(apps.keys()):
            # sort model list alphabetically
            apps[app]['models'].sort(key=lambda x: x['title'])
            self.children.append(apps[app])
        self._initialized = True
class ModelList(DashboardModule, AppListElementMixin):
    """
    Module that lists a set of models.
    As well as the :class:`~admin_tools.dashboard.modules.DashboardModule`
    properties, the :class:`~admin_tools.dashboard.modules.ModelList` takes
    two extra arguments:
    ``models``
        A list of models to include, only models whose name (e.g.
        "blog.comments.models.Comment") match one of the strings (e.g. "blog.*")
        in the models list will appear in the dashboard module.
    ``exclude``
        A list of models to exclude, if a model name (e.g.
        "blog.comments.models.Comment") match an element of this list (e.g.
        "blog.comments.*") it won't appear in the dashboard module.
    Here's a small example of building a model list module::
        from admin_tools.dashboard import modules, Dashboard
        class MyDashboard(Dashboard):
            def __init__(self, **kwargs):
                Dashboard.__init__(self, **kwargs)
                # will only list the django.contrib.auth models
                self.children += [
                    modules.ModelList(
                        title='Authentication',
                        models=['django.contrib.auth.*',]
                    )
                ]
    The screenshot of what this code produces:
    .. image:: images/modellist_dashboard_module.png
    .. note::
        Note that this module takes into account user permissions, for
        example, if a user has no rights to change or add a ``Group``, then
        the django.contrib.auth.Group model line will not be displayed.
    """
    template = 'admin_tools/dashboard/modules/model_list.html'
    models = None
    exclude = None
    include_list = None
    exclude_list = None
    def __init__(self, title=None, models=None, exclude=None, **kwargs):
        """Store the model filters and optional ``extra`` url entries."""
        self.models = list(models or [])
        self.exclude = list(exclude or [])
        self.include_list = kwargs.pop('include_list', []) # deprecated
        self.exclude_list = kwargs.pop('exclude_list', []) # deprecated
        if 'extra' in kwargs:
            self.extra = kwargs.pop('extra')
        else:
            self.extra = []
        super(ModelList, self).__init__(title, **kwargs)
    def init_with_context(self, context):
        """Fill children with the user's visible models plus any extra urls."""
        if self._initialized:
            return
        items = self._visible_models(context['request'])
        if not items:
            return
        for model, perms in items:
            model_dict = {}
            model_dict['title'] = model._meta.verbose_name_plural
            # Only expose the urls the current user has permission to use.
            if perms['change']:
                model_dict['change_url'] = self._get_admin_change_url(model, context)
            if perms['add']:
                model_dict['add_url'] = self._get_admin_add_url(model, context)
            self.children.append(model_dict)
        if self.extra:
            # TODO - permissions support
            for extra_url in self.extra:
                model_dict = {}
                model_dict['title'] = extra_url['title']
                model_dict['change_url'] = extra_url['change_url']
                model_dict['add_url'] = extra_url.get('add_url', None)
                self.children.append(model_dict)
        self._initialized = True
class RecentActions(DashboardModule):
    """
    Module that lists the recent actions for the current user.
    As well as the :class:`~admin_tools.dashboard.modules.DashboardModule`
    properties, the :class:`~admin_tools.dashboard.modules.RecentActions`
    takes three extra keyword arguments:
    ``include_list``
        A list of contenttypes (e.g. "auth.group" or "sites.site") to include,
        only recent actions that match the given contenttypes will be
        displayed.
    ``exclude_list``
        A list of contenttypes (e.g. "auth.group" or "sites.site") to exclude,
        recent actions that match the given contenttypes will not be
        displayed.
    ``limit``
        The maximum number of children to display. Default value: 10.
    Here's a small example of building a recent actions module::
        from admin_tools.dashboard import modules, Dashboard
        class MyDashboard(Dashboard):
            def __init__(self, **kwargs):
                Dashboard.__init__(self, **kwargs)
                # will only list the django.contrib apps
                self.children.append(modules.RecentActions(
                    title='Django CMS recent actions',
                    include_list=('cms.page', 'cms.cmsplugin',)
                ))
    The screenshot of what this code produces:
    .. image:: images/recentactions_dashboard_module.png
    """
    title = _('Recent Actions')
    template = 'admin_tools/dashboard/modules/recent_actions.html'
    limit = 10
    include_list = None
    exclude_list = None
    def __init__(self, title=None, limit=10, include_list=None,
                 exclude_list=None, **kwargs):
        """Store the contenttype filters and pass ``limit`` to the base class."""
        self.include_list = include_list or []
        self.exclude_list = exclude_list or []
        kwargs.update({'limit': limit})
        super(RecentActions, self).__init__(title, **kwargs)
    def init_with_context(self, context):
        """Query the current user's recent LogEntry rows as module children."""
        if self._initialized:
            return
        from django.db.models import Q
        from django.contrib.admin.models import LogEntry
        request = context['request']
        def get_qset(contenttypes):
            # Build a Q object matching any of the given content types.
            # (The parameter used to be named ``list``, shadowing the builtin.)
            # Import this here to silence RemovedInDjango19Warning. See #15
            from django.contrib.contenttypes.models import ContentType
            qset = None
            for contenttype in contenttypes:
                if isinstance(contenttype, ContentType):
                    current_qset = Q(content_type__id=contenttype.id)
                else:
                    try:
                        app_label, model = contenttype.split('.')
                    except (ValueError, AttributeError):
                        # Not an "app_label.model" string; was a bare except
                        # that also swallowed KeyboardInterrupt/SystemExit.
                        raise ValueError('Invalid contenttype: "%s"' % contenttype)
                    current_qset = Q(
                        content_type__app_label=app_label,
                        content_type__model=model
                    )
                if qset is None:
                    qset = current_qset
                else:
                    qset = qset | current_qset
            return qset
        if request.user is None:
            qs = LogEntry.objects.all()
        else:
            qs = LogEntry.objects.filter(user__id__exact=request.user.id)
        if self.include_list:
            qs = qs.filter(get_qset(self.include_list))
        if self.exclude_list:
            qs = qs.exclude(get_qset(self.exclude_list))
        self.children = qs.select_related('content_type', 'user')[:self.limit]
        if not len(self.children):
            self.pre_content = _('No recent actions.')
        self._initialized = True
class Feed(DashboardModule):
    """
    Class that represents a feed dashboard module.
    .. important::
        This class uses the
        `Universal Feed Parser module <http://www.feedparser.org/>`_ to parse
        the feeds, so you'll need to install it, all feeds supported by
        FeedParser are thus supported by the Feed
    As well as the :class:`~admin_tools.dashboard.modules.DashboardModule`
    properties, the :class:`~admin_tools.dashboard.modules.Feed` takes two
    extra keyword arguments:
    ``feed_url``
        The URL of the feed.
    ``limit``
        The maximum number of feed children to display. Default value: None,
        which means that all children are displayed.
    Here's a small example of building a recent actions module::
        from admin_tools.dashboard import modules, Dashboard
        class MyDashboard(Dashboard):
            def __init__(self, **kwargs):
                Dashboard.__init__(self, **kwargs)
                # will only list the django.contrib apps
                self.children.append(modules.Feed(
                    title=_('Latest Django News'),
                    feed_url='http://www.djangoproject.com/rss/weblog/',
                    limit=5
                ))
    The screenshot of what this code produces:
    .. image:: images/feed_dashboard_module.png
    """
    title = _('RSS Feed')
    template = 'admin_tools/dashboard/modules/feed.html'
    feed_url = None
    limit = None
    def __init__(self, title=None, feed_url=None, limit=None, **kwargs):
        kwargs.update({'feed_url': feed_url, 'limit': limit})
        super(Feed, self).__init__(title, **kwargs)
    def init_with_context(self, context):
        """Parse the configured feed and fill ``children`` with its entries."""
        if self._initialized:
            return
        import datetime
        if self.feed_url is None:
            raise ValueError('You must provide a valid feed URL')
        try:
            import feedparser
        except ImportError:
            # NOTE(review): this title is not wrapped in _() like the other
            # user-facing strings -- confirm whether it should be translated.
            self.children.append({
                'title': ('You must install the FeedParser python module'),
                'warning': True,
            })
            return
        feed = feedparser.parse(self.feed_url)
        if self.limit is not None:
            entries = feed['entries'][:self.limit]
        else:
            entries = feed['entries']
        for entry in entries:
            entry.url = entry.link
            try:
                entry.date = datetime.date(*entry.published_parsed[0:3])
            except (AttributeError, KeyError, TypeError, ValueError):
                # no date for certain feeds; was a bare except that also
                # swallowed KeyboardInterrupt/SystemExit
                pass
            self.children.append(entry)
        self._initialized = True
|
|
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import datetime
from dateutil.parser import parse
from decimal import Decimal
import sys
import uuid
from amaascore.error_messages import ERROR_LOOKUP
from amaascore.exceptions import TransactionNeedsSaving
from amaascore.core.amaas_model import AMaaSModel
from amaascore.transactions.children import Charge, Code, Comment, Link, Party, Rate, Reference
from amaascore.transactions.enums import TRANSACTION_ACTIONS, TRANSACTION_STATUSES, TRANSACTION_TYPES
# Python 2/3 compatibility shim: the set of "string-like" types used for
# isinstance() checks differs between versions (py2 also has ``unicode``).
type_check = str if sys.version_info.major >= 3 else (str, unicode)
class Transaction(AMaaSModel):
    """
    A transaction between two books for a quantity of an asset at a price.

    Child collections (charges, codes, comments, links, parties, rates,
    references) are dicts of typed child objects keyed by their type name.
    """
    @staticmethod
    def children():
        """ A dict of which of the attributes are collections of other objects, and what type """
        return {'charges': Charge, 'codes': Code, 'comments': Comment, 'links': Link, 'parties': Party,
                'rates': Rate, 'references': Reference}
    def __init__(self, asset_manager_id, asset_book_id, counterparty_book_id, transaction_action, asset_id, quantity,
                 transaction_date, settlement_date, price, transaction_currency, settlement_currency=None,
                 asset=None, execution_time=None, transaction_type='Trade', transaction_id=None,
                 transaction_status='New', charges=None, codes=None, comments=None, links=None, parties=None,
                 rates=None, references=None, *args, **kwargs):
        """
        :param asset_manager_id:
        :param asset_book_id:
        :param counterparty_book_id:
        :param transaction_action:
        :param asset_id:
        :param quantity:
        :param transaction_date:
        :param settlement_date:
        :param price:
        :param transaction_currency:
        :param settlement_currency: The currency in which the transaction will be settled.  Defaults to the
        transaction_currency if not specified.
        :param asset:
        :param execution_time:
        :param transaction_type:
        :param transaction_id:
        :param transaction_status:
        :param charges:
        :param codes:
        :param comments:
        :param links:
        :param parties:
        :param rates:
        :param references:
        :param args:
        :param kwargs:
        """
        self.transaction_id = transaction_id or uuid.uuid4().hex
        self.asset_manager_id = asset_manager_id
        self.asset_book_id = asset_book_id
        self.counterparty_book_id = counterparty_book_id
        self.transaction_action = transaction_action
        self.asset_id = asset_id  # This is duplicated on the child asset.  Remove?
        self.quantity = quantity
        self.transaction_date = transaction_date
        self.settlement_date = settlement_date
        self.price = price
        self.transaction_currency = transaction_currency
        self.settlement_currency = settlement_currency or transaction_currency
        self.transaction_type = transaction_type
        self.transaction_status = transaction_status
        # Cannot be in method signature or the value gets bound to the constructor call
        self.execution_time = execution_time or datetime.datetime.utcnow()
        # Defaults are here not in constructor for mutability reasons.
        self.charges = charges.copy() if charges else {}
        self.codes = codes.copy() if codes else {}
        self.comments = comments.copy() if comments else {}
        self.links = links.copy() if links else {}
        self.parties = parties.copy() if parties else {}
        self.rates = rates.copy() if rates else {}
        self.references = references.copy() if references else {}
        self.references['AMaaS'] = Reference(reference_value=self.transaction_id)  # Upserts the AMaaS Reference
        self.postings = []
        self.asset = asset
        super(Transaction, self).__init__(*args, **kwargs)
    @property
    def quantity(self):
        return self._quantity
    @quantity.setter
    def quantity(self, value):
        """
        Force the quantity to always be a decimal
        :param value:
        :return:
        """
        self._quantity = Decimal(value)
    @property
    def price(self):
        return self._price
    @price.setter
    def price(self, value):
        """
        Force the price to always be a decimal
        :param value:
        :return:
        """
        self._price = Decimal(value)
    @property
    def transaction_currency(self):
        return self._transaction_currency
    @transaction_currency.setter
    def transaction_currency(self, transaction_currency):
        # Currencies must be 3-character ISO-style codes.
        if transaction_currency and len(transaction_currency) == 3:
            self._transaction_currency = transaction_currency
        else:
            raise ValueError(ERROR_LOOKUP['currency_invalid'] % (transaction_currency,
                                                                 self.transaction_id,
                                                                 self.asset_manager_id))
    @property
    def settlement_currency(self):
        return self._settlement_currency
    @settlement_currency.setter
    def settlement_currency(self, settlement_currency):
        # Currencies must be 3-character ISO-style codes.
        if settlement_currency and len(settlement_currency) == 3:
            self._settlement_currency = settlement_currency
        else:
            raise ValueError(ERROR_LOOKUP['currency_invalid'] % (settlement_currency,
                                                                 self.transaction_id,
                                                                 self.asset_manager_id))
    @property
    def transaction_date(self):
        return self._transaction_date
    @transaction_date.setter
    def transaction_date(self, value):
        """
        Force the transaction_date to always be a date
        :param value:
        :return:
        """
        if value:
            self._transaction_date = parse(value).date() if isinstance(value, type_check) else value
    @property
    def settlement_date(self):
        return self._settlement_date
    @settlement_date.setter
    def settlement_date(self, value):
        """
        Force the settlement_date to always be a date
        :param value:
        :return:
        """
        if value:
            self._settlement_date = parse(value).date() if isinstance(value, type_check) else value
    @property
    def execution_time(self):
        return self._execution_time
    @execution_time.setter
    def execution_time(self, value):
        """
        Force the execution_time to always be a datetime
        :param value:
        :return:
        """
        if value:
            self._execution_time = parse(value) if isinstance(value, type_check) else value
    @property
    def gross_settlement(self):
        # Bug fix: this previously returned ``self.__gross_settlement``, which
        # name-mangles to ``_Transaction__gross_settlement`` and raised
        # AttributeError whenever an explicit gross settlement had been set.
        if hasattr(self, '_gross_settlement'):
            return self._gross_settlement
        return self.quantity * self.price
    @gross_settlement.setter
    def gross_settlement(self, gross_settlement):
        """
        :param gross_settlement:
        :return:
        """
        if gross_settlement:
            self._gross_settlement = Decimal(gross_settlement)
    @property
    def net_settlement(self):
        if hasattr(self, '_net_settlement'):
            return self._net_settlement
        return self.gross_settlement - self.charges_net_effect()
    @net_settlement.setter
    def net_settlement(self, net_settlement):
        """
        :param net_settlement:
        :return:
        """
        if net_settlement:
            self._net_settlement = Decimal(net_settlement)
    @property
    def transaction_action(self):
        if hasattr(self, '_transaction_action'):
            return self._transaction_action
    @transaction_action.setter
    def transaction_action(self, transaction_action):
        """
        :param transaction_action: The action that this transaction is recording - e.g. Buy, Deliver
        :return:
        """
        if transaction_action not in TRANSACTION_ACTIONS:
            raise ValueError(ERROR_LOOKUP.get('transaction_action_invalid') % (transaction_action, self.transaction_id,
                                                                               self.asset_manager_id))
        else:
            self._transaction_action = transaction_action
    @property
    def transaction_status(self):
        if hasattr(self, '_transaction_status'):
            return self._transaction_status
    @transaction_status.setter
    def transaction_status(self, transaction_status):
        """
        :param transaction_status: The status of the transaction - e.g. New, Netted
        :return:
        """
        if transaction_status not in TRANSACTION_STATUSES:
            raise ValueError(ERROR_LOOKUP.get('transaction_status_invalid') % (transaction_status, self.transaction_id,
                                                                               self.asset_manager_id))
        else:
            self._transaction_status = transaction_status
    @property
    def transaction_type(self):
        if hasattr(self, '_transaction_type'):
            return self._transaction_type
    @transaction_type.setter
    def transaction_type(self, transaction_type):
        """
        :param transaction_type: The type of transaction that we are recording - e.g. Trade, Payment, Coupon
        :return:
        """
        if transaction_type not in TRANSACTION_TYPES:
            raise ValueError(ERROR_LOOKUP.get('transaction_type_invalid') % (transaction_type, self.transaction_id,
                                                                             self.asset_manager_id))
        else:
            self._transaction_type = transaction_type
    def charges_net_effect(self):
        """
        The total effect of the net_affecting charges (note affect vs effect here).
        Currently this is single currency only (AMAAS-110).
        Cast to Decimal in case the result is zero (no net_affecting charges).
        :return:
        """
        return Decimal(sum([charge.charge_value for charge in self.charges.values()
                            if charge.net_affecting]))
    def charge_types(self):
        """
        TODO - are these helper functions useful?
        :return:
        """
        return self.charges.keys()
    def code_types(self):
        """
        TODO - are these helper functions useful?
        :return:
        """
        return self.codes.keys()
    def rate_types(self):
        """
        TODO - are these helper functions useful?
        :return:
        """
        return self.rates.keys()
    def reference_types(self):
        """
        TODO - are these helper functions useful?
        :return:
        """
        return self.references.keys()
    def __str__(self):
        return "Transaction object - ID: %s" % self.transaction_id
    @property
    def postings(self):
        if hasattr(self, '_postings'):
            return self._postings
        else:
            raise TransactionNeedsSaving
    @postings.setter
    def postings(self, postings):
        """
        TODO - when do we save this from AMaaS Core?
        :param postings:
        :return:
        """
        if postings:
            self._postings = postings
    # Upsert methods for safely adding children - this is more important for cases where we trigger action when there
    # is a change, e.g. for the case of a @property on the collection.  Since we don't have that case yet for
    # transactions, I have not yet filled out all of these.
    def upsert_code(self, code_type, code):
        """Add or replace the code stored under ``code_type``."""
        codes = copy.deepcopy(self.codes)
        codes.update({code_type: code})
        self.codes = codes
    def upsert_link_set(self, link_type, link_set):
        """
        Remove an item altogether by setting link_set to None.
        Currently, only links can contain multiple children of the same type.
        :param link_type:
        :param link_set:
        :return:
        """
        if link_set is None:
            self.links.pop(link_type, None)
            return
        links = copy.deepcopy(self.links)
        links.update({link_type: link_set})
        self.links = links
    def add_link(self, link_type, linked_transaction_id):
        """Add a link to another transaction under ``link_type``."""
        new_link = Link(linked_transaction_id=linked_transaction_id)
        link_set = self.links.get(link_type)
        if link_set:
            # Promote a single link to a set before adding the new one.
            if not isinstance(link_set, set):
                link_set = {link_set}
            link_set.add(new_link)
        else:
            link_set = new_link
        self.upsert_link_set(link_type=link_type, link_set=link_set)
    def remove_link(self, link_type, linked_transaction_id):
        """Remove the link to ``linked_transaction_id``; raise KeyError if absent."""
        link_set = self.links.get(link_type)
        if not link_set:
            raise KeyError(ERROR_LOOKUP.get('transaction_link_not_found'))
        if isinstance(link_set, Link):
            if link_set.linked_transaction_id == linked_transaction_id:
                link_set = None
            else:
                raise KeyError(ERROR_LOOKUP.get('transaction_link_not_found'))
        else:
            output = [link for link in link_set if link.linked_transaction_id == linked_transaction_id]
            if output:
                link_set.remove(output[0])
            else:
                raise KeyError(ERROR_LOOKUP.get('transaction_link_not_found'))
        self.upsert_link_set(link_type=link_type, link_set=link_set)
|
|
import contextlib
import json
import os
import re
import shutil
import tempfile
import time
import dcoscli
from dcos import util
from .helpers.common import (assert_command, exec_command, file_json_ast,
zip_contents_as_json)
from .helpers.marathon import watch_all_deployments
# Prefix of every CLI invocation exercised by these tests.
command_base = ['dcos', 'experimental']
# Test fixtures live under tests/data; package-build fixtures in a subdir.
data_dir = os.path.join(os.getcwd(), 'tests', 'data')
build_data_dir = os.path.join(data_dir, 'package_build')
def runnable_package_path(index):
    """Path to the indexed helloworld build definition fixture."""
    filename = 'helloworld{}.json'.format(index)
    return os.path.join(build_data_dir, 'helloworld', filename)
def test_experimental():
    """`dcos experimental --help` prints the canned help text."""
    with open('dcoscli/data/help/experimental.txt') as content:
        expected = content.read().encode()
    assert_command(command_base + ['--help'], stdout=expected)
def test_info():
    """`dcos experimental --info` prints the one-line summary."""
    expected = b'Manage commands that are under development\n'
    assert_command(command_base + ['--info'], stdout=expected)
def test_version():
    """`dcos experimental --version` reports the snapshot version."""
    expected = b'dcos-experimental version SNAPSHOT\n'
    assert_command(command_base + ['--version'], stdout=expected)
def test_package_build_with_only_resources():
    """Build succeeds for a definition embedding only resources."""
    definition = os.path.join(build_data_dir,
                              "package_resource_only_reference.json")
    expected = os.path.join(build_data_dir,
                            "package_resource_only_reference_expected.json")
    _successful_package_build_test(definition, expected_package_path=expected)
def test_package_build_with_only_config_with_no_references():
    """Building an already-resolved config definition is a fixed point."""
    resolved = os.path.join(build_data_dir,
                            "package_config_reference_expected.json")
    _successful_package_build_test(resolved, expected_package_path=resolved)
def test_package_build_with_only_config():
    """Build resolves a config reference into the expected package."""
    definition = os.path.join(build_data_dir,
                              "package_config_reference.json")
    expected = os.path.join(build_data_dir,
                            "package_config_reference_expected.json")
    _successful_package_build_test(definition, expected_package_path=expected)
def test_package_build_with_only_marathon():
    """Build resolves a marathon reference into the expected package."""
    definition = os.path.join(build_data_dir,
                              "package_marathon_reference.json")
    expected = os.path.join(build_data_dir,
                            "package_marathon_reference_expected.json")
    _successful_package_build_test(definition, expected_package_path=expected)
def test_package_build_with_only_resources_reference():
    """Build succeeds with only a resource reference (default expectation)."""
    definition = os.path.join(build_data_dir,
                              "package_resource_reference.json")
    _successful_package_build_test(definition)
def test_package_build_with_no_references():
    """Build succeeds for a fully self-contained definition."""
    definition = os.path.join(build_data_dir, "package_no_references.json")
    _successful_package_build_test(definition)
def test_package_build_with_all_references():
    """Build succeeds when every section is an external reference."""
    definition = os.path.join(build_data_dir, "package_all_references.json")
    _successful_package_build_test(definition)
def test_package_build_with_all_references_json():
    """Same as the all-references build, but requesting JSON output."""
    definition = os.path.join(build_data_dir, "package_all_references.json")
    _successful_package_build_test(definition, expects_json=True)
def test_package_build_where_build_definition_does_not_exist():
    """A missing build definition yields a clear error and no artifacts."""
    with _temporary_directory() as output_directory:
        missing = os.path.join(build_data_dir, "does_not_exist.json")
        expected_err = ("The file [{}] does not exist\n"
                        .format(missing)
                        .encode())
        _package_build_failure(missing, output_directory,
                               stderr=expected_err)
def test_package_build_where_project_is_missing_references():
    """A definition pointing at an absent marathon.json fails cleanly."""
    with _temporary_directory() as output_directory:
        definition = os.path.join(build_data_dir,
                                  "package_missing_references.json")
        missing_marathon = os.path.join(build_data_dir, "marathon.json")
        expected_err = ("Error opening file [{}]: No such file or directory\n"
                        .format(missing_marathon)
                        .encode())
        _package_build_failure(definition, output_directory,
                               stderr=expected_err)
def test_package_build_where_reference_does_not_match_schema():
    """A referenced file violating its schema aborts the build."""
    with _temporary_directory() as output_directory:
        definition = os.path.join(
            build_data_dir, "package_reference_does_not_match_schema.json")
        bad_resource = os.path.join(build_data_dir, "resource-bad.json")
        expected_err = ("Error validating package: "
                        "[{}] does not conform to the specified schema\n"
                        .format(bad_resource)
                        .encode())
        _package_build_failure(definition, output_directory,
                               stderr=expected_err)
def test_package_build_where_build_definition_does_not_match_schema():
    """A definition violating the build-definition schema is rejected."""
    with _temporary_directory() as output_directory:
        bad_definition = os.path.join(build_data_dir,
                                      "package_no_match_schema.json")
        expected_err = ("Error validating package: "
                        "[{}] does not conform to the specified schema\n"
                        .format(bad_definition)
                        .encode())
        _package_build_failure(bad_definition, output_directory,
                               stderr=expected_err)
def test_package_build_where_build_definition_has_badly_formed_reference():
    """A malformed reference is caught by schema validation."""
    with _temporary_directory() as output_directory:
        bad_definition = os.path.join(build_data_dir,
                                      "package_badly_formed_reference.json")
        expected_err = ("Error validating package: "
                        "[{}] does not conform to the specified schema\n"
                        .format(bad_definition)
                        .encode())
        _package_build_failure(bad_definition, output_directory,
                               stderr=expected_err)
def test_package_add_argument_exclussion():
    """--dcos-package and --package-version are mutually exclusive."""
    # NOTE(review): "exclussion" is a typo, but renaming would change the
    # collected pytest id, so the name is kept.
    command = command_base + ['package', 'add',
                              '--dcos-package', runnable_package_path(1),
                              '--package-version', '3.0']
    code, out, err = exec_command(command)
    assert code == 1
    assert err == b''
    assert 'Command not recognized' in out.decode()
def test_service_start_happy_path():
    """Build, add, start and finally stop a runnable package."""
    with _temporary_directory() as output_directory:
        package = _package_build(runnable_package_path(2), output_directory)
        name, version = _package_add(package)
        try:
            _service_start(name, version)
        finally:
            _service_stop(name)
def test_service_start_happy_path_json():
    """Happy path again, with JSON output for both add and start."""
    with _temporary_directory() as output_directory:
        package = _package_build(runnable_package_path(3), output_directory)
        name, version = _package_add(package, expects_json=True)
        try:
            _service_start(name, version, expects_json=True)
        finally:
            _service_stop(name)
def test_service_start_happy_path_from_universe():
    """Start/stop a package added straight from the Universe."""
    name, version = _package_add_universe('hello-world')
    try:
        _service_start(name, version)
    finally:
        _service_stop(name)
def test_service_start_happy_path_from_universe_json():
    """Universe add with JSON output, then start/stop."""
    name, version = _package_add_universe('cassandra', expects_json=True)
    try:
        _service_start(name, version)
    finally:
        _service_stop(name)
def test_service_start_by_starting_same_service_twice():
    """A second start of an already-running service is rejected."""
    name, version = _package_add_universe('kafka')
    try:
        _service_start(name, version)
        _service_start_failure(
            name, version,
            stderr=b'The DC/OS service has already been started\n')
    finally:
        _service_stop(name)
def test_service_start_by_starting_service_not_added():
    """Starting a never-added package reports a not-found error."""
    _service_start_failure('foo', stderr=b'Package [foo] not found\n')
def _service_stop_cmd(package_name):
return ['dcos', 'package', 'uninstall', package_name, '--yes']
def _service_list_cmd():
return ['dcos', 'package', 'list', '--json']
def _service_start_cmd(package_name,
                       package_version=None,
                       options=None,
                       json=False):
    """Assemble a `dcos experimental service start` invocation."""
    command = command_base + ['service', 'start']
    if json:
        command.append('--json')
    command.append(package_name)
    if package_version:
        command += ['--package-version', package_version]
    if options:
        command += ['--options', options]
    return command
def _package_add_cmd(dcos_package=None,
                     package_name=None,
                     package_version=None,
                     json=False):
    """Assemble a `dcos experimental package add` invocation."""
    command = command_base + ['package', 'add']
    if json:
        command.append('--json')
    if dcos_package:
        command += ['--dcos-package', dcos_package]
    if package_name:
        command += ['--package-name', package_name]
    if package_version:
        command += ['--package-version', package_version]
    return command
def _package_build_cmd(build_definition,
                       output_directory=None,
                       expects_json=False):
    """Assemble a `dcos experimental package build` invocation."""
    command = command_base + ['package', 'build']
    if expects_json:
        command.append('--json')
    if output_directory:
        command += ['--output-directory', output_directory]
    command.append(build_definition)
    return command
def _service_stop(package_name):
    """Uninstall the package and wait for Marathon to settle."""
    exec_command(_service_stop_cmd(package_name))
    watch_all_deployments()
def _service_list():
    """Return the parsed JSON list of installed packages."""
    code, out, err = exec_command(_service_list_cmd())
    assert code == 0
    assert err == b''
    return json.loads(out.decode())
def _service_start(package_name,
                   package_version,
                   options=None,
                   expects_json=False):
    """Start a service and assert the CLI reports success.

    The start command may fail while a just-added package is still being
    processed, so it is retried.  Fixes over the previous version: we no
    longer sleep 5s after a *successful* attempt, and the retry-cap assert
    no longer fires when the final allowed attempt actually succeeds.
    """
    command = _service_start_cmd(package_name,
                                 package_version,
                                 options,
                                 json=expects_json)
    max_retries = 10
    for attempt in range(max_retries + 1):
        code, out, err = exec_command(command)
        if code == 0:
            break
        assert attempt != max_retries, \
            'Waiting for package add to complete took too long'
        # Give the backend time to finish processing the added package.
        time.sleep(5)
    assert code == 0
    assert err == b''
    if expects_json:
        expected = {
            'packageName': package_name,
            'packageVersion': package_version
        }
        actual = json.loads(out.decode())
        # appId is generated server-side and varies per run; ignore it.
        actual.pop('appId')
        assert expected == actual, (expected, actual)
    else:
        stdout = 'The service [{}] version [{}] has been started\n'.format(
            package_name, package_version).encode()
        assert out == stdout, (out, stdout)
    running_services = _service_list()
    assert package_name in map(lambda pkg: pkg['name'], running_services)
def _service_start_failure(package_name,
                           package_version=None,
                           options=None,
                           return_code=1,
                           stdout=b'',
                           stderr=b''):
    """Run `service start` expecting it to fail with the given output."""
    failing_command = _service_start_cmd(package_name,
                                         package_version,
                                         options)
    assert_command(failing_command,
                   returncode=return_code,
                   stdout=stdout,
                   stderr=stderr)
def _package_add(package, expects_json=False):
    """Add a locally built .dcos package; return its (name, version)."""
    command = _package_add_cmd(dcos_package=package, json=expects_json)
    code, out, err = exec_command(command)
    assert code == 0
    assert err == b''
    if expects_json:
        metadata = json.loads(out.decode())
        # releaseVersion is assigned server-side; drop it before comparing.
        metadata.pop('releaseVersion')
        assert metadata == zip_contents_as_json(package, 'metadata.json')
    else:
        metadata = zip_contents_as_json(package, 'metadata.json')
        stdout = (
            'The package [{}] version [{}] has been added to DC/OS\n'.format(
                metadata['name'], metadata['version'])).encode()
        assert out == stdout, (out, stdout)
    return metadata['name'], metadata['version']
def _package_add_universe(package_name,
                          package_version=None,
                          expects_json=False):
    """Add *package_name* from the Universe; return (name, version).

    Asserts the CLI exit status, stderr, and (for text output) the exact
    success message.
    """
    command = _package_add_cmd(package_name=package_name,
                               package_version=package_version,
                               json=expects_json)
    code, out, err = exec_command(command)
    assert code == 0
    assert err == b''
    if expects_json:
        metadata = json.loads(out.decode())
        name = metadata['name']
        version = metadata['version']
    else:
        # FIX: raw string - the pattern previously used invalid "\[" escape
        # sequences, which are a DeprecationWarning on modern Pythons.
        name_version = re.search(r"\[(.*)\].*\[(.*)\]", out.decode())
        name = name_version.group(1)
        version = name_version.group(2)
        stdout = (
            'The package [{}] version [{}] has been added to DC/OS\n'.format(
                name, version)).encode()
        assert out == stdout, (out, stdout)
    assert name == package_name
    # Only check the version when one was explicitly requested.
    if package_version:
        assert version == package_version
    return name, version
def _package_build(build_definition_path,
                   output_directory,
                   metadata=None,
                   manifest=None,
                   expects_json=False):
    """Build a package and validate the produced .dcos artifact.

    Returns the path of the built package.  When *metadata* or *manifest*
    are provided, the embedded JSON files are checked against them.
    """
    command = _package_build_cmd(build_definition_path,
                                 output_directory,
                                 expects_json=expects_json)
    code, out, err = exec_command(command)
    assert code == 0
    assert err == b''
    out_str = out.decode()
    if expects_json:
        out_json = json.loads(out_str)
        assert out_json, out_str
        package_path = out_json.get('package_path')
    else:
        assert out_str.startswith("Created DC/OS Universe Package")
        # FIX: raw string - avoids the invalid "\[" escape sequence.
        package_path = re.search(r"\[(.*)\]", out_str).group(1)
    assert package_path, out_str
    assert os.path.exists(package_path)
    name, version, md5 = _decompose_name(package_path)
    build_definition = file_json_ast(build_definition_path)
    assert name == build_definition['name']
    assert version == build_definition['version']
    assert md5 == _get_md5_hash(package_path)
    assert (manifest is None or
            manifest == zip_contents_as_json(package_path, 'manifest.json'))
    assert (metadata is None or
            metadata == zip_contents_as_json(package_path, 'metadata.json'))
    return package_path
def _package_build_failure(build_definition_path,
                           output_directory,
                           return_code=1,
                           stdout=b'',
                           stderr=b''):
    """Run `package build` expecting failure; no artifacts may remain."""
    command = _package_build_cmd(build_definition_path, output_directory)
    assert_command(command,
                   returncode=return_code,
                   stdout=stdout,
                   stderr=stderr)
    # A failed build must not leave partial output behind.
    assert os.listdir(output_directory) == []
def _successful_package_build_test(
        build_definition_path,
        expected_package_path=os.path.join(
            build_data_dir,
            "package_no_references.json"),
        expects_json=False):
    """Build a definition and check metadata/manifest of the result."""
    with _temporary_directory() as output_directory:
        expected_metadata = file_json_ast(expected_package_path)
        expected_manifest = {
            'built-by': "dcoscli.version={}".format(dcoscli.version)
        }
        _package_build(build_definition_path,
                       output_directory,
                       metadata=expected_metadata,
                       manifest=expected_manifest,
                       expects_json=expects_json)
def _decompose_name(package_path):
parts = re.search(
'^([^-]+)-(.+)-([^-]+)\.dcos',
os.path.basename(package_path))
assert parts is not None, package_path
return parts.group(1), parts.group(2), parts.group(3)
def _get_md5_hash(path):
    """Return the md5 digest of the file at *path* (via dcos.util)."""
    with open(path, 'rb') as package_file:
        return util.md5_hash_file(package_file)
@contextlib.contextmanager
def _temporary_directory():
tmp_dir = tempfile.mkdtemp()
try:
yield tmp_dir
finally:
shutil.rmtree(tmp_dir)
|
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import errno
import itertools
import json
import multiprocessing
import os
import shutil
import subprocess
import sys
import traceback
# SDK-relative directories always searched for bare (non-framework)
# swiftinterfaces, in addition to the framework dirs from the command line.
BARE_INTERFACE_SEARCH_PATHS = [
    "usr/lib/swift",
    "System/iOSSupport/usr/lib/swift"
]
# SDK-relative framework directories used when none are given on the CLI.
DEFAULT_FRAMEWORK_INTERFACE_SEARCH_PATHS = [
    "System/Library/Frameworks",
    "System/iOSSupport/System/Library/Frameworks"
]
# The standard library is special-cased (built first, with -parse-stdlib).
STDLIB_NAME = 'Swift'
# Reported via -machine-parseable-monotonic-version for tool comparison.
MONOTONIC_VERSION = 1
def create_parser():
    """Build the argparse parser for this tool's command-line interface."""
    parser = argparse.ArgumentParser(
        description="Builds an SDK's swiftinterfaces into swiftmodules. "
                    "Always searches usr/lib/swift in addition to whichever "
                    "framework directories are passed on the command line.",
        prog=os.path.basename(__file__),
        usage='%(prog)s -o output/ [INTERFACE_SEARCH_DIRS]',
        epilog='Environment variables: SDKROOT, SWIFT_EXEC, '
               'SWIFT_FORCE_MODULE_LOADING')
    parser.add_argument('interface_framework_dirs', nargs='*',
                        metavar='INTERFACE_SEARCH_DIRS',
                        help='Relative paths to search for frameworks with '
                             'interfaces (default: System/Library/Frameworks)')
    # Required in practice; enforced manually in main() so that
    # -machine-parseable-monotonic-version can run without it.
    parser.add_argument('-o', dest='output_dir',
                        help='Directory to which the output will be emitted '
                             '(required)')
    parser.add_argument('-j', dest='jobs', type=int,
                        help='The number of parallel jobs to execute '
                             '(default: # of cores)')
    parser.add_argument('-v', dest='verbose', action='store_true',
                        help='Print command invocations and progress info')
    parser.add_argument('-n', dest='dry_run', action='store_true',
                        help='Dry run: don\'t actually run anything')
    parser.add_argument('-sdk', default=os.getenv('SDKROOT'),
                        help='SDK to find frameworks and interfaces in '
                             '(default: $SDKROOT)')
    parser.add_argument('-F', dest='framework_dirs', metavar='DIR',
                        action='append', default=[],
                        help='Add additional framework search paths')
    parser.add_argument('-Fsystem', '-iframework',
                        dest='system_framework_dirs', metavar='DIR',
                        action='append', default=[],
                        help='Add additional system framework search paths')
    parser.add_argument('-Fsystem-iosmac',
                        dest='iosmac_system_framework_dirs', metavar='DIR',
                        action='append', default=[],
                        help='Add system framework search paths '
                             'for iOSMac only')
    parser.add_argument('-I', dest='include_dirs', metavar='DIR',
                        action='append', default=[],
                        help='Add additional header/module search paths')
    parser.add_argument('-module-cache-path',
                        help='Temporary directory to store intermediate info')
    parser.add_argument('-log-path',
                        help='Directory to write stdout/stderr output to')
    parser.add_argument('-skip-stdlib', action='store_true',
                        help='Don\'t build the standard library interface')
    parser.add_argument('-disable-modules-validate-system-headers',
                        action='store_true',
                        help='Disable modules verification for system headers')
    parser.add_argument('-xfails', metavar='PATH',
                        help='JSON file containing an array of the modules '
                             'expected to fail')
    parser.add_argument('-check-only', action='store_true',
                        help='Assume the resulting modules will be thrown '
                             'away (may be faster)')
    parser.add_argument('-ignore-non-stdlib-failures', action='store_true',
                        help='Treat all modules but the stdlib as XFAILed')
    parser.add_argument('-debug-crash-compiler', action='store_true',
                        help='Have the compiler crash (for testing purposes)')
    parser.add_argument('-machine-parseable-monotonic-version',
                        action='store_true',
                        help='For comparing versions of this tool')
    return parser
def fatal(msg):
    """Print *msg* to stderr and abort the process with exit status 1."""
    print(msg, file=sys.stderr)
    sys.exit(1)
def run_command(args, dry_run):
    """Run *args*, returning (exitcode, stdout, stderr) with decoded text.

    In dry-run mode nothing is executed and an empty success result is
    returned.  On KeyboardInterrupt the child is terminated before the
    interrupt propagates.
    """
    if dry_run:
        return (0, "", "")
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    try:
        stdout_data, stderr_data = proc.communicate()
        return (proc.returncode,
                stdout_data.decode('utf-8'),
                stderr_data.decode('utf-8'))
    except KeyboardInterrupt:
        proc.terminate()
        raise
def make_dirs_if_needed(path, dry_run):
    """Create *path* and any missing parents; a pre-existing path is fine."""
    if dry_run:
        return
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # Someone (possibly another job) already created it - fine.
            return
        raise
class NegatedSet:
    """Membership test matching everything EXCEPT the given items."""

    def __init__(self, contents):
        # Items explicitly excluded from membership.
        self._excluded = frozenset(contents)

    def __contains__(self, item):
        return item not in self._excluded
class ModuleFile:
    """One .swiftinterface slice discovered for a module."""

    def __init__(self, name, path, is_expected_to_fail):
        # Module name, path to the interface file, and XFAIL status.
        self.name = name
        self.path = path
        self.is_expected_to_fail = is_expected_to_fail
def collect_slices(xfails, swiftmodule_dir):
    """Yield a ModuleFile for each .swiftinterface in *swiftmodule_dir*."""
    if not os.path.isdir(swiftmodule_dir):
        return
    basename = os.path.basename(swiftmodule_dir)
    module_name, extension = os.path.splitext(basename)
    assert extension == ".swiftmodule"
    is_xfail = module_name in xfails
    for entry in os.listdir(swiftmodule_dir):
        if os.path.splitext(entry)[1] != ".swiftinterface":
            continue
        yield ModuleFile(module_name,
                         os.path.join(swiftmodule_dir, entry),
                         is_xfail)
def collect_framework_modules(sdk, xfails, sdk_relative_framework_dirs):
    """Yield interface slices for every framework under the given SDK dirs."""
    for relative_dir in sdk_relative_framework_dirs:
        framework_dir = os.path.join(sdk, relative_dir)
        if not os.access(framework_dir, os.R_OK):
            continue
        for entry in os.listdir(framework_dir):
            stem, extension = os.path.splitext(entry)
            if extension != ".framework":
                continue
            module_name = os.path.basename(stem)
            swiftmodule = os.path.join(framework_dir, entry, "Modules",
                                       module_name + ".swiftmodule")
            if not os.access(swiftmodule, os.R_OK):
                continue
            for slice_file in collect_slices(xfails, swiftmodule):
                yield slice_file
def collect_non_framework_modules(sdk, xfails, sdk_relative_search_dirs):
    """Yield interface slices found by walking bare (non-framework) dirs."""
    for relative_dir in sdk_relative_search_dirs:
        search_dir = os.path.join(sdk, relative_dir)
        for dir_path, _, file_names in os.walk(search_dir, followlinks=True):
            if os.path.splitext(dir_path)[1] == ".swiftmodule":
                # A .swiftmodule directory containing per-arch slices.
                for slice_file in collect_slices(xfails, dir_path):
                    yield slice_file
                continue
            # Otherwise look for loose .swiftinterface files.
            for interface in file_names:
                module_name, extension = os.path.splitext(interface)
                if extension != ".swiftinterface":
                    continue
                yield ModuleFile(module_name,
                                 os.path.join(dir_path, interface),
                                 module_name in xfails)
def should_retry_compilation(stderr):
    """True if *stderr* matches a known transient module-cache failure."""
    transient_markers = (
        "has been modified since the module file",
        "mismatched umbrella headers in submodule",
        "is out of date and needs to be rebuilt: signature mismatch",
        "current parser token 'include'",
        "current parser token 'import'",
    )
    return any(marker in stderr for marker in transient_markers)
def run_with_module_cache_retry(command_args, module_cache_path, dry_run):
    """Hack: runs a command several times, clearing the module cache if we get
    an error about header files being modified during the run.
    This shouldn't be necessary (the cached PCM files should automatically be
    regenerated) but there seems to still be a bug in Clang that we haven't
    tracked down yet.

    Returns (status, stdout, stderr) from the last attempt; on repeated
    retryable failures, stderr contains the accumulated output of every
    attempt.
    """
    RETRIES = 3
    attempts_stderr = ""
    for r in range(RETRIES):
        status, stdout, stderr = run_command(command_args, dry_run)
        if status == 0:
            break
        if not should_retry_compilation(stderr):
            # Non-transient failure: report this attempt's stderr as-is.
            break
        if module_cache_path:
            shutil.rmtree(module_cache_path, ignore_errors=True)
        # If all retries fail, output information for each instance.
        attempts_stderr += (
            "\n*** Compilation attempt {}/{} failed with modules bugs. "
            "Error output:\n".format(r + 1, RETRIES))
        attempts_stderr += stderr
        # Replace stderr with the running log so the final (failing)
        # iteration returns the full history of attempts.
        stderr = attempts_stderr
    return (status, stdout, stderr)
def log_output_to_file(content, module_name, interface_base, label, log_path):
    """Write *content* to <log_path>/<module>-<interface>-<label>.txt.

    No-op when logging is disabled or there is nothing to write.
    """
    if not log_path or not content:
        return
    make_dirs_if_needed(log_path, dry_run=False)
    log_name = "-".join([module_name, interface_base, label]) + ".txt"
    with open(os.path.join(log_path, log_name), "w") as output_file:
        output_file.write(content)
def looks_like_iosmac(interface_base):
    """Heuristic: iOSMac interface basenames contain 'ios-macabi'."""
    marker = 'ios-macabi'
    return marker in interface_base
def process_module(module_file):
    """Build one interface slice into a swiftmodule.

    Runs in a pool worker; `args` and `shared_output_lock` are globals
    installed by set_up_child().  Returns (module_file, status, stdout,
    stderr) and never raises, so one bad module cannot kill the pool.
    """
    global args, shared_output_lock
    try:
        interface_base, _ = \
            os.path.splitext(os.path.basename(module_file.path))
        # Compiler override via SWIFT_EXEC; default to a sibling 'swiftc'.
        swiftc = os.getenv('SWIFT_EXEC',
                           os.path.join(os.path.dirname(__file__), 'swiftc'))
        command_args = [
            swiftc, '-frontend',
            '-build-module-from-parseable-interface',
            '-sdk', args.sdk,
            '-prebuilt-module-cache-path', args.output_dir,
            '-track-system-dependencies'
        ]
        module_cache_path = ""
        if args.module_cache_path:
            # Per-process cache dir avoids cross-worker cache corruption.
            module_cache_path = os.path.join(args.module_cache_path,
                                             str(os.getpid()))
            command_args += ('-module-cache-path', module_cache_path)
        if args.debug_crash_compiler:
            command_args += ('-debug-crash-immediately',)
        if not args.check_only:
            command_args += (
                '-serialize-parseable-module-interface-dependency-hashes',)
        if args.disable_modules_validate_system_headers:
            command_args += (
                '-disable-modules-validate-system-headers',)
        # FIXME: This shouldn't be necessary, but the module name is checked
        # before the frontend action is.
        if module_file.name == STDLIB_NAME:
            command_args += ('-parse-stdlib',)
        if looks_like_iosmac(interface_base):
            for system_framework_path in args.iosmac_system_framework_dirs:
                command_args += ('-Fsystem', system_framework_path)
            command_args += ('-Fsystem', os.path.join(args.sdk, "System",
                                                      "iOSSupport", "System",
                                                      "Library", "Frameworks"))
        for include_path in args.include_dirs:
            command_args += ('-I', include_path)
        for system_framework_path in args.system_framework_dirs:
            command_args += ('-Fsystem', system_framework_path)
        for framework_path in args.framework_dirs:
            command_args += ('-F', framework_path)
        command_args += ('-module-name', module_file.name, module_file.path)
        output_path = os.path.join(args.output_dir,
                                   module_file.name + ".swiftmodule")
        if interface_base != module_file.name:
            # Per-architecture slice: emit into a .swiftmodule directory.
            make_dirs_if_needed(output_path, args.dry_run)
            output_path = os.path.join(output_path,
                                       interface_base + ".swiftmodule")
        command_args += ('-o', output_path)
        if args.verbose:
            with shared_output_lock:
                print("# Starting " + module_file.path)
                print(' '.join(command_args))
                sys.stdout.flush()
        status, stdout, stderr = run_with_module_cache_retry(
            command_args, module_cache_path=module_cache_path,
            dry_run=args.dry_run)
        log_output_to_file(stdout, module_file.name, interface_base, "out",
                           log_path=args.log_path)
        log_output_to_file(stderr, module_file.name, interface_base, "err",
                           log_path=args.log_path)
        return (module_file, status, stdout, stderr)
    except BaseException:
        # We're catching everything here because we don't want to take down the
        # other jobs.
        return (module_file, 1, "",
                "".join(traceback.format_exception(*sys.exc_info())))
def set_up_child(parent_args, lock):
    """Pool-worker initializer: install the parsed args and the shared
    output lock as globals inside each child process/thread."""
    global args, shared_output_lock
    args = parent_args
    shared_output_lock = lock
def process_module_files(pool, module_files):
    """Build every module via the pool, printing PASS/FAIL/XFAIL/UPASS.

    Returns 0 if everything that was expected to pass did; otherwise the
    first non-zero exit status from an unexpected failure.
    """
    results = pool.imap_unordered(process_module, module_files)
    overall_exit_status = 0
    for (module_file, exit_status, stdout, stderr) in results:
        # Serialize all reporting so parallel workers don't interleave.
        with shared_output_lock:
            if exit_status != 0:
                print("# ", end="")
                if module_file.is_expected_to_fail:
                    print("(XFAIL) ", end="")
                else:
                    print("(FAIL) ", end="")
                print(module_file.path)
                # Show compiler output for real failures (or always in -v).
                if (not module_file.is_expected_to_fail) or args.verbose:
                    print(stdout, end="")
                    print(stderr, end="", file=sys.stderr)
            elif module_file.is_expected_to_fail:
                print("# (UPASS) " + module_file.path)
            elif args.verbose:
                print("# (PASS) " + module_file.path)
            sys.stdout.flush()
        # Only unexpected failures affect the overall status; keep the first.
        if overall_exit_status == 0 and \
                not module_file.is_expected_to_fail:
            overall_exit_status = exit_status
    return overall_exit_status
def main():
    """Parse arguments, discover interface files, and build them all."""
    global args, shared_output_lock
    parser = create_parser()
    args = parser.parse_args()
    if args.machine_parseable_monotonic_version:
        print(MONOTONIC_VERSION)
        sys.exit(0)
    if 'SWIFT_FORCE_MODULE_LOADING' not in os.environ:
        os.environ['SWIFT_FORCE_MODULE_LOADING'] = 'prefer-serialized'
    # -o is validated here (not via argparse required=True) so the
    # version query above can run without it.
    if not args.output_dir:
        fatal("argument -o is required")
    if not args.sdk:
        fatal("SDKROOT must be set in the environment")
    if not os.path.isdir(args.sdk):
        fatal("invalid SDK: " + args.sdk)
    xfails = ()
    if args.ignore_non_stdlib_failures:
        if args.xfails:
            print("warning: ignoring -xfails because "
                  "-ignore-non-stdlib-failures was provided", file=sys.stderr)
        xfails = NegatedSet((STDLIB_NAME,))
    elif args.xfails:
        with open(args.xfails) as xfails_file:
            xfails = json.load(xfails_file)
    make_dirs_if_needed(args.output_dir, args.dry_run)
    if 'ANDROID_DATA' not in os.environ:
        shared_output_lock = multiprocessing.Lock()
        pool = multiprocessing.Pool(args.jobs, set_up_child,
                                    (args, shared_output_lock))
    else:
        # Android doesn't support Python's multiprocessing as it doesn't have
        # sem_open, so switch to a ThreadPool instead.
        import threading
        shared_output_lock = threading.Lock()
        from multiprocessing.pool import ThreadPool
        pool = ThreadPool(args.jobs, set_up_child,
                          (args, shared_output_lock))
    interface_framework_dirs = (args.interface_framework_dirs or
                                DEFAULT_FRAMEWORK_INTERFACE_SEARCH_PATHS)
    module_files = list(itertools.chain(
        collect_non_framework_modules(args.sdk, xfails,
                                      BARE_INTERFACE_SEARCH_PATHS),
        collect_framework_modules(args.sdk, xfails, interface_framework_dirs)))
    if not args.skip_stdlib:
        # Always do the stdlib first, so that we can use it in later steps
        stdlib_module_files = (
            x for x in module_files if x.name == STDLIB_NAME)
        status = process_module_files(pool, stdlib_module_files)
        if status != 0:
            sys.exit(status)
    non_stdlib_module_files = (
        x for x in module_files if x.name != STDLIB_NAME)
    status = process_module_files(pool, non_stdlib_module_files)
    if os.name == 'nt':
        # On Windows, exit abruptly so pool workers can't block shutdown.
        import ctypes
        Kernel32 = ctypes.cdll.LoadLibrary("Kernel32.dll")
        Kernel32.ExitProcess(ctypes.c_ulong(status))
    sys.exit(status)
# Script entry point.
if __name__ == '__main__':
    main()
|
|
#coding=utf-8
import os
import uuid
from django.http import Http404
from django.shortcuts import render, redirect
from django.utils import six
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from avatar.conf import settings
from avatar.forms import PrimaryAvatarForm, DeleteAvatarForm, UploadAvatarForm
from avatar.models import Avatar
from avatar.signals import avatar_updated
from avatar.util import (get_primary_avatar, get_default_avatar_url,
get_user_model, get_user)
def _get_next(request):
"""
The part that's the least straightforward about views in this module is
how they determine their redirects after they have finished computation.
In short, they will try and determine the next place to go in the
following order:
1. If there is a variable named ``next`` in the *POST* parameters, the
view will redirect to that variable's value.
2. If there is a variable named ``next`` in the *GET* parameters,
the view will redirect to that variable's value.
3. If Django can determine the previous page from the HTTP headers,
the view will redirect to that previous page.
"""
next = request.POST.get('next', request.GET.get('next',
request.META.get('HTTP_REFERER', None)))
if not next:
next = request.path
return next
def _get_avatars(user):
    """Return (primary_avatar_or_None, capped avatar queryset) for *user*."""
    avatars = user.avatar_set.all()
    # Primary-first ordering; slicing keeps this a lazy queryset.
    primary_avatar = avatars.order_by('-primary')[:1]
    avatar = primary_avatar[0] if primary_avatar else None
    if settings.AVATAR_MAX_AVATARS_PER_USER == 1:
        avatars = primary_avatar
    else:
        # Apply the per-user cap only after the primary lookup above has
        # used the unsliced queryset.
        avatars = avatars[:settings.AVATAR_MAX_AVATARS_PER_USER]
    return (avatar, avatars)
@login_required
def add(request, extra_context=None, next_override=None,
        upload_form=UploadAvatarForm, *args, **kwargs):
    """Upload a new avatar for the logged-in user and mark it primary.

    Renders ``avatar/add.html`` on GET or invalid POST; redirects to the
    computed "next" URL after a successful upload.
    """
    if extra_context is None:
        extra_context = {}
    avatar, avatars = _get_avatars(request.user)
    upload_avatar_form = upload_form(request.POST or None,
                                     request.FILES or None,
                                     user=request.user)
    if request.method == "POST" and 'avatar' in request.FILES:
        if upload_avatar_form.is_valid():
            avatar = Avatar(user=request.user, primary=True)
            image_file = request.FILES['avatar']
            filename_parts = os.path.splitext(image_file.name)
            extension = filename_parts[1]
            # BUG FIX: `unicode` is a Python 2-only builtin; use
            # six.text_type (six is already imported by this module) so
            # the view also works on Python 3.
            filename = u'%s%s' % (six.text_type(uuid.uuid4()),
                                  six.text_type(extension))
            avatar.avatar.save(filename, image_file)
            avatar.save()
            messages.success(request, _("Successfully uploaded a new avatar."))
            avatar_updated.send(sender=Avatar, user=request.user, avatar=avatar)
            return redirect(next_override or _get_next(request))
    context = {
        'avatar': avatar,
        'avatars': avatars,
        'upload_avatar_form': upload_avatar_form,
        'next': next_override or _get_next(request),
    }
    context.update(extra_context)
    return render(request, 'avatar/add.html', context)
@login_required
def change(request, extra_context=None, next_override=None,
           upload_form=UploadAvatarForm, primary_form=PrimaryAvatarForm,
           *args, **kwargs):
    """Let the user pick which of their avatars is the primary one."""
    if extra_context is None:
        extra_context = {}
    avatar, avatars = _get_avatars(request.user)
    # Pre-select the current primary avatar in the choice form, if any.
    kwargs = {'initial': {'choice': avatar.id}} if avatar else {}
    upload_avatar_form = upload_form(user=request.user, **kwargs)
    primary_avatar_form = primary_form(request.POST or None,
                                       user=request.user,
                                       avatars=avatars, **kwargs)
    if request.method == "POST":
        updated = False
        if 'choice' in request.POST and primary_avatar_form.is_valid():
            avatar = Avatar.objects.get(
                id=primary_avatar_form.cleaned_data['choice'])
            avatar.primary = True
            avatar.save()
            updated = True
            messages.success(request, _("Successfully updated your avatar."))
        if updated:
            avatar_updated.send(sender=Avatar, user=request.user,
                                avatar=avatar)
        return redirect(next_override or _get_next(request))
    context = {
        'avatar': avatar,
        'avatars': avatars,
        'upload_avatar_form': upload_avatar_form,
        'primary_avatar_form': primary_avatar_form,
        'next': next_override or _get_next(request)
    }
    context.update(extra_context)
    return render(request, 'avatar/change.html', context)
@login_required
def delete(request, extra_context=None, next_override=None, *args, **kwargs):
    """Delete the avatars the user selected.

    If the current primary is among the deleted avatars and others remain,
    the first surviving avatar is promoted to primary.
    """
    if extra_context is None:
        extra_context = {}
    avatar, avatars = _get_avatars(request.user)
    delete_avatar_form = DeleteAvatarForm(request.POST or None,
                                          user=request.user,
                                          avatars=avatars)
    if request.method == 'POST':
        if delete_avatar_form.is_valid():
            ids = delete_avatar_form.cleaned_data['choices']
            # BUG FIX: guard against `avatar` being None (user without any
            # avatars) before dereferencing avatar.id.
            if (avatar and six.text_type(avatar.id) in ids
                    and avatars.count() > len(ids)):
                # Find the next best avatar, and set it as the new primary
                for a in avatars:
                    if six.text_type(a.id) not in ids:
                        a.primary = True
                        a.save()
                        avatar_updated.send(sender=Avatar, user=request.user,
                                            avatar=avatar)
                        break
            Avatar.objects.filter(id__in=ids).delete()
            messages.success(request,
                             _("Successfully deleted the requested avatars."))
            return redirect(next_override or _get_next(request))
    context = {
        'avatar': avatar,
        'avatars': avatars,
        'delete_avatar_form': delete_avatar_form,
        'next': next_override or _get_next(request),
    }
    context.update(extra_context)
    return render(request, 'avatar/confirm_delete.html', context)
def avatar_gallery(request, username, template_name="avatar/gallery.html"):
    """Render every avatar belonging to *username* (404 on unknown user)."""
    try:
        gallery_user = get_user(username)
    except get_user_model().DoesNotExist:
        raise Http404
    return render(request, template_name, {
        "other_user": gallery_user,
        "avatars": gallery_user.avatar_set.all(),
    })
def avatar(request, username, id, template_name="avatar/avatar.html"):
    """Show one of *username*'s avatars with prev/next navigation.

    Avatars are ordered newest-first; ``index`` is the position of the
    requested avatar in that ordering.
    """
    try:
        user = get_user(username)
    except get_user_model().DoesNotExist:
        raise Http404
    avatars = user.avatar_set.order_by("-date_uploaded")
    index = None
    avatar = None
    if avatars:
        avatar = avatars.get(pk=id)
    if not avatar:
        # BUG FIX: this previously did `return Http404`, handing the
        # exception class back as a "response" instead of producing a 404.
        raise Http404
    index = avatars.filter(date_uploaded__gt=avatar.date_uploaded).count()
    count = avatars.count()
    if index == 0:
        # Newest avatar: previous wraps around to the oldest one.
        prev = avatars.reverse()[0]
        if count <= 1:
            next = avatars[0]
        else:
            next = avatars[1]
    else:
        prev = avatars[index - 1]
        if (index + 1) >= count:
            # Oldest avatar: next wraps to the newest.
            next = avatars[0]
            prev_index = index - 1
            if prev_index < 0:
                prev_index = 0
            prev = avatars[prev_index]
        else:
            next = avatars[index + 1]
    return render(request, template_name, {
        "other_user": user,
        "avatar": avatar,
        "index": index + 1,
        "avatars": avatars,
        "next": next,
        "prev": prev,
        "count": count,
    })
def render_primary(request, user=None, size=settings.AVATAR_DEFAULT_SIZE):
    """Redirect to the primary avatar of *user*, resized to *size* pixels.

    Falls back to the default avatar URL when the user has no primary
    avatar.
    """
    size = int(size)
    primary = get_primary_avatar(user, size=size)
    # FIXME: later, add an option to render the resized avatar dynamically
    # instead of redirecting to an already created static file. This could
    # be useful in certain situations, particulary if there is a CDN and
    # we want to minimize the storage usage on our static server, letting
    # the CDN store those files instead
    if primary:
        target_url = primary.avatar_url(size)
    else:
        target_url = get_default_avatar_url()
    return redirect(target_url)
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages enforcement of policies for a single cloud project."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import threading
from googleapiclient import errors
from google.apputils import datelib
from google.cloud.security.common.gcp_api import compute
from google.cloud.security.common.util import log_util
from google.cloud.security.enforcer import enforcer_log_pb2
from google.cloud.security.enforcer import gce_firewall_enforcer as fe
# Enforcement status codes, aliased from the enforcer log protobuf enum.
STATUS_SUCCESS = enforcer_log_pb2.SUCCESS
STATUS_ERROR = enforcer_log_pb2.ERROR
STATUS_SKIPPED = enforcer_log_pb2.SKIPPED
STATUS_DELETED = enforcer_log_pb2.PROJECT_DELETED
# Default number of times to try applying the firewall policy to a project
# before the status is changed to ERROR and the enforcement fails.
MAX_ENFORCEMENT_RETRIES = 3
# Module-level logger, named after this module.
LOGGER = log_util.get_logger(__name__)
class ProjectEnforcer(object):
    """Manages enforcement of policies for a single cloud project."""
    # TODO: Investigate improving so we can avoid the pylint disable.
    # pylint: disable=too-many-instance-attributes
    def __init__(self,
                 project_id,
                 dry_run=False,
                 project_sema=None,
                 max_running_operations=0):
        """Initialize.
        Args:
          project_id: The project id for the project to enforce.
          dry_run: Set to true to ensure no actual changes are made to the
              project. EnforcePolicy will still return a ProjectResult proto
              showing the changes that would have been made.
          project_sema: An optional semaphore object, used to limit the number
              of concurrent projects getting written to.
          max_running_operations: Used to limit the number of concurrent running
              operations on an API.
        """
        self.project_id = project_id
        # The result proto is built up incrementally over the enforcement run.
        self.result = enforcer_log_pb2.ProjectResult()
        self.result.project_id = self.project_id
        self._dry_run = dry_run
        self._project_sema = project_sema
        if max_running_operations:
            self._operation_sema = threading.BoundedSemaphore(
                value=max_running_operations)
        else:
            # No semaphore means no limit on concurrent API operations.
            self._operation_sema = None
    # pylint: disable=too-many-return-statements,too-many-branches
    # TODO: Investigate not having to disable some of these messages.
    def enforce_firewall_policy(self,
                                firewall_policy,
                                compute_service=None,
                                networks=None,
                                allow_empty_ruleset=False,
                                prechange_callback=None,
                                retry_on_dry_run=False,
                                maximum_retries=MAX_ENFORCEMENT_RETRIES):
        """Enforces the firewall policy on the project.
        Args:
          firewall_policy: A list of rules to enforce on the project.
          compute_service: A Compute service object. If not provided, one will
              be created using the default credentials.
          networks: An optional list of network names to apply the policy to. If
              undefined, then the policy will be applied to all networks.
          allow_empty_ruleset: If set to true and firewall_policy has no rules,
              all current firewall rules will be deleted from the project.
          prechange_callback: An optional callback function to pass to
              FirewallEnforcer.apply_firewall. It gets called if the
              firewall policy for a project does not match the expected policy,
              before any changes are actually applied. If the callback returns
              False then no changes will be made to the project. If it returns
              True then the changes will be pushed.
              See FirewallEnforcer.apply_firewall() docstring for more details.
          retry_on_dry_run: Set to True to retry applying firewall rules when
              the expected policy does not match the current policy when
              dry_run is enabled.
          maximum_retries: The number of times enforce_firewall_policy will
              attempt to set the current firewall policy to the expected
              firewall policy. Set to 0 to disable retry behavior.
        Returns:
          A ProjectResult proto with details on the status of the enforcement
          and an audit log with any changes made.
        """
        if not compute_service:
            gce_api = compute.ComputeClient()
            compute_service = gce_api.service
        # pylint: disable=attribute-defined-outside-init
        # TODO: Investigate improving to avoid the pylint disable.
        self.firewall_api = fe.ComputeFirewallAPI(compute_service,
                                                  dry_run=self._dry_run)
        # pylint: disable=attribute-defined-outside-init
        # TODO: Investigate improving to avoid the pylint disable.
        self.firewall_policy = firewall_policy
        if networks:
            self.project_networks = sorted(networks)
        else:
            self.project_networks = self._get_project_networks()
        self.result.timestamp_sec = datelib.Timestamp.now().AsMicroTimestamp()
        try:
            # pylint: disable=attribute-defined-outside-init
            # TODO: Investigate improving to avoid the pylint disable.
            self.enforcer = self._initialize_firewall_enforcer()
        except EnforcementError as e:
            return self._set_error_status(e.reason())
        except ProjectDeletedError as e:
            self.result.status = STATUS_DELETED
            self.result.status_reason = 'Project scheduled for deletion: %s' % e
            LOGGER.warn('Project %s scheduled for deletion: %s',
                        self.project_id, e)
            return self.result
        except ComputeApiDisabledError as e:
            # Reuse the DELETED status, since the project should be moved to the
            # archive queue if the API is disabled.
            self.result.status = STATUS_DELETED
            self.result.status_reason = 'Project has GCE API disabled: %s' % e
            LOGGER.warn('Project %s has the GCE API disabled: %s',
                        self.project_id, e)
            return self.result
        # Apply the policy, re-checking the project's rules after each pass
        # and retrying until they match the expected rules or retries are
        # exhausted.
        retry_enforcement_count = 0
        while True:
            try:
                change_count = self.enforcer.apply_firewall(
                    prechange_callback=prechange_callback,
                    allow_empty_ruleset=allow_empty_ruleset,
                    networks=self.project_networks)
            except fe.FirewallEnforcementFailedError as e:
                return self._set_error_status(
                    'error enforcing firewall for project: %s', e)
            try:
                # pylint: disable=attribute-defined-outside-init
                # TODO: Investigate improving to avoid the pylint disable.
                self.rules_after_enforcement = self._get_current_fw_rules()
            except EnforcementError as e:
                return self._set_error_status(e.reason())
            if not change_count:
                # Don't attempt to retry if there were no changes. This can be
                # caused by the prechange callback returning false or an
                # exception.
                break
            if ((self._dry_run and not retry_on_dry_run) or
                    self.rules_after_enforcement == self.expected_rules):
                break
            retry_enforcement_count += 1
            if retry_enforcement_count <= maximum_retries:
                LOGGER.warn('New firewall rules do not match the expected '
                            'rules enforced by the policy for project %s, '
                            'retrying. (Retry #%d)', self.project_id,
                            retry_enforcement_count)
                self.enforcer.refresh_current_rules()
            else:
                return self._set_error_status('New firewall rules do not match '
                                              'the expected rules enforced by '
                                              'the policy')
        self.result.status = STATUS_SUCCESS
        self._update_fw_results()
        if not self.result.gce_firewall_enforcement.rules_modified_count:
            LOGGER.info('Firewall policy not changed for %s', self.project_id)
        return self.result
    def _initialize_firewall_enforcer(self):
        """Gets current and expected rules, returns a FirewallEnforcer object.
        Returns:
          A new FirewallEnforcer object configured with the expected policy for
          the project.
        Raises:
          EnforcementError: Raised if there are any errors fetching the current
              firewall rules or building the expected rules from the policy.
        """
        # pylint: disable=attribute-defined-outside-init
        # TODO: Investigate improving to avoid the pylint disable.
        if not self.project_networks:
            raise EnforcementError(STATUS_ERROR,
                                   'no networks found for project')
        self.rules_before_enforcement = self._get_current_fw_rules()
        # pylint: disable=attribute-defined-outside-init
        # TODO: Investigate improving to avoid the pylint disable.
        self.expected_rules = fe.FirewallRules(self.project_id)
        try:
            # The policy is expanded once per network it applies to.
            for network_name in self.project_networks:
                self.expected_rules.add_rules(
                    self.firewall_policy, network_name=network_name)
        except fe.InvalidFirewallRuleError as e:
            raise EnforcementError(STATUS_ERROR, 'error adding the expected '
                                   'firewall rules from the policy: %s' % e)
        enforcer = fe.FirewallEnforcer(
            self.project_id,
            self.firewall_api,
            self.expected_rules,
            self.rules_before_enforcement,
            project_sema=self._project_sema,
            operation_sema=self._operation_sema)
        return enforcer
    def _get_project_networks(self):
        """Enumerate the current project networks and returns a sorted list."""
        networks = set()
        try:
            response = self.firewall_api.list_networks(
                self.project_id, fields='items/selfLink')
        except errors.HttpError as e:
            LOGGER.error('Error listing networks for project %s: %s',
                         self.project_id, e)
        else:
            for item in response.get('items', []):
                if 'selfLink' in item:
                    network_name = fe.get_network_name_from_url(
                        item['selfLink'])
                    networks.add(network_name)
                else:
                    LOGGER.error('Network URL not found in %s for project %s',
                                 item, self.project_id)
        # Empty when the list call failed or returned no usable items.
        return sorted(networks)
    def _get_current_fw_rules(self):
        """Create a new FirewallRules object with the current rules.
        Returns:
          A new FirewallRules object with the current rules added to it.
        Raises:
          EnforcementError: Raised if there are any exceptions raised while
              adding the firewall rules.
        """
        current_rules = fe.FirewallRules(self.project_id)
        try:
            current_rules.add_rules_from_api(self.firewall_api)
        except errors.HttpError as e:
            # Handle race condition where a project is deleted after it is
            # enqueued.
            error_msg = str(
                e)  # HttpError Class decodes json encoded error into str
            if ((e.resp.status in (400, 404) and
                 ('Invalid value for project' in error_msg or
                  'Failed to find project' in error_msg))
                    or  # Error string changed
                    (e.resp.status == 403 and
                     'scheduled for deletion' in error_msg)):
                raise ProjectDeletedError(error_msg)
            elif (e.resp.status == 403 and
                  'Compute Engine API has not been used' in error_msg):
                raise ComputeApiDisabledError(error_msg)
            else:
                raise EnforcementError(STATUS_ERROR,
                                       'error getting current firewall '
                                       'rules from API: %s' % e)
        except fe.InvalidFirewallRuleError as e:
            raise EnforcementError(STATUS_ERROR,
                                   'error getting current firewall '
                                   'rules from API: %s' % e)
        return current_rules
    def _update_fw_results(self):
        """Update the result proto with details on any changes made."""
        results = self.result.gce_firewall_enforcement
        results.rules_modified_count = 0
        for rule in sorted(
                [r['name'] for r in self.enforcer.get_inserted_rules()]):
            results.rules_added.append(rule)
            results.rules_modified_count += 1
        for rule in sorted(
                [r['name'] for r in self.enforcer.get_deleted_rules()]):
            results.rules_removed.append(rule)
            results.rules_modified_count += 1
        for rule in sorted(
                [r['name'] for r in self.enforcer.get_updated_rules()]):
            results.rules_updated.append(rule)
            results.rules_modified_count += 1
        # If an error occured during enforcement, rules_after_enforcement may
        # not exist yet.
        # pylint: disable=attribute-defined-outside-init
        # TODO: Investigate improving to avoid the pylint disable.
        if not hasattr(self, 'rules_after_enforcement'):
            try:
                self.rules_after_enforcement = self._get_current_fw_rules()
            except EnforcementError as e:
                LOGGER.error(
                    'Project %s raised an error while listing firewall '
                    'rules after enforcement: %s', self.project_id, e)
                # Ensure original rules are in audit log in case roll back is
                # required
                results.rules_before.json = (
                    self.rules_before_enforcement.as_json())
                results.rules_before.hash = (
                    hashlib.sha256(results.rules_before.json).hexdigest())
                return
        # Only record before/after snapshots when something actually changed.
        if self.rules_before_enforcement != self.rules_after_enforcement:
            results.rules_before.json = self.rules_before_enforcement.as_json()
            results.rules_before.hash = (
                hashlib.sha256(results.rules_before.json).hexdigest())
            results.rules_after.json = self.rules_after_enforcement.as_json()
            results.rules_after.hash = (
                hashlib.sha256(results.rules_after.json).hexdigest())
        for (rule_name,
             rule) in sorted(self.rules_after_enforcement.rules.items()):
            if rule == self.rules_before_enforcement.rules.get(rule_name, {}):
                results.rules_unchanged.append(rule_name)
        if (self.result.status == STATUS_SUCCESS and
                results.rules_modified_count and not results.rules_updated and
                not results.rules_unchanged):
            # Check if all previous rules were deleted and all current rules
            # were added during this enforcement. If so, this is a newly
            # enforced project.
            previous_fw_rules_count = len(self.rules_before_enforcement.rules)
            current_fw_rules_count = len(self.rules_after_enforcement.rules)
            if (len(results.rules_removed) >= previous_fw_rules_count and
                    len(results.rules_added) == current_fw_rules_count):
                LOGGER.info(
                    'Project %s had all of its firewall rules changed..',
                    self.project_id)
                results.all_rules_changed = True
    def _set_error_status(self, msg, *args):
        """Set status to result ERROR and update the reason string from msg."""
        # Lazy %-formatting: args are only applied when provided.
        if args:
            msg %= args
        self.result.status = STATUS_ERROR
        self.result.status_reason = msg
        LOGGER.warn('Project %s had an error: %s', self.project_id, msg)
        if hasattr(self, 'enforcer'):  # Verify enforcer was initialized.
            self._update_fw_results()
        return self.result
class Error(Exception):
    """Base error class for the module.
    All errors raised by this module derive from this class, so callers can
    catch them with a single except clause.
    """
class EnforcementError(Error):
    """Error encountered while enforcing firewall on project."""

    def __init__(self, status, reason):
        """Record the numeric status, its protobuf name and the reason.

        Args:
          status: An EnforcementStatus enum value from the enforcer log proto.
          reason: A human readable string describing the failure.
        """
        self._reason = reason
        self._status = int(status)
        self._status_string = enforcer_log_pb2.EnforcementStatus.Name(status)
        super(EnforcementError, self).__init__(str(self))

    def status(self):
        """Return the numeric enforcement status code."""
        return self._status

    def reason(self):
        """Return the human readable failure reason."""
        return self._reason

    def __str__(self):
        """Format as '<STATUS_NAME>: <reason>'."""
        return '{}: {}'.format(self._status_string, self._reason)
class ProjectDeletedError(Error):
    """Error raised if a project to be enforced has been marked for deletion.
    Raised while fetching current firewall rules when the API reports the
    project as invalid, missing, or scheduled for deletion.
    """
class ComputeApiDisabledError(Error):
    """Error raised if a project to be enforced has the compute API disabled.
    Raised while fetching current firewall rules when the API returns a 403
    stating the Compute Engine API has not been used on the project.
    """
|
|
"""Plugs are ins and outs for Nodes through which they exchange data."""
from __future__ import print_function
import sys
import warnings
from abc import abstractmethod
from .utilities import get_hash
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
if sys.version_info.major > 2: # pragma: no cover
basestring = str
class IPlug(object):
    """The interface for the plugs.
    Plugs are associated with a Node and can be connected, disconnected
    and hold a value, that can be accessed by the associated Node.
    """
    def __init__(self, name, node):
        """Initialize the Interface.
        Args:
            name (str): The name of the Plug.
            node (INode): The Node holding the Plug.
        Raises:
            ValueError: If name contains a dot and this is not a SubPlug,
                since dots are reserved for identifying sub plugs.
        """
        if "." in name and not isinstance(self, SubPlug):
            raise ValueError(
                'Names for plugs can not contain dots "." as these are '
                "reserved to identify sub plugs."
            )
        self.name = name
        self.node = node
        # Plugs connected to this one; maintained by connect()/disconnect().
        self.connections = []
        # Compound (dotted) child plugs, keyed by sub plug name.
        self._sub_plugs = OrderedDict()
        self._value = None
        self._is_dirty = True
    def __rshift__(self, other):
        """Create a connection to the given IPlug.
        Deprecated; use connect() instead.
        Args:
            other (IPlug): The IPlug to connect to.
        """
        warnings.warn(
            "Use the connect method instead", DeprecationWarning, stacklevel=2
        )
        self.connect(other)
    def __lshift__(self, other):
        """Break a connection to the given IPlug.
        Deprecated; use disconnect() instead.
        Args:
            other (IPlug): The IPlug to disconnect.
        """
        warnings.warn(
            "Use the disconnect method instead",
            DeprecationWarning,
            stacklevel=2,
        )
        self.disconnect(other)
    # Extra function to make re-use in subclasses easier
    def _update_value(self, value):
        """Update the internal value."""
        # Compare hashes so unhashable values (get_hash returns None) always
        # mark the plug dirty.
        old_hash = get_hash(self._value)
        new_hash = get_hash(value)
        self._value = value
        if old_hash is None or new_hash is None or (old_hash != new_hash):
            self.is_dirty = True
    @property
    def value(self):
        """Access to the value on this Plug."""
        # Compound plugs report their children's values as a dict.
        if self._sub_plugs:
            return {name: plug.value for name, plug in self._sub_plugs.items()}
        return self._value
    @value.setter
    def value(self, value):
        """Set the Plug dirty when the value is being changed."""
        self._update_value(value)
    @property
    def is_dirty(self):
        """Access to the dirty status on this Plug."""
        # A compound plug is dirty if any of its sub plugs is dirty.
        if self._sub_plugs:
            for sub_plug in self._sub_plugs.values():
                if sub_plug.is_dirty:
                    return True
            return False
        else:
            return self._is_dirty
    @is_dirty.setter
    def is_dirty(self, status):
        """Set the Plug dirty informs the node this Plug belongs to."""
        self._is_dirty = status
        if status:
            self.node.on_input_plug_set_dirty()
    @abstractmethod
    def connect(self, plug):  # pragma: no cover
        """Has to be implemented in the subclass."""
        raise NotImplementedError("The subclass has to define connect()")
    def disconnect(self, plug):
        """Break the connection to the given Plug."""
        # Disconnecting from a group disconnects from each of its members.
        if isinstance(plug, InputPlugGroup):
            for plug_ in plug:
                self.disconnect(plug_)
            return
        # Remove the connection on both ends and mark both sides dirty.
        if plug in self.connections:
            self.connections.pop(self.connections.index(plug))
            self.is_dirty = True
        if self in plug.connections:
            plug.connections.pop(plug.connections.index(self))
            plug.is_dirty = True
    def promote_to_graph(self, name=None):
        """Add this plug to the graph of this plug's node.
        Args:
            name (str): Optionally provide a different name for the Plug
        """
        self.node.graph.add_plug(self, name=name)
class OutputPlug(IPlug):
    """Provides data to an InputPlug."""
    def __init__(self, name, node, accepted_plugs=None):
        """Initialize the OutputPlug.
        Can be connected to an InputPlug.
        Args:
            name (str): The name of the Plug.
            node (INode): The Node holding the Plug.
        """
        # Outputs may connect to single inputs or whole input groups.
        self.accepted_plugs = (InputPlug, InputPlugGroup)
        super(OutputPlug, self).__init__(name, node)
        # Sub plugs register with their parent plug, not with the node.
        if not isinstance(self, SubPlug):
            self.node.outputs[self.name] = self
    def __rshift__(self, other):
        """Syntactic sugar for the connect() method.
        If `other` is a INode with an input matching this plug's name, connect.
        """
        # softly check if the "other" is a Node with inputs
        if hasattr(other, "inputs"):
            for iname, iplug in other.inputs.items():
                if iname == self.name:
                    target = iplug
        else:
            target = other
        # NOTE(review): if `other` has an `inputs` dict with no key matching
        # this plug's name, `target` is never bound and this raises
        # UnboundLocalError -- confirm callers always have a matching input.
        self.connect(target)
    def connect(self, plug):
        """Connect this Plug to the given InputPlug.
        Set both participating Plugs dirty.
        Raises:
            TypeError: If plug is not one of the accepted plug types.
        """
        if not isinstance(plug, self.accepted_plugs):
            raise TypeError(
                "Cannot connect {0} to {1}".format(type(self), type(plug))
            )
        # Connecting to a group connects to each of its members.
        if isinstance(plug, InputPlugGroup):
            for plug_ in plug:
                self.connect(plug_)
            return
        if self.node.graph.accepts_connection(self, plug):
            # An input accepts a single incoming connection; drop existing.
            for connection in plug.connections:
                plug.disconnect(connection)
            if plug not in self.connections:
                self.connections.append(plug)
                plug.value = self.value
                self.is_dirty = True
                plug.is_dirty = True
            if self not in plug.connections:
                plug.connections = [self]
                plug.is_dirty = True
    def __getitem__(self, key):
        """Retrieve a sub plug by key.
        If it does not exist yet, it is created automatically!
        Args:
            key (str): The name of the sub plug
        Raises:
            TypeError: If key is not a string.
        """
        if not isinstance(key, basestring):
            raise TypeError(
                "Only strings are allowed as sub-plug keys! "
                "This is due to the fact that JSON serialization only allows "
                "strings as keys."
            )
        if not self._sub_plugs.get(key):
            self._sub_plugs[key] = SubOutputPlug(
                key=key, node=self.node, parent_plug=self
            )
        return self._sub_plugs[key]
    def _update_value(self, value):
        """Propagate the dirty state to all connected Plugs as well."""
        super(OutputPlug, self)._update_value(value)
        for plug in self.connections:
            plug.value = value
    def serialize(self):
        """Serialize the Plug containing all it's connections."""
        # Map node identifier -> list of connected input plug names.
        connections = {}
        for connection in self.connections:
            connections.setdefault(connection.node.identifier, [])
            connections[connection.node.identifier].append(connection.name)
        return {
            "name": self.name,
            "value": self.value if not self._sub_plugs else None,
            "connections": connections,
            "sub_plugs": {
                name: sub_plug.serialize()
                for name, sub_plug in self._sub_plugs.items()
            },
        }
class InputPlug(IPlug):
    """Receives data from an OutputPlug."""
    def __init__(self, name, node, value=None):
        """Initialize the InputPlug.
        Can be connected to an OutputPlug.
        Args:
            name (str): The name of the Plug.
            node (INode): The Node holding the Plug.
            value: The initial value for the Plug.
        """
        # Inputs only accept connections coming from outputs.
        self.accepted_plugs = (OutputPlug,)
        super(InputPlug, self).__init__(name, node)
        self.value = value
        self.is_dirty = True
        # Sub plugs register with their parent plug, not with the node.
        if not isinstance(self, SubPlug):
            self.node.inputs[self.name] = self
    def connect(self, plug):
        """Connect this Plug to the given OutputPlug.
        Set both participating Plugs dirty.
        Raises:
            TypeError: If plug is not an OutputPlug.
        """
        if not isinstance(plug, self.accepted_plugs):
            raise TypeError(
                "Cannot connect {0} to {1}".format(type(self), type(plug))
            )
        # Delegate to the output side, which owns the connection logic.
        plug.connect(self)
    def __getitem__(self, key):
        """Retrieve a sub plug by key.
        If it does not exist yet, it is created automatically!
        Args:
            key (str): The name of the sub plug
        Raises:
            TypeError: If key is not a string.
        """
        if not isinstance(key, basestring):
            raise TypeError(
                "Only strings are allowed as sub-plug keys! "
                "This is due to the fact that JSON serialization only allows "
                "strings as keys."
            )
        if not self._sub_plugs.get(key):
            self._sub_plugs[key] = SubInputPlug(
                key=key, node=self.node, parent_plug=self
            )
        return self._sub_plugs[key]
    def _update_value(self, value):
        """Ignore direct value updates on compound plugs; value is derived."""
        if self._sub_plugs:
            return
        super(InputPlug, self)._update_value(value)
    def serialize(self):
        """Serialize the Plug containing all it's connections."""
        # An input has at most one incoming connection.
        connections = {}
        if self.connections:
            connections[
                self.connections[0].node.identifier
            ] = self.connections[0].name
        return {
            "name": self.name,
            "value": self.value if not self._sub_plugs else None,
            "connections": connections,
            "sub_plugs": {
                name: sub_plug.serialize()
                for name, sub_plug in self._sub_plugs.items()
            },
        }
class SubPlug(object):
    """Mixin that unifies common properties of subplugs."""
    @property
    def is_dirty(self):
        """Access to the dirty status on this Plug."""
        return self._is_dirty
    @is_dirty.setter
    def is_dirty(self, status):
        """Setting the Plug dirty informs its parent plug."""
        self._is_dirty = status
        # Dirtiness bubbles up so the parent compound plug reflects it.
        if status:
            self.parent_plug.is_dirty = status
    def promote_to_graph(self, name=None):
        """Add this plug to the graph of this plug's node.
        NOTE: Subplugs can only be added to a graph via their parent plug.
        Args:
            name (str): Optionally provide a different name for the Plug
        Raises:
            TypeError: Always, since sub plugs cannot be promoted directly.
        """
        # prevent adding SubPlug to the graph without their parents
        raise TypeError(
            "Cannot add SubPlug to graph! Add the parent plug instead."
        )
class SubInputPlug(SubPlug, InputPlug):
    """Held by a parent input plug to form a compound plug."""
    def __init__(self, key, node, parent_plug, value=None):
        """Initialize the plug.
        Can be connected to an OutputPlug.
        Args:
            key (str): The key will be used to form the name of the Plug:
                {parent_plug.name}.{key}.
            node (INode): The Node holding the Plug.
            parent_plug (InputPlug): The parent plug holding this Plug.
            value: The initial value for the Plug.
        """
        # super().__init__() refers to self.parent_plug, so need to set it here
        self.key = key
        self.parent_plug = parent_plug
        self.parent_plug._sub_plugs[key] = self
        super(SubInputPlug, self).__init__(
            "{0}.{1}".format(parent_plug.name, key), node
        )
        self.value = value
        self.is_dirty = True
    def serialize(self):
        """Serialize the Plug containing all it's connections."""
        # An input has at most one incoming connection.
        connections = {}
        if self.connections:
            connections[
                self.connections[0].node.identifier
            ] = self.connections[0].name
        return {
            "name": self.name,
            "value": self.value,
            "connections": connections,
        }
class SubOutputPlug(SubPlug, OutputPlug):
    """Held by a parent output plug to form a compound plug."""
    def __init__(self, key, node, parent_plug, value=None):
        """Initialize the plug.
        Can be connected to an InputPlug.
        Args:
            key (str): The key will be used to form the name of the Plug:
                {parent_plug.name}.{key}.
            node (INode): The Node holding the Plug.
            parent_plug (InputPlug): The parent plug holding this Plug.
            value: The initial value for the Plug.
        """
        # super().__init__() refers to self.parent_plug, so need to set it here
        self.key = key
        self.parent_plug = parent_plug
        self.parent_plug._sub_plugs[key] = self
        super(SubOutputPlug, self).__init__(
            "{0}.{1}".format(parent_plug.name, key), node
        )
        self.value = value
        self.is_dirty = True
    def _update_value(self, value):
        """Propagate the dirty state to all connected Plugs as well."""
        super(SubOutputPlug, self)._update_value(value)
        for plug in self.connections:
            plug.value = value
        # Keep the parent compound value dict in sync with this child.
        parent_value = self.parent_plug.value or {}
        parent_value[self.key] = value
        self.parent_plug.value = parent_value
    def serialize(self):
        """Serialize the Plug containing all it's connections."""
        # Map node identifier -> list of connected input plug names.
        connections = {}
        for connection in self.connections:
            connections.setdefault(connection.node.identifier, [])
            connections[connection.node.identifier].append(connection.name)
        return {
            "name": self.name,
            "value": self.value,
            "connections": connections,
        }
class InputPlugGroup(object):
    """Group plugs inside a group into one entry point on the graph."""

    def __init__(self, name, graph, plugs=None):
        """Create the group and register it under `graph.inputs`.

        Can be connected to an OutputPlug.
        Args:
            name (str): The name of the InputPlugGroup.
            graph (Graph): The Graph holding the PlugGroup.
            plugs (list of InputPlug): The plugs in this group.
        """
        self.name = name
        self.graph = graph
        self.plugs = plugs if plugs else []
        self.graph.inputs[self.name] = self

    def connect(self, plug):
        """Connect every plug in this group to the given plug."""
        for member in self.plugs:
            plug.connect(member)

    def disconnect(self, plug):
        """Disconnect every plug in this group from the given plug."""
        for member in self.plugs:
            plug.disconnect(member)

    def __iter__(self):
        """Iterate over the plugs contained in this group."""
        return iter(self.plugs)

    def __rshift__(self, other):
        """Syntactic sugar for the connect() method."""
        self.connect(other)

    def __lshift__(self, other):
        """Syntactic sugar for the disconnect() method."""
        self.disconnect(other)

    @property
    def value(self):
        """Getting the value of an InputPlugGroup is not supported.

        The value property is implemented nonetheless, in order to allow for
        convenient setting of the value of all plugs in the InputPlugGroup.
        """
        raise AttributeError(
            "Getting the value of an InputPlugGroup is not supported"
        )

    @value.setter
    def value(self, new_value):
        """Assign the given value to every plug in the group."""
        for member in self.plugs:
            member.value = new_value
|
|
import pytest
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from numpy.random import RandomState
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
incr_mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import (assign_rows_csr,
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2,
csr_row_norms)
from sklearn.utils._testing import assert_allclose
def test_mean_variance_axis0():
    """mean_variance_axis(axis=0) matches dense numpy for CSR/CSC dtypes."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    # LIL input is not supported and must raise
    with pytest.raises(TypeError):
        mean_variance_axis(X_lil, axis=0)
    sparse_mats = (sp.csr_matrix(X_lil), sp.csc_matrix(X_lil))
    expected_dtypes = [(np.float32, np.float32),
                       (np.float64, np.float64),
                       (np.int32, np.float64),
                       (np.int64, np.float64)]
    for input_dtype, output_dtype in expected_dtypes:
        X_dense = X.astype(input_dtype)
        for mat in sparse_mats:
            mat = mat.astype(input_dtype)
            means, variances = mean_variance_axis(mat, axis=0)
            assert means.dtype == output_dtype
            assert variances.dtype == output_dtype
            assert_array_almost_equal(means, np.mean(X_dense, axis=0))
            assert_array_almost_equal(variances, np.var(X_dense, axis=0))
def test_mean_variance_axis1():
    """mean_variance_axis(axis=1) matches dense numpy for CSR/CSC dtypes."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    with pytest.raises(TypeError):
        mean_variance_axis(X_lil, axis=1)
    X_csr = sp.csr_matrix(X_lil)
    X_csc = sp.csc_matrix(X_lil)
    expected_dtypes = [(np.float32, np.float32),
                       (np.float64, np.float64),
                       (np.int32, np.float64),
                       (np.int64, np.float64)]
    for input_dtype, output_dtype in expected_dtypes:
        X_test = X.astype(input_dtype)
        for X_sparse in (X_csr, X_csc):
            X_sparse = X_sparse.astype(input_dtype)
            # BUG FIX: this test previously asserted axis=0 statistics,
            # making it an exact duplicate of test_mean_variance_axis0 and
            # leaving axis=1 computation untested.
            X_means, X_vars = mean_variance_axis(X_sparse, axis=1)
            assert X_means.dtype == output_dtype
            assert X_vars.dtype == output_dtype
            assert_array_almost_equal(X_means, np.mean(X_test, axis=1))
            assert_array_almost_equal(X_vars, np.var(X_test, axis=1))
def test_incr_mean_variance_axis():
    """Check incr_mean_variance_axis against mean_variance_axis on both axes."""
    for axis in [0, 1]:
        rng = np.random.RandomState(0)
        n_features = 50
        n_samples = 10
        data_chunks = [rng.randint(0, 2, size=n_features)
                       for i in range(n_samples)]
        # default params for incr_mean_variance
        last_mean = np.zeros(n_features)
        last_var = np.zeros_like(last_mean)
        last_n = np.zeros_like(last_mean, dtype=np.int64)
        # Test errors
        X = np.array(data_chunks[0])
        X = np.atleast_2d(X)
        X_lil = sp.lil_matrix(X)
        X_csr = sp.csr_matrix(X_lil)
        # Swapped/invalid argument types must raise.
        with pytest.raises(TypeError):
            incr_mean_variance_axis(X=axis, axis=last_mean, last_mean=last_var,
                                    last_var=last_n)
        # LIL input is not supported and must raise.
        with pytest.raises(TypeError):
            incr_mean_variance_axis(X_lil, axis=axis, last_mean=last_mean,
                                    last_var=last_var, last_n=last_n)
        # Test _incr_mean_and_var with a 1 row input
        X_means, X_vars = mean_variance_axis(X_csr, axis)
        X_means_incr, X_vars_incr, n_incr = \
            incr_mean_variance_axis(X_csr, axis=axis, last_mean=last_mean,
                                    last_var=last_var, last_n=last_n)
        assert_array_almost_equal(X_means, X_means_incr)
        assert_array_almost_equal(X_vars, X_vars_incr)
        # X.shape[axis] picks # samples
        assert_array_equal(X.shape[axis], n_incr)
        # Same check for the CSC layout.
        X_csc = sp.csc_matrix(X_lil)
        X_means, X_vars = mean_variance_axis(X_csc, axis)
        assert_array_almost_equal(X_means, X_means_incr)
        assert_array_almost_equal(X_vars, X_vars_incr)
        assert_array_equal(X.shape[axis], n_incr)
        # Test _incremental_mean_and_var with whole data
        X = np.vstack(data_chunks)
        X_lil = sp.lil_matrix(X)
        X_csr = sp.csr_matrix(X_lil)
        X_csc = sp.csc_matrix(X_lil)
        expected_dtypes = [(np.float32, np.float32),
                           (np.float64, np.float64),
                           (np.int32, np.float64),
                           (np.int64, np.float64)]
        for input_dtype, output_dtype in expected_dtypes:
            for X_sparse in (X_csr, X_csc):
                X_sparse = X_sparse.astype(input_dtype)
                last_mean = last_mean.astype(output_dtype)
                last_var = last_var.astype(output_dtype)
                X_means, X_vars = mean_variance_axis(X_sparse, axis)
                X_means_incr, X_vars_incr, n_incr = \
                    incr_mean_variance_axis(X_sparse, axis=axis,
                                            last_mean=last_mean,
                                            last_var=last_var,
                                            last_n=last_n)
                assert X_means_incr.dtype == output_dtype
                assert X_vars_incr.dtype == output_dtype
                assert_array_almost_equal(X_means, X_means_incr)
                assert_array_almost_equal(X_vars, X_vars_incr)
                assert_array_equal(X.shape[axis], n_incr)
@pytest.mark.parametrize(
    "X1, X2",
    [
        (sp.random(5, 2, density=0.8, format='csr', random_state=0),
         sp.random(13, 2, density=0.8, format='csr', random_state=0)),
        # Second case: X2's first column is entirely NaN to exercise the
        # NaN-ignoring code path.
        (sp.random(5, 2, density=0.8, format='csr', random_state=0),
         sp.hstack([sp.csr_matrix(np.full((13, 1), fill_value=np.nan)),
                    sp.random(13, 1, density=0.8, random_state=42)],
                   format="csr"))
    ]
)
def test_incr_mean_variance_axis_equivalence_mean_variance(X1, X2):
    # non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/16448
    # check that computing the incremental mean and variance is equivalent to
    # computing the mean and variance on the stacked dataset.
    axis = 0
    last_mean, last_var = np.zeros(X1.shape[1]), np.zeros(X1.shape[1])
    last_n = np.zeros(X1.shape[1], dtype=np.int64)
    updated_mean, updated_var, updated_n = incr_mean_variance_axis(
        X1, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
    )
    updated_mean, updated_var, updated_n = incr_mean_variance_axis(
        X2, axis=axis, last_mean=updated_mean, last_var=updated_var,
        last_n=updated_n
    )
    # Reference: nan-aware statistics over the vertically stacked data.
    X = sp.vstack([X1, X2])
    assert_allclose(updated_mean, np.nanmean(X.A, axis=axis))
    assert_allclose(updated_var, np.nanvar(X.A, axis=axis))
    assert_allclose(updated_n, np.count_nonzero(~np.isnan(X.A), axis=0))
def test_incr_mean_variance_no_new_n():
    """Updating the statistics with a zero-row matrix must be a no-op."""
    axis = 0
    X1 = sp.random(5, 1, density=0.8, random_state=0).tocsr()
    X2 = sp.random(0, 1, density=0.8, random_state=0).tocsr()
    n_features = X1.shape[1]
    mean, var = np.zeros(n_features), np.zeros(n_features)
    n = np.zeros(n_features, dtype=np.int64)
    mean, var, n = incr_mean_variance_axis(
        X1, axis=axis, last_mean=mean, last_var=var, last_n=n
    )
    # Update the statistics with an empty (0 x 1) matrix: every statistic
    # should come back unchanged.
    new_mean, new_var, new_n = incr_mean_variance_axis(
        X2, axis=axis, last_mean=mean, last_var=var, last_n=n
    )
    assert_allclose(new_mean, mean)
    assert_allclose(new_var, var)
    assert_allclose(new_n, n)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("sparse_constructor", [sp.csc_matrix, sp.csr_matrix])
def test_incr_mean_variance_axis_ignore_nan(axis, sparse_constructor):
    """NaN entries must be ignored by the incremental update."""
    prev_mean = np.array([535., 535., 535., 535.])
    prev_var = np.array([4225., 4225., 4225., 4225.])
    prev_n = np.array([2, 2, 2, 2], dtype=np.int64)
    dense = np.array([[170, 170, 170, 170],
                      [430, 430, 430, 430],
                      [300, 300, 300, 300]])
    dense_nan = np.array([[170, np.nan, 170, 170],
                          [np.nan, 170, 430, 430],
                          [430, 430, np.nan, 300],
                          [300, 300, 300, np.nan]])
    X = sparse_constructor(dense)
    X_nan = sparse_constructor(dense_nan)
    # Transposing the same data is enough to exercise axis=1; no need for
    # a dedicated fixture per axis.
    if axis:
        X = X.T
        X_nan = X_nan.T
    # Pass copies of the old statistics: they are modified in place.
    means, variances, counts = incr_mean_variance_axis(
        X, axis=axis, last_mean=prev_mean.copy(),
        last_var=prev_var.copy(), last_n=prev_n.copy())
    nan_means, nan_vars, nan_counts = incr_mean_variance_axis(
        X_nan, axis=axis, last_mean=prev_mean.copy(),
        last_var=prev_var.copy(), last_n=prev_n.copy())
    assert_allclose(nan_means, means)
    assert_allclose(nan_vars, variances)
    assert_allclose(nan_counts, counts)
def test_mean_variance_illegal_axis():
    """Both axis helpers must reject any axis other than 0 or 1."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_csr = sp.csr_matrix(X)
    for bad_axis in (-3, 2, -1):
        with pytest.raises(ValueError):
            mean_variance_axis(X_csr, axis=bad_axis)
        with pytest.raises(ValueError):
            incr_mean_variance_axis(X_csr, axis=bad_axis, last_mean=None,
                                    last_var=None, last_n=None)
def test_densify_rows():
    """assign_rows_csr must copy the selected CSR rows into `out`."""
    for dtype in (np.float32, np.float64):
        X = sp.csr_matrix([[0, 3, 0],
                           [2, 4, 0],
                           [0, 0, 0],
                           [9, 8, 7],
                           [4, 0, 5]], dtype=dtype)
        src_rows = np.array([0, 2, 3], dtype=np.intp)
        dst_rows = np.array([1, 3, 4], dtype=np.intp)
        out = np.ones((6, X.shape[1]), dtype=dtype)
        # Rows not targeted must stay untouched (all ones).
        expected = np.ones_like(out)
        expected[dst_rows] = X[src_rows, :].toarray()
        assign_rows_csr(X, src_rows, dst_rows, out)
        assert_array_equal(out, expected)
def test_inplace_column_scale():
    """inplace_column_scale must match dense scaling for CSR/CSC and
    reject unsupported sparse formats, in float64 and float32."""
    rng = np.random.RandomState(0)

    def check(X, scale):
        Xr = X.tocsr()
        Xc = X.tocsc()
        XA = X.toarray()
        XA *= scale
        inplace_column_scale(Xc, scale)
        inplace_column_scale(Xr, scale)
        assert_array_almost_equal(Xr.toarray(), Xc.toarray())
        assert_array_almost_equal(XA, Xc.toarray())
        assert_array_almost_equal(XA, Xr.toarray())
        # LIL is not supported.
        with pytest.raises(TypeError):
            inplace_column_scale(X.tolil(), scale)

    X = sp.rand(100, 200, 0.05)
    scale = rng.rand(200)
    check(X, scale)
    check(X.astype(np.float32), scale.astype(np.float32))
def test_inplace_row_scale():
    """inplace_row_scale must match dense row scaling for CSR/CSC and
    reject unsupported sparse formats, in float64 and float32.

    Bug fix: the TypeError checks previously called inplace_column_scale
    (a copy-paste from the column test), so the row variant's format
    validation was never exercised.
    """
    rng = np.random.RandomState(0)
    X = sp.rand(100, 200, 0.05)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    scale = rng.rand(100)
    XA *= scale.reshape(-1, 1)
    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    # LIL is not supported by the function under test.
    with pytest.raises(TypeError):
        inplace_row_scale(X.tolil(), scale)

    # Same checks with float32 data.
    X = X.astype(np.float32)
    scale = scale.astype(np.float32)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    XA *= scale.reshape(-1, 1)
    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    with pytest.raises(TypeError):
        inplace_row_scale(X.tolil(), scale)
def test_inplace_swap_row():
    """inplace_swap_row must match dense BLAS row swaps for CSR/CSC in
    float64 and float32, and raise TypeError for unsupported formats.

    Bug fix: the TypeError check previously called
    inplace_swap_row(X_csr.tolil()) with no row indices, so the error
    came from the wrong arity instead of the format validation; row
    indices are now supplied.  The duplicated float64/float32 bodies are
    folded into a loop.
    """
    for dtype in (np.float64, np.float32):
        X = np.array([[0, 3, 0],
                      [2, 4, 0],
                      [0, 0, 0],
                      [9, 8, 7],
                      [4, 0, 5]], dtype=dtype)
        X_csr = sp.csr_matrix(X)
        X_csc = sp.csc_matrix(X)
        # Reference dense swap via BLAS, matched against the sparse swaps.
        swap = linalg.get_blas_funcs(('swap',), (X,))[0]
        # First/last rows (exercises a negative index).
        X[0], X[-1] = swap(X[0], X[-1])
        inplace_swap_row(X_csr, 0, -1)
        inplace_swap_row(X_csc, 0, -1)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())
        # Two interior rows.
        X[2], X[3] = swap(X[2], X[3])
        inplace_swap_row(X_csr, 2, 3)
        inplace_swap_row(X_csc, 2, 3)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())
        # LIL input must be rejected (with valid indices supplied).
        with pytest.raises(TypeError):
            inplace_swap_row(X_csr.tolil(), 0, 1)
def test_inplace_swap_column():
    """inplace_swap_column must match dense BLAS column swaps for CSR/CSC
    in float64 and float32, and raise TypeError for unsupported formats.

    Bug fix: the TypeError check previously called
    inplace_swap_column(X_csr.tolil()) with no column indices, so the
    error came from the wrong arity instead of the format validation;
    column indices are now supplied.  The duplicated float64/float32
    bodies are folded into a loop.
    """
    for dtype in (np.float64, np.float32):
        X = np.array([[0, 3, 0],
                      [2, 4, 0],
                      [0, 0, 0],
                      [9, 8, 7],
                      [4, 0, 5]], dtype=dtype)
        X_csr = sp.csr_matrix(X)
        X_csc = sp.csc_matrix(X)
        # Reference dense swap via BLAS, matched against the sparse swaps.
        swap = linalg.get_blas_funcs(('swap',), (X,))[0]
        # First/last columns (exercises a negative index).
        X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
        inplace_swap_column(X_csr, 0, -1)
        inplace_swap_column(X_csc, 0, -1)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())
        # Two adjacent columns.
        X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
        inplace_swap_column(X_csr, 0, 1)
        inplace_swap_column(X_csc, 0, 1)
        assert_array_equal(X_csr.toarray(), X_csc.toarray())
        assert_array_equal(X, X_csc.toarray())
        assert_array_equal(X, X_csr.toarray())
        # LIL input must be rejected (with valid indices supplied).
        with pytest.raises(TypeError):
            inplace_swap_column(X_csr.tolil(), 0, 1)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("axis", [0, 1, None])
@pytest.mark.parametrize("sparse_format", [sp.csr_matrix, sp.csc_matrix])
@pytest.mark.parametrize(
    "missing_values, min_func, max_func, ignore_nan",
    [(0, np.min, np.max, False),
     (np.nan, np.nanmin, np.nanmax, True)]
)
@pytest.mark.parametrize("large_indices", [True, False])
def test_min_max(dtype, axis, sparse_format, missing_values, min_func,
                 max_func, ignore_nan, large_indices):
    """min_max_axis must match the dense numpy min/max reference."""
    dense = np.array([[0, 3, 0],
                      [2, -1, missing_values],
                      [0, 0, 0],
                      [9, missing_values, 7],
                      [4, 0, 5]], dtype=dtype)
    X_sparse = sparse_format(dense)
    if large_indices:
        # Also exercise the 64-bit index code path.
        X_sparse.indices = X_sparse.indices.astype('int64')
        X_sparse.indptr = X_sparse.indptr.astype('int64')
    sparse_mins, sparse_maxs = min_max_axis(X_sparse, axis=axis,
                                            ignore_nan=ignore_nan)
    assert_array_equal(sparse_mins, min_func(dense, axis=axis))
    assert_array_equal(sparse_maxs, max_func(dense, axis=axis))
def test_min_max_axis_errors():
    """min_max_axis rejects non-CSR/CSC input and out-of-range axes."""
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    with pytest.raises(TypeError):
        min_max_axis(X_csr.tolil(), axis=0)
    for matrix, bad_axis in ((X_csr, 2), (X_csc, -3)):
        with pytest.raises(ValueError):
            min_max_axis(matrix, axis=bad_axis)
def test_count_nonzero():
    """Check count_nonzero against a dense boolean reference, with and
    without sample weights, for all valid axes, plus error handling and
    output-dtype consistency (including 64-bit sparse indices)."""
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    # Dense reference: boolean mask, optionally weighted per row.
    X_nonzero = X != 0
    sample_weight = [.5, .2, .3, .1, .1]
    X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
    for axis in [0, 1, -1, -2, None]:
        assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
                                  X_nonzero.sum(axis=axis))
        assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
                                                sample_weight=sample_weight),
                                  X_nonzero_weighted.sum(axis=axis))
    # CSC input is rejected, as are axes outside {-2, -1, 0, 1, None}.
    with pytest.raises(TypeError):
        count_nonzero(X_csc)
    with pytest.raises(ValueError):
        count_nonzero(X_csr, axis=2)
    # The result dtype must not depend on the axis.
    assert (count_nonzero(X_csr, axis=0).dtype ==
            count_nonzero(X_csr, axis=1).dtype)
    assert (count_nonzero(X_csr, axis=0, sample_weight=sample_weight).dtype ==
            count_nonzero(X_csr, axis=1, sample_weight=sample_weight).dtype)
    # Check dtypes with large sparse matrices too
    # XXX: test fails on 32bit (Windows/Linux)
    try:
        X_csr.indices = X_csr.indices.astype(np.int64)
        X_csr.indptr = X_csr.indptr.astype(np.int64)
        assert (count_nonzero(X_csr, axis=0).dtype ==
                count_nonzero(X_csr, axis=1).dtype)
        assert (count_nonzero(X_csr, axis=0,
                              sample_weight=sample_weight).dtype ==
                count_nonzero(X_csr, axis=1,
                              sample_weight=sample_weight).dtype)
    except TypeError as e:
        # On 32-bit platforms (np.intp < 8 bytes) the int64 indices cannot
        # be cast safely; accept exactly that failure, re-raise anything else.
        assert ("according to the rule 'safe'" in e.args[0]
                and np.intp().nbytes < 8), e
def test_csc_row_median():
    """csc_median_axis_0 must agree with np.median over axis 0."""
    rng = np.random.RandomState(0)
    # Dense random data.
    X = rng.rand(100, 50)
    assert_array_equal(csc_median_axis_0(sp.csc_matrix(X)),
                       np.median(X, axis=0))
    # Genuinely sparse data with a mix of signs.
    X = rng.rand(51, 100)
    X[X < 0.7] = 0.0
    ind = rng.randint(0, 50, 10)
    X[ind] = -X[ind]
    assert_array_equal(csc_median_axis_0(sp.csc_matrix(X)),
                       np.median(X, axis=0))
    # Toy data with hand-computed medians.
    toy_cases = (
        ([[0, -2], [-1, -1], [1, 0], [2, 1]], np.array([0.5, -0.5])),
        ([[0, -2], [-1, -5], [1, -3]], np.array([0., -3])),
    )
    for data, expected in toy_cases:
        assert_array_equal(csc_median_axis_0(sp.csc_matrix(data)), expected)
    # Only CSC input is accepted.
    with pytest.raises(TypeError):
        csc_median_axis_0(sp.csr_matrix([[0, -2], [-1, -5], [1, -3]]))
def test_inplace_normalize():
    """Check l1/l2 in-place CSR row normalization for both value dtypes
    and both index dtypes: after normalizing (and squaring, for l2) each
    row must sum to 1."""
    ones = np.ones((10, 1))
    rs = RandomState(10)
    for inplace_csr_row_normalize in (inplace_csr_row_normalize_l1,
                                      inplace_csr_row_normalize_l2):
        for dtype in (np.float64, np.float32):
            X = rs.randn(10, 5).astype(dtype)
            X_csr = sp.csr_matrix(X)
            # NOTE: X_csr is deliberately reused (already normalized, and
            # for l2 squared in place) across index-dtype iterations; the
            # invariant below holds for the re-normalized data as well.
            for index_dtype in [np.int32, np.int64]:
                # csr_matrix will use int32 indices by default,
                # up-casting those to int64 when necessary
                if index_dtype is np.int64:
                    X_csr.indptr = X_csr.indptr.astype(index_dtype)
                    X_csr.indices = X_csr.indices.astype(index_dtype)
                assert X_csr.indices.dtype == index_dtype
                assert X_csr.indptr.dtype == index_dtype
                inplace_csr_row_normalize(X_csr)
                # Normalization must preserve the value dtype.
                assert X_csr.dtype == dtype
                if inplace_csr_row_normalize is inplace_csr_row_normalize_l2:
                    # Square so the l2 check reduces to a row-sum of 1.
                    X_csr.data **= 2
                assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_csr_row_norms(dtype):
    """csr_row_norms must match scipy's squared row norms and keep the
    dtype of X."""
    X = sp.random(100, 10, format='csr', dtype=dtype, random_state=42)
    reference = sp.linalg.norm(X, axis=1) ** 2
    computed = csr_row_norms(X)
    assert computed.dtype == dtype
    tolerance = 1e-6 if dtype == np.float32 else 1e-7
    assert_allclose(computed, reference, rtol=tolerance)
|
|
# -*- coding: utf-8 -*-
"""
DNS server framework - intended to simplify creation of custom resolvers.
Comprises the following components:
DNSServer - socketserver wrapper (in most cases you should just
need to pass this an appropriate resolver instance
and start in either foreground/background)
DNSHandler - handler instantiated by DNSServer to handle requests
The 'handle' method deals with the sending/receiving
packets (handling TCP length prefix) and delegates
the protocol handling to 'get_reply'. This decodes
packet, hands off a DNSRecord to the Resolver instance,
and encodes the returned DNSRecord.
In most cases you don't need to change DNSHandler unless
you need to get hold of the raw protocol data in the
Resolver
DNSLogger - The class provides a default set of logging functions for
the various stages of the request handled by a DNSServer
instance which are enabled/disabled by flags in the 'log'
class variable.
Resolver - Instance implementing a 'resolve' method that receives
the decoded request packet and returns a response.
To implement a custom resolver in most cases all you need
is to implement this interface.
Note that there is only a single instance of the Resolver
so you need to be careful about thread-safety and blocking
The following examples use the server framework:
fixedresolver.py - Simple resolver which will respond to all
requests with a fixed response
zoneresolver.py - Resolver which will take a standard zone
file input
shellresolver.py - Example of a dynamic resolver
proxy.py - DNS proxy
intercept.py - Intercepting DNS proxy
>>> resolver = BaseResolver()
>>> logger = DNSLogger(prefix=False)
>>> server = DNSServer(resolver,port=8053,address="localhost",logger=logger)
>>> server.start_thread()
>>> q = DNSRecord.question("abc.def")
>>> a = q.send("localhost",8053)
Request: [...] (udp) / 'abc.def.' (A)
Reply: [...] (udp) / 'abc.def.' (A) / RRs:
>>> print(DNSRecord.parse(a))
;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.def. IN A
>>> server.stop()
>>> class TestResolver:
... def resolve(self,request,handler):
... reply = request.reply()
... reply.add_answer(*RR.fromZone("abc.def. 60 A 1.2.3.4"))
... return reply
>>> resolver = TestResolver()
>>> server = DNSServer(resolver,port=8053,address="localhost",logger=logger,tcp=True)
>>> server.start_thread()
>>> a = q.send("localhost",8053,tcp=True)
Request: [...] (tcp) / 'abc.def.' (A)
Reply: [...] (tcp) / 'abc.def.' (A) / RRs: A
>>> print(DNSRecord.parse(a))
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.def. IN A
;; ANSWER SECTION:
abc.def. 60 IN A 1.2.3.4
>>> server.stop()
"""
from __future__ import print_function
import binascii,socket,struct,threading,time
try:
import socketserver
except ImportError:
import SocketServer as socketserver
from dnslib import DNSRecord,DNSError,QTYPE,RCODE,RR
class BaseResolver(object):
    """
    Base resolver implementation. Provides 'resolve' method which is
    called by DNSHandler with the decoded request (DNSRecord instance)
    and returns a DNSRecord instance as reply.

    In most cases you should be able to create a custom resolver by
    just replacing the resolve method with appropriate resolver code for
    application (see fixedresolver/zoneresolver/shellresolver for
    examples)

    Note that a single instance is used by all DNSHandler instances so
    you need to consider blocking & thread safety.
    """
    def resolve(self, request, handler):
        """
        Default behaviour: answer every question with an NXDOMAIN
        response code.
        """
        response = request.reply()
        response.header.rcode = RCODE.NXDOMAIN
        return response
class DNSHandler(socketserver.BaseRequestHandler):
    """
    Handler for socketserver. Transparently handles both TCP/UDP requests
    (TCP requests have length prepended) and hands off lookup to resolver
    instance specified in <SocketServer>.resolver
    """
    # Max udp packet length (0 = ignore); replies longer than this are
    # truncated in get_reply() so they fit a single datagram.
    udplen = 0
    def handle(self):
        """Receive one request, produce a reply, and send it back,
        handling the TCP 2-byte length prefix transparently."""
        if self.server.socket_type == socket.SOCK_STREAM:
            self.protocol = 'tcp'
            data = self.request.recv(8192)
            # TCP DNS messages are prefixed with a 2-byte big-endian
            # length; keep reading until the whole message has arrived.
            length = struct.unpack("!H",bytes(data[:2]))[0]
            while len(data) - 2 < length:
                data += self.request.recv(8192)
            data = data[2:]
        else:
            self.protocol = 'udp'
            # For UDP, socketserver hands us a (packet, socket) pair.
            data,connection = self.request
        self.server.logger.log_recv(self,data)
        try:
            rdata = self.get_reply(data)
            self.server.logger.log_send(self,rdata)
            if self.protocol == 'tcp':
                # Re-apply the length prefix before sending over TCP.
                rdata = struct.pack("!H",len(rdata)) + rdata
                self.request.sendall(rdata)
            else:
                connection.sendto(rdata,self.client_address)
        except DNSError as e:
            # Malformed packets are logged, not propagated.
            self.server.logger.log_error(self,e)
    def get_reply(self,data):
        """Decode the raw packet, delegate to the resolver, and encode
        the reply (truncating UDP replies longer than udplen)."""
        request = DNSRecord.parse(data)
        self.server.logger.log_request(self,request)
        resolver = self.server.resolver
        reply = resolver.resolve(request,self)
        self.server.logger.log_reply(self,reply)
        if self.protocol == 'udp':
            rdata = reply.pack()
            if self.udplen and len(rdata) > self.udplen:
                truncated_reply = reply.truncate()
                rdata = truncated_reply.pack()
                self.server.logger.log_truncated(self,truncated_reply)
        else:
            rdata = reply.pack()
        return rdata
class DNSLogger:
    """
    The class provides a default set of logging functions for the various
    stages of the request handled by a DNSServer instance which are
    enabled/disabled by flags in the 'log' class variable.

    To customise logging create an object which implements the DNSLogger
    interface and pass instance to DNSServer.

    The methods which the logger instance must implement are:

        log_recv          - Raw packet received
        log_send          - Raw packet sent
        log_request       - DNS Request
        log_reply         - DNS Response
        log_truncated     - Truncated
        log_error         - Decoding error
        log_data          - Dump full request/response
    """
    def __init__(self,log="",prefix=True):
        """
        Selectively enable log hooks depending on log argument
        (comma separated list of hooks to enable/disable)

            - If empty enable default log hooks
            - If entry starts with '+' (eg. +send,+recv) enable hook
            - If entry starts with '-' (eg. -data) disable hook
            - If entry doesn't start with +/- replace defaults

        Prefix argument enables/disables log prefix
        """
        default = ["request","reply","truncated","error"]
        log = log.split(",") if log else []
        enabled = set([ s for s in log if s[0] not in '+-'] or default)
        [ enabled.add(l[1:]) for l in log if l.startswith('+') ]
        [ enabled.discard(l[1:]) for l in log if l.startswith('-') ]
        for l in ['log_recv','log_send','log_request','log_reply',
                  'log_truncated','log_error','log_data']:
            if l[4:] not in enabled:
                # Disabled hooks are overridden with a no-op on the instance.
                setattr(self,l,self.log_pass)
        self.prefix = prefix

    def log_pass(self,*args):
        """No-op stand-in for disabled hooks."""
        pass

    def log_prefix(self,handler):
        """Return '<timestamp> [<handler>:<resolver>] ' or '' if disabled."""
        if self.prefix:
            # BUG FIX: the date format used %M (minute) instead of
            # %m (month), producing dates like '2020-59-01'.
            return "%s [%s:%s] " % (time.strftime("%Y-%m-%d %X"),
                                    handler.__class__.__name__,
                                    handler.server.resolver.__class__.__name__)
        else:
            return ""

    def log_recv(self,handler,data):
        """Log a raw received packet (hex dump)."""
        print("%sReceived: [%s:%d] (%s) <%d> : %s" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    len(data),
                    binascii.hexlify(data)))

    def log_send(self,handler,data):
        """Log a raw sent packet (hex dump)."""
        print("%sSent: [%s:%d] (%s) <%d> : %s" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    len(data),
                    binascii.hexlify(data)))

    def log_request(self,handler,request):
        """Log a decoded DNS request (qname/qtype)."""
        print("%sRequest: [%s:%d] (%s) / '%s' (%s)" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    request.q.qname,
                    QTYPE[request.q.qtype]))
        self.log_data(request)

    def log_reply(self,handler,reply):
        """Log a DNS reply, including the answer RR types."""
        print("%sReply: [%s:%d] (%s) / '%s' (%s) / RRs: %s" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    reply.q.qname,
                    QTYPE[reply.q.qtype],
                    ",".join([QTYPE[a.rtype] for a in reply.rr])))
        self.log_data(reply)

    def log_truncated(self,handler,reply):
        """Log a reply that was truncated to fit the UDP length limit."""
        print("%sTruncated Reply: [%s:%d] (%s) / '%s' (%s) / RRs: %s" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    reply.q.qname,
                    QTYPE[reply.q.qtype],
                    ",".join([QTYPE[a.rtype] for a in reply.rr])))
        self.log_data(reply)

    def log_error(self,handler,e):
        """Log a packet that failed to decode."""
        print("%sInvalid Request: [%s:%d] (%s) :: %s" % (
                    self.log_prefix(handler),
                    handler.client_address[0],
                    handler.client_address[1],
                    handler.protocol,
                    e))

    def log_data(self,dnsobj):
        """Dump the full request/response in zone-file format."""
        print("\n",dnsobj.toZone("    "),"\n",sep="")
class UDPServer(socketserver.UDPServer):
    """UDP transport with SO_REUSEADDR so restarts can rebind the port."""
    allow_reuse_address = True
class TCPServer(socketserver.TCPServer):
    """TCP transport with SO_REUSEADDR so restarts can rebind the port."""
    allow_reuse_address = True
class DNSServer(object):
    """
    Convenience wrapper for socketserver instance allowing
    either UDP/TCP server to be started in blocking mode
    or as a background thread.

    Processing is delegated to custom resolver (instance) and
    optionally custom logger (instance), handler (class), and
    server (class)

    In most cases only a custom resolver instance is required
    (and possibly logger)
    """
    def __init__(self,resolver,
                      address="",
                      port=53,
                      tcp=False,
                      logger=None,
                      handler=DNSHandler,
                      server=None):
        """
        resolver:   resolver instance
        address:    listen address (default: "")
        port:       listen port (default: 53)
        tcp:        UDP (false) / TCP (true) (default: False)
        logger:     logger instance (default: DNSLogger)
        handler:    handler class (default: DNSHandler)
        server:     socketserver class (default: UDPServer/TCPServer)
        """
        if not server:
            if tcp:
                server = TCPServer
            else:
                server = UDPServer
        self.server = server((address,port),handler)
        self.server.resolver = resolver
        self.server.logger = logger or DNSLogger()

    def start(self):
        """Serve requests in the foreground (blocks until stop())."""
        self.server.serve_forever()

    def start_thread(self):
        """Serve requests from a background daemon thread."""
        self.thread = threading.Thread(target=self.server.serve_forever)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop the serve_forever() loop (foreground or background)."""
        self.server.shutdown()

    def is_alive(self):
        """Return True while the background thread is running."""
        return self.thread.is_alive()

    # BUG FIX / compatibility: Thread.isAlive() was removed in Python 3.9,
    # so the old body raised AttributeError there.  Keep the historical
    # method name as an alias for existing callers.
    isAlive = is_alive
if __name__ == "__main__":
    # Run the doctest examples embedded in the module docstring; ELLIPSIS
    # lets the examples elide variable output (packet ids, timestamps)
    # with "...".
    import doctest
    doctest.testmod(optionflags=doctest.ELLIPSIS)
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import weakref
from automaton import machines
from taskflow import logging
from taskflow import states as st
from taskflow.types import failure
from taskflow.utils import iter_utils
# Default waiting state timeout (in seconds).
WAITING_TIMEOUT = 60

# Meta states the state machine uses.
UNDEFINED = 'UNDEFINED'
GAME_OVER = 'GAME_OVER'
META_STATES = (GAME_OVER, UNDEFINED)

# Event name constants the state machine uses.
SCHEDULE = 'schedule_next'
WAIT = 'wait_finished'
ANALYZE = 'examine_finished'
FINISH = 'completed'
FAILED = 'failed'
SUSPENDED = 'suspended'
SUCCESS = 'success'
REVERTED = 'reverted'
START = 'start'

# Module-level logger.
LOG = logging.getLogger(__name__)
class MachineMemory(object):
    """State machine memory."""
    def __init__(self):
        # Atoms selected to be scheduled on the next SCHEDULING pass.
        self.next_up = set()
        # Futures for atoms that were scheduled but have not yet finished.
        self.not_done = set()
        # Failure objects collected while scheduling/completing atoms.
        self.failures = []
        # Finished futures awaiting analysis/completion.
        self.done = set()
class MachineBuilder(object):
    """State machine *builder* that powers the engine components.

    NOTE(harlowja): the machine (states and events that will trigger
    transitions) that this builds is represented by the following
    table::

        +--------------+------------------+------------+----------+---------+
        | Start        | Event            | End        | On Enter | On Exit |
        +--------------+------------------+------------+----------+---------+
        | ANALYZING    | completed        | GAME_OVER  | .        | .       |
        | ANALYZING    | schedule_next    | SCHEDULING | .        | .       |
        | ANALYZING    | wait_finished    | WAITING    | .        | .       |
        | FAILURE[$]   | .                | .          | .        | .       |
        | GAME_OVER    | failed           | FAILURE    | .        | .       |
        | GAME_OVER    | reverted         | REVERTED   | .        | .       |
        | GAME_OVER    | success          | SUCCESS    | .        | .       |
        | GAME_OVER    | suspended        | SUSPENDED  | .        | .       |
        | RESUMING     | schedule_next    | SCHEDULING | .        | .       |
        | REVERTED[$]  | .                | .          | .        | .       |
        | SCHEDULING   | wait_finished    | WAITING    | .        | .       |
        | SUCCESS[$]   | .                | .          | .        | .       |
        | SUSPENDED[$] | .                | .          | .        | .       |
        | UNDEFINED[^] | start            | RESUMING   | .        | .       |
        | WAITING      | examine_finished | ANALYZING  | .        | .       |
        +--------------+------------------+------------+----------+---------+

    Between any of these yielded states (minus ``GAME_OVER`` and
    ``UNDEFINED``) if the engine has been suspended or the engine has
    failed (due to a non-resolveable task failure or scheduling failure)
    the machine will stop executing new tasks (currently running tasks
    will be allowed to complete) and this machines run loop will be
    broken.

    NOTE(harlowja): If the runtimes scheduler component is able to
    schedule tasks in parallel, this enables parallel running and/or
    reversion.
    """

    def __init__(self, runtime, waiter):
        """Capture the runtime components the reaction functions need.

        A weak proxy avoids a reference cycle with the runtime that
        owns this builder.
        """
        self._runtime = weakref.proxy(runtime)
        self._analyzer = runtime.analyzer
        self._completer = runtime.completer
        self._scheduler = runtime.scheduler
        self._storage = runtime.storage
        self._waiter = waiter

    def build(self, timeout=None):
        """Builds a state-machine (that is used during running)."""
        memory = MachineMemory()
        if timeout is None:
            timeout = WAITING_TIMEOUT

        # Cache some local functions/methods...
        do_schedule = self._scheduler.schedule
        do_complete = self._completer.complete

        def is_runnable():
            # Checks if the storage says the flow is still runnable...
            return self._storage.get_flow_state() == st.RUNNING

        def iter_next_atoms(atom=None, apply_deciders=True):
            # Yields and filters and tweaks the next atoms to run...
            maybe_atoms_it = self._analyzer.iter_next_atoms(atom=atom)
            for atom, late_decider in maybe_atoms_it:
                if apply_deciders:
                    proceed = late_decider.check_and_affect(self._runtime)
                    if proceed:
                        yield atom
                else:
                    yield atom

        def resume(old_state, new_state, event):
            # This reaction function just updates the state machines memory
            # to include any nodes that need to be executed (from a previous
            # attempt, which may be empty if never ran before) and any nodes
            # that are now ready to be ran.
            memory.next_up.update(
                iter_utils.unique_seen(self._completer.resume(),
                                       iter_next_atoms()))
            return SCHEDULE

        def game_over(old_state, new_state, event):
            # This reaction function is mainly an intermediary delegation
            # function that analyzes the current memory and transitions to
            # the appropriate handler that will deal with the memory values,
            # it is *always* called before the final state is entered.
            if memory.failures:
                return FAILED
            leftover_atoms = iter_utils.count(
                # Avoid activating the deciders, since at this point
                # the engine is finishing and there will be no more further
                # work done anyway...
                iter_next_atoms(apply_deciders=False))
            if leftover_atoms:
                # Ok we didn't finish (either reverting or executing...) so
                # that means we must have been stopped at some point...
                LOG.blather("Suspension determined to have been reacted to"
                            " since (at least) %s atoms have been left in an"
                            " unfinished state", leftover_atoms)
                return SUSPENDED
            elif self._analyzer.is_success():
                return SUCCESS
            else:
                return REVERTED

        def schedule(old_state, new_state, event):
            # This reaction function starts to schedule the memory's next
            # nodes (iff the engine is still runnable, which it may not be
            # if the user of this engine has requested the engine/storage
            # that holds this information to stop or suspend); handles
            # failures that occur during this process safely...
            if is_runnable() and memory.next_up:
                not_done, failures = do_schedule(memory.next_up)
                if not_done:
                    memory.not_done.update(not_done)
                if failures:
                    memory.failures.extend(failures)
                memory.next_up.intersection_update(not_done)
            return WAIT

        def wait(old_state, new_state, event):
            # TODO(harlowja): maybe we should start doing 'yield from' this
            # call sometime in the future, or equivalent that will work in
            # py2 and py3.
            if memory.not_done:
                done, not_done = self._waiter(memory.not_done, timeout=timeout)
                memory.done.update(done)
                memory.not_done = not_done
            return ANALYZE

        def analyze(old_state, new_state, event):
            # This reaction function is responsible for analyzing all nodes
            # that have finished executing and completing them and figuring
            # out what nodes are now ready to be ran (and then triggering
            # those nodes to be scheduled in the future); handles failures
            # that occur during this process safely...
            next_up = set()
            while memory.done:
                fut = memory.done.pop()
                atom = fut.atom
                try:
                    outcome, result = fut.result()
                    retain = do_complete(atom, outcome, result)
                    if isinstance(result, failure.Failure):
                        if retain:
                            memory.failures.append(result)
                        else:
                            # NOTE(harlowja): avoid making any
                            # intention request to storage unless we are
                            # sure we are in DEBUG enabled logging (otherwise
                            # we will call this all the time even when DEBUG
                            # is not enabled, which would suck...)
                            if LOG.isEnabledFor(logging.DEBUG):
                                intention = self._storage.get_atom_intention(
                                    atom.name)
                                LOG.debug("Discarding failure '%s' (in"
                                          " response to outcome '%s') under"
                                          " completion units request during"
                                          " completion of atom '%s' (intention"
                                          " is to %s)", result, outcome,
                                          atom, intention)
                except Exception:
                    memory.failures.append(failure.Failure())
                else:
                    try:
                        more_work = set(iter_next_atoms(atom=atom))
                    except Exception:
                        memory.failures.append(failure.Failure())
                    else:
                        next_up.update(more_work)
            if is_runnable() and next_up and not memory.failures:
                memory.next_up.update(next_up)
                return SCHEDULE
            elif memory.not_done:
                return WAIT
            else:
                return FINISH

        def on_exit(old_state, event):
            LOG.debug("Exiting old state '%s' in response to event '%s'",
                      old_state, event)

        def on_enter(new_state, event):
            LOG.debug("Entering new state '%s' in response to event '%s'",
                      new_state, event)

        # NOTE(harlowja): when ran in blather mode it is quite useful
        # to track the various state transitions as they happen...
        watchers = {}
        if LOG.isEnabledFor(logging.BLATHER):
            watchers['on_exit'] = on_exit
            watchers['on_enter'] = on_enter

        m = machines.FiniteMachine()
        m.add_state(GAME_OVER, **watchers)
        m.add_state(UNDEFINED, **watchers)
        m.add_state(st.ANALYZING, **watchers)
        m.add_state(st.RESUMING, **watchers)
        m.add_state(st.REVERTED, terminal=True, **watchers)
        m.add_state(st.SCHEDULING, **watchers)
        m.add_state(st.SUCCESS, terminal=True, **watchers)
        m.add_state(st.SUSPENDED, terminal=True, **watchers)
        m.add_state(st.WAITING, **watchers)
        m.add_state(st.FAILURE, terminal=True, **watchers)
        m.default_start_state = UNDEFINED

        m.add_transition(GAME_OVER, st.REVERTED, REVERTED)
        m.add_transition(GAME_OVER, st.SUCCESS, SUCCESS)
        m.add_transition(GAME_OVER, st.SUSPENDED, SUSPENDED)
        m.add_transition(GAME_OVER, st.FAILURE, FAILED)
        m.add_transition(UNDEFINED, st.RESUMING, START)
        m.add_transition(st.ANALYZING, GAME_OVER, FINISH)
        m.add_transition(st.ANALYZING, st.SCHEDULING, SCHEDULE)
        m.add_transition(st.ANALYZING, st.WAITING, WAIT)
        m.add_transition(st.RESUMING, st.SCHEDULING, SCHEDULE)
        m.add_transition(st.SCHEDULING, st.WAITING, WAIT)
        m.add_transition(st.WAITING, st.ANALYZING, ANALYZE)

        m.add_reaction(GAME_OVER, FINISH, game_over)
        m.add_reaction(st.ANALYZING, ANALYZE, analyze)
        m.add_reaction(st.RESUMING, START, resume)
        m.add_reaction(st.SCHEDULING, SCHEDULE, schedule)
        m.add_reaction(st.WAITING, WAIT, wait)

        m.freeze()
        # Return the machine plus the memory its reaction functions
        # close over (callers drive the machine and inspect the memory).
        return (m, memory)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
2D Convolution Optimization
===========================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
This tutorial provides an overview on how to use TVM to map a 2D convolution
workload efficiently on the VTA design.
We recommend covering the :ref:`vta-mat-mult-opt` tutorial first.
2D convolution is dominant in most computer vision deep neural networks.
In this tutorial, we will demonstrate TVM schedule optimizations to map
2D convolution operators in NCHW layout onto VTA.
We also introduce the notion of latency hiding, which allows us to
maximize VTA's compute and memory resource utilization.
"""
######################################################################
# RPC Setup
# ---------
# We start by programming the Pynq's FPGA and building its RPC runtime.
from __future__ import absolute_import, print_function
import os
import tvm
import tvm.testing
from tvm import te
import vta
import numpy as np
from tvm import rpc
from tvm.contrib import utils
from vta.testing import simulator
# Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file
env = vta.get_env()
# We read the Pynq RPC host IP address and port number from the OS environment
host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
# We configure both the bitstream and the runtime system on the Pynq
# to match the VTA configuration specified by the vta_config.json file.
if env.TARGET == "pynq":
    # Make sure that TVM was compiled with RPC=1
    assert tvm.runtime.enabled("rpc")
    remote = rpc.connect(host, port)
    # Reconfigure the JIT runtime
    vta.reconfig_runtime(remote)
    # Program the FPGA with a pre-compiled VTA bitstream.
    # You can program the FPGA with your own custom bitstream
    # by passing the path to the bitstream file instead of None.
    vta.program_fpga(remote, bitstream=None)
# In simulation mode, host the RPC server locally.
elif env.TARGET in ["sim", "tsim"]:
    remote = rpc.LocalSession()
# NOTE(review): no `remote` is bound for any other TARGET value, so the code
# below would raise NameError -- presumably only pynq/sim/tsim are supported
# by this tutorial; confirm against the vta_config.json options.
######################################################################
# Computation Declaration
# -----------------------
# As a first step, we need to describe our 2D convolution computation
# in NCHW format.
#
# We define the 2D convolution shape by the batch size,
# spatial dimensions, input channels, output channels,
# kernel dimensions, padding dimensions, and stride dimensions.
#
# We pick the shape of the 9th convolutional layer of the ResNet-18
# architecture as our convolution workload parameters.
#
# We've added extra operators to the 2D convolution that apply
# shifting and clipping to the output in order to mimic a fixed-point
# convolution followed by a rectified linear activation.
# We describe the TVM dataflow graph of the 2D convolution layer below:
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/conv2d_dataflow.png
# :align: center
#
# This computation is intentionally too large to fit onto VTA's on-chip
# buffers all at once. Therefore in the scheduling phase we'll
# rely on computation blocking strategies to break the computation down into
# manageable chunks.
#
# .. note::
#
# *Spatial padding*
#
# Note that we'll need to import the TOPI library to apply spatial padding
# on the input feature map tensor.
# Spatial padding facilitates blocking in the context of 2D convolutions
# due to the fact that the same (x, y) spatial location of the input
# feature map of any given layer is read more than once if the convolution
# kernel window size is greater than one.
# On CPUs, and GPUs, one way to increase efficiency of memory accesses
# when parallelizing work is spatial packing, which requires data re-layout.
# VTA load DMA engine can insert padding automatically so that the original
# input feature map does not have to be re-packed in memory.
#
# We show the effect of VTA's on-the-fly spatial padding when data is being
# loaded from DRAM into VTA's SRAM, following a 2D strided and padded memory
# read.
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/padding.png
# :align: center
# :width: 480px
from tvm import topi
# 2D convolution layer dimensions taken from ResNet-18 architecture
# (9th convolutional layer)
batch_size = 1
height = 14
width = 14
in_channels = 256
out_channels = 256
kernel_h = 3
kernel_w = 3
pad_h = 1
pad_w = 1
stride_h = 1
stride_w = 1
# The workload must tile evenly onto VTA's tensor intrinsic dimensions.
assert batch_size % env.BATCH == 0
assert in_channels % env.BLOCK_IN == 0
assert out_channels % env.BLOCK_OUT == 0
# Input feature map: (N, IC, H, W, n, ic)
data_shape = (
    batch_size // env.BATCH,
    in_channels // env.BLOCK_IN,
    height,
    width,
    env.BATCH,
    env.BLOCK_IN,
)
# Kernel: (OC, IC, H, W, oc, ic)
kernel_shape = (
    out_channels // env.BLOCK_OUT,
    in_channels // env.BLOCK_IN,
    kernel_h,
    kernel_w,
    env.BLOCK_OUT,
    env.BLOCK_IN,
)
# Derive output feature map dimensions
fout_height = (height + 2 * pad_h - kernel_h) // stride_h + 1
fout_width = (width + 2 * pad_w - kernel_w) // stride_w + 1
# Output feature map: (N, OC, H, W, n, oc)
output_shape = (
    batch_size // env.BATCH,
    out_channels // env.BLOCK_OUT,
    fout_height,
    fout_width,
    env.BATCH,
    env.BLOCK_OUT,
)
# Convolution reduction axes
dy = te.reduce_axis((0, kernel_h), name="dy")
dx = te.reduce_axis((0, kernel_w), name="dx")
ic = te.reduce_axis((0, in_channels // env.BLOCK_IN), name="ic")
ic_tns = te.reduce_axis((0, env.BLOCK_IN), name="ic_tns")
# Input placeholder tensors
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
# Copy buffers:
# Apply spatial padding to input feature map
data_buf = topi.nn.pad(data, [0, 0, pad_h, pad_w, 0, 0], name="data_buf")
kernel_buf = te.compute(kernel_shape, lambda *i: kernel(*i), "kernel_buf")
# Declare 2D convolution
res_conv = te.compute(
    output_shape,
    lambda bo, co, i, j, bi, ci: te.sum(
        data_buf[bo, ic, i * stride_h + dy, j * stride_w + dx, bi, ic_tns].astype(env.acc_dtype)
        * kernel_buf[co, ic, dy, dx, ci, ic_tns].astype(env.acc_dtype),
        axis=[ic, dy, dx, ic_tns],
    ),
    name="res_conv",
)
# Add shift stage for fix-point normalization
# NOTE(review): the shift amount 8 is hard-coded here, while the numpy
# reference later shifts by env.INP_WIDTH; they agree only when
# INP_WIDTH == 8 -- confirm against the vta_config.json in use.
res_shr = te.compute(output_shape, lambda *i: res_conv(*i) >> 8, name="res_shr")
# Apply clipping between (0, input max value)
inp_max = (1 << (env.INP_WIDTH - 1)) - 1
res_max = te.compute(output_shape, lambda *i: tvm.te.max(res_shr(*i), 0), "res_max")
res_min = te.compute(output_shape, lambda *i: tvm.te.min(res_max(*i), inp_max), "res_min")
# Result Tensor
res = te.compute(output_shape, lambda *i: res_min(*i).astype(env.inp_dtype), name="res")
######################################################################
# Scheduling the Computation
# --------------------------
# We'll look at a set of schedule transformations necessary to map the
# 2D convolution onto VTA in an efficient fashion.
# Those include:
#
# - Computation blocking
# - Virtual threading to increase compute utilization
# - Lowering to VTA hardware intrinsics
# Create TVM schedule
s = te.create_schedule(res.op)
# Let's look at the default TVM schedule
print(tvm.lower(s, [data, kernel, res], simple_mode=True))
######################################################################
# Blocking the Computation
# ~~~~~~~~~~~~~~~~~~~~~~~~
# The 2D convolution is by default too large for activations or kernel weights
# to fit on VTA's on-chip buffers all at once.
# We apply blocking along input channels, output channels, and along
# the height spatial dimensions.
# We don't apply blocking along the width spatial dimension since it's
# the innermost dimension in the NCHW layout (and consequently to increase
# locality, it's best not to block along the innermost dimension).
# Let's define tiling sizes
b_block = 1 // env.BATCH  # NOTE(review): 0 when env.BATCH > 1; presumably BATCH == 1 here -- confirm
oc_block = 128 // env.BLOCK_OUT
ic_block = 16 // env.BLOCK_IN
h_block = 7
w_block = 14
# Tile the output tensor along the spatial and output channel dimensions
# (since by default we are doing single batch inference, the split along
# the batch dimension has no effect)
b, oc, y, x, b_tns, oc_tns = s[res].op.axis
b_out, b_inn = s[res].split(b, factor=b_block)
oc_out, oc_inn = s[res].split(oc, factor=oc_block)
y_out, y_inn = s[res].split(y, factor=h_block)
x_out, x_inn = s[res].split(x, factor=w_block)
s[res].reorder(b_out, oc_out, y_out, x_out, b_inn, oc_inn, y_inn, x_inn, b_tns, oc_tns)
# Move intermediate computation into each output compute tile
s[res_conv].compute_at(s[res], x_out)
s[res_shr].compute_at(s[res], x_out)
s[res_max].compute_at(s[res], x_out)
s[res_min].compute_at(s[res], x_out)
# Apply additional loop split along reduction axis (input channel)
b_inn, oc_inn, y_inn, x_inn, b_tns, oc_tns = s[res_conv].op.axis
ic_out, ic_inn = s[res_conv].split(ic, factor=ic_block)
# Reorder axes.
# 1) Group the VTA tensor axes in the inner most position: b_tns, oc_tns, ic_tns
# to allow TVM to tensorize.
# 2) We move the ic_out axis all the way out of the convolution loop to block
# along the reduction axis.
# 3) Now we re-order the block axes: b_inn, oc_inn, y_inn, x_inn, ic_inn, dy, dx.
# VTA runtime/hardware requires us to write to a different output feature map
# location for every VTA tensor operation.
# This restriction requires us to order one of oc_inn, y_inn or x_inn right
# before b_tns, since they all affect output feature map indexing.
# Therefore, we choose to bring x_inn inside as shown below.
s[res_conv].reorder(ic_out, b_inn, oc_inn, y_inn, ic_inn, dy, dx, x_inn, b_tns, oc_tns, ic_tns)
######################################################################
# Virtual Threading
# ~~~~~~~~~~~~~~~~~
# Virtual threading is a mechanism that increases task-level pipeline
# parallelism in the VTA hardware design.
# Put it another way, it increases compute resource utilization by hiding
# memory access latency.
#
# In the implementation below, virtual threading distributes work across two
# threads split along the output channel axis.
# We show how work is split when computing the 2D convolution in the figure
# below.
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/virtual_threading.png
# :align: center
# :width: 480px
# VTA only supports 2 virtual threads
v_threads = 2
# Perform virtual thread split along output channel outer axis
_, tx = s[res].split(oc_out, factor=v_threads)
s[res].reorder(tx, b_out)
# Binding to "cthread" marks the axis as a VTA virtual thread (latency hiding)
s[res].bind(tx, te.thread_axis("cthread"))
# Let's look at the current TVM schedule after blocking and virtual threading
print(tvm.lower(s, [data, kernel, res], simple_mode=True))
######################################################################
# Lowering Copies to DMA Transfers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Next we set the buffer scopes to the corresponding on-chip VTA SRAM buffers.
# We move the load loops into the 2D convolution computation loop to stage
# memory loads such that they fit in the on-chip SRAM buffers.
# Finally we annotate the load/store loop outer axes with the DMA copy pragma
# to perform bulk memory transfers on VTA.
# Set scope of SRAM buffers
s[data_buf].set_scope(env.inp_scope)
s[kernel_buf].set_scope(env.wgt_scope)
s[res_conv].set_scope(env.acc_scope)
s[res_shr].set_scope(env.acc_scope)
s[res_min].set_scope(env.acc_scope)
s[res_max].set_scope(env.acc_scope)
# Block data and kernel cache reads
s[data_buf].compute_at(s[res_conv], ic_out)
s[kernel_buf].compute_at(s[res_conv], ic_out)
# Use DMA copy pragma on DRAM->SRAM operations
s[data_buf].pragma(s[data_buf].op.axis[0], env.dma_copy)
s[kernel_buf].pragma(s[kernel_buf].op.axis[0], env.dma_copy)
# Use DMA copy pragma on SRAM->DRAM operation in each result block
# (this implies that these copies should be performed along b_inn,
# or result axis 4)
s[res].pragma(s[res].op.axis[4], env.dma_copy)
######################################################################
# Lowering Computation to VTA Compute Intrinsics
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The last phase is to lower the computation loops down to VTA hardware
# intrinsics by mapping the 2D convolution to tensor intrinsics,
# and mapping the shift, and clipping computation to the vector ALU.
# Apply tensorization over the batch tensor tile axis
s[res_conv].tensorize(b_tns, env.gemm)
# Add an ALU pragma over the shift and clipping operations
s[res_shr].pragma(s[res_shr].op.axis[0], env.alu)
s[res_min].pragma(s[res_min].op.axis[0], env.alu)
s[res_max].pragma(s[res_max].op.axis[0], env.alu)
# Let's look at the final lowered TVM schedule after lowering memory
# loads/stores down to DMA copy intrinsics, and the computation down to
# VTA compute intrinsics.
print(vta.lower(s, [data, kernel, res], simple_mode=True))
######################################################################
# TVM Compilation and Verification
# --------------------------------
# After specifying the schedule, we can compile it into a TVM function.
# We save the module so we can send it over RPC.
# We run the function and verify it against a numpy implementation to
# ensure correctness.
# This library facilitates 2D convolution testing
from tvm.topi.testing import conv2d_nchw_python
# Compile the TVM module
with vta.build_config(disabled_pass={"tir.CommonSubexprElimTIR"}):
    my_conv = vta.build(
        s, [data, kernel, res], tvm.target.Target("ext_dev", host=env.target_host), name="my_conv"
    )
temp = utils.tempdir()
my_conv.save(temp.relpath("conv2d.o"))
remote.upload(temp.relpath("conv2d.o"))
f = remote.load_module("conv2d.o")
# Get the remote device context
ctx = remote.ext_dev(0)
# Initialize the data and kernel arrays randomly in the int range
# of (-128, 128] in NCHW layout
data_np = np.random.randint(-128, 128, size=(batch_size, in_channels, height, width)).astype(
    data.dtype
)
kernel_np = np.random.randint(
    -128, 128, size=(out_channels, in_channels, kernel_h, kernel_w)
).astype(kernel.dtype)
# Apply packing to the data and kernel arrays from a 2D NCHW
# to a 4D NCHWnc packed layout
data_packed = data_np.reshape(
    batch_size // env.BATCH, env.BATCH, in_channels // env.BLOCK_IN, env.BLOCK_IN, height, width
).transpose((0, 2, 4, 5, 1, 3))
kernel_packed = kernel_np.reshape(
    out_channels // env.BLOCK_OUT,
    env.BLOCK_OUT,
    in_channels // env.BLOCK_IN,
    env.BLOCK_IN,
    kernel_h,
    kernel_w,
).transpose((0, 2, 4, 5, 1, 3))
# Format the input/output arrays with tvm.nd.array to the DLPack standard
data_nd = tvm.nd.array(data_packed, ctx)
kernel_nd = tvm.nd.array(kernel_packed, ctx)
res_nd = tvm.nd.array(np.zeros(output_shape).astype(res.dtype), ctx)
# Clear stats
if env.TARGET in ["sim", "tsim"]:
    simulator.clear_stats()
# Invoke the module to perform the computation
f(data_nd, kernel_nd, res_nd)
# Verify against numpy implementation
res_ref = conv2d_nchw_python(
    data_np.astype(env.acc_dtype),
    kernel_np.astype(env.acc_dtype),
    (stride_h, stride_w),
    (pad_h, pad_w),
).astype(env.acc_dtype)
# NOTE(review): the hardware pipeline shifted by the constant 8 (res_shr);
# this reference shifts by env.INP_WIDTH, so the two agree only when
# INP_WIDTH == 8 -- confirm against the vta_config.json in use.
res_ref = res_ref >> env.INP_WIDTH
res_ref = np.clip(res_ref, 0, inp_max)
res_ref = res_ref.astype(res.dtype)
res_ref = res_ref.reshape(
    (
        batch_size // env.BATCH,
        env.BATCH,
        out_channels // env.BLOCK_OUT,
        env.BLOCK_OUT,
        fout_height,
        fout_width,
    )
).transpose((0, 2, 4, 5, 1, 3))
tvm.testing.assert_allclose(res_ref, res_nd.numpy())
# Print stats
if env.TARGET in ["sim", "tsim"]:
    sim_stats = simulator.stats()
    print("Execution statistics:")
    for k, v in sim_stats.items():
        print("\t{:<16}: {:>16}".format(k, v))
print("Successful 2D convolution test!")
######################################################################
# Summary
# -------
# This tutorial demonstrates how TVM scheduling primitives can be used to
# lower 2D convolution onto hardware accelerator intrinsics, making
# use of hardware specific optimizations, such as latency hiding with
# virtual threading.
#
|
|
import requests
import json
import re
import fileinput
from optparse import OptionParser
import subprocess
import os
import sys
import time
from datetime import datetime
import pprint
from perf_data_management import manage_test_result
from couchbase.bucket import Bucket
from couchbase.exceptions import *
import couchbase
"""
# An evolving thing - takes as input:
- a file which is the output from perfrunner - this file will contain some json which describes the perf results
- the perf keys and expected values
This program parses out the results from the files and compares them against the expected values
"""
# Sample perfrunner output kept for offline testing of the parsing logic in
# main() (see the commented-out "workload_output = test_workload_output").
test_workload_output = '''
[20/Oct/2015 15:01:26] INFO - Creating new database: iostatperfregression_410-4859-enterprise_27b10170106
[20/Oct/2015 15:01:29] INFO - Creating new database: ns_serverperfregression_410-4859-enterprise_27bbucket-110170107
[20/Oct/2015 15:02:08] INFO - Adding snapshot: perfregression_410-4859-enterprise_27b_access
[20/Oct/2015 15:02:15] INFO - http://cbmonitor.sc.couchbase.com/reports/html/?snapshot=perfregression_410-4859-enterprise_27b_access
[20/Oct/2015 15:03:04] INFO - http://cbmonitor.sc.couchbase.com/reports/get_corr_matrix/?snapshot=perfregression_410-4859-enterprise_27b_access
[20/Oct/2015 15:03:31] INFO - Dry run stats: {
"build": "4.1.0-4859-enterprise",
"build_url": null,
"metric": "perf_sanity_kv_latency_mixed_2M_short_get_95th_perf_sanity_base_test",
"snapshots": [
"perfregression_410-4859-enterprise_27b_access"
],
"value": 0.56
}
[20/Oct/2015 15:03:31] INFO - Dry run stats: {
"build": "4.1.0-4859-enterprise",
"build_url": null,
"metric": "perf_sanity_kv_latency_mixed_2M_short_set_95th_perf_sanity_base_test",
"snapshots": [
"perfregression_410-4859-enterprise_27b_access"
],
"value": 0.95
}
[20/Oct/2015 15:03:31] INFO - Terminating local Celery workers
'''
def cb_data_analysis(actual_values,test_name,variation,params,analysis_data):
variation = .01 #float(variation)
upper_variation = 1 + variation
lower_variation = 1 - variation
print '\n analysis of result \n'
result = True
temp_analysis_data=""
for k in params.keys():
if actual_values[k]['value'] > (upper_variation * params[k]):
print ' ', test_name, ' is greater than expected. Expected value for key ',k, ' ', params[k], ' Actual ', actual_values[k]['value'],'\n'
temp_analysis_data += (str('Fail: result is greater than expected: '+str(params[k])))
result *=False
elif actual_values[k]['value'] < (lower_variation * params[k]):
# sort of want to yellow flag this but for now all we have is a red flag so use that
print ' ', test_name, ' is less than expected. Expected for key ',k, ' ', params[k], 'Actual ', actual_values[k]['value'],'\n'
temp_analysis_data += (str('Fail: result is lower than expected: '+str(params[k])))
result *= False
else:
result *= True
print test_name ,' ' ,params[k], ' result is expected'
temp_analysis_data += (str('Pass : result is expected: '+str(params[k])))
analysis_data.append(temp_analysis_data)
return result
def get_set_env(property,options):
spec= property["test_details"]["spec"]
test= property["test_details"]["test"]
params=property["test_details"]["params"]
test = 'perfSanity/tests/' + test
spec = 'perfSanity/clusters/' + spec
my_env = os.environ
my_env['cluster'] = spec
my_env['test_config'] = test
my_env['version'] = options.version
return my_env,test,spec,params
def main():
    """Run every configured perf test, compare results against expectations,
    store the outcome in Couchbase and return the generated sanity report.

    Command line: -f config JSON, -v server version, -p comma-separated test
    categories, -b build, -t tag (plus an unused -s summary file).
    """
    print 'Starting the perf regression runner'
    usage = '%prog -f conf-file'
    parser = OptionParser(usage)
    parser.add_option('-f', '--filename', dest='filename')
    parser.add_option('-v', '--version', dest='version')
    parser.add_option('-s', '--summaryFile', dest='summaryFile')
    parser.add_option('-p','--property',dest='property')
    parser.add_option('-b','--build',dest='build')
    parser.add_option('-t','--tag',dest='tag')
    options, args = parser.parse_args()
    summary = []
    mng_data = manage_test_result()
    data=None
    try:
        with open(options.filename) as data_file:
            data = json.load(data_file)
    except (OSError, IOError,ValueError) as e:
        # Config problems are fatal; re-raise for a full traceback.
        raise e
    mng_data.create_cb_instance(data["couchbase_server"],data["couchbase_bucket"])
    mng_data.create_cb_instance(data["couchbase_server"],data["couchbase_test_bucket"])
    test_id=options.tag+ '_'+options.build
    mng_data.cb_load_test(data["couchbase_test_bucket"],data)
    mng_data.set_test_id(test_id)
    count = 0
    for property in options.property.split(','):
        for property_test in data["test_category"][property]:
            temp_data=[]
            analysis_data=[]
            # get_set_env also exports cluster/test_config/version into the
            # environment for the child scripts below.
            my_env,test,spec,params=get_set_env(property_test,options)
            proc = subprocess.Popen('./scripts/setup.sh', env=my_env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                    shell=True)
            for line in iter(proc.stdout.readline, ''):
                print 'Setup output', line
                sys.stdout.flush()
            (stdoutdata, stderrdata) = proc.communicate()
            current_summary = {'test': test, 'status':'run', 'results':[]}
            if proc.returncode == 1:
                print '\n\nHave an error during setup'
                print stderrdata
                print stdoutdata
                current_summary['output'] = ' Have an error during setup'
                current_summary['status'] = 'not run'
                summary.append(current_summary)
                continue
            print 'Setup complete, starting workload'
            sys.stdout.flush()
            # Retry the workload up to data["iteration"] times, stopping early
            # once the analysis passes (flag goes False).
            flag = True
            count = 0
            while count < data["iteration"] and flag:
                print '\n\n', time.asctime( time.localtime(time.time()) ), 'Now running', test
                proc = subprocess.Popen('./perfSanity/scripts/workload_dev.sh', env=my_env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                workload_output = ''
                for line in iter(proc.stdout.readline, ''):
                    print line
                    workload_output += line
                (stdoutdata, stderrdata) = proc.communicate()
                print 'stderrdata', stderrdata
                if proc.returncode == 1:
                    print ' Have an error during workload generation'
                    current_summary['output'] = ' Have an error during workload generation'
                    sys.stdout.flush()
                    current_summary['status'] = 'not run'
                    print stderrdata
                    summary.append(current_summary)
                    # NOTE(review): this continue skips "count += 1" below, so a
                    # persistently failing workload loops forever -- confirm.
                    continue
                print '\n\nWorkload complete, analyzing results'
                #workload_output = test_workload_output
                # perfrunner emits metric JSON as "Dry run stats: {...}" blocks
                p = re.compile(r'Dry run stats: {(.*?)}', re.MULTILINE)
                matches = p.findall(workload_output.replace('\n', ''))
                actual_values = {}
                for m in matches:
                    actual = json.loads('{' + m + '}')
                    actual_values[actual['metric']] = actual
                print '\n\nWorkload gen output:', workload_output, '\n\n'
                expected_keys = params
                print "------- actual values ----------\n"
                print actual_values
                tmp=[]
                for k in expected_keys.keys():
                    tmp.append(actual_values[k]['value'])
                temp_data.append(tmp)
                print '\nCompleted analysis for', test
                time.sleep(10)
                if cb_data_analysis(actual_values,test,data["variation"],params,analysis_data):
                    flag = False
                # NOTE(review): current_summary is appended once per iteration,
                # so a retried test appears multiple times in summary -- confirm.
                summary.append(current_summary)
                print '\nCompleted analysis for', test
                time.sleep(10)
                count += 1
            iter_str='test result of : ' + str(len(analysis_data)) + ' iteration'
            analysis_data.insert(0,iter_str)
            mng_data.load_cb_data_sanity(data["couchbase_bucket"],temp_data,options.version,property,actual_values,analysis_data,mng_data.get_test_id(),params.keys(),test)
    return mng_data.create_report_sanity(data["couchbase_bucket"],mng_data.get_test_id())
if __name__ == "__main__":
if not main():
sys.exit(1)
|
|
import storytext.guishared, util, types, logging, sys, os
from storytext.definitions import UseCaseScriptError
from storytext.gridformatter import GridFormatter
# Some unused imports to make sure classloader loads necessary classes
from org.eclipse.swt.layout import GridLayout, FillLayout, FormLayout, RowLayout #@UnusedImport
from org.eclipse import swt
from browserhtmlparser import BrowserHtmlParser
from java.util import Date
from java.io import File, FilenameFilter
from org.eclipse.jface.resource import ImageDescriptor
from array import array
from ordereddict import OrderedDict
from java.awt import Color
class ColorNameFinder:
    """Builds a lookup from (red, green, blue) tuples to readable color names.

    Names are harvested from color-constant classes (e.g. java.awt.Color) and
    from the SWT system palette, so widget colors can be reported symbolically
    in descriptions instead of as raw RGB values.
    """

    def __init__(self):
        # RGB tuple -> lower-cased color name
        self.names = {}
        # RGB tuples of the platform's default widget colors; these carry no
        # information, so getNameForWidget reports them as empty strings.
        self.widgetDefaults = set()
        # Add java.awt colors
        self.addColors(Color, postfix="&")

    def shortenColorName(self, name, abbreviations):
        """Lower-case a color-constant name and apply (text, replacement) pairs."""
        ret = name.lower()
        for text, repl in abbreviations:
            ret = ret.replace(text, repl)
        return ret

    def addColor(self, name, color, postfix="", modifiers=(), abbreviations=()):
        """Register one named color, plus optional modified variants.

        modifiers is an iterable of (callable, prefix) pairs; each callable
        derives a new color (e.g. a darker shade) that is registered under
        prefix + name unless that RGB value already has a name.
        Fix: the defaults were mutable lists; they were never mutated, but
        immutable tuples avoid the shared-mutable-default pitfall.
        """
        # Class dictionaries also contain non-color attributes; only objects
        # that quack like a color (provide getRed) are registered.
        if hasattr(color, "getRed"):
            newName = name + postfix
            nameToUse = self.shortenColorName(newName, abbreviations)
            self.names[self.getRGB(color)] = nameToUse
            for modifier, prefix in modifiers:
                rgb = self.getRGB(self.applyModifier(modifier, color))
                if rgb not in self.names:
                    self.names[rgb] = prefix + nameToUse

    def applyModifier(self, modifier, color):
        """Apply a color-transforming callable, retrying with an SWT color on failure."""
        try:
            return modifier(color)
        except:
            # Bare except kept deliberately: under Jython the modifier may
            # raise Java exceptions that do not subclass Python's Exception
            # -- presumably why the original did not narrow this; confirm.
            colorToUse = swt.graphics.Color(swt.widgets.Display.getDefault(), color.getRed(), color.getGreen(), color.getBlue())
            return modifier(colorToUse)

    def addColors(self, cls, **kw):
        """Register every color constant found on the given class."""
        for name in sorted(cls.__dict__):
            if not name.startswith("__"):
                try:
                    color = getattr(cls, name)
                    self.addColor(name, color, **kw)
                except AttributeError:
                    pass

    def addSWTColors(self, display):
        """Register the SWT system palette; must run in the UI thread."""
        for name in sorted(swt.SWT.__dict__):
            if name.startswith("COLOR_"):
                colorKey = getattr(swt.SWT, name)
                color = display.getSystemColor(colorKey)
                rgb = self.getRGB(color)
                # Have to do this last because we can only retrieve them in the UI thread
                # Don't override any custom names we might have
                if rgb not in self.names:
                    self.names[rgb] = name[6:].lower()
                if "WIDGET" in name:
                    self.widgetDefaults.add(rgb)

    def getRGB(self, color):
        """Return the (red, green, blue) tuple identifying a color."""
        return color.getRed(), color.getGreen(), color.getBlue()

    def getName(self, color):
        """Return the registered name for a color, or 'unknown'."""
        return self.names.get(self.getRGB(color), "unknown")

    def getNameForWidget(self, color):
        """Like getName, but reports platform default widget colors as ''."""
        rgb = self.getRGB(color)
        return self.names.get(rgb, "unknown") if rgb not in self.widgetDefaults else ""
# Module-level singleton shared by the describer code below.
colorNameFinder = ColorNameFinder()
class Describer(storytext.guishared.Describer):
    # Maps widget classes to the SWT style-bit names worth reporting when
    # describing a widget of that class.
    styleNames = [ (swt.widgets.CoolItem, []),
                   (swt.widgets.Item , [ "SEPARATOR", "DROP_DOWN", "CHECK", "CASCADE", "RADIO" ]),
                   (swt.widgets.Button , [ "CHECK", "RADIO", "TOGGLE", "ARROW", "UP", "DOWN" ]),
                   (swt.widgets.DateTime, [ "DATE", "TIME", "CALENDAR", "SHORT" ]),
                   (swt.widgets.Combo , [ "READ_ONLY", "SIMPLE" ]),
                   (swt.custom.CCombo , [ "READ_ONLY", "FLAT", "BORDER" ]),
                   (swt.widgets.Text , [ "PASSWORD", "SEARCH", "READ_ONLY" ]) ]
    ignoreWidgets = [ types.NoneType ]
    # DateTime children are an implementation detail
    # Coolbars, Toolbars and Expandbars describe their children directly : they have two parallel children structures
    ignoreChildren = (swt.widgets.CoolBar, swt.widgets.ExpandBar, swt.widgets.ToolBar, swt.widgets.DateTime, swt.widgets.Group)
    # Widgets with no interesting state of their own, vs. widgets whose state
    # changes are tracked between describe calls.
    statelessWidgets = [ swt.widgets.Sash ]
    stateWidgets = [ swt.widgets.Shell, swt.widgets.Button, swt.widgets.Menu, swt.widgets.Link,
                     swt.widgets.CoolBar, swt.widgets.ToolBar, swt.widgets.Label, swt.custom.CLabel,
                     swt.widgets.Combo, swt.widgets.ExpandBar, swt.widgets.Text, swt.widgets.List,
                     swt.widgets.Tree, swt.widgets.DateTime, swt.widgets.TabFolder, swt.widgets.Table,
                     swt.custom.CTabFolder, swt.widgets.Canvas, swt.browser.Browser, swt.custom.CCombo,
                     swt.widgets.Spinner, swt.widgets.Group, swt.widgets.Composite ]
    # Reflection hooks used by the shared base class
    childrenMethodName = "getChildren"
    visibleMethodName = "getVisible"
    def __init__(self, canvasDescriberClasses=[]):
        """Set up counters and bookkeeping used to track widget changes between describes."""
        storytext.guishared.Describer.__init__(self)
        self.canvasCounter = storytext.guishared.WidgetCounter()
        self.contextMenuCounter = storytext.guishared.WidgetCounter(self.contextMenusEqual)
        self.customTooltipCounter = storytext.guishared.WidgetCounter(self.tooltipsEqual)
        # Widgets that have newly appeared / moved since the last describe call
        self.widgetsAppeared = []
        self.widgetsMoved = []
        self.parentsResized = set()
        self.widgetsDescribed = set()
        self.browserStates = {}
        self.clipboardText = None
        self.storedImages = {}
        self.imageToName = {}
        self.handleImages()
        self.screenshotNumber = 0
        self.colorsAdded = False
        # NOTE(review): mutable default argument; harmless here because the
        # list is only stored, never mutated through this attribute.
        self.canvasDescriberClasses = canvasDescriberClasses
    def setWidgetPainted(self, widget):
        # First paint of a widget we have never described counts as it appearing.
        if widget not in self.widgetsDescribed and widget not in self.windows and widget not in self.widgetsAppeared:
            self.logger.debug("Widget painted " + self.getRawData(widget))
            self.widgetsAppeared.append(widget)

    def setWidgetShown(self, widget):
        # Menu show events seem a bit spurious, they aren't really shown at this point:
        # ScrollBar shows are not relevant to anything
        if isinstance(widget, swt.widgets.Control) and widget not in self.widgetsAppeared:
            self.logger.debug("Widget shown " + self.getRawData(widget))
            self.widgetsAppeared.append(widget)
            # A shown widget is reported as appeared, not as moved
            if widget in self.widgetsMoved:
                self.widgetsMoved.remove(widget)

    def setWidgetMoved(self, widget):
        # Moves caused by a parent resize are layout noise, not reordering.
        if isinstance(widget, swt.widgets.Control) and widget not in self.widgetsAppeared and widget.getParent() not in self.parentsResized:
            self.logger.debug("Widget moved " + self.getRawData(widget))
            self.widgetsMoved.append(widget)

    def setWidgetResized(self, widget):
        # Remember both the widget and its parent so child moves can be ignored.
        if isinstance(widget, swt.widgets.Control):
            self.parentsResized.add(widget)
            self.parentsResized.add(widget.getParent())
    def addFilters(self, display):
        """Install display-wide event filters that record widget show/paint/move/resize."""
        class ShowListener(swt.widgets.Listener):
            def handleEvent(listenerSelf, e):
                storytext.guishared.catchAll(self.setWidgetShown, e.widget)

        class PaintListener(swt.widgets.Listener):
            def handleEvent(listenerSelf, e):
                storytext.guishared.catchAll(self.setWidgetPainted, e.widget)

        class MoveListener(swt.widgets.Listener):
            def handleEvent(listenerSelf, e):
                storytext.guishared.catchAll(self.setWidgetMoved, e.widget)

        class ResizeListener(swt.widgets.Listener):
            def handleEvent(listenerSelf, e):
                storytext.guishared.catchAll(self.setWidgetResized, e.widget)

        display.addFilter(swt.SWT.Show, ShowListener())
        display.addFilter(swt.SWT.Paint, PaintListener())
        display.addFilter(swt.SWT.Move, MoveListener())
        display.addFilter(swt.SWT.Resize, ResizeListener())
        display.addFilter(swt.SWT.Dispose, ResizeListener()) # Being disposed is the ultimate resize :)
def getScreenshotFileName(self, screenshotDir):
return os.path.join(screenshotDir, "screenshot" + str(self.screenshotNumber) + ".png")
    def writeScreenshot(self, shell):
        """Capture the shell's screen area to a numbered PNG under $TEXTTEST_LOG_DIR/screenshots."""
        display = shell.getDisplay()
        gc = swt.graphics.GC(display);
        image = swt.graphics.Image(display, shell.getBounds())
        gc.copyArea(image, shell.getBounds().x, shell.getBounds().y)
        gc.dispose()
        imageLoader = swt.graphics.ImageLoader()
        imageLoader.data = [ image.getImageData() ]
        self.screenshotNumber += 1
        screenshotDir = os.path.join(os.getenv("TEXTTEST_LOG_DIR", os.getcwd()), "screenshots")
        if not os.path.isdir(screenshotDir):
            os.makedirs(screenshotDir)
        # Skip over numbers already used by files from earlier runs
        fileName = self.getScreenshotFileName(screenshotDir)
        while os.path.isfile(fileName):
            self.screenshotNumber += 1
            fileName = self.getScreenshotFileName(screenshotDir)
        imageLoader.save(fileName, swt.SWT.IMAGE_PNG)
    def describeWithUpdates(self, shellMethod):
        """Describe the current shell, logging appearance/state changes since the last call."""
        shell = shellMethod()
        if self.writeScreenshots:
            self.writeScreenshot(shell)
        if not self.colorsAdded:
            # System colors can only be fetched once we have a display (UI thread)
            self.colorsAdded = True
            colorNameFinder.addSWTColors(shell.getDisplay())
        if shell in self.windows:
            stateChanges = self.findStateChanges(shell)
            stateChangeWidgets = [ widget for widget, _, _ in stateChanges ]
            if self.structureLog.isEnabledFor(logging.DEBUG):
                for widget in stateChangeWidgets:
                    self.structureLog.info("Widget changed state:")
                    self.describeStructure(widget)
            self.processMovedWidgets()
            describedForAppearance = self.describeAppearedWidgets(stateChangeWidgets, shell)
            self.describeStateChanges(stateChanges, describedForAppearance)
        # Keep only widgets belonging to some other shell for the next round
        # (relies on Python 2/Jython filter() returning a list)
        self.widgetsAppeared = filter(lambda w: not w.isDisposed() and self.inDifferentShell(w, shell), self.widgetsAppeared)
        self.parentsResized = set()
        self.widgetsMoved = []
        if shell is not None:
            self.describeClipboardChanges(shell.getDisplay())
            self.describe(shell)
    def processMovedWidgets(self):
        # We are looking for cases of reordering: at least two widgets in the same parent must have moved for this to happen
        moved = filter(lambda w: not w.isDisposed() and self.describeClass(w.__class__.__name__), self.widgetsMoved)
        if len(moved) > 1:
            self.logger.debug("Handling moved widgets " + repr(map(self.getRawData, moved)))
            parents = [ w.getParent() for w in moved ]
            self.logger.debug("Parents " + repr(map(self.getRawData, parents)))
            for widget in moved:
                # Treat a reordered widget as if it had newly appeared
                if parents.count(widget.getParent()) > 1:
                    self.widgetsAppeared.append(widget)
def shouldCheckForUpdates(self, widget, shell):
return not widget.isDisposed() and widget.getShell() == shell
def inDifferentShell(self, widget, shell):
return not isinstance(widget, swt.widgets.Shell) and widget.getShell() != shell
def validAndShowing(self, widget):
return not widget.isDisposed() and util.isVisible(widget)
def widgetShowing(self, widget, shell):
return self.validAndShowing(widget) and not self.inDifferentShell(widget, shell)
    def describeClipboardChanges(self, display):
        """Log any text newly copied to the system clipboard since the last call."""
        clipboard = swt.dnd.Clipboard(display)
        textTransfer = swt.dnd.TextTransfer.getInstance()
        if self.clipboardText is None:
            # Initially. For some reason it doesn't let us set empty strings here
            # clearContents seemed the way to go, but seems not to work on Windows
            self.clipboardText = "dummy text for StoryText tests"
            clipboard.setContents([ self.clipboardText ], [ textTransfer ])
        else:
            newText = clipboard.getContents(textTransfer) or ""
            if newText != self.clipboardText:
                self.logger.info("Copied following to clipboard :\n" + newText)
                self.clipboardText = newText
        clipboard.dispose()
    def getWindowClasses(self):
        # Top-level window types for SWT: Shells and Dialogs.
        return swt.widgets.Shell, swt.widgets.Dialog

    def getTextEntryClass(self):
        return swt.widgets.Text

    def getWindowString(self):
        # Name used for top-level windows in the description log.
        return "Shell"

    def getShellState(self, shell):
        # A shell's describable state is just its title text.
        return shell.getText()
def getAllItemDescriptions(self, itemBar, indent=0, subItemMethod=None,
prefix="", selection=[], columnCount=0, enclosingJfaceTooltip=None, **kw):
descs = []
for item in itemBar.getItems():
currPrefix = prefix + " " * indent * 2
selected = item in selection or (hasattr(item, "getSelection") and item.getSelection())
if columnCount:
row = [ self.getItemColumnDescription(item, i, currPrefix, selected, enclosingJfaceTooltip) for i in range(columnCount) ]
descs.append(row)
else:
itemDesc = self.getItemDescription(item, currPrefix, selected, enclosingJfaceTooltip)
if itemDesc:
descs.append(itemDesc)
if subItemMethod:
descs += subItemMethod(item, indent, prefix=prefix, selection=selection,
columnCount=columnCount, enclosingJfaceTooltip=enclosingJfaceTooltip, **kw)
return descs
    def getCascadeMenuDescriptions(self, item, indent, storeStatesForSubMenus=False, describeMenus=None, **kw):
        """Describe the cascade (sub)menu attached to menu *item*, if any.

        *describeMenus*, when given, whitelists the menu texts to descend
        into; anything else returns no descriptions.
        """
        cascadeMenu = item.getMenu()
        if cascadeMenu:
            if describeMenus:
                # Strip mnemonic markers before comparing with the whitelist.
                text = item.getText().replace("&", "")
                if text not in describeMenus:
                    return []
            descs = self.getAllItemDescriptions(cascadeMenu, indent+1, subItemMethod=self.getCascadeMenuDescriptions,
                                                storeStatesForSubMenus=storeStatesForSubMenus, **kw)
            if indent == 1 and storeStatesForSubMenus:
                # Remember the rendered state of top-level submenus so later
                # changes can be diffed against it.
                self.widgetsWithState[cascadeMenu] = "\n".join(descs)
            return descs
        else:
            return []
def getSubTreeDescriptions(self, item, indent, **kw):
if item.getExpanded():
return self.getAllItemDescriptions(item, indent+1, subItemMethod=self.getSubTreeDescriptions, **kw)
else:
return []
def getExpandItemDescriptions(self, item, indent, *args, **kw):
if item.getExpanded():
return self.getCoolItemDescriptions(item, indent + 1)
else:
return []
def getToolItemControls(self, item, indent, **kw):
control = item.getControl()
if control:
return [ (control, indent) ]
else:
return []
def getCoolItemDescriptions(self, item, *args, **kw):
return [ self.getItemControlDescription(c, i) for c, i in self.getToolItemControls(item, *args, **kw) ]
def getItemControlDescription(self, control, indent):
descLines = self.getDescription(control).splitlines()
paddedLines = [ " " * indent * 2 + line for line in descLines ]
return "\n".join(paddedLines) + "\n"
def getMenuDescription(self, menu, indent=1, **kw):
return self.getItemBarDescription(menu, indent=indent, subItemMethod=self.getCascadeMenuDescriptions, **kw)
def getMenuState(self, menu):
return self.getMenuDescription(menu, indent=2)
def getMenuBarDescription(self, menubar):
if menubar and self.describeClass("Menu"):
describeMenus = self.excludeClassNames.get("Menu")
return "Menu Bar:\n" + self.getMenuDescription(menubar, storeStatesForSubMenus=True, describeMenus=describeMenus)
else:
return ""
def getExpandBarDescription(self, widget):
state = self.getState(widget)
self.widgetsWithState[widget] = state
return "Expand Bar:\n" + self.getItemBarDescription(widget, indent=1, subItemMethod=self.getExpandItemDescriptions)
def getExpandBarState(self, expandbar):
return expandbar.getChildren(), [ item.getExpanded() for item in expandbar.getItems() ]
def itemStateToString(self, itemState):
if isinstance(itemState, (str, unicode)):
return itemState
else:
return self.getItemControlDescription(*itemState)
def getToolBarDescription(self, toolbar):
itemStates = self.getToolBarState(toolbar)
self.widgetsWithState[toolbar] = itemStates
descs = map(self.itemStateToString, itemStates)
return "\n".join(descs)
def getToolBarState(self, toolbar):
return [ "Tool Bar:" ] + self.getAllItemDescriptions(toolbar, indent=1,
subItemMethod=self.getToolItemControls)
def getCoolBarDescription(self, coolbar):
state = self.getCoolBarState(coolbar)
self.widgetsWithState[coolbar] = state
desc = "Cool Bar"
if state:
desc += " (" + state + ") "
return desc + ":\n" + self.getItemBarDescription(coolbar, indent=1, subItemMethod=self.getCoolItemDescriptions)
def getCoolBarState(self, coolbar):
return colorNameFinder.getNameForWidget(coolbar.getBackground())
def contextMenusEqual(self, menu1, menu2):
return [ (item.getText(), item.getEnabled()) for item in menu1.getItems() ] == \
[ (item.getText(), item.getEnabled()) for item in menu2.getItems() ]
def imagesEqual(self, image1, image2):
return image1.getImageData().data == image2.getImageData().data
def tooltipsEqual(self, data1, data2):
tip1, widget1 = data1
tip2, widget2 = data2
return tip1 == tip2 and widget1 == widget2
def getImageDescription(self, image):
# Seems difficult to get any sensible image information out, there is
# basically no query API for this in SWT
if self.imageDescriptionType == "name":
return self.getImageNameDescription(image)
else:
return self.getDefaultImageDescription(image)
def getImageNameDescription(self, image):
desc = self.getImageName(image)
if desc is not None:
return "Icon '" + desc + "'"
else:
return "Unknown Image"
def getDefaultImageDescription(self, image):
name = "Image"
if self.imageDescriptionType == "number":
name += " " + self.imageCounter.getId(image)
return name
    def getPixels(self, data):
        """Return the pixel values of an SWT ImageData as a flat buffer.

        Used instead of the raw ``data`` bytes when image rows carry padding
        (excess bytes per line), which would defeat a byte-wise comparison.
        """
        # NOTE(review): 'array' must yield a buffer compatible with the Java
        # ImageData.getPixels signature under Jython -- confirm which 'array'
        # name is imported at file level.
        pixels = array('i', (0, ) * data.width * data.height)
        data.getPixels(0, 0, data.width * data.height, pixels, 0)
        return pixels
def imageDataMatches(self, data, data2, hasExcessData):
if hasExcessData:
return self.getPixels(data) == self.getPixels(data2)
else:
return data.data == data2.data
def getImageName(self, image):
name = self.imageToName.get(image)
if name is not None:
return name
data = image.getImageData()
hasExcessData = data.width * data.depth / 8 < data.bytesPerLine
imageDict = self.storedImages.get((data.width, data.height), {})
for name, imgData in imageDict.items():
if self.imageDataMatches(data, imgData, hasExcessData):
baseName = os.path.basename(name)
self.imageToName[image] = baseName
return baseName
def storeImageData(self, url):
imgDesc = ImageDescriptor.createFromURL(url)
name = url.getFile()
self.logger.debug("Storing image data for file " + name)
if imgDesc is not None:
newImage = imgDesc.createImage()
data = newImage.getImageData()
imageDict = self.storedImages.setdefault((data.width, data.height), OrderedDict())
if name not in imageDict:
imageDict[name] = data
newImage.dispose()
def getCanvasDescription(self, widget):
return self.getAndStoreState(widget)
def getCanvasState(self, widget):
for canvasDescriberClass in self.canvasDescriberClasses:
if canvasDescriberClass.canDescribe(widget):
return canvasDescriberClass(widget).getCanvasDescription(self)
return "Canvas " + self.canvasCounter.getId(widget)
def findStyleList(self, item):
for widgetClass, styleList in self.styleNames:
if isinstance(item, widgetClass):
return styleList
return []
def getStyleDescriptions(self, item):
styleList = self.findStyleList(item)
style = item.getStyle()
descs = []
for tryStyle in styleList:
if style & getattr(swt.SWT, tryStyle) != 0:
descs.append(tryStyle.lower().replace("_", " ").replace("separator", "---"))
return descs
def getItemColumnDescription(self, item, colIndex, prefix, *args):
elements = [ item.getText(colIndex) ]
if colIndex:
if item.getImage(colIndex):
elements.append(self.getImageDescription(item.getImage(colIndex)))
else:
elements += self.getPropertyElements(item, *args)
desc = self.combineElements(elements)
if desc and colIndex == 0:
return prefix + desc
else:
return desc
def hasPrivateMethod(self, obj, methodName):
return any((method.getName() == methodName for method in obj.getClass().getDeclaredMethods()))
def getJfaceTooltip(self, item):
for listener in item.getListeners(swt.SWT.MouseHover):
tooltip = self.getEnclosingInstance(listener)
if self.hasPrivateMethod(tooltip, "createToolTipContentArea"):
return tooltip
def getControlDecorations(self, item):
decorations = []
for listener in self.getControlDecorationListeners(item):
deco = self.getEnclosingInstance(listener)
if deco:
decorations.append(deco)
return decorations
def getControlDecorationListeners(self, item):
listeners = []
for typedListener in item.getListeners(swt.SWT.FocusIn):
if hasattr(typedListener, "getEventListener"):
focusListener = typedListener.getEventListener()
if "ControlDecoration" in focusListener.__class__.__name__:
listeners.append(focusListener)
return listeners
def getEnclosingInstance(self, listener):
cls = listener.getClass()
for field in cls.getDeclaredFields():
if field.getName().startswith("this"):
field.setAccessible(True)
return field.get(listener)
def getControlDecorationDescription(self, item):
for deco in self.getControlDecorations(item):
if deco:
image = deco.getImage()
imgDesc = self.getImageDescription(deco.getImage()) if image is not None else ""
if deco and self.decorationVisible(deco):
text = "Decoration " + imgDesc
desc = deco.getDescriptionText()
if desc:
text += "\n'" + desc + "'"
return text
def decorationVisible(self, deco):
if hasattr(deco, "isVisible"): # added in 3.6
return deco.isVisible()
else:
# Workaround for reflection bug in Jython 2.5.1
# args = (None,) if sys.version_info[:3] <= (2, 5, 1) else ()
# Jython 2.5.2 doesn't work anyway so we don't include this fix for now
return util.callPrivateMethod(deco, "shouldShowDecoration")
def isCustomTooltip(self, jfaceTooltip):
return not jfaceTooltip.__class__.__module__.startswith("org.eclipse.jface")
def getToolTipText(self, item, jfaceTooltip):
if hasattr(item, "getToolTipText") and item.getToolTipText():
return item.getToolTipText()
elif jfaceTooltip and not self.isCustomTooltip(jfaceTooltip):
event = self.makeToolTipEvent(item)
if util.callPrivateMethod(jfaceTooltip, "shouldCreateToolTip", [ event ]):
return util.callPrivateMethod(jfaceTooltip, "getText", [ event ])
def getPropertyElements(self, *args, **kw):
return self.getPropertyElementsAndTooltip(*args, **kw)[0]
def getPropertyElementsAndTooltip(self, item, selected=False, enclosingJfaceTooltip=None):
elements = []
decoText = self.getControlDecorationDescription(item)
if decoText:
elements.append(decoText)
if isinstance(item, swt.widgets.Spinner):
elements += self.getSpinnerPropertyElements(item)
jfaceTooltip = enclosingJfaceTooltip if isinstance(item, swt.widgets.Item) else self.getJfaceTooltip(item)
tooltipText = self.getToolTipText(item, jfaceTooltip)
if tooltipText:
elements.append("Tooltip '" + tooltipText + "'")
elements += self.getStyleDescriptions(item)
if hasattr(item, "getImage") and item.getImage():
elements.append(self.getImageDescription(item.getImage()))
if hasattr(item, "getEnabled") and not item.getEnabled():
elements.append("greyed out")
if selected:
elements.append("selected")
elements.append(self.getContextMenuReference(item))
customTooltipText = self.getCustomTooltipReference(item, jfaceTooltip)
if customTooltipText:
elements.append(customTooltipText)
if hasattr(item, "getItemCount") and hasattr(item, "getExpanded") and item.getItemCount() > 0 and not item.getExpanded():
elements.append("+")
return elements, jfaceTooltip
def getSpinnerPropertyElements(self, item):
elements = []
min = item.getMinimum()
if min != 0:
elements.append("Min " + str(min))
elements.append("Max " + str(item.getMaximum()))
step = item.getIncrement()
if step != 1:
elements.append("Step " + str(step))
step = item.getPageIncrement()
if step != 10:
elements.append("Page Step " + str(step))
return elements
def getLabelState(self, label):
if label.getStyle() & swt.SWT.SEPARATOR:
if label.getStyle() & swt.SWT.VERTICAL:
return "-" * 5 + "vertical" + "-" * 5
else:
return "-" * 10
elements = []
if label.getText():
elements.append("'" + label.getText() + "'")
for fontData in label.getFont().getFontData():
fontStyle = fontData.getStyle()
for fontAttr in [ "BOLD", "ITALIC" ]:
if fontStyle & getattr(swt.SWT, fontAttr):
elements.append(fontAttr.lower())
if label.getImage():
elements.append(self.getImageDescription(label.getImage()))
elements.append(self.getContextMenuReference(label))
return self.combineElements(elements)
def getLabelDescription(self, label):
return self.getAndStoreState(label)
getCLabelDescription = getLabelDescription
getCLabelState = getLabelState
def getButtonDescription(self, widget):
desc = "Button"
if widget.getText():
desc += " '" + widget.getText() + "'"
properties = self.getButtonState(widget)
self.widgetsWithState[widget] = properties
elements = [ desc ] + properties
return self.combineElements(elements)
def getButtonState(self, widget):
return self.getPropertyElements(widget, selected=widget.getSelection())
def getSashDescription(self, widget):
orientation = "Horizontal"
if widget.getStyle() & swt.SWT.VERTICAL:
orientation = "Vertical"
return "-" * 15 + " " + orientation + " sash " + "-" * 15
def getLinkDescription(self, widget):
return self.getAndStoreState(widget)
def getLinkState(self, widget):
return "Link '" + widget.getText() + "'"
def getBrowserDescription(self, widget):
state = self.getBrowserState(widget)
self.widgetsWithState[widget] = state
return self.addHeaderAndFooter(widget, state)
def getBrowserState(self, widget):
url = util.getRealUrl(widget)
if url and not url.startswith("file://"):
return url
# Ignore non-breaking spaces, they are invisible anyway
# Webkit returns them in invalid format without the semicolons... handle that too.
text = widget.getText().replace(u"\xa0", " ")
return BrowserHtmlParser().parse(text)
def getUpdatePrefix(self, widget, oldState, state):
if isinstance(widget, (self.getTextEntryClass(), swt.browser.Browser, swt.widgets.Spinner)):
return "\nUpdated " + (util.getTextLabel(widget, useContext=True) or self.getShortWidgetIdentifier(widget) or "Text") + " Field\n"
elif isinstance(widget, (swt.widgets.Combo, swt.custom.CCombo)):
return "\nUpdated " + util.getTextLabel(widget, useContext=True) + " Combo Box\n"
elif util.getTopControl(widget) or isinstance(widget, swt.widgets.Group):
return "\n"
elif isinstance(widget, swt.widgets.Menu):
parentItem = widget.getParentItem()
menuRefNr = self.contextMenuCounter.getWidgetNumber(widget)
menuRefNr = " " + str(menuRefNr) if menuRefNr > 0 else ""
menuName = parentItem.getText() if parentItem else "Context"
return "\nUpdated " + menuName + " Menu" + menuRefNr +":\n"
elif isinstance(widget, (swt.widgets.Label, swt.custom.CLabel)) and len(state) == 0:
return "\nLabel now empty, previously " + oldState
elif isinstance(widget, swt.widgets.Canvas) and not isinstance(widget, swt.custom.CLabel):
for canvasDescriberClass in self.canvasDescriberClasses:
if canvasDescriberClass.canDescribe(widget):
return canvasDescriberClass(widget).getUpdatePrefix(oldState, state)
return "\nUpdated "
def getShortWidgetIdentifier(self, widget):
return widget.getData("org.eclipse.swtbot.widget.key")
def getState(self, widget):
if widget.isDisposed():
# Will be caught, and the widget cleaned up
raise UseCaseScriptError, "Widget is Disposed"
else:
return self.getSpecificState(widget)
def getTextState(self, widget):
return widget.getText(), self.getPropertyElements(widget)
def getComboState(self, widget):
return self.getTextState(widget)
def getTextDescription(self, widget):
contents, properties = self.getState(widget)
self.widgetsWithState[widget] = contents, properties
desc = self.addHeaderAndFooter(widget, contents)
return self.combineElements([ desc ] + properties)
def getComboDescription(self, widget):
return self.getTextDescription(widget)
getCComboDescription = getComboDescription
getCComboState = getComboState
def getTreeDescription(self, widget):
return self.getAndStoreState(widget)
def getTableDescription(self, widget):
return self.getAndStoreState(widget)
def getListDescription(self, widget):
return self.getAndStoreState(widget)
def getDateTimeDescription(self, widget):
return self.getAndStoreState(widget)
def getSpinnerDescription(self, widget):
return self.getTextDescription(widget)
def getSpinnerState(self, widget):
return self.getTextState(widget)
def getDateString(self, widget):
if widget.getStyle() & swt.SWT.TIME:
widgetDate = Date()
widgetDate.setHours(widget.getHours())
widgetDate.setMinutes(widget.getMinutes())
widgetDate.setSeconds(widget.getSeconds())
return util.getDateFormat(swt.SWT.TIME).format(widgetDate)
else:
widgetDate = Date(widget.getYear() - 1900, widget.getMonth(), widget.getDay())
return util.getDateFormat(swt.SWT.DATE).format(widgetDate)
def getDateTimeState(self, widget):
elements = [ "DateTime" ] + self.getPropertyElements(widget) + [ "showing " + self.getDateString(widget) ]
return self.combineElements(elements)
def getListState(self, widget):
text = self.combineElements([ "List" ] + self.getPropertyElements(widget)) + " :\n"
selection = widget.getSelection()
for item in widget.getItems():
text += "-> " + item
if item in selection:
text += " (selected)"
text += "\n"
return text
def getContextMenuReference(self, widget):
if "Menu" not in self.excludeClassNames and not isinstance(widget, swt.widgets.MenuItem) and hasattr(widget, "getMenu") and widget.getMenu():
return "Context Menu " + self.contextMenuCounter.getId(widget.getMenu())
else:
return ""
def getCustomTooltipReference(self, item, jfaceTooltip):
if "CustomTooltip" not in self.excludeClassNames and jfaceTooltip and self.isCustomTooltip(jfaceTooltip):
itemTooltip = self.hasPrivateMethod(jfaceTooltip, "createViewerToolTipContentArea")
isItem = isinstance(item, swt.widgets.Item)
if isItem == itemTooltip:
return "Custom Tooltip " + self.customTooltipCounter.getId((jfaceTooltip, item))
def getTreeState(self, widget):
columns = widget.getColumns()
columnCount = len(columns)
props, jfaceTooltip = self.getPropertyElementsAndTooltip(widget)
text = self.combineElements([ "Tree" ] + props) + " :\n"
rows = self.getAllItemDescriptions(widget, indent=0, subItemMethod=self.getSubTreeDescriptions,
prefix="-> ", selection=widget.getSelection(),
columnCount=columnCount, enclosingJfaceTooltip=jfaceTooltip)
if columnCount > 0:
rows.insert(0, [ c.getText() for c in columns ])
text += str(GridFormatter(rows, columnCount))
else:
text += "\n".join(rows)
return text
def getTableState(self, widget):
columns = widget.getColumns()
columnCount = len(columns)
props, jfaceTooltip = self.getPropertyElementsAndTooltip(widget)
text = self.combineElements([ "Table" ] + props) + " :\n"
rows = self.getAllTableItemDescriptions(widget, indent=0,
selection=widget.getSelection(),
columnCount=columnCount,
enclosingJfaceTooltip=jfaceTooltip)
sortColumn = widget.getSortColumn()
if widget.getSortDirection() == swt.SWT.UP:
sortDirection = "(->)"
elif widget.getSortDirection() == swt.SWT.DOWN:
sortDirection = "(<-)"
else:
sortDirection = ""
headerRow = [ c.getText() + sortDirection if c == sortColumn else c.getText() for c in columns if c.getWidth() > 0] # Don't show hidden columns
return text + self.formatTable(headerRow, rows, max(1, columnCount))
def getAllTableItemDescriptions(self, widget, indent=0,
prefix="", selection=[], columnCount=0, enclosingJfaceTooltip=None):
descs = []
for item in widget.getItems():
currPrefix = prefix + " " * indent * 2
selected = item in selection
if columnCount:
row = [ self.getItemColumnDescription(item, i, currPrefix, selected, enclosingJfaceTooltip)
for i in range(columnCount) if widget.getColumn(i).getWidth() > 0 ]
descs.append(row)
else:
descs.append([ self.getItemDescription(item, currPrefix, selected, enclosingJfaceTooltip) ])
return descs
def getTabFolderDescription(self, widget):
state = self.getState(widget)
self.widgetsWithState[widget] = state
if state:
return "TabFolder with tabs " + state
else:
return "TabFolder with no tabs"
def getTabFolderState(self, widget):
return " , ".join(self.getAllItemDescriptions(widget, selection=widget.getSelection()))
def getCTabFolderState(self, widget):
return " , ".join(self.getAllItemDescriptions(widget, selection=[ widget.getSelection() ]))
getCTabFolderDescription = getTabFolderDescription
def getCompositeState(self, widget):
return util.getTopControl(widget)
def getCompositeDescription(self, widget):
return self.combineElements([ self.getStateControlDescription(widget), self.getContextMenuReference(widget) ])
def getGroupState(self, widget):
return self.getCompositeState(widget), widget.getText()
def getGroupDescription(self, widget):
header = "." * 6 + " " + widget.getText() + " " + "." * 6
footer = "." * len(header)
compositeDesc = self.getCompositeDescription(widget) or self.formatChildrenDescription(widget)
return header + "\n" + str(compositeDesc) + "\n" + footer
def getStateControlDescription(self, widget):
stateControlInfo = self.getState(widget)
stateControl = stateControlInfo[0] if isinstance(stateControlInfo, tuple) else stateControlInfo
if stateControlInfo:
self.widgetsWithState[widget] = stateControlInfo
return self.getDescription(stateControl) if stateControl else ""
def getVerticalDividePositions(self, children):
positions = []
for child in children:
if isinstance(child, swt.widgets.Sash) and child.getStyle() & swt.SWT.VERTICAL:
positions.append(child.getLocation().x)
return sorted(positions)
def layoutSortsChildren(self, widget):
layout = widget.getLayout()
return layout is not None and isinstance(layout, (swt.layout.GridLayout, swt.layout.FillLayout,
swt.layout.RowLayout, swt.custom.StackLayout))
def _getDescription(self, widget):
self.widgetsDescribed.add(widget)
desc = storytext.guishared.Describer._getDescription(self, widget)
if desc and isinstance(widget, (swt.widgets.ExpandBar, swt.widgets.Tree, swt.widgets.List, swt.widgets.Table)):
desc = unicode(desc) + self.formatContextMenuDescriptions()
return desc
def handleGridFormatter(self, formatter):
output = storytext.guishared.Describer.handleGridFormatter(self, formatter)
if isinstance(output, (str, unicode)):
output += self.formatContextMenuDescriptions()
return output
def getWindowContentDescription(self, shell):
desc = ""
desc = self.addToDescription(desc, self.getMenuBarDescription(shell.getMenuBar()))
desc = self.addToDescription(desc, self.getChildrenDescription(shell))
desc += self.formatContextMenuDescriptions()
return desc
def shouldDescribeChildren(self, widget):
# Composites with StackLayout use the topControl rather than the children
return storytext.guishared.Describer.shouldDescribeChildren(self, widget) and not util.getTopControl(widget)
def _getChildrenDescription(self, widget):
if self.shouldDescribeChildren(widget):
return self.formatChildrenDescription(widget)
else:
self.markDescendantsDescribed(widget)
return ""
def markDescendantsDescribed(self, widget):
if hasattr(widget, self.childrenMethodName):
self.logger.debug("Mark descendants for " + self.getRawData(widget))
children = getattr(widget, self.childrenMethodName)()
self.widgetsDescribed.update(children)
for child in children:
self.markDescendantsDescribed(child)
def formatContextMenuDescriptions(self):
text = ""
if "Menu" not in self.excludeClassNames:
for contextMenu, menuId in self.contextMenuCounter.getWidgetsForDescribe():
if not contextMenu.isDisposed():
menuDesc = self.getMenuDescription(contextMenu)
text += "\n\nContext Menu " + str(menuId) + ":\n" + menuDesc
self.widgetsWithState[contextMenu] = self.getMenuState(contextMenu)
if "CustomTooltip" not in self.excludeClassNames:
for (tooltip, widget), tooltipId in self.customTooltipCounter.getWidgetsForDescribe():
text += "\n\nCustom Tooltip " + str(tooltipId) + ":\n" + self.getCustomTooltipDescription(tooltip, widget)
return text
def makeToolTipEvent(self, widgetOrItem):
event = swt.widgets.Event()
event.type = swt.SWT.MouseHover
if isinstance(widgetOrItem, swt.widgets.Item):
event.widget = widgetOrItem.getParent()
event.item = widgetOrItem
bounds = widgetOrItem.getBounds()
event.x = util.getInt(bounds.x) + util.getInt(bounds.width) / 2
event.y = util.getInt(bounds.y) + util.getInt(bounds.height) / 2
else:
event.widget = widgetOrItem
event.item = None
event.x = -1
event.y = -1
return event
def getCustomTooltipDescription(self, tooltip, widget):
event = self.makeToolTipEvent(widget)
if util.callPrivateMethod(tooltip, "shouldCreateToolTip", [ event ]):
shell = swt.widgets.Shell()
result = util.callPrivateMethod(tooltip, "createToolTipContentArea", [ event, shell ], [ swt.widgets.Event, swt.widgets.Composite ])
desc = self.getDescription(result)
result.dispose()
shell.dispose()
return desc
else:
return ""
def getHorizontalSpan(self, widget, columns):
layout = widget.getLayoutData()
if hasattr(layout, "horizontalSpan"):
return min(layout.horizontalSpan, columns)
else:
return 1
def usesGrid(self, widget):
return isinstance(widget.getLayout(), swt.layout.GridLayout)
def getLayoutColumns(self, widget, childCount, sortedChildren):
layout = widget.getLayout()
if hasattr(layout, "numColumns"):
return layout.numColumns
elif hasattr(layout, "type"):
if layout.type == swt.SWT.HORIZONTAL:
return childCount
elif isinstance(layout, swt.layout.FormLayout):
currColumns, maxColumns = 1, 1
for child in sortedChildren:
layoutData = child.getLayoutData()
if layoutData.right and layoutData.right.control:
currColumns += 1
if currColumns > maxColumns:
maxColumns = currColumns
else:
currColumns = 1
return maxColumns
return 1
def getRawDataLayoutDetails(self, layout, *args):
return [ str(layout.numColumns) + " columns" ] if hasattr(layout, "numColumns") else []
def handleImages(self):
if self.imageDescriptionType:
self.buildImages()
def buildImages(self):
self.buildImagesFromPaths()
def buildImagesFromPaths(self):
for path in self.imagePaths:
self.findFiles(File(path))
def getImageFiles(self, path):
d = File(path)
class Filter(FilenameFilter):
def accept(lself, d, fileName):#@NoSelf
return fileName.endswith(".gif") or fileName.endswith(".png") or fileName.endswith(".jpg")
return d.listFiles(Filter())
def findFiles(self, pathAsFile):
if pathAsFile.isFile() and self.isImageType(pathAsFile.getName()):
self.storeImageData(pathAsFile.toURI().toURL())
elif pathAsFile.isDirectory():
for f in pathAsFile.listFiles():
if f is not None:
self.findFiles(f)
def isImageType(self, fileName):
return fileName.endswith(".gif") or fileName.endswith(".png") or fileName.endswith(".jpg")
def checkWindow(self, window):
# Don't describe tooltips
return (window.getStyle() & swt.SWT.TOOL) == 0
def splitState(self, state):
return state if isinstance(state, list) else state.split("\n")
def getStateChangeDescription(self, widget, oldState, state):
if isinstance(widget, (swt.widgets.Menu, swt.widgets.ToolBar)):
old = self.splitState(oldState)
new = self.splitState(state)
if len(old) == len(new):
return self.getDiffedDescription(widget, old, new)
return storytext.guishared.Describer.getStateChangeDescription(self, widget, oldState, state)
def describeStructure(self, widget, indent=0, **kw):
storytext.guishared.Describer.describeStructure(self, widget, indent, **kw)
if isinstance(widget, swt.widgets.Canvas):
for canvasDescriberClass in self.canvasDescriberClasses:
if canvasDescriberClass.canDescribe(widget):
canvasDescriberClass(widget).describeCanvasStructure(indent+1)
|
|
import collections
import math
import numpy as np
from rig.place_and_route import Cores, SDRAM
import struct
from nengo.processes import Process
from nengo.utils import numpy as npext
from nengo_spinnaker.builder.ports import OutputPort
from nengo_spinnaker.builder.netlist import netlistspec
from nengo_spinnaker.netlist import VertexSlice
from nengo_spinnaker import partition
from nengo_spinnaker import regions
from nengo_spinnaker.utils.application import get_application
from nengo_spinnaker.utils.type_casts import np_to_fix
class ValueSource(object):
    """Operator which transmits values from a buffer.

    The output of a function/process/constant is evaluated ahead of time,
    loaded into SpiNNaker memory and streamed out by the machine.
    """
    def __init__(self, function, size_out, period):
        """Create a new source which evaluates the given function over a period
        of time.

        function -- callable f(t), a nengo Process, or a constant value
        size_out -- dimensionality of the output
        period -- repeat period in seconds, or None for a non-periodic source
        """
        self.function = function
        self.size_out = size_out
        self.period = period
        # Vertices
        self.system_region = None
        self.keys_region = None
        self.vertices = list()
    def make_vertices(self, model, n_steps):
        """Create the vertices to be simulated on the machine."""
        # Create the system region
        self.system_region = SystemRegion(model.machine_timestep,
                                          self.period is not None, n_steps)
        # Get all the outgoing signals to determine how big the size out is and
        # to build a list of keys.
        sigs_conns = model.get_signals_from_object(self)
        if len(sigs_conns) == 0:
            # Nothing listens to this source; no vertices are needed.
            return netlistspec([])
        keys = list()
        self.transmission_parameters = list()
        # NOTE(review): assumes every outgoing signal is on
        # OutputPort.standard; a missing key would raise KeyError -- confirm
        # against the builder.
        for sig, transmission_params in sigs_conns[OutputPort.standard]:
            # Add the keys for this connection
            transform, sig_keys = get_transform_keys(sig, transmission_params)
            keys.extend(sig_keys)
            self.transmission_parameters.append((transmission_params,
                                                 transform))
        size_out = len(keys)
        # Build the keys region
        self.keys_region = regions.KeyspacesRegion(
            keys, [regions.KeyField({"cluster": "cluster"})],
            partitioned_by_atom=True
        )
        # Create the output region
        # (zero-filled placeholder; the real values are written into memory
        # by before_simulation just before each run)
        self.output_region = regions.MatrixRegion(
            np.zeros((n_steps, size_out)),
            sliced_dimension=regions.MatrixPartitioning.columns
        )
        self.regions = [self.system_region, self.keys_region,
                        self.output_region]
        # Partition by output dimension to create vertices
        transmit_constraint = partition.Constraint(10)
        sdram_constraint = partition.Constraint(8*2**20)  # Max 8MiB
        constraints = {
            transmit_constraint: lambda s: s.stop - s.start,
            sdram_constraint: (
                lambda s: regions.utils.sizeof_regions(self.regions, s)),
        }
        for sl in partition.partition(slice(0, size_out), constraints):
            # Determine the resources
            resources = {
                Cores: 1,
                SDRAM: regions.utils.sizeof_regions(self.regions, sl),
            }
            vsl = VertexSlice(sl, get_application("value_source"), resources)
            self.vertices.append(vsl)
        # Return the vertices and callback methods
        return netlistspec(self.vertices, self.load_to_machine,
                           self.before_simulation)
    def load_to_machine(self, netlist, controller):
        """Load the values into memory."""
        # For each slice
        self.vertices_region_memory = collections.defaultdict(dict)
        for vertex in self.vertices:
            # Layout the slice of SDRAM we have been given
            region_memory = regions.utils.create_app_ptr_and_region_files(
                netlist.vertices_memory[vertex], self.regions, vertex.slice)
            # Store the location of each region in memory
            for region, mem in zip(self.regions, region_memory):
                self.vertices_region_memory[vertex][region] = mem
            # Write in some of the regions
            # (the output region itself is filled later by before_simulation)
            self.vertices_region_memory[vertex][self.system_region].seek(0)
            self.system_region.write_subregion_to_file(
                self.vertices_region_memory[vertex][self.system_region],
                vertex.slice
            )
            self.vertices_region_memory[vertex][self.keys_region].seek(0)
            self.keys_region.write_subregion_to_file(
                self.vertices_region_memory[vertex][self.keys_region],
                vertex.slice, cluster=vertex.cluster
            )
    def before_simulation(self, netlist, simulator, n_steps):
        """Generate the values to output for the next set of simulation steps.
        """
        # Write out the system region to deal with the current run-time
        self.system_region.n_steps = n_steps
        # Evaluate the node for this period of time
        if self.period is not None:
            # Only evaluate up to one period; the machine repeats the buffer.
            max_n = min(n_steps, int(np.ceil(self.period / simulator.dt)))
        else:
            max_n = n_steps
        ts = np.arange(simulator.steps, simulator.steps + max_n) * simulator.dt
        if callable(self.function):
            values = np.array([self.function(t) for t in ts])
        elif isinstance(self.function, Process):
            values = self.function.run_steps(max_n, d=self.size_out,
                                             dt=simulator.dt)
        else:
            # Constant output: repeat the same value for every timestep.
            values = np.array([self.function for t in ts])
        # Ensure that the values can be sliced, regardless of how they were
        # generated.
        values = npext.array(values, min_dims=2)
        # Compute the output for each connection
        outputs = []
        for transmission_params, transform in self.transmission_parameters:
            output = []
            # For each f(t) for the next set of simulations we calculate the
            # output at the end of the connection. To do this we first apply
            # the pre-slice, then the function and then the post-slice.
            for v in values:
                # Apply the pre-slice
                v = v[transmission_params.pre_slice]
                # Apply the function on the connection, if there is one.
                if transmission_params.function is not None:
                    v = np.asarray(transmission_params.function(v),
                                   dtype=float)
                output.append(np.dot(transform, v.T))
            outputs.append(np.array(output).reshape(max_n, -1))
        # Combine all of the output values to form a large matrix which we can
        # dump into memory.
        output_matrix = np.hstack(outputs)
        new_output_region = regions.MatrixRegion(
            np_to_fix(output_matrix),
            sliced_dimension=regions.MatrixPartitioning.columns
        )
        # Write the simulation values into memory
        for vertex in self.vertices:
            self.vertices_region_memory[vertex][self.system_region].seek(0)
            self.system_region.n_steps = max_n
            self.system_region.write_subregion_to_file(
                self.vertices_region_memory[vertex][self.system_region],
                vertex.slice
            )
            self.vertices_region_memory[vertex][self.output_region].seek(0)
            new_output_region.write_subregion_to_file(
                self.vertices_region_memory[vertex][self.output_region],
                vertex.slice
            )
class SystemRegion(regions.Region):
    """System region for a value source."""

    def __init__(self, timestep, periodic, n_steps):
        # Store all the parameters
        self.timestep = timestep      # simulation timestep (machine units)
        self.periodic = periodic      # whether the output repeats after n_steps
        self.n_steps = n_steps        # number of simulation steps to emit

    def sizeof(self, *args, **kwargs):
        # Six 4-byte words -- must match the "<6I" pack format used in
        # write_subregion_to_file below.
        return 4 * 6

    def write_subregion_to_file(self, fp, vertex_slice, **kwargs):
        """Write the region to a file-like."""
        # Determine the size out, frames per block, number of blocks and last
        # block length.
        size_out = vertex_slice.stop - vertex_slice.start
        # NOTE(review): 20 * 1024 presumably sizes each block of frames to
        # ~20 KiB of 4-byte values -- TODO confirm against the SpiNNaker app.
        frames_per_block = int(math.floor(20 * 1024 / (size_out * 4.0)))
        n_blocks = int(math.floor(self.n_steps / frames_per_block))
        last_block_length = self.n_steps % frames_per_block
        # Serialise as six little-endian unsigned 32-bit words.
        fp.write(struct.pack(
            "<6I", self.timestep, size_out, 0x1 if self.periodic else 0x0,
            n_blocks, frames_per_block, last_block_length
        ))
def get_transform_keys(sig, transmission_params):
    """Return the non-zero rows of a connection's transform and their keys.

    :param sig: signal object associated with each generated key.
    :param transmission_params: object providing ``full_transform``.
    :returns: tuple of (transform with all-zero rows removed,
        list of ``(sig, {"index": i})`` keys, one per retained row).
    """
    # Get the transform for the connection from the list of built connections,
    # then remove zeroed rows (should any exist) and derive the list of keys.
    transform = transmission_params.full_transform(slice_in=False,
                                                   slice_out=False)
    # A row with no non-zero entries carries no information and therefore
    # needs no routing key.
    keep = np.any(transform != 0.0, axis=1)
    # enumerate() replaces the original zip(range(shape[0]), keep) idiom.
    keys = [(sig, {"index": i}) for i, retained in enumerate(keep) if retained]
    return transform[keep], keys
|
|
"""Daemon startup and shutdown helpers."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdi_startstop
#
# Public Functions:
# start_arcyd
# stop_arcyd
# stop_arcyd_pid
# reload_arcyd
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import logging.handlers
import os
import sys
import time
import phlsys_compressedlogging
import phlsys_daemonize
import phlsys_fs
import phlsys_multiprocessing
import phlsys_pid
import phlsys_signal
import phlsys_verboseerrorfilter
import abdt_fs
import abdi_processexitcodes
import abdi_processrepos
import abdi_repoargs
_LOGGER = logging.getLogger(__name__)
def start_arcyd(daemonize=True, loop=True, restart=False, stop_message=''):
    """Start the arcyd daemon, optionally replacing a running instance.

    :daemonize: fork into the background and redirect stdout / stderr
    :loop: keep processing repositories until asked to stop
    :restart: stop any already-running instance instead of raising
    :stop_message: message recorded when stopping a running instance
    :returns: None
    """
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            if restart:
                stop_arcyd_pid(pid, fs.layout.killfile, stop_message)
            else:
                raise Exception("already running")

        if daemonize:
            phlsys_daemonize.do(
                stdout_path=fs.layout.stdout,
                stderr_path=fs.layout.stderr)

        # important that we do this *after* daemonizing
        pid = phlsys_pid.get()
        fs.set_pid(pid)

        parser = argparse.ArgumentParser()
        params = []

        # BUG FIX: the config file handle was previously opened without
        # ever being closed; use a context manager to release it promptly.
        with open(fs.layout.root_config) as config_file:
            for line in config_file:
                params.append(line.strip())

        if not loop:
            params.append('--no-loop')

        repo_configs = abdi_repoargs.parse_config_file_list(
            fs.repo_config_path_list())

    abdi_processrepos.setupParser(parser)
    args = parser.parse_args(params)

    def logger_config():
        _setup_logger(fs, daemonize)

    with phlsys_multiprocessing.logging_context(logger_config):
        _LOGGER.debug("start with args: {}".format(args))
        while True:
            _LOGGER.info("arcyd started")
            try:
                exit_code = abdi_processrepos.process(args, repo_configs)
                _LOGGER.debug("arcyd process loop exit_code: %s" % exit_code)
                if exit_code == abdi_processexitcodes.ExitCodes.ec_exit:
                    break
            finally:
                _LOGGER.info("arcyd stopped")

            # reload the repository configuration before looping again
            _LOGGER.debug("reloading arcyd configuration")
            try:
                with fs.lockfile_context():
                    repo_configs = abdi_repoargs.parse_config_file_list(
                        fs.repo_config_path_list())
            except phlsys_fs.LockfileExistsError:
                _LOGGER.error("couldn't acquire lockfile, reload failed")
def stop_arcyd(message=''):
    """Stop the currently running arcyd instance.

    :message: text recorded as the reason for stopping
    :returns: None
    :raises Exception: if no arcyd instance is running
    """
    accessor = abdt_fs.make_default_accessor()
    with accessor.lockfile_context():
        pid = accessor.get_pid_or_none()
        is_alive = pid is not None and phlsys_pid.is_running(pid)
        if not is_alive:
            raise Exception("Arcyd is not running")
        stop_arcyd_pid(pid, accessor.layout.killfile, message)
def stop_arcyd_pid(pid, killfile, message=''):
    """Ask the arcyd process 'pid' to stop by dropping a killfile, then wait.

    :pid: process id of the running arcyd instance
    :killfile: path the daemon watches for shutdown requests
    :message: text written into the killfile
    :returns: None
    """
    phlsys_fs.write_text_file_atomic(killfile, message)

    # wait for the daemon to acknowledge by deleting the killfile
    first_wait = True
    while os.path.isfile(killfile):
        if not first_wait:
            print('waiting for arcyd to remove killfile ..')
        first_wait = False
        time.sleep(1)

    # wait for Arcyd to not be running
    first_wait = True
    while phlsys_pid.is_running(pid):
        if not first_wait:
            print('waiting for arcyd to exit')
        first_wait = False
        time.sleep(1)
def reload_arcyd():
    """Request that the running arcyd instance reloads its configuration.

    :returns: None
    :raises Exception: if no arcyd instance is running
    """
    accessor = abdt_fs.make_default_accessor()
    with accessor.lockfile_context():
        pid = accessor.get_pid_or_none()
        is_alive = pid is not None and phlsys_pid.is_running(pid)
        if not is_alive:
            raise Exception("Arcyd is not running")
        # NOTE(review): this path is hard-coded rather than taken from
        # accessor.layout -- confirm it matches the daemon's working dir.
        phlsys_fs.write_text_file('var/command/reload', '')
def _setup_logger(fs, is_background):
    """Attach error / info / debug handlers to the root logger.

    Logs DEBUG+, INFO+ and ERROR+ to separate rotating files; also mirrors
    INFO+ to stdout when not running in the background.

    :fs: filesystem accessor providing the log file paths
    :is_background: True when running as a daemon (suppresses stdout logging)
    :returns: None
    """
    # pychecker makes us alias the module, it won't recognise that
    # logging.handlers is a thing
    lg = logging

    error_handler = lg.handlers.RotatingFileHandler(
        fs.layout.log_error,
        maxBytes=10 * 1024 * 1024,
        backupCount=10)
    error_handler.setLevel(logging.ERROR)

    info_handler = lg.handlers.RotatingFileHandler(
        fs.layout.log_info,
        maxBytes=10 * 1024 * 1024,
        backupCount=10)
    info_handler.setLevel(logging.INFO)
    info_handler.addFilter(phlsys_verboseerrorfilter.make_filter())

    debug_handler = phlsys_compressedlogging.CompressedRotatingFileHandler(
        fs.layout.log_debug,
        maxBytes=50 * 1024 * 1024,
        backupCount=10)
    debug_handler.setLevel(logging.DEBUG)

    handlers = [error_handler, info_handler, debug_handler]

    if not is_background:
        stdout_handler = lg.StreamHandler(sys.stdout)
        stdout_handler.setLevel(logging.INFO)
        stdout_handler.addFilter(phlsys_verboseerrorfilter.make_filter())
        handlers.append(stdout_handler)

    logfmt = '%(asctime)s UTC: %(levelname)s: (%(processName)-11s) %(message)s'
    formatter = logging.Formatter(logfmt)
    # render timestamps in UTC, as advertised by the format string
    logging.Formatter.converter = time.gmtime

    root_logger = logging.getLogger()
    for handler in handlers:
        handler.setFormatter(formatter)
        root_logger.addHandler(handler)
# -----------------------------------------------------------------------------
# Copyright (C) 2015-2016 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
|
from datetime import timedelta
import pytest
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from tests.common import get_test_home_assistant
def test_boolean():
    """Test boolean validation."""
    schema = vol.Schema(cv.boolean)

    for invalid in ['T', 'negative', 'lock']:
        with pytest.raises(vol.MultipleInvalid):
            schema(invalid)

    for truthy in ['true', 'On', '1', 'YES', 'enable', 1, True]:
        assert schema(truthy)

    for falsy in ['false', 'Off', '0', 'NO', 'disable', 0, False]:
        assert not schema(falsy)
def test_latitude():
    """Test latitude validation."""
    schema = vol.Schema(cv.latitude)

    # out-of-range and non-numeric values are rejected
    for invalid in ['invalid', None, -91, 91, '-91', '91', '123.01A']:
        with pytest.raises(vol.MultipleInvalid):
            schema(invalid)

    for valid in ['-89', 89, '12.34']:
        schema(valid)
def test_longitude():
    """Test longitude validation."""
    schema = vol.Schema(cv.longitude)

    # out-of-range and non-numeric values are rejected
    for invalid in ['invalid', None, -181, 181, '-181', '181', '123.01A']:
        with pytest.raises(vol.MultipleInvalid):
            schema(invalid)

    for valid in ['-179', 179, '12.34']:
        schema(valid)
def test_platform_config():
    """Test platform config validation."""
    bad_configs = [{}, {'hello': 'world'}]
    for config in bad_configs:
        with pytest.raises(vol.MultipleInvalid):
            cv.PLATFORM_SCHEMA(config)

    good_configs = [
        {'platform': 'mqtt'},
        {'platform': 'mqtt', 'beer': 'yes'},
    ]
    for config in good_configs:
        cv.PLATFORM_SCHEMA(config)
def test_entity_id():
    """Test entity ID validation."""
    schema = vol.Schema(cv.entity_id)

    with pytest.raises(vol.MultipleInvalid):
        schema('invalid_entity')

    # valid IDs are lower-cased on the way through
    assert schema('sensor.LIGHT') == 'sensor.light'
def test_entity_ids():
    """Test entity ID validation."""
    schema = vol.Schema(cv.entity_ids)

    invalid_inputs = [
        'invalid_entity',
        'sensor.light,sensor_invalid',
        ['invalid_entity'],
        ['sensor.light', 'sensor_invalid'],
        ['sensor.light,sensor_invalid'],
    ]
    for value in invalid_inputs:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)

    valid_inputs = [
        [],
        ['sensor.light'],
        'sensor.light',
    ]
    for value in valid_inputs:
        schema(value)

    # a comma-separated string is split, stripped and lower-cased
    assert schema('sensor.LIGHT, light.kitchen ') == [
        'sensor.light', 'light.kitchen'
    ]
def test_event_schema():
    """Test event_schema validation."""
    invalid_events = [
        {},
        None,
        {'event_data': {}},
        {'event': 'state_changed', 'event_data': 1},
    ]
    for event in invalid_events:
        with pytest.raises(vol.MultipleInvalid):
            cv.EVENT_SCHEMA(event)

    valid_events = [
        {'event': 'state_changed'},
        {'event': 'state_changed', 'event_data': {'hello': 'world'}},
    ]
    for event in valid_events:
        cv.EVENT_SCHEMA(event)
def test_platform_validator():
    """Test platform validation."""
    # Prepares loading -- must run before the validator is built so the
    # loader has a home assistant instance to resolve platforms against.
    get_test_home_assistant()

    schema = vol.Schema(cv.platform_validator('light'))

    with pytest.raises(vol.MultipleInvalid):
        schema('platform_that_does_not_exist')

    # 'hue' is a real light platform and should validate
    schema('hue')
def test_icon():
    """Test icon validation."""
    schema = vol.Schema(cv.icon)

    for invalid in [False, 'work', 'icon:work']:
        with pytest.raises(vol.MultipleInvalid):
            schema(invalid)

    # only the mdi: prefix is accepted
    schema('mdi:work')
def test_time_period():
    """Test time_period validation."""
    schema = vol.Schema(cv.time_period)

    invalid_periods = [
        None, '', 1234, 'hello:world', '12:', '12:34:56:78',
        {}, {'wrong_key': -10},
    ]
    for value in invalid_periods:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)

    valid_periods = [
        '8:20', '23:59', '-8:20', '-23:59:59', '-48:00', {'minutes': 5},
    ]
    for value in valid_periods:
        schema(value)

    # negative periods produce negative timedeltas
    assert schema('23:59') == timedelta(hours=23, minutes=59)
    assert schema('-1:15') == -1 * timedelta(hours=1, minutes=15)
def test_service():
    """Test service validation."""
    schema = vol.Schema(cv.service)

    with pytest.raises(vol.MultipleInvalid):
        schema('invalid_turn_on')

    # a valid service reference has a domain and a service name
    schema('homeassistant.turn_on')
def test_service_schema():
    """Test service_schema validation."""
    invalid_configs = [
        {},
        None,
        # both 'service' and 'service_template' given
        {
            'service': 'homeassistant.turn_on',
            'service_template': 'homeassistant.turn_on',
        },
        # 'data' without any service reference
        {'data': {'entity_id': 'light.kitchen'}},
        {'service': 'homeassistant.turn_on', 'data': None},
        # unterminated template in data_template
        {
            'service': 'homeassistant.turn_on',
            'data_template': {'brightness': '{{ no_end'},
        },
    ]
    for config in invalid_configs:
        with pytest.raises(vol.MultipleInvalid):
            cv.SERVICE_SCHEMA(config)

    valid_configs = [
        {'service': 'homeassistant.turn_on'},
        {
            'service': 'homeassistant.turn_on',
            'entity_id': 'light.kitchen',
        },
        {
            'service': 'homeassistant.turn_on',
            'entity_id': ['light.kitchen', 'light.ceiling'],
        },
    ]
    for config in valid_configs:
        cv.SERVICE_SCHEMA(config)
def test_slug():
    """Test slug validation."""
    schema = vol.Schema(cv.slug)

    for invalid in [None, 'hello world']:
        with pytest.raises(vol.MultipleInvalid):
            schema(invalid)

    for valid in [12345, 'hello']:
        schema(valid)
def test_string():
    """Test string validation."""
    schema = vol.Schema(cv.string)

    with pytest.raises(vol.MultipleInvalid):
        schema(None)

    # non-strings are coerced rather than rejected
    for value in [True, 1, 'hello']:
        schema(value)
def test_temperature_unit():
    """Test temperature unit validation."""
    schema = vol.Schema(cv.temperature_unit)

    with pytest.raises(vol.MultipleInvalid):
        schema('K')

    for unit in ['C', 'F']:
        schema(unit)
def test_template():
    """Test template validator."""
    schema = vol.Schema(cv.template)

    # malformed templates are rejected
    for invalid in [None, '{{ partial_print }', '{% if True %}Hello']:
        with pytest.raises(vol.MultipleInvalid):
            schema(invalid)

    valid_templates = [
        1, 'Hello',
        '{{ beer }}',
        '{% if 1 == 1 %}Hello{% else %}World{% endif %}',
    ]
    for value in valid_templates:
        schema(value)
def test_time_zone():
    """Test time zone validation."""
    schema = vol.Schema(cv.time_zone)

    with pytest.raises(vol.MultipleInvalid):
        schema('America/Do_Not_Exist')

    for zone in ['America/Los_Angeles', 'UTC']:
        schema(zone)
def test_key_dependency():
    """Test key_dependency validator."""
    schema = vol.Schema(cv.key_dependency('beer', 'soda'))

    # BUG FIX: the original loop read `for value in ({'beer': None})`.
    # Without a trailing comma that is not a one-element tuple -- it
    # iterates the dict itself, so only the *key string* 'beer' was ever
    # validated, not the intended dict.
    for value in ({'beer': None},):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)

    for value in (
            {'beer': None, 'soda': None},
            {'soda': None}, {}
    ):
        schema(value)
def test_has_at_least_one_key():
    """Test has_at_least_one_key validator."""
    schema = vol.Schema(cv.has_at_least_one_key('beer', 'soda'))

    for invalid in [None, [], {}, {'wine': None}]:
        with pytest.raises(vol.MultipleInvalid):
            schema(invalid)

    for valid in [{'beer': None}, {'soda': None}]:
        schema(valid)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Database Module
# --------------------
import re
import time
import frappe
import datetime
import frappe.defaults
import frappe.model.meta
from frappe import _
from time import time
from frappe.utils import now, getdate, cast_fieldtype, get_datetime
from frappe.model.utils.link_count import flush_local_link_count
class Database(object):
    """
    Open a database connection with the given parameters; if use_default is
    True, use the login details from `conf.py`. This is called by the request
    handler and is accessible using the `db` global variable. The `sql` method
    is also global, to run queries.
    """
    # Length used for VARCHAR columns (presumably the schema-wide
    # default -- confirm against the column-creation code).
    VARCHAR_LEN = 140
    # Maximum permissible column name length.
    MAX_COLUMN_LENGTH = 64

    # Columns that may legitimately be absent from a table.
    OPTIONAL_COLUMNS = ["_user_tags", "_comments", "_assign", "_liked_by"]
    # Shortcut tokens recognised when resolving default values.
    DEFAULT_SHORTCUTS = ['_Login', '__user', '_Full Name', 'Today', '__today', "now", "Now"]
    # Standard columns that are stored as VARCHAR.
    STANDARD_VARCHAR_COLUMNS = ('name', 'owner', 'modified_by', 'parent', 'parentfield', 'parenttype')
    # Columns present on every DocType table.
    DEFAULT_COLUMNS = ['name', 'creation', 'modified', 'modified_by', 'owner', 'docstatus', 'parent',
        'parentfield', 'parenttype', 'idx']

    # Raised when a column name fails validation.
    class InvalidColumnName(frappe.ValidationError): pass
def __init__(self, host=None, user=None, password=None, ac_name=None, use_default=0, port=None):
    """Record connection parameters, falling back to the site config.

    :param host: database host, defaults to `frappe.conf.db_host` / localhost
    :param user: database user, defaults to `frappe.conf.db_name`
    :param password: defaults to `frappe.conf.db_password`
    :param ac_name: account name; overrides `user` when given
    :param use_default: force the site's `db_name` as the user
    :param port: database port, defaults to `frappe.conf.db_port`
    """
    self.setup_type_map()

    conf = frappe.conf
    self.host = host or conf.db_host or '127.0.0.1'
    self.port = port or conf.db_port or ''
    self.db_name = conf.db_name
    self.password = password or conf.db_password

    # Resolve the connection user: explicit argument first, then the
    # account name, with use_default forcing the site's db_name.
    self.user = user or conf.db_name
    if ac_name:
        self.user = ac_name or conf.db_name
    if use_default:
        self.user = conf.db_name

    self._conn = None
    self.transaction_writes = 0
    self.auto_commit_on_many_writes = 0
    self.value_cache = {}
def setup_type_map(self):
    # Hook for database-specific subclasses to populate their column
    # type maps; the base implementation intentionally does nothing.
    pass
def connect(self):
    """Connects to a database as set in `site_config.json`."""
    self.cur_db_name = self.user
    self._conn = self.get_connection()
    self._cursor = self._conn.cursor()
    # A fresh connection has no pending transaction, so clear any
    # rollback observers registered against the old one.
    frappe.local.rollback_observers = []
def use(self, db_name):
    """`USE` db_name.

    Switch the active database on the underlying connection.
    """
    self._conn.select_db(db_name)
def get_connection(self):
    # Implemented by database-specific subclasses; expected to return a
    # live DB-API connection object.
    pass
def get_database_size(self):
    # Implemented by database-specific subclasses.
    pass
def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0,
    debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None, explain=False):
    """Execute a SQL query and fetch all rows.

    :param query: SQL query.
    :param values: List / dict of values to be escaped and substituted in the query.
    :param as_dict: Return as a dictionary.
    :param as_list: Always return as a list.
    :param formatted: Format values like date etc.
    :param debug: Print query and `EXPLAIN` in debug log.
    :param ignore_ddl: Catch exception if table, column missing.
    :param as_utf8: Encode values as UTF 8.
    :param auto_commit: Commit after executing the query.
    :param update: Update this dict to all rows (if returned `as_dict`).
    :param explain: With `debug`, also log the query plan for SELECTs.

    Examples:

        # return customer names as dicts
        frappe.db.sql("select name from tabCustomer", as_dict=True)

        # return names beginning with a
        frappe.db.sql("select name from tabCustomer where name like %s", "a%")

        # values as dict
        frappe.db.sql("select name from tabCustomer where name like %(name)s and owner=%(owner)s",
            {"name": "a%", "owner":"test@example.com"})
    """
    if re.search(r'ifnull\(', query, flags=re.IGNORECASE):
        # replaces ifnull in query with coalesce (portable across
        # MariaDB and Postgres)
        query = re.sub(r'ifnull\(', 'coalesce(', query, flags=re.IGNORECASE)

    if not self._conn:
        self.connect()

    # in transaction validations
    self.check_transaction_status(query)
    self.clear_db_table_cache(query)

    # autocommit
    if auto_commit: self.commit()

    # execute
    try:
        if debug:
            time_start = time()

        self.log_query(query, values, debug, explain)

        if values!=():
            # normalise values to something the driver accepts
            if isinstance(values, dict):
                values = dict(values)

            # MySQL-python==1.2.5 hack!
            if not isinstance(values, (dict, tuple, list)):
                values = (values,)

            self._cursor.execute(query, values)

            if frappe.flags.in_migrate:
                self.log_touched_tables(query, values)

        else:
            self._cursor.execute(query)

            if frappe.flags.in_migrate:
                self.log_touched_tables(query)

        if debug:
            time_end = time()
            frappe.errprint(("Execution time: {0} sec").format(round(time_end - time_start, 2)))

    except Exception as e:
        if frappe.conf.db_type == 'postgres':
            # Postgres aborts the whole transaction on error; roll back
            # so subsequent statements are not rejected.
            self.rollback()

        elif self.is_syntax_error(e):
            # only for mariadb
            frappe.errprint('Syntax error in query:')
            frappe.errprint(query)

        if ignore_ddl and (self.is_missing_column(e) or self.is_missing_table(e) or self.cant_drop_field_or_key(e)):
            pass
        else:
            raise

    if auto_commit: self.commit()

    # no result set (e.g. DML statements)
    if not self._cursor.description:
        return ()

    # scrub output if required
    if as_dict:
        ret = self.fetch_as_dict(formatted, as_utf8)
        if update:
            for r in ret:
                r.update(update)
        return ret
    elif as_list:
        return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
    elif as_utf8:
        return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
    else:
        return self._cursor.fetchall()
def log_query(self, query, values, debug, explain):
    """Emit the interpolated query to the enabled debug channels.

    :param query: SQL statement about to run
    :param values: bind values for the statement
    :param debug: when truthy, print via `frappe.errprint`
    :param explain: with `debug`, also log the plan for SELECTs
    """
    # test runs can force every query to be printed
    print_in_tests = (frappe.conf.get('allow_tests')
        and frappe.cache().get_value('flag_print_sql'))
    if print_in_tests:
        print(self.mogrify(query, values))

    if debug:
        # EXPLAIN only makes sense for SELECT statements
        if explain and query.strip().lower().startswith('select'):
            self.explain_query(query, values)
        frappe.errprint(self.mogrify(query, values))

    # verbose logging when site logging level is 2
    if (frappe.conf.get("logging") or False) == 2:
        frappe.log("<<<< query")
        frappe.log(self.mogrify(query, values))
        frappe.log(">>>>")
def mogrify(self, query, values):
    """Build and return the query string with values interpolated.

    Falls back to returning the raw ``(query, values)`` pair when the
    cursor does not support `mogrify` (it is a driver extension) or
    interpolation fails.
    """
    if not values:
        return query
    try:
        return self._cursor.mogrify(query, values)
    except Exception:
        # BUG FIX: was a bare `except:`; narrowed so SystemExit /
        # KeyboardInterrupt are no longer swallowed.
        return (query, values)
def explain_query(self, query, values=None):
    """Print `EXPLAIN` output for `query` in the error log.

    Failures are reported but never propagated -- this is purely a
    debugging aid.
    """
    try:
        frappe.errprint("--- query explain ---")
        explain_sql = "explain " + query
        if values is None:
            self._cursor.execute(explain_sql)
        else:
            self._cursor.execute(explain_sql, values)
        import json
        frappe.errprint(json.dumps(self.fetch_as_dict(), indent=1))
        frappe.errprint("--- query explain end ---")
    except Exception:
        frappe.errprint("error in query explain")
def sql_list(self, query, values=(), debug=False):
    """Return data as list of single elements (first column).

    Example:

        # doctypes = ["DocType", "DocField", "User", ...]
        doctypes = frappe.db.sql_list("select name from DocType")
    """
    rows = self.sql(query, values, debug=debug)
    return [row[0] for row in rows]
def sql_ddl(self, query, values=(), debug=False):
    """Commit and execute a query. DDL (Data Definition Language) queries that alter schema
    autocommit in MariaDB."""
    self.commit()
    # BUG FIX: `values` was accepted but silently dropped; pass it
    # through so parameterised DDL statements work.
    self.sql(query, values, debug=debug)
def check_transaction_status(self, query):
    """Raises exception if more than 200,000 `INSERT`, `UPDATE` queries are
    executed in one transaction. This is to ensure that writes are always flushed otherwise this
    could cause the system to hang."""
    # Statements that implicitly commit are not allowed once the
    # transaction has pending writes.
    if self.transaction_writes and \
        query and query.strip().split()[0].lower() in ['start', 'alter', 'drop', 'create', "begin", "truncate"]:
        raise Exception('This statement can cause implicit commit')

    # commit / rollback ends the transaction, so reset the counter
    if query and query.strip().lower() in ('commit', 'rollback'):
        self.transaction_writes = 0

    if query[:6].lower() in ('update', 'insert', 'delete'):
        self.transaction_writes += 1
        if self.transaction_writes > 200000:
            if self.auto_commit_on_many_writes:
                self.commit()
            else:
                frappe.throw(_("Too many writes in one request. Please send smaller requests"), frappe.ValidationError)
def fetch_as_dict(self, formatted=0, as_utf8=0):
    """Internal. Converts results to dict.

    Fetches all remaining rows from the cursor and zips each row with
    the column names from the cursor description.
    """
    rows = self._cursor.fetchall()
    if not rows:
        return []

    column_names = [column[0] for column in self._cursor.description]
    out = []
    for row in rows:
        cells = []
        for cell in row:
            if as_utf8 and isinstance(cell, str):
                cell = cell.encode('utf-8')
            cells.append(cell)
        out.append(frappe._dict(zip(column_names, cells)))
    return out
@staticmethod
def clear_db_table_cache(query):
    """Drop the cached table list whenever a DROP / CREATE runs."""
    if not query:
        return
    first_word = query.strip().split()[0].lower()
    if first_word in {'drop', 'create'}:
        frappe.cache().delete_key('db_tables')
@staticmethod
def needs_formatting(result, formatted):
    """Returns true if the first row in the result has a Date, Datetime, Long Int."""
    if not (result and result[0]):
        return False
    for cell in result[0]:
        if isinstance(cell, (datetime.date, datetime.timedelta, datetime.datetime, int)):
            return True
        if formatted and isinstance(cell, (int, float)):
            return True
    return False
def get_description(self):
    """Returns result metadata.

    Thin wrapper over the DB-API cursor's `description` attribute.
    """
    return self._cursor.description
@staticmethod
def convert_to_lists(res, formatted=0, as_utf8=0):
    """Convert tuple output to lists (internal)."""
    def _convert(cell):
        # encode strings when utf-8 byte output was requested
        if as_utf8 and isinstance(cell, str):
            return cell.encode('utf-8')
        return cell

    return [[_convert(cell) for cell in row] for row in res]
def build_conditions(self, filters):
    """Convert filters sent as dict, lists to SQL conditions. filter's key
    is passed by map function, build conditions like:

    * ifnull(`fieldname`, default_value) = %(fieldname)s
    * `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s

    Returns a tuple of (joined condition string, values dict for binding).
    """
    conditions = []
    values = {}

    def _build_condition(key):
        """
        filter's key is passed by map function
        build conditions like:
            * ifnull(`fieldname`, default_value) = %(fieldname)s
            * `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
        """
        _operator = "="
        _rhs = " %(" + key + ")s"
        value = filters.get(key)
        values[key] = value
        if isinstance(value, (list, tuple)):
            # value is a tuple like ("!=", 0)
            _operator = value[0]
            values[key] = value[1]
            if isinstance(value[1], (tuple, list)):
                # value is a list in tuple ("in", ("A", "B"))
                # inline the escaped list; the bind value is no longer needed
                _rhs = " ({0})".format(", ".join(self.escape(v) for v in value[1]))
                del values[key]

        # unknown operators silently fall back to equality
        if _operator not in ["=", "!=", ">", ">=", "<", "<=", "like", "in", "not in", "not like"]:
            _operator = "="

        if "[" in key:
            # "field[default]" syntax maps to coalesce(`field`, default)
            split_key = key.split("[")
            condition = "coalesce(`" + split_key[0] + "`, " + split_key[1][:-1] + ") " \
                + _operator + _rhs
        else:
            condition = "`" + key + "` " + _operator + _rhs

        conditions.append(condition)

    if isinstance(filters, int):
        # docname is a number, convert to string
        filters = str(filters)

    # a bare string is treated as the document name
    if isinstance(filters, str):
        filters = { "name": filters }

    for f in filters:
        _build_condition(f)

    return " and ".join(conditions), values
def get(self, doctype, filters=None, as_dict=True, cache=False):
    """Return the full document row; shorthand for `get_value` with
    fieldname='*'."""
    return self.get_value(
        doctype, filters, "*", as_dict=as_dict, cache=cache)
def get_value(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
    debug=False, order_by=None, cache=False, for_update=False):
    """Returns a document property or list of properties.

    :param doctype: DocType name.
    :param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
    :param fieldname: Column name.
    :param ignore: Don't raise exception if table, column is missing.
    :param as_dict: Return values as dict.
    :param debug: Print query in error log.
    :param order_by: Column to order by

    Example:

        # return first customer starting with a
        frappe.db.get_value("Customer", {"name": ("like a%")})

        # return last login of **User** `test@example.com`
        frappe.db.get_value("User", "test@example.com", "last_login")

        last_login, last_ip = frappe.db.get_value("User", "test@example.com",
            ["last_login", "last_ip"])

        # returns default date_format
        frappe.db.get_value("System Settings", None, "date_format")
    """
    rows = self.get_values(doctype, filters, fieldname, ignore, as_dict, debug,
        order_by, cache=cache, for_update=for_update)
    if not rows:
        return None

    first_row = rows[0]
    # Preserve the historical and/or shortcut: return the whole row when
    # it has multiple columns (or a dict was requested) *and* is truthy,
    # otherwise unwrap the single cell.
    if (len(first_row) > 1 or as_dict) and first_row:
        return first_row
    return first_row[0]
def get_values(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
    debug=False, order_by=None, update=None, cache=False, for_update=False):
    """Returns multiple document properties.

    :param doctype: DocType name.
    :param filters: Filters like `{"x":"y"}` or name of the document.
    :param fieldname: Column name.
    :param ignore: Don't raise exception if table, column is missing.
    :param as_dict: Return values as dict.
    :param debug: Print query in error log.
    :param order_by: Column to order by

    Example:

        # return first customer starting with a
        customers = frappe.db.get_values("Customer", {"name": ("like a%")})

        # return last login of **User** `test@example.com`
        user = frappe.db.get_values("User", "test@example.com", "*")[0]
    """
    out = None
    # serve cached value when the document is addressed by name
    if cache and isinstance(filters, str) and \
        (doctype, filters, fieldname) in self.value_cache:
        return self.value_cache[(doctype, filters, fieldname)]

    if not order_by: order_by = 'modified desc'

    if isinstance(filters, list):
        out = self._get_value_for_many_names(doctype, filters, fieldname, debug=debug)

    else:
        # normalise a single fieldname to a one-element list
        fields = fieldname
        if fieldname!="*":
            if isinstance(fieldname, str):
                fields = [fieldname]
            else:
                fields = fieldname

        if (filters is not None) and (filters!=doctype or doctype=="DocType"):
            try:
                out = self._get_values_from_table(fields, filters, doctype, as_dict, debug, order_by, update, for_update=for_update)
            except Exception as e:
                if ignore and (frappe.db.is_missing_column(e) or frappe.db.is_table_missing(e)):
                    # table or column not found, return None
                    out = None
                elif (not ignore) and frappe.db.is_table_missing(e):
                    # table not found, look in singles
                    out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)

                else:
                    raise
        else:
            # filters == doctype implies a Single DocType
            out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)

    if cache and isinstance(filters, str):
        self.value_cache[(doctype, filters, fieldname)] = out

    return out
def get_values_from_single(self, fields, filters, doctype, as_dict=False, debug=False, update=None):
    """Get values from `tabSingles` (Single DocTypes) (internal).

    :param fields: List of fields,
    :param filters: Filters (dict).
    :param doctype: DocType name.
    """
    # TODO
    # if not frappe.model.meta.is_single(doctype):
    # 	raise frappe.DoesNotExistError("DocType", doctype)

    if fields=="*" or isinstance(filters, dict):
        # check if single doc matches with filters
        values = self.get_singles_dict(doctype)
        if isinstance(filters, dict):
            for key, value in filters.items():
                if values.get(key) != value:
                    return []

        if as_dict:
            return values and [values] or []

        if isinstance(fields, list):
            # BUG FIX: this returned `[map(values.get, fields)]`, which on
            # Python 3 yields a list containing a one-shot map *iterator*
            # rather than the field values; materialise the list instead.
            return [[values.get(f) for f in fields]]

    else:
        r = self.sql("""select field, value
            from `tabSingles` where field in (%s) and doctype=%s"""
                % (', '.join(['%s'] * len(fields)), '%s'),
                tuple(fields) + (doctype,), as_dict=False, debug=debug)

        if as_dict:
            if r:
                r = frappe._dict(r)
                if update:
                    r.update(update)
                return [r]
            else:
                return []
        else:
            # return a single row of values, in the order fetched
            return r and [[i[1] for i in r]] or []
def get_singles_dict(self, doctype, debug = False):
    """Get Single DocType as dict.

    :param doctype: DocType of the single object whose value is requested

    Example:

        # Get column and value of the single doctype Accounts Settings
        account_settings = frappe.db.get_singles_dict("Accounts Settings")
    """
    rows = self.sql("""
        SELECT field, value
        FROM `tabSingles`
        WHERE doctype = %s
    """, doctype)
    # result = _cast_result(doctype, result)
    return frappe._dict(rows)
@staticmethod
def get_all(*args, **kwargs):
    """Thin proxy for `frappe.get_all`."""
    return frappe.get_all(*args, **kwargs)
@staticmethod
def get_list(*args, **kwargs):
    """Thin proxy for `frappe.get_list`."""
    return frappe.get_list(*args, **kwargs)
def get_single_value(self, doctype, fieldname, cache=False):
    """Get property of Single DocType. Cache locally by default

    :param doctype: DocType of the single object whose value is requested
    :param fieldname: `fieldname` of the property whose value is requested
    :param cache: accepted for API compatibility; values are cached either way

    Example:

        # Get the default value of the company from the Global Defaults doctype.
        company = frappe.db.get_single_value('Global Defaults', 'default_company')
    """
    # BUG FIX: this used to read
    #     self.value_cache = self.value_cache[doctype] = {}
    # which replaced the *entire* cache with a fresh dict (discarding all
    # other doctypes) and made the new entry refer to the cache itself.
    if doctype not in self.value_cache:
        self.value_cache[doctype] = {}

    if fieldname in self.value_cache[doctype]:
        return self.value_cache[doctype][fieldname]

    val = self.sql("""select `value` from
        `tabSingles` where `doctype`=%s and `field`=%s""", (doctype, fieldname))
    val = val[0][0] if val else None

    df = frappe.get_meta(doctype).get_field(fieldname)
    if not df:
        frappe.throw(_('Invalid field name: {0}').format(frappe.bold(fieldname)), self.InvalidColumnName)

    # cast the raw tabSingles text back to the field's declared type
    val = cast_fieldtype(df.fieldtype, val)

    self.value_cache[doctype][fieldname] = val
    return val
def get_singles_value(self, *args, **kwargs):
    """Alias for get_single_value, kept for backward compatibility."""
    return self.get_single_value(*args, **kwargs)
def _get_values_from_table(self, fields, filters, doctype, as_dict, debug, order_by=None, update=None, for_update=False):
    # Build the SELECT column list: expressions / aliases pass through
    # untouched, plain column names are backtick-quoted.
    fl = []
    if isinstance(fields, (list, tuple)):
        for f in fields:
            if "(" in f or " as " in f: # function
                fl.append(f)
            else:
                fl.append("`" + f + "`")
        fl = ", ".join(fl)
    else:
        fl = fields
        # "*" only makes sense as a dict result
        if fields=="*":
            as_dict = True

    conditions, values = self.build_conditions(filters)

    order_by = ("order by " + order_by) if order_by else ""

    r = self.sql("select {fields} from `tab{doctype}` {where} {conditions} {order_by} {for_update}"
        .format(
            for_update = 'for update' if for_update else '',
            fields = fl,
            doctype = doctype,
            where = "where" if conditions else "",
            conditions = conditions,
            order_by = order_by),
        values, as_dict=as_dict, debug=debug, update=update)

    return r
def _get_value_for_many_names(self, doctype, names, field, debug=False):
    """Fetch `field` for several documents in a single query (internal)."""
    valid_names = [name for name in names if name]
    if not valid_names:
        # NOTE(review): empty input returns a dict while the normal path
        # returns a list -- kept as-is for backward compatibility.
        return {}
    return self.get_all(
        doctype,
        fields=['name', field],
        filters=[['name', 'in', valid_names]],
        debug=debug, as_list=1)
def update(self, *args, **kwargs):
    """Update one or more values; alias for `set_value`."""
    return self.set_value(*args, **kwargs)
def set_value(self, dt, dn, field, val=None, modified=None, modified_by=None,
    update_modified=True, debug=False, for_update=True):
    """Set a single value in the database, do not call the ORM triggers
    but update the modified timestamp (unless specified not to).

    **Warning:** this function will not call Document events and should be avoided in normal cases.

    :param dt: DocType name.
    :param dn: Document name.
    :param field: Property / field name or dictionary of values to be updated
    :param val: Value to be updated.
    :param modified: Use this as the `modified` timestamp.
    :param modified_by: Set this user as `modified_by`.
    :param update_modified: default True. Set as false, if you don't want to update the timestamp.
    :param debug: Print the query in the developer / js console.
    :param for_update: Will add a row-level lock to the value that is being set so that it can be released on commit.
    """
    if not modified:
        modified = now()
    if not modified_by:
        modified_by = frappe.session.user

    to_update = {}
    if update_modified:
        to_update = {"modified": modified, "modified_by": modified_by}

    # `field` may be a single fieldname or a dict of updates
    if isinstance(field, dict):
        to_update.update(field)
    else:
        to_update.update({field: val})

    if dn and dt!=dn:
        # with table
        set_values = []
        for key in to_update:
            set_values.append('`{0}`=%({0})s'.format(key))

        # update every document matched by `dn` (name or filters)
        for name in self.get_values(dt, dn, 'name', for_update=for_update):
            values = dict(name=name[0])
            values.update(to_update)

            self.sql("""update `tab{0}`
                set {1} where name=%(name)s""".format(dt, ', '.join(set_values)),
                values, debug=debug)

    else:
        # for singles
        keys = list(to_update)
        # delete-then-insert keeps tabSingles free of duplicate fields
        self.sql('''
            delete from `tabSingles`
            where field in ({0}) and
                doctype=%s'''.format(', '.join(['%s']*len(keys))),
            list(keys) + [dt], debug=debug)
        for key, value in to_update.items():
            self.sql('''insert into `tabSingles` (doctype, field, value) values (%s, %s, %s)''',
                (dt, key, value), debug=debug)

    # stale cached values must be discarded after a write
    if dt in self.value_cache:
        del self.value_cache[dt]

    frappe.clear_document_cache(dt, dn)
@staticmethod
def set(doc, field, val):
    """Set a value on `doc` via its own `db_set`. **Avoid** -- call
    `doc.db_set` directly instead."""
    doc.db_set(field, val)
def touch(self, doctype, docname):
    """Set the document's `modified` timestamp to now and return it."""
    timestamp = now()
    self.sql("""update `tab{doctype}` set `modified`=%s
        where name=%s""".format(doctype=doctype), (timestamp, docname))
    return timestamp
@staticmethod
def set_temp(value):
    """Store a temporary value in the cache and return its generated key."""
    key = frappe.generate_hash()
    frappe.cache().hset("temp", key, value)
    return key
@staticmethod
def get_temp(key):
    """Return the temporary value stored under `key`."""
    # NOTE(review): despite the original docstring, hget does not delete
    # the entry.
    return frappe.cache().hget("temp", key)
def set_global(self, key, val, user='__global'):
    """Save a global key/value pair (stored as a default for the `__global` user)."""
    self.set_default(key, val, user)
def get_global(self, key, user='__global'):
    """Return the value of a global key (a default stored for `__global`)."""
    return self.get_default(key, user)
def get_default(self, key, parent="__default"):
    """Return the default value for `key` — the first item if multiple are stored.

    :param key: default key name.
    :param parent: defaults namespace, usually "__default" or a user.

    Fix: the original `isinstance(d, list) and d[0] or d` and/or idiom
    returned the whole list when the first element was falsy (e.g. "" or 0)
    and raised IndexError on an empty list; the conditional expression
    handles both cases correctly.
    """
    d = self.get_defaults(key, parent)
    return d[0] if isinstance(d, list) and d else d
@staticmethod
def set_default(key, val, parent="__default", parenttype=None):
    """Set a global / user default value (delegates to frappe.defaults)."""
    frappe.defaults.set_default(key, val, parent, parenttype)
@staticmethod
def add_default(key, val, parent="__default", parenttype=None):
    """Append a default value for `key`; a key may hold multiple defaults."""
    frappe.defaults.add_default(key, val, parent, parenttype)
@staticmethod
def get_defaults(key=None, parent="__default"):
    """Return all defaults for `parent`, or just the value(s) for `key`.

    Falls back to the scrubbed (snake_cased) key when the literal key is
    not found.
    """
    defaults = frappe.defaults.get_defaults(parent)
    if not key:
        return defaults
    value = defaults.get(key, None)
    if not value and key != frappe.scrub(key):
        value = defaults.get(frappe.scrub(key), None)
    return value
def begin(self):
    """Open a new transaction (SQL `START TRANSACTION`)."""
    self.sql("START TRANSACTION")
def commit(self):
    """Commit the current transaction (SQL `COMMIT`) and run post-commit hooks."""
    # Run callbacks registered via add_before_commit first.
    for hook in frappe.local.before_commit:
        fn, args, kwargs = hook[0], hook[1], hook[2]
        frappe.call(fn, *(args or []), **(kwargs or {}))
    self.sql("commit")
    frappe.local.rollback_observers = []
    self.flush_realtime_log()
    enqueue_jobs_after_commit()
    flush_local_link_count()
def add_before_commit(self, method, args=None, kwargs=None):
    """Register `method` to be called with args/kwargs just before commit."""
    frappe.local.before_commit.append([method, args, kwargs])
@staticmethod
def flush_realtime_log():
    # Emit each buffered realtime event via redis, then clear the buffer
    # so events are not re-sent on the next commit.
    for args in frappe.local.realtime_log:
        frappe.realtime.emit_via_redis(*args)
    frappe.local.realtime_log = []
def rollback(self):
    """`ROLLBACK` the current transaction, reopen one, and notify observers."""
    self.sql("rollback")
    self.begin()
    # Give registered observers a chance to undo in-memory state.
    for observer in frappe.local.rollback_observers:
        if hasattr(observer, "on_rollback"):
            observer.on_rollback()
    frappe.local.rollback_observers = []
def field_exists(self, dt, fn):
    """Return truthy if DocType `dt` declares a field named `fn`."""
    return self.exists('DocField', {'fieldname': fn, 'parent': dt})
def table_exists(self, doctype):
    """Return True if the table for `doctype` exists in the database."""
    table = "tab" + doctype
    return table in self.get_tables()
def has_table(self, doctype):
    """Alias for :meth:`table_exists`."""
    return self.table_exists(doctype)
def get_tables(self):
    """Return the set of database table names, cached under 'db_tables'."""
    tables = frappe.cache().get_value('db_tables')
    if not tables:
        rows = self.sql("""
            SELECT table_name
            FROM information_schema.tables
            WHERE table_schema NOT IN ('pg_catalog', 'information_schema')
        """)
        tables = {row[0] for row in rows}
        frappe.cache().set_value('db_tables', tables)
    return tables
def a_row_exists(self, doctype):
    """Return a truthy result if at least one row exists for `doctype`."""
    return self.sql("select name from `tab{doctype}` limit 1".format(doctype=doctype))
def exists(self, dt, dn=None, cache=False):
    """Return truthy if the document exists.

    :param dt: DocType name, or a filter dict containing a 'doctype' key.
    :param dn: Document name (ignored when `dt` is a dict)."""
    if isinstance(dt, str):
        # A non-DocType single (doctype name == document name) always exists.
        if dt == dn and dt != "DocType":
            return True
        try:
            return self.get_value(dt, dn, "name", cache=cache)
        except Exception:
            return None
    if isinstance(dt, dict) and dt.get('doctype'):
        try:
            filters = [[key, '=', dt[key]] for key in dt if key != 'doctype']
            return self.get_all(dt['doctype'], filters=filters, as_list=1)
        except Exception:
            return None
def count(self, dt, filters=None, debug=False, cache=False):
    """Returns `COUNT(*)` for given DocType and filters.

    :param dt: DocType name (interpolated into the query; assumed trusted).
    :param filters: optional filters passed through build_conditions.
    :param debug: print the query.
    :param cache: cache the result for a day (unfiltered counts only).
    """
    # Cached counts are only kept for unfiltered queries.
    if cache and not filters:
        cache_count = frappe.cache().get_value('doctype:count:{}'.format(dt))
        if cache_count is not None:
            return cache_count
    if filters:
        conditions, filters = self.build_conditions(filters)
        count = self.sql("""select count(*)
            from `tab%s` where %s""" % (dt, conditions), filters, debug=debug)[0][0]
        return count
    else:
        count = self.sql("""select count(*)
            from `tab%s`""" % (dt,))[0][0]
        if cache:
            # Unfiltered count cached for 24 hours.
            frappe.cache().set_value('doctype:count:{}'.format(dt), count, expires_in_sec = 86400)
        return count
@staticmethod
def format_date(date):
    """Return `date` normalised to a 'YYYY-MM-DD' string."""
    return getdate(date).strftime("%Y-%m-%d")
@staticmethod
def format_datetime(datetime):
if not datetime:
return '0001-01-01 00:00:00.000000'
if isinstance(datetime, str):
if ':' not in datetime:
datetime = datetime + ' 00:00:00.000000'
else:
datetime = datetime.strftime("%Y-%m-%d %H:%M:%S.%f")
return datetime
def get_creation_count(self, doctype, minutes):
    """Return how many `doctype` records were created in the last `minutes` minutes."""
    from frappe.utils import now_datetime
    from dateutil.relativedelta import relativedelta

    cutoff = now_datetime() - relativedelta(minutes=minutes)
    return self.sql("""select count(name) from `tab{doctype}`
        where creation >= %s""".format(doctype=doctype), cutoff)[0][0]
def get_db_table_columns(self, table):
    """Return column names of `table`, caching non-empty results."""
    columns = frappe.cache().hget('table_columns', table)
    if columns is None:
        rows = self.sql('''
            select column_name
            from information_schema.columns
            where table_name = %s ''', table)
        columns = [row[0] for row in rows]
        # Only cache when the table actually exists (has columns).
        if columns:
            frappe.cache().hset('table_columns', table, columns)
    return columns
def get_table_columns(self, doctype):
    """Return column names for `doctype`; raise TableMissingError if absent."""
    columns = self.get_db_table_columns('tab' + doctype)
    if not columns:
        raise self.TableMissingError('DocType', doctype)
    return columns
def has_column(self, doctype, column):
    """Return True if `column` exists on the table for `doctype`."""
    return column in self.get_table_columns(doctype)
def get_column_type(self, doctype, column):
    """Return the column_type string reported by information_schema for the column."""
    query = '''SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'tab{0}' AND column_name = '{1}' '''.format(doctype, column)
    return self.sql(query)[0][0]
def has_index(self, table_name, index_name):
    # No-op here; implemented by the backend-specific subclass.
    pass
def add_index(self, doctype, fields, index_name=None):
    # No-op here; implemented by the backend-specific subclass.
    pass
def add_unique(self, doctype, fields, constraint_name=None):
    # No-op here; implemented by the backend-specific subclass.
    pass
@staticmethod
def get_index_name(fields):
index_name = "_".join(fields) + "_index"
# remove index length if present e.g. (10) from index name
index_name = re.sub(r"\s*\([^)]+\)\s*", r"", index_name)
return index_name
def get_system_setting(self, key):
    """Return one System Settings value, loading/caching the whole singles dict."""
    def _load_system_settings():
        return self.get_singles_dict("System Settings")
    settings = frappe.cache().get_value("system_settings", _load_system_settings)
    return settings.get(key)
def close(self):
    """Close the database connection and drop the cursor/connection handles."""
    if not self._conn:
        return
    self._conn.close()
    self._cursor = None
    self._conn = None
@staticmethod
def escape(s, percent=True):
    """Escape quotes and percent in given string."""
    # Implemented in the backend-specific subclass (MariaDB / Postgres).
    pass
@staticmethod
def is_column_missing(e):
    # Delegate to the active backend, which inspects the driver-specific
    # error code on exception `e`.
    return frappe.db.is_missing_column(e)
def get_descendants(self, doctype, name):
    """Return names of all descendants of `name` in a nested-set (lft/rgt) tree."""
    bounds = self.get_value(doctype, name, ('lft', 'rgt'))
    if not bounds:
        # Document does not exist.
        return []
    lft, rgt = bounds
    return self.sql_list('''select name from `tab{doctype}`
        where lft > {lft} and rgt < {rgt}'''.format(doctype=doctype, lft=lft, rgt=rgt))
def is_missing_table_or_column(self, e):
    """Return truthy if exception `e` indicates a missing column or table."""
    return self.is_missing_column(e) or self.is_missing_table(e)
def multisql(self, sql_dict, values=(), **kwargs):
    """Run the query from `sql_dict` matching the active backend dialect."""
    dialect = frappe.db.db_type or 'mariadb'
    return self.sql(sql_dict.get(dialect), values, **kwargs)
def delete(self, doctype, conditions, debug=False):
    """Delete rows of `doctype` matching `conditions`; conditions are required."""
    if not conditions:
        # Refuse to delete the whole table.
        frappe.throw(_('No conditions provided'))
    where, values = self.build_conditions(conditions)
    return self.sql("DELETE FROM `tab{doctype}` where {conditions}".format(
        doctype=doctype,
        conditions=where
    ), values, debug=debug)
def get_last_created(self, doctype):
    """Return the creation timestamp of the newest record, or None if empty."""
    rows = self.get_all(doctype, ('creation'), limit=1, order_by='creation desc')
    if not rows:
        return None
    return get_datetime(rows[0].creation)
def clear_table(self, doctype):
    """Remove all rows of `doctype` via TRUNCATE (implicitly commits)."""
    table = 'tab{}'.format(doctype)
    self.sql('truncate `{}`'.format(table))
def log_touched_tables(self, query, values=None):
    # Record the tables touched by a write query in frappe.flags.touched_tables.
    if values:
        # Render the parameterised query so table names appear literally.
        query = frappe.safe_decode(self._cursor.mogrify(query, values))
    if query.strip().lower().split()[0] in ('insert', 'delete', 'update', 'alter', 'drop', 'rename'):
        # single_word_regex is designed to match following patterns
        # `tabXxx`, tabXxx and "tabXxx"
        # multi_word_regex is designed to match following patterns
        # `tabXxx Xxx` and "tabXxx Xxx"
        # ([`"]?) Captures " or ` at the beginning of the table name (if provided)
        # \1 matches the first captured group (quote character) at the end of the table name
        # multi word table name must have surrounding quotes.
        # (tab([A-Z]\w+)( [A-Z]\w+)*) Captures table names that start with "tab"
        # and are continued with multiple words that start with a capital letter
        # e.g. 'tabXxx' or 'tabXxx Xxx' or 'tabXxx Xxx Xxx' and so on
        single_word_regex = r'([`"]?)(tab([A-Z]\w+))\1'
        multi_word_regex = r'([`"])(tab([A-Z]\w+)( [A-Z]\w+)+)\1'
        tables = []
        for regex in (single_word_regex, multi_word_regex):
            # findall returns group tuples; group index 1 is the table name.
            tables += [groups[1] for groups in re.findall(regex, query)]
        if frappe.flags.touched_tables is None:
            frappe.flags.touched_tables = set()
        frappe.flags.touched_tables.update(tables)
def bulk_insert(self, doctype, fields, values, ignore_duplicates=False):
    """
    Insert multiple records at a time, in batches of 10,000 rows.

    :param doctype: Doctype name
    :param fields: list of fields
    :params values: list of list of values
    :param ignore_duplicates: use INSERT IGNORE to skip duplicate keys.

    Fix: the previous flush condition (`idx and (idx%10000 == 0 or
    idx < len(values)-1)`) flushed on almost every row and silently
    dropped the trailing batch — a single-row call was never inserted
    at all. Rows are now accumulated and flushed every 10,000 rows,
    with the remainder flushed after the loop.
    """
    field_list = ", ".join("`" + field + "`" for field in fields)

    def _flush(batch):
        # One INSERT per batch; each %s placeholder expands to one row tuple.
        self.sql("""INSERT {ignore_duplicates} INTO `tab{doctype}` ({fields}) VALUES {values}""".format(
            ignore_duplicates="IGNORE" if ignore_duplicates else "",
            doctype=doctype,
            fields=field_list,
            values=", ".join(['%s'] * len(batch))
        ), tuple(batch))

    insert_list = []
    for value in values:
        insert_list.append(tuple(value))
        if len(insert_list) >= 10000:
            _flush(insert_list)
            insert_list = []
    if insert_list:
        _flush(insert_list)
def enqueue_jobs_after_commit():
    """Enqueue background jobs collected during the transaction, then clear the list."""
    from frappe.utils.background_jobs import execute_job, get_queue

    pending = frappe.flags.enqueue_after_commit
    if pending:
        for job in pending:
            queue = get_queue(job.get("queue"), is_async=job.get("is_async"))
            queue.enqueue_call(execute_job, timeout=job.get("timeout"),
                kwargs=job.get("queue_args"))
        frappe.flags.enqueue_after_commit = []
# Helpers
def _cast_result(doctype, result):
    """Cast each (field, value) pair in `result` to its DocType fieldtype.

    Returns the rows as a tuple of tuples; if the doctype meta cannot be
    loaded, the original result is returned unchanged.
    """
    casted = []
    try:
        for field, value in result:
            df = frappe.get_meta(doctype).get_field(field)
            if df:
                value = cast_fieldtype(df.fieldtype, value)
            casted.append((field, value))
    except frappe.exceptions.DoesNotExistError:
        return result
    return tuple(casted)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.