input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
pass
elif self.dPath.exists():
# is this a zip archive?
f = self.dPath.open("rb")
try:
if zipfile.is_zipfile(f):
name, ext = tail.splitext()
if ext.lower() == ".zip":
self.packageName = name
self.ExpandZip(f)
else:
# anything else must be a manifest file
self.dPath = head
mPath = tail
head, tail = self.dPath.split()
if str(mPath.normcase()) != 'imsmanifest.xml':
raise CPManifestError(
"%s must be named imsmanifest.xml" % str(mPath))
self.packageName = str(tail)
finally:
f.close()
else:
self.dPath.mkdir()
if not isinstance(self.dPath, vfs.VirtualFilePath):
import traceback
traceback.print_stack()
mPath = self.dPath.join('imsmanifest.xml')
if mPath.exists():
self.manifest = self.ManifestDocumentClass(
baseURI=str(uri.URI.from_virtual_path(mPath)))
"""The :py:class:`ManifestDocument` object representing the imsmanifest.xml file.
The file is read (or created) on construction."""
self.manifest.read()
if not isinstance(self.manifest.root, Manifest):
raise CPManifestError("%s not a manifest file, found %s::%s " %
(mPath, self.manifest.root.ns, self.manifest.root.xmlname))
else:
self.manifest = self.ManifestDocumentClass(root=Manifest,
baseURI=str(uri.URI.from_virtual_path(mPath)))
self.manifest.root.set_id(
self.manifest.get_unique_id('manifest'))
md = self.manifest.root.add_child(
self.manifest.root.MetadataClass)
md.add_child(md.SchemaClass).set_value("IMS Content")
md.add_child(md.SchemaVersionClass).set_value("1.2")
self.manifest.create()
self.SetIgnoreFiles(IGNOREFILES_RE)
self.fileTable = {}
"""The fileTable is a dictionary that maps package relative file
paths to the :py:class:`File` objects that represent them in the
manifest.
It is possible for a file to be referenced multiple times (although
dependencies were designed to take care of most cases it is still
possible for two resources to share a physical file, or even for a
resource to contain multiple references to the same file.) Therefore,
the dictionary values are lists of :py:class:`File` objects.
If a file path maps to an empty list then a file exists in the package
which is not referenced by any resource. In some packages it is commone
for auxiliary files such as supporting schemas to be included in
packages without a corresponding :py:class:`File` object so an empty
list does not indicate that the file can be removed safely. These files
are still included when packaging the content package for
interchange.
Finally, if a file referred to by a :py:class:`File` object in the
manifest is missing an entry is still created in the fileTable. You
can walk the keys of the fileTable testing if each file exists to
determine if some expected files are missing from the package.
The keys in fileTable are VirtualFilePath instances. To convert a
string to an appropriate instance use the :py:meth:`FilePath` method."""
self.RebuildFileTable()
errorFlag = False
finally:
if errorFlag:
self.Close()
def FilePath(self, *path):
"""Converts a string into a :py:class:`pyslet.vfs.VirtualFilePath`
instance suitable for using as a key into the :py:attr:`fileTable`. The
conversion is done using the file system of the content package's
directory, :py:attr:`dPath`."""
return self.dPath.__class__(*path)
def SetIgnoreFiles(self, ignoreFiles):
"""Sets the regular expression used to determine if a file should be ignored.
Some operating systems and utilities create hidden files or other spurious data
inside the content package directory. For example, Apple's OS X creates .DS_Store
files and the svn source control utility creates .svn directories. The files shouldn't
generally be included in exported packages as they may confuse the recipient (who
may be using a system on which these files and directories are not hidden) and be
deemed to violate the specification, not to mention adding unnecessarily to the size
of the package and perhaps even leaking information unintentionally.
To help avoid this type of problem the class uses a regular expression to determine
if a file should be considered part of the package. When listing directories, the
names of the files found are compared against this regular expression and are ignored
if they match.
By default, the pattern is set to match all directories and files with
names beginning '.' so you will not normally need to call this
method."""
self.ignoreFiles = re.compile(ignoreFiles)
def IgnoreFile(self, f):
"""Compares a file or directory name against the pattern set by :py:meth:`SetIgnoreFiles`.
f is a unicode string."""
match = self.ignoreFiles.match(f)
if match:
return len(f) == match.end()
else:
return False
def IgnoreFilePath(self, fPath):
"""Compares a file path against the pattern set by :py:meth:`SetIgnoreFiles`
The path is normalised before comparison and any segments consisting of
the string '..' are skipped. The method returns True if any of the
remaining path components matches the ignore pattern. In other words,
if the path describes a file that is is in a directory that should be
ignored it will also be ignored.
The path can be relative or absolute. Relative paths are *not* made
absolute prior to comparison so this method is not affected by the
current directory, even if the current diretory would itself be
ignored."""
fPath = fPath.normpath()
while True:
head, tail = fPath.split()
if tail and tail != fPath.pardir and self.IgnoreFile(unicode(tail)):
return True
if not head or head == fPath:
# No head left, or the path is unsplitable
return False
fPath = head
def RebuildFileTable(self):
"""Rescans the file system and manifest and rebuilds the :py:attr:`fileTable`."""
self.fileTable = {}
been_there = {}
for f in self.dPath.listdir():
if self.IgnoreFile(unicode(f)):
continue
if f.normcase() == 'imsmanifest.xml':
continue
self.FileScanner(f, been_there)
# Now scan the manifest and identify which file objects refer to which
# files
for r in self.manifest.root.Resources.Resource:
for f in r.File:
fPath = f.PackagePath(self)
if fPath is None:
continue
if fPath in self.fileTable:
self.fileTable[fPath].append(f)
else:
self.fileTable[fPath] = [f]
def FileScanner(self, fPath, been_there):
fullPath = self.dPath.join(fPath)
rFullPath = fullPath.realpath()
if rFullPath in been_there:
raise CPPackageBeenThereError(rFullPath)
been_there[rFullPath] = True
if fullPath.isdir():
for f in fullPath.listdir():
if self.IgnoreFile(unicode(f)):
continue
self.FileScanner(fPath.join(f), been_there)
elif fullPath.isfile():
self.fileTable[fPath.normcase()] = []
else: # skip non-regular files.
pass
def PackagePath(self, fPath):
"""Converts an absolute file path into a canonical package-relative path
Returns None if fPath is not inside the package."""
rel_path = []
assert isinstance(fPath, vfs.VirtualFilePath)
while fPath != self.dPath:
fPath, tail = fPath.split()
if not fPath or not tail:
# We've gone as far as we can, fail!
return None
rel_path[0:0] = [tail]
return self.dPath.__class__(*rel_path).normcase()
    def ExpandZip(self, zf):
        """Expand zip archive *zf* into a freshly created temporary directory.

        :py:attr:`dPath` is re-pointed at the temporary directory and
        :py:attr:`tempDir` is set so the directory can be cleaned up later.
        Raises CPZIPFilenameError for entries that escape the package
        directory, CPZIPDirectorySizeError for directory entries with a
        non-zero size and CPZIPDuplicateFileError for duplicate entries."""
        # work in a fresh temporary directory on the default file system
        self.dPath = vfs.defaultFS.mkdtemp('.d', 'imscpv1p2-')
        self.tempDir = True
        zf = zipfile.ZipFile(zf)
        try:
            for zfi in zf.infolist():
                # build the target path one segment at a time; zip entry
                # names always use '/' as the separator
                path = self.dPath
                for pathSeg in zfi.filename.split('/'):
                    # The current path will need to be a directory
                    if not path.isdir():
                        path.mkdir()
                    # entry names are stored UTF-8 encoded in the archive
                    pathSeg = unicode(pathSeg, 'utf-8')
                    path = path.join(pathSeg).normpath()
                    # guard against names like '../x' escaping the package
                    if self.PackagePath(path) is None:
                        raise CPZIPFilenameError(zfi.filename)
                if path.isdir():
                    if zfi.file_size > 0:
                        raise CPZIPDirectorySizeError(
                            "%s has size %i" % (zfi.filename, zfi.file_size))
                elif path.exists():
                    # Duplicate entries in the zip file
                    raise CPZIPDuplicateFileError(zfi.filename)
                else:
                    f = path.open('wb')
                    try:
                        f.write(zf.read(zfi.filename))
                    finally:
                        f.close()
        finally:
            zf.close()
def ExportToPIF(self, zPath):
"""Exports the content package, saving the zipped package in *zPath*
*zPath* is overwritten by this operation.
In order to make content packages more interoperable this method goes
beyond the basic zip specification and ensures that pathnames are always
UTF-8 encoded when added to the archive. When creating instances of
:py:class:`ContentPackage` from an existing archive the reverse
transformation is performed. When exchanging PIF files between systems
with different native file path encodings, encoding erros may occurr."""
zf = zipfile.ZipFile(zPath, 'w')
base = ''
been_there = {}
try:
for f in self.dPath.listdir():
if self.IgnoreFile(unicode(f)):
continue
self.AddToZip(self.dPath.join(f), zf, base, been_there)
finally:
zf.close()
def AddToZip(self, fPath, zf, zbase, been_there):
rfName = fPath.realpath()
if rfName in been_there:
raise CPZIPBeenThereError(fPath)
been_there[rfName] = True
fName = unicode(fPath.split()[1])
zfName = fName.replace('/', ':')
# if type(zfName) is StringType:
# zfName=zfName.decode(sys.getfilesystemencoding())
zpath = zbase + zfName.encode('utf-8')
if fPath.isdir():
zpath += '/'
zf.writestr(zpath, '')
for f in fPath.listdir():
if self.IgnoreFile(unicode(f)):
continue
self.AddToZip(fPath.join(f), zf, zpath, been_there)
elif fPath.isfile():
with vfs.ZipHooks():
zf.write(fPath, zpath)
else: # skip non-regular files.
pass
def GetUniqueFile(self, suggestedPath):
"""Returns a unique file path suitable for creating a new file in the package.
suggestedPath is used to provide a suggested path for the file. This
may be relative (to the root and manifest) or absolute but it must
resolve to a file (potentially) in the package. The suggestedPath
should either be a VirtualFilePath (of the same type as the content
package's :py:attr:`dPath`) or a string suitable for conversion to a
VirtualFilePath.
When suggestedPath is relative, it is forced to lower-case. This is
consistent with the behaviour of normcase on systems that are case
insensitive. The trouble with case insensitive file systems is that it
may be impossible to unpack a content package created on a case
sensitive system and store it on a case insenstive one. | |
customer_number, cif_number, notes from bank_details")
bank_data=mycur.fetchall()
bank_details=((tabulate(bank_data, headers=["Bank Name","Acc Holder Name","Acc Number","IFSC Code","Branch Name","Mobile Number","Email ID","Customer Number","CIF Number","Notes"], tablefmt="psql")))
mycur.execute("select card_provider, card_holder, card_type, card_number, card_expiry, card_cvv, card_register_mobile, notes from save_cards")
card_data=mycur.fetchall()
save_cards=((tabulate(card_data, headers=["Provider Name","Holder Name","Card Type","Card Number","Expiry Data","CVV Number","Mobile Number","Notes"], tablefmt="psql")))
test_data=open("Password Manager Backup "+time.strftime("%d %B %Y")+".txt","w")
test_data.write("============================================================================TABLE===================================================================\n")
test_data.writelines(table)
test_data.writelines("\n==============================================================SOCIAL MEDIA ACCOUNTS=================================================================\n")
test_data.writelines(social_data)
test_data.writelines("\n============================================================INTERNET BANKING ACCOUNTS===============================================================\n")
test_data.writelines(inter_data)
test_data.writelines("\n==================================================================BANK DETAILS======================================================================\n")
test_data.writelines(bank_details)
test_data.writelines("\n===================================================================SAVED CARDS======================================================================\n")
test_data.writelines(save_cards)
test_data.close()
print("Backuped your Datas to Text File ('Password Manager Backup"+time.strftime("%d %B %Y")+".txt') Sucessfully,")
print("Reseting your tables...." )
mycur.execute("drop table social_media_management")
print("Reseting your tables...." )
mycur.execute("drop table inter_banking")
print("Reseting your tables...." )
mycur.execute("drop table bank_details")
print("Reseting your tables...." )
mycur.execute("drop table save_cards")
print("creating tables")
print("creating table social_media_management...........")
mycur.execute("create table social_media_management(sno varchar(30), name varchar(30), username varchar(30), password varchar(30), 2fa varchar(30), notes varchar(30))")
print("creating table inter_banking...........")
mycur.execute("create table inter_banking(sno varchar(30), bname varchar(30), username varchar(30), password varchar(30), transaction_password varchar(30), register_mobile varchar(30), notes varchar(30))")
print("creating table bank_details...........")
mycur.execute("create table bank_details(sno varchar(30), bname varchar(30), acc_holder varchar(30), acc_number varchar(30), ifsc_number varchar(30), branch_name varchar(30), register_mobile varchar(30), register_email varchar(30), customer_number varchar(30), cif_number varchar(30), notes varchar(30))")
print("creating table save_cards...........")
mycur.execute("create table save_cards(sno varchar(30), card_provider varchar(30), card_holder varchar(30), card_type varchar(30), card_number varchar(30), card_expiry varchar(30), card_cvv varchar(30), card_register_mobile varchar(30), notes varchar(30))")
print("Table Created Sucessfully")
print("Reseted Sucessfully")
input("Press Enter to Continue")
elif set_choice==3:
connection1=mysql.connector.connect(user="root",password=""+<PASSWORD>+"",host="localhost")
mycur1=connection1.cursor()
mycur1.execute("drop database "+database)
input("Sucessfully Deleted\nPress Any Enter to Log Out and Quit")
quit()
elif set_choice==4:
break
else:
print("Wrong Option :(")
input("Press Enter To Continue")
except ValueError:
input("Invalid Option, Enter only Integers\n Press Any Key to Continue")
else:
print("Sorry!!Access Denied")
mycur.execute("delete from pass_db where user_choice=3")
connection.commit()
#connection.close()
input("Press Enter To Continue")
elif user_choice==2:
mycur.execute("select *from pass_db")
pre_check=mycur.fetchall()
pre_check_data=len(pre_check)
if pre_check_data==0:
print("Sorry !!! Something Went Wrong :(")
print("\n########################################################################")
input("Press Enter To Continue")
print("\n########################################################################")
else:
password=<PASSWORD>(prompt='Enter the Password: ', mask='*')
pass_db=("""INSERT INTO pass_db(user_choice, password) values (3,'%s')"""%password)
mycur.execute(pass_db)
ori_pass=("select password from pass_db where user_choice=1")
check_pass=("select password from pass_db where user_choice=3")
mycur.execute(check_pass)
data1=mycur.fetchall()
mycur.execute(ori_pass)
data=mycur.fetchall()
if data==data1:
print("Sucessfully Loged In")
mycur.execute("delete from pass_db where user_choice=3")
#After Log In
while True:
os.system("cls")
print("1.Add your Passwords of Social Media")
print("2.Add your Passwords of Internet Banking Accounts")
print("3.Add your Bank Details")
print("4.Add your Purchase Cards")
print("5.Back")
try:
add_choice=int(input("Select your option: "))
##SUBCHOICE1
if add_choice==1:
while True:
os.system("cls")
try:
repeat=int(input("Enter How Many Accounts to be Added in Social Media Management\n(Only 5 Times per run Recommanded)"))
print("Fill Each Option with Correct Information ")
print("########################################################################")
for i in range(repeat):
mycur.execute("select *from social_media_management")
row_no=mycur.fetchall()
sno=len(row_no)+1
sname=input("Enter Your Social Media Name: ")
susername=input("Enter Your "+sname+" Username: ")
spassword=input("Enter Your "+sname+" Password: ")
s2fa=input("If 2FA Authendication is Enabled (Optional): ")
snotes=input("You can add Aditional Information if any (optional): ")
print("########################################################################")
##SQl CONNECTION##
add_social=("""INSERT INTO social_media_management(sno, name, username, password, 2fa, notes) values ('%s','%s','%s','%s','%s','%s')"""%(sno, sname, susername, spassword, s2fa, snotes))
mycur.execute(add_social)
connection.commit()
#connection.close()
print("Your Password has been Sucessfully saved in Database as given,\nThank You Have a Nice Day :)")
input("Press Enter To Continue")
print("########################################################################")
break
except ValueError:
print("Enter Only Integer, Invalid Option")
input("Press Enter To Continue")
##SUBCHOICE2
elif add_choice==2:
while True:
os.system("cls")
try:
repeat1=int(input("Enter How Many Accounts to be Added in Internet Banking Management\n(Only 5 Times per run Recommanded)"))
print("Fill Each Option with Correct Information ")
print("########################################################################")
for i in range(repeat1):
mycur.execute("select *from inter_banking")
row_no1=mycur.fetchall()
sno=len(row_no1)+1
bname=input("Enter Your Bank Name: ")
busername=input("Enter Your "+bname+" Username: ")
bpassword=input("Enter Your "+bname+" Password: ")
btrans_pass=input("Enter Your "+bname+" Transaction Password (Optional): ")
while True:
try:
breg_mob=int(input("Enter Your "+bname+" Registered Mobile Number (Optional): "))
break
except ValueError:
print("Enter Only Integer")
input("Press Any Key to ReEnter")
bnotes=input("You can add Aditional Information if any (optional): ")
print("########################################################################")
##SQl CONNECTION##
add_inter_bank=("""INSERT INTO inter_banking(sno, bname, username, password, transaction_password, register_mobile, notes) values ('%s','%s','%s','%s','%s','%s','%s')"""
%(sno, bname, busername, bpassword, btrans_pass, str(breg_mob), bnotes))
mycur.execute(add_inter_bank)
connection.commit()
#connection.close()
print("Your Password has been Sucessfully saved in Database as given,\nThank You Have a Nice Day :)")
input("Press Enter To Continue")
print("\n########################################################################")
break
except ValueError:
print("Enter Only Integer, Invalid Option")
input("Press Enter To Continue")
##SUBCHOICE3
elif add_choice==3:
while True:
os.system("cls")
try:
repeat2=int(input("Enter How Many Accounts to be Added in Bank Details Management\n(Only 5 Times per run Recommanded)"))
print("Fill Each Option with Correct Information ")
print("########################################################################")
for i in range(repeat2):
mycur.execute("select *from bank_details")
row_no2=mycur.fetchall()
sno=len(row_no2)+1
b_name=input("Enter Bank Name: ")
b_hold=input("Enter Account Holder Name: ")
while True:
try:
b_number=int(input("Enter "+b_name+" Account Number: "))
break
except ValueError:
print("Enter Only Integer, Invalid Input")
input("Press Enter To Continue")
b_ifsc=input("Enter "+b_name+" IFSC Number: ")
b_branch=input("Enter "+b_name+" Branch Name: ")
while True:
try:
b_regmob=int(input("Enter "+b_name+" Registered Mobile Number: "))
break
except ValueError:
print("Enter Only Integer, Invalid Input")
input("Press Enter To Continue")
b_regmail=input("Enter "+b_name+" Registered Email Id (Optional): ")
while True:
try:
b_cusno=int(input("Enter "+b_hold+"'s Customer Number (Optional): "))
break
except ValueError:
print("Enter Only Integer, Invalid Input")
input("Press Enter To Continue")
b_cif=input("Enter "+b_hold+"'s CIF Number (Optional): ")
b_notes=input("You can add Aditional Information if any (optional): ")
print("########################################################################")
##SQl CONNECTION##
add_bank_details=("""INSERT INTO bank_details
(sno, bname, acc_holder, acc_number, ifsc_number, branch_name, register_mobile, register_email, customer_number, cif_number, notes) values ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"""
%(sno, b_name, b_hold, str(b_number), b_ifsc, b_branch, str(b_regmob), b_regmail, str(b_cusno), b_cif, b_notes))
mycur.execute(add_bank_details)
connection.commit()
#connection.close()
print("Your Bank Account Details has been Sucessfully saved in Database as given,\nThank You Have a Nice Day :)")
input("Press Enter To Continue")
print("\n########################################################################")
break
except ValueError:
print("Enter Only Integer, Invalid Option")
input("Press Enter To Continue")
##SUBCHOICE4
elif add_choice==4:
while True:
os.system("cls")
try:
repeat3=int(input("Enter How Many Cards to be Added in Cards Management\n(Only 5 Times per run Recommanded)"))
print("Fill Each Option with Correct Information ")
print("########################################################################")
for i in range(repeat3):
mycur.execute("select *from save_cards")
row_no3=mycur.fetchall()
sno=len(row_no3)+1
cpname=input("Enter Your Card Provider Name: ")
chold=input("Enter Card Holder Name: ")
ctype=input("Enter Card Type (Visa/MasterCard/Rupay/GiftCard/Other): ")
while True:
try:
cnumber=int(input("Enter Card Number: "))
break
except ValueError:
print("Enter Only Integer, Invalid Input")
input("Press Enter To Continue")
cexpiry=input("Enter Card Expiry Date (optional): ")
while True:
try:
ccvv=int(input("Enter Card CVV NO (optional): "))
break
except ValueError:
print("Enter Only Integer, Invalid Input")
input("Press Enter To Continue")
while True:
try:
cregmob=int(input("Enter Card Registered Mobile Number (optional): "))
break
except ValueError:
print("Enter Only Integer, Invalid Input")
input("Press Enter To Continue")
cnotes=input("You can add Aditional Information if any(optional): ")
print("########################################################################")
##SQl CONNECTION##
save_cards=("""INSERT INTO save_cards
(sno, card_provider, card_holder, card_type, card_number, card_expiry, card_cvv, card_register_mobile, notes) values ('%s','%s','%s','%s','%s','%s','%s','%s','%s')"""
%(sno, cpname, chold, ctype, str(cnumber), cexpiry, str(ccvv), str(cregmob), cnotes))
mycur.execute(save_cards)
connection.commit()
#connection.close()
print("Your Cards has been Sucessfully saved in Database as given,\nThank You Have a Nice Day :)")
input("Press Enter To Continue")
break
except ValueError:
print("Enter Only Integer, Invalid Option")
input("Press Enter To Continue")
elif add_choice==5:
break
else:
print("Wrong Option :( ")
input("Press Enter To Continue")
print("\n########################################################################")
except ValueError:
print("Enter Only Integer, Invalid Option")
input("Press Enter To continue")
else:
print("Sorry!!Access Denied")
mycur.execute("delete from pass_db where user_choice=3")
connection.commit()
#connection.close()
input("Press Enter To Continue")
print("\n########################################################################")
#Main Choice3
elif user_choice==3:
mycur.execute("select *from pass_db")
pre_check=mycur.fetchall()
pre_check_data=len(pre_check)
if pre_check_data==0:
print("Something went wrong :( !!!")
print("\n########################################################################")
input("Press Enter To Continue")
print("\n########################################################################")
else:
password=<PASSWORD>(prompt='Enter the Password: ', mask='*')
pass_db=("""INSERT INTO pass_db(user_choice, password) values (3,'%s')"""%password)
mycur.execute(pass_db)
ori_pass=("select password from pass_db where user_choice=1")
check_pass=("select password from pass_db where user_choice=3")
mycur.execute(check_pass)
data1=mycur.fetchall()
mycur.execute(ori_pass)
data=mycur.fetchall()
if data==data1:
print("Sucessfully Loged In")
mycur.execute("delete from pass_db where user_choice=3")
while True:
os.system("cls")
print("1.View\n2.Update Datas\n3.Delete Accounts\n4.Back")
try:
up_view=int(input("Select your Operation: "))
if up_view==1:
while True:
os.system("cls")
print('1.Social Media Accounts\n2.Internet Banking Accounts\n3.Bank Details\n4.Saved Cards\n5.Back')
try:
tab_sele=int(input("Choose the Table that you want to View: "))
##view_table_section###
if tab_sele==1:
mycur.execute("select name, username, password, 2fa, notes from social_media_management")
social_media_data=mycur.fetchall()
print((tabulate(social_media_data, headers=["Social Media Name","Username","Password","2FA","Notes"], tablefmt="psql")))
input("Press Enter To Continue")
elif tab_sele==2:
mycur.execute("select bname, username, password, transaction_password, register_mobile, notes from inter_banking")
inter_bank_data=mycur.fetchall()
print((tabulate(inter_bank_data, headers=["Bank Name","Username","Password","Transaction Password","Mobile Number","Notes"], tablefmt="psql")))
input("Press Enter To Continue")
elif tab_sele==3:
mycur.execute("select bname, acc_holder, acc_number, ifsc_number, branch_name, register_mobile, register_email, customer_number, cif_number, notes from bank_details")
bank_data=mycur.fetchall()
print((tabulate(bank_data, headers=
["Bank Name","Acc Holder Name","Acc Number","IFSC Code","Branch Name","Mobile Number","Email ID","Customer Number","CIF Number","Notes"], tablefmt="psql")))
input("Press Enter To Continue")
elif tab_sele==4:
mycur.execute("select card_provider, card_holder, card_type, | |
#Programmer: <NAME>
#Purpose: Load in an ASF file for the geometry/topology of a skeleton
#And load in an AMC file with information for animating that skeleton
#http://research.cs.wisc.edu/graphics/Courses/cs-838-1999/Jeff/ASF-AMC.html
#Motion capture data can be found in the CMU MOCAP database
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from oct2py import octave
def getRotationX(rx):
    """Return a 4x4 homogeneous rotation matrix about the X axis.

    *rx* is the rotation angle in radians."""
    c, s = np.cos(rx), np.sin(rx)
    m = np.eye(4)
    m[1, 1] = c
    m[1, 2] = -s
    m[2, 1] = s
    m[2, 2] = c
    return m
def getRotationY(ry):
    """Return a 4x4 homogeneous rotation matrix about the Y axis.

    *ry* is the rotation angle in radians.

    NOTE(review): the sine signs here are the transpose of the right-handed
    convention used by getRotationX/getRotationZ (this matrix equals a
    right-handed Y rotation by -ry).  That asymmetry may be intentional for
    the ASF data being loaded -- confirm before changing."""
    c, s = np.cos(ry), np.sin(ry)
    m = np.eye(4)
    m[0, 0] = c
    m[0, 2] = -s
    m[2, 0] = s
    m[2, 2] = c
    return m
def getRotationZ(rz):
    """Return a 4x4 homogeneous rotation matrix about the Z axis.

    *rz* is the rotation angle in radians."""
    c, s = np.cos(rz), np.sin(rz)
    m = np.eye(4)
    m[0, 0] = c
    m[0, 1] = -s
    m[1, 0] = s
    m[1, 1] = c
    return m
#Rotations are applied in X, Y, Z order (i.e. Rz * Ry * Rx)
def getRotationXYZ(rx, ry, rz):
    """Compose per-axis rotations, applying X first, then Y, then Z.

    All angles are in radians; returns a 4x4 homogeneous matrix."""
    return getRotationZ(rz).dot(getRotationY(ry)).dot(getRotationX(rx))
#Rotation from this bone's local coordinate system to the coordinate
#system of its parent
def computeRotationParentChild(parent, child):
    """Precompute child.rotParentCurrent from the two bones' axis angles.

    Both *parent* and *child* must have an indexable ``axis`` of three
    angles in radians; the resulting matrix is stored on *child*."""
    parentRot = getRotationXYZ(parent.axis[0], parent.axis[1], parent.axis[2]).T
    childRot = getRotationXYZ(child.axis[0], child.axis[1], child.axis[2])
    child.rotParentCurrent = parentRot.dot(childRot).T
class SkeletonRoot(object):
    """The root segment of an ASF skeleton.

    Holds the global position/orientation read from the :root section of
    the file, plus the list of child bones attached to it."""

    def __init__(self):
        self.id = -1
        self.name = "root"
        self.axis = "XYZ"             # rotation order string from the file
        self.order = {}               # channel name -> column index
        self.position = [0, 0, 0]
        self.orientation = [0, 0, 0]  # initial orientation, in degrees
        self.children = []
        self.initialRotMatrix = None  # set by finishInit()

    def finishInit(self):
        """Precompute the initial rotation matrix from self.orientation."""
        radians = [float(deg) * np.pi / 180.0 for deg in self.orientation]
        self.initialRotMatrix = getRotationXYZ(radians[0], radians[1], radians[2])
class SkeletonBone(object):
    """A single bone read from the :bonedata section of an ASF file."""

    def __init__(self):
        self.id = -1
        self.name = "NONAME"
        self.length = 0.0
        self.direction = [0, 0, 0]   # direction vector of the bone
        self.axis = [0, 0, 0]        # local axis angles (radians after parsing)
        self.dof = {}                # degree-of-freedom name -> channel index
        self.limits = []             # per-dof limit pairs
        self.children = []
        self.initialRotMatrix = None
class Skeleton(object):
    """The geometry/topology of a skeleton loaded from an ASF file.

    :py:meth:`initFromFile` runs a line-oriented state machine over the
    file; the PARSE_* constants below name the ASF section currently being
    read.  After parsing, bone direction vectors are rotated into each
    bone's local coordinate system and the parent/child rotations are
    precomputed."""
    #Parser states for the ASF state machine
    (PARSE_DEFAULT, PARSE_UNITS, PARSE_DOCUMENTATION, PARSE_ROOT, PARSE_BONEDATA, PARSE_BONEDATALIMITS, PARSE_HIERARCHY, PARSE_FINISHED) = (0, 1, 2, 3, 4, 5, 6, 7)

    def __init__(self):
        self.version = "1.0"
        self.units = []          # raw lines from the :units section
        self.documentation = []  # raw lines from the :documentation section
        self.root = SkeletonRoot()
        self.bones = {'root': self.root}  # bone name -> SkeletonRoot/SkeletonBone

    def initFromFile(self, filename):
        """Parse ASF file *filename*, populating :py:attr:`bones`.

        Unrecognized lines produce a printed warning rather than an
        exception."""
        fin = open(filename, 'r')
        lineCount = 0
        parseState = Skeleton.PARSE_DEFAULT
        thisBone = None
        for line in fin:
            lineCount = lineCount + 1
            fields = ((line.lstrip()).rstrip()).split()  # split on whitespace
            if len(fields) == 0:
                continue  # blank line
            if fields[0][0] in ['#', '\0'] or len(fields[0]) == 0:
                continue  # comment
            if parseState == Skeleton.PARSE_DEFAULT:
                if fields[0] == ":version":
                    self.version = fields[1]
                elif fields[0] == ":name":
                    self.name = fields[1]
                elif fields[0] == ":units":
                    parseState = Skeleton.PARSE_UNITS
                else:
                    print("Unexpected line while in PARSE_DEFAULT: %s" % line)
            elif parseState == Skeleton.PARSE_UNITS:
                if fields[0] == ":documentation":
                    parseState = Skeleton.PARSE_DOCUMENTATION
                elif fields[0] == ":root":
                    parseState = Skeleton.PARSE_ROOT
                elif fields[0] == ":bonedata":
                    # BUGFIX: was misspelled "Skaleton", raising NameError
                    # whenever :bonedata directly followed :units
                    parseState = Skeleton.PARSE_BONEDATA
                else:
                    self.units.append(line)
            elif parseState == Skeleton.PARSE_DOCUMENTATION:
                if fields[0] == ":root":
                    parseState = Skeleton.PARSE_ROOT
                elif fields[0] == ":bonedata":
                    parseState = Skeleton.PARSE_BONEDATA
                else:
                    self.documentation.append(line)
            elif parseState == Skeleton.PARSE_ROOT:
                if fields[0] == ":bonedata":
                    self.root.finishInit()
                    parseState = Skeleton.PARSE_BONEDATA
                else:
                    if fields[0] == "axis":
                        self.root.axis = fields[1]
                    elif fields[0] == "order":
                        # map each channel name to its column index
                        orderstr = line.split("order")[1].lstrip()
                        ordervals = orderstr.split()
                        for i in range(len(ordervals)):
                            self.root.order[ordervals[i].lstrip().rstrip()] = i
                    elif fields[0] == "position":
                        point = [float(x) for x in fields[1:]]
                        self.root.position = point
                    elif fields[0] == "orientation":
                        orientation = [float(x) for x in fields[1:]]
                        self.root.orientation = orientation
                    else:
                        print("Warning: unrecognized field %s in root" % fields[0])
            elif parseState == Skeleton.PARSE_BONEDATA:
                if fields[0] == "begin":
                    thisBone = SkeletonBone()
                elif fields[0] == "end":
                    self.bones[thisBone.name] = thisBone
                elif fields[0] == "name":
                    thisBone.name = fields[1]
                elif fields[0] == "id":
                    thisBone.id = int(fields[1])
                elif fields[0] == "direction":
                    direction = np.array([float(x) for x in fields[1:]])
                    thisBone.direction = direction
                elif fields[0] == "length":
                    thisBone.length = float(fields[1])
                elif fields[0] == "axis":
                    # convert axis angles to radians up front
                    axis = np.array([float(x) for x in fields[1:4]])
                    axis = axis*np.pi/180
                    thisBone.axis = axis
                elif fields[0] == "dof":
                    # map each degree of freedom (rx/ry/rz/...) to its column
                    dof = [(x.lstrip().rstrip()).lower() for x in fields[1:]]
                    for i in range(0, len(dof)):
                        thisBone.dof[dof[i]] = i
                elif fields[0] == "limits":
                    # first limits pair is on this line; the rest follow
                    parseState = Skeleton.PARSE_BONEDATALIMITS
                    limits = line.split("(")[1]
                    limits = limits.split(")")[0]
                    limits = [float(x) for x in limits.split()]
                    thisBone.limits.append(limits)
                elif fields[0] == ":hierarchy":
                    parseState = Skeleton.PARSE_HIERARCHY
            elif parseState == Skeleton.PARSE_BONEDATALIMITS:
                if fields[0] == "end":
                    self.bones[thisBone.name] = thisBone
                    parseState = Skeleton.PARSE_BONEDATA
                else:
                    limits = line.split("(")[1]
                    limits = limits.split(")")[0]
                    limits = [float(x) for x in limits.split()]
                    thisBone.limits.append(limits)
            elif parseState == Skeleton.PARSE_HIERARCHY:
                if len(fields) == 1 and fields[0] == "begin":
                    parseState = Skeleton.PARSE_HIERARCHY
                elif len(fields) == 1 and fields[0] == "end":
                    parseState = Skeleton.PARSE_FINISHED
                else:
                    # line format: parent child1 child2 ...
                    parent = fields[0]
                    children = fields[1:]
                    self.bones[parent].children = [self.bones[s] for s in children]
            elif parseState == Skeleton.PARSE_FINISHED:
                print("Warning: Finished, but got line %s" % line)
        fin.close()
        #Rotate each bone's direction vector into its local coordinate system
        for bstr in self.bones:
            if bstr == 'root':
                continue
            bone = self.bones[bstr]
            #TODO: It seems like I should be rotating the other way
            R = getRotationXYZ(bone.axis[0], bone.axis[1], bone.axis[2])
            d = bone.direction
            d = R.dot(np.array([d[0], d[1], d[2], 1]))
            bone.direction = d[0:3]
        #The root carries no local axis rotation of its own
        self.bones['root'].axis = np.array([0, 0, 0])
        #Precompute rotation from each bone to its parent's coordinate system
        self.root.rotParentCurrent = np.eye(4)
        for bstr in self.bones:
            bone = self.bones[bstr]
            for c in bone.children:
                computeRotationParentChild(bone, c)

    #Functions for exporting the tree to numpy
    def getEdgesRec(self, node, edges, kindex):
        """Append [parent, child] index pairs for the subtree below *node*
        to *edges*, using the bone-name -> index map *kindex*."""
        i1 = kindex[node.name]
        for c in node.children:
            i2 = kindex[c.name]
            edges.append([i1, i2])
            self.getEdgesRec(c, edges, kindex)

    def getEdges(self):
        """Return the skeleton's edge list as an (NEdges x 2) numpy array of
        indices into the (arbitrary) bone-name ordering."""
        keys = list(self.bones.keys())  # list() so indexing also works on Python 3
        kindex = {}
        for i in range(len(keys)):
            kindex[keys[i]] = i
        edges = []
        self.getEdgesRec(self.bones['root'], edges, kindex)
        return np.array(edges)
class SkeletonAnimator(object):
    def __init__(self, skeleton):
        """Animate a Skeleton by loading per-frame bone states from a file."""
        self.skeleton = skeleton
        self.bonesStates = {}   # bone name -> list of per-frame dof value lists
        self.boneMatrices = {}  # bone name -> list of per-frame local rotation matrices
        self.bonePositions = {}  # bone name -> (NStates, 3) array of positions
        self.NStates = 0  # number of animation frames loaded
def initMatrices(self, bone, index):
[rx, ry, rz] = [0]*3
if "rx" in bone.dof:
rx = self.bonesStates[bone.name][index][bone.dof["rx"]]*np.pi/180
if "ry" in bone.dof:
ry = self.bonesStates[bone.name][index][bone.dof["ry"]]*np.pi/180
if "rz" in bone.dof:
rz = self.bonesStates[bone.name][index][bone.dof["rz"]]*np.pi/180
rotMatrix = getRotationXYZ(rx, ry, rz)
self.boneMatrices[bone.name].append(rotMatrix)
for child in bone.children:
self.initMatrices(child, index)
#translate then rotate, translate then rotate, ...
    def calcPositions(self, bone, index, matrix):
        """Walk the hierarchy accumulating 4x4 transforms and store each
        bone's world-space endpoint for frame `index` in bonePositions.

        `matrix` is the accumulated parent transform; the chain applied here
        is parent transform -> rotation into this bone's frame -> this
        frame's rotation -> translation along the bone.
        """
        # Rotate from the parent's coordinate system into this bone's.
        matrix = matrix.dot(bone.rotParentCurrent)
        # Apply this bone's per-frame local rotation.
        R = self.boneMatrices[bone.name][index]
        matrix = matrix.dot(R)
        # Translate along the bone's (local) direction by its length.
        t = bone.length*bone.direction
        T = np.eye(4)
        T[0:3, 3] = t
        matrix = matrix.dot(T)
        # The bone endpoint is the translation column of the transform.
        self.bonePositions[bone.name][index, :] = matrix[0:3, 3].flatten()
        for child in bone.children:
            self.calcPositions(child, index, matrix)
    def initFromFile(self, filename):
        """Parse an AMC-style file of per-frame bone states, then build the
        rotation matrices and world positions for every frame.

        NOTE: this module uses Python 2 print statements.
        """
        print "Initializing..."
        for bone in self.skeleton.bones:
            self.bonesStates[bone] = []
        #Step 1: Load in states from file
        fin = open(filename, 'r')
        lineCount = 0
        for line in fin:
            lineCount = lineCount + 1
            fields = ((line.lstrip()).rstrip()).split() #Splits whitespace by default
            if len(fields) == 0:
                continue #Blank line
            if fields[0][0] in ['#', '\0', 'o'] or len(fields[0]) == 0:
                continue #Comments and stuff
            if fields[0] == ":FULLY-SPECIFIED":
                continue
            if fields[0] == ":DEGREES":
                continue
            if len(fields) == 1:
                continue #The number of the frame, but I don't need to explicitly store this
            bone = fields[0]
            values = [float(a) for a in fields[1:]]
            self.bonesStates[bone].append(values)
        # Frame count is the longest per-bone state list.
        self.NStates = max([len(self.bonesStates[bone]) for bone in self.bonesStates])
        fin.close()
        #Step 2: Initialize matrices
        for bone in self.bonesStates:
            self.boneMatrices[bone] = []
            self.bonePositions[bone] = np.zeros((self.NStates, 3))
        for index in range(self.NStates):
            #First initialize the root matrix
            bone = self.skeleton.bones['root']
            [TX, TY, TZ, RX, RY, RZ] = [0]*6
            # NOTE(review): rotorder is adjusted below but never read again,
            # and the rotation is applied in fixed X-Y-Z order regardless of
            # bone.order -- confirm whether honoring the file's channel order
            # was intended.
            rotorder = bone.order.copy()
            if "TX" in bone.order:
                TX = self.bonesStates[bone.name][index][bone.order["TX"]]
            if "TY" in bone.order:
                TY = self.bonesStates[bone.name][index][bone.order["TY"]]
            if "TZ" in bone.order:
                TZ = self.bonesStates[bone.name][index][bone.order["TZ"]]
            if "RX" in bone.order:
                RX = self.bonesStates[bone.name][index][bone.order["RX"]]*np.pi/180
                rotorder["RX"] = rotorder["RX"] - 3
            if "RY" in bone.order:
                RY = self.bonesStates[bone.name][index][bone.order["RY"]]*np.pi/180
                rotorder["RY"] = rotorder["RY"] - 3
            if "RZ" in bone.order:
                RZ = self.bonesStates[bone.name][index][bone.order["RZ"]]*np.pi/180
                rotorder["RZ"] = rotorder["RZ"] - 3
            translationMatrix = np.eye(4)
            translationMatrix[0:3, 3] = np.array([TX, TY, TZ])
            rotMatrix = getRotationXYZ(RX, RY, RZ)
            self.boneMatrices['root'].append((rotMatrix, translationMatrix))
            for child in bone.children:
                self.initMatrices(child, index)
            # Root position comes from rotation * translation ...
            matrix = rotMatrix.dot(translationMatrix)
            self.bonePositions['root'][index, :] = matrix[0:3, 3]
            # ... and the children are placed relative to that transform.
            for child in bone.children:
                self.calcPositions(child, index, matrix)
        print "Finished initializing"
def initFromFileUsingOctave(self, asf, amc):
#Use the help of some external code for now
[X, boneNames] = octave.getMOCAPTrajectories(asf, amc)
for i in range(len(boneNames)):
x = X[:, i, :]
self.bonePositions[boneNames[i]] = x.T
self.NStates = X.shape[2]
return X
def renderNode(self, bone, parent, index):
if index >= self.NStates:
return
#Endpoint are always matrix[0:3, 3]
P1 = self.bonePositions[parent.name][index, :]
P2 = self.bonePositions[bone.name][index, :]
#colors = [ [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0], [1, 0, 1], [0, 1, 1] ]
colors = [ [0.67, 0.223, 0.223], [0.44, | |
# alias for OSError. OSError has a winerror attribute and an
# errno attribute.
# Under Python 2, WindowsError is an OSError subclass.
# Under Python 2.5 and higher on Windows, WindowsError has a
# winerror attribute and an errno attribute.
# The winerror attribute is bound to the Windows error code while
# the errno attribute is bound to a translation of that code to a
# perhaps equivalent POSIX error number.
#
# For further details, refer to:
# https://docs.python.org/3/library/exceptions.html#OSError
# If not for this clause OSError would be handling all of these
# errors on Windows. The errno attribute contains a POSIX error
# code while the winerror attribute contains a Windows error code.
# Windows error codes aren't the same as POSIX error codes,
# so we need to handle them differently.
# Under Python 2.4 on Windows, WindowsError only has an errno
# attribute. It is bound to the Windows error code.
# For simplicity of code and to keep the number of paths through
# this suite minimal, we grab the Windows error code under either
# version.
# Furthermore, attempting to use os.listdir on a non-existent path
# in Python 2.4 will result in a Windows error code of
# ERROR_PATH_NOT_FOUND. However, in Python 2.5,
# ERROR_FILE_NOT_FOUND results instead. -exarkun
winerror = getattr(winErrObj, 'winerror', winErrObj.errno)
if winerror not in (ERROR_PATH_NOT_FOUND,
ERROR_FILE_NOT_FOUND,
ERROR_INVALID_NAME,
ERROR_DIRECTORY):
raise
raise _WindowsUnlistableError(winErrObj)
except OSError as ose:
if ose.errno not in (errno.ENOENT, errno.ENOTDIR):
# Other possible errors here, according to linux manpages:
# EACCES, EMIFLE, ENFILE, ENOMEM. None of these seem like the
# sort of thing which should be handled normally. -glyph
raise
raise UnlistableError(ose)
return [self.child(name) for name in subnames]
    def walk(self, descend=None):
        """
        Yield myself, then each of my children, and each of those children's
        children in turn.

        The optional argument C{descend} is a predicate that takes a FilePath,
        and determines whether or not that FilePath is traversed/descended
        into.  It will be called with each path for which C{isdir} returns
        C{True}.  If C{descend} is not specified, all directories will be
        traversed (including symbolic links which refer to directories).

        @param descend: A one-argument callable that will return True for
            FilePaths that should be traversed, False otherwise.

        @return: a generator yielding FilePath-like objects.
        """
        yield self
        if self.isdir():
            for c in self.children():
                # we should first see if it's what we want, then we
                # can walk through the directory
                if (descend is None or descend(c)):
                    for subc in c.walk(descend):
                        # Cycle guard: if our own real path lies under a path
                        # we are about to yield, a symlink has looped back
                        # above us.
                        # NOTE(review): this is a plain string-prefix test, so
                        # it could also fire for sibling paths sharing a name
                        # prefix (e.g. /a/b vs /a/bc) -- confirm acceptable.
                        if os.path.realpath(self.path).startswith(
                            os.path.realpath(subc.path)):
                            raise LinkError("Cycle in file graph.")
                        yield subc
                else:
                    yield c
def sibling(self, path):
"""
Return a L{FilePath} with the same directory as this instance but with
a basename of C{path}.
@param path: The basename of the L{FilePath} to return.
@type path: L{str}
@return: The sibling path.
@rtype: L{FilePath}
"""
return self.parent().child(path)
def descendant(self, segments):
"""
Retrieve a child or child's child of this path.
@param segments: A sequence of path segments as L{str} instances.
@return: A L{FilePath} constructed by looking up the C{segments[0]}
child of this path, the C{segments[1]} child of that path, and so
on.
@since: 10.2
"""
path = self
for name in segments:
path = path.child(name)
return path
def segmentsFrom(self, ancestor):
"""
Return a list of segments between a child and its ancestor.
For example, in the case of a path X representing /a/b/c/d and a path Y
representing /a/b, C{Y.segmentsFrom(X)} will return C{['c',
'd']}.
@param ancestor: an instance of the same class as self, ostensibly an
ancestor of self.
@raise: ValueError if the 'ancestor' parameter is not actually an
ancestor, i.e. a path for /x/y/z is passed as an ancestor for /a/b/c/d.
@return: a list of strs
"""
# this might be an unnecessarily inefficient implementation but it will
# work on win32 and for zipfiles; later I will deterimine if the
# obvious fast implemenation does the right thing too
f = self
p = f.parent()
segments = []
while f != ancestor and p != f:
segments[0:0] = [f.basename()]
f = p
p = p.parent()
if f == ancestor and segments:
return segments
raise ValueError("%r not parent of %r" % (ancestor, self))
# new in 8.0
def __hash__(self):
"""
Hash the same as another L{FilePath} with the same path as mine.
"""
return hash((self.__class__, self.path))
# pending deprecation in 8.0
def getmtime(self):
"""
Deprecated. Use getModificationTime instead.
"""
return int(self.getModificationTime())
def getatime(self):
"""
Deprecated. Use getAccessTime instead.
"""
return int(self.getAccessTime())
def getctime(self):
"""
Deprecated. Use getStatusChangeTime instead.
"""
return int(self.getStatusChangeTime())
class RWX(FancyEqMixin, object):
    """
    Read/write/execute permissions for a single user category (user/owner,
    group, or other/world).  Instantiate with three boolean values:
    readable? writable? executable?.

    @type read: C{bool}
    @ivar read: Whether permission to read is given

    @type write: C{bool}
    @ivar write: Whether permission to write is given

    @type execute: C{bool}
    @ivar execute: Whether permission to execute is given

    @since: 11.1
    """
    compareAttributes = ('read', 'write', 'execute')

    def __init__(self, readable, writable, executable):
        self.read = readable
        self.write = writable
        self.execute = executable

    def __repr__(self):
        return "RWX(read=%s, write=%s, execute=%s)" % (
            self.read, self.write, self.execute)

    def shorthand(self):
        """
        Return the 'ls -l'-style three-character form of these bits, e.g.
        'rwx' or 'r-x'.

        @return: The shorthand string.
        @rtype: L{str}
        """
        flags = (self.read, self.write, self.execute)
        return ''.join(
            letter if flag else '-'
            for letter, flag in zip('rwx', flags))
class Permissions(FancyEqMixin, object):
    """
    Read/write/execute permissions for user, group and other.  Instantiate
    with any portion of the file's mode that includes the permission bits.

    @type user: L{RWX}
    @ivar user: User/Owner permissions

    @type group: L{RWX}
    @ivar group: Group permissions

    @type other: L{RWX}
    @ivar other: Other/World permissions

    @since: 11.1
    """
    compareAttributes = ('user', 'group', 'other')

    def __init__(self, statModeInt):
        # Decode the mode's permission bits, one RWX triple per category.
        categories = ((S_IRUSR, S_IWUSR, S_IXUSR),
                      (S_IRGRP, S_IWGRP, S_IXGRP),
                      (S_IROTH, S_IWOTH, S_IXOTH))
        self.user, self.group, self.other = [
            RWX(*[bool(statModeInt & bit) for bit in bits])
            for bits in categories]

    def __repr__(self):
        return "[%s | %s | %s]" % (self.user, self.group, self.other)

    def shorthand(self):
        """
        Return the 'ls -l'-style nine-character form of these permissions,
        e.g. 'rwx-wx--x'.

        @return: The shorthand string.
        @rtype: L{str}
        """
        return "".join(
            category.shorthand()
            for category in (self.user, self.group, self.other))
class _SpecialNoValue(object):
    """
    A sentinel type representing 'no value', used while deprecating statinfo.

    Please remove once statinfo is removed.
    """
    pass
def _asFilesystemBytes(path, encoding=None):
    """
    Return C{path} as a string of L{bytes} suitable for use on this system's
    filesystem.

    @param path: The path to be made suitable.
    @type path: L{bytes} or L{unicode}

    @param encoding: The encoding to use if coercing to L{bytes}. If none is
        given, L{sys.getfilesystemencoding} is used.

    @return: L{bytes}
    """
    # isinstance (rather than the previous exact type() comparison) means a
    # bytes subclass is returned as-is instead of falling through to
    # .encode(), which bytes objects do not have on Python 3.
    if isinstance(path, bytes):
        return path
    if encoding is None:
        encoding = sys.getfilesystemencoding()
    return path.encode(encoding)
def _asFilesystemText(path, encoding=None):
    """
    Return C{path} as a string of L{unicode} suitable for use on this system's
    filesystem.

    @param path: The path to be made suitable.
    @type path: L{bytes} or L{unicode}

    @param encoding: The encoding to use if coercing to L{unicode}. If none
        is given, L{sys.getfilesystemencoding} is used.

    @return: L{unicode}
    """
    # isinstance (rather than the previous exact type() comparison) means a
    # text subclass is returned as-is instead of falling through to
    # .decode(), which text objects do not have on Python 3.
    # `unicode` here is the compat alias for the text type.
    if isinstance(path, unicode):
        return path
    if encoding is None:
        encoding = sys.getfilesystemencoding()
    return path.decode(encoding)
def _coerceToFilesystemEncoding(path, newpath, encoding=None):
    """
    Return a C{newpath} that is suitable for joining to C{path}: bytes if
    C{path} is bytes, text otherwise.

    @param path: The path that it should be suitable for joining to.
    @param newpath: The new portion of the path to be coerced if needed.
    @param encoding: If coerced, the encoding that will be used.
    """
    # isinstance matches the subclass-tolerant checks used by the
    # _asFilesystemBytes/_asFilesystemText helpers.
    if isinstance(path, bytes):
        return _asFilesystemBytes(newpath, encoding=encoding)
    return _asFilesystemText(newpath, encoding=encoding)
@comparable
@implementer(IFilePath)
class FilePath(AbstractFilePath):
"""
I am a path on the filesystem that only permits 'downwards' access.
Instantiate me with a pathname (for example,
FilePath('/home/myuser/public_html')) and I will attempt to only provide
access to files which reside inside that path. I | |
# pyFAI/test/test_mask.py
#!/usr/bin/env python
# coding: utf-8
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2015-2018 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Test suite for masked arrays"""
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/01/2021"
import unittest
import numpy
import logging
import fabio
from .utilstest import UtilsTest
logger = logging.getLogger(__name__)
if logger.getEffectiveLevel() <= logging.INFO:
import pylab
from .. import load, detectors
from ..azimuthalIntegrator import AzimuthalIntegrator
class TestMask(unittest.TestCase):
    """Check that masks and dummy values hide the bad ring in the test image.

    The test image has a ring of bad pixels (value -10) around 1.5 deg.
    Without a mask the intensity at 1.5 deg is close to -10; with a mask the
    bad pixels are excluded and it is close to 0; with dummy=-20 the dummy
    pixels dominate and it is close to -20.

    The seven public test methods previously duplicated ~20 lines each
    (including a doubled ``meth = meth = ...`` assignment); they now share
    the ``_check_mask`` helper and only differ in their configuration.
    """
    dataFile = "testMask.edf"
    poniFile = "Pilatus1M.poni"

    def setUp(self):
        """Download files"""
        self.dataFile = UtilsTest.getimage(self.__class__.dataFile)
        self.poniFile = UtilsTest.getimage(self.__class__.poniFile)
        self.ai = load(self.poniFile)
        self.data = fabio.open(self.dataFile).data
        # Bad pixels are stored as negative values in the test image.
        self.mask = self.data < 0

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        self.dataFile = self.data = self.ai = self.mask = self.poniFile = None

    def _check_mask(self, meth, zero_mask_dummy=False,
                    labels=("no mask", "with mask", "with dummy"), title=None,
                    masked_msg="With mask the bad pixels are actually Nan (got %.4f)"):
        """Integrate without mask, with mask, and with dummy replacement,
        then check the intensity interpolated at 1.5 deg in each case.

        :param meth: integration method triplet (split, algorithm, impl)
        :param zero_mask_dummy: if True, pass an all-zero mask together with
            the dummy value (exercises the masked code path with no masked pixel)
        :param labels: legend labels for the DEBUG-level plot
        :param title: optional title for the DEBUG-level plot
        :param masked_msg: assertion message template for the masked case
        """
        x1 = self.ai.integrate1d_ng(self.data, 1000, unit="2th_deg", method=meth)
        x2 = self.ai.integrate1d_ng(self.data, 1000, mask=self.mask, unit="2th_deg", method=meth)
        kwargs = {"dummy": -20.0, "delta_dummy": 19.5, "unit": "2th_deg", "method": meth}
        if zero_mask_dummy:
            kwargs["mask"] = numpy.zeros(shape=self.mask.shape, dtype="uint8")
        x3 = self.ai.integrate1d_ng(self.data, 1000, **kwargs)
        res1 = numpy.interp(1.5, *x1)
        res2 = numpy.interp(1.5, *x2)
        res3 = numpy.interp(1.5, *x3)
        if logger.getEffectiveLevel() == logging.DEBUG:
            pylab.plot(*x1, label=labels[0])
            pylab.plot(*x2, label=labels[1])
            pylab.plot(*x3, label=labels[2])
            if title:
                pylab.title(title)
            pylab.legend()
            pylab.show()
            input()
        self.assertAlmostEqual(res1, -10., 1, msg="Without mask the bad pixels are around -10 (got %.4f)" % res1)
        self.assertAlmostEqual(res2, 0, 1, msg=masked_msg % res2)
        self.assertAlmostEqual(res3, -20., 4, msg="Without mask but dummy=-20 the dummy pixels are actually at -20 (got % .4f)" % res3)

    def test_mask_hist(self):
        """Histogram, no pixel splitting."""
        self._check_mask(("no", "histogram", "cython"), title="test_mask_splitBBox")

    def test_mask_splitBBox(self):
        """Histogram with bounding-box pixel splitting."""
        self._check_mask(("bbox", "histogram", "cython"), title="test_mask_splitBBox")

    def test_mask_splitfull(self):
        """Histogram with full pixel splitting."""
        self._check_mask(("full", "histogram", "cython"), title="test_mask_splitBBox")

    def test_mask_LUT(self):
        """LUT integration; the dummy run also passes an all-zero mask."""
        self._check_mask(("bbox", "lut", "cython"), zero_mask_dummy=True,
                         labels=("nomask", "mask", "dummy"))

    def test_mask_CSR(self):
        """CSR integration; the dummy run also passes an all-zero mask."""
        self._check_mask(("bbox", "csr", "cython"), zero_mask_dummy=True,
                         labels=("nomask", "mask", "dummy"))

    @unittest.skipIf(UtilsTest.opencl is False, "User request to skip OpenCL tests")
    def test_mask_LUT_OCL(self):
        """LUT integration running on OpenCL."""
        self._check_mask(("bbox", "lut", "opencl"),
                         labels=("nomask", "mask", "dummy"),
                         masked_msg="With mask the bad pixels are actually around 0 (got %.4f)")

    @unittest.skipIf(UtilsTest.opencl is False, "User request to skip OpenCL tests")
    def test_mask_CSR_OCL(self):
        """CSR integration running on OpenCL."""
        self._check_mask(("bbox", "csr", "opencl"),
                         labels=("nomask", "mask", "dummy"),
                         masked_msg="With mask the bad pixels are actually around 0 (got %.4f)")
class TestMaskBeamstop(unittest.TestCase):
"""
Test for https://github.com/silx-kit/pyFAI/issues/76
"""
dataFile = "mock.tif"
    def setUp(self):
        """
        Download files
        Create a mask for tth<3.7 deg
        """
        self.dataFile = UtilsTest.getimage(self.__class__.dataFile)
        # Synthetic geometry with 100 um square pixels (no PONI file here).
        detector = detectors.Detector(pixel1=0.0001, pixel2=0.0001)
        self.ai = AzimuthalIntegrator(dist=0.1, poni1=0.03, poni2=0.03, detector=detector)
        self.data = fabio.open(self.dataFile).data
        self.tth, self.I = self.ai.integrate1d_ng(self.data, 1000, unit="2th_deg")
        # Mask everything below 3.7 deg two-theta (simulated beamstop).
        self.mask = self.ai.ttha < numpy.deg2rad(3.7)
def test_nomask(self):
"""
without mask, tth value should start at 0
"""
| |
# Create mock boto3 clients for testing.
from datetime import datetime
import boto3
from botocore.stub import Stubber
from logger import LOG
MOCK_COGNITO_USER_POOL_ID = "eu-west-2_poolid"
def _keep_it_real():
    """Stash the genuine boto3.client as boto3.real_client (once), so mocks
    can still construct real clients after boto3.client has been patched."""
    if getattr(boto3, "real_client", None) is None:
        boto3.real_client = boto3.client
def mock_s3_list_objects(bucket_name, prefixes, is_upload=False):
    """Stub two pages of S3 list-objects responses per prefix and patch
    boto3.client to hand back the stubbed client."""
    _keep_it_real()
    s3 = boto3.real_client("s3")
    stub = Stubber(s3)
    for prefix in prefixes:
        if is_upload:
            stub_response_s3_list_upload_objects_page_1(stub, bucket_name, prefix)
            stub_response_s3_list_upload_objects_page_2(stub, bucket_name, prefix)
        else:
            stub_response_s3_list_objects_page_1(stub, bucket_name, prefix)
            stub_response_s3_list_objects_page_2(stub, bucket_name, prefix)
    # Replace generate_presigned_url so the mock runs without AWS creds.
    s3.generate_presigned_url = lambda op, Params, ExpiresIn, HttpMethod: fake_url(
        Params["Bucket"], Params["Key"]
    )
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name, config=None: s3
    return stub
# Module: main.py
def mock_cognito_auth_flow(token, test_user):
    """Stub get_user and admin_list_groups_for_user for an auth flow."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    stub_response_cognito_get_user(stub, token, test_user)
    stub_response_cognito_admin_list_groups_for_user(stub, test_user["Username"])
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
# Module: cognito.py
def mock_cognito_create_user(admin_user, create_user_arguments):
    """Stub the full happy-path create-user sequence: create, MFA
    preference, user settings, add to group."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    email = admin_user["email"]
    group = admin_user["group"]["value"]
    stub_response_cognito_admin_create_user(stub, admin_user, create_user_arguments)
    stub_response_cognito_admin_set_user_mfa_preference(stub, email)
    stub_response_cognito_admin_set_user_settings(stub, email)
    stub_response_cognito_admin_add_user_to_group(stub, email, group)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_create_user_set_mfa_fails(admin_user, create_user_arguments):
    """Stub create-user where admin_set_user_mfa_preference raises a
    ClientError; the remaining steps and the rollback (disable) follow."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    email = admin_user["email"]
    group = admin_user["group"]["value"]
    stub_response_cognito_admin_create_user(stub, admin_user, create_user_arguments)
    # admin_set_user_mfa_preference errors instead of succeeding.
    stub.add_client_error(
        "admin_set_user_mfa_preference",
        expected_params={
            "SMSMfaSettings": {"Enabled": True, "PreferredMfa": True},
            "UserPoolId": MOCK_COGNITO_USER_POOL_ID,
            "Username": email,
        },
    )
    stub_response_cognito_admin_set_user_settings(stub, email)
    stub_response_cognito_admin_add_user_to_group(stub, email, group)
    stub_response_cognito_admin_disable_user(stub, email)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_create_user_set_user_settings_fails(admin_user, create_user_arguments):
    """Stub create-user where admin_set_user_settings raises a ClientError;
    the remaining steps and the rollback (disable) follow."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    email = admin_user["email"]
    group = admin_user["group"]["value"]
    stub_response_cognito_admin_create_user(stub, admin_user, create_user_arguments)
    stub_response_cognito_admin_set_user_mfa_preference(stub, email)
    # admin_set_user_settings errors instead of succeeding.
    stub.add_client_error(
        "admin_set_user_settings",
        expected_params={
            "MFAOptions": [{"DeliveryMedium": "SMS", "AttributeName": "phone_number"}],
            "UserPoolId": MOCK_COGNITO_USER_POOL_ID,
            "Username": email,
        },
    )
    stub_response_cognito_admin_add_user_to_group(stub, email, group)
    stub_response_cognito_admin_disable_user(stub, email)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_create_user_add_user_to_group_fails(admin_user, create_user_arguments):
    """Stub create-user where admin_add_user_to_group raises a ClientError;
    the rollback (disable) follows."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    email = admin_user["email"]
    group = admin_user["group"]["value"]
    stub_response_cognito_admin_create_user(stub, admin_user, create_user_arguments)
    stub_response_cognito_admin_set_user_mfa_preference(stub, email)
    stub_response_cognito_admin_set_user_settings(stub, email)
    # admin_add_user_to_group errors instead of succeeding.
    stub.add_client_error(
        "admin_add_user_to_group",
        expected_params={
            "UserPoolId": MOCK_COGNITO_USER_POOL_ID,
            "Username": email,
            "GroupName": group,
        },
    )
    stub_response_cognito_admin_disable_user(stub, email)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_list_pools(env="testing"):
    """Stub list_user_pools for the given environment."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    stub_response_cognito_list_user_pools(stub, env)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_admin_create_user(user, arguments):
    """Stub admin_create_user with the expected request parameters."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    expected_params = {
        "UserPoolId": MOCK_COGNITO_USER_POOL_ID,
        "Username": user["email"],
        **arguments,
    }
    stub = Stubber(cognito)
    stub_response_cognito_admin_create_user(stub, user, expected_params)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_admin_update_user_attributes(user, attributes):
    """Stub admin_update_user_attributes for the given user/attributes."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    stub_response_cognito_admin_update_user_attributes(stub, user["email"], attributes)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_admin_delete_user(email):
    """Stub admin_delete_user for the given email."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    stub_response_cognito_admin_delete_user(stub, email)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_admin_disable_user(email):
    """Stub admin_disable_user for the given email."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    stub_response_cognito_admin_disable_user(stub, email)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_admin_enable_user(email):
    """Stub admin_enable_user for the given email."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    stub_response_cognito_admin_enable_user(stub, email)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_admin_set_user_settings(email):
    """Stub admin_set_user_settings for the given email."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    stub_response_cognito_admin_set_user_settings(stub, email)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_admin_set_user_mfa_preference(email):
    """Stub admin_set_user_mfa_preference for the given email."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    stub_response_cognito_admin_set_user_mfa_preference(stub, email)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_admin_add_user_to_group(email, group_name):
    """Stub admin_add_user_to_group for the given email and group."""
    _keep_it_real()
    cognito = boto3.real_client("cognito-idp")
    stub = Stubber(cognito)
    stub_response_cognito_admin_add_user_to_group(stub, email, group_name)
    stub.activate()
    # Route all subsequent boto3.client() calls to the stubbed client.
    boto3.client = lambda service, region_name=None: cognito
    return stub
def mock_cognito_admin_remove_user_from_group(email, group_name):
    """Return an activated Stubber faking admin_remove_user_from_group."""
    _keep_it_real()
    real = boto3.real_client("cognito-idp")
    stub = Stubber(real)
    # queue the canned response
    stub_response_cognito_admin_remove_user_from_group(stub, email, group_name)
    stub.activate()

    # hand the stubbed client to any code that asks boto3 for one
    def _patched_client(service, region_name=None):
        return real
    boto3.client = _patched_client
    return stub
def mock_cognito_admin_get_user(email, admin_get_user):
    """Return an activated Stubber replying to admin_get_user with *admin_get_user*."""
    _keep_it_real()
    real = boto3.real_client("cognito-idp")
    stub = Stubber(real)
    # queue the canned response
    stub_response_cognito_admin_get_user(stub, email, admin_get_user)
    stub.activate()

    # hand the stubbed client to any code that asks boto3 for one
    def _patched_client(service, region_name=None):
        return real
    boto3.client = _patched_client
    return stub
def mock_cognito_admin_list_groups_for_user(admin_user):
    """Return an activated Stubber faking admin_list_groups_for_user."""
    _keep_it_real()
    real = boto3.real_client("cognito-idp")
    stub = Stubber(real)
    # queue the canned response
    stub_response_cognito_admin_list_groups_for_user(stub, admin_user["email"])
    stub.activate()

    # hand the stubbed client to any code that asks boto3 for one
    def _patched_client(service, region_name=None):
        return real
    boto3.client = _patched_client
    return stub
# Module: user.py
def mock_user_get_details(email, admin_get_user):
    """Stub the two calls user.get_details makes: get user, then list groups."""
    _keep_it_real()
    real = boto3.real_client("cognito-idp")
    stub = Stubber(real)
    # responses, in call order
    stub_response_cognito_admin_get_user(stub, email, admin_get_user)
    stub_response_cognito_admin_list_groups_for_user(stub, email)
    stub.activate()

    # hand the stubbed client to any code that asks boto3 for one
    def _patched_client(service, region_name=None):
        return real
    boto3.client = _patched_client
    return stub
def mock_user_not_found(email):
    """Stub admin_get_user so it errors, simulating an unknown user."""
    _keep_it_real()
    real = boto3.real_client("cognito-idp")
    stub = Stubber(real)
    # queue a client error instead of a normal response
    stub.add_client_error(
        "admin_get_user",
        expected_params={"UserPoolId": MOCK_COGNITO_USER_POOL_ID, "Username": email},
    )
    stub.activate()

    # hand the stubbed client to any code that asks boto3 for one
    def _patched_client(service, region_name=None):
        return real
    boto3.client = _patched_client
    return stub
def mock_user_update(email, admin_get_user, attributes):
    """Stub the user-update flow: fetch user, list groups, update attributes."""
    _keep_it_real()
    real = boto3.real_client("cognito-idp")
    stub = Stubber(real)
    # responses, in call order
    stub_response_cognito_admin_get_user(stub, email, admin_get_user)
    stub_response_cognito_admin_list_groups_for_user(stub, email)
    stub_response_cognito_admin_update_user_attributes(stub, email, attributes)
    stub.activate()

    # hand the stubbed client to any code that asks boto3 for one
    def _patched_client(service, region_name=None):
        return real
    boto3.client = _patched_client
    return stub
def mock_user_reinvite(admin_user, admin_get_user, create_user_arguments):
    """Stub the full re-invite flow: fetch, delete, then recreate the user."""
    _keep_it_real()
    real = boto3.real_client("cognito-idp")
    email = admin_user["email"]
    group = admin_user["group"]["value"]
    stub = Stubber(real)
    # lookup
    stub_response_cognito_admin_get_user(stub, email, admin_get_user)
    stub_response_cognito_admin_list_groups_for_user(stub, email)
    # removal
    stub_response_cognito_admin_delete_user(stub, email)
    # re-creation and post-create configuration
    stub_response_cognito_admin_create_user(stub, admin_user, create_user_arguments)
    stub_response_cognito_admin_set_user_mfa_preference(stub, email)
    stub_response_cognito_admin_set_user_settings(stub, email)
    stub_response_cognito_admin_add_user_to_group(stub, email, group)
    stub.activate()

    # hand the stubbed client to any code that asks boto3 for one
    def _patched_client(service, region_name=None):
        return real
    boto3.client = _patched_client
    return stub
def mock_delete_user_failure(admin_user, admin_get_user):
    """Stub a delete flow where admin_delete_user fails after a successful lookup."""
    _keep_it_real()
    real = boto3.real_client("cognito-idp")
    email = admin_user["email"]
    stub = Stubber(real)
    # lookup succeeds
    stub_response_cognito_admin_get_user(stub, email, admin_get_user)
    stub_response_cognito_admin_list_groups_for_user(stub, email)
    # deletion errors out
    stub.add_client_error(
        "admin_delete_user",
        expected_params={
            "UserPoolId": MOCK_COGNITO_USER_POOL_ID,
            "Username": email,
        },
    )
    stub.activate()

    # hand the stubbed client to any code that asks boto3 for one
    def _patched_client(service, region_name=None):
        return real
    boto3.client = _patched_client
    return stub
def mock_create_user_failure(admin_user, admin_get_user, create_user_arguments):
    """Stub a re-invite flow where admin_create_user fails after the delete step."""
    _keep_it_real()
    real = boto3.real_client("cognito-idp")
    email = admin_user["email"]
    stub = Stubber(real)
    # lookup succeeds
    stub_response_cognito_admin_get_user(stub, email, admin_get_user)
    stub_response_cognito_admin_list_groups_for_user(stub, email)
    # deletion succeeds
    stub_response_cognito_admin_delete_user(stub, email)
    # creation errors out
    stub.add_client_error(
        "admin_create_user", expected_params=create_user_arguments,
    )
    stub.activate()

    # hand the stubbed client to any code that asks boto3 for one
    def _patched_client(service, region_name=None):
        return real
    boto3.client = _patched_client
    return stub
def mock_config_set_app_settings(parameters):
    """Stub the cognito describe calls made when applying app settings."""
    _keep_it_real()
    real = boto3.real_client("cognito-idp")
    stub = Stubber(real)
    # responses, in call order
    stub_response_cognito_list_user_pool_clients(stub, parameters)
    stub_response_cognito_describe_user_pool_client(stub, parameters)
    stub_response_cognito_describe_user_pool(stub, parameters)
    stub.activate()

    # hand the stubbed client to any code that asks boto3 for one
    def _patched_client(service, region_name=None):
        return real
    boto3.client = _patched_client
    return stub
def mock_config_load_ssm_parameters(path, parameters):
    """Stub the SSM get_parameters_by_path call under *path*."""
    _keep_it_real()
    real = boto3.real_client("ssm")
    stub = Stubber(real)
    # queue the canned response
    stub_response_ssm_get_parameters_by_path(stub, path, parameters)
    stub.activate()

    # hand the stubbed client to any code that asks boto3 for one
    def _patched_client(service, region_name=None):
        return real
    boto3.client = _patched_client
    return stub
# Responses
# Client: ssm
def stub_response_ssm_get_parameters_by_path(stubber, path, parameters):
    """Register a canned get_parameters_by_path response on *stubber*.

    *parameters* maps parameter names (relative to *path*) to values.
    """
    response = {
        "Parameters": [
            {"Name": f"{path}{name}", "Value": value}
            for name, value in parameters.items()
        ]
    }
    expected_request = {"Path": path, "Recursive": True, "WithDecryption": True}
    stubber.add_response("get_parameters_by_path", response, expected_request)
# Client: s3
def stub_response_s3_list_objects_page_1(stubber, bucket_name, prefix):
    """Register the first (truncated) list_objects page for *prefix*."""
    timestamp = datetime.utcnow()
    first_page = {
        "Contents": [
            {"Key": f"{prefix}/people1.csv", "Size": 100, "LastModified": timestamp},
            {"Key": f"{prefix}/people2.csv", "Size": 200, "LastModified": timestamp},
            {
                "Key": f"{prefix}/nested/nested_people1.csv",
                "Size": 300,
                "LastModified": timestamp,
            },
        ],
        # tell the paginating caller to fetch a second page
        "IsTruncated": True,
        "NextMarker": "page-2",
    }
    expected_request = {"Bucket": bucket_name, "Prefix": prefix}
    stubber.add_response("list_objects", first_page, expected_request)
def stub_response_s3_list_objects_page_2(stubber, bucket_name, prefix):
    """Register the second (final) list_objects page for *prefix*."""
    timestamp = datetime.utcnow()
    # no IsTruncated/NextMarker: this is the last page
    second_page = {
        "Contents": [
            {"Key": f"{prefix}/people3.csv", "Size": 100, "LastModified": timestamp},
            {"Key": f"{prefix}/people4.csv", "Size": 200, "LastModified": timestamp},
        ]
    }
    expected_request = {"Bucket": bucket_name, "Prefix": prefix, "Marker": "page-2"}
    stubber.add_response("list_objects", second_page, expected_request)
def stub_response_s3_list_upload_objects_page_1(stubber, bucket_name, prefix):
    """Register the first (truncated) upload-listing page: each CSV plus its
    sidecar ``-metadata.json`` object."""
    timestamp = datetime.utcnow()
    contents = []
    # each upload is a CSV followed by its metadata sidecar of the same size
    for name, size in (("people1.csv", 100),
                       ("people2.csv", 200),
                       ("nested/nested_people1.csv", 300)):
        contents.append(
            {"Key": f"{prefix}/{name}", "Size": size, "LastModified": timestamp})
        contents.append(
            {"Key": f"{prefix}/{name}-metadata.json", "Size": size, "LastModified": timestamp})
    first_page = {
        "Contents": contents,
        # tell the paginating caller to fetch a second page
        "IsTruncated": True,
        "NextMarker": "page-2",
    }
    expected_request = {"Bucket": bucket_name, "Prefix": prefix}
    stubber.add_response("list_objects", first_page, expected_request)
def stub_response_s3_list_upload_objects_page_2(stubber, bucket_name, prefix):
now = datetime.utcnow()
mock_list_objects_2 | |
dadi.Spectrum.from_phi(phi, (n1,n2), (xx,xx))
# mis-oriented
fsM = dadi.Numerics.reverse_array(fsO)
### Sum the two spectra in proportion O
fs = O*fsO+(1-O)*fsM
return fs
def AMG(params, (n1,n2), pts):
    # NOTE(review): tuple parameter unpacking in the signature is Python-2-only syntax.
    nu1, nu2, b1, b2, m12, m21, Tam, Ts, O = params
    # NOTE(review): this string literal follows the unpack statement, so it is NOT
    # the function's __doc__; it is a no-op expression kept for readability.
    """
    Model with split, ancient migration
    nu1: Size of population 1 after split.
    nu2: Size of population 2 after split.
    b1: Population growth coefficient of population 1
    b2: Population growth coefficient of population 2
    m12: Migration from pop 2 to pop 1 (2*Na*m12).
    m21: Migration from pop 1 to pop 2.
    Tam: The scaled time between the split and the end of ancient migration (in units of 2*Na generations).
    Ts: The scaled time between the end of ancient migration and present.
    O: The proportion of accurate orientation
    n1,n2: Size of fs to generate.
    pts: Number of points to use in grid for evaluation.
    """
    # Define the grid we'll use
    xx = dadi.Numerics.default_grid(pts)
    # phi for the equilibrium ancestral population
    phi = dadi.PhiManip.phi_1D(xx)
    # Now do the divergence event
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    # We start the population size change after the split independently in each population and set the migration rate to m12 and m21
    # Exponential-style growth: size goes from nu1 to nu1*b1 (resp. nu2*b2) over the Tam epoch
    bnu1_func = lambda t: nu1 * b1**(t/Tam)
    bnu2_func = lambda t: nu2 * b2**(t/Tam)
    phi = dadi.Integration.two_pops(phi, xx, Tam, bnu1_func, bnu2_func, m12=m12, m21=m21)
    # We continue the population size change after ancient migration (until present) independently in each population and set the migration rates to zero
    bnu1_func = lambda t: nu1 * b1**(t/Ts)
    bnu2_func = lambda t: nu2 * b2**(t/Ts)
    phi = dadi.Integration.two_pops(phi, xx, Ts, bnu1_func, bnu2_func, m12=0, m21=0)
    ###
    ## Finally, calculate the spectrum.
    # oriented
    fsO = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,xx))
    # mis-oriented: reversing the array models ancestral-state misidentification
    fsM = dadi.Numerics.reverse_array(fsO)
    ### Sum the two spectra in proportion O
    fs = O*fsO+(1-O)*fsM
    return fs
def AM2N(params, (n1,n2), pts):
    # NOTE(review): tuple parameter unpacking in the signature is Python-2-only syntax.
    nu1, nu2, hrf, m12, m21, Tam, Ts, Q, O = params
    # NOTE(review): this string literal follows the unpack statement, so it is NOT
    # the function's __doc__; it is a no-op expression kept for readability.
    """
    Model with split, ancient migration, heterogenous effective population size (with 2 classes of loci shared by the two populations = Hill-Robertson effects)
    nu1: Size of population 1 after split.
    nu2: Size of population 2 after split.
    hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to the effects of background selection and selective sweep effects
    m12: Migration from pop 2 to pop 1 (2*Na*m12).
    m21: Migration from pop 1 to pop 2.
    Tam: The scaled time between the split and the end of ancient migration.
    Ts: The scaled time between the end of ancient migration and present (in units of 2*Na generations).
    Q: The proportion of the genome with a reduced effective size due to selection at linked sites
    O: The proportion of accurate orientation
    n1,n2: Size of fs to generate.
    pts: Number of points to use in grid for evaluation.
    """
    # Define the grid we'll use
    xx = dadi.Numerics.default_grid(pts)
    #### Calculate the spectrum in normally-recombining regions
    # phi for the equilibrium ancestral population
    phinr = dadi.PhiManip.phi_1D(xx)
    # Now do the divergence event
    phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
    # We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
    phinr = dadi.Integration.two_pops(phinr, xx, Tam, nu1, nu2, m12=m12, m21=m21)
    # We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
    phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=0, m21=0)
    ###
    ## calculate the spectrum.
    # oriented
    fsnrO = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
    # mis-oriented: reversing the array models ancestral-state misidentification
    fsnrM = dadi.Numerics.reverse_array(fsnrO)
    #### Spectrum of low-recombining regions
    # phi for the equilibrium ancestral population
    philr = dadi.PhiManip.phi_1D(xx)
    # Now do the divergence event
    philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
    # We set the population sizes after the split to hrf*nu1 and hrf*nu2 and the migration rates to m12 and m21
    philr = dadi.Integration.two_pops(philr, xx, Tam, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
    # We keep the population sizes after the split to hrf*nu1 and hrf*nu2 and set the migration rates to zero
    philr = dadi.Integration.two_pops(philr, xx, Ts, nu1*hrf, nu2*hrf, m12=0, m21=0)
    ###
    ## calculate the spectrum.
    # oriented
    fslrO = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
    # mis-oriented
    fslrM = dadi.Numerics.reverse_array(fslrO)
    #### Sum the two spectra: weight by orientation proportion O and by the
    #### genomic fractions Q (linked selection) / 1-Q (neutral recombination)
    fs= O*((1-Q)*fsnrO + Q*fslrO) + (1-O)*((1-Q)*fsnrM + Q*fslrM)
    return fs
def AM2N2m(params, (n1,n2), pts):
nu1, nu2, hrf, m12, m21, me12, me21, Tam, Ts, P, Q, O = params
"""
Model of semi permeability with split, ancient migration with 2 migration rates, heterogenous effective population size (2 classes, shared by the two populations = background selection)
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to the effects of background selection and selective sweep effects
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
me12: Effective migration from pop 2 to pop 1 in genomic islands.
me21: Effective migration from pop 1 to pop 2 in genomic islands.
Ts: The scaled time between the split and the ancient migration (in units of 2*Na generations).
Tam: The scale time between the ancient migration and present.
P: The proportion of the genome evolving neutrally
Q: The proportion of the genome with a reduced effective size due to selection at linked sites
O: The proportion of accurate orientation
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
#### Calculate the neutral spectrum
# phi for the equilibrium ancestral population
phiN = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiN = dadi.PhiManip.phi_1D_to_2D(xx, phiN)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to m12 and m21
phiN = dadi.Integration.two_pops(phiN, xx, Tam, nu1, nu2, m12=m12, m21=m21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phiN = dadi.Integration.two_pops(phiN, xx, Ts, nu1, nu2, m12=0, m21=0)
###
## calculate the spectrum.
# oriented
fsNO = dadi.Spectrum.from_phi(phiN, (n1,n2), (xx,xx))
# mis-oriented
fsNM = dadi.Numerics.reverse_array(fsNO)
#### Calculate the genomic island spectrum
# phi for the equilibrium ancestral population
phiI = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiI = dadi.PhiManip.phi_1D_to_2D(xx, phiI)
# We set the population sizes after the split to nu1 and nu2 and the migration rates to me12 and me21
phiI = dadi.Integration.two_pops(phiI, xx, Ts, nu1, nu2, m12=me12, m21=me21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rate to zero
phiI = dadi.Integration.two_pops(phiI, xx, Tam, nu1, nu2, m12=0, m21=0)
###
## calculate the spectrum.
# oriented
fsIO = dadi.Spectrum.from_phi(phiI, (n1,n2), (xx,xx))
# mis-oriented
fsIM = dadi.Numerics.reverse_array(fsIO)
#### Calculate the pectrum in normally-recombining regions
# phi for the equilibrium ancestral population
phinr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
# We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
phinr = dadi.Integration.two_pops(phinr, xx, Tam, nu1, nu2, m12=m12, m21=m21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=0, m21=0)
###
## calculate the spectrum.
# oriented
fsnrO = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
# mis-oriented
fsnrM = dadi.Numerics.reverse_array(fsnrO)
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the population sizes after the split to hrf*nu1 and hrf*nu2 and the migration rates to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Tam, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
# We keep the population sizes after the split to hrf*nu1 and hrf*nu2 and set the migration rates to zero
philr | |
dim=2)
all_encoder_outputs = torch.where(mask, torch.tensor(0., device=flair.device), all_encoder_outputs)
# use token embedding as initial hidden state for decoder
if self.encoder_embeddings:
# embed sentences
self.encoder_embeddings.embed(sentences)
# create initial hidden state tensor for batch (num_layers, batch_size, hidden_size)
token_embedding_hidden = torch.stack(
self.rnn_layers * [torch.stack([token.get_embedding() for token in tokens])])
initial_hidden_for_decoder.append(token_embedding_hidden)
# concatenate everything together and project to appropriate size for decoder
initial_hidden_for_decoder = self.emb_to_hidden(torch.cat(initial_hidden_for_decoder, dim=2))
return initial_hidden_for_decoder, all_encoder_outputs
    def encode_token(self, token: Token):
        """Encode a single token for the decoder.

        Returns a tuple ``(initial_hidden_for_decoder, all_encoder_outputs)``:
        the projected initial hidden state for the decoder RNN, and the
        per-character encoder outputs (``None`` when character encoding is off).
        """
        # variable to store initial hidden states for decoder
        initial_hidden_for_decoder = []
        all_encoder_outputs = None
        # encode input characters by sending them through RNN
        if self.encode_characters:
            # note that we do not need to fill up with dummy symbols since we process each token seperately
            encoder_input_indices = self.words_to_char_indices([token.text],
                                                               start_symbol=self.start_symbol,
                                                               end_symbol=self.end_symbol)
            # embed character one-hots
            input_vector = self.encoder_character_embedding(encoder_input_indices)
            # send through encoder RNN (produces initial hidden for decoder)
            all_encoder_outputs, initial_hidden_states = self.encoder_rnn(input_vector)
            # since bidirectional rnn is only used in encoding we need to project outputs to hidden_size of decoder
            if self.bi_encoding:
                # project 2*hidden_size to hidden_size
                all_encoder_outputs = self.bi_hidden_states_to_hidden_size(all_encoder_outputs)
                # concatenate the final hidden states of the encoder. These will be projected to hidden_size of decoder later with self.emb_to_hidden
                # initial_hidden_states = torch.transpose(initial_hidden_states, 0,1).reshape(1,len(tokens),2*self.rnn_hidden_size) # works only for rnn_layers = 1
                # boolean mask pairing the forward/backward state of each layer
                conditions = torch.cat(2 * [torch.eye(self.rnn_layers).bool()])
                bi_states = [initial_hidden_states[conditions[:, i], :, :] for i in range(self.rnn_layers)]
                # concatenate forward and backward state per layer along the feature dim
                initial_hidden_states = torch.stack([torch.cat((b[0, :, :], b[1, :, :]), dim=1) for b in bi_states])
            initial_hidden_for_decoder.append(initial_hidden_states)
        # use token embedding as initial hidden state for decoder
        if self.encoder_embeddings:
            # create initial hidden state tensor for batch (num_layers, batch_size, hidden_size)
            # NOTE(review): assumes token.get_embedding() was populated by a prior embed() call — confirm at call sites
            token_embedding_hidden = torch.stack(self.rnn_layers * [token.get_embedding()]).unsqueeze(1)
            initial_hidden_for_decoder.append(token_embedding_hidden)
        # concatenate everything together and project to appropriate size for decoder
        initial_hidden_for_decoder = self.emb_to_hidden(torch.cat(initial_hidden_for_decoder, dim=2))
        return initial_hidden_for_decoder, all_encoder_outputs
def _calculate_loss(self, scores, labels):
# score vector has to have a certain format for (2d-)loss fct (batch_size, alphabet_size, 1, max_seq_length)
scores_in_correct_format = scores.permute(0, 2, 1).unsqueeze(2)
# create target vector (batch_size, max_label_seq_length + 1)
target = self.words_to_char_indices(labels, start_symbol=False, end_symbol=True, padding_in_front=False)
target.unsqueeze_(1) # (batch_size, 1, max_label_seq_length + 1)
return self.loss(scores_in_correct_format, target), len(labels)
def forward_loss(self, sentences: Union[List[Sentence], Sentence]) -> torch.tensor:
scores, labels = self.forward_pass(sentences)
return self._calculate_loss(scores, labels)
def predict(self,
sentences: Union[List[Sentence], Sentence],
label_name='predicted',
mini_batch_size: int = 16,
embedding_storage_mode="None",
return_loss=False,
print_prediction=False,
):
'''
Predict lemmas of words for a given (list of) sentence(s).
:param sentences: sentences to predict
:param label_name: label name used for predicted lemmas
:param mini_batch_size: number of tokens that are send through the RNN simultaneously, assuming batching_in_rnn is set to True
:param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
:param return_loss: whether or not to compute and return loss. Setting it to True only makes sense if labels are provided
:param print_prediction: If True, lemmatized sentences will be printed in the console.
'''
if isinstance(sentences, Sentence):
sentences = [sentences]
# filter empty sentences
sentences = [sentence for sentence in sentences if len(sentence) > 0]
if len(sentences) == 0:
return sentences
# max length of the predicted sequences
if not self.dependent_on_input:
max_length = self.max_sequence_length
else:
max_length = max([len(token.text) + 1 for sentence in sentences for token in sentence])
# for printing
line_to_print = ''
overall_loss = 0
number_tokens_in_total = 0
with torch.no_grad():
dataloader = DataLoader(dataset=SentenceDataset(sentences), batch_size=mini_batch_size)
for batch in dataloader:
# stop if all sentences are empty
if not batch: continue
# remove previously predicted labels of this type
for sentence in batch:
for token in sentence:
token.remove_labels(label_name)
# create list of tokens in batch
tokens_in_batch = [token for sentence in batch for token in sentence]
number_tokens = len(tokens_in_batch)
number_tokens_in_total += number_tokens
# encode inputs
hidden, all_encoder_outputs = self.encode(batch)
# create input for first pass (batch_size, 1, input_size), first letter is special character <S>
# sequence length is always set to one in prediction
input_indices = self.start_index * torch.ones(number_tokens, dtype=torch.long,
device=flair.device).unsqueeze(1)
# option 1: greedy decoding
if self.beam_size == 1:
# predictions
predicted = [[] for _ in range(number_tokens)]
for decode_step in range(max_length):
# decode next character
output_vectors, hidden = self.decode(input_indices, hidden, all_encoder_outputs)
log_softmax_probs = torch.nn.functional.log_softmax(output_vectors, dim=2)
# pick top beam size many outputs with highest probabilities
input_indices = log_softmax_probs.argmax(dim=2)
for i in range(number_tokens):
if len(predicted[i]) > 0 and predicted[i][-1] == self.end_index: continue
predicted[i].append(input_indices[i].item())
for t_id, token in enumerate(tokens_in_batch):
predicted_lemma = ''.join(
self.char_dictionary.get_item_for_index(idx) if idx != self.end_index else ""
for idx in predicted[t_id])
token.set_label(typename=label_name, value=predicted_lemma)
# option 2: beam search
else:
output_vectors, hidden = self.decode(input_indices, hidden, all_encoder_outputs)
# out_probs = self.softmax(output_vectors).squeeze(1)
log_softmax_probs = torch.nn.functional.log_softmax(output_vectors, dim=2).squeeze(1)
# make sure no dummy symbol <> or start symbol <S> is predicted
log_softmax_probs[:, self.dummy_index] = -inf
log_softmax_probs[:, self.start_index] = -inf
# pick top beam size many outputs with highest probabilities
# probabilities, leading_indices = out_probs.topk(self.beam_size, 1) # max prob along dimension 1
log_probabilities, leading_indices = log_softmax_probs.topk(self.beam_size, 1)
# leading_indices and probabilities have size (batch_size, beam_size)
# keep scores of beam_size many hypothesis for each token in the batch
scores = log_probabilities.view(-1, 1)
# stack all leading indices of all hypothesis and corresponding hidden states in two tensors
leading_indices = leading_indices.view(-1, 1) # this vector goes through RNN in each iteration
hidden_states_beam = torch.stack(self.beam_size * [hidden], dim=2).view(self.rnn_layers, -1,
self.rnn_hidden_size)
# save sequences so far
sequences = torch.tensor([[i.item()] for i in leading_indices], device=flair.device)
# keep track of how many hypothesis were completed for each token
n_completed = [0 for _ in range(number_tokens)] # cpu
final_candidates = [[] for _ in range(number_tokens)] # cpu
# if all_encoder_outputs returned, expand them to beam size (otherwise keep this as None)
batched_encoding_output = torch.stack(self.beam_size * [all_encoder_outputs], dim=1).view(
self.beam_size * number_tokens, -1, self.rnn_hidden_size) if self.use_attention else None
for j in range(1, max_length):
output_vectors, hidden_states_beam = self.decode(leading_indices,
hidden_states_beam,
batched_encoding_output)
# decode with log softmax
out_log_probs = torch.nn.functional.log_softmax(output_vectors, dim=2)
# make sure no dummy symbol <> or start symbol <S> is predicted
out_log_probs[:, 0, self.dummy_index] = -inf
out_log_probs[:, 0, self.start_index] = -inf
log_probabilities, index_candidates = out_log_probs.topk(self.beam_size, 2)
log_probabilities.squeeze_(1)
index_candidates.squeeze_(1)
# check if an end symbol <E> has been predicted and, in that case, set hypothesis aside
end_symbols = (index_candidates == self.end_index).nonzero(as_tuple=False)
for tuple in end_symbols:
# if the sequence is already ended, do not record as candidate
if sequences[tuple[0], -1].item() == self.end_index: continue
# index of token in in list tokens_in_batch
token_number = torch.div(tuple[0], self.beam_size, rounding_mode='trunc')
# print(token_number)
seq = sequences[tuple[0], :] # hypothesis sequence
# hypothesis score
score = (scores[tuple[0]] + log_probabilities[tuple[0], tuple[1]]) / (len(seq) + 1)
final_candidates[token_number].append((seq, score))
# TODO: remove token if number of completed hypothesis exceeds given value
n_completed[token_number] += 1
# set score of corresponding entry to -inf so it will not be expanded
log_probabilities[tuple[0], tuple[1]] = -inf
# get leading_indices for next expansion
# find highest scoring hypothesis among beam_size*beam_size possible ones for each token
# take beam_size many copies of scores vector and add scores of possible new extensions
# size (beam_size*batch_size, beam_size)
hypothesis_scores = torch.cat(self.beam_size * [scores], dim=1) + log_probabilities
# print(hypothesis_scores)
# reshape to vector of size (batch_size, beam_size*beam_size), each row contains beam_size*beam_size scores of the new possible hypothesis
hypothesis_scores_per_token = hypothesis_scores.view(number_tokens, self.beam_size ** 2)
# print(hypothesis_scores_per_token)
# choose beam_size best for each token - size (batch_size, beam_size)
best_scores, indices_per_token = hypothesis_scores_per_token.topk(self.beam_size, 1)
# out of indices_per_token we now need to recompute the original indices of the hypothesis in a list of length beam_size*batch_size
# where the first three inidices belong to the first token, the next three to the second token, and so on
beam_numbers = []
seq_numbers = []
for i, row in enumerate(indices_per_token):
beam_numbers.extend(i * self.beam_size + index.item() // self.beam_size for index in row)
seq_numbers.extend(index.item() % self.beam_size for index in row)
# with these indices we can compute the tensors for the next iteration
# expand sequences with corresponding index
sequences = torch.cat(
(sequences[beam_numbers], index_candidates[beam_numbers, seq_numbers].unsqueeze(1)), dim=1)
# add log-probabilities to the scores
scores = scores[beam_numbers] + log_probabilities[beam_numbers, seq_numbers].unsqueeze(1)
# save new leading | |
# this file is copied from
# https://github.com/Hadisalman/smoothing-adversarial
# originally written by <NAME>.
from abc import ABCMeta, abstractmethod
import numpy as np
import torch
from torch.autograd import Variable
from torch.nn import CrossEntropyLoss
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from typing import Optional
class Attacker(metaclass=ABCMeta):
    """Abstract interface for adversarial attacks on a model."""

    @abstractmethod
    def attack(self, model, inputs, labels):
        """Produce adversarial examples for *inputs* with respect to *labels*."""
        raise NotImplementedError
# Modification of the code from https://github.com/jeromerony/fast_adversarial
class PGD_L2(Attacker):
"""
PGD attack
Parameters
----------
steps : int
Number of steps for the optimization.
max_norm : float or None, optional
If specified, the norms of the perturbations will not be greater than this value which might lower success rate.
device : torch.device, optional
Device on which to perform the attack.
"""
def __init__(self,
steps: int,
random_start: bool = True,
max_norm: Optional[float] = None,
device: torch.device = torch.device('cpu')) -> None:
super(PGD_L2, self).__init__()
self.steps = steps
self.random_start = random_start
self.max_norm = max_norm
self.device = device
def attack(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
noise: torch.Tensor = None, num_noise_vectors=1, targeted: bool = False, no_grad=False) -> torch.Tensor:
if num_noise_vectors == 1:
return self._attack(model, inputs, labels, noise, targeted)
else:
if no_grad:
with torch.no_grad():
return self._attack_mutlinoise_no_grad(model, inputs, labels, noise, num_noise_vectors, targeted)
else:
return self._attack_mutlinoise(model, inputs, labels, noise, num_noise_vectors, targeted)
    def _attack(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
                noise: torch.Tensor = None, targeted: bool = False) -> torch.Tensor:
        """
        Performs the attack of the model for the inputs and labels.
        Parameters
        ----------
        model : nn.Module
            Model to attack.
        inputs : torch.Tensor
            Batch of samples to attack. Values should be in the [0, 1] range.
        labels : torch.Tensor
            Labels of the samples to attack if untargeted, else labels of targets.
        targeted : bool, optional
            Whether to perform a targeted attack or not.
        Returns
        -------
        torch.Tensor
            Batch of samples modified to be adversarial to the model.
        """
        # NOTE(review): self.random_start is never consulted here — delta always starts at zero.
        # NOTE(review): self.max_norm=None would raise a TypeError at the lr computation below — confirm callers always set it.
        if inputs.min() < 0 or inputs.max() > 1: raise ValueError('Input values should be in the [0, 1] range.')
        batch_size = inputs.shape[0]
        # untargeted attacks maximize the loss (multiplier -1 on the minimized objective)
        multiplier = 1 if targeted else -1
        delta = torch.zeros_like(inputs, requires_grad=True)
        # Setup optimizers
        optimizer = optim.SGD([delta], lr=self.max_norm / self.steps * 2)
        for i in range(self.steps):
            adv = inputs + delta
            if noise is not None:
                adv = adv + noise
            logits = model(adv)
            pred_labels = logits.argmax(1)  # NOTE(review): computed but unused
            ce_loss = F.cross_entropy(logits, labels, reduction='sum')
            loss = multiplier * ce_loss
            optimizer.zero_grad()
            loss.backward()
            # renorming gradient: normalize each sample's gradient to unit L2 norm
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer.step()
            # project: clamp adv into [0,1] (via add/clamp/sub) then cap delta's L2 norm per sample
            delta.data.add_(inputs)
            delta.data.clamp_(0, 1).sub_(inputs)
            delta.data.renorm_(p=2, dim=0, maxnorm=self.max_norm)
        return inputs + delta
    def _attack_mutlinoise(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
                           noise: torch.Tensor = None, num_noise_vectors: int = 1,
                           targeted: bool = False) -> torch.Tensor:
        """
        Performs the attack of the model for the inputs and labels.
        Parameters
        ----------
        model : nn.Module
            Model to attack.
        inputs : torch.Tensor
            Batch of samples to attack. Values should be in the [0, 1] range.
        labels : torch.Tensor
            Labels of the samples to attack if untargeted, else labels of targets.
        targeted : bool, optional
            Whether to perform a targeted attack or not.
        Returns
        -------
        torch.Tensor
            Batch of samples modified to be adversarial to the model.
        """
        # NOTE(review): assumes inputs holds num_noise_vectors consecutive copies per label
        # (inputs.shape[0] == len(labels) * num_noise_vectors) — confirm at call sites.
        if inputs.min() < 0 or inputs.max() > 1: raise ValueError('Input values should be in the [0, 1] range.')
        batch_size = labels.shape[0]
        # untargeted attacks maximize the loss (multiplier -1 on the minimized objective)
        multiplier = 1 if targeted else -1
        # one shared delta per label group, replicated across its noise copies below
        delta = torch.zeros((len(labels), *inputs.shape[1:]), requires_grad=True, device=self.device)
        # Setup optimizers
        # NOTE(review): self.max_norm=None would raise a TypeError here — confirm callers always set it.
        optimizer = optim.SGD([delta], lr=self.max_norm / self.steps * 2)
        for i in range(self.steps):
            adv = inputs + delta.repeat(1, num_noise_vectors, 1, 1).view_as(inputs)
            if noise is not None:
                adv = adv + noise
            logits = model(adv)
            # majority vote over the noise copies of each sample (unused)
            pred_labels = logits.argmax(1).reshape(-1, num_noise_vectors).mode(1)[0]
            # safe softmax
            softmax = F.softmax(logits, dim=1)
            # average the probabilities across noise
            average_softmax = softmax.reshape(-1, num_noise_vectors, logits.shape[-1]).mean(1, keepdim=True).squeeze(1)
            # clamp avoids log(0) = -inf
            logsoftmax = torch.log(average_softmax.clamp(min=1e-20))
            ce_loss = F.nll_loss(logsoftmax, labels)
            loss = multiplier * ce_loss
            optimizer.zero_grad()
            loss.backward()
            # renorming gradient: normalize each group's gradient to unit L2 norm
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer.step()
            # project against the first copy of each group, then cap delta's L2 norm
            delta.data.add_(inputs[::num_noise_vectors])
            delta.data.clamp_(0, 1).sub_(inputs[::num_noise_vectors])
            delta.data.renorm_(p=2, dim=0, maxnorm=self.max_norm)
        return inputs + delta.repeat(1, num_noise_vectors, 1, 1).view_as(inputs)
    def _attack_mutlinoise_no_grad(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
                                   noise: torch.Tensor = None, num_noise_vectors: int = 1,
                                   targeted: bool = False) -> torch.Tensor:
        """
        Gradient-free variant of the multi-noise attack: instead of
        back-propagating through the model, the update direction is estimated
        from the per-copy NLL weighted by the injected noise (a zeroth-order,
        NES-style estimate).

        NOTE(review): ``attack`` raises NotImplementedError for the no_grad
        path, so this method appears to be unreachable from the public API -
        confirm before relying on it. Also note ``multiplier`` is computed but
        never applied to the update, so ``targeted`` has no effect here.

        Parameters
        ----------
        model : nn.Module
            Model to attack.
        inputs : torch.Tensor
            Batch of samples to attack. Values should be in the [0, 1] range.
            Assumes each group of ``num_noise_vectors`` consecutive rows are
            noisy copies of the same base sample - TODO confirm with callers.
        labels : torch.Tensor
            Labels of the samples to attack if untargeted, else labels of targets
            (one label per *base* sample).
        noise : torch.Tensor, optional
            Additive noise with the same shape as ``inputs``. Must not be None
            here: it is dereferenced unconditionally to build the gradient
            estimate (``noise.shape`` below).
        num_noise_vectors : int, optional
            Number of noise copies per base sample.
        targeted : bool, optional
            Whether to perform a targeted attack or not (currently ignored, see
            note above).

        Returns
        -------
        torch.Tensor
            Batch of samples modified to be adversarial to the model.
        """
        if inputs.min() < 0 or inputs.max() > 1: raise ValueError('Input values should be in the [0, 1] range.')
        batch_size = labels.shape[0]
        multiplier = 1 if targeted else -1
        # One delta per *base* sample; it is broadcast over the noise copies below.
        delta = torch.zeros((len(labels), *inputs.shape[1:]), requires_grad=True, device=self.device)
        # Setup optimizers (unused: the update below is applied manually)
        optimizer = optim.SGD([delta], lr=self.max_norm / self.steps * 2)
        for i in range(self.steps):
            adv = inputs + delta.repeat(1, num_noise_vectors, 1, 1).view_as(inputs)
            if noise is not None:
                adv = adv + noise
            logits = model(adv)
            # Majority vote over the noise copies (computed but unused).
            pred_labels = logits.argmax(1).reshape(-1, num_noise_vectors).mode(1)[0]
            # softmax over classes
            softmax = F.softmax(logits, dim=1)
            # Zeroth-order estimate: per-copy NLL broadcast to image shape and
            # multiplied by the noise direction (note: nll_loss is applied to
            # probabilities, not log-probabilities, here).
            grad = F.nll_loss(softmax,
                              labels.unsqueeze(1).repeat(1, 1, num_noise_vectors).view(batch_size * num_noise_vectors),
                              reduction='none').repeat(*noise.shape[1:], 1).permute(3, 0, 1, 2) * noise
            # average the estimate across the noise copies of each base sample
            grad = grad.reshape(-1, num_noise_vectors, *inputs.shape[1:]).mean(1)
            # normalize the estimated gradient per sample
            grad_norms = grad.view(batch_size, -1).norm(p=2, dim=1)
            grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                grad[grad_norms == 0] = torch.randn_like(grad[grad_norms == 0])
            # optimizer.step()
            delta = delta + grad * self.max_norm / self.steps * 2
            # Project: clamp the adversarial image to [0, 1] ...
            delta.data.add_(inputs[::num_noise_vectors])
            delta.data.clamp_(0, 1).sub_(inputs[::num_noise_vectors])
            # ... and renorm delta onto the L2 ball of radius max_norm.
            delta.data.renorm_(p=2, dim=0, maxnorm=self.max_norm)
        return inputs + delta.repeat(1, num_noise_vectors, 1, 1).view_as(inputs)
# Source code from https://github.com/jeromerony/fast_adversarial
class DDN(Attacker):
"""
DDN attack: decoupling the direction and norm of the perturbation to achieve a small L2 norm in few steps.
Parameters
----------
steps : int
Number of steps for the optimization.
gamma : float, optional
Factor by which the norm will be modified. new_norm = norm * (1 + or - gamma).
init_norm : float, optional
Initial value for the norm.
quantize : bool, optional
If True, the returned adversarials will have quantized values to the specified number of levels.
levels : int, optional
Number of levels to use for quantization (e.g. 256 for 8 bit images).
max_norm : float or None, optional
If specified, the norms of the perturbations will not be greater than this value which might lower success rate.
device : torch.device, optional
Device on which to perform the attack.
callback : object, optional
Visdom callback to display various metrics.
"""
    def __init__(self,
                 steps: int,
                 gamma: float = 0.05,
                 init_norm: float = 1.,
                 quantize: bool = True,
                 levels: int = 256,
                 max_norm: Optional[float] = None,
                 device: torch.device = torch.device('cpu'),
                 callback: Optional = None) -> None:
        """Store the attack hyper-parameters (see the class docstring)."""
        super(DDN, self).__init__()
        self.steps = steps          # number of optimization steps
        self.gamma = gamma          # per-step norm modification factor
        self.init_norm = init_norm  # initial L2 norm of the perturbation
        self.quantize = quantize    # quantize adversarials to `levels` values
        self.levels = levels        # number of quantization levels
        self.max_norm = max_norm    # optional cap on the perturbation norm
        self.device = device        # device used for attack tensors
        self.callback = callback    # optional Visdom callback for metrics
def attack(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
noise: torch.Tensor = None, num_noise_vectors=1, targeted: bool = False, no_grad=False) -> torch.Tensor:
if num_noise_vectors == 1:
return self._attack(model, inputs, labels, noise, targeted)
# return self._attack_mutlinoise(model, inputs, labels, noise, num_noise_vectors, targeted)
else:
if no_grad:
raise NotImplementedError
else:
return self._attack_mutlinoise(model, inputs, labels, noise, num_noise_vectors, targeted)
def _attack(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
noise: torch.Tensor = None, targeted: bool = False) -> torch.Tensor:
"""
Performs the attack of the model for the inputs and labels.
Parameters
----------
model : nn.Module
Model to attack.
inputs : torch.Tensor
Batch of samples to attack. Values should be in the [0, 1] range.
labels : torch.Tensor
Labels of the samples to attack if untargeted, else labels of targets.
targeted : bool, optional
Whether to perform a targeted attack or not.
Returns
-------
torch.Tensor
Batch of samples modified to be adversarial to the model.
"""
if inputs.min() < 0 or inputs.max() > 1: | |
#!/usr/bin/env python
#
# iuwandbox.py
#
# Copyright (C) 2014-2018, <NAME>
# This software is released under the new BSD License,
# see LICENSE
#
import os
import sys
import re
import codecs
import argparse
from time import sleep
from argparse import ArgumentParser
from wandbox import Wandbox
from requests.exceptions import HTTPError
from requests.exceptions import ConnectionError
# Fused single-header iutest source shipped with the repository; inlined into
# the submitted code when an iutest include is found.
IUTEST_FUSED_SRC = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../fused-src/iutest.min.hpp'))
# Root of the iutest include tree; used to recognize iutest's own headers.
IUTEST_INCLUDE_PATH = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../include'))
# Matches `#include "iutest.hpp"` / `#include "iutest_switch.hpp"` lines.
IUTEST_INCLUDE_REGEX = re.compile(r'^\s*#\s*include\s*".*(iutest|iutest_switch)\.hpp"')
# Matches any local `#include "..."` line; group(1) is the include path.
EXPAND_INCLUDE_REGEX = re.compile(r'^\s*#\s*include\s*"(.*?)"')
# Extracts INCG_IRIS_* include-guard names from the fused source.
IUTEST_INCG_REGEX = re.compile(r'\s*#\s*define[/\s]*(INCG_IRIS_\S*)\s*')
# Include guards found in the fused iutest source (filled by make_code).
iutest_incg_list = []
# Enables compiler-specific workarounds for Wandbox quirks.
workaround = True
# Wandbox API retry policy (overridden by --retry / --retry-wait).
api_retries = 3
api_retry_wait = 60
# command line option
def parse_command_line():
    """Parse the command line for iuwandbox.

    Side effect: updates the module-level ``api_retries`` and
    ``api_retry_wait`` globals from --retry / --retry-wait so the Wandbox
    API helpers pick them up.

    Returns
    -------
    tuple
        ``(options, parser)`` - the parsed namespace and the parser itself
        (returned so callers can print usage/help on error).
    """
    global api_retries
    global api_retry_wait
    parser = ArgumentParser()
    parser.add_argument(
        '-v',
        '--version',
        action='version',
        version=u'%(prog)s version 5.9'
    )
    parser.add_argument(
        '--list-compiler',
        '--list_compiler',
        action='store_true',
        help='listup compiler.'
    )
    parser.add_argument(
        '--list-options',
        '--list_options',
        metavar='COMPILER',
        help='listup compiler options.'
    )
    parser.add_argument(
        '-c',
        '--compiler',
        default='gcc-head',
        help='compiler select. default: %(default)s'
    )
    parser.add_argument(
        '-x',
        '--options',
        help='used options for a compiler.'
    )
    parser.add_argument(
        '--default',
        action='store_true',
        help='--default option is deprecated. default options are always set.'
    )
    parser.add_argument(
        '--no-default',
        action='store_true',
        help='no set default options.'
    )
    parser.add_argument(
        '--std',
        metavar='VERSION',
        help='set --std options.'
    )
    parser.add_argument(
        '--boost',
        metavar='VERSION',
        help='set boost options version X.XX or nothing.'
    )
    parser.add_argument(
        '--optimize',
        action='store_true',
        help='use optimization.'
    )
    parser.add_argument(
        '--cpp-verbose',
        action='store_true',
        help='use cpp-verbose.'
    )
    parser.add_argument(
        '--sprout',
        action='store_true',
        help='use sprout.'
    )
    parser.add_argument(
        '--msgpack',
        action='store_true',
        help='use msgpack.'
    )
    parser.add_argument(
        '--stdin',
        help='set stdin.'
    )
    parser.add_argument(
        '-f',
        '--compiler-option-raw',
        '--compiler_option_raw',
        metavar='OPTIONS',
        action='append',
        default=['-D__WANDBOX__'],
        help='compile-time any additional options.'
    )
    parser.add_argument(
        '-r',
        '--runtime-option-raw',
        '--runtime_option_raw',
        metavar='OPTIONS',
        action='append',
        help='runtime-time any additional options.'
    )
    parser.add_argument(
        '-s',
        '--save',
        action='store_true',
        help='generate permanent link.'
    )
    parser.add_argument(
        '--permlink',
        metavar='ID',
        help='get permanent link.'
    )
    parser.add_argument(
        '-o',
        '--output',
        metavar='FILE',
        help='output source code.'
    )
    parser.add_argument(
        '--xml',
        metavar='FILE',
        help='output result xml.'
    )
    parser.add_argument(
        '--junit',
        metavar='FILE',
        help='output result junit xml.'
    )
    parser.add_argument(
        '--stderr',
        action='store_true',
        help='output stderr.'
    )
    parser.add_argument(
        '--encoding',
        help='set encoding.'
    )
    parser.add_argument(
        '--expand-include',
        '--expand_include',
        action='store_true',
        help='expand include file.'
    )
    parser.add_argument(
        '--make',
        action='store_true',
        help=argparse.SUPPRESS
    )
    parser.add_argument(
        '--retry-wait',
        type=int,
        default=api_retry_wait,
        metavar='SECONDS',
        help='Wait time for retry when HTTPError occurs'
    )
    parser.add_argument(
        '--retry',
        type=int,
        default=api_retries,
        metavar='COUNT',
        help='Number of retries when HTTPError occurs'
    )
    parser.add_argument(
        '--check-config',
        '--check_config',
        action='store_true',
        help='check config.'
    )
    parser.add_argument(
        '--iutest-use-main',
        action='store_true',
        help='define IUTEST_USE_MAIN.'
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        help='verbose.'
    )
    parser.add_argument(
        '--dryrun',
        action='store_true',
        help='dryrun.'
    )
    parser.add_argument(
        'code',
        metavar='CODE',
        nargs='*',
        help='source code file'
    )
    options = parser.parse_args()
    # Propagate the retry policy to the module-level defaults used by the
    # Wandbox API helpers.
    api_retries = options.retry
    api_retry_wait = options.retry_wait
    return options, parser
# file open
def file_open(path, mode, encoding):
    """Open *path* with ``codecs.open`` when an encoding is given, else the builtin ``open``."""
    if encoding:
        return codecs.open(path, mode, encoding)
    return open(path, mode)
# make include filename
def make_include_filename(path, includes, included_files):
    """Return a unique virtual filename for *path*, caching it in *included_files*.

    While the candidate name collides with an entry in *includes*, parent
    directory names are prepended (joined with '__') until it is unique.
    """
    cached = included_files.get(path)
    if cached is not None:
        return cached
    parent, name = os.path.split(path)
    while name in includes:
        parent, tail = os.path.split(parent)
        name = tail + '__' + name
    included_files[path] = name
    return name
def is_iutest_included_file(filepath):
    """Return True if *filepath* is an iutest header already covered by the fused source.

    A header counts as covered when it lives under IUTEST_INCLUDE_PATH and an
    INCG_IRIS_* include guard derived from its basename was found in the fused
    source (``iutest_incg_list``, filled by make_code).
    """
    # Separator-terminated prefix so that e.g. ".../include-extra/x.hpp" does
    # not match ".../include" (a plain startswith on the directory would).
    include_root = os.path.join(IUTEST_INCLUDE_PATH, '')
    if os.path.abspath(filepath).startswith(include_root):
        incg = 'INCG_IRIS_' + os.path.basename(filepath).upper().replace('.', '_')
        for included_incg in iutest_incg_list:
            if included_incg.startswith(incg):
                return True
    # Explicit False (the original fell through and returned None implicitly).
    return False
# make code
def make_code(path, encoding, expand, includes, included_files):
    """Read the source at *path* and rewrite its iutest / local includes.

    - an iutest include line is replaced by `#include "iutest.hpp"`, and the
      fused single-header source is loaded once into *includes*;
    - other local `#include "..."` lines are either expanded inline (when
      *expand* is true) or uploaded as extra files under uniquified names.

    Original include lines are kept, prefixed with an `//origin>>` comment.
    Recurses into included files; *includes* maps uploaded filename -> code
    and *included_files* maps absolute path -> uploaded filename.
    Exits the process with status 1 if the fused source is missing.
    """
    code = ''
    file = file_open(path, 'r', encoding)
    for line in file:
        m = IUTEST_INCLUDE_REGEX.match(line)
        if m:
            code += '#include "iutest.hpp"\n'
            code += '//origin>> ' + line
            if 'iutest.hpp' not in includes:
                # Load the fused iutest source once and remember its include
                # guards so iutest's own headers are recognized later.
                try:
                    f = codecs.open(IUTEST_FUSED_SRC, 'r', 'utf-8-sig')
                    iutest_src = f.read()
                    f.close()
                    includes['iutest.hpp'] = iutest_src
                    global iutest_incg_list
                    iutest_incg_list = IUTEST_INCG_REGEX.findall(iutest_src)
                except:
                    print('{0} is not found...'.format(IUTEST_FUSED_SRC))
                    print('please try \"make fused\"')
                    exit(1)
        else:
            m = EXPAND_INCLUDE_REGEX.match(line)
            if m:
                include_path = os.path.normpath(os.path.join(os.path.dirname(path), m.group(1)))
                if is_iutest_included_file(include_path):
                    # Already covered by the fused source: comment the line out.
                    code += '//origin>> '
                elif os.path.exists(include_path):
                    if expand:
                        expand_include_file_code = make_code(
                            include_path, encoding, expand, includes, included_files)
                        code += expand_include_file_code
                        code += '//origin>> '
                    else:
                        include_abspath = os.path.abspath(include_path)
                        include_filename = make_include_filename(
                            include_abspath, includes, included_files)
                        if not include_filename == include_path:
                            # Rewrite the include to the uniquified name.
                            code += '#include "' + include_filename + '"\n'
                            code += '//origin>> '
                        if include_filename not in includes:
                            # Reserve the slot first to break include cycles.
                            includes[include_filename] = ''
                            expand_include_file_code = make_code(
                                include_path, encoding, expand, includes, included_files)
                            includes[include_filename] = expand_include_file_code
            code += line
    file.close()
    return code
def print_undefined_option(option_name, compiler):
    """Report that *option_name* is not available for *compiler* on Wandbox."""
    message = 'Wandbox is not supported option [{0}] ({1})'.format(option_name, compiler)
    print(message)
def change_std_option(options, new_value):
    """Overwrite ``options.std`` with *new_value*, logging the change."""
    old_value = options.std
    print(' change std option: {0} -> {1}'.format(old_value, new_value))
    options.std = new_value
def check_std_option_compatible(options, old, new, optlist):
    """Swap an unsupported std spelling for its supported equivalent.

    Checks both directions (*old* -> *new* and *new* -> *old*): when
    ``options.std`` equals the spelling Wandbox does not offer but *optlist*
    contains the other one, the replacement is applied via
    ``change_std_option`` and True is returned; otherwise False.
    """
    for unsupported, supported in ((old, new), (new, old)):
        if options.std == unsupported and supported in optlist:
            print(' [{0}] is not supported option. you can use [{1}]'.format(unsupported, supported))
            change_std_option(options, supported)
            return True
    return False
# check config
def check_config(options):
    """Validate the compiler and option choices against Wandbox.

    Prints diagnostics and exits the process with status 1 on any error.
    Unsupported --std spellings are mapped to their supported equivalent
    (c++1z <-> c++17, etc.) in place of failing, when possible.
    """
    has_error = False
    if not find_compiler(options.compiler):
        print('Wandbox is not supported compiler [' + options.compiler + ']')
        listup_compiler(options.verbose)
        has_error = True
    if options.options or options.std:
        opt = get_options(options.compiler)
        if options.options:
            for o in options.options.split(','):
                if o not in opt:
                    print_undefined_option(o, options.compiler)
                    has_error = True
        if options.std:
            if options.std not in opt:
                print_undefined_option(options.std, options.compiler)
                prev_std_option = options.std
                # Try the alternative spelling of the same standard; the first
                # match mutates options.std via change_std_option.
                if check_std_option_compatible(options, 'c++1z', 'c++17', opt) or \
                    check_std_option_compatible(options, 'gnu++1z', 'gnu++17', opt) or \
                    check_std_option_compatible(options, 'c++1y', 'c++14', opt) or \
                    check_std_option_compatible(options, 'gnu++1y', 'gnu++14', opt) or \
                    check_std_option_compatible(options, 'c++0x', 'c++11', opt) or \
                    check_std_option_compatible(options, 'gnu++0x', 'gnu++11', opt):
                    pass
                if (options.std == prev_std_option):
                    # No substitution was found: genuinely unsupported.
                    has_error = True
        if has_error:
            listup_options(options.compiler)
    if has_error:
        sys.exit(1)
    if options.default:
        print('--default option is deprecated. default options are always set.')
# setup additional files
def add_files(w, fileinfos):
    """Register every (filename, code) pair from *fileinfos* with the Wandbox client *w*."""
    for name, content in fileinfos.items():
        w.add_file(name, content)
# create opt list
def create_option_list(options):
    """Build the Wandbox ``options`` list from the parsed command line.

    Starts from the compiler's default options (unless --no-default was
    given), then applies --options, --std, --optimize, --cpp-verbose,
    --boost, --sprout and --msgpack. At most one C++ standard entry is kept:
    selecting a new c++*/gnu++* value removes any previously present one.
    Note: may rewrite ``options.boost`` to embed the compiler name.
    """
    def filterout_cppver(opt):
        # Drop any existing c++XX / gnu++XX entries.
        tmp = list(filter(lambda s: s.find('c++') == -1, opt))
        tmp = list(filter(lambda s: s.find('gnu++') == -1, tmp))
        return tmp
    opt = []
    if not options.no_default:
        opt = get_default_options(options.compiler)
    if options.options:
        for o in options.options.split(','):
            if o not in opt:
                if (o.find('c++') == 0) or (o.find('gnu++') == 0):
                    opt = filterout_cppver(opt)
                opt.append(o)
    # std
    if options.std:
        opt = filterout_cppver(opt)
        opt.append(options.std)
    # optimize
    if options.optimize and ('optimize' not in opt):
        opt.append('optimize')
    # cpp-verbose
    if options.cpp_verbose and ('cpp-verbose' not in opt):
        opt.append('cpp-verbose')
    # boost
    if options.boost:
        # Wandbox boost option names embed the compiler, e.g. boost-1.64-gcc-head.
        if options.compiler not in options.boost:
            options.boost = options.boost + '-' + options.compiler
        opt = list(filter(lambda s: s.find('boost') == -1, opt))
        opt.append('boost-' + str(options.boost))
    # sprout
    if options.sprout and ('sprout' not in opt):
        opt.append('sprout')
    # msgpack
    if options.msgpack and ('msgpack' not in opt):
        opt.append('msgpack')
    return opt
def expand_wandbox_options(w, compiler, options):
    """Translate Wandbox option names into raw compiler flags.

    Looks up *compiler* in the Wandbox compiler list and maps each entry of
    *options* to its 'display-flags' string, split into individual flags.
    Unknown option names are silently skipped.
    """
    flags_by_name = {}
    for desc in w.get_compiler_list():
        if desc['name'] != compiler:
            continue
        for switch in desc.get('switches', []):
            if ('name' in switch) and ('display-flags' in switch):
                flags_by_name[switch['name']] = switch['display-flags']
            elif 'options' in switch:
                for sub in switch['options']:
                    if ('name' in sub) and ('display-flags' in sub):
                        flags_by_name[sub['name']] = sub['display-flags']
    expanded = []
    for name in options:
        if name in flags_by_name:
            expanded.extend(flags_by_name[name].split())
    return expanded
def wandbox_api_call(callback, retries, retry_wait):
    """Invoke *callback*, retrying on transient Wandbox API failures.

    Retries (up to *retries* times, sleeping *retry_wait* seconds between
    attempts) when the request failed without any response (connection
    error) or the server answered 504 Gateway Timeout. Any other exception
    propagates to the caller.
    """
    try:
        return callback()
    except (HTTPError, ConnectionError) as e:
        def is_retry(e):
            # No response at all (e.g. ConnectionError) is worth retrying.
            # NOTE: the previous `not e.response` test was wrong - a
            # requests.Response is falsy for *any* error status, which made
            # every HTTPError look retryable.
            if e.response is None:
                return True
            return e.response.status_code in [504]
        if is_retry(e) and retries > 0:
            # str(e) instead of the Python-2-only `e.message` attribute.
            print(str(e))
            print('wait {0}sec...'.format(retry_wait))
            sleep(retry_wait)
            return wandbox_api_call(callback, retries - 1, retry_wait)
        raise
def wandbox_get_compilerlist():
    """Fetch the Wandbox compiler list using the module-level retry policy."""
    return wandbox_api_call(Wandbox.GetCompilerList, api_retries, api_retry_wait)
def wandbox_get_compilerswitches(compiler):
    """Return the 'switches' list advertised for *compiler*, or [] if none."""
    for entry in wandbox_get_compilerlist():
        if entry['name'] == compiler and 'switches' in entry:
            return entry['switches']
    return []
def run_wandbox_impl(w, options):
    """Execute the prepared Wandbox request *w* (or exit 0 on --dryrun)."""
    if options.dryrun:
        sys.exit(0)
    # The bound method w.run is the retryable callback.
    return wandbox_api_call(w.run, options.retry, options.retry_wait)
def create_compiler_raw_option_list(options):
    """Build the raw compiler option list (one flag per element).

    Each --compiler-option-raw value may hold several flags separated by
    whitespace that precedes a '-'; surrounding double quotes are stripped.
    Appends -DIUTEST_USE_MAIN when --iutest-use-main was given.
    """
    colist = []
    if options.compiler_option_raw:
        raw_options = options.compiler_option_raw
        for x in raw_options:
            # Raw string literal: '\s' in a plain literal is an invalid
            # escape sequence (SyntaxWarning on modern Python).
            colist.extend(re.split(r'\s(?=-)', x.strip('"')))
    if options.iutest_use_main:
        colist.append('-DIUTEST_USE_MAIN')
    return colist
# run wandbox (makefile)
def run_wandbox_make(main_filepath, code, includes, impliments, options):
    """Run the sources on Wandbox via a generated Makefile (bash 'compiler').

    Used for multi-file builds: every implementation file is compiled to an
    object file and linked into ./prog, which is then executed with the raw
    runtime options. Mutates *impliments* (adds the main file and the
    generated Makefile) and returns the Wandbox run result.
    """
    with Wandbox() as w:
        # The 'bash' compiler runs an arbitrary script; the real compiler
        # flags are injected through the generated Makefile below.
        w.compiler('bash')
        woptions = create_option_list(options)
        if options.stdin:
            w.stdin(options.stdin)
        impliments[os.path.basename(main_filepath)] = code
        colist = create_compiler_raw_option_list(options)
        colist.extend(expand_wandbox_options(w, options.compiler, woptions))
        rolist = []
        if options.runtime_option_raw:
            for opt in options.runtime_option_raw:
                rolist.extend(opt.split())
        makefile = '#!/bin/make\n# generate makefile by iuwandbox.py\n'
        makefile += '\nCXXFLAGS+='
        for opt in colist:
            makefile += opt + ' '
        makefile += '\nOBJS='
        for filename in impliments.keys():
            makefile += os.path.splitext(filename)[0] + '.o '
        makefile += '\n\
prog: $(OBJS)\n\
\t$(CXX) -o $@ $^ $(CXXFLAGS) $(LDFLAGS)\n\
'
        impliments['Makefile'] = makefile
        bashscript = 'make -j 4\n'
        bashscript += './prog '
        for opt in rolist:
            bashscript += opt + ' '
        bashscript += '\n'
        w.code(bashscript)
        if options.save:
            w.permanent_link(options.save)
        if options.verbose:
            w.dump()
        add_files(w, impliments)
        add_files(w, includes)
        return run_wandbox_impl(w, options)
# run wandbox (cxx)
def run_wandbox_cxx(code, includes, impliments, options):
with Wandbox() as w:
w.compiler(options.compiler)
w.options(','.join(create_option_list(options)))
if options.stdin:
w.stdin(options.stdin)
colist = create_compiler_raw_option_list(options)
if workaround:
if options.compiler in ['clang-3.2']:
colist.append('-ftemplate-depth=1024')
# if options.compiler in ['clang-3.4']:
# colist.append('-DIUTEST_HAS_HDR_CXXABI=0')
# if options.compiler in ['clang-3.3', 'clang-3.2', 'clang-3.1', 'clang-3.0']:
# colist.append('-Qunused-arguments')
# if options.compiler in ['clang-3.4', 'clang-3.3']:
# colist.append('-fno-exceptions')
# colist.append('-fno-rtti')
pass
if colist:
co = '\n'.join(colist)
co = co.replace('\\n', '\n')
w.compiler_options(co)
if options.runtime_option_raw:
rolist = []
for opt in options.runtime_option_raw:
rolist.extend(opt.split())
ro = '\n'.join(rolist)
ro | |
For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesInvalid: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesInvalid: If op_PolicyRulesInvalid is specified, the field named in this input will be compared to the value in PolicyRulesInvalid using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesInvalid must be specified if op_PolicyRulesInvalid is specified.
:type val_f_PolicyRulesInvalid: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesInvalid: If op_PolicyRulesInvalid is specified, this value will be compared to the value in PolicyRulesInvalid using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesInvalid must be specified if op_PolicyRulesInvalid is specified.
:type val_c_PolicyRulesInvalid: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesPassed: The operator to apply to the field PolicyRulesPassed. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesPassed: The total number of rules in this policy that the device passed successfully. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesPassed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesPassed: If op_PolicyRulesPassed is specified, the field named in this input will be compared to the value in PolicyRulesPassed using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesPassed must be specified if op_PolicyRulesPassed is specified.
:type val_f_PolicyRulesPassed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesPassed: If op_PolicyRulesPassed is specified, this value will be compared to the value in PolicyRulesPassed using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesPassed must be specified if op_PolicyRulesPassed is specified.
:type val_c_PolicyRulesPassed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesSkipped: The operator to apply to the field PolicyRulesSkipped. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesSkipped: The total number of rules in this policy that were skipped due to the device not matching the rule filters. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesSkipped: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesSkipped: If op_PolicyRulesSkipped is specified, the field named in this input will be compared to the value in PolicyRulesSkipped using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesSkipped must be specified if op_PolicyRulesSkipped is specified.
:type val_f_PolicyRulesSkipped: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesSkipped: If op_PolicyRulesSkipped is specified, this value will be compared to the value in PolicyRulesSkipped using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesSkipped must be specified if op_PolicyRulesSkipped is specified.
:type val_c_PolicyRulesSkipped: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesTotal: The operator to apply to the field PolicyRulesTotal. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesTotal: The total number of rules that in this policy at the time the policy was executed against this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesTotal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesTotal: If op_PolicyRulesTotal is specified, the field named in this input will be compared to the value in PolicyRulesTotal using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesTotal must be specified if op_PolicyRulesTotal is specified.
:type val_f_PolicyRulesTotal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesTotal: If op_PolicyRulesTotal is specified, this value will be compared to the value in PolicyRulesTotal using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesTotal must be specified if op_PolicyRulesTotal is specified.
:type val_c_PolicyRulesTotal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesUnknown: The operator to apply to the field PolicyRulesUnknown. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesUnknown: The total number of rules that could not be fully evaluated because information needed for the rule was not available (for example, the configuration file has not been collected for the device). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesUnknown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesUnknown: If op_PolicyRulesUnknown is specified, the field named in this input will be compared to the value in PolicyRulesUnknown using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesUnknown must be specified if op_PolicyRulesUnknown is specified.
:type val_f_PolicyRulesUnknown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesUnknown: If op_PolicyRulesUnknown is specified, this value will be compared to the value in PolicyRulesUnknown using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesUnknown must be specified if op_PolicyRulesUnknown is specified.
:type val_c_PolicyRulesUnknown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesValid: The operator to apply to the field PolicyRulesValid. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesValid: The total number of valid rules that were in this policy at the time the policy was executed against this device. An invalid rule generally only occurs if the XML rule build has been used and an improper XML format has been specified. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesValid: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesValid: If op_PolicyRulesValid is specified, the field named in this input will be compared to the value in PolicyRulesValid using the specified operator. That is, the | |
# -*- coding: utf-8 -*-
import re
import pytest
from django.core.urlresolvers import reverse
from django_dynamic_fixture import G
from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Version
from readthedocs.projects.models import HTMLFile, Project
from readthedocs.search.tests.utils import (
get_search_query_from_project_file,
DATA_TYPES_VALUES,
)
@pytest.mark.django_db
@pytest.mark.search
class TestProjectSearch:
    """Search-view tests that query by project (not file) attributes."""

    url = reverse('search')

    def _get_search_result(self, url, client, search_params):
        """GET the search page and return (results, facets) from its context."""
        response = client.get(url, search_params)
        assert response.status_code == 200
        return response.context['results'], response.context['facets']

    def test_search_by_project_name(self, client, project, all_projects):
        """Only the queried project is returned when searching by its name."""
        results, _ = self._get_search_result(
            url=self.url,
            client=client,
            search_params={'q': project.name},
        )
        assert len(results) == 1
        first_name = results[0].name.encode('utf-8')
        assert project.name.encode('utf-8') in first_name
        for other in all_projects[1:]:
            assert other.name.encode('utf-8') not in first_name

    def test_search_project_have_correct_language_facets(self, client, project):
        """Searching a project yields a facet for each language it exists in."""
        # Add a Bengali translation so two languages exist for the name.
        G(Project, language='bn', name=project.name)
        results, facets = self._get_search_result(
            url=self.url,
            client=client,
            search_params={'q': project.name},
        )
        lang_facets = facets['language']
        languages = [facet[0] for facet in lang_facets]
        assert len(lang_facets) == 2
        assert sorted(languages) == sorted(['en', 'bn'])
        for facet in lang_facets:
            # No language filter was applied, so no facet is active.
            assert facet[2] == False

    def test_search_project_filter_language(self, client, project):
        """The language query parameter narrows results but keeps all facets."""
        translate = G(Project, language='bn', name=project.name)
        results, facets = self._get_search_result(
            url=self.url,
            client=client,
            search_params={'q': project.name, 'language': 'bn'},
        )
        # Only the Bengali project matches the filter.
        assert len(results) == 1
        lang_facets = facets['language']
        languages = [facet[0] for facet in lang_facets]
        # Both languages still show up in the facet list.
        assert len(lang_facets) == 2
        assert sorted(languages) == sorted(['en', 'bn'])
@pytest.mark.django_db
@pytest.mark.search
class TestPageSearch(object):
url = reverse('search')
def _get_search_result(self, url, client, search_params):
resp = client.get(url, search_params)
assert resp.status_code == 200
results = resp.context['results']
facets = resp.context['facets']
return results, facets
def _get_highlight(self, result, data_type):
# if query is from page title,
# highlighted title is present in 'result.meta.highlight.title'
if data_type == 'title':
highlight = result.meta.highlight.title
# if result is not from page title,
# then results and highlighted results are present inside 'inner_hits'
else:
inner_hits = result.meta.inner_hits
assert len(inner_hits) >= 1
# checking first inner_hit
inner_hit_0 = inner_hits[0]
expected_type = data_type.split('.')[0] # can be either 'sections' or 'domains'
assert inner_hit_0['type'] == expected_type
highlight = inner_hit_0['highlight'][data_type]
return highlight
def _get_highlighted_words(self, string):
highlighted_words = re.findall(
'<span>(.*?)</span>',
string
)
return highlighted_words
@pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)
@pytest.mark.parametrize('page_num', [0, 1])
def test_file_search(self, client, project, data_type, page_num):
query = get_search_query_from_project_file(
project_slug=project.slug,
page_num=page_num,
data_type=data_type
)
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' }
)
assert len(results) >= 1
# checking first result
result_0 = results[0]
highlight = self._get_highlight(result_0, data_type)
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
# Make it lower because our search is case insensitive
assert word.lower() in query.lower()
def test_file_search_have_correct_role_name_facets(self, client):
"""Test that searching files should result all role_names."""
# searching for 'celery' to test that
# correct role_names are displayed
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': 'celery', 'type': 'file' }
)
assert len(results) >= 1
role_name_facets = facets['role_name']
role_name_facets_str = [facet[0] for facet in role_name_facets]
expected_role_names = ['py:class', 'py:function', 'py:method']
assert sorted(expected_role_names) == sorted(role_name_facets_str)
for facet in role_name_facets:
assert facet[2] == False # because none of the facets are applied
def test_file_search_filter_role_name(self, client):
"""Test that searching files filtered according to role_names."""
search_params = { 'q': 'celery', 'type': 'file' }
# searching without the filter
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params
)
assert len(results) >= 2 # there are > 1 results without the filter
role_name_facets = facets['role_name']
for facet in role_name_facets:
assert facet[2] == False # because none of the facets are applied
confval_facet = 'py:class'
# checking if 'py:class' facet is present in results
assert confval_facet in [facet[0] for facet in role_name_facets]
# filtering with role_name=py:class
search_params['role_name'] = confval_facet
new_results, new_facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params
)
new_role_names_facets = new_facets['role_name']
# there is only one result with role_name='py:class'
# in `signals` page
assert len(new_results) == 1
first_result = new_results[0] # first result
inner_hits = first_result.meta.inner_hits # inner_hits of first results
assert len(inner_hits) >= 1
inner_hit_0 = inner_hits[0] # first inner_hit
assert inner_hit_0.type == 'domains'
assert inner_hit_0.source.role_name == confval_facet
for facet in new_role_names_facets:
if facet[0] == confval_facet:
assert facet[2] == True # because 'std:confval' filter is active
else:
assert facet[2] == False
@pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)
@pytest.mark.parametrize('case', ['upper', 'lower', 'title'])
def test_file_search_case_insensitive(self, client, project, case, data_type):
"""
Check File search is case insensitive.
It tests with uppercase, lowercase and camelcase.
"""
query_text = get_search_query_from_project_file(
project_slug=project.slug,
data_type=data_type
)
cased_query = getattr(query_text, case)
query = cased_query()
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' }
)
assert len(results) >= 1
first_result = results[0]
highlight = self._get_highlight(first_result, data_type)
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
assert word.lower() in query.lower()
def test_file_search_exact_match(self, client, project):
"""
Check quoted query match exact phrase.
Making a query with quoted text like ``"foo bar"`` should match exactly
``foo bar`` phrase.
"""
# `Sphinx` word is present both in `kuma` and `docs` files
# But the phrase `Sphinx uses` is present only in `kuma` docs.
# So search with this phrase to check
query = r'"Sphinx uses"'
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' })
# there must be only 1 result
# because the phrase is present in
# only one project
assert len(results) == 1
assert results[0].project == 'kuma'
assert results[0].path == 'testdocumentation'
inner_hits = results[0].meta.inner_hits
assert len(inner_hits) == 1
assert inner_hits[0].type == 'sections'
highlight = self._get_highlight(results[0], 'sections.content')
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
assert word.lower() in query.lower()
def test_file_search_have_correct_project_facets(self, client, all_projects):
"""Test that file search have correct project facets in results"""
# `environment` word is present both in `kuma` and `docs` files
# so search with this phrase
query = 'environment'
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' },
)
# There should be 2 search result
assert len(results) == 2
project_facets = facets['project']
project_facets_str = [facet[0] for facet in project_facets]
assert len(project_facets_str) == 2
# kuma and pipeline should be there
assert sorted(project_facets_str) == sorted(['kuma', 'docs'])
def test_file_search_filter_by_project(self, client):
"""Test that search result are filtered according to project."""
# `environment` word is present both in `kuma` and `docs` files
# so search with this phrase but filter through `kuma` project
search_params = {
'q': 'environment',
'type': 'file',
'project': 'kuma'
}
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params,
)
project_facets = facets['project']
resulted_project_facets = [ facet[0] for facet in project_facets ]
# There should be 1 search result as we have filtered
assert len(results) == 1
# kuma should should be there only
assert 'kuma' == results[0].project
# But there should be 2 projects in the project facets
# as the query is present in both projects
assert sorted(resulted_project_facets) == sorted(['kuma', 'docs'])
@pytest.mark.xfail(reason='Versions are not showing correctly! Fixme while rewrite!')
def test_file_search_show_versions(self, client, all_projects, es_index, settings):
# override the settings to index all versions
settings.INDEX_ONLY_LATEST = False
project = all_projects[0]
# Create some versions of the project
versions = [G(Version, project=project) for _ in range(3)]
query = get_search_query_from_project_file(project_slug=project.slug)
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' },
)
# Results can be from other projects also
assert len(results) >= 1
version_facets = facets['version']
version_facets_str = [facet[0] for facet in version_facets]
# There should be total 4 versions
# one is latest, and other 3 that we created above
assert len(version_facets) == 4
project_versions = [v.slug for v in versions] + [LATEST]
assert sorted(project_versions) == sorted(resulted_version_facets)
def test_file_search_subprojects(self, client, all_projects, es_index):
"""
TODO: File search should return results from subprojects also.
This is currently disabled because the UX around it is weird.
You filter by a project, and get results for multiple.
"""
project = all_projects[0]
subproject = all_projects[1]
# Add another project as | |
*__17CTempPauseMessage(struct CTempPauseMessage *this)")
# Generated symbol-annotation table (presumably an IDAPython/IDC database
# script for a PSX MIPS executable -- TODO confirm toolchain): for each code
# address, the existing items are deleted (del_items) and a demangled C/C++
# function signature is applied at that address (SetType).
# NOTE(review): machine-generated -- regenerate rather than hand-editing entries.
del_items(0x80083FBC)
SetType(0x80083FBC, "void ___14CPauseMessages(struct CPauseMessages *this, int __in_chrg)")
del_items(0x80083FF0)
SetType(0x80083FF0, "struct CPauseMessages *__14CPauseMessages(struct CPauseMessages *this)")
del_items(0x80084004)
SetType(0x80084004, "void SetRGB__6DialogUcUcUc(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x80084024)
SetType(0x80084024, "void SetBack__6Dialogi(struct Dialog *this, int Type)")
del_items(0x8008402C)
SetType(0x8008402C, "void SetBorder__6Dialogi(struct Dialog *this, int Type)")
del_items(0x80084034)
SetType(0x80084034, "void ___6Dialog(struct Dialog *this, int __in_chrg)")
del_items(0x8008405C)
SetType(0x8008405C, "struct Dialog *__6Dialog(struct Dialog *this)")
del_items(0x800840B8)
SetType(0x800840B8, "unsigned short GetDown__C4CPad_addr_800840B8(struct CPad *this)")
del_items(0x800840E0)
SetType(0x800840E0, "unsigned short GetUp__C4CPad(struct CPad *this)")
del_items(0x80084108)
SetType(0x80084108, "unsigned char CheckActive__4CPad_addr_80084108(struct CPad *this)")
del_items(0x80084114)
SetType(0x80084114, "unsigned long ReadPadStream__Fv()")
del_items(0x8008422C)
SetType(0x8008422C, "void PAD_Handler__Fv()")
del_items(0x800843F4)
SetType(0x800843F4, "struct CPad *PAD_GetPad__FiUc(int PadNum, unsigned char both)")
del_items(0x80084490)
SetType(0x80084490, "void NewVal__4CPadUs(struct CPad *this, unsigned short New)")
del_items(0x800845C8)
SetType(0x800845C8, "void BothNewVal__4CPadUsUs(struct CPad *this, unsigned short New, unsigned short New2)")
del_items(0x80084724)
SetType(0x80084724, "unsigned short Trans__4CPadUs(struct CPad *this, unsigned short PadVal)")
del_items(0x80084848)
SetType(0x80084848, "void _GLOBAL__I_Pad0()")
del_items(0x80084880)
SetType(0x80084880, "void SetPadType__4CPadUc(struct CPad *this, unsigned char val)")
del_items(0x80084888)
SetType(0x80084888, "unsigned char CheckActive__4CPad_addr_80084888(struct CPad *this)")
del_items(0x80084894)
SetType(0x80084894, "void SetActive__4CPadUc(struct CPad *this, unsigned char a)")
del_items(0x8008489C)
SetType(0x8008489C, "void SetBothFlag__4CPadUc(struct CPad *this, unsigned char fl)")
del_items(0x800848A4)
SetType(0x800848A4, "struct CPad *__4CPadi(struct CPad *this, int PhysStick)")
del_items(0x800848D8)
SetType(0x800848D8, "void Flush__4CPad(struct CPad *this)")
del_items(0x800848FC)
SetType(0x800848FC, "void Set__7FontTab(struct FontTab *this)")
del_items(0x80084998)
SetType(0x80084998, "void InitPrinty__Fv()")
del_items(0x80084A38)
SetType(0x80084A38, "void SetTextDat__5CFontP7TextDat(struct CFont *this, struct TextDat *NewDat)")
del_items(0x80084A40)
SetType(0x80084A40, "int PrintChar__5CFontUsUscUcUcUc(struct CFont *this, unsigned short Cx, unsigned short Cy, char C, int R, int G, int B)")
del_items(0x80084BD8)
SetType(0x80084BD8, "int Print__5CFontiiPc8TXT_JUSTP4RECTUcUcUc(struct CFont *this, int X, int Y, char *Str, enum TXT_JUST Justify, struct RECT *TextWindow, int R, int G, int B)")
del_items(0x80085204)
SetType(0x80085204, "int GetStrWidth__5CFontPc(struct CFont *this, char *Str)")
del_items(0x800852B8)
SetType(0x800852B8, "void SetChar__5CFontiUs(struct CFont *this, int ch, unsigned short Frm)")
del_items(0x8008531C)
SetType(0x8008531C, "int SetOTpos__5CFonti(struct CFont *this, int OT)")
del_items(0x80085328)
SetType(0x80085328, "void ClearFont__5CFont(struct CFont *this)")
del_items(0x8008534C)
SetType(0x8008534C, "bool IsDefined__5CFontUc(struct CFont *this, unsigned char C)")
del_items(0x8008536C)
SetType(0x8008536C, "int GetCharFrameNum__5CFontc(struct CFont *this, char ch)")
del_items(0x80085384)
SetType(0x80085384, "int GetCharWidth__5CFontc(struct CFont *this, char ch)")
del_items(0x800853DC)
SetType(0x800853DC, "void Init__5CFont(struct CFont *this)")
del_items(0x80085410)
SetType(0x80085410, "struct FRAME_HDR *GetFr__7TextDati_addr_80085410(struct TextDat *this, int FrNum)")
del_items(0x8008542C)
SetType(0x8008542C, "unsigned char TrimCol__Fs(short col)")
del_items(0x80085464)
SetType(0x80085464, "struct POLY_GT4 *DialogPrint__Fiiiiiiiiii(int Frm, int X, int Y, int SW, int SH, int UW, int UH, int UOfs, int VOfs, int Trans)")
del_items(0x80085DDC)
SetType(0x80085DDC, "struct POLY_G4 *GetDropShadowG4__FUcUcUcUcUcUcUcUcUcUcUcUc(unsigned char r0, unsigned char g0, unsigned char b0, unsigned char r1, int g1, int b1, int r2, int g2, int b2, int r3, int g3, int b3)")
del_items(0x80085F14)
SetType(0x80085F14, "void DropShadows__Fiiii(int x, int y, int w, int h)")
del_items(0x800861B8)
SetType(0x800861B8, "void InitDialog__Fv()")
del_items(0x800862F0)
SetType(0x800862F0, "void GetSizes__6Dialog(struct Dialog *this)")
del_items(0x80086548)
SetType(0x80086548, "void Back__6Dialogiiii(struct Dialog *this, int DX, int DY, int DW, int DH)")
del_items(0x80087708)
SetType(0x80087708, "void Line__6Dialogiii(struct Dialog *this, int DX, int DY, int DW)")
del_items(0x80087920)
SetType(0x80087920, "struct PAL *GetPal__7TextDati_addr_80087920(struct TextDat *this, int PalNum)")
del_items(0x8008793C)
SetType(0x8008793C, "struct FRAME_HDR *GetFr__7TextDati_addr_8008793C(struct TextDat *this, int FrNum)")
del_items(0x80087958)
SetType(0x80087958, "void ATT_DoAttract__Fv()")
del_items(0x80087AA8)
SetType(0x80087AA8, "void CreatePlayersFromFeData__FR9FE_CREATE(struct FE_CREATE *CStruct)")
del_items(0x80087B74)
SetType(0x80087B74, "void UpdateSel__FPUsUsPUc(unsigned short *Col, unsigned short Add, unsigned char *Count)")
del_items(0x80087BB4)
SetType(0x80087BB4, "void CycleSelCols__Fv()")
del_items(0x80087D6C)
SetType(0x80087D6C, "int FindTownCreature__7CBlocksi(struct CBlocks *this, int GameEqu)")
del_items(0x80087DE0)
SetType(0x80087DE0, "int FindCreature__7CBlocksi(struct CBlocks *this, int MgNum)")
del_items(0x80087E34)
SetType(0x80087E34, "struct CBlocks *__7CBlocksiiiii(struct CBlocks *this, int BgId, int ObjId, int ItemId, int Level, int List)")
del_items(0x80087F88)
SetType(0x80087F88, "void SetTownersGraphics__7CBlocks(struct CBlocks *this)")
del_items(0x80087FC0)
SetType(0x80087FC0, "void SetMonsterGraphics__7CBlocksii(struct CBlocks *this, int Level, int List)")
del_items(0x80088088)
SetType(0x80088088, "void ___7CBlocks(struct CBlocks *this, int __in_chrg)")
del_items(0x80088110)
SetType(0x80088110, "void DumpGt4s__7CBlocks(struct CBlocks *this)")
del_items(0x80088178)
SetType(0x80088178, "void DumpRects__7CBlocks(struct CBlocks *this)")
del_items(0x800881E0)
SetType(0x800881E0, "void SetGraphics__7CBlocksPP7TextDatPii(struct CBlocks *this, struct TextDat **TDat, int *pId, int Id)")
del_items(0x8008823C)
SetType(0x8008823C, "void DumpGraphics__7CBlocksPP7TextDatPi(struct CBlocks *this, struct TextDat **TDat, int *Id)")
del_items(0x8008828C)
SetType(0x8008828C, "void PrintBlockOutline__7CBlocksiiiii(struct CBlocks *this, int x, int y, int r, int g, int b)")
del_items(0x800885D8)
SetType(0x800885D8, "void Load__7CBlocksi(struct CBlocks *this, int Id)")
del_items(0x80088684)
SetType(0x80088684, "void MakeRectTable__7CBlocks(struct CBlocks *this)")
del_items(0x80088758)
SetType(0x80088758, "void MakeGt4Table__7CBlocks(struct CBlocks *this)")
del_items(0x80088860)
SetType(0x80088860, "void MakeGt4__7CBlocksP8POLY_GT4P9FRAME_HDR(struct CBlocks *this, struct POLY_GT4 *GT4, struct FRAME_HDR *Fr)")
del_items(0x8008899C)
SetType(0x8008899C, "struct CBlock *GetBlock__7CBlocksi(struct CBlocks *this, int num)")
del_items(0x80088A14)
SetType(0x80088A14, "void Print__7CBlocks(struct CBlocks *this)")
del_items(0x80088A3C)
SetType(0x80088A3C, "void SetXY__7CBlocksii(struct CBlocks *this, int nx, int ny)")
del_items(0x80088A64)
SetType(0x80088A64, "void GetXY__7CBlocksPiT1(struct CBlocks *this, int *nx, int *ny)")
del_items(0x80088A7C)
SetType(0x80088A7C, "void PrintMap__7CBlocksii(struct CBlocks *this, int x, int y)")
del_items(0x80089F6C)
SetType(0x80089F6C, "void PrintGameSprites__7CBlocksiiiii(struct CBlocks *this, int ThisXPos, int ThisYPos, int OtPos, int ScrX, int ScrY)")
del_items(0x8008A0DC)
SetType(0x8008A0DC, "void PrintGameSprites__7CBlocksP8map_infoiiiiiii(struct CBlocks *this, struct map_info *piece, int OtPos, int ScrX, int ScrY, int R, int G, int B)")
del_items(0x8008AE44)
SetType(0x8008AE44, "void PrintSprites__7CBlocksP8map_infoiiiiiii(struct CBlocks *this, struct map_info *piece, int OtPos, int ScrX, int ScrY, int R, int G, int B)")
del_items(0x8008B570)
SetType(0x8008B570, "void PrintSprites__7CBlocksiiiii(struct CBlocks *this, int ThisXPos, int ThisYPos, int OtPos, int ScrX, int ScrY)")
del_items(0x8008B6E0)
SetType(0x8008B6E0, "int ScrToWorldX__7CBlocksii(struct CBlocks *this, int sx, int sy)")
del_items(0x8008B6F4)
SetType(0x8008B6F4, "int ScrToWorldY__7CBlocksii(struct CBlocks *this, int sx, int sy)")
del_items(0x8008B708)
SetType(0x8008B708, "void SetScrollTarget__7CBlocksii(struct CBlocks *this, int x, int y)")
del_items(0x8008B7CC)
SetType(0x8008B7CC, "void DoScroll__7CBlocks(struct CBlocks *this)")
del_items(0x8008B84C)
SetType(0x8008B84C, "void SetPlayerPosBlocks__7CBlocksiii(struct CBlocks *this, int PlayerNum, int bx, int by)")
del_items(0x8008B8EC)
SetType(0x8008B8EC, "void GetScrXY__7CBlocksR4RECTiiii(struct CBlocks *this, struct RECT *R, int x, int y, int sxoff, int syoff)")
del_items(0x8008B9C0)
SetType(0x8008B9C0, "void ShadScaleSkew__7CBlocksP8POLY_FT4(struct POLY_FT4 *Ft4)")
del_items(0x8008BA40)
SetType(0x8008BA40, "int WorldToScrX__7CBlocksii(struct CBlocks *this, int x, int y)")
del_items(0x8008BA48)
SetType(0x8008BA48, "int WorldToScrY__7CBlocksii(struct CBlocks *this, int x, int y)")
del_items(0x8008BA5C)
SetType(0x8008BA5C, "struct CBlocks *BL_GetCurrentBlocks__Fv()")
del_items(0x8008BA68)
SetType(0x8008BA68, "void PRIM_GetPrim__FPP8POLY_FT4_addr_8008BA68(struct POLY_FT4 **Prim)")
del_items(0x8008BAE4)
SetType(0x8008BAE4, "int GetHighlightCol__FiPiUsUsUs(int Index, int *SelList, unsigned short P1Col, unsigned short P2Col, int P12Col)")
del_items(0x8008BB2C)
SetType(0x8008BB2C, "struct POLY_FT4 *PRIM_GetCopy__FP8POLY_FT4(struct POLY_FT4 *Prim)")
del_items(0x8008BB68)
SetType(0x8008BB68, "int GetHighlightCol__FiPcUsUsUs(int Index, char *SelList, unsigned short P1Col, unsigned short P2Col, int P12Col)")
del_items(0x8008BBB0)
SetType(0x8008BBB0, "void PRIM_GetPrim__FPP8POLY_GT4_addr_8008BBB0(struct POLY_GT4 **Prim)")
del_items(0x8008BC2C)
SetType(0x8008BC2C, "void PRIM_GetPrim__FPP7LINE_F2(struct LINE_F2 **Prim)")
del_items(0x8008BCA8)
SetType(0x8008BCA8, "void PRIM_CopyPrim__FP8POLY_FT4T0(struct POLY_FT4 *Dest, struct POLY_FT4 *Source)")
del_items(0x8008BCD0)
SetType(0x8008BCD0, "int GetCreature__14TownToCreaturei(struct TownToCreature *this, int GameCreature)")
del_items(0x8008BCEC)
SetType(0x8008BCEC, "void SetItemGraphics__7CBlocksi(struct CBlocks *this, int Id)")
del_items(0x8008BD14)
SetType(0x8008BD14, "void SetObjGraphics__7CBlocksi(struct CBlocks *this, int Id)")
del_items(0x8008BD3C)
SetType(0x8008BD3C, "void DumpItems__7CBlocks(struct CBlocks *this)")
del_items(0x8008BD60)
SetType(0x8008BD60, "void DumpObjs__7CBlocks(struct CBlocks *this)")
del_items(0x8008BD84)
SetType(0x8008BD84, "void DumpMonsters__7CBlocks(struct CBlocks *this)")
del_items(0x8008BDAC)
SetType(0x8008BDAC, "int GetNumOfBlocks__7CBlocks(struct CBlocks *this)")
del_items(0x8008BDB8)
SetType(0x8008BDB8, "void CopyToGt4__9LittleGt4P8POLY_GT4(struct LittleGt4 *this, struct POLY_GT4 *Gt4)")
del_items(0x8008BE50)
SetType(0x8008BE50, "void InitFromGt4__9LittleGt4P8POLY_GT4ii(struct LittleGt4 *this, struct POLY_GT4 *Gt4, int nw, int nh)")
del_items(0x8008BEE0)
SetType(0x8008BEE0, "int GetNumOfFrames__7TextDatii(struct TextDat *this, int Creature, int Action)")
del_items(0x8008BF18)
SetType(0x8008BF18, "struct CCreatureHdr *GetCreature__7TextDati_addr_8008BF18(struct TextDat *this, int Creature)")
del_items(0x8008BF90)
SetType(0x8008BF90, "int GetNumOfCreatures__7TextDat_addr_8008BF90(struct TextDat *this)")
del_items(0x8008BFA4)
SetType(0x8008BFA4, "void SetFileInfo__7TextDatPC13CTextFileInfoi_addr_8008BFA4(struct TextDat *this, struct CTextFileInfo *NewInfo, int NewTexNum)")
del_items(0x8008BFB0)
SetType(0x8008BFB0, "struct PAL *GetPal__7TextDati_addr_8008BFB0(struct TextDat *this, int PalNum)")
del_items(0x8008BFCC)
SetType(0x8008BFCC, "struct FRAME_HDR *GetFr__7TextDati_addr_8008BFCC(struct TextDat *this, int FrNum)")
del_items(0x8008BFE8)
SetType(0x8008BFE8, "bool OVR_IsMemcardOverlayBlank__Fv()")
del_items(0x8008C014)
SetType(0x8008C014, "void OVR_LoadPregame__Fv()")
del_items(0x8008C03C)
SetType(0x8008C03C, "void OVR_LoadFrontend__Fv()")
del_items(0x8008C064)
SetType(0x8008C064, "void OVR_LoadGame__Fv()")
del_items(0x8008C08C)
SetType(0x8008C08C, "void OVR_LoadFmv__Fv()")
del_items(0x8008C0B4)
SetType(0x8008C0B4, "void OVR_LoadMemcard__Fv()")
del_items(0x8008C0E0)
SetType(0x8008C0E0, "void ClearOutOverlays__Fv()")
del_items(0x8008C138)
SetType(0x8008C138, "void ClearOut__7Overlay(struct Overlay *this)")
del_items(0x8008C1FC)
SetType(0x8008C1FC, "void Load__7Overlay(struct Overlay *this)")
del_items(0x8008C26C)
SetType(0x8008C26C, "enum OVER_TYPE OVR_GetCurrentOverlay__Fv()")
del_items(0x8008C278)
SetType(0x8008C278, "void LoadOver__FR7Overlay(struct Overlay *Ovr)")
del_items(0x8008C2CC)
SetType(0x8008C2CC, "void _GLOBAL__I_OVR_Open__Fv()")
del_items(0x8008C43C)
SetType(0x8008C43C, "enum OVER_TYPE GetOverType__7Overlay(struct Overlay *this)")
del_items(0x8008C448)
SetType(0x8008C448, "void StevesDummyPoll__Fv()")
del_items(0x8008C450)
SetType(0x8008C450, "void Lambo__Fv()")
del_items(0x8008C458)
SetType(0x8008C458, "struct CPlayer *__7CPlayerbi(struct CPlayer *this, bool Town, int mPlayerNum)")
del_items(0x8008C53C)
SetType(0x8008C53C, "void ___7CPlayer(struct CPlayer *this, int __in_chrg)")
del_items(0x8008C594)
SetType(0x8008C594, "void Load__7CPlayeri(struct CPlayer *this, int Id)")
del_items(0x8008C5F0)
SetType(0x8008C5F0, "void SetBlockXY__7CPlayerR7CBlocksR12PlayerStruct(struct CPlayer *this, struct CBlocks *Bg, struct PlayerStruct *Plr)")
del_items(0x8008C73C)
SetType(0x8008C73C, "void SetScrollTarget__7CPlayerR12PlayerStructR7CBlocks(struct CPlayer *this, struct PlayerStruct *Plr, struct CBlocks *Bg)")
del_items(0x8008CB68)
SetType(0x8008CB68, "int GetNumOfSpellAnims__FR12PlayerStruct(struct PlayerStruct *Plr)")
del_items(0x8008CBE8)
SetType(0x8008CBE8, "void Print__7CPlayerR12PlayerStructR7CBlocks(struct CPlayer *this, struct PlayerStruct *Plr, struct CBlocks *Bg)")
del_items(0x8008D0DC)
SetType(0x8008D0DC, "int FindAction__7CPlayerR12PlayerStruct(struct CPlayer *this, struct PlayerStruct *Plr)")
del_items(0x8008D158)
SetType(0x8008D158, "enum PACTION FindActionEnum__7CPlayerR12PlayerStruct(struct CPlayer *this, struct PlayerStruct *Plr)")
del_items(0x8008D1D4)
SetType(0x8008D1D4, "void Init__7CPlayer(struct CPlayer *this)")
del_items(0x8008D1DC)
SetType(0x8008D1DC, "void Dump__7CPlayer(struct CPlayer *this)")
del_items(0x8008D1E4)
SetType(0x8008D1E4, "void PRIM_GetPrim__FPP8POLY_FT4_addr_8008D1E4(struct POLY_FT4 **Prim)")
del_items(0x8008D260)
SetType(0x8008D260, "struct POLY_FT4 *PRIM_GetCopy__FP8POLY_FT4_addr_8008D260(struct POLY_FT4 *Prim)")
del_items(0x8008D29C)
SetType(0x8008D29C, "void PRIM_CopyPrim__FP8POLY_FT4T0_addr_8008D29C(struct POLY_FT4 *Dest, struct POLY_FT4 *Source)")
del_items(0x8008D2C4)
SetType(0x8008D2C4, "int GetPlrOt__7CBlocksi(struct CBlocks *this, int PlayerNum)")
del_items(0x8008D2D8)
SetType(0x8008D2D8, "void SetDecompArea__7TextDatiiii(struct TextDat *this, int nDecX, int nDecY, int nPalX, int nPalY)")
del_items(0x8008D2F0)
SetType(0x8008D2F0, "int GetNumOfFrames__7TextDatii_addr_8008D2F0(struct TextDat *this, int Creature, int Action)")
del_items(0x8008D328)
SetType(0x8008D328, "int GetNumOfActions__7TextDati(struct TextDat *this, int Creature)")
del_items(0x8008D34C)
SetType(0x8008D34C, "struct CCreatureHdr *GetCreature__7TextDati_addr_8008D34C(struct TextDat *this, int Creature)")
del_items(0x8008D3C4)
SetType(0x8008D3C4, "int GetNumOfCreatures__7TextDat_addr_8008D3C4(struct TextDat *this)")
del_items(0x8008D3D8)
SetType(0x8008D3D8, "void SetFileInfo__7TextDatPC13CTextFileInfoi_addr_8008D3D8(struct TextDat *this, struct CTextFileInfo *NewInfo, int NewTexNum)")
del_items(0x8008D3E4)
SetType(0x8008D3E4, "void PROF_Open__Fv()")
del_items(0x8008D424)
SetType(0x8008D424, "bool PROF_State__Fv()")
del_items(0x8008D430)
SetType(0x8008D430, "void PROF_On__Fv()")
del_items(0x8008D440)
SetType(0x8008D440, "void PROF_Off__Fv()")
del_items(0x8008D44C)
SetType(0x8008D44C, "void PROF_CpuEnd__Fv()")
del_items(0x8008D47C)
SetType(0x8008D47C, "void PROF_CpuStart__Fv()")
del_items(0x8008D4A0)
SetType(0x8008D4A0, "void PROF_DrawStart__Fv()")
del_items(0x8008D4C4)
SetType(0x8008D4C4, "void PROF_DrawEnd__Fv()")
del_items(0x8008D4F4)
SetType(0x8008D4F4, "void PROF_Draw__FPUl(unsigned long *Ot)")
del_items(0x8008D6E8)
SetType(0x8008D6E8, "void PROF_Restart__Fv()")
del_items(0x8008D708)
SetType(0x8008D708, "void PSX_WndProc__FUilUl(unsigned int Msg, long wParam, unsigned long lParam)")
del_items(0x8008D7C8)
SetType(0x8008D7C8, "void PSX_PostWndProc__FUilUl(unsigned int Msg, long wParam, unsigned long lParam)")
del_items(0x8008D878)
SetType(0x8008D878, "void GoBackLevel__Fv()")
del_items(0x8008D8F0)
SetType(0x8008D8F0, "void GoWarpLevel__Fv()")
del_items(0x8008D928)
SetType(0x8008D928, "void PostLoadGame__Fv()")
del_items(0x8008D9C4)
SetType(0x8008D9C4, "void GoLoadGame__Fv()")
del_items(0x8008DA20)
SetType(0x8008DA20, "void PostNewLevel__Fv()")
del_items(0x8008DABC)
SetType(0x8008DABC, "void GoNewLevel__Fv()")
del_items(0x8008DB10)
SetType(0x8008DB10, "void PostGoBackLevel__Fv()")
del_items(0x8008DBA8)
SetType(0x8008DBA8, "void GoForwardLevel__Fv()")
del_items(0x8008DC00)
SetType(0x8008DC00, "void PostGoForwardLevel__Fv()")
del_items(0x8008DC98)
SetType(0x8008DC98, "void GoNewGame__Fv()")
del_items(0x8008DCE8)
SetType(0x8008DCE8, "void PostNewGame__Fv()")
del_items(0x8008DD20)
SetType(0x8008DD20, "void LevelToLevelInit__Fv()")
del_items(0x8008DD68)
SetType(0x8008DD68, "unsigned int GetPal__6GPaneli(struct GPanel *this, int Frm)")
del_items(0x8008DDAC)
SetType(0x8008DDAC, "struct GPanel *__6GPaneli(struct GPanel *this, int Ofs)")
del_items(0x8008DE04)
SetType(0x8008DE04, "void DrawFlask__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008E284)
SetType(0x8008E284, "void DrawSpeedBar__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008E708)
SetType(0x8008E708, "void DrawSpell__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008E8A8)
SetType(0x8008E8A8, "void DrawMsgWindow__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008E8F4)
SetType(0x8008E8F4, "int DrawDurThingy__6GPaneliiP10ItemStructi(struct GPanel *this, int X, int Y, struct ItemStruct *Item, int ItemType)")
del_items(0x8008ECB0)
SetType(0x8008ECB0, "void DrawDurIcon__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008EDA4)
SetType(0x8008EDA4, "void Print__6GPanelP7PanelXYP12PlayerStruct(struct GPanel *this, struct PanelXY *XY, struct PlayerStruct *Plr)")
del_items(0x8008EEA8)
SetType(0x8008EEA8, "struct PAL *GetPal__7TextDati_addr_8008EEA8(struct TextDat *this, int PalNum)")
del_items(0x8008EEC4)
SetType(0x8008EEC4, "struct FRAME_HDR *GetFr__7TextDati_addr_8008EEC4(struct TextDat *this, int FrNum)")
del_items(0x8008EEE0)
SetType(0x8008EEE0, "void PrintCDWaitTask__FP4TASK(struct TASK *T)")
del_items(0x8008EF98)
SetType(0x8008EF98, "void InitCDWaitIcon__Fv()")
del_items(0x8008EFCC)
SetType(0x8008EFCC, "void STR_Debug__FP6SFXHDRPce(struct SFXHDR *sfh, char *e)")
del_items(0x8008EFE0)
SetType(0x8008EFE0, "void STR_SystemTask__FP4TASK(struct TASK *T)")
del_items(0x8008F028)
SetType(0x8008F028, "void STR_AllocBuffer__Fv()")
del_items(0x8008F07C)
SetType(0x8008F07C, "void STR_Init__Fv()")
del_items(0x8008F19C)
SetType(0x8008F19C, "struct SFXHDR *STR_InitStream__Fv()")
del_items(0x8008F2D4)
SetType(0x8008F2D4, "struct SFXHDR *STR_PlaySound__FUscic(unsigned short Name, char flag, int volume, char loop)")
del_items(0x8008F410)
SetType(0x8008F410, "void STR_setvolume__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x8008F468)
SetType(0x8008F468, "void STR_PlaySFX__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x8008F574)
SetType(0x8008F574, "void STR_pauseall__Fv()")
del_items(0x8008F5C4)
SetType(0x8008F5C4, "void STR_resumeall__Fv()")
del_items(0x8008F614)
SetType(0x8008F614, "void STR_CloseStream__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x8008F680)
SetType(0x8008F680, "void STR_SoundCommand__FP6SFXHDRi(struct SFXHDR *sfh, int Command)")
del_items(0x8008F78C)
SetType(0x8008F78C, "char STR_Command__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x8008F938)
SetType(0x8008F938, "void STR_DMAControl__FP6SFXHDR(struct SFXHDR *sfh)")
del_items(0x8008FA00)
SetType(0x8008FA00, "void STR_PlayStream__FP6SFXHDRPUci(struct SFXHDR *sfh, unsigned char *Src, int size)")
del_items(0x8008FBDC)
SetType(0x8008FBDC, "void STR_AsyncWeeTASK__FP4TASK(struct TASK *T)")
del_items(0x8008FEDC)
SetType(0x8008FEDC, "void STR_AsyncTASK__FP4TASK(struct TASK *T)")
del_items(0x80090310)
SetType(0x80090310, "void STR_StreamMainTask__FP6SFXHDRc(struct SFXHDR *sfh, char FileType)")
del_items(0x80090418)
SetType(0x80090418, "void SND_Monitor__FP4TASK(struct TASK *T)")
del_items(0x800904A4)
SetType(0x800904A4, "void SPU_Init__Fv()")
del_items(0x800905B0)
SetType(0x800905B0, "int SND_FindChannel__Fv()")
del_items(0x8009061C)
SetType(0x8009061C, "void SND_ClearBank__Fv()")
del_items(0x80090694)
SetType(0x80090694, "bool SndLoadCallBack__FPUciib(unsigned char *Mem, int ReadSoFar, int Size, bool LastChunk)")
del_items(0x8009070C)
SetType(0x8009070C, "void SND_LoadBank__Fi(int lvlnum)")
del_items(0x80090840)
SetType(0x80090840, "int SND_FindSFX__FUs(unsigned short Name)")
del_items(0x80090894)
SetType(0x80090894, "void SND_StopSnd__Fi(int voice)")
del_items(0x800908B8)
SetType(0x800908B8, "bool SND_IsSfxPlaying__Fi(int SFXNo)")
del_items(0x800908F4)
SetType(0x800908F4, "int SND_RemapSnd__Fi(int SFXNo)")
del_items(0x80090958)
SetType(0x80090958, | |
uk_36
+ 4500 * uk_37
+ 160 * uk_38
+ 25281 * uk_39
+ 8 * uk_4
+ 35775 * uk_40
+ 1272 * uk_41
+ 50625 * uk_42
+ 1800 * uk_43
+ 64 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 403869462877 * uk_47
+ 20579335688 * uk_48
+ 51448339220 * uk_49
+ 20 * uk_5
+ 409014296799 * uk_50
+ 578793816225 * uk_51
+ 20579335688 * uk_52
+ 153424975 * uk_53
+ 437958565 * uk_54
+ 22316360 * uk_55
+ 55790900 * uk_56
+ 443537655 * uk_57
+ 627647625 * uk_58
+ 22316360 * uk_59
+ 159 * uk_6
+ 1250172631 * uk_60
+ 63703064 * uk_61
+ 159257660 * uk_62
+ 1266098397 * uk_63
+ 1791648675 * uk_64
+ 63703064 * uk_65
+ 3246016 * uk_66
+ 8115040 * uk_67
+ 64514568 * uk_68
+ 91294200 * uk_69
+ 225 * uk_7
+ 3246016 * uk_70
+ 20287600 * uk_71
+ 161286420 * uk_72
+ 228235500 * uk_73
+ 8115040 * uk_74
+ 1282227039 * uk_75
+ 1814472225 * uk_76
+ 64514568 * uk_77
+ 2567649375 * uk_78
+ 91294200 * uk_79
+ 8 * uk_8
+ 3246016 * uk_80
+ 166375 * uk_81
+ 474925 * uk_82
+ 24200 * uk_83
+ 60500 * uk_84
+ 480975 * uk_85
+ 680625 * uk_86
+ 24200 * uk_87
+ 1355695 * uk_88
+ 69080 * uk_89
+ 2572416961 * uk_9
+ 172700 * uk_90
+ 1372965 * uk_91
+ 1942875 * uk_92
+ 69080 * uk_93
+ 3520 * uk_94
+ 8800 * uk_95
+ 69960 * uk_96
+ 99000 * uk_97
+ 3520 * uk_98
+ 22000 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 106260 * uk_100
+ 148500 * uk_101
+ 103620 * uk_102
+ 1425655 * uk_103
+ 1992375 * uk_104
+ 1390235 * uk_105
+ 2784375 * uk_106
+ 1942875 * uk_107
+ 1355695 * uk_108
+ 64 * uk_109
+ 202876 * uk_11
+ 2512 * uk_110
+ 192 * uk_111
+ 2576 * uk_112
+ 3600 * uk_113
+ 2512 * uk_114
+ 98596 * uk_115
+ 7536 * uk_116
+ 101108 * uk_117
+ 141300 * uk_118
+ 98596 * uk_119
+ 7962883 * uk_12
+ 576 * uk_120
+ 7728 * uk_121
+ 10800 * uk_122
+ 7536 * uk_123
+ 103684 * uk_124
+ 144900 * uk_125
+ 101108 * uk_126
+ 202500 * uk_127
+ 141300 * uk_128
+ 98596 * uk_129
+ 608628 * uk_13
+ 3869893 * uk_130
+ 295788 * uk_131
+ 3968489 * uk_132
+ 5546025 * uk_133
+ 3869893 * uk_134
+ 22608 * uk_135
+ 303324 * uk_136
+ 423900 * uk_137
+ 295788 * uk_138
+ 4069597 * uk_139
+ 8165759 * uk_14
+ 5687325 * uk_140
+ 3968489 * uk_141
+ 7948125 * uk_142
+ 5546025 * uk_143
+ 3869893 * uk_144
+ 1728 * uk_145
+ 23184 * uk_146
+ 32400 * uk_147
+ 22608 * uk_148
+ 311052 * uk_149
+ 11411775 * uk_15
+ 434700 * uk_150
+ 303324 * uk_151
+ 607500 * uk_152
+ 423900 * uk_153
+ 295788 * uk_154
+ 4173281 * uk_155
+ 5832225 * uk_156
+ 4069597 * uk_157
+ 8150625 * uk_158
+ 5687325 * uk_159
+ 7962883 * uk_16
+ 3968489 * uk_160
+ 11390625 * uk_161
+ 7948125 * uk_162
+ 5546025 * uk_163
+ 3869893 * uk_164
+ 3025 * uk_17
+ 220 * uk_18
+ 8635 * uk_19
+ 55 * uk_2
+ 660 * uk_20
+ 8855 * uk_21
+ 12375 * uk_22
+ 8635 * uk_23
+ 16 * uk_24
+ 628 * uk_25
+ 48 * uk_26
+ 644 * uk_27
+ 900 * uk_28
+ 628 * uk_29
+ 4 * uk_3
+ 24649 * uk_30
+ 1884 * uk_31
+ 25277 * uk_32
+ 35325 * uk_33
+ 24649 * uk_34
+ 144 * uk_35
+ 1932 * uk_36
+ 2700 * uk_37
+ 1884 * uk_38
+ 25921 * uk_39
+ 157 * uk_4
+ 36225 * uk_40
+ 25277 * uk_41
+ 50625 * uk_42
+ 35325 * uk_43
+ 24649 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 10289667844 * uk_47
+ 403869462877 * uk_48
+ 30869003532 * uk_49
+ 12 * uk_5
+ 414159130721 * uk_50
+ 578793816225 * uk_51
+ 403869462877 * uk_52
+ 153424975 * uk_53
+ 11158180 * uk_54
+ 437958565 * uk_55
+ 33474540 * uk_56
+ 449116745 * uk_57
+ 627647625 * uk_58
+ 437958565 * uk_59
+ 161 * uk_6
+ 811504 * uk_60
+ 31851532 * uk_61
+ 2434512 * uk_62
+ 32663036 * uk_63
+ 45647100 * uk_64
+ 31851532 * uk_65
+ 1250172631 * uk_66
+ 95554596 * uk_67
+ 1282024163 * uk_68
+ 1791648675 * uk_69
+ 225 * uk_7
+ 1250172631 * uk_70
+ 7303536 * uk_71
+ 97989108 * uk_72
+ 136941300 * uk_73
+ 95554596 * uk_74
+ 1314687199 * uk_75
+ 1837295775 * uk_76
+ 1282024163 * uk_77
+ 2567649375 * uk_78
+ 1791648675 * uk_79
+ 157 * uk_8
+ 1250172631 * uk_80
+ 166375 * uk_81
+ 12100 * uk_82
+ 474925 * uk_83
+ 36300 * uk_84
+ 487025 * uk_85
+ 680625 * uk_86
+ 474925 * uk_87
+ 880 * uk_88
+ 34540 * uk_89
+ 2572416961 * uk_9
+ 2640 * uk_90
+ 35420 * uk_91
+ 49500 * uk_92
+ 34540 * uk_93
+ 1355695 * uk_94
+ 103620 * uk_95
+ 1390235 * uk_96
+ 1942875 * uk_97
+ 1355695 * uk_98
+ 7920 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 143440 * uk_100
+ 198000 * uk_101
+ 3520 * uk_102
+ 1461295 * uk_103
+ 2017125 * uk_104
+ 35860 * uk_105
+ 2784375 * uk_106
+ 49500 * uk_107
+ 880 * uk_108
+ 17576 * uk_109
+ 1318694 * uk_11
+ 2704 * uk_110
+ 10816 * uk_111
+ 110188 * uk_112
+ 152100 * uk_113
+ 2704 * uk_114
+ 416 * uk_115
+ 1664 * uk_116
+ 16952 * uk_117
+ 23400 * uk_118
+ 416 * uk_119
+ 202876 * uk_12
+ 6656 * uk_120
+ 67808 * uk_121
+ 93600 * uk_122
+ 1664 * uk_123
+ 690794 * uk_124
+ 953550 * uk_125
+ 16952 * uk_126
+ 1316250 * uk_127
+ 23400 * uk_128
+ 416 * uk_129
+ 811504 * uk_13
+ 64 * uk_130
+ 256 * uk_131
+ 2608 * uk_132
+ 3600 * uk_133
+ 64 * uk_134
+ 1024 * uk_135
+ 10432 * uk_136
+ 14400 * uk_137
+ 256 * uk_138
+ 106276 * uk_139
+ 8267197 * uk_14
+ 146700 * uk_140
+ 2608 * uk_141
+ 202500 * uk_142
+ 3600 * uk_143
+ 64 * uk_144
+ 4096 * uk_145
+ 41728 * uk_146
+ 57600 * uk_147
+ 1024 * uk_148
+ 425104 * uk_149
+ 11411775 * uk_15
+ 586800 * uk_150
+ 10432 * uk_151
+ 810000 * uk_152
+ 14400 * uk_153
+ 256 * uk_154
+ 4330747 * uk_155
+ 5978025 * uk_156
+ 106276 * uk_157
+ 8251875 * uk_158
+ 146700 * uk_159
+ 202876 * uk_16
+ 2608 * uk_160
+ 11390625 * uk_161
+ 202500 * uk_162
+ 3600 * uk_163
+ 64 * uk_164
+ 3025 * uk_17
+ 1430 * uk_18
+ 220 * uk_19
+ 55 * uk_2
+ 880 * uk_20
+ 8965 * uk_21
+ 12375 * uk_22
+ 220 * uk_23
+ 676 * uk_24
+ 104 * uk_25
+ 416 * uk_26
+ 4238 * uk_27
+ 5850 * uk_28
+ | |
= (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
    def test_rolling_timing(self):
        """TestLSStrategy should produce the expected 45x3 long/short mask
        when run over the self.hp1 fixture history panel."""
        stg = TestLSStrategy()
        # One parameter tuple per share code (per-share strategy parameters).
        stg_pars = {'000100': (5, 10),
                    '000200': (5, 10),
                    '000300': (5, 6)}
        stg.set_pars(stg_pars)
        history_data = self.hp1.values
        output = stg.generate(hist_data=history_data)
        self.assertIsInstance(output, np.ndarray)
        self.assertEqual(output.shape, (45, 3))
        # Expected mask: one row per date, one column per share.
        lsmask = np.array([[0., 0., 1.],
                           [0., 0., 1.],
                           [1., 0., 1.],
                           [1., 0., 1.],
                           [1., 0., 1.],
                           [1., 0., 1.],
                           [1., 0., 1.],
                           [1., 0., 1.],
                           [1., 0., 1.],
                           [1., 0., 1.],
                           [1., 0., 1.],
                           [1., 0., 1.],
                           [1., 1., 1.],
                           [1., 1., 1.],
                           [1., 1., 1.],
                           [1., 1., 0.],
                           [1., 1., 0.],
                           [1., 1., 0.],
                           [1., 0., 0.],
                           [1., 0., 0.],
                           [1., 1., 0.],
                           [0., 1., 0.],
                           [0., 1., 0.],
                           [0., 1., 0.],
                           [0., 1., 0.],
                           [0., 1., 0.],
                           [0., 1., 0.],
                           [0., 1., 0.],
                           [0., 1., 0.],
                           [0., 1., 0.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.],
                           [0., 1., 1.]])
        # TODO: Issue to be solved: np.nan values are converted to 0 in the
        # lsmask, which may have unintended consequences.
        # TODO: The handling of nan values still needs to be resolved.
        self.assertEqual(output.shape, lsmask.shape)
        self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
    def test_sel_timing(self):
        """TestSelStrategy should segment the hp1 date range as expected and
        produce the expected 45x3 selection-weight mask."""
        stg = TestSelStrategy()
        stg_pars = ()
        stg.set_pars(stg_pars)
        history_data = self.hp1['high, low, close', :, :]
        # Check the period segmentation first: segment start positions,
        # per-segment lengths and the total segment count.
        seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
        self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
        self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
        self.assertEqual(seg_count, 8)
        output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
        self.assertIsInstance(output, np.ndarray)
        self.assertEqual(output.shape, (45, 3))
        # Expected selection weights: rows sum to 1, two shares picked at 0.5 each.
        selmask = np.array([[0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.0, 0.5, 0.5],
                            [0.0, 0.5, 0.5],
                            [0.0, 0.5, 0.5],
                            [0.0, 0.5, 0.5],
                            [0.0, 0.5, 0.5],
                            [0.0, 0.5, 0.5],
                            [0.0, 0.5, 0.5],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.0, 0.5, 0.5],
                            [0.0, 0.5, 0.5],
                            [0.0, 0.5, 0.5],
                            [0.0, 0.5, 0.5],
                            [0.0, 0.5, 0.5],
                            [0.0, 0.5, 0.5],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0],
                            [0.5, 0.5, 0.0]])
        self.assertEqual(output.shape, selmask.shape)
        self.assertTrue(np.allclose(output, selmask))
    def test_simple_timing(self):
        """TestSigStrategy should produce the expected 45x3 buy/sell signal
        matrix (1.0 buy, -1.0 sell, 0.0 hold) over a slice of hp1."""
        stg = TestSigStrategy()
        stg_pars = (0.2, 0.02, -0.02)
        stg.set_pars(stg_pars)
        # Slice off the first 3 dates so the output lines up with 45 rows.
        history_data = self.hp1['close, open, high, low', :, 3:50]
        output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
        self.assertIsInstance(output, np.ndarray)
        self.assertEqual(output.shape, (45, 3))
        # Expected trade signals: one row per date, one column per share.
        sigmatrix = np.array([[0.0, 1.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, -1.0, 0.0],
                              [1.0, 0.0, 0.0],
                              [0.0, 0.0, -1.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0],
                              [0.0, 0.0, -1.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, -1.0],
                              [0.0, 1.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, -1.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, -1.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, 1.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [-1.0, 0.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 1.0, 1.0],
                              [0.0, 1.0, 0.0],
                              [0.0, 1.0, 0.0],
                              [0.0, 1.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0],
                              [0.0, 0.0, -1.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0]])
        # Print the two matrices row by row to ease diagnosis on failure.
        side_by_side_array = np.array([[i, out_line, sig_line]
                                       for
                                       i, out_line, sig_line
                                       in zip(range(len(output)), output, sigmatrix)])
        print(f'output and signal matrix lined up side by side is \n'
              f'{side_by_side_array}')
        self.assertEqual(sigmatrix.shape, output.shape)
        self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get mininum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
| |
from operator import add, mul
import os
from PyQt4.Qt import QPushButton, SIGNAL, QTextEdit, QScrollArea, QTabWidget, \
QLineEdit, QAbstractTableModel, QModelIndex, Qt, QVariant, QTableView, QIcon,\
QDialogButtonBox, QGridLayout, QLabel, QComboBox, QMenu, QCursor, QListWidget,\
QListWidgetItem, QMessageBox, QString
from CppBlockUtils import SecureBinaryData
from armoryengine.ArmoryUtils import RightNow, script_to_addrStr, \
addrStr_to_hash160, enum, isASCII, PyBackgroundThread
from armoryengine.PyBtcWallet import PyBtcWallet
from qtdefines import QRichLabel, makeHorizFrame, GETFONT, relaxedSizeNChar, \
makeVertFrame, tightSizeNChar, initialColResize, ArmoryDialog,\
UnicodeErrorBox
from armorycolors import Colors
from armorymodels import WLTVIEWCOLS
from PyQt4 import QtGui
# Give an upper limit for any method to return
# if the limit is exceeded raise MaxResultsExceeded exception
MAX_LIST_LEN = 20000000
class MaxResultsExceeded(Exception): pass
# NOTE(review): WalletNotFound derives from object, not Exception, so it
# cannot be raised as an exception in Python 2 -- presumably it is used as a
# sentinel/marker type; confirm against callers before changing its base.
class WalletNotFound(object): pass
# UI limits for segment entry and ordering.
MAX_SEGMENT_LENGTH = 20
MAX_SEGMENTS = 20
MAX_UNKNOWN_SEGMENT_LENGTH = 10
# Try-count display rolls over to a fixed string above this threshold.
BILLION_INT = 1000000000
OVER_BILLION_STR = '> 1 Billion'
class PluginObject(object):
    """Armory plugin tab that searches for a forgotten wallet pass phrase.

    The user describes the pass phrase as segments (known text, text of
    unknown case, or characters of unknown order) plus one or more segment
    orderings; the search runs on a background thread and streams results
    into a read-only text box via the main window's heartbeat callback.
    """
    tabName = 'Pass Phrase Finder'
    maxVersion = '0.93.99'
    #############################################################################
    def __init__(self, main):
        """Build the tab's widgets and wire up the search closures.

        `main` is the Armory main window; it provides the signal/slot
        `connect` helper, the wallet view, and `extraHeartbeatAlways`.
        """
        self.searchThread = None
        self.passPhraseFinder = None
        self.isSearchOver = False
        # Appends partial results; called from the search thread.
        def updateResults(resultStr):
            self.resultStr += resultStr
        # Heartbeat callback (main thread): flush accumulated results into
        # the display and finish up once the search thread reports done.
        def updateDisplay():
            if len(self.resultStr) > 0:
                self.resultsDisplay.append(self.resultStr)
                self.resultStr = ''
                self.resultsDisplay.moveCursor(QtGui.QTextCursor.End)
                self.resultsDisplay.repaint()
            if self.isSearchOver:
                endSearch()
        # Call this from another thread to end the search
        def terminateSearch():
            self.isSearchOver = True
        # Call this from the main thread to end the search
        def endSearch():
            self.main.extraHeartbeatAlways.remove(updateDisplay)
            self.searchButton.setEnabled(True)
            self.stopButton.setEnabled(False)
            # If the thread is still searching tell the pass phrase finder to stop
            if self.passPhraseFinder and self.searchThread and not self.searchThread.isFinished():
                self.passPhraseFinder.isStopped = True
        # Start a background pass-phrase search for the selected wallet.
        def searchForPassphrase():
            # Get the selected wallet from the main screen
            wlt = self.getSelectedWlt()
            if wlt and not wlt.watchingOnly and wlt.isLocked:
                self.resultStr = ''
                self.passPhraseFinder = PassPhraseFinder(wlt)
                self.resultsDisplay.setText(QString(''))
                self.main.extraHeartbeatAlways.append(updateDisplay)
                if len(self.segOrdStrSet) > 0:
                    # From self.segOrdStrList, create a list of lists of indexes that describe the segment orderings to search
                    # In other words convert all of the strings in orderings list to lists of integers
                    segOrdIntListList = []
                    for ordStr in self.segOrdStrSet:
                        # The indexes provided by the users are 1 based, and the list indexes ought to be 0 based
                        segOrdIntListList.append([int(indexStr)-1 for indexStr in ordStr.split(',')])
                    self.searchThread = PyBackgroundThread(self.passPhraseFinder.searchForPassPhrase,
                                                           [segDef.getSegList() for segDef in self.segDefList],
                                                           segOrdIntListList,
                                                           updateResults,
                                                           terminateSearch )
                    # Reset the isSearchOver flag
                    self.isSearchOver = False
                    self.searchThread.start()
                    # Disable search button and enable stop button
                    self.stopButton.setEnabled(True)
                    self.searchButton.setEnabled(False)
                else:
                    QMessageBox.warning(self.main, tr('Invalid'), tr("""
                        There are no valid segment combinations to search.
                        Please add at least one segment and ordering to search."""), QMessageBox.Ok)
            else:
                QMessageBox.warning(self.main, tr('Invalid'), tr("""
                    No valid wallet is selected. Please select a locked
                    non-watching-only from Available Wallets."""), QMessageBox.Ok)
        # Dialog launchers for the three segment kinds.
        def addKnownSegment():
            dlgEnterSegment = DlgEnterSegment(main, main)
            if dlgEnterSegment.exec_():
                segmentText = str(dlgEnterSegment.editSegment.text())
                if len(segmentText)>0:
                    self.segDefList.append(KnownSeg(segmentText))
                    self.segDefTableModel.updateSegList(self.segDefList)
        def addUnknownCaseSegment():
            dlgEnterSegment = DlgEnterSegment(main, main)
            if dlgEnterSegment.exec_():
                segmentText = str(dlgEnterSegment.editSegment.text())
                if len(segmentText)>0:
                    self.segDefList.append(UnknownCaseSeg(segmentText))
                    self.segDefTableModel.updateSegList(self.segDefList)
        def addUnknownOrderSegment():
            dlgEnterSegment = DlgEnterSegment(main, main, isUnknownOrder=True)
            if dlgEnterSegment.exec_():
                segmentText = str(dlgEnterSegment.editSegment.text())
                minLen = int(str(dlgEnterSegment.minSelector.currentText()))
                maxLen = int(str(dlgEnterSegment.maxSelector.currentText()))
                if len(segmentText)>0:
                    self.segDefList.append(UnknownSeg(segmentText, minLen, maxLen))
                    self.segDefTableModel.updateSegList(self.segDefList)
        # Ask the user for a segment ordering; requires at least one segment.
        def addOrdering():
            if len(self.segDefList) > 0:
                dlgSpecifyOrdering = DlgSpecifyOrdering(main, main, len(self.segDefList))
                if dlgSpecifyOrdering.exec_():
                    self.segOrdStrSet.add(str(dlgSpecifyOrdering.parseOrderingList()).strip('[]'))
                    self.updateOrderingListBox()
            else:
                QMessageBox.warning(self.main, tr('Not Ready'), tr("""
                    No segments have been entered. You must enter some segments before you can order them."""), QMessageBox.Ok)
        self.main = main
        self.segDefList = []
        self.segOrdStrSet = set()
        # --- Widget construction and layout ---
        segmentHeader = QRichLabel(tr("""<b>Build segments for pass phrase search: </b>"""), doWrap=False)
        self.knownButton = QPushButton("Add Known Segment")
        self.unknownCaseButton = QPushButton("Add Unknown Case Segment")
        self.unknownOrderButton = QPushButton("Add Unknown Order Segment")
        self.main.connect(self.knownButton, SIGNAL('clicked()'), addKnownSegment)
        self.main.connect(self.unknownCaseButton, SIGNAL('clicked()'), addUnknownCaseSegment)
        self.main.connect(self.unknownOrderButton, SIGNAL('clicked()'), addUnknownOrderSegment)
        topRow = makeHorizFrame([segmentHeader, self.knownButton, self.unknownCaseButton, self.unknownOrderButton, 'stretch'])
        self.segDefTableModel = SegDefDisplayModel()
        self.segDefTableView = QTableView()
        self.segDefTableView.setModel(self.segDefTableModel)
        self.segDefTableView.setSelectionBehavior(QTableView.SelectRows)
        self.segDefTableView.setSelectionMode(QTableView.SingleSelection)
        self.segDefTableView.verticalHeader().setDefaultSectionSize(20)
        self.segDefTableView.verticalHeader().hide()
        # Size the table between 2 and 10 rows of text height.
        h = tightSizeNChar(self.segDefTableView, 1)[1]
        self.segDefTableView.setMinimumHeight(2 * (1.3 * h))
        self.segDefTableView.setMaximumHeight(10 * (1.3 * h))
        initialColResize(self.segDefTableView, [.1, .2, .4, .1, .1, .1])
        self.segDefTableView.customContextMenuRequested.connect(self.showSegContextMenu)
        self.segDefTableView.setContextMenuPolicy(Qt.CustomContextMenu)
        segmentOrderingsHeader = QRichLabel(tr("""<b>Specify orderings for pass phrase search: </b>"""), doWrap=False)
        self.addOrderingButton = QPushButton("Add Ordering")
        self.main.connect(self.addOrderingButton, SIGNAL('clicked()'), addOrdering)
        orderingButtonPanel = makeHorizFrame([segmentOrderingsHeader, self.addOrderingButton, 'stretch'])
        self.segOrdListBox = QListWidget()
        self.segOrdListBox.customContextMenuRequested.connect(self.showOrdContextMenu)
        self.segOrdListBox.setContextMenuPolicy(Qt.CustomContextMenu)
        self.searchButton = QPushButton("Search")
        self.main.connect(self.searchButton, SIGNAL('clicked()'), searchForPassphrase)
        self.stopButton = QPushButton("Stop Searching")
        self.stopButton.setEnabled(False)
        self.main.connect(self.stopButton, SIGNAL('clicked()'), endSearch)
        totalSearchLabel = QRichLabel(tr("""<b>Total Passphrase Tries To Search: </b>"""), doWrap=False)
        self.totalSearchTriesDisplay = QLineEdit()
        self.totalSearchTriesDisplay.setReadOnly(True)
        self.totalSearchTriesDisplay.setText(QString('0'))
        self.totalSearchTriesDisplay.setFont(GETFONT('Fixed'))
        self.totalSearchTriesDisplay.setMinimumWidth(tightSizeNChar(self.totalSearchTriesDisplay, 6)[0])
        self.totalSearchTriesDisplay.setMaximumWidth(tightSizeNChar(self.totalSearchTriesDisplay, 12)[0])
        searchButtonPanel = makeHorizFrame([self.searchButton, self.stopButton, 'stretch', totalSearchLabel, self.totalSearchTriesDisplay])
        self.resultsDisplay = QTextEdit()
        self.resultsDisplay.setReadOnly(True)
        self.resultsDisplay.setFont(GETFONT('Fixed'))
        self.resultsDisplay.setMinimumHeight(100)
        self.searchPanel = makeVertFrame([topRow, self.segDefTableView, orderingButtonPanel,
                                          self.segOrdListBox, searchButtonPanel, self.resultsDisplay, 'stretch'])
        # Now set the scrollarea widget to the layout
        self.tabToDisplay = QScrollArea()
        self.tabToDisplay.setWidgetResizable(True)
        self.tabToDisplay.setWidget(self.searchPanel)
    def getSelectedWlt(self):
        """Return the wallet currently selected in the main view, or None."""
        wlt = None
        selectedWltList = self.main.walletsView.selectedIndexes()
        if len(selectedWltList)>0:
            row = selectedWltList[0].row()
            wltID = str(self.main.walletsView.model().index(row, WLTVIEWCOLS.ID).data().toString())
            wlt = self.main.walletMap[wltID]
        return wlt
    def showSegContextMenu(self):
        """Context menu on the segment table offering 'Delete Segment'."""
        menu = QMenu(self.segDefTableView)
        if len(self.segDefTableView.selectedIndexes())==0:
            return
        row = self.segDefTableView.selectedIndexes()[0].row()
        deleteSegMenuItem = menu.addAction("Delete Segment")
        action = menu.exec_(QCursor.pos())
        if action == deleteSegMenuItem:
            self.deleteSegRow(row)
    def showOrdContextMenu(self):
        """Context menu on the ordering list offering 'Delete Ordering'."""
        menu = QMenu(self.segOrdListBox)
        if len(self.segOrdListBox.selectedItems())==0:
            return
        item = self.segOrdListBox.currentItem()
        deleteOrdMenuItem = menu.addAction("Delete Ordering")
        action = menu.exec_(QCursor.pos())
        if action == deleteOrdMenuItem:
            self.deleteOrdItem(item)
    def deleteSegRow(self, row):
        """Remove segment `row`; all orderings are invalidated and cleared."""
        self.segDefList.remove(self.segDefList[row])
        self.segDefTableModel.updateSegList(self.segDefList)
        self.segOrdStrSet.clear()
        self.updateOrderingListBox()
    def deleteOrdItem(self, ordItem):
        """Remove one ordering from the set and refresh the list box."""
        ordText = str(ordItem.text())
        self.segOrdStrSet.remove(ordText)
        self.updateOrderingListBox()
    def getTabToDisplay(self):
        """Return the top-level widget Armory should embed as this tab."""
        return self.tabToDisplay
    def updateOrderingListBox(self):
        """Rebuild the ordering list box and the total-tries display."""
        self.segOrdListBox.clear()
        segOrdList = list(self.segOrdStrSet)
        segOrdList.sort()
        totalTries = 0
        for ordStr in segOrdList:
            self.segOrdListBox.addItem(QListWidgetItem(ordStr))
            totalTries += self.calculateTries(ordStr)
        if totalTries > BILLION_INT:
            self.totalSearchTriesDisplay.setText(OVER_BILLION_STR)
        else:
            self.totalSearchTriesDisplay.setText(str(totalTries))
    def calculateTries(self, ordStr):
        """Return how many candidate phrases ordering `ordStr` produces."""
        ordIntList = [int(indexStr) for indexStr in ordStr.split(',')]
        totalTries = 1
        # Multiply each of the totals of segment instance together.
        for ordInt in ordIntList:
            totalTries *= self.segDefList[ordInt-1].getSegListLen()
        return totalTries
class DlgSpecifyOrdering(ArmoryDialog):
    """Dialog asking the user for a segment ordering.

    An ordering is a comma separated list of 1-based segment indices; it is
    validated in accept() against `maxSeg`, the number of defined segments.
    """
    def __init__(self, parent, main, maxSeg):
        super(DlgSpecifyOrdering, self).__init__(parent, main)
        self.maxSeg = maxSeg
        self.setWindowTitle('Enter Ordering')
        self.setWindowIcon(QIcon(self.main.iconfile))
        buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | \
                                     QDialogButtonBox.Cancel)
        self.connect(buttonbox, SIGNAL('accepted()'), self.accept)
        self.connect(buttonbox, SIGNAL('rejected()'), self.reject)
        layout = QGridLayout()
        lbl = QLabel('Enter Ordering as a comma separated list of segment indices between 1 and %d:' % maxSeg)
        self.editOrdering = QLineEdit()
        h, w = relaxedSizeNChar(self, 50)
        self.editOrdering.setMinimumSize(h, w)
        self.editOrdering.setMaxLength(MAX_SEGMENTS)
        editSegPanel = makeHorizFrame([self.editOrdering, 'stretch'])
        layout.addWidget(lbl, 0, 0)
        layout.addWidget(editSegPanel, 0, 1)
        layout.addWidget(buttonbox, 1, 0)
        self.setLayout(layout)
    # return empty list if not a valid list of numbers
    def parseOrderingList(self):
        """Parse the text field into a list of ints; return [] when invalid."""
        try:
            return [int(i) for i in str(self.editOrdering.text()).split(',')]
        except ValueError:
            # Non-numeric or empty entry. This was a bare `except:` which
            # also swallowed programming errors; only the expected parse
            # failure is treated as "no valid ordering" now.
            return []
    #############################################################################
    def accept(self):
        """Validate the ordering before allowing the dialog to close."""
        orderingList = self.parseOrderingList()
        if len(orderingList) < 1:
            QMessageBox.warning(self.main, tr('Invalid'), tr("""
                Some segment indices are invalid."""), QMessageBox.Ok)
            return
        for segIndex in orderingList:
            # Indices are 1-based and must refer to an existing segment.
            if segIndex > self.maxSeg or segIndex < 1:
                QMessageBox.warning(self.main, tr('Invalid'), tr("""
                    Some segment indices are out of range."""), QMessageBox.Ok)
                return
        super(DlgSpecifyOrdering, self).accept()
################################################################################
class DlgEnterSegment(ArmoryDialog):
    """Dialog used to enter one pass-phrase segment.

    When `isUnknownOrder` is True, min/max length selectors are shown so the
    user can bound the length of the unknown-order segment.
    """
    #############################################################################
    def __init__(self, parent, main, isUnknownOrder=False):
        super(DlgEnterSegment, self).__init__(parent, main)
        self.setWindowTitle('Enter Segment')
        self.setWindowIcon(QIcon(self.main.iconfile))
        buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | \
                                     QDialogButtonBox.Cancel)
        self.connect(buttonbox, SIGNAL('accepted()'), self.accept)
        self.connect(buttonbox, SIGNAL('rejected()'), self.reject)
        layout = QGridLayout()
        lbl = QLabel('Segment Text:')
        self.editSegment = QLineEdit()
        h, w = relaxedSizeNChar(self, 50)
        self.editSegment.setMinimumSize(h, w)
        self.editSegment.setMaxLength(MAX_SEGMENT_LENGTH)
        editSegPanel = makeHorizFrame([self.editSegment, 'stretch'])
        layout.addWidget(lbl, 0, 0)
        layout.addWidget(editSegPanel, 0, 1)
        minSelectorLabel = QLabel('Min Length: ')
        maxSelectorLabel = QLabel('Max Length: ')
        self.minSelector = QComboBox()
        self.maxSelector = QComboBox()
        if isUnknownOrder:
            self.minSelector.setFont(GETFONT('Var', 10, bold=True))
            self.maxSelector.setFont(GETFONT('Var', 10, bold=True))
            # NOTE(review): range(1, MAX_UNKNOWN_SEGMENT_LENGTH) offers
            # lengths 1..9 only, so MAX_UNKNOWN_SEGMENT_LENGTH (10) itself
            # is never selectable -- confirm whether this is intended.
            for i in range(1,MAX_UNKNOWN_SEGMENT_LENGTH):
                self.minSelector.addItem(str(i))
                self.maxSelector.addItem(str(i))
            # Both selectors start on the first entry, i.e. length 1.
            self.minSelector.setCurrentIndex(0)
            self.maxSelector.setCurrentIndex(0)
            # fix the inversion of min and max when user sets min
            def updateMaxSelector():
                minLen = int(str(self.minSelector.currentText()))
                maxLen = int(str(self.maxSelector.currentText()))
                if minLen > maxLen:
                    self.maxSelector.setCurrentIndex(minLen - 1)
            # fix the inversion of min and max when user sets max
            def updateMinSelector():
                minLen = int(str(self.minSelector.currentText()))
                maxLen = int(str(self.maxSelector.currentText()))
                if minLen > maxLen:
                    self.minSelector.setCurrentIndex(maxLen - 1)
            main.connect(self.minSelector, SIGNAL('activated(int)'), \
                         updateMaxSelector)
            main.connect(self.maxSelector, SIGNAL('activated(int)'), \
                         updateMinSelector)
            layout.addWidget(minSelectorLabel, 1, 0)
            minSelectorPanel = makeHorizFrame([self.minSelector,'stretch'])
            layout.addWidget(minSelectorPanel, 1, 1)
            layout.addWidget(maxSelectorLabel, 2, 0)
            maxSelectorPanel = makeHorizFrame([self.maxSelector,'stretch'])
            layout.addWidget(maxSelectorPanel, 2, 1)
            layout.addWidget(buttonbox, 3, 0)
        else:
            layout.addWidget(buttonbox, 1, 0)
        self.setLayout(layout)
    #############################################################################
    def accept(self):
        """Reject non-ASCII segment text; otherwise close normally."""
        if not isASCII(unicode(self.editSegment.text())):
            UnicodeErrorBox(self)
            return
        else:
            super(DlgEnterSegment, self).accept()
class PwdSeg(object):
    """Abstract base class for one pass-phrase segment.

    A segment stores its (possibly only partially known) text and can
    enumerate every candidate string it might represent. Subclasses must
    implement getSegListLen(), segListGenerator() and getSegList().
    """
    def __init__(self, known):
        self.known = known
    # Abstract method
    def getSegListLen(self):
        """Return how many candidate strings this segment expands to."""
        # Fixed message: it previously named a non-existent method
        # "getSegListLength()".
        raise NotImplementedError("Subclass must implement getSegListLen()")
    # Abstract Generator
    def segListGenerator(self, maxResults=MAX_LIST_LEN):
        """Yield candidate strings one at a time, up to maxResults."""
        # Fixed message: it previously named getSegList() instead of this
        # method.
        raise NotImplementedError("Subclass must implement segListGenerator()")
        # The unreachable yield keeps this a generator function so callers
        # that iterate it fail with NotImplementedError on first next().
        yield None
    # Abstract method
    def getSegList(self, maxResults=MAX_LIST_LEN):
        """Return the candidate strings as a list, up to maxResults."""
        raise NotImplementedError("Subclass must implement getSegList()")
class UnknownCaseSeg(PwdSeg):
    def __init__(self, known):
        """Segment whose characters are known but whose letter case is not."""
        super(UnknownCaseSeg, self).__init__(known)
    # Return both cases of a character, or just [ch] when ch has no case.
    getBothCases = lambda self, ch : [ch.lower(), ch.upper()] if ch.lower() != ch.upper() else [ch]
def segListRecursiveGenerator(self, seg):
if len(seg) > 0:
for a in self.getBothCases(seg[0]):
for b in self.segListRecursiveGenerator(seg[1:]):
yield a + b
else:
yield ''
def getSegListLen(self):
return reduce(mul, [1 if ch.lower() == ch.upper() else 2 for ch in self.known])
def segListGenerator(self, maxResults=MAX_LIST_LEN):
if self.getSegListLen() > maxResults:
| |
(masked with %s)" % \
maskedwith
len2 = len(title)
title += "\n" + (len2 * '~')
strg += title + "\n" + str(data_masked) + "\n"
strg += self.get_data_str('refout', underline=True, underchar="-")
strg += self.get_data_str('zerodata', underline=True, underchar="-")
if hasattr(self, 'pixeldq'):
strg += self.get_data_str('pixeldq', underline=True, underchar="-")
if hasattr(self, 'groupdq'):
strg += self.get_data_str('groupdq', underline=True, underchar="-")
if hasattr(self, 'group'):
strg += self.get_data_str('group', underline=True, underchar="-")
return strg
@property
def dq(self):
# The dq attribute is an alias for pixeldq, groupdq or both combined,
# depending on the masking method.
if self.maskwith == 'groupdq':
if hasattr(self, 'groupdq'):
return self.groupdq
else:
return None
elif self.maskwith == 'pixeldq':
if hasattr(self, 'pixeldq'):
return self.pixeldq
else:
return None
else:
# Combine both sets of flags together, if they both exist.
if hasattr(self, 'groupdq') and self._isvalid(self.groupdq) and \
hasattr(self, 'pixeldq') and self._isvalid(self.pixeldq):
return self.groupdq | self.pixeldq
elif hasattr(self, 'groupdq') and self._isvalid(self.groupdq):
return self.groupdq
elif hasattr(self, 'pixeldq') and self._isvalid(self.pixeldq):
return self.pixeldq
else:
return None
class MiriSlopeModel(MiriMeasuredModel):
"""
A data model for MIRI slope data with error handling and masking, like
MiriMeasuredModel, but with additional restrictions to ensure the
underlying data model is compatible with the STScI RampModel.
:Parameters:
The same as MiriMeasuredModel, plus
zeropt: numpy array
An array containing the zero point of the fit.
Must be broadcastable onto the data array.
nreads: numpy array
An array containing the number of good frames used in the fits
Must be broadcastable onto the data array.
readsat: numpy array
An array containing the frame number of the first saturated frame.
Must be broadcastable onto the data array.
ngoodseg: numpy array
An array containing the number of good segments used in the slope fit.
Must be broadcastable onto the data array.
fiterr: numpy array
An array containing the RMS error in the slope fit.
Must be broadcastable onto the data array.
meandata: numpy array (optional)
An array containing the mean slope data.
Must be broadcastable onto the data array.
meanerr: numpy array (optional)
An array containing the uncertainty in the mean slope data.
Must be broadcastable onto the data array.
meandq: numpy array (optional)
An array containing the quality of the mean slope data.
Must be broadcastable onto the data array.
"""
schema_url = "miri_slope.schema"
    def __init__(self, init=None, data=None, dq=None, err=None, dq_def=None,
                 zeropt=None, nreads=None, readsat=None, ngoodseg=None,
                 fiterr=None, fitinfo=None, **kwargs):
        """Initialise the slope model.

        Delegates data/dq/err handling to MiriMeasuredModel, then records
        slope-specific metadata and the optional housekeeping arrays
        (nreads, readsat, ngoodseg, zeropt, fiterr). `fitinfo`, if given,
        is a dict whose recognised keys update the fit metadata.
        """
        super(MiriSlopeModel, self).__init__(init=init, data=data, dq=dq,
                                             err=err, dq_def=dq_def,
                                             **kwargs)
        # Data type is MIRI slope data.
        self.meta.filetype = 'Slope (level 2)'
        model_type = get_my_model_type( self.__class__.__name__ )
        self.meta.model_type = model_type
        # If 3-D data is given, the 3rd dimension gives the number of
        # integrations.
        if data is not None and hasattr(data, "shape"):
            if len(data.shape) == 3:
                nints = data.shape[0]
                # Only fill in nints when the metadata does not already
                # define it.
                if self.meta.exposure.nints is None:
                    self.meta.exposure.nints = nints
        # NOTE: The additional metadata and housekeeping arrays
        # added below are TO BE DECIDED. Some of the extra data is
        # not going to be needed by the STScI pipeline.
        # If fit information is provided, use it to update the metadata.
        if fitinfo is not None and isinstance(fitinfo, dict):
            if 'NPINT' in fitinfo:
                self.meta.fit.npint = fitinfo['NPINT']
            if 'NSFITS' in fitinfo:
                self.meta.fit.nsfits = fitinfo['NSFITS']
            if 'NSFITE' in fitinfo:
                self.meta.fit.nsfite = fitinfo['NSFITE']
            if 'HIGHSAT' in fitinfo:
                self.meta.fit.highsat = fitinfo['HIGHSAT']
        if nreads is not None:
            self.nreads = nreads
            # self._nreads_mask = None
            # self._nreads_fill = 0
            # self._nreads_fill_value = None
        if readsat is not None:
            self.readsat = readsat
            # self._readsat_mask = None
            # self._readsat_fill = -1
            # self._readsat_fill_value = None
        if ngoodseg is not None:
            self.ngoodseg = ngoodseg
            # self._ngoodseg_mask = None
            # self._ngoodseg_fill = 0
            # self._ngoodseg_fill_value = None
        if zeropt is not None:
            self.zeropt = zeropt
            # Mask/fill state used by the zeropt_masked/zeropt_filled
            # properties; rebuilt on each access.
            self._zeropt_mask = None
            self._zeropt_fill = 'min'
            self._zeropt_fill_value = None
        if fiterr is not None:
            self.fiterr = fiterr
            self._fiterr_mask = None
            self._fiterr_fill = 'max'
            self._fiterr_fill_value = None
        # Copy the units of the these arrays, if defined.
        self.set_data_units('zeropt')
        self.set_data_units('fiterr')
def __str__(self):
"""
Return the contents of the slope object as a readable
string.
"""
# First obtain a string describing the underlying measured
# model.
strg = super(MiriSlopeModel, self).__str__(extra_objects=False)
# Add the extras
if self.dq is not None and len(self.dq) > 0:
maxdq = self.dq.max()
else:
maxdq = 0
strg += self.get_data_str('nreads', underline=True, underchar="-")
strg += self.get_data_str('readsat', underline=True, underchar="-")
strg += self.get_data_str('ngoodseg', underline=True, underchar="-")
if self.zeropt is not None:
strg += self.get_data_str('zeropt', underline=True, underchar="-")
if maxdq > 0:
if self.maskable():
zeropt_masked = self.zeropt_masked
if zeropt_masked is not None:
title = self.get_data_title('zeropt') + " (masked)"
len2 = len(title)
title += "\n" + (len2 * '~')
strg += title + "\n" + str(zeropt_masked) + "\n"
if self.fiterr is not None:
strg += self.get_data_str('fiterr', underline=True, underchar="-")
if maxdq > 0:
if self.maskable():
fiterr_masked = self.fiterr_masked
if fiterr_masked is not None:
title = self.get_data_title('fiterr') + " (masked)"
len2 = len(title)
title += "\n" + (len2 * '~')
strg += title + "\n" + str(fiterr_masked) + "\n"
return strg
@property
def zeropt_masked(self):
# Generate the masked data on the fly. This ensures the
# masking is always up to date with the latest dq array.
# TODO: Can this result be cached and the cache invalidated
# when either the zeropt or dq arrays change?
if self.zeropt is not None and self.dq is not None:
self._zeropt_mask = self._generate_mask(self.zeropt, self.dq)
self._zeropt_fill_value = self._generate_fill(self.zeropt,
self._zeropt_fill)
return ma.array(self.zeropt, mask=self._zeropt_mask,
fill_value=self._zeropt_fill_value)
else:
return self.zeropt
@property
def zeropt_filled(self):
masked = self.zeropt_masked
if masked is not None:
return masked.filled(self._zeropt_fill_value)
else:
return self.zeropt
@property
def fiterr_masked(self):
# Generate the masked data on the fly. This ensures the
# masking is always up to date with the latest dq array.
# TODO: Can this result be cached and the cache invalidated
# when either the fiterr or dq arrays change?
if self.fiterr is not None and self.dq is not None:
self._fiterr_mask = self._generate_mask(self.fiterr, self.dq)
self._fiterr_fill_value = self._generate_fill(self.fiterr,
self._fiterr_fill)
return ma.array(self.fiterr, mask=self._fiterr_mask,
fill_value=self._fiterr_fill_value)
else:
return self.fiterr
@property
def fiterr_filled(self):
masked = self.fiterr_masked
if masked is not None:
return masked.filled(self._fiterr_fill_value)
else:
return self.fiterr
#
# A minimal test is run when this file is run as a main program.
# For a more substantial test see miri/datamodels/tests.
#
if __name__ == '__main__':
print("Testing the MiriMeasuredModel module.")
PLOTTING = False
SAVE_FILES = False
data3x3 = np.array([[1.,2.,3.],[4.,5.,6.],[7.,8.,9.]])
err3x3 = np.array([[1.,1.,1.],[2.,2.,2.],[1.,1.,1.]])
dq3x3 = np.array([[0,1,0],[1,2,1],[0,1,0]])
dqa3x3 = np.array([[0,2,0],[1,0,1],[0,0,0]])
dqb3x3 = np.array([[0,1,0],[0,1,0],[2,1,0]])
dqdef = master_flags
grpdef = groupdq_flags
tenarray = np.ones_like(data3x3) * 10.0
print("Completely null measured data")
with MiriMeasuredModel(title='null data') as nulldata:
print(nulldata)
print(nulldata.stats())
print("Scalar measured data")
with MiriMeasuredModel( data=42, title='scalar data' ) as scalardata:
print(scalardata)
print(scalardata.stats())
print("Measured data with data + err + dq:")
with MiriMeasuredModel(data=data3x3, err=err3x3, dq=dq3x3, dq_def=dqdef,
title='data+err+dq') \
as testdata:
print("Data arrays: ", testdata.list_data_arrays())
print("Data tables: ", testdata.list_data_tables())
print(testdata)
print(testdata.stats())
if PLOTTING:
testdata.plot("Test data 1 - with data + err + dq")
if SAVE_FILES:
testdata.save("test_measured_model.fits", overwrite=True)
print("Add 1 to testdata")
newdata = testdata + 1
print(newdata)
del newdata
print("Subtract 1 from testdata")
newdata = testdata - 1
print(newdata)
del newdata
print("Multiply testdata by 10.")
newdata = testdata * 10
print(newdata)
del newdata
print("Divide testdata by 10.")
newdata = testdata / 10
print(newdata)
del newdata
print("Add an array full of 10s to testdata.")
newdata = testdata + tenarray
print(newdata)
del newdata
print("Subtract an array full of 10s from testdata.")
newdata = testdata - tenarray
print(newdata)
del newdata
print("Multiply testdata by an array full of 10s.")
newdata = testdata * tenarray
print(newdata)
del newdata
print("Divide testdata by an array full of 10s.")
newdata = testdata / tenarray
print(newdata)
del newdata
print("Image data with data + err + dq:")
with MiriMeasuredModel(data=data3x3, err=err3x3, dq=dq3x3,
dq_def=dqdef) \
as testdata2:
print(testdata2)
print(testdata2.stats())
if PLOTTING:
testdata2.plot("Test data 2 - image with data + err + dq")
if SAVE_FILES:
testdata2.save("test__image_model.fits",
overwrite=True)
print("Add two image objects together")
newdata = testdata + testdata2
print(newdata)
del newdata
print("Subtract one image object from another")
newdata = testdata - testdata2
print(newdata)
del newdata
print("Multiply two | |
CREATED_BY = "createdBy"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
IS_DEFAULT = "isDefault"
LINKS = "links"
PAGES_URL = "pagesUrl"
PAGES = "pages"
PARENT_NOTEBOOK = "parentNotebook"
PARENT_SECTION_GROUP = "parentSectionGroup"
# NOTE(review): The Enum6xx classes below appear to be autogenerated OData
# query-option enums ($select / $orderby / $expand) — the '*' member, the
# "<name> desc" pairs and the relationship names suggest this; confirm
# against the code generator before relying on the interpretation.
class Enum606(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PAGES = "pages"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
class Enum607(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort keys (name, optionally with ' desc') — apparently OData $orderby."""
    ID = "id"
    ID_DESC = "id desc"
    SELF = "self"
    SELF_DESC = "self desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    CONTENT = "content"
    CONTENT_DESC = "content desc"
    CONTENT_URL = "contentUrl"
    CONTENT_URL_DESC = "contentUrl desc"
    CREATED_BY_APP_ID = "createdByAppId"
    CREATED_BY_APP_ID_DESC = "createdByAppId desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    LEVEL = "level"
    LEVEL_DESC = "level desc"
    LINKS = "links"
    LINKS_DESC = "links desc"
    ORDER = "order"
    ORDER_DESC = "order desc"
    TITLE = "title"
    TITLE_DESC = "title desc"
    USER_TAGS = "userTags"
    USER_TAGS_DESC = "userTags desc"
class Enum608(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CONTENT = "content"
    CONTENT_URL = "contentUrl"
    CREATED_BY_APP_ID = "createdByAppId"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LEVEL = "level"
    LINKS = "links"
    ORDER = "order"
    TITLE = "title"
    USER_TAGS = "userTags"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION = "parentSection"
class Enum609(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION = "parentSection"
class Enum61(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum610(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CONTENT = "content"
    CONTENT_URL = "contentUrl"
    CREATED_BY_APP_ID = "createdByAppId"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LEVEL = "level"
    LINKS = "links"
    ORDER = "order"
    TITLE = "title"
    USER_TAGS = "userTags"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION = "parentSection"
class Enum611(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION = "parentSection"
class Enum612(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    IS_DEFAULT = "isDefault"
    IS_SHARED = "isShared"
    LINKS = "links"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    USER_ROLE = "userRole"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum613(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum614(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort keys (name, optionally with ' desc') — apparently OData $orderby."""
    ID = "id"
    ID_DESC = "id desc"
    SELF = "self"
    SELF_DESC = "self desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    CREATED_BY = "createdBy"
    CREATED_BY_DESC = "createdBy desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTION_GROUPS_URL_DESC = "sectionGroupsUrl desc"
    SECTIONS_URL = "sectionsUrl"
    SECTIONS_URL_DESC = "sectionsUrl desc"
class Enum615(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum616(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum617(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum618(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum619(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort keys (name, optionally with ' desc') — apparently OData $orderby."""
    ID = "id"
    ID_DESC = "id desc"
    SELF = "self"
    SELF_DESC = "self desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    CREATED_BY = "createdBy"
    CREATED_BY_DESC = "createdBy desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    IS_DEFAULT = "isDefault"
    IS_DEFAULT_DESC = "isDefault desc"
    LINKS = "links"
    LINKS_DESC = "links desc"
    PAGES_URL = "pagesUrl"
    PAGES_URL_DESC = "pagesUrl desc"
class Enum62(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum620(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    IS_DEFAULT = "isDefault"
    LINKS = "links"
    PAGES_URL = "pagesUrl"
    PAGES = "pages"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
class Enum621(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PAGES = "pages"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
class Enum622(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    IS_DEFAULT = "isDefault"
    LINKS = "links"
    PAGES_URL = "pagesUrl"
    PAGES = "pages"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
class Enum623(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PAGES = "pages"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
class Enum624(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    IS_DEFAULT = "isDefault"
    LINKS = "links"
    PAGES_URL = "pagesUrl"
    PAGES = "pages"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
class Enum625(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PAGES = "pages"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
class Enum626(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    IS_DEFAULT = "isDefault"
    IS_SHARED = "isShared"
    LINKS = "links"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    USER_ROLE = "userRole"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum627(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum628(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort keys (name, optionally with ' desc') — apparently OData $orderby."""
    ID = "id"
    ID_DESC = "id desc"
    SELF = "self"
    SELF_DESC = "self desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    CREATED_BY = "createdBy"
    CREATED_BY_DESC = "createdBy desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTION_GROUPS_URL_DESC = "sectionGroupsUrl desc"
    SECTIONS_URL = "sectionsUrl"
    SECTIONS_URL_DESC = "sectionsUrl desc"
class Enum629(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum63(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort keys (name, optionally with ' desc') — apparently OData $orderby."""
    ID = "id"
    ID_DESC = "id desc"
    SELF = "self"
    SELF_DESC = "self desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    CREATED_BY = "createdBy"
    CREATED_BY_DESC = "createdBy desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTION_GROUPS_URL_DESC = "sectionGroupsUrl desc"
    SECTIONS_URL = "sectionsUrl"
    SECTIONS_URL_DESC = "sectionsUrl desc"
class Enum630(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum631(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    SECTION_GROUPS_URL = "sectionGroupsUrl"
    SECTIONS_URL = "sectionsUrl"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum632(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
    SECTION_GROUPS = "sectionGroups"
    SECTIONS = "sections"
class Enum633(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort keys (name, optionally with ' desc') — apparently OData $orderby."""
    ID = "id"
    ID_DESC = "id desc"
    SELF = "self"
    SELF_DESC = "self desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    CREATED_BY = "createdBy"
    CREATED_BY_DESC = "createdBy desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    IS_DEFAULT = "isDefault"
    IS_DEFAULT_DESC = "isDefault desc"
    LINKS = "links"
    LINKS_DESC = "links desc"
    PAGES_URL = "pagesUrl"
    PAGES_URL_DESC = "pagesUrl desc"
class Enum634(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    IS_DEFAULT = "isDefault"
    LINKS = "links"
    PAGES_URL = "pagesUrl"
    PAGES = "pages"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
class Enum635(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relationship names (or '*') — apparently OData $expand options."""
    ASTERISK = "*"
    PAGES = "pages"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
class Enum636(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property/navigation names — apparently OData $select options."""
    ID = "id"
    SELF = "self"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_BY = "createdBy"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    IS_DEFAULT = "isDefault"
    LINKS = "links"
    PAGES_URL = "pagesUrl"
    PAGES = "pages"
    PARENT_NOTEBOOK = "parentNotebook"
    PARENT_SECTION_GROUP = "parentSectionGroup"
class | |
in games
for slot in slots]) + slack_minus >= c_min,
name="GA1_min_" + str(slot) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
# Break constraints
if c_name == "BR1":
teams = [int(t) for t in constraint["teams"].split(';')]
slots = [int(s) for s in constraint["slots"].split(';')]
mode = constraint["mode2"]
intp = int(constraint["intp"])
penalty = int(constraint["penalty"])
if constraint["type"] == "HARD":
if mode == "A":
for team in teams:
constr = model.addConstr(gp.quicksum([get_break_var(team, slot, away=True)
for slot in slots if slot != 0]) <= intp,
name="BR1_" + str(team) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
elif mode == "H":
for team in teams:
constr = model.addConstr(gp.quicksum([get_break_var(team, slot, away=False)
for slot in slots if slot != 0]) <= intp,
name="BR1_" + str(team) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
else:
for team in teams:
constr = model.addConstr(gp.quicksum([get_break_var(team, slot, away=False) + get_break_var(team, slot, away=True)
for slot in slots if slot != 0]) <= intp,
name="BR1_" + str(team) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
elif not skipSoft:
if mode == "A":
for team in teams:
slack = model.addVar(vtype=GRB.INTEGER, obj=penalty)
constr = model.addConstr(gp.quicksum([get_break_var(team, slot, away=True)
for slot in slots if slot != 0]) - slack <= intp,
name="BR1_" + str(team) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
elif mode == "H":
for team in teams:
slack = model.addVar(vtype=GRB.INTEGER, obj=penalty)
constr = model.addConstr(gp.quicksum([get_break_var(team, slot, away=False)
for slot in slots if slot != 0]) - slack <= intp,
name="BR1_" + str(team) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
else:
for team in teams:
slack = model.addVar(vtype=GRB.INTEGER, obj=penalty)
constr = model.addConstr(gp.quicksum([get_break_var(team, slot, away=False) + get_break_var(team, slot, away=True)
for slot in slots if slot != 0]) - slack <= intp,
name="BR1_" + str(team) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
if c_name == "BR2":
teams = [int(t) for t in constraint["teams"].split(';')]
slots = [int(s) for s in constraint["slots"].split(';')]
intp = int(constraint["intp"])
penalty = int(constraint["penalty"])
if constraint["type"] == "HARD":
constr = model.addConstr(gp.quicksum([get_break_var(team, slot, away=False) + get_break_var(team, slot, away=True)
for team in teams
for slot in slots if slot != 0]) <= intp,
name="BR2_" + str(ind))
if lazy:
constr.Lazy = lazy
elif not skipSoft:
slack = model.addVar(vtype=GRB.INTEGER, obj=penalty)
constr = model.addConstr(gp.quicksum([get_break_var(team, slot, away=False) + get_break_var(team, slot, away=True)
for team in teams
for slot in slots if slot != 0]) - slack <= intp,
name="BR2_" + str(ind))
if lazy:
constr.Lazy = lazy
# Fairness constraints
if c_name == "FA2":
teams = [int(t) for t in constraint["teams"].split(';')]
slots = sorted([int(s) for s in constraint["slots"].split(';')])
intp = int(constraint["intp"])
penalty = int(constraint["penalty"])
if constraint["type"] == "HARD":
for team1 in teams:
for team2 in teams:
if team1 == team2:
continue
for slot in slots:
constr = model.addConstr(gp.quicksum([m_vars[team1,i,j]
for i in range(n_teams) if i != team1
for j in range(slot + 1)]) - \
gp.quicksum([m_vars[team2,i,j]
for i in range(n_teams) if i != team2
for j in range(slot + 1)]) <= intp,
name="FA2_1_" + str(team1) + "_" + str(team2) + "_" + str(slot) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
constr = model.addConstr(gp.quicksum([m_vars[team2,i,j]
for i in range(n_teams) if i != team1
for j in range(slot + 1)]) - \
gp.quicksum([m_vars[team1,i,j]
for i in range(n_teams) if i != team2
for j in range(slot + 1)]) <= intp,
name="FA2_2_" + str(team1) + "_" + str(team2) + "_" + str(slot) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
elif not skipSoft:
for team1 in teams:
for team2 in teams:
if team1 == team2:
continue
largest_diff_var = model.addVar(vtype=GRB.INTEGER, name="ldiff_" + str(team1) + "_" + str(team2))
slack = model.addVar(vtype=GRB.INTEGER, obj=penalty)
constr = model.addConstr(largest_diff_var - slack <= intp)
if lazy:
constr.Lazy = lazy
for slot in slots:
diff_var = model.addVar(vtype=GRB.INTEGER, name="diff_" + str(team1) + "_" + str(team2) + "_" + str(slot))
constr = model.addConstr(gp.quicksum([m_vars[team1,i,j]
for i in range(n_teams) if i != team1
for j in range(slot + 1)]) - \
gp.quicksum([m_vars[team2,i,j]
for i in range(n_teams) if i != team2
for j in range(slot + 1)]) <= diff_var,
name="FA2_1_" + str(team1) + "_" + str(team2) + "_" + str(slot) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
constr = model.addConstr(gp.quicksum([m_vars[team2,i,j]
for i in range(n_teams) if i != team2
for j in range(slot + 1)]) - \
gp.quicksum([m_vars[team1,i,j]
for i in range(n_teams) if i != team1
for j in range(slot + 1)]) <= diff_var,
name="FA2_2_" + str(team1) + "_" + str(team2) + "_" + str(slot) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
constr = model.addConstr(diff_var <= largest_diff_var, name="FA2_3_" + str(team1) + "_" + str(team2) + "_" + str(slot) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
# Separation constraints
if c_name == "SE1":
teams = [int(t) for t in constraint["teams"].split(';')]
penalty = int(constraint["penalty"])
c_min = int(constraint["min"])
if constraint["type"] == "HARD":
raise Exception("The HARD version of constraint SE1 is not implemented!")
elif not skipSoft:
for i in range(len(teams)):
for j in range(i + 1, len(teams)):
sepc_var = model.addVar(vtype=GRB.INTEGER, name="sep_" + str(teams[i]) + "_" + str(teams[j]))
min1_var = model.addVar(vtype=GRB.BINARY, name="min1_" + str(teams[i]) + "_" + str(teams[j]))
min2_var = model.addVar(vtype=GRB.BINARY, name="min2_" + str(teams[i]) + "_" + str(teams[j]))
slack = model.addVar(vtype=GRB.INTEGER, obj=penalty)
constr = model.addConstr(sepc_var - slack <= - c_min - 1 + n_slots)
if lazy:
constr.Lazy = lazy
constr = model.addConstr(gp.quicksum([slot * m_vars[teams[i],teams[j],slot]
for slot in range(n_slots)]) - \
gp.quicksum([slot * m_vars[teams[j],teams[i],slot]
for slot in range(n_slots)]) + n_slots <= sepc_var + min1_var * 2 * n_slots,
name="SE1_1_" + str(teams[i]) + "_" + str(teams[j]) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
constr = model.addConstr(gp.quicksum([slot * m_vars[teams[j],teams[i],slot]
for slot in range(n_slots)]) - \
gp.quicksum([slot * m_vars[teams[i],teams[j],slot]
for slot in range(n_slots)]) + n_slots <= sepc_var + min2_var * 2 * n_slots,
name="SE1_2_" + str(teams[i]) + "_" + str(teams[j]) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
constr = model.addConstr(min1_var + min2_var == 1, name="SE1_3_" + str(teams[i]) + "_" + str(teams[j]) + "_" + str(ind))
if lazy:
constr.Lazy = lazy
if debug:
model.update()
print("Num vars: " + str(model.NumVars))
print("Num constraints: " + str(model.NumConstrs))
if debug:
print("Writing problem to file...")
model.write("problem.lp")
#model.setParam("OutputFlag", 0)
# Setting up callback function to retrieve feasible solutions
    def callbackGetIncumbent(model, where):
        """Gurobi callback: validate and save each new incumbent solution.

        Closes over m_vars, n_teams, n_slots and prob from the enclosing
        scope. On every MIPSOL event it rebuilds the schedule from the
        incumbent, re-validates all constraints with the independent
        validator, prints any HARD-constraint violations together with
        both objective values, and writes the incumbent to an XML file.
        """
        if where == GRB.Callback.MIPSOL:
            solcnt = model.cbGet(GRB.Callback.MIPSOL_SOLCNT)
            obj = model.cbGet(GRB.Callback.MIPSOL_OBJ)
            x = model.cbGetSolution(m_vars)
            solution = make_solution(x, n_teams, n_slots)
            infeasibilities = []
            obj_v = 0
            for constraint in prob.constraints:
                violated,_,penalty = validate_constraint(prob, solution, constraint)
                if violated and constraint[1]["type"] == "HARD":
                    infeasibilities.append(constraint[0])
                # NOTE(review): penalty is accumulated for every constraint,
                # not only violated ones — presumably it is 0 when satisfied;
                # confirm against validate_constraint.
                obj_v += penalty
            print("Infeasibilities: {}, Obj Validator: {}, Obj Gurobi: {}".format(len(infeasibilities), obj_v, obj))
            print(infeasibilities)
            write_solution("solution_{}.xml".format(solcnt), prob, x, obj)
# Solution pool
if skipSoft:
model.setParam("PoolSolutions", 100)
model.setParam("PoolSearchMode", 2)
model.setParam("MIPFocus", 1)
model.setParam("Heuristics", 0.5)
# Tuning parameters
model.setParam("Presolve", 2)
model.setParam("Symmetry", 2)
model.setParam("GomoryPasses", 1)
model.setParam("PrePasses", 2)
if debug:
print("Solving...")
# Optimize
if skipSoft:
model.optimize(callbackGetIncumbent)
else:
model.optimize()
write_status(model)
if (model.status == GRB.OPTIMAL):
solution = make_solution(m_vars, n_teams, n_slots)
if debug:
print_solution(solution)
write_solution("solution.xml", prob, m_vars, model.objVal)
obj = 0
for constraint in prob.constraints:
violated,diff,penalty = validate_constraint(prob, solution, constraint)
obj += penalty
print(constraint[0], (violated,diff,penalty))
print("Obj validator: " + str(obj))
def write_status(model: gp.Model):
    """Print the Gurobi termination status in a human-readable form."""
    # Simple statuses map directly to a fixed message.
    plain_messages = {
        GRB.INF_OR_UNBD: 'Model is infeasible or unbounded',
        GRB.INFEASIBLE: 'Model is infeasible',
        GRB.UNBOUNDED: 'Model is unbounded',
    }
    if model.status == GRB.OPTIMAL:
        # Optimality additionally reports the objective value.
        print('Optimal objective: %g' % model.objVal)
    elif model.status in plain_messages:
        print(plain_messages[model.status])
    else:
        print('Optimization ended with status %d' % model.status)
def make_solution(m_vars, n_teams, n_slots):
    """Recover the schedule from the model's match variables.

    Returns a list with one entry per slot; each entry is the list of
    (home, away) pairs whose variable value exceeds 0.5. Entries of
    m_vars may be either Gurobi variables or plain numeric values.
    """
    def _value(entry):
        # Unwrap a Gurobi variable to its solution value if necessary.
        return entry.x if isinstance(entry, gp.Var) else entry

    schedule = []
    for round_idx in range(n_slots):
        schedule.append([(home, away)
                         for home in range(n_teams)
                         for away in range(n_teams)
                         if home != away
                         and _value(m_vars[home, away, round_idx]) > 0.5])
    return schedule
def print_solution(solution):
    """Display the schedule, one line of (home,away) pairs per slot."""
    for round_idx, fixtures in enumerate(solution):
        print("Slot {}:".format(round_idx))
        for home, away in fixtures:
            print("({},{})".format(home, away), end=' ')
| |
0 and randomCharacter == "0":
randomCharacter = aRandom.choice(characters)
chosenCharacters.append(randomCharacter)
return "".join(chosenCharacters)
def randomString(alphabet, length=None, maxLength=20):
    """Generate a random string of characters from a given alphabet.

    This function generates and returns a random string of characters
    from a given alphabet, where the length of the string can be
    specified or can also be selected randomly. The individual
    characters in the string are selected uniformly at random.

    Args:
        alphabet (list of characters): A list of characters in the
            alphabet to be used.
        length (int): The desired length of the string. Defaults to
            None. If None, the length of the string will be chosen
            uniformly at random between 0 and maxLength inclusive.
        maxLength: Upper bound used when the length is chosen at
            random. This parameter is only relevant if length is None.
            NOTE(review): random.randint's upper bound is inclusive, so
            the maximum random length is maxLength itself, not
            maxLength-1 as previous documentation claimed — confirm
            which behavior was intended.

    Returns:
        str: The randomly generated string.
    """
    if length is None:
        length = aRandom.randint(0, maxLength)
    return "".join(aRandom.choice(alphabet) for _ in range(length))
def asciiAlphabetAsList():
    """Return a list consisting of the 128 ASCII characters"""
    return [chr(code) for code in range(128)]
# Precomputed once at import time; used as the default alphabet by nextASCII().
ASCII_ALPHABET = asciiAlphabetAsList()
"""A list consisting of the 128 ASCII characters"""
def geneticAlphabetAsList():
    """Return a list consisting of the 4 characters 'A', 'C', 'G', 'T'"""
    return list("ACGT")
def boolToYes(b):
    """Convert a Boolean input into 'yes' or 'no'

    Args:
        b (bool): The Boolean value to be converted

    Returns:
        str: 'yes' if b is True, and 'no' otherwise.
    """
    return "yes" if b else "no"
def nextShortLex(s, alphabet):
    """Return the next string in shortlex ordering on a given alphabet.

    Shortlex is an ordering that lists strings according to length,
    with strings of the same length being ordered
    lexicographically. This function takes a string on some particular
    alphabet as input, and returns the next string on that alphabet in
    the shortlex ordering.

    Args:
        s (str): The string whose successor will be returned.
        alphabet (list of characters): A list of characters in the
            alphabet to be used.

    Returns:
        str: The successor of s in the shortlex ordering, assuming the
        given alphabet.

    Example:
        >>> nextShortLex('aab', ['a', 'b', 'c'])
        'aac'
        >>> nextShortLex('ccc', ['a', 'b', 'c'])
        'aaaa'
    """
    first = alphabet[0]
    last = alphabet[-1]
    # The empty string's successor is the first one-character string.
    if s == "":
        return str(first)
    chars = list(s)
    L = len(chars)
    # Scan from the right for the first character that can still be
    # incremented (i.e. is not the last symbol of the alphabet).
    for i in range(L - 1, -1, -1):
        if chars[i] != last:
            # Increment that character, then roll every character to its
            # right over to the first symbol of the alphabet.
            chars[i] = alphabet[alphabet.index(chars[i]) + 1]
            for j in range(i + 1, L):
                chars[j] = first
            return "".join(chars)
    # Every character was the last symbol of the alphabet, so this was the
    # final string of its length: "overflow" to the first string of
    # length L+1, consisting entirely of the first alphabet character.
    return first * (L + 1)
def nextASCII(s):
    """Return the shortlex successor of s over the ASCII alphabet.

    Equivalent to nextShortLex(s, ASCII_ALPHABET); see nextShortLex()
    for the full description of shortlex ordering (shorter strings
    first, ties broken lexicographically).

    Args:
        s (str): The ASCII string whose successor will be returned.

    Returns:
        str: The next ASCII string in shortlex order.
    """
    return nextShortLex(s, ASCII_ALPHABET)
# Enter supposedly infinite loop. In fact, we exit if the event
# haltComputations is signalled, or if the fixed timeout expires.
# This helps to prevent problems with automated testing of code that
# enters infinite loops.
def loop():
    """Simulate an infinite loop in a way that cooperates with tests.

    Rather than spinning forever, this blocks on the module-level
    utils.haltComputations event and returns as soon as that event is
    signaled, so tests can terminate deliberately "infinite" loops.  A
    hard 60 second timeout also applies, preventing background threads
    from lingering indefinitely.  The event is cleared on exit so
    subsequent computations start fresh.
    """
    # One minute is ample for any test run; wait returns early if the
    # haltComputations event is set.
    haltComputations.wait(60)
    haltComputations.clear()
def invokeAndStoreResult(fn, q, done, *inStrings):
    """Call fn(*inStrings), store the result in q, then signal done.

    Intended as the thread target used by utils.runWithTimeout().  The
    return value travels through a queue because queues are safe to
    share between threads; at most one item is ever placed in it.  The
    event lets other threads detect completion and collect the result.

    Args:
        fn (a function): The function that will be invoked.
        q (a Python queue.Queue): Receives fn's return value (at most
            one item is ever put here).
        done (a Python threading.Event): Signaled once fn has returned.
        *inStrings: Arguments forwarded to fn.
    """
    q.put(fn(*inStrings))
    done.set()
def runWithTimeout(timeout, fn, *inStrings):
    """Invoke a function with a timeout.

    This invokes a function (which itself is passed in as a parameter)
    with certain arguments (also passed in as parameters). If the
    function completes within the given timeout, its return value is
    returned. Otherwise, None is returned.

    Args:
        timeout (float): The number of seconds before the function
            invocation times out. If None, this is set to a standard
            value for running unit tests.
        fn (a function): The function that will be invoked.
        *inStrings: A variable number of arguments that will be passed
            on to fn.

    Returns:
        object: None if fn times out, otherwise the return value of fn.
    """
    # PEP 8: compare against None with identity, not equality.
    if timeout is None:
        timeout = TEST_TIMEOUT
    # a queue for storing the return value of fn
    q = queue.Queue()
    # an event for signaling when fn has completed
    done = threading.Event()
    # run fn in a separate thread so we can stop waiting on timeout
    t = threading.Thread(target=invokeAndStoreResult,
                         args=(fn, q, done) + inStrings)
    t.start()
    # wait for either the function to complete, or the duration of the
    # timeout, whichever is earlier
    done.wait(timeout)
    # If it's a long-running computation that knows about the
    # haltComputations event, tell it to stop now.
    haltComputations.set()
    # Reset for future computations
    haltComputations.clear()
    # an empty queue means fn never finished, so report None
    return None if q.empty() else q.get()
def formatASet(theSet):
    """Format a set of strings as a single brace-delimited string.

    Elements are joined with commas and wrapped in braces.  Note that
    set iteration order is arbitrary, so element order in the output is
    unspecified.

    Args:
        theSet (set of str): The set to be formatted.

    Returns:
        str: theSet rendered as '{elem1,elem2,...}'.

    Example:
        >>> formatASet({'abc'})
        '{abc}'
    """
    return "{%s}" % ",".join(theSet)
def formatSetOfSets(theSets):
"""Format a set of frozensets of strings as a single string.
Each frozenset of strings is formatted using utils.formatASet(),
and | |
# gh_stars: 10-100
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Expression Intrinsics and math functions in TVM."""
# pylint: disable=redefined-builtin
from __future__ import absolute_import as _abs
from ._ffi.function import register_func as _register_func
from . import make as _make
from .api import convert, const
from .expr import Call as _Call
from .schedule import Buffer as _Buffer
def _pack_buffer(buf):
    """Pack a Buffer into a tvm_stack_make_array intrinsic call.

    Builds the shape (and, when present, strides) via the
    tvm_stack_make_shape intrinsic, then wraps everything in a
    tvm_stack_make_array call so the buffer can be passed to a packed
    function.
    """
    assert buf.shape
    shape = _make.Call("handle", "tvm_stack_make_shape", buf.shape,
                       _Call.Intrinsic, None, 0)
    if buf.strides:
        strides = _make.Call("handle", "tvm_stack_make_shape", buf.strides,
                             _Call.Intrinsic, None, 0)
    else:
        # a zero stride handle means "compact layout" downstream
        strides = 0
    pack_args = [
        buf.data,
        shape,
        strides,
        len(buf.shape),
        const(0, dtype=buf.dtype),
        buf.elem_offset,
    ]
    return _make.Call("handle", "tvm_stack_make_array",
                      pack_args, _Call.Intrinsic, None, 0)
def call_packed(*args):
    """Build an expression that calls an external packed function.

    Each argument may be an Expr or a Buffer.  Expr arguments are passed
    through as the corresponding POD type.  Buffer arguments are packed
    so the PackedFunc will receive a TVMArrayHandle whose content is
    valid during the callback period; if the PackedFunc is a Python
    callback, the corresponding argument is an NDArray.

    Parameters
    ----------
    args : list of Expr or Buffer.
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.

    See Also
    --------
    tvm.extern : Create tensor with extern function call.
    """
    call_args = []
    for arg in args:
        # Buffers must be packed into array handles; plain Exprs pass through.
        call_args.append(_pack_buffer(arg) if isinstance(arg, _Buffer) else arg)
    return _make.Call(
        "int32", "tvm_call_packed", call_args, _Call.Intrinsic, None, 0)
def call_pure_intrin(dtype, func_name, *args):
    """Build expression by calling a pure intrinsic function.

    Intrinsics can be overloaded with multiple data types via
    the intrinsic translation rule.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name: str
        The intrinsic function name.

    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    # Convert once; the original converted the argument tuple twice.
    args = convert(args)
    return _make.Call(
        dtype, func_name, args, _Call.PureIntrinsic, None, 0)
def call_intrin(dtype, func_name, *args):
    """Build expression by calling an intrinsic function.

    Intrinsics can be overloaded with multiple data types via
    the intrinsic translation rule.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name: str
        The intrinsic function name.

    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    # Convert once; the original converted the argument tuple twice.
    args = convert(args)
    return _make.Call(
        dtype, func_name, args, _Call.Intrinsic, None, 0)
def call_pure_extern(dtype, func_name, *args):
    """Build an expression calling a pure external function.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name: str
        The extern function name.

    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    converted_args = convert(args)
    return _make.Call(
        dtype, func_name, converted_args, _Call.PureExtern, None, 0)
def call_extern(dtype, func_name, *args):
    """Build an expression calling an extern function.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name: str
        The extern function name.

    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    converted_args = convert(args)
    return _make.Call(
        dtype, func_name, converted_args, _Call.Extern, None, 0)
def call_llvm_intrin(dtype, name, *args):
    """Build an expression calling an LLVM intrinsic by name.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    name : str
        The name of the llvm intrinsic function.

    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    # Imported locally to avoid a circular import at module load time.
    import tvm
    llvm_id = tvm.codegen.llvm_lookup_intrinsic_id(name)
    assert llvm_id != 0, "%s is not an LLVM intrinsic" % name
    intrin_id = tvm.const(llvm_id, 'uint32')
    return call_pure_intrin(dtype, 'llvm_intrin', intrin_id, *args)
def exp(x):
    """Element-wise exponential, e**x.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing exp(x), same dtype as x.
    """
    return call_pure_intrin(x.dtype, "exp", x)
def erf(x):
    """Gauss error function of x.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing erf(x), same dtype as x.
    """
    return call_pure_intrin(x.dtype, "erf", x)
def tanh(x):
    """Hyperbolic tangent of x.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing tanh(x), same dtype as x.
    """
    return call_pure_intrin(x.dtype, "tanh", x)
def sigmoid(x):
    """Logistic sigmoid of x, 1 / (1 + exp(-x)).

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing sigmoid(x), same dtype as x.
    """
    return call_pure_intrin(x.dtype, "sigmoid", x)
def log(x):
    """Natural logarithm of x.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing log(x), same dtype as x.
    """
    return call_pure_intrin(x.dtype, "log", x)
def cos(x):
    """Cosine of x.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing cos(x), same dtype as x.
    """
    return call_pure_intrin(x.dtype, "cos", x)
def sin(x):
    """Sine of x.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing sin(x), same dtype as x.
    """
    return call_pure_intrin(x.dtype, "sin", x)
def atan(x):
    """Arc tangent of x.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing atan(x), same dtype as x.
    """
    return call_pure_intrin(x.dtype, "atan", x)
def sqrt(x):
    """Square root of x.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing sqrt(x), same dtype as x.
    """
    return call_pure_intrin(x.dtype, "sqrt", x)
def rsqrt(x):
    """Reciprocal square root of x, 1 / sqrt(x).

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing rsqrt(x), same dtype as x.
    """
    return call_pure_intrin(x.dtype, "rsqrt", x)
def floor(x):
    """Floor of a float expression x.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing floor(x).
    """
    return _make.floor(x)
def ceil(x):
    """Ceiling of a float expression x.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing ceil(x).
    """
    return _make.ceil(x)
def trunc(x):
    """Truncate x toward zero.

    The truncated value of a scalar x is the nearest integer that is
    closer to zero than x is.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing trunc(x).
    """
    return _make.trunc(x)
def abs(x):
    """Element-wise absolute value (mirrors the builtin of the same name).

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing abs(x).
    """
    return _make.abs(x)
def round(x):
    """Round x to the nearest integer (mirrors the builtin of the same name).

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing round(x).
    """
    return _make.round(x)
def nearbyint(x):
    """Round x to the nearest integer using llvm.nearbyint.

    Faster than tvm.round (llvm.round) but may give different results:
    nearbyint honors the current floating-point rounding mode, whereas
    llvm.round ignores it.  See:
    https://en.cppreference.com/w/cpp/numeric/math/round
    https://en.cppreference.com/w/cpp/numeric/math/nearbyint

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing nearbyint(x).
    """
    return _make.nearbyint(x)
def isnan(x):
    """Test whether x is NaN.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Boolean expression, true where x is NaN.
    """
    return _make.isnan(x)
def power(x, y):
    """Raise x to the power y.

    Parameters
    ----------
    x : Expr
        The base.

    y : Expr
        The exponent.

    Returns
    -------
    z : Expr
        Expression computing x ** y.
    """
    base, exponent = convert(x), convert(y)
    return _make._OpPow(base, exponent)
def popcount(x):
    """Count the set bits of x.

    Parameters
    ----------
    x : Expr
        The input expression.

    Returns
    -------
    y : Expr
        Expression computing the population count of x.
    """
    return call_pure_intrin(x.dtype, "popcount", x)
def fmod(x, y):
    """Floating-point remainder of x / y, carrying the sign of x.

    Parameters
    ----------
    x : Expr
        The dividend.

    y : Expr
        The divisor.

    Returns
    -------
    z : Expr
        Expression computing fmod(x, y).
    """
    return call_pure_intrin(x.dtype, "fmod", x, y)
def if_then_else(cond, t, f):
"""Conditional selection expression.
Parameters
----------
cond : Expr
The condition
t : Expr
The result expression if cond is true.
f : Expr
The result expression if cond is false.
Returns
-------
result : Node
The result of conditional expression.
Note
----
Unlike Select, if_then_else will not execute
the branch that does not satisfy the condition.
You can use it to guard against out of bound access.
Unlike Select, if_then_else cannot be vectorized
if some lanes in the vector have | |
# filename: odincal/odincal/level1b_exporter_32.py
import numpy
import copy
from odin import odin
from pg import DB
from sys import stderr, stdout, stdin, argv, exit
import os
import string
class db(DB):
    """Connection to the 'odin' database on malachite.rss.chalmers.se.

    The password is taken from the ODINDB_PASSWD environment variable so
    credentials are never hard-coded in the source.
    """

    def __init__(self):
        passwd = os.getenv('ODINDB_PASSWD')
        # Bug fix: the password read above was never passed through (a
        # redaction placeholder sat here, which is not valid Python).
        DB.__init__(
            self,
            dbname='odin',
            user='odinop',
            host='malachite.rss.chalmers.se',
            passwd=passwd)
class Calibration_spectrum():
    """Plain container for one cached level1c calibration spectrum.

    Every field starts out as an independent empty list and is filled in
    by Calibration_step2.get_db_data() when a matching database row is
    loaded.
    """

    def __init__(self):
        # Create a fresh list per attribute so instances never share state.
        for attr in ('backend', 'frontend', 'version', 'intmode',
                     'sourcemode', 'freqmode', 'ssb_fq',
                     'altitude_range', 'hotload_range', 'spectrum'):
            setattr(self, attr, [])
class Orbit_data_exporter():
def __init__(self, backend, orbit, con):
self.backend = backend
self.orbit = orbit
self.con = con
def get_db_data(self):
'''export orbit data from database tables'''
# find min and max stws from orbit
query = self.con.query('''
select min(foo.stw),max(foo.stw) from
(select stw from attitude_level1 where
orbit>={0} and orbit<{0}+1 order by stw) as foo
'''.format(self.orbit))
result = query.dictresult()
print result
if result[0]['max'] is None:
print 'no attitude data from orbit ' + str(orbit)
return 0
# find out which scans that starts in the orbit
if self.backend == 'AC1':
stwoff = 1
else:
stwoff = 0
temp = [result[0]['min'], result[0]['max'], self.backend, stwoff]
if self.backend == 'AC1':
query = self.con.query('''
select min(ac_level0.stw),max(ac_level0.stw)
from ac_level0
natural join getscansac1({0},{1}+16*60*45)
where start>={0} and start<={1} and backend='{2}'
'''.format(*temp))
if self.backend == 'AC2':
query = self.con.query('''
select min(ac_level0.stw),max(ac_level0.stw)
from ac_level0
natural join getscansac2({0},{1}+16*60*45)
where start>={0} and start<={1} and backend='{2}'
'''.format(*temp))
result = query.dictresult()
print result
if result[0]['max'] is None:
print 'no data from ' + backend + ' in orbit ' + str(orbit)
return 0
# extract all target spectrum data for the orbit
temp = [result[0]['min'], result[0]['max'], self.backend]
query = self.con.query('''
select calstw,stw,backend,orbit,mjd,lst,intmode,spectra,
alevel,version,channels,skyfreq,lofreq,restfreq,maxsuppression,
tsys,sourcemode,freqmode,efftime,sbpath,latitude,longitude,
altitude,skybeamhit,ra2000,dec2000,vsource,qtarget,qachieved,
qerror,gpspos,gpsvel,sunpos,moonpos,sunzd,vgeo,vlsr,ssb_fq,
inttime,ac_level1b.frontend,hotloada,lo,sig_type,ac_level1b.soda
from ac_level1b
join attitude_level1 using (backend,stw)
join ac_level0 using (backend,stw)
join shk_level1 using (backend,stw)
where stw>={0} and stw<={1} and backend='{2}' and version=8
and sig_type='SIG'
order by stw asc,intmode asc'''.format(*temp))
result = query.dictresult()
print len(result)
# extract all calibration spectrum data for the orbit
query2 = self.con.query('''
select stw,backend,orbit,mjd,lst,intmode,spectra,alevel,version,
channels,spectype,skyfreq,lofreq,restfreq,maxsuppression,
sourcemode,freqmode,sbpath,latitude,longitude,altitude,tspill,
skybeamhit,ra2000,dec2000,vsource,qtarget,qachieved,qerror,
gpspos,gpsvel,sunpos,moonpos,sunzd,vgeo,vlsr,ssb_fq,inttime,
ac_cal_level1b.frontend,hotloada,lo,sig_type,ac_cal_level1b.soda
from ac_cal_level1b
join attitude_level1 using (backend,stw)
join ac_level0 using (backend,stw)
join shk_level1 using (backend,stw)
where stw>={0} and stw<={1} and backend='{2}' and version=8
order by stw asc,intmode asc,spectype asc'''.format(*temp))
result2 = query2.dictresult()
print len(result2)
if result == [] or result2 == []:
print 'could not extract all necessary data for processing ' + \
backend + ' in orbit ' + orbit
return 0
# combine target and calibration data
# list of both target and calibration spectrum data
self.specdata = []
# list of calstw that tells which scan a spectra belongs to
self.scaninfo = []
for ind, row2 in enumerate(result2):
# fist add calibration spectrum
self.specdata.append(row2)
self.scaninfo.append(row2['stw'])
if ind < len(result2) - 1:
if result2[ind]['stw'] == result2[ind + 1]['stw']:
continue
for row in result:
if row['calstw'] == row2['stw']:
self.scaninfo.append(row['calstw'])
self.specdata.append(row)
return 1
def decode_data(self):
self.spectra = []
# write data to the odinscan structure for each spectra
for ind, res in enumerate(self.specdata):
a = odin.Spectrum()
a.version = 262 # 1
a.level = res['alevel'] + res['version'] # 2
a.quality = 0 # 3
a.stw = res['stw'] % 2**32 # 4
a.mjd = res['mjd'] # 5
a.orbit = res['orbit'] # 6
a.lst = res['lst'] # 7
a.source = res['sourcemode'].replace(
'STRAT', 'stratospheric').replace(
'ODD_H', 'Odd hydrogen').replace(
'ODD_N', 'Odd nitrogen').replace(
'WATER', 'Water isotope').replace(
'SUMMER', 'Summer mesosphere').replace(
'DYNAM', 'Transpoort') + ' FM=' + str(res['freqmode']) # 8
a.discipline = 'AERO' # 9
a.topic = res['sourcemode'] # 10
a.spectrum = ind + 1 # 11
a.obsmode = 'SSW' # 12
try:
if res['spectype'] == 'CAL' or res['spectype'] == 'SSB': # 13
a.type = res['spectype']
except BaseException:
a.type = 'SPE'
a.frontend = res['frontend'] # 14
a.backend = res['backend'] # 15
a.skybeamhit = res['skybeamhit'] # 16
a.ra2000 = res['ra2000'] # 17
a.dec2000 = res['dec2000'] # 18
a.vsource = res['vsource'] # 19
a.longitude = res['longitude'] # 20
a.latitude = res['latitude'] # 21
a.altitude = res['altitude'] # 22
a.qtarget = res['qtarget'] # 23
a.qachieved = res['qachieved'] # 24
a.qerror = res['qerror'] # 25
a.gpspos = res['gpspos'] # 26
a.gpsvel = res['gpsvel'] # 27
a.sunpos = res['sunpos'] # 28
a.moonpos = res['moonpos'] # 29
a.sunzd = res['sunzd'] # 30
a.vgeo = res['vgeo'] # 31
a.vlsr = res['vlsr'] # 32
a.tcal = res['hotloada'] # 33
try:
a.tsys = res['tsys'] # 34
except BaseException:
a.tsys = 0
a.sbpath = res['sbpath'] # 35
a.lofreq = res['lofreq'] # 36
a.skyfreq = res['skyfreq'] # 37
a.restfreq = res['restfreq'] # 38
a.maxsup = res['maxsuppression'] # 39
a.soda = res['soda'] # 40
a.freqres = 1000000.0 # 41
ssb_fq = res['ssb_fq']
for iq in range(len(ssb_fq)):
ssb_fq[iq] = ssb_fq[iq] * 1e6
a.freqcal = ssb_fq # 42
a.intmode = res['intmode'] # 43
a.inttime = res['inttime'] # 44
try:
a.efftime = res['efftime'] # 45
except BaseException:
a.efftime = 0
a.channels = res['channels'] # 46
a.data = numpy.ndarray(shape=(res['channels'],), dtype='float64',
buffer=con.res['spectra'])
self.spectra.append(a)
def planck(T, f):
    """Planck brightness temperature (K) of a blackbody at T for frequency f.

    Returns T0 / (exp(T0 / T) - 1) where T0 = h*f/k, or 0.0 for
    non-positive temperatures.
    """
    PLANCK_H = 6.626176e-34      # Planck constant (Js)
    BOLTZMANN_K = 1.380662e-23   # Boltzmann constant (J/K)
    t0 = PLANCK_H * f / BOLTZMANN_K
    if T > 0.0:
        return t0 / (numpy.exp(t0 / T) - 1.0)
    return 0.0
def freq(lofreq, skyfreq, LO):
    """Build the 896-channel frequency axis (GHz) for a correlator spectrum.

    Each of the 8 ADCs contributes 112 channels at 1 MHz spacing, offset
    by the subband LO of its ADC pair; the sign pattern in `seq` decides
    whether a subband runs upward or downward in IF frequency.  The IF
    axis is then mapped to sky frequency using the upper or lower
    sideband depending on whether skyfreq is above lofreq.

    Parameters: lofreq, skyfreq in Hz; LO a sequence of (at least) 4
    subband LO frequencies in Hz.  Returns a numpy array of 896 sky
    frequencies in GHz.
    """
    n = 896
    f = numpy.zeros(shape=(n,))
    seq = [1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1]
    m = 0
    for adc in range(8):
        if seq[2 * adc]:
            k = seq[2 * adc] * 112
            df = 1.0e6 / seq[2 * adc]
            if seq[2 * adc + 1] < 0:
                df = -df
            for j in range(k):
                # Bug fix: use floor division so the LO index stays an
                # int on Python 3 as well (adc / 2 is a float there).
                f[m + j] = LO[adc // 2] + j * df
            m += k
    # Map IF to sky frequency (GHz): upper sideband adds, lower subtracts.
    if skyfreq >= lofreq:
        fdata = (lofreq + f) / 1.0e9
    else:
        fdata = (lofreq - f) / 1.0e9
    return fdata
class Calibration_step2():
    """Second calibration step: remove the sky-beam ripple from spectra.

    Median-fit correction spectra are loaded lazily from the
    ac_cal_level1c table and cached in self.spectra, so repeated
    requests for the same configuration hit the database only once.
    """

    def __init__(self, con):
        # con: open database connection used for ac_cal_level1c lookups
        self.con = con
        # cache of previously loaded correction spectra; seeded with one
        # empty Calibration_spectrum (its [] fields never match a query)
        self.spectra = [Calibration_spectrum()]

    def get_db_data(self, backend, frontend, version, intmode,
                    sourcemode, freqmode, ssb_fq, altitude_range, hotload):
        # Look up (or load and cache) the median-fit correction spectrum
        # for this instrument configuration; the result is stored in
        # self.spec for use by calibration_step2().
        # Bracket the hot-load temperature to the enclosing 1 K interval,
        # formatted as the postgres range literal '{lower,upper}'.
        hotload_lower = int(numpy.floor(hotload))
        hotload_upper = int(numpy.ceil(hotload))
        hotload_range = '''{{{0},{1}}}'''.format(
            *[hotload_lower, hotload_upper])
        temp = [backend, frontend, version, intmode, sourcemode, freqmode,
                ssb_fq, altitude_range, hotload_range]
        # find out if we already have required data
        for ind, spec in enumerate(self.spectra):
            if (spec.backend == backend and
                    spec.frontend == frontend and
                    spec.version == version and
                    spec.intmode == intmode and
                    spec.sourcemode == sourcemode and
                    spec.freqmode == freqmode and
                    spec.ssb_fq == ssb_fq and
                    spec.altitude_range == altitude_range and
                    spec.hotload_range == hotload_range):
                # cache hit: reuse the previously loaded spectrum
                self.spec = spec
                return
        # now we did not have the required data, so load it
        query = self.con.query('''
                  select hotload_range,median_fit,channels
                  from ac_cal_level1c where backend='{0}' and
                  frontend='{1}' and version={2} and intmode={3}
                  and sourcemode='{4}' and freqmode={5} and ssb_fq='{6}' and
                  altitude_range='{7}' and hotload_range='{8}'
                               '''.format(*temp))
        result = query.dictresult()
        if result == []:
            # no correction available for this configuration: a scalar 0
            # makes calibration_step2() effectively a no-op
            medianfit = 0.0
        else:
            # reinterpret the raw bytes as a float64 channel vector
            medianfit = numpy.ndarray(shape=(result[0]['channels'],),
                                      dtype='float64',
                                      buffer=result[0]['median_fit'])
        self.spec = Calibration_spectrum()
        self.spec.backend = backend
        self.spec.frontend = frontend
        self.spec.version = version
        self.spec.intmode = intmode
        self.spec.sourcemode = sourcemode
        self.spec.freqmode = freqmode
        self.spec.ssb_fq = ssb_fq
        self.spec.altitude_range = altitude_range
        self.spec.hotload_range = hotload_range
        self.spec.spectrum = medianfit
        # remember it for future calls
        self.spectra.append(self.spec)

    def calibration_step2(self, spec, tspill):
        # Apply the cached ripple correction to one spectrum in place.
        # tspill: spillover temperature used to estimate beam efficiency.
        # compensate for ripple on sky beam signal
        t_load = planck(spec.tcal, spec.skyfreq)
        t_sky = planck(2.7, spec.skyfreq)
        eta = 1 - tspill / 300.0  # main beam efficeiency
        # w weights the correction by how far the signal sits below the
        # hot-load brightness
        w = 1 / eta * (1 - (spec.data) / (t_load))
        spec.data = spec.data - w * self.spec.spectrum
        return spec
def create_hdf_file(spectra, outfile):
# store data to file
# -first store a binary file for each spectra, and produce
# a file with a list of all files
# -then produce a level1b hdf-file
basepath = '/home/molflow/odincal'
# store a binary file for each spectra
filelist = os.path.join(basepath, 'odincal/odincal/filelist.txt')
output = open(filelist, 'w+')
for spec in spectra:
spec.Save(str(spec.spectrum))
output.write(str(spec.spectrum) + '\n')
# print spec.stw
output.close()
# produce a level1b hdf-file
ld_path = os.path.join(basepath, 'parts/hdf4/lib')
LD = '''LD_LIBRARY_PATH='{0}' '''.format([ld_path])
program = os.path.join(basepath, 'parts/oops/bin/whdf')
cmd = string.join([LD, program, '-file', outfile, '-list '])
os.system(string.join([cmd, filelist]))
# delete files created in step 1 above
cmd = string.join(['rm ', filelist])
os.system(cmd)
for spec in spectra:
cmd = string.join(['rm ', | |
import json
import os
import pprint
from os import listdir
from os.path import isfile, join
import cv2 as cv
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.models as models
import torchvision.transforms.functional as TF
from matplotlib import cm
from PIL import Image, ImageColor, ImageDraw, ImageFont
from skimage.exposure import match_histograms
from torch.serialization import save
from torchvision.io import read_image
from torchvision.utils import draw_segmentation_masks, make_grid, save_image
from tqdm.auto import tqdm
from adv_patch_bench.models.common import Normalize
from adv_patch_bench.transforms import get_box_from_ellipse
from adv_patch_bench.utils import get_image_files, load_annotation, pad_image
plt.rcParams["savefig.bbox"] = 'tight'
plt.style.use('seaborn-white')
matplotlib.rcParams['mathtext.fontset'] = 'cm'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
POLYGON_ERROR = 0.04
SELECTED_SHAPES = ['octagon,36,36', 'diamond,36,36', 'pentagon,36,36', 'rect,36,48', 'rect,30,36', ]
def img_numpy_to_torch(img):
    """Convert an HWC (or NHWC batch) uint8 numpy image to a CHW (NCHW)
    float tensor scaled to [0, 1]."""
    tensor = torch.from_numpy(img)
    if img.ndim == 3:
        return tensor.permute(2, 0, 1) / 255.
    return tensor.permute(0, 3, 1, 2) / 255.
def count_blobs(image):
    """Count connected components in a binary image (8-connectivity)."""
    # An all-ones 3x3 structuring element makes diagonal neighbors connected.
    structure = np.ones((3, 3))
    _, num_blobs = scipy.ndimage.label(image, structure=structure)
    return num_blobs
def detect_polygon(contour):
    """Approximate a closed contour by a polygon.

    The tolerance is POLYGON_ERROR (~4%) of the contour's arc length,
    so small annotation noise is smoothed away while real corners are
    kept.  Returns the approximated vertices.
    """
    perimeter = cv.arcLength(contour, True)
    return cv.approxPolyDP(contour, perimeter * POLYGON_ERROR, True)
def draw_from_contours(img, contours, color=(0, 0, 255, 255)):
    """Paint contour pixels onto img with the given color, in place.

    Args:
        img: numpy image array indexed as (row, col, channels).
        contours: a single contour or a list of contours; each contour is
            either an OpenCV-style (N, 1, 2) array or an (N, 2) array of
            (x, y) points.
        color: channel values written at each contour pixel.  The default
            is now an immutable tuple (was a mutable list default, which
            is a Python anti-pattern); numpy assignment accepts either.

    Returns:
        The same img array, modified in place.
    """
    if not isinstance(contours, list):
        contours = [contours]
    for contour in contours:
        # Swap (x, y) point order into (row, col) index order.
        if contour.ndim == 3:
            contour_coord = (contour[:, 0, 1], contour[:, 0, 0])
        elif contour.ndim == 2:
            contour_coord = (contour[:, 1], contour[:, 0])
        else:
            raise ValueError('Invalid contour shape.')
        img[contour_coord] = color
    return img
def show(imgs, num_cols=2, titles=None):
    """Display a list of images (tensors or arrays) in a matplotlib grid.

    imgs may be a list/ndarray of images or a single image; titles, if
    given, is applied per-subplot.  The number of rows is derived from
    num_cols and the number of images.
    """
    # NOTE(review): len(imgs) is taken before the single-image case is
    # wrapped in a list below, so a bare image relies on having a len()
    # — confirm callers always pass a list here.
    num_imgs = len(imgs)
    num_rows = int(np.ceil(num_imgs / num_cols))
    if not isinstance(imgs, (list, np.ndarray)):
        imgs = [imgs]
    # NOTE(review): 'fix' is presumably a typo for 'fig' (the unused
    # figure handle from plt.subplots).
    fix, axs = plt.subplots(ncols=num_cols, nrows=num_rows,
                            figsize=(10, 6 / num_cols * num_rows))
    for i, img in enumerate(imgs):
        if isinstance(img, torch.Tensor):
            # detach so gradient-tracking tensors can be converted
            img = img.detach()
            img = TF.to_pil_image(img)
        row, col = i // num_cols, i % num_cols
        # plt.subplots returns a bare Axes, a 1-D array, or a 2-D array
        # depending on the grid shape; handle all three cases.
        if isinstance(axs, matplotlib.axes.Axes):
            axs.imshow(np.asarray(img))
            axs.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
            axs.set_title(titles)
        elif axs.ndim == 1:
            axs[col].imshow(np.asarray(img))
            axs[col].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
            if titles is not None:
                axs[col].set_title(titles[i])
        else:
            axs[row, col].imshow(np.asarray(img))
            axs[row, col].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
            if titles is not None:
                axs[row, col].set_title(titles[i])
def show_img_with_segment(label, panoptic_per_image_id, data_dir, min_area=0):
    """Overlay panoptic segments of one category on sample images.

    Scans images in data_dir in random order, and for up to 10 images
    containing at least one segment of category `label` (with area >=
    min_area), composites a translucent green mask over the matching
    segments.  The grid is saved to 'test.png'.
    """
    img_path = join(data_dir, 'images')
    label_path = join(data_dir, 'v2.0/panoptic/')
    filenames = [f for f in listdir(img_path) if isfile(join(img_path, f))]
    # randomize so repeated runs sample different images
    np.random.shuffle(filenames)
    num_imgs, max_num_imgs = 0, 10
    imgs = []
    for filename in filenames:
        if num_imgs >= max_num_imgs:
            break
        label_found = False
        img_id = filename.split('.')[0]
        segment = panoptic_per_image_id[img_id]['segments_info']
        # panoptic PNG encodes the segment id in the red channel
        # (see the `[:, :, 0] == i` comparison below)
        panoptic = np.array(Image.open(join(label_path, f'{img_id}.png')))
        # Find label id of each object present in the scene
        highlight_ids = []
        for obj in segment:
            if obj['category_id'] == label and obj['area'] >= min_area:
                label_found = True
                highlight_ids.append(obj['id'])
        if not label_found:
            continue
        num_imgs += 1
        # Get segmentation mask from panoptic
        img_pil = Image.open(join(img_path, filename))
        img = np.array(img_pil)
        mask = np.zeros_like(img[:, :, 0], dtype=np.uint8)
        for i in highlight_ids:
            mask += panoptic[:, :, 0] == i
        bool_mask = (mask > 0).astype(np.float32)
        # build an RGBA overlay: green at 50% alpha over matching pixels
        mask = np.stack([bool_mask, ] * 4, axis=-1)
        color_tuple = ImageColor.getrgb('green')
        mask[:, :, :3] *= color_tuple
        mask[:, :, 3] = bool_mask * 0.5
        mask = Image.fromarray(np.uint8(mask * 255))
        # mask = Image.fromarray(np.uint8(cm.gist_earth(mask) * 255))
        img = Image.alpha_composite(img_pil.convert('RGBA'), mask.convert('RGBA'))
        imgs.append(img)
    show(imgs, num_cols=2)
    plt.savefig('test.png', dpi=600)
def show_img_patch(model, label, panoptic_per_image_id, data_dir,
max_num_imgs=1000, min_area=0, conf_thres=0.8, pad=0.05,
num_classes=6, device='cuda'):
img_path = join(data_dir, 'images')
label_path = join(data_dir, 'v2.0/panoptic/')
filenames = [f for f in listdir(img_path) if isfile(join(img_path, f))]
np.random.shuffle(filenames)
patches, resized_patches, masks, ids = [], [], [], []
with tqdm(total=max_num_imgs) as pbar:
for filename in filenames:
img_id = filename.split('.')[0]
segment = panoptic_per_image_id[img_id]['segments_info']
panoptic = np.array(Image.open(join(label_path, f'{img_id}.png')))
img_pil = Image.open(join(img_path, filename))
img = np.array(img_pil)
img_height, img_width, _ = img.shape
# Pad image to avoid cutting varying shapes due to boundary
img_padded, pad_size = pad_image(img, pad_mode='edge', return_pad_size=True)
# Crop the specified object
for obj in segment:
# Check if bounding box is cut off at the image boundary
xmin, ymin, width, height = obj['bbox']
is_oob = (xmin == 0) or (ymin == 0) or \
((xmin + width) >= img_width) or ((ymin + height) >= img_height)
if obj['category_id'] != label or obj['area'] < min_area or is_oob:
continue
# Make sure that bounding box is square and add some padding to
# avoid cutting into the sign
size = max(width, height)
xpad, ypad = int((size - width) / 2), int((size - height) / 2)
extra_obj_pad = int(pad * size)
size += 2 * extra_obj_pad
xmin += pad_size - xpad - extra_obj_pad
ymin += pad_size - ypad - extra_obj_pad
xmax, ymax = xmin + size, ymin + size
patch = img_padded[ymin:ymax, xmin:xmax]
# Collect mask
bool_mask = (panoptic[:, :, 0] == obj['id']).astype(np.uint8)
mask = np.stack([bool_mask, ] * 4, axis=-1)
mask *= np.array([0, 255, 0, 127], dtype=np.uint8)
# # Run corner detection on mask
# # blockSize: It is the size of neighbourhood considered for corner detection
# block_size = int(size * 0.1)
# # ksize: Aperture parameter of the Sobel derivative used
# ksize = 3
# corners = cv.cornerHarris(bool_mask, block_size, ksize, 0.04)
# d_corners = cv.dilate(corners, None)
# mask[corners > 0.5 * d_corners.max()] = [255, 0, 0, 255]
# DEBUG
# # print(corners.max())
# print(corners.reshape(-1)[np.argsort(corners.reshape(-1))[::-1][:10]])
# # print(np.sum(corners > 0.5 * corners.max()))
# print(count_blobs(corners > 0.5 * corners.max()))
contours, _ = cv.findContours(bool_mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
# mask = draw_from_contours(mask, contours, color=[0, 0, 255, 255])
# Find convex hull to (1) combine multiple contours and/or
# (2) fix some occlusion
cat_contours = np.concatenate(contours, axis=0)
hull = cv.convexHull(cat_contours, returnPoints=True)
hull_color = (255, 255, 255, 255)
mask = cv.drawContours(mask, [hull], -1, hull_color, 1)
hull_mask = (mask == np.array(hull_color)).prod(-1)
# mask = (1 - hull_mask) * mask + hull_mask * hull_draw
# Fit polygon to remove some annotation errors and get vertices
vertices = detect_polygon(hull)
# mask = draw_from_contours(mask, vertices, color=[255, 255, 255, 255])
print(vertices)
# TODO: Check if matches with classifier prediction
# TODO: if circle, fit ellipse instead
hull_draw_points = np.stack(np.where(hull_mask), axis=1)[:, ::-1]
ellipse = cv.fitEllipse(hull_draw_points)
ellipse_color = (255, 0, 0, 255)
mask = cv.ellipse(mask, ellipse, ellipse_color)
# Get filled ellipse and compute ellipse error
ellipse_mask = cv.ellipse(np.zeros_like(bool_mask, dtype=np.float32), ellipse, (1,), thickness=-1)
ellipse_error = np.abs(ellipse_mask - bool_mask.astype(np.float32)).sum() / bool_mask.sum()
# DEBUG: Transform
if len(vertices) == 4:
sign_height, sign_width = 100, 100
canonical = np.zeros((sign_height, sign_width, 3))
canonical_mask = np.zeros((sign_height, sign_width, 1))
adv_patch = np.random.rand(40, 40, 3)
# Histogram matching of the patch and the entire image
adv_patch_matched = match_histograms(adv_patch, img) / 255.
canonical[20:60, 40:80, :] = adv_patch_matched
canonical_mask[20:60, 40:80, :] = 1
src = np.array([[0, 0], [0, sign_width - 1],
[sign_height - 1, sign_width - 1], [sign_height - 1, 0]],
dtype=np.float32)
target = vertices[:, 0, :]
M = cv.getPerspectiveTransform(src, target.astype(np.float32))
out = cv.warpPerspective(canonical, M, (img_width, img_height))
out_mask = cv.warpPerspective(canonical_mask, M, (img_width, img_height))
out_mask = (out_mask > 0.5).astype(np.float32)[:, :, None]
new_img = (1 - out_mask) * img / 255. + out_mask * out
# Mark vertices
vert = draw_from_contours(np.zeros_like(new_img), vertices, color=[0, 255, 0])
vert = cv.dilate(vert, None) / 255.
vert_mask = (vert.sum(-1) > 0).astype(np.float32)[:, :, None]
new_img = (1 - vert_mask) * new_img + vert_mask * vert
new_img = pad_image(new_img, pad_mode='constant')[ymin:ymax, xmin:xmax]
# save_image(torch.from_numpy(new_img).permute(2, 0, 1), 'test_warp.png')
# No histogram matching
canonical[20:60, 40:80, :] = adv_patch
out = cv.warpPerspective(canonical, M, (img_width, img_height))
new_img2 = (1 - out_mask) * img / 255. + out_mask * out
new_img2 = (1 - vert_mask) * new_img2 + vert_mask * vert
new_img2 = pad_image(new_img2, pad_mode='constant')[ymin:ymax, xmin:xmax]
else:
new_img = np.zeros_like(patch)
new_img2 = np.zeros_like(patch)
# emask = pad_image(ellipse_mask, pad_mode='constant')
# save_image(torch.from_numpy(emask[ymin:ymax, xmin:xmax]), 'test.png')
# save_image(torch.from_numpy(mask_patch[:, :, :3] / 255.).permute(2, 0, 1), 'test_mask.png')
# save_image(torch.from_numpy(patch / 255.).permute(2, 0, 1), 'test_img.png')
# DEBUG:
if ellipse_error > 0.1:
pass
# vertices = get_corners(bool_mask)
# shape = get_shape_from_vertices(vertices)[0]
# print(shape)
# if shape != 'other':
# get_box_vertices(vertices, shape)
else:
print('found circle')
box = get_box_from_ellipse(ellipse).astype(np.int64)
mask = cv.drawContours(mask, [box], 0, ellipse_color, 1)
# emask = np.zeros_like(mask)
# # draw_from_contours(mask, box, color=[255, 255, 255, 255])
# emask[(box[:, 1], box[:, 0])] = [230, 230, 250, 255]
# # emask = cv.dilate(emask, None)
# mask = (emask == 0) * mask + (emask > 0) * emask
# mask_padded = pad_image(mask, pad_mode='constant')
# mask_patch = mask_padded[ymin:ymax, xmin:xmax]
# # mask_patch = mask_padded
# save_image(torch.from_numpy(mask_patch[:, :, :3] / 255.).permute(2, 0, 1), 'test_ellipse.png')
# TODO: Use edge | |
# -*- coding: UTF-8 -*-
"""
Model tests for Workflow
Author: <NAME>
"""
# python
import datetime
import sys
# django
from django.test.client import Client
from django.test import TestCase
from django.contrib.auth.models import User
# project
from workflow.models import *
class ModelTestCase(TestCase):
"""
Testing Models
"""
# Reference fixtures here
fixtures = ['workflow_test_data']
def test_workflow_unicode(self):
    """
    Makes sure that the slug field (name) is returned from a call to
    __unicode__()
    """
    w = Workflow.objects.get(id=1)
    # assertEqual: assertEquals is a deprecated alias in unittest.
    self.assertEqual(u'test workflow', w.__unicode__())
def test_workflow_lifecycle(self):
    """
    Makes sure the methods in the Workflow model work as expected
    """
    # All new workflows start with status DEFINITION - from the fixtures
    w = Workflow.objects.get(id=1)
    # assertEqual throughout: assertEquals is a deprecated alias.
    self.assertEqual(Workflow.DEFINITION, w.status)
    # Activate the workflow
    w.activate()
    self.assertEqual(Workflow.ACTIVE, w.status)
    # Retire it.
    w.retire()
    self.assertEqual(Workflow.RETIRED, w.status)
def test_workflow_is_valid(self):
    """
    Makes sure that the validation for a workflow works as expected.

    Uses assertTrue/assertFalse/assertIn instead of the unidiomatic
    assertEqual(True/False, ...) forms, and assertEqual instead of the
    deprecated assertEquals alias.
    """
    # from the fixtures
    w = Workflow.objects.get(id=1)
    self.assertEqual(Workflow.DEFINITION, w.status)
    # make sure the workflow contains exactly one start state
    # 0 start states
    state1 = State.objects.get(id=1)
    state1.is_start_state = False
    state1.save()
    self.assertFalse(w.is_valid())
    self.assertIn(u'There must be only one start state',
                  w.errors['workflow'])
    state1.is_start_state = True
    state1.save()
    # >1 start states
    state2 = State.objects.get(id=2)
    state2.is_start_state = True
    state2.save()
    self.assertFalse(w.is_valid())
    self.assertIn(u'There must be only one start state',
                  w.errors['workflow'])
    state2.is_start_state = False
    state2.save()
    # make sure we have at least one end state
    # 0 end states
    end_states = w.states.filter(is_end_state=True)
    for state in end_states:
        state.is_end_state = False
        state.save()
    self.assertFalse(w.is_valid())
    self.assertIn(u'There must be at least one end state',
                  w.errors['workflow'])
    for state in end_states:
        state.is_end_state = True
        state.save()
    # make sure we don't have any orphan states
    orphan_state = State(name='orphaned_state', workflow=w)
    orphan_state.save()
    self.assertFalse(w.is_valid())
    self.assertIn(orphan_state.id, w.errors['states'])
    msg = u'This state is orphaned. There is no way to get to it given'\
        ' the current workflow topology.'
    self.assertIn(msg, w.errors['states'][orphan_state.id])
    orphan_state.delete()
    # make sure we don't have any cul-de-sacs from which one can't
    # escape (re-using an end state for the same effect)
    cul_de_sac = end_states[0]
    cul_de_sac.is_end_state = False
    cul_de_sac.save()
    self.assertFalse(w.is_valid())
    self.assertIn(cul_de_sac.id, w.errors['states'])
    msg = u'This state is a dead end. It is not marked as an end state'\
        ' and there is no way to exit from it.'
    self.assertIn(msg, w.errors['states'][cul_de_sac.id])
    cul_de_sac.is_end_state = True
    cul_de_sac.save()
    # make sure transition's roles are a subset of the roles associated
    # with the transition's from_state (otherwise you'll have a
    # transition that none of the participants for a state can make use
    # of)
    role = Role.objects.get(id=2)
    transition = Transition.objects.get(id=10)
    transition.roles.clear()
    transition.roles.add(role)
    self.assertFalse(w.is_valid())
    self.assertIn(transition.id, w.errors['transitions'])
    msg = u'This transition is not navigable because none of the'\
        ' roles associated with the parent state have permission to'\
        ' use it.'
    self.assertIn(msg, w.errors['transitions'][transition.id])
    # so all the potential pitfalls have been validated. Lets make sure
    # we *can* validate it as expected.
    transition.roles.clear()
    admin_role = Role.objects.get(id=1)
    staff_role = Role.objects.get(id=3)
    transition.roles.add(admin_role)
    transition.roles.add(staff_role)
    self.assertTrue(w.is_valid())
    self.assertEqual([], w.errors['workflow'])
    self.assertEqual({}, w.errors['states'])
    self.assertEqual({}, w.errors['transitions'])
def test_workflow_has_errors(self):
    """
    Ensures that has_errors() returns the appropriate response for all
    possible circumstances
    """
    # Housekeeping: an active workflow plus a clone of it
    wf = Workflow.objects.get(id=1)
    user = User.objects.get(id=1)
    wf.activate()
    cloned_wf = wf.clone(user)
    # A state with no errors
    start_state = State.objects.get(id=1)
    wf.is_valid()
    self.assertEqual([], wf.has_errors(start_state))
    # A state with errors
    start_state.is_start_state = False
    start_state.save()
    wf.is_valid()
    msg = u'This state is orphaned. There is no way to get to it given'\
        ' the current workflow topology.'
    self.assertEqual([msg], wf.has_errors(start_state))
    # A transition with no errors
    transition = Transition.objects.get(id=10)
    wf.is_valid()
    self.assertEqual([], wf.has_errors(transition))
    # A transition with errors
    unrelated_role = Role.objects.get(id=2)
    transition.roles.clear()
    transition.roles.add(unrelated_role)
    wf.is_valid()
    msg = u'This transition is not navigable because none of the'\
        ' roles associated with the parent state have permission to'\
        ' use it.'
    self.assertEqual([msg], wf.has_errors(transition))
    # A problem state that belongs to the clone, not to wf, so it must
    # not be reported against wf
    clone_state = cloned_wf.states.all()[0]
    clone_state.is_start_state = False
    clone_state.save()
    wf.is_valid()
    self.assertEqual([], wf.has_errors(clone_state))
    # Likewise a problem transition belonging only to the clone
    clone_transition = cloned_wf.transitions.all()[0]
    clone_transition.roles.clear()
    wf.is_valid()
    self.assertEqual([], wf.has_errors(clone_transition))
    # Something that is neither a state nor a transition (e.g. a string)
    wf.is_valid()
    self.assertEqual([], wf.has_errors("Test"))
def test_workflow_activate_validation(self):
    """
    Makes sure that the appropriate validation of a workflow happens
    when the activate() method is called
    """
    # from the fixtures
    w = Workflow.objects.get(id=1)
    # assertEqual: assertEquals is a deprecated alias.
    self.assertEqual(Workflow.DEFINITION, w.status)
    # make sure only workflows in definition can be activated
    w.status = Workflow.ACTIVE
    w.save()
    try:
        w.activate()
    # "except E as e" is valid from Python 2.6 and required by Python 3
    # (the old "except E, e" form is a syntax error there).
    except Exception as instance:
        self.assertEqual(u'Only workflows in the "definition" state may'\
            ' be activated', instance.args[0])
    else:
        self.fail('Exception expected but not thrown')
    w.status = Workflow.DEFINITION
    w.save()
    # Lets make sure the workflow is validated before being activated by
    # making sure the workflow is not valid
    state1 = State.objects.get(id=1)
    state1.is_start_state = False
    state1.save()
    try:
        w.activate()
    except Exception as instance:
        self.assertEqual(u"Cannot activate as the workflow doesn't"\
            " validate.", instance.args[0])
    else:
        self.fail('Exception expected but not thrown')
    state1.is_start_state = True
    state1.save()
    # so all the potential pitfalls have been validated. Lets make sure
    # we *can* approve it as expected.
    w.activate()
    self.assertEqual(Workflow.ACTIVE, w.status)
def test_workflow_retire_validation(self):
    """
    Makes sure that the appropriate state is set against a workflow when
    this method is called
    """
    workflow = Workflow.objects.get(id=1)
    workflow.retire()
    self.assertEqual(Workflow.RETIRED, workflow.status)
def test_workflow_clone(self):
    """
    Makes sure we can clone a workflow correctly.
    """
    # We can't clone workflows that are in definition because they might
    # not be "correct" (see the validation that happens when the
    # activate() method is called)
    u = User.objects.get(id=1)
    w = Workflow.objects.get(id=1)
    try:
        w.clone(u)
    # "except E as e" is valid from Python 2.6 and required by Python 3
    # (the old "except E, e" form is a syntax error there).
    except Exception as instance:
        self.assertEqual(u'Only active or retired workflows may be'\
            ' cloned', instance.args[0])
    else:
        self.fail('Exception expected but not thrown')
    w.activate()
    clone = w.clone(u)
    self.assertEqual(Workflow.DEFINITION, clone.status)
    self.assertEqual(u, clone.created_by)
    self.assertEqual(w, clone.cloned_from)
    self.assertEqual(w.name, clone.name)
    self.assertEqual(w.description, clone.description)
    # Lets check we get the right number of states, transitions and
    # events
    self.assertEqual(w.transitions.all().count(),
                     clone.transitions.all().count())
    self.assertEqual(w.states.all().count(), clone.states.all().count())
    self.assertEqual(w.events.all().count(), clone.events.all().count())
def test_state_deadline(self):
    """
    Makes sure we get the right result from the deadline() method in the
    State model.

    The five copy-pasted unit checks are collapsed into one data-driven
    loop, and the deprecated assertEquals alias is replaced with
    assertEqual.
    """
    w = Workflow.objects.get(id=1)
    s = State(
        name='test',
        workflow=w
    )
    s.save()
    # Lets make sure the default is correct
    self.assertEqual(None, s.deadline())
    # Changing the unit of time measurements mustn't change anything
    # while there is no estimation_value set
    s.estimation_unit = s.HOUR
    s.save()
    self.assertEqual(None, s.deadline())
    # Only when we have a positive value in the estimation_value field
    # should a deadline be returned. Pin "today" so the deadline is
    # deterministic.
    s._today = lambda: datetime.datetime(2000, 1, 1, 0, 0, 0)
    s.estimation_value = 1
    # One unit of each estimation_unit and the deadline it should yield
    expectations = [
        (s.SECOND, datetime.datetime(2000, 1, 1, 0, 0, 1)),
        (s.MINUTE, datetime.datetime(2000, 1, 1, 0, 1, 0)),
        (s.HOUR, datetime.datetime(2000, 1, 1, 1, 0)),
        (s.DAY, datetime.datetime(2000, 1, 2)),
        (s.WEEK, datetime.datetime(2000, 1, 8)),
    ]
    for unit, expected in expectations:
        s.estimation_unit = unit
        s.save()
        self.assertEqual(expected, s.deadline())
def test_state_unicode(self):
    """
    Makes sure we get the right result from the __unicode__() method in
    the State model
    """
    workflow = Workflow.objects.get(id=1)
    state = State(name='test', workflow=workflow)
    state.save()
    self.assertEqual(u'test', state.__unicode__())
def test_transition_unicode(self):
    """
    Makes sure we get the right result from the __unicode__() method in
    the Transition model
    """
    transition = Transition.objects.get(id=1)
    self.assertEqual(u'Proceed to state 2', transition.__unicode__())
def test_event_unicode(self):
    """
    Makes sure we get the right result from the __unicode__() method in
    the Event model
    """
    event = Event.objects.get(id=1)
    self.assertEqual(u'Important meeting', event.__unicode__())
def test_event_type_unicode(self):
    """
    Make sure we get the name of the event type
    """
    et = EventType.objects.get(id=1)
    # assertEqual: assertEquals is a deprecated alias.
    self.assertEqual(u'Meeting', et.__unicode__())
def test_workflowactivity_current_state(self):
"""
Check we always get the latest state (or None if the WorkflowActivity
hasn't started navigating a workflow
"""
w = Workflow.objects.get(id=1)
u = User.objects.get(id=1)
r = Role.objects.get(id=1)
wa = WorkflowActivity(workflow=w, created_by=u)
wa.save()
p = Participant(user=u, workflowactivity=wa)
p.save()
p.roles.add(r)
# We've not started the workflow yet so make sure we don't get
# anything back
self.assertEqual(None, wa.current_state())
wa.start(p)
# We should be in the first state
s1 = State.objects.get(id=1) # From the fixtures
current_state = | |
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of common ranking losses in JAX.
A ranking loss is a differentiable function that expresses the cost of a ranking
induced by item scores compared to a ranking induced from relevance labels. Rax
provides a number of ranking losses as JAX functions that are implemented
according to the :class:`~rax.types.LossFn` interface.
Loss functions are designed to operate on the last dimension of its inputs. The
leading dimensions are considered batch dimensions. To compute per-list losses,
for example to apply per-list weighting or for distributed computing of losses
across devices, please use standard JAX transformations such as :func:`jax.vmap`
or :func:`jax.pmap`.
Standalone usage:
>>> scores = jnp.array([2., 1., 3.])
>>> labels = jnp.array([1., 0., 0.])
>>> rax.softmax_loss(scores, labels)
DeviceArray(1.4076059, dtype=float32)
Usage with a batch of data and a mask to indicate valid items.
>>> scores = jnp.array([[2., 1., 0.], [1., 0.5, 1.5]])
>>> labels = jnp.array([[1., 0., 0.], [0., 0., 1.]])
>>> where = jnp.array([[True, True, False], [True, True, True]])
>>> rax.pairwise_hinge_loss(
... scores, labels, where=where, reduce_fn=jnp.mean)
DeviceArray(0.16666667, dtype=float32)
To compute gradients of each loss function, please use standard JAX
transformations such as :func:`jax.grad` or :func:`jax.value_and_grad`:
>>> scores = jnp.asarray([[0., 1., 3.], [1., 2., 0.]])
>>> labels = jnp.asarray([[0., 0., 1.], [1., 0., 0.]])
>>> jax.grad(rax.softmax_loss)(scores, labels, reduce_fn=jnp.mean)
DeviceArray([[ 0.02100503, 0.0570976 , -0.07810265],
[-0.37763578, 0.33262047, 0.04501529]], dtype=float32)
"""
import operator
from typing import Callable, Optional
import jax
import jax.numpy as jnp
from rax._src import utils
from rax._src.types import Array
from rax._src.types import ReduceFn
def softmax_loss(scores: Array,
                 labels: Array,
                 *,
                 where: Optional[Array] = None,
                 weights: Optional[Array] = None,
                 label_fn: Callable[..., Array] = lambda a, where: a,
                 reduce_fn: Optional[ReduceFn] = jnp.sum) -> Array:
    r"""Softmax loss.

    Definition:

    .. math::
        \ell(s, y) =
        - \sum_i y_i \log \frac{\exp(s_i)}{\sum_j \exp(s_j)}

    Args:
      scores: A ``[..., list_size]``-:class:`~jax.numpy.ndarray`, indicating the
        score of each item.
      labels: A ``[..., list_size]``-:class:`~jax.numpy.ndarray`, indicating the
        relevance label for each item.
      where: An optional ``[..., list_size]``-:class:`~jax.numpy.ndarray`,
        indicating which items are valid for computing the loss. Items for which
        this is False will be ignored when computing the loss.
      weights: An optional ``[..., list_size]``-:class:`~jax.numpy.ndarray`,
        indicating the weight for each item.
      label_fn: A label function that maps labels to probabilities. Default keeps
        labels as-is.
      reduce_fn: An optional function that reduces the loss values. Can be
        :func:`jax.numpy.sum` or :func:`jax.numpy.mean`. If ``None``, no
        reduction is performed.

    Returns:
      The softmax loss.
    """
    if where is not None:
        # Invalid labels contribute nothing; invalid scores are pushed to
        # -inf so log_softmax assigns them zero probability mass.
        labels = jnp.where(where, labels, jnp.zeros_like(labels))
        scores = jnp.where(where, scores, -jnp.ones_like(scores) * jnp.inf)

    if weights is not None:
        labels = labels * weights

    # Cross entropy between the (possibly transformed) labels and the
    # log-softmax of the scores.
    target_probs = label_fn(labels, where=where)
    log_probs = jax.nn.log_softmax(scores, axis=-1)
    per_list_loss = -jnp.sum(target_probs * log_probs, axis=-1, where=where)

    return utils.safe_reduce(per_list_loss, reduce_fn=reduce_fn)
def poly1_softmax_loss(scores: Array,
                       labels: Array,
                       *,
                       epsilon: float = 1.0,
                       where: Optional[Array] = None,
                       weights: Optional[Array] = None,
                       reduce_fn: Optional[ReduceFn] = jnp.mean) -> Array:
    r"""Poly1 softmax loss.

    Definition :cite:p:`leng2022polyloss`:

    .. math::
        \ell(s, y) = softmax(s, y) + \epsilon * (1 - pt)

    where :math:`softmax` is the standard softmax loss as implemented in
    :func:`~rax.softmax_loss` and :math:`pt` is the target softmax probability
    defined as:

    .. math::
        pt = \sum_i \frac{y_i}{\sum_j y_j} \frac{\exp(s_i)}{\sum_j \exp(s_j)}

    Args:
      scores: A ``[..., list_size]``-:class:`~jax.numpy.ndarray`, indicating the
        score of each item.
      labels: A ``[..., list_size]``-:class:`~jax.numpy.ndarray`, indicating the
        relevance label for each item.
      epsilon: A float hyperparameter indicating the weight of the leading
        polynomial coefficient in the poly loss.
      where: An optional ``[..., list_size]``-:class:`~jax.numpy.ndarray`,
        indicating which items are valid for computing the loss. Items for which
        this is False will be ignored when computing the loss.
      weights: An optional ``[..., list_size]``-:class:`~jax.numpy.ndarray`,
        indicating the weight for each item.
      reduce_fn: An optional function that reduces the loss values. Can be
        :func:`jax.numpy.sum` or :func:`jax.numpy.mean`. If ``None``, no
        reduction is performed.

    Returns:
      The poly1 softmax loss.
    """
    # Per-list softmax cross-entropy, left unreduced so the poly1 term can
    # be added element-wise below.
    cross_entropy = softmax_loss(
        scores, labels, where=where, weights=weights, reduce_fn=None)

    if where is not None:
        # Mask out invalid entries the same way softmax_loss does.
        labels = jnp.where(where, labels, jnp.zeros_like(labels))
        scores = jnp.where(where, scores, -jnp.ones_like(scores) * jnp.inf)

    if weights is not None:
        labels = labels * weights

    # Target probability: sum of softmax probabilities weighted by the
    # normalized labels.
    probs = jax.nn.softmax(scores)
    normalized_labels = utils.normalize_probabilities(labels, where=where)
    target_prob = jnp.sum(normalized_labels * probs, where=where, axis=-1)

    if where is not None:
        # Fully masked lists get target_prob = 1 so their (1 - pt) term
        # vanishes from the loss.
        all_masked = jnp.all(jnp.logical_not(where), axis=-1)
        target_prob = jnp.where(all_masked, 1., target_prob)

    return utils.safe_reduce(
        cross_entropy + epsilon * (1. - target_prob), reduce_fn=reduce_fn)
def listmle_loss(scores: Array,
                 labels: Array,
                 *,
                 key: Optional[Array] = None,
                 where: Optional[Array] = None,
                 reduce_fn: Optional[ReduceFn] = jnp.mean) -> Array:
    r"""ListMLE Loss.

    .. note::

      This loss performs sorting using the given labels. If the labels contain
      multiple identical values, you should provide a
      :func:`~jax.random.PRNGKey` to the ``key`` argument to make sure ties are
      broken randomly during the sorting operation.

    Definition :cite:p:`xia2008listwise`:

    .. math::
        \ell(s, y) =
        - \sum_i \log
        \frac{\exp(s_i)}{\sum_j I[rank(y_j) \ge rank(y_i)] \exp(s_j)}

    where :math:`\operatorname{rank}(y_i)` indicates the rank of item
    :math:`i` after sorting all labels :math:`y`.

    Args:
      scores: A ``[..., list_size]``-:class:`~jax.numpy.ndarray`, indicating the
        score of each item.
      labels: A ``[..., list_size]``-:class:`~jax.numpy.ndarray`, indicating the
        relevance label for each item.
      key: An optional :func:`~jax.random.PRNGKey` for random tie-breaking.
      where: An optional ``[..., list_size]``-:class:`~jax.numpy.ndarray`,
        indicating which items are valid for computing the loss. Items for which
        this is False will be ignored when computing the loss.
      reduce_fn: An optional function that reduces the loss values. Can be
        :func:`jax.numpy.sum` or :func:`jax.numpy.mean`. If ``None``, no
        reduction is performed.

    Returns:
      The listmle loss.
    """
    # Treat every item as valid when no mask is given.
    if where is None:
        where = jnp.ones_like(scores, dtype=jnp.bool_)

    # Order scores (and the mask) by descending label relevance.
    sorted_scores, sorted_mask = utils.sort_by(
        labels, [scores, where], where=where, key=key)

    # Reverse cumulative logsumexp gives, per position, the log-partition
    # over the item and everything ranked below it.
    cumulative_lse = utils.logcumsumexp(
        sorted_scores, axis=-1, where=sorted_mask, reverse=True)

    # Negative log-likelihood of the label-induced ordering.
    per_list_loss = jnp.sum(
        cumulative_lse - sorted_scores, axis=-1, where=sorted_mask)

    return utils.safe_reduce(per_list_loss, reduce_fn=reduce_fn)
def compute_pairs(a: Array, op: Callable[[Array, Array], Array]) -> Array:
    """Computes pairs based on values of `a` and the given pairwise `op`.

    Args:
      a: The array used to form pairs. The last axis is used to form pairs.
      op: The binary op to map a pair of values to a single value.

    Returns:
      A new array with the same leading dimensions as `a`, but with the last
      dimension expanded so it includes all pairs `op(a[..., i], a[..., j])`
    """
    # Broadcast a column view against a row view so entry (i, j) of the
    # trailing two axes holds op(a[..., i], a[..., j]).
    lhs = a[..., :, None]
    rhs = a[..., None, :]
    pair_shape = jnp.broadcast_shapes(lhs.shape, rhs.shape)
    pairs = jnp.broadcast_to(op(lhs, rhs), pair_shape)
    # Collapse the trailing (i, j) axes into a single axis of all pairs.
    flat_shape = pairs.shape[:-2] + (pairs.shape[-2] * pairs.shape[-1],)
    return pairs.reshape(flat_shape)
def pairwise_hinge_loss(scores: Array,
labels: Array,
*,
where: Optional[Array] = None,
weights: Optional[Array] = None,
reduce_fn: ReduceFn = jnp.sum) -> Array:
r"""Pairwise hinge loss.
Definition:
.. math::
\ell(s, y) =
\sum_i \sum_j I[y_i > y_j] \max(0, 1 - (s_i - s_j))
Args:
scores: A ``[..., list_size]``-:class:`~jax.numpy.ndarray`, indicating the
score of each item.
labels: A ``[..., list_size]``-:class:`~jax.numpy.ndarray`, indicating the
relevance label for each item.
where: An optional ``[..., list_size]``-:class:`~jax.numpy.ndarray`,
indicating which items are valid for computing the loss. Items for which
this is False will be ignored when computing the loss.
weights: An optional ``[..., list_size]``-:class:`~jax.numpy.ndarray`,
indicating the weight for each item.
reduce_fn: An optional function that reduces the loss values. Can be
:func:`jax.numpy.sum` or :func:`jax.numpy.mean`. If ``None``, no reduction
is performed.
Returns:
The pairwise hinge loss.
"""
# Expand scores and labels into pairwise versions.
scores = compute_pairs(scores, operator.sub)
labels = | |
traversal a subgraph may be
revisited. Complexity would be O(V*E).
Let k = number of apps with a vertex that have the in degree > 1 and
that are not leaf apps. We can bind k to be 0<=k<=10000, shall we reach
that app number.
Each node and each vertex will be visited once O(V+E) (root apps
+ vertex to leaf).
Only k nodes will trigger a revisit of a subset of vertices (k * O(E)).
Complexity now becomes O(V+(k+1)*E) = O(V+E)
Limitations:
If an app(current) depends only on non-existing apps, then
current app will not be properly ordered. It will not be present in
the ordered list before other apps based on it.
If an app(current) depends only on non platform managed apps, then
current app will not be properly ordered. It will not be present in
the ordered list before other apps based on it.
:param: apps_metadata_dict dictionary containing parsed and processed
metadata collection
:return: Sorted list containing the app reapply order.
"""
# Apps directly after current
after_apps = {}
# Remember the maximum depth
chain_depth = {}
# Used to detect cycles
cycle_depth = {}
# Used for second traversal when populating ordered list
traverse_depth = {}
# Final result
ordered_apps = []
apps_metadata_dict[constants.APP_METADATA_ORDERED_APPS] = ordered_apps
# Initialize structures
for app_name in apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS]:
after_apps[app_name] = []
chain_depth[app_name] = 0
cycle_depth[app_name] = 0
traverse_depth[app_name] = 0
# For each app remember which apps are directly after
for app_name in apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS]:
app_metadata = apps_metadata_dict[constants.APP_METADATA_APPS][app_name]
metadata_after = app_metadata.get(constants.APP_METADATA_BEHAVIOR, None)
if metadata_after is not None:
metadata_after = metadata_after.get(constants.APP_METADATA_EVALUATE_REAPPLY, None)
if metadata_after is not None:
metadata_after = metadata_after.get(constants.APP_METADATA_AFTER, None)
if metadata_after is not None:
for before_app in metadata_after:
# This one may be a non-existing app, need to initialize
if after_apps.get(before_app, None) is None:
after_apps[before_app] = []
# Store information
after_apps[before_app].append(app_name)
# Remember that current app is before at least one
chain_depth[app_name] = 1
traverse_depth[app_name] = 1
# Identify root apps
root_apps = []
for app_name in apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS]:
if chain_depth.get(app_name, None) == 0:
root_apps.append(app_name)
# Used for cycle detection
stack_ = queue.LifoQueue()
cycle_checked = {}
max_depth = len(apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS])
# Detect cycles and abort
for app_name in apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS]:
# Skip already checked app
if cycle_checked.get(app_name, False) is True:
continue
# Start from this
stack_.put(app_name)
# Reinitialize temporary visited
visited = {}
# Traverse DFS to detect cycles
while not stack_.empty():
app_name = stack_.get_nowait()
visited[app_name] = True
# Skip already checked app
if cycle_checked.get(app_name, False) is True:
continue
for after in after_apps[app_name]:
cycle_depth[after] = max(cycle_depth[app_name] + 1, cycle_depth[after])
# Detected cycle
if cycle_depth[after] > max_depth:
return ordered_apps
stack_.put(after)
# Remember the temporary visited apps to skip them in the future
for r in visited.keys():
cycle_checked[r] = True
# Used for traversal
queue_ = queue.Queue()
# Compute the longest dependency chain starting from root apps
for app_name in root_apps:
queue_.put(app_name)
# Traverse similar to BFS to compute the longest dependency chain
while not queue_.empty():
app_name = queue_.get_nowait()
for after in after_apps[app_name]:
chain_depth[after] = max(chain_depth[app_name] + 1, chain_depth[after])
queue_.put(after)
# Traverse graph again similar to BFS
# Add to ordered list when the correct chain depth is reached
found = {}
for app_name in root_apps:
queue_.put(app_name)
found[app_name] = True
ordered_apps.append(app_name)
while not queue_.empty():
app_name = queue_.get_nowait()
for after in after_apps[app_name]:
traverse_depth[after] = max(traverse_depth[app_name] + 1, traverse_depth[after])
# This is the correct depth, add to ordered list
if traverse_depth[after] == chain_depth[after]:
# Skip if already added
if found.get(after, False) is True:
continue
found[after] = True
ordered_apps.append(after)
queue_.put(after)
# Add apps that have dependencies on non-existing apps
for app_name in apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS]:
if found.get(app_name, False) is True:
continue
ordered_apps.append(app_name)
LOG.info("Applications reapply order: {}".format(ordered_apps))
apps_metadata_dict[constants.APP_METADATA_ORDERED_APPS] = ordered_apps
@staticmethod
@cutils.synchronized(LOCK_NAME_PROCESS_APP_METADATA, external=False)
def update_and_process_app_metadata(apps_metadata_dict, app_name, metadata, overwrite=True):
    """ Update the cached metadata for a single app

    :param apps_metadata_dict: the dictionary acting as the cache
    :param app_name: name of the app
    :param metadata: metadata that replaces the currently cached one
    :param overwrite: when metadata is already cached for this app,
                      overwrite must be enabled for the replacement to
                      take place
    """
    already_cached = \
        app_name in apps_metadata_dict[constants.APP_METADATA_APPS]
    if already_cached and not overwrite:
        LOG.info("Updating metadata for app {} skipped because metadata "
                 "is present and overwrite is not enabled"
                 "".format(app_name))
        return

    apps_metadata_dict[constants.APP_METADATA_APPS][app_name] = metadata
    LOG.info("Loaded metadata for app {}: {}".format(app_name, metadata))

    behavior = metadata.get(constants.APP_METADATA_BEHAVIOR, None)
    if behavior is None:
        return

    managed_flag = behavior.get(
        constants.APP_METADATA_PLATFORM_MANAGED_APP, None)
    target_state = behavior.get(constants.APP_METADATA_DESIRED_STATE, None)

    # NOTE(review): any valid bool-string (not only a truthy one) marks
    # the app as platform managed here -- confirm that is intended.
    if cutils.is_valid_boolstr(managed_flag):
        apps_metadata_dict[
            constants.APP_METADATA_PLATFORM_MANAGED_APPS][app_name] = None
        LOG.info("App {} requested to be platform managed"
                 "".format(app_name))
        # The set of managed apps changed, so the reapply order must be
        # recomputed.
        AppOperator.recompute_app_evaluation_order(apps_metadata_dict)

    if target_state is not None:
        apps_metadata_dict[
            constants.APP_METADATA_DESIRED_STATES][app_name] = target_state
        LOG.info("App {} requested to achieve {} state"
                 "".format(app_name, target_state))
def load_application_metadata_from_database(self, rpc_app):
    """ Load the application metadata from the database

    :param rpc_app: KubeApp model object
    """
    LOG.info("Loading application metadata for {} from database"
             "".format(rpc_app.name))

    app = AppOperator.Application(rpc_app)

    # The metadata is stored as a dictionary column in the database; the
    # "or {}" guard alone covers both a missing and an empty value, so
    # the previous redundant truthiness check around it is dropped.
    db_app = self._dbapi.kube_app_get(app.name)
    metadata = db_app.app_metadata or {}

    AppOperator.update_and_process_app_metadata(self._apps_metadata,
                                                app.name,
                                                metadata)
def load_application_metadata_from_file(self, rpc_app):
    """ Load the application metadata from the metadata file of the app

    :param rpc_app: data object provided in the rpc request
    """
    LOG.info("Loading application metadata for {} from file"
             "".format(rpc_app.name))

    app = AppOperator.Application(rpc_app)
    metadata = {}

    if os.path.exists(app.sync_metadata_file):
        with io.open(app.sync_metadata_file, 'r', encoding='utf-8') as meta_file:
            # The RoundTripLoader removes the superfluous quotes by
            # default; preserve_quotes=True keeps all of them. Assumes
            # the file holds a single yaml document.
            loaded = yaml.load(
                meta_file, Loader=yaml.RoundTripLoader, preserve_quotes=True)
            metadata = loaded or {}

    AppOperator.update_and_process_app_metadata(self._apps_metadata,
                                                app.name,
                                                metadata)

    # Persist the metadata dictionary in its database column as well
    rpc_app.app_metadata = metadata
    rpc_app.save()
def perform_app_apply(self, rpc_app, mode, lifecycle_hook_info_app_apply, caller=None):
"""Process application install request
This method processes node labels per configuration and invokes
Armada to apply the application manifest.
For OpenStack app (system app), the method generates combined
overrides (a merge between system and user overrides if available)
for the charts that comprise the app before downloading docker images
and applying the manifest.
Usage: the method can be invoked at initial install or after the
user has either made some manual configuration changes or
or applied (new) user overrides to some Helm chart(s) to
correct/update a previous manifest apply.
:param rpc_app: application object in the RPC request
:param mode: mode to control how to apply application manifest
:param lifecycle_hook_info_app_apply: LifecycleHookInfo object
:param caller: internal caller, None if it is an RPC call,
otherwise apply is invoked from update method
:return boolean: whether application apply was successful
"""
app = AppOperator.Application(rpc_app)
# If apply is called from update method, the app's abort status has
# already been registered.
if not caller:
self._register_app_abort(app.name)
self._raise_app_alarm(app.name, constants.APP_APPLY_IN_PROGRESS,
fm_constants.FM_ALARM_ID_APPLICATION_APPLYING,
fm_constants.FM_ALARM_SEVERITY_WARNING,
_("Application Apply In Progress"),
fm_constants.FM_ALARM_TYPE_0,
_("No action required."),
True)
self.clear_reapply(app.name)
LOG.info("Application %s (%s) apply started." % (app.name, app.version))
overrides_str = ''
ready = True
try:
app.charts = self._get_list_of_charts(app.sync_armada_mfile)
if AppOperator.is_app_aborted(app.name):
raise exception.KubeAppAbort()
# Perform app resources actions
lifecycle_hook_info_app_apply.relative_timing = constants.APP_LIFECYCLE_TIMING_PRE
lifecycle_hook_info_app_apply.lifecycle_type = constants.APP_LIFECYCLE_TYPE_RESOURCE
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_apply)
# Perform rbd actions
lifecycle_hook_info_app_apply.relative_timing = constants.APP_LIFECYCLE_TIMING_PRE
lifecycle_hook_info_app_apply.lifecycle_type = constants.APP_LIFECYCLE_TYPE_RBD
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_apply)
self._update_app_status(
app, new_progress=constants.APP_PROGRESS_GENERATE_OVERRIDES)
if AppOperator.is_app_aborted(app.name):
raise exception.KubeAppAbort()
LOG.info("Generating application overrides...")
self._helm.generate_helm_application_overrides(
app.sync_overrides_dir, app.name, mode, cnamespace=None,
armada_format=True, armada_chart_info=app.charts, combined=True)
(helm_files, armada_files) = self._get_overrides_files(
app.sync_overrides_dir, app.charts, app.name, mode)
if helm_files or armada_files:
LOG.info("Application overrides generated.")
overrides_str = self._generate_armada_overrides_str(
app.name, app.version, helm_files, armada_files)
self._update_app_status(
app, new_progress=constants.APP_PROGRESS_DOWNLOAD_IMAGES)
if AppOperator.is_app_aborted(app.name):
raise exception.KubeAppAbort()
self.download_images(app)
else:
ready = False
except Exception as e:
LOG.exception(e)
if AppOperator.is_app_aborted(app.name):
self._abort_operation(app, constants.APP_APPLY_OP,
user_initiated=True)
else:
self._abort_operation(app, constants.APP_APPLY_OP,
constants.APP_PROGRESS_ABORTED)
if not caller:
# If apply is not called from update method, deregister the app's
# abort status. Otherwise, it will be done in the update method.
self._deregister_app_abort(app.name)
if isinstance(e, exception.KubeAppApplyFailure):
# ex:Image download failure
raise
else:
# ex:K8s resource creation failure, user abort
raise exception.KubeAppApplyFailure(
name=app.name, version=app.version, reason=e)
try:
if ready:
# | |
-1.095550e-02, 5.748574e+00],
[ 4.407484e+02, 4.736099e-01, -5.086917e-01, -6.610682e-01],
[-2.458302e+00, 6.864762e+00, 2.633289e+00, -4.246873e-01],
[-1.839424e+01, -1.194455e+00, 5.659980e+02, -2.567729e+00]]])
Coordinates:
* lat (lat) int64 -90 -45 0 45
* lon (lon) int64 0 90 180 270
* level (level) int64 100 200
Attributes:
long_name: Brunt-Vaisala frequency squared
units: s^-2
Limitations
-----------
All input array coordinates must follow standard naming (see doppyo.utils.get_lat_name(),
doppyo.utils.get_lon_name(), etc)
Pressure levels must be provided in units of hPa
To do
-----
Add switch for atmosphere/ocean input
"""
R = utils.constants().R_d
Cp = utils.constants().C_pd
g = utils.constants().g
if plevel_name is None:
plevel_name = utils.get_plevel_name(temp)
dTdp = temp.differentiate(coord=plevel_name)
pdR = temp[plevel_name] / R
nsq = ((-dTdp * pdR + (temp / Cp)) / (temp / g) ** 2).rename('nsq')
nsq.attrs['long_name']='Brunt-Vaisala frequency squared'
nsq.attrs['units']='s^-2'
return nsq.rename('nsq')
# ===================================================================================================
def Rossby_wave_number(u, v, u_clim, lat_name=None, lon_name=None):
    """
    Returns the square of the stationary Rossby wave number, Ks**2

    Parameters
    ----------
    u : xarray DataArray
        Zonal velocity with at least latitude and longitude coordinates
        (following standard naming - see Limitations)
    v : xarray DataArray
        Meridional velocity with at least latitude and longitude coordinates
        (following standard naming - see Limitations)
    u_clim : xarray DataArray
        Climatological zonal velocity with the same coordinates as u and v
    lat_name : str, optional
        Name of latitude coordinate. If None, doppyo will attempt to
        determine lat_name automatically
    lon_name : str, optional
        Name of longitude coordinate. If None, doppyo will attempt to
        determine lon_name automatically

    Returns
    -------
    Rossby_wave_number : xarray DataArray
        Array containing the square of the Rossby stationary wave number,
        named 'ks2'

    Raises
    ------
    ValueError
        If u, v and u_clim coordinates do not match

    Limitations
    -----------
    All input array coordinates must follow standard naming (see
    doppyo.utils.get_lat_name(), doppyo.utils.get_lon_name(), etc).
    This function utilises the windspharm package (a wrapper on
    pyspharm/SPHEREPACK), which requires a regular or Gaussian lat/lon
    grid. These calculations are not yet dask-compatible.

    Notes
    -----
    u_clim must have the same dimensions as u and v. A mean climatology,
    A_clim, can be projected over the time dimension in A using
    >>> doppyo.utils.anomalize(0*A, -A_clim)
    """
    if lat_name is None:
        lat_name = utils.get_lat_name(u)
    if lon_name is None:
        lon_name = utils.get_lon_name(u)

    if not (utils._equal_coords(u, v) & utils._equal_coords(u, u_clim)):
        raise ValueError('u, v and u_clim coordinates must match')

    # Gradient of the absolute vorticity via spherical harmonics -----
    vector_wind = wsh.xarray.VectorWind(u, v)
    _grad_x, grad_y = vector_wind.gradient(vector_wind.absolutevorticity())

    # Stationary wave number from the meridional absolute-vorticity
    # gradient, scaled by cos(lat)**2 and the Earth radius squared -----
    coslat_sq = xr.ufuncs.cos(grad_y[lat_name] / 180 * utils.constants().pi) ** 2
    ks2 = (coslat_sq *
           (grad_y * utils.constants().R_earth ** 2) / u_clim).rename('ks2')
    ks2.attrs['units'] = 'real number'
    ks2.attrs['long_name'] = 'Square of Rossby stationary wavenumber'

    return ks2
# ===================================================================================================
def Eady_growth_rate(u, v, gh, nsq, lat_name=None, lon_name=None, level_name=None):
    """
    Returns the square of the Eady growth rate

    Parameters
    ----------
    u : xarray DataArray
        Zonal velocity with at least latitude, longitude and level
        coordinates (following standard naming - see Limitations)
    v : xarray DataArray
        Meridional velocity with at least latitude, longitude and level
        coordinates (following standard naming - see Limitations)
    gh : xarray DataArray
        Geopotential height with at least latitude, longitude and level
        coordinates (following standard naming - see Limitations)
    nsq : xarray DataArray
        Brunt Väisälä frequency (squared) with at least latitude,
        longitude and level coordinates
    lat_name : str, optional
        Name of latitude coordinate. If None, doppyo will attempt to
        determine lat_name automatically
    lon_name : str, optional
        Name of longitude coordinate. If None, doppyo will attempt to
        determine lon_name automatically
    level_name : str, optional
        Name of level coordinate. If None, doppyo will attempt to
        determine level_name automatically

    Returns
    -------
    Eady^2 : xarray DataArray
        Array containing the square of the Eady growth rate, with
        units attribute 's^-2'

    Limitations
    -----------
    All input array coordinates must follow standard naming (see
    doppyo.utils.get_lat_name(), doppyo.utils.get_lon_name(), etc)
    """
    deg_to_rad = utils.constants().pi / 180
    if lat_name is None:
        lat_name = utils.get_lat_name(u)
    if lon_name is None:
        lon_name = utils.get_lon_name(u)
    if level_name is None:
        level_name = utils.get_level_name(u)

    # Coriolis parameter, f = 2 * Omega * sin(lat) -----
    coriolis = 2 * utils.constants().Omega * \
        xr.ufuncs.sin(gh[lat_name] * deg_to_rad)

    # Vertical shear of the horizontal wind speed, taken with respect to
    # geopotential height -----
    wind_speed = xr.ufuncs.sqrt(u ** 2 + v ** 2)
    shear = wind_speed.differentiate(coord=level_name) / \
        gh.differentiate(coord=level_name)

    eady2 = ((utils.constants().Ce * coriolis) * shear) ** 2 / nsq
    eady2.attrs['units'] = 's^-2'
    eady2.attrs['long_name'] = 'Square of Eady growth rate'

    return eady2.rename('Eady^2')
# ===================================================================================================
def thermal_wind(gh, plevel_lower, plevel_upper, lat_name=None, lon_name=None, plevel_name=None):
"""
Returns the thermal wind, (u_tw, v_tw) = 1/f x k x grad(thickness), where f = 2*Omega*sin(lat)
Author: <NAME>
Date: 15/07/2018
Parameters
----------
gh : xarray DataArray
Array containing fields of geopotential height with at least coordinates latitude, longitude
and level (following standard naming - see Limitations)
plevel_lower : value
Value of lower pressure level used to compute termal wind. Must exist in level coordinate of
gh
plevel_upper : value
Value of upper pressure level used to compute termal wind. Must exist in level coordinate of
gh
lat_name : str, optional
Name of latitude coordinate. If None, doppyo will attempt to determine lat_name
automatically
lon_name : str, optional
Name of longitude coordinate. If None, doppyo will attempt to determine lon_name
automatically
plevel_name : str, optional
Name of pressure level coordinate. If None, doppyo will attempt to determine plevel_name
automatically
Returns
-------
thermal_wind : xarray Dataset
Dataset containing the following variables:
u_tw; array containing the zonal component of the thermal wind
v_tw; array containing the meridonal component of the thermal wind
| |
<filename>diofant/core/basic.py
"""Base class for all the objects in Diofant."""
# Mapping lives in collections.abc since Python 3.3 and was removed from
# the collections namespace in Python 3.10; import it from its real home.
from collections.abc import Mapping
from collections import defaultdict
from itertools import zip_longest

from .cache import cacheit
from .compatibility import iterable, ordered
from .decorators import _sympifyit
from .sympify import SympifyError, sympify
class Basic:
    """
    Base class for all objects in Diofant.

    Always use ``args`` property, when accessing parameters of some instance.
    """

    # To be overridden with True in the appropriate subclasses.  These
    # flags allow cheap type dispatch (``expr.is_Add``) without isinstance
    # checks throughout the codebase.

    # general classification
    is_number = False
    is_Atom = False
    # symbol-like atoms
    is_Symbol = False
    is_Dummy = False
    is_Wild = False
    is_Function = False
    # arithmetic node types
    is_Add = False
    is_Mul = False
    is_Pow = False
    # numeric atoms
    is_Number = False
    is_Float = False
    is_Rational = False
    is_Integer = False
    is_NumberSymbol = False
    # calculus / structural node types
    is_Order = False
    is_Derivative = False
    is_Piecewise = False
    is_Poly = False
    # relational and boolean node types
    is_Relational = False
    is_Equality = False
    is_Boolean = False
    is_Not = False
    # matrix / vector node types
    is_Matrix = False
    is_MatMul = False
    is_Vector = False
def __new__(cls, *args):
obj = object.__new__(cls)
obj._mhash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
"""Return swallow copy of self."""
return self.func(*self.args)
def __reduce_ex__(self, proto):
"""Pickling support."""
return type(self), self.__getnewargs__(), self.__getstate__()
    def __getnewargs__(self):
        """Return the arguments passed to ``__new__`` when unpickling."""
        return self.args
    def __getstate__(self):
        """Return extra instance state for pickling (none beyond args)."""
        return {}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
# hash cannot be cached using cache_it because infinite recurrence
# occurs as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
    def _hashable_content(self):
        """Return a tuple of information about self that can be used to
        compute the hash.

        If a class defines additional attributes, like ``name`` in Symbol,
        then this method should be updated accordingly to return such
        relevant attributes.

        Defining more than _hashable_content is necessary if __eq__ has
        been defined by a class. See note about this in Basic.__eq__.
        """
        return self._args
    @classmethod
    def class_key(cls):
        """Nice order of classes.

        Returns a 3-tuple used for ordering classes when sorting
        expressions; ties are broken by the class name.
        """
        return 5, 0, cls.__name__
    @cacheit
    def sort_key(self, order=None):
        """Return a sort key.

        The key is a nested tuple built from :py:meth:`class_key`, the
        number of arguments and the (recursively computed) sort keys of
        the sorted arguments; it yields a canonical ordering of
        expressions.

        Examples
        ========
        >>> sorted([Rational(1, 2), I, -I], key=lambda x: x.sort_key())
        [1/2, -I, I]
        >>> [x, 1/x, 1/x**2, x**2, sqrt(x), root(x, 4), x**Rational(3, 2)]
        [x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
        >>> sorted(_, key=lambda x: x.sort_key())
        [x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
        """
        from .numbers import Integer
        # (argument count, per-argument sort keys) groups shorter
        # expressions before longer ones with the same class key.
        args = len(self.args), tuple(arg.sort_key(order)
                                     for arg in self._sorted_args)
        return self.class_key(), args, Integer(1).sort_key(), Integer(1)
    @_sympifyit('other', NotImplemented)
    def __eq__(self, other):
        """Return a boolean indicating whether a == b on the basis of
        their symbolic trees.

        Notes
        =====
        See [1]_. If a class that overrides __eq__() needs to retain the
        implementation of __hash__() from a parent class, the
        interpreter must be told this explicitly by setting __hash__ =
        <ParentClass>.__hash__. Otherwise the inheritance of __hash__()
        will be blocked, just as if __hash__ had been explicitly set to
        None.

        References
        ==========
        * http://docs.python.org/dev/reference/datamodel.html#object.__hash__
        """
        # Identity fast path.
        if self is other:
            return True
        # Structural equality requires the exact same class; no coercion
        # between different node types is attempted here.
        if type(self) != type(other):
            return False
        return self._hashable_content() == other._hashable_content()
    # Note, we always use the default ordering (lex) in __str__ and __repr__,
    # regardless of the global setting. See issue sympy/sympy#5487.
    def __repr__(self):
        """Return the srepr string of self (lex ordering)."""
        from ..printing import srepr
        return srepr(self, order=None)
    def __str__(self):
        """Return the human-readable string form of self (lex ordering)."""
        from ..printing import sstr
        return sstr(self, order=None)
    def _repr_pretty_(self, p, cycle):
        """IPython hook: render self with the 2D pretty printer."""
        from ..printing import pretty
        p.text(pretty(self))
    def _repr_latex_(self):
        """IPython hook: render self as a LaTeX equation."""
        from ..printing import latex
        return latex(self, mode='equation')
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and can't
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> e = 1 + x + 2*sin(y + I*pi)
>>> e.atoms()
{1, 2, I, pi, x, y}
If one or more types are given, the results will contain only
those types of atoms.
>>> e.atoms(Symbol)
{x, y}
>>> e.atoms(Number)
{1, 2}
>>> e.atoms(Number, NumberSymbol)
{1, 2, pi}
>>> e.atoms(Number, NumberSymbol, I)
{1, 2, I, pi}
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> e.atoms(x)
{x, y}
Be careful to check your assumptions when using the implicit option
since ``Integer(1).is_Integer = True`` but ``type(Integer(1))`` is
``One``, a special type of diofant atom, while ``type(Integer(2))``
is type ``Integer`` and will find all integers in an expression:
>>> e.atoms(Integer(1))
{1}
>>> e.atoms(Integer(2))
{1, 2}
Finally, arguments to atoms() can select more than atomic atoms: any
diofant type can be listed as an argument and those types of "atoms"
as found in scanning the arguments of the expression recursively:
>>> from diofant.core.function import AppliedUndef
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
{I*pi, 2*sin(y + I*pi)}
>>> f = Function('f')
>>> e = 1 + f(x) + 2*sin(y + I*pi)
>>> e.atoms(Function)
{f(x), sin(y + I*pi)}
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
{f(x)}
"""
if types:
types = tuple(t if isinstance(t, type) else type(t) for t in types)
else:
types = Atom,
return set().union(*[self.find(t) for t in types])
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
this is not true. e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all
symbols except those. Derivative keeps track of symbols with respect
to which it will perform a derivative; those are
bound variables, too, so it has its own free_symbols method.
Any other method that uses bound variables should implement a
free_symbols method.
"""
return set().union(*[a.free_symbols for a in self.args])
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
operators. For instance in Diofant the the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
if callable(self) and hasattr(self, '__call__'):
return self(*args)
elif self.args:
newargs = [sub.rcall(*args) for sub in self.args]
return type(self)(*newargs)
else:
return self
@property
def func(self):
"""The top-level function in an expression.
The following should hold for all objects::
x == x.func(*x.args)
Examples
========
>>> a = 2*x
>>> a.func
<class 'diofant.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
    @property
    def args(self):
        """Returns a tuple of arguments of 'self'.

        The returned tuple is the immutable, canonical decomposition of
        the expression; never mutate the underlying storage.

        Examples
        ========
        >>> cot(x).args
        (x,)
        >>> (x*y).args
        (x, y)
        """
        return self._args
    @property
    def _sorted_args(self):
        """
        The same as ``args``.  Derived classes which don't fix an
        order on their arguments should override this method to
        produce the sorted representation (used e.g. by ``sort_key``).
        """
        return self.args
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> (1 + x*y).subs({x: pi})
pi*y + 1
>>> (1 + x*y).subs({x: pi, y: 2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs({x**2: y})
y**2 + y
To replace only the x**2 but not the x**4, | |
value starts
status = PTP_VALUE
else:
log.cl_error("invalid [%s]: key contains with illegal characters",
param)
return None
else:
assert status == PTP_VALUE
if char == "\\":
if escape:
value += "\\"
escape = True
# The escape will be handled together with next char.
continue
if char == delimiter:
if escape:
value += delimiter
escape = False
else:
# The key/value pair finishes
ret = add_parameter_pair(param_dict, key, value)
if ret:
log.cl_error("invalid [%s]: ambiguous values",
param)
return None
status = PTP_INIT
value = ""
key = ""
continue
if char == "\n":
if escape:
escape = False
continue
log.cl_error("invalid [%s]: multiple lines",
param)
return None
if escape:
value += "\\"
escape = False
value += char
if status == PTP_KEY:
ret = add_parameter_pair(param_dict, key, True)
if ret:
log.cl_error("invalid [%s]: ambiguous values",
param)
return None
elif status == PTP_VALUE:
ret = add_parameter_pair(param_dict, key, value)
if ret:
log.cl_error("invalid [%s]: ambiguous values",
param)
return None
return param_dict
def parameter_dict_merge(log, param_dicts):
    """
    Merge a sequence of parameter dicts into a single dict.

    Returns the merged dict, or None if any key appears with conflicting
    values across the input dicts.
    """
    merged = {}
    for current in param_dicts:
        for key, value in current.items():
            if key in merged:
                if merged[key] != value:
                    log.cl_error("ambiguous values for key [%s] in parameters",
                                 key)
                    return None
            else:
                merged[key] = value
    return merged
def table_add_field_names(table, field_names):
    """
    Set the field names of a table, colored as table headers.
    """
    table.field_names = [clog.colorful_message(clog.COLOR_TABLE_FIELDNAME,
                                               name)
                         for name in field_names]
def table_set_sortby(table, field_name):
    """
    Sort the table by the given field.

    The sortby attribute must match the colored header produced by
    table_add_field_names, so color the name the same way here.
    """
    colored_name = clog.colorful_message(clog.COLOR_TABLE_FIELDNAME, field_name)
    table.sortby = colored_name
def print_field(log, field_name, value):
    """
    Print a single "name: value" field with a colored name prefix.
    """
    prefix = clog.colorful_message(clog.COLOR_TABLE_FIELDNAME,
                                   "%s: " % field_name)
    log.cl_stdout("%s%s", prefix, value)
def cmd_exit(log, exist_status):
    """
    Log the command result and exit the process with the given status.
    """
    if not exist_status:
        log.cl_debug("command succeeded")
    else:
        log.cl_debug("command failed with status %s", exist_status)
    sys.exit(exist_status)
TEST_SKIPPED = 1
def run_test(log, workspace, only_test_names, first_test_names,
             local_host, reverse_order, start, stop, skip_basic,
             test_functs, args):
    """
    Run the selected test functions and print a summary table.

    If only is specified together with start/stop, start/stop option will
    be ignored.
    If only is specified together with reverse_order, reverse_order option
    will be ignored.
    Only/first tests can repeat tests, e.g. first=testA,testA would repeat
    testA for twice.

    :param skip_basic: Do not run basic test.
    :param test_functs: A list of function that has the argument types of:
        test_funct(log, test_workspace, *args)
    :return: 0 on success, -1 on failure. After the first failure the
        remaining tests are reported as "Not Started".
    """
    # pylint: disable=too-many-branches,too-many-locals
    # pylint: disable=too-many-statements,global-statement
    # Index tests by function name for quick lookup of --only/--first/
    # --start/--stop arguments.
    test_dict = {}
    for test_funct in test_functs:
        test_dict[test_funct.__name__] = test_funct
    if len(test_functs) == 0:
        log.cl_error("no test to run")
        return -1
    # The "basic" test is required to be the first entry of test_functs.
    if test_functs[0].__name__ != "basic":
        log.cl_error("the first test is not [basic]")
        return -1
    basic_test = test_functs[0]
    # Reverse order won't change the order of first tests
    selected_tests = []
    if first_test_names is not None:
        for test_name in first_test_names:
            if test_name not in test_dict:
                log.cl_error("first test [%s] does not exist", test_name)
                return -1
            test_funct = test_dict[test_name]
            selected_tests.append(test_funct)
    if only_test_names is not None:
        for test_name in only_test_names:
            if test_name not in test_dict:
                log.cl_error("only test [%s] does not exist", test_name)
                return -1
            test_funct = test_dict[test_name]
            selected_tests.append(test_funct)
    else:
        # No --only given: select the [start, stop] slice of test_functs.
        start_index = 0
        if start is not None:
            if start not in test_dict:
                log.cl_error("start test [%s] does not exist", start)
                return -1
            for test_funct in test_functs:
                if test_funct.__name__ == start:
                    break
                start_index += 1
            if start_index == len(test_functs):
                log.cl_error("failed to find the index of start test [%s]",
                             start)
                return -1
        stop_index = len(test_functs) - 1
        if stop is not None:
            if stop not in test_dict:
                log.cl_error("stop test [%s] does not exist", stop)
                return -1
            stop_index = 0
            for test_funct in test_functs:
                if test_funct.__name__ == stop:
                    break
                stop_index += 1
            if stop_index == len(test_functs):
                log.cl_error("failed to find the index of start test [%s]",
                             stop)
                return -1
        if stop_index < start_index:
            log.cl_error("start test [%s] is behind stop test [%s]",
                         start, stop)
            return -1
        test_index = 0
        for test_funct in test_functs:
            if test_index > stop_index:
                break
            if test_index >= start_index:
                selected_tests.append(test_funct)
            test_index += 1
    # Apply reverse_order, keeping "basic" (if selected) first, and then
    # make sure "basic" leads the run unless skip_basic is set.
    if len(selected_tests) == 0:
        pass
    elif selected_tests[0].__name__ != "basic":
        if reverse_order:
            selected_tests.reverse()
        if not skip_basic:
            selected_tests.insert(0, basic_test)
    elif reverse_order:
        # "basic" is already first; reverse only the remaining tests.
        other_tests = selected_tests[1:]
        other_tests.reverse()
        if skip_basic:
            selected_tests = other_tests
        else:
            selected_tests = [basic_test] + other_tests
    if (len(selected_tests) > 0 and selected_tests[0].__name__ != "basic" and
            not skip_basic):
        selected_tests.insert(0, basic_test)
    if skip_basic:
        # Drop any "basic" entries that slipped in through --only/--first.
        former_selected_tests = selected_tests
        selected_tests = []
        for selected_test in former_selected_tests:
            if selected_test.__name__ == "basic":
                continue
            selected_tests.append(selected_test)
    table = prettytable.PrettyTable()
    table.field_names = ["Test name", "Result", "Duration"]
    exit_status = 0
    for test_func in selected_tests:
        test_name = test_func.__name__
        # After the first failure, remaining tests are skipped but still
        # reported in the table.
        if exit_status:
            table.add_row([test_name, "Not Started", "0 seconds"])
            continue
        # Each test gets its own timestamped workspace directory.
        test_workspace = (workspace + "/" +
                          time_util.local_strftime(time_util.utcnow(),
                                                   "%Y-%m-%d-%H_%M_%S") +
                          "-" + test_name + "-" + utils.random_word(8))
        command = "mkdir -p %s" % test_workspace
        retval = local_host.sh_run(log, command)
        if retval.cr_exit_status:
            log.cl_error("failed to run command [%s] on host [%s], "
                         "ret = [%d], stdout = [%s], stderr = [%s]",
                         command,
                         local_host.sh_hostname,
                         retval.cr_exit_status,
                         retval.cr_stdout,
                         retval.cr_stderr)
            table.add_row([test_name, "Not Started", "0 seconds"])
            exit_status = -1
            continue
        log.cl_info("starting test [%s]", test_name)
        start_time = time.time()
        # Negative return means failure, TEST_SKIPPED means skipped,
        # anything else counts as a pass.
        ret = test_func(log, test_workspace, *args)
        duration_time = time.time() - start_time
        if ret < 0:
            log.cl_error("test [%s] failed, duration %f seconds", test_name,
                         duration_time)
            table.add_row([test_name, "Failed", "%f seconds" % duration_time])
            exit_status = -1
            continue
        if ret == TEST_SKIPPED:
            log.cl_warning("test [%s] skipped, duration %f seconds", test_name,
                           duration_time)
            table.add_row([test_name, "Skipped", "%f seconds" % duration_time])
        else:
            log.cl_info("test [%s] passed, duration %f seconds", test_name,
                        duration_time)
            table.add_row([test_name, "Passed", "%f seconds" % duration_time])
    # Tests that were never selected are reported as excluded.
    for test_funct in test_functs:
        if test_funct not in selected_tests:
            test_name = test_funct.__name__
            table.add_row([test_name, "Excluded", "0"])
    log.cl_stdout(table)
    return exit_status
def get_table_field(log, host, field_number, command, ignore_status=False):
    """
    Return a dict for a given field of a table printed by a command.

    The key is the first column of each row (service/host/lustrefs/...
    name); the value is the column at index field_number. The header line
    is skipped. Return None on failure.
    """
    retval = host.sh_run(log, command)
    if retval.cr_exit_status and not ignore_status:
        log.cl_error("failed to run command [%s] on host [%s], "
                     "ret = %d, stdout = [%s], stderr = [%s]",
                     command, host.sh_hostname,
                     retval.cr_exit_status, retval.cr_stdout,
                     retval.cr_stderr)
        return None
    lines = retval.cr_stdout.splitlines()
    if len(lines) < 1:
        log.cl_error("no output [%s] of command [%s] on host [%s]",
                     retval.cr_stdout, command, host.sh_hostname)
        return None
    field_dict = {}
    # Skip the header line; every data row must have the wanted column.
    for line in lines[1:]:
        fields = line.split()
        if len(fields) <= field_number:
            log.cl_error("no field with index [%d] in output [%s] of "
                         "command [%s] on host [%s]",
                         field_number, retval.cr_stdout, command,
                         host.sh_hostname)
            return None
        field_dict[fields[0]] = fields[field_number]
    return field_dict
def get_status_dict(log, host, command, ignore_exit_status=True,
                    strip_value=False):
    """
    Return status dict from stdout of command with format of "$KEY: $VALUE".

    The separator between key and value may be ": " or ":" followed by a
    tab. Returns a dict mapping keys to values, or None on failure (bad
    line format, duplicated key, or command failure when
    ignore_exit_status is False).

    :param ignore_exit_status: if True, still try to parse the output when
        the command exits nonzero (some commands print valid fields even
        on failure).
    :param strip_value: if True, strip whitespace around each value; a
        value that is empty after stripping is treated as invalid.
    """
    retval = host.sh_run(log, command)
    if retval.cr_exit_status:
        # Some commands still print fields when return failure
        if ignore_exit_status:
            log.cl_debug("failed to run command [%s] on host [%s], "
                         "ret = %d, stdout = [%s], stderr = [%s]",
                         command, host.sh_hostname,
                         retval.cr_exit_status, retval.cr_stdout,
                         retval.cr_stderr)
        else:
            log.cl_error("failed to run command [%s] on host [%s], "
                         "ret = %d, stdout = [%s], stderr = [%s]",
                         command, host.sh_hostname,
                         retval.cr_exit_status, retval.cr_stdout,
                         retval.cr_stderr)
            return None
    status_dict = {}
    for line in retval.cr_stdout.splitlines():
        if len(line) == 0:
            continue
        split_index = line.find(": ")
        if split_index < 0:
            split_index = line.find(":\t")
        if split_index < 0:
            log.cl_error("can not find [: ] or [:\t] in output line [%s] of "
                         "command [%s]", line, command)
            return None
        if split_index == 0:
            log.cl_error("no key before [: ] or [:\t] in output line [%s] of "
                         "command [%s]", line, command)
            return None
        if split_index + 2 >= len(line):
            log.cl_error("no value after [: ] or [:\t] in output line [%s] of "
                         "command [%s]", line, command)
            return None
        key = line[0:split_index]
        value = line[split_index + 2:]
        if strip_value:
            value = value.strip()
            if len(value) == 0:
                # Bug fix: this error was previously only logged while the
                # empty value was silently kept; treat it as a failure like
                # every other malformed-line case.
                log.cl_error("empty value for key [%s]", key)
                return None
        if key in status_dict:
            log.cl_error("multiple values for key [%s] of command [%s]",
                         key, command)
            return None
        status_dict[key] = value
    return status_dict
def parse_field_string(log, field_string, quick_fields, table_fields,
all_fields, print_table=False, print_status=False):
"""
Return field names.
"""
# pylint: disable=too-many-branches
if field_string is None:
if not print_table:
field_names = all_fields
elif print_status:
field_names = table_fields
else:
field_names = quick_fields
elif isinstance(field_string, tuple):
field_names = list(field_string)
for field_name in field_names:
if field_name not in all_fields:
log.cl_error("unknown field [%s]", field_name)
return None
if field_names[0] != quick_fields[0]:
field_names.insert(0, quick_fields[0])
if len(field_names) != len(set(field_names)):
log.cl_error("duplicated fields in %s",
field_names)
return None
elif isinstance(field_string, | |
<reponame>JonathanCasey/asana_extensions
#!/usr/bin/env python3
"""
Move Tasks Rule functionality to implement the generic interface components
defined by the metaclass.
Module Attributes:
logger (Logger): Logger for this module.
(C) Copyright 2021 <NAME>. All Rights Reserved Worldwide.
"""
import logging
import asana
from asana_extensions.asana import client as aclient
from asana_extensions.asana import utils as autils
from asana_extensions.general import config
from asana_extensions.general.exceptions import * # pylint: disable=wildcard-import
from asana_extensions.rules import rule_meta
logger = logging.getLogger(__name__)
class MoveTasksRule(rule_meta.Rule):
"""
Rules to move tasks to the specified destination based on the specified
conditions.
Class Attributes:
N/A
Instance Attributes:
_rules_params ({str:str/int/bool/etc}): The generic dictionary that
defines the parameters for this rule.
[inherited from Rule]:
_rule_id (str): The id used as the section name in the rules conf.
_rule_type (str): The type of rule, such as "move tasks".
_test_report_only (bool): Whether or not this is for reporting for
testing only or whether rule is live.
_is_valid (bool or None): Cached value as to whether the rule is valid.
If not validated yet, will be None.
"""
def __init__(self, rule_params, **kwargs):
"""
Create the Move Tasks Rule.
Args:
rules_params ({str:str/int/bool/etc}): The generic dictionary that
defines the parameters for this rule.
See parent(s) for required kwargs.
Raises:
(AssertionError): Invalid data.
"""
super().__init__(**kwargs)
is_project_given = rule_params['project_name'] is not None \
or rule_params['project_gid'] is not None
assert rule_params['is_my_tasks_list'] is False \
or rule_params['user_task_list_gid'] is None, "Cannot" \
+ " specify 'for my tasks list' and 'user task list gid'" \
+ " together."
is_user_task_list_given = rule_params['is_my_tasks_list'] \
or rule_params['user_task_list_gid'] is not None
assert is_project_given ^ is_user_task_list_given, "Must specify to" \
+ " use a project or user task list, but not both."
assert rule_params['workspace_name'] is not None \
or rule_params['workspace_gid'] is not None, "Must specify" \
+ " workspace."
is_time_given = rule_params['min_time_until_due_str'] is not None \
or rule_params['max_time_until_due_str'] is not None
is_time_parsed = rule_params['min_time_until_due'] is not None \
or rule_params['max_time_until_due'] is not None
assert is_time_given == is_time_parsed, "Failed to parse min/max" \
+ " time until due -- check format."
assert is_time_given ^ rule_params['match_no_due_date'], "Must" \
+ " specify either min/max time until due or match no due" \
+ " date (but not both)."
self._rule_params = rule_params
@classmethod
def load_specific_from_conf(cls, rules_cp, rule_id, rule_params=None,
**kwargs):
"""
Loads the rule-specific config items for this rule from the
configparsers from files provided. Then creates the rule from the data
provided and data loaded.
Args:
rule_cp (configparser): The full configparser from the rules conf.
rule_id (str): The ID name for this rule as it appears as the
section header in the rules_cp.
rule_params ({str: str/int/bool/etc}): The rule parameters loaded from
config. Updated by super classes with their results. Final sub
class expected to be None.
Note: kwargs contains other args to pass thru to constructor.
Returns:
rule (Rule<> or None): The Rule<> object created and loaded from
config, where Rule<> is a subclass of Rule (e.g. MoveTasksRule).
Will return None if failed to load and create due to invalid config.
Abstract parent classes such as Rule will return None.
Raises:
(AssertionError): Invalid data.
"""
assert rule_params is None, "Should not pass anything in for" \
+ " `rule_params`"
try:
rule_params = {}
super_params = {}
super().load_specific_from_conf(rules_cp, rule_id, super_params,
**kwargs)
rule_params['project_name'] = rules_cp.get(rule_id, 'project name',
fallback=None)
rule_params['project_gid'] = rules_cp.getint(rule_id, 'project gid',
fallback=None)
rule_params['is_my_tasks_list'] = rules_cp.getboolean(rule_id,
'for my tasks list', fallback=None)
rule_params['user_task_list_gid'] = rules_cp.getint(rule_id,
'user task list id', fallback=None)
rule_params['workspace_name'] = rules_cp.get(rule_id,
'workspace name', fallback=None)
rule_params['workspace_gid'] = rules_cp.getint(rule_id,
'workspace gid', fallback=None)
rule_params['match_no_due_date'] = rules_cp.getboolean(rule_id,
'no due date', fallback=False)
rule_params['min_time_until_due_str'] = rules_cp.get(rule_id,
'min time until due', fallback=None)
rule_params['min_time_until_due'] = cls.parse_timedelta_arg(
rule_params['min_time_until_due_str'])
rule_params['max_time_until_due_str'] = rules_cp.get(rule_id,
'max time until due', fallback=None)
rule_params['max_time_until_due'] = cls.parse_timedelta_arg(
rule_params['max_time_until_due_str'])
rule_params['min_due_assumed_time_str'] = rules_cp.get(rule_id,
'assumed time for min due', fallback=None)
rule_params['min_due_assumed_time'] = cls.parse_time_arg(
rule_params['min_due_assumed_time_str'], None)
rule_params['max_due_assumed_time_str'] = rules_cp.get(rule_id,
'assumed time for max due', fallback=None)
rule_params['max_due_assumed_time'] = cls.parse_time_arg(
rule_params['max_due_assumed_time_str'], None)
rule_params['src_sections_include_names'] = \
config.parse_list_from_conf_string(rules_cp.get(rule_id,
'src sections include names', fallback=None),
config.CastType.STRING, delim=None, delim_newlines=True,
strip_quotes=True)
rule_params['src_sections_include_gids'] = \
config.parse_list_from_conf_string(rules_cp.get(rule_id,
'src sections include gids', fallback=None),
config.CastType.INT, delim_newlines=True)
rule_params['src_sections_exclude_names'] = \
config.parse_list_from_conf_string(rules_cp.get(rule_id,
'src sections exclude names', fallback=None),
config.CastType.STRING, delim=None, delim_newlines=True,
strip_quotes=True)
rule_params['src_sections_exclude_gids'] = \
config.parse_list_from_conf_string(rules_cp.get(rule_id,
'src sections exclude gids', fallback=None),
config.CastType.INT, delim_newlines=True)
rule_params['dst_section_name'] = rules_cp.get(rule_id,
'dst section name', fallback=None)
rule_params['dst_section_gid'] = rules_cp.getint(rule_id,
'dst section gid', fallback=None)
except config.UnsupportedFormatError as ex:
logger.error('Failed to parse Move Tasks Rule from config. Check'
+ f' time args. Exception: {str(ex)}')
return None
except KeyError as ex:
logger.error('Failed to parse Move Tasks Rule from config. Check'
+ f' keys. Exception: {str(ex)}')
return None
except TimeframeArgDupeError as ex:
logger.error('Failed to parse Move Tasks Rule from config. Check'
+ f' timeframe args. Exception: {str(ex)}')
return None
except ValueError as ex:
logger.error('Failed to parse Move Tasks Rule from config. Check'
+ f' strong typed values. Exception: {str(ex)}')
return None
try:
rule = cls(rule_params, **kwargs, **super_params, rule_id=rule_id)
return rule
except AssertionError as ex:
logger.error(f'Failed to create Move Tasks Rule from config: {ex}')
return None
@classmethod
def get_rule_type_names(cls):
"""
Get the list of names that can be used as the 'rule type' in the rules
conf to identify this rule.
Returns:
([str]): A list of names that are valid to use as the type for this
rule.
"""
# pylint: disable=multi-line-list-first-line-item
# pylint: disable=multi-line-list-eol-close, closing-comma
return ['move tasks', 'auto-promote tasks', 'auto-promote',
'auto promote tasks', 'auto promote', 'promote tasks']
def _sync_and_validate_with_api(self):
        """
        Sync configuration data with the API and further validate, storing any
        newly prepared configuration info.

        Mutates `self._rule_params` in place: configured names are resolved to
        gids, and the derived keys 'effective_project_gid' and
        'src_net_include_section_gids' are added.

        Returns:
          (bool): True if completed successfully; False if failed for any
            reason (this should probably catch nearly all exceptions).
        """
        params = self._rule_params
        try:
            # Resolve the workspace first; later lookups are scoped to it.
            if params['workspace_name'] is not None:
                params['workspace_gid'] = aclient.get_workspace_gid_from_name(
                        params['workspace_name'], params['workspace_gid'])

            if params['project_name'] is not None:
                # For now, hardcoded for non-archived project
                # Could use None, but workaround for now is to specify by gid
                params['project_gid'] = aclient.get_project_gid_from_name(
                        params['workspace_gid'], params['project_name'],
                        params['project_gid'])

            if params['is_my_tasks_list']:
                params['user_task_list_gid'] = aclient.get_user_task_list_gid(
                        params['workspace_gid'], True)

            # Per the __init__() assertions, exactly one of these is set.
            if params['project_gid'] is not None:
                params['effective_project_gid'] = params['project_gid']
            elif params['user_task_list_gid'] is not None:
                params['effective_project_gid'] = params['user_task_list_gid']

            # Always want to default to include for move task rule
            params['src_net_include_section_gids'] = \
                    autils.get_net_include_section_gids(
                        params['effective_project_gid'],
                        params['src_sections_include_names'],
                        params['src_sections_include_gids'],
                        params['src_sections_exclude_names'],
                        params['src_sections_exclude_gids'])

            if params['dst_section_name'] is not None:
                params['dst_section_gid'] = aclient.get_section_gid_from_name(
                        params['effective_project_gid'],
                        params['dst_section_name'],
                        params['dst_section_gid'])
        except (asana.error.AsanaError, aclient.ClientCreationError,
                aclient.DataNotFoundError, aclient.DuplicateNameError,
                aclient.MismatchedDataError, autils.DataConflictError,
                autils.DataMissingError) as ex:
            logger.error(f'Failed to sync and validate rule "{self._rule_id}"'
                    + f' with the API. Skipping rule. Exception: {str(ex)}')
            return False
        return True
def execute(self, force_test_report_only=False):
"""
Execute the rule. This should likely check if it is valid and the
criteria to run the rule has been met (if any). If either the rule is
set to test report only or the caller of this method specified to force
to be test report only, no changes will be made via the API -- only
simulated results will be reported (but still based on data from API).
This should ideally catch all errors except ones so catastrophic that
the operation of the entire app should cease immediately. Callers of
this method are not intended to require try/except handling for things
like mis-configured rules, etc.
Args:
force_test_report_only (bool): If True, will ensure this runs as a
test report only with no changes made via the API; if False, will
defer to the `_test_report_only` setting of the rule.
Returns:
(bool): True if fully completed without any errors; False any errors,
regardless of whether it resulted in partial or full failure.
"""
if not self.is_valid():
logger.error(f'Failed to execute "{self._rule_id}" since invalid.')
return False
if not self.is_criteria_met():
logger.info(f'Skipping execution of "{self._rule_id}" completely'
+ ' since criteria not met.')
return False
any_errors = False
rps = self._rule_params # Shorten name since used so much here
tasks_to_move = []
for src_sect_gid in rps['src_net_include_section_gids']:
# Could lock in a dt_base before loop, but likely not an issue
# For now, hardcoded for incomplete tasks
try:
tasks_to_move.extend(autils.get_filtered_tasks(src_sect_gid,
rps['match_no_due_date'],
rps['min_time_until_due'], rps['max_time_until_due'],
rps['min_due_assumed_time'],
rps['max_due_assumed_time']))
except (asana.error.AsanaError, aclient.ClientCreationError) as ex:
logger.error(f'Failed to filter tasks for "{self._rule_id}"'
+ f' in section [{src_sect_gid}]. Skipping | |
<reponame>baotuquan/impala<filename>tests/util/auto_scaler.py
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import time
import logging
import os
import pipes
from subprocess import check_call
from tests.common.impala_cluster import ImpalaCluster
from threading import Event, Thread
IMPALA_HOME = os.environ["IMPALA_HOME"]
class AutoScaler(object):
  """This class implements a simple autoscaling algorithm: if queries queue up for a
  configurable duration, a new executor group is started. Likewise, if the number of
  concurrently running queries indicated that an executor group can be removed, such
  measure is taken.

  Users of this class can start an auto scaler by calling start() and must call stop()
  before exiting (see main() below for an example).

  This class only uses the default admission control pool.
  """
  DEFAULT_POOL_NAME = "default-pool"

  def __init__(self, executor_slots, group_size, start_batch_size=0, max_groups=0,
               wait_up_s=0, wait_down_s=0, coordinator_slots=128):
    """Configures the scaler; non-positive optional args fall back to defaults."""
    # Number of queries that can run concurrently on each executor
    self.executor_slots = executor_slots
    self.coordinator_slots = coordinator_slots
    # Number of executors per executor group
    self.group_size = group_size
    # New executor groups will be started in increments of this size
    self.start_batch_size = group_size
    if start_batch_size > 0:
      self.start_batch_size = start_batch_size
    # Maximum number of executor groups. We only have 10 TCP ports free on our
    # miniclusters and we need one for the dedicated coordinator. Must use floor
    # division: under Python 3, "9 / group_size" yields a float (e.g. 4.5), which
    # would let the loop start one group more than the ports allow.
    self.max_groups = 9 // self.group_size
    # max_groups can further bound the maximum number of groups we are going to start,
    # but we won't start more than possible.
    if max_groups > 0 and max_groups < self.max_groups:
      self.max_groups = max_groups
    # Number of seconds to wait before scaling up/down
    self.scale_wait_up_s = 5
    if wait_up_s > 0:
      self.scale_wait_up_s = wait_up_s
    self.scale_wait_down_s = 5
    if wait_down_s > 0:
      self.scale_wait_down_s = wait_down_s
    self.groups = []
    self.num_groups = 0
    # Stopwatches to track how long the conditions for scaling up/down have been met.
    self.scale_up_sw = time.time()
    self.scale_down_sw = time.time()
    self.loop_thread = None
    # Event to signal that the control loop should exit
    self.stop_ev = Event()

  def get_cluster(self):
    """Returns a handle to the end-to-end test minicluster."""
    return ImpalaCluster.get_e2e_test_cluster()

  def get_coordinator(self):
    """Returns the first impalad, which is the dedicated coordinator."""
    cluster = self.get_cluster()
    assert len(cluster.impalads) > 0
    return cluster.get_first_impalad()

  def get_service(self):
    """Returns the coordinator's web service wrapper."""
    return self.get_coordinator().service

  def get_client(self):
    """Creates a new HS2 client connected to the coordinator."""
    return self.get_coordinator().service.create_hs2_client()

  def group_name(self, idx):
    # By convention, group names must start with their associated resource pool name
    # followed by a "-".
    return "%s-group-%s" % (self.DEFAULT_POOL_NAME, idx)

  def start_base_cluster(self):
    """Starts the base cluster consisting of an exclusive coordinator, catalog, and
    statestore. Does not add any executors."""
    logging.info("Starting base cluster (coordinator, catalog, statestore)")
    cluster_args = ["--impalad_args=-executor_groups=coordinator"]
    self._start_impala_cluster(cluster_args, cluster_size=1,
                               executor_slots=self.coordinator_slots,
                               expected_num_executors=0, add_executors=False)
    logging.info("Done, number of running executor groups: %s" % self.num_groups)

  def start_group(self):
    """Starts an executor group. The name of the group is automatically determined based
    on the current number of total executor groups. Executors in the group will be started
    in batches."""
    self.num_groups += 1
    name = self.group_name(self.num_groups)
    desc = "%s:%s" % (name, self.group_size)
    logging.info("Starting executor group %s with %s members" % (name, self.group_size))
    cluster_args = ["--impalad_args=-executor_groups=%s" % desc]
    batch_size = self.start_batch_size
    num_started = 0
    # Executors of previously started groups are still running and expected.
    num_expected = (self.num_groups - 1) * self.group_size
    while (num_started < self.group_size):
      to_start = min(batch_size, self.group_size - num_started)
      num_expected += to_start
      if to_start == 1:
        start_msg = "Starting executor %s" % (num_started + 1)
      else:
        start_msg = "Starting executors %s-%s" % (num_started + 1,
                                                  num_started + to_start)
      logging.info(start_msg)
      self._start_impala_cluster(cluster_args, cluster_size=to_start,
                                 executor_slots=self.executor_slots,
                                 expected_num_executors=num_expected, add_executors=True)
      num_started += to_start
    logging.info("Done, number of running executor groups: %s" % self.num_groups)

  def stop_group(self):
    """Stops the executor group that was added last."""
    name = self.group_name(self.num_groups)
    group_hosts = self.get_groups()[name]
    logging.info("Stopping executor group %s" % name)
    for host in group_hosts:
      logging.debug("Stopping host %s" % host)
      # Graceful shutdown through the coordinator.
      query = ":shutdown('%s');" % host
      self.execute(query)
    self.wait_for_group_gone(name)
    self.num_groups -= 1
    logging.info("Done, number of running executor groups: %s" % self.num_groups)

  def wait_for_group_gone(self, group_name, timeout=120):
    """Waits until all executors in group 'group_name' have unregistered themselves from
    the coordinator's cluster membership view."""
    end = time.time() + timeout
    while time.time() < end:
      groups = self.get_groups()
      if group_name not in groups:
        return
      time.sleep(0.5)
    assert False, "Timeout waiting for group %s to shut down" % group_name

  def get_groups(self):
    """Returns a dict of executor group name -> list of member hosts."""
    return self.get_service().get_executor_groups()

  def execute(self, query):
    """Runs 'query' on the coordinator through a fresh HS2 client."""
    return self.get_client().execute(query)

  def get_num_queued_queries(self):
    """Returns the number of queries currently queued in the default pool on the
    coordinator."""
    return self.get_service().get_num_queued_queries(pool_name=self.DEFAULT_POOL_NAME)

  def get_num_running_queries(self):
    """Returns the number of queries currently running in the default pool on the
    coordinator."""
    return self.get_service().get_num_running_queries(self.DEFAULT_POOL_NAME)

  def loop(self):
    """Controls whether new executor groups need to be started or existing ones need to be
    stopped, based on the number of queries that are currently queued and running.
    """
    while not self.stop_ev.is_set():
      now = time.time()
      num_queued = self.get_num_queued_queries()
      num_running = self.get_num_running_queries()
      capacity = self.executor_slots * self.num_groups
      logging.debug("queued: %s, running: %s, capacity: %s" % (num_queued, num_running,
                    capacity))
      # Reset the scale-up stopwatch whenever nothing is queued; a group is
      # only added after queries have been queueing for scale_wait_up_s.
      if num_queued == 0:
        self.scale_up_sw = now
      scale_up = self.scale_up_sw < now - self.scale_wait_up_s
      if scale_up and self.num_groups < self.max_groups:
        self.start_group()
        self.scale_up_sw = time.time()
        self.scale_down_sw = self.scale_up_sw
        continue
      # Scale down only after a whole group's worth of slots has been idle
      # for scale_wait_down_s. With zero groups, surplus <= 0 keeps resetting
      # the stopwatch, so we never try to stop a non-existent group.
      surplus = capacity - num_running
      if surplus < self.executor_slots:
        self.scale_down_sw = now
      if self.scale_down_sw < now - self.scale_wait_down_s:
        self.stop_group()
        self.scale_up_sw = time.time()
        self.scale_down_sw = self.scale_up_sw
        continue
      time.sleep(1)

  def start(self):
    """Starts a base cluster with coordinator and statestore and the control loop to start
    and stop additional executor groups."""
    self.start_base_cluster()
    assert self.loop_thread is None
    self.loop_thread = Thread(target=self.loop)
    self.loop_thread.start()

  def stop(self):
    """Stops the AutoScaler and its cluster."""
    if self.stop_ev.is_set():
      return
    self.stop_ev.set()
    if self.loop_thread:
      self.loop_thread.join()
      self.loop_thread = None
    self._kill_whole_cluster()

  def _start_impala_cluster(self, options, cluster_size, executor_slots,
                            expected_num_executors, add_executors):
    """Starts an Impala cluster and waits for all impalads to come online.

    If 'add_executors' is True, new executors will be added to the cluster and the
    existing daemons will not be restarted. In that case 'cluster_size' must specify the
    number of nodes that will be added and 'expected_num_executors' must be the total
    expected number of executors after the additional ones have started.

    If 'add_executors' is false, 'cluster_size' must be 1 and a single exclusive
    coordinator will be started (together with catalog and statestore).

    Note: extends the caller's 'options' list in place with the impalad args.
    """
    assert cluster_size > 0, "cluster_size cannot be 0"
    impala_log_dir = os.getenv("LOG_DIR", "/tmp/")
    cmd = [os.path.join(IMPALA_HOME, "bin/start-impala-cluster.py"),
           "--cluster_size=%d" % cluster_size,
           "--log_dir=%s" % impala_log_dir,
           "--log_level=1"]
    if add_executors:
      cmd.append("--add_executors")
    else:
      assert expected_num_executors == 0
      assert cluster_size == 1
      cmd.append("--use_exclusive_coordinators")
    impalad_args = [
      "-vmodule=admission-controller=3,cluster-membership-mgr=3",
      "-max_concurrent_queries=%s" % executor_slots,
      "-shutdown_grace_period_s=2"]
    options += ["--impalad_args=%s" % a for a in impalad_args]
    logging.debug("Starting cluster with command: %s" %
                  " ".join(pipes.quote(arg) for arg in cmd + options))
    log_debug = logging.getLogger().getEffectiveLevel() == logging.DEBUG
    if log_debug:
      check_call(cmd + options, close_fds=True)
    else:
      # Discard the subprocess output; use a context manager so the file
      # handle is closed again (the previous version leaked it).
      with open("/dev/null", "w") as log_file:
        check_call(cmd + options, close_fds=True, stdout=log_file, stderr=log_file)
    # The number of statestore subscribers is
    # cluster_size (# of impalad) + 1 (for catalogd).
    if expected_num_executors > 0:
      expected_subscribers = expected_num_executors + 2
      expected_backends = expected_num_executors + 1
    else:
      expected_subscribers = cluster_size + 1
      expected_backends = 1
    cluster = self.get_cluster()
    statestored = cluster.statestored
    if statestored is None:
      raise Exception("statestored was not found")
    logging.debug("Waiting for %s subscribers to come online" % expected_subscribers)
    statestored.service.wait_for_live_subscribers(expected_subscribers, timeout=60)
    for impalad in cluster.impalads:
      logging.debug("Waiting for %s executors to come online" % expected_backends)
      impalad.service.wait_for_num_known_live_backends(expected_backends, timeout=60)

  def _kill_whole_cluster(self):
    """Terminates the whole cluster, i.e. all impalads, catalogd, and statestored."""
    logging.info("terminating cluster")
    check_call([os.path.join(IMPALA_HOME, "bin/start-impala-cluster.py"), "--kill_only"])
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--executor_slots", help="Concurrent queries per executor "
"group", type=int, default=3)
parser.add_argument("-g", "--group_size", help="Number of executors per group",
type=int, default=2)
parser.add_argument("-b", "--batch_size", help="Start executors of a group "
"in batches instead of all at once", type=int, default=0)
parser.add_argument("-m", "--max_groups", help="Maximum number of groups to start",
type=int, default=0)
parser.add_argument("-d", "--wait_down", help="Time to wait before scaling down (s)",
type=int, default=5)
parser.add_argument("-u", "--wait_up", help="Time to wait before scaling up (s)",
type=int, default=5)
parser.add_argument("-v", | |
%num_img_channels)
sys.stderr.write("conv_size for structured_model is %i\n" %conv_size)
#theano.config.compute_test_value = 'raise'
sys.stderr.write('... building the model\n')
x = T.tensor4('x', dtype=theano.config.floatX) # the data is presented in 4D shape (#batch, #kpts, #row, #cols)
# the given input is the shape (#batch, #row, #cols, #channels)
# in order to pass it to the first convlolutional layer it should be reshaped to
# (#batch, #channels, #row, #cols)
x.tag.test_value = np.random.rand(128, num_keypoints, dim, dim).astype(theano.config.floatX)
# the keypoint location labels are are presented as a 2D matrix of shape (#batch, #keypoints*2)
# keypoints are [float32] real values in the range of [0,1]
y_kpt_norm = T.imatrix('y_kpt_norm')
y_kpt_norm.tag.test_value = np.random.binomial(n=dim**2, p=0.5, size=(128, num_keypoints)).astype(np.int32)
# y_kpt_norm_serial is a vector of dim (#batch * #kpts)
# where for each batch example all keypoints are serialized before having the next
# example in the first dimension (exp1_kpt1, exp1_kpt2,.., exp1_kptn, exp2_kpt1, ...)
y_kpt_norm_serial = y_kpt_norm.flatten()
# y_kpt_ocular_dist is a 1D float vector of (#batch) containing the inter_ocular
# distance for each training example. It is also a float32 normalized in the range [0,1]
y_kpt_ocular_dist = T.vector('y_kpt_ocular_dist', dtype=theano.config.floatX)
y_kpt_ocular_dist.tag.test_value = np.random.rand(128).astype(theano.config.floatX)
# make a column out of a 1d vector (#batch to #batch x 1)
y_kpt_ocular = y_kpt_ocular_dist.dimshuffle(0, 'x')
# the y components for the 300W dataset
y_kpt_300W = T.matrix('y_kpt_300W', dtype=theano.config.floatX)
y_kpt_300W.tag.test_value = np.random.rand(128, 10).astype(theano.config.floatX)
# the labels of the auxiliary tasks are presented as 1D vector of (#batch)
# [int] labels starting from zero.
# L2 cost coefficient for the output layer
L2_coef = T.scalar('L2_coef', dtype=theano.config.floatX)
L2_coef.tag.test_value = np.float32(1.0)
# L2 cost coefficient for the fully connected layer
# mask_kpts is a vector of shape (#batch * #kpts) indicating
# for each sample which keypoint is on the border.
# the order of values is: (exp1_kpt1, exp1_kpt2,.., exp1_kptn, exp2_kpt1, ...)
# mask_kpts is one when the kpt is not in the pad region and zero otherwise.
mask_kpts = T.vector('mask_kpts', dtype=theano.config.floatX)
mask_kpts.tag.test_value = (np.ones((128 * num_keypoints))).astype(theano.config.floatX)
# bound_mask is a matrix of shape (#batch, #kpts) indicating
# whether for each kpt it is in the padded region or not.
# bound_mask is one of these values for each kpt:
# 0: kpt is not in the boundary, 1: kpt is in the left boundary,
# 2: kpt is in the top boundary
# 3: kpt is in the right boundary, 4: kpt is in the bottom boundary
bound_mask = T.matrix('bound_mask', dtype=theano.config.floatX)
bound_mask.tag.test_value = (np.zeros((128 , num_keypoints))).astype(theano.config.floatX)
# border_pixel is a matrix of shape (#batch, 4) indicating
# for each image the starting pixel of valid image (no pad region)
# it contains a value in the range [0, image_size - 1 ]
border_pixel = T.matrix('border_pixel', dtype=theano.config.floatX)
border_pixel.tag.test_value = (np.tile([[0, 0, dim-1 , dim-1]], (128,1))).astype(theano.config.floatX)
#the input tensor to layer0 is of shape (#batch, #channels, dim * dim)
layer1_input = x
dropout = T.scalar('dropout', dtype=theano.config.floatX)
dropout.tag.test_value = np.float32(0.)
conv_sum, softmax_layer, params, L2_sqr = fforward_model(layer1_input=layer1_input, dropout=dropout, nkerns=nkerns,
conv_size=conv_size, num_img_channels=num_img_channels,
dim=dim, rng=rng)
self.params = params
self.L2_sqr = L2_sqr
########################################
# getting the cost of the output layer #
########################################
epsilon = 1e-8
# cost_batch is a vector of dim (#batch_size * #kpts)
cost_batch = softmax_layer.negative_log_likelihood(y_kpt_norm_serial)
cost_kpt = T.sum(cost_batch * mask_kpts) / (T.sum(mask_kpts) + epsilon)
# cost is the sum of the cost of the keypoints
cost_kpt *= num_keypoints
L2_cost = L2_coef * L2_sqr
cost = L2_cost + cost_kpt
########################################################
# getting the sum log probs for the detected locations #
########################################################
softmax_probs = softmax_layer.p_y_given_x
max_prob = T.max(softmax_probs, axis=1)
log_max_prob = -T.log(max_prob)
# log_max_prob_2D is of shape (#batch, #kpts)
log_max_prob_2D = log_max_prob.reshape((-1, num_keypoints))
sum_log_probs = T.sum(log_max_prob_2D, axis=1)
##################
# error for MTFL #
##################
# getting the prediction values
# predict is of dim (#batch * #kpts)
predict = softmax_layer.predict(y_kpt_norm_serial)
# getting the estimated values
# for each batch, all keypoints come sequentially
# before seeing the next example.
# y_pred is of shape (#batch * #kpt)
y_pred = predict // dim
# x_pred is of shape (#batch * #kpt)
x_pred = predict % dim
# y_true is of shape (#batch * #kpt)
y_true = y_kpt_norm_serial // dim
# x_true is of shape (#batch * #kpt)
x_true = y_kpt_norm_serial % dim
#################################################
# getting the model's prediction in [0, dim**2) #
#################################################
# predict_2D is of shape (#batch, #kpts)
predict_2D = predict.reshape((-1, num_keypoints))
################################
# getting error for train set #
# masks completely the points #
# in the pad rather than #
# projecting them to the border#
################################
# getting x_pred and y_pred that is not masked
# just in training (for illustration purposes
x_pred_train = x_pred
y_pred_train = y_pred
x_diff_sqr_train = (x_pred - x_true)**2
y_diff_sqr_train = (y_pred - y_true)**2
# kpt_euc_dist is of shape (#batch * #kpt)
kpt_euc_dist_train = T.sqrt(x_diff_sqr_train + y_diff_sqr_train)
# masking the points that are in the pad
error_kpt_masked_train = kpt_euc_dist_train * mask_kpts
# error_kpt_2D is of shape (#batch , #kpt)
error_kpt_2D_train = error_kpt_masked_train.reshape((-1, num_keypoints))
# the values of x_pred, y_pred, x_true, y_true
# are in the range of [0,dim). So to make the
# calculation compatible y_kpt_ocular should also
# get unnormalized
y_kpt_ocular_unorm = y_kpt_ocular * dim
error_kpt_each_norm_MTFL_train = error_kpt_2D_train / y_kpt_ocular_unorm
# getting the sum of error over all samples and all keypoints
error_kpt_MTFL_train = T.sum(error_kpt_each_norm_MTFL_train)
error_kpt_train = error_kpt_MTFL_train
##############################
# getting error for test set #
# projects the points in the #
# pad to the border #
##############################
# moving the kpts to the border if its in the pad #
# x_pred_2D is of shape (#batch, #kpt)
x_pred_2D = x_pred.reshape((-1, num_keypoints))
y_pred_2D = y_pred.reshape((-1, num_keypoints))
# if bound_mask==1 (left border), use border_pixel[0], elif bound_mask==3 (right border), use border_pixel[2]
# else, use x_pred_2D
# x_inside is a matrix of shape (#batch, #kpts) indicating
x_inside = 1 - (T.eq(bound_mask, 1) + T.eq(bound_mask, 3))
x_pred_bord = T.eq(bound_mask, 1) * border_pixel[:, 0].reshape((-1, 1)) +\
T.eq(bound_mask, 3) * border_pixel[:, 2].reshape((-1, 1)) + x_inside * x_pred_2D
y_inside = 1 - (T.eq(bound_mask, 2) + T.eq(bound_mask, 4))
y_pred_bord = T.eq(bound_mask, 2) * border_pixel[:, 1].reshape((-1, 1)) +\
T.eq(bound_mask, 4) * border_pixel[:, 3].reshape((-1, 1)) + y_inside * y_pred_2D
x_inside_all = 1 - (T.lt(x_pred_bord, border_pixel[:, 0].reshape((-1, 1))) +\
T.gt(x_pred_bord, border_pixel[:, 2].reshape((-1, 1))))
x_pred_bord_all = T.lt(x_pred_bord, border_pixel[:, 0].reshape((-1, 1))) * border_pixel[:, 0].reshape((-1, 1)) +\
T.gt(x_pred_bord, border_pixel[:, 2].reshape((-1, 1))) * border_pixel[:, 2].reshape((-1, 1)) +\
x_inside_all * x_pred_bord
y_inside_all = 1 - (T.lt(y_pred_bord, border_pixel[:, 1].reshape((-1, 1))) +\
T.gt(y_pred_bord, border_pixel[:, 3].reshape((-1, 1))))
y_pred_bord_all = T.lt(y_pred_bord, border_pixel[:, 1].reshape((-1, 1))) * border_pixel[:, 1].reshape((-1, 1)) +\
T.gt(y_pred_bord, border_pixel[:, 3].reshape((-1, 1))) * border_pixel[:, 3].reshape((-1, 1)) +\
y_inside_all * y_pred_bord
# x_pred is of shape (#batch * #kpt)
x_pred = x_pred_bord_all.flatten()
y_pred = y_pred_bord_all.flatten()
x_diff_sqr = (x_pred - x_true) ** 2
y_diff_sqr = (y_pred - y_true) ** 2
# kpt_euc_dist is of shape (#batch * #kpt)
kpt_euc_dist = T.sqrt(x_diff_sqr + y_diff_sqr)
# applying the mask to the predicted kpts locations
error_kpt_masked = kpt_euc_dist * mask_kpts
# error_kpt_2D is of shape (#batch , #kpt)
error_kpt_2D = error_kpt_masked.reshape((-1, num_keypoints))
# the values of x_pred, y_pred, x_true, y_true
# are in the range of [0,dim). So to make the
# calculation compatible y_kpt_ocular should also
# get unnormalized
y_kpt_ocular_unorm = y_kpt_ocular * dim
error_kpt_each_norm_MTFL = error_kpt_2D / y_kpt_ocular_unorm
# getting the sum of error over all samples and all keypoints
error_kpt_MTFL = T.sum(error_kpt_each_norm_MTFL)
error_kpt = error_kpt_MTFL
#############################
# getting errors seperately #
#############################
# error_kpt_each contains the error seperately for each batch_sample
error_kpt_each = T.mean(error_kpt_each_norm_MTFL, axis=1)
# the same variable for the train set
error_kpt_each_train = T.mean(error_kpt_each_norm_MTFL_train, axis=1)
#######################################
# defining the optimization algorithm #
#######################################
# setting the updates using the ada_delta
self.tr = Train_alg()
updates = self.tr.build_updates(cost=cost, params=self.params, consider_constant=None, decay=decay)
###############################
# defining the test functions #
###############################
self.train_model = theano.function(
[L2_coef, x, y_kpt_ocular_dist, y_kpt_norm, mask_kpts, dropout],
[cost, cost_kpt, L2_cost, error_kpt_train],
updates=updates, allow_input_downcast=True)
self.valid_model = theano.function(
[L2_coef, x, y_kpt_ocular_dist, y_kpt_norm, mask_kpts, dropout],
[cost, cost_kpt, L2_cost, error_kpt_train], allow_input_downcast=True)
# testing only on MTFL dataset with no task | |
the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
X : None or ndarray of shape (n_samples, n_features)
Input data. Note that if X is None then Gram must be specified,
i.e., cannot be None or False.
y : None or ndarray of shape (n_samples,)
Input targets.
Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
`Xy = np.dot(X.T, y)` that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix `(X' * X)`, if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
    n_samples : int or float, default=None
        Equivalent size of sample. If `None`, it defaults to the number of
        samples in ``y`` (``y.size``).
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If ``False``, ``X`` is overwritten.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_Gram : bool, default=True
If ``False``, ``Gram`` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
testing : bool, default=False
Whether to conduct hypothesis testing each time a new variable enters
alpha : float, default=0.05
Significance level of hypothesis testing. Valid only if testing is True.
testing_stop : bool, default=False
If set to True, stops calculating future paths when the test yields
insignificant results.
Only takes effect when testing is set to True.
testing_verbose : bool, default=True
Controls output verbosity for hypothese testing procedure.
Returns
-------
alphas : array-like of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array-like of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
test_result: dictionary
Contains testing results in the form of [test_stats, new_n] produced
at each step. Returned only if testing is set to True.
See Also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
if method == "lar" and positive:
raise ValueError(
"Positive constraint not supported for 'lar' " "coding method."
)
n_samples = n_samples if n_samples is not None else y.size
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if Gram is None or Gram is False:
Gram = None
if X is None:
raise ValueError('X and Gram cannot both be unspecified.')
elif isinstance(Gram, str) and Gram == 'auto' or Gram is True:
if Gram is True or X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
else:
Gram = None
elif copy_Gram:
Gram = Gram.copy()
if Gram is None:
n_features = X.shape[1]
else:
n_features = Cov.shape[0]
if Gram.shape != (n_features, n_features):
raise ValueError('The shapes of the inputs Gram and Xy'
' do not match.')
if copy_X and X is not None and Gram is None:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
max_features = min(max_iter, n_features)
dtypes = set(a.dtype for a in (X, y, Xy, Gram) if a is not None)
if len(dtypes) == 1:
# use the precision level of input data if it is consistent
return_dtype = next(iter(dtypes))
else:
# fallback to double precision otherwise
return_dtype = np.float64
if return_path:
coefs = np.zeros((max_features + 1, n_features), dtype=return_dtype)
alphas = np.zeros(max_features + 1, dtype=return_dtype)
else:
coef, prev_coef = (np.zeros(n_features, dtype=return_dtype),
np.zeros(n_features, dtype=return_dtype))
alpha, prev_alpha = (np.array([0.], dtype=return_dtype),
np.array([0.], dtype=return_dtype))
# above better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
if Gram is None:
L = np.empty((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
else:
L = np.empty((max_features, max_features), dtype=Gram.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (Cov,))
solve_cholesky, = get_lapack_funcs(('potrs',), (L,))
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
residual = y - 0
coef = np.zeros(n_features)
test_result = {}
if Gram is not None:
Gram_copy = Gram.copy()
Cov_copy = Cov.copy()
z_score = stats.norm.ppf(1 - alpha)
while True:
if not testing:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
else:
# not implemented when if positive is set to True
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
if Cov.size > 1:
C_idx_second = np.abs(Cov).argsort()[-2]
x1 = X.T[n_active + C_idx]
x2 = X.T[n_active + C_idx_second]
residual = y - np.dot(X[:, :n_active], coef[active])
u = np.array([np.dot(x1, residual), np.dot(x2, residual)]) / len(y)
cov = np.cov(x1 * residual, x2 * residual)
new_n = len(y)
if u[0] >= 0 and u[1] >= 0:
test_stats = u[0] - u[1] - z_score * np.sqrt(2 * (cov[0][0] + cov[1][1] - cov[0][1] - cov[1][0]) / len(y))
if test_stats < 0:
z_alpha = (u[0] - u[1]) / np.sqrt(2 * (cov[0][0] + cov[1][1] - cov[0][1] - cov[1][0]) / len(y))
new_n = new_n * (z_score / z_alpha) ** 2
elif u[0] >= 0 and u[1] < 0:
test_stats = u[0] + u[1] - z_score * np.sqrt(2 * (cov[0][0] + cov[1][1] + cov[0][1] + cov[1][0]) / len(y))
if test_stats < 0:
z_alpha = (u[0] + u[1]) / np.sqrt(2 * (cov[0][0] + cov[1][1] - cov[0][1] - cov[1][0]) / len(y))
new_n = new_n * (z_score / z_alpha) ** 2
elif u[0] < 0 and u[1] >= 0:
test_stats = -(u[0] + u[1] + z_score * np.sqrt(2 * (cov[0][0] + cov[1][1] + cov[0][1] + cov[1][0]) / len(y)))
if test_stats < 0:
z_alpha = (-u[0] - u[1]) / np.sqrt(2 * (cov[0][0] + cov[1][1] - cov[0][1] - cov[1][0]) / len(y))
new_n = new_n * (z_score / z_alpha) ** 2
else:
test_stats = -(u[0] - u[1] + z_score * np.sqrt(2 * (cov[0][0] + cov[1][1] - cov[0][1] - cov[1][0]) / len(y)))
if test_stats < 0:
z_alpha = (-u[0] + u[1]) / np.sqrt(2 * (cov[0][0] + cov[1][1] - cov[0][1] - cov[1][0]) / len(y))
new_n = new_n * (z_score / z_alpha) ** 2
test_result[n_active + 1] = [test_stats, new_n]
if testing_verbose:
print("Selecting " + str(n_active + 1) + "th varieble: ")
print("Correlations: " + str(np.round(u, 4)))
print("Test statistics: " + str(round(test_stats, 4)))
if testing_stop:
if test_stats < 0:
if testing_verbose:
print("Not enough samples!")
return | |
str. The proper command to be submitted to desi_proc to process the job defined by the prow values.
"""
cmd = 'desi_proc'
cmd += ' --batch'
cmd += ' --nosubmit'
cmd += ' --traceshift'
if queue is not None:
cmd += f' -q {queue}'
if prow['OBSTYPE'].lower() == 'science':
if prow['JOBDESC'] == 'prestdstar':
cmd += ' --nostdstarfit --nofluxcalib'
elif prow['JOBDESC'] == 'poststdstar':
cmd += ' --noprestdstarfit --nostdstarfit'
elif prow['OBSTYPE'].lower() == 'dark':
cmd += ' --nightlybias'
pcamw = str(prow['PROCCAMWORD'])
cmd += ' --cameras={} -n {} -e {}'.format(pcamw, prow['NIGHT'], prow['EXPID'][0])
if prow['BADAMPS'] != '':
cmd += ' --badamps={}'.format(prow['BADAMPS'])
return cmd
def desi_proc_joint_fit_command(prow, queue=None):
    """
    Build the desi_proc_joint_fit command line for a processing table row.

    Args:
        prow, Table.Row or dict. Must provide keyword access to 'OBSTYPE',
            'NIGHT', 'PROCCAMWORD', and 'EXPID' (an iterable of exposure IDs).
        queue, str. The name of the NERSC Slurm queue to submit to. Default is
            None (which leaves it to the desi_proc_joint_fit default).

    Returns:
        cmd, str. The proper command to be submitted to desi_proc_joint_fit to
            process the job defined by the prow values.
    """
    # Fixed flags first; the optional queue flag is inserted before the
    # data-selection options, matching the expected argument order.
    parts = ['desi_proc_joint_fit', '--batch', '--nosubmit', '--traceshift']
    if queue is not None:
        parts.append(f'-q {queue}')

    obstype = prow['OBSTYPE'].lower()
    cameras = str(prow['PROCCAMWORD'])
    # Joint fits span multiple exposures: pass them as a comma-separated list.
    exposures = ','.join(str(eid) for eid in prow['EXPID'])

    parts.append(f'--obstype {obstype}')
    parts.append('--cameras={} -n {} -e {}'.format(cameras, prow['NIGHT'],
                                                   exposures))
    return ' '.join(parts)
def create_batch_script(prow, queue='realtime', dry_run=0, joint=False, system_name=None):
    """
    Wrapper script that takes a processing table row and three modifier keywords and creates a submission script for the
    compute nodes.

    Args:
        prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in
                                 desispect.workflow.proctable.get_processing_table_column_defs()
        queue, str. The name of the NERSC Slurm queue to submit to. Default is the realtime queue.
        dry_run, int. If nonzero, this is a simulated run. If dry_run=1 the scripts will be written but not submitted.
                      If dry_run=2, the scripts will not be written nor submitted. Logging will remain the same
                      for testing as though scripts are being submitted. Default is 0 (false).
        joint, bool. Whether this is a joint fitting job (the job involves multiple exposures) and therefore needs to be
                     run with desi_proc_joint_fit. Default is False.
        system_name (str): batch system name, e.g. cori-haswell or perlmutter-gpu

    Returns:
        prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated values for
                                 scriptname.

    Note:
        This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the
        input object in memory may or may not be changed. As of writing, a row from a table given to this function will
        not change during the execution of this function (but can be overwritten explicitly with the returned row if desired).
    """
    log = get_logger()
    ## Tile-level redshift jobs are scripted by the redshift helpers rather
    ## than by desi_proc / desi_proc_joint_fit.
    if prow['JOBDESC'] in ['perexp','pernight','pernight-v0','cumulative']:
        if dry_run > 1:
            # dry_run=2: only report the pathname that would have been written.
            scriptpathname = get_tile_redshift_script_pathname(tileid=prow['TILEID'],group=prow['JOBDESC'],
                                                               night=prow['NIGHT'], expid=prow['EXPID'][0])

            log.info("Output file would have been: {}".format(scriptpathname))
        else:
            #- run zmtl for cumulative redshifts but not others
            run_zmtl = (prow['JOBDESC'] == 'cumulative')

            scripts, failed_scripts = generate_tile_redshift_scripts(tileid=prow['TILEID'], group=prow['JOBDESC'],
                                                                     night=[prow['NIGHT']], expid=prow['EXPID'],
                                                                     run_zmtl=run_zmtl,
                                                                     batch_queue=queue, system_name=system_name,
                                                                     nosubmit=True)
            # Exactly one generated script is expected for a single prow; the
            # other outcomes are logged as errors.
            # NOTE(review): on the two error branches below, scriptpathname is
            # never assigned, so prow['SCRIPTNAME'] at the end of this function
            # would raise NameError (or reuse a stale value) — confirm whether
            # callers rely on an exception here.
            if len(failed_scripts) > 0:
                log.error(f"Redshifts failed for group={prow['JOBDESC']}, night={prow['NIGHT']}, "+
                          f"tileid={prow['TILEID']}, expid={prow['EXPID']}.")
                log.info(f"Returned failed scriptname is {failed_scripts}")
            elif len(scripts) > 1:
                log.error(f"More than one redshifts returned for group={prow['JOBDESC']}, night={prow['NIGHT']}, "+
                          f"tileid={prow['TILEID']}, expid={prow['EXPID']}.")
                log.info(f"Returned scriptnames were {scripts}")
            else:
                scriptpathname = scripts[0]
    else:
        ## Non-redshift jobs: build the command line, then (unless dry_run>1)
        ## write the actual batch script for it.
        if joint:
            cmd = desi_proc_joint_fit_command(prow, queue=queue)
        else:
            cmd = desi_proc_command(prow, queue=queue)
        scriptpathname = batch_script_name(prow)
        if dry_run > 1:
            log.info("Output file would have been: {}".format(scriptpathname))
            log.info("Command to be run: {}".format(cmd.split()))
        else:
            log.info("Running: {}".format(cmd.split()))
            scriptpathname = create_desi_proc_batch_script(night=prow['NIGHT'], exp=prow['EXPID'], \
                                                           cameras=prow['PROCCAMWORD'], jobdesc=prow['JOBDESC'], \
                                                           queue=queue, cmdline=cmd, system_name=system_name)
        log.info("Outfile is: {}".format(scriptpathname))
    # Record only the basename; the directory is derivable from NIGHT.
    prow['SCRIPTNAME'] = os.path.basename(scriptpathname)
    return prow
def submit_batch_script(prow, dry_run=0, reservation=None, strictly_successful=False):
    """
    Wrapper script that takes a processing table row and three modifier keywords and submits the scripts to the Slurm
    scheduler.

    Args:
        prow, Table.Row or dict. Must include keyword accessible definitions for processing_table columns found in
                                 desispect.workflow.proctable.get_processing_table_column_defs()
        dry_run, int. If nonzero, this is a simulated run. If dry_run=1 the scripts will be written but not submitted.
                      If dry_run=2, the scripts will not be written nor submitted. Logging will remain the same
                      for testing as though scripts are being submitted. Default is 0 (false).
        reservation: str. The reservation to submit jobs to. If None, it is not submitted to a reservation.
        strictly_successful, bool. Whether all jobs require all inputs to have succeeded. For daily processing, this is
                                   less desirable because e.g. the sciences can run with SVN default calibrations rather
                                   than failing completely from failed calibrations. Default is False.

    Returns:
        prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated values for
                                 scriptname.

    Note:
        This modifies the input. Though Table.Row objects are generally copied on modification, so the change to the
        input object in memory may or may not be changed. As of writing, a row from a table given to this function will
        not change during the execution of this function (but can be overwritten explicitly with the returned row if desired).
    """
    log = get_logger()
    dep_qids = prow['LATEST_DEP_QID']
    dep_list, dep_str = '', ''
    ## Build the Slurm --dependency flag from the queue IDs this job waits on.
    if len(dep_qids) > 0:
        jobtype = prow['JOBDESC']
        if strictly_successful:
            depcond = 'afterok'
        elif jobtype in ['arc', 'psfnight', 'prestdstar', 'stdstarfit']:
            ## (though psfnight and stdstarfit will require some inputs otherwise they'll go up in flames)
            depcond = 'afterany'
        else:
            ## if 'flat','nightlyflat','poststdstar', or any type of redshift, require strict success of inputs
            depcond = 'afterok'

        dep_str = f'--dependency={depcond}:'

        ## dep_qids may be a scalar or an array; in either case an empty/zero
        ## dependency collapses dep_str back to ''.
        if np.isscalar(dep_qids):
            dep_list = str(dep_qids).strip(' \t')
            if dep_list == '':
                dep_str = ''
            else:
                dep_str += dep_list
        else:
            if len(dep_qids)>1:
                dep_list = ':'.join(np.array(dep_qids).astype(str))
                dep_str += dep_list
            elif len(dep_qids) == 1 and dep_qids[0] not in [None, 0]:
                dep_str += str(dep_qids[0])
            else:
                dep_str = ''

    # script = f'{jobname}.slurm'
    # script_path = pathjoin(batchdir, script)
    ## Locate the batch script: redshift jobs live under the tile redshift
    ## directory layout, everything else under the per-night batch directory.
    if prow['JOBDESC'] in ['pernight-v0','pernight','perexp','cumulative']:
        script_path = get_tile_redshift_script_pathname(tileid=prow['TILEID'],group=prow['JOBDESC'],
                                                        night=prow['NIGHT'], expid=np.min(prow['EXPID']))
        jobname = os.path.split(script_path)[-1]
    else:
        batchdir = get_desi_proc_batch_file_path(night=prow['NIGHT'])
        jobname = batch_script_name(prow)
        script_path = pathjoin(batchdir, jobname)

    ## --parsable makes sbatch print just the job id, which we parse below.
    batch_params = ['sbatch', '--parsable']
    if dep_str != '':
        batch_params.append(f'{dep_str}')
    if reservation is not None:
        batch_params.append(f'--reservation={reservation}')
    batch_params.append(f'{script_path}')

    if dry_run:
        ## in dry_run, mock Slurm ID's are generated using CPU seconds. Wait one second so we have unique ID's
        current_qid = int(time.time() - 1.6e9)
        time.sleep(1)
    else:
        current_qid = subprocess.check_output(batch_params, stderr=subprocess.STDOUT, text=True)
        current_qid = int(current_qid.strip(' \t\n'))

    log.info(batch_params)
    log.info(f'Submitted {jobname} with dependencies {dep_str} and reservation={reservation}. Returned qid: {current_qid}')

    ## Record the submission in the processing row (modifies the input).
    prow['LATEST_QID'] = current_qid
    prow['ALL_QIDS'] = np.append(prow['ALL_QIDS'],current_qid)
    prow['STATUS'] = 'SUBMITTED'
    prow['SUBMIT_DATE'] = int(time.time())

    return prow
#############################################
########## Row Manipulations ############
#############################################
def define_and_assign_dependency(prow, darkjob, arcjob, flatjob):
"""
Given input processing row and possible arcjob (processing row for psfnight) and flatjob (processing row for
nightlyflat), this defines the JOBDESC keyword and assigns the dependency appropriate for the job type of prow.
Args:
prow, Table.Row or dict. Must include keyword accessible definitions for 'OBSTYPE'. A row must have column names for
'JOBDESC', 'INT_DEP_IDS', and 'LATEST_DEP_ID'.
darkjob, Table.Row, dict, or NoneType. Row corresponding to the processed 300s dark for the night from proctable.
This must contain keyword accessible values for 'INTID', and 'LATEST_QID'.
If None, it assumes the dependency doesn't exist and no dependency is assigned.
arcjob, Table.Row, dict, or NoneType. Processing row corresponding to psfnight for the night of the data in prow.
This must contain keyword accessible values for 'INTID', and 'LATEST_QID'.
If None, it assumes the dependency doesn't exist and no dependency is assigned.
flatjob, Table.Row, dict, or NoneType. Processing row corresponding to nightlyflat for the night of the data in prow.
This must contain keyword accessible values for 'INTID', and 'LATEST_QID'.
If None, it assumes the dependency doesn't exist and no dependency is assigned.
Returns:
prow, Table.Row or dict. The same prow type and keywords as input except with modified values updated values for
'JOBDESC', 'INT_DEP_IDS'. and 'LATEST_DEP_ID'.
Note:
This modifies the input. Though Table.Row objects are generally copied on | |
1, 2, 2, 2, 1]
energizations 1.5 1.11803 [1, 0, 3, 1, 3, 0, 1, 1, 2, 3]
energize 2.1 0.7 [2, 2, 2, 1, 3, 2, 3, 2, 3, 1]
energized 2.3 0.64031 [3, 2, 3, 3, 3, 2, 2, 2, 1, 2]
energizer 2.1 0.53852 [3, 2, 2, 2, 2, 2, 2, 3, 1, 2]
energizers 1.7 0.9 [2, 0, 2, 3, 3, 1, 1, 2, 2, 1]
energizes 2.1 0.53852 [3, 2, 3, 2, 2, 2, 2, 2, 1, 2]
energizing 2.0 0.63246 [3, 3, 2, 1, 2, 2, 1, 2, 2, 2]
energy 1.1 0.83066 [0, 2, 0, 2, 1, 1, 2, 1, 2, 0]
engage 1.4 0.8 [1, 2, 3, 2, 1, 1, 0, 1, 2, 1]
engaged 1.7 1.1 [1, 1, 2, 2, 1, 0, 2, 3, 4, 1]
engagement 2.0 1.34164 [0, 0, 3, 4, 4, 2, 1, 2, 2, 2]
engagements 0.6 0.8 [1, 0, 0, 2, 0, 2, 0, 0, 1, 0]
engager 1.1 0.7 [1, 1, 0, 2, 1, 0, 2, 1, 2, 1]
engagers 1.0 0.7746 [1, 1, 1, 0, 2, 1, 0, 2, 2, 0]
engages 1.0 0.7746 [1, 1, 0, 2, 1, 0, 1, 2, 2, 0]
engaging 1.4 0.4899 [2, 2, 1, 1, 2, 1, 1, 1, 1, 2]
engagingly 1.5 0.67082 [1, 2, 3, 1, 1, 1, 1, 1, 2, 2]
engrossed 0.6 1.49666 [0, 2, 0, 2, -2, 2, 3, -1, 0, 0]
enjoy 2.2 0.6 [3, 2, 2, 2, 3, 2, 2, 3, 2, 1]
enjoyable 1.9 0.53852 [3, 2, 2, 1, 2, 1, 2, 2, 2, 2]
enjoyableness 1.9 1.13578 [2, 2, 2, 2, 1, 3, 3, 3, -1, 2]
enjoyably 1.8 0.4 [2, 2, 2, 1, 2, 1, 2, 2, 2, 2]
enjoyed 2.3 0.64031 [2, 2, 1, 3, 3, 3, 2, 2, 2, 3]
enjoyer 2.2 0.6 [2, 2, 1, 3, 3, 2, 2, 2, 2, 3]
enjoyers 2.2 0.74833 [2, 4, 2, 2, 2, 2, 2, 3, 2, 1]
enjoying 2.4 0.66332 [2, 2, 2, 3, 3, 3, 1, 3, 2, 3]
enjoyment 2.6 0.4899 [2, 3, 2, 3, 2, 3, 2, 3, 3, 3]
enjoyments 2.0 0.7746 [3, 1, 1, 3, 2, 1, 2, 2, 2, 3]
enjoys 2.3 0.45826 [2, 3, 2, 2, 2, 3, 2, 3, 2, 2]
enlighten 2.3 1.1 [2, 2, 1, 3, 2, 1, 1, 4, 3, 4]
enlightened 2.2 0.87178 [4, 2, 3, 1, 2, 2, 1, 3, 2, 2]
enlightening 2.3 0.64031 [3, 2, 2, 2, 2, 2, 2, 4, 2, 2]
enlightens 1.7 1.00499 [2, 1, 1, 1, 1, 2, 4, 1, 3, 1]
ennui -1.2 0.6 [-1, -1, -1, -2, -1, -1, -2, -1, 0, -2]
enrage -2.6 0.91652 [-3, -3, -3, -4, -1, -1, -3, -2, -3, -3]
enraged -1.7 1.79165 [-3, -3, -3, -3, 2, -1, -3, -1, 1, -3]
enrages -1.8 1.6 [-3, -3, -3, -3, 1, -1, -3, -1, 1, -3]
enraging -2.8 0.74833 [-4, -2, -3, -2, -2, -3, -3, -2, -4, -3]
enrapture 3.0 0.63246 [2, 4, 3, 3, 4, 3, 3, 2, 3, 3]
enslave -3.1 0.9434 [-3, -4, -2, -4, -4, -2, -4, -2, -4, -2]
enslaved -1.7 2.41039 [3, -3, -3, -3, -4, -4, -4, 1, -1, 1]
enslaves -1.6 2.15407 [2, -2, -3, -2, -4, -4, -4, 1, -1, 1]
ensure 1.6 0.91652 [2, 1, 3, 1, 1, 2, 3, 2, 0, 1]
ensuring 1.1 0.9434 [0, 1, 3, 1, 1, 2, 1, 0, 2, 0]
enterprising 2.3 0.78102 [3, 2, 1, 3, 3, 2, 1, 2, 3, 3]
entertain 1.3 0.64031 [1, 2, 1, 1, 2, 1, 2, 0, 1, 2]
entertained 1.7 0.64031 [1, 2, 2, 1, 2, 1, 1, 3, 2, 2]
entertainer 1.6 1.2 [1, 4, 2, 2, 0, 0, 1, 3, 1, 2]
entertainers 1.0 0.7746 [0, 1, 2, 2, 0, 0, 1, 1, 2, 1]
entertaining 1.9 0.83066 [1, 2, 1, 1, 3, 2, 3, 2, 3, 1]
entertainingly 1.9 0.53852 [2, 1, 2, 3, 2, 2, 1, 2, 2, 2]
entertainment 1.8 0.9798 [2, 0, 4, 2, 2, 1, 2, 2, 1, 2]
entertainments 2.3 1.18743 [3, 3, 3, 2, 1, 0, 3, 4, 1, 3]
entertains 2.4 0.66332 [2, 2, 2, 2, 2, 3, 4, 3, 2, 2]
enthral 0.4 1.42829 [2, 2, 0, 2, 0, -1, -2, 2, 0, -1]
enthuse 1.6 0.66332 [1, 2, 1, 1, 3, 1, 2, 2, 2, 1]
enthused 2.0 0.63246 [3, 3, 1, 2, 2, 2, 2, 1, 2, 2]
enthuses 1.7 0.78102 [2, 3, 1, 2, 1, 3, 1, 1, 1, 2]
enthusiasm 1.9 0.9434 [3, 3, 3, 2, 1, 0, 2, 1, 2, 2]
enthusiasms 2.0 0.89443 [1, 3, 2, 2, 3, 2, 0, 2, 3, 2]
enthusiast 1.5 0.67082 [1, 2, 2, 2, 0, 1, 1, 2, 2, 2]
enthusiastic 2.2 0.9798 [1, 2, 3, 4, 2, 3, 2, 1, 1, 3]
enthusiastically 2.6 0.66332 [3, 3, 3, 2, 3, 3, 3, 3, 2, 1]
enthusiasts 1.4 0.91652 [1, 1, 0, 3, 3, 2, 1, 1, 1, 1]
enthusing 1.9 0.7 [2, 1, 2, 1, 2, 3, 2, 1, 2, 3]
entitled 1.1 0.83066 [2, 2, 1, 1, 2, 1, 1, -1, 1, 1]
entrusted 0.8 1.46969 [3, 0, 2, 2, 1, 1, -1, 0, -2, 2]
envied -1.1 0.83066 [-1, -2, -2, 1, -2, -1, -1, -1, -1, -1]
envier -1.0 0.7746 [-1, -2, -2, -1, -1, 1, -1, -1, -1, -1]
enviers -1.1 1.13578 [-3, -1, 0, -3, -1, -1, -1, -1, 1, -1]
envies -0.8 0.9798 [-1, -2, -2, 1, -1, 1, -1, -1, -1, -1]
envious -1.1 0.83066 [-2, -1, -1, -1, -2, -1, -1, 1, -2, -1]
envy -1.1 0.83066 [-2, -1, -1, -2, -1, -1, -1, 1, -1, -2]
envying -0.8 1.32665 [-1, -1, -1, -1, -3, 2, -2, -1, 1, -1]
envyingly -1.3 1.55242 [-2, 3, -2, -2, -1, -3, -1, -1, -2, -2]
erroneous -1.8 0.6 [-2, -3, -2, -2, -2, -2, -1, -1, -1, -2]
error -1.7 0.64031 [-2, -1, -2, -1, -2, -1, -1, -2, -3, -2]
errors -1.4 0.66332 [-2, -1, -2, 0, -2, -2, -1, -1, -1, -2]
escape 0.7 1.00499 [2, 0, 0, 1, 0, 1, 0, 3, 0, 0]
escapes 0.5 1.36015 [4, 1, 1, 0, -1, 0, -1, 0, 1, 0]
escaping 0.2 1.46969 [-2, 2, -1, 0, 1, 0, 2, 2, -2, 0]
esteemed 1.9 0.83066 [3, 2, 1, 2, 3, 1, 1, 2, 3, 1]
ethical 2.3 0.78102 [3, 3, 3, 3, 2, 2, 1, 3, 2, 1]
euphoria 3.3 0.9 [4, 4, 3, 3, 3, 4, 4, 4, 1, 3]
euphoric 3.2 0.87178 [3, 4, 3, 3, 3, 4, 4, 4, 1, 3]
eviction -2.0 0.63246 [-2, -2, -3, -2, -3, -2, -1, -2, -1, -2]
evil -3.4 0.91652 [-4, -4, -4, -3, -3, -4, -1, -4, -3, -4]
evildoer -3.1 0.7 [-2, -3, -3, -3, -4, -4, -3, -2, -3, -4]
evildoers -2.4 0.4899 [-3, -3, -2, -2, -2, -2, -2, -2, -3, -3]
evildoing -3.1 0.7 [-4, -4, -3, -3, -3, -4, -2, -3, -2, -3]
evildoings -2.5 1.0247 [-3, -1, -1, -3, -4, -2, -4, -2, -2, -3]
eviler -2.1 1.13578 [-2, -1, -3, -2, -4, -3, -1, -2, 0, -3]
evilest -2.5 1.0247 [-3, -4, -1, -3, -2, -3, -1, -4, -2, -2]
eviller -2.9 0.83066 [-3, -3, -4, -2, -2, -3, -2, -4, -2, -4]
evillest -3.3 0.78102 [-3, -4, -2, -3, -4, -2, -4, -4, -3, -4]
evilly -3.4 0.8 [-2, -4, -4, -4, -3, -4, -4, -4, -3, -2]
evilness -3.1 1.04403 [-3, -4, -4, -4, -4, -2, -3, -2, -1, -4]
evils -2.7 0.78102 [-3, -2, -2, -4, -4, -2, -3, -2, -3, -2]
exaggerate -0.6 0.66332 [-1, -1, -1, 0, -1, 0, 1, -1, -1, -1]
exaggerated -0.4 1.2 [-1, -1, -1, -1, -1, 2, 1, 1, -2, -1]
exaggerates -0.6 1.28062 [-1, -1, -1, -1, -1, 1, 0, 2, -3, -1]
exaggerating -0.7 0.9 [-1, -2, 0, -1, 0, 0, -2, -1, 1, -1]
exasperated -1.8 1.53623 [-4, -3, -3, -1, -1, -1, 1, -1, -4, -1]
excel 2.0 1.0 [3, 0, 2, 3, 1, 1, 3, 3, 2, 2]
excelled 2.2 0.87178 [1, 2, 2, 2, 3, 2, 4, 3, 2, 1]
excellence 3.1 0.9434 [4, 3, 4, 3, 2, 3, 1, 4, 3, 4]
excellences 2.5 0.92195 [4, 2, 2, 2, 4, 3, 2, 2, 3, 1]
excellencies 2.4 0.4899 [3, 2, 3, 3, 2, 2, 2, 2, 3, 2]
excellency 2.5 0.80623 [4, 2, 3, 3, 2, 3, 1, 3, 2, 2]
excellent 2.7 0.64031 [2, 3, 3, 3, 3, 2, 3, 2, 2, 4]
excellently 3.1 0.7 [4, 3, 3, 3, 2, 3, 3, 4, 4, 2]
excelling 2.5 0.67082 [2, 2, 3, 3, 3, 2, 2, 4, 2, 2]
excels 2.5 0.92195 [4, 2, 4, 2, 2, 1, 2, 3, 3, 2]
excelsior 0.7 0.64031 [1, 0, 0, 2, 0, 1, 1, 1, 1, 0]
excitabilities 1.5 1.0247 [2, 0, 1, 1, 3, 1, 2, 3, 2, 0]
excitability 1.2 0.87178 [0, 1, 1, 0, 1, 2, 3, 1, 2, 1]
excitable 1.5 0.92195 [2, 3, 1, 0, 1, 2, 2, 0, 2, 2]
excitableness 1.0 1.09545 [0, 0, 2, 0, 2, 0, 2, 0, 1, 3]
excitant 1.8 1.16619 [1, 0, 1, 3, 2, 0, 3, 3, 2, 3]
excitants 1.2 0.9798 [1, 0, 1, 2, 2, 2, 1, -1, 2, 2]
excitation 1.8 0.87178 [2, 0, 3, 1, 3, 2, 2, 2, 1, 2]
excitations 1.8 1.16619 [3, 3, -1, 2, 2, 2, 1, 1, 3, 2]
excitative 0.3 0.78102 [0, 1, 1, 0, 0, 0, 2, 0, -1, 0]
excitatory 1.1 1.7 [-1, 2, 2, 1, 2, 2, 2, 3, -3, 1]
excite 2.1 1.22066 [1, 2, 2, 1, 2, 0, 4, 4, 3, 2]
excited 1.4 0.4899 [1, 1, 2, 1, 2, 1, 2, 1, 1, 2]
excitedly 2.3 0.9 [3, 3, 2, 3, 1, 3, 1, 3, 1, 3]
excitement 2.2 0.4 [2, 2, 2, 3, 3, 2, 2, 2, 2, 2]
excitements 1.9 0.53852 [2, 1, 2, 3, 2, 2, 2, 2, 2, 1]
exciter 1.9 0.9434 [3, 2, 3, 1, 0, 1, 2, 3, 2, 2]
exciters 1.4 1.42829 [1, 2, 0, 1, 2, 4, 0, -1, 3, 2]
excites 2.1 0.83066 [2, 3, 3, 2, 0, 2, 2, 3, 2, 2]
exciting 2.2 0.87178 [3, 2, 1, 1, 1, 3, 3, 3, 2, 3]
excitingly 1.9 0.9434 [3, 2, 3, 0, 1, 2, 1, 2, 2, 3]
exciton 0.3 0.64031 [2, 0, 0, 0, 0, 0, 1, 0, 0, 0]
excitonic 0.2 0.6 [0, 0, 0, 0, 2, 0, 0, 0, 0, 0]
excitons 0.8 0.6 [1, 2, 0, 1, 1, 0, 0, 1, 1, 1]
excitor 0.5 0.67082 [2, 0, 0, 0, 1, 1, 1, 0, 0, 0]
exclude -0.9 1.13578 [-1, -2, -1, -3, -1, 1, -1, -1, 1, -1]
excluded -1.4 1.62481 [-2, -1, -3, -3, -2, -3, -2, 1, -1, 2]
exclusion -1.2 1.249 [-2, -2, -3, -1, -2, -1, -1, -1, 2, -1]
exclusive 0.5 0.92195 [0, 0, 0, -1, 2, 0, 1, | |
# Repository: securedataplane/preacher
# Testing the basic intent functionality of ONOS
class FUNCintent:
def __init__( self ):
    """Create the test class; TestON instantiates it with no arguments."""
    # Placeholder attribute kept for TestON's test-class conventions.
    self.default = ''
def CASE1( self, main ):
    # Imports are local because TestON executes each CASE body independently.
    import time
    import imp
    import re
    """
    - Construct tests variables
    - GIT ( optional )
        - Checkout ONOS master branch
        - Pull latest ONOS code
    - Building ONOS ( optional )
        - Install ONOS package
        - Build ONOS package
    """
    main.case( "Constructing test variables and building ONOS package" )
    main.step( "Constructing test variables" )
    main.caseExplanation = "This test case is mainly for loading " +\
                           "from params file, and pull and build the " +\
                           " latest ONOS package"
    stepResult = main.FALSE

    # Test variables: everything below is read from the .params file; any
    # missing key raises and aborts the test in the except clause.
    try:
        main.testOnDirectory = re.sub( "(/tests)$", "", main.testDir )
        main.apps = main.params[ 'ENV' ][ 'cellApps' ]
        gitBranch = main.params[ 'GIT' ][ 'branch' ]
        main.dependencyPath = main.testOnDirectory + \
                              main.params[ 'DEPENDENCY' ][ 'path' ]
        main.topology = main.params[ 'DEPENDENCY' ][ 'topology' ]
        main.scale = ( main.params[ 'SCALE' ][ 'size' ] ).split( "," )
        if main.ONOSbench.maxNodes:
            main.maxNodes = int( main.ONOSbench.maxNodes )
        else:
            main.maxNodes = 0
        wrapperFile1 = main.params[ 'DEPENDENCY' ][ 'wrapper1' ]
        wrapperFile2 = main.params[ 'DEPENDENCY' ][ 'wrapper2' ]
        wrapperFile3 = main.params[ 'DEPENDENCY' ][ 'wrapper3' ]
        # Sleep intervals (seconds) used by later cases.
        main.startUpSleep = int( main.params[ 'SLEEP' ][ 'startup' ] )
        main.checkIntentSleep = int( main.params[ 'SLEEP' ][ 'checkintent' ] )
        main.removeIntentSleep = int( main.params[ 'SLEEP' ][ 'removeintent' ] )
        main.rerouteSleep = int( main.params[ 'SLEEP' ][ 'reroute' ] )
        main.fwdSleep = int( main.params[ 'SLEEP' ][ 'fwd' ] )
        main.checkTopoAttempts = int( main.params[ 'SLEEP' ][ 'topoAttempts' ] )
        gitPull = main.params[ 'GIT' ][ 'pull' ]
        main.numSwitch = int( main.params[ 'MININET' ][ 'switch' ] )
        main.numLinks = int( main.params[ 'MININET' ][ 'links' ] )
        main.cellData = {}  # for creating cell file
        main.hostsData = {}
        main.CLIs = []
        main.ONOSip = []
        main.scapyHostNames = main.params[ 'SCAPY' ][ 'HOSTNAMES' ].split( ',' )
        main.scapyHosts = []  # List of scapy hosts for iterating
        main.assertReturnString = ''  # Assembled assert return string

        main.ONOSip = main.ONOSbench.getOnosIps()
        print main.ONOSip

        # Assigning ONOS cli handles to a list
        for i in range( 1, main.maxNodes + 1 ):
            main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )

        # -- INIT SECTION, ONLY RUNS ONCE -- #
        # Load the helper wrapper modules from the dependency directory.
        main.startUp = imp.load_source( wrapperFile1,
                                        main.dependencyPath +
                                        wrapperFile1 +
                                        ".py" )

        main.intentFunction = imp.load_source( wrapperFile2,
                                               main.dependencyPath +
                                               wrapperFile2 +
                                               ".py" )

        main.topo = imp.load_source( wrapperFile3,
                                     main.dependencyPath +
                                     wrapperFile3 +
                                     ".py" )

        # Copy the custom Mininet topology onto the Mininet machine.
        copyResult1 = main.ONOSbench.scp( main.Mininet1,
                                          main.dependencyPath +
                                          main.topology,
                                          main.Mininet1.home + "custom/",
                                          direction="to" )
        if main.CLIs:
            stepResult = main.TRUE
        else:
            main.log.error( "Did not properly created list of ONOS CLI handle" )
            stepResult = main.FALSE
    except Exception as e:
        # Any setup failure is fatal for the whole test run.
        main.log.exception(e)
        main.cleanup()
        main.exit()

    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="Successfully construct " +
                                    "test variables ",
                             onfail="Failed to construct test variables" )

    if gitPull == 'True':
        main.step( "Building ONOS in " + gitBranch + " branch" )
        onosBuildResult = main.startUp.onosBuild( main, gitBranch )
        stepResult = onosBuildResult
        utilities.assert_equals( expect=main.TRUE,
                                 actual=stepResult,
                                 onpass="Successfully compiled " +
                                        "latest ONOS",
                                 onfail="Failed to compile " +
                                        "latest ONOS" )
    else:
        main.log.warn( "Did not pull new code so skipping mvn " +
                       "clean install" )
    main.ONOSbench.getVersion( report=True )
def CASE2( self, main ):
    """
    - Set up cell
        - Create cell file
        - Set cell file
        - Verify cell file
    - Kill ONOS process
    - Uninstall ONOS cluster
    - Verify ONOS start up
    - Install ONOS cluster
    - Connect to cli
    """
    # main.scale[ 0 ] determines the current number of ONOS controller
    main.numCtrls = int( main.scale[ 0 ] )

    main.case( "Starting up " + str( main.numCtrls ) +
               " node(s) ONOS cluster" )
    main.caseExplanation = "Set up ONOS with " + str( main.numCtrls ) +\
                           " node(s) ONOS cluster"

    #kill off all onos processes
    main.log.info( "Safety check, killing all ONOS processes" +
                   " before initiating environment setup" )

    time.sleep( main.startUpSleep )

    main.step( "Uninstalling ONOS package" )
    onosUninstallResult = main.TRUE
    # Uninstall from every known node, not just the ones in this scale step.
    for ip in main.ONOSip:
        onosUninstallResult = onosUninstallResult and \
                              main.ONOSbench.onosUninstall( nodeIp=ip )
    stepResult = onosUninstallResult
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="Successfully uninstalled ONOS package",
                             onfail="Failed to uninstall ONOS package" )

    time.sleep( main.startUpSleep )

    for i in range( main.maxNodes ):
        main.ONOSbench.onosDie( main.ONOSip[ i ] )

    print "NODE COUNT = ", main.numCtrls

    # Build the cell from the first main.numCtrls controller IPs.
    tempOnosIp = []
    for i in range( main.numCtrls ):
        tempOnosIp.append( main.ONOSip[i] )

    main.ONOSbench.createCellFile( main.ONOSbench.ip_address,
                                   "temp", main.Mininet1.ip_address,
                                   main.apps, tempOnosIp )

    main.step( "Apply cell to environment" )
    cellResult = main.ONOSbench.setCell( "temp" )
    verifyResult = main.ONOSbench.verifyCell()
    stepResult = cellResult and verifyResult
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="Successfully applied cell to " + \
                                    "environment",
                             onfail="Failed to apply cell to environment " )

    main.step( "Creating ONOS package" )
    packageResult = main.ONOSbench.onosPackage()
    stepResult = packageResult
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="Successfully created ONOS package",
                             onfail="Failed to create ONOS package" )

    time.sleep( main.startUpSleep )
    main.step( "Installing ONOS package" )
    onosInstallResult = main.TRUE
    for i in range( main.numCtrls ):
        onosInstallResult = onosInstallResult and \
                            main.ONOSbench.onosInstall( node=main.ONOSip[ i ] )
    stepResult = onosInstallResult
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="Successfully installed ONOS package",
                             onfail="Failed to install ONOS package" )

    time.sleep( main.startUpSleep )
    main.step( "Starting ONOS service" )
    stopResult = main.TRUE
    startResult = main.TRUE
    onosIsUp = main.TRUE

    for i in range( main.numCtrls ):
        onosIsUp = onosIsUp and main.ONOSbench.isup( main.ONOSip[ i ] )
    if onosIsUp == main.TRUE:
        main.log.report( "ONOS instance is up and ready" )
    else:
        # One restart attempt if any instance failed to come up.
        main.log.report( "ONOS instance may not be up, stop and " +
                         "start ONOS again " )
        for i in range( main.numCtrls ):
            stopResult = stopResult and \
                         main.ONOSbench.onosStop( main.ONOSip[ i ] )
        for i in range( main.numCtrls ):
            startResult = startResult and \
                          main.ONOSbench.onosStart( main.ONOSip[ i ] )
    stepResult = onosIsUp and stopResult and startResult
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="ONOS service is ready",
                             onfail="ONOS service did not start properly" )

    main.step( "Start ONOS cli" )
    cliResult = main.TRUE
    for i in range( main.numCtrls ):
        cliResult = cliResult and \
                    main.CLIs[ i ].startOnosCli( main.ONOSip[ i ] )
    stepResult = cliResult
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="Successfully start ONOS cli",
                             onfail="Failed to start ONOS cli" )

    # Remove the first element in main.scale list so the next CASE2 run
    # starts the next cluster size.
    main.scale.remove( main.scale[ 0 ] )

    main.intentFunction.report( main )
def CASE8( self, main ):
"""
Compare ONOS Topology to Mininet Topology
"""
import json
main.case( "Compare ONOS Topology view to Mininet topology" )
main.caseExplanation = "Compare topology elements between Mininet" +\
" and ONOS"
main.log.info( "Gathering topology information from Mininet" )
devicesResults = main.FALSE # Overall Boolean for device correctness
linksResults = main.FALSE # Overall Boolean for link correctness
hostsResults = main.FALSE # Overall Boolean for host correctness
deviceFails = [] # Nodes where devices are incorrect
linkFails = [] # Nodes where links are incorrect
hostFails = [] # Nodes where hosts are incorrect
attempts = main.checkTopoAttempts # Remaining Attempts
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
main.step( "Comparing Mininet topology to ONOS topology" )
while ( attempts >= 0 ) and\
( not devicesResults or not linksResults or not hostsResults ):
time.sleep( 2 )
if not devicesResults:
devices = main.topo.getAllDevices( main )
ports = main.topo.getAllPorts( main )
devicesResults = main.TRUE
deviceFails = [] # Reset for each failed attempt
if not linksResults:
links = main.topo.getAllLinks( main )
linksResults = main.TRUE
linkFails = [] # Reset for each failed attempt
if not hostsResults:
hosts = main.topo.getAllHosts( main )
hostsResults = main.TRUE
hostFails = [] # Reset for each failed attempt
# Check for matching topology on each node
for controller in range( main.numCtrls ):
controllerStr = str( controller + 1 ) # ONOS node number
# Compare Devices
if devices[ controller ] and ports[ controller ] and\
"Error" not in devices[ controller ] and\
"Error" not in ports[ controller ]:
try:
deviceData = json.loads( devices[ controller ] )
portData = json.loads( ports[ controller ] )
except (TypeError,ValueError):
main.log.error( "Could not load json: {0} or {1}".format( str( devices[ controller ] ), str( ports[ controller ] ) ) )
currentDevicesResult = main.FALSE
else:
currentDevicesResult = main.Mininet1.compareSwitches(
mnSwitches,deviceData,portData )
else:
currentDevicesResult = main.FALSE
if not currentDevicesResult:
deviceFails.append( controllerStr )
devicesResults = devicesResults and currentDevicesResult
# Compare | |
"""
problog.forward - Forward compilation and evaluation
----------------------------------------------------
Forward compilation using TP-operator.
..
Part of the ProbLog distribution.
Copyright 2015 <NAME>, DTAI Research Group
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from .formula import LogicFormula, OrderedSet, atom
from .dd_formula import DD
from .sdd_formula import SDD
from .bdd_formula import BDD
from .core import transform
from .evaluator import Evaluator, EvaluatableDSP, InconsistentEvidenceError
from .dd_formula import build_dd
import warnings
import time
import logging
import copy
import signal
from .core import transform_create_as
from .util import UHeap
import random
from collections import defaultdict
def timeout_handler(signum, frame):
    """SIGALRM handler: abort the compilation by raising SystemError."""
    message = 'Process timeout (Python) [%s]' % signum
    raise SystemError(message)
class ForwardInference(DD):
def __init__(self, compile_timeout=None, **kwdargs):
super(ForwardInference, self).__init__(auto_compact=False, **kwdargs)
self._inodes_prev = None
self._inodes_old = None
self._inodes_neg = None
self._facts = None
self._atoms_in_rules = None
self._completed = None
self.timeout = compile_timeout
self._update_listeners = []
self._node_depths = None
self.evidence_node = 0
    def register_update_listener(self, obj):
        """Register an observer for node update/completion events.

        :param obj: listener implementing ``node_updated(source, node,
            complete)`` and ``node_completed(source, node)``
        """
        self._update_listeners.append(obj)
    def _create_atom(self, identifier, probability, group, name=None, source=None):
        """Create the internal record for an atom node.

        Delegates to the module-level ``atom`` factory, forwarding the
        optional ``name`` and ``source`` fields as well.
        """
        return atom(identifier, probability, group, name, source)
def is_complete(self, node):
node = abs(node)
return self._completed[node - 1]
    def set_complete(self, node):
        """Mark the node with the given (1-based) index as complete."""
        self._completed[node - 1] = True
def init_build(self):
if self.evidence():
ev = [n for q, n in self.evidence() if n is None or n != 0]
if ev:
if len(ev) == 1:
self.evidence_node = ev[0]
else:
self.evidence_node = self.add_and(ev)
else:
# Only deterministically true evidence
self.evidence_node = 0
self._facts = [] # list of facts
self._atoms_in_rules = defaultdict(OrderedSet) # lookup all rules in which an atom is used
self._completed = [False] * len(self)
self._compute_node_depths()
for index, node, nodetype in self:
if self._node_depths[index - 1] is not None:
# only include nodes that are reachable from a query or evidence
if nodetype == 'atom': # it's a fact
self._facts.append(index)
self.set_complete(index)
else: # it's a compound
for atom in node.children:
self._atoms_in_rules[abs(atom)].add(index)
self.build_constraint_dd()
self.inodes = [None] * len(self)
self._inodes_prev = [None] * len(self)
self._inodes_old = [None] * len(self)
self._inodes_neg = [None] * len(self)
self._compute_minmax_depths()
def _propagate_complete(self, interrupted=False):
if not interrupted:
for i, c in enumerate(self._completed):
if not c:
self._completed[i] = True
self.notify_node_completed(i + 1)
else:
updated_nodes = set([(i + 1) for i, c in enumerate(self._completed) if c])
while updated_nodes:
next_updates = set()
# Find all heads that are affected
affected_nodes = set()
for node in updated_nodes:
for rule in self._atoms_in_rules[node]:
if not self.is_complete(rule):
affected_nodes.add(rule)
for head in affected_nodes:
# head must be compound
node = self.get_node(head)
children = [self.is_complete(c) for c in node.children]
if False not in children:
self.is_complete(head)
self.notify_node_completed(head)
next_updates.add(head)
updated_nodes = next_updates
def _compute_node_depths(self):
"""Compute node depths in breadth-first manner."""
self._node_depths = [None] * len(self)
self._node_levels = []
# Start with current nodes
current_nodes = set(abs(n) for q, n, l in self.labeled() if self.is_probabilistic(n))
if self.is_probabilistic(self.evidence_node):
current_nodes.add(abs(self.evidence_node))
current_level = 0
while current_nodes:
self._node_levels.append(current_nodes)
next_nodes = set()
for index in current_nodes:
self._node_depths[index - 1] = current_level
node = self.get_node(index)
nodetype = type(node).__name__
if nodetype != 'atom':
for c in node.children:
if self.is_probabilistic(c):
if self._node_depths[abs(c) - 1] is None:
next_nodes.add(abs(c))
current_nodes = next_nodes
current_level += 1
    def _compute_minmax_depths(self):
        """Compute the 'minmax' depth of every node, bottom-up per BFS level.

        A rule's minmax depth combines its children's values: a conjunction
        takes the max over children, a disjunction the min. The result is
        used by the recompute heuristic (:py:meth:`_heuristic_key_depth`).
        """
        self._node_minmax = [None] * len(self)
        # Walk the BFS levels from deepest to shallowest so children are
        # processed before the rules that use them.
        for level, nodes in reversed(list(enumerate(self._node_levels))):
            for index in nodes:
                # Get current node's minmax
                minmax = self._node_minmax[index - 1]
                if minmax is None:
                    minmax = level
                for rule in self._atoms_in_rules[index]:
                    rule_minmax = self._node_minmax[rule - 1]
                    if rule_minmax is None:
                        # First child seen for this rule.
                        self._node_minmax[rule - 1] = minmax
                    else:
                        node = self.get_node(rule)
                        nodetype = type(node).__name__
                        if nodetype == 'conj':
                            rule_minmax = max(minmax, rule_minmax)
                        else:  # disj
                            rule_minmax = min(minmax, rule_minmax)
                        self._node_minmax[rule - 1] = rule_minmax
def _update_minmax_depths(self, index, new_minmax=0):
"""Update the minmax depth data structure when the given node is completed.
:param index:
:return:
"""
current_minmax = self._node_minmax[index - 1]
self._node_minmax[index - 1] = new_minmax
for parent in self._atoms_in_rules[index]:
parent_minmax = self._node_minmax[parent - 1]
if current_minmax == parent_minmax:
# Current node is best child => we need to recompute
parent_node = self.get_node(parent)
parent_nodetype = type(parent_node).__name__
parent_children_minmax = [self._node_minmax[c - 1]
for c in parent_node.children
if not self.is_complete(c)]
if not parent_children_minmax:
# No incomplete children
self.set_complete(parent)
parent_minmax = 0
elif parent_nodetype == 'conj':
parent_minmax == max(parent_children_minmax)
else:
parent_minmax == min(parent_children_minmax)
self._update_minmax_depths(parent, parent_minmax)
    def sort_nodes(self, nodes):
        """Return the nodes sorted by increasing breadth-first depth.

        Requires :py:meth:`_compute_node_depths` to have run first.
        """
        return sorted(nodes, key=lambda i: self._node_depths[i - 1])
    def notify_node_updated(self, node, complete):
        """Inform all registered listeners that a node's inode changed."""
        for obj in self._update_listeners:
            obj.node_updated(self, node, complete)
    def notify_node_completed(self, node):
        """Inform all registered listeners that a node became complete."""
        for obj in self._update_listeners:
            obj.node_completed(self, node)
    def _heuristic_key_depth(self, node):
        """Priority key for the recompute heap.

        Returns a (minmax depth, BFS depth, random tie-breaker) tuple so
        shallower nodes are recomputed first.
        """
        # For OR: D(n) is min(D(c) for c in children)
        # For AND: D(n) is max(D(c) for c in children)
        return self._node_minmax[node - 1], self._node_depths[node - 1], random.random()
    def _heuristic_key(self, node):
        """Ordering key used by the recompute heap (depth-based heuristic)."""
        return self._heuristic_key_depth(node)
    def build_iteration(self, updated_nodes):
        """Run one fixpoint pass: recompute every rule (transitively) affected
        by the given nodes, best-first according to the heuristic key.

        :param updated_nodes: iterable of node indices whose inode changed
        """
        to_recompute = UHeap(key=self._heuristic_key)
        for node in updated_nodes:
            for rule in self._atoms_in_rules[node]:
                to_recompute.push(rule)
        # nodes_to_recompute should be an updateable heap without duplicates
        while to_recompute:
            key, node = to_recompute.pop_with_key()
            if self.update_inode(node):  # The node has changed
                # Find rules that may be affected
                for rule in self._atoms_in_rules[node]:
                    to_recompute.push(rule)
                # Notify listeners that node was updated
                self.notify_node_updated(node, self.is_complete(node))
            elif self.is_complete(node):
                self.notify_node_completed(node)
            # if self.is_complete(node):
            #     self._update_minmax_depths(node)
    def build_iteration_levelwise(self, updated_nodes):
        """Alternative fixpoint pass: recompute affected heads in waves,
        processing each wave in order of increasing node depth.

        :param updated_nodes: iterable of node indices whose inode changed
        """
        while updated_nodes:
            next_updates = OrderedSet()
            # Find all heads that are affected
            affected_nodes = OrderedSet()
            for node in updated_nodes:
                for rule in self._atoms_in_rules[node]:
                    affected_nodes.add(rule)
            affected_nodes = self.sort_nodes(affected_nodes)
            # print (affected_nodes, [self._node_depths[i-1] for i in affected_nodes])
            for head in affected_nodes:
                if self.update_inode(head):
                    next_updates.add(head)
                    self.notify_node_updated(head, self.is_complete(head))
                elif self.is_complete(head):
                    self.notify_node_completed(head)
            updated_nodes = next_updates
    def build_stratum(self, updated_nodes):
        """Compute the fixpoint of the current stratum and prepare the next.

        :param updated_nodes: nodes whose inode changed before this stratum
        :return: set of nodes whose inode changed during this stratum; empty
            means a global fixpoint was reached
        """
        self.build_iteration(updated_nodes)
        # Collect the nodes whose inode differs from the previous stratum.
        updated_nodes = OrderedSet()
        for i, nodes in enumerate(zip(self.inodes, self._inodes_old)):
            if not self.get_manager().same(*nodes):
                updated_nodes.add(i + 1)
        # self.notify_node_updated(i + 1)
        # Keep the new generation of inodes alive, release the old one.
        self.get_manager().ref(*filter(None, self.inodes))
        self.get_manager().deref(*filter(None, self._inodes_prev))
        self.get_manager().deref(*filter(None, self._inodes_neg))
        # Only completed nodes should be used for negation in the next stratum.
        self._inodes_old = self.inodes[:]
        self._inodes_prev = [None] * len(self)
        for i, n in enumerate(self.inodes):
            if self._completed[i]:
                self._inodes_prev[i] = n
        self._inodes_neg = [None] * len(self)
        return updated_nodes
def build_dd(self):
required_nodes = set([abs(n) for q, n, l in self.labeled() if self.is_probabilistic(n)])
required_nodes |= set([abs(n) for q, n, v in self.evidence_all() if self.is_probabilistic(n)])
if self.timeout:
# signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(self.timeout)
signal.signal(signal.SIGALRM, timeout_handler)
logging.getLogger('problog').info('Set timeout:', self.timeout)
try:
self.init_build()
updated_nodes = OrderedSet(self._facts)
while updated_nodes:
# TODO only check nodes that are actually used in negation
updated_nodes = self.build_stratum(updated_nodes)
self._propagate_complete(False)
except SystemError as err:
self._propagate_complete(True)
logging.getLogger('problog').warning(err)
except KeyboardInterrupt as err:
self._propagate_complete(True)
logging.getLogger('problog').warning(err)
signal.alarm(0)
self.build_constraint_dd()
    def current(self):
        """Export the current compilation state as a plain LogicFormula.

        Each node's inode (converted to int) is attached through the ``name``
        field so callers can relate formula nodes to their compiled diagrams.

        :return: a new :py:class:`LogicFormula` mirroring this formula
        """
        destination = LogicFormula(auto_compact=False)
        source = self
        # TODO maintain a translation table
        for i, n, t in source:
            inode = self.get_inode(i)
            if inode is not None:
                inode = int(inode)
            if t == 'atom':
                j = destination.add_atom(n.identifier, n.probability, n.group, name=inode)
            elif t == 'conj':
                # Drop children that have not been compiled yet.
                children = [c for c in n.children if self.get_inode(c) is not None]
                j = destination.add_and(children, name=inode)
            elif t == 'disj':
                children = [c for c in n.children if self.get_inode(c) is not None]
                j = destination.add_or(children, name=inode)
            else:
                raise TypeError('Unknown node type')
            # Node indices must line up between source and destination.
            assert i == j
        for name, node, label in source.get_names_with_label():
            if label != self.LABEL_NAMED:
                destination.add_name(name, node, label)
        for c in source.constraints():
            if c.is_nontrivial():
                destination.add_constraint(c)
        return destination
def update_inode(self, index):
"""Recompute the inode at the given index."""
was_complete = self.is_complete(index)
oldnode = self.get_inode(index)
node = self.get_node(index)
assert index > 0
nodetype = type(node).__name__
if nodetype == 'conj':
children = [self.get_inode(c) for c in node.children]
children_complete = [self.is_complete(c) for c in node.children]
if None in children:
newnode = None # don't compute if some children are still unknown
else:
newnode = self.get_manager().conjoin(*children)
if False not in children_complete:
self.set_complete(index)
elif nodetype == 'disj':
children = [self.get_inode(c) for c in node.children]
children_complete = [self.is_complete(c) for c in node.children]
children = list(filter(None, children)) # discard children that are still unknown
if children:
newnode = self.get_manager().disjoin(*children)
else:
newnode = None
if False not in children_complete:
self.set_complete(index)
else:
raise TypeError('Unexpected node type.')
# Add constraints
if newnode is not None:
newernode = self.get_manager().conjoin(newnode, self.get_constraint_inode())
self.get_manager().deref(newnode)
newnode = newernode
if self.get_manager().same(oldnode, newnode):
return self.is_complete(index) != | |
import pybamm
import unittest
import numpy as np
class TestQuickPlot(unittest.TestCase):
    """Integration tests for pybamm.QuickPlot: 0D/1D/2D variables, multiple
    solutions, axis/variable limits, and time/space unit handling.

    FIX: the ``$\\mu m$`` comparison strings are now raw strings — ``"\\m"``
    is an invalid escape sequence and triggers warnings on newer Pythons
    (the byte content of the literals is unchanged).
    """

    def test_simple_ode_model(self):
        """Exercise QuickPlot end-to-end on a model with simple ODE solutions."""
        model = pybamm.lithium_ion.BaseModel(name="Simple ODE Model")
        whole_cell = ["negative electrode", "separator", "positive electrode"]
        # Create variables: domain is explicitly empty since these variables are only
        # functions of time
        a = pybamm.Variable("a", domain=[])
        b = pybamm.Variable("b", domain=[])
        c = pybamm.Variable("c", domain=[])
        # Simple ODEs
        model.rhs = {a: pybamm.Scalar(2), b: pybamm.Scalar(0), c: -c}
        # Simple initial conditions
        model.initial_conditions = {
            a: pybamm.Scalar(0),
            b: pybamm.Scalar(1),
            c: pybamm.Scalar(1),
        }
        # no boundary conditions for an ODE model
        # Broadcast some of the variables
        model.variables = {
            "a": a,
            "b broadcasted": pybamm.FullBroadcast(b, whole_cell, "current collector"),
            "c broadcasted": pybamm.FullBroadcast(
                c, ["negative electrode", "separator"], "current collector"
            ),
            "b broadcasted negative electrode": pybamm.PrimaryBroadcast(
                b, "negative particle"
            ),
            "c broadcasted positive electrode": pybamm.PrimaryBroadcast(
                c, "positive particle"
            ),
        }
        model.timescale = pybamm.Scalar(1)
        # ODEs only (don't use jacobian)
        model.use_jacobian = False
        # Process and solve
        geometry = model.default_geometry
        param = model.default_parameter_values
        param.process_model(model)
        param.process_geometry(geometry)
        mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)
        disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
        disc.process_model(model)
        solver = model.default_solver
        t_eval = np.linspace(0, 2, 100)
        solution = solver.solve(model, t_eval)
        quick_plot = pybamm.QuickPlot(
            solution,
            [
                "a",
                "b broadcasted",
                "c broadcasted",
                "b broadcasted negative electrode",
                "c broadcasted positive electrode",
            ],
        )
        quick_plot.plot(0)
        # update the axis
        new_axis = [0, 0.5, 0, 1]
        quick_plot.axis_limits.update({("a",): new_axis})
        self.assertEqual(quick_plot.axis_limits[("a",)], new_axis)
        # and now reset them
        quick_plot.reset_axis()
        self.assertNotEqual(quick_plot.axis_limits[("a",)], new_axis)
        # check dynamic plot loads
        quick_plot.dynamic_plot(testing=True)
        quick_plot.slider_update(0.01)
        # Test with different output variables
        quick_plot = pybamm.QuickPlot(solution, ["b broadcasted"])
        self.assertEqual(len(quick_plot.axis_limits), 1)
        quick_plot.plot(0)
        quick_plot = pybamm.QuickPlot(
            solution,
            [
                ["a", "a"],
                ["b broadcasted", "b broadcasted"],
                "c broadcasted",
                "b broadcasted negative electrode",
                "c broadcasted positive electrode",
            ],
        )
        self.assertEqual(len(quick_plot.axis_limits), 5)
        quick_plot.plot(0)
        # update the axis
        new_axis = [0, 0.5, 0, 1]
        var_key = ("c broadcasted",)
        quick_plot.axis_limits.update({var_key: new_axis})
        self.assertEqual(quick_plot.axis_limits[var_key], new_axis)
        # and now reset them
        quick_plot.reset_axis()
        self.assertNotEqual(quick_plot.axis_limits[var_key], new_axis)
        # check dynamic plot loads
        quick_plot.dynamic_plot(testing=True)
        quick_plot.slider_update(0.01)
        # Test longer name
        model.variables["Variable with a very long name"] = model.variables["a"]
        quick_plot = pybamm.QuickPlot(solution, ["Variable with a very long name"])
        quick_plot.plot(0)
        # Test different inputs
        quick_plot = pybamm.QuickPlot(
            [solution, solution],
            ["a"],
            colors=["r", "g", "b"],
            linestyles=["-", "--"],
            figsize=(1, 2),
            labels=["sol 1", "sol 2"],
        )
        self.assertEqual(quick_plot.colors, ["r", "g", "b"])
        self.assertEqual(quick_plot.linestyles, ["-", "--"])
        self.assertEqual(quick_plot.figsize, (1, 2))
        self.assertEqual(quick_plot.labels, ["sol 1", "sol 2"])
        # Test different time units
        quick_plot = pybamm.QuickPlot(solution, ["a"])
        self.assertEqual(quick_plot.time_scaling_factor, 1)
        quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="seconds")
        quick_plot.plot(0)
        self.assertEqual(quick_plot.time_scaling_factor, 1)
        np.testing.assert_array_almost_equal(
            quick_plot.plots[("a",)][0][0].get_xdata(), t_eval
        )
        np.testing.assert_array_almost_equal(
            quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
        )
        quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="minutes")
        quick_plot.plot(0)
        self.assertEqual(quick_plot.time_scaling_factor, 60)
        np.testing.assert_array_almost_equal(
            quick_plot.plots[("a",)][0][0].get_xdata(), t_eval / 60
        )
        np.testing.assert_array_almost_equal(
            quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
        )
        quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="hours")
        quick_plot.plot(0)
        self.assertEqual(quick_plot.time_scaling_factor, 3600)
        np.testing.assert_array_almost_equal(
            quick_plot.plots[("a",)][0][0].get_xdata(), t_eval / 3600
        )
        np.testing.assert_array_almost_equal(
            quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
        )
        with self.assertRaisesRegex(ValueError, "time unit"):
            pybamm.QuickPlot(solution, ["a"], time_unit="bad unit")
        # long solution defaults to hours instead of seconds
        solution_long = solver.solve(model, np.linspace(0, 1e5))
        quick_plot = pybamm.QuickPlot(solution_long, ["a"])
        self.assertEqual(quick_plot.time_scaling_factor, 3600)
        # Test different spatial units
        quick_plot = pybamm.QuickPlot(solution, ["a"])
        self.assertEqual(quick_plot.spatial_unit, r"$\mu m$")
        quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="m")
        self.assertEqual(quick_plot.spatial_unit, "m")
        quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="mm")
        self.assertEqual(quick_plot.spatial_unit, "mm")
        quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="um")
        self.assertEqual(quick_plot.spatial_unit, r"$\mu m$")
        with self.assertRaisesRegex(ValueError, "spatial unit"):
            pybamm.QuickPlot(solution, ["a"], spatial_unit="bad unit")
        # Test 2D variables
        model.variables["2D variable"] = disc.process_symbol(
            pybamm.FullBroadcast(
                1, "negative particle", {"secondary": "negative electrode"}
            )
        )
        quick_plot = pybamm.QuickPlot(solution, ["2D variable"])
        quick_plot.plot(0)
        quick_plot.dynamic_plot(testing=True)
        quick_plot.slider_update(0.01)
        with self.assertRaisesRegex(NotImplementedError, "Cannot plot 2D variables"):
            pybamm.QuickPlot([solution, solution], ["2D variable"])
        # Test different variable limits
        quick_plot = pybamm.QuickPlot(
            solution, ["a", ["c broadcasted", "c broadcasted"]], variable_limits="tight"
        )
        self.assertEqual(quick_plot.axis_limits[("a",)][2:], [None, None])
        self.assertEqual(
            quick_plot.axis_limits[("c broadcasted", "c broadcasted")][2:], [None, None]
        )
        quick_plot.plot(0)
        quick_plot.slider_update(1)
        quick_plot = pybamm.QuickPlot(
            solution, ["2D variable"], variable_limits="tight"
        )
        self.assertEqual(quick_plot.variable_limits[("2D variable",)], (None, None))
        quick_plot.plot(0)
        quick_plot.slider_update(1)
        quick_plot = pybamm.QuickPlot(
            solution,
            ["a", ["c broadcasted", "c broadcasted"]],
            variable_limits={"a": [1, 2], ("c broadcasted", "c broadcasted"): [3, 4]},
        )
        self.assertEqual(quick_plot.axis_limits[("a",)][2:], [1, 2])
        self.assertEqual(
            quick_plot.axis_limits[("c broadcasted", "c broadcasted")][2:], [3, 4]
        )
        quick_plot.plot(0)
        quick_plot.slider_update(1)
        quick_plot = pybamm.QuickPlot(
            solution, ["a", "b broadcasted"], variable_limits={"a": "tight"}
        )
        self.assertEqual(quick_plot.axis_limits[("a",)][2:], [None, None])
        self.assertNotEqual(
            quick_plot.axis_limits[("b broadcasted",)][2:], [None, None]
        )
        quick_plot.plot(0)
        quick_plot.slider_update(1)
        with self.assertRaisesRegex(
            TypeError, "variable_limits must be 'fixed', 'tight', or a dict"
        ):
            pybamm.QuickPlot(
                solution, ["a", "b broadcasted"], variable_limits="bad variable limits"
            )
        # Test errors
        with self.assertRaisesRegex(ValueError, "Mismatching variable domains"):
            pybamm.QuickPlot(solution, [["a", "b broadcasted"]])
        with self.assertRaisesRegex(ValueError, "labels"):
            pybamm.QuickPlot(
                [solution, solution], ["a"], labels=["sol 1", "sol 2", "sol 3"]
            )
        # No variable can be NaN
        model.variables["NaN variable"] = disc.process_symbol(pybamm.Scalar(np.nan))
        with self.assertRaisesRegex(
            ValueError, "All-NaN variable 'NaN variable' provided"
        ):
            pybamm.QuickPlot(solution, ["NaN variable"])
        pybamm.close_plots()

    def test_spm_simulation(self):
        """QuickPlot accepts a mix of Simulation and Solution objects."""
        # SPM
        model = pybamm.lithium_ion.SPM()
        sim = pybamm.Simulation(model)
        t_eval = np.linspace(0, 10, 2)
        sim.solve(t_eval)
        # mixed simulation and solution input
        # solution should be extracted from the simulation
        quick_plot = pybamm.QuickPlot([sim, sim.solution])
        quick_plot.plot(0)
        pybamm.close_plots()

    def test_loqs_spme(self):
        """Check QuickPlot on SPMe and LOQS models, including that 1D and 2D
        (space) variables update correctly for each supported time unit."""
        t_eval = np.linspace(0, 10, 2)
        for model in [pybamm.lithium_ion.SPMe(), pybamm.lead_acid.LOQS()]:
            geometry = model.default_geometry
            param = model.default_parameter_values
            param.process_model(model)
            param.process_geometry(geometry)
            var = pybamm.standard_spatial_vars
            var_pts = {var.x_n: 5, var.x_s: 5, var.x_p: 5, var.r_n: 5, var.r_p: 5}
            mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)
            disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
            disc.process_model(model)
            solver = model.default_solver
            solution = solver.solve(model, t_eval)
            pybamm.QuickPlot(solution)
            # check 1D (space) variables update properly for different time units
            t = solution["Time [s]"].entries
            c_e_var = solution["Electrolyte concentration [mol.m-3]"]
            # 1D variables should be evaluated on edges
            L_x = param.evaluate(pybamm.geometric_parameters.L_x)
            c_e = c_e_var(t=t, x=mesh.combine_submeshes(*c_e_var.domain).edges * L_x)
            for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
                quick_plot = pybamm.QuickPlot(
                    solution, ["Electrolyte concentration [mol.m-3]"], time_unit=unit
                )
                quick_plot.plot(0)
                qp_data = (
                    quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][
                        0
                    ].get_ydata(),
                )[0]
                np.testing.assert_array_almost_equal(qp_data, c_e[:, 0])
                quick_plot.slider_update(t_eval[-1] / scale)
                qp_data = (
                    quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][
                        0
                    ].get_ydata(),
                )[0][:, 0]
                np.testing.assert_array_almost_equal(qp_data, c_e[:, 1])
            # test quick plot of particle for spme
            if model.name == "Single Particle Model with electrolyte":
                output_variables = [
                    "X-averaged negative particle concentration [mol.m-3]",
                    "X-averaged positive particle concentration [mol.m-3]",
                    "Negative particle concentration [mol.m-3]",
                    "Positive particle concentration [mol.m-3]",
                ]
                pybamm.QuickPlot(solution, output_variables)
                # check 2D (space) variables update properly for different time units
                c_n = solution["Negative particle concentration [mol.m-3]"].entries
                for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
                    quick_plot = pybamm.QuickPlot(
                        solution,
                        ["Negative particle concentration [mol.m-3]"],
                        time_unit=unit,
                    )
                    quick_plot.plot(0)
                    qp_data = quick_plot.plots[
                        ("Negative particle concentration [mol.m-3]",)
                    ][0][1]
                    np.testing.assert_array_almost_equal(qp_data, c_n[:, :, 0])
                    quick_plot.slider_update(t_eval[-1] / scale)
                    qp_data = quick_plot.plots[
                        ("Negative particle concentration [mol.m-3]",)
                    ][0][1]
                    np.testing.assert_array_almost_equal(qp_data, c_n[:, :, 1])
        pybamm.close_plots()

    def test_plot_1plus1D_spme(self):
        """2D (x,z) variables from a 1+1D SPMe update with each time unit."""
        spm = pybamm.lithium_ion.SPMe(
            {"current collector": "potential pair", "dimensionality": 1}
        )
        geometry = spm.default_geometry
        param = spm.default_parameter_values
        param.process_model(spm)
        param.process_geometry(geometry)
        var = pybamm.standard_spatial_vars
        var_pts = {var.x_n: 5, var.x_s: 5, var.x_p: 5, var.r_n: 5, var.r_p: 5, var.z: 5}
        mesh = pybamm.Mesh(geometry, spm.default_submesh_types, var_pts)
        disc_spm = pybamm.Discretisation(mesh, spm.default_spatial_methods)
        disc_spm.process_model(spm)
        t_eval = np.linspace(0, 100, 10)
        solution = spm.default_solver.solve(spm, t_eval)
        # check 2D (x,z space) variables update properly for different time units
        # Note: these should be the transpose of the entries in the processed variable
        c_e = solution["Electrolyte concentration [mol.m-3]"].entries
        for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
            quick_plot = pybamm.QuickPlot(
                solution, ["Electrolyte concentration [mol.m-3]"], time_unit=unit
            )
            quick_plot.plot(0)
            qp_data = quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][1]
            np.testing.assert_array_almost_equal(qp_data.T, c_e[:, :, 0])
            quick_plot.slider_update(t_eval[-1] / scale)
            qp_data = quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][1]
            np.testing.assert_array_almost_equal(qp_data.T, c_e[:, :, -1])
        pybamm.close_plots()

    def test_plot_2plus1D_spm(self):
        """2D (y,z) variables from a 2+1D SPM update with each time unit."""
        spm = pybamm.lithium_ion.SPM(
            {"current collector": "potential pair", "dimensionality": 2}
        )
        geometry = spm.default_geometry
        param = spm.default_parameter_values
        param.process_model(spm)
        param.process_geometry(geometry)
        var = pybamm.standard_spatial_vars
        var_pts = {
            var.x_n: 5,
            var.x_s: 5,
            var.x_p: 5,
            var.r_n: 5,
            var.r_p: 5,
            var.y: 5,
            var.z: 5,
        }
        mesh = pybamm.Mesh(geometry, spm.default_submesh_types, var_pts)
        disc_spm = pybamm.Discretisation(mesh, spm.default_spatial_methods)
        disc_spm.process_model(spm)
        t_eval = np.linspace(0, 100, 10)
        solution = spm.default_solver.solve(spm, t_eval)
        quick_plot = pybamm.QuickPlot(
            solution,
            [
                "Negative current collector potential [V]",
                "Positive current collector potential [V]",
                "Terminal voltage [V]",
            ],
        )
        quick_plot.dynamic_plot(testing=True)
        quick_plot.slider_update(1)
        # check 2D (y,z space) variables update properly for different time units
        phi_n = solution["Negative current collector potential [V]"].entries
        for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
            quick_plot = pybamm.QuickPlot(
                solution, ["Negative current collector potential [V]"], time_unit=unit
            )
            quick_plot.plot(0)
            qp_data = quick_plot.plots[("Negative current collector potential [V]",)][
                0
            ][1]
            np.testing.assert_array_almost_equal(qp_data, phi_n[:, :, 0])
            quick_plot.slider_update(t_eval[-1] / scale)
            qp_data = quick_plot.plots[("Negative current collector potential [V]",)][
                0
            ][1]
            np.testing.assert_array_almost_equal(qp_data, phi_n[:, :, -1])
        with self.assertRaisesRegex(NotImplementedError, "Shape not recognized for"):
            pybamm.QuickPlot(solution, ["Negative particle concentration [mol.m-3]"])
        pybamm.close_plots()

    def test_failure(self):
        """QuickPlot rejects inputs that are not solutions/simulations."""
        with self.assertRaisesRegex(TypeError, "solutions must be"):
            pybamm.QuickPlot(1)
if __name__ == "__main__":
| |
internal system
raise NotImplementedError('Inventory data system not defined.')
# TODO: Complete the following function
def _load_current_forecast_data():
#Load current inventory from SAP HANA or relevant internal system
raise NotImplementedError('Inventory data system not defined.')
# extension = os.path.basename(path).split('.')[-1].lower()
# if extension == 'xlsx':
# pass
# return forecast
# TODO: Complete the following function
def _load_current_order_data():
# Loads current inventory from SAP HANA or relevant internal system
raise NotImplementedError('Order data system not defined.')
def _load_state_data_by_file(path, dtype='', pandas=False):
# Check to see when file was last modified. Prompt the user to continue
# if file is old.
today = datetime.now().date()
file_last_modified = datetime.utcfromtimestamp(os.path.getmtime(path)).date()
if today > file_last_modified:
user_input = input('{} file was last modified on {}'.format(
dtype, file_last_modified) + ' Do you want to continue working with' + \
' this data? (y/n)\n>>>>>>\t')
if str2bool(user_input) == False:
sys.exit('Program exited.')
supported_extensions = ['csv', 'xlsx', 'pkl']
# Load relevant xlsx workbook or csv file with inventory levels.
extension = os.path.basename(path).split('.')[-1]
if extension.lower() == 'csv':
data = pd.read_csv(path)
if data.shape[1] <= 1:
# Try another separator
data = pd.read_csv(path, sep=';')
elif extension.lower() == 'xlsx':
data = pd.read_excel(path, dtype=str)
elif extension.lower() == 'pkl':
data = pickle.load(open(path, 'rb'))
else:
raise ValueError('Extension {} not supported.'.format(extension) + \
' Ensure file is in one of the following formats: {}'.format(
supported_extensions))
if type(data) == pd.core.frame.DataFrame:
try:
data = data.drop('Unnamed: 0', axis=1)
except KeyError:
pass
if pandas == False:
data = data.values
return data
def XLTableExpandToDataFrame(location, limit=1000, index=1):
    '''
    Expand an Excel table into a DataFrame, starting at its top-left cell.

    Cells are collected row by row; each row stops at the first empty (None)
    cell, and reading stops when a row's first cell is empty.

    Inputs
    =========================================================================
    location: worksheet cell at the table's top-left corner (anything with
        a ``.value`` attribute and an ``.offset(row, col)`` method)
    limit: int that caps the number of rows and columns read
    index: 0 for a numeric index the size of the frame, 1 to use the first
        column of the table as the index
    '''
    assert index == 0 or index == 1, 'Index value must be either 0 or 1.'
    rows = []
    row_num = 0
    row_anchor = location
    while row_anchor.value is not None and row_num < limit:
        current_row = []
        col_num = 0
        cell = row_anchor
        while cell.value is not None and col_num < limit:
            current_row.append(cell.value)
            col_num += 1
            cell = location.offset(row_num, col_num)
        rows.append(current_row)
        row_num += 1
        row_anchor = location.offset(row_num, 0)
    table = np.vstack(rows)
    if index == 1:
        result = pd.DataFrame(data=table[1:, 1:], columns=table[0, 1:],
                              index=table[1:, 0])
    else:
        result = pd.DataFrame(data=table[1:, :], columns=table[0, :],
                              index=np.arange(table.shape[0] - 1))
    # Convert columns to numeric where possible, leaving text columns alone.
    return result.apply(pd.to_numeric, downcast="float", errors="ignore")
def load_product_data_from_excel(product_data_path):
    """Read train, product, transition and finished-goods tables from the
    planning workbook and assemble the model inputs.

    Params
    ------
    product_data_path: path to an .xlsx workbook whose 'Overview' sheet
        defines the named ranges 'Trains', 'Products', 'ProductsTransition'
        and 'ProductsFinished'.

    Returns
    -------
    tuple of (product array, transition matrix with a prepended startup
    row/column, ZFIN gmid array, ZFIN->GMID dict, ZFIN detail dict).
    """
    wb = pyxl.load_workbook(product_data_path, data_only=True)
    # Load train data; each named range points at a header cell, so offset
    # by one row to reach the table's top-left data anchor.
    trains_loc = wb['Overview'][
        wb.defined_names['Trains'].value.split("!")[1]].offset(1,0)
    trains_df = XLTableExpandToDataFrame(trains_loc)
    # Load production data
    prod_loc = wb['Overview'][
        wb.defined_names['Products'].value.split('!')[1]].offset(1,0)
    prod_df = XLTableExpandToDataFrame(prod_loc, index=0)
    prod_df.insert(0, 'train', trains_df['train_number'].values[0].astype(int))
    # Load transition data
    trans_loc = wb['Overview'][wb.defined_names[
        'ProductsTransition'].value.split('!')[1]].offset(1,0)
    trans_df = XLTableExpandToDataFrame(trans_loc, index=1)
    # Transform transition data: alphabetic entries in the matrix are
    # replaced by the largest batch size before casting to float.
    max_losses = prod_df['batch_size'].max().astype(str)
    transition_matrix = replace_chars_vec(max_losses,
                                          trans_df.values).astype(float)
    # Prepend the startup losses as an extra first column and first row.
    transition_matrix = np.hstack([prod_df['startup'].values.reshape(-1, 1),
                                   transition_matrix])
    transition_matrix = np.vstack([np.hstack([0, prod_df['startup']]),
                                   transition_matrix])
    # Get final products
    zfin_loc = wb['Overview'][wb.defined_names[
        'ProductsFinished'].value.split('!')[1]].offset(1,0)
    zfin_list = XLTableExpandToDataFrame(zfin_loc)['gmid'].astype(int).values
    # Get ZFIN-ZEMI/GMID mappings
    zfin_loc = wb['Overview'][wb.defined_names['ProductsFinished'].value.split('!')[1]].offset(1,0)
    zfin_df = XLTableExpandToDataFrame(zfin_loc, index=0)
    # Shared base name: drop the last two name tokens, or only the last one
    # when the name does not end in 'KG'.
    zemi = prod_df['product_name'].map(lambda x: ' '.join(x.split(' ')[:-2]))
    zfin = zfin_df['product_name'].map(lambda x: ' '.join(x.split(' ')[:-2])
                                       if x.split(' ')[-1] == 'KG' else
                                       ' '.join(x.split(' ')[:-1]))
    prod_df2 = prod_df.copy()
    prod_df2['zemi'] = zemi
    zfin_df['zemi'] = zfin
    # Merge frames; suffix _x = finished goods (ZFIN), _y = products.
    merged = zfin_df.merge(prod_df2, on='zemi', how='left')
    merged['packaging'] = merged['product_name_x'].map(
        lambda x: parse_packaging(x))
    merged['inventory_index'] = np.arange(len(merged))
    zfin_to_gmid = {i[0]: i[1]
                    for i in merged[['gmid_x', 'gmid_y']].values.astype(int)}
    zfin_data = {int(i[0]): [int(i[1]), i[2], i[3], i[4], i[5], i[6]]
                 for i in merged[['gmid_x', 'gmid_y',
                                  'product_name_x', 'product_name_y',
                                  'packaging', 'batch_size_x',
                                  'inventory_index']].values}
    return prod_df.values, transition_matrix, zfin_list, zfin_to_gmid, zfin_data
def replace_chars(replacement_value, val):
    """Replace every run of ASCII letters in *val* with *replacement_value*.

    Params
    ------
    replacement_value: str substituted for each alphabetic run
    val: value to clean; coerced to str first when not already a string

    Returns
    -------
    str with letter runs replaced, e.g. ('9', '25KG') -> '259'
    """
    # isinstance is the idiomatic (and subclass-safe) type check.
    if not isinstance(val, str):
        val = str(val)
    return re.sub("[a-zA-Z]+", replacement_value, val)
# Vectorize replace_chars function so it broadcasts element-wise over
# numpy arrays (a convenience wrapper, not a performance optimization).
replace_chars_vec = np.vectorize(replace_chars)
def parse_packaging(desc):
    """Return the packaging code embedded in a product description.

    'BG6025' -> 'bag', 'BB1200' -> 'ss', 'BLK' -> 'bulk'; descriptions
    containing none of these tokens yield the empty string.
    """
    # Checked in the same priority order as the original if/elif chain.
    for token, packaging in (('BG6025', 'bag'),
                             ('BB1200', 'ss'),
                             ('BLK', 'bulk')):
        if token in desc:
            return packaging
    return ''
def process_forecast_data(forecast_data, env):
    """Convert a raw demand-forecast extract into a (12, n_products) array.

    Params
    ------
    forecast_data: raw DataFrame with ZFIN ids in 'Field-03' and monthly
        'ACTD'/'RSLF'/'HFSF'/'UAH7' columns, or an array/frame that was
        already processed (one column per GMID).
    env: environment object providing zfin, zfin_to_gmid_map,
        gmid_index_map, gmids and n_products.

    Returns
    -------
    np.ndarray of shape (12, env.n_products) with monthly RSLF volumes
    for the 12 months starting at the current month.
    """
    # Check if forecast has already been processed
    if forecast_data.shape[1] == len(env.gmids):
        if type(forecast_data) == pd.core.frame.DataFrame:
            forecast_data = forecast_data.values
    else:
        # Keep only rows whose ZFIN id is known to the environment.
        df = forecast_data.loc[forecast_data['Field-03'].isin(env.zfin.astype(str))]
        assert len(df) > 0, "No matching ZFIN GMID's found in forecast."
        # Melt frames separately and recombine on the country
        melt_cats = ['ACTD', 'RSLF', 'HFSF', 'UAH7']
        id_vars = ['Field-03'] #, 'Field-04']
        join_cols = ['Field-03', 'Month', 'Year']
        df_reshape = None
        for cat in melt_cats:
            # All monthly columns belonging to this forecast category.
            melt_cols = [col for col in df.columns if cat in col]
            [melt_cols.append(i) for i in id_vars]
            _df_sub = df.loc[:, melt_cols]
            df_sub = pd.DataFrame()
            # Ensure numerical columns are formatted as such
            for col in _df_sub.columns:
                if col in id_vars:
                    df_sub = pd.concat([df_sub, _df_sub[col]], axis=1)
                else:
                    df_sub = pd.concat([df_sub, _df_sub[col].astype(float)], axis=1)
            df_agg = df_sub.groupby(id_vars).sum()
            df_agg.reset_index(inplace=True)
            df_melt = pd.melt(df_agg, id_vars=id_vars)
            # Column names look like '<cat> ... <month>/<year>'; split the
            # trailing token into Month and Year.
            df_melt['Month'] = df_melt['variable'].map(
                lambda x: x.split(' ')[-1])
            df_melt['Year'] = df_melt['Month'].map(
                lambda x: x.split('/')[-1])
            df_melt['Month'] = df_melt['Month'].map(
                lambda x: x.split('/')[0])
            df_melt.drop('variable', axis=1, inplace=True)
            col_list = df_melt.columns.tolist()
            col_list[col_list.index('value')] = cat
            df_melt.columns = col_list
            if df_reshape is None:
                df_reshape = df_melt.copy()
            else:
                df_reshape = df_reshape.merge(df_melt, on=join_cols,
                                              how='outer')
        df_reshape.fillna(0, inplace=True)
        col_list = df_reshape.columns.tolist()
        col_list[col_list.index('Field-03')] = 'ZFIN'
        # col_list[col_list.index('Field-04')] = 'ZFIN Name'
        df_reshape.columns = col_list
        new_order = ['ZFIN', 'Year', 'Month', 'ACTD',
                     'RSLF', 'HFSF', 'UAH7']
        df_reshape = df_reshape.loc[:, new_order].copy()
        # Aggregate values by ZFIN and date
        agg = df_reshape.groupby(['ZFIN', 'Year', 'Month'])[
            ['ACTD', 'RSLF', 'HFSF', 'UAH7']].sum()
        agg = agg.loc[agg.sum(axis=1).values!=0].reset_index()
        agg['GMID'] = agg['ZFIN'].map(lambda x: env.zfin_to_gmid_map[int(x)])
        fcast_agg = agg.groupby(['GMID', 'Year', 'Month'])[
            'RSLF'].sum().reset_index()
        # '%y-%m' implies two-digit years in the extract - TODO confirm.
        fcast_agg['year_mon'] = fcast_agg.apply(
            lambda x: datetime.strptime(
                str(x.Year) + '-' + str(x.Month),'%y-%m'), axis=1)
        # Get first day of current month
        # Convert to pd.Timestamp to avoid error
        now = pd.Timestamp(date.today().replace(day=1))
        # NOTE(review): when run in January, now.month - 1 == 0 and
        # replace() raises ValueError - confirm whether this can occur.
        next_year = pd.Timestamp(now.replace(year=now.year+1,
                                             month=now.month - 1))
        fcast = fcast_agg.loc[(fcast_agg['year_mon']>=now) &
                              (fcast_agg['year_mon']<=next_year)]
        # Convert data types
        # NOTE(review): assigning into `fcast` (a slice of fcast_agg) can
        # emit SettingWithCopyWarning; result is still used locally only.
        fcast['GMID'] = fcast['GMID'].astype(str)
        forecast_data = np.zeros((12, env.n_products))
        # One cell per (month, product); assumes exactly one matching row
        # per combination - TODO confirm duplicates cannot remain here.
        for g in env.gmid_index_map.keys():
            for i, m in enumerate(range(1, 13)):
                forecast_data[i, env.gmid_index_map[g]] = fcast.loc[
                    (fcast['Month']==str(m)) &
                    (fcast['GMID']==str(int(g)))]['RSLF']
    return forecast_data
def keep_base_name(s):
    """Trim a product name to its base: keep the first four space-separated
    tokens when the fourth is 'HF', otherwise keep the first three."""
    tokens = s.split(' ')[:4]
    keep = 4 if tokens[-1] == 'HF' else 3
    return ' '.join(tokens[:keep])
def process_order_data(order_data, env):
    """Transform a raw SAP-style order extract into the order-book array.

    Params
    ------
    order_data: raw DataFrame (or an array already matching
        env.order_cols, which is returned unchanged).
    env: environment object providing zfin, zfin_to_gmid_map, order_cols,
        ob_indices, start_time and settings.

    Returns
    -------
    np.ndarray with one row per order and columns env.order_cols.

    Raises
    ------
    ValueError when order_data is neither pre-processed nor a DataFrame.
    """
    # Check to see if order_book is already in the proper format
    if order_data.shape[1] == len(env.order_cols):
        return order_data
    if type(order_data) != pd.core.frame.DataFrame:
        raise ValueError("order_data loaded as {}; type not supported".format(
            type(order_data)))
    # Rename columns: unnamed columns inherit the previous header + ' Desc'.
    order_data.columns = [j if 'Unnamed:' not in j
                          else order_data.columns[i-1] + ' Desc'
                          for i, j in enumerate(order_data.columns)]
    # Filter orders by ZFIN
    orders_sub = order_data.loc[order_data['Material'].isin(env.zfin.astype(str))]
    # Filter orders by doc type
    doc_types = ['ZOR', 'ZSO', 'ZBRJ', 'ZFD', 'ZRI', 'ZBRI', 'ZVER', 'ZLOR']
    orders_sub = orders_sub.loc[orders_sub['Sales Doc. Type'].isin(doc_types)]
    # Convert volumes from KG to MT
    orders_sub['order_qty'] = np.round(
        orders_sub['Weight - Net (w/o UoM)'].astype(float) / 1000, 3)
    # Key corresponds to data column, value corresponds to new order book column
    time_cols = {'Dt - (OI) Customer Requested Del (Confirmed)':
                 'cust_req_date', # See note above
                 'Dt - (DH) Goods Issue Actual': 'actl_gi_time',
                 'Dt - (DH) Goods Issue Plan': 'planned_gi_time',
                 'Dt - (OH) Created On': 'doc_create_time'}
    # Convert time strings to datetime object
    # Note # is used for missing values, we set to some large, future value for now
    for key in time_cols.keys():
        orders_sub[time_cols[key]] = orders_sub[key].map(
            lambda x: datetime.strptime(str(x), '%m/%d/%Y') if x != '#'
            else datetime.strptime('01/01/2100', '%m/%d/%Y'))
        if key == 'Dt - (DH) Goods Issue Plan':
            # NOTE(review): unlike the mapping above, this re-parse has no
            # '#' fallback - a missing plan date would raise. Confirm the
            # column is always populated upstream.
            orders_sub['planned_gi_month'] = orders_sub[key].map(
                lambda x: datetime.strptime(str(x), '%m/%d/%Y').month)
        # Re-express each timestamp as offset from env.start_time in days
        # (or hours, depending on the environment setting).
        times = (orders_sub[time_cols[key]] - env.start_time).map(
            lambda x: x.days)
        if env.settings['BASE_TIME_UNIT'] == 'HOUR':
            times *= 24
        orders_sub[time_cols[key]] = times
    col_name_map = {'Sales Document': 'doc_num',
                    'Material': 'gmid'}
    for k in col_name_map.keys():
        orders_sub[col_name_map[k]] = orders_sub[k].copy()
    orders_sub['shipped'] = 1
    # NOTE(review): chained indexing - this assignment writes to a copy and
    # has no effect (SettingWithCopy). Harmless only because 'shipped' is
    # reset to 1 below and recomputed on the numpy array at the end.
    orders_sub.loc[orders_sub['actl_gi_time']>365*2]['shipped'] = 0
    orders_sub['on_time'] = 0
    orders_sub['late_time'] = 0
    orders_sub['cust_segment'] = 1
    orders_sub['var_std_margin'] = 0
    orders_sub['doc_num'] = orders_sub['doc_num'].astype(int)
    orders_sub['late_time'] = orders_sub['actl_gi_time'] - orders_sub['cust_req_date']
    # Add ZEMI and ZFIN
    orders_sub['gmid'] = orders_sub['gmid'].map(lambda x:
        env.zfin_to_gmid_map[int(x)])
    orders_sub['zfin'] = orders_sub['Material'].copy()
    orders_sub['shipped'] = 1
    orders = orders_sub[env.order_cols].values
    # Set shipped for future orders to 0
    orders[np.where(orders[:, env.ob_indices['actl_gi_time']]>=365*2)[0],
           env.ob_indices['shipped']] = 0
    # Unshipped orders cannot be late yet.
    orders[np.where(orders[:, env.ob_indices['shipped']]==0)[0],
           env.ob_indices['late_time']] = 0
    return orders
def determine_date_format(date_series):
    """Infer the strptime format of a series of numeric date strings.

    Splits each date on non-word separators into three numeric fields, then
    orders '%m', '%d', '%Y' by each column's maximum observed value
    (months <= 12, days <= 31, years largest).

    Params
    ------
    date_series: pd.Series of strings such as '01/15/2020'

    Returns
    -------
    str: '-'-joined strptime labels in field order, e.g. '%m-%d-%Y'
    """
    labels = ['%m', '%d', '%Y']  # strptime labels, smallest to largest field
    # Raw string fixes the invalid escape sequence '\W' (SyntaxWarning on
    # recent Python versions); the matched pattern is unchanged.
    dates = np.vstack(date_series.map(lambda x: re.split(r'\W+', x))).astype(int)
    # Column with the smallest max is the month, then day, then year.
    d = {j: labels[i] for i, j in enumerate(np.argsort(np.max(dates, axis=0)))}
    date_format_string = '-'.join([d[k] for k in range(3)])
    return date_format_string
def convert_date_series(date, date_format_string):
date_re = | |
# TWEET_LANGUAGE_EDITION_v2.py
import sys
import urllib
import re
from datetime import datetime
import tweepy
from twython import Twython, TwythonError
import requests
import time
from random import randint, choice, sample, randrange, shuffle
from time import sleep
from twitter import *
import os
from colorama import init, Fore, Back, Style
from io import BytesIO
from googletrans import Translator
# Module-wide state shared by the scraping/tweeting helpers below.
translator = Translator()
# colorama: convert ANSI color codes on Windows consoles.
init(convert=True)
errorcount = 0  # failed tweet attempts (incremented in test_send_message)
countpromotions = 0  # promotions tweeted since the last wrap (see increment)
totalprice = 0.00  # running total of promoted item prices
extractMostPopular = []  # trending keywords scraped at startup
price = 0.00  # price of the item currently being processed
favouritecount = 0  # NOTE(review): not referenced in the visible code
def pricefix(qb):
    """Parse *qb* as a float; fall back to 0 when parsing fails."""
    try:
        return float(qb)
    except Exception:
        return 0
def removeap(z):
    """Strip thousands-separator commas from the string *z*."""
    return z.replace(",", "")
def AnalyseAnItemIDForTitle(analyse):
    """Scrape the listing title for an eBay item id.

    Fetches https://www.ebay.com/itm/<analyse> and extracts the <title>
    tag; when that yields nothing, re-fetches and falls back to the meta
    description. Returns the scraped string, or the int 0 on any error
    (note the mixed return type the callers must tolerate).
    """
    try:
        linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse)
        h = requests.get(linkforfindinghowmanysold)
        mostsolddata = h.text
        # NOTE(review): the unescaped '|' makes this regex an alternation,
        # not a literal " | eBay" match - confirm this is intended.
        extractMostSoldonEbay = re.findall('<title>(.+?) | eBay</title>', mostsolddata)
        # str(list) produces "['...']"; slicing [2:-2] strips brackets/quotes.
        apple = str(extractMostSoldonEbay)
        az = apple[2:-2]
        if az == "":
            # Title not found - retry and use the meta description instead.
            linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse)
            h = requests.get(linkforfindinghowmanysold)
            mostsolddata = h.text
            extractMostSoldonEbay = re.findall('<meta name="description" content="(.+?)" />', mostsolddata)
            apple = str(extractMostSoldonEbay)
            az = apple[2:-2]
            return az
        return az
    except Exception:
        abz = 0
        return abz
def AnalyseAnItemID(analyse):
    """Scrape the total units-sold count for an eBay item id.

    Returns the sold count as an int, or 0 when the page cannot be
    fetched or the "sold" marker is absent.
    """
    try:
        linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse)
        h = requests.get(linkforfindinghowmanysold)
        mostsolddata = h.text
        extractMostSoldonEbay = re.findall('">(.+?) sold</a></span>', mostsolddata)
        # Drop thousands separators, then strip the str(list) wrapping.
        apple = str(extractMostSoldonEbay)
        apple = apple.replace(',', '')
        ap = apple[2:-2]
        az = int(ap)
        return az
    except Exception:
        abz = 0
        return abz
def AnalyseAnItemIDForhours(analyse2):
    """Scrape the "sold in last 24 hours" count for an eBay item id.

    Returns the count as an int, or 0 when the page cannot be fetched or
    the marker is absent.
    """
    try:
        linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse2)
        h = requests.get(linkforfindinghowmanysold)
        mostsolddata = h.text
        extractMostSoldonEbay = re.findall('>(.+?) sold in last 24 hours</span>', mostsolddata)
        # Drop thousands separators, then strip the str(list) wrapping.
        apple = str(extractMostSoldonEbay)
        apple = apple.replace(',', '')
        ap = apple[2:-2]
        az = int(ap)
        return az
    except Exception:
        abz = 0
        return abz
def AnalyseItemIDForPrice(analyse):
    """Scrape the US price for an eBay item id.

    Returns the price as a string with '$', spaces and '/ea' removed
    (callers parse it with pricefix), or the int 0 on any error.
    """
    try:
        linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse)
        h = requests.get(linkforfindinghowmanysold)
        mostsolddata = h.text
        extractMostSoldonEbay = re.findall('">US(.+?)</span>', mostsolddata)
        apple = str(extractMostSoldonEbay)
        apple = apple.replace('$', '')
        apple = apple.replace(' ', '')
        apple = apple.replace('/ea','')
        # Strip the str(list) wrapping ("['...']").
        ap = apple[2:-2]
        az = ap
        return az
    except Exception:
        abz = 0
        return abz
def AnalyseAnItemIDForWatchers(analyse2):
    """Scrape the watcher count for an eBay item id.

    Reads the embedded 'defaultWatchCount' JSON field; returns the count
    as an int, or 0 on any error.
    """
    try:
        linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse2)
        h = requests.get(linkforfindinghowmanysold)
        mostsolddata = h.text
        extractMostSoldonEbay = re.findall('defaultWatchCount":(.+?),', mostsolddata)
        # Drop thousands separators, then strip the str(list) wrapping.
        apple = str(extractMostSoldonEbay)
        apple = apple.replace(',', '')
        ap = apple[2:-2]
        az = int(ap)
        return az
    except Exception:
        abz = 0
        return abz
def unescape(s):
    """Decode a few HTML entities and strip boilerplate phrases scraped
    from eBay pages, returning the cleaned string."""
    # Order matters: "&" must be decoded after "<"/">" so that
    # double-escaped input is not over-decoded; "'" is stripped last.
    for old, new in (
        ("<", "<"),
        (">", ">"),
        ("&", "&"),
        ("Collection of products named ", ""),
        ("Advanced Search", ""),
        ("Verify site's SSL certificate", ""),
        ("'", ""),
    ):
        s = s.replace(old, new)
    return s
def unescape2(s):
    """Replace spaces with '+' so *s* can be used in a URL query string."""
    return s.replace(" ", "+")
def unescape4(s):
    """URL-encode spaces as '+' and drop newline characters."""
    return s.replace(" ", "+").replace("\n", "")
def unescape5(s):
    """Remove every comma from *s* (e.g. thousands separators)."""
    return s.replace(",", "")
def unescape6(s):
s = s.replace("'", "'")
s = s.replace("&", "&")
s = s.replace('"', '"')
s = s.replace("', '",",")
return s
# CODE FOR REMOVING UTF FORMAT
def BMP(s):
    """Replace every character with code point >= 10000 by U+FFFD so the
    remaining string is safe for consumers that reject such characters."""
    return "".join(ch if ord(ch) < 10000 else '\ufffd' for ch in s)
def increment():
    """Advance the global promotion counter (wrapping to 1 after it
    reaches 8) and add the current item price to the running total."""
    global countpromotions
    global totalprice
    global price
    # Wrap back to zero once eight promotions have been counted.
    if countpromotions >= 8:
        countpromotions = 0
    countpromotions += 1
    totalprice += price
# --- Module-level startup: scrape trending eBay keywords, then load the
# user's own keyword list from myimportfile.txt (one keyword per line). ---
print("CHECKING HOT EBAY USA TRENDS")
linkforpopular = ("https://www.wuanto.com/mostwatched/allkw/0/0") #This link has hot trends of things for sale in USA
zz = requests.get(linkforpopular)
populardata = zz.text
getlistofkeywords = re.findall('href="mostwatched/(.+?)/0/0', populardata) #extract a most sold keyword
for word in getlistofkeywords:
    extractMostPopular.append(word)
print(extractMostPopular)
myfilelist =[]
try:
    fh = open('myimportfile.txt')
    for line in fh:
        # in python 2
        # print line
        # in python 3
        # Lines keep their trailing newline here; unescape4 strips it later.
        myfilelist.append(line)
    fh.close()
    print("MY OWN FILE LIST")
    # Randomize processing order of the user's keywords.
    shuffle(myfilelist)
    print(myfilelist)
except FileNotFoundError:
    sys.exit("You need to create a file called myimportfile.txt and place it in the folder with .exe")
# Default credentials-file name; auth_any_user() builds its own ".1" path.
File_save="."
def auth_any_user():
    """
    user authorization using pin code
    using the library https://github.com/sixohsix/twitter

    Returns the (oauth_token, oauth_secret) pair, running the interactive
    OAuth PIN "dance" first when no cached credential file exists.
    """
    CONSUMER_KEY = 'ENTER YOUR OWN TWITTER DEVELOPER KEY'
    CONSUMER_SECRET = 'ENTER YOUR OWN TWITTER DEVELOPER KEY'
    name_account="1" #this will be saved in folder as .1 , delete the .1 file if you want to authenticate a new Twitter account
    File_save="."+name_account
    MY_TWITTER_CREDS=os.path.expanduser(File_save)
    #MY_TWITTER_CREDS = os.path.expanduser('~/.my_app_credentials')
    if not os.path.exists(MY_TWITTER_CREDS):
        # First run: interactive PIN flow; tokens are cached on disk.
        oauth_dance("My App Name", CONSUMER_KEY, CONSUMER_SECRET,
                    MY_TWITTER_CREDS)
    oauth_token, oauth_secret = read_token_file(MY_TWITTER_CREDS)
    return oauth_token, oauth_secret
# Top-level notice printed at import time. NOTE(review): this runs before
# auth_any_user() is called below and uses the module-level File_save
# ("."), not the function's local value - confirm the message is accurate.
print("Your Twitter authentication details are save to ", os.getcwd(), "\\" , File_save)
def test_send_message(ABC,itemID):
    """Tweet the text *ABC*, attaching the eBay listing image for *itemID*
    when it can be scraped.

    Sleeps a random 90-200s before each attempt to throttle posting. On
    any failure the tweet is retried once without media; if that also
    fails, the exception is printed and the global errorcount is
    incremented and returned (otherwise the function returns None).
    """
    try:
        print("initializing sending...")
        consumer_key = 'ENTER YOUR OWN TWITTER DEVELOPER KEY'
        consumer_secret = 'ENTER YOUR OWN TWITTER DEVELOPER KEY'
        # The ".1" file is written by auth_any_user(): token on line 1,
        # secret on line 2.
        with open(".1")as fp:
            access_token=fp.readline().strip()
            access_token_secret=fp.readline().strip()
        twitter = Twython(
            consumer_key,
            consumer_secret,
            access_token,
            access_token_secret
        )
        sleep(randint(90,200))
        # Scrape the listing's twitter:image id and download the thumbnail.
        linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + itemID)
        h = requests.get(linkforfindinghowmanysold)
        mostsolddata = h.text
        extractMostSoldonEbay = re.findall('<meta name="twitter:image" content="https://i.ebayimg.com/images/g/(.+?).jpg" />', mostsolddata)
        extractMostSoldonEbay = extractMostSoldonEbay[0]
        url = "https://i.ebayimg.com/thumbs/images/g/" + extractMostSoldonEbay + ".jpg"
        response = requests.get(url)
        photo = BytesIO(response.content)
        response = twitter.upload_media(media=photo)
        MyMessage = ABC
        twitter.update_status(status=MyMessage, media_ids=[response['media_id']])
        print("TWEET SENT")
        """if __name__== "__main__":"""
    except Exception as ez:
        # Fallback: retry the tweet once without the image attachment.
        try:
            print("initializing sending...")
            consumer_key = 'ENTER YOUR OWN TWITTER DEVELOPER KEY'
            consumer_secret = 'ENTER YOUR OWN KEY'
            with open(".1")as fp:
                access_token=fp.readline().strip()
                access_token_secret=fp.readline().strip()
            twitter = Twython(
                consumer_key,
                consumer_secret,
                access_token,
                access_token_secret
            )
            sleep(randint(90,200))
            MyMessage = ABC
            twitter.update_status(status=MyMessage)
            print("TWEET SENT")
            """if __name__== "__main__":"""
        except Exception as ez:
            print(ez)
            global errorcount
            errorcount += 1
            return errorcount
# Runs at import time: ensure cached OAuth tokens exist (may prompt for a PIN).
auth_any_user()
def pricefix(qb):
    """Coerce *qb* to float, returning 0 when it cannot be parsed.

    NOTE: duplicate of the identically-named function defined earlier in
    this module; being later, this copy is the one bound at runtime.
    """
    try:
        pricef = float(qb)
        return pricef
    except Exception:
        qb = 0
        return qb
def removeap(z):
    """Strip commas from *z*. NOTE: duplicate of the earlier removeap();
    this later definition is the one bound at runtime."""
    z = z.replace(",", "")
    return z
def AnalyseAnItemIDForTitle(analyse):
    """Scrape the listing title for an eBay item id (0 on error).

    NOTE: duplicate of the identically-named function defined earlier in
    this module; being later, this copy is the one bound at runtime.
    """
    try:
        linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse)
        h = requests.get(linkforfindinghowmanysold)
        mostsolddata = h.text
        extractMostSoldonEbay = re.findall('<title>(.+?) | eBay</title>', mostsolddata)
        apple = str(extractMostSoldonEbay)
        az = apple[2:-2]
        if az == "":
            # Fall back to the meta description when no title was found.
            linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse)
            h = requests.get(linkforfindinghowmanysold)
            mostsolddata = h.text
            extractMostSoldonEbay = re.findall('<meta name="description" content="(.+?)" />', mostsolddata)
            apple = str(extractMostSoldonEbay)
            az = apple[2:-2]
            return az
        return az
    except Exception:
        abz = 0
        return abz
def AnalyseAnItemID(analyse):
    """Scrape the total units-sold count for an eBay item id (0 on error).

    NOTE: duplicate of the identically-named function defined earlier in
    this module; being later, this copy is the one bound at runtime.
    """
    try:
        linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse)
        h = requests.get(linkforfindinghowmanysold)
        mostsolddata = h.text
        extractMostSoldonEbay = re.findall('">(.+?) sold</a></span>', mostsolddata)
        apple = str(extractMostSoldonEbay)
        apple = apple.replace(',', '')
        ap = apple[2:-2]
        az = int(ap)
        return az
    except Exception:
        abz = 0
        return abz
def AnalyseAnItemIDForhours(analyse2):
    """Scrape the "sold in last 24 hours" count for an item (0 on error).

    NOTE: duplicate of the identically-named function defined earlier in
    this module; being later, this copy is the one bound at runtime.
    """
    try:
        linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse2)
        h = requests.get(linkforfindinghowmanysold)
        mostsolddata = h.text
        extractMostSoldonEbay = re.findall('>(.+?) sold in last 24 hours</span>', mostsolddata)
        apple = str(extractMostSoldonEbay)
        apple = apple.replace(',', '')
        ap = apple[2:-2]
        az = int(ap)
        return az
    except Exception:
        abz = 0
        return abz
def AnalyseItemIDForPrice(analyse):
    """Scrape the US price string for an eBay item id (0 on error).

    NOTE: duplicate of the identically-named function defined earlier in
    this module; being later, this copy is the one bound at runtime.
    """
    try:
        linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse)
        h = requests.get(linkforfindinghowmanysold)
        mostsolddata = h.text
        extractMostSoldonEbay = re.findall('">US(.+?)</span>', mostsolddata)
        apple = str(extractMostSoldonEbay)
        apple = apple.replace('$', '')
        apple = apple.replace(' ', '')
        apple = apple.replace('/ea','')
        ap = apple[2:-2]
        az = ap
        return az
    except Exception:
        abz = 0
        return abz
def AnalyseAnItemIDForWatchers(analyse2):
    """Scrape the watcher count for an eBay item id (0 on error).

    NOTE: duplicate of the identically-named function defined earlier in
    this module; being later, this copy is the one bound at runtime.
    """
    try:
        linkforfindinghowmanysold = ("https://www.ebay.com/itm/" + analyse2)
        h = requests.get(linkforfindinghowmanysold)
        mostsolddata = h.text
        extractMostSoldonEbay = re.findall('defaultWatchCount":(.+?),', mostsolddata)
        apple = str(extractMostSoldonEbay)
        apple = apple.replace(',', '')
        ap = apple[2:-2]
        az = int(ap)
        return az
    except Exception:
        abz = 0
        return abz
def unescape(s):
    """Decode HTML entities and strip boilerplate phrases.

    NOTE: duplicate of the identically-named function defined earlier in
    this module; being later, this copy is the one bound at runtime.
    """
    s = s.replace("<", "<")
    s = s.replace(">", ">")
    # this has to be last:
    s = s.replace("&", "&")
    s = s.replace("Collection of products named ", "")
    s = s.replace("Advanced Search" , "")
    s = s.replace("Verify site's SSL certificate", "")
    s = s.replace("'", "")
    return s
def unescape2(s):
    """URL-encode spaces as '+'. NOTE: duplicate of the earlier
    unescape2(); this later definition is the one bound at runtime."""
    s = s.replace(" ", "+")
    return s
def unescape4(s):
    """URL-encode spaces as '+' and drop newlines. NOTE: duplicate of the
    earlier unescape4(); this later definition is the one bound at runtime."""
    s = s.replace(" ", "+")
    s = s.replace("\n", "")
    return s
def unescape5(s):
    """Remove commas. NOTE: duplicate of the earlier unescape5(); this
    later definition is the one bound at runtime."""
    s = s.replace(",", "")
    return s
def unescape6(s):
    """Decode common HTML entities and collapse the "', '" list separator.

    NOTE: duplicate of the identically-named function defined earlier in
    this module; being later, this copy is the one bound at runtime.
    """
    s = s.replace("'", "'")
    s = s.replace("&", "&")
    s = s.replace('"', '"')
    s = s.replace("', '",",")
    return s
#CODE FOR REMOVING UTF FORMAT
def BMP(s):
    # Duplicate of the earlier BMP(); this later definition is the one
    # bound at runtime. Maps code points >= 10000 to U+FFFD.
    return "".join((i if ord(i) < 10000 else '\ufffd' for i in s))
def ownfile():
try:
global myfilelist
myset_data = []
for fruit in myfilelist:
pear = unescape2(fruit)
print(pear)
print('{:8} {:8} {:5} {:10} {:14} {:10}'.format('Watch', 'Sold', 'Hour', 'Price','ItemID','Title'))
linkforprocessing = ("http://rest.ebay.com/epn/v1/find/item.rss?keyword="+ pear +"&sortOrder=BestMatch&programid=1&campaignid=5337424366&toolid=10039&minPrice="+ priceinput +".0&listingType1=All&topRatedSeller=true&feedType=rss&lgeo=1")
f = requests.get(linkforprocessing)
websiteData = f.text
extractEbayItemIDFromWebsite = re.findall ('<guid>(.+?)</guid>', websiteData)
global price
for itemID in extractEbayItemIDFromWebsite:
title = AnalyseAnItemIDForTitle(itemID)
price = AnalyseItemIDForPrice(itemID)
title = BMP(title)
title = unescape6(title)
title = title[0:81]
price = unescape5(price)
price = pricefix(price)
price = float(price)
mostSold = AnalyseAnItemID(itemID)
mostSoldinhours = AnalyseAnItemIDForhours(itemID)
watchers = AnalyseAnItemIDForWatchers(itemID)
print('{:*^8} {:_>8} {:*^5} {:_>10.2f} {:_^14} {:_<10}'.format(watchers, mostSold, mostSoldinhours, price, itemID, title))
if price >= float(priceinput) and watchers >= int(watchersinput) and mostSold >= int(mostSoldinput) and mostSoldinhours >= int(mostSoldinhoursvar):
global countpromotions
global totalprice
increment()
if mostSold > 2000 and mostSoldinhours > 2:
if itemID in myset_data:
print("Skipping Duplicate")
break
myset_data.append(itemID)
title = str(title)
translation = translator.translate(title, dest=mylanguagechoice)
print(translation.origin, ' -> ', translation.text)
title = translation.text
price = '{0:.2f}'.format(price)
| |
from bokeh.sampledata import us_states, us_counties
from bokeh.plotting import figure, show, output_notebook, output_file, save
from bokeh import palettes
from bokeh.models import ColorBar,HoverTool,LinearColorMapper,ColumnDataSource,FixedTicker, LogColorMapper
output_notebook()
import re
import numpy as np
from modeling import fit_and_predict
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly.offline import plot
from plotly.subplots import make_subplots
import json
import plotly.express as px
import plotly
import pandas as pd
from datetime import datetime, timedelta
# Trace colors used by viz_curves: red and blue for the two curve series.
credstr ='rgb(234, 51, 86)'
cbluestr = 'rgb(57, 138, 242)'
def plot_counties(df, variable_to_distribute, variables_to_display, state=None, logcolor=False):
    """Plots the distribution of a given variable across the given state

    Params
    ------
    df
        df is a data frame containing the county level data
    variable_to_distribute
        variable_to_distribute is the variable that you want to see across the state
    variables_to_display
        Variables to display on hovering over each county
    state
        two-letter state name used to select counties (lower-cased here)
    logcolor
        when True, use a logarithmic color scale instead of a linear one

    output: Bokeh plotting object

    NOTE(review): variables_to_display is mutated in place (the plotted
    variable is removed from it) - confirm callers do not reuse the list.
    """
    from bokeh.sampledata.us_counties import data as counties
    # Keep only counties belonging to the requested state.
    counties = {
        code: county for code, county in counties.items()
        if county["state"] == state.lower()
    }
    county_xs = [county["lons"] for county in counties.values()]
    county_ys = [county["lats"] for county in counties.values()]
    if variable_to_distribute in variables_to_display:
        variables_to_display.remove(variable_to_distribute)
    colors = palettes.RdBu11 #(n_colors)
    min_value = df[variable_to_distribute].min()
    max_value = df[variable_to_distribute].max()
    # Bucket width of the color scale.
    gran = (max_value - min_value) / float(len(colors))
    #print variable_to_distribute,state,min_value,max_value
    index_range = [min_value + x*gran for x in range(len(colors))]
    county_colors = []
    # Column names are sanitized (non-word chars removed) so they can be
    # referenced with '@' in the hover tooltips below.
    variable_dictionary = {}
    variable_dictionary["county_names"] = [county['name'] for county in counties.values()]
    variable_dictionary["x"] = county_xs
    variable_dictionary["y"] = county_ys
    variable_dictionary[re.sub("[^\w]","",variable_to_distribute)] = []
    for vd in variables_to_display:
        variable_dictionary[re.sub("[^\w]","",vd)] = []
    for county_id in counties:
        # Build the 5-digit FIPS code from the (state, county) id pair.
        StateCountyID = str(county_id[0]).zfill(2) + str(county_id[1]).zfill(3)
        if StateCountyID in list(df["Header-FIPSStandCtyCode"].values):
            temp_var = df[df["Header-FIPSStandCtyCode"] == StateCountyID][variable_to_distribute].values[0]
#             if temp_var > 0.0:
            variable_dictionary[re.sub("[^\w]","",variable_to_distribute)].append(temp_var)
            for vd in variables_to_display:
                variable_dictionary[re.sub("[^\w]","",vd)].append(round(float(df[df["Header-FIPSStandCtyCode"] == StateCountyID][vd].values),2))
            # Pick the color bucket with the smallest non-negative distance.
            color_idx = list(temp_var - np.array(index_range)).index(min(x for x in list(temp_var - np.array(index_range)) if x >= 0))
            county_colors.append(colors[color_idx])
            '''
            else:
                variable_dictionary[re.sub("[^\w]","",variable_to_distribute)].append(0.0)
                county_colors.append("#A9A9A9")
                for vd in variables_to_display:
                    variable_dictionary[re.sub("[^\w]","",vd)].append(0.0)
            '''
        else:
            # County missing from df: show it in gray with zeroed values.
            variable_dictionary[re.sub("[^\w]","",variable_to_distribute)].append(0.0)
            county_colors.append("#A9A9A9")
            for vd in variables_to_display:
                variable_dictionary[re.sub("[^\w]","",vd)].append(0.0)
        #print temp_var,counties[county_id]["name"]
    variable_dictionary["color"] = county_colors
    source = ColumnDataSource(data = variable_dictionary)
    TOOLS = "pan,wheel_zoom,box_zoom,reset,hover,save"
    if logcolor:
        mapper = LogColorMapper(palette=colors, low=min_value, high=max_value)
    else:
        mapper = LinearColorMapper(palette=colors, low=min_value, high=max_value)
    color_bar = ColorBar(color_mapper=mapper, location=(0, 0), orientation='horizontal',
                         title = variable_to_distribute,ticker=FixedTicker(ticks=index_range))
    p = figure(title=variable_to_distribute, toolbar_location="left",tools=TOOLS,
               plot_width=1100, plot_height=700,x_axis_location=None, y_axis_location=None)
    p.patches('x', 'y', source=source, fill_alpha=0.7,fill_color='color',
              line_color="#884444", line_width=2)
    hover = p.select_one(HoverTool)
    hover.point_policy = "follow_mouse"
    tool_tips = [("County ", "@county_names")]
    for key in variable_dictionary.keys():
        if key not in ["x","y","color","county_names"]:
            tool_tips.append((key,"@"+re.sub("[^\w]","",key) + "{1.11}"))
    hover.tooltips = tool_tips
    p.add_layout(color_bar, 'below')
    return p
def viz_curves(df, filename='out.html',
               key_toggle='CountyName',
               keys_table=['CountyName', 'StateName'],
               keys_curves=['deaths', 'cases'],
               dropdown_suffix=' County',
               decimal_places=0,
               expl_dict=None, interval_dicts=None,
               point_id=None, show_stds=False, ):
    '''Visualize explanation for all features (table + ICE curves)
    and save to filename

    Params
    ------
    df: table of data; each row holds one entity (e.g. a county) and the
        columns named in keys_curves contain array-like time series
    filename: output HTML path passed to plotly's offline plot()
    key_toggle: column whose values populate the dropdown selector
    keys_table: columns rendered in the left-hand table
    keys_curves: per-row series plotted as the right-hand curves
    dropdown_suffix: text appended to each dropdown label
    decimal_places: rounding applied to the table values
    expl_dict, interval_dicts, point_id, show_stds: accepted but unused
        in the visible implementation - TODO confirm they are vestigial
    '''
    color_strings = [credstr, cbluestr]
    # plot the table
    df_tab = df[keys_table]
    fig = ff.create_table(df_tab.round(decimal_places))
    # scatter plots
    traces = []
    num_traces_per_plot = len(keys_curves)
    key0 = df_tab[key_toggle].values[0] # only want this to be visible
    # One hidden Scatter trace per (row, curve); only row 0 starts visible.
    for i in range(df.shape[0]):
        row = df.iloc[i]
        key = row[key_toggle]
        for j, key_curve in enumerate(keys_curves):
            curve = row[key_curve]
            x = np.arange(curve.size)
            traces.append(go.Scatter(x=x,
                y=curve,
                showlegend=False,
                visible=i==0, #key == key0,# False, #key == key0,
                name=key_curve,
                line=dict(color=color_strings[j], width=4),
                xaxis='x2', yaxis='y2')
            )
    fig.add_traces(traces)
    # add buttons to toggle visibility
    buttons = []
    for i, key in enumerate(df[key_toggle].values):
        # Index 0 is the table trace; curve traces follow in row order.
        table_offset = 1
        visible = np.array([True] * table_offset + [False] * num_traces_per_plot * len(df[key_toggle]))
        visible[num_traces_per_plot * i + table_offset: num_traces_per_plot * (i + 1) + table_offset] = True
        buttons.append(
            dict(
                method='restyle',
                args=[{'visible': visible}],
                label=key + dropdown_suffix
            ))
    # initialize xaxis2 and yaxis2
    fig['layout']['xaxis2'] = {}
    fig['layout']['yaxis2'] = {}
    fig.layout.updatemenus = [go.layout.Updatemenu(
        dict(
            active=int(np.argmax(df[key_toggle].values == key0)),
            buttons=buttons,
            x=0.8, # this is fraction of entire screen
            y=1.05,
            direction='down'
        )
    )]
    # Edit layout for subplots
    fig.layout.xaxis.update({'domain': [0, .5]})
    fig.layout.xaxis2.update({'domain': [0.6, 1.]})
    fig.layout.xaxis2.update({'title': 'Time'})
    # The graph's yaxis MUST BE anchored to the graph's xaxis
    fig.layout.yaxis.update({'domain': [0, .9]})
    fig.layout.yaxis2.update({'domain': [0, .9], 'anchor': 'x2', })
    fig.layout.yaxis2.update({'title': 'Count'})
    # Update the margins to add a title and see graph x-labels.
    fig.layout.margin.update({'t':50, 'b':120})
    fig.layout.update({
        'title': 'County-level outbreaks',
        'height': 800
    })
    # fig.layout.template = 'plotly_dark'
    plot(fig, filename=filename, config={'showLink': False,
                                         'showSendToCloud': False,
                                         'sendData': True,
                                         'responsive': True,
                                         'autosizable': True,
                                         'displaylogo': False
                                         })
    # fig.show()
    print('plot saved to', filename)
"""
Predicted death maps
"""
def make_us_map(title_text, dark=False):
    """Build an empty plotly Figure configured as a pannable US map
    (Albers USA projection, black subunit borders, white land).

    Params
    ------
    title_text: title displayed above the map
    dark: when True, apply the plotly_dark template with black backgrounds
    """
    fig = go.Figure()
    fig.update_geos(scope='usa',
                    projection=go.layout.geo.Projection(type='albers usa'),
                    subunitcolor="rgb(0, 0, 0)",
                    landcolor='rgb(255, 255, 255)')
    fig.update_layout(dragmode='pan',
                      title={'text': title_text})
    if dark:
        fig.update_layout(paper_bgcolor='rgba(0,0,0,255)',
                          plot_bgcolor='rgba(0,0,0,255)',
                          template='plotly_dark')
    return fig
def add_counties_slider_choropleth_traces(fig, df, past_days, target_days, scale_max, counties_json):
    """Append one (initially hidden) choropleth trace per past day and per
    predicted day to *fig*; the slider built elsewhere toggles visibility.

    Params
    ------
    fig: plotly Figure mutated in place
    df: one row per county, with past-day columns, 'Predicted Deaths
        {k}-day' columns and FIPS codes in 'SecondaryEntityOfFile'
    past_days: iterable of column names holding historical death counts
    target_days: array of prediction horizons (only its .size is used)
    scale_max: upper bound of the shared color scale
    counties_json: GeoJSON providing county boundary geometry

    Returns
    -------
    None - traces are added to *fig* in place.
    """
    def make_choropleth_trace(values, fips):
        # Hidden by default; the day slider switches traces on/off.
        choropleth_trace = go.Choropleth(
            visible=False,
            colorscale=color_scl,
            z=values,
            geojson=counties_json,
            locations=fips,
            zmin=0,
            zmax=scale_max,
            hoverinfo='skip',
            colorbar_title = "<b> Deaths </b>"
        )
        return choropleth_trace
    color_scl = [[0.0, '#ffffff'],[0.2, '#ff9999'],[0.4, '#ff4d4d'],
                 [0.6, '#ff1a1a'],[0.8, '#cc0000'],[1.0, '#4d0000']] # reds
    # add past days
    for col in past_days:
        values = df[col]
        fips = df['SecondaryEntityOfFile']
        # TODO: add new deaths
        choropleth_trace = make_choropleth_trace(values, fips)
        fig.add_trace(choropleth_trace)
    # add predictions (1-day ahead, 2-day ahead, ...)
    for i in range(target_days.size):
        #df['new_deaths'] = (preds - tot_deaths).apply(
        #    lambda x: np.array(
        #        [x[i] - x[i - 1] if i > 0 else x[i] for i in range(target_days.size)]
        #    )
        #)
        pred_col = f'Predicted Deaths {i+1}-day'
        values = df[pred_col]
        fips = df['SecondaryEntityOfFile']
        choropleth_trace = make_choropleth_trace(values, fips)
        fig.add_trace(choropleth_trace)
    return None
def add_counties_slider_bubble_traces(fig, df, past_days, target_days, scale_max, plot_choropleth):
    """Append one hidden bubble (Scattergeo) trace per past day and per
    predicted day to *fig*; bubble size and color both encode deaths.

    Params mirror add_counties_slider_choropleth_traces, except
    plot_choropleth: when True, hide this plot's color bar because the
    companion choropleth already shows one. df must additionally provide
    'lat', 'lon' and a preformatted 'text' column for hover labels.

    Returns
    -------
    None - traces are added to *fig* in place.
    """
    def make_bubble_trace(values, fips, lat, lon, text):
        bubble_trace = go.Scattergeo(
            visible=False,
            lat=lat,
            lon=lon,
            text=text,
            hovertemplate='%{text}',
            name="Bubble Plot",
            marker = dict(
                size = values,
                sizeref = 0.5*(100/scale_max), # make bubble slightly larger
                color = values,
                colorscale = color_scl,
                cmin=0,
                cmax=scale_max,
                line_color='rgb(40,40,40)',
                line_width=0.5,
                sizemode='area',
                colorbar_title = "<b> Deaths </b>",
                showscale=not plot_choropleth
            )
        )
        return bubble_trace
    color_scl = [[0.0, '#ffffff'],[0.2, '#ff9999'],[0.4, '#ff4d4d'],
                 [0.6, '#ff1a1a'],[0.8, '#cc0000'],[1.0, '#4d0000']] # reds
    # add past days
    for col in past_days:
        values = df[col]
        fips = df['SecondaryEntityOfFile']
        lat = df['lat']
        lon = df['lon']
        text = '<b>Actual # of Deaths</b>: ' + values.round().astype(str) + '<br>' + \
            df['text'].tolist()
        # TODO: add new deaths
        bubble_trace = make_bubble_trace(values, fips, lat, lon, text)
        fig.add_trace(bubble_trace)
    # add predictions
    for i in range(target_days.size):
        #df['new_deaths'] = (preds - tot_deaths).apply(
        #    lambda x: np.array(
        #        [x[i] - x[i - 1] if i > 0 else x[i] for i in range(target_days.size)]
        #    )
        #)
        pred_col = f'Predicted Deaths {i+1}-day'
        values = df[pred_col].round()
        fips = df['SecondaryEntityOfFile']
        lat = df['lat']
        lon = df['lon']
        text = '<b>Deaths Predicted</b>: ' + values.round().astype(str) + '<br>' + \
            df['text'].tolist()
        # day_name = "Day " + str(target_days[i])
        # TODO: add new deaths
        bubble_trace = make_bubble_trace(values, fips, lat, lon, text)
        fig.add_trace(bubble_trace)
    return None
def make_counties_slider_sliders(past_days, target_days, plot_choropleth):
    """Build the plotly slider spec that steps through past and predicted
    days, toggling the matching trace(s) visible at each step.

    Params
    ------
    past_days: column names of the form '#Deaths_<label>' for history
    target_days: array of prediction horizons (only its .size is used)
    plot_choropleth: when True, each step toggles two traces (bubble +
        choropleth) instead of one

    Returns
    -------
    list with a single slider dict suitable for fig.update_layout.
    """
    day_labels = [col.replace('#Deaths_', '') for col in past_days]
    total_steps = len(day_labels) + target_days.size
    # Trace count doubles when a choropleth layer accompanies the bubbles.
    trace_count = 2 * total_steps if plot_choropleth else total_steps

    slider = {
        "active": 0,
        "visible": True,
        "pad": {"t": 50},
        "currentvalue": {'xanchor': 'right'},
        'transition': {'duration': 1000, 'easing': 'cubic-in-out'},
        "steps": [],
    }

    def blank_step(label):
        # All traces hidden; the caller switches on the relevant one(s).
        return {"args": ["visible", [False] * trace_count],
                "label": label,
                "method": "restyle"}

    # Steps for the historical days.
    for i, label in enumerate(day_labels):
        step = blank_step(label)
        step['args'][1][i] = True
        if plot_choropleth:
            step['args'][1][total_steps + i] = True
        slider["steps"].append(step)

    # Steps for the predicted days.
    for i in range(target_days.size):
        if i == 0:
            label = "Today"
        elif i == 1:
            label = "Tomorrow"
        else:
            label = "In " + str(i) + " Days"
        step = blank_step(label)
        step['args'][1][len(day_labels) + i] = True
        if plot_choropleth:
            step['args'][1][total_steps + len(day_labels) + i] = True
        slider["steps"].append(step)

    return [slider]
def plot_counties_slider(df,
target_days=np.array([1, 2, 3, 4, 5]),
filename="results/deaths.html",
cumulative=True, # not currently used
plot_choropleth=False,
counties_json=None,
n_past_days=3,
dark=True,
auto_open=True):
"""
"""
if plot_choropleth:
assert counties_json is not None, 'counties_json must be included for plotting choropleth'
# TODO: note that df should have all data (preds and lat lon)
# TODO: add previous days
fips = df['SecondaryEntityOfFile'].tolist()
tot_deaths = df['tot_deaths']
d = df
d['text'] = 'State: ' + d['StateName'] + \
' (' + d['StateNameAbbreviation'] + ')' + '<br>' + \
'County: ' + | |
# Source repository: felixmusil/run_qe
# Taken from https://gitlab.com/ase/ase/blob/master/ase/io/espresso.py for some small modifications about units
"""Reads Quantum ESPRESSO files.
Read multiple structures and results from pw.x output files. Read
structures from pw.x input files.
Built for PWSCF v.5.3.0 but should work with earlier and later versions.
Can deal with most major functionality, but might fail with ibrav =/= 0
or crystal_sg positions.
Units are converted using CODATA 2006, as used internally by Quantum
ESPRESSO.
"""
import os
import operator as op
import warnings
from collections import OrderedDict
from os import path
import numpy as np
from ase.atoms import Atoms
from ase.calculators.singlepoint import SinglePointCalculator
from ase.constraints import FixAtoms, FixCartesian
from ase.data import chemical_symbols, atomic_numbers
from ase.units import create_units
from ase.utils import basestring
# Quantum ESPRESSO uses CODATA 2006 internally
units = create_units('2006')
# Section identifiers
_PW_START = 'Program PWSCF'
_PW_END = 'End of self-consistent calculation'
_PW_CELL = 'CELL_PARAMETERS'
_PW_POS = 'ATOMIC_POSITIONS'
_PW_MAGMOM = 'Magnetic moment per site'
_PW_FORCE = 'Forces acting on atoms'
_PW_TOTEN = '! total energy'
_PW_STRESS = 'total stress'
def read_qe(filename):
    """
    Minimal wrapper to parse full qe outputfiles. The customization is for structure to be
    all converted to angstrom. (look up "### MODIFIED" to find the modifications)
    :param filename:
    :return: list of Atoms from the file
    """
    # select every configuration in the file (the ':' slice)
    full_slice = string2index(':')
    path, selection = parse_filename(filename, full_slice)
    with open(path, 'r') as handle:
        return list(read_espresso_out(handle, selection))
def string2index(string):
    """Convert an index string to an ``int`` or a ``slice``.

    A bare integer (no ':') is returned as ``int``; otherwise the
    colon-separated fields become start/stop/step of a ``slice``,
    with empty fields mapped to ``None``.
    """
    if ':' not in string:
        return int(string)
    # empty fields mean "unbounded" for that slot
    parts = [int(field) if field else None for field in string.split(':')]
    parts.extend([None] * (3 - len(parts)))
    return slice(*parts)
def parse_filename(filename, index=None):
    """Split an ASE-style ``path@index`` filename into ``(path, index)``.

    If *filename* is not a string, or contains no '@', it is returned
    unchanged together with the given *index*.  Otherwise the text after
    the last '@' is parsed with :func:`string2index`; when that parsing
    fails the original arguments are returned untouched (the '@' was part
    of the name, not an index specifier).
    """
    if not isinstance(filename, basestring) or '@' not in filename:
        return filename, index
    # NOTE(review): the original had an if/else on '.json@'/'.db@'/'pg://'
    # whose two branches were byte-identical; the dead condition has been
    # removed with no behavior change.
    newfilename, newindex = filename.rsplit('@', 1)
    try:
        newindex = string2index(newindex)
    except ValueError:
        return filename, index
    return newfilename, newindex
class Namelist(OrderedDict):
    """Case insensitive dict that emulates Fortran Namelists.

    Every key is folded to lower case both when stored and on every
    lookup, so ``nl['ECUTWFC']`` and ``nl['ecutwfc']`` address the same
    entry, mirroring Fortran's case-insensitive namelist keys.
    """

    def __contains__(self, key):
        return OrderedDict.__contains__(self, key.lower())

    def __delitem__(self, key):
        return OrderedDict.__delitem__(self, key.lower())

    def __getitem__(self, key):
        return OrderedDict.__getitem__(self, key.lower())

    def __setitem__(self, key, value):
        OrderedDict.__setitem__(self, key.lower(), value)

    def get(self, key, default=None):
        return OrderedDict.get(self, key.lower(), default)
def read_espresso_out(fileobj, index=-1, results_required=True):
    """Reads Quantum ESPRESSO output files.
    The atomistic configurations as well as results (energy, force, stress,
    magnetic moments) of the calculation are read for all configurations
    within the output file.
    Will probably raise errors for broken or incomplete files.
    Parameters
    ----------
    fileobj : file|str
        A file like object or filename
    index : slice
        The index of configurations to extract.
    results_required : bool
        If True, atomistic configurations that do not have any
        associated results will not be included. This prevents double
        printed configurations and incomplete calculations from being
        returned as the final configuration with no results data.
    Yields
    ------
    structure : Atoms
        The next structure from the index slice. The Atoms has a
        SinglePointCalculator attached with any results parsed from
        the file.
    """
    if isinstance(fileobj, basestring):
        # BUGFIX: the 'U' (universal newlines) open mode was deprecated in
        # Python 3.4 and removed in 3.11; text mode 'r' already translates
        # newlines, so it is equivalent on all supported versions.
        fileobj = open(fileobj, 'r')
    # work with a copy in memory for faster random access
    pwo_lines = fileobj.readlines()
    # TODO: index -1 special case?
    # Index all the interesting points
    indexes = {
        _PW_START: [],
        _PW_END: [],
        _PW_CELL: [],
        _PW_POS: [],
        _PW_MAGMOM: [],
        _PW_FORCE: [],
        _PW_TOTEN: [],
        _PW_STRESS: []
    }
    for idx, line in enumerate(pwo_lines):
        for identifier in indexes:
            if identifier in line:
                indexes[identifier].append(idx)
    # Configurations are either at the start, or defined in ATOMIC_POSITIONS
    # in a subsequent step. Can deal with concatenated output files.
    all_config_indexes = sorted(indexes[_PW_START] +
                                indexes[_PW_POS])
    # Slice only requested indexes
    # setting results_required argument stops configuration-only
    # structures from being returned. This ensures the [-1] structure
    # is one that has results. Two cases:
    # - SCF of last configuration is not converged, job terminated
    #   abnormally.
    # - 'relax' and 'vc-relax' re-prints the final configuration but
    #   only 'vc-relax' recalculates.
    if results_required:
        results_indexes = sorted(indexes[_PW_TOTEN] + indexes[_PW_FORCE] +
                                 indexes[_PW_STRESS] + indexes[_PW_MAGMOM])
        # Prune to only configurations with results data before the next
        # configuration
        results_config_indexes = []
        for config_index, config_index_next in zip(
                all_config_indexes,
                all_config_indexes[1:] + [len(pwo_lines)]):
            if any([config_index < results_index < config_index_next
                    for results_index in results_indexes]):
                results_config_indexes.append(config_index)
        # slice from the subset
        image_indexes = results_config_indexes[index]
    else:
        image_indexes = all_config_indexes[index]
    # Extract initialisation information each time PWSCF starts
    # to add to subsequent configurations. Use None so slices know
    # when to fill in the blanks.
    pwscf_start_info = dict((idx, None) for idx in indexes[_PW_START])
    for image_index in image_indexes:
        # Find the nearest calculation start to parse info. Needed in,
        # for example, relaxation where cell is only printed at the
        # start.
        if image_index in indexes[_PW_START]:
            prev_start_index = image_index
        else:
            # The greatest start index before this structure
            prev_start_index = [idx for idx in indexes[_PW_START]
                                if idx < image_index][-1]
        # add structure to reference if not there
        if pwscf_start_info[prev_start_index] is None:
            pwscf_start_info[prev_start_index] = parse_pwo_start(
                pwo_lines, prev_start_index)
        # Get the bounds for information for this structure. Any associated
        # values will be between the image_index and the following one,
        # EXCEPT for cell, which will be 4 lines before if it exists.
        for next_index in all_config_indexes:
            if next_index > image_index:
                break
        else:
            # right to the end of the file
            next_index = len(pwo_lines)
        # Get the structure
        # Use this for any missing data
        prev_structure = pwscf_start_info[prev_start_index]['atoms']
        if image_index in indexes[_PW_START]:
            structure = prev_structure.copy()  # parsed from start info
        else:
            if _PW_CELL in pwo_lines[image_index - 5]:
                # CELL_PARAMETERS would be just before positions if present
                cell, cell_alat = get_cell_parameters(
                    pwo_lines[image_index - 5:image_index])
            else:
                cell = prev_structure.cell
                cell_alat = pwscf_start_info[prev_start_index]['alat']
            # give at least enough lines to parse the positions
            # should be same format as input card
            n_atoms = len(prev_structure)
            positions_card = get_atomic_positions(
                pwo_lines[image_index:image_index + n_atoms + 1],
                n_atoms=n_atoms, cell=cell, alat=cell_alat)
            # convert to Atoms object
            symbols = [label_to_symbol(position[0]) for position in
                       positions_card]
            positions = [position[1] for position in positions_card]
            structure = Atoms(symbols=symbols, positions=positions, cell=cell,
                              pbc=True)
        # Extract calculation results
        # Energy
        energy = None
        for energy_index in indexes[_PW_TOTEN]:
            if image_index < energy_index < next_index:
                energy = float(
                    pwo_lines[energy_index].split()[-2]) * units['Ry']
        # Forces
        forces = None
        for force_index in indexes[_PW_FORCE]:
            if image_index < force_index < next_index:
                # Before QE 5.3 'negative rho' added 2 lines before forces
                # Use exact lines to stop before 'non-local' forces
                # in high verbosity
                if not pwo_lines[force_index + 2].strip():
                    force_index += 4
                else:
                    force_index += 2
                # assume contiguous
                forces = [
                    [float(x) for x in force_line.split()[-3:]] for force_line
                    in pwo_lines[force_index:force_index + len(structure)]]
                forces = np.array(forces) * units['Ry'] / units['Bohr']
        # Stress
        stress = None
        for stress_index in indexes[_PW_STRESS]:
            if image_index < stress_index < next_index:
                sxx, sxy, sxz = pwo_lines[stress_index + 1].split()[:3]
                _, syy, syz = pwo_lines[stress_index + 2].split()[:3]
                _, _, szz = pwo_lines[stress_index + 3].split()[:3]
                stress = np.array([sxx, syy, szz, syz, sxz, sxy], dtype=float)
                # sign convention is opposite of ase
                stress *= -1 * units['Ry'] / (units['Bohr'] ** 3)
        # Magmoms
        magmoms = None
        for magmoms_index in indexes[_PW_MAGMOM]:
            if image_index < magmoms_index < next_index:
                magmoms = [
                    float(mag_line.split()[5]) for mag_line
                    in pwo_lines[magmoms_index + 1:
                                 magmoms_index + 1 + len(structure)]]
        # Put everything together
        calc = SinglePointCalculator(structure, energy=energy, forces=forces,
                                     stress=stress, magmoms=magmoms)
        structure.set_calculator(calc)
        yield structure
def parse_pwo_start(lines, index=0):
"""Parse Quantum ESPRESSO calculation info from lines,
starting from index. Return a dictionary containing extracted
information.
- `celldm(1)`: lattice parameters (alat)
- `cell`: unit cell in Angstrom
- `symbols`: element symbols for the structure
- `positions`: cartesian coordinates of atoms in Angstrom
- `atoms`: an `ase.Atoms` object constructed from the extracted data
Parameters
----------
lines : list[str]
Contents of PWSCF output file.
index : int
Line number to begin parsing. Only first calculation will
be read.
Returns
-------
info : dict
Dictionary of calculation parameters, including `celldm(1)`, `cell`,
`symbols`, `positions`, `atoms`.
Raises
------
KeyError
If interdependent values cannot be found (especially celldm(1))
an error will be raised as other quantities cannot then be
calculated (e.g. cell and positions).
"""
# TODO: extend with extra DFT info?
info = {}
for idx, line in enumerate(lines[index:], start=index):
if 'celldm(1)' in line:
# celldm(1) has more digits | |
43.687 -6.651 1.00 30.00 C
ATOM 366 N1 DC X 18 -27.504 43.568 -6.588 1.00 30.00 N
ATOM 367 C2 DC X 18 -26.875 43.416 -5.345 1.00 30.00 C
ATOM 368 O2 DC X 18 -27.567 43.383 -4.323 1.00 30.00 O
ATOM 369 N3 DC X 18 -25.521 43.319 -5.296 1.00 30.00 N
ATOM 370 C4 DC X 18 -24.813 43.348 -6.434 1.00 30.00 C
ATOM 371 N4 DC X 18 -23.484 43.238 -6.349 1.00 30.00 N
ATOM 372 C5 DC X 18 -25.437 43.503 -7.708 1.00 30.00 C
ATOM 373 C6 DC X 18 -26.769 43.608 -7.737 1.00 30.00 C
TER
ATOM 374 O5' DT Y 5 -29.913 -51.862 23.113 1.00 30.00 O
ATOM 375 C5' DT Y 5 -30.530 -50.710 22.551 1.00 30.00 C
ATOM 376 C4' DT Y 5 -29.803 -49.443 22.968 1.00 30.00 C
ATOM 377 O4' DT Y 5 -28.370 -49.645 22.882 1.00 30.00 O
ATOM 378 C3' DT Y 5 -30.110 -48.213 22.114 1.00 30.00 C
ATOM 379 O3' DT Y 5 -30.260 -47.085 22.945 1.00 30.00 O
ATOM 380 C2' DT Y 5 -28.873 -48.085 21.232 1.00 30.00 C
ATOM 381 C1' DT Y 5 -27.790 -48.572 22.178 1.00 30.00 C
ATOM 382 N1 DT Y 5 -26.569 -49.068 21.481 1.00 30.00 N
ATOM 383 C2 DT Y 5 -25.717 -49.924 22.135 1.00 30.00 C
ATOM 384 O2 DT Y 5 -25.895 -50.302 23.272 1.00 30.00 O
ATOM 385 N3 DT Y 5 -24.633 -50.322 21.404 1.00 30.00 N
ATOM 386 C4 DT Y 5 -24.317 -49.957 20.112 1.00 30.00 C
ATOM 387 O4 DT Y 5 -23.314 -50.370 19.539 1.00 30.00 O
ATOM 388 C5 DT Y 5 -25.252 -49.056 19.482 1.00 30.00 C
ATOM 389 C7 DT Y 5 -25.019 -48.589 18.077 1.00 30.00 C
ATOM 390 C6 DT Y 5 -26.320 -48.660 20.189 1.00 30.00 C
ATOM 391 P DC Y 6 -31.679 -46.769 23.625 1.00 30.00 P
ATOM 392 OP1 DC Y 6 -32.607 -47.877 23.306 1.00 30.00 O
ATOM 393 OP2 DC Y 6 -32.034 -45.378 23.264 1.00 30.00 O
ATOM 394 O5' DC Y 6 -31.370 -46.798 25.191 1.00 30.00 O
ATOM 395 C5' DC Y 6 -30.887 -45.636 25.827 1.00 30.00 C
ATOM 396 C4' DC Y 6 -29.422 -45.779 26.179 1.00 30.00 C
ATOM 397 O4' DC Y 6 -28.725 -46.486 25.128 1.00 30.00 O
ATOM 398 C3' DC Y 6 -28.685 -44.470 26.316 1.00 30.00 C
ATOM 399 O3' DC Y 6 -28.894 -43.952 27.620 1.00 30.00 O
ATOM 400 C2' DC Y 6 -27.237 -44.898 26.083 1.00 30.00 C
ATOM 401 C1' DC Y 6 -27.395 -46.012 25.038 1.00 30.00 C
ATOM 402 N1 DC Y 6 -27.139 -45.573 23.630 1.00 30.00 N
ATOM 403 C2 DC Y 6 -25.916 -45.870 23.029 1.00 30.00 C
ATOM 404 O2 DC Y 6 -25.063 -46.492 23.673 1.00 30.00 O
ATOM 405 N3 DC Y 6 -25.702 -45.478 21.752 1.00 30.00 N
ATOM 406 C4 DC Y 6 -26.634 -44.812 21.088 1.00 30.00 C
ATOM 407 N4 DC Y 6 -26.355 -44.444 19.833 1.00 30.00 N
ATOM 408 C5 DC Y 6 -27.894 -44.497 21.677 1.00 30.00 C
ATOM 409 C6 DC Y 6 -28.105 -44.901 22.936 1.00 30.00 C
ATOM 410 P DT Y 7 -28.533 -42.425 27.954 1.00 30.00 P
ATOM 411 OP1 DT Y 7 -29.347 -41.996 29.115 1.00 30.00 O
ATOM 412 OP2 DT Y 7 -28.584 -41.659 26.689 1.00 30.00 O
ATOM 413 O5' DT Y 7 -27.012 -42.511 28.411 1.00 30.00 O
ATOM 414 C5' DT Y 7 -26.349 -41.371 28.891 1.00 30.00 C
ATOM 415 C4' DT Y 7 -24.859 -41.609 28.869 1.00 30.00 C
ATOM 416 O4' DT Y 7 -24.570 -42.685 27.949 1.00 30.00 O
ATOM 417 C3' DT Y 7 -24.042 -40.423 28.400 1.00 30.00 C
ATOM 418 O3' DT Y 7 -23.633 -39.684 29.522 1.00 30.00 O
ATOM 419 C2' DT Y 7 -22.853 -41.068 27.693 1.00 30.00 C
ATOM 420 C1' DT Y 7 -23.442 -42.372 27.162 1.00 30.00 C
ATOM 421 N1 DT Y 7 -23.865 -42.329 25.732 1.00 30.00 N
ATOM 422 C2 DT Y 7 -22.921 -42.505 24.756 1.00 30.00 C
ATOM 423 O2 DT Y 7 -21.743 -42.668 24.999 1.00 30.00 O
ATOM 424 N3 DT Y 7 -23.405 -42.478 23.475 1.00 30.00 N
ATOM 425 C4 DT Y 7 -24.718 -42.292 23.083 1.00 30.00 C
ATOM 426 O4 DT Y 7 -25.055 -42.279 21.902 1.00 30.00 O
ATOM 427 C5 DT Y 7 -25.663 -42.120 24.161 1.00 30.00 C
ATOM 428 C7 DT Y 7 -27.114 -41.906 23.864 1.00 30.00 C
ATOM 429 C6 DT Y 7 -25.197 -42.150 25.419 1.00 30.00 C
ATOM 430 P DG Y 8 -23.593 -38.086 29.451 1.00 30.00 P
ATOM 431 OP1 DG Y 8 -23.347 -37.579 30.820 1.00 30.00 O
ATOM 432 OP2 DG Y 8 -24.795 -37.660 28.700 1.00 30.00 O
ATOM 433 O5' DG Y 8 -22.303 -37.789 28.564 1.00 30.00 O
ATOM 434 C5' DG Y 8 -21.034 -38.257 28.992 1.00 30.00 C
ATOM 435 C4' DG Y 8 -20.019 -38.120 27.880 1.00 30.00 C
ATOM 436 O4' DG Y 8 -20.495 -38.844 26.718 1.00 30.00 O
ATOM 437 C3' DG Y 8 -19.786 -36.688 27.400 1.00 30.00 C
ATOM 438 O3' DG Y 8 -18.437 -36.539 26.951 1.00 30.00 O
ATOM 439 C2' DG Y 8 -20.776 -36.572 26.248 1.00 30.00 C
ATOM 440 C1' DG Y 8 -20.639 -37.952 25.637 1.00 30.00 C
ATOM 441 N9 DG Y 8 -21.776 -38.362 24.834 1.00 30.00 N
ATOM 442 C8 DG Y 8 -23.045 -38.675 25.260 1.00 30.00 C
ATOM 443 N7 DG Y 8 -23.848 -39.014 24.288 1.00 30.00 N
ATOM 444 C5 DG Y 8 -23.055 -38.917 23.149 1.00 30.00 C
ATOM 445 C6 DG Y 8 -23.370 -39.157 21.783 1.00 30.00 C
ATOM 446 O6 DG Y 8 -24.443 -39.536 21.288 1.00 30.00 O
ATOM 447 N1 DG Y 8 -22.260 -38.946 20.959 1.00 30.00 N
ATOM 448 C2 DG Y 8 -21.029 -38.544 21.412 1.00 30.00 C
ATOM 449 N2 DG Y 8 -20.096 -38.371 20.489 1.00 30.00 N
ATOM 450 N3 DG Y 8 -20.731 -38.313 22.678 1.00 30.00 N
ATOM 451 C4 DG Y 8 -21.786 -38.518 23.484 1.00 30.00 C
ATOM 452 P DA Y 9 -17.749 -35.085 26.899 1.00 30.00 P
ATOM 453 OP1 DA Y 9 -16.592 -35.057 27.832 1.00 30.00 O
ATOM 454 OP2 DA Y 9 -18.828 -34.091 27.076 1.00 30.00 O
ATOM 455 O5' DA Y 9 -17.219 -34.975 25.392 1.00 30.00 O
ATOM 456 C5' DA Y 9 -15.982 -35.582 25.028 1.00 30.00 C
ATOM 457 C4' DA Y 9 -15.841 -35.668 23.514 1.00 30.00 C
ATOM 458 O4' DA Y 9 -17.057 -36.237 22.944 1.00 30.00 O
ATOM 459 C3' DA Y 9 -15.641 -34.331 22.802 1.00 30.00 C
ATOM 460 O3' DA Y 9 -14.816 -34.505 21.655 1.00 30.00 O
ATOM 461 C2' DA Y 9 -17.062 -33.961 22.404 1.00 30.00 C
ATOM 462 C1' DA Y 9 -17.629 -35.320 22.036 1.00 30.00 C
ATOM 463 N9 DA Y 9 -19.075 -35.381 22.157 1.00 30.00 N
ATOM 464 C8 DA Y 9 -19.818 -35.263 23.296 1.00 30.00 C
ATOM 465 N7 DA Y 9 -21.108 -35.344 23.097 1.00 30.00 N
ATOM 466 C5 DA Y 9 -21.215 -35.512 21.731 1.00 30.00 C
ATOM 467 C6 DA Y 9 -22.319 -35.677 20.872 1.00 30.00 C
ATOM 468 N6 DA Y 9 -23.595 -35.676 21.272 1.00 30.00 N
ATOM 469 N1 DA Y 9 -22.065 -35.851 19.571 1.00 30.00 N
ATOM 470 C2 DA Y 9 -20.801 -35.824 19.140 1.00 30.00 C
ATOM 471 N3 DA Y 9 -19.684 -35.690 19.843 1.00 30.00 N
ATOM 472 C4 DA Y 9 -19.964 -35.538 21.143 1.00 30.00 C
ATOM 473 P DT Y 10 -14.338 -33.230 20.800 1.00 30.00 P
ATOM 474 OP1 DT Y 10 -12.873 -33.327 20.634 1.00 30.00 O
ATOM 475 OP2 DT Y 10 -14.911 -32.004 21.391 1.00 30.00 O
ATOM 476 O5' DT Y 10 -15.033 -33.445 19.381 1.00 30.00 O
ATOM 477 C5' DT Y 10 -14.762 -34.621 18.637 1.00 30.00 C
ATOM 478 C4' DT Y 10 -15.447 -34.580 17.286 1.00 30.00 C
ATOM 479 O4' DT Y 10 -16.882 -34.553 17.463 1.00 30.00 O
ATOM 480 C3' DT Y 10 -15.130 -33.356 16.424 1.00 30.00 C
ATOM 481 O3' DT Y 10 -15.199 -33.728 15.073 1.00 30.00 | |
as domestic or international and remove Domestic
print("\tCategorizing international and domestic flights",end="")
for i,j in G.edges():
if G.nodes[i]["country"] == G.nodes[j]["country"]:
G[i][j]['international'] = False
else:
G[i][j]['international'] = True
print("\t\t[Done]")
# Calculate distance between demographics
print("\tCalculaying demographic clusters distance",end="")
for i,j in G.edges():
G[i][j]['DistDemo'] = abs(float(G.nodes[i]["clust"]) - float(G.nodes[j]["clust"]))
print("\t\t[Done]")
# Remove nodes without inbound edges
print("\tRemoving isolated vertices",end="")
indeg = G.in_degree()
outdeg = G.out_degree()
to_remove=[n for n, degree in indeg if (indeg[n] + outdeg[n] < 1)]
G.remove_nodes_from(to_remove)
print("\t\t\t\t[Done]")
# Limit to the first subgraph
print("\tFinding largest subgraph",end="")
undirected = G.to_undirected()
subgraphs = nx.subgraph(G, undirected)
subgraph_nodes = subgraphs.nodes()
to_remove = list()
for node in G.nodes():
if node not in subgraph_nodes:
to_remove.append(node)
G.remove_nodes_from(to_remove)
print("\t\t\t\t[Done]")
return G
def infection(input_network, vaccination, starts, DELAY=DELAY, Cancel_Delay=Cancel_Delay, vis = True, file_name = "sir.csv", title = MName, RECALCULATE = False):
    """Run a SEIR epidemic over a copy of the flight network.

    Args:
        input_network: Directed flight graph; a copy is simulated, the
            original is left untouched.
        vaccination: Ignored as passed in -- it is recomputed below from
            the module-level Vaccinated_Countries.
        starts: Intended initial infected node(s).
            NOTE(review): the loop over `starts` is commented out and the
            module-level name `start` is used instead, so this parameter
            is currently ignored -- confirm intent before changing.
        DELAY: Step at which vaccinated edges are removed.
        Cancel_Delay: Step at which cancelled edges are removed.
        vis: When True, render one visualize() frame per step.
        file_name: Path of the CSV receiving the S,E,I,R time series.
        title: Title forwarded to visualize().
        RECALCULATE: When True, re-derive edge weights after removals.

    Returns:
        dict with final counts; key "Suscceptable" (sic) kept for
        backward compatibility with existing callers.
    """
    print("Simulating infection.")
    network = input_network.copy()
    # Write the CSV header (with-block guarantees the handle is closed)
    with open(file_name, "w") as f:
        f.write("time, s, e, i, r\n")
    # Set the default to susceptable
    sys.stdout.flush()
    for node in network.nodes():
        network.nodes[node]["status"] = "s"
        network.nodes[node]["color"] = "#A0C8F0"
        network.nodes[node]["age"] = 0
    # Assign the infected
    #for start in starts:
    infected = start  # NOTE(review): module-level `start`, not the `starts` argument
    network.nodes[infected]["status"] = "i"
    network.nodes[infected]["color"] = "red"
    if vis:
        pos = nx.spring_layout(network, scale=2)
    if isinstance(network, nx.DiGraph):
        in_degree = network.in_degree()[infected]
        out_degree = network.out_degree()[infected]
        degree = in_degree + out_degree
    else:
        degree = network.degree()[infected]
    print("\t", network.nodes[infected]["name"], "[", degree, "]", " connections")
    # List vaccinated edges (any flight touching a vaccinated country) and remove
    for i, j in network.edges():
        network[i][j]["vaccinated"] = False
        if network.nodes[i]["country"] in Vaccinated_Countries or network.nodes[j]["country"] in Vaccinated_Countries:
            network[i][j]["vaccinated"] = True
    vaccination = list(((u, v) for u, v, j in network.edges(data=True) if j['vaccinated'] == True))
    if vaccination is not None:
        print("\tVaccinated: ", Vaccinated_Countries, ": ", len(vaccination), " edges")
    else:
        print("\tVaccinated: None")
    if cancelled is not None:
        print("\tCancelled: ", len(cancelled), " edges")
    else:
        print("\tCancelled: None")
    # Iterate Vaccination and/or Cancellation through the evolution of the disease.
    # NOTE(review): `panday` is presumably the total number of simulated days
    # (module-level) -- confirm.
    for step in range(0, panday):
        # If the delay is over, vaccinate.
        if int(step) == int(DELAY):
            if vaccination is not None:
                print(DELAY, "Vaccination on step", DELAY)
                network.remove_edges_from(vaccination)
            # Recalculate the weights of the network as per necessary
            if RECALCULATE == True:
                network = calculate_weights(network)
        if int(step) == int(Cancel_Delay):
            if cancelled is not None:
                print("Cancellation on step", Cancel_Delay, ": ", len(cancelled), " remove flights")
                network.remove_edges_from(cancelled)
            # Recalculate the weights of the network as per necessary
            if RECALCULATE == True:
                network = calculate_weights(network)
        # Create variables to hold the outcomes as they happen
        S, E, I, R = 0, 0, 0, 0
        for node in network.nodes():
            status = network.nodes[node]["status"]
            age = network.nodes[node]["age"]
            color = network.nodes[node]["color"]
            # BUGFIX: the original compared strings with `is`, which tests
            # object identity and only works by accident of CPython string
            # interning; `==` is the correct comparison throughout.
            if status == "i" and age >= gamma:
                # The infected has reached its recovery time after 60 days
                network.nodes[node]["status"] = "r"
                network.nodes[node]["color"] = "purple"
            if status == "e" and age >= alpha and age < gamma:
                # Exposed nodes have an incubation in average 14 days
                network.nodes[node]["status"] = "i"
                network.nodes[node]["color"] = "red"
            elif status == "e":
                network.nodes[node]["age"] += 1
            elif status == "i":
                # Propogate the infection.
                if age > alpha:
                    victims = (list(network.successors(node)))
                    number_infections = 0
                    # cap the number of contacts per day at beta
                    if len(victims) >= beta:
                        victims = random.sample((list(network.successors(node))), beta)
                        number_infections = 0
                    else:
                        victims = (list(network.successors(node)))
                        number_infections = 0
                    for victim in victims:
                        infect_status = network.nodes[victim]["status"]
                        infect = False  # Set this flag to False to start weighting.
                        rand = random.uniform(0, 1)
                        if network[node][victim]['international'] == False and random.uniform(0, 1) <= float(network[node][victim]['weight']):
                            infect = True
                            number_infections += 1
                        if use_pred == True and network[node][victim]['international'] == True:
                            if use_demo == True and network[node][victim]['DistDemo'] >= rand:
                                infect = True
                                number_infections += 1
                            if use_weightP == True and rand <= float(network[node][victim]['weightP']):
                                infect = True
                                number_infections += 1
                        if use_trueevol == True and network[node][victim]['international'] == True and float(network[node][victim]['dayzero']) < step:
                            if use_demo == True and network[node][victim]['DistDemo'] >= rand:
                                infect = True
                                number_infections += 1
                            if use_weightP == True and rand <= float(network[node][victim]['weightP']):
                                infect = True
                                number_infections += 1
                        if infect_status == "s" and infect == True:
                            network.nodes[victim]["status"] = "e"
                            network.nodes[victim]["age"] = 0
                            network.nodes[victim]["color"] = "#30cc1f"
                network.nodes[node]["age"] += 1
        # Loop twice to prevent bias.
        for node in network.nodes():
            status = network.nodes[node]["status"]
            age = network.nodes[node]["age"]
            color = network.nodes[node]["color"]
            if status == "s":
                # Count those susceptable
                S += 1
            if status == "e":
                E += 1
            if status == "v":
                S += 1
            elif status == "r":
                R += 1
            elif status == "i":
                I += 1
        print("{0}, {1}, {2}, {3}, {4}".format(step, S, E, I, R))
        printline = "{0}, {1}, {2}, {3}, {4}".format(step, S, E, I, R)
        with open(file_name, "a") as f:
            f.write(printline + "\n")
        print("\t" + printline)
        # BUGFIX: `I is 0` relied on small-int identity; use equality.
        if I == 0:
            break
        if vis:
            #write_dot(network, title+".dot")
            visualize(network, title, pos)
    print("\t----------\n\tS: {0}, I: {1}, R: {2}".format(S, I, R))
    return {"Suscceptable": S, "Infected": I, "Recovered": R}
def weighted_random(weights):
    """Pick a key from *weights* with probability proportional to its value.

    Draws a uniform number over the total weight, then walks the mapping
    in iteration order subtracting each weight; the key at which the draw
    is exhausted is returned.
    """
    remaining = random.random() * sum(weights.values())
    for chosen, weight in weights.items():
        if remaining <= weight:
            break
        remaining -= weight
    return chosen
def pad_string(integer, n):
    """
    Left-pad the decimal representation of an integer with "0" characters.

    Args:
        integer: The number to pad.
        n: The desired minimum length of the string.

    Returns
        string: The padded string representation of the integer; returned
        unchanged if it is already n characters or longer.
    """
    # rjust prepends "0" exactly like the original while-loop did
    return str(integer).rjust(n, "0")
def visualize(network, title, pos):
    """
    Visualize the network given an array of posisitons.

    Draws the flight network on a PlateCarree world map: grey edges for
    routes between uninfected airports and red edges otherwise, small
    blue nodes for susceptible airports and larger coloured nodes for
    exposed/infected/recovered ones, plus a KDE heat layer over the
    currently-infected airport locations. The frame is saved as
    infection-NNN.png (NNN = zero-padded count of files in the current
    directory) and shown.
    """
    print("-- Starting to Visualize --")
    colors = []
    colori = []
    i_edge_colors = []
    d_edge_colors = []
    default = []
    infected = []
    nstart = []
    ninfect = []
    # split nodes into "still susceptible" vs "touched by the epidemic"
    for node in network.nodes():
        colorn = network.nodes[node]["color"]
        if colorn == "#A0C8F0":
            nstart.append(node)
            colors.append(network.nodes[node]["color"])
        elif colorn == "#30cc1f" or colorn == "red" or colorn == "purple":
            ninfect.append(node)
            colori.append(network.nodes[node]["color"])
    # edges from a non-infected source are grey; from infected, red
    for i, j in network.edges():
        color = network.nodes[i]["color"]
        if color == "#A0C8F0" or color == "#30cc1f" or color == "purple":
            color = "#A6A6A6"
            default.append((i, j))
            d_edge_colors.append(color)
        else:
            color = "red"
            infected.append((i, j))
            i_edge_colors.append(color)
    plt.figure(figsize=(30, 20))
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines()
    # make density plot of infection
    node_positions = {node[0]: (float(node[1]['lon']), float(node[1]['lat'])) for node in network.nodes(data=True)}
    xp = []
    yp = []
    for node in network.nodes():
        infec = network.nodes[node]["status"]
        if infec == 'i':
            xp.append(network.nodes[node]['lon'])
            yp.append(network.nodes[node]['lat'])
    if len(xp) >= 1:
        # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        m1, m2 = np.array(xp).astype(float), np.array(yp).astype(float)
        xmin = -180
        xmax = 180
        ymin = -90
        ymax = 90
        # get the density estimation
        Xp, Yp = np.mgrid[xmin:xmax:250j, ymin:ymax:250j]
        XpYp = np.vstack([Xp.ravel(), Yp.ravel()]).T
        XpYp = np.radians(XpYp)
        values = np.column_stack((np.array(np.vstack(m1)), np.array(np.vstack(m2))))
        kernel = KernelDensity(bandwidth=0.035)
        kernel.fit(np.radians(values))
        #kernel = stats.gaussian_kde(values)
        Z = np.exp(kernel.score_samples(XpYp))
        Z = Z.reshape(Xp.shape)
        # plot the result
        cmap = plt.cm.jet
        cmap.set_under('white')
        plt.imshow(np.rot90(Z), norm = plt.Normalize(vmin=(Z.max()-(Z.max()*0.9)), vmax=Z.max()), cmap=cmap,
                   extent=[xmin, xmax, ymin, ymax], alpha=0.3, interpolation = 'gaussian')
    # Fist pass - Gray lines
    nx.draw_networkx_edges(network, pos=node_positions, edgelist=default,
                           width=0.005,
                           edge_color=d_edge_colors,
                           alpha=0.005,
                           arrows=False)
    # Second Pass - Colored lines
    nx.draw_networkx_edges(network, pos=node_positions, edgelist=infected,
                           width=0.1,
                           edge_color=i_edge_colors,
                           alpha=0.25,
                           arrows=False)
    # first Pass - small nodes
    # BUGFIX: `with_labels` is not a parameter of draw_networkx_nodes and
    # recent networkx versions reject unknown keyword arguments, so it has
    # been dropped (it had no effect before).
    nx.draw_networkx_nodes(network,
                           pos=node_positions,
                           nodelist=nstart,
                           linewidths=0.2,
                           node_size=5,
                           node_color=colors)
    # # Second Pass - large nodes
    nx.draw_networkx_nodes(network,
                           pos=node_positions,
                           nodelist=ninfect,
                           linewidths=0.2,
                           node_size=20,
                           node_color=colori)
    plt.axis('off')
    number_files = str(len(os.listdir()))
    while len(number_files) < 3:
        number_files = "0" + number_files
    plt.savefig("infection-{0}.png".format(number_files),
                bbox_inches='tight', dpi=72
                )
    plt.show()
    plt.close()
#%% BUILDING NETWORK
simulation = 0
for i in range (NUM_SIMULATIONS):
for effort in efforts:
#seed = 100
#random.seed(seed)
# Identify the script.
print("Flight Network Disease Simulator 1.0.0")
print("Modified by <NAME> from <NAME> and <NAME>\n\n")
#Simulation od the Pandemic
print("Setting Simulation Parameters.")
# Determine the parameters of the current simulation.
args = sys.argv[1:]
opts, args = getopt.getopt("brcsidv",["delay=","nsim="])
AIRPORT_DATA = args[0]
ROUTE_DATA = args[1]
# Make a new folder for the data.
subsim = (strategy + pad_string(simulation,4))
os.makedirs(subsim)
os.chdir(subsim)
# Create the network using the command arguments.
network = create_network(AIRPORT_DATA, ROUTE_DATA)
print("\tDetermining network type.")
# Determine if the graph is directed or undirected
if isinstance(network,nx.DiGraph):
network_type | |
# Source: aaronrmm/filesystem_spec -- fsspec/implementations/reference.py
import base64
import io
import itertools
import logging
import os
from functools import lru_cache
import fsspec.core
try:
import ujson as json
except ImportError:
import json
from ..asyn import AsyncFileSystem, sync
from ..callbacks import _DEFAULT_CALLBACK
from ..core import filesystem, open
from ..mapping import get_mapper
from ..spec import AbstractFileSystem
logger = logging.getLogger("fsspec.reference")
class ReferenceFileSystem(AsyncFileSystem):
"""View byte ranges of some other file as a file system
Initial version: single file system target, which must support
async, and must allow start and end args in _cat_file. Later versions
may allow multiple arbitrary URLs for the targets.
This FileSystem is read-only. It is designed to be used with async
targets (for now). This FileSystem only allows whole-file access, no
``open``. We do not get original file details from the target FS.
Configuration is by passing a dict of references at init, or a URL to
a JSON file containing the same; this dict
can also contain concrete data for some set of paths.
Reference dict format:
{path0: bytes_data, path1: (target_url, offset, size)}
https://github.com/fsspec/kerchunk/blob/main/README.md
"""
protocol = "reference"
    def __init__(
        self,
        fo,
        target=None,
        ref_storage_args=None,
        target_protocol=None,
        target_options=None,
        remote_protocol=None,
        remote_options=None,
        fs=None,
        template_overrides=None,
        simple_templates=True,
        loop=None,
        ref_type=None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        fo : dict or str
            The set of references to use for this instance, with a structure as above.
            If str, will use fsspec.open, in conjunction with ref_storage_args to
            open and parse JSON at this location.
        target : str
            For any references having target_url as None, this is the default file
            target to use
        ref_storage_args : dict
            If references is a str, use these kwargs for loading the JSON file
        target_protocol : str
            Used for loading the reference file, if it is a path. If None, protocol
            will be derived from the given path
        target_options : dict
            Extra FS options for loading the reference file, if given as a path
        remote_protocol : str
            The protocol of the filesystem on which the references will be evaluated
            (unless fs is provided). If not given, will be derived from the first
            URL that has a protocol in the templates or in the references, in that
            order.
        remote_options : dict
            kwargs to go with remote_protocol
        fs : file system instance
            Directly provide a file system, if you want to configure it beforehand. This
            takes precedence over target_protocol/target_options
        template_overrides : dict
            Swap out any templates in the references file with these - useful for
            testing.
        ref_type : "json" | "parquet" | "zarr"
            If None, guessed from URL suffix, defaulting to JSON. Ignored if fo
            is not a string.
        simple_templates: bool
            Whether templates can be processed with simple replace (True) or if
            jinja is needed (False, much slower). All reference sets produced by
            ``kerchunk`` are simple in this sense, but the spec allows for complex.
        kwargs : passed to parent class
        """
        super().__init__(loop=loop, **kwargs)
        self.target = target
        # True when references come from a zarr/parquet store (held in a
        # pandas DataFrame) rather than a plain dict of references
        self.dataframe = False
        self.template_overrides = template_overrides
        self.simple_templates = simple_templates
        self.templates = {}
        if hasattr(fo, "read"):
            # file-like object: its contents are the reference JSON text
            text = fo.read()
        elif isinstance(fo, str):
            # a URL/path to the reference set; build the storage options
            # used to open it
            if target_protocol:
                extra = {"protocol": target_protocol}
            else:
                extra = {}
            dic = dict(**(ref_storage_args or target_options or {}), **extra)
            # NOTE(review): suffix sniffing below also fires when ref_type
            # was given explicitly as something else -- confirm intent
            if ref_type == "zarr" or fo.endswith("zarr"):
                import pandas as pd
                import zarr
                self.dataframe = True
                m = get_mapper(fo, **dic)
                z = zarr.open_group(m)
                # only version-1 reference stores are supported
                assert z.attrs["version"] == 1
                self.templates = z.attrs["templates"]
                self.gen = z.attrs.get("gen", None)
                self.df = pd.DataFrame(
                    {k: z[k][:] for k in ["key", "data", "url", "offset", "size"]}
                ).set_index("key")
            elif ref_type == "parquet" or fo.endswith("parquet"):
                import fastparquet as fp
                self.dataframe = True
                with open(fo, "rb", **dic) as f:
                    pf = fp.ParquetFile(f)
                    # templates/gen are carried in the parquet key-value metadata
                    assert pf.key_value_metadata["version"] == 1
                    self.templates = json.loads(pf.key_value_metadata["templates"])
                    self.gen = json.loads(pf.key_value_metadata.get("gen", "[]"))
                    self.df = pf.to_pandas(index="key")
            else:
                # text JSON
                with open(fo, "rb", **dic) as f:
                    logger.info("Read reference from URL %s", fo)
                    text = f.read()
        else:
            # dictionaries; TODO: allow dataframe here?
            text = fo
        if self.dataframe:
            self._process_dataframe()
        else:
            self._process_references(text, template_overrides)
        if fs is not None:
            # an explicitly supplied target filesystem wins over the
            # protocol guessing below
            self.fs = fs
            return
        if remote_protocol is None:
            # guess the remote protocol from the first template URL that
            # carries one
            for ref in self.templates.values():
                if callable(ref):
                    ref = ref()
                protocol, _ = fsspec.core.split_protocol(ref)
                if protocol:
                    remote_protocol = protocol
                    break
        if remote_protocol is None:
            # otherwise, from the first reference target URL with a protocol
            for ref in self.references.values():
                if callable(ref):
                    ref = ref()
                if isinstance(ref, list) and ref[0]:
                    protocol, _ = fsspec.core.split_protocol(ref[0])
                    if protocol:
                        remote_protocol = protocol
                        break
        if remote_protocol is None:
            # final fallback: same protocol the reference file itself used
            remote_protocol = target_protocol
        self.fs = filesystem(remote_protocol, loop=loop, **(remote_options or {}))
@property
def loop(self):
    """Event loop of the wrapped filesystem when it is async, else our own."""
    if self.fs.async_impl:
        return self.fs.loop
    return self._loop
def _cat_common(self, path):
    """Resolve *path* to either inline bytes or a (url, start, end) triple.

    Returns ``(data, None, None)`` when the reference holds the bytes
    itself (possibly prefixed ``base64:``), or ``(url, start, end)``
    describing where to read from; start/end are None for whole-file
    references.
    """
    path = self._strip_protocol(path)
    logger.debug(f"cat: {path}")
    # TODO: can extract and cache templating here
    if self.dataframe:
        # Dataframe-backed sets keep inline data in the "data" column,
        # remote locations in url/offset/size.
        part = self.df.loc[path]
        if part["data"]:
            part = part["data"]
        else:
            part = part[["url", "offset", "size"]]
    else:
        part = self.references[path]
    if isinstance(part, str):
        part = part.encode()
    if isinstance(part, bytes):
        logger.debug(f"Reference: {path}, type bytes")
        if part.startswith(b"base64:"):
            part = base64.b64decode(part[7:])
        return part, None, None
    if len(part) == 1:
        # [url] with no offset/size means the entire file.
        logger.debug(f"Reference: {path}, whole file")
        url = part[0]
        start = None
        end = None
    else:
        url, start, size = part
        logger.debug(f"Reference: {path}, offset {start}, size {size}")
        end = start + size
    if url is None:
        # A null URL points at the default target of this reference set.
        url = self.target
    return url, start, end
async def _cat_file(self, path, start=None, end=None, **kwargs):
    """Async fetch of a reference's bytes, optionally sliced by start/end."""
    ref, ref_start, ref_end = self._cat_common(path)
    if isinstance(ref, bytes):
        # Inline (possibly base64-decoded) data: slice locally.
        return ref[start:end]
    data = await self.fs._cat_file(ref, start=ref_start, end=ref_end)
    return data[start:end]
def cat_file(self, path, start=None, end=None, **kwargs):
    """Synchronous fetch of a reference's bytes, optionally sliced."""
    ref, ref_start, ref_end = self._cat_common(path)
    if isinstance(ref, bytes):
        return ref[start:end]
    # TODO: update ref_start/ref_end if start/end given, instead of slicing
    data = self.fs.cat_file(ref, start=ref_start, end=ref_end)
    return data[start:end]
def pipe_file(self, path, value, **_):
    """Temporarily register binary data or a reference under *path*."""
    refs = self.references
    refs[path] = value
async def _get_file(self, rpath, lpath, **kwargs):
    """Copy one reference to a local file (async)."""
    if self.isdir(rpath):
        os.makedirs(lpath, exist_ok=True)
        return
    content = await self._cat_file(rpath)
    with open(lpath, "wb") as outfile:
        outfile.write(content)
def get_file(self, rpath, lpath, callback=_DEFAULT_CALLBACK, **kwargs):
    """Copy one reference to a local file, reporting progress via *callback*."""
    if self.isdir(rpath):
        os.makedirs(lpath, exist_ok=True)
        return
    content = self.cat_file(rpath, **kwargs)
    callback.set_size(len(content))
    with open(lpath, "wb") as outfile:
        outfile.write(content)
    callback.absolute_update(len(content))
def get(self, rpath, lpath, recursive=False, **kwargs):
    """Download references to local paths, using the async path when available."""
    if not self.fs.async_impl:
        return AbstractFileSystem.get(
            self, rpath, lpath, recursive=recursive, **kwargs
        )
    return sync(self.loop, self._get, rpath, lpath, recursive, **kwargs)
def cat(self, path, recursive=False, **kwargs):
    """Fetch the contents of one or more paths.

    Delegates to the async machinery when the wrapped filesystem is
    async; otherwise falls back to the synchronous base implementation.

    :param path: a single reference key, or a list of keys
    :param recursive: not supported for list input on the sync path
    :param kwargs: passed through to ``cat_file``
    """
    if self.fs.async_impl:
        return sync(self.loop, self._cat, path, recursive, **kwargs)
    if isinstance(path, list):
        if recursive or any("*" in p for p in path):
            raise NotImplementedError
        return {p: AbstractFileSystem.cat_file(self, p, **kwargs) for p in path}
    # Fix: forward kwargs for the single-path sync case too; they were
    # previously dropped, inconsistent with the list branch above.
    return AbstractFileSystem.cat_file(self, path, **kwargs)
def _process_dataframe(self):
self._process_templates(self.templates)
@lru_cache(1000)
def _render_jinja(url):
import jinja2
if "{{" in url:
if self.simple_templates:
return (
url.replace("{{", "{")
.replace("}}", "}")
.format(**self.templates)
)
return jinja2.Template(url).render(**self.templates)
return url
if self.templates:
self.df["url"] = self.df["url"].map(_render_jinja)
def _process_references(self, references, template_overrides=None):
if isinstance(references, (str, bytes)):
references = json.loads(references)
vers = references.get("version", None)
if vers is None:
self._process_references0(references)
elif vers == 1:
self._process_references1(references, template_overrides=template_overrides)
else:
raise ValueError(f"Unknown reference spec version: {vers}")
# TODO: we make dircache by iterating over all entries, but for Spec >= 1,
# can replace with programmatic. Is it even needed for mapper interface?
def _process_references0(self, references):
"""Make reference dict for Spec Version 0"""
if "zarr_consolidated_format" in references:
# special case for Ike prototype
references = _unmodel_hdf5(references)
self.references = references
def _process_references1(self, references, template_overrides=None):
if not self.simple_templates or self.templates:
try:
import jinja2
except ImportError as e:
raise ValueError("Reference Spec Version 1 requires jinja2") from e
self.references = {}
self._process_templates(references.get("templates", {}))
@lru_cache(1000)
def _render_jinja(u):
return jinja2.Template(u).render(**self.templates)
for k, v in references.get("refs", {}).items():
if isinstance(v, str):
if v.startswith("base64:"):
self.references[k] = base64.b64decode(v[7:])
self.references[k] = v
elif self.templates:
u = v[0]
if "{{" in u:
if self.simple_templates:
u = (
u.replace("{{", "{")
.replace("}}", "}")
.format(**self.templates)
)
else:
u = _render_jinja(u)
self.references[k] = [u] if len(v) == 1 else [u, v[1], v[2]]
else:
self.references[k] = v
self.references.update(self._process_gen(references.get("gen", [])))
def _process_templates(self, tmp):
import jinja2
self.templates = {}
if self.template_overrides is not None:
tmp.update(self.template_overrides)
for k, v in tmp.items():
if "{{" in v:
self.templates[k] = lambda temp=v, **kwargs: jinja2.Template(
temp
).render(**kwargs)
else:
self.templates[k] = v
def _process_gen(self, gens):
import jinja2
out = {}
for gen in gens:
dimension = {
k: v
if isinstance(v, list)
else range(v.get("start", 0), v["stop"], v.get("step", 1))
for k, v in gen["dimensions"].items()
}
products = (
dict(zip(dimension.keys(), values))
for values in itertools.product(*dimension.values())
)
for pr in products:
key = jinja2.Template(gen["key"]).render(**pr, **self.templates)
url = jinja2.Template(gen["url"]).render(**pr, **self.templates)
if ("offset" in gen) and ("length" in gen):
offset = int(
jinja2.Template(gen["offset"]).render(**pr, **self.templates)
)
| |
import enum
import datetime
from typing import Dict, List, Union
import pymongo.database
import iso8601
from bson.objectid import ObjectId
from bson.errors import BSONError
from layabase import ComparisonSigns, CRUDController
@enum.unique
class IndexType(enum.IntEnum):
    """Kind of Mongo index to create for a column."""

    Unique = 1  # index enforcing uniqueness of the value
    Other = 2  # plain (non-unique) index
# Maps layabase comparison signs to the corresponding Mongo query operators.
_operators = {
    ComparisonSigns.Greater: "$gt",
    ComparisonSigns.GreaterOrEqual: "$gte",
    ComparisonSigns.Lower: "$lt",
    ComparisonSigns.LowerOrEqual: "$lte",
}
class Column:
    """
    Definition of a Mongo document field.

    This field is used to:

    - Validate a value.
    - Deserialize a value to a valid Mongo (BSON) one.
    - Serialize a Mongo (BSON) value to a valid JSON one.
    """
def __init__(self, field_type=None, **kwargs):
    """
    :param field_type: Python field type. Default to str.
    :param choices: Restrict valid values. Only for int, float, str and Enum fields.
    Should be a list or a function (without parameters) returning a list.
    Each list item should be of field type.
    None by default, or all enum values in case of an Enum field.
    :param counter: Custom counter definition. Only for auto incremented fields.
    Should be a tuple or a function (without parameters) returning a tuple. Content should be:
     - Counter name (field name by default), string value.
     - Counter category (table name by default), string value.
    :param default_value: Default field value returned to the client if field is not set.
    Should be of field type.
    None by default.
    :param get_default_value: Function returning default field value returned to the client if field is not set.
    Should be a function (with dictionary as parameter) returning a value of field type.
    Return default_value by default.
    :param description: Field description used in Swagger and in error messages.
    Should be a string value. Default to None.
    :param index_type: If and how this field should be indexed.
    Value should be one of IndexType enum. Default to None (not indexed).
    Parameter does not need to be provided if field is a primary key.
    :param allow_none_as_filter: If None value should be kept in queries (GET/DELETE).
    Should be a boolean value. Default to False.
    :param is_primary_key: If this field value is not allowed to be modified after insert.
    Should be a boolean value. Default to False (field value can always be modified).
    index_type will be IndexType.Unique if field is primary_key.
    :param is_nullable: If field value is optional.
    Should be a boolean value.
    Default to True if field is not a primary key.
    Default to True if field has a default value.
    Default to True (for insert only) if field value should auto increment.
    Otherwise default to False.
    Note that it is not allowed to force False if field has a default value or if value should auto increment.
    :param is_required: If field value must be specified in client requests. Use it to avoid heavy requests.
    Should be a boolean value. Default to False.
    :param should_auto_increment: If field should be auto incremented. Only for integer fields.
    Should be a boolean value. Default to False.
    :param min_value: Minimum value for a number field.
    :param max_value: Maximum value for a number field.
    :param min_length: Minimum value length. Only for integer, list or dict fields.
    Should be an integer value. Default to None (no minimum length).
    :param max_length: Maximum value length. Only for integer, list or dict fields.
    Should be an integer value. Default to None (no maximum length).
    :param example: Sample value. Should be of the field type.
    Default to None (default sample will be generated).
    :param store_none: If field value should be stored if None and None is a valid value. Should be a boolean.
    Default to False (None values will not be stored to save space).
    :param allow_comparison_signs: If field can be queried with ComparisonSign. Should be a boolean.
    Default to False (only equality can be queried).
    """
    self.field_type = field_type or str
    # Normalize callable-or-literal parameters into callables up front.
    self.get_choices = self._to_get_choices(kwargs.pop("choices", None))
    self.get_counter = self._to_get_counter(kwargs.pop("counter", None))
    self.default_value = kwargs.pop("default_value", None)
    self.get_default_value = self._to_get_default_value(
        kwargs.pop("get_default_value", None)
    )
    self.description = kwargs.pop("description", None)
    self.index_type = kwargs.pop("index_type", None)
    self.allow_none_as_filter: bool = bool(
        kwargs.pop("allow_none_as_filter", False)
    )
    self.should_auto_increment: bool = bool(
        kwargs.pop("should_auto_increment", False)
    )
    self.is_required: bool = bool(kwargs.pop("is_required", False))
    self.min_value = kwargs.pop("min_value", None)
    self.max_value = kwargs.pop("max_value", None)
    # Lengths are coerced to int so callers may pass numeric strings.
    self.min_length: int = kwargs.pop("min_length", None)
    if self.min_length is not None:
        self.min_length = int(self.min_length)
    self.max_length: int = kwargs.pop("max_length", None)
    if self.max_length is not None:
        self.max_length = int(self.max_length)
    self._example = kwargs.pop("example", None)
    self._store_none: bool = bool(kwargs.pop("store_none", False))
    self.is_primary_key: bool = bool(kwargs.pop("is_primary_key", False))
    self.allow_comparison_signs = bool(kwargs.pop("allow_comparison_signs", False))
    # NOTE(review): any kwargs remaining after the pops above are silently
    # ignored — confirm whether unknown parameters should raise instead.
    if self.is_primary_key:
        if self.index_type:
            raise Exception(
                "Primary key fields are supposed to be indexed as unique."
            )
        self.index_type = IndexType.Unique
    is_nullable = bool(kwargs.pop("is_nullable", True))
    if not is_nullable:
        if self.should_auto_increment:
            raise Exception(
                "A field cannot be mandatory and auto incremented at the same time."
            )
        if self.default_value:
            raise Exception(
                "A field cannot be mandatory and having a default value at the same time."
            )
        self._is_nullable_on_insert = False
        self._is_nullable_on_update = False
    else:
        # Field will be optional only if it is not a primary key without default value and not auto incremented
        self._is_nullable_on_insert = (
            not self.is_primary_key
            or self.default_value
            or self.should_auto_increment
        )
        # Field will be optional only if it is not a primary key without default value
        self._is_nullable_on_update = not self.is_primary_key or self.default_value
    self._check_parameters_validity()
def _check_parameters_validity(self):
if self.should_auto_increment and self.field_type is not int:
raise Exception("Only int fields can be auto incremented.")
if self.min_value is not None and not isinstance(
self.min_value, self.field_type
):
raise Exception(f"Minimum value should be of {self.field_type} type.")
if self.max_value is not None:
if not isinstance(self.max_value, self.field_type):
raise Exception(f"Maximum value should be of {self.field_type} type.")
if self.min_value is not None and self.max_value < self.min_value:
raise Exception(
"Maximum value should be superior or equals to minimum value"
)
if self.min_length is not None and self.min_length < 0:
raise Exception("Minimum length should be positive")
if self.max_length is not None:
if self.max_length < 0:
raise Exception("Maximum length should be positive")
if self.min_length is not None and self.max_length < self.min_length:
raise Exception(
"Maximum length should be superior or equals to minimum length"
)
if self._example is not None and not isinstance(self._example, self.field_type):
raise Exception("Example must be of field type.")
def __set_name__(self, owner, name):
    """Record the attribute name and pre-build validation/deserialization.

    The special ``_id`` field is always typed as a BSON ObjectId.
    """
    self.name = name
    if self.name == "_id":
        self.field_type = ObjectId
    self._validate_query = self._get_query_validation_function()
    self._validate_insert = self._get_insert_update_validation_function()
    self._validate_update = self._get_insert_update_validation_function()
    self._deserialize_value = self._get_value_deserialization_function()
def _to_get_counter(self, counter):
if counter:
return counter if callable(counter) else lambda model_as_dict: counter
return lambda model_as_dict: (self.name,)
def _to_get_choices(self, choices):
"""
Return a function without arguments returning the choices.
:param choices: A list of choices or a function providing choices (or None).
"""
if choices:
return choices if callable(choices) else lambda: choices
elif isinstance(self.field_type, enum.EnumMeta):
return lambda: list(self.field_type.__members__.keys())
return lambda: None
def _to_get_default_value(self, get_default_value):
return (
get_default_value
if get_default_value
else lambda model_as_dict: self.default_value
)
def __str__(self):
    # A column prints as its Mongo field name.
    return self.name
def validate_query(self, filters: dict) -> dict:
    """Validate this field's value within GET/DELETE request filters.

    :param filters: Mapping of field name to filter value; this field may
        be absent from it.
    :return: Mapping of field name to a list of error messages; empty when
        validation succeeded.
    """
    value = filters.get(self.name)
    if value is None:
        if self.is_required:
            return {self.name: ["Missing data for required field."]}
        return {}
    if isinstance(value, list) and self.field_type != list:
        # A list of values means "match any of these": validate each one.
        errors = {}
        for candidate in value:
            errors.update(self._validate_query(candidate))
        return errors
    return self._validate_query(value)
def validate_insert(self, document: dict) -> dict:
    """Validate this field's value for a document insertion request.

    :param document: Document about to be inserted; this field may be
        absent from it.
    :return: Mapping of field name to a list of error messages; empty when
        validation succeeded.
    """
    value = document.get(self.name)
    if value is not None:
        return self._validate_insert(value)
    if not self._is_nullable_on_insert:
        return {self.name: ["Missing data for required field."]}
    return {}
def validate_update(self, document: dict) -> dict:
| |
o1)
b2 = bookings.add(owner, a1, o3)
bookings.accept_booking(b1)
assert b1.state == 'accepted'
assert b2.state == 'blocked'
def test_booking_limit_exemption(session, owner):
    """Occasions exempt from the booking limit must be acceptable in any
    order without raising, for attendees who also hold non-exempt bookings."""
    activities = ActivityCollection(session)
    attendees = AttendeeCollection(session)
    periods = PeriodCollection(session)
    occasions = OccasionCollection(session)
    bookings = BookingCollection(session)
    period = periods.add(
        title="Autumn 2016",
        prebooking=(datetime(2016, 9, 1), datetime(2016, 9, 30)),
        booking=(datetime(2016, 9, 30), datetime(2016, 9, 30)),
        execution=(datetime(2016, 10, 1), datetime(2016, 10, 31)),
        active=True,
    )
    period.confirmed = True
    # o1 is exempt from the per-attendee booking limit, o2 is not.
    o1 = occasions.add(
        start=datetime(2016, 10, 4, 13),
        end=datetime(2016, 10, 4, 14),
        timezone="Europe/Zurich",
        activity=activities.add("Activity 1", username=owner.username),
        period=period,
        spots=(0, 2)
    )
    o1.exempt_from_booking_limit = True
    o2 = occasions.add(
        start=datetime(2016, 11, 4, 13),
        end=datetime(2016, 11, 4, 14),
        timezone="Europe/Zurich",
        activity=activities.add("Activity 2", username=owner.username),
        period=period,
        spots=(0, 2)
    )
    o2.exempt_from_booking_limit = False
    a1 = attendees.add(
        user=owner,
        name="<NAME>",
        birth_date=date(2000, 1, 1),
        gender='male'
    )
    a2 = attendees.add(
        user=owner,
        name="<NAME>",
        birth_date=date(2000, 1, 1),
        gender='male'
    )
    transaction.commit()
    # 1st case, get the exempt booking, then the non-exempt one
    b1 = bookings.add(owner, a1, o1)
    b2 = bookings.add(owner, a1, o2)
    b1.state = 'open'
    b2.state = 'open'
    bookings.accept_booking(b1)
    bookings.accept_booking(b2)
    # 2nd case, get the non-exempt booking, then the exempt one
    b3 = bookings.add(owner, a2, o1)
    b4 = bookings.add(owner, a2, o2)
    b3.state = 'open'
    b4.state = 'open'
    bookings.accept_booking(b3)
    bookings.accept_booking(b4)
    # none of these methods should throw an error, then we're good
def test_cancel_booking(session, owner):
    """Exercise cascading cancellation of bookings in a confirmed period.

    Fixes a copy-paste error in the original: ``b1.state`` was asserted
    'blocked' twice in a row while ``b3`` (which also overlaps the
    accepted ``b2`` and is asserted 'accepted' after the cascade) was
    never checked before the cancellation.
    """
    activities = ActivityCollection(session)
    attendees = AttendeeCollection(session)
    periods = PeriodCollection(session)
    occasions = OccasionCollection(session)
    bookings = BookingCollection(session)
    period = periods.add(
        title="Autumn 2016",
        prebooking=(datetime(2016, 9, 1), datetime(2016, 9, 30)),
        booking=(datetime(2016, 9, 30), datetime(2016, 9, 30)),
        execution=(datetime(2016, 10, 1), datetime(2016, 10, 31)),
        active=True,
    )
    # Four occasions on the same day; each overlaps its neighbours by 1h.
    o1 = occasions.add(
        start=datetime(2016, 10, 4, 9),
        end=datetime(2016, 10, 4, 12),
        timezone="Europe/Zurich",
        activity=activities.add("Activity 1", username=owner.username),
        period=period,
        spots=(0, 2)
    )
    o2 = occasions.add(
        start=datetime(2016, 10, 4, 11),
        end=datetime(2016, 10, 4, 14),
        timezone="Europe/Zurich",
        activity=activities.add("Activity 2", username=owner.username),
        period=period,
        spots=(0, 2)
    )
    o3 = occasions.add(
        start=datetime(2016, 10, 4, 13),
        end=datetime(2016, 10, 4, 16),
        timezone="Europe/Zurich",
        activity=activities.add("Activity 3", username=owner.username),
        period=period,
        spots=(0, 2)
    )
    o4 = occasions.add(
        start=datetime(2016, 10, 4, 15),
        end=datetime(2016, 10, 4, 18),
        timezone="Europe/Zurich",
        activity=activities.add("Activity 4", username=owner.username),
        period=period,
        spots=(0, 1)
    )
    a1 = attendees.add(
        user=owner,
        name="<NAME>",
        birth_date=date(2000, 1, 1),
        gender='male'
    )
    a2 = attendees.add(
        user=owner,
        name="<NAME>",
        birth_date=date(2000, 1, 1),
        gender='male'
    )
    a3 = attendees.add(
        user=owner,
        name="Eleven",
        birth_date=date(2000, 1, 1),
        gender='female'
    )
    transaction.commit()
    # only works for confirmed periods
    with pytest.raises(RuntimeError) as e:
        bookings.cancel_booking(bookings.add(owner, a1, o1))
    assert "The period has not yet been confirmed" in str(e.value)
    transaction.abort()
    periods.active().confirmed = True
    transaction.commit()
    # cancelling a booking will automatically accept the blocked ones
    # (this is run after matching, so we want to make sure the matching
    # is kept tight, with no unnecessarily open/denied bookings)
    b1 = bookings.add(owner, a1, o1)
    b2 = bookings.add(owner, a1, o2)
    b3 = bookings.add(owner, a1, o3)
    bookings.accept_booking(b2)
    assert b1.state == 'blocked'
    assert b2.state == 'accepted'
    # b3 (13:00-16:00) overlaps the accepted b2 (11:00-14:00) as well
    assert b3.state == 'blocked'
    bookings.cancel_booking(b2, cascade=True)
    assert b1.state == 'accepted'
    assert b2.state == 'cancelled'
    assert b3.state == 'accepted'
    transaction.abort()
    # same, this time with only one overlap
    b1 = bookings.add(owner, a1, o1)
    b2 = bookings.add(owner, a1, o2)
    b3 = bookings.add(owner, a1, o3)
    bookings.accept_booking(b1)
    assert b1.state == 'accepted'
    assert b2.state == 'blocked'
    assert b3.state == 'open'
    bookings.cancel_booking(b1, cascade=True)
    assert b1.state == 'cancelled'
    assert b2.state == 'accepted'
    assert b3.state == 'blocked'
    transaction.abort()
    # if the occasions are already full, the state is going to be 'denied'
    bookings.accept_booking(bookings.add(owner, a2, o1))
    bookings.accept_booking(bookings.add(owner, a3, o1))
    b1 = bookings.add(owner, a1, o1)
    b2 = bookings.add(owner, a1, o2)
    bookings.accept_booking(b2)
    assert b1.state == 'blocked'
    assert b2.state == 'accepted'
    bookings.cancel_booking(b2, cascade=True)
    assert b1.state == 'denied'
    assert b2.state == 'cancelled'
    transaction.abort()
    # if the cancellation leads to open spots, other bookings are considered
    b1 = bookings.add(owner, a1, o1)
    b2 = bookings.add(owner, a2, o1)
    b3 = bookings.add(owner, a3, o1)
    bookings.accept_booking(b1)
    bookings.accept_booking(b2)
    assert b1.state == 'accepted'
    assert b2.state == 'accepted'
    assert b3.state == 'open'
    bookings.cancel_booking(b2, cascade=True)
    assert b1.state == 'accepted'
    assert b2.state == 'cancelled'
    assert b3.state == 'accepted'
    transaction.abort()
    # make sure a cancellation doesn't lead to overbooking
    b1 = bookings.add(owner, a1, o4)
    b2 = bookings.add(owner, a2, o4, priority=1)
    b3 = bookings.add(owner, a3, o4)
    bookings.accept_booking(b1)
    assert b1.state == 'accepted'
    assert b2.state == 'open'
    assert b3.state == 'open'
    bookings.cancel_booking(b1, cascade=True)
    assert b1.state == 'cancelled'
    assert b2.state == 'accepted'
    assert b3.state == 'open'
    transaction.abort()
    # make sure the booking limit is honored
    period = periods.active()
    period.all_inclusive = True
    period.max_bookings_per_attendee = 1
    b1 = bookings.add(owner, a1, o1, priority=4)
    b2 = bookings.add(owner, a1, o2, priority=3)
    b3 = bookings.add(owner, a1, o3, priority=2)
    b4 = bookings.add(owner, a1, o4, priority=1)
    bookings.accept_booking(b1)
    assert b1.state == 'accepted'
    assert b2.state == 'blocked'
    assert b3.state == 'blocked'
    assert b4.state == 'blocked'
    bookings.cancel_booking(b1, cascade=True)
    assert b1.state == 'cancelled'
    assert b2.state == 'accepted'
    assert b3.state == 'blocked'
    assert b4.state == 'blocked'
    bookings.cancel_booking(b2, cascade=True)
    assert b1.state == 'cancelled'
    assert b2.state == 'cancelled'
    assert b3.state == 'accepted'
    assert b4.state == 'blocked'
    transaction.abort()
    # make sure accepting a previously denied booking of the same occasion will
    # will be skipped if doing so would conflict with the limit
    period = periods.active()
    period.all_inclusive = True
    period.max_bookings_per_attendee = 1
    b1 = bookings.add(owner, a1, o4)
    b2 = bookings.add(owner, a2, o1)
    b3 = bookings.add(owner, a2, o4)
    b1.state = 'accepted'
    b2.state = 'accepted'
    b3.state = 'denied'
    session.flush()
    bookings.cancel_booking(b1, cascade=True)
    assert b1.state == 'cancelled'
    assert b2.state == 'accepted'
    assert b3.state == 'denied'
def test_period_phases(session):
    """Walk a period through its lifecycle and check the derived phase."""
    periods = PeriodCollection(session)
    period = periods.add(
        title="Autumn 2016",
        prebooking=(date(2016, 9, 1), date(2016, 9, 15)),
        booking=(date(2016, 9, 15), date(2016, 9, 30)),
        execution=(date(2016, 11, 1), date(2016, 11, 30)),
        active=False,
    )
    assert period.finalizable
    assert period.finalized is False
    assert period.phase == 'inactive'
    period.active = True
    # NOTE(review): patch.object(..., return_value=False) replaces the
    # attribute with a truthy MagicMock rather than making it False, so the
    # patched branches below may not actually exercise finalizable == False
    # — confirm intent (PropertyMock or new=False may have been meant).
    with freeze_time('2016-08-31'):
        assert period.phase == 'inactive'
        with patch.object(period, 'finalizable', return_value=False):
            assert period.phase == 'inactive'
    with freeze_time('2016-09-01'):
        assert period.phase == 'wishlist'
        with patch.object(period, 'finalizable', return_value=False):
            assert period.phase == 'wishlist'
    with freeze_time('2016-09-15'):
        assert period.phase == 'wishlist'
        with patch.object(period, 'finalizable', return_value=False):
            assert period.phase == 'wishlist'
    period.confirmed = True
    with freeze_time('2016-09-14'):
        assert period.phase == 'inactive'
        with patch.object(period, 'finalizable', return_value=False):
            assert period.phase == 'inactive'
    with freeze_time('2016-09-15'):
        assert period.phase == 'booking'
        with patch.object(period, 'finalizable', return_value=False):
            assert period.phase == 'booking'
    period.finalized = True
    with freeze_time('2016-10-31'):
        assert period.phase == 'payment'
    with freeze_time('2016-11-01'):
        assert period.phase == 'execution'
    with freeze_time('2016-12-01'):
        assert period.phase == 'archive'
    # Periods without billing (finalizable=False) will always have finalized=False ?!
    # An example is Domat-Ems period 2020
    # Furthermore, <NAME> used a booking period having the same end as the execution period
    # I have no idea if this is not something that should not be done or that leads to unintended side effects
    period.finalizable = False
    period.finalized = False
    with freeze_time('2016-10-31'):
        # between end of booking phase and start of execution phase
        assert period.phase == 'inactive'
    with freeze_time('2016-11-01'):
        # This does not make sense and has to be evaluated in the future when there is budget
        assert period.phase == 'inactive'
        # assert period.phase == 'execution'
    with freeze_time('2016-12-01'):
        # assert period.phase == 'archive'
        assert period.phase == 'inactive'
    ## The phase might also take into consideration the period.archived attribute for the phase
def test_invoices(session, owner, prebooking_period, inactive_period):
    """Invoice totals, outstanding and paid amounts across two periods."""
    p1 = prebooking_period
    p2 = inactive_period
    invoices = InvoiceCollection(session, user_id=owner.id)
    assert invoices.total_amount == 0
    assert invoices.outstanding_amount == 0
    assert invoices.paid_amount == 0
    # First invoice: 100 + 25 + 100 + 25 = 250, then a -5% discount on 250.
    i1 = invoices.add(period_id=p1.id)
    i1.add("Malcolm", "Camp", 100.0, 1.0)
    i1.add("Malcolm", "Pass", 25.0, 1.0)
    i1.add("Dewey", "Football", 100.0, 1.0)
    i1.add("Dewey", "Pass", 25.0, 1.0)
    i1.add("Discount", "8%", 250, -0.05)
    assert i1.total_amount == 237.5
    assert i1.outstanding_amount == 237.5
    assert i1.paid_amount == 0
    assert InvoiceCollection(session).total_amount == 237.5
    i2 = invoices.add(period_id=p2.id)
    i2.add("Malcolm", "Camp", 100, 1 / 3)
    # this is 33, not 33.33 because both unit and quantity are truncated
    # to two decimals after the point. So when we added 1 / 3 above, we really
    # added 0.33 to the database (100 * 0.33 = 33)
    assert InvoiceCollection(session, period_id=p2.id).total_amount == 33
    assert i2.total_amount == 33
    assert i2.outstanding_amount == 33
    assert i2.paid_amount == 0
    assert InvoiceCollection(session, period_id=p1.id).total_amount == 237.5
    assert InvoiceCollection(session, period_id=p2.id).total_amount == 33
    assert InvoiceCollection(session).total_amount == 270.5
    # pay part of the first invoice
    i1.items[0].paid = True
    assert i1.total_amount == 237.5
    assert i1.outstanding_amount == 137.5
    assert i1.paid_amount == 100.0
    assert i1.paid == False
    # The aggregate hybrid attributes must match at the SQL level too.
    assert session.query(func.sum(Invoice.total_amount))\
        .first()[0] == 270.5
    assert session.query(func.sum(Invoice.outstanding_amount))\
        .first()[0] == 170.5
    assert session.query(func.sum(Invoice.paid_amount))\
        .first()[0] == 100
| |
FixedTarget.from_name("Rigel")
>>> keck = Observer.at_site("Keck")
>>> rigel_rise_time = keck.target_rise_time(time, target, which="next") # doctest: +SKIP
>>> print("ISO: {0.iso}, JD: {0.jd}".format(rigel_rise_time)) # doctest: +SKIP
ISO: 2001-02-04 00:51:23.330, JD: 2451944.53569
"""
return self._determine_which_event(self._calc_riseset,
dict(time=time, target=target,
which=which, rise_set='rising',
horizon=horizon,
n_grid_points=n_grid_points,
grid_times_targets=grid_times_targets))
@u.quantity_input(horizon=u.deg)
def target_set_time(self, time, target, which='nearest', horizon=0*u.degree,
                    grid_times_targets=False, n_grid_points=150):
    """
    Time of the next/previous/nearest setting of ``target``.

    "Set" is the moment the target transitions from altitudes above
    ``horizon`` to below ``horizon``.

    Parameters
    ----------
    time : `~astropy.time.Time` or other (see below)
        Time of observation. Passed as the first argument to the
        `~astropy.time.Time` initializer, so anything that initializer
        accepts (including a `~astropy.time.Time` object) works.
    target : `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`, or list
        Target celestial object(s)
    which : {'next', 'previous', 'nearest'}
        Which set event relative to ``time`` to compute.
    horizon : `~astropy.units.Quantity` (optional), default = zero degrees
        Degrees above/below the actual horizon used for the rise/set
        computation (e.g. -6 deg horizon = civil twilight).
    grid_times_targets : bool
        If True, extra dimensions are packed onto the end of the target
        object so M targets and N times yield an (M, N) shaped result;
        otherwise shapes are broadcast with standard numpy rules.
    n_grid_points : int (optional)
        Number of grid points used to search for horizon crossings over a
        24 hour period; the default of 150 gives set-time precision
        better than one minute.

    Returns
    -------
    `~astropy.time.Time`
        Set time of target.

    Examples
    --------
    Calculate the set time of Rigel at Keck Observatory:

    >>> from astroplan import Observer, FixedTarget
    >>> from astropy.time import Time
    >>> time = Time("2001-02-03 04:05:06")
    >>> target = FixedTarget.from_name("Rigel")
    >>> keck = Observer.at_site("Keck")
    >>> rigel_set_time = keck.target_set_time(time, target, which="next") # doctest: +SKIP
    >>> print("ISO: {0.iso}, JD: {0.jd}".format(rigel_set_time)) # doctest: +SKIP
    ISO: 2001-02-03 12:29:34.768, JD: 2451944.02054
    """
    event_kwargs = dict(
        time=time,
        target=target,
        which=which,
        rise_set='setting',
        horizon=horizon,
        n_grid_points=n_grid_points,
        grid_times_targets=grid_times_targets,
    )
    return self._determine_which_event(self._calc_riseset, event_kwargs)
def target_meridian_transit_time(self, time, target, which='nearest',
                                 grid_times_targets=False, n_grid_points=150):
    """
    Calculate time at the transit of the meridian.

    Compute time of the next/previous/nearest transit of the ``target``
    object.

    Parameters
    ----------
    time : `~astropy.time.Time` or other (see below)
        Time of observation. This will be passed in as the first argument
        to the `~astropy.time.Time` initializer, so it can be anything that
        `~astropy.time.Time` will accept (including a `~astropy.time.Time`
        object)
    target : `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`, or list
        Target celestial object(s)
    which : {'next', 'previous', 'nearest'}
        Choose which meridian transit relative to the present ``time``
        would you like to calculate
    grid_times_targets: bool
        If True, the target object will have extra dimensions packed
        onto the end, so that calculations with M targets and N times
        will return an (M, N) shaped result. Otherwise, we rely on
        broadcasting the shapes together using standard numpy rules.
    n_grid_points : int (optional)
        The number of grid points on which to search for the horizon
        crossings of the target over a 24 hour period, default is 150 which
        yields transit time precisions better than one minute.

    Returns
    -------
    `~astropy.time.Time`
        Transit time of target

    Examples
    --------
    Calculate the meridian transit time of Rigel at Keck Observatory:

    >>> from astroplan import Observer, FixedTarget
    >>> from astropy.time import Time
    >>> time = Time("2001-02-03 04:05:06")
    >>> target = FixedTarget.from_name("Rigel")
    >>> keck = Observer.at_site("Keck")
    >>> rigel_transit_time = keck.target_meridian_transit_time(time, target,
    ...                                                        which="next") # doctest: +SKIP
    >>> print("ISO: {0.iso}, JD: {0.jd}".format(rigel_transit_time)) # doctest: +SKIP
    ISO: 2001-02-03 06:42:26.863, JD: 2451943.77948
    """
    # NOTE(review): rise_set='setting' looks copy-pasted from the set-time
    # path — confirm that _calc_transit ignores this argument for transits.
    return self._determine_which_event(self._calc_transit,
                                       dict(time=time, target=target,
                                            which=which,
                                            n_grid_points=n_grid_points,
                                            rise_set='setting',
                                            grid_times_targets=grid_times_targets))
def target_meridian_antitransit_time(self, time, target, which='nearest',
                                     grid_times_targets=False, n_grid_points=150):
    """
    Calculate time at the antitransit of the meridian.

    Compute time of the next/previous/nearest antitransit of the ``target``
    object.

    Parameters
    ----------
    time : `~astropy.time.Time` or other (see below)
        Time of observation. This will be passed in as the first argument
        to the `~astropy.time.Time` initializer, so it can be anything that
        `~astropy.time.Time` will accept (including a `~astropy.time.Time`
        object)

    target : `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`, or list
        Target celestial object(s)

    which : {'next', 'previous', 'nearest'}
        Choose which meridian antitransit relative to the present ``time``
        would you like to calculate

    grid_times_targets : bool
        If True, the target object will have extra dimensions packed onto the end,
        so that calculations with M targets and N times will return an (M, N)
        shaped result. Otherwise, we rely on broadcasting the shapes together
        using standard numpy rules.

    n_grid_points : int (optional)
        The number of grid points on which to search for the horizon
        crossings of the target over a 24 hour period, default is 150 which
        yields rise time precisions better than one minute.

    Returns
    -------
    `~astropy.time.Time`
        Antitransit time of target

    Examples
    --------
    Calculate the meridian anti-transit time of Rigel at Keck Observatory:

    >>> from astroplan import Observer, FixedTarget
    >>> from astropy.time import Time
    >>> time = Time("2001-02-03 04:05:06")
    >>> target = FixedTarget.from_name("Rigel")
    >>> keck = Observer.at_site("Keck")
    >>> rigel_antitransit_time = keck.target_meridian_antitransit_time(
    ...     time, target, which="next")  # doctest: +SKIP
    >>> print("ISO: {0.iso}, JD: {0.jd}".format(rigel_antitransit_time))  # doctest: +SKIP
    ISO: 2001-02-03 18:40:29.761, JD: 2451944.27812
    """
    # NOTE(review): rise_set='setting' looks vestigial for an antitransit
    # search -- confirm that _calc_transit ignores this key.
    return self._determine_which_event(self._calc_transit,
                                       dict(time=time, target=target,
                                            which=which, antitransit=True,
                                            rise_set='setting',
                                            n_grid_points=n_grid_points,
                                            grid_times_targets=grid_times_targets))
@u.quantity_input(horizon=u.deg)
def sun_rise_time(self, time, which='nearest', horizon=0*u.degree, n_grid_points=150):
    """
    Time of sunrise.

    Compute the time of the next/previous/nearest sunrise, where sunrise
    is the moment the Sun climbs from below ``horizon`` to above it.

    Parameters
    ----------
    time : `~astropy.time.Time` or other (see below)
        Time of observation; anything acceptable as the first argument of
        the `~astropy.time.Time` initializer (including a Time object).

    which : {'next', 'previous', 'nearest'}
        Which sunrise relative to ``time`` to compute.

    horizon : `~astropy.units.Quantity` (optional), default = zero degrees
        Altitude above/below the true horizon defining "rise"
        (e.g. -6 deg gives civil twilight).

    n_grid_points : int (optional)
        Number of grid points used to search for the horizon crossing over
        a 24 hour period; the default of 150 gives better-than-one-minute
        precision.

    Returns
    -------
    `~astropy.time.Time`
        Time of sunrise

    Examples
    --------
    Calculate the time of the previous sunrise at Apache Point Observatory:

    >>> from astroplan import Observer
    >>> from astropy.time import Time
    >>> apo = Observer.at_site("APO")
    >>> time = Time('2001-02-03 04:05:06')
    >>> sun_rise = apo.sun_rise_time(time, which="previous")  # doctest: +SKIP
    >>> print("ISO: {0.iso}, JD: {0.jd}".format(sun_rise))  # doctest: +SKIP
    ISO: 2001-02-02 14:02:50.554, JD: 2451943.08531
    """
    # Sunrise is just the rise time of the Sun's coordinate at `time`.
    sun = get_sun(time)
    return self.target_rise_time(time, sun, which, horizon,
                                 n_grid_points=n_grid_points)
@u.quantity_input(horizon=u.deg)
def sun_set_time(self, time, which='nearest', horizon=0*u.degree, n_grid_points=150):
    """
    Time of sunset.

    Compute time of the next/previous/nearest sunset, where
    sunset is defined as when the Sun transitions from altitudes
    above ``horizon`` to below ``horizon``.

    Parameters
    ----------
    time : `~astropy.time.Time` or other (see below)
        Time of observation. This will be passed in as the first argument
        to the `~astropy.time.Time` initializer, so it can be anything that
        `~astropy.time.Time` will accept (including a `~astropy.time.Time`
        object)

    which : {'next', 'previous', 'nearest'}
        Choose which sunset relative to the present ``time`` would you
        like to calculate

    horizon : `~astropy.units.Quantity` (optional), default = zero degrees
        Degrees above/below actual horizon to use
        for calculating rise/set times (i.e.,
        -6 deg horizon = civil twilight, etc.)

    n_grid_points : int (optional)
        The number of grid points on which to search for the horizon
        crossings of the target over a 24 hour period, default is 150 which
        yields set time precisions better than one minute.

    Returns
    -------
    `~astropy.time.Time`
        Time of sunset

    Examples
    --------
    Calculate the time of the next sunset at Apache Point Observatory:

    >>> from astroplan import Observer
    >>> from astropy.time import Time
    >>> apo = Observer.at_site("APO")
    >>> time = Time('2001-02-03 04:05:06')
    >>> sun_set = apo.sun_set_time(time, which="next")  # doctest: +SKIP
    >>> print("ISO: {0.iso}, JD: {0.jd}".format(sun_set))  # doctest: +SKIP
    ISO: 2001-02-04 00:35:42.102, JD: 2451944.52479
    """
    return self.target_set_time(time, get_sun(time), which, horizon,
                                n_grid_points=n_grid_points)
def noon(self, time, which='nearest', n_grid_points=150):
"""
Time at solar noon.
Parameters
----------
time : `~astropy.time.Time` or other (see below)
Time of observation. This will be passed in as the first | |
# Copyright 2002-2011 <NAME>. See LICENSE for licensing information.
"""mixminion.directory.ServerList
Implements a store of serverinfos for a directory, as well as functions
to generate and sign directories.
FFFF Right now, this is about maximally slow. There are a lot of tricks
FFFF we could do to speed it up: not revalidating servers in our cache;
FFFF pickling serverinfo objects for easy access, and so on. But
FFFF really, we'd need to get 1000 servers before any of these tricks made
FFFF more than a 10-second difference in directory generation time, so
FFFF let's leave it simple for now.
"""
__all__ = [ 'ServerList' ]
import os
import time
import threading
import mixminion
import mixminion.Config
import mixminion.directory.Directory
from mixminion.Crypto import pk_decode_public_key, pk_encode_public_key, \
pk_same_public_key
from mixminion.Common import IntervalSet, LOG, MixError, MixFatalError, \
UIError, createPrivateDir, formatBase64, formatDate, formatFnameTime, \
formatTime, Lockfile, openUnique, previousMidnight, readFile, \
readPickled, readPossiblyGzippedFile, stringContains, writeFile, \
writePickled
from mixminion.Config import ConfigError
from mixminion.ServerInfo import ServerDirectory, ServerInfo, \
_getDirectoryDigestImpl
class ServerList:
"""A ServerList holds a set of server descriptors for use in generating
directories. It checks new descriptors for consistency with old ones
as they are inserted. It will reject any server if:
-- it is expired (Valid-Until in the past)
-- it is superseded (For all time it is valid, a more-recently-
published descriptor is also valid.)
-- it is inconsistent (We already know a descriptor for this
nickname, with a different identity key.)
[FFFF This check will become stricter in the future.]
This implementation isn't terribly optimized, but there's no need to
optimize it until we have far more descriptors to worry about.
"""
##Fields:
# baseDir: Base directory of this list
# serverDir: Directory where we store active descriptors.
# rejectDir: Directory where we store invalid descriptors.
# archiveDir: Directory where we store old descriptors
# servers: Map from filename within <serverDir> to ServerInfo objects.
# serversByNickname: A map from lowercased server nickname to
# lists of filenames within <serverDir>
# idCache: an instance of Directory.IDCache
##Layout:
# basedir
# server-ids/
# nickname-dateinserted
# (Pickled: ("V0", (nickname, encoded public key)))
# incoming/new/
# nickname-dateinserted.N ...
# incoming/updates/
# nickname-dateinserted.N ...
# servers/
# nickname-dateinserted.N ...
# archive/
# nickname-dateinserted.N ...
# reject/
# nickname-dateinserted.N ...
# directory
# dirArchive/
# dir-dategenerated.N ...
# identity
# .lock
def __init__(self, baseDir, config, idCache=None):
    """Create a ServerList rooted at 'baseDir', creating any missing
       subdirectories, and scan the stored descriptors.
    """
    self.baseDir = baseDir
    self.config = config
    if idCache is None:
        idCache = mixminion.directory.Directory.IDCache(
            os.path.join(baseDir, "xx_idcache"))
    self.idCache = idCache
    join = os.path.join
    self.serverIDDir = join(self.baseDir, "server-ids")
    self.serverDir = join(self.baseDir, "servers")
    self.rejectDir = join(self.baseDir, "reject")
    self.archiveDir = join(self.baseDir, "archive")
    self.dirArchiveDir = join(self.baseDir, "dirArchive")
    self.lockfile = Lockfile(join(self.baseDir, ".lock"))
    self.rlock = threading.RLock()
    self.servers = {}
    self.serversByNickname = {}
    # Every working directory must exist with restrictive permissions.
    for d in (self.serverIDDir, self.serverDir, self.rejectDir,
              self.archiveDir, self.dirArchiveDir):
        createPrivateDir(d)
    self.rescan()
def isServerKnown(self, server):
    """Return true iff we already know a descriptor with this server's
       nickname and identity key.  Raise UIError if the nickname is known
       under a *different* identity key."""
    try:
        self._lock()
        try:
            return self.idCache.containsServer(server)
        except mixminion.directory.Directory.MismatchedID:
            # Same nickname, different key: refuse loudly.
            msg = ("Already know a server named "
                   "%r with different identity key.") % server.getNickname()
            raise UIError(msg)
    finally:
        self._unlock()
def rebuildIDCache(self):
    """Repopulate the ID cache from the pickled files under server-ids/."""
    for entry in os.listdir(self.serverIDDir):
        path = os.path.join(self.serverIDDir, entry)
        version, payload = readPickled(path)
        if version != "V0":
            # Unknown format; skip rather than guess.
            LOG.warn("Weird file version %s on %s", version, path)
            continue
        nickname, ident = payload
        digest = mixminion.Crypto.sha1(ident)
        self.idCache.insertID(nickname, digest)
def learnServerID(self, server):
    """Mark the ID for a server descriptor as the canonical
       identity key associated with that server's nickname, persist it
       under server-ids/, and save the ID cache.

       Raises MixFatalError if the nickname is already bound to a
       different identity key."""
    try:
        self._lock()
        ident = server.getIdentity()
        nickname = server.getNickname()
        try:
            if self.idCache.containsServer(server):
                LOG.warn("Server %s already known", nickname)
        # BUG FIX: MismatchedID lives in mixminion.directory.Directory
        # (as isServerKnown correctly spells it); the old
        # 'mixminion.directory.MismatchedID' would raise AttributeError
        # the moment the except clause was evaluated.
        except mixminion.directory.Directory.MismatchedID:
            raise MixFatalError("Mismatched ID for server %s" % nickname)
        LOG.info("Learning identity for new server %s", nickname)
        self.idCache.insertServer(server)
        writePickled(os.path.join(self.serverIDDir,
                                  nickname+"-"+formatFnameTime()),
                     ("V0", (nickname, pk_encode_public_key(ident))))
        self.idCache.save()
    finally:
        self._unlock()
def importServerInfo(self, contents, knownOnly=0, server=None):
    """Insert a ServerInfo into the list. If the server is expired, or
       superseded, or inconsistent, raise a MixError.

       contents -- a string containing the descriptor, or the name of a
           file containing the descriptor (possibly gzip'd)
       knownOnly -- if true, raise MixError if we don't already have
           a descriptor with this nickname.
       server -- If provided, a parsed ServerInfo corresponding to
           'contents'.
    """
    # Raises ConfigError, MixError,
    if not server:
        contents, server = _readServer(contents)

    try:
        self._lock()
        nickname = server.getNickname()
        lcnickname = nickname.lower()
        known = self.isServerKnown(server)
        if knownOnly and not known:
            raise UIError("Unknown server %s: use import-new."%nickname)

        # Is the server already invalid?
        if server.isExpiredAt(time.time()):
            raise UIError("Descriptor has already expired")

        # Is there already a server with the same nickname?
        # ('in' replaces dict.has_key, which no longer exists in Python 3.)
        if lcnickname in self.serversByNickname:
            # Okay -- make sure we don't have this same descriptor.
            for fn in self.serversByNickname[lcnickname]:
                oldServer = self.servers[fn]
                if oldServer['Server']['Digest'] == \
                   server['Server']['Digest']:
                    raise UIError("Server descriptor already inserted.")
            # Okay -- make sure that this server isn't superseded.
            if server.isSupersededBy(
                [ self.servers[fn] for fn in
                  self.serversByNickname[lcnickname]]):
                raise UIError("Server descriptor is superseded")

        if not known:
            # Is the identity new to us?
            self.learnServerID(server)

        newFile = _writeServer(self.serverDir, contents, nickname)

        # Now update the internal structure
        self.servers[newFile] = server
        self.serversByNickname.setdefault(lcnickname, []).append(newFile)
    finally:
        self._unlock()
def expungeServersByNickname(self, nickname):
    """Forcibly remove all servers named <nickname>, moving their
       descriptors into the archive directory."""
    try:
        self._lock()
        LOG.info("Removing all servers named %s", nickname)
        lcnickname = nickname.lower()
        # ('in' replaces dict.has_key, which no longer exists in Python 3.)
        if lcnickname not in self.serversByNickname:
            LOG.info(" (No such servers exist)")
            return
        servers = self.serversByNickname[lcnickname]
        for fn in servers:
            LOG.info(" Removing %s", fn)
            _moveServer(self.serverDir, self.archiveDir, fn)
            del self.servers[fn]
        del self.serversByNickname[lcnickname]
        LOG.info(" (%s servers removed)", len(servers))
    finally:
        self._unlock()
def generateDirectory(self,
startAt, endAt, extraTime,
identityKey,
publicationTime=None,
badServers=(),
excludeServers=()):
"""Generate and sign a new directory, to be effective from <startAt>
through <endAt>. It includes all servers that are valid at
any time between <startAt> and <endAt>+<extraTime>. The directory
is signed with <identityKey>.
Any servers whose nicknames appear in 'badServers' are marked as
not recommended; any servers whose nicknames appear in
'excludeServers' are left off the directory entirely.
"""
try:
self._lock()
self.clean()
if publicationTime is None:
publicationTime = time.time()
if previousMidnight(startAt) >= previousMidnight(endAt):
raise MixError("Validity range does not contain a full day.")
excludeServers = [ nickname.lower() for nickname in excludeServers]
# First, sort all servers by nickname.
includedByNickname = {}
for fn, s in self.servers.items():
nickname = s.getNickname().lower()
if nickname in excludeServers: continue
includedByNickname.setdefault(nickname, []).append((s, fn))
# Second, find all servers that are valid for part of the period,
# and that aren't superseded for the whole period.
timeRange = IntervalSet([(previousMidnight(startAt),
endAt+extraTime)])
for nickname, ss in includedByNickname.items():
# We prefer the most-recently-published descriptor. If two
# are published at the same time, we prefer the one that
# expires last.
ss = [ (s['Server']['Published'],
s['Server']['Valid-Until'],
s, fn) for s,fn in ss]
ss.sort()
ss.reverse()
uncovered = timeRange.copy()
included = []
for _, _, s, fn in ss:
valid = s.getIntervalSet()
if (uncovered * valid):
included.append((s, fn))
uncovered -= valid
includedByNickname[nickname] = included
# Now sort the remaining servers by nickname, then by valid-after.
included = []
for ss in includedByNickname.values():
for s,fn in ss:
nickname = s.getNickname()
validAfter = s['Server']['Valid-After']
included.append((nickname, validAfter, fn))
included.sort()
# FFFF We should probably not do all of this in RAM, but
# FFFF what the hey. It will only matter if we have many, many
# FFFF servers in the system.
contents = [ ]
for _, _, fn in included:
txt = readFile(os.path.join(self.serverDir, fn))
contents.append(txt)
goodServers = [n for n,_,_ in included if n not in badServers]
g = {}
for n in goodServers: g[n]=1
goodServers = g.keys()
goodServers.sort()
goodServers = ", ".join(goodServers)
clientVersions = self.config['Directory']['ClientVersions']
serverVersions = self.config['Directory']['ServerVersions']
#FFFF Support for multiple signatures
header = """\
[Directory]
Version: 0.2
Published: %s
Valid-After: %s
Valid-Until: %s
Recommended-Servers: %s
[Signature]
DirectoryIdentity: %s
DirectoryDigest:
DirectorySignature:
[Recommended-Software]
MixminionClient: %s
MixminionServer: %s
""" % (formatTime(publicationTime),
formatDate(startAt),
formatDate(endAt),
goodServers,
formatBase64(pk_encode_public_key(identityKey)),
", ".join(clientVersions),
", ".join(serverVersions))
directory = header+"".join(contents)
directory = _getDirectoryDigestImpl(directory, identityKey)
# Make sure that the directory checks out
# FFFF remove this once we are _very_ confident.
if 1:
parsed = ServerDirectory(string=directory)
includedDigests = {}
for _, _, fn in included:
includedDigests[self.servers[fn]['Server']['Digest']] = 1
foundDigests = {}
for s in parsed.getAllServers():
foundDigests[s['Server']['Digest']] = 1
assert foundDigests == includedDigests
writeFile(os.path.join(self.baseDir, "directory"),
directory,
mode=0644)
| |
# Repository: jjiege/odoo
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import re
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError
from odoo.osv import expression
from odoo.addons import decimal_precision as dp
from odoo.tools import float_compare, pycompat
_logger = logging.getLogger(__name__)
class ProductCategory(models.Model):
    """Hierarchical category tree used to classify product templates."""
    _name = "product.category"
    _description = "Product Category"
    _parent_name = "parent_id"
    _parent_store = True  # maintain parent_path for fast child_of queries
    _rec_name = 'complete_name'
    _order = 'complete_name'

    name = fields.Char('Name', index=True, required=True, translate=True)
    # Stored "Parent / Child" path so it can be searched and ordered on.
    complete_name = fields.Char(
        'Complete Name', compute='_compute_complete_name',
        store=True)
    parent_id = fields.Many2one('product.category', 'Parent Category', index=True, ondelete='cascade')
    parent_path = fields.Char(index=True)
    child_id = fields.One2many('product.category', 'parent_id', 'Child Categories')
    product_count = fields.Integer(
        '# Products', compute='_compute_product_count',
        help="The number of products under this category (Does not consider the children categories)")

    @api.depends('name', 'parent_id.complete_name')
    def _compute_complete_name(self):
        # Recurses implicitly through the parent's own computed complete_name.
        for category in self:
            if category.parent_id:
                category.complete_name = '%s / %s' % (category.parent_id.complete_name, category.name)
            else:
                category.complete_name = category.name

    def _compute_product_count(self):
        # One read_group over all descendant categories, then aggregate.
        read_group_res = self.env['product.template'].read_group([('categ_id', 'child_of', self.ids)], ['categ_id'], ['categ_id'])
        group_data = dict((data['categ_id'][0], data['categ_id_count']) for data in read_group_res)
        for categ in self:
            product_count = 0
            # NOTE(review): this sums counts over the whole subtree
            # ('child_of'), which contradicts the field's help text claiming
            # children are not considered -- confirm intended behavior.
            for sub_categ_id in categ.search([('id', 'child_of', categ.id)]).ids:
                product_count += group_data.get(sub_categ_id, 0)
            categ.product_count = product_count

    @api.constrains('parent_id')
    def _check_category_recursion(self):
        # Guard against cycles in the category tree.
        if not self._check_recursion():
            raise ValidationError(_('You cannot create recursive categories.'))
        return True

    @api.model
    def name_create(self, name):
        # Quick-create used by many2one widgets: create from name only.
        return self.create({'name': name}).name_get()[0]
class ProductPriceHistory(models.Model):
    """ Keep track of the ``product.template`` standard prices as they are changed. """
    _name = 'product.price.history'
    _rec_name = 'datetime'
    _order = 'datetime desc'
    _description = 'Product Price List History'

    def _get_default_company_id(self):
        # Honour an explicitly forced company (multi-company flows),
        # defaulting to the current user's company.
        return self._context.get('force_company', self.env.user.company_id.id)

    company_id = fields.Many2one('res.company', string='Company',
        default=_get_default_company_id, required=True)
    product_id = fields.Many2one('product.product', 'Product', ondelete='cascade', required=True)
    # Timestamp of the cost change; newest first (see _order).
    datetime = fields.Datetime('Date', default=fields.Datetime.now)
    cost = fields.Float('Cost', digits=dp.get_precision('Product Price'))
class ProductProduct(models.Model):
    """Product variant: one concrete combination of attribute values of a
    product.template (which it _inherits via product_tmpl_id)."""
    _name = "product.product"
    _description = "Product"
    _inherits = {'product.template': 'product_tmpl_id'}
    _inherit = ['mail.thread', 'mail.activity.mixin']
    _order = 'default_code, name, id'

    # price: total price, context dependent (partner, pricelist, quantity)
    price = fields.Float(
        'Price', compute='_compute_product_price',
        digits=dp.get_precision('Product Price'), inverse='_set_product_price')
    # price_extra: catalog extra value only, sum of variant extra attributes
    price_extra = fields.Float(
        'Variant Price Extra', compute='_compute_product_price_extra',
        digits=dp.get_precision('Product Price'),
        help="This is the sum of the extra price of all attributes")
    # lst_price: catalog value + extra, context dependent (uom)
    lst_price = fields.Float(
        'Sale Price', compute='_compute_product_lst_price',
        digits=dp.get_precision('Product Price'), inverse='_set_product_lst_price',
        help="The sale price is managed from the product template. Click on the 'Configure Variants' button to set the extra attribute prices.")

    default_code = fields.Char('Internal Reference', index=True)
    # code/partner_ref: context-dependent display helpers (see computes).
    code = fields.Char('Reference', compute='_compute_product_code')
    partner_ref = fields.Char('Customer Ref', compute='_compute_partner_ref')

    active = fields.Boolean(
        'Active', default=True,
        help="If unchecked, it will allow you to hide the product without removing it.")
    product_tmpl_id = fields.Many2one(
        'product.template', 'Product Template',
        auto_join=True, index=True, ondelete="cascade", required=True)
    barcode = fields.Char(
        'Barcode', copy=False, oldname='ean13',
        help="International Article Number used for product identification.")
    attribute_value_ids = fields.Many2many(
        'product.attribute.value', string='Attribute Values', ondelete='restrict')
    product_template_attribute_value_ids = fields.Many2many(
        'product.template.attribute.value', string='Template Attribute Values', compute="_compute_product_template_attribute_value_ids")
    # image: all image fields are base64 encoded and PIL-supported
    image_variant = fields.Binary(
        "Variant Image", attachment=True,
        help="This field holds the image used as image for the product variant, limited to 1024x1024px.")
    image = fields.Binary(
        "Big-sized image", compute='_compute_images', inverse='_set_image',
        help="Image of the product variant (Big-sized image of product template if false). It is automatically "
             "resized as a 1024x1024px image, with aspect ratio preserved.")
    image_small = fields.Binary(
        "Small-sized image", compute='_compute_images', inverse='_set_image_small',
        help="Image of the product variant (Small-sized image of product template if false).")
    image_medium = fields.Binary(
        "Medium-sized image", compute='_compute_images', inverse='_set_image_medium',
        help="Image of the product variant (Medium-sized image of product template if false).")

    is_product_variant = fields.Boolean(compute='_compute_is_product_variant')

    standard_price = fields.Float(
        'Cost', company_dependent=True,
        digits=dp.get_precision('Product Price'),
        groups="base.group_user",
        help = "Cost used for stock valuation in standard price and as a first price to set in average/fifo. "
               "Also used as a base price for pricelists. "
               "Expressed in the default unit of measure of the product.")
    volume = fields.Float('Volume', help="The volume in m3.")
    weight = fields.Float(
        'Weight', digits=dp.get_precision('Stock Weight'),
        help="Weight of the product, packaging not included. The unit of measure can be changed in the general settings")

    pricelist_item_ids = fields.Many2many(
        'product.pricelist.item', 'Pricelist Items', compute='_get_pricelist_items')

    packaging_ids = fields.One2many(
        'product.packaging', 'product_id', 'Product Packages',
        help="Gives the different ways to package the same product.")

    _sql_constraints = [
        ('barcode_uniq', 'unique(barcode)', "A barcode can only be assigned to one product !"),
    ]
def _get_invoice_policy(self):
    # Hook for other modules (e.g. sale) to supply an invoicing policy;
    # the base product module has none.
    return False
def _compute_is_product_variant(self):
for product in self:
product.is_product_variant = True
def _compute_product_price(self):
prices = {}
pricelist_id_or_name = self._context.get('pricelist')
if pricelist_id_or_name:
pricelist = None
partner = self.env.context.get('partner', False)
quantity = self.env.context.get('quantity', 1.0)
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist_id_or_name, pycompat.string_types):
pricelist_name_search = self.env['product.pricelist'].name_search(pricelist_id_or_name, operator='=', limit=1)
if pricelist_name_search:
pricelist = self.env['product.pricelist'].browse([pricelist_name_search[0][0]])
elif isinstance(pricelist_id_or_name, pycompat.integer_types):
pricelist = self.env['product.pricelist'].browse(pricelist_id_or_name)
if pricelist:
quantities = [quantity] * len(self)
partners = [partner] * len(self)
prices = pricelist.get_products_price(self, quantities, partners)
for product in self:
product.price = prices.get(product.id, 0.0)
def _set_product_price(self):
for product in self:
if self._context.get('uom'):
value = self.env['uom.uom'].browse(self._context['uom'])._compute_price(product.price, product.uom_id)
else:
value = product.price
value -= product.price_extra
product.write({'list_price': value})
def _set_product_lst_price(self):
for product in self:
if self._context.get('uom'):
value = self.env['uom.uom'].browse(self._context['uom'])._compute_price(product.lst_price, product.uom_id)
else:
value = product.lst_price
value -= product.price_extra
product.write({'list_price': value})
@api.depends('product_template_attribute_value_ids.price_extra')
def _compute_product_price_extra(self):
    # Sum of the extra prices of every attribute value of the variant.
    for product in self:
        extras = product.mapped('product_template_attribute_value_ids.price_extra')
        product.price_extra = sum(extras)
@api.depends('list_price', 'price_extra')
def _compute_product_lst_price(self):
    """Sale price: the template list price (converted to the context UoM
    when one is given) plus the variant price extra."""
    target_uom = None
    if 'uom' in self._context:
        target_uom = self.env['uom.uom'].browse([self._context['uom']])
    for product in self:
        base = product.list_price
        if target_uom:
            base = product.uom_id._compute_price(base, target_uom)
        product.lst_price = base + product.price_extra
@api.one
def _compute_product_code(self):
    # Prefer the code the context partner (supplier) uses for this
    # product; fall back to our own internal reference.
    partner_id = self._context.get('partner_id')
    code = self.default_code
    for seller in self.seller_ids:
        if seller.name.id == partner_id:
            code = seller.product_code or self.default_code
            break
    self.code = code
@api.one
def _compute_partner_ref(self):
    # Customer-facing reference: the context partner's own product name
    # (prefixed with our code) when available, else our display name.
    partner_id = self._context.get('partner_id')
    for seller in self.seller_ids:
        if seller.name.id == partner_id:
            label = seller.product_name or self.default_code or self.name
            prefix = '[%s] ' % self.code if self.code else ''
            self.partner_ref = '%s%s' % (prefix, label)
            break
    else:
        self.partner_ref = self.name_get()[0][1]
@api.one
@api.depends('image_variant', 'product_tmpl_id.image')
def _compute_images(self):
    """Expose the variant image in all three sizes, falling back to the
    template's image for any size the variant does not define."""
    if self._context.get('bin_size'):
        # bin_size mode: the binary fields carry sizes, not payloads,
        # so there is nothing to resize.
        self.image_medium = self.image_variant
        self.image_small = self.image_variant
        self.image = self.image_variant
    else:
        resized = tools.image_get_resized_images(
            self.image_variant, return_big=True, avoid_resize_medium=True)
        self.image_medium = resized['image_medium']
        self.image_small = resized['image_small']
        self.image = resized['image']
    if not self.image_medium:
        self.image_medium = self.product_tmpl_id.image_medium
    if not self.image_small:
        self.image_small = self.product_tmpl_id.image_small
    if not self.image:
        self.image = self.product_tmpl_id.image
@api.one
def _set_image(self):
    # Inverse for 'image': delegate to the shared variant/template logic.
    self._set_image_value(self.image)
@api.one
def _set_image_medium(self):
    # Inverse for 'image_medium': delegate to the shared logic.
    self._set_image_value(self.image_medium)
@api.one
def _set_image_small(self):
    # Inverse for 'image_small': delegate to the shared logic.
    self._set_image_value(self.image_small)
@api.one
def _set_image_value(self, value):
    """Store an uploaded image on the variant or on the template."""
    if isinstance(value, pycompat.text_type):
        value = value.encode('ascii')
    image = tools.image_resize_image_big(value)
    # This is needed because when there is only one variant, the user
    # doesn't know there is a difference between template and variant, he
    # expects both images to be the same.
    single_visible = not (self.product_tmpl_id.image and self.product_variant_count > 1)
    if single_visible:
        self.image_variant = False
        self.product_tmpl_id.image = image
    else:
        self.image_variant = image
@api.depends('product_tmpl_id', 'attribute_value_ids')
def _compute_product_template_attribute_value_ids(self):
    # Prefetch every matching template attribute value in a single query
    # and index it by (template id, attribute value id), so we avoid one
    # search per product below.
    ptavs = self.env['product.template.attribute.value'].search([
        ('product_tmpl_id', 'in', self.mapped('product_tmpl_id').ids),
        ('product_attribute_value_id', 'in', self.mapped('attribute_value_ids').ids),
    ])
    by_template = {}
    for ptav in ptavs:
        by_template.setdefault(ptav.product_tmpl_id.id, {})[
            ptav.product_attribute_value_id.id] = ptav
    for product in self:
        product.product_template_attribute_value_ids = self.env['product.template.attribute.value']
        mapping = by_template.get(product.product_tmpl_id.id, {})
        for pav in product.attribute_value_ids:
            if pav.id not in mapping:
                _logger.warning("A matching product.template.attribute.value was not found for the product.attribute.value #%s on the template #%s" % (pav.id, product.product_tmpl_id.id))
            else:
                product.product_template_attribute_value_ids += mapping[pav.id]
@api.one
def _get_pricelist_items(self):
    # Pricelist rules that apply to this variant, either directly or
    # through its template.
    domain = ['|',
              ('product_id', '=', self.id),
              ('product_tmpl_id', '=', self.product_tmpl_id.id)]
    self.pricelist_item_ids = self.env['product.pricelist.item'].search(domain).ids
@api.constrains('attribute_value_ids')
def _check_attribute_value_ids(self):
    # A variant may carry at most one value per attribute; only values of
    # attributes with create_variant == 'always' count toward that limit.
    for product in self:
        seen_attributes = self.env['product.attribute']
        for value in product.attribute_value_ids:
            if value.attribute_id in seen_attributes:
                raise ValidationError(_('Error! It is not allowed to choose more than one value for a given attribute.'))
            if value.attribute_id.create_variant == 'always':
                seen_attributes |= value.attribute_id
    return True
@api.onchange('uom_id', 'uom_po_id')
def _onchange_uom(self):
    # Purchase UoM must share the sales UoM category; reset it otherwise.
    if not (self.uom_id and self.uom_po_id):
        return
    if self.uom_id.category_id != self.uom_po_id.category_id:
        self.uom_po_id = self.uom_id
@api.model_create_multi
def create(self, vals_list):
    """Create variants, initialise their standard price, and flush the
    caches that depend on the set of existing variants."""
    products = super(ProductProduct, self.with_context(create_product_product=True)).create(vals_list)
    for product, vals in pycompat.izip(products, vals_list):
        # When a unique variant is created from tmpl then the standard price is set by _set_standard_price
        if not (self.env.context.get('create_from_tmpl') and len(product.product_tmpl_id.product_variant_ids) == 1):
            product._set_standard_price(vals.get('standard_price') or 0.0)

    # `_get_variant_id_for_combination` depends on existing variants
    self.clear_caches()
    stale_fields = [
        'valid_archived_variant_ids',
        'valid_existing_variant_ids',
        'product_variant_ids',
        'product_variant_id',
        'product_variant_count',
    ]
    self.env['product.template'].invalidate_cache(
        fnames=stale_fields,
        ids=products.mapped('product_tmpl_id').ids)
    return products
@api.multi
def write(self, values):
''' Store the standard price change in order to be able to retrieve the cost of a product for a given date'''
res = super(ProductProduct, self).write(values)
if 'standard_price' in values:
self._set_standard_price(values['standard_price'])
if 'attribute_value_ids' in values:
# `_get_variant_id_for_combination` depends on `attribute_value_ids`
self.clear_caches()
if 'active' | |
# File: src/win/bootDeviceWin_FDCB.py
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Oct 26 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class bootDeviceWin_FDCB
###########################################################################
class bootDeviceWin_FDCB ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 1374,699 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
wSizer_win = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
self.m_notebook_ipCfg0 = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_panel_ipCfg0 = wx.Panel( self.m_notebook_ipCfg0, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
fgSizer_ipCfg0 = wx.FlexGridSizer( 0, 2, 0, 0 )
fgSizer_ipCfg0.SetFlexibleDirection( wx.BOTH )
fgSizer_ipCfg0.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_staticText_tag = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"tag:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_tag.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_tag, 0, wx.ALL, 5 )
self.m_textCtrl_tag = wx.TextCtrl( self.m_panel_ipCfg0, wx.ID_ANY, u"0x42464346", wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
fgSizer_ipCfg0.Add( self.m_textCtrl_tag, 0, wx.ALL, 5 )
self.m_staticText_version = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"version:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_version.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_version, 0, wx.ALL, 5 )
self.m_textCtrl_version = wx.TextCtrl( self.m_panel_ipCfg0, wx.ID_ANY, u"0x56010400", wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
fgSizer_ipCfg0.Add( self.m_textCtrl_version, 0, wx.ALL, 5 )
self.m_staticText_readSampleClkSrc = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"readSampleClkSrc:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_readSampleClkSrc.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_readSampleClkSrc, 0, wx.ALL, 5 )
m_choice_readSampleClkSrcChoices = [ u"0 - LoopbackInternally", u"1 - LoopbackFromDqsPad", u"2 - LoopbackFromSckPad", u"3 - ExternalInputFromDqsPad" ]
self.m_choice_readSampleClkSrc = wx.Choice( self.m_panel_ipCfg0, wx.ID_ANY, wx.DefaultPosition, wx.Size( 145,-1 ), m_choice_readSampleClkSrcChoices, 0 )
self.m_choice_readSampleClkSrc.SetSelection( 0 )
fgSizer_ipCfg0.Add( self.m_choice_readSampleClkSrc, 0, wx.ALL, 5 )
self.m_staticText_csHoldTime = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"csHoldTime:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_csHoldTime.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_csHoldTime, 0, wx.ALL, 5 )
self.m_textCtrl_csHoldTime = wx.TextCtrl( self.m_panel_ipCfg0, wx.ID_ANY, u"0x3", wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
fgSizer_ipCfg0.Add( self.m_textCtrl_csHoldTime, 0, wx.ALL, 5 )
self.m_staticText_csSetupTime = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"csSetupTime:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_csSetupTime.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_csSetupTime, 0, wx.ALL, 5 )
self.m_textCtrl_csSetupTime = wx.TextCtrl( self.m_panel_ipCfg0, wx.ID_ANY, u"0x3", wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
fgSizer_ipCfg0.Add( self.m_textCtrl_csSetupTime, 0, wx.ALL, 5 )
self.m_staticText_columnAddressWidth = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"columnAddressWidth:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_columnAddressWidth.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_columnAddressWidth, 0, wx.ALL, 5 )
m_choice_columnAddressWidthChoices = [ u"0 - Other devices", u"3 - For HyperFlash" ]
self.m_choice_columnAddressWidth = wx.Choice( self.m_panel_ipCfg0, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice_columnAddressWidthChoices, 0 )
self.m_choice_columnAddressWidth.SetSelection( 0 )
fgSizer_ipCfg0.Add( self.m_choice_columnAddressWidth, 0, wx.ALL, 5 )
self.m_staticText_deviceModeCfgEnable = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"deviceModeCfgEnable:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_deviceModeCfgEnable.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_deviceModeCfgEnable, 0, wx.ALL, 5 )
m_choice_deviceModeCfgEnableChoices = [ u"0 - Disable", u"1 - Enable" ]
self.m_choice_deviceModeCfgEnable = wx.Choice( self.m_panel_ipCfg0, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice_deviceModeCfgEnableChoices, 0 )
self.m_choice_deviceModeCfgEnable.SetSelection( 0 )
fgSizer_ipCfg0.Add( self.m_choice_deviceModeCfgEnable, 0, wx.ALL, 5 )
self.m_staticText_deviceModeType = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"deviceModeType:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_deviceModeType.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_deviceModeType, 0, wx.ALL, 5 )
m_choice_deviceModeTypeChoices = [ u"0 - Generic", u"1 - Quad Enable", u"2 - SPI to xSPI mode", u"3 - xSPI to SPI mode", u"4 - SPI to NoCmd mode" ]
self.m_choice_deviceModeType = wx.Choice( self.m_panel_ipCfg0, wx.ID_ANY, wx.DefaultPosition, wx.Size( 135,-1 ), m_choice_deviceModeTypeChoices, 0 )
self.m_choice_deviceModeType.SetSelection( 0 )
fgSizer_ipCfg0.Add( self.m_choice_deviceModeType, 0, wx.ALL, 5 )
self.m_staticText_waitTimeCfgCommands = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"waitTimeCfgCommands:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_waitTimeCfgCommands.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_waitTimeCfgCommands, 0, wx.ALL, 5 )
self.m_textCtrl_waitTimeCfgCommands = wx.TextCtrl( self.m_panel_ipCfg0, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg0.Add( self.m_textCtrl_waitTimeCfgCommands, 0, wx.ALL, 5 )
self.m_staticText_deviceModeArg = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"deviceModeArg:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_deviceModeArg.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_deviceModeArg, 0, wx.ALL, 5 )
self.m_textCtrl_deviceModeArg = wx.TextCtrl( self.m_panel_ipCfg0, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg0.Add( self.m_textCtrl_deviceModeArg, 0, wx.ALL, 5 )
self.m_staticText_configCmdEnable = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"configCmdEnable:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_configCmdEnable.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_configCmdEnable, 0, wx.ALL, 5 )
m_choice_configCmdEnableChoices = [ u"0 - Disable", u"1 - Enable" ]
self.m_choice_configCmdEnable = wx.Choice( self.m_panel_ipCfg0, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice_configCmdEnableChoices, 0 )
self.m_choice_configCmdEnable.SetSelection( 0 )
fgSizer_ipCfg0.Add( self.m_choice_configCmdEnable, 0, wx.ALL, 5 )
self.m_staticText_configModeType0 = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"configModeType[0]:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_configModeType0.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_configModeType0, 0, wx.ALL, 5 )
m_choice_configModeType0Choices = [ u"0 - Generic", u"1 - Quad Enable", u"2 - SPI to xSPI mode", u"3 - xSPI to SPI mode", u"4 - SPI to NoCmd mode" ]
self.m_choice_configModeType0 = wx.Choice( self.m_panel_ipCfg0, wx.ID_ANY, wx.DefaultPosition, wx.Size( 135,-1 ), m_choice_configModeType0Choices, 0 )
self.m_choice_configModeType0.SetSelection( 0 )
fgSizer_ipCfg0.Add( self.m_choice_configModeType0, 0, wx.ALL, 5 )
self.m_staticText_configModeType1 = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"configModeType[1]:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_configModeType1.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_configModeType1, 0, wx.ALL, 5 )
m_choice_configModeType1Choices = [ u"0 - Generic", u"1 - Quad Enable", u"2 - SPI to xSPI mode", u"3 - xSPI to SPI mode", u"4 - SPI to NoCmd mode" ]
self.m_choice_configModeType1 = wx.Choice( self.m_panel_ipCfg0, wx.ID_ANY, wx.DefaultPosition, wx.Size( 135,-1 ), m_choice_configModeType1Choices, 0 )
self.m_choice_configModeType1.SetSelection( 0 )
fgSizer_ipCfg0.Add( self.m_choice_configModeType1, 0, wx.ALL, 5 )
self.m_staticText_configModeType2 = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"configModeType[2]:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_configModeType2.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_configModeType2, 0, wx.ALL, 5 )
m_choice_configModeType2Choices = [ u"0 - Generic", u"1 - Quad Enable", u"2 - SPI to xSPI mode", u"3 - xSPI to SPI mode", u"4 - SPI to NoCmd mode" ]
self.m_choice_configModeType2 = wx.Choice( self.m_panel_ipCfg0, wx.ID_ANY, wx.DefaultPosition, wx.Size( 135,-1 ), m_choice_configModeType2Choices, 0 )
self.m_choice_configModeType2.SetSelection( 0 )
fgSizer_ipCfg0.Add( self.m_choice_configModeType2, 0, wx.ALL, 5 )
self.m_staticText_configCmdArgs0 = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"configCmdArgs[0]:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_configCmdArgs0.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_configCmdArgs0, 0, wx.ALL, 5 )
self.m_textCtrl_configCmdArgs0 = wx.TextCtrl( self.m_panel_ipCfg0, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg0.Add( self.m_textCtrl_configCmdArgs0, 0, wx.ALL, 5 )
self.m_staticText_configCmdArgs1 = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"configCmdArgs[1]:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_configCmdArgs1.SetLabelMarkup( u"configCmdArgs[1]:" )
self.m_staticText_configCmdArgs1.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_configCmdArgs1, 0, wx.ALL, 5 )
self.m_textCtrl_configCmdArgs1 = wx.TextCtrl( self.m_panel_ipCfg0, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg0.Add( self.m_textCtrl_configCmdArgs1, 0, wx.ALL, 5 )
self.m_staticText_configCmdArgs2 = wx.StaticText( self.m_panel_ipCfg0, wx.ID_ANY, u"configCmdArgs[2]:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_configCmdArgs2.Wrap( -1 )
fgSizer_ipCfg0.Add( self.m_staticText_configCmdArgs2, 0, wx.ALL, 5 )
self.m_textCtrl_configCmdArgs2 = wx.TextCtrl( self.m_panel_ipCfg0, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg0.Add( self.m_textCtrl_configCmdArgs2, 0, wx.ALL, 5 )
self.m_panel_ipCfg0.SetSizer( fgSizer_ipCfg0 )
self.m_panel_ipCfg0.Layout()
fgSizer_ipCfg0.Fit( self.m_panel_ipCfg0 )
self.m_notebook_ipCfg0.AddPage( self.m_panel_ipCfg0, u"flexspi ip cfg0", False )
wSizer_win.Add( self.m_notebook_ipCfg0, 1, wx.EXPAND |wx.ALL, 5 )
self.m_notebook_ipCfg1 = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_panel_ipCfg1 = wx.Panel( self.m_notebook_ipCfg1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
fgSizer_ipCfg1 = wx.FlexGridSizer( 0, 2, 0, 0 )
fgSizer_ipCfg1.SetFlexibleDirection( wx.BOTH )
fgSizer_ipCfg1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_button_controllerMiscOption = wx.Button( self.m_panel_ipCfg1, wx.ID_ANY, u"controllerMiscOption", wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg1.Add( self.m_button_controllerMiscOption, 0, wx.ALL, 5 )
self.m_textCtrl_controllerMiscOption = wx.TextCtrl( self.m_panel_ipCfg1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg1.Add( self.m_textCtrl_controllerMiscOption, 0, wx.ALL, 5 )
self.m_staticText_deviceType = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"deviceType:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_deviceType.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_deviceType, 0, wx.ALL, 5 )
m_choice_deviceTypeChoices = [ u"1 - Serial NOR" ]
self.m_choice_deviceType = wx.Choice( self.m_panel_ipCfg1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice_deviceTypeChoices, 0 )
self.m_choice_deviceType.SetSelection( 0 )
fgSizer_ipCfg1.Add( self.m_choice_deviceType, 0, wx.ALL, 5 )
self.m_staticText_sflashPadType = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"sflashPadType:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_sflashPadType.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_sflashPadType, 0, wx.ALL, 5 )
m_choice_sflashPadTypeChoices = [ u"1 - Single pad", u"2 - Dual Pads", u"4 - Quad pads", u"8 - Octal pads" ]
self.m_choice_sflashPadType = wx.Choice( self.m_panel_ipCfg1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice_sflashPadTypeChoices, 0 )
self.m_choice_sflashPadType.SetSelection( 0 )
fgSizer_ipCfg1.Add( self.m_choice_sflashPadType, 0, wx.ALL, 5 )
self.m_staticText_serialClkFreq = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"serialClkFreq:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_serialClkFreq.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_serialClkFreq, 0, wx.ALL, 5 )
m_choice_serialClkFreqChoices = []
self.m_choice_serialClkFreq = wx.Choice( self.m_panel_ipCfg1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice_serialClkFreqChoices, 0 )
self.m_choice_serialClkFreq.SetSelection( 0 )
fgSizer_ipCfg1.Add( self.m_choice_serialClkFreq, 0, wx.ALL, 5 )
self.m_staticText_lutCustomSeqEnable = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"lutCustomSeqEnable:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_lutCustomSeqEnable.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_lutCustomSeqEnable, 0, wx.ALL, 5 )
m_choice_lutCustomSeqEnableChoices = [ u"0 - Pre-defined id & num", u"1 -Parameters in this block" ]
self.m_choice_lutCustomSeqEnable = wx.Choice( self.m_panel_ipCfg1, wx.ID_ANY, wx.DefaultPosition, wx.Size( 145,-1 ), m_choice_lutCustomSeqEnableChoices, 0 )
self.m_choice_lutCustomSeqEnable.SetSelection( 0 )
fgSizer_ipCfg1.Add( self.m_choice_lutCustomSeqEnable, 0, wx.ALL, 5 )
self.m_staticText_csPadSettingOverride = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"csPadSettingOverride:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_csPadSettingOverride.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_csPadSettingOverride, 0, wx.ALL, 5 )
self.m_textCtrl_csPadSettingOverride = wx.TextCtrl( self.m_panel_ipCfg1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg1.Add( self.m_textCtrl_csPadSettingOverride, 0, wx.ALL, 5 )
self.m_staticText_sclkPadSettingOverride = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"sclkPadSettingOverride:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_sclkPadSettingOverride.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_sclkPadSettingOverride, 0, wx.ALL, 5 )
self.m_textCtrl_sclkPadSettingOverride = wx.TextCtrl( self.m_panel_ipCfg1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg1.Add( self.m_textCtrl_sclkPadSettingOverride, 0, wx.ALL, 5 )
self.m_staticText_dataPadSettingOverride = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"dataPadSettingOverride", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_dataPadSettingOverride.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_dataPadSettingOverride, 0, wx.ALL, 5 )
self.m_textCtrl_dataPadSettingOverride = wx.TextCtrl( self.m_panel_ipCfg1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg1.Add( self.m_textCtrl_dataPadSettingOverride, 0, wx.ALL, 5 )
self.m_staticText_dqsPadSettingOverride = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"dqsPadSettingOverride:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_dqsPadSettingOverride.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_dqsPadSettingOverride, 0, wx.ALL, 5 )
self.m_textCtrl_dqsPadSettingOverride = wx.TextCtrl( self.m_panel_ipCfg1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg1.Add( self.m_textCtrl_dqsPadSettingOverride, 0, wx.ALL, 5 )
self.m_staticText_timeoutInMs = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"timeoutInMs:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_timeoutInMs.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_timeoutInMs, 0, wx.ALL, 5 )
self.m_textCtrl_timeoutInMs = wx.TextCtrl( self.m_panel_ipCfg1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg1.Add( self.m_textCtrl_timeoutInMs, 0, wx.ALL, 5 )
self.m_staticText_commandInterval = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"commandInterval:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_commandInterval.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_commandInterval, 0, wx.ALL, 5 )
self.m_textCtrl_commandInterval = wx.TextCtrl( self.m_panel_ipCfg1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg1.Add( self.m_textCtrl_commandInterval, 0, wx.ALL, 5 )
self.m_staticText_dataValidTime0time_100ps = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"dataValidTime[0].time_100ps:", wx.DefaultPosition, wx.Size( 160,-1 ), 0 )
self.m_staticText_dataValidTime0time_100ps.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_dataValidTime0time_100ps, 0, wx.ALL, 5 )
self.m_textCtrl_dataValidTime0time_100ps = wx.TextCtrl( self.m_panel_ipCfg1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg1.Add( self.m_textCtrl_dataValidTime0time_100ps, 0, wx.ALL, 5 )
self.m_staticText_dataValidTime0delay_cells = wx.StaticText( self.m_panel_ipCfg1, wx.ID_ANY, u"dataValidTime[0].delay_cells:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_dataValidTime0delay_cells.Wrap( -1 )
fgSizer_ipCfg1.Add( self.m_staticText_dataValidTime0delay_cells, 0, wx.ALL, 5 )
self.m_textCtrl_dataValidTime0delay_cells = wx.TextCtrl( self.m_panel_ipCfg1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer_ipCfg1.Add( self.m_textCtrl_dataValidTime0delay_cells, 0, wx.ALL, 5 )
self.m_staticText_dataValidTime1time_100ps | |
= random.choices(in_nodes_list, prob_in_copy)[0]
while in_nodes_count_copy[product2] < (1 + mod_num):
product2 = random.choices(in_nodes_list, prob_in_copy)[0]
reactant = random.choice(in_nodes_list)
if [[reactant], [product1, product2]] in reaction_list2:
pick_continued += 1
continue
if [[reactant], [product2, product1]] in reaction_list2:
pick_continued += 1
continue
if not mass_violating_reactions and reactant in {product1, product2}:
pick_continued += 1
continue
# if reaction_type == 'metabolic' and reactant in {product1, product2}:
# pick_continued += 1
# continue
mod_species = random.sample(nodes_list, mod_num)
reg_signs = [random.choices([1, -1], [mod_reg[1], 1 - mod_reg[1]])[0] for _ in mod_species]
reg_type = [random.choices(['a', 's'], [mod_reg[2], 1 - mod_reg[2]])[0] for _ in mod_species]
in_nodes_count[product1] -= (1 + mod_num)
in_nodes_count[product2] -= (1 + mod_num)
reaction_list.append([rt, [reactant], [product1, product2], mod_species, reg_signs, reg_type])
reaction_list2.append([[reactant], [product1, product2]])
edge_list.append((reactant, product1))
edge_list.append((reactant, product2))
if edge_type == 'metabolic':
if sum(1 for each in in_nodes_count if each >= 1) < 2 \
and max(in_nodes_count) < 2:
pick_continued += 1
continue
sum_in = sum(in_nodes_count)
prob_in = [x / sum_in for x in in_nodes_count]
product1 = random.choices(in_nodes_list, prob_in)[0]
while in_nodes_count[product1] < 1:
product1 = random.choices(in_nodes_list, prob_in)[0]
in_nodes_count_copy = deepcopy(in_nodes_count)
in_nodes_count_copy[product1] -= 1
sum_in_copy = sum(in_nodes_count_copy)
prob_in_copy = [x / sum_in_copy for x in in_nodes_count_copy]
product2 = random.choices(in_nodes_list, prob_in_copy)[0]
while in_nodes_count_copy[product2] < 1:
product2 = random.choices(in_nodes_list, prob_in_copy)[0]
reactant = random.choice(in_nodes_list)
if [[reactant], [product1, product2]] in reaction_list2:
pick_continued += 1
continue
if [[reactant], [product2, product1]] in reaction_list2:
pick_continued += 1
continue
if not mass_violating_reactions and reactant in {product1, product2}:
pick_continued += 1
continue
# if reaction_type == 'metabolic' and reactant in {product1, product2}:
# pick_continued += 1
# continue
mod_species = random.sample(nodes_list, mod_num)
reg_signs = [random.choices([1, -1], [mod_reg[1], 1 - mod_reg[1]])[0] for _ in mod_species]
reg_type = [random.choices(['a', 's'], [mod_reg[2], 1 - mod_reg[2]])[0] for _ in mod_species]
if reactant != product1 and reactant != product2 and product1 != product2:
edge_list.append((reactant, product1))
edge_list.append((reactant, product2))
in_nodes_count[product1] -= 1
in_nodes_count[product2] -= 1
if reactant != product1 and product1 == product2:
edge_list.append((reactant, product1))
in_nodes_count[product1] -= 1
if reactant == product1 and product1 != product2:
edge_list.append(('syn', product2))
if reactant == product2 and product1 != product2:
edge_list.append(('syn', product1))
if reactant == product1 and product1 == product2:
edge_list.append(('syn', reactant))
reaction_list.append([rt, [reactant], [product1, product2], mod_species, reg_signs, reg_type])
reaction_list2.append([[reactant], [product1, product2]])
# -----------------------------------------------------------------
if rt == TReactionType.BIBI:
if edge_type == 'generic':
if sum(1 for each in in_nodes_count if each >= (2 + mod_num)) < 2 \
and max(in_nodes_count) < (4 + 2 * mod_num):
pick_continued += 1
continue
sum_in = sum(in_nodes_count)
prob_in = [x / sum_in for x in in_nodes_count]
product1 = random.choices(in_nodes_list, prob_in)[0]
while in_nodes_count[product1] < (2 + mod_num):
product1 = random.choices(in_nodes_list, prob_in)[0]
in_nodes_count_copy = deepcopy(in_nodes_count)
in_nodes_count_copy[product1] -= (2 + mod_num)
sum_in_copy = sum(in_nodes_count_copy)
prob_in_copy = [x / sum_in_copy for x in in_nodes_count_copy]
product2 = random.choices(in_nodes_list, prob_in_copy)[0]
while in_nodes_count_copy[product2] < (2 + mod_num):
product2 = random.choices(in_nodes_list, prob_in)[0]
reactant1 = random.choice(in_nodes_list)
reactant2 = random.choice(in_nodes_list)
if [[reactant1, reactant2], [product1, product2]] in reaction_list2:
pick_continued += 1
continue
if [[reactant2, reactant1], [product1, product2]] in reaction_list2:
pick_continued += 1
continue
if [[reactant1, reactant2], [product2, product1]] in reaction_list2:
pick_continued += 1
continue
if [[reactant2, reactant1], [product2, product1]] in reaction_list2:
pick_continued += 1
continue
if {reactant1, reactant2} == {product1, product2}:
pick_continued += 1
continue
# if reaction_type == 'metabolic' and {reactant1, reactant2} & {product1, product2}:
# pick_continued += 1
# continue
mod_species = random.sample(nodes_list, mod_num)
reg_signs = [random.choices([1, -1], [mod_reg[1], 1 - mod_reg[1]])[0] for _ in mod_species]
reg_type = [random.choices(['a', 's'], [mod_reg[2], 1 - mod_reg[2]])[0] for _ in mod_species]
in_nodes_count[product1] -= (2 + mod_num)
in_nodes_count[product2] -= (2 + mod_num)
reaction_list.append([rt, [reactant1, reactant2], [product1, product2],
mod_species, reg_signs, reg_type])
reaction_list2.append([[reactant1, reactant2], [product1, product2]])
edge_list.append((reactant1, product1))
edge_list.append((reactant2, product1))
edge_list.append((reactant1, product2))
edge_list.append((reactant2, product2))
if edge_type == 'metabolic':
if sum(1 for each in in_nodes_count if each >= 2) < 2 \
and max(in_nodes_count) < 4:
pick_continued += 1
continue
sum_in = sum(in_nodes_count)
prob_in = [x / sum_in for x in in_nodes_count]
product1 = random.choices(in_nodes_list, prob_in)[0]
while in_nodes_count[product1] < 2:
product1 = random.choices(in_nodes_list, prob_in)[0]
in_nodes_count_copy = deepcopy(in_nodes_count)
in_nodes_count_copy[product1] -= 2
sum_in_copy = sum(in_nodes_count_copy)
prob_in_copy = [x / sum_in_copy for x in in_nodes_count_copy]
product2 = random.choices(in_nodes_list, prob_in_copy)[0]
while in_nodes_count_copy[product2] < 2:
product2 = random.choices(in_nodes_list, prob_in)[0]
reactant1 = random.choice(in_nodes_list)
reactant2 = random.choice(in_nodes_list)
if [[reactant1, reactant2], [product1, product2]] in reaction_list2:
pick_continued += 1
continue
if [[reactant2, reactant1], [product1, product2]] in reaction_list2:
pick_continued += 1
continue
if [[reactant1, reactant2], [product2, product1]] in reaction_list2:
pick_continued += 1
continue
if [[reactant2, reactant1], [product2, product1]] in reaction_list2:
pick_continued += 1
continue
if {reactant1, reactant2} == {product1, product2}:
pick_continued += 1
continue
# if reaction_type == 'metabolic' and {reactant1, reactant2} & {product1, product2}:
# pick_continued += 1
# continue
mod_species = random.sample(nodes_list, mod_num)
reg_signs = [random.choices([1, -1], [mod_reg[1], 1 - mod_reg[1]])[0] for _ in mod_species]
reg_type = [random.choices(['a', 's'], [mod_reg[2], 1 - mod_reg[2]])[0] for _ in mod_species]
if len({reactant1, reactant2, product1, product2}) == 4:
edge_list.append((reactant1, product1))
edge_list.append((reactant1, product2))
edge_list.append((reactant2, product1))
edge_list.append((reactant2, product2))
in_nodes_count[product1] -= 2
in_nodes_count[product2] -= 2
if reactant1 == reactant2 and len({reactant1, product1, product2}) == 3:
edge_list.append((reactant1, product1))
edge_list.append((reactant1, product2))
in_nodes_count[product1] -= 1
in_nodes_count[product2] -= 1
if reactant1 == reactant2 and product1 == product2 and reactant1 != product1:
edge_list.append((reactant1, product1))
in_nodes_count[product1] -= 1
if product1 == product2 and \
len({reactant1, reactant2, product1}) == len([reactant1, reactant2, product1]):
edge_list.append((reactant1, product1))
edge_list.append((reactant2, product1))
in_nodes_count[product1] -= 2
# ------------------------
if reactant1 == product1 and len({reactant1, reactant2, product1, product2}) == 3:
edge_list.append((reactant2, product2))
in_nodes_count[product2] -= 1
if reactant1 == product2 and len({reactant1, reactant2, product1, product2}) == 3:
edge_list.append((reactant2, product1))
in_nodes_count[product1] -= 1
if reactant2 == product1 and len({reactant1, reactant2, product1, product2}) == 3:
edge_list.append((reactant1, product2))
in_nodes_count[product2] -= 1
if reactant2 == product2 and len({reactant1, reactant2, product1, product2}) == 3:
edge_list.append((reactant1, product1))
in_nodes_count[product1] -= 1
# ------------------------
if reactant1 != reactant2 and len({reactant1, product1, product2}) == 1:
edge_list.append((reactant2, product2))
in_nodes_count[product2] -= 1
if reactant1 != reactant2 and len({reactant2, product1, product2}) == 1:
edge_list.append((reactant1, product1))
in_nodes_count[product1] -= 1
# ------------------------
if product1 != product2 and len({reactant1, reactant2, product1}) == 1:
edge_list.append((reactant2, product2))
in_nodes_count[product2] -= 1
if product1 != product2 and len({reactant1, reactant2, product2}) == 1:
edge_list.append((reactant1, product1))
in_nodes_count[product1] -= 1
reaction_list.append([rt, [reactant1, reactant2], [product1, product2],
mod_species, reg_signs, reg_type])
reaction_list2.append([[reactant1, reactant2], [product1, product2]])
if sum(in_nodes_count) == 0:
break
# -----------------------------------------------------------------
if bool(out_samples) and not bool(in_samples):
pick_continued = 0
while True:
if pick_continued == 1000:
return [None], [out_samples, in_samples, joint_samples]
if rxn_prob:
rt = _pick_reaction_type(rxn_prob)
else:
rt = _pick_reaction_type()
mod_num = 0
if mod_reg:
mod_num = random.choices([0, 1, 2, 3], mod_reg[0])[0]
# -----------------------------------------------------------------
if rt == TReactionType.UNIUNI:
if edge_type == 'generic':
if sum(out_nodes_count) < (1 + mod_num):
pick_continued += 1
continue
sum_out = sum(out_nodes_count)
prob_out = [x / sum_out for x in out_nodes_count]
reactant = random.choices(out_nodes_list, prob_out)[0]
product = random.choice(out_nodes_list)
if [[reactant], [product]] in reaction_list2 or reactant == product:
pick_continued += 1
continue
mod_species = []
if mod_num > 0:
out_nodes_count_copy = deepcopy(out_nodes_count)
out_nodes_count_copy[reactant] -= 1
sum_out_copy = sum(out_nodes_count_copy)
prob_out_copy = [x / sum_out_copy for x in out_nodes_count_copy]
while len(mod_species) < mod_num:
new_mod = random.choices(out_nodes_list, prob_out_copy)[0]
if new_mod not in mod_species:
mod_species.append(new_mod)
if len(mod_species) < mod_num:
out_nodes_count_copy[mod_species[-1]] -= 1
sum_out_copy = sum(out_nodes_count_copy)
prob_out_copy = [x / sum_out_copy for x in out_nodes_count_copy]
reg_signs = [random.choices([1, -1], [mod_reg[1], 1 - mod_reg[1]])[0] for _ in mod_species]
reg_type = [random.choices(['a', 's'], [mod_reg[2], 1 - mod_reg[2]])[0] for _ in mod_species]
out_nodes_count[reactant] -= 1
for each in mod_species:
out_nodes_count[each] -= 1
reaction_list.append([rt, [reactant], [product], mod_species, reg_signs, reg_type])
reaction_list2.append([[reactant], [product]])
edge_list.append((reactant, product))
if edge_type == 'metabolic':
sum_out = sum(out_nodes_count)
prob_out = [x / sum_out for x in out_nodes_count]
reactant = random.choices(out_nodes_list, prob_out)[0]
product = random.choice(out_nodes_list)
if [[reactant], [product]] in reaction_list2 or reactant == product:
pick_continued += 1
continue
mod_species = random.sample(nodes_list, mod_num)
reg_signs = [random.choices([1, -1], [mod_reg[1], 1 - mod_reg[1]])[0] for _ in mod_species]
reg_type = [random.choices(['a', 's'], [mod_reg[2], 1 - mod_reg[2]])[0] for _ in mod_species]
out_nodes_count[reactant] -= 1
reaction_list.append([rt, [reactant], [product], mod_species, reg_signs, reg_type])
reaction_list2.append([[reactant], [product]])
if reactant != product:
edge_list.append((reactant, product))
# -----------------------------------------------------------------
if rt == TReactionType.BIUNI:
if edge_type == 'generic':
if sum(out_nodes_count) < | |
pass
    def setRotatePivotTranslation(*args, **kwargs):
        """
        Sets the transform's rotate pivot translation.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def setRotation(*args, **kwargs):
        """
        Sets the transform's rotation using an MEulerRotation or MQuaternion.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def setRotationComponents(*args, **kwargs):
        """
        Sets the transform's rotation using the individual components of an
        MEulerRotation or MQuaternion.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def setRotationOrder(*args, **kwargs):
        """
        Sets the transform's rotation order.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def setScale(*args, **kwargs):
        """
        Sets the transform's scale components.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def setScalePivot(*args, **kwargs):
        """
        Sets the transform's scale pivot.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def setScalePivotTranslation(*args, **kwargs):
        """
        Sets the transform's scale pivot translation.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def setShear(*args, **kwargs):
        """
        Sets the transform's shear.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def setTransformation(*args, **kwargs):
        """
        Sets the transform's attribute values to represent the given
        transformation matrix.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def setTranslation(*args, **kwargs):
        """
        Sets the transform's translation.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def shear(*args, **kwargs):
        """
        Returns a list containing the transform's shear components.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def shearBy(*args, **kwargs):
        """
        Multiplies the transform's shear components by a sequence of three
        floats.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def transformation(*args, **kwargs):
        """
        Returns the transformation matrix represented by this transform.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def translateBy(*args, **kwargs):
        """
        Adds an MVector to the transform's translation.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def translation(*args, **kwargs):
        """
        Returns the transform's translation as an MVector.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
__new__ = None
kRotateMaxX = 13
kRotateMaxY = 15
kRotateMaxZ = 17
kRotateMinX = 12
kRotateMinY = 14
kRotateMinZ = 16
kScaleMaxX = 1
kScaleMaxY = 3
kScaleMaxZ = 5
kScaleMinX = 0
kScaleMinY = 2
kScaleMinZ = 4
kShearMaxXY = 7
kShearMaxXZ = 9
kShearMaxYZ = 11
kShearMinXY = 6
kShearMinXZ = 8
kShearMinYZ = 10
kTranslateMaxX = 19
kTranslateMaxY = 21
kTranslateMaxZ = 23
kTranslateMinX = 18
kTranslateMinY = 20
kTranslateMinZ = 22
class MFnNurbsCurveData(MFnGeometryData):
    """
    MFnNurbsCurveData allows the creation and manipulation of Nurbs Curve
    data objects for use in the dependency graph.

    __init__()
        Initializes a new, empty MFnNurbsCurveData object

    __init__(MObject)
        Initializes a new MFnNurbsCurveData function set, attached
        to the specified object.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    def create(*args, **kwargs):
        """
        create() -> MObject

        Creates a new nurbs curve data object, attaches it to this function set
        and returns an MObject which references it.

        Stub: body is ``pass``; the real behavior is documented in Maya's
        OpenMaya API reference.
        """
        pass
    # Stub placeholder: in the real API, instances are constructed by Maya's
    # compiled extension rather than by calling this class directly.
    __new__ = None
class MFnMesh(MFnDagNode):
"""
Function set for operation on meshes (polygonal surfaces).
__init__()
Initializes a new, empty MFnMesh object.
__init__(MDagPath path)
Initializes a new MFnMesh object and attaches it to the DAG path
of a mesh node.
__init__(MObject nodeOrData)
Initializes a new MFnMesh object and attaches it to a mesh
node or mesh data object.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addHoles(*args, **kwargs):
"""
addHoles(faceIndex, vertices, loopCounts, mergeVertices=True, pointTolerance=kPointTolerance) -> self
Adds holes to a mesh polygon.
loopCounts is an array of vertex counts.
The first entry gives the count of vertices that make up the
first hole to add to the polygon (using that many entries in vertexArray). The following
entries in loopCounts give the count of vertices that make up each remaining hole,
using the following entries in vertexArray.
Therefore the sum of the entries of loopCounts should equal the total
length of vertexArray.
Note that holes should normally be specified with the opposite winding order
to the exterior polygon.
"""
pass
def addPolygon(*args, **kwargs):
"""
addPolygon(vertices, mergeVertices=True, pointTolerance=kPointTolerance, loopCounts=None) -> faceId
Adds a new polygon to the mesh, returning the index of the new
polygon. If mergeVertices is True and a new vertex is within
pointTolerance of an existing one, then they are 'merged' by reusing
the existing vertex and discarding the new one.
loopCounts allows for polygons with holes. If supplied, it is an array of integer vertex
counts. The first entry gives the count of vertices that make up the
exterior of the polygon (using that many entries in vertexArray). The following
entries in loopCounts give the count of vertices that make up each hole,
using the following entries in vertexArray.
Therefore the sum of the entries of loopCounts should equal the total
length of vertexArray.
Note that holes should normally be specified with the opposite winding order
to the exterior polygon.
"""
pass
def allIntersections(*args, **kwargs):
"""
allIntersections(raySource, rayDirection, space, maxParam,
testBothDirections, faceIds=None, triIds=None, idsSorted=False,
accelParams=None, tolerance=kIntersectTolerance, sortHits=False)
-> (hitPoints, hitRayParams, hitFaces, hitTriangles, hitBary1s, hitBary2s)
Finds all intersection of a ray starting at raySource and travelling
in rayDirection with the mesh.
If faceIds is specified, then only those faces will be considered
for intersection. If both faceIds and triIds are given, then the
triIds will be interpreted as face-relative and each pair of entries
will be taken as a (face, triangle) pair to be considered for
intersection. Thus, the face-triangle pair (10, 0) means the first
triangle on face 10. If neither faceIds nor triIds is given, then
all face-triangles in the mesh will be considered.
The maxParam and testBothDirections flags can be used to control the
radius of the search around the raySource point.
The search proceeds by testing all applicable face-triangles looking
for intersections. If the accelParams parameter is given then the
mesh builds an intersection acceleration structure based on it. This
acceleration structure is used to speed up the intersection
operation, sometimes by a factor of several hundred over the non-
accelerated case. Once created, the acceleration structure is cached
and will be reused the next time this method (or anyIntersection()
or allIntersections()) is called with an identically-configured
MMeshIsectAccelParams object. If a different MMeshIsectAccelParams
object is used, then the acceleration structure will be deleted and
re-created according to the new settings. Once created, the
acceleration structure will persist until either the object is
destroyed (or rebuilt by a construction history operation), or the
freeCachedIntersectionAccelerator() method is called. The
cachedIntersectionAcceleratorInfo() and
globalIntersectionAcceleratorsInfo() methods provide useful
information about the resource usage of individual acceleration
structures, and of all such structures in the system.
If the ray hits the mesh, the details of the intersection points
will be returned as a tuple containing the following:
* hitPoints (MFloatPointArray) - coordinates of the points hit, in
the space specified by the caller.* hitRayParams (MFloatArray) - parametric distances along the ray to
the points hit.* hitFaces (MIntArray) - IDs of the faces hit
* hitTriangles (MIntArray) - face-relative IDs of the triangles hit
* hitBary1s (MFloatArray) - first barycentric coordinate of the
points hit. If the vertices of the hitTriangle are (v1, v2, v3)
then the barycentric coordinates are such that the hitPoint =
(*hitBary1)*v1 + (*hitBary2)*v2 + (1-*hitBary1-*hitBary2)*v3.* hitBary2s (MFloatArray) - second barycentric coordinate of the
points hit.
If no point was hit then the arrays will all be empty.
"""
pass
def anyIntersection(*args, **kwargs):
"""
anyIntersection(raySource, rayDirection, space, maxParam,
testBothDirections, faceIds=None, triIds=None, idsSorted=False,
accelParams=None, tolerance=kIntersectTolerance)
-> (hitPoint, hitRayParam, hitFace, hitTriangle, hitBary1, hitBary2)
Finds any intersection of a ray starting at raySource and travelling
in rayDirection with the mesh.
If faceIds is specified, then only those faces will be considered
for intersection. If both faceIds and triIds are given, then the
triIds will be | |
<reponame>yyht/electra_electric
"""Create input function for estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import collections
import numpy as np
import tensorflow as tf
tf.disable_v2_behavior()
def check_tf_version():
    """Return True for TF 2.x, or a 1.x release with minor version >= 15 (e.g. 1.15)."""
    version = tf.__version__
    print("==tf version==", version)
    parts = version.split(".")
    # `or` short-circuits, so parts[1] is only touched for pre-2.x versions.
    return int(parts[0]) >= 2 or int(parts[1]) >= 15
# if check_tf_version():
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
from pretrain import pretrain_data
from pretrain import pretrain_helpers
from model.vqvae_utils import tfidf_utils
def check_tf_version():
    """Return True for TF 2.x, or a 1.x release with minor version >= 15 (e.g. 1.15)."""
    # NOTE(review): exact duplicate of the identical helper defined earlier in
    # this module; the redefinition harmlessly rebinds the same name —
    # consider removing one copy.
    version = tf.__version__
    print("==tf version==", version)
    if int(version.split(".")[0]) >= 2 or int(version.split(".")[1]) >= 15:
        return True
    else:
        return False
# Special-token surface forms mapped to the FLAGS attribute names that will
# hold their vocabulary ids (populated by setup_special_ids below).
special_symbols_mapping = collections.OrderedDict([
    ("<unk>", "unk_id"),
    ("<s>", "bos_id"),
    ("</s>", "eos_id"),
    ("<cls>", "cls_id"),
    ("<sep>", "sep_id"),
    ("<pad>", "pad_id"),
    ("<mask>", "mask_id"),
    ("<eod>", "eod_id"),
    ("<eop>", "eop_id")
])
def _get_boundary_indices(tokenizer, seg, reverse=False):
"""Get all boundary indices of whole words."""
seg_len = len(seg)
if reverse:
seg = np.flip(seg, 0)
boundary_indices = []
for idx, token in enumerate(seg):
if tokenizer.is_start_token(token) and not tokenizer.is_func_token(token):
boundary_indices.append(idx)
boundary_indices.append(seg_len)
if reverse:
boundary_indices = [seg_len - idx for idx in boundary_indices]
return boundary_indices
def setup_special_ids(FLAGS, tokenizer):
    """Record vocab size and special-token ids on FLAGS.

    Symbols missing from the tokenizer's vocabulary are skipped with a warning.
    """
    FLAGS.vocab_size = tokenizer.get_vocab_size()
    tf.logging.info("Set vocab_size: %d.", FLAGS.vocab_size)
    for sym, sym_id_str in special_symbols_mapping.items():
        try:
            sym_id = tokenizer.get_token_id(sym)
        except KeyError:
            tf.logging.warning("Skip %s: not found in tokenizer's vocab.", sym)
            continue
        setattr(FLAGS, sym_id_str, sym_id)
        tf.logging.info("Set %s to %d.", sym_id_str, sym_id)
def format_filename(prefix, suffix, seq_len, uncased):
    """Build the canonical name of a tfrecord/meta file."""
    seq_str = "seq-{}".format(seq_len)
    case_str = "uncased" if uncased else "cased"
    return "{}.{}.{}.{}".format(prefix, seq_str, case_str, suffix)
def convert_example(example, use_bfloat16=False):
    """In place: densify sparse features, cast int64 -> int32, and
    (when use_bfloat16 is set) float32 -> bfloat16."""
    for key, val in list(example.items()):
        if tf.keras.backend.is_sparse(val):
            val = tf.sparse.to_dense(val)
        if val.dtype == tf.int64:
            val = tf.cast(val, tf.int32)
        if use_bfloat16 and val.dtype == tf.float32:
            val = tf.cast(val, tf.bfloat16)
        example[key] = val
def sparse_to_dense(example):
    """Replace every sparse feature in `example` with its dense form; return `example`."""
    for key, val in list(example.items()):
        if tf.keras.backend.is_sparse(val):
            example[key] = tf.sparse.to_dense(val)
    return example
def _idx_pair_to_mask(FLAGS, beg_indices, end_indices, inputs, tgt_len, num_predict):
    """Turn beg and end indices into actual mask.

    Returns (is_target, target_mask): a bool vector and its float 0/1 twin of
    shape [tgt_len], marking at most num_predict positions covered by the
    [beg, end) spans.
    """
    # Positions holding functional tokens (SEP/CLS) must never become targets.
    non_func_mask = tf.logical_and(
        tf.not_equal(inputs, FLAGS.sep_id),
        tf.not_equal(inputs, FLAGS.cls_id))
    # Position index per slot; functional positions get -1 so they can never
    # fall inside a [beg, end) span in the comparison below.
    all_indices = tf.where(
        non_func_mask,
        tf.range(tgt_len, dtype=tf.int32),
        tf.constant(-1, shape=[tgt_len], dtype=tf.int32))
    # candidate_matrix[i, j] == 1.0 iff position j lies inside span i.
    # NOTE(review): all_indices is int32 while some callers build beg/end
    # indices as int64 — confirm the dtypes agree at every call site.
    candidate_matrix = tf.cast(
        tf.logical_and(
            all_indices[None, :] >= beg_indices[:, None],
            all_indices[None, :] < end_indices[:, None]),
        tf.float32)
    # Running count of candidate positions across spans (row-major order).
    cumsum_matrix = tf.reshape(
        tf.cumsum(tf.reshape(candidate_matrix, [-1])),
        [-1, tgt_len])
    # Keep candidates only up to the num_predict-th one.
    masked_matrix = tf.cast(cumsum_matrix <= tf.cast(num_predict, dtype=cumsum_matrix.dtype), tf.float32)
    # Collapse the per-span rows into a single position mask.
    target_mask = tf.reduce_sum(candidate_matrix * masked_matrix, axis=0)
    is_target = tf.cast(target_mask, tf.bool)
    return is_target, target_mask
def _word_span_mask(FLAGS, inputs, tgt_len, num_predict, boundary, stride=1):
    """Sample whole-word spans as prediction targets.

    Span lengths (in words) are drawn from a zipf-like distribution over
    [FLAGS.min_word, FLAGS.max_word], each span is placed with a random
    amount of left context inside its chunk, and the word indices are then
    mapped through `boundary` (word-start token indices, see
    _get_boundary_indices) before being converted to a position mask.

    Returns (is_target, target_mask) from _idx_pair_to_mask.
    """
    # Note: 1.2 is roughly the token-to-word ratio
    input_mask = tf.cast(tf.not_equal(inputs, FLAGS.pad_id), dtype=tf.int32)
    num_tokens = tf.cast(tf.reduce_sum(input_mask, -1), tf.int32)
    num_predict = tf.cast(num_predict, tf.int32)
    non_pad_len = num_tokens + 1 - stride
    # Average word budget per sampled span (span plus surrounding context).
    chunk_len_fp = tf.cast(non_pad_len / num_predict / 1.2, dtype=tf.float32)
    round_to_int = lambda x: tf.cast(tf.round(x), tf.int64)
    # Sample span lengths from a zipf distribution
    span_len_seq = np.arange(FLAGS.min_word, FLAGS.max_word + 1)
    probs = np.array([1.0 / (i + 1) for i in span_len_seq])
    probs /= np.sum(probs)
    logits = tf.constant(np.log(probs), dtype=tf.float32)
    # tf.multinomial was renamed tf.random.categorical in newer TF releases.
    if check_tf_version():
        span_lens = tf.random.categorical(
            logits=logits[None],
            num_samples=num_predict,
            dtype=tf.int64,
        )[0] + FLAGS.min_word
    else:
        span_lens = tf.multinomial(
            logits=logits[None],
            num_samples=num_predict,
            output_dtype=tf.int64,
        )[0] + FLAGS.min_word
    # Sample the ratio [0.0, 1.0) of left context lengths
    span_lens_fp = tf.cast(span_lens, tf.float32)
    left_ratio = tf.random.uniform(shape=[num_predict], minval=0.0, maxval=1.0)
    left_ctx_len = left_ratio * span_lens_fp * (chunk_len_fp - 1)
    left_ctx_len = round_to_int(left_ctx_len)
    right_offset = round_to_int(span_lens_fp * chunk_len_fp) - left_ctx_len
    beg_indices = (tf.cumsum(left_ctx_len) +
                   tf.cumsum(right_offset, exclusive=True))
    # BUGFIX: keep the whole computation in int64. beg_indices is int64 here
    # (round_to_int casts to int64), so the previous
    # `tf.cast(span_lens, dtype=tf.int32)` produced an int64 + int32 add,
    # which TensorFlow rejects. span_lens is already int64.
    end_indices = beg_indices + span_lens
    # Remove out of range `boundary` indices
    max_boundary_index = tf.cast(tf.shape(boundary)[0] - 1, tf.int64)
    valid_idx_mask = end_indices < max_boundary_index
    beg_indices = tf.boolean_mask(beg_indices, valid_idx_mask)
    end_indices = tf.boolean_mask(end_indices, valid_idx_mask)
    # Map word indices to token positions.
    beg_indices = tf.gather(boundary, beg_indices)
    end_indices = tf.gather(boundary, end_indices)
    # Shuffle valid `position` indices
    num_valid = tf.cast(tf.shape(beg_indices)[0], tf.int64)
    order = tf.random.shuffle(tf.range(num_valid, dtype=tf.int64))
    beg_indices = tf.gather(beg_indices, order)
    end_indices = tf.gather(end_indices, order)
    return _idx_pair_to_mask(FLAGS, beg_indices, end_indices, inputs, tgt_len,
                             num_predict)
def _token_span_mask(FLAGS, inputs, tgt_len, num_predict, stride=1):
    """Sample token spans as prediction targets.

    Span lengths are drawn from a truncated geometric distribution over
    [FLAGS.min_tok, FLAGS.max_tok]; each span is placed with a random amount
    of left context inside its chunk. Returns (is_target, target_mask) from
    _idx_pair_to_mask.
    """
    # non_pad_len = tgt_len + 1 - stride
    input_mask = tf.cast(tf.not_equal(inputs, FLAGS.pad_id), dtype=tf.int32)
    num_tokens = tf.cast(tf.reduce_sum(input_mask, -1), tf.int32)
    num_predict = tf.cast(num_predict, tf.int32)
    non_pad_len = num_tokens + 1 - stride
    # Average token budget per sampled span (span plus surrounding context).
    chunk_len_fp = tf.cast(non_pad_len / num_predict, dtype=tf.float32)
    round_to_int = lambda x: tf.cast(tf.round(x), tf.int32)
    # Sample span lengths from a zipf distribution
    # span_len_seq = np.arange(FLAGS.min_tok, FLAGS.max_tok + 1)
    # Geometric distribution with success probability FLAGS.p, shifted to
    # start at min_tok (shorter spans are more likely).
    probs = [FLAGS.p * (1-FLAGS.p)**(i - FLAGS.min_tok) for i in range(FLAGS.min_tok, FLAGS.max_tok+1)]
    # probs = [x / (sum(len_distrib)) for x in len_distrib]
    # probs = np.array([1.0 / (i + 1) for i in span_len_seq])
    probs /= np.sum(probs)
    tf.logging.info("** sampling probs **")
    tf.logging.info(probs)
    logits = tf.constant(np.log(probs), dtype=tf.float32)
    # tf.multinomial was renamed tf.random.categorical in newer TF releases.
    if check_tf_version():
        span_lens = tf.random.categorical(
            logits=logits[None],
            num_samples=num_predict,
            dtype=tf.int64,
        )[0] + FLAGS.min_tok
    else:
        span_lens = tf.multinomial(
            logits=logits[None],
            num_samples=num_predict,
            output_dtype=tf.int64,
        )[0] + FLAGS.min_tok
    # Sample the ratio [0.0, 1.0) of left context lengths
    span_lens_fp = tf.cast(span_lens, tf.float32)
    left_ratio = tf.random.uniform(shape=[num_predict], minval=0.0, maxval=1.0)
    left_ctx_len = left_ratio * span_lens_fp * (chunk_len_fp - 1)
    left_ctx_len = round_to_int(left_ctx_len)
    # Compute the offset from left start to the right end
    right_offset = round_to_int(span_lens_fp * chunk_len_fp) - left_ctx_len
    # Get the actual begin and end indices
    beg_indices = (tf.cumsum(left_ctx_len) +
                   tf.cumsum(right_offset, exclusive=True))
    end_indices = beg_indices + tf.cast(span_lens, dtype=tf.int32)
    # Remove out of range indices
    valid_idx_mask = end_indices < non_pad_len
    beg_indices = tf.boolean_mask(beg_indices, valid_idx_mask)
    end_indices = tf.boolean_mask(end_indices, valid_idx_mask)
    # Shuffle valid indices
    num_valid = tf.cast(tf.shape(beg_indices)[0], tf.int64)
    order = tf.random.shuffle(tf.range(num_valid, dtype=tf.int64))
    beg_indices = tf.gather(beg_indices, order)
    end_indices = tf.gather(end_indices, order)
    return _idx_pair_to_mask(FLAGS, beg_indices, end_indices, inputs, tgt_len,
                             num_predict)
def _whole_word_mask(FLAGS, inputs, tgt_len, num_predict, boundary):
    """Sample whole words as prediction targets.

    Consecutive entries of `boundary` delimit one word each; up to
    num_predict randomly chosen words become target spans.
    """
    # Pair each word start with the next boundary: rows of (beg, end).
    word_spans = tf.stack([boundary[:-1], boundary[1:]], axis=1)
    sampled = tf.random.shuffle(word_spans)[:num_predict]
    return _idx_pair_to_mask(FLAGS, sampled[:, 0], sampled[:, 1], inputs,
                             tgt_len, num_predict)
def _single_token_mask(FLAGS, inputs, tgt_len, num_predict, exclude_mask=None):
    """Sample individual tokens as prediction targets.

    exclude_mask, when given, marks positions that must not be sampled
    (used by _online_sample_masks to avoid re-picking span targets).
    Returns (is_target, target_mask) of shape [tgt_len].
    """
    # Functional positions (CLS/SEP/PAD) are never eligible.
    func_mask = tf.equal(inputs, FLAGS.cls_id)
    func_mask = tf.logical_or(func_mask, tf.equal(inputs, FLAGS.sep_id))
    func_mask = tf.logical_or(func_mask, tf.equal(inputs, FLAGS.pad_id))
    if exclude_mask is None:
        exclude_mask = func_mask
    else:
        exclude_mask = tf.logical_or(func_mask, exclude_mask)
    candidate_mask = tf.logical_not(exclude_mask)
    # NOTE(review): input_mask and num_tokens are computed but never used in
    # this function — candidates for removal.
    input_mask = tf.cast(tf.not_equal(inputs, FLAGS.pad_id), dtype=tf.int64)
    num_tokens = tf.cast(tf.reduce_sum(input_mask, -1), tf.int64)
    all_indices = tf.range(tgt_len, dtype=tf.int64)
    candidate_indices = tf.boolean_mask(all_indices, candidate_mask)
    # Randomly pick (up to) num_predict candidate positions.
    masked_pos = tf.random.shuffle(candidate_indices)
    if check_tf_version():
        masked_pos = tf.sort(masked_pos[:num_predict])
    else:
        masked_pos = tf.contrib.framework.sort(masked_pos[:num_predict])
    # Scatter the picked positions into a dense 0/1 vector of length tgt_len.
    # tf.sparse_to_dense requires sorted indices — hence the sort above.
    # NOTE(review): tf.sparse_to_dense is deprecated in newer TF releases.
    target_mask = tf.sparse_to_dense(
        sparse_indices=masked_pos,
        output_shape=[tgt_len],
        sparse_values=1.0,
        default_value=0.0)
    is_target = tf.cast(target_mask, tf.bool)
    return is_target, target_mask
def _online_sample_masks(FLAGS,
                         inputs, tgt_len, num_predict, boundary=None, stride=1):
    """Sample target positions to predict.

    Dispatches on FLAGS.sample_strategy ('single_token', 'whole_word',
    'token_span' or 'word_span'). num_predict is capped at mask_prob times
    the number of non-pad tokens; span strategies are topped up with single
    tokens when they selected fewer than num_predict positions.

    Returns (is_target, target_mask) of shape [tgt_len].
    """
    # Set the number of tokens to mask out per example
    input_mask = tf.cast(tf.not_equal(inputs, FLAGS.pad_id), dtype=tf.int64)
    num_tokens = tf.cast(tf.reduce_sum(input_mask, -1), tf.float32)
    # global_step = tf.train.get_or_create_global_step()
    # mask_prob = tf.train.polynomial_decay(
    #                         FLAGS.initial_ratio,
    #                         global_step,
    #                         int(FLAGS.num_train_steps*0.1),
    #                         end_learning_rate=FLAGS.final_ratio,
    #                         power=1.0,
    #                         cycle=True)
    mask_prob = FLAGS.final_ratio
    tf.logging.info("mask_prob: `%s`.", mask_prob)
    # At least one target, at most mask_prob of the real (non-pad) tokens.
    num_predict = tf.maximum(1, tf.minimum(
        num_predict, tf.cast(tf.round(num_tokens * mask_prob), tf.int32)))
    num_predict = tf.cast(num_predict, tf.int32)
    tf.logging.info("Online sample with strategy: `%s`.", FLAGS.sample_strategy)
    if FLAGS.sample_strategy == "single_token":
        # BUGFIX: forward FLAGS — it was previously omitted, which shifted
        # every argument by one (inputs landed in the FLAGS parameter).
        return _single_token_mask(FLAGS, inputs, tgt_len, num_predict)
    else:
        if FLAGS.sample_strategy == "whole_word":
            assert boundary is not None, "whole word sampling requires `boundary`"
            is_target, target_mask = _whole_word_mask(FLAGS, inputs, tgt_len, num_predict,
                                                      boundary)
        elif FLAGS.sample_strategy == "token_span":
            is_target, target_mask = _token_span_mask(FLAGS, inputs, tgt_len, num_predict,
                                                      stride=stride)
        elif FLAGS.sample_strategy == "word_span":
            assert boundary is not None, "word span sampling requires `boundary`"
            is_target, target_mask = _word_span_mask(FLAGS, inputs, tgt_len, num_predict,
                                                     boundary, stride=stride)
        else:
            raise NotImplementedError
        # Padding positions are never targets.
        valid_mask = tf.not_equal(inputs, FLAGS.pad_id)
        is_target = tf.logical_and(valid_mask, is_target)
        target_mask = target_mask * tf.cast(valid_mask, tf.float32)
        # Fill in single tokens if not full
        cur_num_masked = tf.reduce_sum(tf.cast(is_target, tf.int32))
        extra_mask, extra_tgt_mask = _single_token_mask(FLAGS,
            inputs, tgt_len, num_predict - cur_num_masked, is_target)
        return tf.logical_or(is_target, extra_mask), target_mask + extra_tgt_mask
def discrepancy_correction(FLAGS, inputs, is_target, tgt_len):
    """Construct the masked input.

    For each target position one uniform draw in [0, 1) decides its fate:
        [0, leak_ratio)                      -> keep the original token
        [leak_ratio, leak_ratio+rand_ratio)  -> replace with a random token
        elsewhere                            -> replace with <mask>
    Non-target positions always keep their original token.
    """
    random_p = tf.random.uniform([tgt_len], maxval=1.0)
    mask_ids = tf.constant(FLAGS.mask_id, dtype=inputs.dtype, shape=[tgt_len])
    # First mask everything above the leak threshold; the random-replacement
    # band is carved out of this region below.
    change_to_mask = tf.logical_and(random_p > FLAGS.leak_ratio, is_target)
    masked_ids = tf.where(change_to_mask, mask_ids, inputs)
    if FLAGS.rand_ratio > 0:
        change_to_rand = tf.logical_and(
            FLAGS.leak_ratio < random_p,
            random_p < FLAGS.leak_ratio + FLAGS.rand_ratio)
        change_to_rand = tf.logical_and(change_to_rand, is_target)
        # Replacement ids drawn uniformly from the whole vocabulary.
        rand_ids = tf.random.uniform([tgt_len], maxval=FLAGS.vocab_size,
                                     dtype=masked_ids.dtype)
        masked_ids = tf.where(change_to_rand, rand_ids, masked_ids)
    return masked_ids
def create_target_mapping(
example, is_target, seq_len, num_predict, **kwargs):
"""Create target mapping and retrieve the corresponding kwargs."""
if num_predict is not None:
# Get masked indices
indices = tf.range(seq_len, dtype=tf.int64)
indices = tf.boolean_mask(indices, is_target)
# Handle the case that actual_num_predict < num_predict
actual_num_predict = tf.shape(indices)[0]
pad_len = num_predict - actual_num_predict
# Create target mapping
target_mapping = tf.one_hot(indices, seq_len, dtype=tf.float32)
paddings = tf.zeros([pad_len, seq_len], dtype=target_mapping.dtype)
target_mapping = tf.concat([target_mapping, paddings], axis=0)
example["target_mapping"] = tf.reshape(target_mapping,
[num_predict, seq_len])
# Handle fields in kwargs
for k, v | |
"--async-msg", action = "store_true", dest = srv_options['asyncmsg']['des'],
default = srv_options['asyncmsg']['def'], help = srv_options['asyncmsg']['help'])
server_parser.add_argument("-V", "--loglevel", action = "store", dest = srv_options['llevel']['des'], choices = srv_options['llevel']['choi'],
default = srv_options['llevel']['def'], help = srv_options['llevel']['help'], type = str)
server_parser.add_argument("-F", "--logfile", nargs = "+", action = "store", dest = srv_options['lfile']['des'],
default = srv_options['lfile']['def'], help = srv_options['lfile']['help'], type = str)
server_parser.add_argument("-S", "--logsize", action = "store", dest = srv_options['lsize']['des'], default = srv_options['lsize']['def'],
help = srv_options['lsize']['help'], type = float)
server_parser.add_argument("-h", "--help", action = "help", help = "show this help message and exit")
## Daemon (Etrigan) parsing.
daemon_parser = KmsParser(description = "daemon options inherited from Etrigan", add_help = False)
daemon_subparser = daemon_parser.add_subparsers(dest = "mode")
etrigan_parser = daemon_subparser.add_parser("etrigan", add_help = False)
etrigan_parser.add_argument("-g", "--gui", action = "store_const", dest = 'gui', const = True, default = False,
help = "Enable py-kms GUI usage.")
etrigan_parser = Etrigan_parser(parser = etrigan_parser)
## Connection parsing.
connection_parser = KmsParser(description = "connect options", add_help = False)
connection_subparser = connection_parser.add_subparsers(dest = "mode")
connect_parser = connection_subparser.add_parser("connect", add_help = False)
connect_parser.add_argument("-n", "--listen", action = "append", dest = srv_options['listen']['des'], default = [],
help = srv_options['listen']['help'], type = str)
connect_parser.add_argument("-b", "--backlog", action = "append", dest = srv_options['backlog']['des'], default = [],
help = srv_options['backlog']['help'], type = int)
connect_parser.add_argument("-u", "--no-reuse", action = "append_const", dest = srv_options['reuse']['des'], const = False, default = [],
help = srv_options['reuse']['help'])
connect_parser.add_argument("-d", "--dual", action = "store_true", dest = srv_options['dual']['des'], default = srv_options['dual']['def'],
help = srv_options['dual']['help'])
try:
userarg = sys.argv[1:]
# Run help.
if any(arg in ["-h", "--help"] for arg in userarg):
KmsParserHelp().printer(parsers = [server_parser, (daemon_parser, etrigan_parser),
(connection_parser, connect_parser)])
# Get stored arguments.
pykmssrv_zeroarg, pykmssrv_onearg = kms_parser_get(server_parser)
etrigan_zeroarg, etrigan_onearg = kms_parser_get(etrigan_parser)
connect_zeroarg, connect_onearg = kms_parser_get(connect_parser)
subdict = {'etrigan' : (etrigan_zeroarg, etrigan_onearg, daemon_parser.parse_args),
'connect' : (connect_zeroarg, connect_onearg, connection_parser.parse_args)
}
subpars = list(subdict.keys())
pykmssrv_zeroarg += subpars # add subparsers
exclude_kms = ['-F', '--logfile']
exclude_dup = ['-n', '--listen', '-b', '--backlog', '-u', '--no-reuse']
# Set defaults for server dict config.
# example case:
# python3 pykms_Server.py
srv_config.update(vars(server_parser.parse_args([])))
subindx = sorted([(userarg.index(pars), pars) for pars in subpars if pars in userarg], key = lambda x: x[0])
if subindx:
# Set `daemon options` and/or `connect options` for server dict config.
# example cases:
# 1 python3 pykms_Server.py [172.16.17.32] [1234] [--pykms_optionals] etrigan daemon_positional [--daemon_optionals] \
# connect [--connect_optionals]
#
# 2 python3 pykms_Server.py [172.16.17.32] [1234] [--pykms_optionals] connect [--connect_optionals] etrigan \
# daemon_positional [--daemon_optionals]
#
# 3 python3 pykms_Server.py [172.16.17.32] [1234] [--pykms_optionals] etrigan daemon_positional [--daemon_optionals]
# 4 python3 pykms_Server.py [172.16.17.32] [1234] [--pykms_optionals] connect [--connect_optionals]
first = subindx[0][0]
# initial.
kms_parser_check_optionals(userarg[0 : first], pykmssrv_zeroarg, pykmssrv_onearg, exclude_opt_len = exclude_kms)
kms_parser_check_positionals(srv_config, server_parser.parse_args, arguments = userarg[0 : first], force_parse = True)
# middle.
for i in range(len(subindx) - 1):
posi, posf, typ = subindx[i][0], subindx[i + 1][0], subindx[i][1]
kms_parser_check_optionals(userarg[posi : posf], subdict[typ][0], subdict[typ][1], msg = 'optional %s' %typ,
exclude_opt_dup = (exclude_dup if typ == 'connect' else []))
kms_parser_check_positionals(srv_config, subdict[typ][2], arguments = userarg[posi : posf], msg = 'positional %s' %typ)
# final.
pos, typ = subindx[-1]
kms_parser_check_optionals(userarg[pos:], subdict[typ][0], subdict[typ][1], msg = 'optional %s' %typ,
exclude_opt_dup = (exclude_dup if typ == 'connect' else []))
kms_parser_check_positionals(srv_config, subdict[typ][2], arguments = userarg[pos:], msg = 'positional %s' %typ)
if len(subindx) > 1:
srv_config['mode'] = '+'.join(elem[1] for elem in subindx)
else:
# Update `pykms options` for server dict config.
# example case:
# 5 python3 pykms_Server.py [172.16.17.32] [1234] [--pykms_optionals]
kms_parser_check_optionals(userarg, pykmssrv_zeroarg, pykmssrv_onearg, exclude_opt_len = exclude_kms)
kms_parser_check_positionals(srv_config, server_parser.parse_args)
kms_parser_check_connect(srv_config, srv_options, userarg, connect_zeroarg, connect_onearg)
except KmsParserException as e:
pretty_printer(put_text = "{reverse}{red}{bold}%s. Exiting...{end}" %str(e), to_exit = True)
class Etrigan_Check(Etrigan_check):
    """Etrigan option checker wired to py-kms colored error output."""
    def emit_opt_err(self, msg):
        # Report the option error through the colored printer and terminate.
        text = "{reverse}{red}{bold}%s{end}" %msg
        pretty_printer(put_text = text, to_exit = True)
class Etrigan(Etrigan):
    # Subclasses the imported Etrigan daemon class and rebinds its name,
    # overriding the message hooks to use py-kms colored output.
    def emit_message(self, message, to_exit = False):
        # Print unless muted; exit afterwards when requested (even if muted).
        if not self.mute:
            pretty_printer(put_text = "{reverse}{green}{bold}%s{end}" %message)
        if to_exit:
            sys.exit(0)
    def emit_error(self, message, to_exit = True):
        # NOTE(review): the `to_exit` parameter is ignored — pretty_printer is
        # always called with to_exit=True, and nothing exits when muted.
        # Confirm whether a muted fatal error should still terminate
        # (compare emit_message above, which honors to_exit unconditionally).
        if not self.mute:
            pretty_printer(put_text = "{reverse}{red}{bold}%s{end}" %message, to_exit = True)
def server_daemon():
    """Run the KMS server as an Etrigan daemon (only in 'etrigan' mode).

    On 'start' the parsed options are pickled to a temp file so that a later
    'stop'/'status'/'restart' invocation can restore the same configuration.
    """
    if 'etrigan' in srv_config.values():
        # Pickled copy of the parsed options, shared across daemon invocations.
        path = os.path.join(gettempdir(), 'pykms_config.pickle')
        if srv_config['operation'] in ['stop', 'restart', 'status'] and len(sys.argv[1:]) > 2:
            pretty_printer(put_text = "{reverse}{red}{bold}too much arguments with etrigan '%s'. Exiting...{end}" %srv_config['operation'],
                           to_exit = True)
        # Check file arguments.
        Etrigan_Check().checkfile(srv_config['etriganpid'], '--etrigan-pid', '.pid')
        Etrigan_Check().checkfile(srv_config['etriganlog'], '--etrigan-log', '.log')
        if srv_config['gui']:
            # GUI mode keeps the live options; nothing to save or restore.
            pass
        else:
            if srv_config['operation'] == 'start':
                # Save options for future stop/status/restart invocations.
                with open(path, 'wb') as file:
                    pickle.dump(srv_config, file, protocol = pickle.HIGHEST_PROTOCOL)
            elif srv_config['operation'] in ['stop', 'status', 'restart']:
                # Restore the saved options (everything except the operation).
                with open(path, 'rb') as file:
                    old_srv_config = pickle.load(file)
                old_srv_config = {x: old_srv_config[x] for x in old_srv_config if x not in ['operation']}
                srv_config.update(old_srv_config)
        serverdaemon = Etrigan(srv_config['etriganpid'],
                               logfile = srv_config['etriganlog'], loglevel = srv_config['etriganlev'],
                               mute = srv_config['etriganmute'], pause_loop = None)
        if srv_config['operation'] in ['start', 'restart']:
            serverdaemon.want_quit = True
            if srv_config['gui']:
                serverdaemon.funcs_to_daemonize = [server_with_gui]
            else:
                # Headless: daemonize start+join; clean() stops it on daemon exit.
                server_without_gui = ServerWithoutGui()
                serverdaemon.funcs_to_daemonize = [server_without_gui.start, server_without_gui.join]
                # NOTE(review): presumably selects which of start()'s return
                # values is handed to clean() on stop — confirm against Etrigan.
                indx_for_clean = lambda: (0, )
                serverdaemon.quit_on_stop = [indx_for_clean, server_without_gui.clean]
        elif srv_config['operation'] == 'stop':
            # Saved options are no longer needed once the daemon is stopped.
            os.remove(path)
        Etrigan_job(srv_config['operation'], serverdaemon)
def server_check():
    """Validate and normalize the parsed server options in srv_config.

    Exits via pretty_printer(to_exit=True) on any invalid value. On success
    srv_config['hwid'] holds the HWID as raw bytes and srv_config['listen']
    holds (address, port) tuples.
    """
    # Setup and some checks.
    check_setup(srv_config, srv_options, loggersrv, where = "srv")
    # Random HWID.
    if srv_config['hwid'] == "RANDOM":
        randomhwid = uuid.uuid4().hex
        srv_config['hwid'] = randomhwid[:16]
    # Sanitize HWID.
    hexstr = srv_config['hwid']
    # Strip 0x from the start of hexstr
    if hexstr.startswith("0x"):
        hexstr = hexstr[2:]
    # Drop every non-hex character; any difference means invalid input.
    hexsub = re.sub(r'[^0-9a-fA-F]', '', hexstr)
    diff = set(hexstr).symmetric_difference(set(hexsub))
    if len(diff) != 0:
        diff = str(diff).replace('{', '').replace('}', '')
        pretty_printer(log_obj = loggersrv.error, to_exit = True,
                       put_text = "{reverse}{red}{bold}HWID '%s' is invalid. Digit %s non hexadecimal. Exiting...{end}" %(hexstr.upper(), diff))
    else:
        # A valid HWID is exactly 16 hex digits (8 bytes).
        lh = len(hexsub)
        if lh % 2 != 0:
            pretty_printer(log_obj = loggersrv.error, to_exit = True,
                           put_text = "{reverse}{red}{bold}HWID '%s' is invalid. Hex string is odd length. Exiting...{end}" %hexsub.upper())
        elif lh < 16:
            pretty_printer(log_obj = loggersrv.error, to_exit = True,
                           put_text = "{reverse}{red}{bold}HWID '%s' is invalid. Hex string is too short. Exiting...{end}" %hexsub.upper())
        elif lh > 16:
            pretty_printer(log_obj = loggersrv.error, to_exit = True,
                           put_text = "{reverse}{red}{bold}HWID '%s' is invalid. Hex string is too long. Exiting...{end}" %hexsub.upper())
        else:
            srv_config['hwid'] = binascii.a2b_hex(hexsub)
    # Check LCID.
    srv_config['lcid'] = check_lcid(srv_config['lcid'], loggersrv.warning)
    # Check sqlite.
    if srv_config['sqlite']:
        if isinstance(srv_config['sqlite'], str):
            check_dir(srv_config['sqlite'], 'srv', log_obj = loggersrv.error, argument = '-s/--sqlite')
        elif srv_config['sqlite'] is True:
            # Bare flag: fall back to the default database file.
            srv_config['sqlite'] = srv_options['sql']['file']
        try:
            import sqlite3
        except ImportError:
            # Database support is optional: warn and continue without it.
            pretty_printer(log_obj = loggersrv.warning,
                           put_text = "{reverse}{yellow}{bold}Module 'sqlite3' not installed, database support disabled.{end}")
            srv_config['sqlite'] = False
    # Check other specific server options.
    opts = [('clientcount', '-c/--client-count'),
            ('timeoutidle', '-t0/--timeout-idle'),
            ('timeoutsndrcv', '-t1/--timeout-sndrcv')]
    if serverthread.with_gui:
        opts += [('activation', '-a/--activation-interval'),
                 ('renewal', '-r/--renewal-interval')]
    check_other(srv_config, opts, loggersrv, where = 'srv')
    # Check further addresses / ports.
    if 'listen' in srv_config:
        addresses = []
        for elem in srv_config['listen']:
            # Each element is an "address,port" string.
            try:
                addr, port = elem.split(',')
            except ValueError:
                pretty_printer(log_obj = loggersrv.error, to_exit = True,
                               put_text = "{reverse}{red}{bold}argument `-n/--listen`: %s not well defined. Exiting...{end}" %elem)
            try:
                port = int(port)
            except ValueError:
                pretty_printer(log_obj = loggersrv.error, to_exit = True,
                               put_text = "{reverse}{red}{bold}argument `-n/--listen`: port number '%s' is invalid. Exiting...{end}" %port)
            if not (1 <= port <= 65535):
                pretty_printer(log_obj = loggersrv.error, to_exit = True,
                               put_text = "{reverse}{red}{bold}argument `-n/--listen`: port number '%s' is invalid. Enter between 1 - 65535. Exiting...{end}" %port)
            addresses.append((addr, port))
        srv_config['listen'] = addresses
def server_create():
    """Build and return the KeyServer (not yet serving).

    Listens on the main ip/port plus any extra '-n/--listen' addresses;
    each address tuple is (ip, port, backlog, reuse).
    """
    # Create address list (when the current user indicates execution inside the Windows Sandbox,
    # then we wont allow port reuse - it is not supported).
    all_address = [(
        srv_config['ip'], srv_config['port'],
        (srv_config['backlog_main'] if 'backlog_main' in srv_config else srv_options['backlog']['def']),
        (srv_config['reuse_main'] if 'reuse_main' in srv_config else False if getuser() == 'WDAGUtilityAccount' \
         else srv_options['reuse']['def'])
    )]
    log_address = "TCP server listening at %s on port %d" %(srv_config['ip'], srv_config['port'])
    if 'listen' in srv_config:
        # Extra addresses: same tuple layout as the main one.
        for l, b, r in zip(srv_config['listen'], srv_config['backlog'], srv_config['reuse']):
            # Port reuse is never allowed inside the Windows Sandbox.
            r = (False if getuser() == 'WDAGUtilityAccount' else r)
            all_address.append(l + (b,) + (r,))
            log_address += justify("at %s on port %d" %(l[0], l[1]), indent = 56)
    server = KeyServer(all_address, kmsServerHandler, want_dual = (srv_config['dual'] if 'dual' in srv_config else srv_options['dual']['def']))
    server.timeout = srv_config['timeoutidle']
    loggersrv.info(log_address)
    loggersrv.info("HWID: %s" % deco(binascii.b2a_hex(srv_config['hwid']), 'utf-8').upper())
    return server
def server_terminate(generic_srv, exit_server = False, exit_thread = False):
    """Shut down parts of *generic_srv*: its serve loop, its thread, or both."""
    for wanted, method_name in ((exit_server, 'terminate_serve'),
                                (exit_thread, 'terminate_thread')):
        if wanted:
            getattr(generic_srv, method_name)()
class ServerWithoutGui(object):
    """Runs the KMS server thread when no GUI is requested (daemon mode)."""
    def start(self):
        """Spawn the server thread and tell it to start.

        Returns (exit_code, thread) so the daemon can join/clean it later.
        """
        import queue as Queue
        daemon_queue = Queue.Queue(maxsize = 0)
        daemon_serverthread = server_thread(daemon_queue, name = "Thread-Srv-Daemon")
        # Daemon thread, so it cannot keep the process alive on shutdown.
        # (attribute assignment replaces setDaemon(), deprecated since 3.10)
        daemon_serverthread.daemon = True
        # options already checked in `server_main_terminal`.
        daemon_serverthread.checked = True
        daemon_serverthread.start()
        daemon_queue.put('start')
        return 0, daemon_serverthread
    def join(self, daemon_serverthread):
        """Block until the server thread exits (polling keeps signals responsive)."""
        while daemon_serverthread.is_alive():
            daemon_serverthread.join(timeout = 0.5)
    def clean(self, daemon_serverthread):
        """Stop serving and terminate the server thread."""
        server_terminate(daemon_serverthread, exit_server = True, exit_thread = True)
def server_main_terminal():
# Parse options.
server_options()
# Check options.
server_check()
serverthread.checked = True
if 'etrigan' not in srv_config.values():
| |
<reponame>Myself5/build
#
# Copyright (C) 2016 The EFIDroid Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import make_syntax
import subprocess
import copy
import re
from utils import *
from fstab import *
# compatibility imports
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
class VariableSpace:
    """A flat name -> string mapping with make-style $(NAME) expansion.

    Values may reference other variables as $(NAME); evaluation keeps
    substituting until the string is stable and raises on runaway
    (recursive) definitions.
    """

    # Matches one make-style variable reference, capturing the name.
    # Raw string fixes the invalid escape sequences ('\$', '\(') that
    # Python 3 warns about in the original non-raw pattern.
    __verify_pattern = re.compile(r'\$\((\w+)\)')

    def __init__(self):
        # name -> string value
        self.vars = {}

    def get(self, name, throw=True):
        """Return the value of *name*.

        Raises an Exception for undefined names unless throw=False, in
        which case None is returned.
        """
        if name not in self.vars:
            if throw:
                raise Exception('variable \''+name+'\' not found')
            return None
        return self.vars[name]

    def set(self, name, value):
        """Bind *name* to *value*; rejects empty names and None values."""
        if not name:
            raise Exception('Invalid variable name')
        # 'is None' rather than '== None': identity is the correct check.
        if value is None:
            raise Exception('no value given for variable \''+name+'\'')
        self.vars[name] = value

    def evaluate_str(self, s):
        """Expand $(NAME) references in *s* using this space only.

        Unknown names are left untouched.  Raises after 10 substitution
        passes to catch recursive definitions.
        """
        parsedvalue = s
        processed = 1
        count = 0
        while processed > 0:
            processed = 0
            nvalue = parsedvalue
            vars_to_replace = VariableSpace.__verify_pattern.findall(nvalue)
            for varname in vars_to_replace:
                if varname in self.vars:
                    nvalue = nvalue.replace('$('+varname+')', self.vars[varname])
            if nvalue != parsedvalue:
                parsedvalue = nvalue
                processed += 1
            count += 1
            if count == 10:
                raise Exception('Variable recursion in: '+s)
        return parsedvalue

    @staticmethod
    def evaluate_str_all(s, spaces):
        """Expand $(NAME) references in *s*, consulting each space in
        *spaces* in order (per pass).  Same recursion guard as
        evaluate_str."""
        parsedvalue = s
        processed = 1
        count = 0
        while processed > 0:
            processed = 0
            vars_to_replace = VariableSpace.__verify_pattern.findall(parsedvalue)
            for space in spaces:
                nvalue = parsedvalue
                for varname in vars_to_replace:
                    if varname in space.vars:
                        nvalue = nvalue.replace('$('+varname+')', space.vars[varname])
                if nvalue != parsedvalue:
                    parsedvalue = nvalue
                    processed += 1
            count += 1
            if count == 10:
                raise Exception('Variable recursion in: '+s)
        return parsedvalue

    @staticmethod
    def verify(s):
        """Raise when *s* still contains unexpanded $(VAR) references.
        $(MAKE) is allowed through (expanded later by make itself)."""
        matches = VariableSpace.__verify_pattern.findall(s)
        if len(matches) > 0 and 'MAKE' not in matches:
            raise Exception('unexpanded variables in: '+s)

    def __deepcopy__(self, memo):
        # Values are plain strings, so a shallow copy of the mapping is a
        # safe "deep" copy.
        # NOTE(review): 'dirty' is only ever set here, never in __init__ —
        # presumably consumers probe it with getattr/hasattr; confirm.
        n = VariableSpace()
        n.vars = copy.copy(self.vars)
        n.dirty = True
        return n

    def __str__(self):
        return self.vars.__str__()

    def __repr__(self):
        return self.vars.__repr__()
class Context:
__valid_buildtypes = ['DEBUG', 'USERDEBUG', 'RELEASE']
    def __init__(self):
        """Initialise the global build context.

        Reads configuration from the environment (EFIDROID_BUILDTYPE,
        EFIDROID_TARGET_ARCH, EFIDROID_DEVICEID), detects the host OS,
        and — when a device id is given — runs roomservice to fetch the
        device tree and seeds the 'device' variable space.

        Raises:
            Exception: on invalid build type, unsupported host kernel,
                malformed device id, roomservice failure, or missing
                device configuration.
        """
        self.extdata = Bunch()
        self.toolchains = []
        self.toolchains_used = []
        self.targets = []
        self.iniparser = INIParser(self)
        self.moduleparser = ModuleParser(self)
        self.clazzes = []
        self.architectures = []
        self.globalvars = VariableSpace()
        self.clazzvars = {}
        self.cleantargets = []
        self.distcleantargets = []
        self.defaultarchitectures = []
        # $(TOP) is the current working directory at construction time
        self.globalvars.set('TOP', os.path.abspath(''))
        # the 'host' clazz is always available
        self.enableClazz('host')
        # get build type
        buildtype = 'USERDEBUG'
        if 'EFIDROID_BUILDTYPE' in os.environ:
            buildtype = os.environ['EFIDROID_BUILDTYPE']
        if not buildtype in Context.__valid_buildtypes:
            raise Exception('Invalid build type \''+buildtype+'\'')
        self.globalvars.set('BUILDTYPE', buildtype)
        # get host type
        kernel_name = os.uname()[0]
        if kernel_name == 'Linux':
            hosttype = 'linux-x86'
        elif kernel_name == 'Darwin':
            hosttype = 'darwin-x86'
        else:
            raise Exception('Unsupported kernel \''+kernel_name+'\'')
        self.globalvars.set('HOSTTYPE', hosttype)
        # use architectures from environment variable
        if 'EFIDROID_TARGET_ARCH' in os.environ:
            self.defaultarchitectures = os.environ['EFIDROID_TARGET_ARCH'].split()
        # get device id
        if 'EFIDROID_DEVICEID' in os.environ:
            deviceid = os.environ['EFIDROID_DEVICEID']
            # device builds force the default architecture set to 'arm',
            # overriding any EFIDROID_TARGET_ARCH value read above
            self.defaultarchitectures = ['arm']
            # add device class
            self.enableClazz('target')
            self.enableClazz('device')
            # parse device id (expected form: vendor/name)
            tmp = deviceid.split('/')
            if len(tmp) != 2:
                raise Exception('Invalid device id: '+deviceid)
            # run roomservice; the extra 'true' argument is passed when the
            # device config already exists locally
            roomservicerc = 0
            path_roomservice = 'build/tools/roomservice.py'
            if not os.path.isfile('device/'+deviceid+'/config.ini'):
                roomservicerc = subprocess.call([path_roomservice, deviceid])
            else:
                roomservicerc = subprocess.call([path_roomservice, deviceid, 'true'])
            # check return code
            if roomservicerc != 0:
                raise Exception('roomservice error: %d' % (roomservicerc))
            # check if we finally have a device dir now
            path_deviceconfig = 'device/'+deviceid+'/config.ini'
            if not os.path.isfile(path_deviceconfig):
                raise Exception('Device \''+deviceid+'\' does not exist')
            # set device variables
            self.clazzvars['device'].set('DEVICE', deviceid)
            self.clazzvars['device'].set('DEVICEVENDOR', tmp[0])
            self.clazzvars['device'].set('DEVICENAME', tmp[1])
            self.clazzvars['device'].set('DEVICE_OUT', '$(OUT)/device/$(DEVICE)')
            self.clazzvars['device'].set('DEVICE_DIR', '$(TOP)/device/$(DEVICE)');
def __addClazzVariableSpace(self, name, arch=None):
if arch:
clazzname = name+'_'+arch
outdir = '$(OUT)/'+name+'/'+arch
else:
clazzname = name
outdir = '$(OUT)/'+name
self.clazzvars[clazzname] = VariableSpace()
if name!='device':
self.clazzvars[clazzname].set(clazzname.upper()+'_OUT', outdir)
def enableClazz(self, name):
if name in self.clazzes:
return
self.clazzes.append(name)
if name=='target':
for arch in self.architectures:
self.__addClazzVariableSpace(name, arch)
else:
self.__addClazzVariableSpace(name)
def getClassVariableSpace(self, target):
if target.clazz=='target':
arch = target.vars.get('MODULE_ARCH', throw=False)
if arch:
return self.clazzvars['target_'+arch]
else:
return self.clazzvars[target.clazz]
return None
def expandVariables(self, target, s, otherspaces=[]):
spaces = []
if target:
# target variables first
spaces.append(target.vars)
# class variables
clazzspace = self.getClassVariableSpace(target)
if clazzspace:
spaces.append(clazzspace)
# device targets can use 'target' variables too
if target.clazz=='device':
arch = target.vars.get('MODULE_ARCH', throw=False)
if arch:
spaces.append(self.clazzvars['target_'+arch])
# every target can use host variables (Host already has them)
if target.clazz!='host':
spaces.append(self.clazzvars['host'])
# everyone can use global variables
spaces.append(self.globalvars)
for o in otherspaces:
spaces.append(o)
return VariableSpace.evaluate_str_all(s, spaces)
def enableArch(self, arch):
if arch in self.architectures:
return
self.architectures.append(arch)
if 'target' in self.clazzes:
self.__addClazzVariableSpace('target', arch)
def getTarget(self, name):
for target in self.targets:
if target.name==name:
return target
return None
def removeTarget(self, o):
if isinstance(o, str):
if o in self.cleantargets:
self.cleantargets.remove(o)
if o in self.distcleantargets:
self.distcleantargets.remove(o)
for i in range(len(self.targets)):
target = self.targets[i]
if target.name==o:
del self.targets[i]
return
else:
if o.name in self.cleantargets:
self.cleantargets.remove(o.name)
if o.name in self.distcleantargets:
self.distcleantargets.remove(o.name)
for i in range(len(self.targets)):
target = self.targets[i]
if target is o:
del self.targets[i]
return
raise Exception('target %s is not in the list' % o)
def addTarget(self, target):
if not target.name:
raise Exception('target \''+target.name+'\' doesn\'t have a name')
if not target.clazz in self.clazzes:
raise Exception('target \''+target.name+'\' uses disabled clazz \''+target.clazz+'\'')
self.targets.append(target)
def addToolchain(self, toolchain):
for t in self.toolchains:
if t.type==toolchain.type and t.arch==toolchain.arch and t.name==toolchain.name:
raise Exception('Toolchain \'%s\'(%s) conflicts with %s(%s)' % (
toolchain, self.striptopdir(toolchain.source),
t, self.striptopdir(t.source))
)
self.toolchains.append(toolchain)
def findToolchains(self, arch, name=None):
r = []
for t in self.toolchains:
localname = name
if not localname:
envvarname = 'EFIDROID_TOOLCHAIN_NAME_'+t.type.upper()
if envvarname in os.environ:
localname = os.environ[envvarname]
if not localname:
localname = 'gcc6'
if t.arch==arch and t.name==localname:
r.append(t)
if not t in self.toolchains_used:
self.toolchains_used.append(t)
return r
def getfname(self, path, absolute=False, otherspaces=[]):
if not absolute:
path = '$(TOP)/'+path
r = self.globalvars.evaluate_str(path)
for space in otherspaces:
r = space.evaluate_str(r)
VariableSpace.verify(r)
return r
def striptopdir(self, path):
top = self.globalvars.get('TOP')+'/'
if path.startswith(top):
return path[len(top):]
return path
def __escape_value(self, s):
s = s.replace('"', '\\"')
return s
    def __generate_command_line(self, target, command, arch, toolchains):
        """Render one target command (a list of arguments) into a single
        shell command line with all variables expanded.

        Marker arguments: Target.COMMAND_MAKE becomes literal '$(MAKE)';
        Target.COMMAND_ENV expands to NAME="value" environment assignments
        for every target and toolchain variable.  Target.Raw arguments and
        known shell operators are emitted unquoted; everything else is
        double-quoted with embedded quotes escaped.

        NOTE(review): the *arch* parameter is not used in this body —
        confirm whether it is kept only for signature symmetry with
        __generate_target_commands.

        Raises:
            Exception: via VariableSpace.verify when unexpanded variables
                (other than $(MAKE)) remain in the final line.
        """
        line = ''
        toolchainspaces = [t.toolchainvars for t in toolchains]
        for arg in command:
            if arg==Target.COMMAND_MAKE:
                # leave $(MAKE) for make itself to expand
                arg = '$(MAKE)'
                line += arg+' '
            elif arg==Target.COMMAND_ENV:
                tmp = ' '
                # add target variables
                for name in target.vars.vars:
                    value = target.vars.vars[name]
                    tmp += name+'="'+self.__escape_value(value)+'" '
                # add toolchain variables
                for t in toolchains:
                    for name in t.toolchainvars.vars:
                        value = t.toolchainvars.vars[name]
                        tmp += name+'="'+self.__escape_value(value)+'" '
                line += tmp+' '
            else:
                if isinstance(arg, Target.Raw):
                    # raw arguments pass through verbatim, padded by spaces
                    narg = ' '+arg.s+' '
                else:
                    narg = arg
                # add argument
                if narg in Target.OPERATORS or isinstance(arg, Target.Raw):
                    line += narg
                else:
                    line += '"'+self.__escape_value(narg)+'" '
        # expand $(VAR)s; toolchain spaces are consulted after the usual ones
        line = self.expandVariables(target, line, otherspaces=toolchainspaces)
        VariableSpace.verify(line)
        return line
    def __generate_target_commands(self, name, target):
        """Return the list of rendered shell command lines for *target*.

        Non-host targets resolve their architecture (MODULE_ARCH, falling
        back to the first enabled architecture) and its toolchains first;
        an optional 'mkdir -p $(MODULE_OUT)' is prepended when the target
        requests a module directory.

        NOTE(review): the *name* parameter is unused in this body —
        confirm whether callers rely on the signature.

        Raises:
            Exception: when no toolchain exists for the resolved arch.
        """
        r = []
        arch = None
        toolchains = []
        # get arch
        if target.clazz!='host':
            arch = target.vars.get('MODULE_ARCH', throw=False)
            if not arch:
                # default to the first enabled architecture
                arch = self.architectures[0]
            # get toolchains
            toolchains = self.findToolchains(arch)
            if arch!='host' and len(toolchains)<=0:
                raise Exception('no toolchains found for architecture %s' % (arch))
        if target.create_moduledir:
            cmd_mkdir = ['mkdir', '-p', '$(MODULE_OUT)']
            r.append(self.__generate_command_line(target, cmd_mkdir, arch, toolchains))
        for command in target.commands:
            r.append(self.__generate_command_line(target, command, arch, toolchains))
        return r
@staticmethod
def __makehelptext(targetname, text):
return bldwht.replace('\033', '\\033')+targetname+': ' \
+txtrst.replace('\033', '\\033')+text.replace('\n', '\\n'+((len(targetname)+2)*' '))+'\\n'
def generate_makefile(self, filename):
makeout = StringIO()
make = make_syntax.Writer(makeout)
helptext = ''
helptext_internal = ''
# add force target
make.comment('Used to force goals to build. Only use for conditionally defined goals.')
make.target('FORCE')
make.newline()
# add targets
for target in sorted(self.targets, key=lambda x: x.name):
deps = target.dependencies
commands = []
target_arch = target.vars.get('MODULE_ARCH', throw=False)
if (target_arch) and (not target_arch in self.architectures):
continue
if target.force:
deps.append('FORCE')
# generate commands
if target.commands:
commands = self.__generate_target_commands(target.name, target)
compilationmessage = None
if not target.silent:
if target.name.endswith('_clean'):
compilationmessage = 'Cleaning '+target.name
elif target.name.endswith('_distclean'):
compilationmessage = 'Dist-Cleaning '+target.name
else:
compilationmessage = 'Compiling '+target.name
if target.compilationmessage:
compilationmessage = target.compilationmessage
# create actual make target
make.comment(self.striptopdir(target.source))
make.target(target.name, commands, deps, target.phony, compilationmessage)
make.newline()
# add to help text
nhelptext = Context.__makehelptext(target.name, target.description)
if not target.internal:
helptext += nhelptext
helptext_internal += nhelptext
# add additional help texts
nhelptext = Context.__makehelptext('help', 'Show available targets')
helptext += nhelptext
helptext_internal += nhelptext
helptext_internal += Context.__makehelptext('help-internal', 'Show available targets')
# help target
make.comment('HELP')
make.target('help', | |
resolution of azimuth and altitude angles for the screen transmittance map.
| A value of 0 means no transmittance map will be generated.
| Valid values for this field are 0, 1, 2, 3 and 5.
| Units: deg
Args:
value (int): value for IDD Field `Angle of Resolution for Screen Transmittance Output Map`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `angle_of_resolution_for_screen_transmittance_output_map` or None if not set
"""
return self["Angle of Resolution for Screen Transmittance Output Map"]
@angle_of_resolution_for_screen_transmittance_output_map.setter
def angle_of_resolution_for_screen_transmittance_output_map(
self,
value=None):
"""Corresponds to IDD field `Angle of Resolution for Screen
Transmittance Output Map`"""
self["Angle of Resolution for Screen Transmittance Output Map"] = value
class WindowMaterialShadeEquivalentLayer(DataObject):

    """ Corresponds to IDD object `WindowMaterial:Shade:EquivalentLayer`
        Specifies the properties of equivalent layer window shade material
        Shades are considered to be perfect diffusers (all transmitted and
        reflected radiation is hemispherically-diffuse) independent of angle
        of incidence. Shade represents roller blinds.
    """
    # Static IDD metadata consumed by the DataObject base class: declares
    # each field's canonical name, python accessor name, type, default and
    # validation bounds ('minimum>' / 'maximum<' denote strict bounds).
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'shade beam-beam solar transmittance',
                                       {'name': u'Shade Beam-Beam Solar Transmittance',
                                        'pyname': u'shade_beambeam_solar_transmittance',
                                        'default': 0.0,
                                        'maximum': 0.8,
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'dimensionless'}),
                                      (u'front side shade beam-diffuse solar transmittance',
                                       {'name': u'Front Side Shade Beam-Diffuse Solar Transmittance',
                                        'pyname': u'front_side_shade_beamdiffuse_solar_transmittance',
                                        'maximum<': 1.0,
                                        'required-field': True,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'dimensionless'}),
                                      (u'back side shade beam-diffuse solar transmittance',
                                       {'name': u'Back Side Shade Beam-Diffuse Solar Transmittance',
                                        'pyname': u'back_side_shade_beamdiffuse_solar_transmittance',
                                        'maximum<': 1.0,
                                        'required-field': True,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'dimensionless'}),
                                      (u'front side shade beam-diffuse solar reflectance',
                                       {'name': u'Front Side Shade Beam-Diffuse Solar Reflectance',
                                        'pyname': u'front_side_shade_beamdiffuse_solar_reflectance',
                                        'maximum<': 1.0,
                                        'required-field': True,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'dimensionless'}),
                                      (u'back side shade beam-diffuse solar reflectance',
                                       {'name': u'Back Side Shade Beam-Diffuse Solar Reflectance',
                                        'pyname': u'back_side_shade_beamdiffuse_solar_reflectance',
                                        'maximum<': 1.0,
                                        'required-field': True,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'dimensionless'}),
                                      (u'shade beam-beam visible transmittance at normal incidence',
                                       {'name': u'Shade Beam-Beam Visible Transmittance at Normal Incidence',
                                        'pyname': u'shade_beambeam_visible_transmittance_at_normal_incidence',
                                        'maximum<': 1.0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'dimensionless'}),
                                      (u'shade beam-diffuse visible transmittance at normal incidence',
                                       {'name': u'Shade Beam-Diffuse Visible Transmittance at Normal Incidence',
                                        'pyname': u'shade_beamdiffuse_visible_transmittance_at_normal_incidence',
                                        'maximum<': 1.0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'dimensionless'}),
                                      (u'shade beam-diffuse visible reflectance at normal incidence',
                                       {'name': u'Shade Beam-Diffuse Visible Reflectance at Normal Incidence',
                                        'pyname': u'shade_beamdiffuse_visible_reflectance_at_normal_incidence',
                                        'maximum<': 1.0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'dimensionless'}),
                                      (u'shade material infrared transmittance',
                                       {'name': u'Shade Material Infrared Transmittance',
                                        'pyname': u'shade_material_infrared_transmittance',
                                        'default': 0.05,
                                        'maximum<': 1.0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'dimensionless'}),
                                      (u'front side shade material infrared emissivity',
                                       {'name': u'Front Side Shade Material Infrared Emissivity',
                                        'pyname': u'front_side_shade_material_infrared_emissivity',
                                        'default': 0.91,
                                        'minimum>': 0.0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'maximum<': 1.0,
                                        'unit': u'dimensionless'}),
                                      (u'back side shade material infrared emissivity',
                                       {'name': u'Back Side Shade Material Infrared Emissivity',
                                        'pyname': u'back_side_shade_material_infrared_emissivity',
                                        'default': 0.91,
                                        'minimum>': 0.0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'maximum<': 1.0,
                                        'unit': u'dimensionless'})]),
               'format': None,
               'group': u'Surface Construction Elements',
               'min-fields': 6,
               'name': u'WindowMaterial:Shade:EquivalentLayer',
               'pyname': u'WindowMaterialShadeEquivalentLayer',
               'required-object': False,
               'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def shade_beambeam_solar_transmittance(self):
"""field `Shade Beam-Beam Solar Transmittance`
| The beam-beam solar transmittance at normal incidence. This value is
| the same as the openness area fraction of the shade material. Assumed
| to be the same for front and back sides.
| Units: dimensionless
| value <= 0.8
Args:
value (float): value for IDD Field `Shade Beam-Beam Solar Transmittance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `shade_beambeam_solar_transmittance` or None if not set
"""
return self["Shade Beam-Beam Solar Transmittance"]
@shade_beambeam_solar_transmittance.setter
def shade_beambeam_solar_transmittance(self, value=None):
""" Corresponds to IDD field `Shade Beam-Beam Solar Transmittance`
"""
self["Shade Beam-Beam Solar Transmittance"] = value
@property
def front_side_shade_beamdiffuse_solar_transmittance(self):
"""field `Front Side Shade Beam-Diffuse Solar Transmittance`
| The front side beam-diffuse solar transmittance at normal incidence averaged
| over the entire spectrum of solar radiation.
| Units: dimensionless
| value < 1.0
Args:
value (float): value for IDD Field `Front Side Shade Beam-Diffuse Solar Transmittance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `front_side_shade_beamdiffuse_solar_transmittance` or None if not set
"""
return self["Front Side Shade Beam-Diffuse Solar Transmittance"]
@front_side_shade_beamdiffuse_solar_transmittance.setter
def front_side_shade_beamdiffuse_solar_transmittance(self, value=None):
""" Corresponds to IDD field `Front Side Shade Beam-Diffuse Solar Transmittance`
"""
self["Front Side Shade Beam-Diffuse Solar Transmittance"] = value
@property
def back_side_shade_beamdiffuse_solar_transmittance(self):
"""field `Back Side Shade Beam-Diffuse Solar Transmittance`
| The back side beam-diffuse solar transmittance at normal incidence averaged
| over the entire spectrum of solar radiation.
| Units: dimensionless
| value < 1.0
Args:
value (float): value for IDD Field `Back Side Shade Beam-Diffuse Solar Transmittance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `back_side_shade_beamdiffuse_solar_transmittance` or None if not set
"""
return self["Back Side Shade Beam-Diffuse Solar Transmittance"]
@back_side_shade_beamdiffuse_solar_transmittance.setter
def back_side_shade_beamdiffuse_solar_transmittance(self, value=None):
""" Corresponds to IDD field `Back Side Shade Beam-Diffuse Solar Transmittance`
"""
self["Back Side Shade Beam-Diffuse Solar Transmittance"] = value
@property
def front_side_shade_beamdiffuse_solar_reflectance(self):
"""field `Front Side Shade Beam-Diffuse Solar Reflectance`
| The front side beam-diffuse solar reflectance at normal incidence averaged
| over the entire spectrum of solar radiation.
| Units: dimensionless
| value < 1.0
Args:
value (float): value for IDD Field `Front Side Shade Beam-Diffuse Solar Reflectance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `front_side_shade_beamdiffuse_solar_reflectance` or None if not set
"""
return self["Front Side Shade Beam-Diffuse Solar Reflectance"]
@front_side_shade_beamdiffuse_solar_reflectance.setter
def front_side_shade_beamdiffuse_solar_reflectance(self, value=None):
""" Corresponds to IDD field `Front Side Shade Beam-Diffuse Solar Reflectance`
"""
self["Front Side Shade Beam-Diffuse Solar Reflectance"] = value
@property
def back_side_shade_beamdiffuse_solar_reflectance(self):
"""field `Back Side Shade Beam-Diffuse Solar Reflectance`
| The back side beam-diffuse solar reflectance at normal incidence averaged
| over the entire spectrum of solar radiation.
| Units: dimensionless
| value < 1.0
Args:
value (float): value for IDD Field `Back Side Shade Beam-Diffuse Solar Reflectance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `back_side_shade_beamdiffuse_solar_reflectance` or None if not set
"""
return self["Back Side Shade Beam-Diffuse Solar Reflectance"]
@back_side_shade_beamdiffuse_solar_reflectance.setter
def back_side_shade_beamdiffuse_solar_reflectance(self, value=None):
""" Corresponds to IDD field `Back Side Shade Beam-Diffuse Solar Reflectance`
"""
self["Back Side Shade Beam-Diffuse Solar Reflectance"] = value
@property
def shade_beambeam_visible_transmittance_at_normal_incidence(self):
"""field `Shade Beam-Beam Visible Transmittance at Normal Incidence`
| The beam-beam visible transmittance at normal incidence averaged over the
| visible spectrum range of solar radiation. Assumed to be the same for
| front and back sides of the shade.
| Units: dimensionless
| value < 1.0
Args:
value (float): value for IDD Field `Shade Beam-Beam Visible Transmittance at Normal Incidence`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `shade_beambeam_visible_transmittance_at_normal_incidence` or None if not set
"""
return self[
"Shade Beam-Beam Visible Transmittance at Normal Incidence"]
@shade_beambeam_visible_transmittance_at_normal_incidence.setter
def shade_beambeam_visible_transmittance_at_normal_incidence(
self,
value=None):
""" Corresponds to IDD field `Shade Beam-Beam Visible Transmittance at Normal Incidence`
"""
self[
"Shade Beam-Beam Visible Transmittance at Normal Incidence"] = value
@property
def shade_beamdiffuse_visible_transmittance_at_normal_incidence(self):
"""field `Shade Beam-Diffuse Visible Transmittance at Normal Incidence`
| The beam-diffuse visible transmittance at normal incidence averaged over the
| visible spectrum range of solar radiation. Assumed to be the same for
| front and back sides of the shade.
| Units: dimensionless
| value < 1.0
Args:
value (float): value for IDD Field `Shade Beam-Diffuse Visible Transmittance at Normal Incidence`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `shade_beamdiffuse_visible_transmittance_at_normal_incidence` or None if not set
"""
return self[
"Shade Beam-Diffuse Visible | |
DIV
"""
try:
import qrcode
import qrcode.image.svg
except ImportError:
return s3_str(value)
# Generate the QR Code
qr = qrcode.QRCode(version = 2,
# L-level good enough for displaying on screen, as
# it would rarely be damaged or dirty there ;)
error_correction = qrcode.constants.ERROR_CORRECT_L,
box_size = 10,
border = 4,
image_factory=qrcode.image.svg.SvgImage,
)
qr.add_data(s3_str(value))
qr.make(fit=True)
# Write the SVG into a buffer
qr_svg = qr.make_image()
from io import BytesIO
stream = BytesIO()
qr_svg.save(stream)
# Generate XML string to embed
stream.seek(0)
svgxml = XML(stream.read())
output = DIV(DIV(svgxml, _class="s3-qrcode-svg"),
_class="s3-qrcode-display",
)
if show_value:
output.append(DIV(s3_str(value), _class="s3-qrcode-val"))
return output
# =============================================================================
def s3_URLise(text):
    """
    Convert all URLs in a text into HTML <A> tags (opening in a
    new browser tab)

    @param text: the text
    """

    def anchor(match):
        url = match.group(0)
        return '<a href="%s" target="_blank">%s</a>' % (url, url)

    return URLSCHEMA.sub(anchor, text)
# =============================================================================
def s3_avatar_represent(user_id, tablename="auth_user", gravatar=False, **attr):
    """
    Represent a User as their profile picture or Gravatar

    @param user_id: the record ID (auth_user.id or pr_person.id)
    @param tablename: either "auth_user" or "pr_person" depending on which
                      table the 'user_id' refers to
    @param gravatar: fall back to Gravatar when no profile image exists
    @param attr: additional HTML attributes for the IMG(), such as _class
    """

    size = (50, 50)

    if user_id:
        db = current.db
        s3db = current.s3db
        cache = s3db.cache

        table = s3db[tablename]

        email = None
        image = None

        if tablename == "auth_user":
            user = db(table.id == user_id).select(table.email,
                                                  cache = cache,
                                                  limitby = (0, 1),
                                                  ).first()
            if user:
                email = user.email.strip().lower()
            ltable = s3db.pr_person_user
            itable = s3db.pr_image
            query = (ltable.user_id == user_id) & \
                    (ltable.pe_id == itable.pe_id) & \
                    (itable.profile == True)
            image = db(query).select(itable.image,
                                     limitby = (0, 1),
                                     ).first()
            if image:
                image = image.image
        elif tablename == "pr_person":
            user = db(table.id == user_id).select(table.pe_id,
                                                  cache = cache,
                                                  limitby = (0, 1),
                                                  ).first()
            if user:
                ctable = s3db.pr_contact
                query = (ctable.pe_id == user.pe_id) & \
                        (ctable.contact_method == "EMAIL")
                email = db(query).select(ctable.value,
                                         cache = cache,
                                         limitby = (0, 1),
                                         ).first()
                if email:
                    email = email.value
                itable = s3db.pr_image
                query = (itable.pe_id == user.pe_id) & \
                        (itable.profile == True)
                image = db(query).select(itable.image,
                                         limitby = (0, 1),
                                         ).first()
                if image:
                    image = image.image

        if image:
            image = s3db.pr_image_library_represent(image, size=size)
            size = s3db.pr_image_size(image, size)
            url = URL(c="default", f="download",
                      args=image)
        elif gravatar:
            if email:
                # If no Image uploaded, try Gravatar, which also provides a nice fallback identicon
                import hashlib
                # hashlib.md5() requires bytes: passing the str directly
                # raises TypeError on Python 3, so encode the email first
                # (md5 here is Gravatar-protocol-mandated, not security)
                email_hash = hashlib.md5(email.encode("utf-8")).hexdigest()
                url = "//www.gravatar.com/avatar/%s?s=50&d=identicon" % email_hash
            else:
                url = "//www.gravatar.com/avatar/00000000000000000000000000000000?d=mm"
        else:
            url = URL(c="static", f="img", args="blank-user.gif")
    else:
        url = URL(c="static", f="img", args="blank-user.gif")

    if "_class" not in attr:
        attr["_class"] = "avatar"
    if "_width" not in attr:
        attr["_width"] = size[0]
    if "_height" not in attr:
        attr["_height"] = size[1]
    return IMG(_src=url, **attr)
# =============================================================================
def s3_auth_user_represent(user_id, row=None):
    """
    Represent a user as their email address

    @param user_id: the auth_user record ID
    @param row: the auth_user Row, if already looked up

    @ToDo: Deprecate (replace with auth_UserRepresent)
    """

    if row:
        return row.email
    elif not user_id:
        return current.messages["NONE"]

    db = current.db
    table = db.auth_user
    user = db(table.id == user_id).select(table.email,
                                          cache = current.s3db.cache,
                                          limitby = (0, 1),
                                          ).first()
    try:
        return user.email
    except AttributeError:
        # User not found (user is None): narrowed from the original bare
        # except so unrelated errors are no longer silently swallowed
        return current.messages.UNKNOWN_OPT
# =============================================================================
def s3_auth_user_represent_name(user_id, row=None):
    """
    Represent users by their names

    @param user_id: the auth_user record ID
    @param row: the auth_user Row, if already looked up

    @ToDo: Deprecate (replace with auth_UserRepresent)
    """

    if not row:
        if not user_id:
            return current.messages["NONE"]
        db = current.db
        table = db.auth_user
        row = db(table.id == user_id).select(table.first_name,
                                             table.last_name,
                                             cache = current.s3db.cache,
                                             limitby = (0, 1),
                                             ).first()
    try:
        return s3_format_fullname(row.first_name.strip(),
                                  None,
                                  row.last_name.strip(),
                                  )
    except AttributeError:
        # User not found (row is None) or a name field is None: narrowed
        # from the original bare except so unrelated errors surface
        return current.messages.UNKNOWN_OPT
# =============================================================================
def s3_yes_no_represent(value):
    """ Represent a Boolean field as Yes/No instead of True/False """

    if value is True:
        return current.T("Yes")
    if value is False:
        return current.T("No")
    # None or any non-Boolean value
    return current.messages["NONE"]
# =============================================================================
def s3_keep_messages():
    """
    Retain user messages from the previous request - prevents the
    messages from being swallowed by overhanging Ajax requests or
    intermediate pages with mandatory redirection
    (see s3_redirect_default)
    """

    response = current.response
    session = current.session

    # Copy each message category from the response into the session
    for category in ("confirmation", "error", "flash", "information", "warning"):
        setattr(session, category, getattr(response, category))
# =============================================================================
def s3_redirect_default(location="", how=303, client_side=False, headers=None):
    """
    Redirect preserving response messages; useful when redirecting
    from index() controllers.

    @param location: the url to redirect to
    @param how: the HTTP status code to use for the redirect
    @param client_side: if True, trigger a reload of the entire page
                        when the fragment has been loaded as a component
    @param headers: response headers
    """

    # Carry the response messages over into the session first
    s3_keep_messages()

    redirect(location, how=how, client_side=client_side, headers=headers)
# =============================================================================
def s3_include_debug_css():
    """
    Generates html to include the css listed in
    /modules/templates/<theme>/css.cfg
    """

    request = current.request

    location = current.response.s3.theme_config
    filename = "%s/modules/templates/%s/css.cfg" % (request.folder, location)
    if not os.path.isfile(filename):
        raise HTTP(500, "Theme configuration file missing: modules/templates/%s/css.cfg" % location)

    template = '<link href="/%s/static/styles/%%s" rel="stylesheet" type="text/css" />' % \
               request.application

    # One <link> per non-comment line of the config file
    with open(filename, "r") as css_cfg:
        tags = [template % line.rstrip()
                for line in css_cfg if line[0] != "#"]

    return XML("\n".join(tags))
# =============================================================================
def s3_include_debug_js():
    """
    Generates html to include the js scripts listed in
    /static/scripts/tools/sahana.js.cfg
    """

    request = current.request

    scripts_dir = os.path.join(request.folder, "static", "scripts")
    tools_dir = os.path.join(scripts_dir, "tools")
    # Guard the append: the original unconditionally grew sys.path with a
    # duplicate entry on every call
    if tools_dir not in sys.path:
        sys.path.append(tools_dir)
    import mergejsmf

    configDictCore = {
        ".": scripts_dir,
        "ui": scripts_dir,
        "web2py": scripts_dir,
        "S3": scripts_dir
    }
    configFilename = os.path.join(tools_dir, "sahana.js.cfg")
    files = mergejsmf.getFiles(configDictCore, configFilename)[1]

    script_template = '<script src="/%s/static/scripts/%%s"></script>' % \
                      request.application

    scripts = "\n".join(script_template % scriptname for scriptname in files)
    return XML(scripts)
# =============================================================================
def s3_include_ext():
    """
    Add ExtJS CSS & JS into a page for a Map
    - since this is normally run from MAP.xml() it is too late to insert into
      s3.[external_]stylesheets, so must inject sheets into correct order
    """

    s3 = current.response.s3
    if s3.ext_included:
        # Ext already included
        return
    request = current.request
    appname = request.application

    xtheme = current.deployment_settings.get_base_xtheme()
    if xtheme:
        # swap the configured filename's trailing 'css' for 'min.css'
        xtheme = "%smin.css" % xtheme[:-3]
        xtheme = \
            "<link href='/%s/static/themes/%s' rel='stylesheet' type='text/css' />" % \
            (appname, xtheme)

    if s3.cdn:
        # For Sites Hosted on the Public Internet, using a CDN may provide better performance
        PATH = "//cdn.sencha.com/ext/gpl/3.4.1.1"
    else:
        PATH = "/%s/static/scripts/ext" % appname

    if s3.debug:
        # Provide debug versions of CSS / JS
        adapter = "%s/adapter/jquery/ext-jquery-adapter-debug.js" % PATH
        main_js = "%s/ext-all-debug.js" % PATH
        main_css = \
            "<link href='%s/resources/css/ext-all-notheme.css' rel='stylesheet' type='text/css' />" % PATH
        if not xtheme:
            # fall back to the stock gray theme when no xtheme is configured
            xtheme = \
                "<link href='%s/resources/css/xtheme-gray.css' rel='stylesheet' type='text/css' />" % PATH
    else:
        adapter = "%s/adapter/jquery/ext-jquery-adapter.js" % PATH
        main_js = "%s/ext-all.js" % PATH
        if xtheme:
            main_css = \
                "<link href='/%s/static/scripts/ext/resources/css/ext-notheme.min.css' rel='stylesheet' type='text/css' />" % appname
        else:
            main_css = \
                "<link href='/%s/static/scripts/ext/resources/css/ext-gray.min.css' rel='stylesheet' type='text/css' />" % appname

    scripts = s3.scripts
    scripts_append = scripts.append
    scripts_append(adapter)
    scripts_append(main_js)
    # load the Ext locale file when one exists for the current language
    langfile = "ext-lang-%s.js" % s3.language
    if os.path.exists(os.path.join(request.folder, "static", "scripts", "ext", "src", "locale", langfile)):
        locale = "%s/src/locale/%s" % (PATH, langfile)
        scripts_append(locale)
    # inject the stylesheets client-side right after the page's first
    # <link>, so they precede the theme's own sheets in cascade order
    if xtheme:
        s3.jquery_ready.append('''$('link:first').after("%s").after("%s")''' % (xtheme, main_css))
    else:
        s3.jquery_ready.append('''$('link:first').after("%s")''' % main_css)
    # mark as done so repeated calls are no-ops
    s3.ext_included = True
# =============================================================================
def s3_include_simile():
    """
    Add Simile CSS & JS into a page for a Timeline.

    Appends either the individual debug scripts (s3.debug) or the minified
    bundle to s3.scripts, injects the matching CSS after the first <link>,
    and loads the Simile locale files for s3.language when a translation
    exists (falling back to English).
    Idempotent per request via s3.simile_included.

    FIX: removed the unused local alias ``scripts = s3.scripts``.
    """
    s3 = current.response.s3
    if s3.simile_included:
        # Simile already included
        return
    appname = current.request.application
    if s3.debug:
        # Provide debug versions of CSS / JS
        s3.scripts += ["/%s/static/scripts/S3/s3.simile.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/platform.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/debug.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/xmlhttp.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/json.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/dom.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/graphics.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/date-time.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/string.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/html.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/data-structure.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/units.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/ajax.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/history.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/window-manager.js" % appname,
                       "/%s/static/scripts/simile/ajax/scripts/remoteLog.js" % appname,
                       "/%s/static/scripts/S3/s3.timeline.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/timeline.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/band.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/themes.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/ethers.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/ether-painters.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/event-utils.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/labellers.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/sources.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/original-painter.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/detailed-painter.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/overview-painter.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/compact-painter.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/decorators.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/l10n/en/timeline.js" % appname,
                       "/%s/static/scripts/simile/timeline/scripts/l10n/en/labellers.js" % appname,
                       ]
        css = "".join(["<link href='/%s/static/scripts/simile/ajax/styles/graphics.css' rel='stylesheet' type='text/css' />" % appname,
                       "<link href='/%s/static/scripts/simile/timeline/styles/ethers.css' rel='stylesheet' type='text/css' />" % appname,
                       "<link href='/%s/static/scripts/simile/timeline/styles/events.css' rel='stylesheet' type='text/css' />" % appname,
                       "<link href='/%s/static/scripts/simile/timeline/styles/timeline.css' rel='stylesheet' type='text/css' />" % appname,
                       ])
    else:
        s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % appname)
        css = "".join(["<link href='/%s/static/scripts/simile/ajax/styles/graphics.css' rel='stylesheet' type='text/css' />" % appname,
                       "<link href='/%s/static/scripts/simile/timeline/timeline-bundle.css' rel='stylesheet' type='text/css' />" % appname,
                       ])
    # CSS cannot simply be appended to response.s3 here, so inject it
    # after the first stylesheet link on the client side
    s3.jquery_ready.append('''$('link:first').after("%s")''' % css)
    supported_locales = [
        "cs",  # Czech
        "de",  # German
        "en",  # English
        "es",  # Spanish
        "fr",  # French
        "it",  # Italian
        "nl",  # Dutch (The Netherlands)
        "pl",  # Polish
        "ru",  # Russian
        "se",  # Swedish
        "tr",  # Turkish
        "vi",  # Vietnamese
        "zh"   # Chinese
    ]
    if s3.language in supported_locales:
        locale = s3.language
    else:
        locale = "en"
    s3.scripts += ["/%s/static/scripts/simile/timeline/scripts/l10n/%s/timeline.js" % (appname, locale),
                   "/%s/static/scripts/simile/timeline/scripts/l10n/%s/labellers.js" % (appname, locale),
                   ]
    s3.simile_included = True
# =============================================================================
def s3_include_underscore():
"""
        Add Underscore JS
= "{0} wrt {1} | {2} | {3} | {4} | {5}"\
.format(
pad_name('<output>', 30, quotes=True),
pad_name('<variable>', 30, quotes=True),
pad_name('calc mag.'),
pad_name('check mag.'),
pad_name('a(cal-chk)'),
pad_name('r(cal-chk)'),
)
else:
max_width_of = len("'<output>'")
max_width_wrt = len("'<variable>'")
for of, wrt in sorted_keys:
max_width_of = max(max_width_of, len(of) + 2) # 2 to include quotes
max_width_wrt = max(max_width_wrt, len(wrt) + 2)
if print_reverse:
header = \
"{0} wrt {1} | {2} | {3} | {4} | {5} | {6} | {7} | {8} | {9} | {10}" \
.format(
pad_name('<output>', max_width_of, quotes=True),
pad_name('<variable>', max_width_wrt, quotes=True),
pad_name('fwd mag.'),
pad_name('rev mag.'),
pad_name('check mag.'),
pad_name('a(fwd-chk)'),
pad_name('a(rev-chk)'),
pad_name('a(fwd-rev)'),
pad_name('r(fwd-chk)'),
pad_name('r(rev-chk)'),
pad_name('r(fwd-rev)')
)
else:
header = "{0} wrt {1} | {2} | {3} | {4} | {5}"\
.format(
pad_name('<output>', max_width_of, quotes=True),
pad_name('<variable>', max_width_wrt, quotes=True),
pad_name('calc mag.'),
pad_name('check mag.'),
pad_name('a(cal-chk)'),
pad_name('r(cal-chk)'),
)
if out_stream:
out_buffer.write(header + '\n')
out_buffer.write('-' * len(header) + '\n' + '\n')
def safe_norm(arr):
return 0. if arr is None or arr.size == 0 else np.linalg.norm(arr)
for of, wrt in sorted_keys:
mult = None
if totals:
fd_opts = global_options['']
else:
fd_opts = global_options[sys_name][wrt]
directional = fd_opts.get('directional')
do_rev = not totals and matrix_free and not directional
do_rev_dp = not totals and matrix_free and directional
derivative_info = derivatives[of, wrt]
# TODO total derivs may have been computed in rev mode, not fwd
forward = derivative_info['J_fwd']
try:
fd = derivative_info['J_fd']
except KeyError:
# this can happen when a partial is not declared, which means it should be zero
fd = np.zeros(forward.shape)
if do_rev:
reverse = derivative_info.get('J_rev')
if 'j_rev_mult' in derivative_info:
mult = derivative_info['j_rev_mult']
fwd_error = safe_norm(forward - fd)
if do_rev_dp:
fwd_rev_error = derivative_info['directional_fwd_rev']
rev_error = derivative_info['directional_fd_rev']
elif do_rev:
rev_error = safe_norm(reverse - fd)
fwd_rev_error = safe_norm(forward - reverse)
else:
rev_error = fwd_rev_error = None
fwd_norm = safe_norm(forward)
fd_norm = safe_norm(fd)
if do_rev:
rev_norm = safe_norm(reverse)
else:
rev_norm = None
derivative_info['abs error'] = abs_err = ErrorTuple(fwd_error, rev_error, fwd_rev_error)
derivative_info['magnitude'] = magnitude = MagnitudeTuple(fwd_norm, rev_norm, fd_norm)
if fd_norm == 0.:
if fwd_norm == 0.:
derivative_info['rel error'] = rel_err = ErrorTuple(nan, nan, nan)
else:
# If fd_norm is zero, let's use fwd_norm as the divisor for relative
# check. That way we don't accidentally squelch a legitimate problem.
if do_rev or do_rev_dp:
rel_err = ErrorTuple(fwd_error / fwd_norm,
rev_error / fwd_norm,
fwd_rev_error / fwd_norm)
derivative_info['rel error'] = rel_err
else:
derivative_info['rel error'] = rel_err = ErrorTuple(fwd_error / fwd_norm,
None,
None)
else:
if do_rev or do_rev_dp:
derivative_info['rel error'] = rel_err = ErrorTuple(fwd_error / fd_norm,
rev_error / fd_norm,
fwd_rev_error / fd_norm)
else:
derivative_info['rel error'] = rel_err = ErrorTuple(fwd_error / fd_norm,
None,
None)
# Skip printing the dependent keys if the derivatives are fine.
if not compact_print and indep_key is not None:
rel_key = (of, wrt)
if rel_key in indep_key[sys_name] and fd_norm < abs_error_tol:
del derivative_data[sys_name][rel_key]
continue
# Informative output for responses that were declared with an index.
indices = derivative_info.get('indices')
if indices is not None:
of = '{} (index size: {})'.format(of, indices)
if not suppress_output:
if compact_print:
if totals:
if out_stream:
out_buffer.write(deriv_line.format(
pad_name(of, 30, quotes=True),
pad_name(wrt, 30, quotes=True),
magnitude.forward,
magnitude.fd,
abs_err.forward,
rel_err.forward,
) + '\n')
else:
error_string = ''
for error in abs_err:
if error is None:
continue
if not np.isnan(error) and error >= abs_error_tol:
error_string += ' >ABS_TOL'
break
# See if this component has the greater
# error in the derivative computation
# compared to the other components so far
is_worst_subjac = False
for i, error in enumerate(rel_err):
if error is None:
continue
if not np.isnan(error):
# only 1st and 2d errs
if i < 2 and error > worst_subjac_rel_err:
worst_subjac_rel_err = error
worst_subjac = (sys_type, sys_class_name, sys_name)
is_worst_subjac = True
if not np.isnan(error) and error >= rel_error_tol:
error_string += ' >REL_TOL'
break
if error_string: # Any error string indicates that at least one of the
# derivative calcs is greater than the rel tolerance
num_bad_jacs += 1
if out_stream:
if directional:
wrt = "(d)'%s'" % wrt
wrt_padded = pad_name(wrt, max_width_wrt, quotes=False)
else:
wrt_padded = pad_name(wrt, max_width_wrt, quotes=True)
if print_reverse:
deriv_info_line = \
deriv_line.format(
pad_name(of, max_width_of, quotes=True),
wrt_padded,
magnitude.forward,
_format_if_not_matrix_free(matrix_free and not directional,
magnitude.reverse),
magnitude.fd,
abs_err.forward,
_format_if_not_matrix_free(matrix_free,
abs_err.reverse),
_format_if_not_matrix_free(matrix_free,
abs_err.forward_reverse),
rel_err.forward,
_format_if_not_matrix_free(matrix_free,
rel_err.reverse),
_format_if_not_matrix_free(matrix_free,
rel_err.forward_reverse),
)
else:
deriv_info_line = \
deriv_line.format(
pad_name(of, max_width_of, quotes=True),
wrt_padded,
magnitude.forward,
magnitude.fd,
abs_err.forward,
rel_err.forward,
)
if not show_only_incorrect or error_string:
out_buffer.write(deriv_info_line + error_string + '\n')
if is_worst_subjac:
worst_subjac_line = deriv_info_line
else: # not compact print
fd_desc = "{}:{}".format(fd_opts['method'], fd_opts['form'])
# Magnitudes
if out_stream:
if directional:
out_buffer.write(f" {sys_name}: '{of}' wrt (d)'{wrt}'")
else:
out_buffer.write(f" {sys_name}: '{of}' wrt '{wrt}'")
if lcons and of in lcons:
out_buffer.write(" (Linear constraint)")
out_buffer.write('\n')
if do_rev or do_rev_dp:
out_buffer.write(' Forward')
else:
out_buffer.write(' Analytic')
out_buffer.write(' Magnitude: {:.6e}\n'.format(magnitude.forward))
if do_rev:
txt = ' Reverse Magnitude: {:.6e}'
if out_stream:
out_buffer.write(txt.format(magnitude.reverse) + '\n')
if out_stream:
out_buffer.write(' Fd Magnitude: {:.6e} ({})\n'.format(
magnitude.fd, fd_desc))
# Absolute Errors
if do_rev:
error_descs = ('(Jfor - Jfd) ', '(Jrev - Jfd) ', '(Jfor - Jrev)')
elif do_rev_dp:
error_descs = ('(Jfor - Jfd) ', '(Jrev - Jfd Dot Product Test) ',
'(Jrev - Jfor Dot Product Test) ')
else:
error_descs = ('(Jan - Jfd) ', )
for error, desc in zip(abs_err, error_descs):
error_str = _format_error(error, abs_error_tol)
if error_str.endswith('*'):
num_bad_jacs += 1
if out_stream:
out_buffer.write(' Absolute Error {}: {}\n'.format(desc, error_str))
if out_stream:
out_buffer.write('\n')
# Relative Errors
if do_rev:
if fd_norm == 0.:
error_descs = ('(Jfor - Jfd) / Jfor ', '(Jrev - Jfd) / Jfor ',
'(Jfor - Jrev) / Jfor')
else:
error_descs = ('(Jfor - Jfd) / Jfd ', '(Jrev - Jfd) / Jfd ',
'(Jfor - Jrev) / Jfd')
elif do_rev_dp:
if fd_norm == 0.:
error_descs = ('(Jfor - Jfd) / Jfor ',
'(Jrev - Jfd Dot Product Test) / Jfor ',
'(Jrev - Jfor Dot Product Test) / Jfor ')
else:
error_descs = ('(Jfor - Jfd) / Jfd ',
'(Jrev - Jfd Dot Product Test) / Jfd ',
'(Jrev - Jfor Dot Product Test) / Jfd ')
else:
if fd_norm == 0.:
error_descs = ('(Jan - Jfd) / Jan ', )
else:
error_descs = ('(Jan - Jfd) / Jfd ', )
for error, desc in zip(rel_err, error_descs):
error_str = _format_error(error, rel_error_tol)
if error_str.endswith('*'):
num_bad_jacs += 1
if out_stream:
out_buffer.write(' Relative Error {}: {}\n'.format(desc, error_str))
if out_stream:
if MPI and MPI.COMM_WORLD.size > 1:
out_buffer.write(' MPI Rank {}\n'.format(MPI.COMM_WORLD.rank))
out_buffer.write('\n')
# Raw Derivatives
if out_stream:
if do_rev_dp:
out_buffer.write(' Directional Forward Derivative (Jfor)\n')
else:
if not totals and matrix_free:
out_buffer.write(' Raw Forward')
else:
out_buffer.write(' Raw Analytic')
out_buffer.write(' Derivative (Jfor)\n')
out_buffer.write(str(forward) + '\n')
out_buffer.write('\n')
if not totals and matrix_free:
if out_stream:
if not directional:
if mult is not None:
reverse /= mult
out_buffer.write(' Raw Reverse Derivative (Jrev)\n')
out_buffer.write(str(reverse) + '\n')
out_buffer.write('\n')
if out_stream:
if directional:
out_buffer.write(' Directional FD Derivative (Jfd)\n')
else:
out_buffer.write(' Raw FD Derivative (Jfd)\n')
out_buffer.write(str(fd) + '\n')
out_buffer.write('\n')
if out_stream:
out_buffer.write(' -' * 30 + '\n')
# End of if compact print if/else
# End of if not suppress_output
# End of for of, wrt in sorted_keys
if not show_only_incorrect or num_bad_jacs:
if out_stream and not suppress_output:
out_stream.write(out_buffer.getvalue())
# End of for system in system_list
if not suppress_output and compact_print and not totals:
if worst_subjac:
worst_subjac_header = \
"Sub Jacobian with Largest Relative Error: {1} '{2}'".format(*worst_subjac)
out_stream.write('\n' + '#' * len(worst_subjac_header) + '\n')
out_stream.write("{}\n".format(worst_subjac_header))
out_stream.write('#' * len(worst_subjac_header) + '\n')
out_stream.write(header + '\n')
out_stream.write('-' * len(header) + '\n')
out_stream.write(worst_subjac_line + '\n')
def _format_if_not_matrix_free(matrix_free, val):
"""
Return string to represent deriv check value in compact display.
Parameters
----------
matrix_free : bool
If True, then the associated Component is matrix-free.
val : float
The deriv check value.
Returns
-------
str
String which is the actual value if matrix-free, otherwise 'n/a'
"""
if matrix_free:
return '{0:.4e}'.format(val)
else:
return pad_name('n/a')
def _format_error(error, tol):
"""
Format the error, flagging if necessary.
Parameters
----------
error : float
The absolute or relative error.
tol : float
Tolerance above which errors are flagged
Returns
-------
str
Formatted and possibly flagged error.
"""
if np.isnan(error) or error < tol:
return '{:.6e}'.format(error)
return '{:.6e} *'.format(error)
def | |
# <gh_stars>1-10
# coot-utils.py
# adapted from coot-utils.scm
#
# Copyright 2004, 2005, 2006, 2007 by <NAME>
# Copyright 2008, 2009 by <NAME>, The University of York
# Copyright 2000 by <NAME>
# Copyright 2004, 2005, 2006, 2007 by <NAME>, The University of York
# <one line to give the program's name and a brief idea of what it does.>
# Copyright (C) <year> <name of author>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
sys.path.append('/usr/local/coot/current/lib/python2.7/site-packages')
import coot
import re, string, os
# 3D annotations - a bit of a hack currently
# NOTE(review): 'global' at module level is a no-op in Python; kept as-is
# to preserve the original file byte layout.
global annotations
annotations = []
# used in Extensions -> Representation -> Ball & Stick
global default_ball_and_stick_selection
default_ball_and_stick_selection = "//A/1-2"
# for mingw debug
global have_mingw
have_mingw = False
# MSYSTEM is set by the MSYS/MinGW shells on Windows
if os.getenv("MSYSTEM"):
    have_mingw = True
global user_defined_alert_smarts
# example: user_defined_alert_smarts = [['C', 'my-user-defined alert for carbon']]
user_defined_alert_smarts = []
# not sure if the implementation of the macros will work
# 'Macro' to tidy up a setup of functions to be run with no backup
# for a particular molecule.
#
# funcs is a normal set of functions (not a thunk), here i.e. a list of
# functions with function as a list with func name and args,
# e.g.: [centre_of_mass, 0], [func_name, arg1, arg2,...],...
#
def with_no_backups(imol, *funcs):
    """Run the given function specs with backups turned off for molecule imol.

    Each element of funcs is a list [func, arg1, arg2, ...].
    The previous backup state is restored afterwards.

    BUG FIX: the original tested the undefined name 'backup_mode' when
    restoring, raising a NameError whenever backups had been on; it must
    test 'b_state' (the saved state).
    """
    b_state = backup_state(imol)
    turn_off_backup(imol)
    for f in funcs:
        func = f[0]
        args = f[1:]
        func(*args)
    if b_state == 1:
        turn_on_backup(imol)
# 'Macro' to tidy up a set of functions to be run with automatic
# accepting of the refinement
# returns the result of last function run...
#
# funcs is a normal set of functions (not a thunk), here i.e. a list of
# functions with function as a list with func name and args,
# e.g.: [centre_of_mass, 0], [func_name, arg1, arg2,...],...
#
def with_auto_accept(*funcs):
    """Run the given function specs with automatic accepting of the refinement.

    Each element of funcs is a list [func, arg1, arg2, ...]; after each call
    the refinement is accepted. The immediate-replacement state is restored
    afterwards.

    Returns the result of the last function run (None if funcs is empty —
    the original raised NameError in that case).
    """
    replace_state = refinement_immediate_replacement_state()
    set_refinement_immediate_replacement(1)
    ret = None  # robustness: defined even when no funcs are given
    for f in funcs:
        func = f[0]
        args = f[1:]
        ret = func(*args)
        accept_regularizement()
    if replace_state == 0:
        set_refinement_immediate_replacement(0)
    return ret  # returns result of last function
# 'Macro' to run funcs on an active atom
# funcs is function, active_atom specifiers and extra args
# func, args, "aa_imol", "aa_chain_id", ..., args
# or list thereof
# [[func1, extra_arg1, ..., "aa_imol", "aa_chain_id",..., extra_arg2, extra arg3, ..], [func2,...]]
# returns what? The value from the last function evaluated
#
def using_active_atom(*funcs):
    """Run function(s) on the active atom.

    funcs is either:
      * one list of function-spec lists: [[func1, ...], [func2, ...]],
      * several function-spec lists passed as separate arguments, or
      * a single flat spec: func, arg1, arg2, ...
    Placeholder strings ("aa_imol", "aa_chain_id", "aa_res_no",
    "aa_ins_code", "aa_atom_name", "aa_alt_conf") in the args are replaced
    by the corresponding fields of the active residue before each call.

    Returns the value of the last function evaluated, or None when there is
    no active residue.

    FIX: replaced Python-2-only dict.has_key() and types.ListType with
    'in'/'isinstance' (equivalent on py2, valid on py3); 'ret' is now
    defined even when the spec list is empty.
    """
    active_atom = active_residue()
    if not active_atom:
        add_status_bar_text("No residue found")
        return None
    aa_dict = {"aa_imol": active_atom[0],
               "aa_chain_id": active_atom[1],
               "aa_res_no": active_atom[2],
               "aa_ins_code": active_atom[3],
               "aa_atom_name": active_atom[4],
               "aa_alt_conf": active_atom[5]}

    def arg_to_append(item):
        # recursively substitute placeholder strings inside nested lists
        if isinstance(item, list):
            return [arg_to_append(ele) for ele in item]
        return aa_dict.get(item, item)

    if len(funcs) == 1:
        # a single argument holding a list of function specs
        ls_funcs = funcs[0]
    elif isinstance(funcs[0], list):
        # several function-spec lists as separate arguments
        ls_funcs = funcs
    else:
        # a single flat function spec; wrap it in a list
        ls_funcs = [funcs]
    ret = None
    for ele in ls_funcs:
        func = ele[0]
        args = [arg_to_append(arg) for arg in ele[1:]]
        ret = func(*args)
    return ret
# here some truely pythonic version of the macros. Should replace
# them in usage too:
# Pythonic 'Macro' to tidy up a setup of functions to be run with no backup
# for a particular molecule.
#
# use with 'with', e.g.:
#
# > with NoBackups(imol=0):
# refine_zone(imol, "A", 43, 45, "")
# accept_regularizement()
#
class NoBackups:
    """Context manager running its body with backups off for one molecule.

    use with 'with', e.g.:
    > with NoBackups(imol=0):
          refine_zone(imol, "A", 43, 45, "")
          accept_regularizement()

    The previous backup state is restored on exit.

    FIX: __enter__ now returns self, so 'with NoBackups(0) as ctx:' works.
    """
    def __init__(self, imol):
        self.imol = imol
    def __enter__(self):
        # save the current backup state so __exit__ can restore it
        self.b_state = backup_state(self.imol)
        turn_off_backup(self.imol)
        return self
    def __exit__(self, type, value, traceback):
        # re-enable backups only if they were on before
        if (self.b_state == 1):
            turn_on_backup(self.imol)
# Pythonic 'Macro' to tidy up a set of functions to be run with automatic
# accepting of the refinement.
#
# use with 'with', e.g.:
#
# >with AutoAccept():
# refine_zone(0, "A", 43, 45, "")
#
class AutoAccept:
    """
    Pythonic 'Macro' to tidy up a set of functions to be run with automatic
    accepting of the refinement.
    use with 'with', e.g.:
    > with AutoAccept():
          refine_zone(0, "A", 43, 45, "")
    """
    def __init__(self):
        # sentinel until __enter__ records the real state
        self.replace_state = -1
    def __enter__(self):
        # remember the current setting, then force immediate replacement
        self.replace_state = refinement_immediate_replacement_state()
        set_refinement_immediate_replacement(1)
    def __exit__(self, type, value, traceback):
        # accept whatever refinement is pending, then restore the setting
        accept_regularizement()
        if self.replace_state == 0:
            set_refinement_immediate_replacement(0)
class UsingActiveAtom:
    """
    Run functions on the active atom.
    use with 'with', e.g.:
    > with UsingActiveAtom() as [aa_imol, aa_chain_id, aa_res_no, aa_ins_code, aa_atom_name, aa_alt_conf]:
          refine_zone(aa_imol, aa_chain_id, aa_res_no-2, aa_res_no+2, aa_ins_code)
    """
    def __init__(self):
        # set to True by __enter__ when there is no active residue, so that
        # __exit__ can swallow the resulting error
        self.no_residue = False
        pass
    def __enter__(self):
        self.active_atom = active_residue()
        if (not self.active_atom):
            add_status_bar_text("No (active) residue found")
            self.no_residue = True
            # NOTE(review): returns None here, so the 'as [...]' unpacking
            # in the documented usage raises TypeError when no residue is
            # active; __exit__ then returns True to suppress it.
            #self.__exit__(None, "dummy", None)
        else:
            # unpack the active-atom spec for the caller
            imol = self.active_atom[0]
            chain_id = self.active_atom[1]
            res_no = self.active_atom[2]
            ins_code = self.active_atom[3]
            atom_name = self.active_atom[4]
            alt_conf = self.active_atom[5]
            return [imol, chain_id, res_no, ins_code, atom_name, alt_conf]
    def __exit__(self, type, value, traceback):
        if (self.no_residue):
            # internal calling of exit, ignore errors
            return True
        pass
# Pythonize function: return a python boolean.
#
def molecule_has_hydrogens(imol):
    """Pythonize function: return True if molecule imol has hydrogens."""
    return 1 == molecule_has_hydrogens_raw(imol)
def add_hydrogens_using_refmac(imol):
    """Add hydrogens to molecule imol by round-tripping it through refmac5."""
    stub = molecule_name_stub(imol, 0)
    # file we write for refmac, and the hydrogenated file it produces
    needs_h = os.path.join("coot-refmac", stub + '-needs-H.pdb')
    with_h = os.path.join("coot-refmac", stub + '-with-H.pdb')
    make_directory_maybe('coot-refmac')
    write_pdb_file(imol, needs_h)
    return add_hydrogens_using_refmac_inner(imol, with_h, needs_h)
def add_hydrogens_to_chain_using_refmac(imol, chain_id):
    """Add hydrogens to one chain of molecule imol via refmac5."""
    stub = molecule_name_stub(imol, 0) + '-chain-' + chain_id
    # file we write for refmac, and the hydrogenated file it produces
    needs_h = os.path.join("coot-refmac", stub + '-needs-H.pdb')
    with_h = os.path.join("coot-refmac", stub + '-with-H.pdb')
    make_directory_maybe('coot-refmac')
    write_chain_to_pdb_file(imol, chain_id, needs_h)
    return add_hydrogens_using_refmac_inner(imol, with_h, needs_h)
def add_hydrogens_using_refmac_inner(imol, in_file_name, out_file_name):
    """Run refmac5 to add hydrogens, then load them into molecule imol.

    Returns the result of add_hydrogens_from_file() on success, None when
    refmac fails (preserving the original fall-through), and False when
    reading the hydrogenated file raises.

    FIX: the bare 'except:' (which also swallowed KeyboardInterrupt) is
    narrowed to Exception, and the try block now wraps only the call that
    can raise.
    """
    status = popen_command("refmac5",
                           ['XYZIN', out_file_name, 'XYZOUT', in_file_name],
                           ['MAKE HOUT YES'],
                           'refmac-H-addition.log', 0)
    if status != 0:
        return None  # refmac failed; original returned None implicitly
    try:
        return add_hydrogens_from_file(imol, in_file_name)
    except Exception:
        return False
# set this to a function accepting two arguments (the molecule number
# and the manipulation mode) and it will be run after a model
# manipulation.
# e.g.
# def post_manipulation_script(imol, mode):
#     ... continue below
#
# The manipulation mode will be one of (MOVINGATOMS), (DELETED) or
# (MUTATED) and these can be tested with "=".
#
# e.g.
#
# if (mode == DELETED):
#     display/print "Something was deleted"
#
# False means "no hook installed"
post_manipulation_script = False
# return a boolean
#
def pre_release_qm():
    """Return True (a boolean) for a '-pre' (pre-release) build of Coot."""
    return coot_version().find("-pre") != -1
# return a list of molecule numbers (closed and open)
# The elements of the returned list need to be tested against
# is_valid_model_molecule_qm
#
def molecule_number_list():
    """Return a list of molecule numbers (maps and models).

    The elements of the returned list need to be tested against
    is_valid_model_molecule_qm.
    """
    return [mol_no for mol_no in range(coot.graphics_n_molecules())
            if (valid_map_molecule_qm(mol_no) or
                valid_model_molecule_qm(mol_no))]
def model_molecule_number_list():
    """Return the list of molecule numbers that are model molecules.

    FIX: uses a list comprehension instead of filter(), so the return
    type is a list on both Python 2 and Python 3 (py3 filter() returns a
    lazy iterator).
    """
    return [m for m in molecule_number_list() if valid_model_molecule_qm(m)]
# Test for prefix-dir (1) being a string (2) existing (3) being a
# directory (4-6) modifiable by user (ie. u+rwx). prefix_dir must be a
# string.
#
# Return True or False.
#
def directory_is_modifiable_qm(prefix_dir):
    """Test that prefix_dir is (1) a string, (2) an existing directory and
    (3) readable, writable and executable by the user (u+rwx).

    Return True or False.

    FIX: replaced the Python-2-only 'types.StringType' with isinstance()
    and flattened the five-deep if pyramid into guard clauses; os.access
    with a combined mode performs the same R/W/X checks, and isdir()
    already implies existence (F_OK).
    """
    if not isinstance(prefix_dir, str):
        return False
    if not os.path.isdir(prefix_dir):
        return False
    return os.access(prefix_dir, os.R_OK | os.W_OK | os.X_OK)
# return an absolute file-name for file-name or False
#
# def absolutify(file_name) - exists in python as os.path.abspath use that!!
# Find the most recently created file from the given glob and dir
#
# return False on no-such-file
#
def most_recently_created_file(glob_str, dir):
    """Find the most recently modified file matching glob_str in dir.

    Return False when no file matches.

    FIXES: dropped the unused 'time' import and replaced the manual
    latest-mtime loop with max(key=os.path.getmtime). (Despite the name,
    this compares modification times, as the original did.)
    """
    import glob
    files = glob.glob(os.path.join(dir, glob_str))
    if not files:
        return False
    return max(files, key=os.path.getmtime)
# Convert a residue_spec to an mmdb atom selection string.
# FIXME:: to be tested
#
def residue_spec2atom_selection_string(centre_residue_spec):
ret = "//" + centre_residue_spec[0] + \
"/" + str(centre_residue_spec[1])
return ret
def residue_atom2atom_name(ra):
    """Return the atom name of residue-atom ra, or False when ra is not a list."""
    return ra[0][0] if isinstance(ra, list) else False
def residue_atom2alt_conf(ra):
    """Return the alt-conf of residue-atom ra, or False when ra is not a list."""
    return ra[0][1] if isinstance(ra, list) else False
def residue_spec2chain_id(rs):
if not isinstance(rs, list):
return False
else:
if (len(rs) == 3):
| |
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import types
import errno
import time
import datetime
import logging
import backends as backend
from Crypto.Hash import SHA256 as HashAlg
from Crypto.PublicKey import RSA as CryptoKey
from Crypto import Random
from Crypto.Signature import PKCS1_PSS as CryptoSigner
import traceback
# format string for a shard key name: base name + shard index
SHARD_KEY_TEMPLATE = 'shard-{}-{:d}'
# aliases for types
Model = backend.Model
Integer = backend.Integer
Float = backend.Float
String = backend.String
Text = backend.Text
Blob = backend.Blob
Key = backend.Key
Boolean = backend.Boolean
Json = backend.Json
# NOTE(review): 'Blob' is aliased twice (here and above); harmless but redundant
Blob = backend.Blob
Cursor = backend.Cursor # NOTE: need to have a constructor that takes urlsafe= as an argument for deserialization, and needs a urlsafe() method for serialization
Computed = backend.Computed
Pickled = backend.Pickled
# aliases for keys
make_key = backend.make_key
# aliases for asynchronous operations
FutureWrapper = backend.FutureWrapper
FutureQueryWrapper = backend.FutureQueryWrapper
wait_futures = backend.wait_futures
deferred = backend.deferred
concurrent = backend.concurrent
concurrent_return = backend.concurrent_return
get_multi_async = backend.get_multi_async
put_multi_async = backend.put_multi_async
# synchronous operations
get_multi = backend.get_multi
put_multi = backend.put_multi
delete_multi = backend.delete_multi
# aliases for memcache
memcache = backend.memcache
# aliases for transaction
transaction = backend.transaction
transaction_async = backend.transaction_async
transactional = backend.transactional
# aliases for query predicates
opAND = backend.opAND
opOR = backend.opOR
# toplevel decorator
toplevel = backend.toplevel
# aliases for common exceptions
RequestDeadlineExceededError = backend.RequestDeadlineExceededError
APIRequestDeadlineExceededError = backend.APIRequestDeadlineExceededError
URLRequestDeadlineExceededError = backend.URLRequestDeadlineExceededError
TransactionFailedError = backend.TransactionFailedError
def clock_gettime():
    """Return the current UTC time as a (seconds, nanoseconds) pair since
    the Unix epoch.

    BUG FIX: the original passed utcnow().timetuple() to time.mktime(),
    which interprets the tuple as *local* time and skews the result by the
    machine's UTC offset. calendar.timegm() is the correct inverse for a
    UTC timetuple.
    """
    import calendar
    now = datetime.datetime.utcnow()
    now_sec = calendar.timegm(now.timetuple())
    # sub-second part: microseconds -> nanoseconds
    now_nsec = int(now.microsecond * 1e3)
    return (now_sec, now_nsec)
def get_time():
    """Return the current time from clock_gettime() as a float of seconds."""
    sec, nsec = clock_gettime()
    return float(sec) + float(nsec) / 1e9
class Object( Model ):
# list of names of attributes of required attributes
required_attrs = []
# list of names of attributes that will be used to generate a primary key
key_attrs = []
# list of names of attributes that can be read, period
read_attrs = []
# list of names of attributes that can be read, but only with the object's API key
read_attrs_api_required = []
# list of names of attributes that can be read, but only by the administrator
read_attrs_admin_required = []
# list of names of attributes that can be written, period
write_attrs = []
# list of names of attributes that can be written, but only with the object's API key
write_attrs_api_required = []
# list of names of attributes that can be written, but only by the administrator
write_attrs_admin_required = []
# dict of functions that generate default values
# attribute name => lambda object_class, attribute_dict => default_value
default_values = {}
# dict of functions that validate fields
# attribute name => lambda object_class, attribute_value => true/false
validators = {}
# class of an Object that contains sharded data
shard_class = None
# fields in this Object stored on a shard.
shard_fields = []
# dict of functions that read sharded fields
# sharded attribute name => lambda instance, shard_objects => attribute_value
shard_readers = {}
# dict of functions that write shard fields
# sharded attribute name => lambda insance => attribute value
shard_writers = {}
# instance of a shard that will be populated and written
write_shard = None
# for RPC
key_type = None
    @classmethod
    def shard_key_name( cls, name, idx ):
        """
        Generate the datastore key name for shard number *idx* of the
        entity whose base key name is *name* (see SHARD_KEY_TEMPLATE).
        """
        return SHARD_KEY_TEMPLATE.format( name, idx )
@classmethod
def get_shard_key( cls, name, idx ):
key_str = cls.shard_key_name( name, idx )
return make_key( cls.shard_class, key_str )
@classmethod
def get_shard_keys(cls, num_shards, key_name ):
"""
Get keys for all shards, given the number of shards.
The base name will be generated from the make_key_name() method, to which the given **attrs dict will be passed.
"""
shard_key_strings = [cls.shard_key_name( key_name, index ) for index in range(num_shards)]
return [make_key(cls.shard_class, shard_key_string) for shard_key_string in shard_key_strings]
def populate_from_shards(self, shards):
"""
Populate the base object using a list of shards.
This will use the methods to fill the fields indicated by the base instance's shard_readers dict.
This method throws an exception when passed a list of Nones
"""
if shards == None or len(shards) == 0:
return
shards_existing = filter( lambda x: x is not None, shards )
if len(shards_existing) == 0:
raise Exception("No valid shards for %s" % self)
# populate an instance with value from shards
for (shard_field, shard_reader) in self.shard_readers.items():
val = shard_reader( self, shards_existing )
setattr( self, shard_field, val )
def populate_base(self, **attrs):
"""
Populate the base instance of an object.
Specifically, populate fields in the object that are NOT in the shard_fields list.
"""
base_attrs = {}
for (key, value) in attrs.items():
if key not in self.shard_fields:
base_attrs[key] = value
super( Object, self ).populate( **base_attrs )
for (key, value) in attrs.items():
if key not in self._properties.keys():
setattr( self, key, value )
@classmethod
def get_shard_attrs( cls, inst, **attrs ):
"""
Generate and return a dict of shard attributes and values, given an **attrs dictionary.
The resulting dictionary will contain a key,value pair for each shard field, indicated by the base object instance's shard_fields list.
The key,value pairings will be taken first from **attrs. If a key does not have a value, it will be populated from the base object
instance's shard_writers dictionary.
"""
shard_attrs = {}
for (shard_field, shard_value) in attrs.items():
if shard_field in cls.shard_fields:
shard_attrs[shard_field] = shard_value
for (shard_field, shard_writer) in cls.shard_writers.items():
if shard_attrs.get( shard_field, None ) == None:
shard_attrs[shard_field] = shard_writer( inst )
return shard_attrs
@classmethod
def populate_shard_inst(cls, inst, shard_inst, **attrs):
"""
Populate an instance of a shard, given an instance of the base object and an instance of its associated shard class,
with the given set of attributes. Required attributes (from the base object's shard_fields list) that are not present
in **attrs will be generated using the indicated method in the base object's shard_writers dictionary.
"""
shard_attrs = cls.get_shard_attrs( inst, **attrs )
shard_inst.populate( -1, **shard_attrs )
def populate_shard(self, num_shards, **attrs):
    """Populate this object's shard and store it under ``self.write_shard``.

    Does nothing when the object declares no shard class or when
    ``num_shards <= 0``.  Missing shard attributes are generated via
    ``get_shard_attrs``.  A new shard entity is created under a key chosen
    uniformly at random from the ``num_shards`` available shard names;
    an already-existing ``write_shard`` is re-populated in place.

    Arguments:
        num_shards (int): number of shards to spread writes over.
        **attrs: attribute values; must include ``self.key_attrs`` entries
            when a new shard key has to be derived.
    """
    # FIX: identity comparisons with None ('is None', not '== None').
    if self.shard_class is None or num_shards <= 0:
        return
    shard_attrs = self.get_shard_attrs(self, **attrs)
    if self.write_shard is None:
        # Derive the shard key name from this object's key attributes and
        # a uniformly random shard index.
        key_kwargs = {k: attrs.get(k) for k in self.key_attrs}
        shard_name = self.shard_key_name(self.make_key_name(**key_kwargs),
                                         random.randint(0, num_shards - 1))
        shard_key = make_key(self.shard_class, shard_name)
        self.write_shard = self.shard_class(key=shard_key, **shard_attrs)
    else:
        self.write_shard.populate(num_shards, **shard_attrs)
def populate(self, num_shards, **attrs):
    """Populate this object's base fields and its shard.

    The base (non-shard) fields are filled first; a shard instance is then
    generated — its index picked at random from [0, num_shards] — and kept
    under ``self.write_shard``.
    """
    self.populate_base(**attrs)
    self.populate_shard(num_shards, **attrs)
def put_shard(self, **ctx_opts):
    """Save this object's shard instance to the data store.

    Arguments:
        **ctx_opts: context options forwarded to the shard's ``put()``.

    Returns:
        The key returned by the shard's ``put()`` call.

    Raises:
        Exception: if ``populate_shard()`` has not been called yet.
    """
    # FIX: 'is None' rather than '== None' (PEP 8 identity comparison).
    if self.write_shard is None:
        raise Exception(
            "No shard information given. Call populate_shard() first!")
    return self.write_shard.put(**ctx_opts)
def put_shard_async(self, **ctx_opts):
    """Asynchronously save this object's shard instance to the data store.

    Arguments:
        **ctx_opts: context options forwarded to the shard's ``put_async()``.

    Returns:
        The future returned by the shard's ``put_async()`` call.

    Raises:
        Exception: if ``populate_shard()`` has not been called yet.
    """
    # FIX: 'is None' rather than '== None' (PEP 8 identity comparison).
    if self.write_shard is None:
        raise Exception(
            "No shard information given. Call populate_shard() first!")
    return self.write_shard.put_async(**ctx_opts)
@classmethod
def cache_key_name(cls, **attrs):
    """Return the cache key name for the base object with these attributes."""
    return "cache: {}".format(cls.make_key_name(**attrs))
@classmethod
def cache_shard_key_name(cls, **attrs):
    """Return the cache key name for the object's shard with these attributes."""
    return "shard: {}".format(cls.make_key_name(**attrs))
@classmethod
def cache_listing_key_name( cls, | |
from esahub import scihub, utils, checksum, check, main
import unittest
import contextlib
import logging
import re
import datetime as DT
import pytz
import os
import sys
import subprocess
from shapely.wkt import loads as wkt_loads
from esahub.tests import config as test_config
from esahub import config
logger = logging.getLogger('esahub')
# True when running under Python 2 (subprocess output then needs no .decode()).
PY2 = sys.version_info < (3, 0)
# Search query matching products only a few hundred KB in size; keeps the
# download tests fast.
SMALL_SIZE_QUERY = 'size: ???.* KB'
# Python 2's unittest lacks TestCase.subTest; provide a no-op stand-in so the
# test methods below can call self.subTest() unconditionally.
if hasattr(unittest.TestCase, 'subTest'):
    class TestCase(unittest.TestCase):
        pass
else:
    class TestCase(unittest.TestCase):
        @contextlib.contextmanager
        def subTest(self, msg='', **params):
            """Mock subTest method so no exception is raised under Python2."""
            # Log the sub-test parameters so failures are still attributable.
            utils.eprint('subTest:', msg, params)
            yield
            return
# -----------------------------------------------------------------------------
# TEST SETUP
# -----------------------------------------------------------------------------
def setUpModule():
    """Install the test configuration and prepare test fixtures once per module."""
    test_config.set_test_config()
    test_config.prepare()
def tearDownModule():
    """Tear down whatever test_config.prepare() created (see test_config.cleanup)."""
    test_config.cleanup()
# -----------------------------------------------------------------------------
# SCIHUB
# -----------------------------------------------------------------------------
class ScihubTestCase(TestCase):
    """Tests for the low-level helpers in ``esahub.scihub``.

    NOTE(review): most tests here query the live SciHub servers listed in the
    test configuration, so they require network access and valid credentials —
    confirm before running offline.
    """

    @classmethod
    def setUpClass(cls):
        # Point the global configuration at the test servers/credentials.
        test_config.set_test_config()

    # def setUp(self):

    def test_servers(self):
        """Every available server must answer a wildcard search with HTTP 200."""
        for name in scihub._get_available_servers():
            cfg = config.CONFIG['SERVERS'][name]
            with self.subTest(server_name=name):
                url = '{}/search?q=*:*'.format(cfg['host'])
                response = scihub.get_response(url)
                #
                # Assert that the HTML response has status code 200 (OK)
                #
                self.assertEqual(response.status, 200)

    # The following are placeholders for private helpers not yet covered by
    # dedicated tests.

    def test__generate_next_url(self):
        # _generate_next_url(url, total=None)
        pass

    def test__parse_page(self):
        # _parse_page(url, first=False)
        pass

    def test__get_file_list_from_url(self):
        # _get_file_list_from_url(url, limit=None)
        pass

    def test__callback(self):
        # _callback(result)
        pass

    def test__build_query(self):
        # _build_query(query={})
        pass

    def test__build_url(self):
        # _build_url(query, server)
        pass

    def test__download(self):
        # _download(url, destination, quiet=None, queue=None)
        pass

    def test__get_file_list_wrapper(self):
        # _get_file_list_wrapper(url)
        pass

    def test__ping_single(self):
        # _ping_single(servername)
        pass

    def test__auto_detect_server_from_query(self):
        """Queries must be routed to the server configured for each satellite."""
        queries = [
            # (query, server)
            ({'mission': 'Sentinel-1'},
             config.CONFIG['SATELLITES']['S1A']['source']),
            ({'mission': 'Sentinel-2'},
             config.CONFIG['SATELLITES']['S2A']['source']),
            ({'mission': 'Sentinel-3'},
             config.CONFIG['SATELLITES']['S3A']['source']),
            ({'satellite': 'S1A'},
             config.CONFIG['SATELLITES']['S1A']['source']),
            ({'satellite': 'S3A'},
             config.CONFIG['SATELLITES']['S3A']['source']),
            ({'satellite': 'S2B'},
             config.CONFIG['SATELLITES']['S2B']['source']),
            ({'identifier': "S1A_IW_OCN__2SDV_20160924T181320_"
                            "20160924T181345_013198_014FDF_6692.zip"},
             config.CONFIG['SATELLITES']['S1A']['source'])
        ]
        for query, server in queries:
            with self.subTest(query=query):
                self.assertEqual(
                    scihub._auto_detect_server_from_query(query), server
                )

    def test__uuid_from_identifier(self):
        """UUID resolved from a product title must match the search result's uuid."""
        products = scihub.search({}, limit=1)
        for product in products:
            with self.subTest(product=product):
                self.assertEqual(
                    scihub.block(scihub._uuid_from_identifier,
                                 product['title']),
                    product['uuid']
                )

    # def test__download_url_from_identifier(self):
    #     # _download_url_from_identifier(identifier)
    #     pass
    # def test__checksum_url_from_identifier(self):
    #     # _checksum_url_from_identifier(identifier)
    #     pass
    # def test__preview_url_from_identifier(self):
    #     # _preview_url_from_identifier(identifier)
    #     pass
    # def test__download_url_from_uuid(self):
    #     # _download_url_from_uuid(uuid, host=None)
    #     pass
    # def test__checksum_url_from_uuid(self):
    #     # _checksum_url_from_uuid(uuid, host=None)
    #     pass
    # def test__preview_url_from_uuid(self):
    #     # _preview_url_from_uuid(uuid, host=None)
    #     pass

    def test_get_response(self):
        """get_response() must return status 200 for a wildcard query on each server."""
        for name in scihub._get_available_servers():
            with self.subTest(server_name=name):
                response = scihub.get_response(
                    scihub._build_url({'query': '*:*'}, name)
                )
                self.assertEqual(response.status, 200)

    def test_md5_from_file(self):
        """Remote MD5 reported by SciHub must equal the locally computed MD5."""
        for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']):
            with self.subTest(file=f):
                #
                # Assert that the md5 sum computed from the local file is equal
                # to the md5 sum obtained from the remote server.
                #
                try:
                    remote_md5 = scihub.md5(f)
                    self.assertEqual(
                        checksum.md5(f), remote_md5
                    )
                except Exception as e:
                    self.fail('Remote MD5 could not be obtained: {}'.format(e))

    def test_exists_true(self):
        """exists() must be True for a product just returned by a search."""
        existing = scihub.search({}, limit=1)
        for e in existing:
            with self.subTest(product=e['filename']):
                self.assertTrue(scihub.exists(e['filename']))

    def test_exists_false(self):
        """exists() must be False for a made-up product name."""
        not_existing = 'this_is_not_on_scihub'
        self.assertFalse(scihub.exists(not_existing))
# -----------------------------------------------------------------------------
# SCIHUB SEARCH
# -----------------------------------------------------------------------------
class ScihubSearchTestCase(TestCase):
    """Tests for the search functionality of ``esahub.scihub``.

    NOTE(review): runs live queries against the configured hub servers —
    network access required.
    """

    @classmethod
    def setUpClass(cls):
        test_config.set_test_config()

    def test_query_entries(self):
        """A result page must contain exactly the configured number of entries."""
        query = {'mission': 'Sentinel-3'}
        server = scihub._auto_detect_server_from_query(query,
                                                       available_only=True)[0]
        url = scihub._build_url(query, server)
        html = scihub.resolve(url)
        #
        # Assert that the number of entries found on the page matches the
        # number of entries requested per page.
        #
        self.assertEqual(html.count('<entry>'),
                         config.CONFIG['GENERAL']['ENTRIES'])

    def test_orbit_query(self):
        """Orbit search strings must yield products with the matching direction."""
        for search_str, orbit in [
            ('ASC', 'ASCENDING'),
            ('DESC', 'DESCENDING')
        ]:
            query = {'orbit': search_str}
            result = scihub.search(query, limit=20)
            for prod in result:
                self.assertEqual(prod['orbit_direction'], orbit)

    def test_id_query(self):
        """Searching by product title must return exactly that one product."""
        prod = scihub.search({}, limit=5)[-1]
        query = {'id': prod['title']}
        result = scihub.search(query)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0], prod)

    def test_queries(self):
        """Mission queries must succeed and honor the per-page entry count."""
        queries = [
            # (name, query)
            ('S3', {'mission': 'Sentinel-3'}),
        ]
        for name, q in queries:
            with self.subTest(name=name):
                server = scihub._auto_detect_server_from_query(
                    q, available_only=True)[0]
                url = scihub._build_url(q, server=server)
                response = scihub.get_response(url)
                #
                # Assert that queries for each mission return a
                # status code 200 (OK)
                #
                self.assertEqual(response.status, 200)
        with self.subTest('count entries'):
            q = {'mission': 'Sentinel-3'}
            server = scihub._auto_detect_server_from_query(
                q, available_only=True)[0]
            url = scihub._build_url(q, server=server)
            html = scihub.resolve(url)
            #
            # Assert that the number of entries found on the page matches the
            # number of entries requested per page.
            #
            self.assertEqual(html.count('<entry>'),
                             config.CONFIG['GENERAL']['ENTRIES'])

    def test_temporal_queries(self):
        """'yesterday'/'today' time filters must bound the ingestion dates."""
        with self.subTest('yesterday'):
            file_list = scihub.search({'mission': 'Sentinel-3',
                                       'time': 'yesterday'},
                                      limit=200)
            yesterday = DT.datetime.now(pytz.utc)-DT.timedelta(1)
            today = DT.datetime.now(pytz.utc)
            start = DT.datetime(yesterday.year, yesterday.month, yesterday.day,
                                tzinfo=pytz.utc)
            end = DT.datetime(today.year, today.month, today.day,
                              tzinfo=pytz.utc)
            for f in file_list:
                #
                # Assert that the ingestiondate of each entry was yesterday.
                #
                self.assertGreaterEqual(f['ingestiondate'], start)
                self.assertLessEqual(f['ingestiondate'], end)
        with self.subTest('today'):
            file_list = scihub.search({'mission': 'Sentinel-3',
                                       'time': 'today'},
                                      limit=200)
            today = DT.datetime.now(pytz.utc)
            start = DT.datetime(today.year, today.month, today.day,
                                tzinfo=pytz.utc)
            for f in file_list:
                #
                # Assert that the ingestiondate of each entry is today.
                #
                self.assertGreaterEqual(f['ingestiondate'], start)

    #
    # NOTE: This test presently fails because apparently,
    # SciHub's `intersects` parameter does not work reliably.
    #
    def test_spatial_queries(self):
        """Products returned for a location must lie close to that location."""
        loc, ref_coords = next(iter(config.CONFIG['LOCATIONS'].items()))
        with self.subTest(location=loc):
            file_list = scihub.search(
                {'location': [loc], 'time': 'to 2017-09-01T00:00:00Z'},
                server='S3', limit=20)
            for f in file_list:
                with self.subTest(product=f['filename']):
                    #
                    # Assert that the products indeed intersect the
                    # requested location.
                    #
                    distance = wkt_loads(f['coords']).distance(
                        wkt_loads(ref_coords))
                    utils.eprint('Distance: {}'.format(distance))
                    self.assertLessEqual(distance, 0.5)

    def test_get_file_list(self):
        """search() must honor the limit and return complete entries."""
        q = {'mission': 'Sentinel-3'}
        limit = 107
        file_list = scihub.search(q, limit=limit)
        #
        # Assert that only `limit` entries are returned.
        #
        self.assertEqual(limit, len(file_list))
        for f in file_list:
            #
            # Assert that each entry contains the attributes `url`, `uuid` and
            # `filename`.
            #
            self.assertIn('url', f)
            self.assertIn('uuid', f)
            self.assertIn('filename', f)
# -----------------------------------------------------------------------------
# SCIHUB DOWNLOAD
# -----------------------------------------------------------------------------
class ScihubDownloadTestCase(TestCase):
    """Download tests; restricted to very small products to keep runtime low.

    NOTE(review): downloads real data from the hub — network access required.
    """

    @classmethod
    def setUpClass(cls):
        test_config.set_test_config()

    def setUp(self):
        # Start every test from an empty data directory.
        test_config.clear_test_data()

    def tearDown(self):
        test_config.clear_test_data()

    def test_download(self):
        """download() must return the path of an existing local file."""
        file_list = scihub.search({'query': SMALL_SIZE_QUERY}, limit=1)
        for f in file_list:
            with self.subTest(url=f['url']):
                result = scihub.download(f)
                #
                # Assert that the download didn't fail and that
                # the returned file path exists.
                #
                self.assertNotEqual(result, False)
                self.assertTrue(os.path.isfile(result))

    def test_download_many(self):
        """Batch download must fetch every product, each passing a health check."""
        file_list = scihub.search({'query': SMALL_SIZE_QUERY},
                                  limit=2)
        scihub.download(file_list)
        #
        # Assert that all downloads were successful.
        #
        local_files = utils.ls(config.CONFIG['GENERAL']['DATA_DIR'])
        local_files_identifiers = [os.path.splitext(os.path.split(_)[1])[0]
                                   for _ in local_files]
        for f in file_list:
            self.assertIn(f['filename'], local_files_identifiers)
        for f in local_files:
            with self.subTest(file=f):
                _, healthy, msg = check.check_file(f, mode='file')
                utils.eprint(msg)
                self.assertTrue(healthy)

    def test_redownload(self):
        """redownload() must replace corrupt local files with healthy copies."""
        test_config.copy_corrupt_data()
        local_files = utils.ls(config.CONFIG['GENERAL']['DATA_DIR'])
        scihub.redownload(local_files)
        new_local_files = utils.ls(config.CONFIG['GENERAL']['DATA_DIR'])
        # Same set of paths must exist before and after redownloading.
        self.assertEqual(set(local_files), set(new_local_files))
        for f in local_files:
            with self.subTest(file=f):
                _, healthy, msg = check.check_file(f, mode='file')
                utils.eprint(msg)
                self.assertTrue(healthy)
# -----------------------------------------------------------------------------
# CHECK
# -----------------------------------------------------------------------------
class CheckTestCase(TestCase):
    """Tests for ``check.check_file`` against healthy and corrupt test data."""

    @classmethod
    def setUpClass(cls):
        test_config.set_test_config()

    def setUp(self):
        test_config.copy_test_data()

    def tearDown(self):
        test_config.clear_test_data()

    def _assert_files_health(self, mode, expect_healthy):
        """Run check.check_file(mode=...) on every local data file and assert
        that the reported health flag matches `expect_healthy`.

        Factored out of four previously copy-pasted test bodies; behavior of
        each test is unchanged.
        """
        assertion = self.assertTrue if expect_healthy else self.assertFalse
        for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']):
            with self.subTest(file=f):
                try:
                    file_path, healthy, message = \
                        check.check_file(f, mode=mode)
                    assertion(healthy)
                except Exception as e:
                    self.fail('File check failed: {}'.format(e))

    def test_check_file_md5_healthy(self):
        """Healthy files must check out in `md5` mode."""
        self._assert_files_health('md5', expect_healthy=True)

    def test_check_file_zip_healthy(self):
        """Healthy files must check out in `file` mode."""
        self._assert_files_health('file', expect_healthy=True)

    def test_check_file_md5_corrupt(self):
        """Corrupt files must be detected in `md5` mode."""
        test_config.clear_test_data()
        test_config.copy_corrupt_data()
        self._assert_files_health('md5', expect_healthy=False)

    def test_check_file_zip_corrupt(self):
        """Corrupt files must be detected in `file` mode."""
        test_config.clear_test_data()
        test_config.copy_corrupt_data()
        self._assert_files_health('file', expect_healthy=False)
# -----------------------------------------------------------------------------
# CHECKSUM
# -----------------------------------------------------------------------------
class ChecksumTestCase(TestCase):
    """Tests for the ``checksum`` module (md5 and etag computation)."""

    @classmethod
    def setUpClass(cls):
        test_config.set_test_config()

    def setUp(self):
        test_config.copy_test_data()

    def tearDown(self):
        test_config.clear_test_data()

    def test_md5(self):
        """checksum.md5() must agree with the system md5/md5sum tool."""
        for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']):
            with self.subTest(file=f):
                #
                # Assert that the md5 sum returned by checksum.md5() is
                # equal to the md5 sum returned by bash md5 or md5sum tool.
                #
                bash_md5 = None
                for exe in ['md5', 'md5sum']:
                    if utils._which(exe) is not None:
                        bash_output = subprocess.check_output([exe, f])
                        if not PY2:
                            bash_output = bash_output.decode()
                        bash_md5 = re.search('[a-zA-Z0-9]{32}',
                                             bash_output).group()
                        break
                if bash_md5 is None:
                    # BUG FIX: previously, if neither `md5` nor `md5sum` was
                    # installed, `bash_md5` was never assigned and the test
                    # died with a NameError instead of being skipped.
                    self.skipTest('no md5/md5sum executable available')
                self.assertEqual(
                    checksum.md5(f), bash_md5
                )

    def test_etag_small_files(self):
        """For files smaller than the chunk size, etag must equal plain md5."""
        for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']):
            with self.subTest(file=f):
                #
                # Assert that the computed etag is equal to the md5
                # checksum for files smaller than the chunksize.
                #
                size_mb = max(10, int(os.path.getsize(f) / 1024**2))
                self.assertEqual(
                    checksum.md5(f), checksum.etag(f, chunksize=2 * size_mb)
                )

    # def test_etag_large_files(self):
    #     pass
# -----------------------------------------------------------------------------
# MAIN
# -----------------------------------------------------------------------------
class MainTestCase(TestCase):
@classmethod
def setUpClass(cls):
test_config.set_test_config()
cls.check_mode = config.CONFIG['GENERAL']['CHECK_MODE']
config.CONFIG['GENERAL']['CHECK_MODE'] = 'file'
@classmethod
def tearDownClass(cls):
config.CONFIG['GENERAL']['CHECK_MODE'] = cls.check_mode
def setUp(self):
test_config.copy_test_data()
def tearDown(self):
test_config.clear_all()
def test_ls(self):
q = {'time': 'today', 'satellite': 'S3A',
'location': ['Ireland_Mace_Head']}
files = scihub.search(q)
result = main.ls(q)
self.assertEqual(len(result), len(files))
def test_get(self):
test_config.clear_test_data()
q = {'satellite': 'S3A', 'query': SMALL_SIZE_QUERY}
files = scihub.search(q, limit=2)
main.get(q, limit=2)
for f in files:
ext = '.zip'
with self.subTest(product=f['filename']):
self.assertTrue(
os.path.isfile(os.path.join(
config.CONFIG['GENERAL']['DATA_DIR'],
f['filename']) + ext)
)
def test_doctor(self):
test_config.copy_corrupt_data()
corrupt_files = utils.ls(test_config.TEST_DATA_DIR_CORRUPT,
path=False)
# healthy_files = utils.ls(test_config.TEST_DATA_DIR_ORIGINAL,
# path=False)
result = main.doctor()
bad_files = [os.path.split(status[0])[1]
for status in result if status[1] is False]
#
# Assert that the number | |
k_ET + k_D
d_prob_ph_em = k_emission * δt # prob. of emission in δt
if d_prob_ph_em > α:
d_prob_ph_em = 1 - exp(-d_prob_ph_em)
p = rg.rand()
if d_prob_ph_em >= p:
break # break out of the loop when the photon is emitted
nanotime += δt
# Update R following the OU process
N = rg.randn()
R = ou_single_step_cy(R, δt, N, R_mean, R_sigma, τ_relax)
# photon emitted, let's decide if it is from D or A
p_DA = p / d_prob_ph_em # equivalent to rand(), but faster
prob_A_em = k_ET / k_emission
if prob_A_em >= p_DA:
A_ph[iph] = True
# time of D de-excitation by photon emission or energy transfer to A
t0 = t + nanotime
# save D-A distance at emission time
R_ph[iph] = R
# save time of emission relative to the excitation time `t`
T_ph[iph] = nanotime
return A_ph, R_ph, T_ph
def sim_DA_from_timestamps2(timestamps, dt, k_D, R0, R_mean, R_sigma,
                            τ_relax, rg, chunk_size=1000):
    """
    Recoloring using a fixed δt and with random number caching.

    For each photon timestamp, the D-A distance R is propagated (as an
    Ornstein–Uhlenbeck process) from the previous de-excitation time to the
    excitation time; then the donor de-excitation is simulated in fixed
    time steps `dt` until a photon is emitted.

    Arguments:
        timestamps (array): photon macrotimes.
        dt (float): fixed time step of the de-excitation loop.
        k_D (float): donor de-excitation rate in absence of the acceptor.
        R0 (float): Förster radius.
        R_mean (float): mean D-A distance of the relaxed OU process.
        R_sigma (float): standard deviation of the relaxed OU process.
        τ_relax (float): relaxation time of the OU process.
        rg: random generator exposing `rand`/`randn` (e.g. numpy RandomState).
        chunk_size (int): size of the cached blocks of random numbers;
            drawing in chunks amortizes the per-call generator overhead.

    Returns:
        Tuple (A_ph, R_ph, T_ph): boolean flags for A-emitted photons, the
        D-A distance at de-excitation, and the de-excitation nanotime.
    """
    # Initial distance drawn from the relaxed (stationary) OU distribution.
    R = rg.randn() * R_sigma + R_mean
    t0 = 0
    nanotime = 0
    # Array flagging photons as A (1) or D (0) emitted
    A_ph = np.zeros(timestamps.size, dtype=bool)
    # Instantaneous D-A distance at D de-excitation time
    R_ph = np.zeros(timestamps.size, dtype=np.float64)
    # Time of D de-excitation relative to the last timestamp
    T_ph = np.zeros(timestamps.size, dtype=np.float64)
    iN = chunk_size - 1  # forces drawing a fresh random chunk on first use
    for iph, t in enumerate(timestamps):
        # each cycle starts with a new photon timestamp `t`
        # excitation time is `t`, emission time is `t + nanotime`
        delta_t = t - t0
        if delta_t < 0:
            # avoid negative delta_t possible when when two photons have
            # the same macrotime
            delta_t = 0
            t = t0
        # Compute the D-A distance at the "excitation time"
        iN += 1
        if iN == chunk_size:
            # Refill the cached chunks of Gaussian (Na) and uniform (Pa)
            # random numbers.
            #print(f'Outer rand iph:{iph}, iN:{iN}', flush=True)
            Na = memoryview(rg.randn(chunk_size))
            Pa = memoryview(rg.rand(chunk_size))
            iN = 0
        N = Na[iN]
        p = Pa[iN]
        R = ou_single_step_cy(R, delta_t, N, R_mean, R_sigma, τ_relax)
        nanotime = 0
        # loop through D-A diffusion steps with a fixed time-step dt
        # until D de-excitation by photon emission or energy transfer to A
        # NOTE(review): `k_emission * dt` is the first-order approximation of
        # the emission probability 1 - exp(-k_emission*dt) and is not clamped
        # here (unlike the adaptive variant); dt must be small enough that
        # k_emission*dt << 1 — TODO confirm with caller.
        while True:
            k_ET = k_D * (R0 / R)**6  # energy-transfer rate at distance R
            k_emission = k_ET + k_D
            d_prob_ph_em = k_emission * dt  # prob. of emission in dt
            if d_prob_ph_em >= p:
                break  # break out of the loop when the photon is emitted
            nanotime += dt
            iN += 1
            if iN == chunk_size:
                Na = memoryview(rg.randn(chunk_size))
                Pa = memoryview(rg.rand(chunk_size))
                iN = 0
            N = Na[iN]
            p = Pa[iN]
            R = ou_single_step_cy(R, dt, N, R_mean, R_sigma, τ_relax)
        # photon emitted, let's decide if it is from D or A
        p_DA = p / d_prob_ph_em  # equivalent to rand(), but faster
        prob_A_em = k_ET / (k_ET + k_D)
        if prob_A_em >= p_DA:
            A_ph[iph] = True
        # time of D de-excitation by photon emission or energy transfer to A
        t0 = t + nanotime
        # save D-A distance at emission time
        R_ph[iph] = R
        # save time of emission relative to the excitation time `t`
        T_ph[iph] = nanotime
    return A_ph, R_ph, T_ph
def sim_DA_from_timestamps2_p(timestamps, dt_max, k_D, R0, R_mean, R_sigma,
                              tau_relax, rg, chunk_size=1000,
                              alpha=0.05, ndt=10):
    """
    Recoloring using adaptive dt and with random number caching.

    The adaptive dt is chosen so that `k_emission * dt < 0.05`,
    so that k_emission * dt is a good approximation for the exponential CDF
    (1 - exp(-k_emission dt)).

    Arguments:
        timestamps (array): photon macrotimes.
        dt_max (float): upper bound for the adaptive time step.
        k_D (float): donor de-excitation rate in absence of the acceptor.
        R0 (float): Förster radius.
        R_mean (float): mean D-A distance of the relaxed OU process.
        R_sigma (float): standard deviation of the relaxed OU process.
        tau_relax (float): relaxation time of the OU process.
        rg: random generator exposing `rand`/`randn` (e.g. numpy RandomState).
        chunk_size (int): size of the cached blocks of random numbers.
        alpha (float): max allowed emission probability per step.
        ndt (int): minimum number of time steps per OU relaxation time.

    Returns:
        Tuple (A_ph, R_ph, T_ph): boolean flags for A-emitted photons, the
        D-A distance at de-excitation, and the de-excitation nanotime.
    """
    # Guarantee at least `ndt` integration steps per OU relaxation time.
    if tau_relax < ndt * dt_max:
        dt_max = tau_relax / ndt
        print(f'WARNING: Reducing dt_max to {dt_max:g} '
              f'[tau_relax = {tau_relax}]')
    # Initial distance drawn from the relaxed (stationary) OU distribution.
    R = rg.randn() * R_sigma + R_mean
    t0 = 0
    nanotime = 0
    # Array flagging photons as A (1) or D (0) emitted
    A_ph = np.zeros(timestamps.size, dtype=bool)
    # Instantaneous D-A distance at D de-excitation time
    R_ph = np.zeros(timestamps.size, dtype=np.float64)
    # Time of D de-excitation relative to the last timestamp
    T_ph = np.zeros(timestamps.size, dtype=np.float64)
    iN = chunk_size - 1  # value to get the first chunk of random numbers
    for iph, t in enumerate(timestamps):
        # each cycle starts with a new photon timestamp `t`
        # excitation time is `t`, emission time is `t + nanotime`
        delta_t = t - t0
        if delta_t < 0:
            # avoid negative delta_t possible when when two photons have
            # the same macrotime
            delta_t = 0
            t = t0
        # Compute the D-A distance at the "excitation time"
        iN += 1
        if iN == chunk_size:
            # Refill the cached chunks of Gaussian (Na) and uniform (Pa)
            # random numbers.
            Na = memoryview(rg.randn(chunk_size))
            Pa = memoryview(rg.rand(chunk_size))
            iN = 0
        N = Na[iN]
        p = Pa[iN]
        R = ou_single_step_cy(R, delta_t, N, R_mean, R_sigma, tau_relax)
        nanotime = 0
        # loop through D-A diffusion steps with a fixed time-step dt
        # until D de-excitation by photon emission or energy transfer to A
        while True:
            k_ET = k_D * (R0 / R)**6  # energy-transfer rate at distance R
            k_emission = k_ET + k_D
            # Adaptive step: keep per-step emission probability below alpha.
            dt = min(alpha / k_emission, dt_max)
            d_prob_ph_em = k_emission * dt  # prob. of emission in dt
            if d_prob_ph_em >= p:
                break  # break out of the loop when the photon is emitted
            nanotime += dt
            iN += 1
            if iN == chunk_size:
                Na = memoryview(rg.randn(chunk_size))
                Pa = memoryview(rg.rand(chunk_size))
                iN = 0
            N = Na[iN]
            p = Pa[iN]
            # Update R following the OU process
            R = ou_single_step_cy(R, dt, N, R_mean, R_sigma, tau_relax)
        # photon emitted, let's decide if it is from D or A
        p_DA = p / d_prob_ph_em  # equivalent to rand(), but faster
        prob_A_em = k_ET / (k_ET + k_D)
        if prob_A_em >= p_DA:
            A_ph[iph] = True
        # time of D de-excitation by photon emission or energy transfer to A
        t0 = t + nanotime
        # save D-A distance at emission time
        R_ph[iph] = R
        # save time of emission relative to the excitation time `t`
        T_ph[iph] = nanotime
    return A_ph, R_ph, T_ph
#
# - FOLLOWING FUNCTIONS ARE NOT USED FOR RECOLORING - - - - - - - - - - -
#
@numba.jit(nopython=True)
def ou_process_core_numba(X, N, delta_t, ou_tau, ou_sigma):
    """Low-level function computing values of the OU process.

    Fills ``X[1:]`` in place using the discrete-time OU recurrence

        X[i] = X[i-1] * exp(-dt/tau) + sigma * sqrt(1 - exp(-2*dt/tau)) * N[i]

    (zero-mean form; the caller adds the process mean afterwards).
    """
    for i in range(1, X.size):
        dt = delta_t[i - 1]
        dt_over_tau = dt / ou_tau
        relax = np.exp(-dt_over_tau)
        # Noise amplitude that preserves the stationary variance for this dt.
        diffuse = ou_sigma * np.sqrt(1 - relax**2)
        X[i] = X[i - 1] * relax + diffuse * N[i]
def ou_process(delta_t, N, ou_mean, ou_sigma, ou_tau, x0=None):
    """Compute an Ornstein–Uhlenbeck (OU) process from a series of delta_t.

    Arguments:
        delta_t (array): intervals between the time points at which the OU
            process is evaluated.
        N (array): white Gaussian noise (sigma=1) of size ``delta_t.size + 1``.
        ou_mean (float): mean of the OU process.
        ou_sigma (float): standard deviation of the relaxed OU process.
        ou_tau (float): relaxation time of the OU process.
        x0 (float): initial value of the process. When None, the initial
            value is ``N[0] * ou_sigma``; otherwise ``N[0]`` is unused.

    Returns:
        Array of OU process values, of size ``delta_t.size + 1``.
    """
    num_points = delta_t.size + 1
    trajectory = np.zeros(num_points, dtype=float)
    # Seed the zero-mean process either from the stationary distribution
    # (scaled first noise sample) or from the caller-supplied start value.
    trajectory[0] = N[0] * ou_sigma if x0 is None else x0
    ou_process_core_numba(trajectory, N, delta_t, ou_tau, ou_sigma)
    # Shift the zero-mean trajectory onto the requested process mean.
    trajectory += ou_mean
    return trajectory
def E_sim_gauss_R_burst(burst_size_series, R0, R_mean, τ_relax=0,
                        R_sigma=0, oversample=100, dithering_sigma=0.02):
    """Simulate a E for each burst using a fixed D-A distance per burst.

    Distances are drawn from a Gaussian distribution.

    Arguments:
        burst_size_series (pandas.Series): series of burst sizes
        R0 (float): Förster radius used to convert distance to FRET.
        R_mean (float): mean of the Gaussian D-A distance distribution.
        τ_relax (float): unused; kept for signature compatibility with the
            companion E_sim_* functions.
        R_sigma (float): std. dev. of the Gaussian D-A distance distribution.
        oversample (int): simulated bursts generated per observed burst.
        dithering_sigma (float): sigma of the Gaussian dithering noise added
            to the simulated E values.

    Returns:
        1-D array of simulated E values of size
        ``burst_size_series.size * oversample``.
    """
    size_counts = burst_size_series.value_counts()
    E_sim = []
    # FIX: Series.iteritems() was deprecated and removed in pandas 2.0;
    # Series.items() is the drop-in equivalent on all supported versions.
    for burst_size, counts in size_counts.items():
        num_bursts = oversample * counts
        R = np.random.randn(num_bursts) * R_sigma + R_mean
        assert (R >= 0).all()
        E = fret.E_from_dist(R, R0)
        assert (E >= 0).all() and (E <= 1).all()
        na_sim = np.random.binomial(n=burst_size, p=E, size=num_bursts)
        E_sim.append(na_sim / burst_size)
    E_sim = np.hstack(E_sim)
    assert E_sim.size == size_counts.sum() * oversample
    # Dithering spreads the discrete na/size values for nicer histograms.
    E_sim += np.random.randn(E_sim.size) * dithering_sigma
    return E_sim
def E_sim_gauss_R_ph(burst_size_series, R0, R_mean, τ_relax=0,
R_sigma=0, oversample=100, dithering_sigma=0.02):
"""Simulate E for each burst using a new Gaussian D-A distance per photon
- R drawn from Gaussian distribution for each photon in each burst
- Group by burst size
Arguments:
burst_size_series (pandas.Series): series of burst | |
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ********************************************* Composition ************************************************************
"""
Contents
--------
* `Composition_Overview`
* `Composition_Creation`
- `Composition_Nested`
* `Composition_Run`
- `Composition_Run_Inputs`
- `Composition_Input_as_Function`
- `Composition_Scope_of_Execution`
* `Composition_Controller`
- `Composition_Controller_Assignment`
- `Composition_Controller_Execution`
* `Composition_Learning`
- `Composition_Learning_Standard`
• `Composition_Learning_Unsupervised`
• `Composition_Learning_Supervised`
- `Composition_Learning_Methods`
- `Composition_Learning_Components`
- `Composition_Learning_Execution`
- `Composition_Learning_AutodiffComposition`
- `Composition_Learning_UDF`
* `Composition_Visualization`
* `Composition_Class_Reference`
.. _Composition_Overview:
Overview
--------
Composition is the base class for objects that combine PsyNeuLink `Components <Component>` into an executable model.
It defines a common set of attributes possessed, and methods used by all Composition objects.
Composition "Nodes" are `Mechanisms <Mechanism>` and/or nested `Compositions <Composition>`. `Projections
<Projection>` connect two Nodes. The Composition's `graph <Composition.graph>` stores the structural relationships
among the Nodes of a Composition and the Projections that connect them. The Composition's `scheduler
<Composition.scheduler>` generates an execution queue based on these structural dependencies, allowing
for other user-specified scheduling and termination conditions to be specified.
.. _Composition_Creation:
Creating a Composition
----------------------
A generic Composition can be created by calling the constructor, and then adding `Components <Component>` using the
following Composition methods:
- `add_node <Composition.add_node>`
adds a node to the Composition
- `add_nodes <Composition.add_nodes>`
adds multiple nodes to the Composition
- `add_projection <Composition.add_projection>`
adds a connection between a pair of nodes in the Composition
- `add_projections <Composition.add_projections>`
adds connection between multiple pairs of nodes in the Composition
- `add_linear_processing_pathway <Composition.add_linear_processing_pathway>`
adds and connects a list of nodes and/or Projections to the Composition;
Inserts a default Projection between any adjacent Nodes.
In addition, a Composition has the following set of `learning methods <Composition_Learning_Methods>` that can also
be used to create a Composition from (or add) pathways that implement `learning <Composition_Learning>`:
- `add_linear_learning_pathway` <Composition.add_linear_learning_pathway>`
adds and connects a list of nodes, including `learning components <Composition_Learning_Components>`
needed to implement the algorithm specified in its **learning_function** argument in the specified pathway.
- `add_reinforcement_learning_pathway <Composition.add_reinforcement_learning_pathway>`
adds and connects a list of nodes, including `learning components <Composition_Learning_Components>`
needed to implement `reinforcement learning` in the specified pathway;
- `add_td_learning_pathway <Composition.add_td_learning_pathway>`
adds and connects a list of nodes, including `learning components <Composition_Learning_Components>`
needed to implement the `temporal differences` method of reinforcement learning` in the specified pathway;
- `add_backpopagation_learning_pathway <Composition.add_backpopagation_learning_pathway>`
adds and connects a list of nodes, including `learning components <Composition_Learning_Components>`
needed to implement the `backpropagation learning algorithm` in the specified pathway.
.. note::
Only Mechanisms and Projections added to a Composition via the methods above constitute a Composition, even if
other Mechanism and/or Projections are constructed in the same script.
COMMENT:
• MOVE THE EXAMPLES BELOW TO AN "Examples" SECTION
COMMENT
In the following script comp_0, comp_1 and comp_2 are identical, but constructed using different methods.
*Create Mechanisms:*
>>> import psyneulink as pnl
>>> A = pnl.ProcessingMechanism(name='A')
>>> B = pnl.ProcessingMechanism(name='B')
>>> C = pnl.ProcessingMechanism(name='C')
*Create Projections:*
>>> A_to_B = pnl.MappingProjection(name="A-to-B")
>>> B_to_C = pnl.MappingProjection(name="B-to-C")
*Create Composition; Add Nodes (Mechanisms) and Projections via the add_linear_processing_pathway method:*
>>> comp_0 = pnl.Composition(name='comp-0')
>>> comp_0.add_linear_processing_pathway(pathway=[A, A_to_B, B, B_to_C, C])
*Create Composition; Add Nodes (Mechanisms) and Projections via the add_nodes and add_projection methods:*
>>> comp_1 = pnl.Composition(name='comp-1')
>>> comp_1.add_nodes(nodes=[A, B, C])
>>> comp_1.add_projection(projection=A_to_B)
>>> comp_1.add_projection(projection=B_to_C)
*Create Composition; Add Nodes (Mechanisms) and Projections via the add_node and add_projection methods:*
>>> comp_2 = pnl.Composition(name='comp-2')
>>> comp_2.add_node(node=A)
>>> comp_2.add_node(node=B)
>>> comp_2.add_node(node=C)
>>> comp_2.add_projection(projection=A_to_B)
>>> comp_2.add_projection(projection=B_to_C)
*Run each Composition:*
>>> input_dict = {A: [[[1.0]]]}
>>> comp_0_output = comp_0.run(inputs=input_dict)
>>> comp_1_output = comp_1.run(inputs=input_dict)
>>> comp_2_output = comp_2.run(inputs=input_dict)
.. _Composition_Nested:
*Nested Compositions*
=====================
A Composition can be used as a node of another Composition, by calling `add_node <Composition.add_node>`
from the parent composition using the child Composition as an argument. Projections can then be specified to and from
the nested composition just as for any other node.
*Create outer Composition:*
>>> outer_A = pnl.ProcessingMechanism(name='outer_A')
>>> outer_B = pnl.ProcessingMechanism(name='outer_B')
>>> outer_comp = pnl.Composition(name='outer_comp')
>>> outer_comp.add_nodes([outer_A, outer_B])
*Create and configure inner Composition:*
>>> inner_A = pnl.ProcessingMechanism(name='inner_A')
>>> inner_B = pnl.ProcessingMechanism(name='inner_B')
>>> inner_comp = pnl.Composition(name='inner_comp')
>>> inner_comp.add_linear_processing_pathway([inner_A, inner_B])
*Nest inner Composition within outer Composition using `add_node <Composition.add_node>`:*
>>> outer_comp.add_node(inner_comp)
*Create Projections:*
>>> outer_comp.add_projection(pnl.MappingProjection(), sender=outer_A, receiver=inner_comp)
>>> outer_comp.add_projection(pnl.MappingProjection(), sender=inner_comp, receiver=outer_B)
>>> input_dict = {outer_A: [[[1.0]]]}
*Run Composition:*
>>> outer_comp.run(inputs=input_dict)
*Using `add_linear_processing_pathway <Composition.add_linear_processing_pathway>` with nested compositions for brevity:*
>>> outer_A = pnl.ProcessingMechanism(name='outer_A')
>>> outer_B = pnl.ProcessingMechanism(name='outer_B')
>>> outer_comp = pnl.Composition(name='outer_comp')
>>> inner_A = pnl.ProcessingMechanism(name='inner_A')
>>> inner_B = pnl.ProcessingMechanism(name='inner_B')
>>> inner_comp = pnl.Composition(name='inner_comp')
>>> inner_comp.add_linear_processing_pathway([inner_A, inner_B])
>>> outer_comp.add_linear_processing_pathway([outer_A, inner_comp, outer_B])
>>> input_dict = {outer_A: [[[1.0]]]}
>>> outer_comp.run(inputs=input_dict)
.. _Composition_Run:
Running a Composition
---------------------
.. _Composition_Run_Inputs:
*Run with Input Dictionary*
===========================
The `run <Composition.run>` method presents the inputs for each `TRIAL` to the input_ports of the INPUT Nodes in the
`scope of execution <Composition_Scope_of_Execution>`. These input values are specified in the **inputs** argument of
a Composition's `execute <Composition.execute>` or `run <Composition.run>` methods.
COMMENT:
From KAM 2/7/19 - not sure "scope of execution" is the right phrase. To me, it implies that only a subset of the
nodes in the Composition belong to the "scope of execution". What we want to convey (I think) is that ALL of the
Nodes execute, but they do so in a "state" (history, parameter vals) corresponding to a particular execution id.
COMMENT
The standard way to specify inputs is a Python dictionary in which each key is an `INPUT <NodeRole.INPUT>` Node and
each value is a list. The lists represent the inputs to the key `INPUT <NodeRole.INPUT>` Nodes, in which the i-th
element of the list represents the input value to the key Node on trial i.
.. _Composition_Run_Inputs_Fig_States:
.. figure:: _static/input_spec_states.svg
:alt: Example input specifications with input ports
Each input value must be compatible with the shape of the key `INPUT <NodeRole.INPUT>` Node's `external_input_values
<MechanismBase.external_input_values>`. As a result, each item in the list of inputs is typically a 2d list/array,
though `some shorthand notations are allowed <Composition_Input_Specification_Examples>`.
>>> import psyneulink as pnl
>>> a = pnl.TransferMechanism(name='a',
... default_variable=[[0.0, 0.0]])
>>> b = pnl.TransferMechanism(name='b',
... default_variable=[[0.0], [0.0]])
>>> c = pnl.TransferMechanism(name='c')
>>> pathway1 = [a, c]
>>> pathway2 = [b, c]
>>> comp = Composition(name='comp')
>>> comp.add_linear_processing_pathway(pathway1)
>>> comp.add_linear_processing_pathway(pathway2)
>>> input_dictionary = {a: [[[1.0, 1.0]], [[1.0, 1.0]]],
... b: [[[2.0], [3.0]], [[2.0], [3.0]]]}
>>> comp.run(inputs=input_dictionary)
.. note::
A Node's `external_input_values <MechanismBase.external_input_values>` attribute is always a 2d list in which the
index i element is the value of the Node's index i `external_input_port <MechanismBase.external_input_ports>`. In
many cases, `external_input_values <MechanismBase.external_input_values>` is the same as `variable
<MechanismBase.variable>`. Keep in mind that any InputPorts marked as "internal" are excluded from
`external_input_values <MechanismBase.external_input_values>`, and do not receive user-specified input values.
If num_trials is not in use, the number of inputs provided determines the number of trials in the run. For example, if
five inputs are provided for each INPUT Node, and num_trials is not specified, the Composition executes five times.
+----------------------+-------+------+------+------+------+
| Trial # |0 |1 |2 |3 |4 |
+----------------------+-------+------+------+------+------+
| Input to Mechanism a |1.0 |2.0 |3.0 |4.0 |5.0 |
+----------------------+-------+------+------+------+------+
>>> import psyneulink as pnl
>>> a = pnl.TransferMechanism(name='a')
>>> b = pnl.TransferMechanism(name='b')
>>> pathway1 = [a, b]
>>> comp = Composition(name='comp')
>>> comp.add_linear_processing_pathway(pathway1)
>>> input_dictionary = {a: [[[1.0]], [[2.0]], [[3.0]], [[4.0]], [[5.0]]]}
>>> comp.run(inputs=input_dictionary)
The number of inputs specified **must** be the same for all Nodes in the input dictionary (except for any Nodes for
which only one input is specified). In other words, all of the values in the input dictionary must have the same length
as each other (or length 1).
If num_trials is in use, `run` iterates over the inputs until num_trials is reached. For example, if five inputs
are provided for each `INPUT <NodeRole.INPUT>` Node, and num_trials = 7, the system executes seven times. The input
values from trials 0 and 1 are used again on trials 5 and 6, respectively.
+----------------------+-------+------+------+------+------+------+------+
| Trial # |0 |1 |2 |3 |4 |5 |6 |
+----------------------+-------+------+------+------+------+------+------+
| Input to | |
on the threshold.
For example, the actual output size of scores would be (10,) if there are 10 boxes.
"""
# Step 1: Compute box scores
box_scores = box_confidence * box_class_probs
# Step 2: Find the box_classes using the max box_scores, keep track of the corresponding score
box_classes = K.argmax(box_scores, axis=-1)
box_class_scores = K.max(box_scores, axis = -1)
# Step 3: Create a filtering mask based on "box_class_scores" by using "threshold". The mask should have the
# same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)
filtering_mask = K.greater_equal(box_class_scores, threshold)
# Step 4: Apply the mask to box_class_scores, boxes and box_classes
scores = tf.boolean_mask(box_class_scores, filtering_mask)
boxes = tf.boolean_mask(boxes, filtering_mask)
classes = tf.boolean_mask(box_classes, filtering_mask)
return scores, boxes, classes
# In[24]:
# Smoke test for yolo_filter_boxes: feed seeded random activations shaped like
# a 19x19 YOLO grid with 5 anchor boxes and 80 classes, then print a sample
# element and the resulting shapes.
with tf.Session() as test_a:
    box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed=1)
    boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed=1)
    box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed=1)
    scores, boxes, classes = yolo_filter_boxes(
        box_confidence, boxes, box_class_probs, threshold=0.5)
    for label, tensor in [("scores", scores), ("boxes", boxes), ("classes", classes)]:
        print(label + "[2] = " + str(tensor[2].eval()))
    for label, tensor in [("scores", scores), ("boxes", boxes), ("classes", classes)]:
        print(label + ".shape = " + str(tensor.shape))
# **Note** In the test for `yolo_filter_boxes`, we're using random numbers to test the function. In real data, the `box_class_probs` would contain non-zero values between 0 and 1 for the probabilities. The box coordinates in `boxes` would also be chosen so that lengths and heights are non-negative.
# ### 2.3 - Non-max suppression ###
#
# Even after filtering by thresholding over the class scores, you still end up with a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS).
# <img src="nb_images/non-max-suppression.png" style="width:500px;height:400;">
# <caption><center> <u> **Figure 7** </u>: In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probability) of the 3 boxes. <br> </center></caption>
#
# Non-max suppression uses the very important function called **"Intersection over Union"**, or IoU.
# <img src="nb_images/iou.png" style="width:500px;height:400;">
# <caption><center> <u> **Figure 8** </u>: Definition of "Intersection over Union". <br> </center></caption>
# In[57]:
def iou(box1, box2):
    """Compute the intersection over union (IoU) between box1 and box2.

    Arguments:
    box1 -- first box, sequence of coordinates (x1, y1, x2, y2)
    box2 -- second box, sequence of coordinates (x1, y1, x2, y2)

    Returns:
    iou -- float in [0, 1]; 0.0 when the boxes do not overlap, including
           the degenerate case where both boxes have zero area.
    """
    # Assign variable names to coordinates for clarity.
    (box1_x1, box1_y1, box1_x2, box1_y2) = box1
    (box2_x1, box2_y1, box2_x2, box2_y2) = box2

    # Intersection rectangle.  Width/height are clamped at 0 so that
    # disjoint boxes yield an empty intersection rather than a negative
    # area.  Built-in max/min suffice here (inputs are scalars), so the
    # numpy calls used previously are unnecessary.
    xi1 = max(box1_x1, box2_x1)
    yi1 = max(box1_y1, box2_y1)
    xi2 = min(box1_x2, box2_x2)
    yi2 = min(box1_y2, box2_y2)
    inter_area = max(xi2 - xi1, 0) * max(yi2 - yi1, 0)

    # Union(A, B) = A + B - Inter(A, B)
    box1_area = (box1_x2 - box1_x1) * (box1_y2 - box1_y1)
    box2_area = (box2_x2 - box2_x1) * (box2_y2 - box2_y1)
    union_area = box1_area + box2_area - inter_area

    # Guard against division by zero when both boxes are degenerate
    # (previously this raised ZeroDivisionError).
    if union_area == 0:
        return 0.0
    return inter_area / union_area
# In[58]:
# Exercise iou() on the four qualitative overlap configurations:
# proper overlap, fully disjoint, touching at a vertex, touching along an edge.
iou_cases = [
    ("intersecting boxes", (2, 1, 4, 3), (1, 2, 3, 4)),
    ("non-intersecting boxes", (1, 2, 3, 4), (5, 6, 7, 8)),
    ("boxes that only touch at vertices", (1, 1, 2, 2), (2, 2, 3, 3)),
    ("boxes that only touch at edges", (1, 1, 3, 3), (2, 3, 3, 4)),
]
for label, box1, box2 in iou_cases:
    print("iou for " + label + " = " + str(iou(box1, box2)))
# #### YOLO non-max suppression
#
# We are now ready to implement non-max suppression. The key steps are:
# 1. Select the box that has the highest score.
# 2. Compute the overlap of this box with all other boxes, and remove boxes that overlap significantly (iou >= `iou_threshold`).
# 3. Go back to step 1 and iterate until there are no more boxes with a lower score than the currently selected box.
#
# This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain.
# In[61]:
def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
    """
    Applies Non-max suppression (NMS) to a set of boxes.

    Arguments:
    scores -- tensor of shape (None,), output of yolo_filter_boxes()
    boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)
    classes -- tensor of shape (None,), output of yolo_filter_boxes()
    max_boxes -- integer, maximum number of predicted boxes you'd like
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (None,), predicted score for each kept box
    boxes -- tensor of shape (None, 4), predicted coordinates for each kept box
    classes -- tensor of shape (None,), predicted class for each kept box

    Note: The "None" dimension of the output tensors is at most max_boxes.
    """
    # tf.image.non_max_suppression runs inside the graph, so wrap max_boxes in
    # a graph variable and initialize it in the current session.
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))

    # Bug fix: max_boxes_tensor was created and initialized but never used --
    # the raw Python int was passed instead, leaving the initialization above
    # as dead code.  Pass the tensor as intended.
    nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold)

    # Keep only the selected indices in each of the three parallel tensors.
    scores = K.gather(scores, nms_indices)
    boxes = K.gather(boxes, nms_indices)
    classes = K.gather(classes, nms_indices)

    return scores, boxes, classes
# In[62]:
# Smoke test for yolo_non_max_suppression: 54 seeded random boxes in, at most
# max_boxes (default 10) out.  Prints one sample element and the final shapes.
with tf.Session() as test_b:
    scores = tf.random_normal([54], mean=1, stddev=4, seed=1)
    boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed=1)
    classes = tf.random_normal([54], mean=1, stddev=4, seed=1)
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes)
    for label, tensor in [("scores", scores), ("boxes", boxes), ("classes", classes)]:
        print(label + "[2] = " + str(tensor[2].eval()))
    for label, tensor in [("scores", scores), ("boxes", boxes), ("classes", classes)]:
        print(label + ".shape = " + str(tensor.eval().shape))
# ### 2.4 Wrapping up the filtering
#
# It's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented.
#
# Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS.
# YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image.
# In[63]:
def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
"""
Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.
Arguments:
yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:
box_confidence: tensor of shape (None, 19, 19, 5, 1)
box_xy: tensor of shape (None, 19, 19, 5, 2)
box_wh: tensor of shape (None, 19, 19, 5, 2)
box_class_probs: tensor of shape (None, 19, 19, 5, 80)
image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) (has to be float32 dtype)
max_boxes -- integer, maximum number of predicted boxes you'd like
score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box
iou_threshold -- real value, "intersection over union" threshold used for NMS filtering
Returns:
scores -- tensor of shape (None, ), predicted score for each box
boxes -- tensor of shape (None, 4), predicted box coordinates
classes -- tensor of shape (None,), predicted class for | |
<filename>spark_fhir_schemas/stu3/complex_types/procedure.py
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
BooleanType,
DataType,
TimestampType,
)
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class ProcedureSchema:
"""
An action that is or was performed on a patient. This can be a physical
intervention like an operation, or less invasive like counseling or
hypnotherapy.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
An action that is or was performed on a patient. This can be a physical
intervention like an operation, or less invasive like counseling or
hypnotherapy.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a Procedure resource
identifier: This records identifiers associated with this procedure that are defined by
business processes and/or used to refer to it when a direct URL reference to
the resource itself is not appropriate (e.g. in CDA documents, or in written /
printed documentation).
definition: A protocol, guideline, orderset or other definition that was adhered to in
whole or in part by this procedure.
basedOn: A reference to a resource that contains details of the request for this
procedure.
partOf: A larger event of which this particular procedure is a component or step.
status: A code specifying the state of the procedure. Generally this will be in-
progress or completed state.
notDone: Set this to true if the record is saying that the procedure was NOT performed.
notDoneReason: A code indicating why the procedure was not performed.
category: A code that classifies the procedure for searching, sorting and display
purposes (e.g. "Surgical Procedure").
code: The specific procedure that is performed. Use text if the exact nature of the
procedure cannot be coded (e.g. "Laparoscopic Appendectomy").
subject: The person, animal or group on which the procedure was performed.
context: The encounter during which the procedure was performed.
performedDateTime: The date(time)/period over which the procedure was performed. Allows a period
to support complex procedures that span more than one date, and also allows
for the length of the procedure to be captured.
performedPeriod: The date(time)/period over which the procedure was performed. Allows a period
to support complex procedures that span more than one date, and also allows
for the length of the procedure to be captured.
performer: Limited to 'real' people rather than equipment.
location: The location where the procedure actually happened. E.g. a newborn at home, a
tracheostomy at a restaurant.
reasonCode: The coded reason why the procedure was performed. This may be coded entity of
some type, or may simply be present as text.
reasonReference: The condition that is the reason why the procedure was performed.
bodySite: Detailed and structured anatomical location information. Multiple locations
are allowed - e.g. multiple punch biopsies of a lesion.
outcome: The outcome of the procedure - did it resolve reasons for the procedure being
performed?
report: This could be a histology result, pathology report, surgical report, etc..
complication: Any complications that occurred during the procedure, or in the immediate
post-performance period. These are generally tracked separately from the
notes, which will typically describe the procedure itself rather than any
'post procedure' issues.
complicationDetail: Any complications that occurred during the procedure, or in the immediate
post-performance period.
followUp: If the procedure required specific follow up - e.g. removal of sutures. The
followup may be represented as a simple note, or could potentially be more
complex in which case the CarePlan resource can be used.
note: Any other notes about the procedure. E.g. the operative notes.
focalDevice: A device that is implanted, removed or otherwise manipulated (calibration,
battery replacement, fitting a prosthesis, attaching a wound-vac, etc.) as a
focal portion of the Procedure.
usedReference: Identifies medications, devices and any other substance used as part of the
procedure.
usedCode: Identifies coded items that were used as part of the procedure.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema
from spark_fhir_schemas.stu3.complex_types.procedure_performer import (
Procedure_PerformerSchema,
)
from spark_fhir_schemas.stu3.complex_types.annotation import AnnotationSchema
from spark_fhir_schemas.stu3.complex_types.procedure_focaldevice import (
Procedure_FocalDeviceSchema,
)
if (
max_recursion_limit
and nesting_list.count("Procedure") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["Procedure"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human | |
<reponame>Garyfallidis/dipy
"""Create a unit sphere by subdividing all triangles of an octahedron
recursively.
The unit sphere has a radius of 1, which also means that all points in this
sphere (assumed to have centre at [0, 0, 0]) have an absolute value (modulus)
of 1. Another feature of the unit sphere is that the unit normals of this
sphere are exactly the same as the vertices.
This recursive method will avoid the common problem of the polar singularity,
produced by 2d (lon-lat) parameterization methods.
If you require a sphere with a radius other than that of 1, simply multiply
the vertex array by this new radius (although this will break the "vertex array
equal to unit normal array" property)
"""
import numpy as np
import warnings
# Golden ratio: the 12 vertices of a regular icosahedron are the cyclic
# permutations of (0, +/-1, +/-t).
t = (1+np.sqrt(5))/2
# NOTE(review): none of the icosahedron_* arrays below appear to be used by
# the functions in this module (create_unit_sphere starts from the octahedron
# data instead), and the data looks half-finished -- see the notes inline.
icosahedron_vertices = np.array( [
    [ t, 1, 0], # 0
    [ -t, 1, 0], # 1
    [ t, -1, 0], # 2
    [ -t, -1, 0], # 3
    [ 1, 0, t], # 4
    [ 1, 0, -t], # 5
    [ -1, 0, t], # 6
    [ -1, 0, -t], # 7
    [ 0, t, 1], # 8
    [ 0, -t, 1], # 9
    [ 0, t, -1], # 10
    [ 0, -t, -1] # 11
    ] )
# Edges of the icosahedron as vertex-index pairs.  Each commented-out triple
# names the triangle whose edges follow it; the trailing comments number the
# edges as they are appended.
icosahedron_edges = np.array( [
    #[ 0, 8, 4], # 0
    [0,8], [8,4],[4,0], #0,1,2
    #[ 0, 5,10], # 1
    [0,5],[5,10],[10,0], #3,4,5
    #[ 2, 4, 9], # 2
    [2,4],[4,9],[9,2], #6,7,8
    #[ 2,11, 5], # 3
    [2,11],[11,5],[5,2], #9,10,11
    #[ 1, 6, 8], # 4
    [1,6],[6,8],[8,1], #12,13,14
    #[ 1,10, 7], # 5
    [1,10],[10,7],[7,1], #15,16,17
    #[ 3, 9, 6], # 6
    # NOTE(review): for triangle [3, 9, 6] the cycle rule (see divide_all)
    # requires the closing edge [6, 3]; [9, 3] neither closes the cycle nor
    # matches the comment above -- looks like a data-entry error.  TODO confirm.
    [3,9],[9,6],[9,3], #18,19,20
    #[ 3, 7,11], # 7
    [3,7],[7,11],[11,3], #21,22,23
    #[ 0,10, 8], # 8
    [0,10],[10,8],[8,0], #24,25,26
    #[ 11, 9, 2], #10
    [11,9], #27
    #[ 10, 8,1], # 9
    [10,8], #28
    #[ 3, 9,11], #11
    # NOTE(review): [3,9] and [11,3] duplicate edges already listed above
    # (indices 18 and 23); presumably only [9,11] is the new edge here.
    [3,9],[9,11],[11,3],
    #[ 4, 2, 0], #12
    [4,2],[2,0],[0,4],
    #[ 5, 0, 2], #13
    [5,0],[0,2],[2,5],
    #[ 6, 1, 3], #14
    [6,1],[1,3],[3,6],
    #[ 7, 3, 1], #15
    [7,3],[3,1],[1,7],
    #[ 8, 6, 4], #16
    [8,6],[6,4],[4,8],
    #[ 9, 4, 6], #17
    [9,4],[4,6],[6,9],
    #[10, 5, 7], #18
    [10,5],[5,7],[7,10],
    #[11, 7, 5] #19
    [11,7],[7,5],[5,11]
    ], dtype='uint16' )
# Triangles of the icosahedron.  The first eleven rows are triples of indices
# into icosahedron_edges (matching the commented-out vertex triples); the
# remaining rows are raw VERTEX triples that were never converted to edge
# indices.
# NOTE(review): mixing edge-index rows with vertex rows makes this array
# unusable with divide_all as-is; it appears to be unfinished work.
icosahedron_triangles = np.array( [
    #[ 0, 8, 4], # 0
    [0,1,2],
    #[ 0, 5,10], # 1
    [3,4,5],
    #[ 2, 4, 9], # 2
    [6,7,8],
    #[ 2,11, 5], # 3
    [9,10,11],
    #[ 1, 6, 8], # 4
    [12,13,14],
    #[ 1,10, 7], # 5
    [15,16,17],
    #[ 3, 9, 6], # 6
    [18,19,20],
    #[ 3, 7,11], # 7
    [21,22,23],
    #[ 0,10, 8], # 8
    [24,25,26],
    #[11, 9, 2], #10
    [27,8,9],
    #[ 10, 8,1], # 9
    [28,14,15],
    #[ 3, 9,11], #11
    [ 4, 2, 0], #12
    [ 5, 0, 2], #13
    [ 6, 1, 3], #14
    [ 7, 3, 1], #15
    [ 8, 6, 4], #16
    [ 9, 4, 6], #17
    [10, 5, 7], #18
    [11, 7, 5] #19
    ], dtype='uint16')
# The six vertices of a unit octahedron: one on each half-axis.  This is the
# seed mesh actually used by create_unit_sphere.
octahedron_vertices = np.array( [
    [ 1.0, 0.0, 0.0], # 0
    [-1.0, 0.0, 0.0], # 1
    [ 0.0, 1.0, 0.0], # 2
    [ 0.0,-1.0, 0.0], # 3
    [ 0.0, 0.0, 1.0], # 4
    [ 0.0, 0.0,-1.0] # 5
    ] )
# Each edge is a pair of neighboring vertices; the edges and triangles below
# follow the cycle rule.  For more on the cycle rule see divide_all.
# NOTE(review): each row carries two '#' numbers; the right-hand one is the
# row index into this array -- the meaning of the left-hand number is not
# evident from this file.  TODO confirm (possibly an alternate edge ordering).
octahedron_edges = np.array( [
    [0, 4], #0 #0
    [1, 5], #10 #1
    [4, 2], #1 #2
    [5, 3], #11 #3
    [2, 0], #2 #4
    [3, 1], #6 #5
    [2, 1], #3 #6
    [3, 0], #7 #7
    [1, 4], #4 #8
    [0, 5], #8 #9
    [4, 3], #5 #10
    [5, 2], #9 #11
    ], dtype='uint16' )
# Each triangle is a set of three edge indices.  Because these triangles and
# edges follow the cycle rule you can recover the three vertices of a
# triangle with octahedron_edges[octahedron_triangles, 0].  For more on the
# cycle rule see divide_all.
octahedron_triangles = np.array( [
    [ 0, 2, 4],
    [ 1, 3, 5],
    [ 2, 6, 8],
    [ 3, 7, 9],
    [ 8, 10, 5],
    [ 9, 11, 4],
    [ 0, 10, 7],
    [ 1, 11, 6],
    ], dtype='uint16')
def divide_all( vertices, edges, triangles ):
    """Subdivide every triangle of the mesh into four smaller triangles.

    Each edge is split at its midpoint, the midpoint is projected back onto
    the unit sphere, and every triangle is replaced by four sub-triangles::

              1                    1
             /\\                   /\\
          B /  \\ C             b /__\\ c
           /____\\      ->      /\\  /\\
          0   A   2            /__\\/__\\
                              0   a    2

    Parameters
    ----------
    vertices : ndarray
        Vx3 array with the x, y, and z coordinates of each vertex, assumed
        to lie on the unit sphere.
    edges : ndarray
        Ex2 array of edges; each edge is a pair of neighboring vertices.
    triangles : ndarray
        Tx3 array of triangles; each triangle is a triple of edge indices.

    Returns
    -------
    vertices, edges, triangles : ndarray
        The refined mesh in the same representation.

    Notes
    -----
    The input edges and triangles MUST obey the cycle rule: within each
    triangle, the second vertex of every edge is the first vertex of the
    triangle's next edge (e.g. triangle [A, B, C] with A=[2,0], B=[0,1],
    C=[1,2]).  The octahedron data in this module satisfies this; such an
    arrangement is not possible for arbitrary meshes.  The new edges and
    triangles produced here are constructed so the rule keeps holding,
    which is what makes repeated subdivision valid.

    Adapted from
    http://sites.google.com/site/dlampetest/python/triangulating-a-sphere-recursively
    """
    v_count = len(vertices)
    e_count = len(edges)
    t_count = len(triangles)

    # One new vertex per old edge: the (scaled) midpoint, renormalized onto
    # the unit sphere.  Summing the endpoints gives 2x the midpoint, but the
    # normalization makes the factor irrelevant.
    midpoints = vertices[edges].sum(1)
    midpoints /= np.sqrt((midpoints * midpoints).sum(-1))[:, None]
    vertices = np.vstack((vertices, midpoints))

    # Index of the new midpoint vertex created on each old edge.
    mid_ind = np.arange(v_count, v_count + e_count, dtype='uint16')

    # Midpoint vertices sitting on the three edges of each triangle, named
    # after the b, a, c labels in the diagram above.
    v_b = mid_ind[triangles[:, 0]]
    v_c = mid_ind[triangles[:, 1]]
    v_a = mid_ind[triangles[:, 2]]

    # Replace each old edge with its two halves (blocks 1 and 2), then add
    # the three interior edges of each triangle (blocks 3-5).  The stacking
    # order fixes the edge numbering the triangle assembly below relies on.
    edges = np.vstack((np.c_[edges[:, 0], mid_ind],   # first half of old edge
                       np.c_[mid_ind, edges[:, 1]],   # second half of old edge
                       np.c_[v_b, v_a],               # interior edge b->a
                       np.c_[v_a, v_c],               # interior edge a->c
                       np.c_[v_c, v_b],               # interior edge c->b
                       ))

    # Edge indices: the first half of old edge i sits at i, its second half
    # at i + e_count, and the interior edges start at 2 * e_count.
    E_0b = triangles[:, 0]
    E_b1 = triangles[:, 0] + e_count
    E_1c = triangles[:, 1]
    E_c2 = triangles[:, 1] + e_count
    E_2a = triangles[:, 2]
    E_a0 = triangles[:, 2] + e_count
    base = 2 * e_count
    E_ba = np.arange(base, base + t_count, dtype='uint16')
    E_ac = np.arange(base + t_count, base + 2 * t_count, dtype='uint16')
    E_cb = np.arange(base + 2 * t_count, base + 3 * t_count, dtype='uint16')

    # Assemble the four sub-triangles of each triangle; every row is ordered
    # so that the cycle rule holds for the output as well.
    triangles = np.vstack((np.c_[E_0b, E_ba, E_a0],   # corner at vertex 0
                           np.c_[E_1c, E_cb, E_b1],   # corner at vertex 1
                           np.c_[E_2a, E_ac, E_c2],   # corner at vertex 2
                           np.c_[E_ba, E_ac, E_cb],   # central triangle
                           ))

    return vertices, edges, triangles
def create_unit_sphere( recursion_level=2 ):
    """Build a unit-sphere mesh by recursively subdividing a unit octahedron.

    Starting from the octahedron, each level of recursion splits every
    triangle into four and projects the new vertices onto the unit sphere.

    Parameters
    ----------
    recursion_level : int
        Level of subdivision in [1, 7]; 1 returns the octahedron itself,
        and each additional level quadruples the triangle count.

    Returns
    -------
    vertices : ndarray
        A Vx3 array with the x, y, and z coordinates of each vertex.
    edges : ndarray
        An Ex2 array of edges; each edge is a pair of neighboring vertices.
    triangles : ndarray
        A Tx3 array of triangles; each triangle is a triple of edge indices.

    See Also
    --------
    create_half_unit_sphere, divide_all
    """
    # Indices are stored as uint16 in divide_all, which caps the usable
    # subdivision depth; level 7 is the deepest level that still fits.
    if not (1 <= recursion_level <= 7):
        raise ValueError("recursion_level must be between 1 and 7")
    mesh = (octahedron_vertices, octahedron_edges, octahedron_triangles)
    for _ in range(recursion_level - 1):
        mesh = divide_all(*mesh)
    return mesh
def create_half_unit_sphere( recursion_level=2 ):
""" Creates a unit sphere and returns | |
__check(sodium.crypto_stream_xchacha20_xor(c, message, mlen, nonce, key))
return c.raw
# crypto_stream_chacha20_xor_ic(unsigned char *c, const unsigned char *m, unsigned long long mlen, const unsigned char *n, uint64_t ic, const unsigned char *k)
def crypto_stream_xchacha20_xor_ic(message, nonce, initial_counter, key):
    """XOR *message* with the XChaCha20 keystream, starting at stream
    block *initial_counter*, and return the result as bytes.

    Raises ValueError if the nonce or key has the wrong length; libsodium
    failures surface via __check().
    """
    if len(nonce) != crypto_stream_xchacha20_NONCEBYTES: raise ValueError("truncated nonce")
    if len(key) != crypto_stream_xchacha20_KEYBYTES: raise ValueError("truncated key")
    # NOTE(review): the C prototype above takes unsigned long long; c_longlong
    # has the same width, so this is harmless for realistic message sizes.
    mlen = ctypes.c_longlong(len(message))
    ic = ctypes.c_uint64(initial_counter)
    # Stream-cipher XOR produces exactly len(message) output bytes.
    c = ctypes.create_string_buffer(len(message))
    __check(sodium.crypto_stream_xchacha20_xor_ic(c, message, mlen, nonce, ic, key))
    return c.raw
def crypto_stream_xchacha20_xor_ic_inplace(message: Union[bytearray, memoryview], nonce, initial_counter, key):
    """XOR *message* with the XChaCha20 keystream in place.

    *message* must be a writable buffer (bytearray or writable memoryview);
    it is overwritten with the result.  Returns None.
    """
    if len(nonce) != crypto_stream_xchacha20_NONCEBYTES: raise ValueError("truncated nonce")
    if len(key) != crypto_stream_xchacha20_KEYBYTES: raise ValueError("truncated key")
    mlen = ctypes.c_longlong(len(message))
    ic = ctypes.c_uint64(initial_counter)
    # Writable ctypes view over the caller's buffer so libsodium can write
    # directly into it; input and output pointers intentionally alias.
    m = (ctypes.c_char * len(message)).from_buffer(message)
    __check(sodium.crypto_stream_xchacha20_xor_ic(m, m, mlen, nonce, ic, key))
def crypto_stream_xchacha20_xor_ic_dest(message, dest, nonce, initial_counter, key):
    """XOR *message* with the XChaCha20 keystream, writing the result into *dest*.

    *dest* must be a writable buffer (e.g. bytearray) at least as long as
    *message*.  Returns None; the output is placed in dest.
    """
    if len(nonce) != crypto_stream_xchacha20_NONCEBYTES: raise ValueError("truncated nonce")
    if len(key) != crypto_stream_xchacha20_KEYBYTES: raise ValueError("truncated key")
    if len(message) > len(dest): raise ValueError("Not enough space in destination")
    mlen = ctypes.c_longlong(len(message))
    ic = ctypes.c_uint64(initial_counter)
    # BUG FIX: per the C prototype (c, m, mlen, n, ic, k) the *first* argument
    # is the output buffer.  The previous call passed (message, dest, ...),
    # which read the plaintext from `dest` and wrote the result over
    # `message` -- source and destination were swapped.  Also wrap `dest` in a
    # writable ctypes view (as the _inplace variant does) so libsodium can
    # actually write into the caller's buffer.
    d = (ctypes.c_char * len(dest)).from_buffer(dest)
    __check(sodium.crypto_stream_xchacha20_xor_ic(d, message, mlen, nonce, ic, key))
# crypto_aead_chacha20poly1305_encrypt(unsigned char *c, unsigned long long *clen, const unsigned char *m, unsigned long long mlen, const unsigned char *ad, unsigned long long adlen, const unsigned char *nsec, const unsigned char *npub, const unsigned char *k);
def crypto_aead_chacha20poly1305_encrypt(message, ad, nonce, key):
    """Encrypt and authenticate *message* with ChaCha20-Poly1305 (combined mode).

    *ad* is optional additional authenticated data (may be None).  Returns
    the ciphertext with the 16-byte Poly1305 tag appended.
    """
    if len(nonce) != crypto_aead_chacha20poly1305_NONCEBYTES:
        raise ValueError("truncated nonce")
    if len(key) != crypto_aead_chacha20poly1305_KEYBYTES:
        raise ValueError("truncated key")
    msg_len = ctypes.c_ulonglong(len(message))
    ad_len = ctypes.c_ulonglong(0 if ad is None else len(ad))
    # Combined mode appends the 16-byte tag to the ciphertext.
    out = ctypes.create_string_buffer(len(message) + 16)
    out_len = ctypes.c_ulonglong(0)
    __check(sodium.crypto_aead_chacha20poly1305_encrypt(
        out, ctypes.byref(out_len), message, msg_len, ad, ad_len, None, nonce, key))
    return out.raw
# crypto_aead_chacha20poly1305_decrypt(unsigned char *m, unsigned long long *mlen, unsigned char *nsec, const unsigned char *c, unsigned long long clen, const unsigned char *ad, unsigned long long adlen, const unsigned char *npub, const unsigned char *k)
def crypto_aead_chacha20poly1305_decrypt(ciphertext, ad, nonce, key):
    """Verify and decrypt a combined-mode ChaCha20-Poly1305 *ciphertext*.

    *ciphertext* is message || 16-byte tag (the output of the matching
    encrypt function); *ad* may be None and must match the value used at
    encryption time.  Returns the plaintext; authentication failure
    surfaces via __check().
    """
    if len(nonce) != crypto_aead_chacha20poly1305_NONCEBYTES: raise ValueError("truncated nonce")
    if len(key) != crypto_aead_chacha20poly1305_KEYBYTES: raise ValueError("truncated key")
    # A valid combined ciphertext always carries the ABYTES (16-byte) tag.
    # Without this guard a shorter input made create_string_buffer() fail
    # with an opaque ctypes error instead of a meaningful ValueError.
    if len(ciphertext) < crypto_aead_chacha20poly1305_ABYTES: raise ValueError("truncated ciphertext")
    m = ctypes.create_string_buffer(len(ciphertext) - 16)
    mlen = ctypes.c_ulonglong(0)
    clen = ctypes.c_ulonglong(len(ciphertext))
    adlen = ctypes.c_ulonglong(len(ad)) if ad is not None else ctypes.c_ulonglong(0)
    __check(sodium.crypto_aead_chacha20poly1305_decrypt(m, ctypes.byref(mlen), None, ciphertext, clen, ad, adlen, nonce, key))
    return m.raw
# crypto_aead_chacha20poly1305_encrypt_detached(unsigned char *c, unsigned char *mac, unsigned long long *maclen_p, const unsigned char *m, unsigned long long mlen, const unsigned char *ad, unsigned long long adlen, const unsigned char *nsec, const unsigned char *npub, const unsigned char *k)
@sodium_version(1, 0, 9)
def crypto_aead_chacha20poly1305_encrypt_detached(message, ad, nonce, key):
    """Encrypt *message* with ChaCha20-Poly1305 in detached mode.

    Returns (ciphertext, mac): the ciphertext is exactly len(message)
    bytes and the Poly1305 tag is returned separately rather than
    appended.  *ad* may be None.
    """
    if len(nonce) != crypto_aead_chacha20poly1305_NONCEBYTES: raise ValueError("truncated nonce")
    if len(key) != crypto_aead_chacha20poly1305_KEYBYTES: raise ValueError("truncated key")
    mlen = ctypes.c_ulonglong(len(message))
    adlen = ctypes.c_ulonglong(len(ad)) if ad is not None else ctypes.c_ulonglong(0)
    # Detached mode: output buffer is message-sized; the tag gets its own buffer.
    c = ctypes.create_string_buffer(mlen.value)
    maclen_p = ctypes.c_ulonglong(crypto_aead_chacha20poly1305_ABYTES)
    mac = ctypes.create_string_buffer(maclen_p.value)
    __check(sodium.crypto_aead_chacha20poly1305_encrypt_detached(c, mac, ctypes.byref(maclen_p), message, mlen, ad, adlen, None, nonce, key))
    return c.raw, mac.raw
# crypto_aead_chacha20poly1305_decrypt_detached(unsigned char *m, unsigned char *nsec, const unsigned char *c, unsigned long long clen, const unsigned char *mac, const unsigned char *ad, unsigned long long adlen, const unsigned char *npub, const unsigned char *k)
@sodium_version(1, 0, 9)
def crypto_aead_chacha20poly1305_decrypt_detached(ciphertext, mac, ad, nonce, key):
    """Verify the detached *mac* and decrypt *ciphertext*.

    Returns the plaintext; verification failure surfaces via __check().
    """
    if len(nonce) != crypto_aead_chacha20poly1305_NONCEBYTES:
        raise ValueError("truncated nonce")
    if len(key) != crypto_aead_chacha20poly1305_KEYBYTES:
        raise ValueError("truncated key")
    if len(mac) != crypto_aead_chacha20poly1305_ABYTES:
        raise ValueError("mac length != %i" % crypto_aead_chacha20poly1305_ABYTES)
    # Detached mode: the plaintext is exactly as long as the ciphertext.
    ct_len = ctypes.c_ulonglong(len(ciphertext))
    plain = ctypes.create_string_buffer(len(ciphertext))
    ad_len = ctypes.c_ulonglong(0 if ad is None else len(ad))
    __check(sodium.crypto_aead_chacha20poly1305_decrypt_detached(
        plain, None, ciphertext, ct_len, mac, ad, ad_len, nonce, key))
    return plain.raw
# crypto_aead_chacha20poly1305_ietf_encrypt(unsigned char *c, unsigned long long *clen_p, const unsigned char *m, unsigned long long mlen, const unsigned char *ad, unsigned long long adlen, const unsigned char *nsec, const unsigned char *npub, const unsigned char *k)
@sodium_version(1, 0, 4)
def crypto_aead_chacha20poly1305_ietf_encrypt(message, ad, nonce, key):
    """Encrypt *message* with IETF ChaCha20-Poly1305 (combined mode).

    *ad* is optional additional authenticated data (may be None).  Returns
    the ciphertext with the 16-byte tag appended.
    """
    if len(nonce) != crypto_aead_chacha20poly1305_ietf_NONCEBYTES:
        raise ValueError("truncated nonce")
    if len(key) != crypto_aead_chacha20poly1305_ietf_KEYBYTES:
        raise ValueError("truncated key")
    msg_len = ctypes.c_ulonglong(len(message))
    ad_len = ctypes.c_ulonglong(0 if ad is None else len(ad))
    out = ctypes.create_string_buffer(len(message) + 16)
    out_len = ctypes.c_ulonglong(0)
    __check(sodium.crypto_aead_chacha20poly1305_ietf_encrypt(
        out, ctypes.byref(out_len), message, msg_len, ad, ad_len, None, nonce, key))
    return out.raw
# crypto_aead_chacha20poly1305_ietf_decrypt(unsigned char *m, unsigned long long *mlen, unsigned char *nsec, const unsigned char *c, unsigned long long clen, const unsigned char *ad, unsigned long long adlen, const unsigned char *npub, const unsigned char *k)
@sodium_version(1, 0, 4)
def crypto_aead_chacha20poly1305_ietf_decrypt(ciphertext, ad, nonce, key):
    """Verify and decrypt a combined-mode IETF ChaCha20-Poly1305 ciphertext.

    *ciphertext* is message || 16-byte tag; *ad* may be None and must match
    the value used at encryption time.  Returns the plaintext;
    authentication failure surfaces via __check().
    """
    if len(nonce) != crypto_aead_chacha20poly1305_ietf_NONCEBYTES: raise ValueError("truncated nonce")
    if len(key) != crypto_aead_chacha20poly1305_ietf_KEYBYTES: raise ValueError("truncated key")
    # Plaintext is 16 bytes shorter than the combined ciphertext (the tag).
    m = ctypes.create_string_buffer(len(ciphertext) - 16)
    mlen = ctypes.c_ulonglong(0)
    clen = ctypes.c_ulonglong(len(ciphertext))
    adlen = ctypes.c_ulonglong(len(ad)) if ad is not None else ctypes.c_ulonglong(0)
    __check(sodium.crypto_aead_chacha20poly1305_ietf_decrypt(m, ctypes.byref(mlen), None, ciphertext, clen, ad, adlen, nonce, key))
    return m.raw
# crypto_aead_chacha20poly1305_ietf_encrypt_detached(unsigned char *c, unsigned char *mac, unsigned long long *maclen_p, const unsigned char *m, unsigned long long mlen, const unsigned char *ad, unsigned long long adlen, const unsigned char *nsec, const unsigned char *npub, const unsigned char *k)
@sodium_version(1, 0, 9)
def crypto_aead_chacha20poly1305_ietf_encrypt_detached(message, ad, nonce, key):
    """Encrypt *message* with IETF ChaCha20-Poly1305 in detached mode.

    Returns (ciphertext, mac): the ciphertext is exactly len(message)
    bytes and the tag is returned separately.  *ad* may be None.
    """
    if len(nonce) != crypto_aead_chacha20poly1305_ietf_NONCEBYTES: raise ValueError("truncated nonce")
    if len(key) != crypto_aead_chacha20poly1305_ietf_KEYBYTES: raise ValueError("truncated key")
    mlen = ctypes.c_ulonglong(len(message))
    adlen = ctypes.c_ulonglong(len(ad)) if ad is not None else ctypes.c_ulonglong(0)
    # Detached mode: output buffer is message-sized; the tag gets its own buffer.
    c = ctypes.create_string_buffer(mlen.value)
    maclen_p = ctypes.c_ulonglong(crypto_aead_chacha20poly1305_ietf_ABYTES)
    mac = ctypes.create_string_buffer(maclen_p.value)
    __check(sodium.crypto_aead_chacha20poly1305_ietf_encrypt_detached(c, mac, ctypes.byref(maclen_p), message, mlen, ad, adlen, None, nonce, key))
    return c.raw, mac.raw
# crypto_aead_chacha20poly1305_ietf_decrypt_detached(unsigned char *m, unsigned char *nsec, const unsigned char *c, unsigned long long clen, const unsigned char *mac, const unsigned char *ad, unsigned long long adlen, const unsigned char *npub, const unsigned char *k)
@sodium_version(1, 0, 9)
def crypto_aead_chacha20poly1305_ietf_decrypt_detached(ciphertext, mac, ad, nonce, key):
    """Verify the detached *mac* and decrypt *ciphertext* (IETF variant).

    Returns the plaintext; verification failure surfaces via __check()
    (it does not return -1).
    """
    if len(nonce) != crypto_aead_chacha20poly1305_ietf_NONCEBYTES: raise ValueError("truncated nonce")
    if len(key) != crypto_aead_chacha20poly1305_ietf_KEYBYTES: raise ValueError("truncated key")
    if len(mac) != crypto_aead_chacha20poly1305_ietf_ABYTES:
        raise ValueError("mac length != %i" % crypto_aead_chacha20poly1305_ietf_ABYTES)
    # Detached mode: plaintext has the same length as the ciphertext.
    clen = ctypes.c_ulonglong(len(ciphertext))
    m = ctypes.create_string_buffer(clen.value)
    adlen = ctypes.c_ulonglong(len(ad)) if ad is not None else ctypes.c_ulonglong(0)
    __check(sodium.crypto_aead_chacha20poly1305_ietf_decrypt_detached(m, None, ciphertext, clen, mac, ad, adlen, nonce, key))
    return m.raw
#crypto_aead_xchacha20poly1305_ietf_encrypt(ciphertext, &ciphertext_len,
# message, message_len,
# additional_data, additional_data_len,
# null, nonce, key);
@sodium_version(1, 0, 12)
def crypto_aead_xchacha20poly1305_ietf_encrypt(message, ad, nonce, key):
    """Encrypt *message* with XChaCha20-Poly1305 (combined mode).

    *ad* is optional additional authenticated data (may be None).  Returns
    the ciphertext with the 16-byte tag appended.
    """
    if len(nonce) != crypto_aead_xchacha20poly1305_ietf_NPUBBYTES:
        raise ValueError("truncated nonce")
    if len(key) != crypto_aead_xchacha20poly1305_ietf_KEYBYTES:
        raise ValueError("truncated key")
    msg_len = ctypes.c_ulonglong(len(message))
    ad_len = ctypes.c_ulonglong(0 if ad is None else len(ad))
    out = ctypes.create_string_buffer(len(message) + 16)
    out_len = ctypes.c_ulonglong(0)
    __check(sodium.crypto_aead_xchacha20poly1305_ietf_encrypt(
        out, ctypes.byref(out_len), message, msg_len, ad, ad_len, None, nonce, key))
    return out.raw
#crypto_aead_xchacha20poly1305_ietf_decrypt(decrypted, &decrypted_len,
# null,
# ciphertext, ciphertext_len,
# additional_data, additional_data_len,
# nonce, key);
@sodium_version(1, 0, 12)
def crypto_aead_xchacha20poly1305_ietf_decrypt(ciphertext, ad, nonce, key):
    """Verify and decrypt a combined-mode XChaCha20-Poly1305 *ciphertext*.

    *ciphertext* is message || 16-byte tag; *ad* may be None and must match
    the value used at encryption time.  Returns the plaintext;
    authentication failure surfaces via __check().
    """
    if len(nonce) != crypto_aead_xchacha20poly1305_ietf_NPUBBYTES: raise ValueError("truncated nonce")
    if len(key) != crypto_aead_xchacha20poly1305_ietf_KEYBYTES: raise ValueError("truncated key")
    # A valid combined ciphertext always carries the 16-byte tag.  Without
    # this guard a shorter input made create_string_buffer() fail with an
    # opaque ctypes error instead of a meaningful ValueError.
    if len(ciphertext) < 16: raise ValueError("truncated ciphertext")
    m = ctypes.create_string_buffer(len(ciphertext) - 16)
    mlen = ctypes.c_ulonglong(0)
    clen = ctypes.c_ulonglong(len(ciphertext))
    adlen = ctypes.c_ulonglong(len(ad)) if ad is not None else ctypes.c_ulonglong(0)
    __check(sodium.crypto_aead_xchacha20poly1305_ietf_decrypt(m, ctypes.byref(mlen),
                                                              None,
                                                              ciphertext, clen,
                                                              ad, adlen,
                                                              nonce, key))
    return m.raw
# crypto_auth(unsigned char *out, const unsigned char *in, unsigned long long inlen, const unsigned char *k)
def crypto_auth(m, k):
    """Return a crypto_auth_BYTES authentication tag for *m* under key *k*."""
    if m is None or k is None:
        raise ValueError("invalid parameters")
    if len(k) != crypto_auth_KEYBYTES:
        raise ValueError("invalid key")
    tag = ctypes.create_string_buffer(crypto_auth_BYTES)
    m_len = ctypes.c_ulonglong(len(m))
    __check(sodium.crypto_auth(tag, m, m_len, k))
    return tag.raw
# crypto_auth_verify(const unsigned char *h, const unsigned char *in, unsigned long long inlen, const unsigned char *k)
def crypto_auth_verify(h, m, k):
    """Verify that tag *h* authenticates message *m* under key *k*.

    Returns None on success; a failed verification surfaces via __check().
    """
    if h is None or m is None or k is None:
        raise ValueError("invalid parameters")
    if len(k) != crypto_auth_KEYBYTES:
        raise ValueError("invalid key")
    if len(h) != crypto_auth_BYTES:
        raise ValueError("invalid tag")
    __check(sodium.crypto_auth_verify(h, m, ctypes.c_ulonglong(len(m)), k))
# crypto_generichash(unsigned char *out, size_t outlen, const unsigned char *in, unsigned long long inlen, const unsigned char *key, size_t keylen)
@encode_strings
def crypto_generichash(m, k=b'', outlen=crypto_generichash_BYTES):
    """One-shot generichash of *m*, optionally keyed with *k*.

    Returns an *outlen*-byte digest.
    """
    buf = ctypes.create_string_buffer(outlen)
    __check(sodium.crypto_generichash(buf, ctypes.c_size_t(outlen), m, ctypes.c_ulonglong(len(m)), k, ctypes.c_size_t(len(k))))
    return buf.raw
# crypto_generichash_init(crypto_generichash_state *state, const unsigned char *key, const size_t keylen, const size_t outlen);
@encode_strings
def crypto_generichash_init(outlen=crypto_generichash_BYTES, k=b''):
    """Begin a streaming generichash computation.

    Returns an opaque state buffer to pass to crypto_generichash_update()
    and crypto_generichash_final().
    """
    state = ctypes.create_string_buffer(crypto_generichash_STATEBYTES)
    __check(sodium.crypto_generichash_init(ctypes.byref(state), k, ctypes.c_size_t(len(k)), ctypes.c_size_t(outlen)))
    return state
# crypto_generichash_update(crypto_generichash_state *state, const unsigned char *in, unsigned long long inlen);
@encode_strings
def crypto_generichash_update(state, m):
    """Absorb *m* into a streaming generichash *state*.

    The state is updated in place and also returned for chaining.
    """
    if len(state) != crypto_generichash_STATEBYTES: raise ValueError("invalid state")
    __check(sodium.crypto_generichash_update(ctypes.byref(state), m, ctypes.c_ulonglong(len(m))))
    return state
# crypto_generichash_final(crypto_generichash_state *state, unsigned char *out, const size_t outlen);
def crypto_generichash_final(state, outlen=crypto_generichash_BYTES):
    """Finish a streaming generichash and return the *outlen*-byte digest."""
    if len(state) != crypto_generichash_STATEBYTES:
        raise ValueError("invalid state")
    digest = ctypes.create_string_buffer(outlen)
    __check(sodium.crypto_generichash_final(ctypes.byref(state), digest, ctypes.c_size_t(outlen)))
    return digest.raw
def crypto_generichash_blake2b_salt_personal(message, outlen = crypto_generichash_blake2b_BYTES, key = b'', salt = b'', personal = b''):
keylen = len(key)
if keylen != 0 and not crypto_generichash_blake2b_BYTES_MIN <= keylen <= crypto_generichash_blake2b_KEYBYTES_MAX:
raise ValueError("%d <= len(key) <= %d - %d received" % (crypto_generichash_blake2b_BYTES_MIN, crypto_generichash_blake2b_KEYBYTES_MAX, keylen))
salt = pad_buf(salt, crypto_generichash_blake2b_SALTBYTES, 'salt')
personal = pad_buf(personal, crypto_generichash_blake2b_PERSONALBYTES, 'personal')
buf = ctypes.create_string_buffer(outlen)
outlen = ctypes.c_size_t(outlen)
inlen = ctypes.c_ulonglong(len(message))
keylen = ctypes.c_size_t(keylen)
__check(sodium.crypto_generichash_blake2b_salt_personal(buf, outlen, message, inlen, key, keylen, salt, personal))
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class ServicesOperations(object):
"""ServicesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
    def query_by_id(
            self, subscription_id, resource_group, workspace, id, expand=False, custom_headers=None, raw=False, **operation_config):
        """Get a Service.

        Get a Service by Id.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group: The Name of the resource group in which the
         workspace is located.
        :type resource_group: str
        :param workspace: The name of the workspace.
        :type workspace: str
        :param id: The Service Id.
        :type id: str
        :param expand: Set to True to include Model details.
        :type expand: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ServiceResponseBase or ClientRawResponse if raw=true
        :rtype: ~_restclient.models.ServiceResponseBase or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ModelErrorResponseException<_restclient.models.ModelErrorResponseException>`
        """
        # Construct URL: fill the operation's URL template with the
        # URL-encoded path parameters.
        url = self.query_by_id.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
            'resourceGroup': self._serialize.url("resource_group", resource_group, 'str'),
            'workspace': self._serialize.url("workspace", workspace, 'str'),
            'id': self._serialize.url("id", id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        # NOTE(review): `expand` defaults to False (not None), so this query
        # parameter is sent on every call unless the caller passes None.
        query_parameters = {}
        if expand is not None:
            query_parameters['expand'] = self._serialize.query("expand", expand, 'bool')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # 200 is the only documented success code for this operation.
        if response.status_code not in [200]:
            raise models.ModelErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ServiceResponseBase', response)
        if raw:
            # Return the deserialized body together with the raw HTTP response.
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    query_by_id.metadata = {'url': '/modelmanagement/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.MachineLearningServices/workspaces/{workspace}/services/{id}'}
    def patch(
            self, subscription_id, resource_group, workspace, id, body, custom_headers=None, raw=False, **operation_config):
        """Patch a Service.

        Patch a specific Service.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group: The Name of the resource group in which the
         workspace is located.
        :type resource_group: str
        :param workspace: The name of the workspace.
        :type workspace: str
        :param id: The Service Id.
        :type id: str
        :param body: The payload that is used to patch the Service.
        :type body: list[~_restclient.models.JsonPatchOperation]
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ModelErrorResponseException<_restclient.models.ModelErrorResponseException>`
        """
        # Construct URL: fill the operation's URL template with the
        # URL-encoded path parameters.
        url = self.patch.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
            'resourceGroup': self._serialize.url("resource_group", resource_group, 'str'),
            'workspace': self._serialize.url("workspace", workspace, 'str'),
            'id': self._serialize.url("id", id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters (this operation takes none)
        query_parameters = {}
        # Construct headers -- note the JSON-Patch media type for the body.
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json-patch+json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct body: serialized as a list of JSON-Patch operations.
        body_content = self._serialize.body(body, '[JsonPatchOperation]')
        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        # 200 and 202 are the documented success codes for this operation.
        if response.status_code not in [200, 202]:
            raise models.ModelErrorResponseException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    patch.metadata = {'url': '/modelmanagement/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.MachineLearningServices/workspaces/{workspace}/services/{id}'}
def delete(
self, subscription_id, resource_group, workspace, id, custom_headers=None, raw=False, **operation_config):
"""Delete a Service.
Delete a specific Service.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group: The Name of the resource group in which the
workspace is located.
:type resource_group: str
:param workspace: The name of the workspace.
:type workspace: str
:param id: The Service Id.
:type id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ModelErrorResponseException<_restclient.models.ModelErrorResponseException>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
'resourceGroup': self._serialize.url("resource_group", resource_group, 'str'),
'workspace': self._serialize.url("workspace", workspace, 'str'),
'id': self._serialize.url("id", id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
raise models.ModelErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/modelmanagement/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.MachineLearningServices/workspaces/{workspace}/services/{id}'}
def list_query(
self, subscription_id, resource_group, workspace, image_id=None, image_digest=None, image_name=None, model_id=None, model_name=None, name=None, tag=None, count=None, compute_type=None, skip_token=None, tags=None, properties=None, run_id=None, expand=False, orderby="UpdatedAtDesc", custom_headers=None, raw=False, **operation_config):
"""Query the list of Services in a Workspace.
If no filter is passed, the query lists all Services in the Workspace.
The returned list is paginated and the count of item in each page is an
optional parameter.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group: The Name of the resource group in which the
workspace is located.
:type resource_group: str
:param workspace: The name of the workspace.
:type workspace: str
:param image_id: The Image Id.
:type image_id: str
:param image_digest: The Image Digest.
:type image_digest: str
:param image_name: The Image name.
:type image_name: str
:param model_id: The Model Id.
:type model_id: str
:param model_name: The Model name.
:type model_name: str
:param name: The object name.
:type name: str
:param tag: The object tag.
:type tag: str
:param count: The number of items to retrieve in a page.
:type count: int
:param compute_type: The compute environment type.
:type compute_type: str
:param skip_token: The continuation token to retrieve the next page.
:type skip_token: str
:param tags: A set of tags with which to filter the returned models.
It is a comma separated string of tags key or tags key=value
Example: tagKey1,tagKey2,tagKey3=value3 .
:type tags: str
:param properties: A set of properties with which to filter the
returned models.
It is a comma separated string of properties key and/or properties
key=value
Example: propKey1,propKey2,propKey3=value3 .
:type properties: str
:param run_id: runId for model associated with service.
:type run_id: str
:param expand: Set to True to include Model details.
:type expand: bool
:param orderby: The option to order the response. Possible values
include: 'CreatedAtDesc', 'CreatedAtAsc', 'UpdatedAtDesc',
'UpdatedAtAsc'
:type orderby: str or ~_restclient.models.OrderString
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PaginatedServiceList or ClientRawResponse if raw=true
:rtype: ~_restclient.models.PaginatedServiceList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ModelErrorResponseException<_restclient.models.ModelErrorResponseException>`
"""
# Construct URL
url = self.list_query.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
'resourceGroup': self._serialize.url("resource_group", resource_group, 'str'),
'workspace': self._serialize.url("workspace", workspace, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if image_id is not None:
query_parameters['imageId'] = self._serialize.query("image_id", image_id, 'str')
if image_digest is not None:
query_parameters['imageDigest'] = self._serialize.query("image_digest", image_digest, 'str')
if image_name is not None:
query_parameters['imageName'] = self._serialize.query("image_name", image_name, 'str')
if model_id is not None:
query_parameters['modelId'] = self._serialize.query("model_id", model_id, 'str')
if model_name is not None:
query_parameters['modelName'] = self._serialize.query("model_name", model_name, 'str')
if name is not None:
query_parameters['name'] = self._serialize.query("name", name, 'str')
if tag is not None:
query_parameters['tag'] = self._serialize.query("tag", tag, 'str')
if count is not None:
query_parameters['count'] = self._serialize.query("count", count, 'int')
if compute_type is not None:
query_parameters['computeType'] = self._serialize.query("compute_type", compute_type, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
if tags is not None:
query_parameters['tags'] = self._serialize.query("tags", tags, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query("properties", properties, 'str')
if run_id is not None:
query_parameters['runId'] = self._serialize.query("run_id", run_id, 'str')
if expand is not None:
query_parameters['expand'] = self._serialize.query("expand", expand, 'bool')
if orderby is not None:
query_parameters['orderby'] = self._serialize.query("orderby", orderby, 'OrderString')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ModelErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code | |
]
testResult = main.FALSE
installResult = main.intentFunction.installPointIntent(
main,
name="NOOPTION",
senders=senders,
recipients=recipients )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="NOOPTION",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "IPV4: Add point intents between h1 and h9" )
main.assertReturnString = "Assertion Result for IPV4 point intent\n"
senders = [
{ "name":"h1","device":"of:0000000000000005/1","mac":"00:00:00:00:00:01" }
]
recipients = [
{ "name":"h9","device":"of:0000000000000006/1","mac":"00:00:00:00:00:09" }
]
installResult = main.intentFunction.installPointIntent(
main,
name="IPV4",
senders=senders,
recipients=recipients,
ethType="IPV4" )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="IPV4",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "IPV4_2: Add point intents between h1 and h9" )
main.assertReturnString = "Assertion Result for IPV4 no mac address point intents\n"
senders = [
{ "name":"h1","device":"of:0000000000000005/1" }
]
recipients = [
{ "name":"h9","device":"of:0000000000000006/1" }
]
installResult = main.intentFunction.installPointIntent(
main,
name="IPV4_2",
senders=senders,
recipients=recipients,
ethType="IPV4" )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="IPV4_2",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "SDNIP-ICMP: Add point intents between h1 and h9" )
main.assertReturnString = "Assertion Result for SDNIP-ICMP IPV4 using TCP point intents\n"
senders = [
{ "name":"h1","device":"of:0000000000000005/1","mac":"00:00:00:00:00:01",
"ip":main.h1.hostIp }
]
recipients = [
{ "name":"h9","device":"of:0000000000000006/1","mac":"00:00:00:00:00:09",
"ip":main.h9.hostIp }
]
ipProto = main.params[ 'SDNIP' ][ 'icmpProto' ]
        # Unnecessary, not including this in the selectors
tcpSrc = main.params[ 'SDNIP' ][ 'srcPort' ]
tcpDst = main.params[ 'SDNIP' ][ 'dstPort' ]
installResult = main.intentFunction.installPointIntent(
main,
name="SDNIP-ICMP",
senders=senders,
recipients=recipients,
ethType="IPV4",
ipProto=ipProto,
tcpSrc=tcpSrc,
tcpDst=tcpDst )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="SDNIP_ICMP",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "SDNIP-TCP: Add point intents between h1 and h9" )
main.assertReturnString = "Assertion Result for SDNIP-TCP IPV4 using ICMP point intents\n"
mac1 = main.hostsData[ 'h1' ][ 'mac' ]
mac2 = main.hostsData[ 'h9' ][ 'mac' ]
ip1 = str( main.hostsData[ 'h1' ][ 'ipAddresses' ][ 0 ] ) + "/32"
ip2 = str( main.hostsData[ 'h9' ][ 'ipAddresses' ][ 0 ] ) + "/32"
ipProto = main.params[ 'SDNIP' ][ 'tcpProto' ]
tcp1 = main.params[ 'SDNIP' ][ 'srcPort' ]
tcp2 = main.params[ 'SDNIP' ][ 'dstPort' ]
stepResult = main.intentFunction.pointIntentTcp(
main,
name="SDNIP-TCP",
host1="h1",
host2="h9",
deviceId1="of:0000000000000005/1",
deviceId2="of:0000000000000006/1",
mac1=mac1,
mac2=mac2,
ethType="IPV4",
ipProto=ipProto,
ip1=ip1,
ip2=ip2,
tcp1=tcp1,
tcp2=tcp2 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "DUALSTACK1: Add point intents between h3 and h11" )
main.assertReturnString = "Assertion Result for Dualstack1 IPV4 with mac address point intents\n"
senders = [
{ "name":"h3","device":"of:0000000000000005/3","mac":"00:00:00:00:00:03" }
]
recipients = [
{ "name":"h11","device":"of:0000000000000006/3","mac":"00:00:00:00:00:0B" }
]
installResult = main.intentFunction.installPointIntent(
main,
name="DUALSTACK1",
senders=senders,
recipients=recipients,
ethType="IPV4" )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="DUALSTACK1",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "VLAN: Add point intents between h5 and h21" )
main.assertReturnString = "Assertion Result for VLAN IPV4 with mac address point intents\n"
senders = [
{ "name":"h5","device":"of:0000000000000005/5","mac":"00:00:00:00:00:05" }
]
recipients = [
{ "name":"h21","device":"of:0000000000000007/5","mac":"00:00:00:00:00:15" }
]
installResult = main.intentFunction.installPointIntent(
main,
name="DUALSTACK1",
senders=senders,
recipients=recipients,
ethType="IPV4" )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="DUALSTACK1",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "1HOP: Add point intents between h1 and h3" )
main.assertReturnString = "Assertion Result for 1HOP IPV4 with no mac address point intents\n"
senders = [
{ "name":"h1","device":"of:0000000000000005/1","mac":"00:00:00:00:00:01" }
]
recipients = [
{ "name":"h3","device":"of:0000000000000005/3","mac":"00:00:00:00:00:03" }
]
installResult = main.intentFunction.installPointIntent(
main,
name="1HOP IPV4",
senders=senders,
recipients=recipients,
ethType="IPV4" )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="1HOP IPV4",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.intentFunction.report( main )
def CASE3000( self, main ):
"""
Add single point to multi point intents
- Get device ids
- Add single point to multi point intents
- Check intents
- Verify flows
- Ping hosts
- Reroute
- Link down
- Verify flows
- Check topology
- Ping hosts
- Link up
- Verify flows
- Check topology
- Ping hosts
- Remove intents
"""
assert main, "There is no main"
assert main.CLIs, "There is no main.CLIs"
assert main.Mininet1, "Mininet handle should be named Mininet1"
assert main.numSwitch, "Placed the total number of switch topology in \
main.numSwitch"
main.case( "Single To Multi Point Intents Test - " +
str( main.numCtrls ) + " NODE(S) - OF " + main.OFProtocol )
main.caseExplanation = "This test case will test single point to" +\
" multi point intents using " +\
str( main.numCtrls ) + " node(s) cluster;\n" +\
"Different type of hosts will be tested in " +\
"each step such as IPV4, Dual stack, VLAN etc" +\
";\nThe test will use OF " + main.OFProtocol +\
" OVS running in Mininet"
main.step( "NOOPTION: Add single point to multi point intents" )
stepResult = main.TRUE
hostNames = [ 'h8', 'h16', 'h24' ]
devices = [ 'of:0000000000000005/8', 'of:0000000000000006/8', \
'of:0000000000000007/8' ]
macs = [ '00:00:00:00:00:08', '00:00:00:00:00:10', '00:00:00:00:00:18' ]
stepResult = main.intentFunction.singleToMultiIntent(
main,
name="NOOPTION",
hostNames=hostNames,
devices=devices,
sw1="s5",
sw2="s2",
expectedLink=18 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="NOOPTION: Successfully added single "
+ " point to multi point intents" +
" with no match action",
onfail="NOOPTION: Failed to add single point"
+ " point to multi point intents" +
" with no match action" )
main.step( "IPV4: Add single point to multi point intents" )
stepResult = main.TRUE
stepResult = main.intentFunction.singleToMultiIntent(
main,
name="IPV4",
hostNames=hostNames,
devices=devices,
ports=None,
ethType="IPV4",
macs=macs,
bandwidth="",
lambdaAlloc=False,
ipProto="",
ipAddresses="",
tcp="",
sw1="s5",
sw2="s2",
expectedLink=18 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="IPV4: Successfully added single "
+ " point to multi point intents" +
" with IPV4 type and MAC addresses",
onfail="IPV4: Failed to add single point"
+ " point to multi point intents" +
" with IPV4 type and MAC addresses" )
main.step( "IPV4_2: Add single point to multi point intents" )
stepResult = main.TRUE
hostNames = [ 'h8', 'h16', 'h24' ]
stepResult = main.intentFunction.singleToMultiIntent(
main,
name="IPV4",
hostNames=hostNames,
ethType="IPV4",
lambdaAlloc=False )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="IPV4_2: Successfully added single "
+ " point to multi point intents" +
" with IPV4 type and no MAC addresses",
onfail="IPV4_2: Failed to add single point"
+ " point to multi point intents" +
" with IPV4 type and no MAC addresses" )
main.step( "VLAN: Add single point to multi point intents" )
stepResult = main.TRUE
hostNames = [ 'h4', 'h12', 'h20' ]
devices = [ 'of:0000000000000005/4', 'of:0000000000000006/4', \
'of:0000000000000007/4' ]
macs = [ '00:00:00:00:00:04', '00:00:00:00:00:0C', '00:00:00:00:00:14' ]
stepResult = main.intentFunction.singleToMultiIntent(
main,
name="VLAN",
hostNames=hostNames,
devices=devices,
ports=None,
ethType="IPV4",
macs=macs,
bandwidth="",
lambdaAlloc=False,
ipProto="",
ipAddresses="",
tcp="",
sw1="s5",
sw2="s2",
expectedLink=18 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="VLAN: Successfully added single "
+ " point to multi point intents" +
" with IPV4 type and MAC addresses" +
" in the same VLAN",
onfail="VLAN: Failed to add single point"
+ " point to multi point intents" +
" with IPV4 type and MAC addresses" +
" in the same VLAN")
def CASE4000( self, main ):
"""
Add multi point to single point intents
- Get device ids
- Add multi point to single point intents
- Check intents
- Verify flows
- Ping hosts
- Reroute
- Link down
- Verify flows
- Check topology
- Ping hosts
- Link up
- Verify flows
- Check topology
- Ping hosts
- Remove intents
"""
assert main, "There is no main"
assert main.CLIs, "There is no main.CLIs"
assert main.Mininet1, "Mininet handle should be named Mininet1"
assert main.numSwitch, "Placed the total number of switch topology in \
main.numSwitch"
main.case( "Multi To Single Point Intents Test - " +
str( main.numCtrls ) + " NODE(S) - OF " + main.OFProtocol )
main.caseExplanation = "This test case will test single point to" +\
" multi point intents using " +\
str( main.numCtrls ) + " node(s) cluster;\n" +\
"Different type of hosts will be tested in " +\
"each step such as IPV4, Dual stack, VLAN etc" +\
";\nThe test will use OF " + main.OFProtocol +\
" OVS running in Mininet"
main.step( "NOOPTION: Add multi point to single point intents" )
stepResult = main.TRUE
hostNames = [ 'h8', 'h16', 'h24' | |
typing.Sequence[str]
gateway : str
ip_addresses : typing.Sequence[str]
is_up : bool
mac_address : str
space : str
'''
dns_nameservers_ = dns_nameservers
gateway_ = gateway
ip_addresses_ = ip_addresses
is_up_ = is_up
mac_address_ = mac_address
space_ = space
# Validate arguments against known Juju API types.
if dns_nameservers_ is not None and not isinstance(dns_nameservers_, (bytes, str, list)):
raise Exception("Expected dns_nameservers_ to be a Sequence, received: {}".format(type(dns_nameservers_)))
if gateway_ is not None and not isinstance(gateway_, (bytes, str)):
raise Exception("Expected gateway_ to be a str, received: {}".format(type(gateway_)))
if ip_addresses_ is not None and not isinstance(ip_addresses_, (bytes, str, list)):
raise Exception("Expected ip_addresses_ to be a Sequence, received: {}".format(type(ip_addresses_)))
if is_up_ is not None and not isinstance(is_up_, bool):
raise Exception("Expected is_up_ to be a bool, received: {}".format(type(is_up_)))
if mac_address_ is not None and not isinstance(mac_address_, (bytes, str)):
raise Exception("Expected mac_address_ to be a str, received: {}".format(type(mac_address_)))
if space_ is not None and not isinstance(space_, (bytes, str)):
raise Exception("Expected space_ to be a str, received: {}".format(type(space_)))
self.dns_nameservers = dns_nameservers_
self.gateway = gateway_
self.ip_addresses = ip_addresses_
self.is_up = is_up_
self.mac_address = mac_address_
self.space = space_
self.unknown_fields = unknown_fields
class NetworkRoute(Type):
    # Mapping between python attribute names and the wire (schema) keys.
    _toSchema = {'destination_cidr': 'destination-cidr', 'gateway_ip': 'gateway-ip', 'metric': 'metric'}
    _toPy = {'destination-cidr': 'destination_cidr', 'gateway-ip': 'gateway_ip', 'metric': 'metric'}
    def __init__(self, destination_cidr=None, gateway_ip=None, metric=None, **unknown_fields):
        '''
        destination_cidr : str
        gateway_ip : str
        metric : int
        '''
        # Validate arguments against known Juju API types before storing.
        if destination_cidr is not None and not isinstance(destination_cidr, (bytes, str)):
            raise Exception("Expected destination_cidr_ to be a str, received: {}".format(type(destination_cidr)))
        if gateway_ip is not None and not isinstance(gateway_ip, (bytes, str)):
            raise Exception("Expected gateway_ip_ to be a str, received: {}".format(type(gateway_ip)))
        if metric is not None and not isinstance(metric, int):
            raise Exception("Expected metric_ to be a int, received: {}".format(type(metric)))
        self.destination_cidr = destination_cidr
        self.gateway_ip = gateway_ip
        self.metric = metric
        self.unknown_fields = unknown_fields
class NotifyWatchResult(Type):
    # Mapping between python attribute names and the wire (schema) keys.
    _toSchema = {'error': 'error', 'notifywatcherid': 'NotifyWatcherId'}
    _toPy = {'NotifyWatcherId': 'notifywatcherid', 'error': 'error'}
    def __init__(self, notifywatcherid=None, error=None, **unknown_fields):
        '''
        notifywatcherid : str
        error : Error
        '''
        # Hydrate a nested error payload into an Error instance.
        error = Error.from_json(error) if error else None
        if notifywatcherid is not None and not isinstance(notifywatcherid, (bytes, str)):
            raise Exception("Expected notifywatcherid_ to be a str, received: {}".format(type(notifywatcherid)))
        if error is not None and not isinstance(error, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(error)))
        self.notifywatcherid = notifywatcherid
        self.error = error
        self.unknown_fields = unknown_fields
class NotifyWatchResults(Type):
    # Mapping between python attribute names and the wire (schema) keys.
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}
    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~NotifyWatchResult]
        '''
        # Hydrate each result payload; an absent sequence becomes [].
        results = [NotifyWatchResult.from_json(o) for o in results or []]
        if results is not None and not isinstance(results, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results)))
        self.results = results
        self.unknown_fields = unknown_fields
class Number(Type):
    # Mapping between python attribute names and the wire (schema) keys.
    _toSchema = {'build': 'Build', 'major': 'Major', 'minor': 'Minor', 'patch': 'Patch', 'tag': 'Tag'}
    _toPy = {'Build': 'build', 'Major': 'major', 'Minor': 'minor', 'Patch': 'patch', 'Tag': 'tag'}
    def __init__(self, build=None, major=None, minor=None, patch=None, tag=None, **unknown_fields):
        '''
        build : int
        major : int
        minor : int
        patch : int
        tag : str
        '''
        # Version components are plain ints; only the tag is textual.
        if build is not None and not isinstance(build, int):
            raise Exception("Expected build_ to be a int, received: {}".format(type(build)))
        if major is not None and not isinstance(major, int):
            raise Exception("Expected major_ to be a int, received: {}".format(type(major)))
        if minor is not None and not isinstance(minor, int):
            raise Exception("Expected minor_ to be a int, received: {}".format(type(minor)))
        if patch is not None and not isinstance(patch, int):
            raise Exception("Expected patch_ to be a int, received: {}".format(type(patch)))
        if tag is not None and not isinstance(tag, (bytes, str)):
            raise Exception("Expected tag_ to be a str, received: {}".format(type(tag)))
        self.build = build
        self.major = major
        self.minor = minor
        self.patch = patch
        self.tag = tag
        self.unknown_fields = unknown_fields
class OfferArg(Type):
    # Mapping between python attribute names and the wire (schema) keys.
    _toSchema = {'macaroons': 'macaroons', 'offer_uuid': 'offer-uuid'}
    _toPy = {'macaroons': 'macaroons', 'offer-uuid': 'offer_uuid'}
    def __init__(self, macaroons=None, offer_uuid=None, **unknown_fields):
        '''
        macaroons : typing.Sequence[~Macaroon]
        offer_uuid : str
        '''
        # Hydrate each macaroon payload; an absent sequence becomes [].
        macaroons = [Macaroon.from_json(o) for o in macaroons or []]
        if macaroons is not None and not isinstance(macaroons, (bytes, str, list)):
            raise Exception("Expected macaroons_ to be a Sequence, received: {}".format(type(macaroons)))
        if offer_uuid is not None and not isinstance(offer_uuid, (bytes, str)):
            raise Exception("Expected offer_uuid_ to be a str, received: {}".format(type(offer_uuid)))
        self.macaroons = macaroons
        self.offer_uuid = offer_uuid
        self.unknown_fields = unknown_fields
class OfferArgs(Type):
    # Mapping between python attribute names and the wire (schema) keys.
    _toSchema = {'args': 'args'}
    _toPy = {'args': 'args'}
    def __init__(self, args=None, **unknown_fields):
        '''
        args : typing.Sequence[~OfferArg]
        '''
        # Hydrate each OfferArg payload; an absent sequence becomes [].
        args = [OfferArg.from_json(o) for o in args or []]
        if args is not None and not isinstance(args, (bytes, str, list)):
            raise Exception("Expected args_ to be a Sequence, received: {}".format(type(args)))
        self.args = args
        self.unknown_fields = unknown_fields
class OfferConnection(Type):
    # Mapping between python attribute names and the wire (schema) keys.
    _toSchema = {'endpoint': 'endpoint', 'ingress_subnets': 'ingress-subnets', 'relation_id': 'relation-id', 'source_model_tag': 'source-model-tag', 'status': 'status', 'username': 'username'}
    _toPy = {'endpoint': 'endpoint', 'ingress-subnets': 'ingress_subnets', 'relation-id': 'relation_id', 'source-model-tag': 'source_model_tag', 'status': 'status', 'username': 'username'}
    def __init__(self, endpoint=None, ingress_subnets=None, relation_id=None, source_model_tag=None, status=None, username=None, **unknown_fields):
        '''
        endpoint : str
        ingress_subnets : typing.Sequence[str]
        relation_id : int
        source_model_tag : str
        status : EntityStatus
        username : str
        '''
        # Hydrate the nested status payload into an EntityStatus instance.
        status = EntityStatus.from_json(status) if status else None
        if endpoint is not None and not isinstance(endpoint, (bytes, str)):
            raise Exception("Expected endpoint_ to be a str, received: {}".format(type(endpoint)))
        if ingress_subnets is not None and not isinstance(ingress_subnets, (bytes, str, list)):
            raise Exception("Expected ingress_subnets_ to be a Sequence, received: {}".format(type(ingress_subnets)))
        if relation_id is not None and not isinstance(relation_id, int):
            raise Exception("Expected relation_id_ to be a int, received: {}".format(type(relation_id)))
        if source_model_tag is not None and not isinstance(source_model_tag, (bytes, str)):
            raise Exception("Expected source_model_tag_ to be a str, received: {}".format(type(source_model_tag)))
        if status is not None and not isinstance(status, (dict, EntityStatus)):
            raise Exception("Expected status_ to be a EntityStatus, received: {}".format(type(status)))
        if username is not None and not isinstance(username, (bytes, str)):
            raise Exception("Expected username_ to be a str, received: {}".format(type(username)))
        self.endpoint = endpoint
        self.ingress_subnets = ingress_subnets
        self.relation_id = relation_id
        self.source_model_tag = source_model_tag
        self.status = status
        self.username = username
        self.unknown_fields = unknown_fields
class OfferFilter(Type):
_toSchema = {'allowed_users': 'allowed-users', 'application_description': 'application-description', 'application_name': 'application-name', 'application_user': 'application-user', 'connected_users': 'connected-users', 'endpoints': 'endpoints', 'model_name': 'model-name', 'offer_name': 'offer-name', 'owner_name': 'owner-name'}
_toPy = {'allowed-users': 'allowed_users', 'application-description': 'application_description', 'application-name': 'application_name', 'application-user': 'application_user', 'connected-users': 'connected_users', 'endpoints': 'endpoints', 'model-name': 'model_name', 'offer-name': 'offer_name', 'owner-name': 'owner_name'}
def __init__(self, allowed_users=None, application_description=None, application_name=None, application_user=None, connected_users=None, endpoints=None, model_name=None, offer_name=None, owner_name=None, **unknown_fields):
'''
allowed_users : typing.Sequence[str]
application_description : str
application_name : str
application_user : str
connected_users : typing.Sequence[str]
endpoints : typing.Sequence[~EndpointFilterAttributes]
model_name : str
offer_name : str
owner_name : str
'''
allowed_users_ = allowed_users
application_description_ = application_description
application_name_ = application_name
application_user_ = application_user
connected_users_ = connected_users
endpoints_ = [EndpointFilterAttributes.from_json(o) for o in endpoints or []]
model_name_ = model_name
offer_name_ = offer_name
owner_name_ = owner_name
# Validate arguments against known Juju API types.
if allowed_users_ is not None and not isinstance(allowed_users_, (bytes, str, list)):
raise Exception("Expected allowed_users_ to be a Sequence, received: {}".format(type(allowed_users_)))
if application_description_ is not None and not isinstance(application_description_, (bytes, str)):
raise Exception("Expected application_description_ to be a str, received: {}".format(type(application_description_)))
if application_name_ is not None and not isinstance(application_name_, (bytes, str)):
raise Exception("Expected application_name_ to be a str, received: {}".format(type(application_name_)))
if application_user_ is not None and not isinstance(application_user_, (bytes, str)):
raise Exception("Expected application_user_ to be a str, received: {}".format(type(application_user_)))
if connected_users_ is not None and not isinstance(connected_users_, (bytes, str, list)):
raise Exception("Expected connected_users_ to be a Sequence, received: {}".format(type(connected_users_)))
if endpoints_ is not None and not isinstance(endpoints_, (bytes, str, list)):
raise Exception("Expected endpoints_ to be a Sequence, received: {}".format(type(endpoints_)))
if model_name_ is not None and not isinstance(model_name_, (bytes, str)):
raise Exception("Expected model_name_ to be a str, received: {}".format(type(model_name_)))
if offer_name_ is not None and not isinstance(offer_name_, (bytes, str)):
raise | |
<reponame>vedantirb/cross3d<gh_stars>100-1000
##
# \namespace blurapi.libs.XML.xmlelement
#
# \remarks defines the XML Element wrapper instance for the blurapi system
#
# \author <EMAIL>
# \author <NAME>
# \date 04/09/10
#
import xml.dom.minidom
# Load the monkey patched version to fix a known bug http://bugs.python.org/issue5752
import blurdev.XML.minidom
from blurdev.XML.minidom import escape, unescape
from PyQt4.QtCore import QRect, QRectF, QPoint, QPointF, QSize, QSizeF, QDate, QDateTime, QString, QByteArray, Qt
from PyQt4.QtGui import QColor, QFont
class XMLElement:
"""Ease of use wrapper class for :class:`xml.dom.minidom.Element`
The XMLElement class is the root class for all blurdev XML types. It wraps
the :class:`xml.dom.minidom.Element` type provided in the standard library.
The constructor allows it be initialized with a
:class:`xml.dom.minidom.Element` instance.
"""
def __eq__( self, other ):
""" checks to see if the wrapper <xml.dom.minidom.Element> instance is the same """
result = False
if ( isinstance( other, XMLElement ) ):
result = ( self._object == other._object )
return result
def __getattr__( self, key ):
""" pass along all unknown attributes to the <xml.dom.minidom.Element> class instance """
return getattr( self._object, key )
def __init__( self, object, filename = '' ):
""" initialize the class with an <xml.dom.minidom.Element> instance """
if ( object == None ):
object = xml.dom.minidom.Element(None)
self._object = object
self.__file__ = filename
# Used to allow saving empty attributes.
self.allowEmptyAttrs = False
def _document( self ):
""" recursese up the hierarchy to find the parent who is a <xml.dom.minidom.Document> class """
out = self._object
while ( out and not isinstance( out, xml.dom.minidom.Document ) ):
out = out.parentNode
return out
def _children( self ):
""" collects the minidom child nodes which are <xml.dom.minidom.Element> types """
if ( self._object ):
return [ child for child in self._object.childNodes if isinstance( child, xml.dom.minidom.Element ) ]
return []
def _findPoint(self, name, cls, method):
child = self.findChild(name)
if child:
x = method(child.attribute('x', 0))
y = method(child.attribute('y', 0))
return cls(x, y)
return cls()
def _findRect( self, name, cls, method ):
rect = cls()
child = self.findChild( name )
if ( child ):
x = method( child.attribute( 'x', 0 ) )
y = method( child.attribute( 'y', 0 ) )
w = method( child.attribute( 'width', 0 ) )
h = method( child.attribute( 'height', 0 ) )
rect = cls( x, y, w, h )
return rect
def _findSize(self, name, cls, method):
child = self.findChild(name)
if child:
w = method(child.attribute('width', 0))
h = method(child.attribute('height', 0))
return cls(w, h)
return cls()
def clear( self ):
children = list( self._object.childNodes )
for child in children:
self._object.removeChild( child )
    def recordValue( self, value ):
        """ Serialize *value* onto this element as 'type'/'value' attributes.

        Lists and dicts recurse into child <entry> nodes; Qt value types are
        stored in string forms that restoreValue() parses back.

        NOTE(review): the exact type() comparisons mean subclasses (e.g. bool,
        which is a subclass of int) fall through to the generic branch —
        presumably intentional for faithful round-tripping; confirm before
        changing any of these to isinstance().
        """
        # Convert Qt basics to python basics where possible
        if ( type( value ) == QString ):
            value = unicode( value )
        valtype = type( value )
        # Record a list of properties
        if ( valtype in (list,tuple) ):
            self.setAttribute( 'type', 'list' )
            for val in value:
                entry = self.addNode( 'entry' )
                entry.recordValue( val )
        # Record a dictionary of properties
        elif ( valtype == dict ):
            self.setAttribute( 'type', 'dict' )
            for key, val in value.items():
                entry = self.addNode( 'entry' )
                entry.setAttribute( 'key', key )
                entry.recordValue( val )
        # Record a qdatetime
        elif ( valtype == QDateTime ):
            self.setAttribute( 'type', 'QDateTime' )
            self.setAttribute( 'value', value.toString( 'yyyy-MM-dd hh:mm:ss' ) )
        # Record a qdate
        elif ( valtype == QDate ):
            self.setAttribute( 'type', 'QDate' )
            self.setAttribute( 'value', value.toString( 'yyyy-MM-dd' ) )
        # Record a qrect
        elif ( valtype in (QRect,QRectF) ):
            self.setAttribute( 'type', valtype.__name__ )
            self.setRect( 'rect', value )
        # Record a point
        elif ( valtype in (QPoint,QPointF) ):
            self.setAttribute( 'type', valtype.__name__ )
            self.setPoint( 'point', value )
        # record a QFont
        elif ( valtype == QFont ):
            self.setAttribute( 'type', 'QFont' )
            self.setAttribute( 'value', value.toString() )
        # Record a size
        elif valtype in (QSize, QSizeF):
            self.setAttribute( 'type', valtype.__name__ )
            self.setSize( 'size', value )
        # Record a qcolor
        elif ( valtype == QColor ):
            self.setAttribute( 'type', 'QColor' )
            self.setColor( 'color', value )
        # Record a QByteArray (Experimental)
        elif ( valtype == QByteArray ):
            self.setAttribute( 'type', 'QByteArray' )
            self.setAttribute( 'value', value.toPercentEncoding() )
        # Record a basic property
        else:
            self.setAttribute( 'value', value )
            typ = type( value ).__name__
            # store unicode values under 'str' so restoreValue treats them alike
            if ( typ == 'unicode' ):
                typ = 'str'
            self.setAttribute( 'type', typ )
    def restoreValue( self, fail = None ):
        """ Inverse of recordValue(): rebuild the python/Qt value from this
        element's 'type'/'value' attributes.

        :param fail: returned when the generic branch cannot reconstruct the
                     value (unknown type name or unparseable text)
        """
        valtype = self.attribute( 'type' )
        value = None
        # Restore a list item
        if ( valtype == 'list' ):
            value = []
            for child in self.children():
                value.append( child.restoreValue() )
        # Restore a dictionary item
        elif ( valtype == 'dict' ):
            value = {}
            for child in self.children():
                value[ child.attribute( 'key' ) ] = child.restoreValue()
        # Record a qdatetime
        elif ( valtype == 'QDateTime' ):
            value = QDateTime.fromString( self.attribute( 'value' ), 'yyyy-MM-dd hh:mm:ss' )
        # Record a qdate
        elif ( valtype == 'QDate' ):
            value = QDate.fromString( self.attribute( 'value' ), 'yyyy-MM-dd' )
        # Restore a QRect
        elif ( valtype == 'QRect' ):
            value = self.findRect( 'rect' )
        # Restore a QRectF
        elif ( valtype == 'QRectF' ):
            value = self.findRectF( 'rect' )
        # Restore a QSize
        elif ( valtype == 'QSize' ):
            value = self.findSize( 'size' )
        # Restore a QSizeF
        elif ( valtype == 'QSizeF' ):
            value = self.findSizeF( 'size' )
        # Restore a QPoint
        elif ( valtype == 'QPoint' ):
            value = self.findPoint( 'point' )
        # Restore a QPointF
        elif ( valtype == 'QPointF' ):
            value = self.findPointF( 'point' )
        # Restore a QColor
        elif ( valtype == 'QColor' ):
            value = self.findColor( 'color' )
        # restore a QFont
        elif ( valtype == 'QFont' ):
            value = QFont()
            value.fromString(self.attribute('value'))
        # Restore a string
        elif ( valtype in ('str','unicode','QString') ):
            value = unicode(self.attribute( 'value' ))
        elif ( valtype == 'ViewMode' ):
            # If treated as a basic value would return fail
            value = int( self.attribute( 'value' ) )
        # Restore a QByteArray (Experimental)
        elif ( valtype == 'QByteArray' ):
            value = QByteArray.fromPercentEncoding( self.attribute( 'value', '' ) )
        # Restore a Qt.CheckState
        elif valtype == 'CheckState':
            value = Qt.CheckState(self.attribute('value', 0))
        # Restore a basic value
        else:
            # SECURITY NOTE: eval() runs attribute text as code — only load
            # XML documents from trusted sources.
            try:
                value = eval( '%s(%s)' % (valtype,self.attribute('value')) )
            except:
                value = fail
        return value
def addComment( self, comment ):
d = self._document()
if ( d ):
out = d.createComment( comment )
self._object.appendChild( out )
return True
return False
def addNode( self, nodeName ):
"""Adds a new node child to the current element with the given node name.
:param nodeName: name of the child to add
:type nodeName: str
:rtype: :class:`XMLElement`
"""
d = self._document()
if ( d ):
out = d.createElement( nodeName )
self._object.appendChild( out )
return XMLElement( out, self.__file__ )
return None
def addChild( self, child, clone = True, deep = True ):
if ( isinstance( child, XMLElement ) ):
child = child._object
if ( clone ):
self._object.appendChild( child.cloneNode( deep ) )
else:
self._object.appendChild( child )
def attribute( self, attr, fail = '' ):
"""Gets the attribute value of the element by the given attribute id
:param attr: Name of the atribute you want to recover.
:param fail: If the atribute does not exist return this.
"""
out = unicode( self._object.getAttribute( attr ) )
out = unescape(out)
if ( out ):
return out
return fail
def attributeDict(self):
"""
\Remarks Returns a dictionary of attributes
\Return <dict>
"""
out = {}
for item in self.attributes.values():
out.update({item.name: item.value})
return out
def childAt( self, index ):
"""Finds the child at the given index, provided the index is within the child range
"""
childList = self._children()
if ( 0 <= index and index < len( childList ) ):
return XMLElement( childList[index], self.__file__ )
return None
def childNames( self ):
"""Collects all the names of the children of this element whose
child type is an :class:`xml.dom.minidom.Element`
"""
if ( self._object ):
return [ child.nodeName for child in self._object.childNodes if isinstance( child, xml.dom.minidom.Element ) ]
return []
def children( self ):
"""Collects all the child nodes of this element whose child type is an
:class:`xml.dom.minidom.Element`, wrapping each child as an
:class:`XMLElement`.
"""
if ( self._object ):
return [ XMLElement( child, self.__file__ ) for child in self._object.childNodes if isinstance( child, xml.dom.minidom.Element ) ]
return []
def index( self, object ):
"""Finds the index of the inputed child object in this instance's
XMLElement children, returning -1 if it cannot be found.
"""
if ( self._object ):
if ( isinstance( object, XMLElement ) ):
if ( object._object in self._object.childNodes ):
return self._object.childNodes.index( object._object )
elif ( isinstance( object, xml.dom.minidom.Element ) ):
if ( object in self._object.childNodes ):
return self._object.childNodes.index( object )
return -1
def findChild( self, childName, recursive = False, autoCreate = False ):
"""Finds the first instance of the child of this instance whose nodeName is the given child name.
:param childName: Name to search for.
:param recursive: Recursively search each child node for more child nodes. Default is False
:param autoCreate: Create the node if it is not found.
"""
if ( self._object ):
childList = self._object.getElementsByTagName( childName )
if ( childList ):
if ( not recursive ):
for child in childList:
if child.parentNode == self._object:
return XMLElement( child, self.__file__ )
else:
return XMLElement( childList[0], self.__file__ )
if ( autoCreate ):
return self.addNode( childName )
return None
def findChildById( self, key ):
import re
key = '_'.join( re.findall( '[a-zA-Z0-9]*', key ) ).lower()
for child in self.children():
if ( key == child.getId() or key == '_'.join( re.findall( '[a-zA-Z0-9]*', child.nodeName ) ).lower() ):
return child
return None
def findChildren( self, childName, recursive = False ):
"""Finds all the children of this instance whose nodeName is the given child name.
:param childName: The name of the child nodes to look for.
:param recursive: Recursively search each child node for more child nodes. Default is False
"""
if ( self._object ):
if ( recursive ):
return [ XMLElement( child, self.__file__ ) for child in self._object.getElementsByTagName( childName ) ]
else:
return [ XMLElement( child, self.__file__ ) for child in self._object.childNodes if child.nodeName == childName | |
np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(len(items), dtype=bool)
# By construction, all of the item should be covered by one of the
# blocks
if items.is_unique:
for block in self.blocks:
indexer = items.get_indexer(block.items)
if (indexer == -1).any():
raise AssertionError('Items must contain all block items')
result[indexer] = block.get_values(dtype)
itemmask[indexer] = 1
else:
for block in self.blocks:
mask = items.isin(block.items)
indexer = mask.nonzero()[0]
if (len(indexer) != len(block.items)):
raise AssertionError('All items must be in block items')
result[indexer] = block.get_values(dtype)
itemmask[indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
    def xs(self, key, axis=1, copy=True):
        """
        Take a cross-section at label `key` along `axis`, returning a new
        BlockManager with that axis sliced (or dropped for a scalar location).

        Parameters
        ----------
        key : label present in ``self.axes[axis]``
        axis : int, must be >= 1 (the items axis is not supported here)
        copy : bool, whether the returned values may share memory

        Raises
        ------
        AssertionError : when axis < 1
        Exception : copy=False with multiple blocks (a single view cannot
            span several dtype blocks)
        """
        if axis < 1:
            raise AssertionError('Can only take xs across axis >= 1, got %d'
                                 % axis)
        loc = self.axes[axis].get_loc(key)
        # slice every dimension fully except the requested axis
        slicer = [slice(None, None) for _ in range(self.ndim)]
        slicer[axis] = loc
        slicer = tuple(slicer)
        new_axes = list(self.axes)
        # could be an array indexer!
        if isinstance(loc, (slice, np.ndarray)):
            # duplicate labels keep the axis (shortened); scalars drop it
            new_axes[axis] = new_axes[axis][loc]
        else:
            new_axes.pop(axis)
        new_blocks = []
        if len(self.blocks) > 1:
            if not copy:
                raise Exception('cannot get view of mixed-type or '
                                'non-consolidated DataFrame')
            for blk in self.blocks:
                newb = make_block(blk.values[slicer], blk.items, blk.ref_items)
                new_blocks.append(newb)
        elif len(self.blocks) == 1:
            vals = self.blocks[0].values[slicer]
            if copy:
                vals = vals.copy()
            new_blocks = [make_block(vals, self.items, self.items)]
        return BlockManager(new_blocks, new_axes)
    def fast_2d_xs(self, loc, copy=False):
        """
        Retrieve the row at integer position `loc` across all items.

        A single-block manager can hand back a column slice of the block
        directly (copied on request). A mixed-type manager must interleave
        one value per item into a freshly allocated array, so copy=False is
        rejected there — no view exists that spans several blocks.
        """
        if len(self.blocks) == 1:
            result = self.blocks[0].values[:, loc]
            if copy:
                result = result.copy()
            return result
        if not copy:
            raise Exception('cannot get view of mixed-type or '
                            'non-consolidated DataFrame')
        # common dtype able to hold every block's values
        dtype = _interleaved_dtype(self.blocks)
        items = self.items
        n = len(items)
        result = np.empty(n, dtype=dtype)
        for blk in self.blocks:
            for j, item in enumerate(blk.items):
                # map the block-relative row j back to the manager position
                i = items.get_loc(item)
                result[i] = blk._gi((j, loc))
        return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
new_blocks = _consolidate(self.blocks, self.items)
return BlockManager(new_blocks, self.axes)
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = _consolidate(self.blocks, self.items)
self._is_consolidated = True
self._known_consolidated = True
def get(self, item):
_, block = self._find_block(item)
return block.get(item)
    def iget(self, i):
        """
        Retrieve the values at integer position `i` along the items axis.

        With a unique items Index this is a plain label lookup. With
        duplicate labels, the positional index must be matched against the
        label's occurrences in both the manager and the owning block.
        """
        item = self.items[i]
        if self.items.is_unique:
            return self.get(item)
        else:
            # ugh
            try:
                inds, = (self.items == item).nonzero()
            except AttributeError: # MultiIndex
                inds, = self.items.map(lambda x: x == item).nonzero()
            _, block = self._find_block(item)
            try:
                binds, = (block.items == item).nonzero()
            except AttributeError: # MultiIndex
                binds, = block.items.map(lambda x: x == item).nonzero()
            # pair manager-level positions with block-level positions; the
            # pair whose manager position equals i gives the block row
            for j, (k, b) in enumerate(zip(inds, binds)):
                if i == k:
                    return block.values[b]
            raise Exception('Cannot have duplicate column names '
                            'split across dtypes')
def get_scalar(self, tup):
"""
Retrieve single item
"""
item = tup[0]
_, blk = self._find_block(item)
# this could obviously be seriously sped up in cython
item_loc = blk.items.get_loc(item),
full_loc = item_loc + tuple(ax.get_loc(x)
for ax, x in zip(self.axes[1:], tup[1:]))
return blk.values[full_loc]
    def delete(self, item):
        """ Remove `item` from the manager in-place (all occurrences for
        duplicate labels). """
        i, _ = self._find_block(item)
        loc = self.items.get_loc(item)
        self._delete_from_block(i, item)
        if com._is_bool_indexer(loc):  # dupe keys may return mask
            # convert the boolean mask into positional indices for Index.delete
            # (note: the comprehension reuses the name i, shadowing the block
            # index above, which has already been consumed)
            loc = [i for i, v in enumerate(loc) if v]
        new_items = self.items.delete(loc)
        self.set_items_norename(new_items)
        self._known_consolidated = False
    def set(self, item, value):
        """
        Set new item in-place. Does not consolidate. Adds new Block if not
        contained in the current set of items
        """
        # reshape to the manager's dimensionality and check alignment on all
        # non-item axes before touching any block
        value = _block_shape(value,self.ndim-1)
        if value.shape[1:] != self.shape[1:]:
            raise AssertionError('Shape of new values must be compatible '
                                 'with manager shape')
        def _set_item(item, arr):
            # place one column: reuse the holding block when the dtype fits,
            # otherwise move the column into a fresh block.
            # NOTE(review): this checks should_store(value) — the full input —
            # but stores arr; for the duplicate-label path below those differ.
            # Confirm against upstream before changing.
            i, block = self._find_block(item)
            if not block.should_store(value):
                # delete from block, create and append new block
                self._delete_from_block(i, item)
                self._add_new_block(item, arr, loc=None)
            else:
                block.set(item, arr)
        try:
            loc = self.items.get_loc(item)
            if isinstance(loc, int):
                _set_item(self.items[loc], value)
            else:
                # duplicate label: value must supply one row per occurrence
                subset = self.items[loc]
                if len(value) != len(subset):
                    raise AssertionError(
                        'Number of items to set did not match')
                for i, (item, arr) in enumerate(zip(subset, value)):
                    _set_item(item, arr[None, :])
        except KeyError:
            # insert at end
            self.insert(len(self.items), item, value)
        self._known_consolidated = False
    def insert(self, loc, item, value):
        """
        Insert ``item`` with data ``value`` at position ``loc`` on the items
        axis, creating a new block.  If block creation fails, the items-axis
        change is rolled back before the exception propagates.
        """
        if item in self.items:
            raise Exception('cannot insert %s, already exists' % item)

        try:
            new_items = self.items.insert(loc, item)
            self.set_items_norename(new_items)

            # new block
            self._add_new_block(item, value, loc=loc)

        except:
            # so our insertion operation failed, so back out of the new items
            # GH 3010
            new_items = self.items.delete(loc)
            self.set_items_norename(new_items)

            # re-raise
            raise

        if len(self.blocks) > 100:
            # many tiny blocks degrade performance; fold them back together
            self._consolidate_inplace()

        self._known_consolidated = False
def set_items_norename(self, value):
value = _ensure_index(value)
self.axes[0] = value
for block in self.blocks:
block.set_ref_items(value, maybe_rename=False)
def _delete_from_block(self, i, item):
"""
Delete and maybe remove the whole block
"""
block = self.blocks.pop(i)
for b in block.split_block_at(item):
self.blocks.append(b)
def _add_new_block(self, item, value, loc=None):
# Do we care about dtype at the moment?
# hm, elaborate hack?
if loc is None:
loc = self.items.get_loc(item)
new_block = make_block(value, self.items[loc:loc + 1].copy(),
self.items)
self.blocks.append(new_block)
def _find_block(self, item):
self._check_have(item)
for i, block in enumerate(self.blocks):
if item in block:
return i, block
def _check_have(self, item):
if item not in self.items:
raise KeyError('no item named %s' % com.pprint_thing(item))
    def reindex_axis(self, new_axis, method=None, axis=0, copy=True):
        """
        Conform the manager to ``new_axis`` along ``axis``.

        When the new axis equals the current one, returns either a deep copy
        (with the new axis object patched in) or ``self`` unchanged.
        Reindexing along axis 0 (items) is delegated to ``reindex_items``
        and does not support ``method``.
        """
        new_axis = _ensure_index(new_axis)
        cur_axis = self.axes[axis]

        if new_axis.equals(cur_axis):
            if copy:
                result = self.copy(deep=True)
                result.axes[axis] = new_axis

                if axis == 0:
                    # patch ref_items, #1823
                    for blk in result.blocks:
                        blk.ref_items = new_axis

                return result
            else:
                return self

        if axis == 0:
            if method is not None:
                raise AssertionError('method argument not supported for '
                                     'axis == 0')
            return self.reindex_items(new_axis)

        # non-item axis: compute the indexer against the current axis and
        # take along it
        new_axis, indexer = cur_axis.reindex(new_axis, method)
        return self.reindex_indexer(new_axis, indexer, axis=axis)
def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan):
"""
pandas-indexer with -1's only.
"""
if axis == 0:
return self._reindex_indexer_items(new_axis, indexer, fill_value)
new_blocks = []
for block in self.blocks:
newb = block.reindex_axis(indexer, axis=axis, fill_value=fill_value)
new_blocks.append(newb)
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(new_blocks, new_axes)
def _reindex_indexer_items(self, new_items, indexer, fill_value):
# TODO: less efficient than I'd like
item_order = com.take_1d(self.items.values, indexer)
# keep track of what items aren't found anywhere
mask = np.zeros(len(item_order), dtype=bool)
new_blocks = []
for blk in self.blocks:
blk_indexer = blk.items.get_indexer(item_order)
selector = blk_indexer != -1
# update with observed items
mask |= selector
if not selector.any():
continue
new_block_items = new_items.take(selector.nonzero()[0])
new_values = com.take_nd(blk.values, blk_indexer[selector], axis=0,
allow_fill=False)
new_blocks.append(make_block(new_values, new_block_items,
new_items))
if not mask.all():
na_items = new_items[-mask]
na_block = self._make_na_block(na_items, new_items,
fill_value=fill_value)
new_blocks.append(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
    def reindex_items(self, new_items, copy=True, fill_value=np.nan):
        """
        Conform the items axis to ``new_items``.  Consolidates first, and
        fills labels missing from this manager with ``fill_value``.
        """
        new_items = _ensure_index(new_items)
        data = self
        if not data.is_consolidated():
            # re-enter on the consolidated manager
            data = data.consolidate()
            return data.reindex_items(new_items)

        # TODO: this part could be faster (!)
        new_items, indexer = self.items.reindex(new_items)

        # could have some pathological (MultiIndex) issues here
        new_blocks = []
        if indexer is None:
            # same labels: reuse or copy each block wholesale
            for blk in self.blocks:
                if copy:
                    new_blocks.append(blk.reindex_items_from(new_items))
                else:
                    blk.ref_items = new_items
                    new_blocks.append(blk)
        else:
            for block in self.blocks:
                newb = block.reindex_items_from(new_items, copy=copy)
                if len(newb.items) > 0:
                    new_blocks.append(newb)

            # labels not found in any block get a fill-value block
            mask = indexer == -1
            if mask.any():
                extra_items = new_items[mask]
                na_block = self._make_na_block(extra_items, new_items,
                                               fill_value=fill_value)
                new_blocks.append(na_block)
                new_blocks = _consolidate(new_blocks, new_items)

        return BlockManager(new_blocks, [new_items] + self.axes[1:])
def _make_na_block(self, items, ref_items, fill_value=np.nan):
# TODO: infer dtypes other than float64 from fill_value
block_shape = list(self.shape)
block_shape[0] = len(items)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
na_block = make_block(block_values, items, ref_items)
return na_block
def take(self, indexer, axis=1, verify=True):
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
indexer = com._ensure_platform_int(indexer)
n = len(self.axes[axis])
if verify:
indexer = _maybe_convert_indices(indexer, n)
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_axes = list(self.axes)
new_axes[axis] = self.axes[axis].take(indexer)
new_blocks = []
for blk in self.blocks:
new_values = com.take_nd(blk.values, indexer, axis=axis,
allow_fill=False)
newb = make_block(new_values, blk.items, self.items)
new_blocks.append(newb)
return BlockManager(new_blocks, new_axes)
def merge(self, other, lsuffix=None, rsuffix=None):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
this, other = self._maybe_rename_join(other, lsuffix, rsuffix)
cons_items = this.items + other.items
consolidated = _consolidate(this.blocks + other.blocks, cons_items)
new_axes = list(this.axes)
new_axes[0] = cons_items
return BlockManager(consolidated, new_axes)
def _maybe_rename_join(self, other, lsuffix, rsuffix, copydata=True):
to_rename = self.items.intersection(other.items)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise Exception('columns overlap: %s' % to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
this = self.rename_items(lrenamer, copydata=copydata)
other = other.rename_items(rrenamer, copydata=copydata)
else:
this = self
return this, other
def _is_indexed_like(self, other):
"""
Check all axes except items
| |
supportVector_models = list()
kernel_type = get_kernel_type(model)
supportVector_models.append(pml.SupportVectorMachineModel(
modelName=model.__class__.__name__,
classificationMethod=get_classificationMethod(model),
VectorDictionary=get_vectorDictionary(model, derived_col_names, categoric_values),
SupportVectorMachine=get_supportVectorMachine(model),
taskType=tasktype,
**kernel_type,
**model_kwargs
))
# supportVector_models[0].export(sys.stdout,0," ")
return supportVector_models
def get_model_name(model):
    """
    Return the short model-name token used by the exporter for the given
    estimator, keyed off a substring of its class repr.  Returns None when
    no known marker matches.
    """
    class_repr = str(model.__class__)
    # ordered: first matching marker wins, mirroring the original chain
    name_by_marker = (
        ('OneClassSVM', 'ocsvm'),
        ('IsolationForest', 'iforest'),
        ('XGB', 'XGBoostModel'),
        ('LGB', 'LightGBModel'),
        ('GradientBoosting', 'GradientBoostingModel'),
        ('RandomForest', 'RandomForestModel'),
    )
    for marker, short_name in name_by_marker:
        if marker in class_repr:
            return short_name
def get_ensemble_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype):
    """
    It returns the Mining Model element of the model

    Parameters
    ----------
    model :
        An instance of Scikit-learn model.
    derived_col_names : List
        Contains column names after preprocessing.
    col_names : List
        Contains list of feature/column names.
    target_name : String
        Name of the Target column.
    mining_imp_val : tuple
        Contains the mining_attributes,mining_strategy, mining_impute_value.
    categoric_values : tuple
        Contains Categorical attribute names and its values
    tasktype :
        Task type string passed through to the generated PMML element.

    Returns
    -------
    mining_models : List
        Returns the MiningModel of the respective ensemble model
    """
    model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val,categoric_values)
    if model.__class__.__name__ == 'GradientBoostingRegressor':
        model_kwargs['Targets'] = get_targets(model, target_name)
    # Prune the MiningSchema: keep only features the ensemble actually uses
    mining_fields = model_kwargs['MiningSchema'].MiningField
    new_mining_fields = list()
    if model.__class__.__name__ != 'IsolationForest':
        # keep fields with non-zero feature importance
        for idx, imp_ in enumerate(model.feature_importances_):
            if imp_ > 0:
                new_mining_fields.append(mining_fields[idx])
    else:
        # IsolationForest exposes no feature_importances_; keep all columns
        for idx in range(len(col_names)):
            new_mining_fields.append(mining_fields[idx])
    # target field(s) are always retained
    for fld in mining_fields:
        if fld.usageType == 'target':
            new_mining_fields.append(fld)
    model_kwargs['MiningSchema'].MiningField = new_mining_fields
    mining_models = list()
    mining_models.append(pml.MiningModel(
        modelName=model.__class__.__name__,
        Segmentation=get_outer_segmentation(model, derived_col_names, col_names, target_name,
                                            mining_imp_val, categoric_values,tasktype),
        taskType=tasktype,
        **model_kwargs
    ))
    return mining_models
def get_targets(model, target_name):
    """
    Build the Targets element describing how raw ensemble scores are
    rescaled for the given boosting model.

    Parameters
    ----------
    model :
        A trained boosting model: GradientBoostingRegressor, or otherwise
        a model exposing ``base_score``.
    target_name : String
        Name of the Target column.

    Returns
    -------
    targets :
        A Targets instance wrapping a single Target.
    """
    if model.__class__.__name__ == 'GradientBoostingRegressor':
        # sklearn GBR: shift by the initial mean, scale by learning rate
        target = pml.Target(
            field=target_name,
            rescaleConstant="{:.16f}".format(model.init_.mean),
            rescaleFactor="{:.16f}".format(model.learning_rate)
        )
    else:
        # otherwise only the base score offsets the prediction
        target = pml.Target(
            field=target_name,
            rescaleConstant="{:.16f}".format(model.base_score)
        )
    return pml.Targets(Target=[target])
def get_multiple_model_method(model):
    """
    Return the multiple-model combination method name appropriate for the
    given ensemble class, or None for unrecognized classes.
    """
    name = model.__class__.__name__
    if name == 'GradientBoostingClassifier':
        return 'modelChain'
    if name == 'GradientBoostingRegressor':
        return 'sum'
    if name == 'RandomForestClassifier':
        return 'majorityVote'
    if name in ('RandomForestRegressor', 'IsolationForest'):
        return 'average'
def get_outer_segmentation(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values,tasktype):
    """
    Build the Segmentation element for an ensemble model: the combination
    method plus one Segment per sub-model.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    derived_col_names : List
        Contains column names after preprocessing.
    col_names : List
        Contains list of feature/column names.
    target_name : String
        Name of the Target column.
    mining_imp_val : tuple
        Contains the mining_attributes,mining_strategy, mining_impute_value
    categoric_values : tuple
        Contains Categorical attribute names and its values
    tasktype :
        Task type string passed through to the segment builders.

    Returns
    -------
    segmentation :
        A segmentation instance.
    """
    combination_method = get_multiple_model_method(model)
    inner_segments = get_segments(model, derived_col_names, col_names,
                                  target_name, mining_imp_val,
                                  categoric_values, tasktype)
    return pml.Segmentation(multipleModelMethod=combination_method,
                            Segment=inner_segments)
def get_segments(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values,tasktype):
    """
    Build the list of Segment elements for an ensemble model.  Gradient
    boosting classifiers need a dedicated layout (per-class chained
    regressions); every other ensemble uses the plain inner segments.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    derived_col_names : List
        Contains column names after preprocessing.
    col_names : List
        Contains list of feature/column names.
    target_name : String
        Name of the Target column.
    mining_imp_val : tuple
        Contains the mining_attributes,mining_strategy, mining_impute_value
    categoric_values : tuple
        Contains Categorical attribute names and its values
    tasktype :
        Task type string passed through to the segment builders.

    Returns
    -------
    segments :
        A list of segment instances.
    """
    if 'GradientBoostingClassifier' in str(model.__class__):
        return get_segments_for_gbc(model, derived_col_names, col_names,
                                    target_name, mining_imp_val,
                                    categoric_values, tasktype)
    return get_inner_segments(model, derived_col_names, col_names, 0)
def get_segments_for_gbc(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values,tasktype):
    """
    It returns list of Segments element of the model.

    Builds one regression MiningModel segment per class (summing that
    class's trees and rescaling the decision function), then a final
    RegressionModel segment that normalizes the per-class scores
    (logit for binary, softmax for multiclass).

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    derived_col_names : List
        Contains column names after preprocessing.
    col_names : List
        Contains list of feature/column names.
    target_name : String
        Name of the Target column.
    mining_imp_val : tuple
        Contains the mining_attributes,mining_strategy, mining_impute_value
    categoric_values : tuple
        Contains Categorical attribute names and its values
    tasktype :
        Task type string passed through to the generated models.

    Returns
    -------
    segments : List
        Get the Segments for the Segmentation element.
    """
    segments = list()
    out_field_names = list()
    # model.estimators_ has shape (n_stages, n_classes); iterate classes
    for estm_idx in range(len(model.estimators_[0])):
        mining_fields_for_first = list()
        # for name in col_names:
        # only features with non-zero importance get a MiningField
        for idx,imp_ in enumerate(model.feature_importances_):
            # mining_fields_for_first.append(pml.MiningField(name=name))
            if imp_ > 0:
                mining_fields_for_first.append(pml.MiningField(name=col_names[idx]))

        miningschema_for_first = pml.MiningSchema(MiningField=mining_fields_for_first)
        output_fields = list()
        # raw per-class decision function emitted by the inner tree sum
        output_fields.append(
            pml.OutputField(
                name='decisionFunction(' + str(estm_idx) + ')',
                feature='predictedValue',
                dataType="double",
                isFinalResult=False
            )
        )
        if len(model.classes_) == 2:
            # binary case: single transformed score
            # prior + learning_rate * decisionFunction(0)
            output_fields.append(
                pml.OutputField(
                    name='transformedDecisionFunction(0)',
                    feature='transformedValue',
                    dataType="double",
                    isFinalResult=True,
                    Apply=pml.Apply(
                        function="+",
                        Constant=[pml.Constant(
                            dataType="double",
                            valueOf_="{:.16f}".format(model.init_.prior)
                        )],
                        Apply_member=[pml.Apply(
                            function="*",
                            Constant=[pml.Constant(
                                dataType="double",
                                valueOf_="{:.16f}".format(model.learning_rate)
                            )],
                            FieldRef=[pml.FieldRef(
                                field="decisionFunction(0)",
                            )]
                        )]
                    )
                )
            )
        else:
            # multiclass: per-class prior + learning_rate * decisionFunction(k)
            output_fields.append(
                pml.OutputField(
                    name='transformedDecisionFunction(' + str(estm_idx) + ')',
                    feature='transformedValue',
                    dataType="double",
                    isFinalResult=True,
                    Apply=pml.Apply(
                        function="+",
                        Constant=[pml.Constant(
                            dataType="double",
                            valueOf_="{:.16f}".format(model.init_.priors[estm_idx])
                        )],
                        Apply_member=[pml.Apply(
                            function="*",
                            Constant=[pml.Constant(
                                dataType="double",
                                valueOf_="{:.16f}".format(model.learning_rate)
                            )],
                            FieldRef=[pml.FieldRef(
                                field="decisionFunction(" + str(estm_idx) + ")",
                            )]
                        )]
                    )
                )
            )

        out_field_names.append('transformedDecisionFunction(' + str(estm_idx) + ')')
        # inner segmentation sums this class's trees across boosting stages
        segments.append(
            pml.Segment(
                True_=pml.True_(),
                id=str(estm_idx),
                MiningModel=pml.MiningModel(
                    functionName='regression',
                    modelName="MiningModel",
                    MiningSchema=miningschema_for_first,
                    Output=pml.Output(OutputField=output_fields),
                    Segmentation=pml.Segmentation(
                        multipleModelMethod="sum",
                        Segment=get_inner_segments(model, derived_col_names,
                                                   col_names, estm_idx)
                    )
                )
            )
        )
    # final segment: regression over the transformed scores, normalized to
    # class probabilities
    reg_model = get_regrs_models(model, out_field_names,out_field_names, target_name, mining_imp_val, categoric_values,tasktype)[0]
    reg_model.Output = None
    if len(model.classes_) == 2:
        reg_model.normalizationMethod="logit"
    else:
        reg_model.normalizationMethod="softmax"

    segments.append(
        pml.Segment(
            id=str(len(model.estimators_[0])),
            True_=pml.True_(),
            RegressionModel=reg_model
        )
    )
    return segments
def get_inner_segments(model, derived_col_names, col_names, index):
    """
    It returns the Inner segments of the model.

    One TreeModel segment is emitted per estimator; estimators whose tree
    uses no feature at all are skipped.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    derived_col_names : List
        Contains column names after preprocessing.
    col_names : List
        Contains list of feature/column names.
    index : Integer
        The index of the estimator for the model

    Returns
    -------
    segments : List
        Get the Segments for the Segmentation element.
    """
    segments = list()
    for estm_idx in range(model.n_estimators):
        # estimators_ is flat for forests, 2-D (stage, class) for boosting
        if np.asanyarray(model.estimators_).ndim == 1:
            estm = model.estimators_[estm_idx]
        else:
            estm = model.estimators_[estm_idx][index]
        tree_features = estm.tree_.feature
        features_ = list()
        # -2 marks leaf nodes in sklearn's tree feature array; collect the
        # distinct split features actually used
        for feat in tree_features:
            if feat != -2 and feat not in features_:
                features_.append(feat)
        if len(features_) != 0:
            mining_fields = list()
            # for feat in col_names:
            # keep only columns this particular tree found informative
            feature_importances = estm.tree_.compute_feature_importances()
            for idx,imp_ in enumerate(feature_importances):
                if imp_ > 0:
                    # mining_fields.append(pml.MiningField(name=feat))
                    mining_fields.append(pml.MiningField(name=col_names[idx]))
            segments.append(
                pml.Segment(
                    True_=pml.True_(),
                    id=str(estm_idx),
                    TreeModel=pml.TreeModel(
                        modelName=estm.__class__.__name__,
                        functionName=get_mining_func(estm),
                        splitCharacteristic="multiSplit",
                        MiningSchema=pml.MiningSchema(MiningField = mining_fields),
                        Node=get_node(estm, derived_col_names, model)
                    )
                )
            )
    return segments
def get_classificationMethod(model):
    """
    Return the SVM classification method name: SVC maps to
    one-against-one, every other SVM flavour to one-against-all.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.

    Returns
    -------
    str
        'OneAgainstOne' for SVC, otherwise 'OneAgainstAll'.
    """
    is_svc = model.__class__.__name__ == 'SVC'
    return 'OneAgainstOne' if is_svc else 'OneAgainstAll'
def get_vectorDictionary(model, derived_col_names, categoric_values):
    """
    It return the Vector Dictionary element.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    derived_col_names : List
        Contains column names after preprocessing.
    categoric_values : tuple
        Contains Categorical attribute names and its values

    Returns
    -------
    VectorDictionary :
        A Vector Dictionary instance.
    """
    # NOTE(review): model.C (the regularization parameter) is passed as the
    # "model_coef" argument of get_vectorfields — confirm this is intended
    model_coef = model.C
    fieldref_element = get_vectorfields(model_coef, derived_col_names, categoric_values)
    vectorfields_element = pml.VectorFields(FieldRef=fieldref_element)
    vec_id = list(model.support_)
    vecinsts = list()
    vecs = list(model.support_vectors_)
    if model.support_vectors_.__class__.__name__ != 'csr_matrix':
        # dense support vectors: one VectorInstance per vector, indices 1..n
        for vec_idx in range(len(vecs)):
            vecinsts.append(pml.VectorInstance(
                id=vec_id[vec_idx],
                REAL_SparseArray=pml.REAL_SparseArray(
                    n=len(fieldref_element),
                    Indices=([x for x in range(1, len(vecs[vec_idx]) + 1)]),
                    REAL_Entries=vecs[vec_idx].tolist()
                )
            ))
    else:
        # scipy CSR support vectors: densify each row before serializing
        for vec_idx in range(len(vecs)):
            vecinsts.append(pml.VectorInstance(
                id=vec_id[vec_idx],
                REAL_SparseArray=pml.REAL_SparseArray(
                    n=len(fieldref_element),
                    Indices=([x for x in range(1, len(vecs[vec_idx].todense().tolist()[0]) + 1)]),
                    REAL_Entries=vecs[vec_idx].todense().tolist()[0]
                )
            ))
    vd=pml.VectorDictionary(VectorFields=vectorfields_element, VectorInstance=vecinsts)
    return vd
def get_vectorfields(model_coef, feat_names, categoric_values):
    """
    It return the Vector Fields .

    Walks the derived feature names, expanding label-binarized and
    one-hot-encoded categorical attributes into categorical predictors and
    passing everything else through as a plain FieldRef.

    Parameters
    ----------
    model_coef :
        Coefficient value forwarded to get_categoric_pred for categorical
        predictors.
    feat_names : List
        Contains column names after preprocessing.
    categoric_values : tuple
        Contains Categorical attribute names and its values

    Returns
    -------
    Returns the list of predictor elements for the VectorFields of a
    Support Vector model.
    """
    der_fld_len = len(feat_names)
    der_fld_idx = 0
    row_idx = -1
    predictors = list()
    if categoric_values:
        class_lbls = categoric_values[0]
        class_attribute = categoric_values[1]
    while der_fld_idx < der_fld_len:
        if is_labelbinarizer(feat_names[der_fld_idx]):
            if not is_stdscaler(feat_names[der_fld_idx]):
                class_id = get_classid(class_attribute, feat_names[der_fld_idx])
                cat_predictors = get_categoric_pred(feat_names[der_fld_idx],row_idx, der_fld_idx, model_coef, class_lbls[class_id],
                                                    class_attribute[class_id])
                for predictor in cat_predictors:
                    predictors.append(predictor)
                # a binary attribute binarizes into a single column; larger
                # cardinalities consume one column per class label
                if len(class_lbls[class_id]) == 2:
                    incrementor = 1
                else:
                    incrementor = len(class_lbls[class_id])
                der_fld_idx = der_fld_idx + incrementor
            else:
                # scaled after binarizing: treat as a plain numeric field
                vectorfields_element = pml.FieldRef(field=feat_names[der_fld_idx])
                predictors.append(vectorfields_element)
                der_fld_idx += 1
        elif is_onehotencoder(feat_names[der_fld_idx]):
            if not is_stdscaler(feat_names[der_fld_idx]):
                class_id = get_classid(class_attribute, feat_names[der_fld_idx])
                cat_predictors = get_categoric_pred(feat_names[der_fld_idx],row_idx, der_fld_idx, model_coef, class_lbls[class_id],
                                                    class_attribute[class_id])
                for predictor in cat_predictors:
                    predictors.append(predictor)
                # one-hot encoding always consumes one column per class label
                incrementor = len(class_lbls[class_id])
                der_fld_idx = der_fld_idx + incrementor
            else:
                vectorfields_element = pml.FieldRef(field=feat_names[der_fld_idx])
                predictors.append(vectorfields_element)
                der_fld_idx += 1
        else:
            # continuous / untransformed feature
            vectorfields_element = pml.FieldRef(field=feat_names[der_fld_idx])
            predictors.append(vectorfields_element)
            der_fld_idx += 1
    return predictors
def is_onehotencoder(feat_name):
    """
    Report whether the derived feature name was produced by a
    OneHotEncoder preprocessing step.

    Parameters
    ----------
    feat_name : string
        Contains the name of the attribute

    Returns
    -------
    bool
        True when the "oneHotEncoder" marker occurs in the name.
    """
    return "oneHotEncoder" in feat_name
def get_kernel_type(model):
"""
It returns the kernel type element.
Parameters
----------
model | |
#
#
# Copyright 2019 Asylo authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Functions for describing type definitions for generating macros.
Implements the functions for describing and parsing the type definitions. Allows
emitting macros which can be read directly by a C/C++ program, to evaluate the
unresolved values in such macros and then generate include directives, constant
definitions and conversion functions that allow system constants to be converted
from the enclave C library implementation used by Asylo to target host
implementation on the untrusted side (typically libc).
For each type definition (eg. define_constants, define_structs), a definition
and getter methods are provided. The definition methods accept a type definition
one at a time, while the get methods return all the type definitions under a
single macro.
Finally, a write_output() method is provided, which emits all the type
definitions recorded so far in the definitions file (types.py).
"""
from __future__ import print_function
import collections
import re
import sys
# Stores system header includes as a set. Only header file names are expected
# with or without the .h extension and without the '#include' directive
# prefixed.
# We include stdbool.h by default so that the generated output (as .inc file) is
# also readable by a C program.
# Mutated by add_include_header_file().
_includes = {'stdbool.h'}

# Map from enum names to dictionary of enum properties and their values.
# Populated by define_constants().
_enum_map = collections.defaultdict(dict)

# Map from struct names to dictionary of struct properties and its members.
# Populated by define_struct().
_struct_map = collections.defaultdict(dict)

# Declare the prefix to be used for C enum declarations and conversion
# functions. This prefix should be used for direct conversions between enclave
# C library and host library, ones which do not involve an intermediate bridge.
# Overridable via set_klinux_prefix().
_klinux_prefix = 'kLinux'
def set_klinux_prefix(prefix):
  """Overrides the module-wide prefix for definitions and conversions.

  Args:
    prefix: Prefix string applied to every kernel-based constant definition
      and conversion function name emitted by this module.
  """
  global _klinux_prefix
  _klinux_prefix = prefix
def define_constants(name,
                     values,
                     include_header_file,
                     multi_valued=False,
                     skip_conversions=False,
                     wrap_macros_with_if_defined=False,
                     data_type='int'):
  """Defines a collection of related constants/macros and their properties.

  Args:
    name: Name of the collection of constants.
    values: Constant names provided as a list of strings.
    include_header_file: System header file (included as #include <filename>)
      used to resolve the constants' values on the target host at compile
      time, and again at runtime by the generated conversion functions when
      converting values between the enclave C library and the host C library.
    multi_valued: Boolean indicating if the constant values can be combined
      using bitwise OR operations.
    skip_conversions: Boolean indicating that only constants definitions (not
      conversion functions) should be generated. Useful when the conversion
      functions are complex and must be written manually, while the constant
      definitions can still be resolved automatically for the target host.
    wrap_macros_with_if_defined: Boolean indicating that each value should be
      wrapped in #if defined(value) ... #endif inside generated conversion
      functions, so the collection may safely include macros that do not
      exist on every platform or architecture. Intended only for constants
      that are C/C++ macros.
    data_type: String specifying the type of constants, if not int.

  Raises:
    ValueError: Invalid include_header_file format provided.
  """
  # Each constant is rendered twice: quoted as a string literal, then bare so
  # it resolves to the actual integer value — casting a string to an enum
  # value is non-trivial in C++, so the consumer needs both forms.
  # E.g. ['CONST_VAL1', 'CONST_VAL2'] is stored as:
  #   {"CONST_VAL1", CONST_VAL1}, {"CONST_VAL2", CONST_VAL2}
  rendered = ', '.join('{{"{}", {}}}'.format(val, val) for val in values)
  _enum_map[name].update(
      values=rendered,
      multi_valued=multi_valued,
      skip_conversions=skip_conversions,
      wrap_macros_with_if_defined=wrap_macros_with_if_defined,
      data_type='"{}"'.format(data_type))
  add_include_header_file(include_header_file)
def add_include_header_file(include_header_file):
  """Adds a system header file to the list of includes to be generated.

  Args:
    include_header_file: Name of the system header file, in the format
      'filename.h'. Do not use <> or "" to wrap the filename.

  Raises:
    ValueError: The filename is wrapped in <> or quotes, or is prefixed with
      an '#include' directive.
  """
  # Reject names wrapped in pointy brackets or quotes. The character classes
  # are [<"] and [>"]; the previous pattern used [<,"] / [>,"], whose stray
  # commas (meant as separators, but inside a character class they are
  # literals) also misflagged filenames containing commas.
  if re.match(r'[<"].*?[>"]', include_header_file):
    raise ValueError(
        'Invalid include format for filename "%s". Please provide the include '
        'file without enclosing pointy brackets <> or quotes "".' %
        include_header_file)
  if re.match('#include', include_header_file, re.IGNORECASE):
    raise ValueError(
        'Invalid include format for filename "%s". Please provide the filename '
        'without the prefixing #include directive.' % include_header_file)
  _includes.add(include_header_file)
def define_struct(name,
                  values,
                  include_header_file,
                  pack_attributes=True,
                  skip_conversions=False):
  """Defines a collection of structs and their properties.

  Args:
    name: Name of the struct, matching the struct name used by both the
      enclave C library and the host C library for the system calls.
      Eg. 'stat', 'timeval'.
    values: List of (member type, member name) tuples. Member names should
      match the corresponding struct members in the enclave C library and
      libc. Eg. [("int64_t", "st_dev"), ("int64_t", "st_ino")].
    include_header_file: Kernel header file to include so |name| is a valid
      kernel struct when generating conversion functions between kernel
      structs and enclave structs.
    pack_attributes: Boolean indicating if the compiler should be prevented
      from padding the generated kernel struct members from their natural
      alignment.
    skip_conversions: Boolean indicating that only kernel struct definitions
      (not conversion functions) should be generated. Useful when the
      conversion functions are complex and must be written manually.
  """
  # Stored as {"member_name", "member_type"} pairs, name first.
  member_entries = ('{{"{}", "{}"}}'.format(member_name, member_type)
                    for member_type, member_name in values)
  _struct_map[name].update(
      values=', '.join(member_entries),
      pack_attributes=pack_attributes,
      skip_conversions=skip_conversions)
  add_include_header_file(include_header_file)
def get_klinux_prefix():
  """Gets the prefix for generated C enums and conversion functions."""
  return 'const char klinux_prefix[] = "%s";\n' % _klinux_prefix
def get_includes_as_include_macros():
  """Returns all the includes as line separated #include macros.

  These includes are required by the types conversions generator at compile
  time to infer the values of constants for a given host implementation.
  """
  lines = ['#include <{}>\n'.format(filename)
           for filename in sorted(_includes)]
  return ''.join(lines)
def get_includes_in_define_macro():
  """Returns all the includes under a #define INCLUDES macro.

  The returned list can be used to generate #include directives by a consumer.
  """
  quoted = ('"{}"'.format(filename) for filename in sorted(_includes))
  return '#define INCLUDES ' + ', \\\n'.join(quoted)
def get_constants():
  r"""Returns a macro containing all constants' description.

  The macro initializes the types conversions generator's enum description
  table (enum_properties_table), mapping each enum name to a struct
  (EnumProperties) holding its properties and values. Typical output:

  #define ENUMS_INIT \
  {"FcntlCmd", {false, false, false, "int",
  {{"F_GETFD", F_GETFD}, {"F_SETFD", F_SETFD}}}}, \
  {"FileFlags", {0, 0, true, false, false, false, "int", {{"O_RDONLY",
  O_RDONLY}, {"O_WRONLY", O_WRONLY}}}}

  Each line describes one enum in the pattern:
  {"EnumName", {multi_valued, skip_conversions, wrap_macros_with_if_defined,
  data_type, {{"const_val1", const_val1}, {"const_val2", const_val2}}}}, \
  """
  rows = []
  for enum_name, props in sorted(_enum_map.items()):
    # Render the three boolean properties as C literals, in table order.
    flags = ['true' if props[key] else 'false'
             for key in ('multi_valued', 'skip_conversions',
                         'wrap_macros_with_if_defined')]
    # %-formatting keeps the literal braces readable (no {{ }} escaping).
    rows.append('{"%s", {%s, %s, %s, %s, {%s}}}' % (
        enum_name, flags[0], flags[1], flags[2],
        props['data_type'], props['values']))
  return '#define ENUMS_INIT \\\n%s\n' % ', \\\n'.join(rows)
def get_structs():
r"""Returns a macro containing all struct descriptions.
The returned macro is used by types conversion generator to initialize a
struct description table (struct_properties_table) mapping struct names to a
struct (StructProperties) describing the struct properties, including struct
members. A typical output of get_structs looks like the following -
#define STRUCTS_INIT \
{"stat", {true, false, {{"st_dev", "int64_t"}, {"st_ino", "int64_t"}}}}, \
{"timespec", {true, false, {{"tv_sec", "int64_t"}, {"tv_nsec", "int64_t"}}}}
Each line contains a struct, and has the following | |
= {}
for _s in [ShortSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, AccelX, name)
__repr__ = _swig_repr
def value(self): return _wallaby.AccelX_value(self)
def __init__(self):
this = _wallaby.new_AccelX()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _wallaby.delete_AccelX
__del__ = lambda self : None;
# Register the AccelX proxy class with the SWIG runtime so wrapped C++
# objects of this type surface as AccelX instances.
AccelX_swigregister = _wallaby.AccelX_swigregister
AccelX_swigregister(AccelX)
class AccelY(ShortSensor):
    """SWIG proxy for the C++ AccelY short-valued sensor."""
    # Merge the SWIG setter/getter tables inherited from ShortSensor.
    __swig_setmethods__ = {}
    for _s in [ShortSensor]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, AccelY, name, value)
    __swig_getmethods__ = {}
    for _s in [ShortSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, AccelY, name)
    __repr__ = _swig_repr
    def value(self): return _wallaby.AccelY_value(self)
    def __init__(self):
        # Wrap a newly allocated C++ AccelY object.
        this = _wallaby.new_AccelY()
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_AccelY
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
AccelY_swigregister = _wallaby.AccelY_swigregister
AccelY_swigregister(AccelY)
class AccelZ(ShortSensor):
    # SWIG-generated proxy for the native AccelZ sensor (accelerometer z
    # axis). Generated code: regenerate with SWIG rather than hand-editing.
    __swig_setmethods__ = {}
    for _s in [ShortSensor]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, AccelZ, name, value)
    __swig_getmethods__ = {}
    for _s in [ShortSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, AccelZ, name)
    __repr__ = _swig_repr
    def value(self): return _wallaby.AccelZ_value(self)
    def __init__(self):
        this = _wallaby.new_AccelZ()
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_AccelZ
    __del__ = lambda self : None;
# Register the proxy with the C extension so wrapped C++ objects use it.
AccelZ_swigregister = _wallaby.AccelZ_swigregister
AccelZ_swigregister(AccelZ)
class Compass(_object):
    # SWIG-generated proxy exposing the static compass API from _wallaby.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Compass, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Compass, name)
    __repr__ = _swig_repr
    def __init__(self):
        this = _wallaby.new_Compass()
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_Compass
    __del__ = lambda self : None;
    # Static members go through __swig_getmethods__ for old-style classes
    # and become real staticmethods when _newclass is true.
    __swig_getmethods__["calibrate"] = lambda x: _wallaby.Compass_calibrate
    if _newclass:calibrate = staticmethod(_wallaby.Compass_calibrate)
    __swig_getmethods__["setParams"] = lambda x: _wallaby.Compass_setParams
    if _newclass:setParams = staticmethod(_wallaby.Compass_setParams)
    __swig_getmethods__["getAngle"] = lambda x: _wallaby.Compass_getAngle
    if _newclass:getAngle = staticmethod(_wallaby.Compass_getAngle)
# Register the proxy with the C extension so wrapped C++ objects use it.
Compass_swigregister = _wallaby.Compass_swigregister
Compass_swigregister(Compass)
# Module-level flat wrappers for the static Compass API. SWIG emits a def
# and then immediately rebinds the same name to the raw C function, so each
# def only serves as a documented placeholder.
def Compass_calibrate():
    return _wallaby.Compass_calibrate()
Compass_calibrate = _wallaby.Compass_calibrate

def Compass_setParams(*args):
    return _wallaby.Compass_setParams(*args)
Compass_setParams = _wallaby.Compass_setParams

def Compass_getAngle():
    return _wallaby.Compass_getAngle()
Compass_getAngle = _wallaby.Compass_getAngle
class Gyro(_object):
    # SWIG-generated proxy exposing the static gyroscope API from _wallaby.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Gyro, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Gyro, name)
    __repr__ = _swig_repr
    # Static members go through __swig_getmethods__ for old-style classes
    # and become real staticmethods when _newclass is true.
    __swig_getmethods__["x"] = lambda x: _wallaby.Gyro_x
    if _newclass:x = staticmethod(_wallaby.Gyro_x)
    __swig_getmethods__["y"] = lambda x: _wallaby.Gyro_y
    if _newclass:y = staticmethod(_wallaby.Gyro_y)
    __swig_getmethods__["z"] = lambda x: _wallaby.Gyro_z
    if _newclass:z = staticmethod(_wallaby.Gyro_z)
    __swig_getmethods__["calibrate"] = lambda x: _wallaby.Gyro_calibrate
    if _newclass:calibrate = staticmethod(_wallaby.Gyro_calibrate)
    def __init__(self):
        this = _wallaby.new_Gyro()
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_Gyro
    __del__ = lambda self : None;
# Register the proxy with the C extension so wrapped C++ objects use it.
Gyro_swigregister = _wallaby.Gyro_swigregister
Gyro_swigregister(Gyro)
# Module-level flat wrappers for the static Gyro API (SWIG def-then-rebind
# idiom; the rebinding makes the raw C function the effective binding).
def Gyro_x():
    return _wallaby.Gyro_x()
Gyro_x = _wallaby.Gyro_x

def Gyro_y():
    return _wallaby.Gyro_y()
Gyro_y = _wallaby.Gyro_y

def Gyro_z():
    return _wallaby.Gyro_z()
Gyro_z = _wallaby.Gyro_z

def Gyro_calibrate():
    return _wallaby.Gyro_calibrate()
Gyro_calibrate = _wallaby.Gyro_calibrate
class GyroX(ShortSensor):
    # SWIG-generated proxy for the native GyroX sensor (gyroscope x axis).
    __swig_setmethods__ = {}
    for _s in [ShortSensor]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, GyroX, name, value)
    __swig_getmethods__ = {}
    for _s in [ShortSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, GyroX, name)
    __repr__ = _swig_repr
    def value(self): return _wallaby.GyroX_value(self)
    def __init__(self):
        this = _wallaby.new_GyroX()
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_GyroX
    __del__ = lambda self : None;
# Register the proxy with the C extension so wrapped C++ objects use it.
GyroX_swigregister = _wallaby.GyroX_swigregister
GyroX_swigregister(GyroX)
class GyroY(ShortSensor):
    # SWIG-generated proxy for the native GyroY sensor (gyroscope y axis).
    __swig_setmethods__ = {}
    for _s in [ShortSensor]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, GyroY, name, value)
    __swig_getmethods__ = {}
    for _s in [ShortSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, GyroY, name)
    __repr__ = _swig_repr
    def value(self): return _wallaby.GyroY_value(self)
    def __init__(self):
        this = _wallaby.new_GyroY()
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_GyroY
    __del__ = lambda self : None;
# Register the proxy with the C extension so wrapped C++ objects use it.
GyroY_swigregister = _wallaby.GyroY_swigregister
GyroY_swigregister(GyroY)
class GyroZ(ShortSensor):
    # SWIG-generated proxy for the native GyroZ sensor (gyroscope z axis).
    __swig_setmethods__ = {}
    for _s in [ShortSensor]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, GyroZ, name, value)
    __swig_getmethods__ = {}
    for _s in [ShortSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, GyroZ, name)
    __repr__ = _swig_repr
    def value(self): return _wallaby.GyroZ_value(self)
    def __init__(self):
        this = _wallaby.new_GyroZ()
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_GyroZ
    __del__ = lambda self : None;
# Register the proxy with the C extension so wrapped C++ objects use it.
GyroZ_swigregister = _wallaby.GyroZ_swigregister
GyroZ_swigregister(GyroZ)
class Magneto(_object):
    # SWIG-generated proxy exposing the static magnetometer API from _wallaby.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Magneto, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Magneto, name)
    __repr__ = _swig_repr
    # Static members go through __swig_getmethods__ for old-style classes
    # and become real staticmethods when _newclass is true.
    __swig_getmethods__["x"] = lambda x: _wallaby.Magneto_x
    if _newclass:x = staticmethod(_wallaby.Magneto_x)
    __swig_getmethods__["y"] = lambda x: _wallaby.Magneto_y
    if _newclass:y = staticmethod(_wallaby.Magneto_y)
    __swig_getmethods__["z"] = lambda x: _wallaby.Magneto_z
    if _newclass:z = staticmethod(_wallaby.Magneto_z)
    __swig_getmethods__["calibrate"] = lambda x: _wallaby.Magneto_calibrate
    if _newclass:calibrate = staticmethod(_wallaby.Magneto_calibrate)
    def __init__(self):
        this = _wallaby.new_Magneto()
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_Magneto
    __del__ = lambda self : None;
# Register the proxy with the C extension so wrapped C++ objects use it.
Magneto_swigregister = _wallaby.Magneto_swigregister
Magneto_swigregister(Magneto)
# Module-level flat wrappers for the static Magneto API (SWIG def-then-rebind
# idiom; the rebinding makes the raw C function the effective binding).
def Magneto_x():
    return _wallaby.Magneto_x()
Magneto_x = _wallaby.Magneto_x

def Magneto_y():
    return _wallaby.Magneto_y()
Magneto_y = _wallaby.Magneto_y

def Magneto_z():
    return _wallaby.Magneto_z()
Magneto_z = _wallaby.Magneto_z

def Magneto_calibrate():
    return _wallaby.Magneto_calibrate()
Magneto_calibrate = _wallaby.Magneto_calibrate
class MagnetoX(ShortSensor):
    # SWIG-generated proxy for the native MagnetoX sensor (magnetometer x axis).
    __swig_setmethods__ = {}
    for _s in [ShortSensor]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, MagnetoX, name, value)
    __swig_getmethods__ = {}
    for _s in [ShortSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, MagnetoX, name)
    __repr__ = _swig_repr
    def value(self): return _wallaby.MagnetoX_value(self)
    def __init__(self):
        this = _wallaby.new_MagnetoX()
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_MagnetoX
    __del__ = lambda self : None;
# Register the proxy with the C extension so wrapped C++ objects use it.
MagnetoX_swigregister = _wallaby.MagnetoX_swigregister
MagnetoX_swigregister(MagnetoX)
class MagnetoY(ShortSensor):
    # SWIG-generated proxy for the native MagnetoY sensor (magnetometer y axis).
    __swig_setmethods__ = {}
    for _s in [ShortSensor]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, MagnetoY, name, value)
    __swig_getmethods__ = {}
    for _s in [ShortSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, MagnetoY, name)
    __repr__ = _swig_repr
    def value(self): return _wallaby.MagnetoY_value(self)
    def __init__(self):
        this = _wallaby.new_MagnetoY()
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_MagnetoY
    __del__ = lambda self : None;
# Register the proxy with the C extension so wrapped C++ objects use it.
MagnetoY_swigregister = _wallaby.MagnetoY_swigregister
MagnetoY_swigregister(MagnetoY)
class MagnetoZ(ShortSensor):
    # SWIG-generated proxy for the native MagnetoZ sensor (magnetometer z axis).
    __swig_setmethods__ = {}
    for _s in [ShortSensor]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, MagnetoZ, name, value)
    __swig_getmethods__ = {}
    for _s in [ShortSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, MagnetoZ, name)
    __repr__ = _swig_repr
    def value(self): return _wallaby.MagnetoZ_value(self)
    def __init__(self):
        this = _wallaby.new_MagnetoZ()
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_MagnetoZ
    __del__ = lambda self : None;
# Register the proxy with the C extension so wrapped C++ objects use it.
MagnetoZ_swigregister = _wallaby.MagnetoZ_swigregister
MagnetoZ_swigregister(MagnetoZ)
class AbstractButton(BoolSensor):
    # SWIG proxy for the abstract C++ button base class; instantiation is
    # blocked by the raising __init__ below.
    __swig_setmethods__ = {}
    for _s in [BoolSensor]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, AbstractButton, name, value)
    __swig_getmethods__ = {}
    for _s in [BoolSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, AbstractButton, name)
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _wallaby.delete_AbstractButton
    __del__ = lambda self : None;
    def setPressed(self, *args): return _wallaby.AbstractButton_setPressed(self, *args)
    def isPressed(self): return _wallaby.AbstractButton_isPressed(self)
    def isNotPressed(self): return _wallaby.AbstractButton_isNotPressed(self)
    def isClicked(self): return _wallaby.AbstractButton_isClicked(self)
    def waitUntilReleased(self): return _wallaby.AbstractButton_waitUntilReleased(self)
    def waitUntilPressed(self): return _wallaby.AbstractButton_waitUntilPressed(self)
    def waitUntilClicked(self): return _wallaby.AbstractButton_waitUntilClicked(self)
# Register the proxy with the C extension so wrapped C++ objects use it.
AbstractButton_swigregister = _wallaby.AbstractButton_swigregister
AbstractButton_swigregister(AbstractButton)
class AbstractTextButton(AbstractButton):
    # SWIG proxy for the abstract text-button base class (a button carrying
    # a text label); instantiation is blocked by the raising __init__.
    __swig_setmethods__ = {}
    for _s in [AbstractButton]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, AbstractTextButton, name, value)
    __swig_getmethods__ = {}
    for _s in [AbstractButton]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, AbstractTextButton, name)
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _wallaby.delete_AbstractTextButton
    __del__ = lambda self : None;
    def setText(self, *args): return _wallaby.AbstractTextButton_setText(self, *args)
    def text(self): return _wallaby.AbstractTextButton_text(self)
    def isTextDirty(self): return _wallaby.AbstractTextButton_isTextDirty(self)
    def resetText(self): return _wallaby.AbstractTextButton_resetText(self)
# Register the proxy with the C extension so wrapped C++ objects use it.
AbstractTextButton_swigregister = _wallaby.AbstractTextButton_swigregister
AbstractTextButton_swigregister(AbstractTextButton)
class IdButton(AbstractTextButton):
    # SWIG proxy for a concrete button addressed by id (constructor args
    # are forwarded to the C++ IdButton constructor).
    __swig_setmethods__ = {}
    for _s in [AbstractTextButton]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, IdButton, name, value)
    __swig_getmethods__ = {}
    for _s in [AbstractTextButton]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, IdButton, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _wallaby.new_IdButton(*args)
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_IdButton
    __del__ = lambda self : None;
    def setText(self, *args): return _wallaby.IdButton_setText(self, *args)
    def text(self): return _wallaby.IdButton_text(self)
    def isTextDirty(self): return _wallaby.IdButton_isTextDirty(self)
    def setPressed(self, *args): return _wallaby.IdButton_setPressed(self, *args)
    def value(self): return _wallaby.IdButton_value(self)
    def resetText(self): return _wallaby.IdButton_resetText(self)
# Register the proxy with the C extension so wrapped C++ objects use it.
IdButton_swigregister = _wallaby.IdButton_swigregister
IdButton_swigregister(IdButton)
class ExtraButtons(_object):
    # SWIG-generated proxy exposing the static extra-buttons UI API.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ExtraButtons, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ExtraButtons, name)
    __repr__ = _swig_repr
    # Static members go through __swig_getmethods__ for old-style classes
    # and become real staticmethods when _newclass is true.
    __swig_getmethods__["show"] = lambda x: _wallaby.ExtraButtons_show
    if _newclass:show = staticmethod(_wallaby.ExtraButtons_show)
    __swig_getmethods__["hide"] = lambda x: _wallaby.ExtraButtons_hide
    if _newclass:hide = staticmethod(_wallaby.ExtraButtons_hide)
    __swig_getmethods__["setShown"] = lambda x: _wallaby.ExtraButtons_setShown
    if _newclass:setShown = staticmethod(_wallaby.ExtraButtons_setShown)
    __swig_getmethods__["isShown"] = lambda x: _wallaby.ExtraButtons_isShown
    if _newclass:isShown = staticmethod(_wallaby.ExtraButtons_isShown)
    def __init__(self):
        this = _wallaby.new_ExtraButtons()
        # SWIG boilerplate: the first assignment lands in the except branch.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _wallaby.delete_ExtraButtons
    __del__ = lambda self : None;
# Register the proxy with the C extension so wrapped C++ objects use it.
ExtraButtons_swigregister = _wallaby.ExtraButtons_swigregister
ExtraButtons_swigregister(ExtraButtons)
# Module-level flat wrappers for the static ExtraButtons API (SWIG
# def-then-rebind idiom; the raw C function is the effective binding).
def ExtraButtons_show():
    return _wallaby.ExtraButtons_show()
ExtraButtons_show = _wallaby.ExtraButtons_show

def ExtraButtons_hide():
    return _wallaby.ExtraButtons_hide()
ExtraButtons_hide = _wallaby.ExtraButtons_hide

def ExtraButtons_setShown(*args):
    return _wallaby.ExtraButtons_setShown(*args)
ExtraButtons_setShown = _wallaby.ExtraButtons_setShown

def ExtraButtons_isShown():
    return _wallaby.ExtraButtons_isShown()
ExtraButtons_isShown = _wallaby.ExtraButtons_isShown
class Config(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Config, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Config, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _wallaby.new_Config(*args)
try: self.this.append(this)
except: self.this = this
| |
# -*- coding: utf-8 -*-
"""
Module: pypub.scrapers.taylorfrancis
NOTE: THIS IS FOR A DEPRECATED VERSION OF T&F!! THE HTML TAGS NEED TO BE CHANGED.
Tasks/Examples:
---------------
1) ****** Get references given a doi value *******
from pypub.scrapers import ________ as __
refs = __.get_references('0006899387903726',verbose=True)
refs = __.get_references('S1042368013000776',verbose=True)
df = refs[0].to_data_frame(refs)
Currently I am building something that allows extraction of references from
a URL.
"""
# Standard imports
import sys
import os
# Third party imports
import requests
from bs4 import BeautifulSoup
# Local imports
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pypub.utils import get_truncated_display_string as td
from pypub.utils import findValue
from pypub.pypub_errors import *
from pypub.scrapers.base_objects import *
_TF_URL = 'http://tandfonline.com/'
class TaylorFrancisAuthor(BaseAuthor):
    """Single author parsed from a Taylor & Francis article author list."""

    def __init__(self, li_tag):
        """
        Parameters
        ----------
        li_tag : bs4.element.Tag
            The author's entry in the page's author list.

        Improvements
        ------------
        1) Allow retrieval of icon info:
            - corresponding author info
            - email author
        2) Split name into parts
        """
        super().__init__()
        # Get author name
        self.name = li_tag.contents[0].text
        self.affiliations = []
        self.email = None
        self.superscripts = []
        # Superscript letter -> index into the affiliation list built by
        # TaylorFrancisEntry. BUG FIX: this map was 1-based ('a': 1, ...)
        # while the affiliation list it indexes is a plain 0-indexed Python
        # list, so every author was assigned the affiliation *after* their
        # own (and 'h' raised IndexError with 8 affiliations).
        self.affmap = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7}
        # TODO: THIS DOESN'T WORK. FIX IT.
        # The superscripts are siblings of the li_tag, not children!
        # Need to figure out how to get them and separate them by author.
        # Parse superscripts
        supers = li_tag.find_all('sup')
        for x in supers:
            if x.text != '*':
                self.superscripts.append(x.text)

    def populate_affiliations(self, aff_labels):
        # aff_labels is a 0-indexed list of affiliation strings collected by
        # TaylorFrancisEntry; resolve this author's superscripts against it.
        self.affiliations = [aff_labels[self.affmap[x]] for x in self.superscripts]

    def __repr__(self):
        return u'' + \
            'name: %s\n' % self.name + \
            'affiliations: %s\n' % self.affiliations + \
            'email: %s\n' % self.email
class TaylorFrancisEntry(BaseEntry):
    """
    Parsed metadata of a single Taylor & Francis article page.

    This could be a step above the reference since it would, for example,
    contain all authors on a paper.

    Attributes
    ----------
    doi : string
        The unique identifier

    See Also
    --------
    TaylorFrancisRef

    Examples
    --------
    from pypub.scrapers import taylorfrancis as tf
    url = ''
    tfe = tf.TaylorFrancisEntry(url, verbose=True)

    Improvements
    ------------
    - Add citing articles
    """

    def __init__(self, soup, verbose=False):
        """
        Parameters
        ----------
        soup : bs4.BeautifulSoup
            Parsed article page.
        verbose : bool
            Currently unused.

        Raises
        ------
        ParseException
            If any required section of the page cannot be located.
        """
        super().__init__()
        # Get entry content information
        mainContent = soup.find('div', {'id': 'journal_content'})
        if mainContent is None:
            mainContent = soup.find('div', {'id': 'pb-page-content'})
        if mainContent is None:
            raise ParseException('Unable to find main content of page')

        # Metadata:
        # ---------
        titlebox = mainContent.find('div', {'class': 'description'})
        if titlebox is not None:
            self.title = titlebox.find('h1').text.title()
        else:
            self.title = None

        # BUG FIX: removed leftover debugging breakpoint
        # (``import pdb; pdb.set_trace()``) that froze every construction.

        # This box contains the publication name as well as Volume and Issue
        pubbox = mainContent.find('div', {'class': 'borderedmodule'})
        pubbox = pubbox.find('td')
        self.publication = findValue(pubbox, 'h2')
        if self.publication is not None:
            self.publication = self.publication.strip()

        # Parsing out the integer values of the volume and issue
        vol_issue = pubbox.find('h3')
        if vol_issue is None:
            raise ParseException('Unable to find volume and issue data')
        else:
            vol_issue = vol_issue.text
        issue_index = vol_issue.find('Issue')
        # If an issue number is listed, extract it
        if issue_index != -1:
            vol_text = vol_issue[0:issue_index]
            all_issue_text = vol_issue[issue_index:]
            issue_text = all_issue_text[0:all_issue_text.find(',')]
            issue_num_text = [x for x in issue_text if x.isdigit()]
            self.issue = ''.join(issue_num_text)
        else:
            vol_text = vol_issue
            self.issue = None
        vol_num_text = [x for x in vol_text if x.isdigit()]
        self.volume = ''.join(vol_num_text)

        # Two dates are given: original publication date and
        # online publication date. This returns the original journal pub date.
        datebox = mainContent.find('div', {'class': 'articleDates'})
        if datebox is None:
            raise ParseException('Unable to find publishing dates')
        alldates = datebox.find_all('li')
        full_date_text = alldates[-1].text
        date_index = full_date_text.find('Published online: ')
        if date_index > -1:
            # 18 == len('Published online: ')
            date = full_date_text[(date_index + 18):]
        else:
            date = ''
        self.date = date
        self.year = self.date[-4:]

        # Keywords
        # TaylorFrancis keeps keywords below the abstract, separate from header info
        abstract_section = mainContent.find('div', {'class': 'abstract'})
        keybox = abstract_section.find('ul', {'class': 'keywords'})
        if keybox is None:
            raise ParseException('Unable to find keywords')
        wordlist = keybox.find_all('li')
        self.keywords = [w.text[0:w.text.find(',')] for w in wordlist]

        metabox = mainContent.find('div', {'class': 'doiMeta'})
        self.pages = findValue(mainContent, 'div', label_type='class', label_name='pageRange')

        # DOI Retrieval:
        # --------------
        # This might be more reliable than assuming we have the DOI in the title
        self.doi = findValue(metabox, 'dd')
        doi_startindex = self.doi.find('10.')
        self.doi = self.doi[doi_startindex:]  # to get rid of whitespace at the beginning

        # Authors:
        # --------
        # Find list items within the ordered list with id 'authors'
        # Need to find only classless li's so that it doesn't also retrieve the child li's corresponding
        # to author affiliations at this stage.
        authorList = metabox.find_all('span', {'class': 'hlFld-ContribAuthor'})
        self.authors = [TaylorFrancisAuthor(x) for x in authorList]

        # Find the list of affiliations from the tabbed module at the bottom of the page
        tabModule = mainContent.find('div', {'id': 'tabModule'})
        aff_list = tabModule.find('ul', {'class': 'affiliations'})
        affs = aff_list.find_all('li')
        affiliations = []
        for aff in affs:
            affiliations.append(aff.text[1:])  # Get rid of the leading superscript letter
        # Assign affiliations to authors
        for author in self.authors:
            author.populate_affiliations(affiliations)

    def __repr__(self):
        return u'' + \
            ' title: %s\n' % td(self.title) + \
            ' authors: %s\n' % self.authors + \
            ' keywords: %s\n' % self.keywords + \
            ' publication: %s\n' % self.publication + \
            ' date: %s\n' % self.date + \
            ' volume: %s\n' % self.volume + \
            ' issue: %s\n' % self.issue + \
            ' pages: %s\n' % self.pages + \
            ' doi: %s\n' % self.doi

    @classmethod
    def from_doi(cls, doi):
        # BUG FIX: was ``def from_doi(doi)`` under @classmethod -- the class
        # object was bound to ``doi``, so the method could never work.
        # NOTE(review): the constructor expects parsed soup while this passes
        # a URL string; the module is marked deprecated -- TODO confirm the
        # intended construction path before relying on this method.
        entry = cls(_TF_URL + '/doi/abs/' + str(doi))
        return entry
# TODO: Inherit from some abstract ref class
# I think the abstract class should only require conversion to a common standard
class TaylorFrancisRef(BaseRef):
"""
This is the result class of calling get_references. It contains the
bibliographic information about the reference, as well as additional meta
information such as a DOI (if known).
Attributes:
-----------
ref_id : int
The index of the reference in the citing document. A value of 1
indicates that the reference is the first reference in the citing
document.
title : string
authors : string
List of the authors. This list may be truncated if there are too many
authors, e.g.: '<NAME>, <NAME>, <NAME>, et al.'
publication : string
Abbreviated (typically?) form of the journal
volume : string
date : string
This appears to always be the year
doi : string
Digital Object Identifier. May be None if not present. This is
currently based on the presence of a link to fulltext via Crossref.
pdf_link : string (default None)
If not None, this link points to the pdf of the article.
See Also:
get_references
"""
def __init__(self, ref_tags, ref_id):
    """
    Parameters:
    -----------
    ref_tags: bs4.element.Tag
        Html tags as soup of the reference. Information provided is that
        needed in order to form a citation for the given reference.
    ref_id: int
        The id of the reference as ordered in the citing entry. A value
        of 1 indicates that this object is the first reference in the bibliography.
    """
    super().__init__()
    self.ref_tags = ref_tags
    # Reference Bibliography Section:
    #--------------------------------
    self.ref_id = ref_id + 1 # Input is 0 indexed
    self.volume = None
    self.pages = None
    all_text = ref_tags.find_all(text=True)
    # NOTE(review): assumes the citation body is the second text segment
    # and the volume/page metadata the fourth -- fragile against markup
    # changes; an IndexError here means the page layout shifted.
    self.citation = all_text[1]
    # 'all_text' is a list of the text segments within each citation.
    # If it is a short list, it means that the citation is likely a book,
    # and doesn't include page numbers, PMID, DOI, etc.
    if len(all_text) > 5:
        metadata = all_text[3]
        metadata = metadata[2:] # Get rid of leading '; '
        divider = metadata.find(':') # This divides volume number from page range
        self.volume = metadata[0:divider]
        self.pages = metadata[divider+1:metadata.find(';')]
    self.date = findValue(ref_tags, 'span')
    # Reference Link Section:
    #------------------------------
    self.crossref = None
    self.pubmed = None
    self.pubmed_id = None
    self.doi = None
    self.web_of_science = None
    # External links (i.e. PubMed, CrossRef) are kept in <a> tags,
    # while the IDs are conveniently kept in <pub-id> tags
    links = ref_tags.find_all('a')
    ids = ref_tags.find_all('pub-id')
    for link in ids:
        id_type = link['pub-id-type']
        if id_type == 'pmid':
            self.pubmed_id = link.text
        elif id_type == 'doi':
            self.doi = link.text
    # NOTE(review): find_all always returns a (possibly empty) list, never
    # None, so this guard is redundant but harmless.
    if links is not None:
        for link in links:
            href = link['href'][1:] # Get rid of leading '/'
            text = link.text.lower()
            if 'crossref' in text:
                self.crossref = _TF_URL + href
            elif 'pubmed' in text:
                self.pubmed = _TF_URL + href
            elif 'science' in text:
                self.web_of_science = _TF_URL + href
def __repr__(self):
return u'' + \
'ref_id: %s\n' % self.ref_id + \
'citation: %s\n' % self.citation + \
'date: %s \n' % self.date + | |
return True
def _get_rulestring(self, cr, uid, ids, name, arg, context=None):
    """
    Gets Recurrence rule string according to value type RECUR of iCalendar from the values given.
    @param self: The object pointer
    @param cr: the current row, from the database cursor,
    @param id: List of calendar event's ids.
    @param context: A standard dictionary for contextual values
    @return: dictionary of rrule value.
    """
    result = {}
    if not isinstance(ids, list):
        ids = [ids]
    for id in ids:
        #read these fields as SUPERUSER because if the record is private a normal search could return False and raise an error
        data = self.read(cr, SUPERUSER_ID, id, ['interval', 'count'], context=context)
        # validate the recurrence parameters before building the rule string
        if data.get('interval', 0) < 0:
            raise osv.except_osv(_('Warning!'), _('Interval cannot be negative.'))
        if data.get('count', 0) <= 0:
            raise osv.except_osv(_('Warning!'), _('Count cannot be negative or 0.'))
        # second read as the real user: fetch every field that feeds
        # compute_rule_string
        data = self.read(cr, uid, id, ['id','byday','recurrency', 'month_list','end_date', 'rrule_type', 'select1', 'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa', 'su', 'exrule', 'day', 'week_list' ], context=context)
        event = data['id']
        if data['recurrency']:
            result[event] = self.compute_rule_string(data)
        else:
            # non-recurring events expose an empty rrule
            result[event] = ""
    return result
# hook method to fix the wrong signature
def _set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=None):
    # fields.function fnct_inv hook: _rrule_write takes an extra leading
    # ``obj`` argument, so ``self`` is forwarded explicitly as that argument.
    return self._rrule_write(self, cr, uid, ids, field_name, field_value, args, context=context)
def _rrule_write(self, obj, cr, uid, ids, field_name, field_value, args, context=None):
    # Inverse of _get_rulestring: parse an RRULE string back into the
    # individual recurrence fields and store them.
    # NOTE(review): ``obj`` is unused; it exists only so _set_rulestring's
    # pass-through signature matches.
    if not isinstance(ids, list):
        ids = [ids]
    data = self._get_empty_rrule_data()
    if field_value:
        data['recurrency'] = True
        for event in self.browse(cr, uid, ids, context=context):
            # parse the rule relative to the event's start date
            update_data = self._parse_rrule(field_value, dict(data), event.date)
            data.update(update_data)
    # writing empty rrule data when field_value is falsy clears the recurrence
    super(calendar_event, self).write(cr, uid, ids, data, context=context)
    return True
# Field declarations for the calendar event model (OpenERP osv columns).
_columns = {
    'id': fields.integer('ID', readonly=True),
    'sequence': fields.integer('Sequence'),
    'name': fields.char('Description', size=64, required=False, states={'done': [('readonly', True)]}),
    'date': fields.datetime('Date', states={'done': [('readonly', True)]}, required=True,),
    'date_deadline': fields.datetime('End Date', states={'done': [('readonly', True)]}, required=True,),
    'create_date': fields.datetime('Created', readonly=True),
    'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
    'description': fields.text('Description', states={'done': [('readonly', True)]}),
    'class': fields.selection([('public', 'Public'), ('private', 'Private'), \
        ('confidential', 'Public for Employees')], 'Privacy', states={'done': [('readonly', True)]}),
    'location': fields.char('Location', size=264, help="Location of Event", states={'done': [('readonly', True)]}),
    'show_as': fields.selection([('free', 'Free'), ('busy', 'Busy')], \
        'Show Time as', states={'done': [('readonly', True)]}),
    'base_calendar_url': fields.char('Caldav URL', size=264),
    'state': fields.selection([
        ('tentative', 'Uncertain'),
        ('cancelled', 'Cancelled'),
        ('confirmed', 'Confirmed'),
    ], 'Status', readonly=True),
    # iCalendar recurrence fields (RFC 5545 RECUR): the functional 'rrule'
    # field is computed from / written back to the discrete fields below.
    'exdate': fields.text('Exception Date/Times', help="This property \
defines the list of date/time exceptions for a recurring calendar component."),
    'exrule': fields.char('Exception Rule', size=352, help="Defines a \
rule or repeating pattern of time to exclude from the recurring rule."),
    'rrule': fields.function(_get_rulestring, type='char', size=124, \
        fnct_inv=_set_rulestring, store=True, string='Recurrent Rule'),
    'rrule_type': fields.selection([
        ('daily', 'Day(s)'),
        ('weekly', 'Week(s)'),
        ('monthly', 'Month(s)'),
        ('yearly', 'Year(s)')
    ], 'Recurrency', states={'done': [('readonly', True)]},
        help="Let the event automatically repeat at that interval"),
    'alarm_id': fields.many2one('res.alarm', 'Reminder', states={'done': [('readonly', True)]},
        help="Set an alarm at this time, before the event occurs" ),
    'base_calendar_alarm_id': fields.many2one('calendar.alarm', 'Alarm'),
    'recurrent_id': fields.integer('Recurrent ID'),
    'recurrent_id_date': fields.datetime('Recurrent ID date'),
    'vtimezone': fields.selection(_tz_get, size=64, string='Timezone'),
    'user_id': fields.many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}),
    'organizer': fields.char("Organizer", size=256, states={'done': [('readonly', True)]}), # Map with organizer attribute of VEvent.
    'organizer_id': fields.many2one('res.users', 'Organizer', states={'done': [('readonly', True)]}),
    'end_type' : fields.selection([('count', 'Number of repetitions'), ('end_date','End date')], 'Recurrence Termination'),
    'interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
    'count': fields.integer('Repeat', help="Repeat x times"),
    # weekday flags used by weekly recurrence
    'mo': fields.boolean('Mon'),
    'tu': fields.boolean('Tue'),
    'we': fields.boolean('Wed'),
    'th': fields.boolean('Thu'),
    'fr': fields.boolean('Fri'),
    'sa': fields.boolean('Sat'),
    'su': fields.boolean('Sun'),
    'select1': fields.selection([('date', 'Date of month'),
        ('day', 'Day of month')], 'Option'),
    'day': fields.integer('Date of month'),
    'week_list': fields.selection([
        ('MO', 'Monday'),
        ('TU', 'Tuesday'),
        ('WE', 'Wednesday'),
        ('TH', 'Thursday'),
        ('FR', 'Friday'),
        ('SA', 'Saturday'),
        ('SU', 'Sunday')], 'Weekday'),
    'byday': fields.selection([
        ('1', 'First'),
        ('2', 'Second'),
        ('3', 'Third'),
        ('4', 'Fourth'),
        ('5', 'Fifth'),
        ('-1', 'Last')], 'By day'),
    'month_list': fields.selection(months.items(), 'Month'),
    'end_date': fields.date('Repeat Until'),
    'attendee_ids': fields.many2many('calendar.attendee', 'event_attendee_rel', \
        'event_id', 'attendee_id', 'Attendees'),
    'allday': fields.boolean('All Day', states={'done': [('readonly', True)]}),
    'active': fields.boolean('Active', help="If the active field is set to \
true, it will allow you to hide the event alarm information without removing it."),
    'recurrency': fields.boolean('Recurrent', help="Recurrent Meeting"),
    'partner_ids': fields.many2many('res.partner', string='Attendees', states={'done': [('readonly', True)]}),
}
def create_attendees(self, cr, uid, ids, context):
    """Create calendar.attendee records for every partner on the events in
    ``ids`` that does not already have one, link them to the event, and
    send an invitation mail to the newly added attendees.
    @return: True
    """
    att_obj = self.pool.get('calendar.attendee')
    user_obj = self.pool.get('res.users')
    current_user = user_obj.browse(cr, uid, uid, context=context)
    for event in self.browse(cr, uid, ids, context):
        # partner ids that already have an attendee record (dict used as a set)
        attendees = {}
        for att in event.attendee_ids:
            attendees[att.partner_id.id] = True
        new_attendees = []
        mail_to = ""
        for partner in event.partner_ids:
            if partner.id in attendees:
                continue
            # drop default_state so the new attendee does not inherit the
            # event's default state from the caller's context
            local_context = context.copy()
            local_context.pop('default_state', None)
            att_id = self.pool.get('calendar.attendee').create(cr, uid, {
                'partner_id': partner.id,
                'user_id': partner.user_ids and partner.user_ids[0].id or False,
                'ref': self._name+','+str(event.id),
                'email': partner.email
            }, context=local_context)
            if partner.email:
                mail_to = mail_to + " " + partner.email
            # link the new attendee to the event ((4, id) = add to m2m)
            self.write(cr, uid, [event.id], {
                'attendee_ids': [(4, att_id)]
            }, context=context)
            new_attendees.append(att_id)
        # notify only when there is someone to mail and a sender address
        if mail_to and current_user.email:
            att_obj._send_mail(cr, uid, new_attendees, mail_to,
                email_from = current_user.email, context=context)
    return True
def default_organizer(self, cr, uid, context=None):
    """Build the default organizer string for the current user.

    Returns ``"Name <email>"`` when the user record carries an email
    address, otherwise just the user's name.
    """
    user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
    if user.email:
        return "%s <%s>" % (user.name, user.email)
    return user.name
# Default values applied when creating a new calendar event.
_defaults = {
    'end_type': 'count',
    'count': 1,
    'rrule_type': False,
    'state': 'tentative',
    'class': 'public',
    'show_as': 'busy',
    'select1': 'date',
    'interval': 1,
    'active': 1,
    # the creating user is the responsible by default
    'user_id': lambda self, cr, uid, ctx: uid,
    # computed per-record from the current user's name/email
    'organizer': default_organizer,
}
def _check_closing_date(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.date_deadline < event.date:
return False
return True
# Model constraints, checked on create/write; each entry is
# (checker, error message, fields that trigger the check).
_constraints = [
    (_check_closing_date, 'Error ! End date cannot be set before start date.', ['date_deadline']),
]
# TODO for trunk: remove get_recurrent_ids
def get_recurrent_ids(self, cr, uid, select, domain, limit=100, context=None):
    """Thin wrapper around :meth:`_get_recurrent_ids`.

    Reads the sort order from ``context['order']`` (falling back to the
    model's ``_order``) and delegates everything else unchanged.
    """
    context = context or {}
    return self._get_recurrent_ids(
        cr, uid, select, domain, limit=limit,
        order=context.get('order', self._order), context=context)
def _get_recurrent_ids(self, cr, uid, select, domain, limit=100, order=None, context=None):
"""Gives virtual event ids for recurring events based on value of Recurrence Rule
This method gives ids of dates that comes between start date and end date of calendar views
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user's ID for security checks,
@param limit: The Number of Results to Return
@param order: The fields (comma separated, format "FIELD {DESC|ASC}") on which the events should be sorted"""
if not context:
context = {}
result = []
result_data = []
fields = ['rrule', 'recurrency', 'exdate', 'exrule', 'date']
if order:
order_fields = [field.split()[0] for field in order.split(',')]
else:
# fallback on self._order defined on the model
order_fields = [field.split()[0] for field in self._order.split(',')]
fields = list(set(fields + order_fields))
for data in super(calendar_event, self).read(cr, uid, select, fields, context=context):
if not data['recurrency'] or not data['rrule']:
result_data.append(data)
result.append(data['id'])
continue
event_date = datetime.strptime(data['date'], "%Y-%m-%d %H:%M:%S")
# TOCHECK: the start date should be replaced by event date; the event date will be changed by that of calendar code
exdate = data['exdate'] and data['exdate'].split(',') or []
rrule_str = data['rrule']
new_rrule_str = []
rrule_until_date = False
is_until = False
for rule in rrule_str.split(';'):
name, value = rule.split('=')
if name == "UNTIL":
is_until = True
value = parser.parse(value)
rrule_until_date = parser.parse(value.strftime("%Y-%m-%d %H:%M:%S"))
value = value.strftime("%Y%m%d%H%M%S")
new_rule = '%s=%s' % (name, value)
new_rrule_str.append(new_rule)
new_rrule_str = ';'.join(new_rrule_str)
rdates = get_recurrent_dates(str(new_rrule_str), exdate, event_date, data['exrule'])
for r_date in rdates:
# fix domain evaluation
# step 1: check date and replace expression by True or False, replace other expressions by True
# step 2: evaluation of & and |
# check if there are one False
pile = []
for arg in domain:
if str(arg[0]) in (str('date'), str('date_deadline')):
if (arg[1] == '='):
ok = r_date.strftime('%Y-%m-%d')==arg[2]
if (arg[1] == '>'):
ok = r_date.strftime('%Y-%m-%d')>arg[2]
if (arg[1] == '<'):
ok = r_date.strftime('%Y-%m-%d')<arg[2]
if (arg[1] == '>='):
ok = r_date.strftime('%Y-%m-%d')>=arg[2]
if (arg[1] == '<='):
ok = r_date.strftime('%Y-%m-%d')<=arg[2]
pile.append(ok)
elif str(arg) == str('&') or str(arg) == str('|'):
pile.append(arg)
else:
pile.append(True)
pile.reverse()
new_pile = []
for item in pile:
if not isinstance(item, basestring):
res = item
elif str(item) == str('&'):
first = new_pile.pop()
second = new_pile.pop()
res = first and second
elif str(item) == str('|'):
first = new_pile.pop()
second = new_pile.pop()
res = first or second
new_pile.append(res)
if [True for item in new_pile if not item]:
continue
idval = real_id2base_calendar_id(data['id'], r_date.strftime("%Y-%m-%d %H:%M:%S"))
r_data = dict(data, id=idval, date=r_date.strftime("%Y-%m-%d %H:%M:%S"))
result.append(idval)
result_data.append(r_data)
ids = list(set(result))
if order_fields:
def comparer(left, right):
for fn, mult in comparers:
if type(fn(left)) == tuple and type(fn(right)) == tuple:
# comparing many2one values, sorting on name_get result
leftv, rightv = fn(left)[1], fn(right)[1]
else:
leftv, rightv = fn(left), fn(right)
| |
# Repo: SatisSoft/egts-debugger (extraction artifact converted to a comment)
import crcmod
# --- Transport-layer framing constants ---
EGTS_PROTOCOL_VERSION = b'\x01'
EGTS_PC_OK = 0
EGTS_TRANSPORT_LAYER_MIN_HEADER_LEN = 11
EGTS_SERVICE_LAYER_MIN_RECORD_HEADER_LEN = 7
EGTS_SERVICE_LAYER_MIN_SUBRECORD_LEN = 3
# polynomial are taken from EGTS documentation
# x^8 + x^5 + x^4 + 1 = 0x0131
crc8_func = crcmod.mkCrcFun(0x0131, initCrc=0xFF, rev=False)
# x^16 + x^12 + x^5 + 1 = 0x011021
crc16_func = crcmod.mkCrcFun(0x011021, initCrc=0xFFFF, rev=False)
# --- Packet types (PT field of the transport header) ---
EGTS_PT_RESPONSE = 0
EGTS_PT_APPDATA = 1
# EGTS timestamps count seconds from 2010-01-01 00:00:00 UTC.
timestamp_20100101_000000_utc = 1262304000
# --- Service types (SST/RST) ---
EGTS_AUTH_SERVICE = 1
EGTS_TELEDATA_SERVICE = 2
EGTS_COMMANDS_SERVICE = 4
# --- Subrecord types (SRT) ---
EGTS_SR_DISPATCHER_IDENTITY = 5
EGTS_SR_AUTH_PARAMS = 6
EGTS_SR_AUTH_INFO = 7
EGTS_SR_RESULT_CODE = 9
EGTS_SR_POS_DATA = 16
EGTS_SR_AD_SENSORS_DATA = 18
EGTS_SR_ABS_AN_SENS_DATA = 24
EGTS_SR_LIQUID_LEVEL_SENSOR = 27
EGTS_SR_COMMAND_DATA = 51
# EGTS_SR_COMMAND_DATA command types
CT_COMCONF = 0b0001
CT_MSGCONF = 0b0010
CT_MSGFROM = 0b0011
CT_MSGTO = 0b0100
CT_COM = 0b0101
CT_DELCOM = 0b0110
CT_SUBREQ = 0b0111
CT_DELIV = 0b1000
EGTS_SR_DISPATCHER_IDENTITY_DESCR = "EGTS_SR_DISPATCHER_IDENTITY"
# --- Processing result codes (PC) with human-readable descriptions ---
# NOTE(review): codes are not in numeric order (139 follows 150); kept as-is.
EGTS_PC_UNS_PROTOCOL = 128
EGTS_PC_UNS_PROTOCOL_DESCR = "Unsupported protocol version"
EGTS_PC_INC_HEADERFORM = 131
EGTS_PC_INC_HEADERFORM_DESCR = "Header structure error"
EGTS_PC_UNS_TYPE = 133
EGTS_PC_UNS_TYPE_DESCR = "Unsupported type"
EGTS_PC_HEADERCRC_ERROR = 137
EGTS_PC_HEADERCRC_ERROR_DESCR = "Header check sum error"
EGTS_PC_DATACRC_ERROR = 138
EGTS_PC_DATACRC_ERROR_DESCR = "Data check sum error"
EGTS_PC_SRVC_UNKN = 150
EGTS_PC_SRVC_UNKN_DESCR = "Unknown service"
EGTS_PC_INVDATALEN = 139
EGTS_PC_INVDATALEN_DESCR = "Incorrect data length"
EGTS_PC_SR_UNKN = 165
EGTS_PC_SR_UNKN_DESCR = "Unknown service subrecord type"
# Maximum total packet size allowed by the protocol (fits a 16-bit length).
EGTS_MAX_PACKET_LENGTH = 65535
class EgtsParsingError(ValueError):
    """Base error for EGTS packet parsing failures.

    The exception text is the human-readable ``error_description``, with the
    optional ``message`` detail appended in parentheses.  The numeric EGTS
    result code is kept in ``error_code`` so callers can build a protocol
    response from it.
    """
    def __init__(self, message, error_description, error_code):
        description = error_description
        if message:
            description = "{0} ({1})".format(error_description, message)
        super().__init__(description)
        self.error_code = error_code
class EgtsPcUnsProtocol(EgtsParsingError):
    """Unsupported EGTS protocol version (result code 128)."""
    def __init__(self, message=""):
        EgtsParsingError.__init__(
            self, message, EGTS_PC_UNS_PROTOCOL_DESCR, EGTS_PC_UNS_PROTOCOL)
class EgtsPcIncHeaderForm(EgtsParsingError):
    """Malformed transport/record header structure (result code 131)."""
    def __init__(self, message=""):
        EgtsParsingError.__init__(
            self, message, EGTS_PC_INC_HEADERFORM_DESCR, EGTS_PC_INC_HEADERFORM)
class EgtsPcUnsType(EgtsParsingError):
    """Unsupported packet type (result code 133)."""
    def __init__(self, message=""):
        EgtsParsingError.__init__(
            self, message, EGTS_PC_UNS_TYPE_DESCR, EGTS_PC_UNS_TYPE)
class EgtsPcHeadercrcError(EgtsParsingError):
    """Transport header CRC8 mismatch (result code 137)."""
    def __init__(self, message=""):
        EgtsParsingError.__init__(
            self, message, EGTS_PC_HEADERCRC_ERROR_DESCR, EGTS_PC_HEADERCRC_ERROR)
class EgtsPcDatacrcError(EgtsParsingError):
    """Service-frame CRC16 mismatch (result code 138)."""
    def __init__(self, message=""):
        EgtsParsingError.__init__(
            self, message, EGTS_PC_DATACRC_ERROR_DESCR, EGTS_PC_DATACRC_ERROR)
class EgtsPcSrvcUnkn(EgtsParsingError):
    """Unknown service type in a record (result code 150)."""
    def __init__(self, message=""):
        EgtsParsingError.__init__(
            self, message, EGTS_PC_SRVC_UNKN_DESCR, EGTS_PC_SRVC_UNKN)
class EgtsPcInvdatalen(EgtsParsingError):
    """Buffer shorter than a declared length field requires (result code 139)."""
    def __init__(self, message=""):
        EgtsParsingError.__init__(
            self, message, EGTS_PC_INVDATALEN_DESCR, EGTS_PC_INVDATALEN)
class EgtsPcSrUnkn(EgtsParsingError):
    """Unknown subrecord type within a service (result code 165)."""
    def __init__(self, message=""):
        EgtsParsingError.__init__(
            self, message, EGTS_PC_SR_UNKN_DESCR, EGTS_PC_SR_UNKN)
class Egts:
    """A parsed EGTS packet (transport layer + service layer).

    The constructor consumes ``buffer`` starting at the first protocol-version
    byte (0x01), validates the transport header (CRC8) and the service frame
    (CRC16), and exposes:

    * ``pid`` / ``packet_type`` / ``body_len`` -- transport header fields;
    * ``records`` -- the parsed :class:`EgtsRecord` objects;
    * ``service`` -- SST of the first record (set only when records exist);
    * ``rpid`` / ``pr`` -- response fields, only for EGTS_PT_RESPONSE packets;
    * ``rest_buff`` -- leftover bytes after this packet (for stream parsing).
    """
    def __init__(self, buffer):
        # Skip any leading bytes up to the first 0x01 (PRV) byte.
        index = self._index(buffer)
        self._proc_transport_layer(buffer[index:])
        self._proc_service_layer(buffer[index + self.header_len:])
        # Everything past header + body + 2-byte body CRC belongs to the next
        # packet in the stream.
        self.rest_buff = buffer[index + self.header_len + len(self.body)+2:]
    @staticmethod
    def form_bin(pid, records):
        """Serialize ``records`` (EgtsRecord objects) into a complete
        EGTS_PT_APPDATA packet with packet id ``pid``."""
        body = Egts._body_bin(records)
        packet = Egts._packet_bin(pid, body, EGTS_PT_APPDATA)
        return packet
    def reply(self, ans_pid, ans_rid):
        """Build an EGTS_PT_RESPONSE packet acknowledging this packet.

        ``ans_pid``/``ans_rid`` are the packet id and record number to use in
        the reply; every received record is confirmed with status 0 (OK).
        """
        subrecords = self._reply_record()
        pack_id = self.pid.to_bytes(2, 'little')
        # Response SFRD layout: RPID (2 bytes) + PR (0 = EGTS_PC_OK) + one
        # record carrying the per-record confirmation subrecords.
        body = pack_id + b'\x00' + Egts._make_record(self.service, ans_rid, subrecords)
        reply = self._packet_bin(ans_pid, body, EGTS_PT_RESPONSE)
        return reply
    def _proc_transport_layer(self, buffer):
        """Validate the 11-byte transport header and extract its fields.

        Raises EgtsPcInvdatalen / EgtsPcUnsProtocol / EgtsPcIncHeaderForm /
        EgtsPcHeadercrcError on the corresponding defects.
        """
        if len(buffer) < EGTS_TRANSPORT_LAYER_MIN_HEADER_LEN:
            raise EgtsPcInvdatalen("Transport layer")
        # Top two bits of the flags byte are PRF and must be zero.
        if buffer[2] >> 6 != 0:
            raise EgtsPcUnsProtocol("PRF != 0")
        self.header_len = buffer[3]
        # Only the minimal 11-byte header (no routing info) is supported.
        if self.header_len != EGTS_TRANSPORT_LAYER_MIN_HEADER_LEN:
            raise EgtsPcIncHeaderForm("Transport layer")
        header = buffer[:self.header_len]
        # The last header byte is its CRC8 over the preceding bytes.
        header_crc = header[-1]
        header_crc_calc = crc8_func(header[:-1])
        if header_crc != header_crc_calc:
            msg = "Calculated crc: {0}, crc in packet: {1}".format(header_crc_calc, header_crc)
            raise EgtsPcHeadercrcError(msg)
        self.body_len = int.from_bytes(header[5:7], byteorder='little')  # FDL
        self.pid = int.from_bytes(header[7:9], byteorder='little')       # PID
        self.packet_type = header[9]                                     # PT
    def _proc_service_layer(self, buffer):
        """Check the body CRC16 and dispatch parsing on the packet type."""
        if self.body_len == 0:
            raise EgtsParsingError("", "Packet is correct, but body length = 0", -1)
        if len(buffer) < self.body_len + 2:
            msg = "Body buffer length is {0}; Must be at least {1}".format(len(buffer), self.body_len + 2)
            raise EgtsPcInvdatalen(msg)
        self.body = buffer[:self.body_len]
        # The two bytes following the body are its CRC16.
        body_crc = int.from_bytes(buffer[self.body_len:self.body_len+2], byteorder='little')
        body_crc_calc = crc16_func(self.body)
        if body_crc != body_crc_calc:
            msg = "Calculated crc: {0}, crc in packet: {1}".format(body_crc_calc, body_crc)
            raise EgtsPcDatacrcError(msg)
        if self.packet_type == EGTS_PT_APPDATA:
            self._parse_appdata()
        elif self.packet_type == EGTS_PT_RESPONSE:
            self._parse_response()
        else:
            raise EgtsPcUnsType("Packet Type " + str(self.packet_type) + " is unknown")
    def _parse_appdata(self):
        """Split an APPDATA body into consecutive service-layer records."""
        self.records = []
        rest_buf = self.body
        while len(rest_buf) > 0:
            rec = EgtsRecord.parse(rest_buf)
            self.records.append(rec)
            rest_buf = rest_buf[rec.rec_len:]
        if self.records:
            # The SST of the first record stands for the whole packet's
            # service; it is reused when building the reply.
            rec = self.records[0]
            self.service = rec.sst
    def _parse_response(self):
        """Parse a RESPONSE body: RPID (2 bytes), PR (1 byte), then records."""
        if len(self.body) > 3:
            self.rpid = int.from_bytes(self.body[0:2], byteorder='little')
            self.pr = self.body[2]
            self.records = []
            rest_buf = self.body[3:]
            while len(rest_buf) > 0:
                rec = EgtsRecord.parse(rest_buf)
                self.records.append(rec)
                rest_buf = rest_buf[rec.rec_len:]
            if self.records:
                rec = self.records[0]
                self.service = rec.sst
        else:
            raise EgtsPcInvdatalen("Response SFRD")
    @staticmethod
    def _packet_bin(ans_pid, body, type):
        """Frame ``body`` as a full packet: header + CRC8 + body + CRC16."""
        bcs = crc16_func(body)
        data_len = len(body)
        header = Egts._make_header(ans_pid, data_len, type)
        hcs = crc8_func(header)
        bcs_bin = bcs.to_bytes(2, 'little')
        reply = header + hcs.to_bytes(1, 'little') + body + bcs_bin
        return reply
    @staticmethod
    def _index(buffer):
        """Return the offset of the first 0x01 byte, taken to be PRV.

        NOTE(review): a stray 0x01 inside leading garbage would also match;
        acceptable for a debugger, but worth keeping in mind.
        """
        try:
            return buffer.index(EGTS_PROTOCOL_VERSION)
        except ValueError:
            raise EgtsPcUnsProtocol("PRV not found")
    def _get_data(self):
        # Collect the subrecord list of every record (no callers within this
        # class; presumably used by external code).
        records = []
        for record in self.records:
            subrecords = record.subrecords
            records.append(subrecords)
        return records
    def _reply_record(self):
        """Build one confirmation subrecord per received record:
        SRT=0, SRL=3 (little-endian), then CRN (2 bytes, little-endian) and
        status byte 0 (OK)."""
        res = b""
        for record in self.records:
            rec_id = record.rid
            reply_subrec = bytes([0x00, 0x03, 0x00, rec_id % 256, rec_id//256, 0])
            res += reply_subrec
        return res
    @staticmethod
    def _body_bin(data):
        # Concatenate the binary form of every record object.
        res = b""
        for rec in data:
            record = rec.form_bin()
            res += record
        return res
    @staticmethod
    def _make_header(ans_pid, data_len, type):
        """Build the 10-byte header (HCS excluded): fixed prefix PRV=1,
        SKID=0, flags=0x03, HL=11, HE=0, then FDL, PID and PT."""
        rec_len = data_len.to_bytes(2, 'little')
        ans_rid_bin = ans_pid.to_bytes(2, 'little')
        header = b'\x01\x00\x03\x0b\x00' + rec_len + ans_rid_bin + type.to_bytes(1, 'little')
        return header
    @staticmethod
    def _make_record(service, ans_rid, subrecords):
        """Wrap ``subrecords`` in a record header: RL, RN, flags byte 0x18
        (presumably the source/recipient service-on-device bits -- confirm
        against the EGTS spec), then SST and RST both set to ``service``."""
        sub_len = len(subrecords).to_bytes(2, 'little')
        rid = ans_rid.to_bytes(2, 'little')
        body = sub_len + rid + b'\x18' + service.to_bytes(1, 'little') + service.to_bytes(1, 'little') + subrecords
        return body
    def __str__(self):
        # ``rpid``/``pr`` exist only for responses, hence the type guard.
        s = "Packet ID: {0}; Packet Type: {1}; ".format(self.pid, self.packet_type)
        if self.packet_type == EGTS_PT_RESPONSE:
            s += "Response Packet ID: {0}; Processing Result: {1}; ".format(self.rpid, self.pr)
        records = self._records_2_string()
        s += "records: [{0}]".format(records)
        return s
    def _records_2_string(self):
        # Concatenate the string form of all records (no separator).
        records = ""
        for record in self.records:
            records += record.record_to_string()
        return records
class EgtsRecord:
    """A single EGTS service-layer record and its parsed subrecords.

    Attributes: ``rid`` (record number), ``sst`` (source service type),
    ``subrecords`` (parsed subrecord objects), plus optionally ``id``
    (object identifier, OID) and ``rec_len`` (total record length in
    bytes, header included).
    """
    def __init__(self, **kwargs):
        # 'rid', 'sst' and 'subrecords' are mandatory; 'id' and 'rec_len'
        # are only set when the corresponding kwargs are supplied.
        self.rid = kwargs['rid']
        self.sst = kwargs['sst']
        if 'id' in kwargs:
            self.id = kwargs['id']
        if 'rec_len' in kwargs:
            self.rec_len = kwargs['rec_len']
        self.subrecords = kwargs['subrecords']
    @classmethod
    def parse(cls, buffer):
        """Parse one record from the head of ``buffer``.

        Header layout: RL (2 bytes), RN (2 bytes), RFL (1 byte, bits 0..2
        flag the optional OID/EVID/TM fields of 4 bytes each), the optional
        fields, then SST and RST.  Raises EgtsPcIncHeaderForm /
        EgtsPcInvdatalen when the buffer is too short.
        """
        if len(buffer) < EGTS_SERVICE_LAYER_MIN_RECORD_HEADER_LEN:
            raise EgtsPcIncHeaderForm("Record is shorter then EGTS_SERVICE_LAYER_MIN_RECORD_HEADER_LEN")
        data_len = int.from_bytes(buffer[:2], byteorder='little')
        rid = int.from_bytes(buffer[2:4], byteorder='little')
        # Optional-field presence flags: TMFE (time), EVFE (event id),
        # OBFE (object id).
        tmfe = buffer[4] >> 2 & 1
        evfe = buffer[4] >> 1 & 1
        obfe = buffer[4] & 1
        opt_len = (tmfe + evfe + obfe) * 4
        header_len = EGTS_SERVICE_LAYER_MIN_RECORD_HEADER_LEN + opt_len
        if len(buffer) < header_len:
            raise EgtsPcIncHeaderForm("Record is shorter then EGTS_SERVICE_LAYER_MIN_RECORD_HEADER_LEN + opt_len")
        sst = buffer[5 + opt_len]
        kwargs = {'rid': rid, 'sst': sst, 'subrecords': []}
        if obfe:
            # OID comes first among the optional fields, so it always sits
            # right after the RFL byte; EVID/TM (when present) are skipped.
            id = int.from_bytes(buffer[5:9], byteorder='little')
            kwargs['id'] = id
        rec_len = header_len + data_len
        kwargs['rec_len'] = rec_len
        rec = cls(**kwargs)
        if data_len > 0:
            if len(buffer) < rec_len:
                raise EgtsPcInvdatalen("Record")
            data = buffer[header_len:header_len+data_len]
            rec._analyze_subrecords(data)
        return rec
    def _analyze_subrecords(self, buff):
        # Parse subrecords until the record's data area is exhausted.
        while len(buff) > 0:
            sub, buff = self._analyze_subrecord(buff)
            self.subrecords.append(sub)
    def _analyze_subrecord(self, buffer):
        """Parse one subrecord (SRT byte, SRL 2 bytes, then SRD) and return
        ``(subrecord_object, remaining_buffer)``."""
        if len(buffer) < 3:
            raise EgtsPcInvdatalen("Subrecord header")
        srt = buffer[0]
        srl = int.from_bytes(buffer[1:3], byteorder='little')
        sub_len = 3 + srl
        if len(buffer) < sub_len:
            raise EgtsPcInvdatalen("Subrecord data")
        sub_data = buffer[3:sub_len]
        # SRT 0 is the record-response subrecord in every service; for any
        # other value the interpretation depends on this record's SST.
        if srt == EGTS_PT_RESPONSE:
            sub = self._analyze_subrecord_response(sub_data)
        elif self.sst == EGTS_AUTH_SERVICE:
            sub = self._analyze_subrecord_auth(sub_data, srt)
        elif self.sst == EGTS_TELEDATA_SERVICE:
            sub = self._analyze_subrecord_tele(sub_data, srt)
        elif self.sst == EGTS_COMMANDS_SERVICE:
            sub = self._analyze_subrecord_comm(sub_data, srt)
        else:
            message = "sst = {0}; srt = {1}".format(self.sst, srt)
            raise EgtsPcSrvcUnkn(message)
        return sub, buffer[sub_len:]
    @staticmethod
    def _analyze_subrecord_response(buff):
        # SRT 0: record-response payload.
        return EgtsResponse(buff)
    def _analyze_subrecord_auth(self, buff, srt):
        # EGTS_AUTH_SERVICE dispatch; unrecognized types are wrapped in
        # UnknownSubRecord instead of being rejected.
        if srt == EGTS_SR_DISPATCHER_IDENTITY:
            return EgtsSrDispatcherIdentity.parse(buff, srt)
        if srt == EGTS_SR_AUTH_PARAMS:
            return EgtsSrAuthParams.parse(buff, srt)
        if srt == EGTS_SR_RESULT_CODE:
            return EgtsSrResultCode.parse(buff, srt)
        else:
            return UnknownSubRecord(srt)
    def _analyze_subrecord_tele(self, buff, srt):
        # EGTS_TELEDATA_SERVICE dispatch.
        if srt == EGTS_SR_POS_DATA:
            return EgtsSrPosData.parse(buff)
        elif srt == EGTS_SR_AD_SENSORS_DATA:
            return EgtsSrAdSensorsData.parse(buff)
        elif srt == EGTS_SR_ABS_AN_SENS_DATA:
            return EgtsSrAbsAnSensData.parse(buff)
        elif srt == EGTS_SR_LIQUID_LEVEL_SENSOR:
            return EgtsSrLiquidLevelSensor.parse(buff)
        else:
            return UnknownSubRecord(srt)
    def _analyze_subrecord_comm(self, buff, srt):
        # EGTS_COMMANDS_SERVICE dispatch.
        if srt == EGTS_SR_COMMAND_DATA:
            return EgtsSrCommandData.parse(buff, srt)
        else:
            return UnknownSubRecord(srt)
    def record_to_string(self):
        """Human-readable dump of this record and its subrecords."""
        s = "{" + "RecNum: {0}, sst: {1}, ".format(self.rid, self.sst)
        if hasattr(self, "id"):
            s = s + "ID: " + str(self.id) + ", "
        subrecords = self.subrecords_to_string()
        s += "subrecords: [{0}]".format(subrecords) + "}"
        return s
    def subrecords_to_string(self):
        # Comma-separated dump of the subrecords (no trailing comma).
        s = ""
        i = 1
        for subrecord in self.subrecords:
            s += subrecord.subrecord_to_string()
            if i != len(self.subrecords):
                s += ","
            i = i + 1
        return s
    def form_bin(self):
        """Serialize this record back to bytes.

        Only the OBFE optional field is reproduced (when ``self.id`` is
        set); the SST value is written for both the SST and RST bytes.
        """
        b = b''
        for subrecord in self.subrecords:
            b += subrecord.form_bin()
        len_bin = len(b).to_bytes(2, 'little')
        rid_bin = self.rid.to_bytes(2, 'little')
        flags = 0
        id_bin = b''
        try:
            # EAFP: records parsed without OBFE simply have no ``id`` attr.
            id_bin = self.id.to_bytes(4, 'little')
            flags |= 0b00000001
        except AttributeError:
            pass
        sst_bin = self.sst.to_bytes(1, 'little')
        record = len_bin + rid_bin + flags.to_bytes(1, 'little') + id_bin + sst_bin + sst_bin + b
        return record
class EgtsSubRecord:
    """Base class for parsed EGTS subrecords; stores only the subrecord
    type (SRT) in ``self.type``."""
    def __init__(self, srt):
        self.type = srt
    def subrecord_to_string(self):
        """Return a short human-readable tag for this subrecord."""
        return "Type: {0}".format(self.type)
class EgtsSrPosData(EgtsSubRecord):
"""Contains information about EGTS_SR_POS_DATA"""
def __init__(self, **kwargs):
super().__init__(EGTS_SR_POS_DATA)
self.vld = kwargs.get('vld')
self.ntm = kwargs.get('ntm')
self.lat = kwargs.get('lat')
self.long = kwargs.get('lon')
self.speed = kwargs.get('speed')
self.dir = kwargs.get('dir')
self.busy = kwargs.get('busy')
self.src = kwargs.get('src')
self.mv = kwargs.get('mv')
self.bb = kwargs.get('bb')
@classmethod
def parse(cls, buffer):
lohs = buffer[12] >> 6 & 1
lahs = buffer[12] >> 5 & 1
mv = buffer[12] >> 4 & 1
bb = buffer[12] >> 3 & 1
if buffer[12] & 1:
vld = True
else:
vld = False
ntm = (int.from_bytes(buffer[0:4], byteorder='little') + timestamp_20100101_000000_utc) * 1000
lat = (int.from_bytes(buffer[4:8], byteorder='little') * 90 / 0xffffffff) * (1 - 2 * | |
for MACHINE's vote.
with GoldSegReviewer(self, "voa3", "user2") as (d, goldSeg):
for v in d._votesForSegments().get(goldSeg, []):
if v["annotator"].find("MACHINE") > -1:
v["annotator"] += ",user2"
# User1 votes, which automagically advances the doc out of reconciliation.
with GoldSegReviewer(self, "voa3", "user1") as (d, goldSeg):
pass
self.failUnless(len(self.workspace.getDB().reconciliationInfo(["voa3"])) == 0)
# OK, we add a THIRD user. At this point, the document is reconciled if the
# vote is 2 to 1. Let's try that case first.
self.workspace.registerUsers("user3", roles = "all")
ignore, lockId = self.workspace.openWorkspaceFile("core", "voa3", user = "user1")
self.workspace.folders["core"].saveFile(docToMangle, "voa3")
self.workspace.runFolderOperation("core", "save", basenames = ["voa3"],
lock_id = lockId, release_lock = True)
self._submitToReconciliation(basenames = ["voa3"], human_decision_user = "user3")
self._checkReconciliationInfo(self.workspace,
{"voa3": ["crossvalidation_challenge", None,
[("crossvalidation_challenge", "user1", 0),
("human_vote", "user1", 0),
("human_vote", "user2", 0),
("human_vote", "user3", 0),
("human_decision", "user3", 0)]]})
# Crossvalidation challenge.
with GoldSegReviewer(self, "voa3", "user1") as (d, goldSeg):
pass
# At this point, we have user2 and user3 vote for MACHINE's vote.
for user in ["user2", "user3"]:
with GoldSegReviewer(self, "voa3", user) as (d, goldSeg):
for v in d._votesForSegments().get(goldSeg, []):
if v["annotator"].find("MACHINE") > -1:
v["annotator"] += "," + user
# User1 doesn't have to vote: she's outvoted already. But the greedy
# advancement algorithm doesn't implement that optimization yet, because
# in the future, we may do on-demand assignment, and we won't know in
# the middle how many voters we have at the end.
self._checkReconciliationInfo(self.workspace,
{"voa3": ["human_vote", None,
[("crossvalidation_challenge", "user1", 1),
("human_vote", "user1", 0),
("human_vote", "user2", 1),
("human_vote", "user3", 1),
("human_decision", "user3", 0)]]})
# Once user1 opens the document and saves it, her preference is recorded.
# Because the next round has only one annotator, we advance into it,
# and because no new votes were added, it advances out.
with GoldSegReviewer(self, "voa3", "user1") as (d, goldSeg):
pass
self.failUnless(len(self.workspace.getDB().reconciliationInfo(["voa3"])) == 0)
# Finally, we experiment with new votes.
ignoreD, lockId = self.workspace.openWorkspaceFile("core", "voa3", user = "user1")
self.workspace.folders["core"].saveFile(docToMangle, "voa3")
self.workspace.runFolderOperation("core", "save", basenames = ["voa3"],
lock_id = lockId, release_lock = True)
self._submitToReconciliation(basenames = ["voa3"], human_decision_user = "user3")
# Crossvalidation challenge.
with GoldSegReviewer(self, "voa3", "user1") as (d, goldSeg):
pass
# user2 votes for empty, user3 votes for the machine's vote.
with GoldSegReviewer(self, "voa3", "user3") as (d, goldSeg):
for v in d._votesForSegments().get(goldSeg, []):
if v["annotator"].find("MACHINE") > -1:
v["annotator"] += ",user3"
break
self._checkReconciliationInfo(self.workspace,
{"voa3": ["human_vote", None,
[("crossvalidation_challenge", "user1", 1),
("human_vote", "user1", 0),
("human_vote", "user2", 0),
("human_vote", "user3", 1),
("human_decision", "user3", 0)]]})
with GoldSegReviewer(self, "voa3", "user2") as (d, goldSeg):
d._addVote(goldSeg, "", "user2", self.task)
# Now, user1 votes for MACHINE too.
with GoldSegReviewer(self, "voa3", "user1") as (d, goldSeg):
for v in d._votesForSegments().get(goldSeg, []):
anns = _getListValue(v, "annotator")
if "user1" in anns:
anns.remove("user1")
v["annotator"] = ",".join(anns)
if v["annotator"].find("MACHINE") > -1:
v["annotator"] += ",user1"
self._printDoc(d)
# But now, because user2 added a vote, user3 has to review again.
self._checkReconciliationInfo(self.workspace,
{"voa3": ["human_vote", None,
[("crossvalidation_challenge", "user1", 1),
("human_vote", "user1", 1),
("human_vote", "user2", 1),
("human_vote", "user3", 0),
("human_decision", "user3", 0)]]})
with GoldSegReviewer(self, "voa3", "user3") as (d, goldSeg):
pass
# But this will cause the document to escape from reconciliation, because
# it's got enough votes to reconcile without going to human decision.
# (And if it went to human decision, it would exit anyway, because
# the last reviewer is also the decider).
self.failUnless(len(self.workspace.getDB().reconciliationInfo(["voa3"])) == 0)
# Now, we do the same thing, but user1 votes for herself. The
# reviewer is user3, so it doesn't advance automatically.
ignore, lockId = self.workspace.openWorkspaceFile("core", "voa3", user = "user1")
self.workspace.folders["core"].saveFile(docToMangle, "voa3")
self.workspace.runFolderOperation("core", "save", basenames = ["voa3"],
lock_id = lockId, release_lock = True)
self._submitToReconciliation(basenames = ["voa3"], human_decision_user = "user3")
# Crossvalidation challenge.
with GoldSegReviewer(self, "voa3", "user1") as (d, goldSeg):
pass
# user2 votes for empty, user3 votes for the machine's vote.
with GoldSegReviewer(self, "voa3", "user3") as (d, goldSeg):
for v in d._votesForSegments().get(goldSeg, []):
if v["annotator"].find("MACHINE") > -1:
v["annotator"] += ",user3"
break
self._checkReconciliationInfo(self.workspace,
{"voa3": ["human_vote", None,
[("crossvalidation_challenge", "user1", 1),
("human_vote", "user1", 0),
("human_vote", "user2", 0),
("human_vote", "user3", 1),
("human_decision", "user3", 0)]]})
with GoldSegReviewer(self, "voa3", "user2") as (d, goldSeg):
d._addVote(goldSeg, "", "user2", self.task)
# At this point, user 3 has to review again.
self._checkReconciliationInfo(self.workspace,
{"voa3": ["human_vote", None,
[("crossvalidation_challenge", "user1", 1),
("human_vote", "user1", 0),
("human_vote", "user2", 1),
("human_vote", "user3", 0),
("human_decision", "user3", 0)]]})
with GoldSegReviewer(self, "voa3", "user3") as (d, goldSeg):
pass
# And user1 votes for herself.
with GoldSegReviewer(self, "voa3", "user1") as (d, goldSeg):
pass
self._checkReconciliationInfo(self.workspace,
{"voa3": ["human_decision", None,
[("crossvalidation_challenge", "user1", 1),
("human_vote", "user1", 1),
("human_vote", "user2", 1),
("human_vote", "user3", 1),
("human_decision", "user3", 0)]]})
# Now, the reviewer opens it. Nothing to do, since she's already reviewed it.
# Done.
with GoldSegReviewer(self, "voa3", "user3") as (d, goldSeg):
self.failIf(goldSeg["to_review"] == "yes")
self.failUnless(len(self.workspace.getDB().reconciliationInfo(["voa3"])) == 0)
# Now, we add a fourth user. The votes will be such that no one wins, but
# the reviewer doesn't win yet either.
self.workspace.registerUsers("user4")
ignore, lockId = self.workspace.openWorkspaceFile("core", "voa3", user = "user1")
self.workspace.folders["core"].saveFile(docToMangle, "voa3")
self.workspace.runFolderOperation("core", "save", basenames = ["voa3"],
lock_id = lockId, release_lock = True)
self._submitToReconciliation(basenames = ["voa3"], human_decision_user = "user3")
# Crossvalidation challenge.
with GoldSegReviewer(self, "voa3", "user1") as (d, goldSeg):
pass
# user3 votes for empty, user2 and user4 vote for the machine's vote.
with GoldSegReviewer(self, "voa3", "user3") as (d, goldSeg):
d._addVote(goldSeg, "", "user3", self.task)
for user in ["user2", "user4"]:
with GoldSegReviewer(self, "voa3", user) as (d, goldSeg):
for v in d._votesForSegments().get(goldSeg, []):
if v["annotator"].find("MACHINE") > -1:
v["annotator"] += "," + user
break
self._checkReconciliationInfo(self.workspace,
{"voa3": ["human_vote", None,
[("crossvalidation_challenge", "user1", 1),
("human_vote", "user1", 0),
("human_vote", "user2", 1),
("human_vote", "user3", 1),
("human_vote", "user4", 1),
("human_decision", "user3", 0)]]})
# And user1 votes for herself.
with GoldSegReviewer(self, "voa3", "user1") as (d, goldSeg):
pass
self._checkReconciliationInfo(self.workspace,
{"voa3": ["human_decision", None,
[("crossvalidation_challenge", "user1", 1),
("human_vote", "user1", 1),
("human_vote", "user2", 1),
("human_vote", "user3", 1),
("human_vote", "user4", 1),
("human_decision", "user3", 0)]]})
# Now, the reviewer opens it. There IS something to review, because the
# reviewer was outvoted by at least one other vote.
with GoldSegReviewer(self, "voa3", "user3") as (d, goldSeg):
self.failUnless(goldSeg["to_review"] == "yes")
# And once we close it, it should exit reconciliation.
self.failUnless(len(self.workspace.getDB().reconciliationInfo(["voa3"])) == 0)
class NullReconciliationTestCase(ReconciliationWorkspacePluginContextTestCase):
    """Checks that a document which is already effectively reconciled is
    ejected from the reconciliation flow immediately on submission."""
    # NOTE(review): the 'disabled' prefix keeps the unittest runner from
    # collecting this test; rename to testAlreadyReconciled to re-enable.
    def disabledTestAlreadyReconciled(self):
        # In this test, we do the basic reconciliation stuff, but
        # in a workspace.
        # So first, we insert a bunch of documents and modelbuild, to give us
        # a baseline. And add a user first, because we need one.
        self.workspace.runOperation("add_roles", ["user1"], roles = "all")
        self._importGoldStandardDocs(self.workspace, ["voa1.txt.json", "voa2.txt.json"])
        # Build a model.
        self.workspace.runFolderOperation("core", "modelbuild")
        # Insert another document.
        self._importRawDocs(self.workspace, ["voa3.txt"])
        # Autotag.
        self.workspace.runFolderOperation("core", "autotag")
        # Configure the workspace.
        self.workspace.configureReconciliation("crossvalidation_challenge", "human_vote", "human_decision")
        # Mark it partially gold: split the segments around the first content
        # annotation and mark that one segment as human gold for user1.
        docToMangle, lockId = self.workspace.openWorkspaceFile("core", "voa3", user = "user1")
        docToMangle.__class__ = TestDocument
        firstAnnot = docToMangle.orderAnnotations(self.task.getAnnotationTypesByCategory('content'))[0]
        self._splitSegmentsAfter(docToMangle, firstAnnot)
        seg = docToMangle._findSegmentIncludingIndex(firstAnnot.end, canEqual = True)
        seg["status"] = "human gold"
        seg["annotator"] = "user1"
        # Save it.
        self.workspace.folders["core"].saveFile(docToMangle, "voa3")
        self.workspace.runFolderOperation("core", "save", basenames = ["voa3"],
                                          lock_id = lockId, release_lock = True)
        # Next round of checking. voa3 is partially gold.
        self._checkBasenameInfo(self.workspace,
                                {("voa3", "voa3"): ["partially gold", None, None]})
        # Now, submit it to reconciliation. NOTE: later, check rollback with a bad user.
        self._submitToReconciliation(basenames = ["voa3"], human_decision_user = "user1")
        # Now, the only document should be completely reconciled. And in fact, it should
        # have been ejected from reconciliation.
        self.failUnless(len(self.workspace.getDB().reconciliationInfo(["voa3"])) == 0)
        # And, in fact, the core document should now have no gold
        # segments, but rather either non-gold or reconciled.
        docToMangle = self.workspace.folders["core"].openFile("voa3")
        for seg in docToMangle.getAnnotations(["SEGMENT"]):
            self.failUnless(seg["status"] in ("non-gold", "reconciled"), "seg status is %s" % seg["status"])
class UserTestCase(ReconciliationWorkspacePluginContextTestCase):
def disabledTestRoles(self):
phases = findReconciliationPhases()
# Register a user, with default roles.
d = self.workspace.getDB().listUsersAndRoles()
self.failUnless(set(d["user1"]) == set(["core_annotation"] + [p.name for p in phases.values() if p.roleIncludedInDefault]))
self.workspace.registerUsers("user2", roles = "all")
d = self.workspace.getDB().listUsersAndRoles()
self.failUnless(set(d["user2"]) == set(["core_annotation"] + phases.keys()))
# This should fail because there's already a user registered.
try:
self.workspace.registerUsers("user2")
self.fail("reregister of user2 should have failed")
except MAT.Workspace.WorkspaceError, e:
self.failUnless(str(e).find("already registered") > -1)
# Add another user, with a | |
from scipy.stats import kurtosis, skew
from rackio_AI.utils.utils_core import Utils
import pywt
import numpy as np
import pandas as pd
from rackio_AI.decorators.wavelets import WaveletDeco
from easy_deco.progress_bar import ProgressBar
from easy_deco.del_temp_attr import set_to_methods, del_temp_attr
# @set_to_methods(del_temp_attr)
class StatisticalsFeatures:
"""
When we consider the original discretized time domain signal , some basic discriminative
information can be extracted in form of statistical parameters from the $n$ samples
$s_{1},\cdots s_{n}$
"""
_instances = list()
def mean(
self,
s,
axis=None,
dtype=None,
out=None,
keepdims=np._NoValue
):
r"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
**Parameters**
* **s:** (2d array_like) Array containing numbers whose mean is desired. If `s` is not an
array, a conversion is attempted.
* **axis:** (None or int or tuple of ints, optional) Axis or axes along which the means are computed.
The default is to compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes, instead of a single axis or all the
axes as before.
* **dtype:** (data-type, optional) Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the input dtype.
* **out:** (ndarray, optional) Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if
necessary.
* **keepdims:** (bool, optional) If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option, the result will broadcast correctly against the
input array. If the default value is passed, then `keepdims` will not be passed through to the `mean` method
of sub-classes of `ndarray`, however any non-default value will be. If the sub-class' method does not implement
`keepdims` any exceptions will be raised.
**Returns**
* **m:** (ndarray, see dtype parameter above) If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
## Snippet code
```python
>>> from rackio_AI import RackioAIFE
>>> feature_extraction = RackioAIFE()
>>> s = np.array([[1, 2], [3, 4]])
>>> feature_extraction.stats.mean(s)
2.5
>>> feature_extraction.stats.mean(s, axis=0)
array([2., 3.])
>>> feature_extraction.stats.mean(s, axis=1)
array([1.5, 3.5])
```
"""
s = Utils.check_dataset_shape(s)
return np.mean(s, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def median(
    self,
    s,
    axis=None,
    out=None,
    overwrite_input=False,
    keepdims=False
):
    r"""
    Compute the median along the specified axis, after normalizing the
    dataset shape with ``Utils.check_dataset_shape``.

    **Parameters**

    * **s:** (2d array_like) Input array or object convertible to an array.
    * **axis:** ({int, sequence of int, None}, optional) Axis or axes along
    which the medians are computed; ``None`` (default) computes the median of
    the flattened array.
    * **out:** (ndarray, optional) Alternative output array for the result. It
    must have the same shape and buffer length as the expected output; its type
    is cast if necessary.
    * **overwrite_input:** (bool, optional) If True, allow ``median`` to reuse
    the memory of the input array, which is left modified (probably partially
    sorted) afterwards. An error is raised if ``s`` is not already an ndarray.
    * **keepdims:** (bool, optional) If True, reduced axes are kept as size-one
    dimensions so the result broadcasts against the original array.

    **Returns**

    * **median:** (ndarray) A new array holding the result, or a reference to
    ``out`` when it is given. Integer/low-precision float input yields
    ``np.float64`` output.

    ## Notes

    Given a vector $V$ of length $N$, the median of $V$ is the middle value of
    a sorted copy of $V$ when $N$ is odd, and the average of the two middle
    values when $N$ is even.

    ## Snippet code

    ```python
    >>> from rackio_AI import RackioAIFE
    >>> feature_extraction = RackioAIFE()
    >>> s = np.array([[10, 7, 4], [3, 2, 1]])
    >>> feature_extraction.stats.median(s)
    3.5
    >>> feature_extraction.stats.median(s, axis=0)
    array([6.5, 4.5, 2.5])
    >>> feature_extraction.stats.median(s, axis=1)
    array([7., 2.])
    >>> b = s.copy()
    >>> feature_extraction.stats.median(b, axis=1, overwrite_input=True)
    array([7., 2.])
    >>> assert not np.all(s==b)
    ```
    """
    dataset = Utils.check_dataset_shape(s)
    median_options = {
        "axis": axis,
        "out": out,
        "overwrite_input": overwrite_input,
        "keepdims": keepdims,
    }
    return np.median(dataset, **median_options)
def kurt(
    self,
    s,
    axis: int = 0,
    fisher: bool = True,
    bias: bool = True,
    nan_policy: str = 'propagate'
):
    r"""
    Compute the kurtosis (Fisher or Pearson) of a dataset $s$.

    Kurtosis is the fourth central moment divided by the square of the
    variance. With Fisher's definition, 3.0 is subtracted from the result so
    that a normal distribution scores 0.0. With ``bias=False`` the estimate is
    corrected for statistical bias using k statistics.

    **Parameters**

    * **s:** (2d array) Data for which the kurtosis is calculated.
    * **axis:** (int or None) Axis along which the kurtosis is calculated;
    default 0. ``None`` computes over the whole array dataset.
    * **fisher:** (bool) If True, use Fisher's definition (normal ==> 0.0);
    otherwise Pearson's definition (normal ==> 3.0).
    * **bias:** (bool) If False, correct the calculations for statistical bias.
    * **nan_policy:** ({'propagate', 'raise', 'omit'}) How to handle NaN input:
    'propagate' returns NaN, 'raise' throws an error, 'omit' ignores NaN
    values. Default is 'propagate'.

    **Returns**

    * **kurtosis:** (array 1xcols_dataset) The kurtosis of values along an
    axis. If all values are equal, returns -3 for Fisher's definition and 0
    for Pearson's definition.

    ## Snippet Code

    ```python
    >>> from scipy.stats import norm
    >>> from rackio_AI import RackioAIFE
    >>> feature_extraction = RackioAIFE()
    >>> s = norm.rvs(size=1000, random_state=3)
    >>> feature_extraction.stats.kurt(s)
    array([-0.06928694])
    >>> s = norm.rvs(size=(1000,2), random_state=3)
    >>> feature_extraction.stats.kurt(s)
    array([-0.00560946, -0.1115389 ])
    ```
    """
    dataset = Utils.check_dataset_shape(s)
    return kurtosis(
        dataset,
        axis=axis,
        fisher=fisher,
        bias=bias,
        nan_policy=nan_policy
    )
def std(
self,
s,
axis=None,
dtype=None,
out=None,
ddof=0,
keepdims=np._NoValue
):
r"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
**Parameters**
* **s:** (2d array_like) Calculate the standard deviation of these values.
* **axis:** (None or int or tuple of ints, optional) Axis or axes along which the standard deviation is computed.
The default is to compute the standard deviation of the flattened array.
If this is a tuple of ints, a standard deviation is performed over multiple axes, instead of a single
axis or all the axes as before.
* **dtype:** (dtype, optional) Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is the same as the array type.
* **out:** (ndarray, optional) Alternative output array in which to place | |
= indices[None, ...]
return bcoo_todense(data, indices, shape=(max(data.shape[0], indices.shape[0]), *shape)), 0
# Register bcoo_todense with JAX's transform machinery: a JVP rule for the
# data argument only (None: indices are not differentiable), a transpose rule
# for reverse-mode AD, a vmap batching rule, and XLA lowering built from the
# Python reference implementation.
ad.defjvp(bcoo_todense_p, _bcoo_todense_jvp, None)
ad.primitive_transposes[bcoo_todense_p] = _bcoo_todense_transpose
batching.primitive_batchers[bcoo_todense_p] = _bcoo_todense_batching_rule
xla.translations[bcoo_todense_p] = xla.lower_fun(
    _bcoo_todense_impl, multiple_results=False)
#--------------------------------------------------------------------
# bcoo_fromdense
# Primitive converting a dense array into BCOO (data, indices) buffers.
bcoo_fromdense_p = core.Primitive('bcoo_fromdense')
# The primitive returns a (data, indices) pair, hence multiple_results.
bcoo_fromdense_p.multiple_results = True
def bcoo_fromdense(mat, *, nse=None, n_batch=0, n_dense=0, index_dtype=jnp.int32):
  """Create COO-format sparse matrix from a dense matrix.

  Args:
    mat : array to be converted to COO, with ``ndim = n_batch + n_sparse + n_dense``.
    nse : number of specified elements in each batch; computed from ``mat``
      when not given (it must then be concrete, not a traced value).
    n_batch : number of batch dimensions (default: 0)
    n_dense : number of dense dimensions (default: 0)
    index_dtype : dtype of sparse indices (default: int32)

  Returns:
    data : array of shape ``mat.shape[:n_batch] + (nse,) + mat.shape[mat.ndim - n_dense:]``
      and dtype ``mat.dtype``
    indices : array of shape ``mat.shape[:n_batch] + (nse, n_sparse)``
      (trailing dims match the layout produced by the impl's moveaxis)
  """
  mat = jnp.asarray(mat)
  if nse is None:
    nse = _bcoo_nse(mat, n_batch, n_dense)
  # nse determines output shapes, so it must be a concrete integer.
  nse = core.concrete_or_error(operator.index, nse, "nse argument of bcoo_fromdense")
  return bcoo_fromdense_p.bind(mat, nse=nse, n_batch=n_batch, n_dense=n_dense,
                               index_dtype=index_dtype)
@bcoo_fromdense_p.def_impl
def _bcoo_fromdense_impl(mat, *, nse, n_batch, n_dense, index_dtype):
  """Reference implementation: locate specified elements and gather their values."""
  mat = jnp.asarray(mat)
  # An element is "specified" if any entry of its trailing dense block is nonzero.
  mask = (mat != 0)
  if n_dense > 0:
    mask = mask.any([-(i + 1) for i in range(n_dense)])
  # Fixed-size nonzero keeps result shapes static under jit; a 0-d input
  # (no sparse dimensions) yields an empty tuple of index arrays.
  nonzero = lambda a: jnp.nonzero(a, size=nse) if a.ndim else ()
  # Map the nonzero search over each batch dimension in turn.
  for _ in range(n_batch):
    nonzero = vmap(nonzero, 0)
  indices = nonzero(mask)
  if not indices:
    # n_sparse == 0: indices have an empty trailing sparse axis.
    indices = jnp.zeros(mask.shape[:n_batch] + (nse, 0), index_dtype)
  else:
    # Stack per-dimension index arrays from (n_sparse, *batch, nse) into
    # batch + (nse, n_sparse) layout.
    indices = jnp.moveaxis(jnp.array(indices, index_dtype), 0, n_batch + 1)
  data = bcoo_extract(indices, mat)
  # Entries beyond each batch's true nonzero count are padding from the
  # fixed-size nonzero; zero out their data values.
  true_nonzeros = jnp.arange(nse) < mask.sum(list(range(n_batch, mask.ndim)))[..., None]
  # Broadcast the validity mask across any trailing dense dimensions.
  true_nonzeros = true_nonzeros[(n_batch + 1) * (slice(None),) + n_dense * (None,)]
  data = jnp.where(true_nonzeros, data, 0)
  return data, indices
@bcoo_fromdense_p.def_abstract_eval
def _bcoo_fromdense_abstract_eval(mat, *, nse, n_batch, n_dense, index_dtype):
  """Abstract evaluation: output shapes/dtypes of bcoo_fromdense.

  Must agree with _bcoo_fromdense_impl, which builds indices with trailing
  dims ``(nse, n_sparse)`` (see its moveaxis/zeros), and with
  _bcoo_extract_abstract_eval, which reads ``nse = indices.shape[-2]``.
  """
  n_sparse = mat.ndim - n_batch - n_dense
  data_shape = mat.shape[:n_batch] + (nse,) + mat.shape[n_batch + n_sparse:]
  # BUG FIX: the trailing index dims are (nse, n_sparse), not (n_sparse, nse);
  # the previous ordering contradicted the impl's actual output shape.
  index_shape = mat.shape[:n_batch] + (nse, n_sparse)
  return core.ShapedArray(data_shape, mat.dtype), core.ShapedArray(index_shape, index_dtype)
def _bcoo_fromdense_jvp(primals, tangents, *, nse, n_batch, n_dense, index_dtype):
  """JVP rule: the data tangent is the dense tangent sampled at the primal
  output's sparse indices; index tangents are symbolically zero."""
  mat, = primals
  mat_dot, = tangents
  data, indices = bcoo_fromdense(
      mat, nse=nse, n_batch=n_batch, n_dense=n_dense, index_dtype=index_dtype)
  if type(mat_dot) is ad.Zero:
    data_dot = ad.Zero.from_value(data)
  else:
    data_dot = bcoo_extract(indices, mat_dot)
  return (data, indices), (data_dot, ad.Zero.from_value(indices))
def _bcoo_fromdense_transpose(ct, M, *, nse, n_batch, n_dense, index_dtype):
  """Transpose rule for bcoo_fromdense: scatter the (data, indices) cotangent
  back into a dense array of M's shape.

  Raises ValueError when the cotangent w.r.t. the indices is requested, since
  sparse indices are not differentiable.
  """
  data, indices = ct
  # BUG FIX: this previously read `n_sparse = M.ndim = n_batch - n_dense`, a
  # chained assignment that clobbers M.ndim; the sparse rank is ndim minus the
  # batch and dense dims (cf. _bcoo_fromdense_abstract_eval at L? same formula).
  n_sparse = M.ndim - n_batch - n_dense
  assert data.shape == M.shape[:n_batch] + (nse,) + M.shape[n_batch + n_sparse:]
  # BUG FIX: indices carry trailing dims (nse, n_sparse), matching the layout
  # produced by _bcoo_fromdense_impl (moveaxis to position n_batch + 1).
  assert indices.shape == M.shape[:n_batch] + (nse, n_sparse)
  assert indices.dtype == index_dtype
  if isinstance(indices, ad.Zero):
    raise ValueError("Cannot transpose with respect to sparse indices")
  assert ad.is_undefined_primal(M)
  return bcoo_todense(data, indices, shape=M.aval.shape)
def _bcoo_fromdense_batching_rule(batched_args, batch_dims, *, nse, n_batch, n_dense, index_dtype):
  """Batching rule: absorb the mapped leading axis as one extra batch dimension."""
  dense_mat, = batched_args
  if batch_dims != (0,):
    raise NotImplementedError(f"batch_dims={batch_dims}")
  outputs = bcoo_fromdense(dense_mat, nse=nse, n_batch=n_batch + 1,
                           n_dense=n_dense, index_dtype=index_dtype)
  # Both outputs (data, indices) carry the mapped axis at position 0.
  return outputs, (0, 0)
# Register bcoo_fromdense's AD rules, vmap batching rule, and XLA lowering
# (lowered from the Python reference implementation; two results).
ad.primitive_jvps[bcoo_fromdense_p] = _bcoo_fromdense_jvp
ad.primitive_transposes[bcoo_fromdense_p] = _bcoo_fromdense_transpose
batching.primitive_batchers[bcoo_fromdense_p] = _bcoo_fromdense_batching_rule
xla.translations[bcoo_fromdense_p] = xla.lower_fun(
    _bcoo_fromdense_impl, multiple_results=True)
#----------------------------------------------------------------------
# bcoo_extract
# Gather values out of a dense array at the positions named by BCOO indices.
bcoo_extract_p = core.Primitive('bcoo_extract')
def bcoo_extract(indices, mat):
  """Extract BCOO values from dense matrix `mat` at given BCOO indices."""
  return bcoo_extract_p.bind(indices, mat)
@bcoo_extract_p.def_impl
def _bcoo_extract_impl(indices, mat):
  """Reference implementation: advanced-index `mat` with the sparse indices."""
  n_batch, n_sparse, _ = _validate_bcoo(None, indices, mat.shape)
  batch_slices = tuple(slice(s) for s in mat.shape[:n_batch])
  # indices[..., :, i] is the nse-long index vector for sparse dimension i;
  # the mgrid over the batch slices makes each batch element pick from its
  # own rows.
  sparse_ind = tuple(indices[tuple(np.mgrid[batch_slices]) + (slice(None), i)] for i in range(n_sparse))
  # Batch grid with a trailing length-1 axis so it broadcasts against the
  # nse axis of the sparse index arrays.
  batch_ind = tuple(np.mgrid[batch_slices + (slice(1),)])[:-1]
  if not sparse_ind + batch_ind:
    # No batch and no sparse axes to gather over: just prepend a length-1 axis.
    return mat[None]
  return mat[batch_ind + sparse_ind]
@bcoo_extract_p.def_abstract_eval
def _bcoo_extract_abstract_eval(indices, mat):
  """Shape/dtype of the extracted buffer: batch dims + (nse,) + dense dims."""
  n_batch, _, n_dense = _validate_bcoo(None, indices, mat.shape)
  nse = indices.shape[-2]
  batch_shape = mat.shape[:n_batch]
  dense_shape = mat.shape[mat.ndim - n_dense:]
  return core.ShapedArray(batch_shape + (nse,) + dense_shape, mat.dtype)
def _bcoo_extract_jvp(mat_dot, indices, mat):
  """JVP w.r.t. `mat`: extraction is linear, so extract the tangent itself."""
  assert mat_dot.shape == mat.shape
  return bcoo_extract(indices, mat_dot)
def _bcoo_extract_transpose(ct, indices, mat):
  """Transpose rule: scatter the extracted cotangent back to a dense array.

  Returns one cotangent entry per primal argument; the indices slot carries
  the indices themselves since they are not differentiable.
  """
  assert ad.is_undefined_primal(mat)
  if ad.is_undefined_primal(indices):
    raise ValueError("Cannot transpose with respect to sparse indices")
  assert ct.dtype == mat.aval.dtype
  return indices, bcoo_todense(ct, indices, shape=mat.aval.shape)
def _bcoo_extract_batching_rule(batched_args, batch_dims):
  """Batching rule: align the mapped axis of `indices` and `mat`, then treat
  it as one more batch dimension of the extract."""
  indices, mat = batched_args
  assert any(b is not None for b in batch_dims)
  if batch_dims[0] is None:
    # Only `mat` is mapped: broadcast indices along the mapped axis.
    bdim = batch_dims[1]
    indices = lax.expand_dims(indices, (bdim,))
  elif batch_dims[1] is None:
    # Only `indices` is mapped: broadcast mat along the mapped axis.
    bdim = batch_dims[0]
    mat = lax.expand_dims(mat, (bdim,))
  else:
    # Both mapped: the axes must coincide.
    assert batch_dims[0] == batch_dims[1]
    bdim = batch_dims[0]
  # The mapped axis must fall among the BCOO batch dimensions (all axes
  # before the trailing two index axes).
  n_batch = indices.ndim - 2
  if bdim >= n_batch:
    raise ValueError(f"batch_dims={batch_dims} out of range for indices with n_batch={n_batch}")
  return bcoo_extract(indices, mat), bdim
# Register bcoo_extract: JVP only w.r.t. `mat` (None: indices are not
# differentiable), transpose, vmap batching, and XLA lowering.
ad.defjvp(bcoo_extract_p, None, _bcoo_extract_jvp)
ad.primitive_transposes[bcoo_extract_p] = _bcoo_extract_transpose
batching.primitive_batchers[bcoo_extract_p] = _bcoo_extract_batching_rule
xla.translations[bcoo_extract_p] = xla.lower_fun(
    _bcoo_extract_impl, multiple_results=False)
#----------------------------------------------------------------------
# bcoo_transpose
# transpose of a BCOO array
bcoo_transpose_p = core.Primitive('bcoo_transpose')
# The primitive returns a (data, indices) pair, hence multiple_results.
bcoo_transpose_p.multiple_results = True
def bcoo_transpose(data, indices, *, permutation, shape):
  """Transpose a BCOO array given as (data, indices) buffers.

  An identity permutation short-circuits and returns the inputs unchanged;
  otherwise the work is delegated to the bcoo_transpose primitive.
  """
  if tuple(permutation) != tuple(range(len(shape))):
    return bcoo_transpose_p.bind(data, indices, permutation=permutation, shape=shape)
  return data, indices
def _validate_permutation(data, indices, permutation, shape):
if not isinstance(permutation, (tuple, list, np.ndarray)):
raise TypeError(f"transpose permutation must be a tuple/list/ndarray, got {type(permutation)}.")
if tuple(sorted(permutation)) != tuple(range(len(shape))):
raise TypeError("transpose permutation isn't a permutation of operand dimensions, "
f"got permutation {permutation} for shape {shape}.")
n_batch, n_sparse, n_dense = _validate_bcoo(data, indices, shape)
batch_perm = permutation[:n_batch]
sparse_perm = [p - n_batch for p in permutation[n_batch: n_batch + n_sparse]]
dense_perm = [p - n_sparse - n_batch for p in permutation[n_batch + n_sparse:]]
if n_batch and tuple(sorted(batch_perm)) != tuple(range(n_batch)):
raise NotImplementedError("transpose permutation cannot permute batch axes with non-batch axes; "
f"got permutation {permutation}, with n_batch={n_batch}.")
if n_dense and tuple(sorted(dense_perm)) != tuple(range(n_dense)):
raise NotImplementedError("transpose permutation cannot permute dense axes with non-dense axes; "
f"got permutation {permutation}, with n_dense={n_dense}.")
return batch_perm, sparse_perm, dense_perm
@bcoo_transpose_p.def_impl
def _bcoo_transpose_impl(data, indices, *, permutation: Sequence[int], shape: Tuple[int]):
  """Reference implementation: permute index columns and batch/dense axes."""
  batch_perm, sparse_perm, dense_perm = _validate_permutation(data, indices, permutation, shape)
  n_batch = len(batch_perm)
  # Reorder the sparse-dimension columns (last axis), then permute the batch
  # axes; the trailing two index axes keep their positions.
  indices = indices[..., sparse_perm].transpose(*batch_perm, n_batch, n_batch + 1)
  # data keeps its nse axis at position n_batch and permutes batch and dense axes.
  data = data.transpose(*batch_perm, n_batch, *(d + n_batch + 1 for d in dense_perm))
  return data, indices
@bcoo_transpose_p.def_abstract_eval
def _bcoo_transpose_abstract_eval(data, indices, *, permutation: Sequence[int], shape: Tuple[int]):
  """Abstract evaluation: permute the shapes the same way the impl permutes axes."""
  batch_perm, _, dense_perm = _validate_permutation(data, indices, permutation, shape)
  n_batch = len(batch_perm)
  # Fancy-index the shape tuples with the same axis orders used by the impl.
  indices_shape = np.array(indices.shape)[[*batch_perm, n_batch, n_batch + 1]]
  data_shape = np.array(data.shape)[[*batch_perm, n_batch, *(d + n_batch + 1 for d in dense_perm)]]
  return core.ShapedArray(data_shape, data.dtype), core.ShapedArray(indices_shape, indices.dtype)
def _bcoo_transpose_jvp(primals, tangents, *, permutation, shape):
  """JVP rule: the data tangent is transposed with the same permutation;
  index tangents are symbolically zero."""
  data, indices = primals
  data_dot, _ = tangents
  out = bcoo_transpose(data, indices, permutation=permutation, shape=shape)
  tangent_data, _ = bcoo_transpose(data_dot, indices, permutation=permutation, shape=shape)
  return out, (tangent_data, ad.Zero.from_value(indices))
def _bcoo_transpose_transpose(ct, data, indices, *, permutation, shape):
  """Transpose rule: apply the inverse permutation to the data cotangent."""
  data_ct, indices_ct = ct
  assert isinstance(indices_ct, ad.Zero)
  if ad.is_undefined_primal(indices):
    raise ValueError("Cannot transpose with respect to sparse indices")
  assert data_ct.dtype == data.aval.dtype
  # The cotangent lives in the transposed space; undo the permutation via its
  # argsort (the inverse permutation).
  ct_shape = tuple(shape[p] for p in permutation)
  rev_permutation = np.argsort(permutation)
  # TODO(jakevdp) avoid dummy indices?
  # Only the data needs to move; an all-zero index array of the right trailing
  # shape satisfies the primitive's signature.
  dummy_indices = jnp.zeros([1 for i in range(indices.ndim - 2)] + list(indices.shape[-2:]), dtype=int)
  data_trans, _ = bcoo_transpose(data_ct, dummy_indices, permutation=rev_permutation, shape=ct_shape)
  return data_trans, indices_ct
def _bcoo_transpose_batch_rule(batched_args, batch_dims, *, permutation, shape):
  """Batching rule: make the mapped axis a leading batch dimension and shift
  the permutation by one."""
  data, indices = batched_args
  batch_dims = list(batch_dims)
  # Size of the mapped axis, taken from whichever operand is actually mapped.
  batch_size = max(0 if dim is None else arg.shape[dim]
                   for arg, dim in zip(batched_args, batch_dims))
  # Give both operands a leading mapped axis (broadcast the unmapped one).
  if batch_dims[0] is None:
    data = data[None]
  else:
    assert batch_dims[0] == 0
  if batch_dims[1] is None:
    indices = indices[None]
  else:
    assert batch_dims[1] == 0
  batched_shape = (batch_size, *shape)
  # Axis 0 stays put; every original axis index moves up by one.
  batched_permutation = (0, *(p + 1 for p in permutation))
  data, indices = bcoo_transpose(data, indices, permutation=batched_permutation, shape=batched_shape)
  # Drop the broadcast axis again for operands that were not mapped.
  if batch_dims[0] is None:
    data = data[0]
  if batch_dims[1] is None:
    indices = indices[0]
  return (data, indices), batch_dims
# Register bcoo_transpose's AD rules, vmap batching rule, and XLA lowering
# (two results: data and indices).
ad.primitive_jvps[bcoo_transpose_p] = _bcoo_transpose_jvp
ad.primitive_transposes[bcoo_transpose_p] = _bcoo_transpose_transpose
batching.primitive_batchers[bcoo_transpose_p] = _bcoo_transpose_batch_rule
xla.translations[bcoo_transpose_p] = xla.lower_fun(
    _bcoo_transpose_impl, multiple_results=True)
#----------------------------------------------------------------------
# bcoo_dot_general
# (batched) general dot product of a BCOO sparse ND array and a dense ND array,
# returning a dense ND array.
bcoo_dot_general_p = core.Primitive('bcoo_dot_general')
def bcoo_dot_general(lhs_data, lhs_indices, rhs, *, dimension_numbers, lhs_shape):
  """dot_general with a BCOO lhs given as (data, indices) and a dense rhs.

  `dimension_numbers` has the lax.dot_general structure
  ``((lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch))``.
  """
  return bcoo_dot_general_p.bind(jnp.asarray(lhs_data), jnp.asarray(lhs_indices), jnp.asarray(rhs),
                                 dimension_numbers=dimension_numbers, lhs_shape=tuple(lhs_shape))
def bcoo_rdot_general(lhs, rhs_data, rhs_indices, *, dimension_numbers, rhs_shape):
  """dot_general with a dense lhs and a BCOO rhs.

  Implemented by swapping the operands into bcoo_dot_general (reversing each
  pair of dimension numbers) and transposing the result back.
  """
  # TODO(jakevdp): perhaps this should be part of the bcoo_dot_general primitive?
  result = bcoo_dot_general(rhs_data, rhs_indices, lhs, lhs_shape=rhs_shape,
                            dimension_numbers=[d[::-1] for d in dimension_numbers])
  n_contract, n_batch = (len(d[0]) for d in dimension_numbers)
  # Rotate the non-batch output dimensions so they match the axis order an
  # unswapped dot_general would have produced.
  n_swap = len(rhs_shape) - n_contract
  permutation = tuple([*range(n_batch), *range(n_swap, result.ndim), *range(n_batch, n_swap)])
  return lax.transpose(result, permutation)
@bcoo_dot_general_p.def_impl
def _bcoo_dot_general_impl(lhs_data, lhs_indices, rhs, *, dimension_numbers, lhs_shape):
lhs_data = jnp.asarray(lhs_data)
lhs_indices = jnp.asarray(lhs_indices)
rhs = jnp.asarray(rhs)
# Validate all inputs via abstract_eval
out_aval = _bcoo_dot_general_abstract_eval(lhs_data.aval, lhs_indices.aval, rhs.aval,
dimension_numbers=dimension_numbers,
lhs_shape=lhs_shape)
(lhs_contracting, rhs_contracting) , (lhs_batch, rhs_batch) = dimension_numbers
n_sparse = lhs_indices.shape[-1]
n_batch = lhs_indices.ndim - 2
# Move lhs batch dimensions to the front
if lhs_batch:
perm = list(lhs_batch) + remaining(range(n_batch), lhs_batch)
lhs_data = lhs_data.transpose(perm + list(range(n_batch, lhs_data.ndim)))
lhs_indices = lhs_indices.transpose(perm + list(range(n_batch, lhs_indices.ndim)))
# Move lhs contracting dimensions to the front of sparse dims, in order
n_contracting = len(lhs_contracting)
lhs_contracting = [d - n_batch for d in lhs_contracting]
perm = list(lhs_contracting) + remaining(range(n_sparse), lhs_contracting)
lhs_indices = lhs_indices[..., jnp.array(perm)]
# Move rhs batch dimensions then contracting dimensions to the front, in order
perm = (list(rhs_batch) + list(rhs_contracting) +
remaining(range(rhs.ndim), rhs_batch, rhs_contracting))
rhs = rhs.transpose(perm)
out_array = jnp.zeros(out_aval.shape, out_aval.dtype)
def result(out_array, lhs_data, lhs_indices, rhs):
idx = tuple(lhs_indices.T)
idx_right, idx_out = idx[:n_contracting], idx[n_contracting:]
ctc = [0] if n_contracting else []
prod = lax.dot_general(lhs_data, rhs[idx_right], (([], []), (ctc, ctc)))
return | |
item).
We also need to allow for special words which we want to assign
specific tags (which may differ from those used in the
training/testing corpora, e.g. the decision to just tag the word "to"
with its own unique label).
>>> specials={"to":"TO", "that": "THAT"}
Make a tagger using the data in BNC/TRAINING
>>> tagger = TAGGER(reader(os.path.join(BNC, "TRAINING"), lambda w: BNCTaggedWordReader(w, specials=specials)))
Apply this tagger to the data in BNC/TESTING
>>> tagged = (tagger(sentence) for sentence in reader(os.path.join(BNC, "TESTING"), BNCSentenceReader))
(or tagged = list(tagger(sentence) for sentence in reader(os.path.join(BNC, "TESTING"), BNCSentenceReader))
if you want to go over the output more than once, i.e. solidify the generator into a fixed list)
And also convert the data in BNC/TESTING into a Gold Standard test set
>>> goldstandard = reader2goldstandard(os.path.join(BNC, "TESTING"), specials={"to":"TO", "that": "THAT"})
And now compare them
>>> score, confusion = compareTaggers(tagged, goldstandard, N=500)
Is the tagger any good? You should always look at the *kinds* of
mistake that a tagger makes as well as the *number* of mistakes it
makes before deciding whether to use it. Some mistakes don't really
matter -- e.g. most of the time it doesn't matter whether something is
tagged as an adjective or a noun, because nouns can function perfectly
well as noun modifiers. It doesn't matter whether "expert" is tagged a
noun or an adjective in
While numbers of new AIDS cases reported officially each month
have remained relatively steady , there has been a big increase in
those needing expert medical and nursing advice at home with a
24-hour on call back up .
because the next stage of processing (e.g. the regex-based parser in
regexparser.py) will almost certainly allow nouns to be used as
noun-modifiers, so "expert medical advice" will be treated as medical
advice provided by experts no matter whether "expert" is tagged as a
noun or as an adjective. So if all the mistakes that your tagger made
were about labelling things that are to be treated as noun modifiers
as nouns when they should be adjectives then it would actually be
performing perfectly as far as the next stage of processing is
concerned.
compareTaggers returns a confusion matrix as well as the raw counts to
help you look at this.
>>> score, confusion = compareTaggers(tagged, goldstandard, N=5000)
The score gives you an overall percentage of places where the output
of tagger matches the Gold Standard:
>>> print(score)
Total words 5019, tagger agrees with Gold Standard on 4815: percentage agreement 0.959
What are the 4.1% cases that the tagger gets wrong like? Do they
matter? Is there anything we can do about them?
The first tool for investigating this is the confusion matrix:
>>> showConfusion(confusion)
AJ [(298, 'AJ'), (17, 'NN'), (16, 'VV'), (1, 'PR'), (1, 'AV')]
AT [(324, 'AT')]
AV [(222, 'AV'), (4, 'NN'), (3, 'PR'), (3, 'CJ'), (3, 'AJ'), (2, 'DT'), (1, 'VV')]
CJ [(194, 'CJ'), (7, 'PR'), (2, 'AV'), (1, 'VV'), (1, 'PU')]
CR [(120, 'CR'), (2, 'NN'), (1, 'PU'), (1, 'PN')]
DP [(47, 'DP')]
DT [(102, 'DT'), (8, 'AV'), (3, 'AJ')]
EX [(9, 'EX')]
IT [(1, 'IT')]
NN [(1103, 'NN'), (17, 'NP'), (8, 'VV'), (3, 'PU'), (3, 'CR'), (3, 'AV')]
NP [(299, 'NP'), (8, 'NN'), (2, 'UN'), (1, 'PU'), (1, 'CR'), (1, 'AT')]
OR [(14, 'OR'), (4, 'UN'), (1, 'NN')]
PN [(133, 'PN'), (2, 'CR')]
PO [(20, 'PO'), (2, 'VB')]
PR [(521, 'PR'), (3, 'AV'), (2, 'CJ')]
PU [(547, 'PU')]
THAT [(18, 'THAT')]
TO [(135, 'TO')]
UN [(11, 'NN'), (5, 'CR'), (2, 'NP'), (1, 'PU'), (1, 'PR'), (1, 'AJ')]
VB [(166, 'VB'), (1, 'NN')]
VD [(20, 'VD')]
VH [(54, 'VH')]
VM [(53, 'VM'), (1, 'VV')]
VV [(406, 'VV'), (21, 'NN'), (7, 'AJ')]
XX [(20, 'XX')]
ZZ [(4, 'ZZ')]
We see that the three biggest errors are labelling a verb as a noun
(VV [(406, 'VV'), (21, 'NN'), ...]), labelling an adjective as a noun
(AJ [(298, 'AJ'), (17, 'NN'), ...]) and labelling a noun as a proper
name (NN [(1103, 'NN'), (17, 'NP'), ...]). As we've just seen, it may
be that these errors don't really matter, but it's also worth checking
to see if they really are errors. The Gold Standard will generally
have been constructed by human annotators, and *human annotators can
also make mistakes*.
We can use the confusion matrix to see what words have been given a
specific tag in the Gold Standard and a different one by the tagger,
e.g. which words are tagged as verbs in the Gold Standard and as nouns
by the tagger:
>>> confusion["VV"]["NN"]
['JUMBLE', 'work', 'Shopping', 'Daysitting', 'care', 'Nurse', 'care', 'pedal', 'raising', 'experience', 'support', 'Volunteer', 'spending', 'volunteer', 'Issue', 'caring', 'help', 'respite', 'stay', 'planning', 'NOTICE']
findInstances will then find all the instances of a word that has been
given one tag by the tagger and another in the Gold Standard, e.g.
>>> findInstances("nursing", "NN", "VV", tagged, goldstandard)
Catherine qualified in general nursing at Dr Steeven 's Hospital , Dublin .
Catherine:NP qualified:VV in:PR general:AJ nursing:NN at:PR Dr:NP Steeven:NP 's:PO Hospital:NN ,:PU Dublin:NP .:PU
Catherine:NP qualified:VV in:PR general:AJ nursing:VV at:PR Dr:NP Steeven:NP 's:PO Hospital:NN ,:PU Dublin:NP .:PU
We are the largest independent provider of professional home care in the capital giving pain control , nursing and medical advice , 24 hour on call , emotional support and practical volunteer help , including nightsitting .
We:PN are:VB the:AT largest:AJ independent:AJ provider:NN of:PR professional:AJ home:NN care:NN in:PR the:AT capital:NN giving:VV pain:NN control:NN ,:PU nursing:NN and:CJ medical:AJ advice:NN ,:PU 24:CR hour:NN on:PR call:NN ,:PU emotional:AJ support:NN and:CJ practical:AJ volunteer:NN help:NN ,:PU including:PR nightsitting:VV .:PU
We:PN are:VB the:AT largest:AJ independent:AJ provider:NN of:PR professional:AJ home:NN care:NN in:PR the:AT capital:NN giving:VV pain:NN control:NN ,:PU nursing:VV and:CJ medical:AJ advice:NN ,:PU 24:CR hour:NN on:PR call:NN ,:PU emotional:AJ support:NN and:CJ practical:AJ volunteer:NN help:NN ,:PU including:PR nightsitting:VV .:PU
...
In these first two instances of the tagger saying "nursing" is a noun
and the Gold Standard saying it's a verb, the tagger is actually
right: "general nursing" is an NP, and "nursing" is its head; "pain
control , nursing and medical advice" is an NP listing three things --
"pain control" is an NP, "medical advice" is an NP, so if "nursing" is
part of a list including these two then it must be an NP. Overall, of
the 19 instances of "nursing" being labelled as a noun by the tagger
and a verb in the Gold Standard, the correct label is noun in 10, verb
in 8 and one is unclear. Likewise, of the 7 instances of "pedal" that
are labelled as verbs in the Gold Standard and as nouns by the tagger,
6 are in fact nouns and 1 is a verb.
*This is inevitable* -- any very large manually labelled corpus will
contain errors, because people make errors, and any very large
automatically labelled corpus will make errors, because algorithms
make errors. So before you decide to use the scores as a way of
choosing between taggers, have a good look at the Gold Standard,
because if it contains lots of errors of a kind that concern you then
you should be very wary of relying too much on the score (this also
applies to places where the tagger and the Gold Standard do agree --
if all the places where the Gold Standard said that "nursing" was a
verb were wrong, then the tagger would also be wrong when it agreed
with it).
The moral? If you want to know whether you should use a given tagger,
you have to look carefully at its "mistakes". They may not matter, and
they may not actually be mistakes. There isn't anything to go on apart
from the scores and the confusion matrix, but you should treat these
very carefully.
"""
def reader2goldstandard(corpus=os.path.join(BNC, "TESTING"), specials=None):
    """Read a tagged corpus directory as a Gold Standard test set.

    `corpus` is the directory to read (default: BNC/TESTING). `specials` maps
    word forms to fixed tags; the default tags "to" as TO and "that" as THAT,
    matching the examples in the module docstring.

    FIX: the default for `specials` was a mutable dict literal shared between
    calls; use None and build a fresh dict per call instead.
    """
    if specials is None:
        specials = {"to": "TO", "that": "THAT"}
    return reader(
        corpus,
        lambda data: BNCTaggedSentenceReader(
            data,
            wordreader=lambda data: BNCTaggedWordReader(data, specials=specials)))
def compareTaggers(tagged1, tagged2, target=None, N=100):
k = n = 0
confusion = {}
for s1, s2 in zip(tagged1, tagged2):
forms = " ".join([x[0] for x in s1])
print("""
%s"""%(forms))
s = ""
"""
tagged1 and tagged2 may have been tokenised differently. Use
dynamic time warping to get the cases where they do actually
contain the same items, ignore the rest.
"""
for t1t2 in dtw.ARRAY(s1, s2, EXCHANGE=lambda x1, x2: 0 if x1[0] == x2[0] else 3).align():
t1 = t1t2[0]
t2 = t1t2[1]
if not(t1 == "*" or t2 == "*") and t1[0] == t2[0]:
form = t1[0]
tag1 = t1[1]
tag2 = t2[1]
if not tag2 in confusion:
confusion[tag2] = {}
if tag1 in confusion[tag2]:
confusion[tag2][tag1].append(form)
| |
# pylint: disable=W0611, W0621, C0302, R0914
import allure
from adcm_client.objects import ADCMClient
from adcm_pytest_plugin.utils import parametrize_by_data_subdirs
from .utils import prepare_cluster_and_get_config
@allure.step('Check that field is invisible if group is active or not')
def _check_that_field_is_invisible_if_group_active_or_not(sdk_client: ADCMClient, path, app):
    """Check that field is invisible if group is active or not.

    Builds a cluster from the bundle at ``path`` and opens its config page;
    the last path component is used as the group name.
    """
    _, config = prepare_cluster_and_get_config(sdk_client, path, app)
    group_name = path.split("/")[-1]
    # NOTE(review): the step title says "visible", but the assertions below
    # check that every field group is NOT displayed — confirm intended wording.
    with allure.step('Check that field is visible if group is not active'):
        group_active = config.group_is_active_by_name(group_name)
        assert not group_active
        fields = config.get_field_groups()
        for field in fields:
            assert not field.is_displayed(), field.get_attribute("class")
        # Exactly one group should be listed, named after the bundle directory.
        group_names = config.get_group_elements()
        assert len(group_names) == 1
        assert group_names[0].text == group_name
        assert group_names, group_names
        # Switch to advanced mode before the activation check.
        if not config.advanced:
            config.click_advanced()
        assert config.advanced
    with allure.step('Check that field is invisible if group is active'):
        config.activate_group_by_name(group_name)
        group_active = config.group_is_active_by_name(group_name)
        assert group_active
        group_names = config.get_group_elements()
        assert group_names, group_names
        assert len(group_names) == 1
        assert group_names[0].text == group_name
        # Fields must remain hidden even after the group is activated.
        fields = config.get_field_groups()
        for field in fields:
            assert not field.is_displayed(), field.get_attribute("class")
@allure.step('Check that field invisible if activatable group active and not')
def _check_that_field_invisible_if_activatable_group_active_and_not(
        sdk_client: ADCMClient, path, app):
    """Check that field invisible if activatable group active and not.

    Builds a cluster from the bundle at ``path`` and opens its config page;
    the last path component is used as the group name.
    """
    _, config = prepare_cluster_and_get_config(sdk_client, path, app)
    group_name = path.split("/")[-1]
    with allure.step('Check that field is visible if activatable group is not active'):
        group_active = config.group_is_active_by_name(group_name)
        # NOTE(review): the step title says "not active", yet this asserts the
        # group IS active (the sibling non-activatable check asserts
        # `not group_active` at this point) — presumably these activatable
        # bundles default to active; confirm.
        assert group_active
        fields = config.get_field_groups()
        for field in fields:
            assert not field.is_displayed(), field.get_attribute("class")
        # Exactly one group should be listed, named after the bundle directory.
        group_names = config.get_group_elements()
        assert len(group_names) == 1
        assert group_names[0].text == group_name
        assert group_names, group_names
        # Switch to advanced mode before the activation check.
        if not config.advanced:
            config.click_advanced()
        assert config.advanced
    with allure.step('Check that field invisible if activatable group active'):
        config.activate_group_by_name(group_name)
        group_active = config.group_is_active_by_name(group_name)
        assert group_active
        group_names = config.get_group_elements()
        assert group_names, group_names
        assert len(group_names) == 1
        assert group_names[0].text == group_name
        # Fields must remain hidden even after the group is activated.
        fields = config.get_field_groups()
        for field in fields:
            assert not field.is_displayed(), field.get_attribute("class")
@allure.step('Check that all fields and groups invisible')
def _check_that_all_fields_and_groups_invisible(
        sdk_client: ADCMClient, path, app):
    """All config fields and groups must stay hidden, with and without advanced mode."""
    _, config = prepare_cluster_and_get_config(sdk_client, path, app)
    field_elements = config.get_field_groups()
    with allure.step('Check that all fields and groups invisible'):
        for element in field_elements:
            assert not element.is_displayed(), element.get_attribute("class")
        visible_groups = config.get_group_elements()
        assert not visible_groups, visible_groups
        # Re-check after enabling advanced mode: still nothing visible.
        if not config.advanced:
            config.click_advanced()
        assert config.advanced
        field_elements = config.get_field_groups()
        visible_groups = config.get_group_elements()
        assert not visible_groups
        for element in field_elements:
            assert not element.is_displayed(), element.get_attribute("class")
@allure.step('Check that field is visible if advanced and activatable true')
def _check_that_all_field_is_visible_if_advanced_and_activatable_true(
        sdk_client: ADCMClient, path, app):
    """Field visible if advanced and activatable true"""
    _, config = prepare_cluster_and_get_config(sdk_client, path, app)
    group_name = path.split("/")[-1]
    with allure.step('Check that field is visible if advanced and activatable'):
        config.check_that_fields_and_group_are_invisible()
        # Enable advanced mode, then activate the group: fields must appear.
        if not config.advanced:
            config.click_advanced()
        assert config.advanced
        config.activate_group_by_name(group_name)
        assert config.group_is_active_by_name(group_name)
        visible_groups = config.get_group_elements()
        assert visible_groups, visible_groups
        assert len(visible_groups) == 1
        assert visible_groups[0].text == group_name
        for element in config.get_field_groups():
            assert element.is_displayed(), element.get_attribute("class")
@allure.step('Check that field invisible')
def _check_that_all_field_is_invisible(
        sdk_client: ADCMClient, path, app):
    """The (already active) group is shown under the advanced view, but
    its fields stay hidden."""
    _, config = prepare_cluster_and_get_config(sdk_client, path, app)
    expected_group = path.split("/")[-1]
    with allure.step('Check that field invisible'):
        config.check_that_fields_and_group_are_invisible()
        if not config.advanced:
            config.click_advanced()
        assert config.advanced
        assert config.group_is_active_by_name(expected_group)
        groups = config.get_group_elements()
        assert groups, groups
        assert len(groups) == 1
        assert groups[0].text == expected_group
        for field_group in config.get_field_groups():
            assert not field_group.is_displayed(), field_group.get_attribute("class")
@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_false_field_advanced_false_invisible_false_activiatable_false")
def test_group_advanced_false_invisible_false_field_advanced_false_invisible_false_active_false(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Fields of an inactive activatable group stay hidden until the group
    is activated."""
    _, config = prepare_cluster_and_get_config(sdk_client_fs, path, app_fs)
    expected_group = path.split("/")[-1]
    with allure.step('Check that group not active and field is invisible'):
        assert not config.group_is_active_by_name(expected_group)
        for field_group in config.get_field_groups():
            assert not field_group.is_displayed(), field_group.get_attribute("class")
        groups = config.get_group_elements()
        assert len(groups) == 1
        assert groups[0].text == expected_group
        assert groups, groups
        if not config.advanced:
            config.click_advanced()
        assert config.advanced
    config.activate_group_by_name(expected_group)
    group_active = config.group_is_active_by_name(expected_group)
    with allure.step('Check that group is active and field is visible'):
        assert group_active
        groups = config.get_group_elements()
        assert groups, groups
        assert len(groups) == 1
        assert groups[0].text == expected_group
        for field_group in config.get_field_groups():
            assert field_group.is_displayed(), field_group.get_attribute("class")
@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_false_field_advanced_false_invisible_false_activiatable_true")
def test_group_advanced_false_invisible_false_field_advanced_false_invisible_false_active_true(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """An activatable group that starts active keeps its fields visible
    throughout, including in advanced mode."""
    _, config = prepare_cluster_and_get_config(sdk_client_fs, path, app_fs)
    expected_group = path.split("/")[-1]
    with allure.step('Check that group active and all fields always visible'):
        assert config.group_is_active_by_name(expected_group)
        for field_group in config.get_field_groups():
            assert field_group.is_displayed(), field_group.get_attribute("class")
        groups = config.get_group_elements()
        assert len(groups) == 1
        assert groups[0].text == expected_group
        assert groups, groups
        if not config.advanced:
            config.click_advanced()
        assert config.advanced
    config.activate_group_by_name(expected_group)
    group_active = config.group_is_active_by_name(expected_group)
    with allure.step('Check that group active and fields are visible'):
        assert group_active
        groups = config.get_group_elements()
        assert groups, groups
        assert len(groups) == 1
        assert groups[0].text == expected_group
        for field_group in config.get_field_groups():
            assert field_group.is_displayed(), field_group.get_attribute("class")
# The two cases below delegate to shared helpers: fields marked invisible
# must never be rendered, whatever the activation state of their group.
@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_false_field_advanced_false_invisible_true_activiatable_false")
def test_group_advanced_false_invisible_false_field_advanced_false_invisible_true_active_false(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that field is invisible if group is active or not."""
    _check_that_field_is_invisible_if_group_active_or_not(sdk_client_fs, path, app_fs)


@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_false_field_advanced_false_invisible_true_activiatable_true")
def test_group_advanced_false_invisible_false_field_advanced_false_invisible_true_active_true(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that field invisible if activatable group active and not."""
    _check_that_field_invisible_if_activatable_group_active_and_not(sdk_client_fs, path, app_fs)
@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_false_field_advanced_true_invisible_false_activiatable_false")
def test_group_advanced_false_invisible_false_field_advanced_true_invisible_false_active_false(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Advanced-only fields appear once advanced mode is on and the group
    has been activated."""
    _, config = prepare_cluster_and_get_config(sdk_client_fs, path, app_fs)
    expected_group = path.split("/")[-1]
    with allure.step('Check that group not active'):
        assert not config.group_is_active_by_name(expected_group)
        for field_group in config.get_field_groups():
            assert not field_group.is_displayed(), field_group.get_attribute("class")
        groups = config.get_group_elements()
        assert len(groups) == 1
        assert groups[0].text == expected_group
        assert groups, groups
        if not config.advanced:
            config.click_advanced()
        assert config.advanced
    config.activate_group_by_name(expected_group)
    group_active = config.group_is_active_by_name(expected_group)
    with allure.step('Check that field visible if advanced group is enabled'):
        assert group_active
        groups = config.get_group_elements()
        assert groups, groups
        assert len(groups) == 1
        assert groups[0].text == expected_group
        for field_group in config.get_field_groups():
            assert field_group.is_displayed(), field_group.get_attribute("class")
@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_false_field_advanced_true_invisible_false_activiatable_true")
def test_group_advanced_false_invisible_false_field_advanced_true_invisible_false_active_true(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Advanced-only fields of an active group are hidden until advanced
    mode is enabled."""
    _, config = prepare_cluster_and_get_config(sdk_client_fs, path, app_fs)
    expected_group = path.split("/")[-1]
    group_active = config.group_is_active_by_name(expected_group)
    with allure.step('Check that group is active'):
        assert group_active
        # The fields are advanced-only, so they are hidden even though
        # the group itself is active.
        for field_group in config.get_field_groups():
            assert not field_group.is_displayed(), field_group.get_attribute("class")
        groups = config.get_group_elements()
        assert len(groups) == 1
        assert groups[0].text == expected_group
        assert groups, groups
        if not config.advanced:
            config.click_advanced()
        assert config.advanced
    config.activate_group_by_name(expected_group)
    group_active = config.group_is_active_by_name(expected_group)
    with allure.step('Check that field is visible if group active and advanced enabled'):
        assert group_active
        groups = config.get_group_elements()
        assert groups, groups
        assert len(groups) == 1
        assert groups[0].text == expected_group
        for field_group in config.get_field_groups():
            assert field_group.is_displayed(), field_group.get_attribute("class")
# Fields that are both advanced and invisible must never be rendered;
# delegate to the shared helpers for the plain and activatable variants.
@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_false_field_advanced_true_invisible_true_activiatable_false")
def test_group_advanced_false_invisible_false_field_advanced_true_invisible_true_active_false(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that field is invisible if group is active or not."""
    _check_that_field_is_invisible_if_group_active_or_not(sdk_client_fs, path, app_fs)


@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_false_field_advanced_true_invisible_true_activiatable_true")
def test_group_advanced_false_invisible_false_field_advanced_true_invisible_true_active_true(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that field invisible if activatable group active and not."""
    _check_that_field_invisible_if_activatable_group_active_and_not(sdk_client_fs, path, app_fs)
# When the group itself is invisible, nothing may be rendered regardless of
# the field flags; all eight combinations delegate to the same helper.
@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_true_field_advanced_false_invisible_false_activiatable_false")
def test_group_advanced_false_invisible_true_field_advanced_false_invisible_false_active_false(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that all fields and groups invisible."""
    _check_that_all_fields_and_groups_invisible(sdk_client_fs, path, app_fs)


@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_true_field_advanced_false_invisible_false_activiatable_true")
def test_group_advanced_false_invisible_true_field_advanced_false_invisible_false_active_true(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that all fields and groups invisible."""
    _check_that_all_fields_and_groups_invisible(sdk_client_fs, path, app_fs)


@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_true_field_advanced_false_invisible_true_activiatable_false")
def test_group_advanced_false_invisible_true_field_advanced_false_invisible_true_active_false(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that all fields and groups invisible."""
    _check_that_all_fields_and_groups_invisible(sdk_client_fs, path, app_fs)


@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_true_field_advanced_false_invisible_true_activiatable_true")
def test_group_advanced_false_invisible_true_field_advanced_false_invisible_true_active_true(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that all fields and groups invisible."""
    _check_that_all_fields_and_groups_invisible(sdk_client_fs, path, app_fs)


@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_true_field_advanced_true_invisible_false_activiatable_false")
def test_group_advanced_false_invisible_true_field_advanced_true_invisible_false_active_false(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that all fields and groups invisible."""
    _check_that_all_fields_and_groups_invisible(sdk_client_fs, path, app_fs)


@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_true_field_advanced_true_invisible_false_activiatable_true")
def test_group_advanced_false_invisible_true_field_advanced_true_invisible_false_active_true(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that all fields and groups invisible."""
    _check_that_all_fields_and_groups_invisible(sdk_client_fs, path, app_fs)


@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_true_field_advanced_true_invisible_true_activiatable_false")
def test_group_advanced_false_invisible_true_field_advanced_true_invisible_true_active_false(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that all fields and groups invisible."""
    _check_that_all_fields_and_groups_invisible(sdk_client_fs, path, app_fs)


@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_false_invisible_true_field_advanced_true_invisible_true_activiatable_true")
def test_group_advanced_false_invisible_true_field_advanced_true_invisible_true_active_true(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that all fields and groups invisible."""
    _check_that_all_fields_and_groups_invisible(sdk_client_fs, path, app_fs)
# Advanced-only group: fields appear only after advanced mode is enabled
# and the group has been activated; the shared helper checks both steps.
@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_true_invisible_false_field_advanced_false_invisible_false_activiatable_false")
def test_group_advanced_true_invisible_false_field_advanced_false_invisible_false_active_false(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Field visible if advanced and activatable true."""
    _check_that_all_field_is_visible_if_advanced_and_activatable_true(sdk_client_fs, path, app_fs)


@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_true_invisible_false_field_advanced_false_invisible_false_activiatable_true")
def test_group_advanced_true_invisible_false_field_advanced_false_invisible_false_active_true(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Field visible if advanced and activatable true."""
    _check_that_all_field_is_visible_if_advanced_and_activatable_true(sdk_client_fs, path, app_fs)
@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_true_invisible_false_field_advanced_false_invisible_true_activiatable_false")
def test_group_advanced_true_invisible_false_field_advanced_false_invisible_true_active_false(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """The advanced-only group becomes visible and can be activated, but
    its invisible fields are never rendered."""
    _, config = prepare_cluster_and_get_config(sdk_client_fs, path, app_fs)
    expected_group = path.split("/")[-1]
    config.check_that_fields_and_group_are_invisible()
    if not config.advanced:
        config.click_advanced()
    assert config.advanced
    assert not config.group_is_active_by_name(expected_group)
    config.activate_group_by_name(expected_group)
    assert config.group_is_active_by_name(expected_group)
    with allure.step('Check that fields and group are visible'):
        groups = config.get_group_elements()
        assert groups, groups
        assert len(groups) == 1
        assert groups[0].text == expected_group
        # The fields themselves carry invisible=true, so they stay hidden
        # even though their group is shown and active.
        for field_group in config.get_field_groups():
            assert not field_group.is_displayed(), field_group.get_attribute("class")
# Activatable variant of the case above; the shared helper asserts the group
# is shown (advanced) while its invisible fields stay hidden.
@parametrize_by_data_subdirs(
    __file__,
    "group_advanced_true_invisible_false_field_advanced_false_invisible_true_activiatable_true")
def test_group_advanced_true_invisible_false_field_advanced_false_invisible_true_active_true(
        sdk_client_fs: ADCMClient, path, app_fs, login_to_adcm):
    """Check that field invisible."""
    _check_that_all_field_is_invisible(sdk_client_fs, path, app_fs)
@parametrize_by_data_subdirs(
__file__,
"group_advanced_true_invisible_false_field_advanced_true_invisible_false_activiatable_false")
def | |
<filename>cora/foreground/pointsource.py
"""Simulating extra-galactic point sources."""
from os.path import join, dirname
import numpy as np
import numpy.random as rnd
# There's a very strange issue here, importing the following two lines in the
# (original) reverse order causes a segfault. It's maybe related to:
# https://github.com/Alwnikrotikz/healpy/issues/58 This was in 10/2019 running
# on macOS 10.15. I imagine other OSs are not affected.
import healpy
from scipy.optimize import newton
from cora.core import maps
from cora.util import units
from cora.foreground import poisson as ps
from cora.foreground import gaussianfg
def faraday_rotate(polmap, rm_map, frequencies):
    """Faraday rotate a set of sky maps (done inplace).

    Parameters
    ----------
    polmap : np.ndarray[freq, pol, pixel]
        The maps of the sky (assumed to be packed as T, Q, U and optionally V.
    rm_map : np.ndarray[pixel]
        The rotation measure across the sky (in radians / m^2).
    frequencies : np.ndarray[freq]
        The frequencies in the map in MHz.

    Returns
    -------
    rot_map : np.ndarray[freq, pol, pixel]
        The Faraday rotated map (the same array as `polmap`, which is
        modified in place).
    """

    # Apply frequency by frequency to reduce memory consumption
    for ii, freq in enumerate(frequencies):

        # Pack Q and U into the complex polarisation P = Q + iU, which picks
        # up a phase under Faraday rotation.
        qu_complex = polmap[ii, 1] + 1.0j * polmap[ii, 2]

        # Wavelength in metres (freq is in MHz, units.c in m/s).
        wv = 1e-6 * units.c / freq

        # The polarisation angle rotates by RM * lambda^2, so P is rotated by
        # exp(2i * RM * lambda^2).  BUGFIX: the wavelength must enter
        # squared; the previous `wv * rm_map` was dimensionally wrong
        # (rad/m rather than rad, given rm_map is in rad/m^2).
        faraday = np.exp(-2.0j * wv ** 2 * rm_map)

        qu_complex = qu_complex * faraday

        polmap[ii, 1] = qu_complex.real
        polmap[ii, 2] = qu_complex.imag

    return polmap
class PointSourceModel(maps.Map3d):
    r"""Represents a population of astrophysical point sources.

    This is the base class for modelling a population of point
    sources. This assumes they are described by a source count
    function, and a spectral function which may depend on the flux. To
    create a model, a source_count and spectral_realisation must be
    implemented.

    Attributes
    ----------
    flux_min : float
        The lower flux limit of sources to include. Defaults to 1 mJy.
    flux_max : {float, None}
        The upper flux limit of sources to include. If `None` then
        include all sources (with a high probability).
    faraday : boolean
        Whether to Faraday rotate polarisation maps (default is True).
    sigma_pol_frac : scalar
        The standard deviation of the polarisation fraction of sources.
        Default is 0.03. See http://adsabs.harvard.edu/abs/2004A&A...415..549R
    """

    flux_min = 1e-4
    flux_max = None
    faraday = True
    sigma_pol_frac = 0.03

    def __init__(self):
        # Load the bundled sky data; only the Faraday rotation measure map
        # is needed by this class (used in `getpolsky`).
        _data_file = join(dirname(__file__), "data", "skydata.npz")
        f = np.load(_data_file)
        self._faraday = f["faraday"]

    def source_count(self, flux):
        r"""The expected number of sources per unit flux (Jy) per steradian.

        This is an abstract method that must be implemented in an actual model.

        Parameters
        ----------
        flux : float
            The strength of the source in Jy.

        Returns
        -------
        value : float
            The differential source count at `flux`.

        Notes
        -----
        """
        pass

    def spectral_realisation(self, flux, frequencies):
        r"""Generate a frequency distribution for a source of given `flux`.

        This is an abstract method that must be implemented in an
        actual model. Must be able to broadcast if `flux`, and
        `frequencies` are numpy arrays.

        Parameters
        ----------
        flux : float
            The strength of the source in Jy.
        frequencies : ndarray
            The frequencies to calculate the spectral distribution at.

        Returns
        -------
        fluxes : ndarray
            The flux at each of the `frequencies` given.
        """
        pass

    def generate_population(self, area):
        r"""Create a set of point sources.

        Parameters
        ----------
        area : float
            The area the population is contained within.
            NOTE(review): callers (`getfield`, `getsky`) pass this in
            steradians, not square degrees — confirm intended units.

        Returns
        -------
        sources : ndarray
            The fluxes of the sources in the population.
        """
        flux_max = self.flux_max

        # If we don't have a maximum flux set, set one by calculating
        # the flux at which there is only a very small probability of
        # there being a brighter source.
        #
        # For a power law with dN/dS \propto S^(1-\beta), this is the
        # flux at which the probability of a source P(>S_max) < 0.05/
        # \beta
        if flux_max is None:  # idiom fix: compare to None with `is`

            def ratelog(s):
                return s * area * self.source_count(s) - 5e-2

            flux_max = newton(ratelog, self.flux_min)
            print("Using maximum flux: %e Jy" % flux_max)

        # Generate realisation by creating a rate and treating like an
        # inhomogeneous Poisson process.  Sampling is done in log-flux so
        # the steep faint end of the counts is well resolved.
        def rate(s):
            # dN/d(ln S) = S * dN/dS, scaled by the sampled area.
            return (
                self.flux_min
                * np.exp(s)
                * area
                * self.source_count(self.flux_min * np.exp(s))
            )

        fluxes = self.flux_min * np.exp(
            ps.inhomogeneous_process_approx(np.log(flux_max / self.flux_min), rate)
        )

        return fluxes

    def getfield(self, catalogue=False):
        r"""Create a simulated cube of point sources.

        Create a pixelised realisation of the sources.

        Parameters
        ----------
        catalogue : boolean, optional
            if true return the population catalogue.

        Returns
        -------
        cube : ndarray
            An array of dimensions (`numf`, `numx`, `numy`)
        """
        c = np.zeros(self._num_array())

        # Population for the patch of sky covered by the map (steradians).
        fluxes = self.generate_population(
            np.radians(self.x_width) * np.radians(self.y_width)
        )

        freq = self.nu_pixels

        # Realise each source's spectrum at every map frequency.
        sr = self.spectral_realisation(fluxes[:, np.newaxis], freq[np.newaxis, :])

        for i in range(sr.shape[0]):
            # Pick random pixel
            x = int(rnd.rand() * self.x_num)
            y = int(rnd.rand() * self.y_num)

            c[:, x, y] += sr[i, :]

        if not catalogue:
            return c
        else:
            return c, fluxes

    def getsky(self):
        """Simulate a map of point sources.

        Returns
        -------
        sky : ndarray [nfreq, npix]
            Map of the brightness temperature on the sky (in K).
        """
        if self.flux_min < 0.1:
            print("This is going to take a long time. Try raising the flux limit.")

        npix = 12 * self.nside ** 2
        freq = self.nu_pixels
        nfreq = len(freq)

        sky = np.zeros((nfreq, npix), dtype=np.float64)
        pxarea = 4 * np.pi / npix

        # Full-sky population (4 pi steradians).
        fluxes = self.generate_population(4 * np.pi)

        sr = self.spectral_realisation(fluxes[:, np.newaxis], freq[np.newaxis, :])

        for i in range(sr.shape[0]):
            # Pick random pixel
            ix = int(rnd.rand() * npix)

            sky[:, ix] += sr[i, :]

        # Convert flux map in Jy to brightness temperature map in K.
        sky = (
            sky
            * 1e-26
            * units.c ** 2
            / (2 * units.k_B * self.nu_pixels[:, np.newaxis] ** 2 * 1e12 * pxarea)
        )

        return sky

    def getpolsky(self):
        """Simulate polarised point sources."""

        sky_I = self.getsky()

        sky_pol = np.zeros((sky_I.shape[0], 4, sky_I.shape[1]), dtype=sky_I.dtype)

        # Draw independent Gaussian polarisation fractions for Q and U,
        # constant across frequency for each source pixel.
        q_frac = (
            self.sigma_pol_frac
            * np.random.standard_normal(sky_I.shape[1])[np.newaxis, :]
        )
        u_frac = (
            self.sigma_pol_frac
            * np.random.standard_normal(sky_I.shape[1])[np.newaxis, :]
        )

        sky_pol[:, 0] = sky_I
        sky_pol[:, 1] = sky_I * q_frac
        sky_pol[:, 2] = sky_I * u_frac

        if self.faraday:
            faraday_rotate(
                sky_pol, healpy.ud_grade(self._faraday, self.nside), self.nu_pixels
            )

        return sky_pol
class PowerLawModel(PointSourceModel):
    r"""A simple point source model.

    Source counts follow a single power law, and each source gets a
    power-law spectrum whose index is drawn from a Gaussian.

    Attributes
    ----------
    source_index : scalar
        The spectral index of the luminosity function.
    source_pivot : scalar
        The pivot of the luminosity function (in Jy).
    source_amplitude : scalar
        The amplitude of the luminosity function (number of sources /
        Jy / deg^2 at the pivot).
    spectral_mean : scalar
        The mean index of the spectral distribution
    spectral_width : scalar
        Width of distribution spectral indices.
    spectral_pivot : scalar
        Frequency of the pivot point (in Mhz). This is the frequency
        that the flux is defined at.

    Notes
    -----
    Default source count parameters based loosely on the results of
    the 6C survey [1]_.

    References
    ----------
    .. [1] Hales et al. 1988
    """

    source_index = 2.5
    source_pivot = 1.0
    source_amplitude = 2.396e3

    spectral_mean = -0.7
    spectral_width = 0.1
    spectral_pivot = 151.0

    def source_count(self, flux):
        r"""Differential source count dN/dS as a pure power law."""
        flux_ratio = flux / self.source_pivot
        return self.source_amplitude * flux_ratio ** (-self.source_index)

    def spectral_realisation(self, flux, freq):
        r"""Scale the flux as a power law with a Gaussian-distributed index."""
        index = self.spectral_mean + self.spectral_width * rnd.standard_normal(
            flux.shape
        )
        return flux * (freq / self.spectral_pivot) ** index
class DiMatteo(PointSourceModel):
r"""Double power-law point source model
Uses the results of Di Mattero et al. [2]_
Attributes
----------
gamma1, gamma2 : scalar
The two power law indices.
S_0 : scalar
The pivot of the source count function (in Jy).
k1 : scalar
The amplitude of the luminosity function (number of sources /
Jy / deg^2 at the pivot).
spectral_mean : scalar
The mean index of the spectral distribution
spectral_width : scalar
Width of distribution spectral indices.
spectral_pivot : scalar
Frequency of the pivot point (in Mhz). This is the frequency
that the flux is defined at.
Notes
-----
Based on [2]_ and clarification in [3]_ (footnote 6). In this
:math:`S_0` is both the pivot and normalising flux, which means that k1
is rescaled by a factor of :math:`0.88**-1.75.i`
References
----------
.. [2] Di Matteo et al. 2002 (http://arxiv.org/abs/astro-ph/0109241)
.. [3] Santos et al. 2005 (http://arxiv.org/abs/astro-ph/0408515)
"""
gamma1 = 1.75
gamma2 = 2.51
S_0 = 0.88
| |
'/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:pw-routing/Cisco-IOS-XR-l2vpn-cfg:pw-routing-bgp'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True

def _has_data(self):
    # Presence check: the node is serialised only when it, or a child
    # container, actually carries data.
    if not self.is_config():
        return False
    if self.enable is not None:
        return True
    if self.evpn_route_distinguisher is not None and self.evpn_route_distinguisher._has_data():
        return True
    return False

@staticmethod
def _meta_info():
    # NOTE(review): the meta-table import is kept local, presumably to
    # defer loading the large generated meta module — confirm.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.PwRouting.PwRoutingBgp']['meta_info']
@property
def _common_path(self):
    # Absolute XPath; pw-routing hangs directly off the top-level l2vpn
    # node, so no parent path is required.
    return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:pw-routing'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True

def _has_data(self):
    # Presence check: emit the node only if a leaf or a child container
    # is set.
    if not self.is_config():
        return False
    if self.pw_routing_bgp is not None and self.pw_routing_bgp._has_data():
        return True
    if self.pw_routing_global_id is not None:
        return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
    return meta._meta_table['L2Vpn.PwRouting']['meta_info']
class Neighbor(object):
    """
    L2VPN neighbor submode

    .. attribute:: ldp_flap

    	Enable targetted LDP session flap action
    	**type**\: :py:class:`Empty<ydk.types.Empty>`

    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # `parent` is set by the YDK runtime when this node is attached to
        # its enclosing L2Vpn object.
        self.parent = None
        self.ldp_flap = None

    @property
    def _common_path(self):
        # Fixed absolute XPath: neighbor is a direct child of l2vpn.
        return '/Cisco-IOS-XR-l2vpn-cfg:l2vpn/Cisco-IOS-XR-l2vpn-cfg:neighbor'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Serialise only when the single leaf is actually set.
        if not self.is_config():
            return False
        if self.ldp_flap is not None:
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Neighbor']['meta_info']
class Database(object):
"""
L2VPN databases
.. attribute:: bridge_domain_groups
List of bridge groups
**type**\: :py:class:`BridgeDomainGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.BridgeDomainGroups>`
.. attribute:: flexible_xconnect_service_table
List of Flexible XConnect Services
**type**\: :py:class:`FlexibleXconnectServiceTable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.FlexibleXconnectServiceTable>`
.. attribute:: g8032_rings
List of G8032 Ring
**type**\: :py:class:`G8032Rings <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.G8032Rings>`
.. attribute:: pseudowire_classes
List of pseudowire classes
**type**\: :py:class:`PseudowireClasses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.PseudowireClasses>`
.. attribute:: redundancy
Redundancy groups
**type**\: :py:class:`Redundancy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.Redundancy>`
.. attribute:: xconnect_groups
List of xconnect groups
**type**\: :py:class:`XconnectGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # Child containers are instantiated eagerly and back-linked via
    # `parent` so the runtime can derive their XPaths.
    self.bridge_domain_groups = L2Vpn.Database.BridgeDomainGroups()
    self.bridge_domain_groups.parent = self
    self.flexible_xconnect_service_table = L2Vpn.Database.FlexibleXconnectServiceTable()
    self.flexible_xconnect_service_table.parent = self
    self.g8032_rings = L2Vpn.Database.G8032Rings()
    self.g8032_rings.parent = self
    self.pseudowire_classes = L2Vpn.Database.PseudowireClasses()
    self.pseudowire_classes.parent = self
    self.redundancy = L2Vpn.Database.Redundancy()
    self.redundancy.parent = self
    self.xconnect_groups = L2Vpn.Database.XconnectGroups()
    self.xconnect_groups.parent = self
class G8032Rings(object):
"""
List of G8032 Ring
.. attribute:: g8032_ring
G8032 Ring
**type**\: list of :py:class:`G8032Ring <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.G8032Rings.G8032Ring>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # Keyed list of G8032Ring entries.
    self.g8032_ring = YList()
    self.g8032_ring.parent = self
    self.g8032_ring.name = 'g8032_ring'
class G8032Ring(object):
"""
G8032 Ring
.. attribute:: g8032_ring_name <key>
Name of the G8032 ring
**type**\: str
**length:** 0..32
.. attribute:: erp_instances
List of ethernet ring protection instance
**type**\: :py:class:`ErpInstances <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances>`
.. attribute:: erp_port0s
Ethernet ring protection port0
**type**\: :py:class:`ErpPort0S <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S>`
.. attribute:: erp_port1s
Ethernet ring protection port0
**type**\: :py:class:`ErpPort1S <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S>`
.. attribute:: erp_provider_bridge
Ethernet ring protection provider bridge
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: exclusion_list
Vlan IDs in the format of a\-b,c,d,e\-f,g ,untagged
**type**\: str
.. attribute:: open_ring
Specify the G.8032 instance as open ring
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # List key: name of the G.8032 ring.
    self.g8032_ring_name = None
    self.erp_instances = L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances()
    self.erp_instances.parent = self
    self.erp_port0s = L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S()
    self.erp_port0s.parent = self
    self.erp_port1s = L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S()
    self.erp_port1s.parent = self
    self.erp_provider_bridge = None
    self.exclusion_list = None
    self.open_ring = None
class ErpPort0S(object):
    """
    Ethernet ring protection port0

    .. attribute:: erp_port0

    	Configure ERP main port0
    	**type**\: list of :py:class:`ErpPort0 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S.ErpPort0>`

    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list of main-port0 entries (key: interface_name).
        self.erp_port0 = YList()
        self.erp_port0.parent = self
        self.erp_port0.name = 'erp_port0'


    class ErpPort0(object):
        """
        Configure ERP main port0

        .. attribute:: interface_name  <key>

        	Port0 interface
        	**type**\: str

        	**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)

        .. attribute:: monitor

        	Ethernet ring protection port0 monitor
        	**type**\: str

        	**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)

        """

        _prefix = 'l2vpn-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.interface_name = None
            self.monitor = None

        @property
        def _common_path(self):
            # XPath = parent path + this list entry's key predicate.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.interface_name is None:
                raise YPYModelError('Key property interface_name is None')

            return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:erp-port0[Cisco-IOS-XR-l2vpn-cfg:interface-name = ' + str(self.interface_name) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Presence check over both leaves of the list entry.
            if not self.is_config():
                return False
            if self.interface_name is not None:
                return True
            if self.monitor is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
            return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S.ErpPort0']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')

        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:erp-port0s'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Presence check that recurses into the list entries.
        if not self.is_config():
            return False
        if self.erp_port0 is not None:
            for child_ref in self.erp_port0:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S']['meta_info']
class ErpInstances(object):
"""
List of ethernet ring protection instance
.. attribute:: erp_instance
Ethernet ring protection instance
**type**\: list of :py:class:`ErpInstance <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
    # parent container; wired up by the enclosing G8032Ring
    self.parent = None
    # YList of ErpInstance entries; parent/name are set so paths resolve
    self.erp_instance = YList()
    self.erp_instance.parent = self
    self.erp_instance.name = 'erp_instance'
class ErpInstance(object):
"""
Ethernet ring protection instance
.. attribute:: erp_instance_id <key>
ERP instance number
**type**\: int
**range:** 1..2
.. attribute:: aps
Automatic protection switching
**type**\: :py:class:`Aps <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps>`
.. attribute:: description
Ethernet ring protection instance description
**type**\: str
**length:** 0..32
.. attribute:: inclusion_list
Associates a set of VLAN IDs with the G .8032 instance
**type**\: str
.. attribute:: profile
Ethernet ring protection instance profile
**type**\: str
**length:** 0..32
.. attribute:: rpl
Ring protection link
**type**\: :py:class:`Rpl <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Rpl>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
    # parent container; wired up by the enclosing ErpInstances list
    self.parent = None
    self.erp_instance_id = None  # key leaf: ERP instance number (int, 1..2)
    # child container: automatic protection switching settings
    self.aps = L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps()
    self.aps.parent = self
    self.description = None  # str leaf, length 0..32
    self.inclusion_list = None  # str leaf: VLAN IDs for this G.8032 instance
    self.profile = None  # str leaf, length 0..32
    # child container: ring protection link settings
    self.rpl = L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Rpl()
    self.rpl.parent = self
class Rpl(object):
    """
    Ring protection link.
    .. attribute:: port
    ERP main port number
    **type**\: :py:class:`ErpPort1Enum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.ErpPort1Enum>`
    .. attribute:: role
    RPL role
    **type**\: :py:class:`RplRoleEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.RplRoleEnum>`
    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # parent container; wired up by the enclosing ErpInstance
        self.parent = None
        self.port = None  # enum leaf: ERP main port number
        self.role = None  # enum leaf: RPL role

    @property
    def _common_path(self):
        # Path derivation requires a wired-up parent container.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:rpl'

    def is_config(self):
        """Return True: this subtree represents configuration data."""
        return True

    def _has_data(self):
        """Return True when any leaf of this node is populated."""
        if not self.is_config():
            return False
        return self.port is not None or self.role is not None

    @staticmethod
    def _meta_info():
        # Deferred import of the large generated meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Rpl']['meta_info']
class Aps(object):
"""
Automatic protection switching
.. attribute:: enable
Enable automatic protection switching
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: level
Automatic protection switching level
**type**\: int
**range:** 0..7
.. attribute:: port0
Port0 APS channel in the format of InterfaceName
**type**\: str
.. attribute:: port1
APS channel for ERP port1
**type**\: :py:class:`Port1 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps.Port1>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
    # parent container; wired up by the enclosing ErpInstance
    self.parent = None
    self.enable = None  # Empty leaf: presence enables APS
    self.level = None  # int leaf, range 0..7: APS level
    self.port0 = None  # str leaf: port0 APS channel (InterfaceName format)
    # child container: APS channel for ERP port1
    self.port1 = L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps.Port1()
    self.port1.parent = self
class Port1(object):
    """
    APS channel for ERP port1.
    .. attribute:: aps_channel
    Port1 APS channel in the format of InterfaceName, BDName or XconnectName
    **type**\: str
    .. attribute:: aps_type
    Port1 APS type
    **type**\: :py:class:`ErpapsEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.ErpapsEnum>`
    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # parent container; wired up by the enclosing Aps node
        self.parent = None
        self.aps_channel = None  # str leaf: port1 APS channel
        self.aps_type = None  # enum leaf: port1 APS type

    @property
    def _common_path(self):
        # Path derivation requires a wired-up parent container.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:port1'

    def is_config(self):
        """Return True: this subtree represents configuration data."""
        return True

    def _has_data(self):
        """Return True when any leaf of this node is populated."""
        if not self.is_config():
            return False
        return self.aps_channel is not None or self.aps_type is not None

    @staticmethod
    def _meta_info():
        # Deferred import of the large generated meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps.Port1']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent | |
tokenize.transform(df)
# vectorize
vector_col = temp_col_name(with_token, output_column)
if vectorizer == VECTORIZER_HASHING:
# check for legal inputs
hashing_num_features = parse_parameter(
int, vectorizer_hashing_parameters.get("number_of_features", 262144), "number_of_features", 262144
)
vectorize = HashingTF(inputCol=token_col, outputCol=vector_col, numFeatures=hashing_num_features)
else: # "Count vectorizer"
min_term_freq = parse_parameter(
float,
vectorizer_count_vectorizer_parameters.get("minimum_term_frequency", 1.0),
"minimum_term_frequency",
1.0,
)
min_doc_freq = parse_parameter(
float,
vectorizer_count_vectorizer_parameters.get("minimum_document_frequency", 1.0),
"minimum_document_frequency",
1.0,
)
max_doc_freq = parse_parameter(
float,
vectorizer_count_vectorizer_parameters.get("maximum_document_frequency", 1.0),
"maximum_document_frequency",
1.0,
)
max_vocab_size = parse_parameter(
int,
vectorizer_count_vectorizer_parameters.get("maximum_vocabulary_size", 10000),
"maximum_vocabulary_size",
10000,
)
binary = parse_parameter(
bool, vectorizer_count_vectorizer_parameters.get("binarize_count", False), "binarize_count", False
)
vectorize, vectorize_model_loaded = load_pyspark_model_from_trained_parameters(
trained_parameters, CountVectorizerModel, "vectorizer_model"
)
if vectorize is None:
count_vectorizer = CountVectorizer(
inputCol=token_col,
outputCol=vector_col,
minTF=min_term_freq,
minDF=min_doc_freq,
maxDF=max_doc_freq,
vocabSize=max_vocab_size,
binary=binary,
)
vectorize = fit_and_save_model(trained_parameters, "vectorizer_model", count_vectorizer, with_token)
with_vector = vectorize.transform(with_token).drop(token_col)
# tf-idf
if apply_idf == APPLY_IDF_YES:
# check variables
min_doc_freq = parse_parameter(
int, apply_idf_yes_parameters.get("minimum_document_frequency", 1), "minimum_document_frequency", 1
)
idf_model, idf_model_loaded = load_pyspark_model_from_trained_parameters(
trained_parameters, IDFModel, "idf_model"
)
if idf_model is None:
idf = IDF(minDocFreq=min_doc_freq, inputCol=vector_col, outputCol=output_column,)
idf_model = fit_and_save_model(trained_parameters, "idf_model", idf, with_vector)
post_idf = idf_model.transform(with_vector)
else:
post_idf = with_vector.withColumn(output_column, with_vector[vector_col])
# flatten output if requested
if output_format == OUTPUT_FORMAT_COLUMNS:
index_to_name = vectorize.vocabulary
def indexing(vec, idx):
try:
return float(vec[int(idx)])
except (IndexError, ValueError):
return 0.0
indexing_udf = udf(indexing, returnType=DoubleType())
names = list(df.columns)
for col_index, cur_name in enumerate(index_to_name):
names.append(indexing_udf(output_column, lit(col_index)).alias(f"{output_column}_{cur_name}"))
output_df = post_idf.select(escape_column_names(names))
else:
output_df = post_idf.drop(vector_col)
return default_spark_with_trained_parameters(output_df, trained_parameters)
def manage_columns_drop_column(df, column_to_drop=None, trained_parameters=None):
    """Remove `column_to_drop` from `df`; validation is delegated to expects_column."""
    expects_column(df, column_to_drop, "Column to drop")
    return default_spark(df.drop(column_to_drop))
def manage_columns_duplicate_column(df, input_column=None, new_name=None, trained_parameters=None):
    """Copy `input_column` into a new column named `new_name`.

    Raises OperatorSparkOperatorCustomerError when the new name collides
    with the source column.
    """
    expects_column(df, input_column, "Input column")
    expects_valid_column_name(new_name, "New name")
    if new_name == input_column:
        raise OperatorSparkOperatorCustomerError(
            f"Name for the duplicated column ({new_name}) cannot be the same as the existing column name ({input_column})."
        )
    return default_spark(df.withColumn(new_name, df[input_column]))
def manage_columns_rename_column(df, input_column=None, new_name=None, trained_parameters=None):
    """Rename `input_column` to `new_name`, rejecting no-op or empty names."""
    expects_column(df, input_column, "Input column")
    expects_valid_column_name(new_name, "New name")
    if new_name == input_column:
        raise OperatorSparkOperatorCustomerError(f"The new name ({new_name}) is the same as the old name ({input_column}).")
    if not new_name:
        raise OperatorSparkOperatorCustomerError(f"Invalid name specified for column {new_name}")
    return default_spark(df.withColumnRenamed(input_column, new_name))
def manage_columns_move_to_start(df, column_to_move=None, trained_parameters=None):
    """Reorder columns so `column_to_move` comes first."""
    if column_to_move not in df.columns:
        raise OperatorSparkOperatorCustomerError(f"Invalid column selected to move. Does not exist: {column_to_move}")
    remaining = [name for name in df.columns if name != column_to_move]
    return default_spark(df.select(escape_column_names([df[column_to_move]] + remaining)))
def manage_columns_move_to_end(df, column_to_move=None, trained_parameters=None):
    """Reorder columns so `column_to_move` comes last."""
    if column_to_move not in df.columns:
        raise OperatorSparkOperatorCustomerError(f"Invalid column selected to move. Does not exist: {column_to_move}")
    remaining = [name for name in df.columns if name != column_to_move]
    return default_spark(df.select(escape_column_names(remaining + [df[column_to_move]])))
def manage_columns_move_to_index(df, column_to_move=None, index=None, trained_parameters=None):
    """Move `column_to_move` so it ends up at zero-based position `index`.

    Raises OperatorSparkOperatorCustomerError when the column does not
    exist or the index is outside [0, number of columns - 1].
    """
    index = parse_parameter(int, index, "Index")
    if column_to_move not in df.columns:
        raise OperatorSparkOperatorCustomerError(f"Invalid column selected to move. Does not exist: {column_to_move}")
    if index >= len(df.columns) or index < 0:
        # BUGFIX: the previous message claimed the index had to be "less
        # than or equal to the number of columns and greater than zero",
        # which contradicted the actual check (0 is allowed, len is not).
        raise OperatorSparkOperatorCustomerError(
            "Specified index must be less than the number of columns and greater than or equal to zero."
        )
    columns_without_move_column = [col for col in df.columns if col != column_to_move]
    reordered_columns = columns_without_move_column[:index] + [column_to_move] + columns_without_move_column[index:]
    df = df.select(escape_column_names(reordered_columns))
    return default_spark(df)
def manage_columns_move_after(df, column_to_move=None, target_column=None, trained_parameters=None):
    """Place `column_to_move` immediately after `target_column`."""
    if column_to_move not in df.columns:
        raise OperatorSparkOperatorCustomerError(f"Invalid column selected to move. Does not exist: {column_to_move}")
    if target_column not in df.columns:
        raise OperatorSparkOperatorCustomerError(f"Invalid target column selected to move after. Does not exist: {target_column}")
    if column_to_move == target_column:
        raise OperatorSparkOperatorCustomerError(
            f"Invalid reference column name. "
            f"The reference column ({target_column}) should not be the same as the column {column_to_move}."
            f"Use a valid reference column name."
        )
    others = [name for name in df.columns if name != column_to_move]
    # Insert right after the target's position in the remaining columns.
    insert_at = others.index(target_column) + 1
    reordered = others[:insert_at] + [column_to_move] + others[insert_at:]
    return default_spark(df.select(escape_column_names(reordered)))
def manage_columns_move_before(df, column_to_move=None, target_column=None, trained_parameters=None):
    """Place `column_to_move` immediately before `target_column`."""
    if column_to_move not in df.columns:
        raise OperatorSparkOperatorCustomerError(f"Invalid column selected to move. Does not exist: {column_to_move}")
    if target_column not in df.columns:
        raise OperatorSparkOperatorCustomerError(f"Invalid target column selected to move before. Does not exist: {target_column}")
    if column_to_move == target_column:
        raise OperatorSparkOperatorCustomerError(
            f"Invalid reference column name. "
            f"The reference column ({target_column}) should not be the same as the column {column_to_move}."
            f"Use a valid reference column name."
        )
    others = [name for name in df.columns if name != column_to_move]
    # Insert at the target's position in the remaining columns.
    insert_at = others.index(target_column)
    reordered = others[:insert_at] + [column_to_move] + others[insert_at:]
    return default_spark(df.select(escape_column_names(reordered)))
def manage_columns_move_column(df, **kwargs):
    """Dispatch to the concrete move operation selected by the `move_type` kwarg."""
    handlers = {
        "Move to start": (manage_columns_move_to_start, "move_to_start_parameters"),
        "Move to end": (manage_columns_move_to_end, "move_to_end_parameters"),
        "Move to index": (manage_columns_move_to_index, "move_to_index_parameters"),
        "Move after": (manage_columns_move_after, "move_after_parameters"),
        "Move before": (manage_columns_move_before, "move_before_parameters"),
    }
    return dispatch("move_type", [df], kwargs, handlers)
from pyspark.ml.feature import (
VectorAssembler,
StandardScaler,
StandardScalerModel,
RobustScaler,
RobustScalerModel,
MinMaxScaler,
MinMaxScalerModel,
MaxAbsScaler,
MaxAbsScalerModel,
)
from pyspark.ml.functions import vector_to_array
from pyspark.sql import functions as sf
from pyspark.sql.types import NumericType
def process_numeric_standard_scaler(
    df, input_column=None, center=None, scale=None, output_column=None, trained_parameters=None
):
    """Standard-scale a numeric column, optionally centering and/or scaling.

    The scaler is fitted on rows without invalid values ("skip") but applied
    to the full frame ("keep"), so every input row gets an output value. A
    previously trained model in `trained_parameters` is reused when present.
    """
    expects_column(df, input_column, "Input column")
    expects_valid_column_name(output_column, "Output column", nullable=True)
    process_numeric_expects_numeric_column(df, input_column)

    vec_col = temp_col_name(df)
    assembled = VectorAssembler(inputCols=[input_column], outputCol=vec_col, handleInvalid="keep").transform(df)
    assembled_wo_nans = VectorAssembler(
        inputCols=[input_column], outputCol=vec_col, handleInvalid="skip"
    ).transform(df)
    scaled_col = temp_col_name(assembled)

    trained_parameters = load_trained_parameters(
        trained_parameters, {"input_column": input_column, "center": center, "scale": scale}
    )
    scaler_model, scaler_model_loaded = load_pyspark_model_from_trained_parameters(
        trained_parameters, StandardScalerModel, "scaler_model"
    )
    if scaler_model is None:
        scaler_model = fit_and_save_model(
            trained_parameters,
            "scaler_model",
            StandardScaler(
                inputCol=vec_col,
                outputCol=scaled_col,
                withStd=parse_parameter(bool, scale, "scale", True),
                withMean=parse_parameter(bool, center, "center", False),
            ),
            assembled_wo_nans,
        )
    output_df = transform_using_trained_model(scaler_model, assembled, scaler_model_loaded)

    # Flatten the one-element vector back into a plain numeric column.
    flat_col = temp_col_name(output_df)
    output_df = output_df.withColumn(flat_col, vector_to_array(scaled_col))
    # Default to overwriting the input column when no output column was given.
    target_column = output_column if output_column else input_column
    output_df = output_df.withColumn(target_column, sf.col(flat_col)[0].alias(target_column))
    # Keep the original columns (deduplicated) plus the scaled column only.
    keep = escape_column_names(list(dict.fromkeys(list(df.columns) + [target_column])))
    return default_spark_with_trained_parameters(output_df.select(keep), trained_parameters)
def process_numeric_robust_scaler(
    df,
    input_column=None,
    lower_quantile=None,
    upper_quantile=None,
    center=None,
    scale=None,
    output_column=None,
    trained_parameters=None,
):
    """Scale a numeric column using quantile statistics (Spark RobustScaler).

    The scaler is fitted on rows without invalid values ("skip") but applied
    to the full frame ("keep"), so every input row gets an output value. A
    previously trained model in `trained_parameters` is reused when present.
    """
    expects_column(df, input_column, "Input column")
    expects_valid_column_name(output_column, "Output column", nullable=True)
    process_numeric_expects_numeric_column(df, input_column)

    vec_col = temp_col_name(df)
    assembled = VectorAssembler(inputCols=[input_column], outputCol=vec_col, handleInvalid="keep").transform(df)
    assembled_wo_nans = VectorAssembler(
        inputCols=[input_column], outputCol=vec_col, handleInvalid="skip"
    ).transform(df)
    scaled_col = temp_col_name(assembled)

    trained_parameters = load_trained_parameters(
        trained_parameters,
        {
            "input_column": input_column,
            "center": center,
            "scale": scale,
            "lower_quantile": lower_quantile,
            "upper_quantile": upper_quantile,
        },
    )
    scaler_model, scaler_model_loaded = load_pyspark_model_from_trained_parameters(
        trained_parameters, RobustScalerModel, "scaler_model"
    )
    if scaler_model is None:
        scaler_model = fit_and_save_model(
            trained_parameters,
            "scaler_model",
            RobustScaler(
                inputCol=vec_col,
                outputCol=scaled_col,
                lower=parse_parameter(float, lower_quantile, "lower_quantile", 0.25),
                upper=parse_parameter(float, upper_quantile, "upper_quantile", 0.75),
                withCentering=parse_parameter(bool, center, "with_centering", False),
                withScaling=parse_parameter(bool, scale, "with_scaling", True),
            ),
            assembled_wo_nans,
        )
    output_df = transform_using_trained_model(scaler_model, assembled, scaler_model_loaded)

    # Flatten the one-element vector back into a plain numeric column.
    flat_col = temp_col_name(output_df)
    output_df = output_df.withColumn(flat_col, vector_to_array(scaled_col))
    # Default to overwriting the input column when no output column was given.
    target_column = output_column if output_column else input_column
    output_df = output_df.withColumn(target_column, sf.col(flat_col)[0].alias(target_column))
    # Keep the original columns (deduplicated) plus the scaled column only.
    keep = escape_column_names(list(dict.fromkeys(list(df.columns) + [target_column])))
    return default_spark_with_trained_parameters(output_df.select(keep), trained_parameters)
def process_numeric_min_max_scaler(
    df, input_column=None, min=None, max=None, output_column=None, trained_parameters=None
):
    """Rescale a numeric column to the range [min, max] (Spark MinMaxScaler).

    The scaler is fitted on rows without invalid values ("skip") but applied
    to the full frame ("keep"), so every input row gets an output value. A
    previously trained model in `trained_parameters` is reused when present.
    Note: `min`/`max` shadow the builtins; kept for interface compatibility.
    """
    expects_column(df, input_column, "Input column")
    expects_valid_column_name(output_column, "Output column", nullable=True)
    process_numeric_expects_numeric_column(df, input_column)

    vec_col = temp_col_name(df)
    assembled = VectorAssembler(inputCols=[input_column], outputCol=vec_col, handleInvalid="keep").transform(df)
    assembled_wo_nans = VectorAssembler(
        inputCols=[input_column], outputCol=vec_col, handleInvalid="skip"
    ).transform(df)
    scaled_col = temp_col_name(assembled)

    trained_parameters = load_trained_parameters(
        trained_parameters, {"input_column": input_column, "min": min, "max": max,}
    )
    scaler_model, scaler_model_loaded = load_pyspark_model_from_trained_parameters(
        trained_parameters, MinMaxScalerModel, "scaler_model"
    )
    if scaler_model is None:
        scaler_model = fit_and_save_model(
            trained_parameters,
            "scaler_model",
            MinMaxScaler(
                inputCol=vec_col,
                outputCol=scaled_col,
                min=parse_parameter(float, min, "min", 0.0),
                max=parse_parameter(float, max, "max", 1.0),
            ),
            assembled_wo_nans,
        )
    output_df = transform_using_trained_model(scaler_model, assembled, scaler_model_loaded)

    # Flatten the one-element vector back into a plain numeric column.
    flat_col = temp_col_name(output_df)
    output_df = output_df.withColumn(flat_col, vector_to_array(scaled_col))
    # Default to overwriting the input column when no output column was given.
    target_column = output_column if output_column else input_column
    output_df = output_df.withColumn(target_column, sf.col(flat_col)[0].alias(target_column))
    # Keep the original columns (deduplicated) plus the scaled column only.
    keep = escape_column_names(list(dict.fromkeys(list(df.columns) + [target_column])))
    return default_spark_with_trained_parameters(output_df.select(keep), trained_parameters)
def process_numeric_max_absolute_scaler(df, input_column=None, output_column=None, trained_parameters=None):
    """Scale a numeric column to [-1, 1] by dividing by its maximum absolute value.

    BUGFIX: the previous version loaded/fitted a MinMaxScaler (the wrong
    transformation for max-absolute scaling) and then unconditionally
    re-fitted a fresh MaxAbsScaler, discarding any model persisted in
    `trained_parameters` — so a saved model was never actually reused. It
    now follows the same load/fit/save pattern as the other scalers, using
    MaxAbsScaler/MaxAbsScalerModel throughout.
    """
    expects_column(df, input_column, "Input column")
    expects_valid_column_name(output_column, "Output column", nullable=True)
    process_numeric_expects_numeric_column(df, input_column)
    temp_vector_col = temp_col_name(df)
    # Fit on rows without invalid values ("skip"), transform the full frame ("keep").
    assembled = VectorAssembler(inputCols=[input_column], outputCol=temp_vector_col, handleInvalid="keep").transform(df)
    assembled_wo_nans = VectorAssembler(
        inputCols=[input_column], outputCol=temp_vector_col, handleInvalid="skip"
    ).transform(df)
    temp_normalized_vector_col = temp_col_name(assembled)
    trained_parameters = load_trained_parameters(trained_parameters, {"input_column": input_column,})
    scaler_model, scaler_model_loaded = load_pyspark_model_from_trained_parameters(
        trained_parameters, MaxAbsScalerModel, "scaler_model"
    )
    if scaler_model is None:
        scaler = MaxAbsScaler(inputCol=temp_vector_col, outputCol=temp_normalized_vector_col)
        scaler_model = fit_and_save_model(trained_parameters, "scaler_model", scaler, assembled_wo_nans)
    output_df = transform_using_trained_model(scaler_model, assembled, scaler_model_loaded)
    # convert the resulting vector back to numeric
    temp_flattened_vector_col = temp_col_name(output_df)
    output_df = output_df.withColumn(temp_flattened_vector_col, vector_to_array(temp_normalized_vector_col))
    # keep only the final scaled column.
    output_column = input_column if output_column is None or not output_column else output_column
    output_column_value = sf.col(temp_flattened_vector_col)[0].alias(output_column)
    output_df = output_df.withColumn(output_column, output_column_value)
    final_columns = list(dict.fromkeys((list(df.columns) + [output_column])))
    final_columns = escape_column_names(final_columns)
    output_df = output_df.select(final_columns)
    return default_spark_with_trained_parameters(output_df, trained_parameters)
def process_numeric_expects_numeric_column(df, input_column):
column_type = df.schema[input_column].dataType
if not isinstance(column_type, NumericType):
raise OperatorSparkOperatorCustomerError(
f'Numeric column required. Please cast column to a numeric type | |
<gh_stars>1-10
"""
This module exists to do basic processing of timecourse data that is output
from kinefold simulations
Possible things TODO:
add a wildcardstructure thing like ****** when structures not yet exposed
"""
import math
import numpy as np
import pandas
# from scipy.signal import argrelextrema
from scipy.optimize import curve_fit
from copy import deepcopy
from collections import OrderedDict
from collections import Counter
class TimeCourseStructure(object):
    """
    Holds the information of a given set of experimental runs on Kinefold.

    Requires the compressed output dictionary structure produced by hyak
    processing (use :py:meth:`init_from_dictofruns` to build one directly
    from a raw dictionary of runs). The compressed dictionary must contain
    at least a ``'sequence'`` entry.
    """

    def __init__(self, compresseddict, structurewindow=None, timewindow=None,
                 rescale=False, firstexposure=False, maxlength=False, cutoff=0.0):
        """
        Initialize the object; only ``compresseddict`` is required.

        :param compresseddict: compressed simulation information
        :type compresseddict: dict
        :param structurewindow: start and stop window to look at (1 indexed!)
        :type structurewindow: list
        :param timewindow: start and stop of the time window to consider
        :type timewindow: list
        :param rescale: extend the max time of a simulation when True;
            important when considering a diverse set of simulations
        :type rescale: bool
        :param firstexposure: start the dataframe after the first base has
            been exposed
        :type firstexposure: bool
        :param maxlength: use the maximum length of the sequence as the end
            of the indexes to mine
        :type maxlength: bool
        :param cutoff: smallest observed max frequency of structure to
            consider; useful for reducing output size
        :type cutoff: float
        """
        self.structures = {}
        self.structuredataframe = None
        self.stats = {}
        self.timewindow = timewindow
        self.structurewindow = structurewindow
        #self.sequence = self.completesequence
        self.dictionary = compresseddict
        self.completesequence = compresseddict['sequence']
        if structurewindow or (timewindow and maxlength):
            # BUGFIX: generate_data previously did not accept `rescale`, so
            # this six-argument positional call raised TypeError whenever
            # it was reached.
            self.generate_data(structurewindow, timewindow, cutoff, rescale,
                               maxlength, firstexposure)

    @classmethod
    def init_from_dictofruns(cls, dictionaryofruns, structurewindow=None, timewindow=None,
                             rescale=False, firstexposure=False, maxlength=False, cutoff=0.0):
        """
        Build a TimeCourseStructure from a raw dictionary of runs.

        Parameters are the same as :py:meth:`__init__`, except that
        ``dictionaryofruns`` is first consolidated and compressed by the
        hyak processing helpers.
        """
        from ..hyak import process as hyakp
        temprundict, baseadditiontime, completesequence, simulation_type = \
            hyakp.consolidate_run_dictionary(dictionaryofruns)
        # This step is time intensive and needs to be optimized
        dictionary = hyakp.compress_run_dictionary(temprundict,
                                                   baseadditiontime, completesequence,
                                                   simulation_type)
        output = cls(dictionary, structurewindow, timewindow, rescale,
                     firstexposure, maxlength, cutoff)
        return output

    def generate_data(self, structurewindow, timewindow=None, cutoff=0.0,
                      rescale=False, maxlength=None, firstexposure=True):
        """Condense the run dictionary into a windowed pandas dataframe.

        :param structurewindow: start and stop location of structure
            (1 indexed)
        :type structurewindow: (int, int)
        :param timewindow: time window of the simulation to consider
        :type timewindow: (float, float)
        :param cutoff: minimum frequency a structure must have to be kept
        :type cutoff: float
        :param rescale: accepted so that __init__'s positional call works;
            currently unused here — TODO confirm the intended rescaling
            behavior and implement it
        :type rescale: bool
        :param maxlength: when truthy, derive the structure window from the
            time window instead of using ``structurewindow`` directly
        :param firstexposure: start the dataframe after the first base has
            been exposed
        :type firstexposure: bool
        :returns: None; populates ``self.structuredataframe`` and
            ``self.sequence``
        """
        self.structurewindow = structurewindow
        self.timewindow = timewindow
        if maxlength:
            self.structurewindow = calculate_indexs_from_time(
                self.dictionary, self.timewindow)
        self.structuredataframe = \
            structure_evaluation(self.dictionary, self.structurewindow,
                                 self.timewindow, cutoff, firstexposure)
        # Windowed slice of the full sequence (window is 1 indexed, inclusive).
        self.sequence = self.completesequence[
            self.structurewindow[0] - 1: self.structurewindow[1]]

    def find_sequence_index(self, sequence):
        """Locate a subsequence within the complete sequence.

        :param sequence: the sequence of the part to look for (DNA or RNA)
        :type sequence: str
        :returns: ``[lowindex, highindex]`` (1 indexed, inclusive), or an
            error string when the part is not present
        """
        # First convert the string to RNA
        sequence = dna_to_rna(sequence)
        try:
            lowindex = self.completesequence.index(sequence) + 1
            highindex = lowindex - 1 + len(sequence)
        except ValueError:
            return 'Target sequence is not in the complete sequence'
        return [lowindex, highindex]

    def calculate_folding_rate(self, structurelist):
        """Fit a saturating exponential to the summed folding frequency.

        :param structurelist: structures whose frequencies are summed
        :type structurelist: list of str
        :returns: ``(popt, pcov)`` where ``popt == [maxvalue, rate]`` and
            ``pcov`` is the covariance of those estimates
        """
        # Shift times so the fit starts at t = 0.
        timearray = np.array(self.structuredataframe.index.tolist())
        timearray = timearray - min(timearray)
        structurearray = np.zeros(len(timearray))
        for structure in structurelist:
            try:
                structurearray += self.structuredataframe[structure].values
            except KeyError:
                # Structure never observed in this window: contributes zero.
                pass
        popt, pcov = curve_fit(rate_of_folding_func, timearray,
                               structurearray, p0=[0.5, 0.0001])
        return popt, pcov

    def maxium_frequency_of_structures(self, structurelist):
        """Return the maximum combined frequency of the given structures.

        Requires ``self.structuredataframe`` to be populated with the
        appropriate window. (The method name keeps the historical 'maxium'
        typo for backward compatibility; see the alias below.)

        :param structurelist: structures whose frequencies are summed
        :type structurelist: list of str
        :rtype: float
        """
        structurearray = np.zeros(self.structuredataframe.shape[0])
        for structure in structurelist:
            try:
                structurearray += self.structuredataframe[structure].values
            except KeyError:
                pass
        return structurearray.max()

    # Correctly spelled, backward-compatible alias for the method above.
    maximum_frequency_of_structures = maxium_frequency_of_structures

    def final_frequency_of_structures(self, structurelist):
        """Return the combined frequency of the structures in the final row.

        :param structurelist: structures whose frequencies are summed
        :type structurelist: list of str
        :rtype: float
        """
        return_frequency = 0.
        # .items() instead of .iteritems(): the latter was removed in
        # pandas 2.0 and .items() behaves identically on older versions.
        for structure, frequency in self.structuredataframe.iloc[-1].items():
            if structure in structurelist:
                return_frequency += frequency
        return return_frequency

    def average_frequency_of_structures(self, structurelist):
        """Return the time-averaged combined frequency of the structures.

        Integrates the summed frequency over time (trapezoid rule) and
        normalizes by the total elapsed time.

        :param structurelist: structures whose frequencies are summed
        :type structurelist: list of str
        :rtype: float
        """
        structurearray = np.zeros(self.structuredataframe.shape[0])
        for structure in structurelist:
            try:
                structurearray += self.structuredataframe[structure].values
            except KeyError:
                pass
        total_time = self.structuredataframe.index[-1] - \
            self.structuredataframe.index[0]
        return np.trapz(x=self.structuredataframe.index, y=structurearray) / \
            total_time

    def time_frequency_vector_of_structures(self, structurelist):
        """Return a (2, n) array of time and summed structure frequency.

        :param structurelist: structures whose frequencies are summed
        :type structurelist: list of str
        :returns: stacked array of (time, frequency)
        :rtype: np.ndarray
        """
        structurearray = np.zeros(self.structuredataframe.shape[0])
        timearray = np.array(self.structuredataframe.index)
        for structure in structurelist:
            try:
                structurearray += self.structuredataframe[structure].values
            except KeyError:
                pass
        return np.vstack((timearray, structurearray))

    def final_structures_seen(self, structurewindow, cutoff=0.0):
        """Regenerate data for ``structurewindow`` and return the final
        dot-bracket structures whose frequency exceeds ``cutoff``.

        :param structurewindow: start and stop location of structure
            (1 indexed)
        :param cutoff: minimum final frequency for a structure to be listed
        :type cutoff: float
        :rtype: list
        """
        return_list = []
        self.generate_data(structurewindow, cutoff=cutoff,
                           firstexposure=True)
        # .items() instead of the removed-in-pandas-2.0 .iteritems().
        for structure, frequency in self.structuredataframe.iloc[-1].items():
            if frequency > cutoff:
                return_list.append(structure)
        return return_list

    def __repr__(self):
        # NOTE(review): relies on self.sequence and self.structures being
        # populated; self.structures is only ever initialized to {} within
        # this class, so entries presumably come from elsewhere — confirm.
        # Crude sorting system: order structures by their recorded time.
        sortlist = []
        for structure in self.structures:
            sortlist.append([self.structures[structure][0], structure])
        sortlist.sort()
        output = ''
        output += '======================================================'
        output += '\n'
        output += self.sequence + ' ' + 'Time (ms)' + ' ' + 'Max Freq'
        for structure in sortlist:
            output += '\n'
            structure = structure[1]
            linetoprint = structure
            linetoprint += ' '
            linetoprint += str(self.structures[structure][0]).zfill(8)
            linetoprint += ' '
            linetoprint += str(self.structures[structure][1])
            output += linetoprint
        return output
def rate_of_folding_func(t, maxvalue, rate):
    """Saturating-exponential folding curve: maxvalue * (1 - exp(-rate * t)).

    :param t: time (scalar or numpy array)
    :param maxvalue: asymptotic value approached as t -> infinity
    :param rate: exponential rate constant
    :return: curve value(s) at t
    """
    return maxvalue * (1.0 - np.exp(-rate * t))
def calcuate_time_vector_for_structures(compresseddict, timewindow,
firstexposure=False,
windowstartstop=None):
mintime, maxtime | |
# Source: VicoWu/impala -- tests/authorization/test_owner_privileges.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Client tests to ensure object ownership functionality.
import grp
import pytest
from getpass import getuser
from os import getenv
from tests.common.sentry_cache_test_suite import SentryCacheTestSuite, TestObject
from tests.common.test_dimensions import create_uncompressed_text_dimension
from tests.common.skip import SkipIfHive3
# Sentry long polling frequency to make Sentry refresh not run.
SENTRY_LONG_POLLING_FREQUENCY_S = 3600
# Paths derived from the dev environment. IMPALA_HOME and
# IMPALA_CLUSTER_LOGS_DIR must be set; getenv returns None otherwise and the
# string concatenation below fails at import time.
SENTRY_CONFIG_DIR = getenv('IMPALA_HOME') + '/fe/src/test/resources/'
SENTRY_BASE_LOG_DIR = getenv('IMPALA_CLUSTER_LOGS_DIR') + "/sentry"
# sentry-site.xml variants: object ownership enabled (with grant), enabled
# without grant, and disabled.
SENTRY_CONFIG_FILE_OO = SENTRY_CONFIG_DIR + 'sentry-site_oo.xml'
SENTRY_CONFIG_FILE_OO_NOGRANT = SENTRY_CONFIG_DIR + 'sentry-site_oo_nogrant.xml'
SENTRY_CONFIG_FILE_NO_OO = SENTRY_CONFIG_DIR + 'sentry-site_no_oo.xml'
@SkipIfHive3.sentry_not_supported
class TestOwnerPrivileges(SentryCacheTestSuite):
@classmethod
def add_test_dimensions(cls):
super(TestOwnerPrivileges, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def teardown_class(self):
super(self)
  def setup_method(self, method):
    """Per-test setup: base suite setup, then create the admin role."""
    super(TestOwnerPrivileges, self).setup_method(method)
    self._setup_admin()
  def teardown_method(self, method):
    """Per-test teardown: drop the admin role, then run base teardown."""
    self._cleanup_admin()
    super(TestOwnerPrivileges, self).teardown_method(method)
def _setup_ownership_test(self):
self._cleanup_ownership_test()
# Base roles for enabling tests.
self.execute_query("create role owner_priv_test_oo_user1")
# Role for verifying grant.
self.execute_query("create role owner_priv_test_all_role")
# Role for verifying transfer to role.
self.execute_query("create role owner_priv_test_owner_role")
self.execute_query("grant role owner_priv_test_oo_user1 to group oo_group1")
self.execute_query("grant role owner_priv_test_owner_role to group oo_group1")
self.execute_query("grant create on server to owner_priv_test_oo_user1")
self.execute_query("grant select on database functional to owner_priv_test_oo_user1")
def _cleanup_ownership_test(self):
# Clean up the test artifacts.
try:
self.cleanup_db("owner_priv_db", sync_ddl=0)
except Exception:
# Ignore this if we can't show tables.
pass
# Clean up any old roles created by this test
for role_name in self.execute_query("show roles").data:
if "owner_priv_test" in role_name:
self.execute_query("drop role %s" % role_name)
@staticmethod
def count_user_privileges(result):
"""
This method returns a new list of privileges that only contain user privileges.
"""
# results should have the following columns
# principal_name, principal_type, scope, database, table, column, uri, privilege,
# grant_option, create_time
total = 0
for row in result.data:
col = row.split('\t')
if col[0] == 'USER':
total += 1
return total
def _validate_no_user_privileges(self, client, user, refresh_authorization):
if refresh_authorization: self.execute_query("refresh authorization")
result = self.user_query(client, "show grant user %s" % user, user=user)
return TestOwnerPrivileges.count_user_privileges(result) == 0
def _setup_admin(self):
# Admin for manipulation and cleaning up.
try:
self.execute_query("drop role owner_priv_admin")
except Exception:
# Ignore in case it wasn't created yet.
pass
self.execute_query("create role owner_priv_admin")
self.execute_query("grant all on server to owner_priv_admin with grant option")
group_name = grp.getgrnam(getuser()).gr_name
self.execute_query("grant role owner_priv_admin to group `%s`" % group_name)
  def _cleanup_admin(self):
    """Drop the admin role created by _setup_admin()."""
    self.execute_query("drop role owner_priv_admin")
@pytest.mark.execute_serially
@SentryCacheTestSuite.with_args(
impalad_args="--server_name=server1 --sentry_config={0} "
"--authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_OO),
catalogd_args="--sentry_config={0} --sentry_catalog_polling_frequency_s={1} "
"--authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_OO, SENTRY_LONG_POLLING_FREQUENCY_S),
sentry_config=SENTRY_CONFIG_FILE_OO,
sentry_log_dir="{0}/test_drop_table_if_exists".format(SENTRY_BASE_LOG_DIR))
def test_drop_if_exists(self, vector, unique_database):
try:
# we need to use a separate db for testing the drop database if exists case
# For others we can rely on using unique_database which gets cleaned up
# automatically
test_db = "test_drop_if_exists_db"
self._setup_drop_if_exist_test(unique_database, test_db)
self._execute_drop_if_exists(TestObject(TestObject.DATABASE, test_db))
self._execute_drop_if_exists(TestObject(TestObject.TABLE, unique_database +
".test_table"))
self._execute_drop_if_exists(TestObject(TestObject.VIEW, unique_database +
".test_view"))
self._execute_drop_if_exists(TestObject(TestObject.FUNCTION, unique_database +
".test_func()"))
finally:
self._cleanup_drop_if_exist_test(test_db)
def _setup_drop_if_exist_test(self, unique_database, test_db):
# Cleanup test_db, as the previous test run may have not been able to clean it up
self.execute_query("drop database if exists %s" % test_db)
# create a role which can create objects on this server
# for the purposes of this test oo_user1 will have privileges
# to create (and drop) objects while oo_user_2 will not have drop privileges
# oo_user_3 will have privileges to select on the server but not to drop
for role_name in self.execute_query("show roles").data:
if role_name in ['oo_user1_role', 'oo_user2_role', 'oo_user3_role']:
self.execute_query("drop role {0}".format(role_name))
self.execute_query("create role oo_user1_role")
self.execute_query("create role oo_user2_role")
self.execute_query("create role oo_user3_role")
# grant create permissions to oo_user_1 so that they can create database/table
# or functions
self.execute_query("grant create on server to oo_user1_role")
# grant select permissions to oo_user_3 so that they can use database/table
# or functions, but cannot drop them
self.execute_query("grant select on server to oo_user3_role")
# oo_user1 needs permissions to create a view based of functional.alltypes
self.execute_query("grant select on database functional to oo_user1_role")
# oo_user1 needs permissions to create a function based of libTestUdfs.so
self.execute_query("grant all on uri 'hdfs:///test-warehouse/libTestUdfs.so' to"
" oo_user1_role")
# We need to provide explicit privileges to drop functions
self.execute_query("grant drop on database {0} to "
"oo_user1_role".format(unique_database))
self.execute_query("grant role oo_user1_role to group oo_group1")
self.execute_query("grant role oo_user2_role to group oo_group2")
self.execute_query("grant role oo_user3_role to group oo_group3")
self.execute_query("refresh authorization")
def _cleanup_drop_if_exist_test(self, test_db):
self.execute_query("revoke role oo_user1_role from group oo_group1")
self.execute_query("revoke role oo_user2_role from group oo_group2")
self.execute_query("revoke role oo_user3_role from group oo_group3")
self.execute_query("drop role oo_user1_role")
self.execute_query("drop role oo_user2_role")
self.execute_query("drop role oo_user3_role")
self.execute_query("refresh authorization")
self.execute_query("drop database if exists {0}".format(test_db))
  def _execute_drop_if_exists(self, test_obj):
    """Run the drop-if-exists checks with, then without, qualified names."""
    self._execute_drop_if_exists_inner(test_obj, True)
    # "use db" does not apply to databases themselves, so the unqualified
    # variant only runs for tables, views and functions.
    if test_obj.obj_type != TestObject.DATABASE:
      self._execute_drop_if_exists_inner(test_obj, False)
def _execute_drop_if_exists_inner(self, test_obj, use_qualified_name):
"""
Executes a drop table if exists on a given object type and makes sure that
there is no authorization exception when the user has enough privileges. If the user
does not have correct privileges the test confirms that error is thrown
"""
self.oo_user1_impalad_client = self.create_impala_client()
self.oo_user2_impalad_client = self.create_impala_client()
self.oo_user3_impalad_client = self.create_impala_client()
fq_obj_name = test_obj.obj_name
obj_name = fq_obj_name if use_qualified_name else test_obj.table_name
if not use_qualified_name:
# Call "use db" and omit "db." in the queries of oo_user1 and oo_user3
# oo_user2 has no privileges for the database so it will always use qualified name
self.user_query(self.oo_user1_impalad_client, "use %s" % test_obj.db_name,
user="oo_user1")
self.user_query(self.oo_user3_impalad_client, "use %s" % test_obj.db_name,
user="oo_user3")
self.user_query(self.oo_user1_impalad_client, "create %s %s %s %s %s" %
(test_obj.obj_type, test_obj.obj_name, test_obj.table_def,
test_obj.view_select, test_obj.func_def), user="oo_user1")
access_error_msg = \
"does not have privileges to ANY" if test_obj.obj_type == TestObject.FUNCTION \
else "does not have privileges to access"
drop_error_msg = \
"does not have privileges to DROP" if test_obj.obj_type == TestObject.FUNCTION \
else "does not have privileges to execute 'DROP'"
# Try to DROP IF EXISTS an existing object without DROP privileges
self.user_query(self.oo_user2_impalad_client, "drop %s if exists %s" %
(test_obj.obj_type, fq_obj_name), user="oo_user2,",
error_msg=access_error_msg)
self.user_query(self.oo_user3_impalad_client, "drop %s if exists %s" %
(test_obj.obj_type, obj_name), user="oo_user3",
error_msg=drop_error_msg)
# Try to DROP (without IF EXISTS) an existing object
self.user_query(self.oo_user2_impalad_client, "drop %s %s" %
(test_obj.obj_type, fq_obj_name), user="oo_user2",
error_msg=drop_error_msg)
self.user_query(self.oo_user3_impalad_client, "drop %s %s" %
(test_obj.obj_type, obj_name), user="oo_user3",
error_msg=drop_error_msg)
# oo_user1 has the privileges to drop the object, so the next query drops it
self.user_query(self.oo_user1_impalad_client, "drop %s %s" %
(test_obj.obj_type, obj_name), user="oo_user1")
# a drop if exists on a non-existing object should not error out if the user has
# minimum set of privileges required to list those object types
self.user_query(self.oo_user1_impalad_client, "drop %s if exists %s" %
(test_obj.obj_type, obj_name), user="oo_user1")
self.user_query(self.oo_user3_impalad_client, "drop %s if exists %s" %
(test_obj.obj_type, obj_name), user="oo_user3")
# oo_user2 does not have privileges on this object and hence should receive a
# authorization error.
error_msg = \
"does not have privileges to ANY" if test_obj.obj_type == TestObject.FUNCTION \
else "does not have privileges to access"
self.user_query(self.oo_user2_impalad_client, "drop %s if exists %s" %
(test_obj.obj_type, fq_obj_name), user="oo_user2",
error_msg=error_msg)
@pytest.mark.execute_serially
@SentryCacheTestSuite.with_args(
impalad_args="--server_name=server1 --sentry_config={0} "
"--authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_OO),
catalogd_args="--sentry_config={0} --sentry_catalog_polling_frequency_s={1} "
"--authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_OO, SENTRY_LONG_POLLING_FREQUENCY_S),
sentry_config=SENTRY_CONFIG_FILE_OO,
sentry_log_dir="{0}/test_owner_privileges_with_grant".format(SENTRY_BASE_LOG_DIR))
def test_owner_privileges_with_grant(self, vector, unique_database):
"""Tests owner privileges with grant on database, table, and view.
- refresh_authorization=True: With Sentry refresh to make sure privileges are really
stored in Sentry.
- refresh_authorization=False: No Sentry refresh to make sure user can use owner
privileges right away without a Sentry refresh."""
for refresh in [True, False]:
try:
self._setup_ownership_test()
self._execute_owner_privilege_tests(TestObject(TestObject.DATABASE,
"owner_priv_db",
grant=True),
refresh_authorization=refresh)
self._execute_owner_privilege_tests(TestObject(TestObject.TABLE,
unique_database +
".owner_priv_tbl",
grant=True),
refresh_authorization=refresh)
self._execute_owner_privilege_tests(TestObject(TestObject.VIEW,
unique_database +
".owner_priv_view",
grant=True),
refresh_authorization=refresh)
finally:
self._cleanup_ownership_test()
def _execute_owner_privilege_tests(self, test_obj, refresh_authorization):
"""
Executes all the statements required to validate owner privileges work correctly
for a specific database, table, or view.
"""
# Create object and ensure oo_user1 gets owner privileges.
self.oo_user1_impalad_client = self.create_impala_client()
# oo_user2 is only used for transferring ownership.
self.oo_user2_impalad_client = self.create_impala_client()
self.user_query(self.oo_user1_impalad_client, "create %s if not exists %s %s %s" %
(test_obj.obj_type, test_obj.obj_name, test_obj.table_def,
test_obj.view_select), user="oo_user1")
self.validate_privileges(self.oo_user1_impalad_client, "show grant user oo_user1",
test_obj, user="oo_user1",
refresh_authorization=refresh_authorization)
# Ensure grant works.
| |
= spark_session.createDataFrame(
data=pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
)
batch_identifiers = {
"pipeline_stage_name": "core_processing",
"airflow_run_id": 1234567890,
"custom_key_0": "custom_value_0",
"i_am_illegal_key": "i_am_illegal_value",
}
# Insure that keys in batch_identifiers that are not among batch_identifiers declared in
# configuration are not accepted. In this test, all legal keys plus a single illegal key are present.
batch_request: dict = {
"datasource_name": datasource_with_runtime_data_connector_and_sparkdf_execution_engine.name,
"data_connector_name": "test_runtime_data_connector",
"data_asset_name": "IN_MEMORY_DATA_ASSET",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = datasource_with_runtime_data_connector_and_sparkdf_execution_engine.get_batch_list_from_batch_request(
batch_request=batch_request
)
def test_sparkdf_execution_engine_batch_identifiers_error_one_illegal_key(
    datasource_with_runtime_data_connector_and_sparkdf_execution_engine, spark_session
):
    """A single batch-identifier key not declared in the connector config
    must be rejected with DataConnectorError."""
    datasource = datasource_with_runtime_data_connector_and_sparkdf_execution_engine
    spark_df = spark_session.createDataFrame(
        data=pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    )
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="IN_MEMORY_DATA_ASSET",
        runtime_parameters={"batch_data": spark_df},
        batch_identifiers={"unknown_key": "some_value"},
    )
    with pytest.raises(ge_exceptions.DataConnectorError):
        # noinspection PyUnusedLocal
        batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
            batch_request=runtime_batch_request
        )
def test_sparkdf_execution_engine_set_data_asset_name_for_runtime_data(
    datasource_with_runtime_data_connector_and_sparkdf_execution_engine, spark_session
):
    """The data_asset_name supplied in the request ends up on the batch."""
    datasource = datasource_with_runtime_data_connector_and_sparkdf_execution_engine
    spark_df = spark_session.createDataFrame(
        data=pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    )
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_runtime_data_asset",
        runtime_parameters={"batch_data": spark_df},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
        },
    )
    batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
        batch_request=runtime_batch_request
    )
    assert batch_list[0].batch_definition.data_asset_name == "my_runtime_data_asset"
def test_sparkdf_execution_engine_get_available_data_asset_names(
    datasource_with_runtime_data_connector_and_sparkdf_execution_engine, spark_session
):
    """A RuntimeDataConnector starts out with no named data assets."""
    datasource = datasource_with_runtime_data_connector_and_sparkdf_execution_engine
    available_data_asset_names: Dict[
        List[str]
    ] = datasource.get_available_data_asset_names()
    assert available_data_asset_names == {"test_runtime_data_connector": []}
def test_sparkdf_execution_engine_get_batch_definition_list_from_batch_request_length_one(
    datasource_with_runtime_data_connector_and_sparkdf_execution_engine, spark_session
):
    """A runtime request yields exactly one batch backed by SparkDFBatchData."""
    datasource = datasource_with_runtime_data_connector_and_sparkdf_execution_engine
    spark_df = spark_session.createDataFrame(
        data=pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    )
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_data_asset",
        runtime_parameters={"batch_data": spark_df},
        batch_identifiers={"airflow_run_id": 1234567890},
    )
    batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
        batch_request=runtime_batch_request
    )
    assert len(batch_list) == 1
    only_batch = batch_list[0]
    assert only_batch.batch_spec is not None
    assert only_batch.batch_definition["data_asset_name"] == "my_data_asset"
    assert isinstance(only_batch.data, SparkDFBatchData)
    assert len(only_batch.data.dataframe.columns) == 2
def test_sparkdf_execution_engine_get_batch_definitions_and_get_batch_basics(
    datasource_with_runtime_data_connector_and_sparkdf_execution_engine, spark_session
):
    """Available batch definitions and direct get-batch both behave sanely."""
    datasource = datasource_with_runtime_data_connector_and_sparkdf_execution_engine
    spark_df = spark_session.createDataFrame(
        data=pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    )
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="test_asset_1",
        runtime_parameters={"batch_data": spark_df},
        batch_identifiers={"airflow_run_id": 1234567890},
    )
    batch_definitions = datasource.get_available_batch_definitions(
        batch_request=runtime_batch_request
    )
    assert len(batch_definitions) == 1
    # get_batch_from_batch_definition does not go through a batch request, so
    # the resulting batch carries an empty batch_request.
    pandas_df: pd.DataFrame = pd.DataFrame({"x": range(10), "y": range(10)})
    batch: Batch = datasource.get_batch_from_batch_definition(
        batch_definition=BatchDefinition(
            "my_datasource",
            "_pipeline",
            "_pipeline",
            batch_identifiers=IDDict({"some_random_id": 1}),
        ),
        batch_data=pandas_df,
    )
    assert batch.batch_request == {}
####################################
# Tests with data passed in as query
####################################
@pytest.fixture
def datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine(db_file, sa):
    """Datasource fixture: a SqlAlchemyExecutionEngine over the test SQLite
    database plus a RuntimeDataConnector declaring three legal
    batch-identifier keys (pipeline_stage_name, airflow_run_id, custom_key_0).
    """
    # The YAML below is embedded in an f-string, so {db_file} is interpolated
    # before parsing; the indentation is part of the literal and significant.
    basic_datasource: Datasource = instantiate_class_from_config(
        yaml.load(
            f"""
    class_name: Datasource
    execution_engine:
        class_name: SqlAlchemyExecutionEngine
        connection_string: sqlite:///{db_file}
    data_connectors:
        test_runtime_data_connector:
            module_name: great_expectations.datasource.data_connector
            class_name: RuntimeDataConnector
            batch_identifiers:
                - pipeline_stage_name
                - airflow_run_id
                - custom_key_0
    """,
        ),
        runtime_environment={"name": "my_datasource"},
        config_defaults={"module_name": "great_expectations.datasource"},
    )
    return basic_datasource
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_self_check(
    db_file, datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """self_check() reports the engine config and an empty runtime connector."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    expected_report = {
        "execution_engine": {
            "connection_string": f"sqlite:///{db_file}",
            "module_name": "great_expectations.execution_engine.sqlalchemy_execution_engine",
            "class_name": "SqlAlchemyExecutionEngine",
        },
        "data_connectors": {
            "count": 1,
            "test_runtime_data_connector": {
                "class_name": "RuntimeDataConnector",
                "data_asset_count": 0,
                "example_data_asset_names": [],
                "data_assets": {},
                "note": "RuntimeDataConnector will not have data_asset_names until they are passed in through RuntimeBatchRequest",
                "unmatched_data_reference_count": 0,
                "example_unmatched_data_references": [],
            },
        },
    }
    assert datasource.self_check() == expected_report
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_unknown_datasource(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """A request naming a non-existent datasource raises TypeError."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    with pytest.raises(TypeError):
        # noinspection PyUnusedLocal
        batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name="non_existent_datasource",
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"query": "SELECT * FROM table_full__I;"},
                batch_identifiers=None,
            )
        )
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_unknown_dataconnector(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """A request naming a non-existent data connector raises TypeError."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    with pytest.raises(TypeError):
        # noinspection PyUnusedLocal
        batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource.name,
                data_connector_name="non_existent_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"query": "SELECT * FROM table_full__I;"},
                batch_identifiers=None,
            )
        )
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_no_batch_identifiers(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """Omitting batch_identifiers when runtime data is given raises TypeError."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    with pytest.raises(TypeError):
        # noinspection PyUnusedLocal
        batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"query": "SELECT * FROM table_full__I;"},
                batch_identifiers=None,
            )
        )
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_illegal_batch_identifiers(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """An empty (falsy) batch_identifiers dict with runtime data raises TypeError."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    with pytest.raises(TypeError):
        # noinspection PyUnusedLocal
        batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"query": "SELECT * FROM table_full__I;"},
                batch_identifiers={},
            )
        )
def test_batch_identifiers_and_batch_identifiers_success_all_keys_present_with_query(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """All declared identifier keys are accepted for a query-backed batch."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="TEMP_QUERY_DATA_ASSET",
        runtime_parameters={"query": "SELECT * FROM table_full__I;"},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
        },
    )
    batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
        batch_request=runtime_batch_request
    )
    assert len(batch_list[0].head().columns) == 11
def test_batch_identifiers_and_batch_identifiers_success_no_temp_table(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """A query-backed batch also works with create_temp_table disabled."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="TEMP_QUERY_DATA_ASSET",
        runtime_parameters={"query": "SELECT * FROM table_full__I"},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
        },
        batch_spec_passthrough={"create_temp_table": False},
    )
    batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
        batch_request=runtime_batch_request
    )
    assert len(batch_list[0].head().columns) == 11
def test_batch_identifiers_and_batch_identifiers_error_illegal_key_with_query_mostly_legal_keys(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """All legal keys plus one undeclared key must still be rejected."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="TEMP_QUERY_DATA_ASSET",
        runtime_parameters={"query": "SELECT * FROM table_full__I"},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
            "i_am_illegal_key": "i_am_illegal_value",
        },
    )
    with pytest.raises(ge_exceptions.DataConnectorError):
        # noinspection PyUnusedLocal
        batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
            batch_request=runtime_batch_request
        )
def test_batch_identifiers_and_batch_identifiers_error_illegal_key_with_query_single_illegal_key(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """A single undeclared batch-identifier key must be rejected."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="TEMP_QUERY_DATA_ASSET",
        runtime_parameters={"query": "SELECT * FROM table_full__I;"},
        batch_identifiers={"unknown_key": "some_value"},
    )
    with pytest.raises(ge_exceptions.DataConnectorError):
        # noinspection PyUnusedLocal
        batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
            batch_request=runtime_batch_request
        )
def test_set_data_asset_name_for_runtime_query_data(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """The data_asset_name supplied for a query batch ends up on the batch."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_runtime_data_asset",
        runtime_parameters={"query": "SELECT * FROM table_full__I;"},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
        },
    )
    batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
        batch_request=runtime_batch_request
    )
    assert batch_list[0].batch_definition.data_asset_name == "my_runtime_data_asset"
def test_get_batch_definition_list_from_batch_request_length_one_from_query(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """A query-backed runtime request yields exactly one batch whose data is a
    sqlalchemy selectable.

    Batches themselves are harder to compare because batch_markers uniquely
    identify the data, so only structural properties are asserted.
    """
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_data_asset",
        runtime_parameters={"query": "SELECT * FROM table_full__I;"},
        batch_identifiers={"airflow_run_id": 1234567890},
    )
    batch_list: List[Batch] = datasource.get_batch_list_from_batch_request(
        batch_request=runtime_batch_request
    )
    assert len(batch_list) == 1
    only_batch = batch_list[0]
    assert only_batch.batch_spec is not None
    assert only_batch.batch_definition["data_asset_name"] == "my_data_asset"
    assert isinstance(only_batch.data.selectable, sqlalchemy.Table)
def test_get_batch_with_pipeline_style_batch_request_missing_batch_identifiers_error(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """Passing batch_identifiers=None must raise a TypeError."""
    # Interacting with the database using a raw query.
    test_query: str = "SELECT * FROM table_full__I;"
    data_connector_name: str = "test_runtime_data_connector"
    data_asset_name: str = "test_asset_1"
    request_args: dict = dict(
        datasource_name=datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.name,
        data_connector_name=data_connector_name,
        data_asset_name=data_asset_name,
        runtime_parameters={"query": test_query},
        batch_identifiers=None,
    )
    with pytest.raises(TypeError):
        # noinspection PyUnusedLocal
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(**request_args)
        )
def test_get_batch_definitions_and_get_batch_basics_from_query(
datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
# interacting with the database using query
test_query: str = "SELECT * FROM table_full__I;"
data_connector_name: str = "test_runtime_data_connector"
data_asset_name: str = "test_asset_1"
batch_request: dict = {
"datasource_name": datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.name,
"data_connector_name": data_connector_name,
"data_asset_name": data_asset_name,
"runtime_parameters": {
"query": test_query,
},
"batch_identifiers": {
"airflow_run_id": 1234567890,
},
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
assert (
len(
datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.get_available_batch_definitions(
batch_request=batch_request
)
)
== | |
#!/usr/bin/env python
# coding: utf-8
"""Notebook export: building an input dictionary for pyne.spatialsolver."""
# # Spatial Solver Tutorial
# Spatialsolver is a pyne module that contains seven neutron transport equation solvers.
# The neutron transport equation is a balance statement that conserves neutrons.
# In[2]:
import pyne
import pyne.spatialsolver
import numpy as np
# In[3]:
# NOTE(review): 'Name' and 'Age' look like placeholder demo values from the
# notebook; verify whether pyne.spatialsolver actually reads either key.
input_dict = {'Name': 'Jane', 'Age': 27};
# The spatial solver module takes in a dictionary that contains all of the input information required to run the solvers. There are many entries to allow a high degree of customization, not all of which are required. To find which entries are required, see the spatial solver documentation in the python api.
# In[4]:
input_dict['solver'] = "AHOTN"
# There are many different ways to solve the neutron transport equations. The spatial solver method supports seven different methods, described in the theory manual. The 'solver' key allows you to select which family of these solvers you would like to use, out of the following three options.
# 1. "AHOTN" - Arbitrarily higher order transport method
# 2. "DGFEM" - Discontinuous Galerkin Finite Element Method
# 3. "SCTSTEP" - SCT Step algorithm similar to Duo’s SCT algorithm implemented in three dimensional Cartesian geometry.
# In[5]:
input_dict['solver_type'] = "LN"
# Each family of solvers except for SCTSTEP offers a number of different choices for the specific way the neutron transport equation is solved. For full descriptions of each, consult the theory manual.
# For AHOTN, the supported solver_type's are:
# 1. "LN" - Arbitrarily higher order transport method of the nodal type linear-nodal method
# 2. "LL" - Arbitrarily higher order transport method of the nodal type linear-linear method
# 3. "NEFD" - Arbitrarily higher order transport method of the nodal type that makes use of the unknown nodal flux moments (NEFD algorithm).
#
# DGFEM
# 1. "LD" - The Discontinuous Galerkin Finite Element Method (DGFEM) with a linear discontinuous (LD) approximation for angular flux.
# 2. "DENSE" - The Discontinuous Galerkin Finite Element Method (DGFEM) that uses dense lagrange polynomials
# 3. "LAGRANGE" - The Discontinuous Galerkin Finite Element Method (DGFEM) that use lagrange polynomials
#
# SCTSTEP
#
# SCT Step algorithm similar to Duo’s SCT algorithm implemented in three dimensional Cartesian geometry.
# In[6]:
input_dict['spatial_order'] = 1
# The Spatial expansion order is the expansion order of the spatial moment. It is also known as lambda, and for all AHOTN solvers it must be 0, 1 or 2.
# In[7]:
input_dict['angular_quadrature_order'] = 4
# The angular quadrature order is the number of angles to be used per octant.
# For N sets of angles, there will be (N * (N + 2) / 8) ordinates per octant.
# The quadrature order may only be an even number!
# In[8]:
input_dict['angular_quadrature_type'] = 1
# The quadrature type is the type of quadrature scheme the code uses.
# The possibilities are:
#
# 1 - TWOTRAN
# 2 - EQN
# 3 - Read-in
# In[9]:
input_dict['nodes_xyz'] = [4,4,4]
# 'nodes_xyz' is the number of node's in the x y and z directions. It should be stored in a 1 by 3 array, with the following entries:<br />
# [0] = number of nodes in x direction (integer)<br />
# [1] = number of nodes in y direction (integer)<br />
# [2] = number of nodes in z direction (integer)
# In[10]:
input_dict['num_groups'] = 1
# 'num_groups' specifies the number of material groups you are using in the material id and cross section files found in later entries.
# In[11]:
input_dict['num_materials'] = 1
# 'num_materials' is the number of different materials used in the mesh ('material_id').
# In[12]:
input_dict['x_cells_widths'] = [0.25, 0.25, 0.25, 0.25]
# In[13]:
input_dict['y_cells_widths'] = [0.25, 0.25, 0.25, 0.25]
# In[14]:
input_dict['z_cells_widths'] = [0.25, 0.25, 0.25, 0.25]
# 'x_cells_widths', 'y_cells_widths', and 'z_cells_widths' are the cell widths for each cell in the x, y and z direction. Every unique cell cannot be a unique size, adjacent edges all must match up. Therefore, each cell width you specify is the width of all the cells in the plane orthogonal to the axis of the cell you specified. For example, if you selected 1 to be the first entry in x_cell_width, all of the cells with x dimension 1 would be 1 unit wide.
#
# This entry takes an array, which must be 1 by the number of nodes in that specific axis, and have all entries filled.
# In[15]:
input_dict['x_boundry_conditions'] = [2, 2]
# In[16]:
input_dict['y_boundry_conditions'] = [2, 2]
# In[17]:
input_dict['z_boundry_conditions'] = [2, 2]
# 'x_boundry_conditions', 'y_boundry_conditions', and 'z_boundry_conditions' are the boundary conditions for each face of the cubic mesh. The entries are as follows: x is the array set to the key 'x_boundry_conditions', y to 'y_boundry_conditions' and z to 'z_boundry_conditions'.
#
# x[0] = xsbc
# x[1] = xebc
# y[0] = ysbc
# y[1] = yebc
# z[0] = zsbc
# z[1] = zebc
#
# The following are supported boundary conditions:
# 1. 0 - vacuum
# 2. 1 - reflective
# 3. 2 - fixed inflow
# In[18]:
input_dict['material_id'] = [ [ [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1] ],
  [ [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1] ],
  [ [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1] ],
  [ [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1] ] ]
# 'material_id' is an array containing the material information for the cubic mesh for which the neutron transport method is to be solved.
# note: Dimensions must match cells such that there is one material number
# in each spatial cell. The cells are ordered as x, y, z.
# In[19]:
input_dict['quadrature_file'] = 'quad_file'
# 'quad_file' is the quadrature file. It is only used if the quadrature_type is 2; in this case it is a required entry. If your quadrature_type is not 2, just create a blank file to pass in for this entry. See formatting notes in the Spatial Solver Python API.
# In[20]:
input_dict['xs_file'] = 'xs'
# 'xs_file' is the file containing the cross sectional data for the materials in your mesh ('material_id'). They should be formatted similar to the following 2 material example xs file:
#
# ! Cross section file
# ! Material # 1
# ! Group #1
# 5.894 ! Total XS
# 1.8 ! Scattering matrix
# ! Material # 2
# ! Group #1
# 1.237 ! Total XS
# 0.12 ! Scattering matrix
# In[21]:
input_dict['source_input_file'] = 'src_4.dat'
# Note: see input file formatting notes in the Source File Formatting section.
# In[22]:
input_dict['bc_input_file'] = 'bc_4.dat'
# 'bc_input_file' is the boundary condition input file. It contains the boundary neutron inflow for any faces of the mesh with the boundary condition specified as 2 (fixed inflow). See the Boundary Condition formatting notes in the Spatial Solver Python API for more information.
# In[23]:
input_dict['flux_output_file'] = 'phi_4.ahot'
# 'flux_output_file' is the output file for the angular flux to be printed to.
# In[24]:
input_dict['convergence_criterion'] = 1.e-12
# The solution is considered converged and the calculation completes when the flux
# in each cell at the current iteration is within "convergence_criterion" of the
# previous iterate. This is generally the relative difference, but in cases of
# very small flux values the absolute difference is used instead (see the
# Convergence Tolerance entry below).
# In[25]:
input_dict['max_iterations'] = 6000
# 'max_iterations' is the maximum number of times the mesh should be swept.
# Note: if this number of iterations is reached before the convergence criterion
# is satisfied, the calculation will terminate and report the current flux
# estimate.
# In[26]:
input_dict['moments_converged'] = 0
# Moments converged is the number of moments that should be converged upon for each quadrature in the
# solution space. Value for moments converged must be in range [0, spatial_order_in].
# NOTE(review): the key below is spelled 'converge_tolerence' while the prose
# that follows calls it "converge_tolerance" -- verify which spelling
# pyne.spatialsolver actually reads before "fixing" either one.
input_dict['converge_tolerence'] = 1.e-10
# <pre>Converge tolerance is the tolerance that determines how the difference between
# flux iterates (df) that is used to determine convergence will be calculated.
# df is calculated as follows:
# f = current flux value
# ct = convergence tolerance (value for this key, "converge_tolerance")
# f1 = flux value from the previous iteration
# If f1 > ct:
# df = absolute(f - f1) / f1
# Else
# | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '0.5'
try:
import fcntl
import sys, os, time
from random import randint
from platform import system
import logging.handlers
from threading import Thread
from threading import Timer
from subprocess import Popen, PIPE
from signal import SIGINT,signal
import ConfigParser
import binascii
from Crypto import Random
from Crypto.Cipher import AES
import base64
# from gpiozero import LED,Button,OutputDevice
# Configure syslog environment
logging.basicConfig(level=logging.INFO)
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
dlogger = logging.getLogger('WifiKnockD')
dlogger.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
dlogger.addHandler(handler)
except Exception as e:
print "Cannot import required library! (pip install -r requirements.txt)! %s" %e.message
exit()
# Default system variables
ap_channel = 9
ap_essid = "RogueAP"
ap_security = "WPA"
ap_key = "matrixman"
ap_hidden = False
verbose = False
debug = False
gpiobase = '/sys/class/gpio'
capfile = '/tmp/wifiknockd.cap' ## directory and file name to save captured packets
conffile='/etc/wifiknockd.conf'
count = 20 ## Default number of packets to send
seq = randint(1, 4096)
seqlist = []
portlist = []
gpiolist = []
payload = ''
payload_ie = 221
frequency = ''
DN = open(os.devnull, 'w')
closing = 0
intfmon=''
ap_on = 0
action_timeout = 0
msg_timeout = ""
# Broadcast, broadcast, IPv6mcast, spanning tree, spanning tree, multicast, broadcast
broadcast = 'FF:FF:FF:FF:FF:FF' ## Destination address for beacons and probes
ignore = [broadcast, '00:00:00:00:00:00', '33:33:00:', '33:33:FF:', '01:80:C2:00:00:00', '01:00:5E:']
def PacketHandler(pkt):
    """Process one sniffed 802.11 probe request carrying a knock command.

    The command travels in the probe's SSID field (optionally AES-encrypted
    and base64-encoded) formatted as ``cmd~seq~args[~timeout]``, e.g.
    ``ap1~0545~rogueap,wpa,matrixman,4,0``. Dispatches to StartAp/StopAp/
    ExecProc/OpenPorts/ClosePorts/GpioOn/GpioOff and optionally answers
    with an (encrypted) ACK in forged probe responses.
    """
    global seqlist, seq, mac_whitelist, msg_timeout
    # Bug fix: action_timeout is assigned further down, which makes it local
    # to this function; without this default every dispatch call raised
    # UnboundLocalError whenever the payload carried no timeout field.
    action_timeout = 0
    # Filter broadcast and multicast
    bssid = pkt.addr3.upper()
    # Bug fix: the ignore list holds full addresses *and* OUI prefixes
    # (e.g. '33:33:FF:'); a plain membership test could never match the
    # prefix entries, so multicast frames slipped through.
    if any(bssid.startswith(entry) for entry in ignore):
        if debug: dlogger.debug('MAC adress filtered by ignored list (%s)!' %bssid)
        return
    # Check for whitelist MACs
    sta = pkt.addr2.upper()
    if len(mac_whitelist) > 1:
        if sta not in mac_whitelist:
            if debug: dlogger.debug('MAC Whitelist active, will not accept packet (%s)!' %sta)
            return
    # Check if probe request is directed to us
    if bssid != listen_bssid:
        if debug: dlogger.debug('Wrong destination BSSID (%s), will only accept: %s' %(bssid,listen_bssid))
        return
    # Check if packet is repeated or replayed (802.11 sequence control field)
    seq = pkt.SC
    if seq in seqlist:
        if debug: dlogger.debug("Repeated or replayed packet from %s." %sta)
        return
    seqlist.append(seq)
    dlogger.info("RX Probe Request from STA %s." %sta)
    # Check if encryption is enabled
    # typical data: ap1~0545~rogueap,wpa,matrixman,4,0
    ssid = pkt.info
    if use_encryption:
        if debug: dlogger.debug("Encrypted payload: %s" %ssid)
        ssid = cipher.decrypt(ssid)
        if debug: dlogger.debug("Decrypted payload: %s" %ssid)
    command = ssid[:3]
    value = []
    # Security check if same SC is inside payload (anti-forgery/replay)
    seqck = ssid.split("~")[1]
    rc = ""
    if seqck != str(seq):
        if verbose: dlogger.info('Wrong forged packet detected! (%s,%s)' %(seq, seqck))
        return
    # Max action timeout received?
    if len(ssid.split("~")) > 3:
        req_timeout = ssid.split("~")[3]
        if len(req_timeout) > 0:
            try:
                if float(req_timeout).is_integer():
                    action_timeout = int(req_timeout)
                    msg_timeout = "for %d secs" %(action_timeout)
            except:
                pass
    # Check for known commands inside payload
    if command == "ap1":
        value = ssid.split("~")[2].split(",")
        StartAp(value,action_timeout)
    elif command == "ap0":
        StopAp()
    elif command == "exe":
        value = ssid.split("~")[2].split(" ")
        rc = ExecProc(value,action_timeout)
    elif command == "pr1":
        value = ssid.split("~")[2].split(",")
        OpenPorts(value,action_timeout)
    elif command == "pr0":
        value = ssid.split("~")[2].split(",")
        ClosePorts(value)
    elif command == "sw1":
        value = ssid.split("~")[2].split(",")
        GpioOn(value,action_timeout)
    elif command == "sw0":
        value = ssid.split("~")[2].split(",")
        GpioOff(value)
    else:
        if verbose: logging.error('Wrong command: %s' %ssid)
        return
    if use_ack:
        # Let the requested action settle before acknowledging.
        time.sleep(count*0.1)
        apssid = 'ACK~'+ seqck + '~' + str(rc) + '~' + str(action_timeout)
        if use_encryption: # Cipher and encode ssid
            # Pad to the AES block size (16) before encrypting.
            padd = len(apssid) % 16
            if padd > 0: apssid = apssid + (' ' * (16 - padd))
            apssid = base64.b64encode(cipher.encrypt(apssid))
        dlogger.info('Sendig ACK in %d probe responses to MAC:%s' %(count, sta))
        sdot11.proberesp(sta, count, apssid, listen_bssid, payload)
    dlogger.info('Waiting for new actions...')
    if verbose: wrpcap(capfile, pkt, append=True)
def GpioOn(value,timeout):
    """Switch the given GPIO pins on, optionally scheduling auto-off.

    value   -- list of GPIO identifiers from the knock payload.
    timeout -- seconds until GpioOff is scheduled (0 = stay on).
    NOTE(review): the gpiozero import is commented out at the top of the
    file, so Button is undefined and the except path always fires -- confirm.
    """
    global gpiolist, msg_timeout
    gpiolist.extend(value)
    for gpio in value:
        try:
            # Bug fix: the original passed the whole list to Button() and
            # clobbered the loop variable, so the log printed the object.
            pin = Button(gpio)
            pin.on()
            dlogger.info('Setting GPIO %s ON %s' %(gpio,msg_timeout))
        except:
            dlogger.info('Cannot set GPIO %s ON!' %gpio)
    msg_timeout = ""
    if timeout > 0:
        # Bug fix: Timer is imported directly from threading; the module
        # name 'threading' itself is never bound, so threading.Timer raised
        # NameError.
        t = Timer(timeout, GpioOff, [value])
        t.start()
def GpioOff(value):
    """Switch the given GPIO pins off and drop them from the active list."""
    global gpiolist
    # Forget the pins we are about to switch off.
    gpiolist=[x for x in gpiolist if x not in value]
    for gpio in value:
        try:
            # Bug fix: drive the current pin, not the whole list, and keep
            # the loop variable intact for the log messages below.
            pin = Button(gpio)
            pin.off()
            dlogger.info('Setting GPIO %s OFF!' %gpio)
        except:
            dlogger.info('Cannot set GPIO %s OFF!' %gpio)
def OpenPorts(value,timeout):
    """Open (un-DROP) the given ports, optionally re-closing after timeout.

    value -- list of port strings; a 'U'/'u' anywhere in the entry marks
             UDP, anything else is treated as TCP.
    """
    global portlist, msg_timeout
    # Remove the now-open ports from the list of closed ports.
    portlist=[x for x in portlist if x not in value]
    for port in value:
        try:
            if "U" in port.upper():
                port = ''.join(filter(str.isdigit, port))
                # Bug fix: PIPE is imported directly from subprocess; the
                # module name 'subprocess' itself is never imported, so
                # subprocess.PIPE raised NameError.
                proc = Popen(['iptables','-DINPUT','-pudp','--dport', port,'-jDROP'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
                dlogger.info('Requested to open UDP port %s %s' %(port, msg_timeout))
            else:
                proc = Popen(['iptables','-DINPUT','-ptcp','--dport', port,'-jDROP'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
                dlogger.info('Requested to open TCP port %s %s' %(port, msg_timeout))
        except OSError as e:
            dlogger.info('Could not open port: %s!' %port)
            os.kill(os.getpid(), SIGINT)
    msg_timeout = ""
    if timeout > 0:
        # Bug fix: Timer is imported directly; 'threading' is not bound.
        t = Timer(timeout, ClosePorts, [value])
        t.start()
def ClosePorts(value):
    """Close the given ports by appending iptables DROP rules and record
    them in the global portlist (so closeall can undo them on exit)."""
    global portlist
    for port in value:
        if not port in portlist:
            portlist.append(port)
        try:
            if "U" in port.upper():
                port = ''.join(filter(str.isdigit, port))
                # Bug fix: use the directly-imported PIPE; the module name
                # 'subprocess' is never imported, so subprocess.PIPE raised
                # NameError.
                proc = Popen(['iptables','-AINPUT','-pudp','--dport', str(port),'-jDROP'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
                dlogger.info('Closing UDP port %s' %port)
            else:
                proc = Popen(['iptables','-AINPUT','-ptcp','--dport', port,'-jDROP'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
                dlogger.info('Closing TCP port %s' %port)
        except OSError as e:
            dlogger.info('Could not close port %s!' %port)
            os.kill(os.getpid(), SIGINT)
def ExecProc(value,timeout):
    """Execute an arbitrary command (argv list) and report its status.

    Returns the child's return code, or -1 when it is still running after
    the 1 s grace period or could not be started at all.
    NOTE(review): 'timeout' is accepted but never used here, unlike the
    other actions -- confirm whether a kill-after-timeout was intended.
    """
    global msg_timeout
    try:
        # Bug fix: PIPE is imported directly; 'subprocess' is not bound,
        # so subprocess.PIPE raised NameError.
        proc = Popen(value, shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        time.sleep(1)
        rc = proc.poll()
        dlogger.info('Executing command %s: %s(return code:%s)' %(msg_timeout,' '.join(value), str(rc)))
        msg_timeout = ""
        # Bug fix: poll() returns None while the child is still running,
        # and int(None) raised TypeError; report -1 ("unknown") instead.
        return int(rc) if rc is not None else -1
    except OSError as e:
        dlogger.info('Could not execute: %s!' %value)
        os.kill(os.getpid(), SIGINT)
        msg_timeout = ""
        return -1
def StartAp(value,timeout):
    """Write a hostapd config and bring the rogue access point up.

    value   -- [essid, security, key, channel, hidden] from the knock
               payload; an incomplete list falls back to the defaults.
    timeout -- seconds after which StopAp is scheduled (0 = keep running).
    Returns False on failure, None otherwise (AP left running).
    """
    global ap_on, msg_timeout
    hostapdconf = '/tmp/hostapd.conf'
    if debug: dlogger.debug("Updating hostapd config file: %s" %hostapdconf)
    config=[]
    config.append('interface='+ap_iface+'\n')
    config.append('driver=nl80211'+'\n')
    config.append('hw_mode=g'+'\n')
    config.append('auth_algs=3'+'\n')
    config.append('ctrl_interface=/var/run/hostapd'+'\n')
    if len(value) > 3:
        dlogger.info("Requested to switch AP ON %s: %s" %(msg_timeout,value))
        config.append('ssid='+value[0]+'\n')
        config.append('channel='+value[3]+'\n')
        if value[1][:3].upper() == "WPA":
            config.append('wpa=2'+'\n')
            config.append('wpa_key_mgmt=WPA-PSK'+'\n')
            config.append('rsn_pairwise=CCMP TKIP'+'\n')
            config.append('wpa_passphrase='+value[2]+'\n')
        if value[4] == "1": ## request for hidden ssid
            # Bug fix: 'apppend' typo raised AttributeError on hidden SSIDs.
            config.append('ignore_broadcast_ssid=1'+'\n')
    else:
        dlogger.info("Requested to switch default AP ON %s:%s,%s,%s,%s,%s" %(msg_timeout,ap_essid,ap_security,ap_key,ap_channel,ap_hidden))
        config.append('ssid='+ap_essid+'\n')
        # Bug fix: ap_channel defaults to the integer 9; concatenating it
        # directly onto a str raised TypeError.
        config.append('channel='+str(ap_channel)+'\n')
        if ap_security.upper() == "WPA":
            config.append('wpa=2'+'\n')
            config.append('wpa_key_mgmt=WPA-PSK'+'\n')
            config.append('rsn_pairwise=CCMP TKIP'+'\n')
            config.append('wpa_passphrase='+ap_key+'\n')
        # NOTE(review): ap_hidden defaults to the bool False, so this "1"
        # comparison only matches a string loaded from the config file --
        # confirm the intended type.
        if ap_hidden == "1": ## request for hidden ssid
            # Bug fix: 'apppend' typo raised AttributeError on hidden SSIDs.
            config.append('ignore_broadcast_ssid=1'+'\n')
    f = open(hostapdconf,'w')
    f.writelines(config)
    f.close()
    msg_timeout = ""
    try:
        # Bug fix throughout: PIPE is imported directly from subprocess;
        # the module name 'subprocess' itself is never imported, so every
        # subprocess.PIPE reference raised NameError.
        proc = Popen(['/usr/bin/killall','hostapd'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        proc = Popen(['service','dnsmasq','stop'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        proc = Popen(['ifconfig',ap_iface,'10.0.1.1','netmask','255.255.255.0','up'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        proc1 = Popen(['/usr/sbin/hostapd','-B',hostapdconf], shell=False, stdout=PIPE, stderr=PIPE)
        proc2 = Popen(['service','dnsmasq','restart'], shell=False, stdout=PIPE, stderr=PIPE)
        proc = Popen(['sysctl','-w','net.ipv4.ip_forward=1'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        proc = Popen(['iptables','-t nat','-A POSTROUTING','-o',ap_gateway,'-j MASQUERADE'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        proc = Popen(['iptables','-F FORWARD'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        proc = Popen(['iptables','-F FORWARD','-j ACCEPT'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        if verbose: dlogger.info("Starting AP hostapd")
        ap_on = 1
    except OSError as e:
        dlogger.info('Could not execute: %s!' %value)
        os.kill(os.getpid(), SIGINT)
        return False
    if timeout > 0:
        # Bug fix: Timer is imported directly; 'threading' is not bound.
        t = Timer(timeout, StopAp)
        t.start()
def StopAp():
    """Tear down the rogue AP: kill hostapd, stop dnsmasq, down the
    interface, and flush the NAT/forwarding rules added by StartAp."""
    global ap_on
    if ap_on:
        dlogger.info("Requested to stop AP.")
    else:
        dlogger.info("AP not running. Nothing to do!")
        return
    try:
        # Bug fix: PIPE is imported directly from subprocess; the module
        # name 'subprocess' itself is never imported.
        proc = Popen(['/usr/bin/killall','hostapd'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        proc = Popen(['service','dnsmasq','stop'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        proc = Popen(['ifconfig',ap_iface,'10.0.1.1','netmask','255.255.255.0','down'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        proc = Popen(['iptables','-t nat','--flush'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        proc = Popen(['iptables','-D FORWARD'], shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
        if verbose: dlogger.info("Stopping AP and hostapd daemon")
        ap_on = 0
    except OSError as e:
        # Bug fix: the original logged the undefined name 'value' here,
        # raising NameError inside the exception handler.
        dlogger.info('Could not stop AP: %s!' %e)
        os.kill(os.getpid(), SIGINT)
        return False
class Sniffer(Thread): # Scapy sniffer thread
    """Daemon thread that sniffs probe requests and feeds PacketHandler."""
    def __init__(self):
        Thread.__init__(self)
        # Bug fix: the original set Thread.daemon on the *class*, marking
        # every Thread in the process daemonic; only this instance should be.
        self.daemon = True
    def run(self):
        try:
            # Bug fix: log before sniff() -- sniff() blocks indefinitely,
            # so the original message placed after it was never emitted.
            dlogger.info("Start sniffing data with interface %s" % intfmon)
            sniff(iface=intfmon, prn=PacketHandler, lfilter=lambda p:(Dot11ProbeReq in p), store=0)
        except Exception as e:
            logging.error("Cannot start sniffer thread with interface %s! %s" %(intfmon,e.message))
            closeall(0,0)
def closeall(signal,frame):
    """SIGINT handler: revert all active actions and terminate.

    Stops the rogue AP, removes the DROP rules tracked in portlist (i.e.
    re-opens those ports), switches all recorded GPIOs off, then exits.
    signal/frame follow the signal-handler calling convention.
    """
    global closing
    StopAp()
    OpenPorts(portlist,0)
    GpioOff(gpiolist)
    closing = 1
    dlogger.info('Ending wifiknockd execution!')
    exit()
def oscheck():
    """Abort unless running on Linux (iptables/iw/hostapd are required)."""
    osversion = system()
    if debug: dlogger.debug("Operating System: %s" % osversion)
    if osversion != 'Linux':
        # Bug fix: corrected the "Exitting" typo in the user-facing message.
        dlogger.info("This script only works on Linux OS! Exiting!")
        exit(1)
def GetMAC(iface):
    """Return the MAC address of *iface* as 'aa:bb:cc:dd:ee:ff'.

    Uses the SIOCGIFHWADDR ioctl (0x8927) on a throwaway UDP socket.
    NOTE(review): 'socket' and 'struct' are not imported explicitly at the
    top of this file; they presumably arrive via 'from scapy.all import *'
    -- confirm, or add explicit imports.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # 0x8927 == SIOCGIFHWADDR; kernel interface names are capped at 15 chars.
    info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', iface[:15]))
    # Bytes 18..24 of the returned ifreq struct hold the hardware address
    # (Python 2: iterating a byte string yields 1-char strings, hence ord()).
    mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
    return mac
def initmon(intfparent):
global intfmon, ignore
dlogger.info("Using WiFi interface to monitor packets: %s" %intfparent)
if not os.path.isdir("/sys/class/net/" + intfparent):
dlogger.info("WiFi parent interface %s does not exist! Cannot continue!" % intfparent)
exit(1)
else:
intfmon = 'mon' + intfparent[-1]
if os.path.isdir("/sys/class/net/" + intfmon):
if debug: dlogger.debug("WiFi interface %s exists! Deleting it!" % (intfmon))
try:
# create monitor interface using iw
os.system("iw dev %s del" % intfmon)
time.sleep(0.5)
except OSError as oserr:
if debug: dlogger.debug("Could not delete monitor interface %s. %s" % (intfmon, oserr.message))
os.kill(os.getpid(), SIGINT)
sys.exit(1)
try:
# create monitor interface using iw
os.system('rfkill unblock wlan')
time.sleep(0.3)
os.system("ifconfig %s down" % intfparent)
time.sleep(0.3)
os.system("iwconfig %s mode monitor" % intfparent)
time.sleep(0.3)
os.system("iw dev %s interface add %s type | |
1) # [batch x actions x hid]
Wy = torch.mul(u, a_oh).sum(1) + self.bias
xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
xWy.data.masked_fill_(x_mask.data, -float('inf'))
if self.training:
# In training we output log-softmax for NLL
alpha = F.log_softmax(xWy, dim=-1)
else:
# ...Otherwise 0-1 probabilities
alpha = F.softmax(xWy, dim=-1)
return alpha
class BilinearSeqAttnAction(nn.Module):
    """A bilinear attention layer over a sequence X w.r.t y:
    * o_i = softmax(x_i'Wy) for x_i in X.
    Optionally don't normalize output weights.

    Action-conditioned variant: the bilinear matrix actually applied is
    produced by running the shared ``self.weight`` through a small
    per-action stack of Conv2d layers (see ``conv_forw``); the matrix for
    each example is selected by the one-hot action vector.
    """
    def __init__(self, x_size, y_size, n_actions, identity=False, wn=False, func='kconv5'):
        # 'func' encodes the conv depth: digits after 'kconv' (up to an
        # optional '_'), e.g. 'kconv5' -> 5 layers.
        super(BilinearSeqAttnAction, self).__init__()
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        self.weight = nn.Parameter(torch.Tensor(y_size, x_size))
        # NOTE(review): self.bias is initialised in reset_parameters but is
        # never used in this class's forward (unlike sibling classes).
        self.bias = nn.Parameter(torch.Tensor(x_size))
        self.w_conv = nn.ModuleList()
        self.n_actions = n_actions
        self.cnn_layers = int(func[5:].split('_')[0])
        # Channel width of the intermediate conv layers.
        hid_cnn = 64
        for i in range(self.cnn_layers):
            chan_in = hid_cnn
            chan_out = hid_cnn
            kern = 3
            pad = 1
            if i == 0:
                # First layer consumes the single-channel weight "image".
                chan_in = 1
            elif i == self.cnn_layers-1:
                #kern = 1
                #pad = 0
                # Last layer collapses back to one channel.
                chan_out = 1
            a_conv = nn.ModuleList()
            # One conv per action at every depth.
            for a in range(self.n_actions):
                a_conv.append(nn.Conv2d(chan_in, chan_out, kern, stride=1, padding=pad))
            self.w_conv.append(a_conv)
        def conv_forw(a):
            # w1 [emb x 3*(2hid)]
            # Treat the shared weight matrix as a 1-channel image and run it
            # through action a's conv stack (ReLU between layers, none after
            # the last). kern=3/pad=1/stride=1 preserve the spatial size, so
            # the result is again a (y_size, x_size) matrix.
            out = self.weight.unsqueeze(0).unsqueeze(0)
            for i in range(self.cnn_layers):
                if i != self.cnn_layers-1:
                    out = F.relu(self.w_conv[i][a](out))
                else:
                    out = self.w_conv[i][a](out)
            out = out.squeeze()
            return out
        self.func = lambda a: conv_forw(a)
        self.reset_parameters()
    def reset_parameters(self):
        # Uniform init scaled by fan-in; bias starts at zero.
        stdv = 1. / math.sqrt(self.weight.size(-1))
        self.weight.data.uniform_(-stdv, stdv)
        self.bias.data.zero_()
    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len   (True marks positions masked to -inf)
        actions = batch * n_actions
        Returns log-probabilities while training, probabilities otherwise.
        """
        a_oh = one_hot(actions, self.n_actions).unsqueeze(2) # [batch x n_actions x 1]
        u = []
        # Project y through every action's generated matrix; the one-hot
        # multiply below keeps only the selected action's projection.
        for a in range(self.n_actions):
            w_i = self.func(a)
            u_i = y.mm(w_i)
            u.append(u_i)
        u = torch.stack(u, 1) # [batch x actions x hid]
        Wy = torch.mul(u, a_oh).sum(1)
        xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
        # Mask padding positions before normalisation.
        xWy.data.masked_fill_(x_mask.data, -float('inf'))
        if self.training:
            # In training we output log-softmax for NLL
            alpha = F.log_softmax(xWy, dim=-1)
        else:
            # ...Otherwise 0-1 probabilities
            alpha = F.softmax(xWy, dim=-1)
        return alpha
class BilinearSeqAttnAction3(nn.Module):
    """Action-conditioned bilinear attention over a sequence X w.r.t. y:
    * o_i = softmax(x_i' W_a y) for x_i in X,
    where W_a is the shared bilinear weight modulated elementwise by a
    per-action matrix ('mul': plain product, 'mul_s': product with the
    sigmoid of the per-action matrix). Returns log-probabilities while
    training, probabilities otherwise.
    """
    def __init__(self, x_size, y_size, n_actions, identity=False, wn=False, func='mul_s'):
        super(BilinearSeqAttnAction3, self).__init__()
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        # Shared bilinear weight/bias plus one modulation matrix/bias per action.
        self.weight = nn.Parameter(torch.Tensor(y_size, x_size))
        self.bias = nn.Parameter(torch.Tensor(x_size))
        self.wa = nn.Parameter(torch.Tensor(n_actions,y_size, x_size))
        self.ba = nn.Parameter(torch.Tensor(n_actions, x_size))
        self.n_actions = n_actions
        if func == 'mul':
            self.func = lambda a,b: torch.mul(a,b)
        elif func == 'mul_s':
            # torch.sigmoid replaces the deprecated F.sigmoid (removed in
            # modern PyTorch); numerically identical.
            self.func = lambda a,b: torch.mul(a,torch.sigmoid(b))
        self.reset_parameters()
    def reset_parameters(self):
        # Uniform init scaled by fan-in for the weights; biases start at zero.
        stdv = 1. / math.sqrt(self.weight.size(-1))
        self.weight.data.uniform_(-stdv, stdv)
        self.bias.data.zero_()
        self.ba.data.zero_()
        self.wa.data.uniform_(-stdv, stdv)
    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len   (True marks positions masked to -inf)
        actions = batch * n_actions (indices; one-hot encoded via the
                  project helper one_hot -- confirm expected shape)
        """
        a_oh = one_hot(actions, self.n_actions).unsqueeze(2) # [batch x n_actions x 1]
        u = []
        # Project y through every action's modulated matrix; the one-hot
        # multiply below keeps only the selected action's projection.
        for a in range(self.n_actions):
            w_i = self.func(self.weight, self.wa[a])
            u_i = y.mm(w_i)
            u.append(u_i)
        u = torch.stack(u, 1) # [batch x actions x hid]
        b = self.func(self.bias, torch.mm(a_oh.squeeze(2), self.ba))
        Wy = torch.mul(u, a_oh).sum(1) + b
        xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
        xWy.data.masked_fill_(x_mask.data, -float('inf'))
        if self.training:
            # In training we output log-softmax for NLL
            alpha = F.log_softmax(xWy, dim=-1)
        else:
            # ...Otherwise 0-1 probabilities
            alpha = F.softmax(xWy, dim=-1)
        return alpha
class BilinearSeqAttnAction2(nn.Module):
    """Bilinear sequence attention with a separate full bilinear map per
    action: o_i = softmax(x_i' W_a y), where (W_a, b_a) are picked out by
    the one-hot action vector. Emits log-probabilities while training and
    plain probabilities at eval time.
    """
    def __init__(self, x_size, y_size, n_actions, identity=False, wn=False):
        super(BilinearSeqAttnAction2, self).__init__()
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        # One (y_size x x_size) bilinear matrix and one bias row per action.
        self.weight = nn.Parameter(torch.Tensor(n_actions,y_size, x_size))
        self.bias = nn.Parameter(torch.Tensor(n_actions,x_size))
        self.n_actions = n_actions
        self.reset_parameters()
    def reset_parameters(self):
        """Uniform initialisation scaled by the fan-in of the last dim."""
        bound = 1. / math.sqrt(self.weight.size(2))
        self.weight.data.uniform_(-bound, bound)
        self.bias.data.uniform_(-bound, bound)
    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len   (True marks positions masked to -inf)
        actions = batch * n_actions
        """
        onehot = one_hot(actions, self.n_actions)
        # Select each example's bilinear matrix / bias by multiplying the
        # one-hot action vector against the flattened parameter banks.
        flat = self.weight.view(self.n_actions, -1)
        w_sel = torch.mm(onehot, flat).view(x.size(0), self.weight.size(1), self.weight.size(2))
        b_sel = torch.mm(onehot, self.bias)
        Wy = torch.bmm(y.unsqueeze(1), w_sel).squeeze(1) + b_sel
        scores = x.bmm(Wy.unsqueeze(2)).squeeze(2)
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        # Log-softmax for NLL during training, probabilities otherwise.
        norm = F.log_softmax if self.training else F.softmax
        return norm(scores, dim=-1)
class PointerNetworkAction(nn.Module):
    """Pointer network over a sequence with action-conditioned attention.

    Produces start/end score distributions: a first attention pass pools
    the sequence, an SRU cell refines the query state from that pooled
    vector, and a second attention pass scores the end position.
    """
    def __init__(self, x_size, y_size, n_actions, wn=False, opt=None):
        super(PointerNetworkAction, self).__init__()
        self.attention = SeqAttentionAction(
            x_size,
            y_size, opt['n_actions'], drop_r=opt['dropout_rnn'])
        self.n_actions = n_actions
        self.rnn_cell = MF.SRUCell(
            x_size, y_size,
            bidirectional=False,dropout=opt['dropout_rnn'],rnn_dropout=opt['dropout_rnn'],
            use_tanh=1)
    def forward(self, x, x_mask, c0, actions):
        """
        x = batch * len * h1
        c0 = batch * h2 (initial query state)
        x_mask = batch * len
        actions = batch * n_actions
        Returns (start, end) distributions; log-space while training.
        """
        start_scores = self.attention(x, c0, x_mask, actions)
        attn_weights = F.softmax(start_scores, dim=-1)
        pooled = (x * attn_weights.unsqueeze(2)).sum(1)
        refined = self.rnn_cell(pooled, c0=c0)[1]
        end_scores = self.attention(x, refined, x_mask, actions)
        squash = F.log_softmax if self.training else F.softmax
        return squash(start_scores, dim=-1), squash(end_scores, dim=-1)
class PointerNetwork(nn.Module):
    """Pointer network over a sequence X: two attention passes produce
    start/end distributions, with an SRU cell refining the query state
    between them. Log-probabilities while training, probabilities at eval.
    """
    def __init__(self, x_size, y_size, wn=False, opt=None):
        super(PointerNetwork, self).__init__()
        # Shared attention for both pointer steps.
        self.attention = SeqAttention(
            x_size,
            y_size, wn=wn, drop_r=opt['dropout_rnn'])
        self.rnn_cell = MF.SRUCell(
            x_size, y_size,
            bidirectional=False,dropout=opt['dropout_rnn'],rnn_dropout=opt['dropout_rnn'],
            use_tanh=1)
    def forward(self, x, x_mask, c0, actions):
        """
        x = batch * len * h1
        c0 = batch * h2 (initial query state)
        x_mask = batch * len
        actions -- accepted but unused in this class (kept for a uniform
                   interface with PointerNetworkAction).
        """
        # NOTE(review): the first attention call passes log=True while the
        # second does not, yet both results are fed through (log_)softmax
        # below -- confirm SeqAttention's 'log' semantics; the asymmetry
        # looks suspicious.
        s_logits = self.attention(x, c0, x_mask, log=True)
        s_probs = F.softmax(s_logits, dim=-1)
        # Attention-pool the sequence and refine the query state.
        attn_pool = (x*s_probs.unsqueeze(2)).sum(1)
        state = self.rnn_cell(attn_pool, c0=c0)[1]
        e_logits = self.attention(x, state, x_mask)
        if self.training:
            nonlin = lambda x: F.log_softmax(x, dim=-1)
        else:
            nonlin = lambda x: F.softmax(x, dim=-1)
        return nonlin(s_logits), nonlin(e_logits)
class SeqAttentionAction(nn.Module):
    """attention between a sequence and a tensor:
    * o_i = softmax(v*tanh(W1x_i+W2y)) for x_i in X.
    Every parameter (W1, b1, W2, b2, v) has one copy per action; the copy
    used for each example is selected via the one-hot action vector.
    forward() returns unnormalised scores with masked positions at -inf.
    """
    def __init__(self, x_size, y_size, n_actions, wn=False, drop_r=0.0):
        super(SeqAttentionAction, self).__init__()
        self.n_actions = n_actions
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        # Per-action additive-attention parameters; hidden size is x_size//4.
        self.w1 = nn.Parameter(torch.Tensor(n_actions,x_size, x_size//4))
        self.b1 = nn.Parameter(torch.Tensor(n_actions,x_size//4))
        self.w2 = nn.Parameter(torch.Tensor(n_actions,y_size, x_size//4))
        self.b2 = nn.Parameter(torch.Tensor(n_actions,x_size//4))
        self.v = nn.Parameter(torch.Tensor(n_actions,x_size//4))
        self.reset_parameters()
        if drop_r>0:
            self.dropout = nn.Dropout(drop_r)
        self.drop_r = drop_r
    def reset_parameters(self):
        # Uniform init for weights and v; biases start at zero.
        stdv = 1. / math.sqrt(self.w1.size(2))
        self.w1.data.uniform_(-stdv, stdv)
        self.b1.data.zero_()
        self.w2.data.uniform_(-stdv, stdv)
        self.b2.data.zero_()
        self.v.data.uniform_(-stdv, stdv)
    def get_action_parameters(self, a_onehot, x_size):
        """Select each example's per-action parameters and tile them to one
        copy per (batch, seq) position, flattened to [batch*len, ...].
        x_size is the [batch, len, hdim] shape list of the sequence input."""
        w1 = torch.mm(a_onehot, self.w1.view(self.n_actions, -1)).view(x_size[0], self.w1.size(1), self.w1.size(2))
        w1 = w1.unsqueeze(1).expand(x_size[0], x_size[1], w1.size(1), w1.size(2))
        w1 = w1.contiguous().view(-1,w1.size(2), w1.size(3))
        w2 = torch.mm(a_onehot, self.w2.view(self.n_actions, -1)).view(x_size[0], self.w2.size(1), self.w2.size(2))
        w2 = w2.unsqueeze(1).expand(x_size[0], x_size[1], w2.size(1), w2.size(2))
        w2 = w2.contiguous().view(-1,w2.size(2), w2.size(3))
        b1 = torch.mm(a_onehot, self.b1).unsqueeze(1).expand(x_size[0], x_size[1], self.b1.size(1)).contiguous().view(-1, self.b1.size(1))
        b2 = torch.mm(a_onehot, self.b2).unsqueeze(1).expand(x_size[0], x_size[1], self.b2.size(1)).contiguous().view(-1, self.b2.size(1))
        v = torch.mm(a_onehot, self.v).unsqueeze(1).expand(x_size[0], x_size[1], self.v.size(1)).contiguous().view(-1, self.v.size(1))
        return w1, w2, b1, b2, v
    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * hdim
        y = batch * hdim
        x_mask = batch * len (True marks positions scored at -inf)
        actions -- action indices, one-hot encoded by the project helper
                   one_hot (confirm expected shape at the call sites)
        """
        x_flat = x.view(-1, 1, x.size(-1))
        y_flat = y.unsqueeze(1).expand(y.size(0), x.size(1), y.size(1)).contiguous().view(-1, 1, y.size(-1))
        a_onehot = one_hot(actions, self.n_actions)
        w1, w2, b1, b2, v = self.get_action_parameters(a_onehot, [x.size(0), x.size(1), x.size(2)])
        x_t = torch.bmm(x_flat, w1).squeeze(1) + b1
        y_t = torch.bmm(y_flat, w2).squeeze(1) + b2
        # torch.tanh replaces the deprecated F.tanh (removed in modern
        # PyTorch); numerically identical.
        inpt = torch.tanh(x_t+y_t)
        if self.drop_r>0:
            inpt = self.dropout(inpt)
        inpt = torch.bmm(inpt.unsqueeze(1), v.unsqueeze(2))
        scores = inpt.view(x.size(0), x.size(1))
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        # Free the large tiled tensors eagerly.
        del w1,w2,b1,b2,v,x_flat,y_flat,inpt
        return scores
class CriticLinear(nn.Module):
    """Small MLP critic head producing one scalar value per batch row.

    The depth of the trunk is controlled by `num_layers` (2 or 3) and the
    head by `nl` (3: single output layer, 4: extra hidden layer). `wn`
    optionally wraps every Linear in weight normalization. `identity` is
    accepted for interface compatibility but unused.
    """
    def __init__(self, x_size, y_size, identity=False, num_layers=2, wn=False, nl=4):
        super(CriticLinear, self).__init__()
        # identity wrapper unless weight normalization is requested
        self.wn = (lambda m: weight_norm(m, dim=None)) if wn else (lambda m: m)
        self.w1 = self.wn(nn.Linear(x_size, y_size))
        if num_layers == 3:
            self.w2 = self.wn(nn.Linear(y_size, y_size))
        if nl == 3:
            self.w3 = self.wn(nn.Linear(y_size, 1))
        elif nl == 4:
            self.w3 = self.wn(nn.Linear(y_size, y_size))
            self.w4 = self.wn(nn.Linear(y_size, 1))
        self.nl = nl
        self.num_layers = num_layers
    def forward(self, x):
        """Map a (batch, x_size) tensor to a (batch,) value estimate."""
        hidden = self.w1(x)
        if self.num_layers == 3:
            hidden = self.w2(F.relu(hidden))
        activated = F.relu(hidden)
        if self.nl == 4:
            value = self.w4(F.relu(self.w3(activated)))
        else:
            value = self.w3(activated)
        return value.squeeze(1)
class PolicyLatent(nn.Module):
def __init__(self, x_size, y_size, n_actions, num_layers=2, identity=False, wn=False, add=1, nl=5):
super(PolicyLatent, self).__init__()
if wn:
self.wn = lambda x: weight_norm(x, dim=None)
else:
self.wn = lambda x: x
self.add = add
self.n_actions = n_actions
if add == 3:
self.w1a = self.wn(nn.Linear(x_size//3, y_size))
self.w1b = self.wn(nn.Linear(x_size//3, y_size))
self.w1c = self.wn(nn.Linear(x_size//3, y_size))
elif add == 2:
self.w1a = self.wn(nn.Linear(x_size//2, y_size))
self.w1b | |
'fmu', 'mechanics')
version: a version string (e.g. '1.0', 'dangerous') (Default value = '')
objects: the objects which belong to the submodel (None will derive
objects from the selection) (Default value = None)
Returns:
: a tuple of the submodelgroup and interfacegroup/None
"""
if not objects:
objects = bpy.context.selected_objects
# split interface from physical objects
interfaces = [i for i in objects if i.phobostype == 'interface']
physical_objects = [p for p in objects if p.phobostype != 'interface']
# make the physical group
sUtils.selectObjects(physical_objects, True, 0)
submodelgroupname = submodeltype + ':' + submodelname
if version != '':
submodelgroupname += '/' + version
if submodelgroupname in bpy.data.groups.keys():
log('submodelgroupname ' + 'already exists', 'WARNING')
bpy.ops.group.create(name=submodelgroupname)
submodelgroup = bpy.data.groups[submodelgroupname]
submodelgroup['submodeltype'] = submodeltype
submodelgroup['version'] = version
modeldefs = defs.definitions['submodeltypes'][submodeltype]
# copy the definition parameters to the group properties
for key in modeldefs['definitions']:
submodelgroup[key] = modeldefs['definitions'][key]
# move objects to submodel layer
for obj in physical_objects:
if not 'submodel' in bpy.context.scene.collection.children.keys():
newcollection = bpy.data.collections.new('submodel')
bpy.context.scene.collection.children.link(newcollection)
for name, collection in bpy.context.scene.collection.children.items():
if name == 'submodel':
collection.objects.link(obj)
elif obj.name in collection.objects:
collection.objects.unlink(obj)
log('Created submodel group ' + submodelname + ' of type "' + submodeltype + '".', 'DEBUG')
interfacegroup = None
# make the interface group
if interfaces:
sUtils.selectObjects(interfaces, True, 0)
interfacegroupname = 'interfaces:' + submodelname
if version != '':
interfacegroupname += '/' + version
# TODO what about overwriting groups with same names?
bpy.ops.group.create(name=interfacegroupname)
interfacegroup = bpy.data.groups[interfacegroupname]
interfacegroup['submodeltype'] = 'interfaces'
# copy interface definitions from submodel definitions
for key in modeldefs['interfaces']:
interfacegroup[key] = modeldefs['interfaces'][key]
# move objects to interface layer
for obj in interfaces:
bUtils.sortObjectToCollection(obj, cname='interface')
log('Created interface group for submodel ' + submodelname + '.', 'DEBUG')
else:
log('No interfaces for this submodel.', 'DEBUG')
for i in interfaces:
i.show_name = True
return (submodelgroup, interfacegroup)
def removeSubmodel(submodelname, submodeltype, version='', interfaces=True):
    """Removes a submodel definition from the Blender project.

    Returns True or False depending on whether groups have been removed or not.

    Args:
        submodelname: the name of the submodel
        submodeltype: the submodeltype of the submodel
        version: optional version of the submodel (Default value = '')
        interfaces: True if interface should also be deleted, else False. (Default value = True)

    Returns:
        : True if groups have been removed, else False.
    """
    # group names follow the pattern "type:name[/version]"
    groupname = submodeltype + ':' + submodelname
    if version != '':
        groupname += '/' + version
    # nothing to do when the submodel group does not exist
    if groupname not in bpy.data.groups:
        return False
    bpy.data.groups.remove(bpy.data.groups[groupname])
    if not interfaces:
        return True
    # also remove the matching interface group, if present
    ifgroupname = 'interfaces:' + submodelname
    if version != '':
        ifgroupname += '/' + version
    if ifgroupname in bpy.data.groups:
        bpy.data.groups.remove(bpy.data.groups[ifgroupname])
        return True
    return False
def createInterface(ifdict, parent=None):
    """Create an interface object and optionally parent to existing object.

    ifdict is expected as:

    | **type**: str
    | **direction**: str
    | **model**: str
    | **name**: str
    | **parent**: bpy.types.Object (optional)
    | **scale**: float (optional)

    Args:
        ifdict(dict): interface data
        parent(bpy.types.Object, optional): designated parent object (Default value = None)

    Returns:
        bpy.types.Object: newly created interface object
    """
    # fall back to the parent specified in ifdict, if it is a real object
    if not parent:
        try:
            parent = ifdict['parent']
            assert isinstance(parent, bpy.types.Object)
        except (AttributeError, AssertionError, KeyError):
            parent = None
    # place the interface at the parent's pose, or at the world origin
    location = parent.matrix_world.translation if parent else mathutils.Vector()
    rotation = parent.matrix_world.to_euler() if parent else mathutils.Euler()

    model = ifdict['model'] if 'model' in ifdict else 'default'
    templateobj = ioUtils.getResource(('interface', model, ifdict['direction']))
    scale = ifdict['scale'] if 'scale' in ifdict else 1.0
    ifobj = bUtils.createPrimitive(
        ifdict['name'],
        'box',
        (1.0, 1.0, 1.0),
        defs.layerTypes['interface'],
        plocation=location,
        protation=rotation,
        phobostype='interface',
    )
    nUtils.safelyName(ifobj, ifdict['name'], 'interface')
    # share the template mesh, then scale the instance
    ifobj.data = templateobj.data
    ifobj.scale = (scale,) * 3
    ifobj['interface/type'] = ifdict['type']
    ifobj['interface/direction'] = ifdict['direction']
    if parent is not None:
        ifobj['interface/parent'] = parent.name
        parentObjectsTo(ifobj, parent)
    bpy.ops.object.make_single_user(object=True, obdata=True)
    # FIX: the docstring promised the created object, but the function
    # previously returned None
    return ifobj
def toggleInterfaces(interfaces=None, modename='toggle'):
    """Show, hide or toggle the name labels of interface objects.

    Args:
        interfaces: interface objects to change; when falsy, the currently
            selected interface objects are used (Default value = None)
        modename: one of 'toggle', 'activate', 'deactivate'
            (Default value = 'toggle')

    Returns:

    """
    # unknown mode names raise KeyError, as before
    mode = {'toggle': 0, 'activate': 1, 'deactivate': 2}[modename]
    if not interfaces:
        interfaces = [obj for obj in bpy.context.selected_objects
                      if obj.phobostype == 'interface']
    for obj in interfaces:
        if mode == 0:
            obj.show_name = not obj.show_name
        else:
            # mode 1 activates, mode 2 deactivates
            obj.show_name = (mode == 1)
def connectInterfaces(parentinterface, childinterface, transform=None):
    """Attach a child submodel to a parent model via their interface objects.

    Restructures the child's kinematic tree so its interface becomes the
    root, parents the child interface to the parent interface and aligns it
    (rotated 180 degrees about X and Z unless `transform` is given).

    Args:
        parentinterface: interface object on the parent model
        childinterface: interface object on the child model
        transform: optional 4x4 matrix applied in the parent interface frame
            (Default value = None)

    Returns:

    """
    # first check if the interface is child of the root object and if not, restructure the tree
    root = sUtils.getRoot(childinterface)
    parent = childinterface.parent
    if root != parent:
        restructureKinematicTree(parent)
    childsubmodel = childinterface.parent

    # connect the interfaces
    sUtils.selectObjects(objects=[parentinterface], clear=True, active=0)
    bpy.ops.object.make_single_user(object=True, obdata=True)
    # bake scale so the decompose() below is not skewed by unapplied scaling
    bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
    sUtils.selectObjects(objects=[childinterface], clear=True, active=0)
    bpy.ops.object.make_single_user(object=True, obdata=True)
    bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)

    # parent interfaces: submodel hangs off its interface, which hangs off the parent interface
    bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
    parentObjectsTo(childsubmodel, childinterface, clear=True)
    parentObjectsTo(childinterface, parentinterface)

    loc, rot, sca = parentinterface.matrix_world.decompose()
    # apply additional transform (ignoring the scale of the parent interface)
    if not transform:
        transform = (
            mathutils.Euler((math.radians(180.0), 0.0, math.radians(180.0)), 'XYZ')
            .to_matrix()
            .to_4x4()
        )

    childinterface.matrix_world = (
        mathutils.Matrix.Translation(loc) @ rot.to_matrix().to_4x4() @ transform
    )

    # TODO clean this up
    # try:
    #    del childsubmodel['modelname']
    # except KeyError:
    #    pass
    # TODO: re-implement this for MECHANICS models
    # try:
    #    # parent visual and collision objects to new parent
    #    children = sUtils.getImmediateChildren(parent, ['visual', 'collision', 'interface'])
    #    print(children)
    #    sUtils.selectObjects(children, True, 0)
    #    bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
    #    print()
    #    parentObjectsTo(children, sUtils.getEffectiveParent(parent, ignore_selection=True))
    #    bpy.ops.object.parent_set(type='BONE_RELATIVE')
    # except (IndexError, AttributeError):
    #    pass  # no objects to re-parent

    # hide the labels of the now-connected interfaces
    parentinterface.show_name = False
    childinterface.show_name = False
def disconnectInterfaces(parentinterface, childinterface, transform=None):
    """Detach a child submodel from its parent at the interface connection.

    Args:
        parentinterface: interface object on the parent model
        childinterface: interface object of the child to detach
        transform: optional 4x4 matrix applied to the child relative to the
            new root after detaching (Default value = None)

    Returns:

    """
    # unparent the child
    sUtils.selectObjects(objects=[childinterface], clear=True, active=0)
    bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')

    # select the former parent of the interface as new root
    if childinterface.children and len(childinterface.children) > 0:
        # prefer submodel instances
        for child in childinterface.children:
            if child.phobostype == 'submodel':
                root = child
                break
        # otherwise just use the first child (for-else: no break occurred)
        else:
            root = childinterface.children[0]

        # restructure the kinematic tree to make the interface child of the submodel again
        sUtils.selectObjects(objects=[root], clear=True, active=0)
        bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
        parentObjectsTo(childinterface, root)

    # apply additional transform
    # NOTE(review): if the interface has no children, `root` is never bound
    # and this raises NameError when `transform` is provided -- confirm
    # whether callers guarantee at least one child.
    if transform:
        childinterface.matrix_world = root.matrix_world @ transform

    # make the interfaces active again
    parentinterface.show_name = True
    childinterface.show_name = True
def setProperties(obj, diction, category=None):
    """Adds the specified dictionary as custom properties to the object.

    If a category is provided, the keys of the dictionary are prepended with
    the category: `category/key`

    Args:
        obj(bpy.types.Object): object to add the information to
        diction(dict): information to add to the object
        category(str, optional): category for the dictionary entries (Default value = None)

    Returns:

    """
    prefix = category + '/' if category else ''
    for key, value in diction.items():
        obj[prefix + key] = value
def getProperties(obj, category=None):
    """Returns a dictionary of custom property information of the object.

    If a category is provided, only the custom properties of the specified
    category are returned. Otherwise, the phobostype of the object will be
    used as category.

    The dictionary contains the custom property keys with the category
    prefix removed (e.g. 'name' for 'link/name').

    Args:
        obj(bpy.types.Object): object to get properties of
        category(str, optional): property category to look for (Default value = None)

    Returns:
        : dict -- custom property information of the phobostype/category for the object
    """
    if not category:
        category = obj.phobostype
    prefix = category + '/'
    # FIX: the old try/except KeyError could leave the result unbound (a
    # comprehension over items() never raises KeyError), and str.replace
    # stripped the category anywhere in the key rather than only the prefix.
    return {
        key[len(prefix):]: value
        for key, value in obj.items()
        if key.startswith(prefix)
    }
def removeProperties(obj, props, recursive=False):
    """Removes a list of custom properties from the specified object.

    The specified property list can contain names with wildcards at the end
    (e.g. sensor*). If recursive is set, the properties will be removed
    recursively from all children, too.

    Args:
        obj(bpy.types.Object): object to remove the properties from
        props(list(str): list of property names, which will be removed from the object
        recursive(bool, optional): if True, the properties will be removed recursively from the children, too (Default value = False)

    Returns:

    """
    for prop in props:
        # skip empty property names
        if len(prop) == 0:
            continue
        if prop in obj:
            del obj[prop]
        elif prop[-1] == '*':
            # FIX: snapshot the keys first -- deleting entries while
            # iterating the live key view invalidates the iteration
            for objprop in list(obj.keys()):
                if objprop.startswith(prop[:-1]):
                    del obj[objprop]

    if recursive:
        for child in obj.children:
            removeProperties(child, props, recursive=recursive)
def mergeLinks(links, targetlink, movetotarget=False):
"""
Args:
links:
targetlink:
movetotarget: (Default value = False)
Returns:
"""
for link in | |
<filename>rebook/mrcdi.py
import cv2
import numpy as np
from dewarp import get_AH_lines, correct_geometry, estimate_vanishing, \
arc_length_points
from geometry import Line
import lib
from lib import RED, GREEN, BLUE
def peak_points(l, AH):
    """Find local maxima of a text line's bottom contour.

    Draws the line's letter contours into a mask, extracts the bottom
    envelope, and keeps the points where the envelope equals its grey
    dilation, i.e. local peaks within a window of about one letter height.

    Args:
        l: list of (contour, x, y, w, h) letter tuples, sorted by x.
        AH: average letter height in pixels.

    Returns:
        numpy array of (x, y) peak points in mask-local coordinates.
    """
    # FIX: grey_dilation was used without being imported anywhere in this
    # module; import it locally to keep the module's import block untouched.
    from scipy.ndimage import grey_dilation

    x_min, x_max = l[0][1], l[-1][1] + l[-1][3]
    y_min = min([y for c, x, y, w, h in l]) + 1
    y_max = max([y + h for c, x, y, w, h in l]) + 1
    height, width = y_max - y_min, x_max - x_min

    mask = np.zeros((y_max - y_min, x_max - x_min))
    contours = [c for c, x, y, w, h in l]
    cv2.drawContours(mask, contours, -1, 255, thickness=cv2.FILLED,
                     offset=(-x_min, -y_min))

    # bottom envelope: lowest inked pixel in each column, interpolated
    # across empty columns
    old_bottom = height - mask[::-1].argmax(axis=0)
    good_bottoms = mask.max(axis=0) > 0
    bottom_xs, = np.where(good_bottoms)
    bottom_ys = old_bottom[good_bottoms]
    bottom = np.interp(np.arange(width), bottom_xs, bottom_ys)
    assert (bottom[good_bottoms] == old_bottom[good_bottoms]).all()

    # FIX: use integer division -- AH / 2 is a float under Python 3 and
    # grey_dilation requires an integer window size
    delta = AH // 2
    peaks = grey_dilation(bottom, size=2 * delta + 1)
    bottom_points = np.array(list(zip(list(range(width)), bottom)))
    peak_points = bottom_points[bottom_points[:, 1] == peaks]
    return peak_points
def centroid(poly, line):
    """Mean (x, y) of 20 samples of `poly` across the line's horizontal span.

    The span runs from the left edge of the first letter to the right edge
    of the last letter (letter tuples are (contour, x, y, w, h)).
    """
    _, x_start, _, _, _ = line[0]
    _, x_end, _, w_end, _ = line[-1]
    xs = np.linspace(x_start, x_end + w_end, 20)
    samples = np.vstack([xs, poly(xs)]).T
    return samples.mean(axis=0)
def plot_norm(points, *args, **kwargs):
    """Plot a point sequence normalized to start at the origin.

    Shifts the points so the first is at (0, 0) and rescales x so the last
    point has x == 1, then plots with matplotlib.

    NOTE(review): relies on a module-level `plt` (matplotlib.pyplot) that is
    not among the imports visible here -- confirm it is imported elsewhere.
    """
    norm = points - points[0]
    norm /= norm[-1][0]
    norm_T = norm.T
    norm_T[1] -= norm_T[1][0]
    # norm_T[1] /= norm_T[1].max()
    plt.plot(norm_T[0], norm_T[1], *args, **kwargs)
def C0_C1(lines, v):
    """Return (C0, C1): the two reference text lines, ordered by the side
    of the image the vanishing point `v` falls on.

    When the vanishing point lies above the image (vy < 0) the bottom line
    serves as C0 and the top line as C1; otherwise the order is reversed.
    """
    _, vy = v
    if vy < 0:
        # use bottom line as C0 if vanishing point above image
        return lines[-1], lines[0]
    return lines[0], lines[-1]
def widest_domain(lines, v, n_points):
    """Compute the widest x-domain over which all text lines project onto C0.

    Projects every other line's left/right bottom endpoints through the
    vanishing point `v` onto the reference line C0 and spans the union of
    those intersections.

    NOTE(review): writes a debug image using a module-level `bw` (binarized
    page image) not defined in this view. Also, x_max is seeded with
    C0.left() rather than C0.right() -- looks like a possible bug; confirm
    against the original algorithm.

    Returns:
        (domain, C0, C1): `n_points` x-samples plus the two reference lines.
    """
    C0, C1 = C0_C1(lines, v)
    v_lefts = [Line.from_points(v, l[0].left_bot()) for l in lines if l is not C0]
    v_rights = [Line.from_points(v, l[-1].right_bot()) for l in lines if l is not C0]
    C0_lefts = [l.text_line_intersect(C0)[0] for l in v_lefts]
    C0_rights = [l.text_line_intersect(C0)[0] for l in v_rights]
    x_min = min(C0.left(), min(C0_lefts))
    x_max = max(C0.left(), max(C0_rights))
    domain = np.linspace(x_min, x_max, n_points)
    # debug rendering of the line extents and the bounding longitudes
    debug = cv2.cvtColor(bw, cv2.COLOR_GRAY2BGR)
    for l in lines:
        cv2.line(debug, tuple(l[0].left_bot().astype(int)),
                 tuple(l[-1].right_bot().astype(int)), GREEN, 2)
    Line.from_points(v, (x_min, C0(x_min))).draw(debug)
    Line.from_points(v, (x_max, C0(x_max))).draw(debug)
    lib.debug_imwrite('domain.png', debug)
    return domain, C0, C1
# Number of x-samples used when constructing the directrix domain.
N_POINTS = 200
# Blending parameter for interpolating between C0 and C1 (see
# estimate_directrix); presumably tuned empirically -- TODO confirm.
MU = 30
def estimate_directrix(lines, v, n_points_w):
    """Estimate the page directrix (mid-level curve) between lines C0 and C1.

    Blends the two reference curves through the vanishing point `v` using
    the projective parameter MU, then resamples at equal arc length after
    undoing the estimated camera tilt.

    NOTE(review): relies on module-level names `f` (focal length), `acos`,
    `sin`, `cos`, `sqrt` (math) and `inv` (numpy.linalg) not imported in
    this view -- confirm they are defined elsewhere in the module.

    Returns:
        (D_points_arc, C_points_arc): arc-length-resampled directrix in
        flattened (D) and image (C) coordinates, each 2 x N.
    """
    vx, vy = v
    domain, C0, C1 = widest_domain(lines, v, N_POINTS)

    C0_points = np.vstack([domain, C0(domain)])
    # project each C0 sample through v onto C1
    longitudes = [Line.from_points(v, p) for p in C0_points.T]
    C1_points = np.array([l.closest_poly_intersect(C1.model, p) \
                          for l, p in zip(longitudes, C0_points.T)]).T
    # lambda: projective position of v relative to C0/C1; alpha: blend weight
    lambdas = (vy - C0_points[1]) / (C1_points[1] - C0_points[1])
    alphas = MU * lambdas / (MU + lambdas - 1)
    C_points = (1 - alphas) * C0_points + alphas * C1_points
    C = C_points.T.mean(axis=0)

    # camera tilt angle derived from the vanishing point
    theta = acos(f / sqrt(vx ** 2 + vy ** 2 + f ** 2))
    print('theta:', theta)
    A = np.array([
        [1, C[0] / f * -sin(theta)],
        [0, cos(theta) - C[1] / f * sin(theta)]
    ])

    # undo tilt, resample at equal arc length, then map back to image space
    D_points = inv(A).dot(C_points)
    D_points_arc, _ = arc_length_points(D_points)
    C_points_arc = A.dot(D_points_arc)

    # plot_norm(np.vstack([domain, C0(domain)]).T, label='C0')
    # plot_norm(np.vstack([domain, C1(domain)]).T, label='C1')
    # plot_norm(C_points.T, label='C20')
    # plot_norm(D_points.T, label='D')
    # plot_norm(C_points_arc.T, label='C')
    # # plt.plot(C_points.T, label='C20')
    # # plt.plot(C_points_arc.T, label='C')
    # plt.axes().legend()
    # plt.show()

    return D_points_arc, C_points_arc
def aspect_ratio(im, lines, D, v, O):
    """Estimate the physical aspect ratio of the page from perspective cues.

    Compares measured image distances between the reference lines with the
    arc length of the directrix D, correcting for the camera angles alpha
    and beta derived from the vanishing point.

    NOTE(review): depends on module-level `f` (focal length) and math/numpy
    names (`atan2`, `acos`, `pi`, `cos`, `sqrt`, `norm`) not imported in
    this view -- confirm they exist elsewhere in the module.
    """
    vx, vy = v
    C0, C1 = C0_C1(lines, v)
    im_h, im_w = im.shape

    # slope of lines through O perpendicular to the direction towards v
    m = -(vx - O[0]) / (vy - O[1])
    L0 = Line.from_point_slope(C0.first_base(), m)
    L1 = Line.from_point_slope(C1.first_base(), m)
    perp = L0.altitude(v)
    p0, p1 = L0.intersect(perp), L1.intersect(perp)
    h_img = norm(p0 - p1)

    # Convergence line perp to V=(vx, vy, f):
    # y = -vx / vy * x + -f^2 / vy
    L = Line(m, -m * O[0] - (f ** 2) / (vy - O[1]))
    F = L.altitude(v).intersect(L)
    _, x0r, y0r, w0r, h0r = lines[-1][-1]
    p0r = np.array([x0r + w0r / 2.0, y0r + h0r])
    F_C0r = Line.from_points(F, p0r)
    q0 = F_C0r.intersect(L0)
    l_img = norm(q0 - p0)

    debug = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
    L0.draw(debug)
    L1.draw(debug)
    L.draw(debug, color=GREEN)
    F_C0r.draw(debug, color=RED)
    lib.debug_imwrite('aspect.png', debug)

    alpha = atan2(norm(p1 - O), f)
    theta = acos(f / sqrt((vx - O[0]) ** 2 + (vy - O[1]) ** 2 + f ** 2))
    beta = pi / 2 - theta

    # directrix extent (l') and arc length (w') in image coordinates
    lp_img = abs(D[0][-1] - D[0][0])
    wp_img = norm(np.diff(D.T, axis=0), axis=1).sum()

    print('h_img:', h_img, 'l\'_img:', lp_img, 'alpha:', alpha)
    print('l_img:', l_img, 'w\'_img:', wp_img, 'beta:', beta)

    r = h_img * lp_img * cos(alpha) / (l_img * wp_img * cos(alpha + beta))
    return r
class MuMode(object):
    """Two-valued flag selecting the top or bottom edge of the text block."""
    def __init__(self, val):
        self.val = val

    def __eq__(self, other):
        return self.val == other.val

    def index(self):
        """Index of the reference line: first line for TOP, last for BOTTOM."""
        if self.val:
            return 0
        return -1

    def point(self, l):
        """Anchor point on letter `l`: top point for TOP, base for BOTTOM."""
        return l.top_point() if self.val else l.base_point()

MuMode.BOTTOM = MuMode(False)
MuMode.TOP = MuMode(True)
# find mu necessary to entirely cover line with mesh
def necessary_mu(C0, C1, v, all_lines, mode):
    """Extreme mu value so the mesh reaches the outermost text line.

    For each letter of the topmost (or bottommost) line, computes the mu
    whose blended C0/C1 curve passes through that letter's anchor point,
    then pads the extreme value outward by 0.01.

    NOTE(review): draws into the module-level `mu_debug` image, which is
    created by generate_mesh() before this is called.
    """
    vx, vy = v
    line = all_lines[mode.index()]
    points = np.array([mode.point(l) for l in line])
    for p in points:
        global mu_debug
        cv2.circle(mu_debug, tuple(p.astype(int)), 6, GREEN, -1)

    # intersect each anchor's longitude with both reference curves
    longitudes = [Line.from_points(v, p) for p in points]
    C0_points = np.array([l.text_line_intersect(C0) for l in longitudes]).T
    C1_points = np.array([l.text_line_intersect(C1) for l in longitudes]).T
    lambdas = (vy - C0_points[1]) / (C1_points[1] - C0_points[1])
    alphas = (points[:, 1] - C0_points[1]) / (C1_points[1] - C0_points[1])
    mus = alphas * (1 - lambdas) / (alphas - lambdas)

    # pad outward on whichever side of 0.5 the bulk of the mus falls
    return mus.max() + 0.01 if np.median(mus) >= 0.5 else mus.min() - 0.01
@lib.timeit
def generate_mesh(all_lines, lines, C_arc, v, n_points_h):
    """Build the 2-D dewarping mesh from the arc-sampled directrix.

    For each directrix sample, intersects the longitude through the
    vanishing point with C0 and C1 and interpolates n_points_h rows between
    the mu values needed to cover the top and bottom text lines.

    Returns:
        Mesh of shape (n_points_h, len(C_arc_T), 2).

    NOTE(review): uses module-level names `bw` and `trace_baseline` that
    are not defined in this view.
    """
    vx, vy = v
    C_arc_T = C_arc.T
    C0, C1 = C0_C1(lines, v)

    # first, calculate necessary mu.
    global mu_debug
    mu_debug = cv2.cvtColor(bw, cv2.COLOR_GRAY2BGR)
    mu_bottom = necessary_mu(C0, C1, v, all_lines, MuMode.BOTTOM)
    mu_top = necessary_mu(C0, C1, v, all_lines, MuMode.TOP)
    lib.debug_imwrite('mu.png', mu_debug)

    longitude_lines = [Line.from_points(v, p) for p in C_arc_T]
    longitudes = []
    mus = np.linspace(mu_top, mu_bottom, n_points_h)
    for l, C_i in zip(longitude_lines, C_arc_T):
        p0 = l.closest_poly_intersect(C0.model, C_i)
        p1 = l.closest_poly_intersect(C1.model, C_i)
        lam = (vy - p0[1]) / (p1[1] - p0[1])
        # blend weight per mesh row for this longitude
        alphas = mus * lam / (mus + lam - 1)
        longitudes.append(np.outer(1 - alphas, p0) + np.outer(alphas, p1))

    result = np.array(longitudes)
    # debug rendering of every 50th mesh point
    debug = cv2.cvtColor(bw, cv2.COLOR_GRAY2BGR)
    for l in result[::50]:
        for p in l[::50]:
            cv2.circle(debug, tuple(p.astype(int)), 6, BLUE, -1)
    trace_baseline(debug, C0, RED)
    trace_baseline(debug, C1, RED)
    lib.debug_imwrite('mesh.png', debug)

    return np.array(longitudes).transpose(1, 0, 2)
def spline_model(line):
    """Fit a univariate smoothing spline to the letters' base points.

    Duplicate x coordinates are collapsed (UnivariateSpline requires
    strictly increasing x), keeping one point per distinct x.
    """
    points = np.array([letter.base_point() for letter in line])
    _, first_idx = np.unique(points[:, 0], return_index=True)
    unique_points = points[first_idx]
    xs, ys = unique_points[:, 0], unique_points[:, 1]
    return interpolate.UnivariateSpline(xs, ys)
def valid_curvature(line):
    """Check that a text line's baseline is not implausibly curved.

    Fits a spline to the base points and rejects the line when the maximum
    curvature estimate exceeds 0.3. Lines with fewer than 4 letters are
    always accepted (too few points to fit a cubic spline).

    NOTE(review): draws into a module-level `curvature_debug` image not
    defined in this view.
    """
    if len(line) < 4: return True

    poly = spline_model(line)
    polyp = poly.derivative()
    polypp = polyp.derivative()

    x_range = line.left(), line.right()
    x_points = np.linspace(x_range[0], x_range[1], 50)

    # curvature |y''| / (1 + y'^2); the ** 3/2 of the exact formula is
    # deliberately (?) omitted -- see trailing comment
    curvature = abs(polypp(x_points)) / (1 + polyp(x_points) ** 2) # ** 3/2
    # print 'curvature:', curvature.max()

    global curvature_debug
    for p in zip(x_points, poly(x_points)):
        cv2.circle(curvature_debug, (int(p[0]), int(p[1])), 2, BLUE, -1)
    return curvature.max() < 0.3
def min_crop(lines):
    """Minimal bounding Crop around all text lines (also writes crop.png).

    Top/bottom bounds come from the first/last line's letters; left/right
    bounds from the extremes across all lines.

    NOTE(review): `Crop` and the page image `bw` are module-level names not
    visible in this view.
    """
    box = Crop(
        min([line.left() for line in lines]),
        min([letter.y for letter in lines[0]]),
        max([line.right() for line in lines]),
        max([letter.y + letter.h for letter in lines[-1]]),
    )
    debug = cv2.cvtColor(bw, cv2.COLOR_GRAY2BGR)
    box.draw(debug)
    lib.debug_imwrite('crop.png', debug)
    return box
@lib.timeit
def dewarp_fine(im):
    """Second-pass dewarp: straighten residual per-line waviness.

    Estimates, for every letter, the vertical offset of its base point from
    its line's median baseline, fits a smooth 2-D offset field, and redraws
    all contours shifted by that field.

    NOTE(review): the 3-value unpacking of cv2.findContours matches the
    OpenCV 3.x API; 2.x/4.x return two values. `interpolate` (scipy) is
    assumed to be imported elsewhere in the module.
    """
    lib.debug_prefix = 'fine_'

    AH, all_lines, lines = get_AH_lines(im)

    # collect (x, y) base points and their offsets from the line median
    points = []
    offsets = []
    for line in lines:
        bases = np.array([l.base_point() for l in line])
        median_y = np.median(bases[:, 1])
        points.extend(bases)
        offsets.extend(median_y - bases[:, 1])

    points = np.array(points)
    offsets = np.array(offsets)

    im_h, im_w = im.shape
    # grid_x, grid_y = np.mgrid[:im_h, :im_w]
    # y_offset_interp = interpolate.griddata(points, offsets,
    #                                        (grid_x, grid_y), method='nearest')
    y_offset_interp = interpolate.SmoothBivariateSpline(
        points[:, 0], points[:, 1], offsets
    )

    new = np.full(im.shape, 0, dtype=np.uint8)
    _, contours, [hierarchy] = \
        cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    draw_contours(new, contours, hierarchy, y_offset_interp, 0, 255)

    lib.debug_imwrite('fine.png', new)
    return new
def draw_contours(im, contours, hierarchy, y_offset_interp, idx, color,
                  depth=0, passed_offset=None):
    """Recursively redraw a contour tree with per-contour vertical offsets.

    Walks the sibling chain starting at `idx`, drawing each contour shifted
    by the interpolated offset sampled at its bottom-center, and recursing
    into children with the fill color inverted (so holes stay holes).
    Small holes (color 0, area < 5000) inherit their parent's offset so
    they stay aligned with the surrounding glyph.
    """
    while idx >= 0:
        x, y, w, h = cv2.boundingRect(contours[idx])
        # print '+' * depth, idx, 'color:', color, '@', x, y, 'offset:', offset, 'area:', w * h
        if passed_offset is None:
            # sample the offset field at the contour's bottom-center
            offset = (0, -int(round(y_offset_interp(x + w / 2.0, y + h))))
            # offset = (0, -int(round(y_offset_interp[y + h - 1, x + w / 2 - 1])))
        else:
            offset = passed_offset
        cv2.drawContours(im, contours, idx, color, thickness=cv2.FILLED,
                         offset=offset)

        child = hierarchy[idx][2]
        if child >= 0:
            pass_offset = offset if color == 0 and w * h < 5000 else None
            draw_contours(im, contours, hierarchy, y_offset_interp, child,
                          255 - color, depth=depth + 1, passed_offset=pass_offset)

        # advance to the next sibling in the hierarchy
        idx = hierarchy[idx][0]
def full_lines(AH, lines, v):
C0 = max(lines, key=lambda l: l.right() - | |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorlayer.layers.core import Layer
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
__all__ = [
'PoolLayer',
'MaxPool1d',
'MeanPool1d',
'MaxPool2d',
'MeanPool2d',
'MaxPool3d',
'MeanPool3d',
'GlobalMaxPool1d',
'GlobalMeanPool1d',
'GlobalMaxPool2d',
'GlobalMeanPool2d',
'GlobalMaxPool3d',
'GlobalMeanPool3d',
]
class PoolLayer(Layer):
    """
    The :class:`PoolLayer` class is a Pooling layer.
    You can choose ``tf.nn.max_pool`` and ``tf.nn.avg_pool`` for 2D input or
    ``tf.nn.max_pool3d`` and ``tf.nn.avg_pool3d`` for 3D input.

    Parameters
    ----------
    prev_layer : :class:`Layer`
        The previous layer.
    ksize : tuple of int
        The size of the window for each dimension of the input tensor.
        Note that: len(ksize) >= 4.
    strides : tuple of int
        The stride of the sliding window for each dimension of the input tensor.
        Note that: len(strides) >= 4.
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    pool : pooling function
        One of ``tf.nn.max_pool``, ``tf.nn.avg_pool``, ``tf.nn.max_pool3d`` and ``tf.nn.avg_pool3d``.
        See `TensorFlow pooling APIs <https://www.tensorflow.org/versions/master/api_docs/python/nn.html#pooling>`__
    name : str
        A unique layer name.

    Examples
    --------
    - see :class:`Conv2dLayer`.

    """

    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self,
            prev_layer,
            ksize=(1, 2, 2, 1),
            strides=(1, 2, 2, 1),
            padding='SAME',
            pool=tf.nn.max_pool,
            name='pool_layer',
    ):
        super(PoolLayer, self).__init__(prev_layer=prev_layer, name=name)
        logging.info(
            "PoolLayer %s: ksize: %s strides: %s padding: %s pool: %s" %
            (self.name, str(ksize), str(strides), padding, pool.__name__)
        )
        # self.inputs is populated by the Layer base class from prev_layer
        self.outputs = pool(self.inputs, ksize=ksize, strides=strides, padding=padding, name=name)
        self._add_layers(self.outputs)
class MaxPool1d(Layer):
    """Max pooling for 1D signal [batch, length, channel]. Wrapper for `tf.layers.max_pooling1d <https://www.tensorflow.org/api_docs/python/tf/layers/max_pooling1d>`__ .

    Parameters
    ----------
    prev_layer : :class:`Layer`
        The previous layer with a output rank as 3 [batch, length, channel].
    filter_size : tuple of int
        Pooling window size.
    strides : tuple of int
        Strides of the pooling operation.
    padding : str
        The padding method: 'valid' or 'same'.
    data_format : str
        One of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions must match the inputs.
        channels_last corresponds to inputs with the shape (batch, length, channels);
        while channels_first corresponds to inputs with shape (batch, channels, length).
    name : str
        A unique layer name.

    """

    @deprecated_alias(net='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self, prev_layer, filter_size=3, strides=2, padding='valid', data_format='channels_last', name='maxpool1d'
    ):
        super(MaxPool1d, self).__init__(prev_layer=prev_layer, name=name)
        logging.info(
            "MaxPool1d %s: filter_size: %s strides: %s padding: %s" %
            (self.name, str(filter_size), str(strides), str(padding))
        )
        # self.inputs is populated by the Layer base class from prev_layer
        self.outputs = tf.layers.max_pooling1d(
            self.inputs, filter_size, strides, padding=padding, data_format=data_format, name=name
        )
        self._add_layers(self.outputs)
class MeanPool1d(Layer):
    """Mean pooling for 1D signal [batch, length, channel]. Wrapper for `tf.layers.average_pooling1d <https://www.tensorflow.org/api_docs/python/tf/layers/average_pooling1d>`__ .

    Parameters
    ------------
    prev_layer : :class:`Layer`
        The previous layer with a output rank as 3 [batch, length, channel].
    filter_size : tuple of int
        Pooling window size.
    strides : tuple of int
        Strides of the pooling operation.
    padding : str
        The padding method: 'valid' or 'same'.
    data_format : str
        One of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions must match the inputs.
        channels_last corresponds to inputs with the shape (batch, length, channels);
        while channels_first corresponds to inputs with shape (batch, channels, length).
    name : str
        A unique layer name.

    """

    @deprecated_alias(net='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self, prev_layer, filter_size=3, strides=2, padding='valid', data_format='channels_last', name='meanpool1d'
    ):
        super(MeanPool1d, self).__init__(prev_layer=prev_layer, name=name)
        logging.info(
            "MeanPool1d %s: filter_size: %s strides: %s padding: %s" %
            (self.name, str(filter_size), str(strides), str(padding))
        )
        # FIX: use self.inputs (set by the Layer base class) instead of
        # reaching into prev_layer.outputs, for consistency with
        # MaxPool1d/PoolLayer; also dropped the dead commented-out
        # pre-class implementation.
        self.outputs = tf.layers.average_pooling1d(
            self.inputs, filter_size, strides, padding=padding, data_format=data_format, name=name
        )
        self._add_layers(self.outputs)
class MaxPool2d(Layer):
    """Max pooling for 2D image [batch, height, width, channel].

    Parameters
    -----------
    prev_layer : :class:`Layer`
        The previous layer with a output rank as 4 [batch, height, width, channel].
    filter_size : tuple of int
        (height, width) for filter size.
    strides : tuple of int
        (height, width) for strides.
    padding : str
        The padding method: 'valid' or 'same'.
        NOTE(review): the default here is uppercase 'SAME'; tf.layers
        padding handling is case-insensitive, so this appears harmless --
        confirm against the TF version in use.
    name : str
        A unique layer name.

    """

    @deprecated_alias(net='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(self, prev_layer, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='maxpool2d'):
        # fall back to the window size when no strides are given
        if strides is None:
            strides = filter_size
        super(MaxPool2d, self).__init__(prev_layer=prev_layer, name=name)
        logging.info(
            "MaxPool2d %s: filter_size: %s strides: %s padding: %s" %
            (self.name, str(filter_size), str(strides), str(padding))
        )
        self.outputs = tf.layers.max_pooling2d(
            self.inputs, filter_size, strides, padding=padding, data_format='channels_last', name=name
        )
        self._add_layers(self.outputs)
class MeanPool2d(Layer):
    """Mean pooling for 2D image [batch, height, width, channel].

    Parameters
    -----------
    prev_layer : :class:`Layer`
        The previous layer with a output rank as 4 [batch, height, width, channel].
    filter_size : tuple of int
        (height, width) for filter size.
    strides : tuple of int
        (height, width) for strides.
    padding : str
        The padding method: 'valid' or 'same'.
    name : str
        A unique layer name.

    """

    @deprecated_alias(net='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(self, prev_layer, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='meanpool2d'):
        # fall back to the window size when no strides are given
        if strides is None:
            strides = filter_size
        super(MeanPool2d, self).__init__(prev_layer=prev_layer, name=name)
        logging.info(
            "MeanPool2d %s: filter_size: %s strides: %s padding: %s" %
            (self.name, str(filter_size), str(strides), str(padding))
        )
        self.outputs = tf.layers.average_pooling2d(
            self.inputs, filter_size, strides, padding=padding, data_format='channels_last', name=name
        )
        self._add_layers(self.outputs)
class MaxPool3d(Layer):
    """Max pooling for 3D volume [batch, depth, height, width, channel]. Wrapper for `tf.layers.max_pooling3d <https://www.tensorflow.org/api_docs/python/tf/layers/max_pooling3d>`__ .

    Parameters
    ------------
    prev_layer : :class:`Layer`
        The previous layer with a output rank as 5 [batch, depth, height, width, channel].
    filter_size : tuple of int
        Pooling window size.
    strides : tuple of int
        Strides of the pooling operation.
    padding : str
        The padding method: 'valid' or 'same'.
    data_format : str
        One of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions must match the inputs.
        channels_last corresponds to inputs with the shape (batch, length, channels);
        while channels_first corresponds to inputs with shape (batch, channels, length).
    name : str
        A unique layer name.

    Returns
    -------
    :class:`Layer`
        A max pooling 3-D layer with a output rank as 5.

    """

    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last',
            name='maxpool3d'
    ):
        super(MaxPool3d, self).__init__(prev_layer=prev_layer, name=name)
        logging.info(
            "MaxPool3d %s: filter_size: %s strides: %s padding: %s" %
            (self.name, str(filter_size), str(strides), str(padding))
        )
        # self.inputs is populated by the Layer base class from prev_layer
        self.outputs = tf.layers.max_pooling3d(
            self.inputs, filter_size, strides, padding=padding, data_format=data_format, name=name
        )
        self._add_layers(self.outputs)
class MeanPool3d(Layer):
    """Mean pooling for 3D volume [batch, depth, height, width, channel]. Wrapper for `tf.layers.average_pooling3d <https://www.tensorflow.org/api_docs/python/tf/layers/average_pooling3d>`__

    Parameters
    ------------
    prev_layer : :class:`Layer`
        The previous layer with a output rank as 5 [batch, depth, height, width, channel].
    filter_size : tuple of int
        Pooling window size.
    strides : tuple of int
        Strides of the pooling operation.
    padding : str
        The padding method: 'valid' or 'same'.
    data_format : str
        One of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions must match the inputs.
        channels_last corresponds to inputs with the shape (batch, length, channels);
        while channels_first corresponds to inputs with shape (batch, channels, length).
    name : str
        A unique layer name.

    Returns
    -------
    :class:`Layer`
        A mean pooling 3-D layer with a output rank as 5.

    """

    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last',
            name='meanpool3d'
    ):
        super(MeanPool3d, self).__init__(prev_layer=prev_layer, name=name)
        logging.info(
            "MeanPool3d %s: filter_size: %s strides: %s padding: %s" %
            (self.name, str(filter_size), str(strides), str(padding))
        )
        # FIX: use self.inputs (set by the Layer base class) instead of
        # reaching into prev_layer.outputs, for consistency with MaxPool3d
        # and the other pooling layers in this module.
        self.outputs = tf.layers.average_pooling3d(
            self.inputs, filter_size, strides, padding=padding, data_format=data_format, name=name
        )
        self._add_layers(self.outputs)
class GlobalMaxPool1d(Layer):
    """The :class:`GlobalMaxPool1d` class is a 1D Global Max Pooling layer.

    Parameters
    ------------
    prev_layer : :class:`Layer`
        The previous layer with a output rank as 3 [batch, length, channel].
    name : str
        A unique layer name.

    Examples
    ---------
    >>> x = tf.placeholder("float32", [None, 100, 30])
    >>> n = InputLayer(x, name='in')
    >>> n = GlobalMaxPool1d(n)
    [None, 30]

    """

    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(self, prev_layer, name='globalmaxpool1d'):
        super(GlobalMaxPool1d, self).__init__(prev_layer=prev_layer, name=name)
        logging.info("GlobalMaxPool1d %s" % self.name)
        # reduce over the length axis (axis=1), collapsing
        # [batch, length, channel] to [batch, channel]
        self.outputs = tf.reduce_max(self.inputs, axis=1, name=name)
        # register the pooled tensor as this layer's output
        self._add_layers(self.outputs)
class GlobalMeanPool1d(Layer):
"""The :class:`GlobalMeanPool1d` class is a 1D Global Mean Pooling layer.
Parameters
------------
prev_layer : :class:`Layer`
The previous layer with a output rank as 3 [batch, length, channel].
name : str
A unique layer name.
Examples
---------
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder("float32", [None, 100, 30])
| |
None:
fout.write("\t\t\t\t%s %s\n" % (item, str(self.params[item])))
fout.write("\t\t\t\t&END CONSTRAINT_EXPONENTS\n")
    def set_params(self, params):
        """Store CP2K keywords addressed as dash-joined paths of depth 5.

        Keys look like ``A-B-C-D-KEYWORD``; only the final component is kept
        as the keyword name. Keys of any other depth are silently ignored.
        """
        for item in params:
            if len(item.split("-")) == 5:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_dft_qs_optimize_lri_basis:
    """Generator for the &OPTIMIZE_LRI_BASIS section of a CP2K input file."""

    def __init__(self):
        # CP2K keyword -> value; entries set to None are not written out
        self.params = {}
        # the parent section emits this section only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t&OPTIMIZE_LRI_BASIS\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t&END OPTIMIZE_LRI_BASIS\n")

    def set_params(self, params):
        """Keep only depth-4 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 4:
                self.params[parts[-1]] = value
class cp2k_dft_qs_opt_embed_embed_dens_diff_each:
    """&EACH print-frequency subsection of &EMBED_DENS_DIFF."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t\t&EACH\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only depth-6 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
class cp2k_dft_qs_opt_embed_embed_dens_diff:
    """Generator for the &EMBED_DENS_DIFF subsection of &OPT_EMBED.

    Holds plain CP2K keywords in ``self.params`` and owns an &EACH
    subsection controlling the print frequency.
    """
    def __init__(self):
        self.params = {}
        self.status = False
        self.each = cp2k_dft_qs_opt_embed_embed_dens_diff_each()
        # basic setting: emit the &EACH subsection by default
        self.each.status = True

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t&EMBED_DENS_DIFF\n")
        for item in self.params:
            if self.params[item] is not None:
                fout.write("\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END EMBED_DENS_DIFF\n")

    def set_params(self, params):
        """Keep depth-5 keys as keywords; route EACH keys to the subsection."""
        for item in params:
            if len(item.split("-")) == 5:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[4] == "EACH":
                # BUG FIX: this branch previously called
                # self.each.to_input(fout), but `fout` is undefined here
                # (NameError at runtime). Delegate to the subsection's
                # set_params, as the sibling EMBED_POT_VECTOR section does.
                self.each.set_params({item: params[item]})
            else:
                pass
class cp2k_dft_qs_opt_embed_embed_pot_cube_each:
    """&EACH print-frequency subsection of &EMBED_POT_CUBE."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t\t&EACH\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only depth-6 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
class cp2k_dft_qs_opt_embed_embed_pot_cube:
    """Generator for the &EMBED_POT_CUBE subsection of &OPT_EMBED.

    Holds plain CP2K keywords in ``self.params`` and owns an &EACH
    subsection controlling the print frequency.
    """
    def __init__(self):
        self.params = {}
        self.status = False
        self.each = cp2k_dft_qs_opt_embed_embed_pot_cube_each()
        # basic setting: emit the &EACH subsection by default
        self.each.status = True

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t&EMBED_POT_CUBE\n")
        for item in self.params:
            if self.params[item] is not None:
                fout.write("\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END EMBED_POT_CUBE\n")

    def set_params(self, params):
        """Keep depth-5 keys as keywords; route EACH keys to the subsection."""
        for item in params:
            if len(item.split("-")) == 5:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[4] == "EACH":
                # BUG FIX: this branch previously called
                # self.each.to_input(fout), but `fout` is undefined here
                # (NameError at runtime). Delegate to the subsection's
                # set_params, as the sibling EMBED_POT_VECTOR section does.
                self.each.set_params({item: params[item]})
            else:
                pass
class cp2k_dft_qs_opt_embed_embed_pot_vector_each:
    """&EACH print-frequency subsection of &EMBED_POT_VECTOR."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t\t&EACH\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only depth-6 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
class cp2k_dft_qs_opt_embed_embed_pot_vector:
    """Generator for the &EMBED_POT_VECTOR subsection of &OPT_EMBED."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        self.status = False
        self.each = cp2k_dft_qs_opt_embed_embed_pot_vector_each()
        # basic setting: emit the &EACH subsection by default
        self.each.status = True

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t&EMBED_POT_VECTOR\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END EMBED_POT_VECTOR\n")

    def set_params(self, params):
        """Keep depth-5 keys as keywords; route EACH keys to the subsection."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = value
            elif parts[4] == "EACH":
                self.each.set_params({key: value})
class cp2k_dft_qs_opt_embed:
    """Generator for the &OPT_EMBED section, aggregating the EMBED_DENS_DIFF,
    EMBED_POT_CUBE and EMBED_POT_VECTOR subsections."""
    def __init__(self):
        self.params = {
                }
        self.status = False
        self.embed_dens_diff = cp2k_dft_qs_opt_embed_embed_dens_diff()
        self.embed_pot_cube = cp2k_dft_qs_opt_embed_embed_pot_cube()
        self.embed_pot_vector = cp2k_dft_qs_opt_embed_embed_pot_vector()
        # basic setting
    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t&OPT_EMBED\n")
        for item in self.params:
            if self.params[item] is not None:
                fout.write("\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.embed_dens_diff.status == True:
            self.embed_dens_diff.to_input(fout)
        if self.embed_pot_cube.status == True:
            self.embed_pot_cube.to_input(fout)
        if self.embed_pot_vector.status == True:
            self.embed_pot_vector.to_input(fout)
        fout.write("\t\t\t&END OPT_EMBED\n")
    def set_params(self, params):
        # Depth-4 keys either address a subsection by name or set a plain
        # keyword; deeper keys are forwarded to the matching subsection.
        for item in params:
            if len(item.split("-")) == 4:
                if item.split("-")[-1] == "EMBED_DENS_DIFF":
                    # NOTE(review): assigns `.section`, but the subsection
                    # classes only define `.params`/`.status`/`.each` --
                    # presumably `.status` was intended; confirm before
                    # changing.
                    self.embed_dens_diff.section = params[item]
                elif item.split("-")[-1] == "EMBED_POT_CUBE":
                    self.embed_pot_cube.section = params[item]
                elif item.split("-")[-1] == "EMBED_POT_VECTOR":
                    self.embed_pot_vector.section = params[item]
                else:
                    self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[3] == "EMBED_DENS_DIFF":
                self.embed_dens_diff.set_params({item: params[item]})
            elif item.split("-")[3] == "EMBED_POT_CUBE":
                self.embed_pot_cube.set_params({item: params[item]})
            elif item.split("-")[3] == "EMBED_POT_VECTOR":
                self.embed_pot_vector.set_params({item: params[item]})
            else:
                pass
class cp2k_dft_qs_s2_restraint:
    """Generator for the &S2_RESTRAINT section of DFT/QS in a CP2K input."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this section only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t&S2_RESTRAINT\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t&END S2_RESTRAINT\n")

    def set_params(self, params):
        """Keep only depth-4 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 4:
                self.params[parts[-1]] = value
class cp2k_dft_qs_se_coulomb:
    """Generator for the &COULOMB subsection of DFT/QS/SE in a CP2K input."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t&COULOMB\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t&END COULOMB\n")

    def set_params(self, params):
        """Keep only depth-5 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = value
class cp2k_dft_qs_se_exchange:
    """Generator for the &EXCHANGE subsection of DFT/QS/SE in a CP2K input."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t&EXCHANGE\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t&END EXCHANGE\n")

    def set_params(self, params):
        """Keep only depth-5 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = value
class cp2k_dft_qs_se_ga:
    """Generator for the &GA subsection of DFT/QS/SE in a CP2K input."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t&GA\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t&END GA\n")

    def set_params(self, params):
        """Keep only depth-5 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = value
class cp2k_dft_qs_se_lr_correction:
    """Generator for the &LR_CORRECTION subsection of DFT/QS/SE."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t&LR_CORRECTION\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t&END LR_CORRECTION\n")

    def set_params(self, params):
        """Keep only depth-5 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = value
class cp2k_dft_qs_se_memory:
    """Generator for the &MEMORY subsection of DFT/QS/SE in a CP2K input."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t&MEMORY\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t&END MEMORY\n")

    def set_params(self, params):
        """Keep only depth-5 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = value
class cp2k_dft_qs_se_neighbor_lists:
    """Generator for the &NEIGHBOR_LISTS subsection of DFT/QS/SE."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t&NEIGHBOR_LISTS\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t&END NEIGHBOR_LISTS\n")

    def set_params(self, params):
        """Keep only depth-5 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = value
class cp2k_dft_qs_se_print_ewald_info_each:
    """&EACH print-frequency subsection of &EWALD_INFO."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t\t\t&EACH\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only depth-7 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 7:
                self.params[parts[-1]] = value
class cp2k_dft_qs_se_print_ewald_info:
    """Generator for the &EWALD_INFO print subsection of DFT/QS/SE."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        self.status = False
        self.each = cp2k_dft_qs_se_print_ewald_info_each()
        # basic setting: emit the &EACH subsection by default
        self.each.status = True

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t\t&EWALD_INFO\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t\t&END EWALD_INFO\n")

    def set_params(self, params):
        """Keep depth-6 keys as keywords; route EACH keys to the subsection."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
            elif parts[5] == "EACH":
                self.each.set_params({key: value})
class cp2k_dft_qs_se_print_neighbor_lists_each:
    """&EACH print-frequency subsection of the &NEIGHBOR_LISTS print section."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t\t\t&EACH\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only depth-7 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 7:
                self.params[parts[-1]] = value
class cp2k_dft_qs_se_print_neighbor_lists:
    """Generator for the &NEIGHBOR_LISTS print subsection of DFT/QS/SE."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        self.status = False
        self.each = cp2k_dft_qs_se_print_neighbor_lists_each()
        # basic setting: emit the &EACH subsection by default
        self.each.status = True

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t\t&NEIGHBOR_LISTS\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t\t&END NEIGHBOR_LISTS\n")

    def set_params(self, params):
        """Keep depth-6 keys as keywords; route EACH keys to the subsection."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
            elif parts[5] == "EACH":
                self.each.set_params({key: value})
class cp2k_dft_qs_se_print_subcell_each:
    """&EACH print-frequency subsection of the &SUBCELL print section."""

    def __init__(self):
        # CP2K keyword -> value; None entries are skipped when writing
        self.params = {}
        # the parent section emits this subsection only when status is True
        self.status = False

    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t\t\t\t&EACH\n")
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only depth-7 dash-joined keys; store the trailing keyword."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 7:
                self.params[parts[-1]] = value
class cp2k_dft_qs_se_print_subcell:
def __init__(self):
self.params = {
}
self.status = False
self.each | |
"""
Local MRIQC Stats
-----------------
This module allows the user to compare his/her images with all
similar images collected at the same scanner and same parameters,
for purpose of Quality Control (QC).
IQM: Image Quality Metrics
"""
import json
import urllib.request
import urllib.error
from datetime import datetime
from numbers import Number
from pathlib import Path
import pandas as pd
import numpy as np
from .utils import (DEVICE_SERIAL_NO,
REPOSITORY_PATH,
MRIQC_SERVER,
RELEVANT_KEYS,
read_mriqc_json,
)
def get_month_number(month):
    """
    Get the month in numeric format or 'all'

    Parameters
    ----------
    month : int or str
        Month number (1-12), 'current', '' (meaning: the whole year),
        or a month name/abbreviation (e.g. 'Feb' or 'February')

    Returns
    -------
    n_month : int or 'all'
        Month in numeric format, or string 'all'
    """
    if isinstance(month, int):
        # numeric month: validate the 1-12 range
        if not (0 < month < 13):
            raise ValueError('Wrong month: {0}'.format(month))
        return month
    if isinstance(month, str):
        if month == 'current':
            return datetime.today().month
        if month == '':
            return 'all'
        if len(month) == 3:
            # 3-letter abbreviation, e.g. 'Feb'
            return datetime.strptime(month, '%b').month
        try:
            # full month name, e.g. 'February'
            return datetime.strptime(month, '%B').month
        except ValueError:
            print('Wrong month: {0}'.format(month))
            raise
    raise ValueError('Wrong month: {0}'.format(month))
def get_device_iqms_from_server(modality, month='current', year='current', device_serial_no=None, versions=None):
    """
    Grab all iqms for the given modality and device, for a given month/year

    Parameters
    ----------
    modality : str
        Imaging modality
        Options: "T1w", "T2w", "bold"
    month : int or str
        Desired month (number, name, abbreviation), '' for the whole year,
        or "current"
    year : int or str
        Desired year, or "current"
    device_serial_no : str
        Serial number of the device for which we want to query the
        database
    versions : list of str
        Versions of MRIQC for which we want to retrieve data ('*' = any)

    Returns
    -------
    Pandas DataFrame with all the entries, or None if nothing was found
    """
    # TODO: - Define a global list or irrelevant fields:
    # I can remove irrelevant fields (e.g., "WindowWidth", "WindowCenter", ...
    # "SliceLocation"). Basically, all "Slices." fields.
    # - see if saving the results as JSONs saves space (by adding them to the
    # results only if the md5sum is not there already, or maybe replacing it)
    software = 'mriqc'
    url_root = 'https://{m_server}/api/v1/{{modality}}?{{query}}'.format(m_server=MRIQC_SERVER)
    if device_serial_no is None:
        device_serial_no = DEVICE_SERIAL_NO
    if isinstance(year, str):
        year = datetime.today().year if year == 'current' else int(year)
    n_month = get_month_number(month)
    if versions is None:
        versions = ['*']
    # it looks like the API requires the full date and time (e.g.: "Fri, 12 Jul 2019 17:20:32 GMT" )
    date_fmt = '%a, %d %b %Y %H:%M:%S GMT'
    if n_month == 'all':
        begin_date = datetime(year, 1, 1).strftime(date_fmt)
        end_date = datetime(year, 12, 31).strftime(date_fmt)
    else:
        begin_date = datetime(year, n_month, 1).strftime(date_fmt)
        if n_month < 12:
            end_date = datetime(year, n_month + 1, 1).strftime(date_fmt)
        else:  # December: wrap around to January 1st of the next year
            end_date = datetime(year + 1, 1, 1).strftime(date_fmt)
    # prepare the query and get the data. E.g.:
    # "bids_meta.DeviceSerialNumber":"166018","_updated":{"$gte":"Fri,%2012%20Jul%202019%2017:20:32%20GMT"}}&page=1
    base_query = ['"bids_meta.DeviceSerialNumber":"{dev_no}"'.format(dev_no=device_serial_no),
                  '"provenance.software":"{software}"'.format(software=software),
                  '"_updated":{{"$gte":"{begin_d}", "$lte":"{end_d}"}}'.format(
                      begin_d=begin_date,
                      end_d=end_date
                  )]
    dfs = []
    for version in versions:
        # BUG FIX: copy the list. `query = base_query` aliased it, so the
        # version clause appended below leaked into all later iterations.
        query = list(base_query)
        if version != '*':
            query.append('"provenance.version":"%s"' % version)
        page = 1
        while True:
            page_url = url_root.format(
                modality=modality,
                query='where={{{where}}}&page={page}'.format(
                    where=','.join(query),
                    page=page
                )
            )
            print(page_url)
            try:
                # VERY IMPORTANT #
                # Convert spaces in the page_url into "%20". Otherwise, it doesn't work:
                with urllib.request.urlopen(page_url.replace(" ", "%20")) as url:
                    data = json.loads(url.read().decode())
                dfs.append(pd.json_normalize(data['_items']))
                if 'next' not in data['_links'].keys():
                    break
                page += 1
            except urllib.error.HTTPError as err:
                if err.code == 400:
                    print('No results for these dates')
                    break
                raise
            except Exception:
                # narrowed from a bare `except:` so Ctrl-C is not swallowed
                print('error')
                raise
    if dfs:
        # Compose a pandas dataframe
        return pd.concat(dfs, ignore_index=True, sort=True)
    return None
def read_iqms(path):
    """
    Read the iqms from a file or folder

    Parameters
    ----------
    path : list or Path or str
        Path of the json file or directory with the iqms

    Returns
    -------
    iqms : pd.DataFrame
        DataFrame with the iqms

    Raises
    ------
    FileNotFoundError
        If a ``.json`` path is given but the file does not exist
    RuntimeError
        For any other non-existing path
    """
    if isinstance(path, list):
        # read iqms from all items into a single DataFrame. Concatenate once
        # at the end: concatenating inside the loop is quadratic in the
        # number of files.
        frames = [read_iqms(p) for p in path]
        iqms = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
        # guard the column: frames without 'provenance.md5sum' used to raise
        if 'provenance.md5sum' in iqms.columns:
            iqms.drop_duplicates(subset=['provenance.md5sum'],
                                 inplace=True,
                                 keep='last')
    elif Path(path).is_dir():
        # read all the json files in the folder (recursively):
        iqms = read_iqms(list(Path(path).rglob('*.json')))
    elif Path(path).is_file():
        if str(path).endswith(".json"):
            # try to read as "normal" json:
            try:
                iqms = pd.read_json(path_or_buf=path, orient='split')
            except ValueError:
                # read it as mriqc does (mriqc/reports/individual.py):
                iqms = read_mriqc_json(path)
        else:
            # read it as table:
            iqms = pd.read_table(path)
    elif str(path).endswith(".json"):
        # it's a json file name, but the file does not exist:
        raise FileNotFoundError('File {} does not exist'.format(str(path)))
    else:
        raise RuntimeError('Wrong argument')
    return iqms
def download_and_save(modality, year, local_iqms_repository_folder, device_serial_no):
    """
    Download iqms from server and save to a file.

    Parameters
    ----------
    modality : str
        Imaging modality.
        Options: "T1w", "T2w", "bold"
    year : int or str
        Desired year, or "current"
    local_iqms_repository_folder : str or Path
        Path to the folder with the repository of iqms
    device_serial_no : str
        Serial number of the device for which we want to query the
        database

    Returns
    -------
    iqms : pd.DataFrame
        IQMs downloaded from server (empty DataFrame if the server had no
        entries for this year)
    """
    if not local_iqms_repository_folder:
        local_iqms_repository_folder = REPOSITORY_PATH
    if not device_serial_no:
        device_serial_no = DEVICE_SERIAL_NO
    iqms = get_device_iqms_from_server(modality,
                                       year=year,
                                       month='',
                                       device_serial_no=device_serial_no)
    # BUG FIX: the server query returns None when nothing matched; this used
    # to crash with AttributeError on drop_duplicates. Return an empty frame
    # (and skip saving) instead.
    if iqms is None:
        return pd.DataFrame()
    iqms.drop_duplicates(subset=['provenance.md5sum'], inplace=True)
    # Path() also accepts plain strings for the repository folder
    save_iqms_to_json_file(iqms, Path(local_iqms_repository_folder) / (
        str(year) + '_' + modality + ".json"))
    return iqms
def get_iqms_all_years(modality, year_init, local_iqms_repository_folder, device_serial_no):
    """
    Gets iqms for all the years, for a given modality.

    If they are not present in the local repository, it queries the MRIQC API
    server, downloads them and saves them to the local repository.

    Parameters
    ----------
    modality : str
        Imaging modality.
        Options: "T1w", "T2w", "bold"
    year_init : str or int
        Initial year since which to get data (Default: current - 2)
    local_iqms_repository_folder : str or Path
        Path to the folder with the repository of iqms
    device_serial_no : str
        Serial number of the device for which we want to query the
        database

    Returns
    -------
    iqms : pd.DataFrame
        IQMs for a given modality, for all years
    """
    if not year_init:
        year_init = datetime.today().year - 2
    elif int(year_init) > datetime.today().year:
        raise RuntimeError('"year_init" cannot be greater than current year.')
    repo = Path(local_iqms_repository_folder)
    frames = []
    for year in range(int(year_init), datetime.today().year + 1):
        try:
            # Try to read from file:
            this_iqms = read_iqms(
                repo / (str(year) + '_' + modality + ".json")
            )
        except FileNotFoundError:
            this_iqms = download_and_save(modality, year, local_iqms_repository_folder, device_serial_no)
        # BUG FIX: a year without data may yield None/empty; feeding None to
        # pd.concat raised a TypeError before
        if this_iqms is not None and len(this_iqms) > 0:
            frames.append(this_iqms)
    # concatenate once instead of once per year (quadratic otherwise)
    iqms = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
    # drop duplicates:
    if 'provenance.md5sum' in iqms.columns:
        iqms.drop_duplicates(subset=['provenance.md5sum'], inplace=True)
    return iqms
def find_iqms_w_parameters(iqms_df, desired_params):
    """
    Find iqms in a DataFrame that match a dictionary of parameters

    Parameters
    ----------
    iqms_df : pd.DataFrame
        DataFrame with the iqms we want to search through
    desired_params : dict
        Dictionary with the parameters we want to match.
        If one key has more than one value, we can match any of them

    Returns
    -------
    matching_iqms_df : pd.DataFrame
        DataFrame with the iqms that match the parameters
    """
    idx = np.ones(len(iqms_df), dtype='bool')
    for key, wanted in desired_params.items():
        # one boolean row per candidate value; a record matches the key if it
        # matches ANY of the values
        if isinstance(wanted[0], Number):
            # numeric values: tolerant comparison (1% relative tolerance)
            idx_key = np.array(
                [np.isclose(iqms_df[key], val, rtol=0.01, equal_nan=True) for val in wanted]
            )
        else:
            # BUG FIX: strings, numpy booleans and any other type now share
            # an exact-equality fallback. Previously a pre-allocated
            # np.empty() array was used uninitialized for unhandled types,
            # producing a garbage mask.
            idx_key = np.array([np.asarray(iqms_df[key] == val) for val in wanted])
        # element-wise "OR" across the candidate values, then "AND" with the
        # accumulated mask
        idx = idx & idx_key.any(axis=0)
    matching_iqms_df = iqms_df[idx]
    return matching_iqms_df
def find_similar_iqms(iqms_df, sample_iqms):
    """
    Find iqms in a DataFrame that have the same RELEVANT_PARAMETERS
    as some sample_iqms

    Parameters
    ----------
    iqms_df : pd.DataFrame
        DataFrame with the iqms we want to search through
    sample_iqms : pd.DataFrame
        DataFrame with iqms for which we want to find similar iqms
        (Note: it can have more than one row)

    Returns
    -------
    similar_iqms : pd.DataFrame
        DataFrame with the iqms that match the parameters
    """
    similar_iqms = pd.DataFrame(columns=sample_iqms.columns)
    for index, iqm in sample_iqms.iterrows():
        # create the dictionary of values we need to match:
        desired_params = {
            key: list(np.unique(iqm[key]))
            for key in RELEVANT_KEYS
            if key in sample_iqms.columns
        }
        similar_to_iqm = find_iqms_w_parameters(iqms_df, desired_params)
        # DataFrame.append was deprecated and removed in pandas 2.0;
        # pd.concat is the supported replacement
        similar_iqms = pd.concat([similar_iqms, similar_to_iqm],
                                 ignore_index=True)
    if 'provenance.md5sum' in similar_iqms.keys():
        # BUG FIX: drop_duplicates returns a new frame; the result used to be
        # discarded, so duplicates were never actually removed
        similar_iqms = similar_iqms.drop_duplicates(subset=['provenance.md5sum'], keep='last')
    return similar_iqms
def save_iqms_to_json_file(iqms, path, append=False):
"""
Saves iqms to a json file
Parameters
----------
iqms : pd.DataFrame
DataFrame containing Image Quality Metrics
path : str
append : bool
Whether to append the iqms | |
= re.sub('^timestmp', 'timestamp', column_type)
column_type = re.sub('^blob', 'binary', column_type)
column_type = re.sub('^real', 'float', column_type)
column_type = re.sub('^vargraph', 'varchar', column_type)
column_type = re.sub('^graphic', 'varchar', column_type)
if self.common_config.db_db2as400 == True:
if re.search('^numeric\(', column_type):
column_type = re.sub('^numeric\(', 'decimal(', column_type)
column_type = re.sub('\)$', ',0)', column_type)
if re.search('^clob', column_type):
column_type = "string"
sqoop_column_type = "String"
column_type = re.sub('^integer', 'int', column_type)
column_type = re.sub('^timestmp', 'timestamp', column_type)
column_type = re.sub('^timestamp\(.*\)', 'timestamp', column_type)
column_type = re.sub('^varbinary$', 'binary', column_type)
column_type = re.sub('^varbinary\([0-9]*\)$', 'binary', column_type)
column_type = re.sub('^blob', 'binary', column_type)
column_type = re.sub('^real', 'float', column_type)
if self.common_config.db_mongodb == True:
column_type = re.sub(':null', ':string', column_type)
column_type = re.sub('^null$', 'string', column_type)
# Hive only allow max 255 in size for char's. If we get a char that is larger than 255, we convert it to a varchar
if column_type.startswith("char("):
column_precision = int(column_type.split("(")[1].split(")")[0])
if column_precision > 255:
column_type = re.sub('^char\(', 'varchar(', column_type)
# Remove precision from datatypes that doesnt include a precision
column_type = re.sub('^float\([0-9]*\)', 'float', column_type)
column_type = re.sub('^bigint\([0-9]*\)', 'bigint', column_type)
column_type = re.sub('^int\([0-9]*\)', 'int', column_type)
if columnTypeOverride != None:
column_type = columnTypeOverride
# If the data in the column will be anonymized, we will set the column type to a string type.
if anonymizationFunction != 'None':
column_type = "string"
# As Parquet imports some column types wrong, we need to map them all to string
if column_type in ("timestamp", "date", "bigint"):
sqoop_column_type = "String"
if re.search('^decimal\(', column_type):
sqoop_column_type = "String"
# Fetch if we should force this column to 'string' in Hive
if column_type.startswith("char(") == True or column_type.startswith("varchar(") == True:
columnForceString = self.getColumnForceString(column_name)
if columnForceString == True:
column_type = "string"
if includeColumnInImport == True:
if sqoopColumnTypeOverride != None:
self.sqoop_mapcolumnjava.append(column_name + "=" + sqoopColumnTypeOverride)
elif sqoop_column_type != None:
self.sqoop_mapcolumnjava.append(column_name + "=" + sqoop_column_type)
# Add , between column names in the list
if len(self.sqlGeneratedHiveColumnDefinition) > 0: self.sqlGeneratedHiveColumnDefinition = self.sqlGeneratedHiveColumnDefinition + ", "
# Add the column to the sqlGeneratedHiveColumnDefinition variable. This will be the base for the auto generated SQL
self.sqlGeneratedHiveColumnDefinition += "`" + column_name + "` " + column_type
if source_column_comment != None:
self.sqlGeneratedHiveColumnDefinition += " COMMENT '" + source_column_comment + "'"
# Add the column to the SQL query that can be used by sqoop or spark
column_name_parquet_supported = self.getParquetColumnName(column_name)
quote = self.common_config.getQuoteAroundColumn()
if len(self.sqlGeneratedSqoopQuery) == 0:
self.sqlGeneratedSqoopQuery = "select "
else:
self.sqlGeneratedSqoopQuery += ", "
if source_column_name != column_name_parquet_supported:
if re.search('"', source_column_name):
self.sqlGeneratedSqoopQuery += "'" + source_column_name + "' as \"" + column_name_parquet_supported + "\""
else:
self.sqlGeneratedSqoopQuery += quote + source_column_name + quote + " as " + quote + column_name_parquet_supported + quote
if self.isColumnNameReservedInSqoop(column_name_parquet_supported) == True:
columnNameReserved = True
logging.warning("The column '%s' is a reserved column namn in Sqoop. Please rename the column in 'column_name_override'"%(column_name_parquet_supported))
self.sqoop_use_generated_sql = True
else:
self.sqlGeneratedSqoopQuery += quote + source_column_name + quote
if self.isColumnNameReservedInSqoop(source_column_name) == True:
columnNameReserved = True
logging.warning("The column '%s' is a reserved column namn in Sqoop. Please rename the column in 'column_name_override'"%(source_column_name))
# Run a query to see if the column already exists. Will be used to determine if we do an insert or update
# query = "select column_id from import_columns where table_id = %s and source_column_name = %s "
# self.mysql_cursor01.execute(query, (self.table_id, source_column_name))
# logging.debug("SQL Statement executed: \n%s" % (self.mysql_cursor01.statement) )
# if self.mysql_cursor01.rowcount == 0:
if columnID == None:
query = ("insert into import_columns "
"("
" table_id,"
" hive_db,"
" hive_table,"
" column_name,"
" column_order,"
" source_column_name,"
" column_type,"
" source_column_type,"
" source_database_type,"
" sqoop_column_type,"
" last_update_from_source,"
" comment"
") values ( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s )")
self.mysql_cursor01.execute(query, (self.table_id, self.Hive_DB, self.Hive_Table, column_name.lower(), columnOrder, source_column_name, column_type, source_column_type, self.common_config.jdbc_servertype, sqoop_column_type, self.startDate, source_column_comment))
logging.debug("SQL Statement executed: %s" % (self.mysql_cursor01.statement) )
else:
query = ("update import_columns set "
" hive_db = %s, "
" hive_table = %s, "
" column_name = %s, "
" column_order = %s, "
" column_type = %s, "
" source_column_type = %s, "
" source_database_type = %s, "
" sqoop_column_type = %s, "
" source_primary_key = NULL, "
" last_update_from_source = %s, "
" comment = %s "
"where table_id = %s and source_column_name = %s ")
self.mysql_cursor01.execute(query, (self.Hive_DB, self.Hive_Table, column_name.lower(), columnOrder, column_type, source_column_type, self.common_config.jdbc_servertype, sqoop_column_type, self.startDate, source_column_comment, self.table_id, source_column_name))
logging.debug("SQL Statement executed: %s" % (self.mysql_cursor01.statement) )
if self.common_config.post_column_data == True:
jsonData = {}
jsonData["type"] = "column_data"
jsonData["date"] = self.startDate
jsonData["source_database_server_type"] = self.common_config.jdbc_servertype
jsonData["source_database_server"] = self.common_config.jdbc_hostname
jsonData["source_database"] = self.common_config.jdbc_database
jsonData["source_schema"] = self.source_schema
jsonData["source_table"] = self.source_table
jsonData["hive_db"] = self.Hive_DB
jsonData["hive_table"] = self.Hive_Table
jsonData["column"] = column_name.lower()
jsonData["source_column"] = source_column_name
jsonData["source_column_type"] = source_column_type
jsonData["column_type"] = column_type
logging.debug("Sending the following JSON to the REST interface: %s"% (json.dumps(jsonData, sort_keys=True, indent=4)))
response = self.rest.sendData(json.dumps(jsonData))
if response != 200:
# There was something wrong with the REST call. So we save it to the database and handle it later
logging.debug("REST call failed!")
logging.debug("Saving the JSON to the json_to_rest table instead")
query = "insert into json_to_rest (type, status, jsondata) values ('import_column', 0, %s)"
self.mysql_cursor01.execute(query, (json.dumps(jsonData), ))
logging.debug("SQL Statement executed: %s" % (self.mysql_cursor01.statement) )
# Commit all the changes to the import_column column
self.mysql_conn.commit()
# Some columnnames are reserved. But in order to change them, we need the information inside import_columns. So we just mark
# the columns with columnNameReserved = True and saves the data. After the save we check if it's equal to True
# and raise an exception. This way, the user have a chance to fix the problem
if columnNameReserved == True:
raise invalidConfiguration("There are columns with reserved words. DBImport cant continue until that is handled")
# Add the source the to the generated sql query
self.sqlGeneratedSqoopQuery += " from %s"%(self.common_config.getJDBCsqlFromTable(schema=self.source_schema, table=self.source_table))
# Add ( and ) to the Hive column definition so it contains a valid string for later use in the solution
self.sqlGeneratedHiveColumnDefinition = "( " + self.sqlGeneratedHiveColumnDefinition + " )"
logging.debug("Settings from import_config.saveColumnData()")
logging.debug(" sqlGeneratedSqoopQuery = %s"%(self.sqlGeneratedSqoopQuery))
logging.debug(" sqlGeneratedHiveColumnDefinition = %s"%(self.sqlGeneratedHiveColumnDefinition))
logging.debug("Executing import_config.saveColumnData() - Finished")
def isColumnNameReservedInSqoop(self, columnName):
""" Returns True or False depending if the column_name is reserved in Sqoop """
if columnName in ("const", "private", "public", "default", "long"):
return True
else:
return False
def getParquetColumnName(self, column_name):
# Changing the mapping in here also requires you to change it in DBImportOperation/common_operations.py, funtion getHiveColumnNameDiff
column_name = (column_name.lower()
.replace(' ', '_')
.replace('%', 'pct')
.replace('(', '_')
.replace(')', '_')
.replace('ü', 'u')
.replace('å', 'a')
.replace('ä', 'a')
.replace('ö', 'o')
.replace('#', 'hash')
.replace('`', '')
.replace('\'', '')
.replace(';', '')
.replace('\n', '')
.replace('\\', '')
.replace('’', '')
.replace(':', '')
.replace(',', '')
.replace('.', '')
.replace('"', '')
)
if column_name.startswith('_') == True:
# column_name = column_name[1:]
column_name = "underscore%s"%(column_name)
return column_name
	def setPrimaryKeyColumn(self, ):
		"""Update import_columns.source_primary_key from the source schema scan.

		Iterates over self.common_config.source_keys_df (output of the schema
		crawl program), keeps only PRIMARY KEY constraint rows, and writes each
		key column's position into the import_columns table. Also builds
		self.generatedPKcolumns as a comma-separated list of the PK column
		names. Commits once after the loop.
		"""
		# This is one of the main functions when it comes to source system schemas. This will parse the output from the Python Schema Program
		# and update the source_primary_key column in the import_columns table with the information on what key is part of the PK
		logging.debug("")
		logging.debug("Executing import_config.setPrimaryKeyColumn()")
		logging.info("Setting PrimaryKey information in MySQL table - import_columns")
		# Columns available in each row of source_keys_df:
		# COL_DATA_TYPE
		# COL_KEY_POSITION
		# COL_NAME
		# CONSTRAINT_NAME
		# CONSTRAINT_TYPE
		# REFERENCE_COL_NAME
		# REFERENCE_SCHEMA_NAME
		# REFERENCE_TABLE_NAME
		# SCHEMA_NAME
		# TABLE_NAME
		self.generatedPKcolumns = ""
		for index, row in self.common_config.source_keys_df.iterrows():
			key_type = row['CONSTRAINT_TYPE']
			key_id = row['COL_KEY_POSITION']
			column_name = self.stripUnwantedCharColumnName(row['COL_NAME'])
			# Handle reserved column names in Hive
			if column_name == "date": column_name = column_name + "_HIVE"
			if column_name == "interval": column_name = column_name + "_HIVE"
			# TODO: Loop through only PK's in the for loop when external Python code is completed. Same as we do for FK's
			# Determine what the PK is called in the Pandas dataframe for each database type
			key_type_reference = None
			# if self.common_config.db_mssql == True: key_type_reference = "PRIMARY_KEY_CONSTRAINT"
			# if self.common_config.db_oracle == True: key_type_reference = "P"
			# if self.common_config.db_mysql == True: key_type_reference = "PRIMARY"
			# if self.common_config.db_postgresql == True: key_type_reference = "p"
			if key_type_reference == None: key_type_reference = constant.PRIMARY_KEY
			# As we only work with PK in this function, we ignore all other kind of keys (thats usually Foreign Keys we ignore)
			if key_type != key_type_reference: continue
			logging.debug("    key_type: %s" % (key_type))
			logging.debug("    key_type_reference: %s" % (key_type_reference))
			logging.debug("    key_id: %s" % (key_id))
			logging.debug("    column_name: %s" % (column_name))
			# Append the column_name to the generated PK list.
			if len(self.generatedPKcolumns) > 0: self.generatedPKcolumns += ","
			self.generatedPKcolumns += column_name
			# Store the column's position within the PK (COL_KEY_POSITION)
			# for the matching row in import_columns
			query = ("update import_columns set "
					"	source_primary_key = %s "
					"where table_id = %s and lower(source_column_name) = %s ")
			self.mysql_cursor01.execute(query, (key_id, self.table_id, column_name))
			logging.debug("SQL Statement executed: %s" % (self.mysql_cursor01.statement) )
		# Commit all the changes to the import_column column
		self.mysql_conn.commit()
		logging.debug("Executing import_config.setPrimaryKeyColumn() - Finished")
def saveIndexData(self,):
logging.debug("Executing import_config.saveIndexData()")
logging.info("Saving index metadata to MySQL table - import_tables_indexes")
try:
query = "delete from import_tables_indexes where table_id = %s"
self.mysql_cursor01.execute(query, ( self.table_id, ))
logging.debug("SQL Statement executed: %s" % (self.mysql_cursor01.statement) )
except mysql.connector.errors.IntegrityError as e:
logging.error("Unknown error when deleting old index data from DBImport configuration database")
raise(e)
try:
for index, row in self.common_config.source_index_df.sort_values(by=['Name', 'ColumnOrder']).iterrows():
# print("name: %s"%(row[0]))
# print("type: %s"%(row[1]))
# print("unique: %s"%(row[2]))
# print("column: %s"%(row[3]))
# print("columnType: %s"%(self.common_config.source_columns_df.loc[self.common_config.source_columns_df['SOURCE_COLUMN_NAME'] == row[3], 'SOURCE_COLUMN_TYPE'].iloc[0]))
# print("----------------------------------------------")
# Save the Index data to the MySQL table
query = ("insert into import_tables_indexes "
"("
" `table_id`, "
" `hive_db`, "
" `hive_table`, "
" `index_name`, "
" `index_type`, "
" `index_unique`, "
" `column`, "
" `column_type`, "
" `column_order`, "
" `column_is_nullable` "
") values ( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s )")
columnType = self.common_config.source_columns_df.loc[self.common_config.source_columns_df['SOURCE_COLUMN_NAME'] == row[3], 'SOURCE_COLUMN_TYPE'].iloc[0]
try:
self.mysql_cursor02.execute(query, (self.table_id, self.Hive_DB, self.Hive_Table, row[0], row[1], row[2], row[3], columnType, row[4], row[5] ))
self.mysql_conn.commit()
logging.debug("SQL Statement executed: %s" % (self.mysql_cursor02.statement) )
except mysql.connector.errors.IntegrityError as e:
if ( "Duplicate entry" in str(e) ):
logging.warning("Table indexes cant be saved as name/id is not unique in DBImport Configuration Database")
else:
logging.error("Unknown error when saving index data to | |
# Source: bertucho/epic-movie-quotes-quiz — dialogos/build/idna/idna/idnadata.py
# This file is automatically generated by build-idnadata.py
scripts = {
'Arabic': frozenset(
list(range(0x600,0x605)) +
list(range(0x606,0x60c)) +
list(range(0x60d,0x61b)) +
[0x61e] +
list(range(0x620,0x640)) +
list(range(0x641,0x64b)) +
list(range(0x656,0x660)) +
list(range(0x66a,0x670)) +
list(range(0x671,0x6dd)) +
list(range(0x6de,0x700)) +
list(range(0x750,0x780)) +
list(range(0x8a0,0x8b3)) +
list(range(0x8e4,0x900)) +
list(range(0xfb50,0xfbc2)) +
list(range(0xfbd3,0xfd3e)) +
list(range(0xfd50,0xfd90)) +
list(range(0xfd92,0xfdc8)) +
list(range(0xfdf0,0xfdfe)) +
list(range(0xfe70,0xfe75)) +
list(range(0xfe76,0xfefd)) +
list(range(0x10e60,0x10e7f)) +
list(range(0x1ee00,0x1ee04)) +
list(range(0x1ee05,0x1ee20)) +
list(range(0x1ee21,0x1ee23)) +
[0x1ee24] +
[0x1ee27] +
list(range(0x1ee29,0x1ee33)) +
list(range(0x1ee34,0x1ee38)) +
[0x1ee39] +
[0x1ee3b] +
[0x1ee42] +
[0x1ee47] +
[0x1ee49] +
[0x1ee4b] +
list(range(0x1ee4d,0x1ee50)) +
list(range(0x1ee51,0x1ee53)) +
[0x1ee54] +
[0x1ee57] +
[0x1ee59] +
[0x1ee5b] +
[0x1ee5d] +
[0x1ee5f] +
list(range(0x1ee61,0x1ee63)) +
[0x1ee64] +
list(range(0x1ee67,0x1ee6b)) +
list(range(0x1ee6c,0x1ee73)) +
list(range(0x1ee74,0x1ee78)) +
list(range(0x1ee79,0x1ee7d)) +
[0x1ee7e] +
list(range(0x1ee80,0x1ee8a)) +
list(range(0x1ee8b,0x1ee9c)) +
list(range(0x1eea1,0x1eea4)) +
list(range(0x1eea5,0x1eeaa)) +
list(range(0x1eeab,0x1eebc)) +
list(range(0x1eef0,0x1eef2))
),
'Armenian': frozenset(
list(range(0x531,0x557)) +
list(range(0x559,0x560)) +
list(range(0x561,0x588)) +
[0x58a] +
list(range(0x58d,0x590)) +
list(range(0xfb13,0xfb18))
),
'Avestan': frozenset(
list(range(0x10b00,0x10b36)) +
list(range(0x10b39,0x10b40))
),
'Balinese': frozenset(
list(range(0x1b00,0x1b4c)) +
list(range(0x1b50,0x1b7d))
),
'Bamum': frozenset(
list(range(0xa6a0,0xa6f8)) +
list(range(0x16800,0x16a39))
),
'Bassa_Vah': frozenset(
list(range(0x16ad0,0x16aee)) +
list(range(0x16af0,0x16af6))
),
'Batak': frozenset(
list(range(0x1bc0,0x1bf4)) +
list(range(0x1bfc,0x1c00))
),
'Bengali': frozenset(
list(range(0x980,0x984)) +
list(range(0x985,0x98d)) +
list(range(0x98f,0x991)) +
list(range(0x993,0x9a9)) +
list(range(0x9aa,0x9b1)) +
[0x9b2] +
list(range(0x9b6,0x9ba)) +
list(range(0x9bc,0x9c5)) +
list(range(0x9c7,0x9c9)) +
list(range(0x9cb,0x9cf)) +
[0x9d7] +
list(range(0x9dc,0x9de)) +
list(range(0x9df,0x9e4)) +
list(range(0x9e6,0x9fc))
),
'Bopomofo': frozenset(
list(range(0x2ea,0x2ec)) +
list(range(0x3105,0x312e)) +
list(range(0x31a0,0x31bb))
),
'Brahmi': frozenset(
list(range(0x11000,0x1104e)) +
list(range(0x11052,0x11070)) +
[0x1107f]
),
'Braille': frozenset(
list(range(0x2800,0x2900))
),
'Buginese': frozenset(
list(range(0x1a00,0x1a1c)) +
list(range(0x1a1e,0x1a20))
),
'Buhid': frozenset(
list(range(0x1740,0x1754))
),
'Canadian_Aboriginal': frozenset(
list(range(0x1400,0x1680)) +
list(range(0x18b0,0x18f6))
),
'Carian': frozenset(
list(range(0x102a0,0x102d1))
),
'Caucasian_Albanian': frozenset(
list(range(0x10530,0x10564)) +
[0x1056f]
),
'Chakma': frozenset(
list(range(0x11100,0x11135)) +
list(range(0x11136,0x11144))
),
'Cham': frozenset(
list(range(0xaa00,0xaa37)) +
list(range(0xaa40,0xaa4e)) +
list(range(0xaa50,0xaa5a)) +
list(range(0xaa5c,0xaa60))
),
'Cherokee': frozenset(
list(range(0x13a0,0x13f5))
),
'Common': frozenset(
list(range(0x0,0x41)) +
list(range(0x5b,0x61)) +
list(range(0x7b,0xaa)) +
list(range(0xab,0xba)) +
list(range(0xbb,0xc0)) +
[0xd7] +
[0xf7] +
list(range(0x2b9,0x2e0)) +
list(range(0x2e5,0x2ea)) +
list(range(0x2ec,0x300)) +
[0x374] +
[0x37e] +
[0x385] +
[0x387] +
[0x589] +
[0x605] +
[0x60c] +
list(range(0x61b,0x61d)) +
[0x61f] +
[0x640] +
list(range(0x660,0x66a)) +
[0x6dd] +
list(range(0x964,0x966)) +
[0xe3f] +
list(range(0xfd5,0xfd9)) +
[0x10fb] +
list(range(0x16eb,0x16ee)) +
list(range(0x1735,0x1737)) +
list(range(0x1802,0x1804)) +
[0x1805] +
[0x1cd3] +
[0x1ce1] +
list(range(0x1ce9,0x1ced)) +
list(range(0x1cee,0x1cf4)) +
list(range(0x1cf5,0x1cf7)) +
list(range(0x2000,0x200c)) +
list(range(0x200e,0x2065)) +
list(range(0x2066,0x2071)) +
list(range(0x2074,0x207f)) +
list(range(0x2080,0x208f)) +
list(range(0x20a0,0x20be)) +
list(range(0x2100,0x2126)) +
list(range(0x2127,0x212a)) +
list(range(0x212c,0x2132)) +
list(range(0x2133,0x214e)) +
list(range(0x214f,0x2160)) +
[0x2189] +
list(range(0x2190,0x23fb)) +
list(range(0x2400,0x2427)) +
list(range(0x2440,0x244b)) +
list(range(0x2460,0x2800)) +
list(range(0x2900,0x2b74)) +
list(range(0x2b76,0x2b96)) +
list(range(0x2b98,0x2bba)) +
list(range(0x2bbd,0x2bc9)) +
list(range(0x2bca,0x2bd2)) +
list(range(0x2e00,0x2e43)) +
list(range(0x2ff0,0x2ffc)) +
list(range(0x3000,0x3005)) +
[0x3006] +
list(range(0x3008,0x3021)) +
list(range(0x3030,0x3038)) +
list(range(0x303c,0x3040)) +
list(range(0x309b,0x309d)) +
[0x30a0] +
list(range(0x30fb,0x30fd)) +
list(range(0x3190,0x31a0)) +
list(range(0x31c0,0x31e4)) +
list(range(0x3220,0x3260)) +
list(range(0x327f,0x32d0)) +
list(range(0x3358,0x3400)) +
list(range(0x4dc0,0x4e00)) +
list(range(0xa700,0xa722)) +
list(range(0xa788,0xa78b)) +
list(range(0xa830,0xa83a)) +
[0xa92e] +
[0xa9cf] +
[0xab5b] +
list(range(0xfd3e,0xfd40)) +
list(range(0xfe10,0xfe1a)) +
list(range(0xfe30,0xfe53)) +
list(range(0xfe54,0xfe67)) +
list(range(0xfe68,0xfe6c)) +
[0xfeff] +
list(range(0xff01,0xff21)) +
list(range(0xff3b,0xff41)) +
list(range(0xff5b,0xff66)) +
[0xff70] +
list(range(0xff9e,0xffa0)) +
list(range(0xffe0,0xffe7)) +
list(range(0xffe8,0xffef)) +
list(range(0xfff9,0xfffe)) +
list(range(0x10100,0x10103)) +
list(range(0x10107,0x10134)) +
list(range(0x10137,0x10140)) +
list(range(0x10190,0x1019c)) +
list(range(0x101d0,0x101fd)) +
list(range(0x102e1,0x102fc)) +
list(range(0x1bca0,0x1bca4)) +
list(range(0x1d000,0x1d0f6)) +
list(range(0x1d100,0x1d127)) +
list(range(0x1d129,0x1d167)) +
list(range(0x1d16a,0x1d17b)) +
list(range(0x1d183,0x1d185)) +
list(range(0x1d18c,0x1d1aa)) +
list(range(0x1d1ae,0x1d1de)) +
list(range(0x1d300,0x1d357)) +
list(range(0x1d360,0x1d372)) +
list(range(0x1d400,0x1d455)) +
list(range(0x1d456,0x1d49d)) +
list(range(0x1d49e,0x1d4a0)) +
[0x1d4a2] +
list(range(0x1d4a5,0x1d4a7)) +
list(range(0x1d4a9,0x1d4ad)) +
list(range(0x1d4ae,0x1d4ba)) +
[0x1d4bb] +
list(range(0x1d4bd,0x1d4c4)) +
list(range(0x1d4c5,0x1d506)) +
list(range(0x1d507,0x1d50b)) +
list(range(0x1d50d,0x1d515)) +
list(range(0x1d516,0x1d51d)) +
list(range(0x1d51e,0x1d53a)) +
list(range(0x1d53b,0x1d53f)) +
list(range(0x1d540,0x1d545)) +
[0x1d546] +
list(range(0x1d54a,0x1d551)) +
list(range(0x1d552,0x1d6a6)) +
list(range(0x1d6a8,0x1d7cc)) +
list(range(0x1d7ce,0x1d800)) +
list(range(0x1f000,0x1f02c)) +
list(range(0x1f030,0x1f094)) +
list(range(0x1f0a0,0x1f0af)) +
list(range(0x1f0b1,0x1f0c0)) +
list(range(0x1f0c1,0x1f0d0)) +
list(range(0x1f0d1,0x1f0f6)) +
list(range(0x1f100,0x1f10d)) +
list(range(0x1f110,0x1f12f)) +
list(range(0x1f130,0x1f16c)) +
list(range(0x1f170,0x1f19b)) +
list(range(0x1f1e6,0x1f200)) +
list(range(0x1f201,0x1f203)) +
list(range(0x1f210,0x1f23b)) +
list(range(0x1f240,0x1f249)) +
list(range(0x1f250,0x1f252)) +
list(range(0x1f300,0x1f32d)) +
list(range(0x1f330,0x1f37e)) +
list(range(0x1f380,0x1f3cf)) +
list(range(0x1f3d4,0x1f3f8)) +
list(range(0x1f400,0x1f4ff)) +
list(range(0x1f500,0x1f54b)) +
list(range(0x1f550,0x1f57a)) +
list(range(0x1f57b,0x1f5a4)) +
list(range(0x1f5a5,0x1f643)) +
list(range(0x1f645,0x1f6d0)) +
list(range(0x1f6e0,0x1f6ed)) +
list(range(0x1f6f0,0x1f6f4)) +
list(range(0x1f700,0x1f774)) +
list(range(0x1f780,0x1f7d5)) +
list(range(0x1f800,0x1f80c)) +
list(range(0x1f810,0x1f848)) +
list(range(0x1f850,0x1f85a)) +
list(range(0x1f860,0x1f888)) +
list(range(0x1f890,0x1f8ae)) +
[0xe0001] +
list(range(0xe0020,0xe0080))
),
'Coptic': frozenset(
list(range(0x3e2,0x3f0)) +
list(range(0x2c80,0x2cf4)) +
list(range(0x2cf9,0x2d00))
),
'Cuneiform': frozenset(
list(range(0x12000,0x12399)) +
list(range(0x12400,0x1246f)) +
list(range(0x12470,0x12475))
),
'Cypriot': frozenset(
list(range(0x10800,0x10806)) +
[0x10808] +
list(range(0x1080a,0x10836)) +
list(range(0x10837,0x10839)) +
[0x1083c] +
[0x1083f]
),
'Cyrillic': frozenset(
list(range(0x400,0x485)) +
list(range(0x487,0x530)) +
[0x1d2b] +
[0x1d78] +
list(range(0x2de0,0x2e00)) +
list(range(0xa640,0xa69e)) +
[0xa69f]
),
'Deseret': frozenset(
list(range(0x10400,0x10450))
),
'Devanagari': frozenset(
list(range(0x900,0x951)) +
list(range(0x953,0x964)) +
list(range(0x966,0x980)) +
list(range(0xa8e0,0xa8fc))
),
'Duployan': frozenset(
list(range(0x1bc00,0x1bc6b)) +
list(range(0x1bc70,0x1bc7d)) +
list(range(0x1bc80,0x1bc89)) +
list(range(0x1bc90,0x1bc9a)) +
list(range(0x1bc9c,0x1bca0))
),
'Egyptian_Hieroglyphs': frozenset(
list(range(0x13000,0x1342f))
),
'Elbasan': frozenset(
list(range(0x10500,0x10528))
),
'Ethiopic': frozenset(
list(range(0x1200,0x1249)) +
list(range(0x124a,0x124e)) +
list(range(0x1250,0x1257)) +
[0x1258] +
list(range(0x125a,0x125e)) +
list(range(0x1260,0x1289)) +
list(range(0x128a,0x128e)) +
list(range(0x1290,0x12b1)) +
list(range(0x12b2,0x12b6)) +
list(range(0x12b8,0x12bf)) +
[0x12c0] +
list(range(0x12c2,0x12c6)) +
list(range(0x12c8,0x12d7)) +
list(range(0x12d8,0x1311)) +
list(range(0x1312,0x1316)) +
list(range(0x1318,0x135b)) +
list(range(0x135d,0x137d)) +
list(range(0x1380,0x139a)) +
list(range(0x2d80,0x2d97)) +
list(range(0x2da0,0x2da7)) +
list(range(0x2da8,0x2daf)) +
list(range(0x2db0,0x2db7)) +
list(range(0x2db8,0x2dbf)) +
list(range(0x2dc0,0x2dc7)) +
list(range(0x2dc8,0x2dcf)) +
list(range(0x2dd0,0x2dd7)) +
list(range(0x2dd8,0x2ddf)) +
list(range(0xab01,0xab07)) +
list(range(0xab09,0xab0f)) +
list(range(0xab11,0xab17)) +
list(range(0xab20,0xab27)) +
list(range(0xab28,0xab2f))
),
'Georgian': frozenset(
list(range(0x10a0,0x10c6)) +
[0x10c7] +
[0x10cd] +
list(range(0x10d0,0x10fb)) +
list(range(0x10fc,0x1100)) +
list(range(0x2d00,0x2d26)) +
[0x2d27] +
[0x2d2d]
),
'Glagolitic': frozenset(
list(range(0x2c00,0x2c2f)) +
list(range(0x2c30,0x2c5f))
),
'Gothic': frozenset(
list(range(0x10330,0x1034b))
),
'Grantha': frozenset(
list(range(0x11301,0x11304)) +
list(range(0x11305,0x1130d)) +
list(range(0x1130f,0x11311)) +
list(range(0x11313,0x11329)) +
list(range(0x1132a,0x11331)) +
list(range(0x11332,0x11334)) +
list(range(0x11335,0x1133a)) +
list(range(0x1133c,0x11345)) +
list(range(0x11347,0x11349)) +
list(range(0x1134b,0x1134e)) +
[0x11357] +
list(range(0x1135d,0x11364)) +
list(range(0x11366,0x1136d)) +
list(range(0x11370,0x11375))
),
'Greek': frozenset(
list(range(0x370,0x374)) +
list(range(0x375,0x378)) +
list(range(0x37a,0x37e)) +
[0x37f] +
[0x384] +
[0x386] +
list(range(0x388,0x38b)) +
[0x38c] +
list(range(0x38e,0x3a2)) +
list(range(0x3a3,0x3e2)) +
list(range(0x3f0,0x400)) +
list(range(0x1d26,0x1d2b)) +
list(range(0x1d5d,0x1d62)) +
list(range(0x1d66,0x1d6b)) +
[0x1dbf] +
list(range(0x1f00,0x1f16)) +
list(range(0x1f18,0x1f1e)) +
list(range(0x1f20,0x1f46)) +
list(range(0x1f48,0x1f4e)) +
list(range(0x1f50,0x1f58)) +
[0x1f59] +
[0x1f5b] +
[0x1f5d] +
list(range(0x1f5f,0x1f7e)) +
list(range(0x1f80,0x1fb5)) +
list(range(0x1fb6,0x1fc5)) +
list(range(0x1fc6,0x1fd4)) +
list(range(0x1fd6,0x1fdc)) +
list(range(0x1fdd,0x1ff0)) +
list(range(0x1ff2,0x1ff5)) +
list(range(0x1ff6,0x1fff)) +
[0x2126] +
[0xab65] +
list(range(0x10140,0x1018d)) +
[0x101a0] +
list(range(0x1d200,0x1d246))
),
'Gujarati': frozenset(
list(range(0xa81,0xa84)) +
list(range(0xa85,0xa8e)) +
list(range(0xa8f,0xa92)) +
list(range(0xa93,0xaa9)) +
list(range(0xaaa,0xab1)) +
list(range(0xab2,0xab4)) +
list(range(0xab5,0xaba)) +
list(range(0xabc,0xac6)) +
list(range(0xac7,0xaca)) +
list(range(0xacb,0xace)) +
[0xad0] +
list(range(0xae0,0xae4)) +
list(range(0xae6,0xaf2))
),
'Gurmukhi': frozenset(
list(range(0xa01,0xa04)) +
list(range(0xa05,0xa0b)) +
list(range(0xa0f,0xa11)) +
list(range(0xa13,0xa29)) +
list(range(0xa2a,0xa31)) +
list(range(0xa32,0xa34)) +
list(range(0xa35,0xa37)) +
list(range(0xa38,0xa3a)) +
[0xa3c] +
list(range(0xa3e,0xa43)) +
list(range(0xa47,0xa49)) +
list(range(0xa4b,0xa4e)) +
[0xa51] +
list(range(0xa59,0xa5d)) +
[0xa5e] +
list(range(0xa66,0xa76))
),
'Han': frozenset(
list(range(0x2e80,0x2e9a)) +
list(range(0x2e9b,0x2ef4)) +
list(range(0x2f00,0x2fd6)) +
[0x3005] +
[0x3007] +
list(range(0x3021,0x302a)) +
list(range(0x3038,0x303c)) +
list(range(0x3400,0x4db6)) +
list(range(0x4e00,0x9fcd)) +
list(range(0xf900,0xfa6e)) +
list(range(0xfa70,0xfada)) +
list(range(0x20000,0x2a6d7)) +
list(range(0x2a700,0x2b735)) +
list(range(0x2b740,0x2b81e)) +
list(range(0x2f800,0x2fa1e))
),
'Hangul': frozenset(
list(range(0x1100,0x1200)) +
list(range(0x302e,0x3030)) +
list(range(0x3131,0x318f)) +
list(range(0x3200,0x321f)) +
list(range(0x3260,0x327f)) +
list(range(0xa960,0xa97d)) +
list(range(0xac00,0xd7a4)) +
list(range(0xd7b0,0xd7c7)) +
list(range(0xd7cb,0xd7fc)) +
list(range(0xffa0,0xffbf)) +
list(range(0xffc2,0xffc8)) +
list(range(0xffca,0xffd0)) +
list(range(0xffd2,0xffd8)) +
list(range(0xffda,0xffdd))
),
'Hanunoo': frozenset(
list(range(0x1720,0x1735))
),
'Hebrew': frozenset(
list(range(0x591,0x5c8)) +
list(range(0x5d0,0x5eb)) +
list(range(0x5f0,0x5f5)) +
list(range(0xfb1d,0xfb37)) +
list(range(0xfb38,0xfb3d)) +
[0xfb3e] +
list(range(0xfb40,0xfb42)) +
list(range(0xfb43,0xfb45)) +
list(range(0xfb46,0xfb50))
),
'Hiragana': frozenset(
list(range(0x3041,0x3097)) +
list(range(0x309d,0x30a0)) +
[0x1b001] +
[0x1f200]
),
'Imperial_Aramaic': frozenset(
list(range(0x10840,0x10856)) +
list(range(0x10857,0x10860))
),
'Inherited': frozenset(
list(range(0x300,0x370)) +
list(range(0x485,0x487)) +
list(range(0x64b,0x656)) +
[0x670] +
list(range(0x951,0x953)) +
list(range(0x1ab0,0x1abf)) +
list(range(0x1cd0,0x1cd3)) +
list(range(0x1cd4,0x1ce1)) +
list(range(0x1ce2,0x1ce9)) +
[0x1ced] +
[0x1cf4] +
list(range(0x1cf8,0x1cfa)) +
list(range(0x1dc0,0x1df6)) +
list(range(0x1dfc,0x1e00)) +
list(range(0x200c,0x200e)) +
list(range(0x20d0,0x20f1)) +
list(range(0x302a,0x302e)) +
list(range(0x3099,0x309b)) +
list(range(0xfe00,0xfe10)) +
list(range(0xfe20,0xfe2e)) +
[0x101fd] +
[0x102e0] +
list(range(0x1d167,0x1d16a)) +
list(range(0x1d17b,0x1d183)) +
list(range(0x1d185,0x1d18c)) +
list(range(0x1d1aa,0x1d1ae)) +
list(range(0xe0100,0xe01f0))
),
'Inscriptional_Pahlavi': frozenset(
list(range(0x10b60,0x10b73)) +
list(range(0x10b78,0x10b80))
),
'Inscriptional_Parthian': frozenset(
list(range(0x10b40,0x10b56)) +
list(range(0x10b58,0x10b60))
),
'Javanese': frozenset(
list(range(0xa980,0xa9ce)) +
list(range(0xa9d0,0xa9da)) +
list(range(0xa9de,0xa9e0))
),
'Kaithi': frozenset(
list(range(0x11080,0x110c2))
),
'Kannada': frozenset(
list(range(0xc81,0xc84)) +
list(range(0xc85,0xc8d)) +
list(range(0xc8e,0xc91)) +
list(range(0xc92,0xca9)) +
list(range(0xcaa,0xcb4)) +
list(range(0xcb5,0xcba)) +
list(range(0xcbc,0xcc5)) +
list(range(0xcc6,0xcc9)) +
list(range(0xcca,0xcce)) +
list(range(0xcd5,0xcd7)) +
[0xcde] +
list(range(0xce0,0xce4)) +
list(range(0xce6,0xcf0)) +
list(range(0xcf1,0xcf3))
),
'Katakana': frozenset(
list(range(0x30a1,0x30fb)) +
list(range(0x30fd,0x3100)) +
list(range(0x31f0,0x3200)) +
list(range(0x32d0,0x32ff)) +
list(range(0x3300,0x3358)) +
list(range(0xff66,0xff70)) +
list(range(0xff71,0xff9e)) +
[0x1b000]
),
'Kayah_Li': frozenset(
list(range(0xa900,0xa92e)) +
[0xa92f]
),
'Kharoshthi': frozenset(
list(range(0x10a00,0x10a04)) +
list(range(0x10a05,0x10a07)) +
list(range(0x10a0c,0x10a14)) +
list(range(0x10a15,0x10a18)) +
list(range(0x10a19,0x10a34)) +
list(range(0x10a38,0x10a3b)) +
list(range(0x10a3f,0x10a48)) +
list(range(0x10a50,0x10a59))
),
'Khmer': frozenset(
list(range(0x1780,0x17de)) +
list(range(0x17e0,0x17ea)) +
list(range(0x17f0,0x17fa)) +
list(range(0x19e0,0x1a00))
),
'Khojki': frozenset(
list(range(0x11200,0x11212)) +
list(range(0x11213,0x1123e))
),
'Khudawadi': frozenset(
list(range(0x112b0,0x112eb)) +
list(range(0x112f0,0x112fa))
),
'Lao': frozenset(
list(range(0xe81,0xe83)) +
[0xe84] +
list(range(0xe87,0xe89)) +
[0xe8a] +
[0xe8d] +
list(range(0xe94,0xe98)) +
list(range(0xe99,0xea0)) +
list(range(0xea1,0xea4)) +
[0xea5] +
[0xea7] +
list(range(0xeaa,0xeac)) +
list(range(0xead,0xeba)) +
list(range(0xebb,0xebe)) +
list(range(0xec0,0xec5)) +
[0xec6] +
list(range(0xec8,0xece)) +
list(range(0xed0,0xeda)) +
list(range(0xedc,0xee0))
),
'Latin': frozenset(
list(range(0x41,0x5b)) +
list(range(0x61,0x7b)) +
[0xaa] +
[0xba] +
list(range(0xc0,0xd7)) +
list(range(0xd8,0xf7)) +
list(range(0xf8,0x2b9)) +
list(range(0x2e0,0x2e5)) +
list(range(0x1d00,0x1d26)) +
list(range(0x1d2c,0x1d5d)) +
list(range(0x1d62,0x1d66)) +
list(range(0x1d6b,0x1d78)) +
list(range(0x1d79,0x1dbf)) +
list(range(0x1e00,0x1f00)) +
[0x2071] +
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.