input
stringlengths
2.65k
237k
output
stringclasses
1 value
#!/usr/bin/python2.7 ''' DownloadHygeine - Fetch software and other media safely and easily. TODO: multiple git repos mostly non-interactive command line operations gpg - ugh, no one uses consistent means of signing stuff. back-burner! :'( once gpg is supported,allow per-source authenticators test mirrors least latency mirror pick ''' import subprocess import os import time import re import json import sys import traceback import hashlib import random from subprocess import Popen import crypto color={"green":"\33[92m","blue":"\33[96m","red":"\33[31m","yellow":"\33[93m","white":"\33[98m"} class Util: def pickpath(self,pathlist): if not type(pathlist) is list: raise ValueError("Util.pickpath was passed a non-list argument") return None for p in pathlist: path=os.path.abspath(p) if os.path.exists(path): return path return None def numeric_choice(self,prompt,options): oldprompt=prompt while True: prompt=color["white"]+prompt i=0 for o in options: i=i+1 prompt+="\n\t"+color["green"]+"["+color["yellow"]+str(i)+color["green"]+"] "+color["white"]+o prompt+="\n" choice=raw_input(color["white"]+prompt) try: if len(choice)>0 and int(choice) > 0 and int(choice) < len(options)+1: return options[int(choice)-1] raise ValueError("Invalid option") except ValueError: print(color["red"]+"Not a valid number,try again.") prompt=oldprompt continue def numeric_choice_flexible(self,prompt,options): oldprompt=prompt while True: prompt=color["white"]+prompt i=0 for o in options: i=i+1 prompt+="\n\t"+color["green"]+"["+color["yellow"]+str(i)+color["green"]+"] "+color["white"]+o prompt+="\nPick one of these selections or specify a different configuration:" choice=raw_input(color["white"]+prompt) try: if len(choice)>0 and choice.strip().isdigit() and int(choice) > 0 and int(choice) < len(options)+1: return options[int(choice)-1] elif len(choice.strip())>0: return choice raise ValueError("Invalid option") except ValueError: print(color["red"]+"Not a valid number,try again.") prompt=oldprompt 
continue def yesno_choice(self,prompt): yes=["y","yes","catsareweird","yeah","yup"] no=["no","nope","n","getoffmylawn"] while True: choice=raw_input(color["yellow"]+prompt+"["+color["green"]+"Y"+color["yellow"]+"/"+color["red"]+"N"+color["yellow"]+"]:") if choice.lower() in yes: return True elif choice.lower() in no: return False else: print(color["red"]+"Invalid option, please pick yes or no.\n"+color["white"]+">") def clear(self): print("\033[H\033[J") def mkdirs(self,path): try: print(color["white"]+"Creating directory tree '"+path+"'") #os.makedirs(path) self.call(["mkdir","-vp",path]) if os.path.exists(path): return True else: print(color["red"]+"Executed os.makedirs however the path still does not exist.") return False except Exception as e: print(color["red"]+"Error creating directory tree:"+path) return False def which(self,program): try: p=subprocess.Popen(['which',program],shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE) stdo,stde=p.communicate() if p.returncode==0: return stdo.splitlines()[0] else: return None except Exception as e: print(traceback.print_exc()) return False def call(self,cmdlist): try: ret=subprocess.call(cmdlist,shell=False) return ret except Exception as e: print(color["yellow"]+"Exception while executing '"+str(cmdlist)+"'") return -1 class DownloadHygeine: def __init__(self): self.config_paths=[os.getenv('HOME')+"/.config/downloadhygeine",os.getenv('HOME')+"/.downloadhygeine","/etc/downloadhygeine.config","./downloadhygeine.config"] self.conf={} self.catalog=[] self.util=Util() self.suppressed_entries=set() def loadconfig(self,config): if not type(config) in [str,unicode]: return False try: cnf={} os.chown(config,os.getuid(),os.getgid()) #make sure the current user owns this file. os.chmod(config,0o0600) #owner can read/write(-exec),nobody else can rwx it. 
with open(config,"r") as cf: cnf=json.loads(cf.read().strip()) self.conf=cnf if not os.path.exists(self.conf["downloads"]) and not self.util.mkdirs(self.conf["downloads"]): print(color["yellow"]+"Warning,unable to find your 'downloads' directory,attempting to recreate it has also failed.") if not os.path.exists(self.conf["localclone"]) and not self.util.mkdirs(self.conf["localclone"]): print(color["yellow"]+"Warning,unable to find your 'clone' directory,attempting to recreate it has also failed.") #self.gitsync() print(color["green"]+"Succesfully loaded configuration from: "+config) return True except Exception as e: print(color["red"]+"Error loading the json configuration file at: "+config) print(traceback.print_exc()) return False return False def init_config(self,config): if not type(config) is str: config=self.util.numeric_choice("Where should the configuration file be saved:",self.config_paths) self.conf={} ''' Downloading with system tools is an intentional decision. This is one wheel that will not be re-invented here. aria2c and other downloaders will be added here in the future. ''' self.conf["fetchtool"]=self.util.numeric_choice("Which tool should be used to download files:",["curl","wget"]) self.conf["downloads"]=self.util.numeric_choice_flexible("Where should downloaded files be saved:",[os.getenv('HOME')+"/Downloads/",os.getenv('HOME')+"/",os.getenv('HOME')+"/DownloadHygeine/"]) self.util.mkdirs(self.conf["downloads"]) mirrors=self.util.yesno_choice("Do you want downloads to use mirrors? 
") if mirrors==True: self.conf["mirrors"]=1 self.conf["mirrorselect"]=self.util.numeric_choice("How should download mirrors be chosen?",["Round-Robin","Random"]) #TODO: least latency mirror selection else: self.conf["mirrors"]=0 if self.util.yesno_choice("Do you want to use a git repository different than https://github.com/hackers-terabit/downloadhygeine-catalog for the download catalog management?"): self.conf["gitrepo"]=raw_input("Please enter the URL of a valid git repository: ") print(color["green"]+"Got it! Will use "+self.conf["gitrepo"]+" As the download catalog repository."+color["white"]+"\n>") else: self.conf["gitrepo"]="https://github.com/hackers-terabit/downloadhygeine-catalog" print(color["green"]+"Great, will use the default git repository "+self.conf["gitrepo"]+" to manage the download catalog") while True: self.conf["localclone"]=self.util.numeric_choice_flexible("Where should the download catalogue be stored?",[os.getenv('HOME')+"/Downloads/downloadhygeine-catalog",os.getenv('HOME')+"/downloadhygeine-catalog"]) if os.path.exists(self.conf["localclone"]): break elif os.path.exists(os.path.abspath(self.conf["localclone"])): self.conf["localclone"]=os.path.abspath(self.conf["localclone"]) break elif self.util.mkdirs(os.path.abspath(self.conf["localclone"])): self.conf["localclone"]=os.path.abspath(self.conf["localclone"]) break else: print(color["red"]+"The file system path:"+self.conf["localclone"]+" Does not exist, please enter a different valid path"+color["white"]+"\n>") torrents=self.util.yesno_choice("Should torrent downloads be enabled?") if torrents: self.conf["torrents"]=1 while True: self.conf["torrentapp"]=self.util.numeric_choice_flexible("Where in your filesystem is your torrent application located?",["/usr/bin/rtorrent","/usr/bin/ktorrent","/usr/bin/transmission"]) if os.path.exists(os.path.abspath(self.conf["torrentapp"])): break else: print("Error, the specified path '"+self.conf["torrentapp"]+"' Does not exist.") 
self.conf["torrentoptions"]=raw_input("What command-line options should be passed to the torrent application? (Leave blank and hit enter if none)\n:") else: self.conf["torrents"]=0 crypto_config={} crypto_config["trusted_keys_path"]=self.util.numeric_choice_flexible(color["green"]+"Where should trusted public keys be stored:",[os.getenv('HOME')+"/.download-hygeine.trusted_keys"]) with open(crypto_config["trusted_keys_path"],"war+") as f: #check if we can write to this file. f.write("") crypto_config["ECDSA-CURVE"]="secp521r1" crypto_config["hash"]="SHA512" print(color["blue"]+"This is the current(default) configuration for digital signatures. This configuration will be used to sign and verify download catalogs:\n------------") print(color["blue"]+json.dumps(crypto_config,indent=4,sort_keys=True)) print(color["blue"]+"------------") if self.util.yesno_choice("Would you like to change any of these paramters? (Pick 'N' unless you know what you are doing here)"): choice=self.util.numeric_choice("Select one of the following paramters to change: ",["Signature Hash","ECDSA-CURVE"]) if choice=="ECDSA-CURVE": crypto_config["ECDSA-CURVE"]=self.util.numeric_choice("Pick from one the following availble curves:",list(crypto.Crypto.curvemap)) elif choice=="Signature Hash": crypto_config["hash"]=self.util.numeric_choice("Pick from one the following availble hash algorithms:",list(crypto.Crypto.hash_algorithms)) self.crypto=crypto.Crypto(crypto_config) crypto_config=self.crypto.config self.conf["crypto-config"]=self.crypto.config try: with open(config,"w+") as cf: cf.write(json.dumps(self.conf,indent=4,sort_keys=True)) os.chown(config,os.getuid(),os.getgid()) #make sure the current user owns this file. os.chmod(config,0o0600) #owner read/write,nobody else can rwx it. except Exception as e: print(color["red"]+"Error saving the configuration file at: "+config) print(traceback.print_exc()) print(color["white"]+">") return print(color["blue"]+"Finished saving your new configuration. 
") return def check_tools(self): git=self.util.which("git") fetchtool=self.util.which(self.conf["fetchtool"]) torrentapp=self.util.which(self.conf["torrentapp"]) if None is git: raise ValueError("The git executable cannot be found or called. Please install git") return False else: self.conf["git"]=git if None is fetchtool: raise ValueError("The configured tool to fetch remote files cannot be found or executed"+self.conf["fetchtool"]) return False else: self.conf["fetchtool"]=fetchtool if self.conf["torrents"] == 1 and None is torrentapp: raise ValueError("Torrent downloads configured,however the configured bittorrent client cannot be executed:"+self.conf["torrentapp"]) return False elif self.conf["torrents"] == 1: self.conf["torrentapp"]=torrentapp return True def init_env(self): defconf=self.util.pickpath(self.config_paths) if not self.loadconfig(defconf): print(color["green"]+"Existing configuration not found. Let's setup one!") self.init_config(defconf) time.sleep(3) self.util.clear() self.crypto=crypto.Crypto(self.conf["crypto-config"]) if self.crypto.ready != True: print(color["red"]+"Problem while setting up crypto configuration. 
exiting now!") sys.exit(1) return self.check_tools() def curl_fetch(self,url): fname=self.conf["downloads"]+"/"+os.path.basename(url) p=subprocess.Popen(["curl","-L","--progress-bar",url],shell=False,stdout=subprocess.PIPE) print(color["blue"]+"Downloading and saving at "+fname+" with curl from "+url+" ....") with open(fname,"wb+") as df: for b in p.communicate(): if None is b or len(b)<1: break df.write(b) print(color["white"]+"Finished downloading of "+fname) return fname def wget_fetch(self,url): fname=self.conf["downloads"]+"/"+os.path.basename(url) print(color["blue"]+"Downloading and saving at "+fname+" with wget from "+url+" ....") self.util.call(["wget","-nv", "--show-progress", "-O",fname,url]) print(color["white"]+"Finished downloading of "+fname) return fname def fetch(self,url): try: if "curl" == os.path.basename(self.conf["fetchtool"]): fname=self.curl_fetch(url) return fname,True elif "wget" == os.path.basename(self.conf["fetchtool"]): fname=self.wget_fetch(url) return fname,True else: print(color["red"]+"Unsupported download program:"+str(self.conf["fetchtool"])) return None,None except Exception as e: print(color["red"]+"Error downloading "+url) print(traceback.print_exc()) return None,False def verify_integrity(self,download,fname): try: hl=set() supportedhash=False for algo in ["sha512","sha256","whirlpool","ripemd160"]: if algo in download: supportedhash=True if not supportedhash: print(color["red"]+"Download catalog for "+download["name"]+" does not have hash algorithm supported on this system") return False for algo in hashlib.algorithms_available: if algo in ["sha512","sha256","whirlpool","ripemd160"] and algo in download: hl.add(hashlib.new(algo)) with open(fname,"rb") as f: sofar=0 total=os.path.getsize(fname) print("Verifying integrity of downloaded file "+fname) while True: block=f.read(65536) if len(block)<1: break else: sofar+=len(block) sys.stdout.write("\rRead so far:\t"+str(sofar/(total/100.00))[:5].ljust(5," ")+"\%") for h in hl: 
h.update(block) print("\n") integrity_good=False for h in hl: digest=h.hexdigest() if digest != download[h.name]: print(color["red"]+"Integrity verification failed for "+download["name"]+"\n\tFile: "+fname+"\n\tFound "+h.name+" hash:"+digest+"\n\tExpected hash:"+download[h.name]) integrity_good=False elif digest == download[h.name]: print(color["green"]+"Integrity verification ["+h.name+"] is good for "+download["name"]+" ["+fname+"]") integrity_good=True return integrity_good except Exception as e: print(color["red"]+"Exception while verifying download file integrity") print(traceback.print_exc()) return False def load_catalog(self): flist=set() if not os.path.exists(self.conf["localclone"]): return False fl=os.listdir(self.conf["localclone"]) if not type(fl) is list or len(fl)<1: return False for f in fl: if f.lower()[len(f)-5:] == ".json": flist.add(self.conf["localclone"]+"/"+f) self.catalog=[] for jf in flist: with open(jf,"r") as f: catalog_dict=None content=f.read() if content in list(self.suppressed_entries): break data,siginfo=self.crypto.verify_and_load(content) if type(data) in [dict,unicode]: catalog_dict=json.loads(data) if (type(catalog_dict) is dict and type(siginfo) is dict and len(catalog_dict)>0 and len(catalog_dict["name"].strip()) > 0 and len(catalog_dict["url"].strip())>0): catalog_dict["siginfo"]=siginfo self.catalog.append(catalog_dict) else: self.suppressed_entries.add(content) print(color["red"]+"Error,problem with loading catalog entry:\nData:"+"Content:\n"+str(content)) self.suppressed_entries.add(content) #print(traceback.print_exc()) return False if len(self.catalog)>0: return True else: return False def dump_catalog(self): for e in self.catalog: print(color["green"]+"************************ "+e["uuid"]+" ************************") print("Name:"+e["name"]) print("Category:"+e["category"]) print("Type:"+e["type"]) print("URL:"+e["url"]) self.crypto.showkey(e["siginfo"],showsig=True) print("\n-----------") for h in 
hashlib.algorithms_available: if h in e: print("Integrity hash "+h+":"+e[h]) print(color["green"]+"************************ "+e["uuid"]+" ************************") def pick(self,uuid=None): categories=set() download_choice=None if not None is uuid: for e in self.catalog: if e["uuid"].strip() == uuid.strip(): return e["uuid"] return None for e in self.catalog: if "category" in e and len(e["category"])>0: categories.add(e["category"]) if len(categories)<1: print(color["yellow"]+"No categories found. Please make sure your selected catalog has download entries with categories.") return None catchoice=self.util.numeric_choice(color["blue"]+"Please select a category: ",list(categories)) names=[] for e in self.catalog: if "category" in e and catchoice == e["category"]: names.append(e["name"]+" - Unique ID: "+e["uuid"]) if len(names)>0: choice=self.util.numeric_choice(color["yellow"]+"Please select one of these entries under the '"+catchoice+"' category:",names) uuid=choice.split(" - Unique ID: ")[1].strip() return uuid return uuid def pick_and_download(self): download_choice=self.pick() for e in self.catalog: if not type(e) is dict: continue if "uuid" in e and e["uuid"]==download_choice: ''' Something to note here - It is assumed all content that has succesfully been loaded has been authenticated. load_catalog() will never load anything that hasn't been authenticated by crypto.verify_and_load() All loaded downloads are trusted downloads. 
''' self.util.clear() print(color["green"]+"This download information (including integrity hashes) was signed by the following identity you have already trusted:\n") self.crypto.showkey(e["siginfo"],showsig=True) print(color["green"]+"Download name: "+e["name"]) print(color["green"]+"Category: "+e["category"]) print(color["yellow"]+"File type: "+e["type"]) print(color["yellow"]+"Unique ID(UUID): "+e["uuid"]) print(color["blue"]+"Download URL: "+e["url"]) fname='' success=False lasturl='' if self.util.yesno_choice(color["yellow"]+"Download this file in "+self.conf["downloads"]+"? "): if "mirrors" in e and len(e["mirrors"])>0: if self.conf["mirrorselect"]=="Round-Robin": for m in e["mirrors"]: print(color["green"]+"Fetching "+e["name"]+" From mirror "+m["mirrorname"]+": '"+m["url"]+"' (round-robin selection)") fname,success=self.fetch(m["url"]) lasturl=m["url"] if success: break else: print(color["yellow"]+"Round-Robin mirror download failed for "+e["name"]+" URL: "+lasturl) elif self.conf["mirrorselect"]=="Random": while True: m=random.choice(e["mirrors"]) print(color["green"]+"Fetching "+e["name"]+" From mirror "+m["mirrorname"]+": '"+m["url"]+"' (round-robin selection)") fname,success=self.fetch(m["url"]) lasturl=m["url"] if success: break else: print(color["yellow"]+"Round-Robin mirror download failed for "+e["name"]+" URL: "+lasturl) else: fname,success=self.fetch(e["url"]) if None is fname or None is success: print(color["red"]+"Fetching the download failed.") continue lasturl=e["url"] if success and not None is fname and len(fname)>0 and self.verify_integrity(e,fname): print(color["green"]+"Download of "+e["name"]+" is finished.\nThe downloaded file is saved at '"+fname+"', It was downloaded from the URL '"+lasturl+"' and integrity verification on the downloaded file has passed.") if e["torrent"] == 1: subprocess.Popen([self.conf["torrentapp"],self.conf["torrentoptions"],os.path.abspath(fname)],shell=False).communicate() else: os.remove(fname) 
print(color["red"]+"Download of "+e["name"]+" has failed.\nThe URL used in this download attempt is:"+e["url"]) print(color["white"]+"\n>") break def gitsync(self): print(color["yellow"]+"Synchronizing download catalog ("+self.conf["gitrepo"]+")...") if os.path.exists(self.conf["localclone"]) and os.path.exists(self.conf["localclone"]+"/.git"): os.chdir(self.conf["localclone"]) if self.util.call(["git","pull"])!=0: print(color["red"]+"Error running 'git pull' to update the local clone '"+self.conf["localclone"]+"' of the chosen download catalog. Please fix this manually.") print(color["red"]+"Alternatively, please adjust your configuration to relfect any system or network changes.") elif os.path.exists(self.conf["localclone"]) and not os.path.exists(self.conf["localclone"]+"/.git"): if self.util.call(["git","clone",self.conf["gitrepo"],self.conf["localclone"]])!=0: print(color["red"]+"Error cloning the chosen git repository of the download catalog. Please make sure the local clone directory '"+self.conf["localclone"]+"' can be created by git and whether your internet connection is allowing access to your chosen git repository.") print(color["red"]+"Alternatively, please adjust your configuration to relfect any system or network changes.") elif self.util.mkdirs(self.conf["localclone"])==True: if self.util.call(["git","clone ",self.conf["gitrepo"],self.conf["localclone"]])!=0: print(color["red"]+"Error cloning the chosen git repository of the download catalog. 
Please make sure the local clone directory '"+self.conf["localclone"]+"' can be created by git and whether your internet connection is allowing access to your chosen git repository.") print(color["red"]+"Alternatively, please adjust your configuration to relfect any system or network changes.") else: print(color["red"]+"Unable to create the local download catalog clone directory at:"+self.conf["localclone"]) print(color["white"]+"------------------------------------------") def start(self): dh=self if not dh.init_env(): print("Critical error initializing. Exiting immediately.") sys.exit(1) self.util.clear() print(color["green"]+"\n\nThank you for using download-hygeine. If you like this project start creating your own catalog or contribute downloads to the default catalog.") while True: try: dh.gitsync() if dh.load_catalog()==True: choice=self.util.numeric_choice(color["white"]+"What would you like to do? ",["Browse and download something","List all downloads","List all trusted download signers(identities/public-key information)","Exit"]) if choice=="Browse and download something": dh.pick_and_download() elif choice=="List all downloads": dh.dump_catalog() elif
e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, 
b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = 
to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack a, b, c, d, e, f, g, h, i, j = to_unpack
# Copyright 2021 Hathor Labs # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import hashlib import json from enum import Enum from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Set from cryptography import x509 from cryptography.exceptions import InvalidSignature from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import padding, rsa from OpenSSL.crypto import X509, PKey from twisted.internet.defer import inlineCallbacks from twisted.internet.ssl import Certificate, CertificateOptions, TLSVersion, trustRootFromCertificates from hathor import daa from hathor.conf import HathorSettings from hathor.p2p.utils import connection_string_to_host, discover_dns, generate_certificate if TYPE_CHECKING: from hathor.p2p.protocol import HathorProtocol # noqa: F401 settings = HathorSettings() class InvalidPeerIdException(Exception): pass class PeerFlags(str, Enum): RETRIES_EXCEEDED = 'retries_exceeded' class PeerId: """ Identify a peer, even when it is disconnected. The public_key and private_key are used to ensure that a new connection that claims to be this peer is really from this peer. The entrypoints are strings that describe a way to connect to this peer. Usually a peer will have only one entrypoint. 
""" id: Optional[str] entrypoints: List[str] private_key: Optional[rsa.RSAPrivateKeyWithSerialization] public_key: Optional[rsa.RSAPublicKey] certificate: Optional[x509.Certificate] retry_timestamp: int # should only try connecting to this peer after this timestamp retry_interval: int # how long to wait for next connection retry. It will double for each failure retry_attempts: int # how many retries were made flags: Set[str] def __init__(self, auto_generate_keys: bool = True) -> None: self.id = None self.private_key = None self.public_key = None self.certificate = None self.entrypoints = [] self.retry_timestamp = 0 self.retry_interval = 5 self.retry_attempts = 0 self.flags = set() if auto_generate_keys: self.generate_keys() def __str__(self): return ('PeerId(id=%s, entrypoints=%s, retry_timestamp=%d, retry_interval=%d)' % (self.id, self.entrypoints, self.retry_timestamp, self.retry_interval)) def merge(self, other: 'PeerId') -> None: """ Merge two PeerId objects, checking that they have the same id, public_key, and private_key. The entrypoints are merged without duplicating their entries. """ assert (self.id == other.id) # Copy public key if `self` doesn't have it and `other` does. if not self.public_key and other.public_key: self.public_key = other.public_key self.validate() if self.public_key and other.public_key: assert (self.get_public_key() == other.get_public_key()) # Copy private key if `self` doesn't have it and `other` does. if not self.private_key and other.private_key: self.private_key = other.private_key self.validate() # Merge entrypoints. for ep in other.entrypoints: if ep not in self.entrypoints: self.entrypoints.append(ep) def generate_keys(self, key_size: int = 2048) -> None: """ Generate a random pair of private key and public key. It also calculates the id of this peer, based on its public key. 
""" # https://security.stackexchange.com/questions/5096/rsa-vs-dsa-for-ssh-authentication-keys self.private_key = rsa.generate_private_key(public_exponent=65537, key_size=key_size, backend=default_backend()) self.public_key = self.private_key.public_key() self.id = self.calculate_id() def calculate_id(self) -> str: """ Calculate and return the id based on the public key. """ assert self.public_key is not None public_der = self.public_key.public_bytes(encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo) h1 = hashlib.sha256(public_der) h2 = hashlib.sha256(h1.digest()) return h2.hexdigest() def get_public_key(self) -> str: """ Return the public key in DER encoding as an `str`. """ assert self.public_key is not None public_der = self.public_key.public_bytes(encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo) return base64.b64encode(public_der).decode('utf-8') def sign(self, data: bytes) -> bytes: """ Sign any data (of type `bytes`). """ assert self.private_key is not None return self.private_key.sign( data, padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256()) def verify_signature(self, signature: bytes, data: bytes) -> bool: """ Verify a signature of a data. Both must be of type `bytes`. """ try: assert self.public_key is not None self.public_key.verify(signature, data, padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256()) except InvalidSignature: return False else: return True @classmethod def create_from_json(cls, data: Dict[str, Any]) -> 'PeerId': """ Create a new PeerId from a JSON. It is used both to load a PeerId from disk and to create a PeerId from a peer connection. 
""" obj = cls(auto_generate_keys=False) obj.id = data['id'] if 'pubKey' in data: public_key_der = base64.b64decode(data['pubKey']) obj.public_key = serialization.load_der_public_key(data=public_key_der, backend=default_backend()) if 'privKey' in data: private_key_der = base64.b64decode(data['privKey']) obj.private_key = serialization.load_der_private_key(data=private_key_der, password=None, backend=default_backend()) if 'entrypoints' in data: obj.entrypoints = data['entrypoints'] # TODO(epnichols): call obj.validate()? return obj def validate(self) -> None: """ Return `True` if the following conditions are valid: (i) public key and private key matches; (ii) the id matches with the public key. TODO(epnichols): Update docs. Only raises exceptions; doesn't return anything. """ if self.private_key and not self.public_key: # TODO(epnichols): Modifies self.public_key, even though we're calling "validate". Why is state modified? self.public_key = self.private_key.public_key() if self.public_key: if self.id != self.calculate_id(): raise InvalidPeerIdException('id does not match public key') if self.private_key: assert self.public_key is not None public_der1 = self.public_key.public_bytes(encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo) public_key = self.private_key.public_key() public_der2 = public_key.public_bytes(encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo) if public_der1 != public_der2: raise InvalidPeerIdException('private/public pair does not match') def to_json(self, include_private_key: bool = False) -> Dict[str, Any]: """ Return a JSON serialization of the object. By default, it will not include the private key. If you would like to add it, use the parameter `include_private_key`. 
""" assert self.public_key is not None public_der = self.public_key.public_bytes(encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo) # This format is compatible with libp2p. result = { 'id': self.id, 'pubKey': base64.b64encode(public_der).decode('utf-8'), 'entrypoints': self.entrypoints, } if include_private_key: assert self.private_key is not None private_der = self.private_key.private_bytes( encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.PKCS8, # TODO encryption_algorithm=serialization.BestAvailableEncryption(b'mypassword') encryption_algorithm=serialization.NoEncryption()) result['privKey'] = base64.b64encode(private_der).decode('utf-8') return result def save_to_file(self, path: str) -> None: """ Save the object to a JSON file. """ data = self.to_json(include_private_key=True) fp = open(path, 'w') json.dump(data, fp, indent=4) fp.close() def increment_retry_attempt(self, now: int) -> None: """ Updates timestamp for next retry. :param now: current timestamp """ self.retry_timestamp = now + self.retry_interval self.retry_attempts += 1 self.retry_interval = self.retry_interval * settings.PEER_CONNECTION_RETRY_INTERVAL_MULTIPLIER if self.retry_interval > settings.PEER_CONNECTION_RETRY_MAX_RETRY_INTERVAL: self.retry_interval = settings.PEER_CONNECTION_RETRY_MAX_RETRY_INTERVAL def reset_retry_timestamp(self) -> None: """ Resets retry values. 
""" self.retry_interval = 5 self.retry_timestamp = 0 self.retry_attempts = 0 self.flags.discard(PeerFlags.RETRIES_EXCEEDED) def can_retry(self, now: int) -> bool: """ Return if can retry to connect to self in `now` timestamp We validate if peer already has RETRIES_EXCEEDED flag, or has reached the maximum allowed attempts If not, we check if the timestamp is already a valid one to retry """ if now < self.retry_timestamp: return False return True def get_certificate(self) -> x509.Certificate: if not self.certificate: certificate = generate_certificate(self.private_key, settings.CA_FILEPATH, settings.CA_KEY_FILEPATH) self.certificate = certificate return self.certificate def get_certificate_options(self) -> CertificateOptions: """ Return certificate options With certificate generated and signed with peer private key """ certificate = self.get_certificate() openssl_certificate = X509.from_cryptography(certificate) openssl_pkey = PKey.from_cryptography_key(self.private_key) with open(settings.CA_FILEPATH, 'rb') as f: ca = x509.load_pem_x509_certificate(data=f.read(), backend=default_backend()) openssl_ca = X509.from_cryptography(ca) ca_cert = Certificate(openssl_ca) trust_root = trustRootFromCertificates([ca_cert]) # We should not use a ContextFactory # https://twistedmatrix.com/documents/19.7.0/api/twisted.protocols.tls.TLSMemoryBIOFactory.html certificate_options = CertificateOptions( privateKey=openssl_pkey, certificate=openssl_certificate, trustRoot=trust_root, raiseMinimumTo=TLSVersion.TLSv1_3 ) return certificate_options @inlineCallbacks def validate_entrypoint(self, protocol: 'HathorProtocol') -> Generator[Any, Any, bool]: """ Validates if connection entrypoint is one of the peer entrypoints """ found_entrypoint = False # If has no entrypoints must be behind a NAT, so we add the flag to the connection if len(self.entrypoints) == 0: protocol.warning_flags.add(protocol.WarningFlags.NO_ENTRYPOINTS) # If there are no entrypoints, we don't need to validate it 
found_entrypoint = True # Entrypoint validation with connection string and connection host # Entrypoints have the format tcp://IP|name:port for entrypoint in self.entrypoints: if protocol.connection_string: # Connection string has the format tcp://IP:port # So we must consider that the entrypoint could be in name format if protocol.connection_string == entrypoint: # Found the entrypoint found_entrypoint = True break host = connection_string_to_host(entrypoint) # TODO: don't use `daa.TEST_MODE` for this result = yield discover_dns(host, daa.TEST_MODE) if protocol.connection_string in result: # Found the entrypoint found_entrypoint = True break else: # When the peer is the server part of the connection we don't have the full connection_string # So we can only validate the host from the protocol connection_remote = protocol.transport.getPeer() connection_host = connection_remote.host # Connection host has only the IP # So we must consider that the entrypoint could be in name format and we just validate the host host = connection_string_to_host(entrypoint) if connection_host == host: found_entrypoint = True break result = yield discover_dns(host, daa.TEST_MODE)
                    **range:** 250..2000
                    **units:** millisecond
                    **default value:** 1000
                    """

                    # NOTE: machine-generated YANG binding code for module
                    # Cisco-IOS-XR-ip-rsvp-cfg; hand edits would normally be
                    # overwritten by the generator.
                    _prefix = 'ip-rsvp-cfg'
                    _revision = '2017-05-01'

                    def __init__(self):
                        super(Rsvp.Interfaces.Interface.IfSignalling.IntervalRate, self).__init__()

                        self.yang_name = "interval-rate"
                        self.yang_parent_name = "if-signalling"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_container_classes = OrderedDict([])
                        self._child_list_classes = OrderedDict([])
                        # Leaf registry: python attribute name -> YANG leaf descriptor.
                        self._leafs = OrderedDict([
                            ('messages_per_interval', YLeaf(YType.uint32, 'messages-per-interval')),
                            ('interval_size', YLeaf(YType.uint32, 'interval-size')),
                        ])
                        self.messages_per_interval = None
                        self.interval_size = None
                        self._segment_path = lambda: "interval-rate"

                    def __setattr__(self, name, value):
                        # Route leaf assignments through the Entity machinery.
                        self._perform_setattr(Rsvp.Interfaces.Interface.IfSignalling.IntervalRate, ['messages_per_interval', 'interval_size'], name, value)


                class OutOfBand(Entity):
                    """
                    Configure RSVP out-of-band signalling parameters.

                    missed_messages: int (1..110000, default 38000) — max consecutive
                        missed messages for state expiry for out-of-band tunnels.
                    refresh_interval: int (180..86400, seconds) — interval between
                        successive refreshes for out-of-band tunnels.
                    """

                    _prefix = 'ip-rsvp-cfg'
                    _revision = '2017-05-01'

                    def __init__(self):
                        super(Rsvp.Interfaces.Interface.IfSignalling.OutOfBand, self).__init__()

                        self.yang_name = "out-of-band"
                        self.yang_parent_name = "if-signalling"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_container_classes = OrderedDict([])
                        self._child_list_classes = OrderedDict([])
                        self._leafs = OrderedDict([
                            ('missed_messages', YLeaf(YType.uint32, 'missed-messages')),
                            ('refresh_interval', YLeaf(YType.uint32, 'refresh-interval')),
                        ])
                        self.missed_messages = None
                        self.refresh_interval = None
                        self._segment_path = lambda: "out-of-band"

                    def __setattr__(self, name, value):
                        self._perform_setattr(Rsvp.Interfaces.Interface.IfSignalling.OutOfBand, ['missed_messages', 'refresh_interval'], name, value)


            class Bandwidth(Entity):
                """
                Configure Bandwidth.

                mam: Rsvp.Interfaces.Interface.Bandwidth.Mam — MAM bandwidth parameters.
                rdm: Rsvp.Interfaces.Interface.Bandwidth.Rdm — RDM bandwidth parameters.
                """

                _prefix = 'ip-rsvp-cfg'
                _revision = '2017-05-01'

                def __init__(self):
                    super(Rsvp.Interfaces.Interface.Bandwidth, self).__init__()

                    self.yang_name = "bandwidth"
                    self.yang_parent_name = "interface"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_container_classes = OrderedDict([("mam", ("mam", Rsvp.Interfaces.Interface.Bandwidth.Mam)), ("rdm", ("rdm", Rsvp.Interfaces.Interface.Bandwidth.Rdm))])
                    self._child_list_classes = OrderedDict([])
                    self._leafs = OrderedDict()

                    # Child containers are instantiated eagerly and linked back
                    # to this node via `parent`.
                    self.mam = Rsvp.Interfaces.Interface.Bandwidth.Mam()
                    self.mam.parent = self
                    self._children_name_map["mam"] = "mam"
                    self._children_yang_names.add("mam")

                    self.rdm = Rsvp.Interfaces.Interface.Bandwidth.Rdm()
                    self.rdm.parent = self
                    self._children_name_map["rdm"] = "rdm"
                    self._children_yang_names.add("rdm")
                    self._segment_path = lambda: "bandwidth"


                class Mam(Entity):
                    """
                    Configure MAM bandwidth parameters.

                    max_resv_bandwidth: int (0..4294967295) — maximum reservable bandwidth
                        (Kbps or percent of physical bandwidth).
                    max_resv_flow: int (0..4294967295) — largest reservable flow.
                    bc0_bandwidth: int (0..4294967295) — reservable bandwidth in BC0.
                    bc1_bandwidth: int (0..4294967295) — reservable bandwidth in BC1.
                    bandwidth_mode: RsvpBwCfg — absolute or percentage bandwidth mode.
                    """

                    _prefix = 'ip-rsvp-cfg'
                    _revision = '2017-05-01'

                    def __init__(self):
                        super(Rsvp.Interfaces.Interface.Bandwidth.Mam, self).__init__()

                        self.yang_name = "mam"
                        self.yang_parent_name = "bandwidth"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_container_classes = OrderedDict([])
                        self._child_list_classes = OrderedDict([])
                        self._leafs = OrderedDict([
                            ('max_resv_bandwidth', YLeaf(YType.uint32, 'max-resv-bandwidth')),
                            ('max_resv_flow', YLeaf(YType.uint32, 'max-resv-flow')),
                            ('bc0_bandwidth', YLeaf(YType.uint32, 'bc0-bandwidth')),
                            ('bc1_bandwidth', YLeaf(YType.uint32, 'bc1-bandwidth')),
                            ('bandwidth_mode', YLeaf(YType.enumeration, 'bandwidth-mode')),
                        ])
                        self.max_resv_bandwidth = None
                        self.max_resv_flow = None
                        self.bc0_bandwidth = None
                        self.bc1_bandwidth = None
                        self.bandwidth_mode = None
                        self._segment_path = lambda: "mam"

                    def __setattr__(self, name, value):
                        self._perform_setattr(Rsvp.Interfaces.Interface.Bandwidth.Mam, ['max_resv_bandwidth', 'max_resv_flow', 'bc0_bandwidth', 'bc1_bandwidth', 'bandwidth_mode'], name, value)


                class Rdm(Entity):
                    """
                    Configure RDM bandwidth parameters.

                    max_resv_flow: int (0..4294967295) — largest reservable flow
                        (Kbps or percent of physical bandwidth).
                    bc0_bandwidth: int (0..4294967295) — reservable bandwidth in BC0.
                    bc1_bandwidth: int (0..4294967295) — reservable bandwidth in BC1.
                    rdm_keyword: RsvpRdm — set requests should always use RDM.
                    bc0_keyword: RsvpBc0 — set requests should always use BC0.
                    bc1_keyword: RsvpBc1 — set requests should always use BC1.
                    bandwidth_mode: RsvpBwCfg — absolute or percentage bandwidth mode.
                    """

                    _prefix = 'ip-rsvp-cfg'
                    _revision = '2017-05-01'

                    def __init__(self):
                        super(Rsvp.Interfaces.Interface.Bandwidth.Rdm, self).__init__()

                        self.yang_name = "rdm"
                        self.yang_parent_name = "bandwidth"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_container_classes = OrderedDict([])
                        self._child_list_classes = OrderedDict([])
                        self._leafs = OrderedDict([
                            ('max_resv_flow', YLeaf(YType.uint32, 'max-resv-flow')),
                            ('bc0_bandwidth', YLeaf(YType.uint32, 'bc0-bandwidth')),
                            ('bc1_bandwidth', YLeaf(YType.uint32, 'bc1-bandwidth')),
                            ('rdm_keyword', YLeaf(YType.enumeration, 'rdm-keyword')),
                            ('bc0_keyword', YLeaf(YType.enumeration, 'bc0-keyword')),
                            ('bc1_keyword', YLeaf(YType.enumeration, 'bc1-keyword')),
                            ('bandwidth_mode', YLeaf(YType.enumeration, 'bandwidth-mode')),
                        ])
                        self.max_resv_flow = None
                        self.bc0_bandwidth = None
                        self.bc1_bandwidth = None
                        self.rdm_keyword = None
                        self.bc0_keyword = None
                        self.bc1_keyword = None
                        self.bandwidth_mode = None
                        self._segment_path = lambda: "rdm"

                    def __setattr__(self, name, value):
                        self._perform_setattr(Rsvp.Interfaces.Interface.Bandwidth.Rdm, ['max_resv_flow', 'bc0_bandwidth', 'bc1_bandwidth', 'rdm_keyword', 'bc0_keyword', 'bc1_keyword', 'bandwidth_mode'], name, value)


            class Authentication(Entity):
                """
                Configure RSVP authentication.

                life_time: int (30..86400, seconds) — life time for each security association.
                enable: bool — enable or disable RSVP authentication.
                window_size: int (1..64) — window-size to limit out-of-order messages.
                key_chain: str (length 1..32) — key chain to authenticate RSVP
                    signalling messages.
                """

                _prefix = 'ip-rsvp-cfg'
                _revision = '2017-05-01'

                def __init__(self):
                    super(Rsvp.Interfaces.Interface.Authentication, self).__init__()

                    self.yang_name = "authentication"
                    self.yang_parent_name = "interface"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_container_classes = OrderedDict([])
                    self._child_list_classes = OrderedDict([])
                    self._leafs = OrderedDict([
                        ('life_time', YLeaf(YType.uint32, 'life-time')),
                        ('enable', YLeaf(YType.boolean, 'enable')),
                        ('window_size', YLeaf(YType.uint32, 'window-size')),
                        ('key_chain', YLeaf(YType.str, 'key-chain')),
                    ])
                    self.life_time = None
                    self.enable = None
                    self.window_size = None
                    self.key_chain = None
                    self._segment_path = lambda: "authentication"

                def __setattr__(self, name, value):
                    self._perform_setattr(Rsvp.Interfaces.Interface.Authentication, ['life_time', 'enable', 'window_size', 'key_chain'], name, value)


    class Signalling(Entity):
        """
        Configure Global RSVP signalling parameters.

        global_out_of_band: Rsvp.Signalling.GlobalOutOfBand — out-of-band signalling.
        graceful_restart: Rsvp.Signalling.GracefulRestart — Graceful-Restart parameters.
        prefix_filtering: Rsvp.Signalling.PrefixFiltering — prefix filtering parameters.
        pesr: Rsvp.Signalling.Pesr — Path Error with State-Removal flag.
        checksum: Rsvp.Signalling.Checksum — RSVP message checksum computation.
        hello_graceful_restart_misses: int (1..10, default 3) — max consecutive
            missed Hello messages.
        hello_graceful_restart_interval: int (3000..30000 ms, default 5000) —
            interval between successive Hello messages.
        """

        _prefix = 'ip-rsvp-cfg'
        _revision = '2017-05-01'

        def __init__(self):
            super(Rsvp.Signalling, self).__init__()

            self.yang_name = "signalling"
            self.yang_parent_name = "rsvp"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = []
            self._child_container_classes = OrderedDict([("global-out-of-band", ("global_out_of_band", Rsvp.Signalling.GlobalOutOfBand)), ("graceful-restart", ("graceful_restart", Rsvp.Signalling.GracefulRestart)), ("prefix-filtering", ("prefix_filtering", Rsvp.Signalling.PrefixFiltering)), ("pesr", ("pesr", Rsvp.Signalling.Pesr)), ("checksum", ("checksum", Rsvp.Signalling.Checksum))])
            self._child_list_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('hello_graceful_restart_misses', YLeaf(YType.uint32, 'hello-graceful-restart-misses')),
                ('hello_graceful_restart_interval', YLeaf(YType.uint32, 'hello-graceful-restart-interval')),
            ])
            self.hello_graceful_restart_misses = None
            self.hello_graceful_restart_interval = None

            self.global_out_of_band = Rsvp.Signalling.GlobalOutOfBand()
            self.global_out_of_band.parent = self
            self._children_name_map["global_out_of_band"] = "global-out-of-band"
            self._children_yang_names.add("global-out-of-band")

            self.graceful_restart = Rsvp.Signalling.GracefulRestart()
            self.graceful_restart.parent = self
            self._children_name_map["graceful_restart"] = "graceful-restart"
            self._children_yang_names.add("graceful-restart")

            self.prefix_filtering = Rsvp.Signalling.PrefixFiltering()
            self.prefix_filtering.parent = self
            self._children_name_map["prefix_filtering"] = "prefix-filtering"
            self._children_yang_names.add("prefix-filtering")

            self.pesr = Rsvp.Signalling.Pesr()
            self.pesr.parent = self
            self._children_name_map["pesr"] = "pesr"
            self._children_yang_names.add("pesr")

            self.checksum = Rsvp.Signalling.Checksum()
            self.checksum.parent = self
            self._children_name_map["checksum"] = "checksum"
            self._children_yang_names.add("checksum")
            self._segment_path = lambda: "signalling"
            self._absolute_path = lambda: "Cisco-IOS-XR-ip-rsvp-cfg:rsvp/%s" % self._segment_path()

        def __setattr__(self, name, value):
            self._perform_setattr(Rsvp.Signalling, ['hello_graceful_restart_misses', 'hello_graceful_restart_interval'], name, value)


        class GlobalOutOfBand(Entity):
            """
            Configure out-of-band signalling parameters.

            vrf: str (length 1..32) — VRF used for out-of-band control signalling.
            """

            _prefix = 'ip-rsvp-cfg'
            _revision = '2017-05-01'

            def __init__(self):
                super(Rsvp.Signalling.GlobalOutOfBand, self).__init__()

                self.yang_name = "global-out-of-band"
                self.yang_parent_name = "signalling"
                self.is_top_level_class = False
                self.has_list_ancestor = False
                self.ylist_key_names = []
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('vrf', YLeaf(YType.str, 'vrf')),
                ])
                self.vrf = None
                self._segment_path = lambda: "global-out-of-band"
                self._absolute_path = lambda: "Cisco-IOS-XR-ip-rsvp-cfg:rsvp/signalling/%s" % self._segment_path()

            def __setattr__(self, name, value):
                self._perform_setattr(Rsvp.Signalling.GlobalOutOfBand, ['vrf'], name, value)


        class GracefulRestart(Entity):
            """
            Configure RSVP Graceful-Restart parameters.

            lsp_class_type: Rsvp.Signalling.GracefulRestart.LspClassType — send LSP's
                ctype for recovery and suggested label.
            enable: bool — enable RSVP graceful restart.
            restart_time: int (60..3600 seconds, default 120) — graceful restart time.
            recovery_time: int (0..3600 seconds, default 120) — recovery time.
            """

            _prefix = 'ip-rsvp-cfg'
            _revision = '2017-05-01'

            def __init__(self):
                super(Rsvp.Signalling.GracefulRestart, self).__init__()

                self.yang_name = "graceful-restart"
                self.yang_parent_name = "signalling"
                self.is_top_level_class = False
                self.has_list_ancestor = False
                # NOTE: the remainder of this generated __init__ is truncated in this view.
                self.ylist_key_names
# -*- coding:utf-8 -*-
import json
import os
import numpy as np
import random
import config
import jieba
import re


class file_data_loader:
    """ Abstract base class for data loaders. """

    def __next__(self):
        raise NotImplementedError

    def next(self):
        # Python 2 style alias for __next__.
        return self.__next__()

    def next_batch(self, batch_size):
        raise NotImplementedError


class cmp():
    """ Sort-key wrapper: orders instances by 'head_id#tail_id#relation'.

    Used as `list.sort(key=cmp)`; sorting only requires `__lt__`.
    """

    def __init__(self, x):
        self.x = x

    def __lt__(self, other):
        a_key = self.x['head']['id'] + '#' + self.x['tail']['id'] + '#' + self.x['relation']
        b_key = other.x['head']['id'] + '#' + other.x['tail']['id'] + '#' + other.x['relation']
        # FIX: the original returned True when the keys were equal (and also when
        # a_key < b_key), making __lt__ non-strict and violating the ordering
        # contract expected by sort. A strict comparison is correct.
        return a_key < b_key


class json_file_data_loader(file_data_loader):
    """ Loads a JSON relation-extraction dataset, pre-processes it into numpy
    arrays (word ids, relative positions, masks, lengths), and caches the
    result under `_processed_data/`.
    """

    # Smallest unit is a single instance.
    MODE_INSTANCE = 0
    # Smallest unit is a bag sharing the same entity pair; typically used for
    # test (the relation is unknown at test time).
    MODE_ENTPAIR_BAG = 1
    # Smallest unit is a bag sharing the same entity pair and relation;
    # typically used for train.
    MODE_RELFACT_BAG = 2

    def sentence_segmentation(self, sentence, entity1, entity2):
        """ Chinese word segmentation, keeping each entity as a single token. """
        jieba.add_word(entity1, freq=999999)
        jieba.add_word(entity2, freq=999999)
        seglist = list(jieba.cut(sentence, cut_all=False, HMM=False))
        jieba.del_word(entity1)
        jieba.del_word(entity2)
        return seglist

    def _load_processed_file(self):
        """ Try to load pre-processed arrays from `_processed_data/`.

        Returns True on success; False when any cached file is missing or the
        cached max_length no longer matches the configuration.
        """
        # train or test
        name_prefix = '.'.join(self.file_name.split('/')[-1].split('.')[:-1])
        word_vec_name_prefix = '.'.join(self.word_vec_file_name.split('/')[-1].split('.')[:-1])
        processed_data_dir = '_processed_data'
        if not os.path.isdir(processed_data_dir):
            return False
        word_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_word.npy')
        pos1_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_pos1.npy')
        pos2_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_pos2.npy')
        rel_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_rel.npy')
        mask_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_mask.npy')
        length_npy_file_name = os.path.join(processed_data_dir, name_prefix + '_length.npy')
        entpair2scope_file_name = os.path.join(processed_data_dir, name_prefix + '_entpair2scope.json')
        relfact2scope_file_name = os.path.join(processed_data_dir, name_prefix + '_relfact2scope.json')
        word_vec_mat_file_name = os.path.join(processed_data_dir, word_vec_name_prefix + '_mat.npy')
        word2id_file_name = os.path.join(processed_data_dir, word_vec_name_prefix + '_word2id.json')
        if not os.path.exists(word_npy_file_name) or \
                not os.path.exists(pos1_npy_file_name) or \
                not os.path.exists(pos2_npy_file_name) or \
                not os.path.exists(rel_npy_file_name) or \
                not os.path.exists(mask_npy_file_name) or \
                not os.path.exists(length_npy_file_name) or \
                not os.path.exists(entpair2scope_file_name) or \
                not os.path.exists(relfact2scope_file_name) or \
                not os.path.exists(word_vec_mat_file_name) or \
                not os.path.exists(word2id_file_name):
            return False
        print("Loading pre-processing files...")
        self.data_word = np.load(word_npy_file_name)
        self.data_pos1 = np.load(pos1_npy_file_name)
        self.data_pos2 = np.load(pos2_npy_file_name)
        self.data_rel = np.load(rel_npy_file_name)
        self.data_mask = np.load(mask_npy_file_name)
        self.data_length = np.load(length_npy_file_name)
        with open(entpair2scope_file_name, 'r', encoding='utf8') as fr:
            self.entpair2scope = json.load(fr)
        with open(relfact2scope_file_name, 'r', encoding='utf8') as fr:
            self.relfact2scope = json.load(fr)
        self.word_vec_mat = np.load(word_vec_mat_file_name)
        with open(word2id_file_name, 'r', encoding='utf8') as fr:
            self.word2id = json.load(fr)
        if (self.data_word.shape[1] != config.model.max_length):
            # FIX: typo in the original message ("ata").
            print("Pre-processing data is expired, Reprocessing")
            return False
        print('Finish loading')
        return True

    def __init__(self, file_name, word_vec_file_name, rel2id_file_name, mode, shuffle,
                 max_length=config.model.max_length, batch_size=config.model.batch_size):
        '''
        :param file_name: path of the data file
        :param word_vec_file_name: path of the word-vector file
        :param rel2id_file_name: path of the relation-to-id mapping
        :param mode: data organization mode, one of MODE_INSTANCE,
            MODE_ENTPAIR_BAG, MODE_RELFACT_BAG
        :param shuffle: whether to shuffle the data
        :param max_length: maximum sentence length
        :param batch_size: batch size
        '''
        self.file_name = file_name
        self.word_vec_file_name = word_vec_file_name
        self.rel2id_file_name = rel2id_file_name
        self.mode = mode
        self.shuffle = shuffle
        self.max_length = max_length
        self.batch_size = batch_size

        with open(rel2id_file_name, 'r') as fr:
            self.rel2id = json.load(fr)

        if (not self._load_processed_file()):
            if file_name is None or not os.path.isfile(file_name):
                raise Exception("[ERROR] Data file doesn't exist")
            if word_vec_file_name is None or not os.path.isfile(word_vec_file_name):
                raise Exception("[ERROR] word2vec file doesn't exist")
            if rel2id_file_name is None or not os.path.isfile(rel2id_file_name):
                # FIX: the original message wrongly said "word2vec file".
                raise Exception("[ERROR] rel2id file doesn't exist")

            # load file
            print("Loading data file...")
            with open(self.file_name, 'r', encoding='utf8') as fr:
                self.ori_data = json.load(fr)
            print("Finish loading")
            print("Loading word vector file...")
            with open(self.word_vec_file_name, 'r', encoding='utf8') as fr:
                self.ori_word_vec = json.load(fr)
            print("Finish loading")

            # sort data by entities and relations so that each bag is contiguous
            print("sort data")
            self.ori_data.sort(key=cmp)
            print('Finish sorting')

            # pre-processing word vec
            self.word2id = {}
            self.word_vec_tot = len(self.ori_word_vec)
            UNK = self.word_vec_tot
            BLANK = self.word_vec_tot + 1
            self.word2id['UNK'] = UNK
            self.word2id['BLANK'] = BLANK
            self.word_vec_dim = len(self.ori_word_vec[0]['vec'])
            print("Got {} words of {} dims".format(self.word_vec_tot, self.word_vec_dim))
            print("Building word vector matrix and mapping...")
            # FIX: the matrix must have two extra rows for UNK and BLANK
            # (ids word_vec_tot and word_vec_tot + 1); the original allocated
            # only word_vec_tot rows, so those ids were out of range for any
            # embedding lookup. The extra rows stay zero-initialized.
            self.word_vec_mat = np.zeros((self.word_vec_tot + 2, self.word_vec_dim), dtype=np.float32)
            for cur_id, word in enumerate(self.ori_word_vec):
                w = word['word']
                self.word2id[w] = cur_id
                self.word_vec_mat[cur_id, :] = word['vec']
            # Re-assert the special ids in case a vocabulary entry was literally
            # named 'UNK' or 'BLANK'.
            self.word2id['UNK'] = UNK
            self.word2id['BLANK'] = BLANK
            print("Finish building")

            # Pre-processing
            print("Pre-processing data...")
            self.instance_tot = len(self.ori_data)
            self.entpair2scope = {}  # (head, tail) -> scope
            self.relfact2scope = {}  # (head, tail, rel) -> scope
            self.data_word = np.zeros((self.instance_tot, self.max_length), dtype=np.int32)
            self.data_pos1 = np.zeros((self.instance_tot, self.max_length), dtype=np.int32)
            self.data_pos2 = np.zeros((self.instance_tot, self.max_length), dtype=np.int32)
            self.data_rel = np.zeros((self.instance_tot), dtype=np.int32)
            self.data_mask = np.zeros((self.instance_tot, self.max_length), dtype=np.int32)
            self.data_length = np.zeros((self.instance_tot), dtype=np.int32)
            last_entpair = ''
            last_entpair_pos = -1
            last_relfact = ''
            last_relfact_pos = -1
            dirty_data_number = 0  # NOTE(review): never incremented below — confirm intent.
            pattern = re.compile(r'\s')
            # Hoisted out of the loop: the dataset name only depends on file_name.
            dataset = file_name.split("/")[-2]
            for i in range(self.instance_tot):
                ins = self.ori_data[i]
                if dataset == 'agriculture':
                    # Replace whitespace inside the raw sentence so positions stay stable.
                    ins['sentence'] = re.sub(pattern, '_', ins['sentence'])
                if (ins['relation'] in self.rel2id):
                    self.data_rel[i] = self.rel2id[ins['relation']]
                else:
                    self.data_rel[i] = self.rel2id['NA']
                if dataset == 'nyt':
                    sentence = ' '.join(ins['sentence'].split())
                elif dataset == 'agriculture':
                    sentence = ' '.join(
                        self.sentence_segmentation(ins['sentence'], ins['head']['word'], ins['tail']['word']))
                else:
                    raise NameError
                head = ins['head']['word']
                tail = ins['tail']['word']
                cur_entpair = ins['head']['id'] + '#' + ins['tail']['id']
                cur_relfact = ins['head']['id'] + '#' + ins['tail']['id'] + '#' + ins['relation']
                # Close the previous scope when the entity pair / relation fact changes.
                if (cur_entpair != last_entpair):
                    if (last_entpair != ''):
                        self.entpair2scope[last_entpair] = [last_entpair_pos, i]
                    last_entpair = cur_entpair
                    last_entpair_pos = i
                if (cur_relfact != last_relfact):
                    if (last_relfact != ''):
                        self.relfact2scope[last_relfact] = [last_relfact_pos, i]
                    last_relfact = cur_relfact
                    last_relfact_pos = i
                # position
                if dataset == 'nyt':
                    p1 = sentence.find(' ' + head + ' ')
                    p2 = sentence.find(' ' + tail + ' ')
                    # Handle entities at the beginning / end of the sentence.
                    if (p1 == -1):
                        if (sentence[:len(head) + 1] == head + " "):
                            p1 = 0
                        elif (sentence[-len(head) - 1:] == " " + head):
                            p1 = len(sentence) - len(head)
                        else:
                            p1 = 0
                    else:
                        p1 += 1
                    if (p2 == -1):
                        # FIX: the original fallback checked `head` here (copy-paste);
                        # the tail entity must be checked for p2.
                        if (sentence[:len(tail) + 1] == tail + " "):
                            p2 = 0
                        elif (sentence[-len(tail) - 1:] == " " + tail):
                            p2 = len(sentence) - len(tail)
                        else:
                            p2 = 0
                    else:
                        p2 += 1
                elif dataset == 'agriculture':
                    p1 = int(ins['head']['pos'])
                    p2 = int(ins['tail']['pos'])
                words = sentence.split()
                cur_ref_data_word = self.data_word[i]
                cur_pos = 0
                pos1 = -1
                pos2 = -1
                for j, word in enumerate(words):
                    if (j < max_length):
                        if word in self.word2id:
                            cur_ref_data_word[j] = self.word2id[word]
                        else:
                            cur_ref_data_word[j] = UNK
                    if cur_pos == p1:
                        pos1 = j
                        p1 = -1
                    if cur_pos == p2:
                        pos2 = j
                        p2 = -1
                    if dataset == 'nyt':
                        cur_pos += len(word) + 1
                    elif dataset == 'agriculture':
                        tmp = cur_pos
                        cur_pos += len(word)
                        while cur_pos < len(ins['sentence']) and ins['sentence'][cur_pos] == " ":
                            cur_pos += 1
                        if tmp < p1 and cur_pos > p1:
                            pos1 = j
                            p1 = -1
                        if tmp < p2 and cur_pos > p2:
                            pos2 = j
                            p2 = -1
                    else:
                        raise NameError
                if cur_pos == p1:
                    pos1 = len(words) - 1
                if cur_pos == p2:
                    pos2 = len(words) - 1
                if pos1 >= max_length:
                    pos1 = max_length - 1
                if pos2 >= max_length:
                    pos2 = max_length - 1
                # Pad the rest of the sentence with BLANK ids.
                for k in range(len(words), max_length):
                    cur_ref_data_word[k] = BLANK
                self.data_length[i] = min(len(words), max_length)
                if (pos1 == -1 or pos2 == -1):
                    print(p1, p2)
                    raise Exception(
                        "[ERROR] Position error, index = {}, sentence = {}, head = {}, tail = {}".format(
                            i, sentence, head, tail))
                pos1 = min(pos1, max_length - 1)
                pos2 = min(pos2, max_length - 1)
                pos_min = min(pos1, pos2)
                pos_max = max(pos1, pos2)
                for j in range(max_length):
                    # Relative positions are shifted by max_length so they stay non-negative.
                    self.data_pos1[i][j] = j - pos1 + max_length
                    self.data_pos2[i][j] = j - pos2 + max_length
                    # Piecewise mask: 0 = padding, 1/2/3 = segment relative to the entities.
                    if (j >= self.data_length[i]):
                        self.data_mask[i][j] = 0
                    elif j <= pos_min:
                        self.data_mask[i][j] = 1
                    elif j <= pos_max:
                        self.data_mask[i][j] = 2
                    else:
                        self.data_mask[i][j] = 3
            # Close the trailing scopes.
            if last_entpair != '':
                self.entpair2scope[last_entpair] = [last_entpair_pos, self.instance_tot]
            if last_relfact != '':
                self.relfact2scope[last_relfact] = [last_relfact_pos, self.instance_tot]
            print("Finish pre-processing")

            print("Storing preprocessing file...")
            # train or test
            name_prefix = '.'.join(self.file_name.split('/')[-1].split('.')[:-1])
            word_vec_name_prefix = '.'.join(self.word_vec_file_name.split('/')[-1].split('.')[:-1])
            processed_data_dir = '_processed_data'
            if not os.path.isdir(processed_data_dir):
                os.mkdir(processed_data_dir)
            print("discards data number ", dirty_data_number)
            np.save(os.path.join(processed_data_dir, name_prefix + '_word.npy'), self.data_word)
            np.save(os.path.join(processed_data_dir, name_prefix + '_pos1.npy'), self.data_pos1)
            np.save(os.path.join(processed_data_dir, name_prefix + '_pos2.npy'), self.data_pos2)
            np.save(os.path.join(processed_data_dir, name_prefix + '_rel.npy'), self.data_rel)
            np.save(os.path.join(processed_data_dir, name_prefix + '_mask.npy'), self.data_mask)
            np.save(os.path.join(processed_data_dir, name_prefix + '_length.npy'), self.data_length)
            with open(os.path.join(processed_data_dir, name_prefix + '_entpair2scope.json'), 'w',
                      encoding='utf8') as fw:
                json.dump(self.entpair2scope, fw, ensure_ascii=False)
            with open(os.path.join(processed_data_dir, name_prefix + '_relfact2scope.json'), 'w',
                      encoding='utf8') as fw:
                json.dump(self.relfact2scope, fw, ensure_ascii=False)
            np.save(os.path.join(processed_data_dir, word_vec_name_prefix + '_mat.npy'), self.word_vec_mat)
            with open(os.path.join(processed_data_dir, word_vec_name_prefix + '_word2id.json'), 'w',
                      encoding='utf8') as fw:
                json.dump(self.word2id, fw, ensure_ascii=False)
            print("Finish storing")

        self.instance_tot = self.data_word.shape[0]
        self.entpair_tot = len(self.entpair2scope)
        self.relfact_tot = 0  # number of relation facts, excluding NA
        for key in self.relfact2scope:
            if (key[-2:] != 'NA'):
                self.relfact_tot += 1
        self.rel_tot = len(self.rel2id)

        if self.mode == self.MODE_INSTANCE:
            self.order = list(range(self.instance_tot))
        elif self.mode == self.MODE_ENTPAIR_BAG:
            self.order = list(range(len(self.entpair2scope)))
            self.scope_name = []
            self.scope = []
            for key, value in self.entpair2scope.items():
                self.scope_name.append(key)
                self.scope.append(value)
        elif self.mode == self.MODE_RELFACT_BAG:
            self.order = list(range(len(self.relfact2scope)))
            self.scope_name = []
            self.scope = []
            for key, value in self.relfact2scope.items():
                self.scope_name.append(key)
                self.scope.append(value)
        else:
            raise Exception("[ERROR] Invalid mode")
        print("len order", len(self.order))
        self.idx = 0
        if (self.shuffle):
            random.shuffle(self.order)
        print("Total relation fact:%d" % (self.relfact_tot))
        # NOTE: the original file continues past this point (truncated in this view).
<reponame>knjcode/imgdupes #!/usr/bin/env python # coding: utf-8 from __future__ import (absolute_import, division, print_function) from logging import getLogger, StreamHandler, DEBUG logger = getLogger(__name__) handler = StreamHandler() # handler.setLevel(DEBUG) # logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate = False from builtins import input from datetime import datetime from multiprocessing import cpu_count from operator import itemgetter from pathlib import Path from PIL import Image from termcolor import colored, cprint from tqdm import tqdm from orderedset import OrderedSet import imagehash import os import re import six import sys import math import GPUtil import numpy as np from common.imgcatutil import imgcat_for_iTerm2, create_tile_img from common.hashcache import HashCache class ImageDeduper: def __init__(self, args, image_filenames): self.target_dir = args.target_dir self.files_from = args.files_from self.recursive = args.recursive self.hash_bits = args.hash_bits self.sort = args.sort self.reverse = args.reverse self.image_filenames = image_filenames self.hash_method = args.hash_method self.hamming_distance = args.hamming_distance self.cache = args.cache self.ngt = args.ngt self.hnsw = args.hnsw self.faiss_flat = args.faiss_flat self.faiss_cuda = args.faiss_cuda if self.faiss_cuda and len(GPUtil.getGPUs()) <= 0: logger.warning("There were no CUDA enabled devices found on this system. Defaulting to CPU...") self.faiss_cuda = False self.cuda_device = args.cuda_device if self.faiss_cuda: if self.cuda_device == -1: self.cuda_device = self.get_lowest_load_cuda_device() logger.warning("CUDA device auto selected. CUDA Device: {}".format(self.cuda_device)) elif self.cuda_device >= len(GPUtil.getGPUs()): self.cuda_device = self.get_lowest_load_cuda_device() logger.warning(colored("The passed CUDA device was not found on the system. 
Defaulting to device: " "{}".format(self.cuda_device), 'red')) self.hash_size = self.get_hash_size() self.cleaned_target_dir = self.get_valid_filename() self.duplicate_filesize_dict = {} self.hashcache = HashCache(args, self.image_filenames, self.hash_method, self.hash_size, args.num_proc) self.group = {} self.num_duplicate_set = 0 def get_valid_filename(self): if self.files_from: path = str(self.files_from).strip().replace(' ', '_').replace('.', '_') else: path = str(self.target_dir).strip().replace(' ', '_') return re.sub(r'(?u)[^-\w.]', '', path) def get_hashcache_dump_name(self): return "hash_cache_{}_{}_{}.dump".format(self.cleaned_target_dir, self.hash_method, self.hash_bits) def get_duplicate_log_name(self): if self.ngt: return "dup_ngt_{}_{}_{}_{}.log".format(self.cleaned_target_dir, self.hash_method, self.hash_bits, self.hamming_distance) elif self.hnsw: return "dup_hnsw_{}_{}_{}_{}.log".format(self.cleaned_target_dir, self.hash_method, self.hash_bits, self.hamming_distance) elif self.faiss_flat: return "dup_faiss_flat_{}_{}_{}_{}.log".format(self.cleaned_target_dir, self.hash_method, self.hash_bits, self.hamming_distance) else: return "dup_std_{}_{}_{}_{}.log".format(self.cleaned_target_dir, self.hash_method, self.hash_bits, self.hamming_distance) def get_delete_log_name(self): if self.ngt: return "del_ngt_{}_{}_{}_{}.log".format(self.cleaned_target_dir, self.hash_method, self.hash_bits, self.hamming_distance) elif self.hnsw: return "del_hnsw_{}_{}_{}_{}.log".format(self.cleaned_target_dir, self.hash_method, self.hash_bits, self.hamming_distance) elif self.faiss_flat: return "del_faiss_flat_{}_{}_{}_{}.log".format(self.cleaned_target_dir, self.hash_method, self.hash_bits, self.hamming_distance) else: return "del_std_{}_{}_{}_{}.log".format(self.cleaned_target_dir, self.hash_method, self.hash_bits, self.hamming_distance) def get_ngt_index_path(self): return "ngt_{}_{}_{}_{}.ngt_index".format(self.cleaned_target_dir, self.hash_method, self.hash_bits, 
self.hamming_distance) def load_hashcache(self): return self.hashcache.load_hash_dict(self.get_hashcache_dump_name(), self.cache, self.target_dir) def dump_hashcache(self): return self.hashcache.dump_hash_dict(self.get_hashcache_dump_name(), self.cache) def get_hash_size(self): hash_size = int(math.sqrt(self.hash_bits)) if (hash_size ** 2) != self.hash_bits: self.hash_bits = hash_size ** 2 logger.warning(colored("hash_bits must be the square of n. Use {} as hash_bits".format(self.hash_bits), 'red')) return hash_size def get_lowest_load_cuda_device(self): devices = GPUtil.getGPUs() device = None for d in devices: if device is None: device = d if d.load < device.load: device = d return device.id def preserve_file_question(self, file_num): preserve_all = {"all": True, "a": True} delete_all = {"none":True, "no": True, "n": True} file_num_set = OrderedSet([i for i in range(1,file_num+1)]) prompt = "preserve files [1 - {}, all, none]: ".format(file_num) error_prompt = "Please respond with comma-separated file numbers or all (a) or none (n).\n" # return list of delete files index while True: sys.stdout.write(prompt) choice = input().lower() logger.debug("choice: {}".format(choice)) if choice in preserve_all: return [] elif choice in delete_all: return [i for i in range(1,file_num+1)] else: try: input_num_set = OrderedSet([int(i) for i in choice.split(',')]) logger.debug("input_num_set: {}".format(input_num_set)) delete_set = file_num_set - input_num_set valid_set = input_num_set - file_num_set if len(delete_set) >= 0 and len(valid_set) == 0: return list(delete_set) elif len(valid_set) != 0: logger.debug("wrong file number: {}".format(valid_set)) sys.stdout.write(error_prompt) else: sys.stdout.write(error_prompt) except: sys.stdout.write(error_prompt) def dedupe(self, args): if not self.load_hashcache(): self.dump_hashcache() # check num_proc if args.num_proc is None: num_proc = max(cpu_count() - 1, 1) else: num_proc = args.num_proc # Use NGT by default if (not self.hnsw) 
and (not self.faiss_flat): try: import ngtpy except: logger.error(colored("Error: Unable to load NGT. Please install NGT and python binding first.", 'red')) sys.exit(1) index_path = self.get_ngt_index_path() logger.warning("Building NGT index (dimension={}, num_proc={})".format(self.hash_bits, num_proc)) ngtpy.create(path=index_path.encode(), dimension=self.hash_bits, edge_size_for_creation=args.ngt_edges, edge_size_for_search=args.ngt_edges_for_search, object_type="Byte", distance_type="Hamming") ngt_index = ngtpy.Index(index_path.encode()) ngt_index.batch_insert(self.hashcache.hshs(), num_proc) # NGT Approximate neighbor search logger.warning("Approximate neighbor searching using NGT") hshs = self.hashcache.hshs() filenames = self.hashcache.filenames() check_list = [0] * len(hshs) current_group_num = 1 if not args.query: for i in tqdm(range(len(hshs))): new_group_found = False if check_list[i] != 0: # already grouped image continue for res in ngt_index.search(hshs[i], size=args.ngt_k, epsilon=args.ngt_epsilon): if res[0] == i: continue else: if res[1] <= self.hamming_distance: if check_list[res[0]] == 0: if check_list[i] == 0: # new group new_group_found = True check_list[i] = current_group_num check_list[res[0]] = current_group_num self.group[current_group_num] = [filenames[i]] self.group[current_group_num].extend([filenames[res[0]]]) else: # exists group exists_group_num = check_list[i] check_list[res[0]] = exists_group_num self.group[exists_group_num].extend([filenames[res[0]]]) if new_group_found: current_group_num += 1 else: # query image new_group_found = False hsh = self.hashcache.gen_hash(args.query) self.group[current_group_num] = [] for res in ngt_index.search(hsh, size=args.ngt_k, epsilon=args.ngt_epsilon): if res[1] <= self.hamming_distance: new_group_found = True self.group[current_group_num].extend([filenames[res[0]]]) if new_group_found: current_group_num += 1 # remove ngt index if index_path: os.system("rm -rf {}".format(index_path)) elif 
self.hnsw: try: import hnswlib except: logger.error(colored("Error: Unable to load hnsw. Please install hnsw python binding first.", 'red')) sys.exit(1) hshs = self.hashcache.hshs() filenames = self.hashcache.filenames() num_elements = len(hshs) hshs_labels = np.arange(num_elements) hnsw_index = hnswlib.Index(space='l2', dim=self.hash_bits) # Squared L2 hnsw_index.init_index(max_elements=num_elements, ef_construction=args.hnsw_ef_construction, M=args.hnsw_m) hnsw_index.set_ef(max(args.hnsw_ef, args.hnsw_k - 1)) # ef should always be > k hnsw_index.set_num_threads(num_proc) logger.warning("Building hnsw index (dimension={}, num_proc={})".format(self.hash_bits, num_proc)) hnsw_index.add_items(hshs, hshs_labels, num_proc) # hnsw Approximate neighbor search logger.warning("Approximate neighbor searching using hnsw") check_list = [0] * num_elements current_group_num = 1 if not args.query: for i in tqdm(range(num_elements)): new_group_found = False if check_list[i] != 0: # already grouped image continue labels, distances = hnsw_index.knn_query(hshs[i], k=args.hnsw_k, num_threads=num_proc) for label, distance in zip(labels[0], distances[0]): if label == i: continue else: if distance <= self.hamming_distance: if check_list[label] == 0: if check_list[i] == 0: # new group new_group_found = True check_list[i] = current_group_num check_list[label] = current_group_num self.group[current_group_num] = [filenames[i]] self.group[current_group_num].extend([filenames[label]]) else: # exists group exists_group_num = check_list[i] check_list[label] = exists_group_num self.group[exists_group_num].extend([filenames[label]]) if new_group_found: current_group_num += 1 else: # query image new_group_found = False hsh = self.hashcache.gen_hash(args.query) self.group[current_group_num] = [] labels, distances = hnsw_index.knn_query(hsh, k=args.hnsw_k, num_threads=num_proc) for label, distance in zip(labels[0], distances[0]): if distance <= self.hamming_distance: new_group_found = True 
self.group[current_group_num].extend([filenames[label]]) if new_group_found: current_group_num += 1 elif self.faiss_flat: try: import faiss except: logger.error(colored("Error: Unable to load faiss. Please install faiss python binding first.", 'red')) sys.exit(1) hshs = self.hashcache.hshs() filenames = self.hashcache.filenames() faiss.omp_set_num_threads(num_proc) logger.warning("Building faiss index (dimension={}, num_proc={})".format(self.hash_bits, num_proc)) data = np.array(hshs).astype('float32') faiss_flat_index = faiss.IndexFlatL2(self.hash_bits) # Exact search if self.faiss_cuda: res = faiss.StandardGpuResources() faiss_flat_index = faiss.index_cpu_to_gpu(res, self.cuda_device, faiss_flat_index) # Convert to CUDA faiss_flat_index.add(data) # faiss Exact neighbor search logger.warning("Exact neighbor searching using faiss") check_list = [0] * faiss_flat_index.ntotal current_group_num = 1 if not args.query: for i in tqdm(range(faiss_flat_index.ntotal)): new_group_found = False if check_list[i] != 0: # already grouped image continue distances, labels = faiss_flat_index.search(data[[i]], args.faiss_flat_k) for label, distance in zip(labels[0], distances[0]): if label == i: continue else: if distance <= self.hamming_distance: if check_list[label] == 0: if check_list[i] == 0: # new group new_group_found = True check_list[i] = current_group_num check_list[label] = current_group_num self.group[current_group_num] = [filenames[i]] self.group[current_group_num].extend([filenames[label]]) else: # exists group exists_group_num = check_list[i] check_list[label] = exists_group_num self.group[exists_group_num].extend([filenames[label]]) if new_group_found: current_group_num += 1 else: # query image new_group_found = False hsh = np.array([self.hashcache.gen_hash(args.query)]).astype('float32') self.group[current_group_num] = [] distances, labels = faiss_flat_index.search(hsh, args.faiss_flat_k) for label, distance in zip(labels[0], distances[0]): if distance <= 
self.hamming_distance: new_group_found = True self.group[current_group_num].extend([filenames[label]]) if new_group_found: current_group_num += 1 # sort self.group if self.sort != 'none': self.sort_group() # write duplicate log file self.num_duplicate_set = current_group_num - 1 if self.num_duplicate_set > 0 and args.log: now = datetime.now().strftime('%Y%m%d%H%M%S') duplicate_log_file = "{}_{}".format(now, self.get_duplicate_log_name()) with open(duplicate_log_file, 'w') as f: if args.query: f.write("Query: {}\n\n".format(args.query)) for k in range(1, self.num_duplicate_set + 1): img_list = self.group[k] pad = 1 if args.query else 0 if len(img_list) + pad > 1: sorted_img_list, _, _, _ = self.sort_image_list(img_list) if args.sameline: f.write(" ".join(sorted_img_list) + "\n") else: f.write("\n".join(sorted_img_list) + "\n") if k != len(self.group): f.write("\n") def summarize(self, args): # summarize dupe information if self.num_duplicate_set > 0: duplicate_files = set() for filenames in self.group.values(): for filename in filenames: duplicate_files.add(filename) num_duplicate_files = len(duplicate_files) numbytes = 0 for filename in duplicate_files: numbytes += self.duplicate_filesize_dict[filename] numkilobytes = int(numbytes / 1000) print("{} duplicate files (in {} sets), occupying {} KB".format(num_duplicate_files, self.num_duplicate_set, numkilobytes)) else: print("No duplicates found.") def sort_group(self): tmp_group_list = [] new_group_dict = {} for _, filenames in self.group.items(): filenames.sort() tmp_group_list.append(filenames) tmp_group_list.sort() for key, filenames in enumerate(tmp_group_list, start=1): new_group_dict[key] = filenames self.group = new_group_dict def sort_image_list(self, img_list): rev = not self.reverse img_filesize_dict = {} img_size_dict = {} img_width_dict = {} img_height_dict = {} if self.sort == 'none': return img_list, img_filesize_dict, img_width_dict,
<reponame>sireliah/polish-python """Interface to the libbzip2 compression library. This module provides a file interface, classes dla incremental (de)compression, oraz functions dla one-shot (de)compression. """ __all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor", "open", "compress", "decompress"] __author__ = "<NAME> <<EMAIL>>" z builtins zaimportuj open jako _builtin_open zaimportuj io zaimportuj warnings zaimportuj _compression spróbuj: z threading zaimportuj RLock wyjąwszy ImportError: z dummy_threading zaimportuj RLock z _bz2 zaimportuj BZ2Compressor, BZ2Decompressor _MODE_CLOSED = 0 _MODE_READ = 1 # Value 2 no longer used _MODE_WRITE = 3 klasa BZ2File(_compression.BaseStream): """A file object providing transparent bzip2 (de)compression. A BZ2File can act jako a wrapper dla an existing file object, albo refer directly to a named file on disk. Note that BZ2File provides a *binary* file interface - data read jest returned jako bytes, oraz data to be written should be given jako bytes. """ def __init__(self, filename, mode="r", buffering=Nic, compresslevel=9): """Open a bzip2-compressed file. If filename jest a str albo bytes object, it gives the name of the file to be opened. Otherwise, it should be a file object, which will be used to read albo write the compressed data. mode can be 'r' dla reading (default), 'w' dla (over)writing, 'x' dla creating exclusively, albo 'a' dla appending. These can equivalently be given jako 'rb', 'wb', 'xb', oraz 'ab'. buffering jest ignored. Its use jest deprecated. If mode jest 'w', 'x' albo 'a', compresslevel can be a number between 1 oraz 9 specifying the level of compression: 1 produces the least compression, oraz 9 (default) produces the most compression. If mode jest 'r', the input file may be the concatenation of multiple compressed streams. """ # This lock must be recursive, so that BufferedIOBase's # writelines() does nie deadlock. 
self._lock = RLock() self._fp = Nic self._closefp = Nieprawda self._mode = _MODE_CLOSED jeżeli buffering jest nie Nic: warnings.warn("Use of 'buffering' argument jest deprecated", DeprecationWarning) jeżeli nie (1 <= compresslevel <= 9): podnieś ValueError("compresslevel must be between 1 oraz 9") jeżeli mode w ("", "r", "rb"): mode = "rb" mode_code = _MODE_READ albo_inaczej mode w ("w", "wb"): mode = "wb" mode_code = _MODE_WRITE self._compressor = BZ2Compressor(compresslevel) albo_inaczej mode w ("x", "xb"): mode = "xb" mode_code = _MODE_WRITE self._compressor = BZ2Compressor(compresslevel) albo_inaczej mode w ("a", "ab"): mode = "ab" mode_code = _MODE_WRITE self._compressor = BZ2Compressor(compresslevel) inaczej: podnieś ValueError("Invalid mode: %r" % (mode,)) jeżeli isinstance(filename, (str, bytes)): self._fp = _builtin_open(filename, mode) self._closefp = Prawda self._mode = mode_code albo_inaczej hasattr(filename, "read") albo hasattr(filename, "write"): self._fp = filename self._mode = mode_code inaczej: podnieś TypeError("filename must be a str albo bytes object, albo a file") jeżeli self._mode == _MODE_READ: raw = _compression.DecompressReader(self._fp, BZ2Decompressor, trailing_error=OSError) self._buffer = io.BufferedReader(raw) inaczej: self._pos = 0 def close(self): """Flush oraz close the file. May be called more than once without error. Once the file jest closed, any other operation on it will podnieś a ValueError. 
""" przy self._lock: jeżeli self._mode == _MODE_CLOSED: zwróć spróbuj: jeżeli self._mode == _MODE_READ: self._buffer.close() albo_inaczej self._mode == _MODE_WRITE: self._fp.write(self._compressor.flush()) self._compressor = Nic w_końcu: spróbuj: jeżeli self._closefp: self._fp.close() w_końcu: self._fp = Nic self._closefp = Nieprawda self._mode = _MODE_CLOSED self._buffer = Nic @property def closed(self): """Prawda jeżeli this file jest closed.""" zwróć self._mode == _MODE_CLOSED def fileno(self): """Return the file descriptor dla the underlying file.""" self._check_not_closed() zwróć self._fp.fileno() def seekable(self): """Return whether the file supports seeking.""" zwróć self.readable() oraz self._buffer.seekable() def readable(self): """Return whether the file was opened dla reading.""" self._check_not_closed() zwróć self._mode == _MODE_READ def writable(self): """Return whether the file was opened dla writing.""" self._check_not_closed() zwróć self._mode == _MODE_WRITE def peek(self, n=0): """Return buffered data without advancing the file position. Always returns at least one byte of data, unless at EOF. The exact number of bytes returned jest unspecified. """ przy self._lock: self._check_can_read() # Relies on the undocumented fact that BufferedReader.peek() # always returns at least one byte (wyjąwszy at EOF), independent # of the value of n zwróć self._buffer.peek(n) def read(self, size=-1): """Read up to size uncompressed bytes z the file. If size jest negative albo omitted, read until EOF jest reached. Returns b'' jeżeli the file jest already at EOF. """ przy self._lock: self._check_can_read() zwróć self._buffer.read(size) def read1(self, size=-1): """Read up to size uncompressed bytes, dopóki trying to avoid making multiple reads z the underlying stream. Reads up to a buffer's worth of data jeżeli size jest negative. Returns b'' jeżeli the file jest at EOF. 
""" przy self._lock: self._check_can_read() jeżeli size < 0: size = io.DEFAULT_BUFFER_SIZE zwróć self._buffer.read1(size) def readinto(self, b): """Read bytes into b. Returns the number of bytes read (0 dla EOF). """ przy self._lock: self._check_can_read() zwróć self._buffer.readinto(b) def readline(self, size=-1): """Read a line of uncompressed bytes z the file. The terminating newline (jeżeli present) jest retained. If size jest non-negative, no more than size bytes will be read (in which case the line may be incomplete). Returns b'' jeżeli already at EOF. """ jeżeli nie isinstance(size, int): jeżeli nie hasattr(size, "__index__"): podnieś TypeError("Integer argument expected") size = size.__index__() przy self._lock: self._check_can_read() zwróć self._buffer.readline(size) def readlines(self, size=-1): """Read a list of lines of uncompressed bytes z the file. size can be specified to control the number of lines read: no further lines will be read once the total size of the lines read so far equals albo exceeds size. """ jeżeli nie isinstance(size, int): jeżeli nie hasattr(size, "__index__"): podnieś TypeError("Integer argument expected") size = size.__index__() przy self._lock: self._check_can_read() zwróć self._buffer.readlines(size) def write(self, data): """Write a byte string to the file. Returns the number of uncompressed bytes written, which jest always len(data). Note that due to buffering, the file on disk may nie reflect the data written until close() jest called. """ przy self._lock: self._check_can_write() compressed = self._compressor.compress(data) self._fp.write(compressed) self._pos += len(data) zwróć len(data) def writelines(self, seq): """Write a sequence of byte strings to the file. Returns the number of uncompressed bytes written. seq can be any iterable uzyskajing byte strings. Line separators are nie added between the written byte strings. 
""" przy self._lock: zwróć _compression.BaseStream.writelines(self, seq) def seek(self, offset, whence=io.SEEK_SET): """Change the file position. The new position jest specified by offset, relative to the position indicated by whence. Values dla whence are: 0: start of stream (default); offset must nie be negative 1: current stream position 2: end of stream; offset must nie be positive Returns the new file position. Note that seeking jest emulated, so depending on the parameters, this operation may be extremely slow. """ przy self._lock: self._check_can_seek() zwróć self._buffer.seek(offset, whence) def tell(self): """Return the current file position.""" przy self._lock: self._check_not_closed() jeżeli self._mode == _MODE_READ: zwróć self._buffer.tell() zwróć self._pos def open(filename, mode="rb", compresslevel=9, encoding=Nic, errors=Nic, newline=Nic): """Open a bzip2-compressed file w binary albo text mode. The filename argument can be an actual filename (a str albo bytes object), albo an existing file object to read z albo write to. The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" albo "ab" dla binary mode, albo "rt", "wt", "xt" albo "at" dla text mode. The default mode jest "rb", oraz the default compresslevel jest 9. For binary mode, this function jest equivalent to the BZ2File constructor: BZ2File(filename, mode, compresslevel). In this case, the encoding, errors oraz newline arguments must nie be provided. For text mode, a BZ2File object jest created, oraz wrapped w an io.TextIOWrapper instance przy the specified encoding, error handling behavior, oraz line ending(s). 
""" jeżeli "t" w mode: jeżeli "b" w mode: podnieś ValueError("Invalid mode: %r" % (mode,)) inaczej: jeżeli encoding jest nie Nic: podnieś ValueError("Argument 'encoding' nie supported w binary mode") jeżeli errors jest nie Nic: podnieś ValueError("Argument 'errors' nie supported w binary mode") jeżeli newline jest nie Nic: podnieś ValueError("Argument 'newline' nie supported w binary mode") bz_mode = mode.replace("t", "") binary_file = BZ2File(filename, bz_mode, compresslevel=compresslevel) jeżeli "t" w mode: zwróć io.TextIOWrapper(binary_file, encoding, errors, newline) inaczej: zwróć binary_file def compress(data, compresslevel=9): """Compress a block of data. compresslevel, jeżeli given, must be a number between 1 oraz 9. For incremental compression, use a BZ2Compressor object instead. """
""" Throughout this file, the following notations are consistently used for variables: * `x` always refers to user histories, and will generally be a tensor of dimension num_interactions x num_users x compress_dim * `y` always refers to the output probabilities of the neural net, and will generally be of the dimensions num_interactions x num_users x num_questions * `t` will always refer to the "true" response of the student on the *next* question. This is used for training and testing purposes. It is generally of dimension num_interactions x num_users * `m` will always represent a *mask* on the truth values, since users have different history lengths. The dimension is usually num_interactions x num_users * `h` will always refer to the hidden layer * w_hh/w_xh/w_hy refer to weights with which neural network layers are convolved * w_bh/w_by are the bias weights added within the various relevant nonlinearities """ from __future__ import division from collections import namedtuple import logging import pickle import time import numpy as np import theano from theano import tensor as T from .common import Results from .data.rnn import build_batches from .irt.metrics import Metrics _logger = logging.getLogger(__name__) def build_grad_and_test_fns(sigmoid_fn=T.nnet.hard_sigmoid, recurrent=True, num_type='float32', mask_type='int8', probability_clip=1e-12, compressed_output=False): """ Actually build the Theano functions for the recurrent neural net. These are *independent* of the data passed into the neural net, so whatever it produces should be cached in the actual state of the class. :param function sigmoid_fn: A function that performs an element-wise sigmoid operation on a theano tensor. These will likely be one of - `T.nnet.sigmoid` - `T.nnet.hard_sigmoid` - `T.nnet.ultra_fast_sigmoid` :param bool recurrent: If True, use a recurrent architecture :param str num_type: A valid floating point numeric type to pass to Theano. 
Should be a numpy-esque name, but Theano requires the stringy version. :param str mask_type: A valid "boolean" type to pass to Theano. Should be a numpy-esque name for an integer type, but Theano requires the stringy version. Also note that Theano does *not* support bools currently. :param float probability_clip: In order to avoid nans when taking logs, we clip the predicted probabilities to lie in (probability_clip, 1 - probability_clip) :param bool compressed_output: Is the output of the RNN compressed? If so, we will run the final layer through a sigmoid. Else, we'll assume the final layer are already probabilities. :rtype: (function, function) :return: The main Theano functions. The first function is the "gradient" function, whose signature looks like:: inputs: [x, y, t, m, h_0, w_hh, w_xh, w_hy, w_bh, w_by] outputs: [error, num_pred_correct, g_hh, g_xh, g_hy, g_bh, g_by] For a further explanation of these variable names see the class docstring. The second function is a simpler "test" function, that doesn't go through the trouble of computing gradients. 
Thus, its signature looks like:: inputs: [x, y, t, m, h_0, w_hh, w_xh, w_hy, w_bh, w_by], outputs: [num_pred_correct, next_qn_prob] """ ###################################################### # Tensor input # ###################################################### # (Compressed) one hot encoding for input x x = T.tensor3('x', dtype=num_type) # (NUM INTERACTIONS x BATCH x COMPRESS) # Mask if uncompressed or "to dot with" if compressed # of the probabilities of y to only contain probabilities for the next question y = T.tensor3('y', dtype=num_type) # (NUM INTERACTIONS x BATCH x NUM QN) # Correctness of the next question t = T.matrix('t', dtype=mask_type) # (NUM INTERACTIONS x BATCH) # Mask for non-existing interactions within a rectangle m = T.matrix('m', dtype=mask_type) # (NUM INTERACTIONS x BATCH) # Initial hidden state h_0 = T.matrix('h_0', dtype=num_type) # (BATCH x HIDDEN) # Dropout mask for (compressed) input x_drop = T.vector('x_drop', dtype=mask_type) # (COMPRESS) # Dropout mask for hidden layer h_drop = T.vector('h_drop', dtype=mask_type) # (HIDDEN) ###################################################### # Weights # ###################################################### # Recurrent weight from layer H to layer H w_hh = T.matrix('w_hh', dtype=num_type) # (HIDDEN x HIDDEN) # Input weights from layer X to layer H w_xh = T.matrix('w_xh', dtype=num_type) # (COMPRESS x HIDDEN) # Output weights from layer H to layer Y w_hy = T.matrix('w_hy', dtype=num_type) # (HIDDEN x NUM QN) # Bias weights from bias to layer H w_bh = T.vector('w_bh', dtype=num_type) # (HIDDEN) # Bias weights from bias to layer Y w_by = T.vector('w_by', dtype=num_type) # (NUM QN) # Recurrent function def step(x_t, y_t, h_tm1, x_drop, h_drop, w_hh, w_xh, w_hy, w_bh, w_by): """ :param x_t: (Compressed) one hot encoding for input x for time t (BATCH x COMPRESS) :param y_t: Mask of the probabilities of y for time t (BATCH x NUM QN) :param h_tm1: Previous hidden state for time t-1 (BATCH x HIDDEN) :param 
w_hh: Recurrent weight from layer H to layer H (HIDDEN x HIDDEN) :param w_xh: Input weights from layer X to layer H (COMPRESS x HIDDEN) :param w_hy: Output weights from layer H to layer Y (HIDDEN x NUM QN) :param w_bh: Bias weights from bias to layer H (HIDDEN) :param w_by: Bias weights from bias to layer Y (NUM QN) :return: h_t: Hidden state for time t all_qn_prob_t: Probabilities for all questions for time t next_qn_prob_t: Probabilities for next question for time t """ # Broadcast over batches x_drop_batch = x_drop.dimshuffle(('x', 0)) # BATCH x COMPRESS h_drop_batch = h_drop.dimshuffle(('x', 0)) # BATCH x HIDDEN w_bh_batch = w_bh.dimshuffle(('x', 0)) # BATCH x HIDDEN w_by_batch = w_by.dimshuffle(('x', 0)) # BATCH x NUM QN # Dropout on inputs x_t_dropped = x_t * x_drop_batch if recurrent: # BATCH x HIDDEN h_t = T.tanh(T.dot(x_t_dropped, w_xh) + T.dot(h_tm1, w_hh) + w_bh_batch) else: # In this case, we don't care about the previous state, but Theano is a bit # smarter than we are and notices if we don't include w_hh. This trains up # fast enough as is, so let's just dot it with 0s to get rid of it. 
h_t = T.tanh(T.dot(x_t_dropped, w_xh) + T.dot(T.zeros_like(h_tm1), w_hh) + w_bh_batch) # Dropout on hidden layer h_t_dropped = h_t * h_drop_batch # Compute predicted pass rate for all questions, taking into account # whether we have compressed the output dimensions or not if compressed_output: # If compressed, run the final output through a sigmoid final_layer = T.dot(h_t, w_hy) + w_by_batch next_qn_prob_t = sigmoid_fn((final_layer * y_t).sum(axis=1)) else: # If not compressed, mask probabilities of all questions except the next one all_qn_prob_t = sigmoid_fn(T.dot(h_t, w_hy) + w_by_batch) next_qn_prob_t = (all_qn_prob_t * y_t).sum(axis=1) return h_t_dropped, next_qn_prob_t # This is the scan function to apply the forward propagation for each of the time slice # For each time slice: # A time slice of the x and y are passed in as arguments # The previous output of the function is also passed in # (the initial output is passed into outputs_info) # The weights are also passed in but not sliced in time # # h: Hidden state for all time (NUM INTERACTIONS x BATCH x HIDDEN) # next_qn_prob: Probabilities for next question for all time (NUM INTERACTIONS x BATCH) [h, next_qn_prob], _ = theano.scan(fn=step, sequences=[x, y], outputs_info=[h_0, None], non_sequences=[x_drop, h_drop, w_hh, w_xh, w_hy, w_bh, w_by]) # The negative cross entropy will be what we minimize # Note that we multiply by our mask (m) since not all students have the same history length next_qn_prob = T.clip(next_qn_prob, probability_clip, 1 - probability_clip) cross_entropy = t * T.log(next_qn_prob) + (1 - t) * T.log(1 - next_qn_prob) error = -((cross_entropy * m).sum()) # Prediction accuracy at a cutoff of 0.5 num_pred_correct = (T.eq(T.ge(next_qn_prob, 0.5), t) * m).sum() # BP Gradients for all weights g_hh, g_xh, g_hy, g_bh, g_by = T.grad(error, [w_hh, w_xh, w_hy, w_bh, w_by]) # After all the symbols are linked correctly # The expression is compiled using the theano.function to a function # Hence grad_fn 
and test_fn are actual functions to pass in real data grad_fn = theano.function(inputs=[x, y, t, m, h_0, x_drop, h_drop, w_hh, w_xh, w_hy, w_bh, w_by], outputs=[error, g_hh, g_xh, g_hy, g_bh, g_by]) # Note that in testing we use all the edges in the graph regardless of dropout. # In order to make this clear to the
<gh_stars>1-10
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Attention-based decoder functions."""
# NOTE(review): uses `tf.contrib.*` and `Dimension.value`, i.e. TensorFlow 1.x
# graph-mode APIs; this module will not run under TF 2.x without migration.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.framework import function

__all__ = [
    "prepare_attention", "attention_decoder_fn_train",
    "attention_decoder_fn_inference"
]


def attention_decoder_fn_train(encoder_state,
                               attention_keys,
                               attention_values,
                               attention_score_fn,
                               attention_construct_fn,
                               name=None):
  """Attentional decoder function for `dynamic_rnn_decoder` during training.

  The `attention_decoder_fn_train` is a training function for an
  attention-based sequence-to-sequence model. It should be used when
  `dynamic_rnn_decoder` is in the training mode.

  The `attention_decoder_fn_train` is called with a set of the user arguments
  and returns the `decoder_fn`, which can be passed to the
  `dynamic_rnn_decoder`, such that

  ```
  dynamic_fn_train = attention_decoder_fn_train(encoder_state)
  outputs_train, state_train = dynamic_rnn_decoder(
      decoder_fn=dynamic_fn_train, ...)
  ```

  Further usage can be found in the `kernel_tests/seq2seq_test.py`.

  Args:
    encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
    attention_keys: to be compared with target states.
    attention_values: to be used to construct context vectors.
    attention_score_fn: to compute similarity between key and target states.
    attention_construct_fn: to build attention states.
    name: (default: `None`) NameScope for the decoder function;
      defaults to "simple_decoder_fn_train"

  Returns:
    A decoder function with the required interface of `dynamic_rnn_decoder`
    intended for training.
  """
  # The scope is entered only to validate the captured tensors; nothing is
  # built at wrapper-construction time — all ops are created inside decoder_fn.
  with tf.name_scope(name, "attention_decoder_fn_train", [
      encoder_state, attention_keys, attention_values, attention_score_fn,
      attention_construct_fn
  ]):
    pass

  def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
    """Decoder function used in the `dynamic_rnn_decoder` for training.

    Args:
      time: positive integer constant reflecting the current timestep.
      cell_state: state of RNNCell.
      cell_input: input provided by `dynamic_rnn_decoder`.
      cell_output: output of RNNCell.
      context_state: context state provided by `dynamic_rnn_decoder`.

    Returns:
      A tuple (done, next state, next input, emit output, next context state)
      where:

      done: `None`, which is used by the `dynamic_rnn_decoder` to indicate
      that `sequence_lengths` in `dynamic_rnn_decoder` should be used.

      next state: `cell_state`, this decoder function does not modify the
      given state.

      next input: `cell_input`, this decoder function does not modify the
      given input. The input could be modified when applying e.g. attention.

      emit output: `cell_output`, this decoder function does not modify the
      given output.

      next context state: `context_state`, this decoder function does not
      modify the given context state. The context state could be modified when
      applying e.g. beam search.
    """
    with tf.name_scope(
        name, "attention_decoder_fn_train",
        [time, cell_state, cell_input, cell_output, context_state]):
      if cell_state is None:  # first call, return encoder_state
        cell_state = encoder_state

        # init attention
        attention = _init_attention(encoder_state)
      else:
        # construct attention
        attention = attention_construct_fn(cell_output, attention_keys,
                                           attention_values)
        # Emit the attention-augmented state rather than the raw cell output.
        cell_output = attention

      # combine cell_input and attention
      next_input = tf.concat([cell_input, attention], 1)

      return (None, cell_state, next_input, cell_output, context_state)

  return decoder_fn


def attention_decoder_fn_inference(output_fn,
                                   encoder_state,
                                   attention_keys,
                                   attention_values,
                                   attention_score_fn,
                                   attention_construct_fn,
                                   embeddings,
                                   start_of_sequence_id,
                                   end_of_sequence_id,
                                   maximum_length,
                                   num_decoder_symbols,
                                   dtype=tf.int32,
                                   name=None):
  """Attentional decoder function for `dynamic_rnn_decoder` during inference.

  The `attention_decoder_fn_inference` is a simple inference function for a
  sequence-to-sequence model. It should be used when `dynamic_rnn_decoder` is
  in the inference mode.

  The `attention_decoder_fn_inference` is called with user arguments
  and returns the `decoder_fn`, which can be passed to the
  `dynamic_rnn_decoder`, such that

  ```
  dynamic_fn_inference = attention_decoder_fn_inference(...)
  outputs_inference, state_inference = dynamic_rnn_decoder(
      decoder_fn=dynamic_fn_inference, ...)
  ```

  Further usage can be found in the `kernel_tests/seq2seq_test.py`.

  Args:
    output_fn: An output function to project your `cell_output` onto class
    logits.

    An example of an output function;

    ```
    tf.variable_scope("decoder") as varscope
      output_fn = lambda x: tf.contrib.layers.linear(x, num_decoder_symbols,
                                          scope=varscope)

      outputs_train, state_train = seq2seq.dynamic_rnn_decoder(...)
      logits_train = output_fn(outputs_train)

      varscope.reuse_variables()
      logits_inference, state_inference = seq2seq.dynamic_rnn_decoder(
          output_fn=output_fn, ...)
    ```

    If `None` is supplied it will act as an identity function, which
    might be wanted when using the RNNCell `OutputProjectionWrapper`.

    encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
    attention_keys: to be compared with target states.
    attention_values: to be used to construct context vectors.
    attention_score_fn: to compute similarity between key and target states.
    attention_construct_fn: to build attention states.
    embeddings: The embeddings matrix used for the decoder sized
    `[num_decoder_symbols, embedding_size]`.
    start_of_sequence_id: The start of sequence ID in the decoder embeddings.
    end_of_sequence_id: The end of sequence ID in the decoder embeddings.
    maximum_length: The maximum allowed of time steps to decode.
    num_decoder_symbols: The number of classes to decode at each time step.
    dtype: (default: `tf.int32`) The default data type to use when handling
    integer objects.
    name: (default: `None`) NameScope for the decoder function;
      defaults to "attention_decoder_fn_inference"

  Returns:
    A decoder function with the required interface of `dynamic_rnn_decoder`
    intended for inference.
  """
  with tf.name_scope(name, "attention_decoder_fn_inference", [
      output_fn, encoder_state, attention_keys, attention_values,
      attention_score_fn, attention_construct_fn, embeddings,
      start_of_sequence_id, end_of_sequence_id, maximum_length,
      num_decoder_symbols, dtype
  ]):
    # Normalize the python ints to graph tensors once, at wrapper-construction
    # time, so decoder_fn does not re-create them every timestep.
    start_of_sequence_id = tf.convert_to_tensor(start_of_sequence_id, dtype)
    end_of_sequence_id = tf.convert_to_tensor(end_of_sequence_id, dtype)
    maximum_length = tf.convert_to_tensor(maximum_length, dtype)
    num_decoder_symbols = tf.convert_to_tensor(num_decoder_symbols, dtype)
    encoder_info = tf.contrib.framework.nest.flatten(encoder_state)[0]
    batch_size = encoder_info.get_shape()[0].value
    if output_fn is None:
      output_fn = lambda x: x
    if batch_size is None:
      # Static batch size unknown; fall back to the dynamic shape.
      batch_size = tf.shape(encoder_info)[0]

  def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
    """Decoder function used in the `dynamic_rnn_decoder` for inference.

    The main difference between this decoder function and the `decoder_fn` in
    `attention_decoder_fn_train` is how `next_cell_input` is calculated. In
    decoder function we calculate the next input by applying an argmax across
    the feature dimension of the output from the decoder. This is a
    greedy-search approach. (Bahdanau et al., 2014) & (Sutskever et al., 2014)
    use beam-search instead.

    Args:
      time: positive integer constant reflecting the current timestep.
      cell_state: state of RNNCell.
      cell_input: input provided by `dynamic_rnn_decoder`.
      cell_output: output of RNNCell.
      context_state: context state provided by `dynamic_rnn_decoder`.

    Returns:
      A tuple (done, next state, next input, emit output, next context state)
      where:

      done: A boolean vector to indicate which sentences has reached a
      `end_of_sequence_id`. This is used for early stopping by the
      `dynamic_rnn_decoder`. When `time>=maximum_length` a boolean vector with
      all elements as `true` is returned.

      next state: `cell_state`, this decoder function does not modify the
      given state.

      next input: The embedding from argmax of the `cell_output` is used as
      `next_input`.

      emit output: If `output_fn is None` the supplied `cell_output` is
      returned, else the `output_fn` is used to update the `cell_output`
      before calculating `next_input` and returning `cell_output`.

      next context state: `context_state`, this decoder function does not
      modify the given context state. The context state could be modified when
      applying e.g. beam search.

    Raises:
      ValueError: if cell_input is not None.
    """
    with tf.name_scope(
        name, "attention_decoder_fn_inference",
        [time, cell_state, cell_input, cell_output, context_state]):
      if cell_input is not None:
        raise ValueError(
            "Expected cell_input to be None, but saw: %s" % cell_input)
      if cell_output is None:
        # invariant that this is time == 0
        next_input_id = tf.ones(
            [
                batch_size,
            ], dtype=dtype) * (
                start_of_sequence_id)
        done = tf.zeros(
            [
                batch_size,
            ], dtype=tf.bool)
        cell_state = encoder_state
        # NOTE(review): shape is [num_decoder_symbols] with no batch
        # dimension; the upstream tf.contrib implementation used
        # [batch_size, num_decoder_symbols] here — confirm downstream
        # consumers rely on broadcasting before changing.
        cell_output = tf.zeros([num_decoder_symbols], dtype=tf.float32)
        cell_input = tf.gather(embeddings, next_input_id)

        # init attention
        attention = _init_attention(encoder_state)
      else:
        # construct attention
        attention = attention_construct_fn(cell_output, attention_keys,
                                           attention_values)
        cell_output = attention

        # argmax decoder
        cell_output = output_fn(cell_output)  # logits
        next_input_id = tf.cast(tf.argmax(cell_output, 1), dtype=dtype)
        done = tf.equal(next_input_id, end_of_sequence_id)
        cell_input = tf.gather(embeddings, next_input_id)

      # combine cell_input and attention
      next_input = tf.concat([cell_input, attention], 1)

      # if time > maxlen, return all true vector
      done = tf.cond(
          tf.greater(time, maximum_length), lambda: tf.ones(
              [
                  batch_size,
              ], dtype=tf.bool), lambda: done)
      return (done, cell_state, next_input, cell_output, context_state)

  return decoder_fn


## Helper functions ##
def prepare_attention(attention_states,
                      attention_option,
                      num_units,
                      reuse=None):
  """Prepare keys/values/functions for attention.

  Args:
    attention_states: hidden states to attend over.
    attention_option: how to compute attention, either "luong" or "bahdanau".
    num_units: hidden state dimension.
    reuse: whether to reuse variable scope.

  Returns:
    attention_keys: to be compared with target states.
    attention_values: to be used to construct context vectors.
    attention_score_fn: to compute similarity between key and target states.
    attention_construct_fn: to build attention states.
  """
  # Prepare attention keys / values from attention_states
  with tf.variable_scope("attention_keys", reuse=reuse) as scope:
    # Linear projection (no bias) of the raw states into key space.
    attention_keys = tf.contrib.layers.linear(
        attention_states, num_units, biases_initializer=None, scope=scope)
  attention_values = attention_states

  # Attention score function
  # (_create_attention_score_fn / _create_attention_construct_fn are defined
  # later in this module, outside this chunk.)
  attention_score_fn = _create_attention_score_fn("attention_score", num_units,
                                                  attention_option, reuse)
  # Attention construction function
  attention_construct_fn = _create_attention_construct_fn(
      "attention_construct", num_units, attention_score_fn, reuse)

  return (attention_keys, attention_values, attention_score_fn,
          attention_construct_fn)


def _init_attention(encoder_state):
                                sticky='e', padx=20)
        # (The lines above are the tail of a method whose opening is outside
        # this chunk — it finishes laying out the controller panel labels.)
        lbl_max_move_title = ttk.Label(master=self.controller_frame, text="Movement Speed", font=self.big_font)
        lbl_max_move_title.grid(row=0, column=3, sticky='e', padx=20)
        self.lbl_max_move = tk.Label(master=self.controller_frame, text="", font=self.reg_font, bg='gray28', fg='gray70')
        self.lbl_max_move.grid(row=1, column=3, sticky='e', padx=20)
        lbl_amount_move_title = ttk.Label(master=self.controller_frame, text="Feet Moved", font=self.big_font)
        lbl_amount_move_title.grid(row=2, column=3, sticky='e', padx=20)
        self.lbl_amount_moved = tk.Label(master=self.controller_frame, text="", font=self.reg_font, bg='gray28', fg='gray70')
        self.lbl_amount_moved.grid(row=3, column=3, sticky='e', padx=20)
        self.z_delta = 0  # pending elevation change applied on the next move
        self.root.bind("<Key>", self._on_numpad_keys)
        self.controller_frame.bind("<Button-1>", self._on_defocus)
        self.place_tokens()
        self.root.deiconify()

    def _on_config(self, event):
        """Keep the canvas scroll region in sync with its contents."""
        self.grid_canvas.configure(scrollregion=self.grid_canvas.bbox('all'))

    def _on_enter_canvas(self, event):
        """Enable wheel scrolling only while the pointer is over the map."""
        self.grid_canvas.bind_all('<MouseWheel>', self._on_mousewheel)
        self.grid_canvas.bind_all('<Shift-MouseWheel>', self._on_shift_mousewheel)

    def _on_leave_canvas(self, event):
        """Disable wheel scrolling when the pointer leaves the map."""
        self.grid_canvas.unbind_all('<MouseWheel>')
        self.grid_canvas.unbind_all('<Shift-MouseWheel>')

    def _on_mousewheel(self, event):
        # event.delta is in multiples of 120 on Windows.
        self.grid_canvas.yview_scroll(int(-1*(event.delta/120)), 'units')

    def _on_shift_mousewheel(self, event):
        # Shift+wheel scrolls horizontally.
        self.grid_canvas.xview_scroll(int(-1*(event.delta/120)), 'units')

    def _on_select_target(self, event):
        """Show AC/HP details for the creature picked in the target combobox."""
        # NOTE(review): if no token matches the combobox value, sel_obj is
        # never bound and the .config calls below raise UnboundLocalError.
        for being in self.root.token_list:
            if being['name'] == self.cont_targets.get():
                sel_obj = being
        self.lbl_target_ac.config(text=sel_obj['ac'])
        self.lbl_target_max_hp.config(text=sel_obj['max_HP'])
        self.lbl_target_hp.config(text=sel_obj['current_HP'])
        self.lbl_target_temp_hp.config(text=sel_obj['temp_HP'])

    def _on_numpad_keys(self, event):
        """Map numpad keys to d-pad movement / elevation / turn advancement."""
        # Controller movements
        # (Both the digit keysyms and their NumLock-off aliases are handled.)
        if event.keysym == '0' or event.keysym == 'Insert':
            self.undo_move()
        elif event.keysym == '1' or event.keysym == 'End':
            self.dpad_move('sw')
        elif event.keysym == '2' or event.keysym == 'Down':
            self.dpad_move('s')
        elif event.keysym == '3' or event.keysym == 'Next':
            self.dpad_move('se')
        elif event.keysym == '4' or event.keysym == 'Left':
            self.dpad_move('w')
        elif event.keysym == '5' or event.keysym == 'Clear':
            # '5' moves only vertically: apply the queued elevation change.
            if self.z_delta != 0:
                if self.z_delta == 1:
                    self.dpad_move('+')
                elif self.z_delta == -1:
                    self.dpad_move('-')
        elif event.keysym == '6' or event.keysym == 'Right':
            self.dpad_move('e')
        elif event.keysym == '7' or event.keysym == 'Home':
            self.dpad_move('nw')
        elif event.keysym == '8' or event.keysym == 'Up':
            self.dpad_move('n')
        elif event.keysym == '9' or event.keysym == 'Prior':
            self.dpad_move('ne')
        elif event.keysym == 'minus':
            self.zpad('-')
        elif event.keysym == 'plus':
            self.zpad('+')
        elif event.keysym == 'Return':
            self.next_turn()
        if self.z_delta == 0:
            # Clear the elevation-pending highlight once the delta is spent.
            self.z_frame.config(bg='gray28')
        self.root.unbind_all("<Button-1>")

    def _on_delta_focus(self, event, typ):
        """Suspend numpad handling while a text entry has focus."""
        if typ == 'in':
            self.root.unbind("<Key>")
        elif typ == 'out':
            self.root.bind("<Key>", self._on_numpad_keys)

    def _on_defocus(self, event):
        # Clicking the frame steals focus back from any entry widget.
        event.widget.focus_set()

    def initialize(self):
        """Load creatures and objects from the savefile zip into root lists."""
        self.root.token_list = []
        self.root.obj_list = []
        with ZipFile(self.root.filename, "r") as savefile:
            creat_bytes = savefile.read('creatures.json')
            creat_str = creat_bytes.decode('utf-8')
            creatures = json.loads(creat_str)
            obj_bytes = savefile.read('objects.json')
            obj_str = obj_bytes.decode('utf-8')
            objects = json.loads(obj_str)
        for being in creatures.values():
            self.root.token_list.append(being)
        for thing in objects.values():
            self.root.obj_list.append(thing)

    def place_tokens(self):
        """Place object and creature tokens on the map grid.

        Objects are laid out first (snapped to 5-ft squares), then creature
        tokens; creatures with no coordinate go to the side board. Large+
        creatures occupy multiple squares. Finally the target combobox and
        initiative display are refreshed.
        """
        self.initiative_holder = {}
        spaces_taken = []  # (row, col, elevation) triples already occupied
        self.target_names = []
        for item in self.root.obj_list:
            occupied = False
            if item["coordinate"][0] != "" and item["coordinate"][1] != "":
                # coordinate is stored as [col, row, elevation] strings.
                row_pos = int(item["coordinate"][1])
                col_pos = int(item["coordinate"][0])
                self.target_names.append(item['name'])
                for space_tuple in spaces_taken:
                    if space_tuple[0] == row_pos and space_tuple[1] == col_pos and space_tuple[2] == int(item["coordinate"][2]):
                        occupied = True
                if occupied == False:
                    spaces_taken.append((row_pos, col_pos, int(item["coordinate"][2])))
                    o_length = item["length"]
                    o_width = item["width"]
                    # Snap object footprint up to whole 5-ft squares (min 1).
                    f_len = 5 * round(o_length / 5)
                    if f_len < 5:
                        f_len = 5
                    f_wid = 5 * round(o_width / 5)
                    if f_wid < 5:
                        f_wid = 5
                    o_col = int(f_wid / 5)
                    o_row = int(f_len / 5)
                    for x in range(o_col):
                        col_pos = int(item["coordinate"][0]) + x
                        for y in range(o_row):
                            row_pos = int(item["coordinate"][1]) + y
                            obj_img = ImageTk.PhotoImage(image=PIL.Image.open(item["img_ref"]).resize((30,30)))
                            lbl_unit = tk.Label(master=self.map_frames[col_pos][row_pos], image=obj_img, bg="gray28", borderwidth=0)
                            # Keep a reference so Tk doesn't GC the image.
                            lbl_unit.image = obj_img
                            lbl_unit.coord = (row_pos, col_pos)
                            lbl_unit.pack(fill='both', expand=True, padx=2, pady=2)
                            CreateToolTip(lbl_unit, text=f"{item['name']}: {row_pos}, {col_pos}", left_disp=True)
        for being in self.root.token_list:
            token_type = being["type"]
            if token_type == "ally":
                token_img = self.ally_img
            elif token_type == "enemy":
                token_img = self.enemy_img
            elif token_type == "bystander":
                token_img = self.bystander_img
            elif token_type == "dead":
                token_img = self.dead_img
            else:
                raise NameError("Token type not specified.")
            occupied = False
            if being["coordinate"][0] != "" and being["coordinate"][1] != "":
                row_pos = int(being["coordinate"][1])
                col_pos = int(being["coordinate"][0])
                self.target_names.append(being['name'])
                for space_tuple in spaces_taken:
                    if space_tuple[0] == row_pos and space_tuple[1] == col_pos and space_tuple[2] == int(being["coordinate"][2]):
                        occupied = True
                if occupied == False:
                    spaces_taken.append((row_pos, col_pos, int(being["coordinate"][2])))
                    lbl_unit = tk.Label(master=self.map_frames[col_pos][row_pos], image=token_img, bg="gray28", borderwidth=0)
                    lbl_unit.image = token_img
                    lbl_unit.coord = (row_pos, col_pos)
                    lbl_unit.pack(fill='both', expand=True, padx=2, pady=2)
                    self.token_labels[col_pos][row_pos] = lbl_unit
                    CreateToolTip(lbl_unit, text="{0}, {1}".format(being["name"], being["coordinate"][2]), left_disp=True)
                    if being['initiative'] != math.inf:
                        self.initiative_holder[being['name']] = being
                    if being["size"] == "large" or being["size"] == "huge" or being["size"] == "gargantuan":
                        # Multi-square creatures: fill a square footprint of
                        # 2x2 / 3x3 / 4x4 anchored at the stored coordinate.
                        if being["size"] == "large":
                            space_need = 4
                        elif being["size"] == "huge":
                            space_need = 9
                        else:
                            space_need = 16
                        row_offset = 0
                        col_offset = 0
                        go_to_next_row = math.sqrt(space_need)
                        for i in range(1, space_need):
                            if i < space_need:
                                col_offset += 1
                                if col_offset == go_to_next_row:
                                    col_offset = 0
                                    row_offset += 1
                                row_pos = int(being["coordinate"][1]) + row_offset
                                col_pos = int(being["coordinate"][0]) + col_offset
                                lbl_unit = tk.Label(master=self.map_frames[col_pos][row_pos], image=token_img, bg="gray28", borderwidth=0)
                                lbl_unit.image = token_img
                                lbl_unit.coord = (row_pos, col_pos)
                                lbl_unit.pack(fill='both', expand=True)
                                CreateToolTip(lbl_unit, text="{0}, {1}".format(being["name"], being["coordinate"][2]), left_disp=True)
                            else:
                                # NOTE(review): unreachable — range(1, space_need)
                                # guarantees i < space_need.
                                messagebox.showerror("Internal Error", "Restart program\nError 0x006")
                                return
            else:
                # No coordinate yet: park the token on the side board.
                self.unused_tokens(being, token_img)
        self.cont_targets.config(values=self.target_names)
        self.refresh_initiatives()

    def unused_tokens(self, creature, token_img):
        """Place an unpositioned creature token on the two-column side board."""
        next_row = int(self.side_count / 2)
        next_col = self.side_count % 2
        lbl_side_unit = tk.Label(master=self.side_board, image=token_img, bg="gray28", borderwidth=0)
        lbl_side_unit.grid(row=next_row, column=next_col, padx=5, pady=5, sticky="ne")
        #lbl_side_unit.bind("<Button-3>", self.em.right_click_menu)
        lbl_side_unit.image = token_img
        CreateToolTip(lbl_side_unit, text=creature["name"])
        self.side_count += 1

    def post_initiatives(self):
        """Render the initiative order and highlight whose turn it is."""
        # Sort descending by initiative score.
        init_dict_in_order = {k:v for k, v in sorted(self.initiative_holder.items(), key= lambda item: item[1]['initiative'], reverse=True)}
        order_count = 0
        lbl_turn_img = tk.Label(master=self.initiative_frame, image=self.turn_icon, bg="gray28", borderwidth=0)
        lbl_turn_img.grid(row=self.turn, column=0, sticky='w')
        lbl_turn_img.image = self.turn_icon
        self.move_path = []  # positions visited this turn; [-1] is current
        for next_up in init_dict_in_order.items():
            if next_up[1]['initiative'] != math.inf and next_up[1]['type'] != 'dead':
                lbl_your_turn = ttk.Label(master=self.initiative_frame, text=f"{next_up[0]}: ", font=self.small_font)
                lbl_your_turn.grid(row=order_count, column=1, sticky='w')
                lbl_your_init = ttk.Label(master=self.initiative_frame, text=next_up[1]['initiative'], font=self.small_font)
                lbl_your_init.grid(row=order_count, column=2, sticky='e')
                if order_count == self.turn:
                    # This creature is up: populate the controller panel.
                    self.turn_obj = next_up[1]
                    curr_pos = (int(self.turn_obj['coordinate'][0]), int(self.turn_obj['coordinate'][1]), int(self.turn_obj['coordinate'][2]))
                    self.move_path.append(curr_pos)
                    self.lbl_current_turn.config(text=self.turn_obj['name'])
                    self.lbl_max_move.config(text=self.turn_obj['speed'])
                    self.lbl_position.config(text=f"{curr_pos[0]+1}: {curr_pos[1]+1}: {curr_pos[2]}")
                    self.lbl_amount_moved.config(text="0")
                    self.map_frames[curr_pos[0]][curr_pos[1]].config(bg='orange3')
                    if self.turn_obj['status'] == 'PC':
                        self.lbl_current_turn.config(fg='green3')
                    elif self.turn_obj['status'] == 'Monster':
                        self.lbl_current_turn.config(fg='orange3')
                    else:
                        self.lbl_current_turn.config(fg='DodgerBlue2')
                    if self.root.copy_win_open:
                        # Hide monster names from the player-facing window.
                        if self.turn_obj['status'] != 'PC':
                            self.copy_win.set_turn_lbl("X")
                        else:
                            self.copy_win.set_turn_lbl(self.turn_obj['name'])
                order_count += 1

    def refresh_initiatives(self):
        """Clear and rebuild the initiative list and turn highlight."""
        init_frame_slaves = self.initiative_frame.grid_slaves()
        if len(init_frame_slaves):
            for item in init_frame_slaves:
                item.destroy()
        for i in range(len(self.map_frames)):
            for frm in self.map_frames[i]:
                frm.config(bg='gray28')
        self.post_initiatives()

    def next_turn(self, not_from_redo=True):
        """Advance to the next creature in initiative order.

        Commits the current creature's final position back to its token, then
        either steps `self.turn` or rolls over into the next round.
        """
        self.lbl_amount_moved.config(bg='gray28')
        if not_from_redo:
            self.log_action('turn button')
        on_board_inits = self.initiative_holder
        inf_exists = True
        fucked_up = 100  # safety valve so the loop below can never spin forever
        # NOTE(review): this purge loop looks broken: `value` is a creature
        # dict so `value == math.inf` is never True; `math.inf not in
        # on_board_inits` tests dict *keys* (names), not initiatives; and the
        # dict is mutated while iterating (the `break` only partially saves
        # this). In practice the loop just burns 100 iterations — confirm
        # intent (probably `value['initiative'] == math.inf`) before fixing.
        while inf_exists and fucked_up > 0:
            for key, value in on_board_inits.items():
                if value == math.inf:
                    del on_board_inits[key]
                    break
            if math.inf not in on_board_inits:
                inf_exists = False
            fucked_up -= 1
        self.turn += 1
        if self.turn > len(self.initiative_holder) - 1:
            self.next_round()
        else:
            # Persist the mover's final path position onto its token record.
            for being in self.root.token_list:
                if being['name'] == self.turn_obj['name']:
                    being['coordinate'] = [str(self.move_path[-1][0]), str(self.move_path[-1][1]), str(self.move_path[-1][2])]
            if self.root.copy_win_open:
                self.copy_win.gray_map()
            self.refresh_map()
            #self.refresh_initiatives()

    def next_round(self, not_from_redo=True):
        """Roll over to a new round: bump the counter and reset the turn index."""
        if not_from_redo:
            self.log_action('round button', {'turn': self.turn})
        self.round += 1
        self.lbl_round.config(text=self.round)
        self.turn = 0
        for being in self.root.token_list:
            if being['name'] == self.turn_obj['name']:
                being['coordinate'] = [str(self.move_path[-1][0]), str(self.move_path[-1][1]), str(self.move_path[-1][2])]
        if self.root.copy_win_open:
            self.copy_win.gray_map()
        self.refresh_map()
        #self.refresh_initiatives()

    def reset_round(self, not_from_redo=True):
        """Reset the round counter to the pre-combat ("S") state."""
        if not_from_redo:
            restore_round = {
                'round': self.round,
                'turn': self.turn
            }
            self.log_action('reset round', restore_round)
        self.round = 0
        self.lbl_round.config(text="S")
        self.turn = 0
        self.refresh_map()
        #self.refresh_initiatives()

    def refresh_map(self, reset=False):
        """Destroy all placed tokens and re-place them from current state.

        With reset=True the creature/object lists are reloaded from the
        savefile first.
        """
        for row in self.map_frames:
            for col in row:
                remove_tokens = col.pack_slaves()
                if len(remove_tokens) > 0:
                    for token in remove_tokens:
                        token.destroy()
        remove_side_list = self.side_board.grid_slaves()
        if len(remove_side_list) > 0:
            for side_token in remove_side_list:
                side_token.destroy()
        self.side_count = 0
        if reset:
            self.initialize()
        self.place_tokens()
        if self.root.copy_win_open:
            self.copy_win.update_players()
        self.refresh_initiatives()

    def open_for_players(self):
        """Open the player-facing mirror window and sync it to the map."""
        self.copy_win.start_win()
        self.refresh_map()

    def save_game(self):
        """Serialize creatures, objects, and battle state into the save zip."""
        new_token_dict = {}
        for being in self.root.token_list:
            name = being["name"]
            new_token_dict[name] = being
        new_object_dict = {}
        for obj in self.root.obj_list:
            obj_name = obj["name"]
            new_object_dict[obj_name] = obj
        battle_dict = {
            "map_size": self.map_size,
            "round": self.round,
            "turn": self.turn
        }
        battleJSON = json.dumps(battle_dict, indent=4)
        # "w" mode rewrites the archive, dropping any members not re-added.
        with ZipFile(self.root.filename, "w") as savefile:
            creatJSON = json.dumps(new_token_dict, indent=4)
            objJSON = json.dumps(new_object_dict, indent=4)
            savefile.writestr('battle_info.json', battleJSON)
            savefile.writestr('creatures.json', creatJSON)
            savefile.writestr('objects.json', objJSON)
        self.go_back.clear_all()

    def clear_map(self):
        """Remove every creature from the board (undo-able via the action log)."""
        restore_tokens = copy.deepcopy(self.root.token_list)
        self.log_action('list', restore_tokens)
        for being in self.root.token_list:
            being["coordinate"] = ['', '', '']
        self.refresh_map()

    def dpad_move(self, dir):
        """Move the active creature one square in compass direction `dir`.

        '+'/'-' (and the queued self.z_delta) adjust elevation only.
        Positions are (row, col, elevation) tuples appended to move_path.
        """
        last_pos = copy.deepcopy(self.move_path[-1])
        if dir == 'n':
            curr_pos = (last_pos[0] - 1, last_pos[1], last_pos[2] + self.z_delta)
        elif dir == 's':
            curr_pos = (last_pos[0] + 1, last_pos[1], last_pos[2] + self.z_delta)
        elif dir == 'w':
            curr_pos = (last_pos[0], last_pos[1] - 1, last_pos[2] + self.z_delta)
        elif dir == 'e':
            curr_pos = (last_pos[0], last_pos[1] + 1, last_pos[2] + self.z_delta)
        elif dir == 'ne':
            curr_pos = (last_pos[0] - 1, last_pos[1] + 1, last_pos[2] + self.z_delta)
        elif dir == 'se':
            curr_pos = (last_pos[0] + 1, last_pos[1] + 1, last_pos[2] + self.z_delta)
        elif dir == 'sw':
            curr_pos = (last_pos[0] + 1, last_pos[1] - 1, last_pos[2] + self.z_delta)
        elif dir == 'nw':
            curr_pos = (last_pos[0] - 1, last_pos[1] - 1, last_pos[2] + self.z_delta)
        else:
            # '+'/'-': pure elevation change, no horizontal movement.
            curr_pos = (last_pos[0], last_pos[1], last_pos[2] + self.z_delta)
        if curr_pos[0] < 0 or curr_pos[0] > self.map_size[0] - 1 or curr_pos[1] < 0 or curr_pos[1] > self.map_size[1] - 1:
            messagebox.showwarning("BattleTracker", "Cannot move off map.")
            return
        self.z_delta = 0
        if self.turn_obj['size'] == 'large':
            space_need = 4
        elif self.turn_obj['size'] == 'huge':
            space_need = 9
        elif self.turn_obj['size'] == 'gargantuan':
            space_need = 16
        else:
            space_need = 1
        next_row_num = math.sqrt(space_need)
        row_offset = 0
        col_offset = 0
        if dir == '+':
            for
<gh_stars>1-10
# -*- coding: utf-8 -*-

import pdb, importlib, inspect, time, datetime, json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from financial import factor_earning
from vision.db.signletion_engine import get_fin_consolidated_statements_pit, get_fundamentals, query
from vision.table.industry_daily import IndustryDaily
from vision.table.fin_cash_flow import FinCashFlow
from vision.table.fin_balance import FinBalance
from vision.table.fin_income import FinIncome
from vision.table.fin_indicator import FinIndicator
from vision.table.fin_balance_ttm import FinBalanceTTM
from vision.table.fin_income_ttm import FinIncomeTTM
from vision.table.fin_cash_flow_ttm import FinCashFlowTTM
from utilities.sync_util import SyncUtil
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data


class CalcEngine(object):
    """Loads point-in-time financial statement data and drives the
    earning-factor calculations declared in `methods`."""

    # NOTE(review): mutable default argument — the `methods` list is shared
    # across all instances constructed without an explicit value. Harmless as
    # long as no caller mutates self._methods; confirm before refactoring.
    def __init__(self, name, url,
                 methods=[{'packet': 'financial.factor_earning', 'class': 'FactorEarning'}, ]):
        self._name = name          # engine/session name
        self._methods = methods    # factor packet/class descriptors to run
        self._url = url            # storage-engine destination URL

    def get_trade_date(self, trade_date, n, days=365):
        """Return the trading day `n` years before `trade_date`.

        If the computed calendar date is not a trading day, step backwards one
        day at a time until a trading day is found.

        :param days: length of one "year" in days (default 365)
        :param trade_date: current trade date, as YYYYMMDD int or string
        :param n: number of years to step back
        :return: trading date as a 'YYYYMMDD' string
        """
        syn_util = SyncUtil()
        trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)
        trade_date_sets = trade_date_sets['TRADEDATE'].values

        time_array = datetime.strptime(str(trade_date), "%Y%m%d")
        time_array = time_array - timedelta(days=days) * n
        date_time = int(datetime.strftime(time_array, "%Y%m%d"))
        if str(date_time) < min(trade_date_sets):
            # Earlier than the first known trading day: return as-is.
            # print('date_time %s is out of trade_date_sets' % date_time)
            return str(date_time)
        else:
            # Walk backwards day-by-day until we hit a trading day.
            while str(date_time) not in trade_date_sets:
                date_time = date_time - 1
            # print('trade_date pre %s year %s' % (n, date_time))
            return str(date_time)

    def _func_sets(self, method):
        # Filter out private and protected attributes; keep public callables.
        return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))

    def loading_data(self, trade_date):
        """Load the base data for all securities on one trading day.

        Builds two wide frames by repeated outer-merges on 'security_code':
        `tp_earning` (report-period data) and `ttm_earning` (TTM/MRQ data),
        pulling current-year figures plus 1-5 year look-backs.

        :param trade_date: trade date, 'YYYY-MM-DD'
        :return: (see the tail of this method, outside this chunk)
        """
        # Convert the date format to YYYYMMDD.
        time_array = datetime.strptime(trade_date, "%Y-%m-%d")
        trade_date = datetime.strftime(time_array, '%Y%m%d')
        # Look-back anchor dates for the factors currently in use.
        trade_date_pre_year = self.get_trade_date(trade_date, 1)
        trade_date_pre_year_2 = self.get_trade_date(trade_date, 2)
        trade_date_pre_year_3 = self.get_trade_date(trade_date, 3)
        trade_date_pre_year_4 = self.get_trade_date(trade_date, 4)
        trade_date_pre_year_5 = self.get_trade_date(trade_date, 5)

        # Metadata columns stripped from every query result before merging.
        columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']

        # Report Data
        cash_flow_sets = get_fin_consolidated_statements_pit(FinCashFlow,
                                                             [FinCashFlow.goods_sale_and_service_render_cash,
                                                              FinCashFlow.cash_and_equivalents_at_end,
                                                              ], dates=[trade_date])
        for column in columns:
            if column in list(cash_flow_sets.keys()):
                cash_flow_sets = cash_flow_sets.drop(column, axis=1)
        cash_flow_sets = cash_flow_sets.rename(
            columns={'goods_sale_and_service_render_cash': 'goods_sale_and_service_render_cash',  # cash received from sales of goods / services
                     'cash_and_equivalents_at_end': 'cash_and_equivalents_at_end',  # cash and equivalents at period end
                     })

        income_sets = get_fin_consolidated_statements_pit(FinIncome, [FinIncome.total_operating_revenue,
                                                                      FinIncome.operating_revenue,
                                                                      FinIncome.operating_profit,
                                                                      FinIncome.np_parent_company_owners,
                                                                      FinIncome.net_profit,
                                                                      FinIncome.operating_cost,
                                                                      ], dates=[trade_date])
        for column in columns:
            if column in list(income_sets.keys()):
                income_sets = income_sets.drop(column, axis=1)
        # NOTE(review): these renames are all identity mappings, kept as
        # in-code documentation of which columns are consumed downstream.
        income_sets = income_sets.rename(columns={'net_profit': 'net_profit',  # net profit
                                                  'total_operating_revenue': 'total_operating_revenue',  # total operating revenue
                                                  'operating_revenue': 'operating_revenue',  # operating revenue
                                                  'operating_cost': 'operating_cost',  # operating cost
                                                  'operating_profit': 'operating_profit',  # operating profit
                                                  'np_parent_company_owners': 'np_parent_company_owners',  # net profit attributable to parent-company owners
                                                  })
        tp_earning = pd.merge(cash_flow_sets, income_sets, how='outer', on='security_code')

        indicator_sets = get_fin_consolidated_statements_pit(FinIndicator,
                                                             [FinIndicator.np_cut,  # net profit excl. non-recurring gains/losses
                                                              FinIndicator.roe_weighted,
                                                              FinIndicator.roe_ex_weighted
                                                              ], dates=[trade_date])
        for column in columns:
            if column in list(indicator_sets.keys()):
                indicator_sets = indicator_sets.drop(column, axis=1)
        # NOTE(review): 'roe_ex_weighted' (a weighted ROE ratio) is renamed to
        # 'adjusted_profit' (a profit level) — the original comment said
        # "net profit excl. non-recurring items", which matches np_cut, not
        # roe_ex_weighted. Verify which field downstream factors expect.
        indicator_sets = indicator_sets.rename(columns={'roe_ex_weighted': 'adjusted_profit',
                                                        })
        tp_earning = pd.merge(indicator_sets, tp_earning, how='outer', on='security_code')

        balance_sets = get_fin_consolidated_statements_pit(FinBalance,
                                                           [FinBalance.equities_parent_company_owners,
                                                            ], dates=[trade_date])
        for column in columns:
            if column in list(balance_sets.keys()):
                balance_sets = balance_sets.drop(column, axis=1)
        balance_sets = balance_sets.rename(
            columns={'equities_parent_company_owners': 'equities_parent_company_owners',  # total equity attributable to parent-company shareholders
                     })
        tp_earning = pd.merge(balance_sets, tp_earning, how='outer', on='security_code')

        income_sets_pre_year_1 = get_fin_consolidated_statements_pit(FinIncome,
                                                                     [FinIncome.operating_revenue,  # operating revenue
                                                                      FinIncome.net_profit,  # net profit
                                                                      FinIncome.operating_cost,  # operating cost
                                                                      ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(income_sets_pre_year_1.keys()):
                income_sets_pre_year_1 = income_sets_pre_year_1.drop(column, axis=1)
        income_sets_pre_year_1 = income_sets_pre_year_1.rename(columns={'net_profit': 'net_profit_pre_year_1',  # net profit
                                                                        'operating_revenue': 'operating_revenue_pre_year_1',  # operating revenue
                                                                        'operating_cost': 'operating_cost_1y',  # operating cost
                                                                        })
        tp_earning = pd.merge(income_sets_pre_year_1, tp_earning, how='outer', on='security_code')

        income_sets_pre_year_2 = get_fin_consolidated_statements_pit(FinIncome, [FinIncome.operating_revenue,
                                                                                 FinIncome.net_profit,
                                                                                 ], dates=[trade_date_pre_year_2])
        for column in columns:
            if column in list(income_sets_pre_year_2.keys()):
                income_sets_pre_year_2 = income_sets_pre_year_2.drop(column, axis=1)
        income_sets_pre_year_2 = income_sets_pre_year_2.rename(columns={'net_profit': 'net_profit_pre_year_2',  # net profit
                                                                        'operating_revenue': 'operating_revenue_pre_year_2',  # operating revenue
                                                                        })
        tp_earning = pd.merge(income_sets_pre_year_2, tp_earning, how='outer', on='security_code')

        income_sets_pre_year_3 = get_fin_consolidated_statements_pit(FinIncome, [FinIncome.operating_revenue,
                                                                                 FinIncome.net_profit,
                                                                                 ], dates=[trade_date_pre_year_3])
        for column in columns:
            if column in list(income_sets_pre_year_3.keys()):
                income_sets_pre_year_3 = income_sets_pre_year_3.drop(column, axis=1)
        income_sets_pre_year_3 = income_sets_pre_year_3.rename(columns={'net_profit': 'net_profit_pre_year_3',  # net profit
                                                                        'operating_revenue': 'operating_revenue_pre_year_3',  # operating revenue
                                                                        })
        tp_earning = pd.merge(income_sets_pre_year_3, tp_earning, how='outer', on='security_code')

        income_sets_pre_year_4 = get_fin_consolidated_statements_pit(FinIncome, [FinIncome.operating_revenue,
                                                                                 FinIncome.net_profit,
                                                                                 ], dates=[trade_date_pre_year_4])
        for column in columns:
            if column in list(income_sets_pre_year_4.keys()):
                income_sets_pre_year_4 = income_sets_pre_year_4.drop(column, axis=1)
        income_sets_pre_year_4 = income_sets_pre_year_4.rename(columns={'net_profit': 'net_profit_pre_year_4',  # net profit
                                                                        'operating_revenue': 'operating_revenue_pre_year_4',  # operating revenue
                                                                        })
        tp_earning = pd.merge(income_sets_pre_year_4, tp_earning, how='outer', on='security_code')

        # TTM Data
        cash_flow_ttm_sets = get_fin_consolidated_statements_pit(FinCashFlowTTM,
                                                                 [FinCashFlowTTM.FINNETCFLOW,
                                                                  ], dates=[trade_date])
        for column in columns:
            if column in list(cash_flow_ttm_sets.keys()):
                cash_flow_ttm_sets = cash_flow_ttm_sets.drop(column, axis=1)
        cash_flow_ttm_sets = cash_flow_ttm_sets.rename(columns={'FINNETCFLOW': 'net_finance_cash_flow'})

        income_ttm_sets = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                              [FinIncomeTTM.operating_revenue,  # operating revenue
                                                               FinIncomeTTM.net_profit,  # net profit
                                                               FinIncomeTTM.administration_expense,  # administration expense
                                                               FinIncomeTTM.total_operating_revenue,  # total operating revenue
                                                               FinIncomeTTM.total_profit,  # total profit
                                                               FinIncomeTTM.financial_expense,  # financial expense
                                                               FinIncomeTTM.interest_income,  # interest income
                                                               FinIncomeTTM.sale_expense,  # selling expense
                                                               FinIncomeTTM.total_operating_cost,  # total operating cost
                                                               FinIncomeTTM.operating_profit,  # operating profit
                                                               FinIncomeTTM.np_parent_company_owners,  # net profit attributable to parent-company owners
                                                               FinIncomeTTM.operating_cost,  # operating cost
                                                               # FinIncomeTTM.ASSOINVEPROF,  # investment income from associates and joint ventures
                                                               FinIncomeTTM.operating_tax_surcharges,  # operating taxes and surcharges
                                                               FinIncomeTTM.asset_impairment_loss,  # asset impairment loss
                                                               FinIncomeTTM.income_tax,  # income tax
                                                               ], dates=[trade_date])
        for column in columns:
            if column in list(income_ttm_sets.keys()):
                income_ttm_sets = income_ttm_sets.drop(column, axis=1)
        ttm_earning = pd.merge(income_ttm_sets, cash_flow_ttm_sets, how='outer', on='security_code')

        balance_ttm_sets = get_fin_consolidated_statements_pit(FinBalanceTTM,
                                                               [FinBalanceTTM.total_assets,  # total assets
                                                                FinBalanceTTM.total_owner_equities,  # total owners' (shareholders') equity
                                                                FinBalanceTTM.equities_parent_company_owners,  # equity attributable to parent-company shareholders
                                                                ], dates=[trade_date])
        for column in columns:
            if column in list(balance_ttm_sets.keys()):
                balance_ttm_sets = balance_ttm_sets.drop(column, axis=1)
        ttm_earning = pd.merge(ttm_earning, balance_ttm_sets, how='outer', on='security_code')

        income_ttm_sets_pre_year_1 = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                                         [FinIncomeTTM.operating_revenue,
                                                                          FinIncomeTTM.net_profit,
                                                                          ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_1.keys()):
                income_ttm_sets_pre_year_1 = income_ttm_sets_pre_year_1.drop(column, axis=1)
        income_ttm_sets_pre_year_1 = income_ttm_sets_pre_year_1.rename(
            columns={'operating_revenue': 'operating_revenue_pre_year_1',  # operating revenue
                     'net_profit': 'net_profit_pre_year_1',  # net profit
                     })
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_1, how='outer', on='security_code')

        income_ttm_sets_pre_year_2 = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                                         [FinIncomeTTM.operating_revenue,
                                                                          FinIncomeTTM.net_profit,
                                                                          ], dates=[trade_date_pre_year_2])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_2.keys()):
                income_ttm_sets_pre_year_2 = income_ttm_sets_pre_year_2.drop(column, axis=1)
        income_ttm_sets_pre_year_2 = income_ttm_sets_pre_year_2.rename(
            columns={'operating_revenue': 'operating_revenue_pre_year_2',  # operating revenue
                     'net_profit': 'net_profit_pre_year_2',  # net profit
                     })
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_2, how='outer', on='security_code')

        income_ttm_sets_pre_year_3 = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                                         [FinIncomeTTM.operating_revenue,
                                                                          FinIncomeTTM.net_profit,
                                                                          ], dates=[trade_date_pre_year_3])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_3.keys()):
                income_ttm_sets_pre_year_3 = income_ttm_sets_pre_year_3.drop(column, axis=1)
        income_ttm_sets_pre_year_3 = income_ttm_sets_pre_year_3.rename(
            columns={'operating_revenue': 'operating_revenue_pre_year_3',  # operating revenue
                     'net_profit': 'net_profit_pre_year_3',  # net profit
                     })
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_3, how='outer', on='security_code')

        income_ttm_sets_pre_year_4 = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                                         [FinIncomeTTM.operating_revenue,
                                                                          FinIncomeTTM.net_profit,
                                                                          ], dates=[trade_date_pre_year_4])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_4.keys()):
                income_ttm_sets_pre_year_4 = income_ttm_sets_pre_year_4.drop(column, axis=1)
        income_ttm_sets_pre_year_4 = income_ttm_sets_pre_year_4.rename(
            columns={'operating_revenue': 'operating_revenue_pre_year_4',  # operating revenue
                     'net_profit': 'net_profit_pre_year_4',  # net profit
                     })
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_4, how='outer', on='security_code')

        # indicator_ttm_sets = get_fin_consolidated_statements_pit(IndicatorTTM,
        #                                                          [IndicatorTTM.ROIC,  # return on invested capital
        #                                                           ], dates=[trade_date]).drop(columns, axis=1)
        # indicator_ttm_sets = indicator_ttm_sets.rename(columns={'ROIC': '',
        #                                                         })

        # MRQ
        balance_mrq_sets = get_fin_consolidated_statements_pit(FinBalance,
                                                               [FinBalance.total_assets,  # total assets
                                                                FinBalance.equities_parent_company_owners,  # equity attributable to parent-company shareholders
                                                                FinBalance.total_owner_equities,  # total owners' (shareholders') equity
                                                                FinBalance.longterm_loan,  # long-term loans
                                                                ], dates=[trade_date])
        for column in columns:
            if column in list(balance_mrq_sets.keys()):
                balance_mrq_sets = balance_mrq_sets.drop(column, axis=1)
        balance_mrq_sets = balance_mrq_sets.rename(columns={'total_assets': 'total_assets_mrq',
                                                            'equities_parent_company_owners': 'equities_parent_company_owners_mrq',  # equity attributable to parent-company shareholders
                                                            'total_owner_equities': 'total_owner_equities_mrq',  # total owners' equity
                                                            'longterm_loan': 'longterm_loan_mrq',  # long-term loans
                                                            })
        ttm_earning = pd.merge(ttm_earning, balance_mrq_sets, how='outer', on='security_code')

        # NOTE(review): despite the '_pre' naming, this queries
        # dates=[trade_date] — identical to balance_mrq_sets above. It likely
        # should be dates=[trade_date_pre_year]; confirm against the factor
        # definitions before changing.
        balance_mrq_sets_pre = get_fin_consolidated_statements_pit(FinBalance,
                                                                   [FinBalance.total_assets,  # total assets
                                                                    FinBalance.total_owner_equities,  # total owners' (shareholders') equity
                                                                    FinBalance.longterm_loan,  # long-term loans
                                                                    ], dates=[trade_date])
        for column in columns:
            if column in list(balance_mrq_sets_pre.keys()):
                balance_mrq_sets_pre = balance_mrq_sets_pre.drop(column, axis=1)
        balance_mrq_sets_pre = balance_mrq_sets_pre.rename(columns={'total_assets': 'total_assets_mrq_pre',
                                                                    'total_owner_equities': 'total_owner_equities_mrq_pre',  # total owners' equity
                                                                    'longterm_loan': 'longterm_loan_mrq_pre',  # long-term loans
                                                                    })
        ttm_earning = pd.merge(ttm_earning, balance_mrq_sets_pre, how='outer', on='security_code')

        # 5-year aggregates: sum the same balance fields over the current and
        # previous four annual anchor dates, grouped per security.
        balance_con_sets = get_fin_consolidated_statements_pit(FinBalanceTTM,
                                                               [FinBalanceTTM.total_assets,  # total assets
                                                                FinBalanceTTM.total_owner_equities,  # total owners' (shareholders') equity
                                                                ], dates=[trade_date,
                                                                          trade_date_pre_year,
                                                                          trade_date_pre_year_2,
                                                                          trade_date_pre_year_3,
                                                                          trade_date_pre_year_4,
                                                                          ])
        for column in columns:
            if column in list(balance_con_sets.keys()):
                balance_con_sets = balance_con_sets.drop(column, axis=1)
        balance_con_sets = balance_con_sets.groupby(['security_code'])
        balance_con_sets = balance_con_sets.sum()
        balance_con_sets = balance_con_sets.rename(columns={'total_assets': 'total_assets',
                                                            'total_owner_equities': 'total_owner_equities'})

        # cash_flow_con_sets = get_fin_consolidated_statements_pit(FinCashFlow,
        #                                                          [FinCashFlow.cash_and_equivalents_at_end,
        #                                                           ],
        #                                                          dates=[trade_date,
        #                                                                 trade_date_pre_year,
        #                                                                 trade_date_pre_year_2,
        #                                                                 trade_date_pre_year_3,
        #                                                                 trade_date_pre_year_4,
        #                                                                 trade_date_pre_year_5,
        #                                                                 ]).drop(columns, axis=1)
        # cash_flow_con_sets = cash_flow_con_sets.groupby(['security_code'])
        # cash_flow_con_sets = cash_flow_con_sets.sum()
        # cash_flow_con_sets = cash_flow_con_sets.rename(columns={'cash_and_equivalents_at_end':'cash_and_equivalents_at_end'})

        # 6-point net-profit sum (current plus five annual look-backs).
        income_con_sets = get_fin_consolidated_statements_pit(FinIncome, [FinIncome.net_profit],
                                                              dates=[trade_date,
                                                                     trade_date_pre_year,
                                                                     trade_date_pre_year_2,
                                                                     trade_date_pre_year_3,
                                                                     trade_date_pre_year_4,
                                                                     trade_date_pre_year_5,
                                                                     ])
        for column in columns:
            if column in list(income_con_sets.keys()):
                income_con_sets = income_con_sets.drop(column, axis=1)
        income_con_sets = income_con_sets.groupby(['security_code'])
        income_con_sets = income_con_sets.sum()
        income_con_sets = income_con_sets.rename(columns={'net_profit': 'net_profit'}).reset_index()
        ttm_earning_5y = pd.merge(balance_con_sets, income_con_sets, how='outer', on='security_code')

        ttm_earning_1y = get_fin_consolidated_statements_pit(FinIncomeTTM,
                                                             [FinIncomeTTM.operating_cost,
                                                              FinIncomeTTM.operating_revenue,
                                                              FinIncomeTTM.np_parent_company_owners,
                                                              ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(ttm_earning_1y.keys()):
                ttm_earning_1y = ttm_earning_1y.drop(column, axis=1)
        ttm_earning_1y = ttm_earning_1y.rename(columns={'operating_revenue': 'operating_revenue_1y',  # operating revenue
                                                        'operating_cost': 'operating_cost_1y',  # operating cost
                                                        'np_parent_company_owners': 'np_parent_company_owners_1y'
                                                        })
        ttm_earning = pd.merge(ttm_earning, ttm_earning_1y, how='outer', on='security_code')

        balance_mrq_1y = get_fin_consolidated_statements_pit(FinBalance,
                                                             [FinBalance.equities_parent_company_owners,
                                                              ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(balance_mrq_1y.keys()):
                balance_mrq_1y = balance_mrq_1y.drop(column,
axis=1) balance_mrq_1y = balance_mrq_1y.rename( columns={'equities_parent_company_owners': 'equities_parent_company_owners_mrq_1y', }) ttm_earning = pd.merge(ttm_earning, balance_mrq_1y, how='outer', on='security_code') return tp_earning, ttm_earning, ttm_earning_5y def process_calc_factor(self, trade_date, tp_earning, ttm_earning, ttm_earning_5y): tp_earning = tp_earning.set_index('security_code') ttm_earning = ttm_earning.set_index('security_code') ttm_earning_5y = ttm_earning_5y.set_index('security_code') earning = factor_earning.FactorEarning() # 因子计算 earning_sets = pd.DataFrame() earning_sets['security_code'] = tp_earning.index earning_sets = earning_sets.set_index('security_code') # MRQ earning_sets = earning.Rev5YChg(tp_earning, earning_sets) earning_sets = earning.ROA5YChg(ttm_earning_5y, earning_sets) earning_sets = earning.ROE5Y(ttm_earning_5y, earning_sets) earning_sets = earning.NPCutToNP(tp_earning, earning_sets) earning_sets = earning.ROE(tp_earning, earning_sets) earning_sets = earning.ROEAvg(tp_earning, earning_sets) earning_sets = earning.ROEcut(tp_earning, earning_sets) earning_sets = earning.DGPR(tp_earning, earning_sets) earning_sets = earning.ROEWeight(tp_earning, earning_sets) earning_sets = earning.ROEDilutedWeight(tp_earning, earning_sets) # TTM # factor_earning = earning.invest_r_associates_to_tp_latest(tp_earning, earning_sets) earning_sets = earning.NetNonOiToTP(ttm_earning, earning_sets) earning_sets = earning.GPM1YChgTTM(ttm_earning, earning_sets) earning_sets = earning.DROE(ttm_earning, earning_sets) earning_sets = earning.NetPft5YAvgChgTTM(ttm_earning, earning_sets) earning_sets = earning.Sales5YChgTTM(ttm_earning, earning_sets) # factor_earning = earning.roa(ttm_earning, earning_sets) earning_sets = earning.AdminExpRtTTM(ttm_earning, earning_sets) earning_sets = earning.BerryRtTTM(ttm_earning, earning_sets) earning_sets = earning.CFARatioMinusROATTM(ttm_earning, earning_sets) earning_sets = earning.CostRtTTM(ttm_earning, 
earning_sets) earning_sets = earning.EBITToTORevTTM(ttm_earning, earning_sets) earning_sets = earning.PeridCostTTM(ttm_earning, earning_sets) earning_sets = earning.FinExpRtTTM(ttm_earning, earning_sets) earning_sets = earning.ImpLossToTOITTM(ttm_earning, earning_sets) earning_sets = earning.OIAToOITTM(ttm_earning, earning_sets) earning_sets = earning.ROAexTTM(ttm_earning, earning_sets) earning_sets = earning.NetProfitRtTTM(ttm_earning, earning_sets)
with a hosting service will have those credentials removed. """ from reviewboard.scmtools.models import Repository # Due to a bug in Review Board 2.0.x < 2.0.25 and 2.5.x < 2.5.7, # the browser could end up filling in the hidden "password" field # on repositories that were set up to use a hosting service. For # these, we want to make sure those credentials are safely removed. repositories = ( Repository.objects .filter(hosting_account__isnull=False) .exclude(username='', encrypted_password='') ) repositories.update(username='', encrypted_password='') # Any remaining passwords should be encrypted (if coming from an older # version before encryption was added). Repository.objects.encrypt_plain_text_passwords() def get_static_media_upgrade_needed(self): """Determine if a static media config upgrade is needed.""" from djblets.siteconfig.models import SiteConfiguration siteconfig = SiteConfiguration.objects.get_current() manual_updates = siteconfig.settings.get('manual-updates', {}) resolved_update = manual_updates.get('static-media', False) return (not resolved_update and (pkg_resources.parse_version(siteconfig.version) < pkg_resources.parse_version("1.7"))) def get_diff_dedup_needed(self): """Determine if there's likely duplicate diff data stored.""" from reviewboard.diffviewer.models import FileDiff try: return FileDiff.objects.unmigrated().exists() except: # Very likely, there was no diffviewer_filediff.diff_hash_id # column, indicating a pre-1.7 database. We want to assume # a dedup is needed. return True def get_settings_upgrade_needed(self): """Determine if a settings upgrade is needed.""" try: import settings_local if (hasattr(settings_local, 'DATABASE_ENGINE') or hasattr(settings_local, 'CACHE_BACKEND')): return True if hasattr(settings_local, 'DATABASES'): engine = settings_local.DATABASES['default']['ENGINE'] if not engine.startswith('django.db.backends'): return True except ImportError: sys.stderr.write("Unable to import settings_local. 
" "Cannot determine if upgrade is needed.\n") return False def get_wsgi_upgrade_needed(self): """Return whether a reviewboard.wsgi upgrade is needed. Returns: bool: ``True`` if the :file:`reviewboard.wsgi` file needs to be upgraded. ``False`` if it does not. """ filename = os.path.join(self.abs_install_dir, 'htdocs', 'reviewboard.wsgi') with open(filename, 'r') as fp: data = fp.read() return 'django.core.handlers.wsgi.WSGIHandler' in data def upgrade_settings(self): """Perform a settings upgrade.""" settings_file = os.path.join(self.abs_install_dir, "conf", "settings_local.py") perform_upgrade = False buf = [] database_info = {} database_keys = ('ENGINE', 'NAME', 'USER', 'PASSWORD', 'HOST', 'PORT') backend_info = {} from django.core.cache import (parse_backend_uri, InvalidCacheBackendError) try: import settings_local if hasattr(settings_local, 'DATABASE_ENGINE'): engine = settings_local.DATABASE_ENGINE # Don't convert anything other than the ones we know about, # or third parties with custom databases may have problems. if engine in ('sqlite3', 'mysql', 'postgresql', 'postgresql_psycopg2'): engine = 'django.db.backends.' 
+ engine database_info['ENGINE'] = engine for key in database_keys: if key != 'ENGINE': database_info[key] = getattr(settings_local, 'DATABASE_%s' % key, '') perform_upgrade = True if hasattr(settings_local, 'DATABASES'): engine = settings_local.DATABASES['default']['ENGINE'] if engine == 'postgresql_psycopg2': perform_upgrade = True if hasattr(settings_local, 'CACHE_BACKEND'): try: backend_info = parse_backend_uri( settings_local.CACHE_BACKEND) perform_upgrade = True except InvalidCacheBackendError: pass except ImportError: sys.stderr.write("Unable to import settings_local for upgrade.\n") return if not perform_upgrade: return fp = open(settings_file, 'r') found_database = False found_cache = False for line in fp.readlines(): if line.startswith('DATABASE_'): if not found_database: found_database = True buf.append("DATABASES = {\n") buf.append(" 'default': {\n") for key in database_keys: if database_info[key]: buf.append(" '%s': '%s',\n" % (key, database_info[key])) buf.append(" },\n") buf.append("}\n") elif line.startswith('CACHE_BACKEND') and backend_info: if not found_cache: found_cache = True buf.append("CACHES = {\n") buf.append(" 'default': {\n") buf.append(" 'BACKEND': '%s',\n" % self.CACHE_BACKENDS[backend_info[0]]) buf.append(" 'LOCATION': '%s',\n" % backend_info[1]) buf.append(" },\n") buf.append("}\n") elif line.strip().startswith("'ENGINE': 'postgresql_psycopg2'"): buf.append(" 'ENGINE': '" "django.db.backends.postgresql_psycopg2',\n") else: buf.append(line) fp.close() fp = open(settings_file, 'w') fp.writelines(buf) fp.close() # Reload the settings module del sys.modules['settings_local'] del sys.modules['reviewboard.settings'] import django.conf django.conf.settings = django.conf.LazySettings() def upgrade_wsgi(self): """Upgrade the reviewboard.wsgi file. This will modify :file:`reviewboard.wsgi` to replace any old WSGI initialization logic with modern logic. 
""" filename = os.path.join(self.abs_install_dir, 'htdocs', 'reviewboard.wsgi') with open(filename, 'r') as fp: data = fp.read() data = data.replace( 'import django.core.handlers.wsgi', 'from django.core.wsgi import get_wsgi_application') data = data.replace( 'application = django.core.handlers.wsgi.WSGIHandler()', 'application = get_wsgi_application()') with open(filename, 'w') as fp: fp.write(data) def create_admin_user(self): """Create an administrator user account.""" from django.contrib.auth.models import User if not User.objects.filter(username=self.admin_user).exists(): cwd = os.getcwd() os.chdir(self.abs_install_dir) User.objects.create_superuser(self.admin_user, self.admin_email, self.admin_password) os.chdir(cwd) def register_support_page(self): """Register this installation with the support data tracker.""" from reviewboard.admin.support import get_register_support_url url = get_register_support_url(force_is_admin=True) try: urlopen(url, timeout=5).read() except: # There may be a number of issues preventing this from working, # such as a restricted network environment or a server issue on # our side. This isn't a catastrophic issue, so don't bother them # about it. pass def run_manage_command(self, cmd, params=None): """Run a given django management command.""" cwd = os.getcwd() os.chdir(self.abs_install_dir) try: from django.core.management import (execute_from_command_line, get_commands) os.environ.setdefault(str('DJANGO_SETTINGS_MODULE'), str('reviewboard.settings')) if not params: params = [] if DEBUG: params.append("--verbosity=0") commands_dir = os.path.join(self.abs_install_dir, 'commands') if os.path.exists(commands_dir): # Pre-fetch all the available management commands. get_commands() # Insert our own management commands into this list. # Yes, this is a bit of a hack. 
from django.core.management import _commands for command in os.listdir(commands_dir): module_globals = {} filename = os.path.join(commands_dir, command) with open(filename) as f: code = compile(f.read(), filename, 'exec') exec(code, module_globals) if 'Command' in module_globals: name = os.path.splitext(f)[0] _commands[name] = module_globals['Command']() execute_from_command_line([__file__, cmd] + params) except ImportError as e: ui.error("Unable to execute the manager command %s: %s" % (cmd, e)) os.chdir(cwd) def mkdir(self, dirname): """Create a directory, but only if it doesn't already exist.""" if not os.path.exists(dirname): os.mkdir(dirname) def link_pkg_dir(self, pkgname, src_path, dest_dir, replace=True): """Create the package directory.""" src_dir = pkg_resources.resource_filename(pkgname, src_path) if os.path.islink(dest_dir) and not os.path.exists(dest_dir): os.unlink(dest_dir) if os.path.exists(dest_dir): if not replace: return self.unlink_media_dir(dest_dir) if self.options.copy_media: shutil.copytree(src_dir, dest_dir) else: os.symlink(src_dir, dest_dir) def unlink_media_dir(self, path): """Delete the given media directory and all contents.""" if os.path.exists(path): if os.path.islink(path): os.unlink(path) else: shutil.rmtree(path) def process_template(self, template_path, dest_filename): """Generate a file from a template.""" domain_name = self.domain_name or '' domain_name_escaped = domain_name.replace(".", "\\.") template = pkg_resources.resource_string("reviewboard", template_path) sitedir = os.path.abspath(self.install_dir).replace("\\", "/") if self.site_root: site_root = self.site_root site_root_noslash = site_root[1:-1] else: site_root = '/' site_root_noslash = '' # Check if this is a .exe. 
if (hasattr(sys, "frozen") or # new py2exe hasattr(sys, "importers") or # new py2exe imp.is_frozen("__main__")): # tools/freeze rbsite_path = sys.executable else: rbsite_path = '"%s" "%s"' % (sys.executable, sys.argv[0]) data = { 'rbsite': rbsite_path, 'port': self.web_server_port, 'sitedir': sitedir, 'sitedomain': domain_name, 'sitedomain_escaped': domain_name_escaped, 'siteid': self.site_id, 'siteroot': site_root, 'siteroot_noslash': site_root_noslash, } if hasattr(self, 'apache_auth'): data['apache_auth'] = self.apache_auth template = re.sub(r"@([a-z_]+)@", lambda m: data.get(m.group(1)), template) fp = open(dest_filename, "w") fp.write(template) fp.close() class SiteList(object): """Maintains the list of sites installed on the system.""" def __init__(self, path): """Initialize the site list.""" self.path = path # Read the list in as a unique set. # This way, we can easily eliminate duplicates. self.sites = set() if os.path.exists(self.path): f = open(self.path, 'r') for line in f: site = line.strip() # Verify that this path exists on the system # And add it to the dictionary. if os.path.exists(site): self.sites.add(site) f.close() def add_site(self, site_path): """Add a site to the site list.""" self.sites.add(site_path) # Write all of the sites back to the file. # Sort keys to ensure consistent order. ordered_sites = list(self.sites) ordered_sites.sort() # Create the parent directory of the site # if it doesn't already exist if not os.path.exists(os.path.dirname(self.path)): # Create the parent directory with read-write # permissions for user but read and execute # only for others. 
try: os.makedirs(os.path.dirname(self.path), 0o755) except: # We shouldn't consider this an abort-worthy error # We'll warn the user and just complete setup print("WARNING: Could not save site to sitelist %s" % self.path) return with open(self.path, 'w') as f: for site in ordered_sites: f.write("%s\n" % site) class UIToolkit(object): """An abstract class that forms the basis for all UI interaction. Subclasses can override this to provide new ways of representing the UI to the user. """ def run(self): """Run the UI.""" pass def page(self, text, allow_back=True, is_visible_func=None, on_show_func=None): """Add a new "page" to display to the user. Input and text are associated with this page and may be displayed immediately or later, depending on the toolkit. If is_visible_func is specified and returns False, this page will be skipped. """ return None def prompt_input(self, page, prompt, default=None, password=False, normalize_func=None, save_obj=None, save_var=None): """Prompt the user for some text. This may contain a default value.""" raise NotImplementedError def prompt_choice(self, page, prompt, choices, save_obj=None, save_var=None): """Prompt the user for an item amongst a list of choices.""" raise NotImplementedError def text(self, page, text): """Display a block of text to the user.""" raise NotImplementedError def disclaimer(self, page, text): """Display a block of disclaimer text to
import sys
import os
import time
import socket
import logging
import traceback
from collections import deque

try:
    # Use simplejson instead of json because it is released more frequently
    # and is generally faster.
    import simplejson as json

    # However, if speedups are not installed, simplejson will be slow, so use
    # the built-in json instead.
    if json._import_c_make_encoder() is None:
        import json
except ImportError:
    import json

storm_log = logging.getLogger('tripwatch.storm')

TUPLE_PROFILING = False

json_encode = lambda x: json.dumps(x)
json_decode = lambda x: json.loads(x)

BLANK_LINE_CHECK = True

# Save old stdout so we can still write to it after redirecting.
old_stdout = sys.stdout

# TODO: Get this value from a topology configuration setting.
MAX_MESSAGE_SIZE = 16777216


class StormIPCException(Exception):
    pass


#reads lines and reconstructs newlines appropriately
def readMsg():
    """Read one newline-delimited JSON message (terminated by an "end" line)
    from stdin and return the decoded object."""
    def read_message_lines():
        if BLANK_LINE_CHECK:
            count_blank = 0
        i_line = 0
        message_size = 0
        while True:
            line = sys.stdin.readline()[0:-1]
            if not line:
                continue
            i_line += 1
            message_size += len(line)
            if line == "end":
                break
            # If message size exceeds MAX_MESSAGE_SIZE, we assume that the
            # Storm worker has died, and we would be reading an infinite
            # series of blank lines. Throw an error to halt processing,
            # otherwise the task will use 100% CPU and will quickly consume a
            # huge amount of RAM.
            if (MAX_MESSAGE_SIZE is not None and
                    message_size > MAX_MESSAGE_SIZE):
                raise StormIPCException('Message exceeds MAX_MESSAGE_SIZE -- '
                                        'assuming this is an error')
            # NOTE(review): statement nesting below reconstructed from a
            # newline-collapsed source; grouping under BLANK_LINE_CHECK is
            # the most plausible reading — confirm against upstream.
            if BLANK_LINE_CHECK:
                if not line:
                    storm_log.debug('Message line #%d is blank. Pipe to '
                                    'Storm supervisor may be broken.',
                                    i_line)
                    count_blank += 1
                    if count_blank >= 20:
                        raise StormIPCException(
                            'Pipe to Storm supervisor seems to be broken!')
                if i_line > 100:
                    raise StormIPCException(
                        'Message exceeds 100 lines -- '
                        'assuming this is an error')
                if count_blank > 0:
                    storm_log.debug('Message line #%d: %s', i_line + 1, line)
            yield line

    msg = ''.join('%s\n' % line for line in read_message_lines())
    return json_decode(msg)


MODE = None
ANCHOR_TUPLE = None

#queue up commands we read while trying to read taskids
pending_commands = deque()


def readTaskIds():
    """Return the next task-id list, buffering any commands seen meanwhile."""
    if pending_taskids:
        return pending_taskids.popleft()
    else:
        msg = readMsg()
        while type(msg) is not list:
            pending_commands.append(msg)
            msg = readMsg()
        return msg


#queue up taskids we read while trying to read commands/tuples
pending_taskids = deque()


def readCommand():
    """Return the next command dict, buffering any task-id lists seen
    meanwhile."""
    if pending_commands:
        return pending_commands.popleft()
    else:
        msg = readMsg()
        while type(msg) is list:
            pending_taskids.append(msg)
            msg = readMsg()
        return msg


def readTuple():
    cmd = readCommand()
    return Tuple(cmd["id"], cmd["comp"], cmd["stream"], cmd["task"],
                 cmd["tuple"])


def sendMsgToParent(msg):
    """JSON-encode msg and write it (plus the "end" terminator) to the
    original stdout, flushing immediately."""
    print >> old_stdout, json_encode(msg)
    print >> old_stdout, "end"
    try:
        old_stdout.flush()
    except (IOError, OSError) as e:
        raise StormIPCException('%s error [Errno %d] in sendMsgToParent: %s' % (
            type(e).__name__, e.errno, str(e)))


# This function is probably obsolete with the addition of the new
# reportError() function.
# TODO: Consider getting rid of this function and call reportError() instead.
# However, need to consider the case where we are running on an older version
# of Storm where the Storm back end does not support reportError()? Can we
# detect that case and use this function instead?
def sendFailureMsgToParent(msg):
    """This function is kind of a hack, but useful when a Python task
    encounters a fatal exception. "msg" should be a simple string like
    "E_SPOUTFAILED". This function sends "msg" as-is to the Storm worker,
    which tries to parse it as JSON. The hacky aspect is that we
    *deliberately* make it fail by sending it non-JSON data. This causes the
    Storm worker to throw an error and restart the Python task. This is
    cleaner than simply letting the task die without notifying Storm, because
    this way Storm restarts the task more quickly."""
    assert isinstance(msg, basestring)
    print >> old_stdout, msg
    print >> old_stdout, "end"
    storm_log.error('Sent failure message ("%s") to Storm', msg)


def sync():
    sendMsgToParent({'command':'sync'})


def sendpid(heartbeatdir):
    # Report our pid and touch a heartbeat file named after it.
    pid = os.getpid()
    sendMsgToParent({'pid':pid})
    open(heartbeatdir + "/" + str(pid), "w").close()


def emit(*args, **kwargs):
    result = __emit(*args, **kwargs)
    if result:
        return readTaskIds()


def emitMany(*args, **kwargs):
    """A more efficient way to emit a number of tuples at once."""
    global MODE
    if MODE == Bolt:
        emitManyBolt(*args, **kwargs)
    elif MODE == Spout:
        emitManySpout(*args, **kwargs)


def emitDirect(task, *args, **kwargs):
    kwargs["directTask"] = task
    __emit(*args, **kwargs)


def __emit(*args, **kwargs):
    global MODE
    if MODE == Bolt:
        return emitBolt(*args, **kwargs)
    elif MODE == Spout:
        return emitSpout(*args, **kwargs)


def emitManyBolt(tuples, stream=None, anchors=None, directTask=None):
    global ANCHOR_TUPLE
    if anchors is None:
        if ANCHOR_TUPLE is None:
            anchors = []
        elif ANCHOR_TUPLE is not None:
            anchors = [ANCHOR_TUPLE]
    m = {
        "command": "emit",
        "anchors": [a.id for a in anchors],
        "tuple": None,
        "need_task_ids": False,
    }
    if stream is not None:
        m["stream"] = stream
    if directTask is not None:
        m["task"] = directTask
    # Batch all messages into one write for efficiency.
    lines = []
    for tup in tuples:
        m["tuple"] = tup
        lines.append(json_encode(m))
        lines.append('end')
    print >> old_stdout, '\n'.join(lines)


def emitBolt(tup, stream=None, anchors=None, directTask=None,
             need_task_ids=False):
    global ANCHOR_TUPLE
    if anchors is None:
        if ANCHOR_TUPLE is None:
            anchors = []
        elif ANCHOR_TUPLE is not None:
            anchors = [ANCHOR_TUPLE]
    m = {
        "command": "emit",
        "anchors": [a.id for a in anchors],
        "tuple": tup,
        "need_task_ids": need_task_ids,
    }
    if stream is not None:
        m["stream"] = stream
    if directTask is not None:
        m["task"] = directTask
    sendMsgToParent(m)
    return need_task_ids


def emitManySpout(tuples, stream=None, id=None, directTask=None,
                  need_task_ids=False):
    m = {
        "command": "emit",
        "tuple": None,
        "need_task_ids": need_task_ids,
    }
    if id is not None:
        m["id"] = id
    if stream is not None:
        m["stream"] = stream
    if directTask is not None:
        m["task"] = directTask
    lines = []
    for tup in tuples:
        m["tuple"] = tup
        lines.append(json_encode(m))
        lines.append('end')
    print >> old_stdout, '\n'.join(lines)


def emitSpout(tup, stream=None, id=None, directTask=None,
              need_task_ids=False):
    m = {
        "command": "emit",
        "tuple": tup,
        "need_task_ids": need_task_ids,
    }
    if id is not None:
        m["id"] = id
    if stream is not None:
        m["stream"] = stream
    if directTask is not None:
        m["task"] = directTask
    sendMsgToParent(m)
    return need_task_ids


def ack(tup):
    """Acknowledge a tuple"""
    sendMsgToParent({"command": "ack", "id": tup.id})


def ackId(tupid):
    """Acknowledge a tuple when you only have its ID"""
    sendMsgToParent({"command": "ack", "id": tupid})


def fail(tup):
    """Fail a tuple"""
    sendMsgToParent({"command": "fail", "id": tup.id})


def reportError(msg):
    sendMsgToParent({"command": "error", "msg": msg})


def log(msg):
    sendMsgToParent({"command": "log", "msg": msg})


def initComponent():
    # Redirect stdout and stderr to logger instances. This is particularly
    # important for stdout so 'print' statements won't crash the Storm Java
    # worker.
    sys.stdout = LogStream(logging.getLogger('storm.stdout'))
    sys.stderr = LogStream(logging.getLogger('storm.stderr'))

    setupInfo = readMsg()
    storm_log.info('Task received setupInfo from Storm: %s', setupInfo)
    sendpid(setupInfo['pidDir'])
    storm_log.info('Task sent pid to Storm')
    return [setupInfo['conf'], setupInfo['context']]


class Tuple(object):
    __slots__ = ['id', 'component', 'stream', 'task', 'values']

    def __init__(self, id, component, stream, task, values):
        self.id = id
        self.component = component
        self.stream = stream
        self.task = task
        self.values = values

    def __eq__(self, other):
        if not isinstance(other, Tuple):
            return False
        for k in self.__slots__:
            if getattr(self, k) != getattr(other, k):
                return False
        return True

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return '<%s%s>' % (
            self.__class__.__name__,
            ''.join(' %s=%r' % (k, getattr(self, k))
                    for k in sorted(self.__slots__)))


class Task(object):
    def shared_initialize(self):
        conf, context = initComponent()

        # These values are only available with a patched version of Storm.
        self.task_index = context.get('taskIndex', -1)
        self.worker_port = context.get('workerPort', -1)

        self.initialize(conf, context)

    def report_exception(self, base_message, exception):
        parameters = (
            base_message,
            os.environ.get('SCRIPT', sys.argv[0]),
            socket.gethostname(),
            'pid', os.getpid(),
            'port', self.worker_port,
            'taskindex', self.task_index,
            type(exception).__name__,
            #str(exception),
        )
        #message = '%s: %s (pid %d) on %s failed with %s: %s' % parameters
        message = '__'.join(str(p).replace('.', '_') for p in parameters)
        sendFailureMsgToParent(message)

        # Sleep for a few seconds to try and ensure Storm reads this message
        # before we terminate. If it does, then our message above will appear
        # in the Storm UI.
        time.sleep(5)


class Bolt(Task):
    def __init__(self):
        if TUPLE_PROFILING:
            self.profiler = BoltProfiler()
        else:
            self.profiler = None

    def initialize(self, stormconf, context):
        pass

    def process(self, tuple):
        pass

    def run(self):
        global MODE
        MODE = Bolt
        self.shared_initialize()
        profiler = self.profiler
        while True:
            try:
                if profiler is not None:
                    profiler.pre_read()
                tup = readTuple()
                if profiler is not None:
                    profiler.post_read()
                self.process(tup)
                if profiler is not None:
                    profiler.post_process()
            except Exception as e:
                storm_log.exception('Caught exception in Bolt.run')
                if 'tup' in locals():
                    # Only print the first 2000 characters of the tuple,
                    # otherwise the message may be too long for certain
                    # handlers (e.g. SysLogHandler).
                    storm_log.error(
                        'The error occurred while processing this tuple: %s',
                        repr(tup.values)[:2000])
                reportError("%s\n%s" % (str(e), traceback.format_exc()))
                raise


class BasicBolt(Task):
    def __init__(self):
        if TUPLE_PROFILING:
            self.profiler = BasicBoltProfiler()
        else:
            self.profiler = None

    def initialize(self, stormconf, context):
        pass

    def process(self, tuple):
        pass

    def run(self):
        global MODE
        MODE = Bolt
        global ANCHOR_TUPLE
        self.shared_initialize()
        profiler = self.profiler
        # [truncated in source] The remainder of BasicBolt.run ("tup = ...")
        # is cut off at the end of this chunk.
# ctdcal/process_ctd.py  (filename marker recovered from extraction junk)
import logging
import warnings
from datetime import datetime, timezone
from pathlib import Path

import gsw
import numpy as np
import pandas as pd
import scipy.signal as sig

from . import get_ctdcal_config, io, oxy_fitting

cfg = get_ctdcal_config()
log = logging.getLogger(__name__)

warnings.filterwarnings("ignore", "Mean of empty slice.")


def cast_details(df, ssscc, log_file=None):
    """
    We determine the cast details using pandas magic.
    First find alternating periods of pumps on and pumps off, then select
    the pumps on period with the highest pressure. Get values from the row
    with the highest pressure, and return all values to be sent to log.

    Parameters
    ----------
    df : DataFrame
        Filtered CTD data
    ssscc : integer
        The station and cast, as SSSCC format
    log_file : file handle or string
        File destination for cast details

    Returns
    -------
    df_downcast : DataFrame
        CTD data with the soak period and upcast trimmed off

    Notes
    -----
    The following (float) variables are output to log_file:
    time_start : Time at start of cast (in unix epoch time)
    time_end : Time at end of cast (in unix epoch time)
    time_bottom : Time at bottom of cast (in unix epoch time)
    p_start : Pressure at which cast started
    p_max : Bottom of the cast pressure
    b_lat : Latitude at bottom of cast
    b_lon : Longitude at bottom of cast
    b_alt : Altimeter reading at bottom of cast
    """
    df_cast = _trim_soak_period(df)

    # TODO: call parameters from config file instead
    p_start = float(np.around(df_cast["CTDPRS"].head(1), 4))
    p_max_ind = df_cast["CTDPRS"].argmax()
    p_max = float(np.around(df_cast["CTDPRS"].max(), 4))
    time_start = float(df_cast["scan_datetime"].head(1))
    time_end = float(df_cast["scan_datetime"].tail(1))
    time_bottom = float(df_cast["scan_datetime"][p_max_ind])
    b_lat = float(np.around(df_cast["GPSLAT"][p_max_ind], 4))
    b_lon = float(np.around(df_cast["GPSLON"][p_max_ind], 4))
    b_alt = float(np.around(df_cast["ALT"][p_max_ind], 4))

    io.write_cast_details(
        ssscc,
        log_file,
        time_start,
        time_end,
        time_bottom,
        p_start,
        p_max,
        b_alt,
        b_lat,
        b_lon,
    )

    # remove upcast
    df_downcast = df_cast[:p_max_ind].copy()

    return df_downcast


def _trim_soak_period(df=None):
    """
    1) Find pump on/off patterns
    2) Select pump_on=True group with largest pressure recording
    3) Find soak period before start of downcast
    4) Trim cast, return everything after top of cast (i.e. minimum pressure)
    """
    df_list = [
        g for i, g in df.groupby(df["pump_on"].ne(df["pump_on"].shift()).cumsum())
    ]
    df_pump_on_list = [df for df in df_list if df["pump_on"].all()]
    df_cast = df_pump_on_list[
        np.argmax([df["CTDPRS"].max() for df in df_pump_on_list])
    ]
    df_cast = df_cast.reset_index(drop=True)

    # next fn deals w/ edge cases, leave as is for now
    df_cast = _find_last_soak_period(df_cast)
    start_ind = df_cast.loc[: len(df) // 4, "CTDPRS"].argmin()
    df_trimmed = df_cast[start_ind:].reset_index(drop=True).copy()

    return df_trimmed


def _find_last_soak_period(df_cast, time_bin=8, P_surface=2, P_downcast=50):
    """
    Find the soak period before the downcast starts.

    The algorithm is tuned for repeat hydrography work, specifically US
    GO-SHIP parameters. This assumes the soak depth will be somewhere between
    10 and 30 meters, the package will sit at the soak depth for at least 20
    to 30 seconds before starting ascent to the surface and descent to target
    depth.

    The algorithm is not guaranteed to catch the exact start of the soak
    period, but within a minimum period of time_bin seconds(?) from end of
    the soak if the soak period assumption is valid. This should be shorter
    than the total soak period time, and able to catch the following rise and
    descent of the package that signals the start of the cast.

    The algorithm has been designed to handle four general cases of casts:

        * A routine cast with pumps turning on in water and normal soak
        * A cast where the pumps turn on in air/on deck
        * A cast where the pumps turn on and off due to rosette coming out
          of water
        * A cast where there are multiple stops on the downcast to the
          target depth

    Parameters
    ----------
    df_cast : DataFrame
        DataFrame of the entire cast, from deckbox on to deckbox off
    time_bin : integer, optional
        Number of seconds to bin average for descent rate calculation
    P_surface : integer, optional
        Minimum surface pressure threshold required to look for soak depth
        (2 dbar was chosen as an average rosette is roughly 1.5 to 2 meters
        tall)
    P_downcast : integer, optional
        Minimum pressure threshold required to assume downcast has started
        (50 dbar has been chosen as double the deep soak depth of 20-30 dbar)

    Returns
    -------
    df_cast_trimmed : DataFrame
        DataFrame starting within time_bin seconds of the last soak period.
    """
    # Validate user input
    if time_bin <= 0:
        raise ValueError("Time bin value should be positive whole seconds.")
    if P_downcast <= 0:
        raise ValueError(
            "Starting downcast pressure threshold must be positive integers."
        )
    if P_downcast < P_surface:
        raise ValueError(
            "Starting downcast pressure threshold must be greater \
than surface pressure threshold."
        )

    # If pumps have not turned on until in water, return DataFrame
    if df_cast.iloc[0]["CTDPRS"] > P_surface:
        return df_cast

    # Bin the data by time, and compute the average rate of descent
    df_cast["index"] = df_cast.index  # needed at end to identify start_idx
    df_cast["bin"] = pd.cut(
        df_cast.index,
        np.arange(df_cast.index[0], df_cast.index[-1], time_bin * 24),
        labels=False,
        include_lowest=True,
    )
    df_binned = df_cast.groupby("bin").mean()

    # Compute difference of descent rates and label bins
    df_binned["dP"] = df_binned["CTDPRS"].diff().fillna(0).round(0)
    df_binned["movement"] = pd.cut(
        df_binned["dP"], [-1000, -0.5, 0.5, 1000], labels=["up", "stop", "down"]
    )

    # Find all periods where the rosette is not moving
    df_group = df_binned.groupby(
        df_binned["movement"].ne(df_binned["movement"].shift()).cumsum()
    )
    df_list = [g for i, g in df_group]

    # Find last soak period before starting descent to target depth
    def find_last(df_list, P_downcast):
        for idx, df in enumerate(df_list):
            if df["CTDPRS"].max() < P_downcast:
                # make sure it's soak, not a stop to switch to autocast
                # (i.e. A20 2021)
                # TODO: try instead finding max depth then working backwards?
                if df.max()["movement"] == "stop" and len(df) > 1:
                    last_idx = idx
            else:
                return last_idx

        return last_idx

    # Trim off everything before last soak
    start_idx = int(df_list[find_last(df_list, P_downcast)].head(1)["index"])
    df_cast_trimmed = df_cast.loc[start_idx:].reset_index()

    return df_cast_trimmed


def ctd_align(inMat=None, col=None, time=0.0):
    """ctd_align function

    Function takes full NUMPY ndarray with predefined dtype array
    and adjusts time of sensor responce and water flow relative to
    the time frame of temperature sensor.

    Originally written by <NAME>, docstring by <NAME>.

    Need to generate alignment plots in order to properly use ctd_align.

    Args:
        param1 (ndarray): inMat, numpy ndarray with dtype array
        param2 (float): col, column to apply time advance to.
        param3 (float): time, advance in seconds to apply to raw data.

    Returns:
        Narray: The return value is ndarray with adjusted time of parameter
            specified.
    """
    # Num of frames per second.
    fl = 24

    if (inMat is not None) & (col is not None) & (time > 0.0):
        # Time to advance
        advnc = int(fl * time)
        # BUG FIX: the deprecated np.float alias was removed in NumPy 1.24
        # (raises AttributeError); the builtin float is the documented
        # replacement and is behaviorally identical here (float64).
        tmp = np.arange(advnc, dtype=float)
        last = inMat[col][len(inMat) - 1]
        tmp.fill(float(last))
        # Shift the column forward by `advnc` frames, padding the tail with
        # the last observed value.
        inMat[col] = np.concatenate((inMat[col][advnc:], tmp))

    return inMat


def raw_ctd_filter(df=None, window="triangle", win_size=24, parameters=None):
    """
    Filter raw CTD data using one of three window types (boxcar, hanning,
    triangle).

    Parameters
    ----------
    df : DataFrame
        Raw CTD data
    window : str, optional
        Type of filter window
    win_size : int, optional
        Length of window in number of samples
    parameters : list of str, optional
        List of DataFrame columns to be filtered

    Returns
    -------
    filtered_df : DataFrame
        CTD data with filtered parameters
    """
    filter_df = df.copy()
    if parameters is not None:
        for p in parameters:
            # NOTE(review): sig.boxcar/hann/triang are deprecated aliases of
            # scipy.signal.windows.* in newer SciPy — consider migrating.
            if window == "boxcar":
                win = sig.boxcar(win_size)
            elif window == "hanning":
                win = sig.hann(win_size)
            elif window == "triangle":
                win = sig.triang(win_size)
            filter_df[p] = (
                sig.convolve(filter_df[p], win, mode="same") / np.sum(win)
            )

    return filter_df


def remove_on_deck(df, stacast, cond_startup=20.0, log_file=None):
    """
    Find and remove times when rosette is on deck.

    Optionally log average pressure at start and end of cast.

    Parameters
    ----------
    df : DataFrame
        Raw CTD data
    stacast : str
        Station/cast name
    cond_startup : float, optional
        Minimum conductivity (units?)
    """
    # [truncated in source] The remainder of this function is cut off at the
    # end of this chunk; docstring and body end here in the corrupted file.
    raise NotImplementedError("remove_on_deck truncated in source")
threshold indicating rosette is in water log_file : str, optional Path and filename to save start/end deck pressure values Returns ------- trimmed_df : DataFrame Raw CTD data trimmed to times when rosette is in water """ # TODO: move these to config file # Frequency fl = 24 fl2 = fl * 2 # Half minute ms = 30 time_delay = fl * ms # time to let CTD pressure reading settle/sit on deck # split dataframe into upcast/downcast downcast = df.iloc[: (df["CTDPRS"].argmax() + 1)] upcast = df.iloc[(df["CTDPRS"].argmax() + 1) :] #
    # NOTE(review): chunk starts mid-definition — the leading "def " of
    # scorer_tf was truncated by the extraction; tokens kept as shown.
    # scorer_tf is the identity scorer: raw term frequencies pass through.
    scorer_tf(self, doc_term_freq, concepts=None, use_existing_data=False):
        return doc_term_freq

    def scorer_tfidf(self, doc_term_freq, concepts=None, use_existing_data=False, norm='l2'):
        # TF-IDF scorer. On first use the diagonal IDF matrix is computed and
        # cached on self; with use_existing_data=True the cached IDF is reused
        # (i.e. score test documents with the training IDF).
        if use_existing_data:
            (doc_term, _) = tf_to_tfidf(doc_term_freq, idf_diag=self.idf_diag,
                                        sublinear_tf=True, smooth_idf=True, norm=norm)
        else:
            (doc_term, idf_diag) = tf_to_tfidf(doc_term_freq, sublinear_tf=True,
                                               smooth_idf=True, norm=norm)
            self.idf_diag = idf_diag
        return doc_term

    def scorer_okapi(self, doc_term_freq, concepts=None, use_existing_data=False, norm='l2'):
        # Okapi BM25-style scorer; caches per-term IDFs and the average
        # document length from the training pass for reuse at test time.
        # NOTE(review): `norm` is accepted but never forwarded to tf_to_okapi.
        if use_existing_data:
            (doc_term, _, _) = tf_to_okapi(doc_term_freq, idfs=self.idfs,
                                           avg_doc_len=self.avg_doc_len)
        else:
            (doc_term, idfs, avg_doc_len) = tf_to_okapi(doc_term_freq)
            self.idfs = idfs
            self.avg_doc_len = avg_doc_len
        return doc_term

    def scorer_midf(self, doc_term_freq, concepts=None, use_existing_data=False, norm='l2'):
        # MIDF scorer; stateless (nothing cached), so use_existing_data and
        # norm are ignored here.
        (doc_term_freq_idf, ) = tf_to_midf(doc_term_freq)
        return doc_term_freq_idf

    ##########################
    # Initialization methods #
    ##########################
    def _forward_index_from_directory(self):
        """Build forward index from a directory"""
        scorer_name = self.scorer_name
        traindir = self.traindir
        arff_output = self.traindir_arff
        vocab_output = self.traindir_vocab
        with Timing('Processing training files in the folder %s...' % traindir, self.logging):
            dtf = DocToFeature(lowercase=self.lowercase, keep_nnp=self.keep_nnp,
                               transliteration=self.transliteration,
                               word_normalization=self.word_normalization)
            train_doc_term_freq = dtf.doc_to_tf(traindir)
            train_file_list = dtf.filelist
            vocabulary = dtf.vocabulary
            mapping = dtf.mapping
            # Concept name = training file basename, underscores -> spaces,
            # ".txt" extension stripped.
            concepts = [filename[filename.rfind('/') + 1:].replace('_', ' ').replace('.txt', '')
                        for filename in train_file_list]
        if arff_output is not None:
            with Timing('Dumping TF counts to %s...' % arff_output, self.logging):
                docs_arff = FeatureToArff(train_doc_term_freq, relation='TF.IDF')
                docs_arff.add_column(concepts, name='concept', type_='string')
                docs_arff.dump(arff_output, sparse=True)
            pickle_output = '%s.pickle' % arff_output[:arff_output.rfind('.')]
            with Timing('Pickling TF counts to %s...' % pickle_output, self.logging):
                # Pickle in a child process — presumably to keep the parent's
                # memory footprint down for large matrices; TODO confirm.
                def task(item, _pickle_output):
                    with open(_pickle_output, 'wb') as outfile:
                        pickle.dump(item, outfile, protocol=2)
                process = mp.Process(target=task, args=((train_doc_term_freq, concepts), pickle_output))
                process.start()
                process.join()
            train_list_output = '%s.list' % arff_output[:arff_output.rfind('.')]
            with Timing('Writing file names of %s into %s...' % (traindir, train_list_output), self.logging):
                with(open(train_list_output, 'w')) as filename_output:
                    for filename in train_file_list:
                        filename_output.write(filename + '\n')
        if vocab_output is not None:
            with Timing('Dumping vocabulary to %s...' % vocab_output, self.logging):
                # NOTE(review): pickling into a text-mode ('w') file — fine on
                # Python 2 (this module uses iteritems, so py2), but would
                # break on Python 3 ('wb' required).
                with open(vocab_output, 'w') as vocab_output_file:
                    pickle.dump(vocabulary, vocab_output_file, protocol=2)
        with Timing('Calculating feature scores using scorer %s...' % scorer_name, self.logging):
            forward_index = self.get_scorer(scorer_name)(train_doc_term_freq)
        self.forward_index_ = forward_index
        self.num_concepts_, self.num_features_ = forward_index.shape
        self.concepts_ = concepts
        self.vocabulary_ = vocabulary
        self.mapping_ = mapping

    def _forward_index_from_arff(self):
        """Build forward index from ARFF file"""
        scorer_name = self.scorer_name
        arff_file = self.trainarff
        vocab_file = self.trainarff_vocab
        with Timing('Loading and processing training data from %s using scorer %s...'
                    % (arff_file, scorer_name), self.logging):
            (train_doc_term_freq, concepts) = loadarff(arff_file)
            pickle_output = '%s.pickle' % arff_file[:arff_file.rfind('.')]
            # Cache a pickle next to the ARFF so later runs can skip parsing.
            if not os.path.exists(pickle_output):
                def task(item, _pickle_output):
                    with open(_pickle_output, 'wb') as outfile:
                        pickle.dump(item, outfile, protocol=2)
                process = mp.Process(target=task, args=((train_doc_term_freq, concepts), pickle_output))
                process.start()
                process.join()
            forward_index = self.get_scorer(scorer_name)(train_doc_term_freq)
        with Timing('Loading vocabulary from %s...' % vocab_file, self.logging):
            with open(vocab_file, 'rb') as infile:
                vocabulary = pickle.load(infile)
            # Invert vocabulary (word -> idx) into mapping (idx -> word).
            mapping = {}
            for word, idx in vocabulary.iteritems():
                mapping[idx] = word
        self.forward_index_ = forward_index
        self.num_concepts_, self.num_features_ = forward_index.shape
        self.concepts_ = concepts
        self.vocabulary_ = vocabulary
        self.mapping_ = mapping

    def _forward_index_from_pickle(self):
        """Build forward index from pickled csr_matrix"""
        scorer_name = self.scorer_name
        pickle_file = self.trainpickle
        vocab_file = self.trainpickle_vocab
        with Timing('Loading and processing training data from %s using scorer %s...'
                    % (pickle_file, scorer_name), self.logging):
            with open(pickle_file, 'rb') as infile:
                (train_doc_term_freq, concepts) = pickle.load(infile)
            forward_index = self.get_scorer(scorer_name)(train_doc_term_freq)
        with Timing('Loading vocabulary from %s...' % vocab_file, self.logging):
            # NOTE(review): the as-target shadows the `vocab_file` path
            # variable; harmless here since the path is not used afterwards.
            with open(vocab_file, 'rb') as vocab_file:
                vocabulary = pickle.load(vocab_file)
            mapping = {}
            for word, idx in vocabulary.iteritems():
                mapping[idx] = word
        self.forward_index_ = forward_index
        self.num_concepts_, self.num_features_ = forward_index.shape
        self.concepts_ = concepts
        self.vocabulary_ = vocabulary
        self.mapping_ = mapping

    def _invert_index(self):
        """Invert the forward index"""
        forward_index = self.forward_index_
        with Timing('Inverting index...', self.logging):
            inverted_index = forward_index.transpose(copy=True).tocsr()
            # Remove insignificant term-concept association
            #inverted_index.data[inverted_index.data<=1e-3] = 0
            self.inverted_index_ = inverted_index
        # Word informativeness based on:
        # http://www.ica.stc.sh.cn/picture/article/176/b8/e2/b5e4932249ec8284bb8a86866ec3/3b0d0bff-0e05-4d26-ba6f-85d15924594f.pdf
        # With corrected formula based on the description
        with Timing('Getting IDF scores...', self.logging):
            # Document frequency per term = number of stored entries per CSR row.
            df = np.diff(inverted_index.indptr)
            idf = np.log(float(self.num_concepts_) / df)
            mul = 1.1
            exp_df = 0.25*np.sqrt(self.num_concepts_)
            fw = mul*abs(np.log(exp_df/df))
            word_info = idf - fw
            # Rescale informativeness to [0, 1].
            word_info = word_info-min(word_info)
            self.word_info_ = word_info/max(word_info)

    def _generate_term_concept_index(self):
        """Generate term concept index"""
        concepts = self.concepts_
        vocabulary = self.vocabulary_
        with Timing('Creating term-concept index...', self.logging):
            dtf = DocToFeature(lowercase=self.lowercase, keep_nnp=self.keep_nnp,
                               transliteration=self.transliteration,
                               word_normalization=self.word_normalization)
            # concept_tf is the term count for each concept name, where each concept name is treated like one document
            concept_tf = dtf.str_to_tf(concepts, vocabulary=vocabulary)
            # concept_term_index is the normalized count
            concept_term_index = self.get_scorer(self.scorer_name)(concept_tf, use_existing_data=True, norm='l2')
            # term_concept_index is the transposed matrix from concept_term_index
            term_concept_index = concept_term_index.transpose(copy=False).tocsr()
            self.term_concept_index_ = term_concept_index

    def initialize(self):
        """Initialize the extractor

        **Notes**

        When initializing from directory (i.e., with traindir specified),
        please be informed that the training might take a very long time,
        depending on the amount of training data. In Knorex working
        environment, CountVectorizer in scikit-learn has been modified to
        support multiprocessing, and so the initialization process can be
        faster. It's on the branch "parallel_vectorizer"

        Because of that, whenever the scikit-learn is updated, we need to make
        sure that the "knx_patch_mpcv" branch is still working.
        """
        with Timing('Initializing text processing components...', self.logging):
            self.dtf = DocToFeature(lowercase=self.lowercase, keep_nnp=self.keep_nnp,
                                    transliteration=self.transliteration,
                                    word_normalization=self.word_normalization)
        with Timing('Initializing ner-tagger component...', self.logging):
            self.ner_tagger = KnorexNERTagger()
        with Timing('Initializing np chunker component...', self.logging):
            self.np_chunker = MaxentNPChunker()
        with Timing('Connect to Wikipedia database...', self.logging):
            # NOTE(review): host/port are hard-coded; collection name
            # 'TittleId' looks like a typo of 'TitleId' but must match the
            # actual Mongo collection — verify before changing.
            self.client = MongoClient('localhost', 27017)
            self.db = self.client['wikipedia']
            self.coll = self.db['TittleId']
        # Exactly one training source must be configured.
        if self.traindir:
            self._forward_index_from_directory()
        elif self.trainarff:
            self._forward_index_from_arff()
        elif self.trainpickle:
            self._forward_index_from_pickle()
        else:
            raise Exception('No training directory or ARFF or pickle file has been specified')
        self._invert_index()
        self._generate_term_concept_index()
        gc.collect()

    def check_initialized(self):
        # Guard: callers must run initialize() before any extraction.
        if self.inverted_index_ is None:
            raise Exception('Inverted index has not been built! Run initialize() first')

    ############################
    # Batch extraction process #
    ############################
    def extract_from_directory(self, dirname, n=10, with_score=False, extraction_output=None):
        """Extract top concepts and top words from the given directory

        **Parameters**

        dirname : string
            The directory containing files that are to be extracted
        n : int, optional, 10 by default
            The number of concepts and words to be extracted
        with_score : boolean, optional, False by default
            Whether to return the score associated with each concept and word
        extraction_output : string, optional, None by default
            The file name to which the extraction output will be printed as
            JSON dump.

        **Returns**

        extraction_output : list
            The extraction output will always be returned in a list of tuple,
            where each tuple contains:

            top_concepts : list
                This will be a list of strings if with_score=False is used,
                otherwise it will be a list of (concept, score) tuple
            top_phrases : list
                This will be a list of strings if with_score=False is used,
                otherwise it will be a list of (phrase, score) tuple
        """
        # Default output name: "<last path component>.out" in the CWD.
        if extraction_output is None:
            if dirname.find('/') >= 0:
                extraction_output = '%s.out' % dirname[dirname.rfind('/') + 1:]
            else:
                extraction_output = '%s.out' % dirname
        with Timing('Processing test files in the folder %s...' % dirname, self.logging):
            results = []
            for filename in sorted(os.listdir(dirname), key=lambda x: x.lower()):
                if filename == '.DS_Store':
                    continue
                with open(os.path.join(dirname, filename), 'r') as infile:
                    text = infile.read()
                # Strip the extension via the modulo trick: with a dot at
                # index i this yields filename[:i].
                # NOTE(review): when there is NO dot, rfind returns -1 and the
                # expression drops the final character of the name instead of
                # keeping it whole — confirm intended.
                title = filename[:(filename.rfind('.') + len(filename)) % len(filename)]
                result = self.extract(text, title=title, n=n, with_score=with_score)
                results.append((filename, result))
        with Timing('Writing output to %s...' % extraction_output, self.logging):
            with open(extraction_output, 'w') as outfile:
                json.dump(results, outfile)
        return results

    ######################
    # Extraction methods #
    ######################
    def _interpret(self, test_doc_term, test_doc_tf=None, boost_concept=None, boost_lower=None):
        """Convert a term weight matrix into interpretation matrix

        The test_doc_tf is used for concept boosting
        """
        inverted_index = self.inverted_index_
        term_concept_index = self.term_concept_index_
        mapping = self.mapping_
        concepts = self.concepts_
        if boost_concept is None:
            boost_concept = self.boost_concept
        if boost_lower is None:
            boost_lower = self.boost_lower
        with Timing('Calculating interpretation vector...', self.logging):
            interpretation_vector = test_doc_term * inverted_index
        if boost_concept:
            # NOTE(review): `None in [matrix, ...]` relies on `== None`
            # comparisons against (possibly sparse) matrices — verify this
            # behaves as a plain is-None check for the types actually passed.
            if None in [term_concept_index, test_doc_tf]:
                LOGGER.warn('Concept boosting requested but either term_concept_index or test_docs_tf is not '
                            'available!')
            else:
                # docs_term_index is test_doc_tf being l2-normalized
                with Timing('Calculating term weight scores...', self.logging):
                    docs_term_index = self.get_scorer(self.scorer_name)(test_doc_tf, use_existing_data=True, norm='l2')
                with Timing('Calculating concept multiplier...', self.logging):
                    # Perform concept multiplier calculation for each concept:
                    #     multiplier = 2^(1/sum (w_i.c'_i))
                    #     with c'_i = tanh(1/(1-log(c_i)))/tanh(1) as the modified count
                    # where w_i is the weight of word i in concept matrix
                    # and c_i is the normalized count of word i in the document
                    concept_multiplier = docs_term_index * term_concept_index
                    if boost_lower:
                        concept_multiplier.data = self.COTH1 * np.tanh(1 / (1 - np.log(concept_multiplier.data ** 2)))
                    # The -1 is because this multiplier works as an addition to original matrix
                    # So later the multiplication can be done efficiently by using (or the equivalent):
                    #     interpretation_vector += interpretation_vector.multiply(concept_multiplier)
                    concept_multiplier.data = np.exp2(concept_multiplier.data) - 1
                if DEBUG:
                    # Debug process: print top 10 multipliers
                    docs_term_index_lil = docs_term_index.tolil()
                    top_concept_multiplier_indices = np.argsort(concept_multiplier.getrow(0).toarray()[0])[:-11:-1]
                    concept_multiplier_lil = concept_multiplier.tolil()
                    method_name = 'Core'
                    if boost_concept:
                        method_name = 'Core + Concept boost'
                        if boost_lower:
                            # NOTE(review): chunk truncated here, mid string
                            # literal; the remainder of _interpret lies
                            # outside this view.
                            method_name = 'Core + Concept boost with
<filename>angr/analyses/veritesting.py
import logging
from collections import defaultdict

import networkx

from simuvex import SimProcedures, o

from ..errors import AngrError, AngrCFGError
from ..analysis import Analysis, register_analysis
from ..path_group import PathGroup
from ..path import Path, AngrPathError

l = logging.getLogger('angr.analyses.veritesting')


class VeritestingError(Exception):
    # Base error type for failures inside the Veritesting analysis.
    pass


class CallTracingFilter(object):
    # SimProcedures whose calls are allowed through the filter.
    whitelist = {
        SimProcedures['cgc']['receive'],
        SimProcedures['cgc']['transmit'],
        SimProcedures['libc.so.6']['read'],
    }

    # Class-level cache shared by all filter instances, keyed on
    # (address, jumpkind) -> (CFG, child filter).
    cfg_cache = { }

    def __init__(self, project, depth, blacklist=None):
        self.project = project
        self.blacklist = [ ] if blacklist is None else blacklist
        self._skipped_targets = set()
        self.depth = depth

    def filter(self, call_target_state, jumpkind):
        """
        The call will be skipped if it returns True.

        :param call_target_state: The new state of the call target.
        :param jumpkind: The Jumpkind of this call.
        :returns: True if we want to skip this call, False otherwise.
        """
        ACCEPT = False
        REJECT = True

        l.debug('Filtering calling target %s', call_target_state.ip)

        # Currently we always skip the call, unless the target function satisfies one of the following conditions:
        # 1) It's a SimProcedure that are in the whitelist
        # 2) It's a function that has no loops, and no calls/syscalls,
        # 3) It's a function that has no loops, and only has calls to another function that will not be filtered out by
        #    this filter
        # Generate a CFG

        ip = call_target_state.ip

        # Cap recursion: each nested CFG build creates a deeper filter.
        if self.depth >= 5:
            l.debug('Rejecting target %s - too deep, depth is %d', ip, self.depth)
            return REJECT

        try:
            addr = call_target_state.se.exactly_int(ip)
        # NOTE(review): SimValueError and SimSolverModeError are not imported
        # in this module (only SimProcedures and o are) — this except clause
        # would itself raise NameError when triggered.
        except (SimValueError, SimSolverModeError):
            self._skipped_targets.add(-1)
            l.debug('Rejecting target %s - cannot be concretized', ip)
            return REJECT

        # Is it in our blacklist?
        if addr in self.blacklist:
            self._skipped_targets.add(addr)
            l.debug('Rejecting target 0x%x - blacklisted', addr)
            return REJECT

        # If the target is a SimProcedure, is it on our whitelist?
        if self.project.is_hooked(addr) and type(self.project._sim_procedures[addr][0]) in CallTracingFilter.whitelist:
            # accept!
            l.debug('Accepting target 0x%x, jumpkind %s', addr, jumpkind)
            return ACCEPT

        # If it's a syscall, let's see if the real syscall is inside our whitelist
        if jumpkind.startswith('Ijk_Sys'):
            call_target_state.scratch.jumpkind = jumpkind
            tmp_path = self.project.factory.path(call_target_state)
            tmp_path.step()
            next_run = tmp_path.next_run
            if type(next_run) in CallTracingFilter.whitelist:
                # accept!
                l.debug('Accepting target 0x%x, jumpkind %s', addr, jumpkind)
                return ACCEPT
            else:
                # reject
                l.debug('Rejecting target 0x%x - syscall %s not in whitelist', addr, type(next_run))
                return REJECT

        cfg_key = (addr, jumpkind)
        if cfg_key not in self.cfg_cache:
            # Build a one-call-deep CFG of the target, tracing nested calls
            # with a child filter whose blacklist includes this target (so
            # direct recursion is rejected).
            new_blacklist = self.blacklist[ :: ]
            new_blacklist.append(addr)
            tracing_filter = CallTracingFilter(self.project, depth=self.depth + 1, blacklist=new_blacklist)
            cfg = self.project.analyses.CFGAccurate(starts=((addr, jumpkind),),
                                                    initial_state=call_target_state,
                                                    context_sensitivity_level=0,
                                                    call_depth=1,
                                                    call_tracing_filter=tracing_filter.filter,
                                                    normalize=True
                                                    )
            self.cfg_cache[cfg_key] = (cfg, tracing_filter)

            try:
                cfg.force_unroll_loops(1)
            except AngrCFGError:
                # Exceptions occurred during loop unrolling
                # reject
                l.debug('Rejecting target 0x%x - loop unrolling failed', addr)
                return REJECT

        else:
            l.debug('Loading CFG from CFG cache')
            cfg, tracing_filter = self.cfg_cache[cfg_key]

        if cfg._loop_back_edges:
            # It has loops!
            self._skipped_targets.add(addr)
            l.debug('Rejecting target 0x%x - it has loops', addr)
            return REJECT

        sim_procedures = [ n for n in cfg.graph.nodes() if n.simprocedure_name is not None ]
        for sp_node in sim_procedures:
            if not self.project.is_hooked(sp_node.addr):
                # This is probably a PathTerminator
                # Just skip it for now
                continue
            # NOTE(review): accesses `_sim_procedures[addr].procedure` here but
            # `_sim_procedures[addr][0]` above — one of the two indexing styles
            # is presumably stale; confirm against the angr version in use.
            if self.project._sim_procedures[sp_node.addr].procedure not in CallTracingFilter.whitelist:
                self._skipped_targets.add(addr)
                l.debug('Rejecting target 0x%x - contains SimProcedures outside whitelist', addr)
                return REJECT

        if len(tracing_filter._skipped_targets):
            # Bummer
            self._skipped_targets.add(addr)
            l.debug('Rejecting target 0x%x - should be skipped', addr)
            return REJECT

        # accept!
        l.debug('Accepting target 0x%x, jumpkind %s', addr, jumpkind)
        return ACCEPT


class Veritesting(Analysis):
    # A cache for CFG we generated before
    cfg_cache = { }
    # Names of all stashes we will return from Veritesting
    all_stashes = ('successful', 'errored', 'deadended', 'deviated', 'unconstrained')

    def __init__(
        self, input_path, boundaries=None, loop_unrolling_limit=10, enable_function_inlining=False,
        terminator=None, deviation_filter=None, path_callback=None
    ):
        """
        SSE stands for Static Symbolic Execution, and we also implemented an extended version of Veritesting
        (Avgerinos, Thanassis, et al, ICSE 2014).

        :param input_path: The initial path to begin the execution with.
        :param boundaries: Addresses where execution should stop.
        :param loop_unrolling_limit: The maximum times that Veritesting should unroll a loop for.
        :param enable_function_inlining: Whether we should enable function inlining and syscall inlining.
        :param terminator: A callback function that takes a path as parameter. Veritesting will terminate
                           if this function returns True.
        :param deviation_filter: A callback function that takes a path as parameter. Veritesting will put the
                                 path into "deviated" stash if this function returns True.
        :param path_callback: A callback function that takes a path as parameter. Veritesting will call this
                              function on every single path after their next_run is created.
        """
        block = self.project.factory.block(input_path.addr)
        branches = block.vex.constant_jump_targets_and_jumpkinds

        # if we are not at a conditional jump, just do a normal path.step
        # NOTE(review): comparing dict .values() to a list like this only
        # works on Python 2 (where .values() is a list); on Python 3 this
        # comparison is always False-shaped and the guard always triggers.
        if not branches.values() == ['Ijk_Boring', 'Ijk_Boring']:
            self.result, self.final_path_group = False, None
            return

        # otherwise do a veritesting step
        self._input_path = input_path.copy()
        self._boundaries = boundaries if boundaries is not None else [ ]
        self._loop_unrolling_limit = loop_unrolling_limit
        self._enable_function_inlining = enable_function_inlining
        self._terminator = terminator
        self._deviation_filter = deviation_filter
        self._path_callback = path_callback

        # set up the cfg stuff
        self._cfg, self._loop_graph = self._make_cfg()
        self._loop_backedges = self._cfg._loop_back_edges
        self._loop_heads = set([ dst.addr for _, dst in self._loop_backedges ])

        l.info("Static symbolic execution starts at 0x%x", self._input_path.addr)
        l.debug(
            "The execution will terminate at the following addresses: [ %s ]",
            ", ".join([ hex(i) for i in self._boundaries ])
        )
        l.debug("A loop will be unrolled by a maximum of %d times.", self._loop_unrolling_limit)
        if self._enable_function_inlining:
            l.debug("Function inlining is enabled.")
        else:
            l.debug("Function inlining is disabled.")

        self.result, self.final_path_group = self._veritesting()

    def _veritesting(self):
        """
        Perform static symbolic execution starting from the given point.
        """
        p = self._input_path.copy()

        try:
            new_path_group = self._execute_and_merge(p)
        # NOTE(review): ClaripyError, SimError and the bare
        # BYPASS_VERITESTING_EXCEPTIONS (used as o.BYPASS_... elsewhere in
        # this file) are not imported here — this handler raises NameError
        # when exercised.
        except (ClaripyError, SimError, AngrError):
            if not BYPASS_VERITESTING_EXCEPTIONS in p.state.options:
                raise
            else:
                l.warning("Veritesting caught an exception.", exc_info=True)
            # NOTE(review): {'deviated', p} is a SET literal; PathGroup's
            # stashes presumably expect a dict like {'deviated': [p]} —
            # confirm against the PathGroup API.
            return False, PathGroup(self.project, stashes={'deviated', p})
        except VeritestingError as ex:
            l.warning("Exception occurred: %s", str(ex))
            return False, PathGroup(self.project, stashes={'deviated', p})

        l.info(
            'Returning new paths: (successful: %s, deadended: %s, errored: %s, deviated: %s)',
            len(new_path_group.successful), len(new_path_group.deadended),
            len(new_path_group.errored), len(new_path_group.deviated)
        )

        return True, new_path_group

    def _execute_and_merge(self, path):
        """
        Symbolically execute the program in a static manner. The basic idea is that we look ahead by creating a CFG,
        then perform a _controlled symbolic exploration_ based on the CFG, one path at a time. The controlled symbolic
        exploration stops when it sees a branch whose both directions are all feasible, or it shall wait for a merge
        from another path.

        A basic block will not be executed for more than *loop_unrolling_limit* times. If that is the case, a new state
        will be returned.

        :param path: The initial path to start the execution.
        :returns: A list of new states.
        """
        # Remove path._run
        path._run = None

        # Find all merge points
        merge_points = self._get_all_merge_points(self._cfg, self._loop_graph)
        l.debug('Merge points: %s', [ hex(i[0]) for i in merge_points ])

        #
        # Controlled symbolic exploration
        #

        # Initialize the beginning path
        initial_path = path
        initial_path.info['loop_ctrs'] = defaultdict(int)

        path_group = PathGroup(
            self.project,
            active_paths=[ initial_path ],
            immutable=False,
            resilience=o.BYPASS_VERITESTING_EXCEPTIONS in initial_path.state.options
        )
        # Initialize all stashes
        for stash in self.all_stashes:
            path_group.stashes[stash] = [ ]
        # immediate_dominators = cfg.immediate_dominators(cfg.get_any_node(ip_int))

        while path_group.active:
            # Step one step forward
            l.debug('Steps %s with %d active paths: [ %s ]', path_group, len(path_group.active), path_group.active)

            # Apply self.deviation_func on every single active path, and move them to deviated stash if needed
            if self._deviation_filter is not None:
                path_group.stash(filter_func=self._deviation_filter, from_stash='active', to_stash='deviated')

            # Mark all those paths that are out of boundaries as successful
            path_group.stash(
                filter_func=self.is_path_overbound,
                from_stash='active', to_stash='successful'
            )

            path_group.step(
                successor_func=lambda p: self.generate_successors(p, path_group),
                check_func=self.is_path_errored
            )
            if self._terminator is not None and self._terminator(path_group):
                for p in path_group.unfuck:
                    self._unfuck(p)
                break

            # Stash all paths that we do not see in our CFG
            path_group.stash(
                filter_func=self._path_not_in_cfg,
                to_stash="deviated"
            )

            # Stash all paths that we do not care about
            path_group.stash(
                filter_func= lambda p: (
                    p.state.scratch.jumpkind not in ('Ijk_Boring', 'Ijk_Call', 'Ijk_Ret', 'Ijk_NoHook')
                    and not p.state.scratch.jumpkind.startswith('Ijk_Sys')
                ),
                to_stash="deadended"
            )
            if path_group.deadended:
                l.debug('Now we have some deadended paths: %s', path_group.deadended)

            # Stash all possible paths that we should merge later
            for merge_point_addr, merge_point_looping_times in merge_points:
                path_group.stash_addr(
                    merge_point_addr,
                    to_stash="_merge_%x_%d" % (merge_point_addr, merge_point_looping_times)
                )

            # Try to merge a set of previously stashed paths, and then unstash them
            if not path_group.active:
                merged_anything = False

                for merge_point_addr, merge_point_looping_times in merge_points:
                    if merged_anything:
                        break

                    stash_name = "_merge_%x_%d" % (merge_point_addr, merge_point_looping_times)
                    if stash_name not in path_group.stashes:
                        continue

                    stash_size = len(path_group.stashes[stash_name])
                    if stash_size == 0:
                        continue
                    if stash_size == 1:
                        # NOTE(review): log message passes stash_size where the
                        # %s placeholder clearly means the stash NAME.
                        l.info("Skipping merge of 1 path in stash %s.", stash_size)
                        path_group.move(stash_name, 'active')
                        continue

                    # NOTE(review): chunk truncated here — the rest of
                    # _execute_and_merge lies outside this view.
                    # let
<reponame>sneklingame/FakahedaSEServerBackupManager #!/usr/bin/env python # encoding: utf-8 # Copyright (c) 2021 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ A simple module providing an overview of one's FakaHeda SE server and automatic backup and restore functions. 
API reference: https://forum.fakaheda.eu/viewtopic.php?f=126&t=35153 """ from __future__ import annotations __title__ = "FakahedaSEServerBackupManager" __abbrev__ = "FSSBM" __author__ = "<NAME>" __version__ = "1.0" __year__ = 2021 import datetime import io import json import os import shutil import sys import threading import time import typing import PySimpleGUI as sg import PySimpleGUIQt as sgqt import ftputil import requests from PIL import Image from ftputil import error # -------------------------------------------------------------------------------------------------------------------- # # STATICS B64 # -------------------------------------------------------------------------------------------------------------------- # ICON_SUCCESS = b"""iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABGdBT<KEY>""" ICON_ERROR = b"""<KEY>""" DEFAULT_ICON = b"""iVBORw0KGgoAAAANSUhEUgAAABkAAAASCAYAAACuLnWgAAAACXBIWXMAAAsTAAALEwEAmpwYAAAJsWlUWHRYTUw6Y29tLmFkb2JlL nhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4KPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuc zptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS41LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmL XN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb 20veGFwLzEuMC8iPgogICAgICAgICA8eG1wOkNyZWF0b3JUb29sPlpvbmVyIFBob3RvIFN0dWRpbyBYPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgIDwvcmRmO kRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgI 
CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI 
CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgI CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgI CAgICAgICAgICAgICAgICAgCjw/eHBhY2tldCBlbmQ9InciPz7dPuZdAAABxElEQVQ4T7XVT4hOURjH8c8z3SmNEWVjaQwrC4kVKX8bEosxUkpSs/CnlGykK Bv2LJkSGxazszBJGSlCWdhOsWQx5c9uyjwW933H+565Zkb41u3e+/s9p1/n3HOeW1mAzOzFCI5iK1bjO97iLsYjIucG/IaqFNpk5g6MYUNh9eFg63qcmcci4 <KEY>"" ICON_SETTINGS = b"""<KEY> OkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg 
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAg ICAgICAgICAgICAgICAgICAgCjw/eHBhY<KEY> <KEY> DaSBiDiSmY/j5U7sLL7FDNbjsk7sT2xfjAHz5Qps7OjTuDsivqnxldiDe2r8JL6s6/PS<KEY>ARMZuZD+BmZSAuxXOZ+T1ei4jjbe4gGtza047j k54mIk5k5kE8XKVt9f837KvrgTQ4Xf9bltTfIJb2BYO1BTS4UynBsxjBSmzGe528ti93dKSjmMXhjiYzl/YHoomIyRqcMF+ClzJzBh9HRGY5qHuwusZ/xURE zNRrmbkcn+PnzNyC2/BpRJxqp2sUt7Q34HLlcE5l5iyux0Wd+BROQWZejKeVHY0p1flKGfsX8Vjbi+24oa5bluCantayAbvwBLbi0U7spPnDfJj5hj+v9GE9 9isGWyxs6lHl6ccwh8mq78VTyu7vxft4UNnF7sy8r4GIOJaZm3AX3o6IvzLzzXoTvKU88QhewL6I+LCW6gBG8WPUd1lm7sUzNf+Kc6MbEcfwRnutzH/LdESk 
0vCtHX0O1ynHYHUdnq+Vpl+C73BT93z02alsGX7qBloi4kxmrsWTSsk+U3KvUko+hk1DTaJ8oI709T71TbAOqZyba/GHUq5xfDHU5F+yAx8oH7wfFLNdEXGW f/78LpqImMYrkJnjONEacIFMutQBWsAFNxnE355s6ItR+L2UAAAAAElFTkSuQmCC""" ICON_WHITE_DOT = b"""iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAJxWlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJ lZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4KPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1 QIENvcmUgNS41LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA 8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iPgogICAgICA gICA8eG1wOkNyZWF0b3JUb29sPlpvbmVyIFBob3RvIFN0dWRpbyBYPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3J kZjpSREY+CjwveDp4bXBtZXRhPgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA 
gICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgCjw /eHBhY2tldCBlbmQ9InciPz4KPD94cGFja2V0IGVuOT+UWh8XJ63Hws0AAADjSURBVBhXZY8vTsRgEEfffP9CN6mshxOA4ABIMKimem8ADtfUcAGSRXEDTC2 CIBAQwh1wFdhN2my/LoNpN9B9yWQm83sjxjGjqioz9p+/ezcNMcZza+0tcAJoWZavwzDchBA+duIwDEtjzL2IJNMhcOace4kxXnrvn13TNAtjzN1MAkBEFtb 
aB+DIZVl2CuhcmhCRQwCnqhtA/sf7uLquP/M8XwPpPFRVBd4BXFEU2xhjbq19Ag5ExI/SBlj3fb+E8Wvv/VvXdcchhCvgAtiq6mPbtqs0Tb93IkCSJF/A9Vh 7/AJWr0oYcdErmQAAAABJRU5ErkJggg==""" ICON_RELOAD = b"""iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwYAAAJsWlUWHRYTUw6Y29tLmFkb2JlLn htcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4KPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpucz ptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS41LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLX N5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb2 0veGFwLzEuMC8iPgogICAgICAgICA8eG1wOkNyZWF0b3JUb29sPlpvbmVyIFBob3RvIFN0dWRpbyBYPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgIDwvcmRmOk Rlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC 
AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgIC AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgIC AgICAgICAgICAgICAgICAgCjw/eHBhY2tldCBlbmQ9InciPz7dPuZdAAABW0lEQVQ4T6XTsWpVQRAG4G8uC/ZqejEIglpY6ANEMSA+gIiNQYxKGiEg2HjtDX aS6AukUSxECILExkQshIC9+AAB7VLoWJy9526uuVr4w3B2/tkZZs8/U0wgM89gERdwHIFveI/nEbHd3D1SGucQVnAHgxFfMVttITPXcRsPsFPok1/jYp8yHV d13c3g+qiDx/Ynv9F18xE/cRZLuFbjM/WrZOYp3B0RGEbEo8aHLWxl5lFcagMFt4zfvHFAMsjMFRPJdAXmGv9Jc+6RmYfxoVqLTwXHWqI594iIXbyY5Ok6+C 
8UfMXp6p/DRh89AJl5AsPqfi94Z1zgnn8UwH1jOV8WPNNpPMB8Zj78ixI3sdBQqyUivmTmU10RGGbmed0gbeOXPwcJXkXE29FPXMZJ42m8XG0aPuMGVYWI2M vMK6YvU4t1LEbEDxoZI2IPS5m5Zv86D3TrvGlineE3KxhpsO1CfqQAAAAASUVORK5CYII=""" # -------------------------------------------------------------------------------------------------------------------- # # INIT & CONFIGURATION # -------------------------------------------------------------------------------------------------------------------- # # SYSTEM REDIRECT_STDOUT_STDERR_TO_FILE = True LOG_FILENAME = "latest.log" # GUI sg.MENU_SHORTCUT_CHARACTER = '&' _COLOR_LIGHT_GREY = "#000000" _LIST_FOREGROUND_COLOR = "#555555" MIN_AUTO_BACKUP_INTERVAL = 5 MAX_AUTO_BACKUP_INTERVAL = 60 # PATHS CONFIG_FILENAME = os.path.abspath("config.json") _DEFAULT_CONFIG = {"fakaheda": { "api_url": "https://www.fakaheda.eu/fhapi/v1/servers", "server_id": 123456, "api_token": "", "json_feed_url": "https://query.fakaheda.eu/{server_ip}{port}.feed"}, "ftp": {"host": "https://example.org/", "user": "user", "password": "", "dir": "/Saves", "blacklist": ["/Backup"]}, "backups_dir": "backups", "auto_backup": False, "interval": MIN_AUTO_BACKUP_INTERVAL, "last_run": 10 ** 9} # -------------------------------------------------------------------------------------------------------------------- # # UTILS # -------------------------------------------------------------------------------------------------------------------- # class Util: class ObjectifiedDict(dict): def __getattr__(self, item): return self.get(item) def __setattr__(self, key, value): self[key] = value class BackupRestorer: def __init__(self, backup: Backup): self.backup = backup self.action = None def run(self): self.action = "__SERVER_STOP__" print("[RESTORER] Stopping server...") server.stop() while True: if not server.status.is_running: break time.sleep(5) self.action = "__UPLOAD__" print("[RESTORER] Uploading files...") Util.upload_backup(self.backup) self.action = "__SERVER_START__" print("[RESTORER] Starting 
server...") server.start() while True: if server.status.is_running: break self.action = "__EXIT__" @staticmethod def get_folder_size(start_path: str): total_size = 0 for dirpath, dirnames, filenames in os.walk(start_path): for f in filenames: fp = os.path.join(dirpath, f) # skip if it is symbolic link if not os.path.islink(fp): total_size += os.path.getsize(fp) return total_size @classmethod def json_load(cls, filename: str, encoding: str = None): with open(filename, "r", encoding="utf-8" if encoding is None else encoding) as file: return json.load(file, object_hook=cls.ObjectifiedDict) @staticmethod def json_dump(filename: str, data: dict, encoding: str = None): with open(filename, "w", encoding="utf-8" if encoding is None else encoding) as file: json.dump(data, file, sort_keys=False, ensure_ascii=False, indent=4) @classmethod def get_config(cls): return cls.json_load(CONFIG_FILENAME) @classmethod def save_config(cls, data: dict): cls.json_dump(CONFIG_FILENAME, data) @staticmethod def to_megabytes(size: float): return "{:.2f} MB".format(size / (1024 ** 2)) @staticmethod def menu_bar( menu_def: list[list], text_color: str = None, background_color: str = None, pad: typing.Union[ tuple[int, int], tuple[tuple[int, int], tuple[int, int]], tuple[int, tuple[int, int]], tuple[tuple[int, int], int] ] = (0, 0) ): if text_color is None: text_color = sg.theme_text_color() if background_color is None: background_color = sg.theme_background_color() row = [] for menu in menu_def: text = menu[0] if sg.MENU_SHORTCUT_CHARACTER in text: text = text.replace(sg.MENU_SHORTCUT_CHARACTER, '') if text.startswith(sg.MENU_DISABLED_CHARACTER): disabled = True text = text[len(sg.MENU_DISABLED_CHARACTER):] else: disabled = False row += [sg.ButtonMenu(text, menu, border_width=0, button_color=(text_color, background_color), key=text, pad=pad, disabled=disabled, font="Any 9 bold")] return sg.Column([row], background_color=background_color, pad=(0, 0), expand_x=True) @staticmethod def popup(*args, 
**kwargs): p = Popup(*args, **kwargs) return p.event @staticmethod def image_preprocessor(path: str, max_size: tuple[int, int] = (600, 300)): img = Image.open(path) img.thumbnail(max_size) bio = io.BytesIO() img.save(bio, format="PNG") del img return bio.getvalue() @classmethod def img_from_url(cls, url: str, *args, **kwargs): data = requests.get(url, stream=True).raw return cls.image_preprocessor(data, *args, **kwargs) @staticmethod def download_backup(): global ftp print("[BACKUP-DOWNLOAD] Downloading backup...") ftp = FTPManager(FTP_HOST, FTP_USER, FTP_PASSWORD) ftp.download_folder(FTP_DIR, os.path.join(BACKUPS_DIR, str(datetime.datetime.now().timestamp())), FTP_BLACKLIST) ftp.close() print("[BACKUP-DOWNLOAD] Backup downloaded") ftp = None @staticmethod def upload_backup(backup): global ftp print("[BACKUP-UPLOAD] Uploading backup...") ftp = FTPManager(FTP_HOST, FTP_USER, FTP_PASSWORD) ftp.upload_folder(os.path.join(BACKUPS_DIR, backup.path), FTP_DIR) ftp.close() print("[BACKUP-UPLOAD] Backup uploaded") ftp = None class Logger: INPUT_STREAM_STDOUT = "stdout" INPUT_STREAM_STDERR = "stderr" STDOUT = sys.stdout STDERR = sys.stderr def __init__(self, output_stream: typing.TextIO, input_stream: str): if input_stream != self.INPUT_STREAM_STDOUT and input_stream != self.INPUT_STREAM_STDERR: raise AttributeError("input_stream must be either 'stdout' or 'stderr'") self.output_stream = output_stream self.input_stream = input_stream def write(self, string: str): if string and string != "\n": if self.input_stream == self.INPUT_STREAM_STDOUT: self.STDOUT.write(string + "\n") self.output_stream.write( datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + ": " + string + "\n") self.output_stream.flush() else: self.STDERR.write(string) self.output_stream.write( datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + ": [SYS_ERR] " + string.replace( "\n", "") + "\n") self.output_stream.flush() def flush(self): pass # 
-------------------------------------------------------------------------------------------------------------------- # # API # -------------------------------------------------------------------------------------------------------------------- # class ServerException(BaseException): pass class InvalidResponse(ServerException): pass class Server: ENDPOINT_STATUS = "status" ENDPOINT_START = "start_async" ENDPOINT_STOP = "stop_async" ENDPOINT_RESTART = "restart_async" def __init__(self, host: str, server_id: int, token: str, json_feed_url: str): self.host = host self.server_id = server_id self.token = token self.json_feed_url = json_feed_url self.headers = {"Authorization": "Bearer " + self.token} self.url = API_BASE_URL + "/" + str(SERVER_ID) def send_request(self, endpoint: str): if endpoint: return requests.get(url=self.url + "/" + endpoint, headers=self.headers) @property def _json_feed(self) -> dict: return requests.get(url=self.json_feed_url).json() @property def status(self) -> Util.ObjectifiedDict: status_ = self.send_request(self.ENDPOINT_STATUS).json() if not status_.get("result"): raise InvalidResponse("InvalidResponse: " + repr(status_)) return Util.ObjectifiedDict(status_ | self._json_feed) def start(self): print("[SERVER] Sending request to endpoint " + self.ENDPOINT_START) return self.send_request(self.ENDPOINT_START).status_code def stop(self): print("[SERVER] Sending request to endpoint " + self.ENDPOINT_STOP) return self.send_request(self.ENDPOINT_STOP).status_code def restart(self): print("[SERVER] Sending request to endpoint " + self.ENDPOINT_RESTART) return self.send_request(self.ENDPOINT_RESTART).status_code class Player: def __init__(self, data: Util.ObjectifiedDict): self.data = data self.name = data.name self.score = data.score self.time = data.time self.kills = data.kills self.deaths = data.deaths self.ping = data.ping class FTPManager(ftputil.FTPHost): class _IOStream: def __init__(self, total_size: float): self.total_size = total_size 
self.cum_size = 0 def add_size(self, size: float): self.cum_size += size @property def progress(self): return "{:.2f} %".format((self.cum_size / self.total_size) * 100) @property def finished(self): return self.cum_size == self.total_size # ----------------------------------- # def __init__(self, *args, **kwargs): self.down_stream = None self.up_stream = None super().__init__(*args, **kwargs) self.def_wd = self.getcwd() def folder_size(self, folder: str, blacklist: list[str] = None): self.chdir(self.def_wd) self.chdir(folder) size = 0 for item in self.walk("."): if not any(val in item[0] for val in (blacklist or [])): for file in item[2]: size += self.path.getsize(self.path.join(item[0], file)) self.chdir(self.def_wd) return size def download_folder(self, remote_folder: str, local_folder: str = None, blacklist: list[str] = None): if local_folder is not None: try: os.mkdir(local_folder) except OSError: pass self.down_stream = self._IOStream(self.folder_size(remote_folder, blacklist)) self.chdir(self.def_wd) self.chdir(remote_folder) for item in self.walk("."): if not any(val in item[0] for val in (blacklist or [])): try: os.mkdir(os.path.join(local_folder, item[0])) except OSError: pass for file in item[2]: self.download(self.path.join(item[0], file), self.path.join(local_folder, item[0], file), callback=lambda data: self.down_stream.add_size(len(data))) self.chdir(self.def_wd) def upload_folder(self, local_folder: str, remote_folder: str): self.up_stream = self._IOStream(Util.get_folder_size(local_folder)) for item in os.listdir(local_folder): path = os.path.join(local_folder, item) rem_path = os.path.join(remote_folder, item) if os.path.isfile(path): self.upload(path, rem_path, callback=lambda data: self.up_stream.add_size(len(data))) elif os.path.isdir(path): try: self.mkdir(rem_path) except ftputil.error.FTPError: pass self.upload_folder(path, rem_path) # 
-------------------------------------------------------------------------------------------------------------------- # # GUI # -------------------------------------------------------------------------------------------------------------------- # class Backup: def __init__(self, path: str): self.path = path self.abspath = os.path.abspath(os.path.join(BACKUPS_DIR, self.path)) self.name = next(os.walk(os.path.join(BACKUPS_DIR, self.path)))[1][0] self.size = Util.get_folder_size(os.path.join(BACKUPS_DIR, self.path)) / (1024 ** 2) self.date = datetime.datetime.fromtimestamp(float(self.path)).strftime("%Y-%m-%d %H:%M:%S") self.thumbnail = os.path.join(BACKUPS_DIR, self.path, self.name, "thumb.jpg") def __repr__(self): return ("<" + __name__ + "." + self.__class__.__name__ + " name=" + repr(self.name) + " size=" + repr(self.size) + " date=" + repr(self.date) + " path=" + repr(self.path) + ">") class Scheduler: def __init__(self): self._stop_flag = False self.running = False self.thread_running = False def run(self): self._stop_flag = False thread = None while True: if not self._stop_flag: self.running = True if thread is not None and thread.is_alive(): self.thread_running = True else: self.thread_running = False config = Util.get_config() interval = config.interval last_run = config.last_run if datetime.datetime.timestamp( datetime.datetime.fromtimestamp(last_run) + datetime.timedelta(minutes=interval) ) <= datetime.datetime.now().timestamp(): config.last_run = datetime.datetime.now().timestamp() Util.save_config(config) print("[SCHEDULER] Starting task...") thread = threading.Thread(target=lambda: Util.download_backup(), daemon=True) thread.start() else: self.running = False break time.sleep(1) def stop(self): print("[SCHEDULER] Stopping scheduler...") self._stop_flag = True return self def start(self): print("[SCHEDULER] Starting scheduler...") threading.Thread(target=lambda: self.run(), daemon=True).start() @property def
population that escapes from |11> to |02>. Formula for unitary propagator: population = |<02|U|11>|^2 and similarly for the superoperator case. The function assumes that the computational subspace (:= the 4 energy levels chosen as the two qubits) is given by the standard basis |0> /otimes |0>, |0> /otimes |1>, |1> /otimes |0>, |1> /otimes |1>. If this is not the case, one need to change the basis to that one, before calling this function. """ if U.type=='oper': sump = 0 for i_list in [[0,2]]: for j_list in [[1,1]]: bra_i = qtp.tensor(qtp.ket([i_list[0]], dim=[3]), qtp.ket([i_list[1]], dim=[3])).dag() ket_j = qtp.tensor(qtp.ket([j_list[0]], dim=[3]), qtp.ket([j_list[1]], dim=[3])) p = np.abs((bra_i*U*ket_j).data[0, 0])**2 sump += p return np.real(sump) elif U.type=='super': sump = 0 for i_list in [[0,2]]: for j_list in [[1,1]]: ket_i = qtp.tensor(qtp.ket([i_list[0]], dim=[3]), qtp.ket([i_list[1]], dim=[3])) rho_i=qtp.operator_to_vector(qtp.ket2dm(ket_i)) ket_j = qtp.tensor(qtp.ket([j_list[0]], dim=[3]), qtp.ket([j_list[1]], dim=[3])) rho_j=qtp.operator_to_vector(qtp.ket2dm(ket_j)) p = (rho_i.dag()*U*rho_j).data[0, 0] sump += p return np.real(sump) def pro_avfid_superoperator_compsubspace(U,L1): """ Average process (gate) fidelity in the qubit computational subspace for two qutrits. Leakage has to be taken into account, see Woods & Gambetta. The function assumes that the computational subspace (:= the 4 energy levels chosen as the two qubits) is given by the standard basis |0> /otimes |0>, |0> /otimes |1>, |1> /otimes |0>, |1> /otimes |1>. If this is not the case, one need to change the basis to that one, before calling this function. 
""" if U.type=='oper': inner = U.dag()*U_target part_idx = [0, 1, 3, 4] # only computational subspace ptrace = 0 for i in part_idx: ptrace += inner[i, i] dim = 4 # 2 qubits comp subspace return np.real(((np.abs(ptrace))**2+dim*(1-L1))/(dim*(dim+1))) elif U.type=='super': kraus_form = qtp.to_kraus(U) dim=4 # 2 qubits in the computational subspace part_idx = [0, 1, 3, 4] # only computational subspace psum=0 for A_k in kraus_form: ptrace = 0 inner = U_target_diffdims.dag()*A_k # otherwise dimension mismatch for i in part_idx: ptrace += inner[i, i] psum += (np.abs(ptrace))**2 return np.real((dim*(1-L1) + psum) / (dim*(dim + 1))) def pro_avfid_superoperator_compsubspace_phasecorrected(U,L1,phases): """ Average process (gate) fidelity in the qubit computational subspace for two qutrits Leakage has to be taken into account, see Woods & Gambetta The phase is corrected with Z rotations considering both transmons as qubits. The correction is done perfectly. The function assumes that the computational subspace (:= the 4 energy levels chosen as the two qubits) is given by the standard basis |0> /otimes |0>, |0> /otimes |1>, |1> /otimes |0>, |1> /otimes |1>. If this is not the case, one need to change the basis to that one, before calling this function. 
""" Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0], [0, np.exp(-1j*np.deg2rad(phases[1])), 0, 0, 0, 0, 0, 0, 0], [0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0], [0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0, 0], [0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[3]-phases[-1])), 0, 0, 0, 0], [0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0], [0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0], [0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[1])), 0], [0, 0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0]))]], type='oper', dims=[[3, 3], [3, 3]]) if U.type=='oper': U=Ucorrection*U inner = U.dag()*U_target part_idx = [0, 1, 3, 4] # only computational subspace ptrace = 0 for i in part_idx: ptrace += inner[i, i] dim = 4 # 2 qubits comp subspace return np.real(((np.abs(ptrace))**2+dim*(1-L1))/(dim*(dim+1))) elif U.type=='super': U=qtp.to_super(Ucorrection)*U kraus_form = qtp.to_kraus(U) dim=4 # 2 qubits in the computational subspace part_idx = [0, 1, 3, 4] # only computational subspace psum=0 for A_k in kraus_form: ptrace = 0 inner = U_target_diffdims.dag()*A_k # otherwise dimension mismatch for i in part_idx: ptrace += inner[i, i] psum += (np.abs(ptrace))**2 return np.real((dim*(1-L1) + psum) / (dim*(dim + 1))) def pro_avfid_superoperator_compsubspace_phasecorrected_onlystaticqubit(U,L1,phases): """ Average process (gate) fidelity in the qubit computational subspace for two qutrits Leakage has to be taken into account, see Woods & Gambetta The phase is corrected with Z rotations considering both transmons as qubits. The correction is done perfectly. The function assumes that the computational subspace (:= the 4 energy levels chosen as the two qubits) is given by the standard basis |0> /otimes |0>, |0> /otimes |1>, |1> /otimes |0>, |1> /otimes |1>. If this is not the case, one need to change the basis to that one, before calling this function. 
""" Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0], [0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0], [0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0], [0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0, 0], [0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0], [0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0], [0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0], [0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0], [0, 0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0]))]], type='oper', dims=[[3, 3], [3, 3]]) if U.type=='oper': U=Ucorrection*U inner = U.dag()*U_target part_idx = [0, 1, 3, 4] # only computational subspace ptrace = 0 for i in part_idx: ptrace += inner[i, i] dim = 4 # 2 qubits comp subspace return np.real(((np.abs(ptrace))**2+dim*(1-L1))/(dim*(dim+1))) elif U.type=='super': U=qtp.to_super(Ucorrection)*U kraus_form = qtp.to_kraus(U) dim=4 # 2 qubits in the computational subspace part_idx = [0, 1, 3, 4] # only computational subspace psum=0 for A_k in kraus_form: ptrace = 0 inner = U_target_diffdims.dag()*A_k # otherwise dimension mismatch for i in part_idx: ptrace += inner[i, i] psum += (np.abs(ptrace))**2 return np.real((dim*(1-L1) + psum) / (dim*(dim + 1))) def pro_avfid_superoperator(U): """ Average process (gate) fidelity in the whole space for two qutrits The function assumes that the computational subspace (:= the 4 energy levels chosen as the two qubits) is given by the standard basis |0> /otimes |0>, |0> /otimes |1>, |1> /otimes |0>, |1> /otimes |1>. If this is not the case, one need to change the basis to that one, before calling this function. 
""" if U.type=='oper': ptrace = np.abs((U.dag()*U_target).tr())**2 dim = 9 # dimension of the whole space return np.real((ptrace+dim)/(dim*(dim+1))) elif U.type=='super': return np.real(qtp.average_gate_fidelity(U,target=U_target_diffdims)) def pro_avfid_superoperator_phasecorrected(U,phases): """ Average process (gate) fidelity in the whole space for two qutrits Qubit Z rotation and qutrit "Z" rotations are applied, taking into account the anharmonicity as well. The function assumes that the computational subspace (:= the 4 energy levels chosen as the two qubits) is given by the standard basis |0> /otimes |0>, |0> /otimes |1>, |1> /otimes |0>, |1> /otimes |1>. If this is not the case, one need to change the basis to that one, before calling this function. This function is quite useless because we are always interested in the computational subspace only. """ Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0], [0, np.exp(-1j*np.deg2rad(phases[1])), 0, 0, 0, 0, 0, 0, 0], [0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1])), 0, 0, 0, 0, 0, 0], [0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0, 0], [0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[3]-phases[-1])), 0, 0, 0, 0], [0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1]+phases[2]-phases[0])), 0, 0, 0], [0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[5])), 0, 0], [0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[5]+phases[1]-phases[0])), 0], [0, 0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1]+phases[5]-phases[0]))]], type='oper', dims=[[3, 3], [3, 3]]) if U.type=='oper': U=Ucorrection*U ptrace = np.abs((U.dag()*U_target).tr())**2 dim = 9 # dimension of the whole space return np.real((ptrace+dim)/(dim*(dim+1))) elif U.type=='super': U=qtp.to_super(Ucorrection)*U return np.real(qtp.average_gate_fidelity(U,target=U_target_diffdims)) ##### functions called by the main program def distort_amplitude(fitted_stepresponse_ty,amp,tlist_new,sim_step_new): 
fitted_stepresponse_ty_temp=np.concatenate([np.zeros(1),fitted_stepresponse_ty[1]]) # to make gradient work properly impulse_response_temp=np.gradient(fitted_stepresponse_ty_temp) impulse_response= np.delete(impulse_response_temp,-1) # to have t and y of the same length for interpolation # use interpolation to be sure that amp and impulse_response have the same delta_t separating two values amp_interp = interp1d(tlist_new,amp) impulse_response_interp = interp1d(fitted_stepresponse_ty[0],impulse_response) tlist_convol1 = tlist_new tlist_convol2 = np.arange(0, fitted_stepresponse_ty[0][-1], sim_step_new) amp_convol = amp_interp(tlist_convol1) impulse_response_convol = impulse_response_interp(tlist_convol2) # Compute convolution convolved_amp=scipy.signal.convolve(amp_convol,impulse_response_convol)/sum(impulse_response_convol) amp_final=convolved_amp[0:np.size(tlist_convol1)] # consider only amp during the gate time return amp_final def shift_due_to_fluxbias_q0(fluxlutman,amp_final,fluxbias_q0): if not fluxlutman.czd_double_sided(): omega_0 = fluxlutman.calc_amp_to_freq(0,'01') f_pulse = fluxlutman.calc_amp_to_freq(amp_final,'01') f_pulse = np.clip(f_pulse,a_min=None,a_max=omega_0) #
"""passlib.handlers.django - Django password hash support"""
#=============================================================================
# imports
#=============================================================================
# core
from base64 import b64encode
from hashlib import md5, sha1
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from passlib.utils import to_unicode, classproperty
from passlib.utils.compat import b, bytes, str_to_uascii, uascii_to_str, unicode, u
from passlib.utils.pbkdf2 import pbkdf2
import passlib.utils.handlers as uh
# local
__all__ = [
    "django_salted_sha1",
    "django_salted_md5",
    "django_bcrypt",
    "django_pbkdf2_sha1",
    "django_pbkdf2_sha256",
    "django_des_crypt",
    "django_disabled",
]

#=============================================================================
# lazy imports & constants
#=============================================================================

# des_crypt is imported lazily (below) so that merely loading this module
# does not pull in the des_crypt backend unless django_des_crypt is used.
des_crypt = None

def _import_des_crypt():
    """Import and cache :class:`passlib.hash.des_crypt` on first use."""
    global des_crypt
    if des_crypt is None:
        # NOTE: the ``global`` declaration above makes this rebind the
        # module-level ``des_crypt`` name, not a function local.
        from passlib.hash import des_crypt
    return des_crypt

# django 1.4's salt charset
SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'

#=============================================================================
# salted hashes
#=============================================================================

class DjangoSaltedHash(uh.HasSalt, uh.GenericHandler):
    """base class providing common code for django hashes
    (salted, unsalted, and various hashers).

    Subclasses must set ``name``, ``ident`` (which must include the
    trailing ``"$"``), and ``checksum_size``.
    """
    setting_kwds = ("salt", "salt_size")

    min_salt_size = 0
    # NOTE: django 1.0-1.3 would accept empty salt strings.
    #       django 1.4 won't, but this appears to be regression
    #       (https://code.djangoproject.com/ticket/18144)
    #       so presumably it will be fixed in a later release.
    default_salt_size = 12
    max_salt_size = None
    salt_chars = SALT_CHARS

    checksum_chars = uh.LOWER_HEX_CHARS

    @classproperty
    def _stub_checksum(cls):
        # placeholder digest used by genconfig() before a real secret is hashed
        return cls.checksum_chars[0] * cls.checksum_size

    @classmethod
    def from_string(cls, hash):
        # hashes look like ``<ident><salt>$<checksum>``
        salt, chk = uh.parse_mc2(hash, cls.ident, handler=cls)
        return cls(salt=salt, checksum=chk)

    def to_string(self):
        return uh.render_mc2(self.ident, self.salt,
                             self.checksum or self._stub_checksum)

class DjangoVariableHash(uh.HasRounds, DjangoSaltedHash):
    """base class providing common code for django hashes w/ variable rounds"""
    setting_kwds = DjangoSaltedHash.setting_kwds + ("rounds",)

    min_rounds = 1

    @classmethod
    def from_string(cls, hash):
        # hashes look like ``<ident><rounds>$<salt>$<checksum>``
        rounds, salt, chk = uh.parse_mc3(hash, cls.ident, handler=cls)
        return cls(rounds=rounds, salt=salt, checksum=chk)

    def to_string(self):
        return uh.render_mc3(self.ident, self.rounds, self.salt,
                             self.checksum or self._stub_checksum)

class django_salted_sha1(DjangoSaltedHash):
    """This class implements Django's Salted SHA1 hash, and follows the
    :ref:`password-hash-api`.

    It supports a variable-length salt, and uses a single round of SHA1.

    The :meth:`~passlib.ifc.PasswordHash.encrypt` and
    :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following
    optional keywords:

    :type salt: str
    :param salt:
        Optional salt string. If not specified, a 12 character one will be
        autogenerated (this is recommended). If specified, may be any series
        of characters drawn from the regexp range ``[0-9a-zA-Z]``.

    :type salt_size: int
    :param salt_size:
        Optional number of characters to use when autogenerating new salts.
        Defaults to 12, but can be any positive value.

    This should be compatible with Django 1.4's :class:`!SHA1PasswordHasher`
    class.

    .. versionchanged:: 1.6
        This class now generates 12-character salts instead of 5, and
        generated salts use the character range ``[0-9a-zA-Z]`` instead of
        the ``[0-9a-f]``. This is to be compatible with how Django >= 1.4
        generates these hashes; but hashes generated in this manner will
        still be correctly interpreted by earlier versions of Django.
    """
    name = "django_salted_sha1"
    django_name = "sha1"
    ident = u("sha1$")
    checksum_size = 40

    def _calc_checksum(self, secret):
        # checksum is hex(sha1(salt + utf8(secret)))
        if isinstance(secret, unicode):
            secret = secret.encode("utf-8")
        return str_to_uascii(sha1(self.salt.encode("ascii") + secret).hexdigest())

class django_salted_md5(DjangoSaltedHash):
    """This class implements Django's Salted MD5 hash, and follows the
    :ref:`password-hash-api`.

    It supports a variable-length salt, and uses a single round of MD5.

    The :meth:`~passlib.ifc.PasswordHash.encrypt` and
    :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following
    optional keywords:

    :type salt: str
    :param salt:
        Optional salt string. If not specified, a 12 character one will be
        autogenerated (this is recommended). If specified, may be any series
        of characters drawn from the regexp range ``[0-9a-zA-Z]``.

    :type salt_size: int
    :param salt_size:
        Optional number of characters to use when autogenerating new salts.
        Defaults to 12, but can be any positive value.

    This should be compatible with the hashes generated by Django 1.4's
    :class:`!MD5PasswordHasher` class.

    .. versionchanged:: 1.6
        This class now generates 12-character salts instead of 5, and
        generated salts use the character range ``[0-9a-zA-Z]`` instead of
        the ``[0-9a-f]``. This is to be compatible with how Django >= 1.4
        generates these hashes; but hashes generated in this manner will
        still be correctly interpreted by earlier versions of Django.
    """
    name = "django_salted_md5"
    django_name = "md5"
    ident = u("md5$")
    checksum_size = 32

    def _calc_checksum(self, secret):
        # checksum is hex(md5(salt + utf8(secret)))
        if isinstance(secret, unicode):
            secret = secret.encode("utf-8")
        return str_to_uascii(md5(self.salt.encode("ascii") + secret).hexdigest())

django_bcrypt = uh.PrefixWrapper("django_bcrypt", "bcrypt",
    prefix=u('bcrypt$'), ident=u("bcrypt$"),
    # NOTE: this docstring is duplicated in the docs, since sphinx
    # seems to be having trouble reading it via autodata::
    doc="""This class implements Django 1.4's BCrypt wrapper, and follows the
    :ref:`password-hash-api`.

    This is identical to :class:`!bcrypt` itself, but with the Django-specific
    prefix ``"bcrypt$"`` prepended.

    See :doc:`/lib/passlib.hash.bcrypt` for more details,
    the usage and behavior is identical.

    This should be compatible with the hashes generated by
    Django 1.4's :class:`!BCryptPasswordHasher` class.

    .. versionadded:: 1.6
    """)
django_bcrypt.django_name = "bcrypt"

class django_pbkdf2_sha256(DjangoVariableHash):
    """This class implements Django's PBKDF2-HMAC-SHA256 hash, and follows the
    :ref:`password-hash-api`.

    It supports a variable-length salt, and a variable number of rounds.

    The :meth:`~passlib.ifc.PasswordHash.encrypt` and
    :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following
    optional keywords:

    :type salt: str
    :param salt:
        Optional salt string. If not specified, a 12 character one will be
        autogenerated (this is recommended). If specified, may be any series
        of characters drawn from the regexp range ``[0-9a-zA-Z]``.

    :type salt_size: int
    :param salt_size:
        Optional number of characters to use when autogenerating new salts.
        Defaults to 12, but can be any positive value.

    :type rounds: int
    :param rounds:
        Optional number of rounds to use.
        Defaults to 10000, but must be within ``range(1,1<<32)``.

    :type relaxed: bool
    :param relaxed:
        By default, providing an invalid value for one of the other
        keywords will result in a :exc:`ValueError`.
        If ``relaxed=True``, and the error can be corrected,
        a :exc:`~passlib.exc.PasslibHashWarning` will be issued instead.
        Correctable errors include ``rounds`` that are too small or too
        large, and ``salt`` strings that are too long.

    This should be compatible with the hashes generated by
    Django 1.4's :class:`!PBKDF2PasswordHasher` class.

    .. versionadded:: 1.6
    """
    name = "django_pbkdf2_sha256"
    django_name = "pbkdf2_sha256"
    ident = u('pbkdf2_sha256$')
    min_salt_size = 1
    max_rounds = 0xffffffff # setting at 32-bit limit for now
    checksum_chars = uh.PADDED_BASE64_CHARS
    checksum_size = 44 # 32 bytes -> base64
    default_rounds = 10000 # NOTE: using django default here
    _prf = "hmac-sha256"

    def _calc_checksum(self, secret):
        # checksum is base64(pbkdf2(utf8(secret), salt, rounds)) using
        # the subclass-selected prf (hmac-sha256 here, hmac-sha1 below)
        if isinstance(secret, unicode):
            secret = secret.encode("utf-8")
        hash = pbkdf2(secret, self.salt.encode("ascii"), self.rounds,
                      keylen=None, prf=self._prf)
        return b64encode(hash).rstrip().decode("ascii")

class django_pbkdf2_sha1(django_pbkdf2_sha256):
    """This class implements Django's PBKDF2-HMAC-SHA1 hash, and follows the
    :ref:`password-hash-api`.

    It supports a variable-length salt, and a variable number of rounds.

    The :meth:`~passlib.ifc.PasswordHash.encrypt` and
    :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following
    optional keywords:

    :type salt: str
    :param salt:
        Optional salt string. If not specified, a 12 character one will be
        autogenerated (this is recommended). If specified, may be any series
        of characters drawn from the regexp range ``[0-9a-zA-Z]``.

    :type salt_size: int
    :param salt_size:
        Optional number of characters to use when autogenerating new salts.
        Defaults to 12, but can be any positive value.

    :type rounds: int
    :param rounds:
        Optional number of rounds to use.
        Defaults to 10000, but must be within ``range(1,1<<32)``.

    :type relaxed: bool
    :param relaxed:
        By default, providing an invalid value for one of the other
        keywords will result in a :exc:`ValueError`.
        If ``relaxed=True``, and the error can be corrected,
        a :exc:`~passlib.exc.PasslibHashWarning` will be issued instead.
        Correctable errors include ``rounds`` that are too small or too
        large, and ``salt`` strings that are too long.

    This should be compatible with the hashes generated by
    Django 1.4's :class:`!PBKDF2SHA1PasswordHasher` class.

    .. versionadded:: 1.6
    """
    name = "django_pbkdf2_sha1"
    django_name = "pbkdf2_sha1"
    ident = u('pbkdf2_sha1$')
    checksum_size = 28 # 20 bytes -> base64
    _prf = "hmac-sha1"

#=============================================================================
# other
#=============================================================================
class django_des_crypt(uh.HasSalt, uh.GenericHandler):
    """This class implements Django's :class:`des_crypt` wrapper, and follows
    the :ref:`password-hash-api`.

    It supports a fixed-length salt.

    The :meth:`~passlib.ifc.PasswordHash.encrypt` and
    :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following
    optional keywords:

    :type salt: str
    :param salt:
        Optional salt string. If not specified, one will be autogenerated
        (this is recommended). If specified, it must be 2 characters, drawn
        from the regexp range ``[./0-9A-Za-z]``.

    This should be compatible with the hashes generated by
    Django 1.4's :class:`!CryptPasswordHasher` class.
    Note that Django only supports this hash on Unix systems
    (though :class:`!django_des_crypt` is available cross-platform
    under Passlib).

    .. versionchanged:: 1.6
        This class will now accept hashes with empty salt strings,
        since Django 1.4 generates them this way.
    """
    name = "django_des_crypt"
    django_name = "crypt"
    setting_kwds = ("salt", "salt_size")
    ident = u("crypt$")
    checksum_chars = salt_chars = uh.HASH64_CHARS
    checksum_size = 11
    min_salt_size = default_salt_size = 2
    _stub_checksum = u('.')*11

    @classmethod
    def from_string(cls, hash):
        salt, chk = uh.parse_mc2(hash, cls.ident, handler=cls)
        if chk:
            # chk should be full des_crypt hash
            if not salt:
# -*- coding: utf-8 -*-
import json
import random
import string
from unittest import TestCase

from hypothesis import given
from hypothesis import settings
from hypothesis.strategies import dictionaries
from hypothesis.strategies import floats
from hypothesis.strategies import integers
from hypothesis.strategies import text
import msgpack
import pytest
import six

from ddtrace.constants import ORIGIN_KEY
from ddtrace.ext.ci import CI_APP_TEST_ORIGIN
from ddtrace.internal._encoding import BufferFull
from ddtrace.internal._encoding import BufferItemTooLarge
from ddtrace.internal._encoding import ListStringTable
from ddtrace.internal._encoding import MsgpackStringTable
from ddtrace.internal.compat import msgpack_type
from ddtrace.internal.compat import string_type
from ddtrace.internal.encoding import JSONEncoder
from ddtrace.internal.encoding import JSONEncoderV2
from ddtrace.internal.encoding import MSGPACK_ENCODERS
from ddtrace.internal.encoding import MsgpackEncoderV03
from ddtrace.internal.encoding import MsgpackEncoderV05
from ddtrace.internal.encoding import _EncoderBase
from ddtrace.span import Span
from ddtrace.span import SpanTypes
from ddtrace.tracer import Tracer
from tests.utils import DummyTracer


# bytes form of the origin key, as it appears in decoded msgpack maps
_ORIGIN_KEY = ORIGIN_KEY.encode()


def span_to_tuple(span):
    # type: (Span) -> tuple
    """Flatten a Span into the 12-field tuple layout used by the v0.5 format."""
    return (
        span.service,
        span.name,
        span.resource,
        span.trace_id or 0,
        span.span_id or 0,
        span.parent_id or 0,
        span.start_ns or 0,
        span.duration_ns or 0,
        int(bool(span.error)),
        span.meta or {},
        span.metrics or {},
        span.span_type,
    )


def rands(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random string of ``size`` characters drawn from ``chars``."""
    return "".join(random.choice(chars) for _ in range(size))


def gen_trace(nspans=1000, ntags=50, key_size=15, value_size=20, nmetrics=10):
    """Build a single trace of ``nspans`` spans with random tags/metrics.

    The first span created becomes the root; all later spans are parented
    to it. Only the root span gets a span type.
    """
    t = Tracer()

    root = None
    trace = []
    for i in range(0, nspans):
        parent_id = root.span_id if root else None
        with Span(
            t,
            "span_name",
            resource="/fsdlajfdlaj/afdasd%s" % i,
            service="myservice",
            parent_id=parent_id,
        ) as span:
            span._parent = root
            span.set_tags({rands(key_size): rands(value_size) for _ in range(0, ntags)})
            # only apply a span type to the root span
            if not root:
                span.span_type = "web"
            for _ in range(0, nmetrics):
                span.set_tag(rands(key_size), random.randint(0, 2 ** 16))
            trace.append(span)

            if not root:
                root = span

    return trace


class RefMsgpackEncoder(_EncoderBase):
    """Reference (pure-msgpack) encoder used to validate the C encoders."""

    content_type = "application/msgpack"

    def normalize(self, span):
        # subclasses define the per-format span representation
        raise NotImplementedError()

    def encode_traces(self, traces):
        normalized_traces = [[self.normalize(span) for span in trace] for trace in traces]
        return self.encode(normalized_traces)

    def encode(self, obj):
        return msgpack.packb(obj)

    @staticmethod
    def decode(data):
        return msgpack.unpackb(data, raw=True, strict_map_key=False)


class RefMsgpackEncoderV03(RefMsgpackEncoder):
    """Reference encoder for the v0.3/v0.4 dict-per-span format."""

    def normalize(self, span):
        d = span.to_dict()
        # the v0.3 wire format omits the error field when it is falsy
        if not d["error"]:
            del d["error"]
        return d


class RefMsgpackEncoderV05(RefMsgpackEncoder):
    """Reference encoder for the v0.5 string-table + tuple-per-span format."""

    def __init__(self, *args, **kwargs):
        super(RefMsgpackEncoderV05, self).__init__(*args, **kwargs)
        self.string_table = ListStringTable()
        # origin key is always pre-registered in the table
        self.string_table.index(ORIGIN_KEY)

    def _index_or_value(self, value):
        # strings become string-table indices; dicts are converted
        # recursively; everything else passes through unchanged
        if value is None:
            return 0
        if isinstance(value, six.string_types):
            return self.string_table.index(value)
        if isinstance(value, dict):
            return {self._index_or_value(k): self._index_or_value(v) for k, v in value.items()}
        return value

    def normalize(self, span):
        return tuple(self._index_or_value(_) for _ in span_to_tuple(span))

    def encode(self, obj):
        try:
            # v0.5 payload is [string_table, traces]
            return super(RefMsgpackEncoderV05, self).encode([list(self.string_table), obj])
        finally:
            # reset the table so each encode() call is independent
            self.string_table = ListStringTable()
            self.string_table.index(ORIGIN_KEY)


REF_MSGPACK_ENCODERS = {
    "v0.3": RefMsgpackEncoderV03,
    "v0.4": RefMsgpackEncoderV03,
    "v0.5": RefMsgpackEncoderV05,
}


class TestEncoders(TestCase):
    """
    Ensures that Encoders serialize the payload as expected.
    """

    def test_encode_traces_json(self):
        # test encoding for JSON format
        traces = []
        traces.append(
            [
                Span(name="client.testing", tracer=None),
                Span(name="client.testing", tracer=None),
            ]
        )
        traces.append(
            [
                Span(name="client.testing", tracer=None),
                Span(name="client.testing", tracer=None),
            ]
        )

        encoder = JSONEncoder()
        spans = encoder.encode_traces(traces)
        items = json.loads(spans)

        # test the encoded output that should be a string
        # and the output must be flatten
        assert isinstance(spans, string_type)
        assert len(items) == 2
        assert len(items[0]) == 2
        assert len(items[1]) == 2
        for i in range(2):
            for j in range(2):
                assert "client.testing" == items[i][j]["name"]

    def test_encode_traces_json_v2(self):
        # test encoding for JSON format
        traces = []
        traces.append(
            [
                Span(name="client.testing", tracer=None, span_id=0xAAAAAA),
                Span(name="client.testing", tracer=None, span_id=0xAAAAAA),
            ]
        )
        traces.append(
            [
                Span(name="client.testing", tracer=None, span_id=0xAAAAAA),
                Span(name="client.testing", tracer=None, span_id=0xAAAAAA),
            ]
        )

        encoder = JSONEncoderV2()
        spans = encoder.encode_traces(traces)
        items = json.loads(spans)["traces"]

        # test the encoded output that should be a string
        # and the output must be flatten
        assert isinstance(spans, string_type)
        assert len(items) == 2
        assert len(items[0]) == 2
        assert len(items[1]) == 2
        for i in range(2):
            for j in range(2):
                assert "client.testing" == items[i][j]["name"]
                # v2 serializes span ids as zero-padded hex strings
                assert isinstance(items[i][j]["span_id"], string_type)
                assert items[i][j]["span_id"] == "0000000000AAAAAA"

    def test_encode_traces_msgpack_v03(self):
        # test encoding for MsgPack format
        encoder = MsgpackEncoderV03(2 << 10, 2 << 10)
        encoder.put(
            [
                Span(name="client.testing", tracer=None),
                Span(name="client.testing", tracer=None),
            ]
        )
        encoder.put(
            [
                Span(name="client.testing", tracer=None),
                Span(name="client.testing", tracer=None),
            ]
        )

        spans = encoder.encode()
        items = encoder._decode(spans)

        # test the encoded output that should be a string
        # and the output must be flatten
        assert isinstance(spans, msgpack_type)
        assert len(items) == 2
        assert len(items[0]) == 2
        assert len(items[1]) == 2
        for i in range(2):
            for j in range(2):
                assert b"client.testing" == items[i][j][b"name"]


def decode(obj, reconstruct=True):
    """Unpack an encoded payload; for v0.5 payloads, optionally resolve
    string-table indices back into their string values."""
    unpacked = msgpack.unpackb(obj, raw=True, strict_map_key=False)

    if not unpacked or not unpacked[0]:
        return unpacked

    if isinstance(unpacked[0][0], bytes) and reconstruct:
        # v0.5
        table, _traces = unpacked

        def resolve(span):
            # map table indices back to strings for the string-typed fields
            return (
                table[span[0]],
                table[span[1]],
                table[span[2]],
                span[3],
                span[4],
                span[5],
                span[6],
                span[7],
                span[8],
                {table[k]: table[v] for k, v in span[9].items()},
                {table[k]: v for k, v in span[10].items()},
                table[span[11]],
            )

        traces = [[resolve(span) for span in trace] for trace in _traces]
    else:
        traces = unpacked

    return traces


def allencodings(f):
    """Parametrize a test over every supported msgpack encoding version."""
    return pytest.mark.parametrize("encoding", MSGPACK_ENCODERS.keys())(f)


@allencodings
def test_custom_msgpack_encode(encoding):
    encoder = MSGPACK_ENCODERS[encoding](1 << 20, 1 << 20)
    refencoder = REF_MSGPACK_ENCODERS[encoding]()

    trace = gen_trace(nspans=50)

    # Note that we assert on the decoded versions because the encoded
    # can vary due to non-deterministic map key/value positioning
    encoder.put(trace)
    assert decode(refencoder.encode_traces([trace])) == decode(encoder.encode())

    ref_encoded = refencoder.encode_traces([trace, trace])
    encoder.put(trace)
    encoder.put(trace)
    encoded = encoder.encode()
    assert decode(encoded) == decode(ref_encoded)

    # Empty trace (not that this should be done in practice)
    encoder.put([])
    assert decode(refencoder.encode_traces([[]])) == decode(encoder.encode())

    s = Span(None, None)
    # Need to .finish() to have a duration since the old implementation will not encode
    # duration_ns, the new one will encode as None
    s.finish()
    encoder.put([s])
    assert decode(refencoder.encode_traces([[s]])) == decode(encoder.encode())


def span_type_span():
    # span type set via attribute rather than the constructor keyword
    s = Span(None, "span_name")
    s.span_type = SpanTypes.WEB
    return s


@allencodings
@pytest.mark.parametrize(
    "span",
    [
        Span(None, "span_name", span_type=SpanTypes.WEB),
        Span(None, "span_name", resource="/my-resource"),
        Span(None, "span_name", service="my-svc"),
        span_type_span(),
    ],
)
def test_msgpack_span_property_variations(encoding, span):
    refencoder = REF_MSGPACK_ENCODERS[encoding]()
    encoder = MSGPACK_ENCODERS[encoding](1 << 10, 1 << 10)

    # Finish the span to ensure a duration exists.
    span.finish()

    trace = [span]
    encoder.put(trace)
    assert decode(refencoder.encode_traces([trace])) == decode(encoder.encode())


# subclasses of builtin types, used to check the encoders accept them
class SubString(str):
    pass


class SubInt(int):
    pass


class SubFloat(float):
    pass


@allencodings
@pytest.mark.parametrize(
    "span, tags",
    [
        (Span(None, "name"), {"int": SubInt(123)}),
        (Span(None, "name"), {"float": SubFloat(123.213)}),
        (Span(None, SubString("name")), {SubString("test"): SubString("test")}),
        (Span(None, "name"), {"unicode": u"😐"}),
        (Span(None, "name"), {u"😐": u"😐"}),
        (
            Span(None, u"span_name", service="test-service", resource="test-resource", span_type=SpanTypes.WEB),
            {"metric1": 123, "metric2": "1", "metric3": 12.3, "metric4": "12.0", "tag1": "test", u"tag2": u"unicode"},
        ),
    ],
)
def test_span_types(encoding, span, tags):
    refencoder = REF_MSGPACK_ENCODERS[encoding]()
    encoder = MSGPACK_ENCODERS[encoding](1 << 10, 1 << 10)

    span.set_tags(tags)

    # Finish the span to ensure a duration exists.
    span.finish()

    trace = [span]
    encoder.put(trace)
    assert decode(refencoder.encode_traces([trace])) == decode(encoder.encode())


@pytest.mark.parametrize(
    "Encoder,item",
    [
        # v0.3 spans are maps keyed by b"meta"; v0.5 spans are tuples
        # whose meta dict sits at index 9
        (MsgpackEncoderV03, b"meta"),
        (MsgpackEncoderV05, 9),
    ],
)
def test_encoder_propagates_dd_origin(Encoder, item):
    tracer = DummyTracer()
    encoder = Encoder(1 << 20, 1 << 20)
    with tracer.trace("Root") as root:
        root.context.dd_origin = CI_APP_TEST_ORIGIN
        for _ in range(999):
            with tracer.trace("child"):
                pass
    trace = tracer.writer.pop()
    encoder.put(trace)
    decoded_trace = decode(encoder.encode())

    # Ensure encoded trace contains dd_origin tag in all spans
    assert all((_[item][_ORIGIN_KEY] == b"ciapp-test" for _ in decoded_trace[0]))


@allencodings
@given(
    name=text(),
    service=text(),
    resource=text(),
    meta=dictionaries(text(), text()),
    metrics=dictionaries(text(), floats()),
    error=integers(min_value=-(2 ** 31), max_value=2 ** 31 - 1),
    span_type=text(),
)
@settings(max_examples=200)
def test_custom_msgpack_encode_trace_size(encoding, name, service, resource, meta, metrics, error, span_type):
    # property test: encoder.size must always equal the length of the
    # encoded payload, for arbitrary span field values
    encoder = MSGPACK_ENCODERS[encoding](1 << 20, 1 << 20)
    span = Span(tracer=None, name=name, service=service, resource=resource)
    span.meta = meta
    span.metrics = metrics
    span.error = error
    span.span_type = span_type
    trace = [span, span, span]

    encoder.put(trace)

    assert encoder.size == len(encoder.encode())


def test_encoder_buffer_size_limit_v03():
    buffer_size = 1 << 10
    encoder = MsgpackEncoderV03(buffer_size, buffer_size)

    trace = [Span(tracer=None, name="test")]
    encoder.put(trace)
    trace_size = encoder.size - 1  # This includes the global msgpack array size prefix

    # fill the buffer to just below its capacity
    for _ in range(1, int(buffer_size / trace_size)):
        encoder.put(trace)

    with pytest.raises(BufferFull):
        encoder.put(trace)

    # a full buffer must stay full on subsequent puts
    with pytest.raises(BufferFull):
        encoder.put(trace)


def test_encoder_buffer_size_limit_v05():
    buffer_size = 1 << 10
    encoder = MsgpackEncoderV05(buffer_size, buffer_size)

    trace = [Span(tracer=None, name="test")]
    encoder.put(trace)
    base_size = encoder.size
    encoder.put(trace)
    # v0.5 has a fixed string-table overhead, so the per-trace delta is
    # measured from the second put
    trace_size = encoder.size - base_size

    for _ in range(1, int((buffer_size - base_size) / trace_size)):
        encoder.put(trace)

    with pytest.raises(BufferFull):
        encoder.put(trace)

    with pytest.raises(BufferFull):
        encoder.put(trace)


def test_encoder_buffer_item_size_limit_v03():
    max_item_size = 1 << 10
    encoder = MsgpackEncoderV03(max_item_size << 1, max_item_size)

    span = Span(tracer=None, name="test")
    trace = [span]
    encoder.put(trace)
    trace_size = encoder.size - 1  # This includes the global msgpack array size prefix

    with pytest.raises(BufferItemTooLarge):
        encoder.put([span] * (int(max_item_size / trace_size) + 1))


def test_encoder_buffer_item_size_limit_v05():
    max_item_size = 1 << 10
    encoder = MsgpackEncoderV05(max_item_size << 1, max_item_size)

    span = Span(tracer=None, name="test")
    trace = [span]
    encoder.put(trace)
    base_size = encoder.size
    encoder.put(trace)
    trace_size = encoder.size - base_size

    with pytest.raises(BufferItemTooLarge):
        encoder.put([span] * (int(max_item_size / trace_size) + 2))


def test_custom_msgpack_encode_v05():
    encoder = MsgpackEncoderV05(2 << 20, 2 << 20)
    assert encoder.max_size == 2 << 20
    assert encoder.max_item_size == 2 << 20
    trace = [
        Span(tracer=None, name="v05-test", service="foo", resource="GET"),
        Span(tracer=None, name="v05-test", service="foo", resource="POST"),
        Span(tracer=None, name=None, service="bar"),
    ]
    encoder.put(trace)
    assert len(encoder) == 1
    size = encoder.size

    encoded = encoder.flush()
    assert size == len(encoded)

    st, ts = decode(encoded, reconstruct=False)

    def filter_mut(ts):
        # keep only the fields that are stable across encoders
        return [[[s[i] for i in [0, 1, 2, 5, 7, 8, 9, 10, 11]] for s in t] for t in ts]

    assert
transition matrix for an all-ones routing params should be a # (weighted) directed adjacency matrix. We use 3 variants, 2 states. ones_routing_params = automaton_builder.RoutingParams( move=jnp.ones([3, 12, 2, 2]), special=jnp.ones([3, 5, 2, 3])) ones_transition_matrix = builder.build_transition_matrix( ones_routing_params, encoded_graph, graph_meta, ).concatenated_transitions() self.assertEqual(ones_transition_matrix.shape, (3, 4 + 6, 2, 6 * 2 + 3)) # pyformat: disable # pylint: disable=bad-continuation,bad-whitespace,g-inline-comment-too-close expected = np.array([ # a0i0 a0i1 a1i0 a1i1 b0i0 b1i0 specials < next [ #|------|-------|-------|-------|-------|-------| |--------| current V [[ 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1], # ┬ a0 [ 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1]], # | [[ 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1], # | a1 [ 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1]], # | [[ 0, 0, 0, 0, 0, 0, 1, 1,0.5,0.5,1.5,1.5, 1, 1, 1], # | b0 [ 0, 0, 0, 0, 0, 0, 1, 1,0.5,0.5,1.5,1.5, 1, 1, 1]], # | [[ 1, 1, 0, 0, 0, 0, 0, 0,1.5,1.5,0.5,0.5, 1, 1, 1], # | b1 [ 1, 1, 0, 0, 0, 0, 0, 0,1.5,1.5,0.5,0.5, 1, 1, 1]], # ┴ [[ 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1], # ┬ a0i0 [ 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1]], # | [[ 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1], # | a0i1 [ 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1]], # | [[ 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1], # | a1i0 [ 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1]], # | [[ 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1], # | a1i1 [ 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1]], # | [[ 0, 0, 0, 0, 0, 0, 1, 1,0.5,0.5,1.5,1.5, 1, 1, 1], # | b0i0 [ 0, 0, 0, 0, 0, 0, 1, 1,0.5,0.5,1.5,1.5, 1, 1, 1]], # | [[ 1, 1, 0, 0, 0, 0, 0, 0,1.5,1.5,0.5,0.5, 1, 1, 1], # | b0i1 [ 1, 1, 0, 0, 0, 0, 0, 0,1.5,1.5,0.5,0.5, 1, 1, 1]], # ┴ ] ] * 3) # pyformat: enable # pylint: enable=bad-continuation,bad-whitespace,g-inline-comment-too-close np.testing.assert_allclose(ones_transition_matrix, expected) def 
test_transition_sentinel_integers(self): """Test that the transition matrix puts each element in the right place.""" schema, test_graph = self.build_loop_graph() builder = automaton_builder.AutomatonBuilder(schema) encoded_graph, graph_meta = builder.encode_graph(test_graph, as_jax=False) # Apply to some sentinel integers to check correct indexing (with only one # variant and state, since indexing doesn't use those) # Each integer is of the form XYZ where # X = {a:1, b:2}[node_type] # Y = {initial:9, i0:0, i1:1}[in_edge] # Z = {o0:0, o1:1, o2:2, finish:3, backtrack:4, fail:5}[action] sentinel_routing_params = automaton_builder.RoutingParams( move=jnp.array([[100, 101, 110, 111, 190, 191], [200, 201, 202, 290, 291, 292]]).reshape((1, 12, 1, 1)), special=jnp.array([ [103, 104, 105], [113, 114, 115], [193, 194, 195], [203, 204, 205], [293, 294, 295], ]).reshape((1, 5, 1, 3))) range_transition_matrix = builder.build_transition_matrix( sentinel_routing_params, encoded_graph, graph_meta, ).concatenated_transitions() self.assertEqual(range_transition_matrix.shape, (1, 4 + 6, 1, 6 * 1 + 3)) # pyformat: disable # pylint: disable=bad-continuation,bad-whitespace,g-inline-comment-too-close expected = np.array([ # a0i0 a0i1 a1i0 a1i1 b0i0 b1i0 specials < next [ # | | | | | | |---------| current V [[ 0, 0, 191, 0, 0, 190, 193, 194, 195]], # ┬ a0 [[ 0, 190, 0, 0, 191, 0, 193, 194, 195]], # | a1 [[ 0, 0, 0, 290, 292/2, 291+292/2, 293, 294, 295]], # | b0 [[291, 0, 0, 0, 290+292/2, 292/2, 293, 294, 295]], # ┴ b1 [[ 0, 0, 101, 0, 0, 100, 103, 104, 105]], # ┬ a0i0 [[ 0, 0, 111, 0, 0, 110, 113, 114, 115]], # | a0i1 [[ 0, 100, 0, 0, 101, 0, 103, 104, 105]], # | a1i0 [[ 0, 110, 0, 0, 111, 0, 113, 114, 115]], # | a1i1 [[ 0, 0, 0, 200, 202/2, 201+202/2, 203, 204, 205]], # | b0i0 [[201, 0, 0, 0, 200+202/2, 202/2, 203, 204, 205]], # ┴ b0i1 ] ]) # pyformat: enable # pylint: enable=bad-continuation,bad-whitespace,g-inline-comment-too-close np.testing.assert_allclose(range_transition_matrix, 
expected)

  def build_doubly_linked_list_graph(self, length):
    """Helper method to build a doubly-linked-list graph and schema.

    Builds a circular list of `length` nodes of a single node type, where
    each node's "next_out" edge targets its successor and "prev_out" edge
    targets its predecessor (both wrapping around modulo `length`).
    """
    schema = {
        graph_types.NodeType("node"):
            graph_types.NodeSchema(
                in_edges=[
                    graph_types.InEdgeType("next_in"),
                    graph_types.InEdgeType("prev_in"),
                ],
                out_edges=[
                    graph_types.OutEdgeType("next_out"),
                    graph_types.OutEdgeType("prev_out"),
                ])
    }
    graph = {}
    # Node i links forward to (i+1) % length and backward to (i-1) % length.
    for i in range(length):
      graph[graph_types.NodeId(str(i))] = graph_types.GraphNode(
          graph_types.NodeType("node"), {
              graph_types.OutEdgeType("next_out"): [
                  graph_types.InputTaggedNode(
                      node_id=graph_types.NodeId(str((i + 1) % length)),
                      in_edge=graph_types.InEdgeType("prev_in"))
              ],
              graph_types.OutEdgeType("prev_out"): [
                  graph_types.InputTaggedNode(
                      node_id=graph_types.NodeId(str((i - 1) % length)),
                      in_edge=graph_types.InEdgeType("next_in"))
              ]
          })
    return schema, graph

  def test_all_nodes_absorbing_solve(self):
    """Absorbing probabilities should match the hand-constructed automaton."""
    schema, graph = self.build_doubly_linked_list_graph(4)
    builder = automaton_builder.AutomatonBuilder(schema)
    enc_graph, enc_meta = builder.encode_graph(graph)

    # We set up the automaton with 5 variants and 2 states, but only use the
    # first state, to make sure that variants and states are interleaved
    # correctly.
    # Variant 0: move forward
    # Variant 1: move backward
    # Variant 2: finish
    # Variant 3: restart
    # Variant 4: fail
    variant_weights = jnp.array([
        # From node 0, go forward.
        [[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [.7, 0, .3, 0, 0],
         [0, 0, 1, 0, 0]],
        # From node 1, go backward with small failure probabilities.
        [[0, 0.9, 0, 0, 0.1], [0, 0.9, 0, 0, 0.1], [.7, 0, .3, 0, 0],
         [0, 0, 1, 0, 0]],
        # Node 2 bounces around and ultimately accepts on node 0.
        [[0.9, 0, 0.1, 0, 0], [0, 1, 0, 0, 0], [0.5, 0.5, 0, 0, 0],
         [0, 1, 0, 0, 0]],
        # Node 3 immediately accepts, or restarts after 0 or 1 steps.
        [[0, 0, 1, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 1, 0],
         [0, 0.1, 0.8, 0.1, 0]],
    ])
    # Routing params: rows are variants; only the first FSM state is used
    # (remaining state/action slots are zero-padded below).
    routing_params = automaton_builder.RoutingParams(
        move=jnp.pad(
            jnp.array([
                [1., 0., 1., 0., 1., 0.],
                [0., 1., 0., 1., 0., 1.],
                [0., 0., 0., 0., 0., 0.],
                [0., 0., 0., 0., 0., 0.],
                [0., 0., 0., 0., 0., 0.],
            ]).reshape([5, 6, 1, 1]), [(0, 0), (0, 0), (0, 1), (0, 1)]),
        special=jnp.pad(
            jnp.array([
                [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                [[1., 0., 0.], [1., 0., 0.], [1., 0., 0.]],
                [[0., 1., 0.], [0., 1., 0.], [0., 1., 0.]],
                [[0., 0., 1.], [0., 0., 1.], [0., 0., 1.]],
            ]).reshape([5, 3, 1, 3]), [(0, 0), (0, 0), (0, 1), (0, 0)]))
    tmat = builder.build_transition_matrix(routing_params, enc_graph, enc_meta)

    # Absorbing probs follow the paths described above.
    # Note that when starting at node 3, with probability 0.2 the automaton
    # tries to backtrack, but with probability 0.2 * 0.01 backtracking fails
    # (as specified by backtrack_fails_prob) and thus the total absorbing
    # probability is 0.8 / (0.8 + 0.2 * 0.01) = 0.997506
    expected_absorbing_probs = jnp.array([
        [0, 0, 0.3, 0.7],
        [0, 0, 0, 0.81],
        [1, 0, 0, 0],
        [0, 0, 0, 0.997506],
    ])
    absorbing_probs = automaton_builder.all_nodes_absorbing_solve(
        builder,
        tmat,
        variant_weights,
        jnp.pad(jnp.ones([4, 1]), [(0, 0), (0, 1)]),
        steps=1000,
        backtrack_fails_prob=0.01)

    np.testing.assert_allclose(
        absorbing_probs, expected_absorbing_probs, atol=1E-5, rtol=1E-5)

  def test_all_nodes_absorbing_solve_explicit_conv(self):
    """Explicit-conv solve must match the default path in values and VJPs."""
    schema, graph = self.build_doubly_linked_list_graph(4)
    builder = automaton_builder.AutomatonBuilder(schema)
    enc_graph, enc_meta = builder.encode_graph(graph)

    # Random (but reproducible) weights; dirichlet keeps rows normalized.
    variant_weights = jax.random.dirichlet(
        jax.random.PRNGKey(0), jnp.ones((4, 4, 5)))
    routing_params = builder.initialize_routing_params(
        jax.random.PRNGKey(1), num_fsm_states=3, num_variants=5)
    start_states = jax.random.dirichlet(jax.random.PRNGKey(0), jnp.ones((4, 3)))

    # Confirm that the explicit conv doesn't change results.
    def go(routing_params, variant_weights, start_states, explicit_conv=True):
      tmat = builder.build_transition_matrix(routing_params, enc_graph,
                                             enc_meta)
      return automaton_builder.all_nodes_absorbing_solve(
          builder,
          tmat,
          variant_weights,
          start_states,
          steps=1000,
          backtrack_fails_prob=0.01,
          explicit_conv=explicit_conv)

    # Compare both primal outputs and cotangents pulled back through vjp.
    vals, vjpfun = jax.vjp(go, routing_params, variant_weights, start_states)
    unopt_vals, unopt_vjpfun = jax.vjp(
        functools.partial(go, explicit_conv=False), routing_params,
        variant_weights, start_states)

    assert_allclose = functools.partial(
        np.testing.assert_allclose, atol=1E-5, rtol=1E-5)
    assert_allclose(vals, unopt_vals)

    some_cotangent = jax.random.normal(jax.random.PRNGKey(0), vals.shape)
    jax.tree_util.tree_map(assert_allclose, vjpfun(some_cotangent),
                           unopt_vjpfun(some_cotangent))

  def test_unroll_and_aggregate(self):
    schema, graph = self.build_doubly_linked_list_graph(4)
    builder = automaton_builder.AutomatonBuilder(schema)
    enc_graph,
<filename>caql/caql_agent.py
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tensorflow Model for CAQL."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import random

import numpy as np
import tensorflow.compat.v1 as tf

from caql import caql_network


class CaqlAgent(object):
  """CAQL Agent."""

  def __init__(self, session, state_spec, action_spec, discount_factor,
               hidden_layers, learning_rate, learning_rate_action,
               learning_rate_ga, action_maximization_iterations, tau_copy,
               clipped_target_flag, hard_update_steps, batch_size,
               l2_loss_flag, simple_lambda_flag, dual_filter_clustering_flag,
               solver, dual_q_label, initial_lambda, tolerance_min_max):
    """Creates CAQL agent.

    Args:
      session: TF session.
      state_spec: tf_agents.specs.array_spec.ArraySpec. Specification for
        state.
      action_spec: tf_agents.specs.array_spec.ArraySpec. Specification for
        action.
      discount_factor: float on discounting factor.
      hidden_layers: list of integers. Number of hidden units for each hidden
        layer.
      learning_rate: float on Q function learning rate.
      learning_rate_action: float on action function learning rate.
      learning_rate_ga: float. Learning rate for gradient ascent optimizer.
      action_maximization_iterations: int on CEM/gradient ascent iterations.
      tau_copy: float on portion to copy train net to target net.
      clipped_target_flag: bool. Enable clipped double DQN when True.
      hard_update_steps: Number of gradient steps for hard-updating a target
        network.
      batch_size: int on batch size for training.
      l2_loss_flag: bool on using l2 loss.
      simple_lambda_flag: bool on using lambda hinge loss.
      dual_filter_clustering_flag: bool on using dual filter and clustering.
      solver: string on inner max optimizer. Supported optimizers are
        "gradient_ascent", "cross_entropy", "ails", "mip".
      dual_q_label: bool on using dual max-Q label for action function
        training. If False, use primal max-Q label.
      initial_lambda: float on initial lambda.
      tolerance_min_max: list of float. First is the minimum value and the
        second is the maximum value for the tolerance of a maxQ solver.
    """
    # Both specs must describe flat (rank-1) state/action vectors.
    assert len(state_spec.shape) == 1
    assert len(action_spec.shape) == 1
    assert len(tolerance_min_max) == 2
    self._session = session
    self.state_spec = state_spec
    self.action_spec = action_spec
    self.discount_factor = discount_factor
    self.learning_rate = learning_rate
    self.learning_rate_action = learning_rate_action
    self.action_maximization_iterations = action_maximization_iterations
    self._clipped_target_flag = clipped_target_flag
    self._hard_update_steps = hard_update_steps
    self.batch_size = batch_size
    self.l2_loss_flag = l2_loss_flag
    self.simple_lambda_flag = simple_lambda_flag
    self.dual_filter_clustering_flag = dual_filter_clustering_flag
    self.solver = solver
    self._dual_q_label = dual_q_label
    self.initial_lambda = initial_lambda
    self._tolerance_min = tolerance_min_max[0]
    self._tolerance_max = tolerance_min_max[1]

    # Target network used to compute TD targets; soft-updated from "train".
    self.target_network = caql_network.CaqlNet(
        self._session,
        state_spec=self.state_spec,
        action_spec=self.action_spec,
        hidden_layers=hidden_layers,
        learning_rate=self.learning_rate,
        learning_rate_action=self.learning_rate_action,
        learning_rate_ga=learning_rate_ga,
        batch_size=self.batch_size,
        action_maximization_iterations=self.action_maximization_iterations,
        name="target",
        l2_loss_flag=self.l2_loss_flag,
        simple_lambda_flag=self.simple_lambda_flag,
        initial_lambda=self.initial_lambda)

    if self._clipped_target_flag:
      # Second target network for clipped double DQN; hard-updated.
      self.target_network2 = caql_network.CaqlNet(
          self._session,
          state_spec=self.state_spec,
          action_spec=self.action_spec,
          hidden_layers=hidden_layers,
          learning_rate=self.learning_rate,
          learning_rate_action=self.learning_rate_action,
          learning_rate_ga=learning_rate_ga,
          batch_size=self.batch_size,
          action_maximization_iterations=self.action_maximization_iterations,
          name="target2",
          l2_loss_flag=self.l2_loss_flag,
          simple_lambda_flag=self.simple_lambda_flag,
          initial_lambda=self.initial_lambda)

    # Online network; the only one that gets a max-Q solver attached.
    self.train_network = caql_network.CaqlNet(
        self._session,
        state_spec=self.state_spec,
        action_spec=self.action_spec,
        hidden_layers=hidden_layers,
        learning_rate=self.learning_rate,
        learning_rate_action=self.learning_rate_action,
        learning_rate_ga=learning_rate_ga,
        batch_size=self.batch_size,
        action_maximization_iterations=self.action_maximization_iterations,
        name="train",
        l2_loss_flag=self.l2_loss_flag,
        simple_lambda_flag=self.simple_lambda_flag,
        solver=self.solver,
        initial_lambda=self.initial_lambda)

    self._copy_var_ops = self._get_copy_var_ops(
        tau_copy, dest_scope_name="target", src_scope_name="train")
    if self._clipped_target_flag:
      self._hard_copy_var_ops = self._get_hard_copy_var_ops(
          dest_scope_name="target2", src_scope_name="train")

  def initialize(self, saver, checkpoint_dir=None):
    """Initialize network or load from checkpoint.

    Args:
      saver: TF saver.
      checkpoint_dir: string. Directory path where checkpoint files are saved.

    Returns:
      integer. The initial global step value.
    """
    init_variables = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope="train") + tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, scope="target")
    if (saver and checkpoint_dir and tf.gfile.Exists(checkpoint_dir)):
      if tf.gfile.IsDirectory(checkpoint_dir):
        checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
      else:
        checkpoint_file = checkpoint_dir
      if checkpoint_file:
        # NOTE(review): print() does not interpolate "%s"; the format string
        # and the path are printed as two values -- logging-style call was
        # probably intended. Harmless, so left unchanged here.
        print("Loading model weights from checkpoint %s", checkpoint_file)
        saver.restore(self._session, checkpoint_file)
      else:
        self._session.run(tf.initializers.variables(init_variables))
    else:
      self._session.run(tf.initializers.variables(init_variables))
    init_step_tensor = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope="train_global_step")[0]
    init_step = tf.train.global_step(self._session, init_step_tensor)
    # Make target (and target2, if any) consistent with the restored/initial
    # train weights before any training step runs.
    self.update_target_network()
    if self._clipped_target_flag:
      self.update_target_network2()
    return init_step

  def update_target_network(self):
    # Soft (tau) copy: train -> target.
    self._session.run(self._copy_var_ops)

  def update_target_network2(self):
    # Hard copy: train -> target2 (clipped double DQN only).
    assert self._clipped_target_flag
    self._session.run(self._hard_copy_var_ops)

  def _get_copy_var_ops(self, tau_copy, dest_scope_name, src_scope_name,
                        names_to_copy=None):
    """Creates TF ops that copy weights from `src_scope` to `dest_scope`."""
    # Copy variables src_scope to dest_scope
    op_holder = []
    if names_to_copy is None:
      # copy all variables
      src_vars = tf.get_collection(
          tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name)
      dest_vars = tf.get_collection(
          tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name)
    else:
      src_vars = []
      dest_vars = []
      for name in names_to_copy:
        src_scope_name_now = "_".join([src_scope_name, name])
        dest_scope_name_now = "_".join([dest_scope_name, name])
        src_vars += tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name_now)
        dest_vars += tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name_now)
    if tau_copy < 1.0:
      # Polyak averaging: dest <- (1 - tau) * dest + tau * src.
      for src_var, dest_var in zip(src_vars, dest_vars):
        op_holder.append(
            dest_var.assign((1 - tau_copy) * dest_var.value() +
                            tau_copy * src_var.value()))
    else:
      # tau >= 1.0 degenerates to a hard copy.
      for src_var, dest_var in zip(src_vars, dest_vars):
        op_holder.append(dest_var.assign(src_var.value()))
    return op_holder

  def _get_hard_copy_var_ops(self, dest_scope_name, src_scope_name):
    """Creates TF ops that copy weights from `src_scope` to `dest_scope`."""
    assert self._clipped_target_flag
    op_holder = []
    src_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name)
    dest_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name)
    for src_var, dest_var in zip(src_vars, dest_vars):
      op_holder.append(dest_var.assign(src_var.value()))
    return op_holder

  @staticmethod
  def compute_cluster_masks(states, mask, eps_approx_ratio=0.01):
    """Farthest-first (FF) traversal clustering algorithm.

    Args:
      states: np.ndarray of floats. State input.
      mask: 1-D np.ndarray of bools. A mask indicating the states that are
        candidates for centroids. Centroids are selected among them. The
        length of this array should be same as the length of the first
        dimension of `states` (i.e., batch size).
      eps_approx_ratio: float. The "radius" of a cluster.

    Returns:
      (np.ndarray, np.ndarray, list). The first array is the boolean mask
      w.r.t. states that are cluster centers. Second array is the boolean
      mask w.r.t. states that are non-clusters. Third element is a list of
      dictionary of information (element index and centroid) for
      non-clusters.
    """
    # NOTE(review): min(sum(mask), max(1, int(sum(mask)))) == sum(mask)
    # whenever any candidate exists, so the cluster count is only ever
    # limited by the eps_approx early exit below. A ratio factor may have
    # been intended here -- confirm against the original design.
    max_num_clusters = min([sum(mask), max([1, int(sum(mask))])])
    eps_approx = eps_approx_ratio * np.mean(np.linalg.norm(states, axis=1))
    cluster_mask = np.array([False] * len(states))
    noncluster_mask = np.array([False] * len(states))
    if sum(mask) > 0:
      # Candidate pool: (index, state) pairs for the masked rows.
      noncluster_states = [
          (ind, ele) for ind, ele in enumerate(states) if mask[ind]
      ]
      cluster_states = []
      # now compute the states for clusters using FF-traversal algorithm
      # FF-Traversal Algorithm:
      # 1. Pick C = {x}, for an arbitrary point x
      init_state = random.choice(noncluster_states)
      cluster_states.append(init_state)
      # 2. Repeat until C has k centers:
      noncluster_states.remove(init_state)

      def distance_fn(inp, c_list):
        # Nearest centroid in c_list to the point inp; inp/centroids are
        # (index, state) tuples, hence the [1] indexing.
        distances = [np.linalg.norm(inp[1] - element[1]) for element in c_list]
        min_index = np.argmin(distances)
        return {"centroid": c_list[min_index], "distance": distances[min_index]}

      while len(cluster_states) < max_num_clusters and noncluster_states:
        # Let y maximize d(y, C), where d(y, C) = min_(x in C) d(x, y)
        # C = C U {y}
        distance = [
            distance_fn(state, cluster_states)["distance"]
            for state in noncluster_states
        ]
        if max(distance) < eps_approx:
          # for all the remaining nonclusters, there's an eps-close cluster
          break
        # update cluster set and remaining set
        state_with_max_distance = noncluster_states[np.argmax(distance)]
        cluster_states.append(state_with_max_distance)
        noncluster_states.remove(state_with_max_distance)

      # return 1) Flags that are cluster/non-cluster,
      cluster_mask[np.array([ele[0] for ele in cluster_states])] = True
      if noncluster_states:
        noncluster_mask[np.array([ele[0] for ele in noncluster_states])] = True
      # 2) cluster_info, a list of dict of nearest centroid and distance
      cluster_info = [{
          "non_cluster_index": ele[0],
          "centroid": distance_fn(ele, cluster_states)["centroid"]
      } for ele in noncluster_states]
    else:
      cluster_info = None
    return cluster_mask, noncluster_mask, cluster_info

  def _compute_extrapolated_target(self, states, actions, cluster_info):
    """Extrapolate target next value by first-order Taylor series."""
    state_deviation = []
    centroid_actions = []
    centroid_states = []
    for ele in cluster_info:
      # Deviation of each non-cluster state from its nearest centroid state;
      # centroid entries are (index, state) tuples.
      state_deviation.append(states[ele["non_cluster_index"]] -
                             ele["centroid"][1])
      centroid_actions.append(actions[ele["centroid"][0]])
      centroid_states.append(ele["centroid"][1])
    return self.target_network.predict_state_perturbed_q_function(
        centroid_states, centroid_actions, state_deviation)

  def _compute_tolerance(self, states, actions, next_states, rewards, dones,
                         tolerance_init=None, tolerance_decay=None):
    """Compute the (dynamic) tolerance for a max-Q solver."""
    if tolerance_init is None:
      tolerance_init = self._tolerance_min
    if tolerance_decay is not None:
      # Scale the tolerance by the current TD-RMSE and clamp it to
      # [_tolerance_min, _tolerance_max].
      target_next_values_tolerance = self.target_network.predict_q_function(
          next_states, self.train_network.predict_action_function(next_states))
      td_rmse_tolerance = self.train_network.compute_td_rmse(
          states, actions, target_next_values_tolerance, rewards, dones,
          self.discount_factor)
      tolerance = tolerance_init * td_rmse_tolerance * tolerance_decay
      return min([max([tolerance, self._tolerance_min]), self._tolerance_max])
    else:
      return tolerance_init

  def train_q_function_network(self, batch, tolerance_init, tolerance_decay,
                               warmstart=True, tf_summary_vals=None):
    """Train Q function network.

    Args:
      batch: list of states, actions, rewards, next_states, dones,
        unused_infos.
      tolerance_init: float on initial tolerance.
      tolerance_decay: float on current tolerance decay.
      warmstart: bool on warmstarting flag.
      tf_summary_vals: list to store tf.Summary.Value objects.

    Returns:
      (float, float, float, dict, float, float) The first element is the loss
      of q_function, second element is TD target, third element is loss of
      lambda update (only active for hinge loss), fourth element is a dict
      containing the batch of states and Q labels for training action
      function, fifth element is the portion of active data after dual
      filter, and sixth element is the portion of active data after dual
      filter and clustering.
    """
    [states, actions, rewards, next_states, dones, unused_infos] = zip(*batch)

    if self.solver == "dual":
      # dual methods for approximating target_next_values, but that isn't DDQN!
      target_next_values = np.reshape(
          self.target_network.compute_dual_maxq_label(next_states),
          (len(states),))
      # portion_active_data: data portion for maxq after dual filter
      portion_active_data = 0.0
      # portion_active_data_and_cluster: data portion for maxq after dual
      # filter and cluster
      portion_active_data_and_cluster = 0.0
    else:
      assert self.solver in ["gradient_ascent", "cross_entropy", "ails", "mip"]
      # Trick 1: Find dual objective and
<reponame>graeme-winter/dials<filename>command_line/combine_experiments.py
import logging
import os
import random
import sys

import dxtbx.model.compare as compare
import xfel.clustering.cluster
from dxtbx.command_line.image_average import splitit
from dxtbx.model.experiment_list import (
    BeamComparison,
    DetectorComparison,
    Experiment,
    ExperimentList,
    GoniometerComparison,
)
from libtbx.phil import parse
from scitbx import matrix
from xfel.clustering.cluster_groups import unit_cell_info

import dials.util
from dials.algorithms.integration.stills_significance_filter import SignificanceFilter
from dials.array_family import flex
from dials.util import tabulate
from dials.util.options import OptionParser, flatten_experiments

logger = logging.getLogger(__name__)

help_message = """
Utility script to combine multiple reflections and experiments files into
one multi-experiment reflections and one experiments file. Experiments are
matched to reflections in the order they are provided as input.

Reference models can be chosen from any of the input experiments files. These
will replace all other models of that type in the output experiments file.
This is useful, for example, for combining mutiple experiments that should
differ only in their crystal models. No checks are made to ensure that a
reference model is a good replacement model.

Although only one reference model of each type is allowed, more complex
combinations of experiments can be created by repeat runs.

Examples::

  dials.combine_experiments experiments_0.expt experiments_1.expt \\
    reflections_0.refl reflections_1.refl \\
    reference_from_experiment.beam=0 \\
    reference_from_experiment.detector=0
"""

# The phil scope
phil_scope = parse(
    """
  reference_from_experiment{
    beam = None
      .help = "Take beam model from this experiment to overwrite all other"
              "beam models in the combined experiments"
      .type = int(value_min=0)

    scan = None
      .help = "Take scan model from this experiment to overwrite all other"
              "scan models in the combined experiments"
      .type = int(value_min=0)

    crystal = None
      .help = "Take crystal model from this experiment to overwrite all"
              "other crystal models in the combined experiments"
      .type = int(value_min=0)

    goniometer = None
      .help = "Take goniometer model from this experiment to overwrite all"
              "other goniometer models in the combined experiments"
      .type = int(value_min=0)

    detector = None
      .help = "Take detector model from this experiment to overwrite all"
              "other detector models in the combined experiments"
      .type = int(value_min=0)

    average_detector = False
      .help = "Create an average detector model from all the input detector"
              "models and use it as the reference. Not compatible with"
              "reference_from_experiment.detector"
      .type = bool

    compare_models = True
      .help = "Whether to compare a model with the reference model before"
              "replacing it. If the comparison falls outside the tolerance,"
              "the combination will not be allowed. Disable comparison to force"
              "overwriting of models with the reference"
      .type = bool

    average_hierarchy_level = None
      .help = "For hierarchical detectors, optionally provide a single level"
              "to do averaging at."
      .type = int(value_min=0)

    include scope dials.util.options.tolerance_phil_scope
  }

  clustering {
    use = False
      .type = bool
      .help = "Separate experiments into subsets using the clustering"
              "toolkit. One json per cluster will be saved."

    dendrogram = False
      .type = bool
      .help = "Display dendrogram of the clustering results. Should not"
              "be used with parallel processing."

    threshold = 1000
      .type = int
      .help = "Threshold used in the dendrogram to separate into clusters."

    max_clusters = None
      .type = int
      .help = "Maximum number of clusters to save as jsons."

    max_crystals = None
      .type = int
      .help = "Maximum number of crystals to cluster."

    exclude_single_crystal_clusters = True
      .type = bool
      .help = "Don't produce a 'cluster' containing only one crystal."
  }

  output {
    experiments_filename = combined.expt
      .type = str
      .help = "The filename for combined experimental models"

    reflections_filename = combined.refl
      .type = str
      .help = "The filename for combined reflections"

    n_subset = None
      .type = int
      .help = "If not None, keep a subset of size n_subset when"
              "saving the combined experiments"

    n_subset_method = *random n_refl significance_filter
      .type = choice
      .help = "Algorithm to be used for choosing the n_subset images/"
              "experiments for refinement. n_refl chooses the set with the"
              "largest numbers of reflections listed in the pickle files"
              "significance filter used to select n_subset images based on"
              "I/sig(I) cutoff"

    n_refl_panel_list = None
      .type = ints
      .help = "If n_subset_method is n_refl, specify which panels to search"
              "on."

    max_batch_size = None
      .type = int
      .expert_level = 2
      .help = "If not None, split the resultant combined set of experiments"
              "into seperate files, each at most max_batch_size number of"
              "experiments. Example, if there were 5500 experiments and"
              "max_batch_size is 1000, 6 experiment lists will be created,"
              "of sizes 917, 917, 917, 917, 916, 916"

    delete_shoeboxes = False
      .type = bool
      .expert_level = 2
      .help = "If true, delete shoeboxes from reflection tables while comb-"
              "ining them to save on memory."

    min_reflections_per_experiment = None
      .type = int
      .expert_level = 2
      .help = "If not None, throw out any experiment with fewer than this"
              "many reflections"

    max_reflections_per_experiment = None
      .type = int
      .expert_level = 2
      .help = "If not None, throw out any experiment with more than this"
              "many reflections"

    include scope dials.algorithms.integration.stills_significance_filter.phil_scope
  }
""",
    process_includes=True,
)


def find_experiment_in(experiment, all_experiments):
    """Search the phil experiment list and find where an experiment came from.

    :param Experiment experiment: The experiment to search for
    :param all_experiments: The list of all experiments from phil
    :type all_experiments: list[dials.util.phil.FilenameDataWrapper[ExperimentList]]
    :returns:  The filename and experiment ID
    :rtype:    (str, int)
    """
    for source in all_experiments:
        try:
            experiment_list = list(source.data)
            index = experiment_list.index(experiment)
            return (source.filename, index)
        except ValueError:
            # .index() raises ValueError when not present; try the next source
            pass
    raise ValueError("Experiment not found")


class ComparisonError(Exception):
    """Exception to indicate problem with tolerance comparisons"""

    pass


class CombineWithReference:
    """Callable that rebuilds an Experiment, substituting reference models.

    For each model type (beam, detector, goniometer, scan, crystal) a
    reference model, if supplied, replaces the experiment's own model --
    optionally after a tolerance comparison that raises ComparisonError on
    mismatch.
    """

    def __init__(
        self,
        beam=None,
        goniometer=None,
        scan=None,
        crystal=None,
        detector=None,
        params=None,
    ):

        self.ref_beam = beam
        self.ref_goniometer = goniometer
        self.ref_scan = scan
        self.ref_crystal = crystal
        self.ref_detector = detector
        self.tolerance = None
        # Cache of the most recent imageset so identical imagesets are shared
        # between consecutive experiments rather than duplicated.
        self._last_imageset = None
        if params:
            if params.reference_from_experiment.compare_models:
                self.tolerance = params.reference_from_experiment.tolerance
            self.average_detector = params.reference_from_experiment.average_detector
        else:
            self.average_detector = False

    def __call__(self, experiment):
        """Return a new Experiment with reference models swapped in."""
        if self.tolerance:
            compare_beam = BeamComparison(
                wavelength_tolerance=self.tolerance.beam.wavelength,
                direction_tolerance=self.tolerance.beam.direction,
                polarization_normal_tolerance=self.tolerance.beam.polarization_normal,
                polarization_fraction_tolerance=self.tolerance.beam.polarization_fraction,
            )
            compare_detector = DetectorComparison(
                fast_axis_tolerance=self.tolerance.detector.fast_axis,
                slow_axis_tolerance=self.tolerance.detector.slow_axis,
                origin_tolerance=self.tolerance.detector.origin,
            )
            compare_goniometer = GoniometerComparison(
                rotation_axis_tolerance=self.tolerance.goniometer.rotation_axis,
                fixed_rotation_tolerance=self.tolerance.goniometer.fixed_rotation,
                setting_rotation_tolerance=self.tolerance.goniometer.setting_rotation,
            )
        else:
            # compare_models disabled: force replacement without checks.
            compare_beam = None
            compare_detector = None
            compare_goniometer = None

        if self.ref_beam:
            if compare_beam:
                if not compare_beam(self.ref_beam, experiment.beam):
                    raise ComparisonError(
                        compare.beam_diff(
                            self.ref_beam,
                            experiment.beam,
                            wavelength_tolerance=self.tolerance.beam.wavelength,
                            direction_tolerance=self.tolerance.beam.direction,
                            polarization_normal_tolerance=self.tolerance.beam.polarization_normal,
                            polarization_fraction_tolerance=self.tolerance.beam.polarization_fraction,
                        )
                    )
            beam = self.ref_beam
        else:
            beam = experiment.beam

        if self.ref_detector and self.average_detector:
            # Averaged detectors are used as-is, without comparison.
            detector = self.ref_detector
        elif self.ref_detector and not self.average_detector:
            if compare_detector:
                if not compare_detector(self.ref_detector, experiment.detector):
                    raise ComparisonError(
                        compare.detector_diff(
                            self.ref_detector,
                            experiment.detector,
                            fast_axis_tolerance=self.tolerance.detector.fast_axis,
                            slow_axis_tolerance=self.tolerance.detector.slow_axis,
                            origin_tolerance=self.tolerance.detector.origin,
                        )
                    )
            detector = self.ref_detector
        else:
            detector = experiment.detector

        if self.ref_goniometer:
            if compare_goniometer:
                if not compare_goniometer(self.ref_goniometer, experiment.goniometer):
                    raise ComparisonError(
                        compare.goniometer_diff(
                            self.ref_goniometer,
                            experiment.goniometer,
                            rotation_axis_tolerance=self.tolerance.goniometer.rotation_axis,
                            fixed_rotation_tolerance=self.tolerance.goniometer.fixed_rotation,
                            setting_rotation_tolerance=self.tolerance.goniometer.setting_rotation,
                        )
                    )
            goniometer = self.ref_goniometer
        else:
            goniometer = experiment.goniometer

        if self.ref_scan:
            scan = self.ref_scan
        else:
            scan = experiment.scan

        if self.ref_crystal:
            crystal = self.ref_crystal
        else:
            crystal = experiment.crystal

        if self._last_imageset == experiment.imageset:
            imageset = self._last_imageset
        else:
            imageset = experiment.imageset
            self._last_imageset = imageset

        return Experiment(
            identifier=experiment.identifier,
            beam=beam,
            detector=detector,
            scan=scan,
            goniometer=goniometer,
            crystal=crystal,
            imageset=imageset,
        )


class Cluster:
    """Unit-cell clustering of experiments via the xfel clustering toolkit."""

    def __init__(
        self, experiments, reflections, dendrogram=False, threshold=1000, n_max=None
    ):
        if dendrogram:
            import matplotlib.pyplot as plt

            axes = plt.gca()
        else:
            axes = None
        ucs = xfel.clustering.cluster.Cluster.from_expts(
            refl_table=reflections, expts_list=experiments, n_images=n_max
        )
        self.clusters, _ = ucs.ab_cluster(
            threshold=threshold,
            log=True,  # log scale
            ax=axes,
            write_file_lists=False,
            schnell=False,
            doplot=dendrogram,
        )
        print(unit_cell_info(self.clusters))
        # Map cluster number (parsed from the cluster name) to its members.
        self.clustered_frames = {
            int(c.cname.split("_")[1]): c.members for c in self.clusters
        }
        if dendrogram:
            plt.tight_layout()
            plt.show()


class Script:
    """Command-line driver for dials.combine_experiments."""

    def __init__(self):
        """Initialise the script."""
        # The script usage
        usage = (
            "usage: dials.combine_experiments [options] [param.phil] "
            "experiments1.expt experiments2.expt reflections1.refl "
            "reflections2.refl..."
        )

        # Create the parser
        self.parser = OptionParser(
            usage=usage,
            phil=phil_scope,
            read_reflections=True,
            read_experiments=True,
            check_format=False,
            epilog=help_message,
        )

    def run(self, args=None):
        """Execute the script."""
        params, options = self.parser.parse_args(args, show_diff_phil=True)
        self.run_with_preparsed(params, options)

    def run_with_preparsed(self, params, options):
        """Run combine_experiments, but allow passing in of parameters"""
        # Try to load the models and data
        if not params.input.experiments:
            print("No Experiments found in the input")
            self.parser.print_help()
            return
        if not params.input.reflections:
            print("No reflection data found in the input")
            self.parser.print_help()
            return
        if len(params.input.reflections) != len(params.input.experiments):
            sys.exit(
                "The number of input reflections files does not match the "
                "number of input experiments"
            )

        flat_exps = flatten_experiments(params.input.experiments)

        # Resolve reference-model experiment IDs to the actual models.
        ref_beam = params.reference_from_experiment.beam
        ref_goniometer = params.reference_from_experiment.goniometer
        ref_scan = params.reference_from_experiment.scan
        ref_crystal = params.reference_from_experiment.crystal
        ref_detector = params.reference_from_experiment.detector

        if ref_beam is not None:
            try:
                ref_beam = flat_exps[ref_beam].beam
            except IndexError:
                sys.exit(f"{ref_beam} is not a valid experiment ID")

        if ref_goniometer is not None:
            try:
                ref_goniometer = flat_exps[ref_goniometer].goniometer
            except IndexError:
                sys.exit(f"{ref_goniometer} is not a valid experiment ID")

        if ref_scan is not None:
            try:
                ref_scan = flat_exps[ref_scan].scan
            except IndexError:
                sys.exit(f"{ref_scan} is not a valid experiment ID")

        if ref_crystal is not None:
            try:
                ref_crystal = flat_exps[ref_crystal].crystal
a sentence
# * *Pattern matching*: looking for specific syntactical patterns
# * *Third-party models*: using a pre-trained model (usually a model for a different task than the one at hand)
# * *Distant supervision*: using an external knowledge base
# * *Crowdworker labels*: treating each crowdworker as a black-box function that assigns labels to subsets of the data

# %% [markdown]
# ### Recommended practice for LF development

# %% [markdown]
# Typical LF development cycles include multiple iterations of ideation, refining, evaluation, and debugging.
# A typical cycle consists of the following steps:
#
# 1. Look at examples to generate ideas for LFs
# 1. Write an initial version of an LF
# 1. Spot check its performance by looking at its output on data points in the training set (or development set if available)
# 1. Refine and debug to improve coverage or accuracy as necessary
#
# Our goal for LF development is to create a high quality set of training labels for our unlabeled dataset,
# not to label everything or directly create a model for inference using the LFs.
# The training labels are used to train a separate discriminative model (in this case, one which just uses the comment text) in order to generalize to new, unseen data points.
# Using this model, we can make predictions for data points that our LFs don't cover.
#
# We'll walk through the development of two LFs using basic analysis tools in Snorkel, then provide a full set of LFs that we developed for this tutorial.

# %% [markdown]
# ### a) Exploring the development set for initial ideas

# %% [markdown]
# We'll start by looking at 20 random data points from the `train` set to generate some ideas for LFs.

# %%
df_train[["author", "text", "video"]].sample(20, random_state=2)

# %% [markdown]
# One dominant pattern in the comments that look like spam is the use of the phrase "check out" (e.g. "check out my channel").
# Let's start with that.
# %% [markdown] # ### b) Writing an LF to identify spammy comments that use the phrase "check out" # %% [markdown] # Labeling functions in Snorkel are created with the # [`@labeling_function` decorator](https://snorkel.readthedocs.io/en/master/packages/_autosummary/labeling/snorkel.labeling.labeling_function.html). # The [decorator](https://realpython.com/primer-on-python-decorators/) can be applied to _any Python function_ that returns a label for a single data point. # # Let's start developing an LF to catch instances of commenters trying to get people to "check out" their channel, video, or website. # We'll start by just looking for the exact string `"check out"` in the text, and see how that compares to looking for just `"check"` in the text. # For the two versions of our rule, we'll write a Python function over a single data point that express it, then add the decorator. # %% from snorkel.labeling import labeling_function @labeling_function() def check(x): return SPAM if "check" in x.text.lower() else ABSTAIN @labeling_function() def check_out(x): return SPAM if "check out" in x.text.lower() else ABSTAIN # %% [markdown] # To apply one or more LFs that we've written to a collection of data points, we use an # [`LFApplier`](https://snorkel.readthedocs.io/en/master/packages/_autosummary/labeling/snorkel.labeling.LFApplier.html). # Because our data points are represented with a Pandas DataFrame in this tutorial, we use the # [`PandasLFApplier`](https://snorkel.readthedocs.io/en/master/packages/_autosummary/labeling/snorkel.labeling.PandasLFApplier.html). # Correspondingly, a single data point `x` that's passed into our LFs will be a [Pandas `Series` object](https://pandas.pydata.org/pandas-docs/stable/reference/series.html). # # It's important to note that these LFs will work for any object with an attribute named `text`, not just Pandas objects. 
# Snorkel has several other appliers for different data point collection types which you can browse in the [API documentation](https://snorkel.readthedocs.io/en/master/packages/labeling.html). # # The output of the `apply(...)` method is a ***label matrix***, a fundamental concept in Snorkel. # It's a NumPy array `L` with one column for each LF and one row for each data point, where `L[i, j]` is the label that the `j`th labeling function output for the `i`th data point. # We'll create one label matrix for the `train` set and one for the `dev` set. # %% {"tags": ["md-exclude-output"]} from snorkel.labeling import PandasLFApplier lfs = [check_out, check] applier = PandasLFApplier(lfs=lfs) L_train = applier.apply(df=df_train) L_dev = applier.apply(df=df_dev) # %% L_train # %% [markdown] # ### c) Evaluate performance on training and development sets # %% [markdown] # We can easily calculate the coverage of these LFs (i.e., the percentage of the dataset that they label) as follows: # %% coverage_check_out, coverage_check = (L_train != ABSTAIN).mean(axis=0) print(f"check_out coverage: {coverage_check_out * 100:.1f}%") print(f"check coverage: {coverage_check * 100:.1f}%") # %% [markdown] # Lots of statistics about labeling functions &mdash; like coverage &mdash; are useful when building any Snorkel application. # So Snorkel provides tooling for common LF analyses using the # [`LFAnalysis` utility](https://snorkel.readthedocs.io/en/master/packages/_autosummary/labeling/snorkel.labeling.LFAnalysis.html). 
# We report the following summary statistics for multiple LFs at once: # # * **Polarity**: The set of unique labels this LF outputs (excluding abstains) # * **Coverage**: The fraction of the dataset the LF labels # * **Overlaps**: The fraction of the dataset where this LF and at least one other LF label # * **Conflicts**: The fraction of the dataset where this LF and at least one other LF label and disagree # * **Correct**: The number of data points this LF labels correctly (if gold labels are provided) # * **Incorrect**: The number of data points this LF labels incorrectly (if gold labels are provided) # * **Empirical Accuracy**: The empirical accuracy of this LF (if gold labels are provided) # # For *Correct*, *Incorrect*, and *Empirical Accuracy*, we don't want to penalize the LF for data points where it abstained. # We calculate these statistics only over those data points where the LF output a label. # Since we have labels for the `dev` set but not the `train` set, we'll compute these statistics for the `dev` set only by supplying `Y_dev`. # %% from snorkel.labeling import LFAnalysis LFAnalysis(L=L_train, lfs=lfs).lf_summary() # %% LFAnalysis(L=L_dev, lfs=lfs).lf_summary(Y=Y_dev) # %% [markdown] # So even these very simple rules do quite well! # We might want to pick the `check` rule, since both have high precision and `check` has higher coverage. # But let's look at our data to be sure. # # The helper method `get_label_buckets(...)` groups data points by their predicted label and true label. # For example, we can find the indices of data points that the LF labeled `SPAM` that actually belong to class `HAM`. # This may give ideas for where the LF could be made more specific. # %% from snorkel.analysis import get_label_buckets buckets = get_label_buckets(Y_dev, L_dev[:, 1]) df_dev.iloc[buckets[(HAM, SPAM)]] # %% [markdown] # There's only one row here because `check` produced only one false positive on the `dev` set. 
# Now let's take a look at 10 random `train` set data points where `check` labeled `SPAM` to see if it matches our intuition or if we can identify some false positives. # %% df_train.iloc[L_train[:, 1] == SPAM].sample(10, random_state=1) # %% [markdown] # No clear false positives here, but many look like they could be labeled by `check_out` as well. # Let's see 10 data points where `check_out` abstained, but `check` labeled. # %% buckets = get_label_buckets(L_train[:, 0], L_train[:, 1]) df_train.iloc[buckets[(ABSTAIN, SPAM)]].sample(10, random_state=1) # %% [markdown] # Most of these seem like small modifications of "check out", like "check me out" or "check it out". # Can we get the best of both worlds? # %% [markdown] # ### d) Balance accuracy and coverage # %% [markdown] # Let's see if we can use regular expressions to account for modifications of "check out" and get the coverage of `check` plus the accuracy of `check_out`. # %% import re @labeling_function() def regex_check_out(x): return SPAM if re.search(r"check.*out", x.text, flags=re.I) else ABSTAIN # %% [markdown] # Again, let's generate our label matrices and see how we do. # %% {"tags": ["md-exclude-output"]} lfs = [check_out, check, regex_check_out] applier = PandasLFApplier(lfs=lfs) L_train = applier.apply(df=df_train) L_dev = applier.apply(df=df_dev) # %% LFAnalysis(L=L_train, lfs=lfs).lf_summary() # %% LFAnalysis(L_dev, lfs).lf_summary(Y=Y_dev) # %% [markdown] # We've split the difference in `train` set coverage, and increased our accuracy on the `dev` set to 100%! # This looks promising. # Let's verify that we corrected our false positive from before. # %% buckets = get_label_buckets(L_dev[:, 1], L_dev[:, 2]) df_dev.iloc[buckets[(SPAM, ABSTAIN)]] # %% [markdown] # To understand the coverage difference between `check` and `regex_check_out`, let's take a look at 10 data points from the `train` set. # Remember: coverage isn't always good. # Adding false positives will increase coverage. 
# %% buckets = get_label_buckets(L_train[:, 1], L_train[:, 2]) df_train.iloc[buckets[(SPAM, ABSTAIN)]].sample(10, random_state=1) # %% [markdown] # Most of these are SPAM, but a good number are false positives. # **To keep precision high (while not sacrificing much in
separate path components regardless of the separator character for this platform.
    This method will perform the necessary conversion.
    """
    converted_path = convert_path(path)
    try:
        os.makedirs(converted_path)
    except OSError, error:
        # An already-existing directory is not an error; anything else is re-raised.
        if error.errno == errno.EEXIST and os.path.isdir(converted_path):
            pass
        else:
            raise


def make_path(parent_directory, path):
    """Returns the full path created by joining path to parent_directory.

    This method is a convenience function because it allows path to use forward slashes
    to separate path components rather than the platform's separator character.

    @param parent_directory: The parent directory.  This argument must use the system's
        separator character.  This may be None if path is relative to the current working directory.
    @param path: The path to add to parent_directory.  This should use forward slashes as the
        separator character, regardless of the platform's character.

    @return: The path created by joining the two using the system's separator character.
    """
    # Fast path: on POSIX the forward-slash form is already a native path.
    if parent_directory is None and os.path.sep == '/':
        return path

    if parent_directory is None:
        result = ''
    elif path.startswith('/'):
        # An absolute path ignores parent_directory entirely.
        result = ''
    else:
        result = parent_directory

    # Join component by component so os.path.join inserts the native separator.
    for path_part in path.split('/'):
        if len(path_part) > 0:
            result = os.path.join(result, path_part)

    return result


def convert_path(path):
    """Converts the forward slashes in path to the platform's separator and returns the value.

    @param path: The path to convert.  This should use forward slashes as the separator
        character, regardless of the platform's character.

    @return: The path created by converting the forward slashes to the platform's separator.
    """
    return make_path(None, path)


def make_soft_link(source, link_path):
    """Creates a soft link at link_path to source.

    @param source: The path that the link will point to.  This should use a forward slash
        as the separator, regardless of the platform's separator.
    @param link_path: The path where the link will be created.  This should use a forward slash
        as the separator, regardless of the platform's separator.
    """
    # NOTE(review): os.symlink is POSIX-only on this Python version — presumably this
    # tool never runs on Windows; confirm.
    os.symlink(convert_path(source), convert_path(link_path))


def glob_files(path):
    """Returns the paths that match the specified path glob (based on current working directory).

    @param path: The path with glob wildcard characters to match.  This should use a forward
        slash as the separator, regardless of the platform's separator.

    @return: The list of matched paths.
    """
    return glob.glob(convert_path(path))


def recursively_delete_dirs_by_name(*dir_names):
    """Deletes any directories that are in the current working directory or any of its children
    whose file names match the specified regular expressions.

    This will recursively examine all children of the current working directory.  If a
    directory is found that needs to be deleted, all of it and its children are deleted.

    @param dir_names: A variable number of strings containing regular expressions that should
        match the file names of the directories that should be deleted.
    """
    # Compile the strings into actual regular expression match objects.
    matchers = []
    for dir_name in dir_names:
        matchers.append(re.compile(dir_name))

    # Walk down the file tree, top down, allowing us to prune directories as we go.
    for root, dirs, files in os.walk('.'):
        # The list of directories at the current level to delete.
        to_remove = []

        # Examine all directories at this level, see if any get a match.
        # Note: matcher.match anchors at the start of the directory name only.
        for dir_path in dirs:
            remove_it = False
            for matcher in matchers:
                if matcher.match(dir_path):
                    remove_it = True
            if remove_it:
                to_remove.append(dir_path)

        # Go back and delete it.  Also, remove it from dirs so that we don't try to walk down it.
        for remove_dir_path in to_remove:
            shutil.rmtree(os.path.join(root, remove_dir_path))
            dirs.remove(remove_dir_path)


def recursively_delete_files_by_name(*file_names):
    """Deletes any files that are in the current working directory or any of its children
    whose file names match the specified regular expressions.

    This will recursively examine all children of the current working directory.

    @param file_names: A variable number of strings containing regular expressions that should
        match the file names of the files that should be deleted.
    """
    # Compile the strings into actual regular expression match objects.
    matchers = []
    for file_name in file_names:
        matchers.append(re.compile(file_name))

    # Walk down the current directory.
    for root, dirs, files in os.walk('.'):
        # See if any of the files at this level match any of the matchers.
        for file_path in files:
            remove_it = False
            for matcher in matchers:
                if matcher.match(file_path):
                    remove_it = True
            # Delete it if it did match.
            if remove_it:
                os.unlink(os.path.join(root, file_path))


def cat_files(file_paths, destination):
    """Concatenates the contents of the specified files and writes it to a new file at destination.

    @param file_paths: A list of paths for the files that should be read.  The concatenating
        will be done in the same order as the list.
    @param destination: The path of the file to write the contents to.
    """
    dest_fp = open(destination, 'w')
    for file_path in file_paths:
        in_fp = open(file_path, 'r')
        for line in in_fp:
            dest_fp.write(line)
        in_fp.close()
    dest_fp.close()


def write_to_file(string_value, file_path):
    """Writes the specified string to a new file.

    This removes trailing newlines, etc, to avoid adding an extra blank line.

    @param string_value: The value to write to the file.
    @param file_path: The path of the file to write to.
    """
    dest_fp = open(file_path, 'w')
    dest_fp.write(string_value.rstrip())
    # Always terminate the file with the platform's line separator.
    dest_fp.write(os.linesep)
    dest_fp.close()


def parse_date(date_str):
    """Parses a date time string of the format MMM DD, YYYY HH:MM +ZZZZ and returns seconds past epoch.

    Example of the format is:  Oct 10, 2014 17:00 -0700

    @param date_str: A string containing the date and time in the format described above.

    @return: The number of seconds past epoch.

    @raise ValueError: if there is a parsing problem.
    """
    # For some reason, it was hard to parse this particular format with the existing Python libraries,
    # especially when the timezone was not the same as the local time zone.  So, we have to do this the
    # sort of hard way.
    #
    # It is a little annoying that strptime only takes 'Sep' for September and not 'Sept' which is more
    # common in US-eng, so we cheat here and just swap it out.
    adjusted = date_str.replace('Sept', 'Sep')

    # Find the timezone string at the end of the string.
    if re.search('[\-+]\d\d\d\d$', adjusted) is None:
        raise ValueError('Value \'%s\' does not meet required time format of \'MMM DD, YYYY HH:MM +ZZZZ\' (or '
                         'as an example, \' \'Oct 10, 2014 17:00 -0700\'' % date_str)

    # Use the existing Python string parsing calls to just parse the time and date.  We will handle the
    # timezone separately.  The last 6 characters (" -0700") are stripped off before parsing.
    try:
        base_time = time.mktime(time.strptime(adjusted[0:-6], '%b %d, %Y %H:%M'))
    except ValueError:
        raise ValueError('Value \'%s\' does not meet required time format of \'MMM DD, YYYY HH:MM +ZZZZ\' (or '
                         'as an example, \' \'Oct 10, 2014 17:00 -0700\'' % date_str)

    # Since mktime assumes the time is in localtime, and we might have a different time zone
    # in tz_str, we must manually add in the difference.
    # First, convert -0700 to seconds: the first two digits after the sign are the number of hours
    # and the last two are the number of minutes.
    tz_str = adjusted[-5:]
    tz_offset_secs = int(tz_str[1:3]) * 3600 + int(tz_str[3:5]) * 60
    if tz_str.startswith('-'):
        tz_offset_secs *= -1

    # Determine the offset for the local timezone.
    # NOTE(review): time.daylight reports whether the local zone *defines* DST, not whether DST was
    # in effect on the parsed date — presumably acceptable for this tool's inputs; confirm.
    if time.daylight:
        local_offset_secs = -1 * time.altzone
    else:
        local_offset_secs = -1 * time.timezone

    base_time += local_offset_secs - tz_offset_secs
    return base_time


# TODO: This code is shared with config_main.py.  We should move this into a common
# utility location both commands can import it from.
def run_command(command_str, exit_on_fail=True, command_name=None):
    """Executes the specified command string returning the exit status.

    @param command_str: The command to execute.
    @param exit_on_fail: If True, will exit this process with a non-zero status if the command fails.
    @param command_name: The name to use to identify the command in error output.

    @return: The exit status of the command.
    """
    # We have to use a temporary file to hold the output to stdout and stderr.
    # NOTE(review): tempfile.mktemp is race-prone (name can be claimed before open);
    # tempfile.mkstemp would be safer — confirm before changing shared code.
    output_file = tempfile.mktemp()
    output_fp = open(output_file, 'w')

    try:
        # shell=True: command_str is interpreted by the shell — callers must not pass untrusted input.
        return_code = subprocess.call(command_str, stdin=None, stderr=output_fp, stdout=output_fp, shell=True)
        output_fp.flush()

        # Read the output back into a string.  We cannot use a cStringIO.StringIO buffer directly above with
        # subprocess.call because that method expects fileno support which StringIO doesn't support.
        output_buffer = StringIO()
        input_fp = open(output_file, 'r')
        for line in input_fp:
            output_buffer.write(line)
        input_fp.close()

        output_str = output_buffer.getvalue()
        output_buffer.close()

        if return_code != 0:
            if
self.under_test.veracrypt_post_processor) # THEN expected = [ ('notify_secondary_progress()', 0.126), ('notify_file()', 'Done: 0.126% Speed: 3.7 MiB/s Left: 36 minutes', False), ('notify_secondary_progress()', 0.504), ('notify_file()', 'Done: 0.504% Speed: 14 MiB/s Left: 9 minutes', False), ('notify_secondary_progress()', 0.882), ('notify_file()', 'Done: 0.882% Speed: 24 MiB/s Left: 5 minutes', False), ('notify_secondary_progress()', 1.272), ('notify_file()', 'Done: 1.272% Speed: 33 MiB/s Left: 3 minutes', False), ('notify_secondary_progress()', 1.64), ('notify_file()', 'Done: 1.640% Speed: 41 MiB/s Left: 3 minutes', False), ('notify_secondary_progress()', 2.009), ('notify_file()', 'Done: 2.009% Speed: 49 MiB/s Left: 2 minutes', False), ('notify_secondary_progress()', 2.38), ('notify_file()', 'Done: 2.380% Speed: 57 MiB/s Left: 2 minutes', False), ('notify_secondary_progress()', 2.755), ('notify_file()', 'Done: 2.755% Speed: 64 MiB/s Left: 2 minutes', False), ('notify_secondary_progress()', 3.075), ('notify_file()', 'Done: 3.075% Speed: 69 MiB/s Left: 113 s', False), ('notify_secondary_progress()', 3.363), ('notify_file()', 'Done: 3.363% Speed: 74 MiB/s Left: 106 s', False), ('notify_secondary_progress()', 3.615), ('notify_file()', 'Done: 3.615% Speed: 77 MiB/s Left: 101 s', False), ('notify_secondary_progress()', 3.95), ('notify_file()', 'Done: 3.950% Speed: 82 MiB/s Left: 94 s', False), ('notify_secondary_progress()', 4.291), ('notify_file()', 'Done: 4.291% Speed: 87 MiB/s Left: 89 s', False), ('notify_secondary_progress()', 4.632), ('notify_file()', 'Done: 4.632% Speed: 91 MiB/s Left: 84 s', False), ('notify_secondary_progress()', 4.991), ('notify_file()', 'Done: 4.991% Speed: 96 MiB/s Left: 80 s', False), ('notify_secondary_progress()', 5.329), ('notify_file()', 'Done: 5.329% Speed: 100 MiB/s Left: 76 s', False), ('notify_secondary_progress()', 5.688), ('notify_file()', 'Done: 5.688% Speed: 105 MiB/s Left: 73 s', False), ('notify_secondary_progress()', 6.032), 
('notify_file()', 'Done: 6.032% Speed: 108 MiB/s Left: 70 s', False), ('notify_secondary_progress()', 6.358), ('notify_file()', 'Done: 6.358% Speed: 112 MiB/s Left: 67 s', False), ('notify_secondary_progress()', 6.708), ('notify_file()', 'Done: 6.708% Speed: 115 MiB/s Left: 65 s', False), ('notify_secondary_progress()', 7.074), ('notify_file()', 'Done: 7.074% Speed: 119 MiB/s Left: 63 s', False), ('notify_secondary_progress()', 7.408), ('notify_file()', 'Done: 7.408% Speed: 122 MiB/s Left: 61 s', False), ('notify_secondary_progress()', 7.762), ('notify_file()', 'Done: 7.762% Speed: 125 MiB/s Left: 59 s', False), ('notify_secondary_progress()', 8.102), ('notify_file()', 'Done: 8.102% Speed: 128 MiB/s Left: 58 s', False), ('notify_secondary_progress()', 8.287), ('notify_file()', 'Done: 8.287% Speed: 129 MiB/s Left: 57 s', False), ('notify_secondary_progress()', 8.606), ('notify_file()', 'Done: 8.606% Speed: 131 MiB/s Left: 56 s', False), ('notify_secondary_progress()', 8.944), ('notify_file()', 'Done: 8.944% Speed: 134 MiB/s Left: 55 s', False), ('notify_secondary_progress()', 9.236), ('notify_file()', 'Done: 9.236% Speed: 135 MiB/s Left: 54 s', False), ('notify_secondary_progress()', 9.564), ('notify_file()', 'Done: 9.564% Speed: 138 MiB/s Left: 53 s', False), ('notify_secondary_progress()', 9.902), ('notify_file()', 'Done: 9.902% Speed: 140 MiB/s Left: 52 s', False), ('notify_secondary_progress()', 10.213), ('notify_file()', 'Done: 10.213% Speed: 142 MiB/s Left: 51 s', False), ('notify_secondary_progress()', 10.514), ('notify_file()', 'Done: 10.514% Speed: 144 MiB/s Left: 50 s', False), ('notify_secondary_progress()', 10.851), ('notify_file()', 'Done: 10.851% Speed: 146 MiB/s Left: 49 s', False), ('notify_secondary_progress()', 11.202), ('notify_file()', 'Done: 11.202% Speed: 148 MiB/s Left: 48 s', False), ('notify_secondary_progress()', 11.561), ('notify_file()', 'Done: 11.561% Speed: 150 MiB/s Left: 47 s', False), ('notify_secondary_progress()', 11.899), 
('notify_file()', 'Done: 11.899% Speed: 152 MiB/s Left: 46 s', False), ('notify_secondary_progress()', 12.24), ('notify_file()', 'Done: 12.240% Speed: 154 MiB/s Left: 46 s', False), ('notify_secondary_progress()', 12.578), ('notify_file()', 'Done: 12.578% Speed: 156 MiB/s Left: 45 s', False), ('notify_secondary_progress()', 12.872), ('notify_file()', 'Done: 12.872% Speed: 157 MiB/s Left: 44 s', False), ('notify_secondary_progress()', 13.244), ('notify_file()', 'Done: 13.244% Speed: 159 MiB/s Left: 44 s', False), ('notify_secondary_progress()', 13.579), ('notify_file()', 'Done: 13.579% Speed: 161 MiB/s Left: 43 s', False), ('notify_secondary_progress()', 13.944), ('notify_file()', 'Done: 13.944% Speed: 163 MiB/s Left: 42 s', False), ('notify_secondary_progress()', 14.288), ('notify_file()', 'Done: 14.288% Speed: 165 MiB/s Left: 42 s', False), ('notify_secondary_progress()', 14.642), ('notify_file()', 'Done: 14.642% Speed: 166 MiB/s Left: 41 s', False), ('notify_secondary_progress()', 15.004), ('notify_file()', 'Done: 15.004% Speed: 168 MiB/s Left: 41 s', False), ('notify_secondary_progress()', 15.345), ('notify_file()', 'Done: 15.345% Speed: 169 MiB/s Left: 40 s', False), ('notify_secondary_progress()', 15.698), ('notify_file()', 'Done: 15.698% Speed: 171 MiB/s Left: 39 s', False), ('notify_secondary_progress()', 16.018), ('notify_file()', 'Done: 16.018% Speed: 172 MiB/s Left: 39 s', False), ('notify_secondary_progress()', 16.374), ('notify_file()', 'Done: 16.374% Speed: 174 MiB/s Left: 39 s', False), ('notify_secondary_progress()', 16.706), ('notify_file()', 'Done: 16.706% Speed: 175 MiB/s Left: 38 s', False), ('notify_secondary_progress()', 17.068), ('notify_file()', 'Done: 17.068% Speed: 176 MiB/s Left: 38 s', False), ('notify_secondary_progress()', 17.406), ('notify_file()', 'Done: 17.406% Speed: 178 MiB/s Left: 37 s', False), ('notify_secondary_progress()', 17.765), ('notify_file()', 'Done: 17.765% Speed: 179 MiB/s Left: 37 s', False), 
('notify_secondary_progress()', 18.109), ('notify_file()', 'Done: 18.109% Speed: 180 MiB/s Left: 36 s', False), ('notify_secondary_progress()', 18.469), ('notify_file()', 'Done: 18.469% Speed: 182 MiB/s Left: 36 s', False), ('notify_secondary_progress()', 18.819), ('notify_file()', 'Done: 18.819% Speed: 183 MiB/s Left: 36 s', False), ('notify_secondary_progress()', 19.163), ('notify_file()', 'Done: 19.163% Speed: 184 MiB/s Left: 35 s', False), ('notify_secondary_progress()', 19.494), ('notify_file()', 'Done: 19.494% Speed: 185 MiB/s Left: 35 s', False), ('notify_secondary_progress()', 19.851), ('notify_file()', 'Done: 19.851% Speed: 186 MiB/s Left: 34 s', False), ('notify_secondary_progress()', 19.974), ('notify_file()', 'Done: 19.974% Speed: 185 MiB/s Left: 35 s', False), ('notify_secondary_progress()', 20.259), ('notify_file()', 'Done: 20.259% Speed: 186 MiB/s Left: 34 s', False), ('notify_secondary_progress()', 20.637), ('notify_file()', 'Done: 20.637% Speed: 187 MiB/s Left: 34 s', False), ('notify_secondary_progress()', 20.984), ('notify_file()', 'Done: 20.984% Speed: 188 MiB/s Left: 34 s', False), ('notify_secondary_progress()', 21.331), ('notify_file()', 'Done: 21.331% Speed: 189 MiB/s Left: 33 s', False), ('notify_secondary_progress()', 21.687), ('notify_file()', 'Done: 21.687% Speed: 190 MiB/s Left: 33 s', False), ('notify_secondary_progress()', 22.013), ('notify_file()', 'Done: 22.013% Speed: 191 MiB/s Left: 33 s', False), ('notify_secondary_progress()', 22.317), ('notify_file()', 'Done: 22.317% Speed: 191 MiB/s Left: 32 s', False), ('notify_secondary_progress()', 22.501), ('notify_file()', 'Done: 22.501% Speed: 191 MiB/s Left: 32 s', False), ('notify_secondary_progress()', 22.772), ('notify_file()', 'Done: 22.772% Speed: 191 MiB/s Left: 32 s', False), ('notify_secondary_progress()', 23.11), ('notify_file()', 'Done: 23.110% Speed: 192 MiB/s Left: 32 s', False), ('notify_secondary_progress()', 23.438), ('notify_file()', 'Done: 23.438% Speed: 193 MiB/s Left: 
32 s', False), ('notify_secondary_progress()', 23.782), ('notify_file()', 'Done: 23.782% Speed: 194 MiB/s Left: 31 s', False), ('notify_secondary_progress()', 24.12), ('notify_file()', 'Done: 24.120% Speed: 194 MiB/s Left: 31 s', False), ('notify_secondary_progress()', 24.464), ('notify_file()', 'Done: 24.464% Speed: 195 MiB/s Left: 31 s', False), ('notify_secondary_progress()', 24.762), ('notify_file()', 'Done: 24.762% Speed: 196 MiB/s Left: 31 s', False), ('notify_secondary_progress()', 25.06), ('notify_file()', 'Done: 25.060% Speed: 196 MiB/s Left: 31 s', False), ('notify_secondary_progress()', 25.244), ('notify_file()', 'Done: 25.244% Speed: 196 MiB/s Left: 31 s', False), ('notify_secondary_progress()', 25.422), ('notify_file()', 'Done: 25.422% Speed: 195 MiB/s Left: 31 s', False), ('notify_secondary_progress()', 25.671), ('notify_file()', 'Done: 25.671% Speed: 195 MiB/s Left: 30 s', False), ('notify_secondary_progress()', 25.929), ('notify_file()', 'Done: 25.929% Speed: 195 MiB/s Left: 30 s', False), ('notify_secondary_progress()', 26.267), ('notify_file()', 'Done: 26.267% Speed: 196 MiB/s Left: 30 s', False), ('notify_secondary_progress()', 26.553), ('notify_file()', 'Done: 26.553% Speed: 196 MiB/s Left: 30 s', False), ('notify_secondary_progress()', 26.731), ('notify_file()', 'Done: 26.731% Speed: 196 MiB/s Left: 30 s', False), ('notify_secondary_progress()', 27.047), ('notify_file()', 'Done: 27.047% Speed: 196 MiB/s Left: 30 s', False), ('notify_secondary_progress()', 27.41), ('notify_file()', 'Done: 27.410% Speed: 197 MiB/s Left: 29 s', False), ('notify_secondary_progress()', 27.766), ('notify_file()', 'Done: 27.766% Speed: 198 MiB/s Left: 29 s', False), ('notify_secondary_progress()', 28.11), ('notify_file()', 'Done: 28.110% Speed: 199 MiB/s Left: 29 s', False), ('notify_secondary_progress()', 28.448), ('notify_file()', 'Done: 28.448% Speed: 199 MiB/s Left: 29 s', False), ('notify_secondary_progress()', 28.795), ('notify_file()', 'Done: 28.795% Speed: 200 
MiB/s Left: 28 s', False), ('notify_secondary_progress()', 29.139), ('notify_file()', 'Done: 29.139% Speed: 200 MiB/s Left: 28 s', False), ('notify_secondary_progress()', 29.474), ('notify_file()', 'Done: 29.474% Speed: 201 MiB/s Left: 28 s', False), ('notify_secondary_progress()', 29.799), ('notify_file()', 'Done: 29.799% Speed: 202 MiB/s Left: 28 s', False), ('notify_secondary_progress()', 30.106), ('notify_file()', 'Done: 30.106% Speed: 202 MiB/s Left: 28 s', False), ('notify_secondary_progress()', 30.466), ('notify_file()', 'Done: 30.466% Speed: 203 MiB/s Left: 27 s', False), ('notify_secondary_progress()', 30.816), ('notify_file()', 'Done: 30.816% Speed: 203 MiB/s Left: 27 s', False), ('notify_secondary_progress()', 31.154), ('notify_file()', 'Done: 31.154% Speed: 204 MiB/s Left: 27 s', False), ('notify_secondary_progress()', 31.449), ('notify_file()', 'Done: 31.449% Speed: 204 MiB/s Left: 27 s', False), ('notify_secondary_progress()', 31.759), ('notify_file()', 'Done: 31.759% Speed: 205 MiB/s Left: 27 s', False), ('notify_secondary_progress()', 31.937), ('notify_file()', 'Done: 31.937% Speed: 204 MiB/s Left: 27 s', False), ('notify_secondary_progress()', 32.235), ('notify_file()', 'Done: 32.235% Speed: 204 MiB/s Left: 26 s', False), ('notify_secondary_progress()', 32.582), ('notify_file()', 'Done: 32.582% Speed: 205 MiB/s Left: 26 s', False), ('notify_secondary_progress()', 32.917), ('notify_file()', 'Done: 32.917% Speed: 205 MiB/s Left: 26 s', False), ('notify_secondary_progress()', 33.255), ('notify_file()', 'Done: 33.255% Speed: 206 MiB/s Left: 26 s', False), ('notify_secondary_progress()', 33.608), ('notify_file()', 'Done: 33.608% Speed: 207 MiB/s Left: 26 s', False), ('notify_secondary_progress()', 33.946), ('notify_file()', 'Done: 33.946% Speed: 207 MiB/s Left: 25 s', False), ('notify_secondary_progress()', 34.296), ('notify_file()', 'Done: 34.296% Speed: 208 MiB/s Left: 25 s', False), ('notify_secondary_progress()', 34.631),
<reponame>paran0ids0ul/binjitsu from pwnlib.constants.constant import Constant __NR_read = Constant('__NR_read',0) __NR_write = Constant('__NR_write',1) __NR_open = Constant('__NR_open',2) __NR_close = Constant('__NR_close',3) __NR_stat = Constant('__NR_stat',4) __NR_fstat = Constant('__NR_fstat',5) __NR_lstat = Constant('__NR_lstat',6) __NR_poll = Constant('__NR_poll',7) __NR_lseek = Constant('__NR_lseek',8) __NR_mmap = Constant('__NR_mmap',9) __NR_mprotect = Constant('__NR_mprotect',10) __NR_munmap = Constant('__NR_munmap',11) __NR_brk = Constant('__NR_brk',12) __NR_rt_sigaction = Constant('__NR_rt_sigaction',13) __NR_rt_sigprocmask = Constant('__NR_rt_sigprocmask',14) __NR_rt_sigreturn = Constant('__NR_rt_sigreturn',15) __NR_ioctl = Constant('__NR_ioctl',16) __NR_pread = Constant('__NR_pread',17) __NR_pwrite = Constant('__NR_pwrite',18) __NR_readv = Constant('__NR_readv',19) __NR_writev = Constant('__NR_writev',20) __NR_access = Constant('__NR_access',21) __NR_pipe = Constant('__NR_pipe',22) __NR_select = Constant('__NR_select',23) __NR_sched_yield = Constant('__NR_sched_yield',24) __NR_mremap = Constant('__NR_mremap',25) __NR_msync = Constant('__NR_msync',26) __NR_mincore = Constant('__NR_mincore',27) __NR_madvise = Constant('__NR_madvise',28) __NR_shmget = Constant('__NR_shmget',29) __NR_shmat = Constant('__NR_shmat',30) __NR_shmctl = Constant('__NR_shmctl',31) __NR_dup = Constant('__NR_dup',32) __NR_dup2 = Constant('__NR_dup2',33) __NR_pause = Constant('__NR_pause',34) __NR_nanosleep = Constant('__NR_nanosleep',35) __NR_getitimer = Constant('__NR_getitimer',36) __NR_alarm = Constant('__NR_alarm',37) __NR_setitimer = Constant('__NR_setitimer',38) __NR_getpid = Constant('__NR_getpid',39) __NR_sendfile = Constant('__NR_sendfile',40) __NR_socket = Constant('__NR_socket',41) __NR_connect = Constant('__NR_connect',42) __NR_accept = Constant('__NR_accept',43) __NR_sendto = Constant('__NR_sendto',44) __NR_recvfrom = Constant('__NR_recvfrom',45) __NR_sendmsg = 
Constant('__NR_sendmsg',46) __NR_recvmsg = Constant('__NR_recvmsg',47) __NR_shutdown = Constant('__NR_shutdown',48) __NR_bind = Constant('__NR_bind',49) __NR_listen = Constant('__NR_listen',50) __NR_getsockname = Constant('__NR_getsockname',51) __NR_getpeername = Constant('__NR_getpeername',52) __NR_socketpair = Constant('__NR_socketpair',53) __NR_setsockopt = Constant('__NR_setsockopt',54) __NR_getsockopt = Constant('__NR_getsockopt',55) __NR_clone = Constant('__NR_clone',56) __NR_fork = Constant('__NR_fork',57) __NR_vfork = Constant('__NR_vfork',58) __NR_execve = Constant('__NR_execve',59) __NR_exit = Constant('__NR_exit',60) __NR_wait4 = Constant('__NR_wait4',61) __NR_kill = Constant('__NR_kill',62) __NR_uname = Constant('__NR_uname',63) __NR_semget = Constant('__NR_semget',64) __NR_semop = Constant('__NR_semop',65) __NR_semctl = Constant('__NR_semctl',66) __NR_shmdt = Constant('__NR_shmdt',67) __NR_msgget = Constant('__NR_msgget',68) __NR_msgsnd = Constant('__NR_msgsnd',69) __NR_msgrcv = Constant('__NR_msgrcv',70) __NR_msgctl = Constant('__NR_msgctl',71) __NR_fcntl = Constant('__NR_fcntl',72) __NR_flock = Constant('__NR_flock',73) __NR_fsync = Constant('__NR_fsync',74) __NR_fdatasync = Constant('__NR_fdatasync',75) __NR_truncate = Constant('__NR_truncate',76) __NR_ftruncate = Constant('__NR_ftruncate',77) __NR_getdents = Constant('__NR_getdents',78) __NR_getcwd = Constant('__NR_getcwd',79) __NR_chdir = Constant('__NR_chdir',80) __NR_fchdir = Constant('__NR_fchdir',81) __NR_rename = Constant('__NR_rename',82) __NR_mkdir = Constant('__NR_mkdir',83) __NR_rmdir = Constant('__NR_rmdir',84) __NR_creat = Constant('__NR_creat',85) __NR_link = Constant('__NR_link',86) __NR_unlink = Constant('__NR_unlink',87) __NR_symlink = Constant('__NR_symlink',88) __NR_readlink = Constant('__NR_readlink',89) __NR_chmod = Constant('__NR_chmod',90) __NR_fchmod = Constant('__NR_fchmod',91) __NR_chown = Constant('__NR_chown',92) __NR_fchown = Constant('__NR_fchown',93) __NR_lchown = 
Constant('__NR_lchown',94) __NR_umask = Constant('__NR_umask',95) __NR_gettimeofday = Constant('__NR_gettimeofday',96) __NR_getrlimit = Constant('__NR_getrlimit',97) __NR_getrusage = Constant('__NR_getrusage',98) __NR_sysinfo = Constant('__NR_sysinfo',99) __NR_times = Constant('__NR_times',100) __NR_ptrace = Constant('__NR_ptrace',101) __NR_getuid = Constant('__NR_getuid',102) __NR_syslog = Constant('__NR_syslog',103) __NR_getgid = Constant('__NR_getgid',104) __NR_setuid = Constant('__NR_setuid',105) __NR_setgid = Constant('__NR_setgid',106) __NR_geteuid = Constant('__NR_geteuid',107) __NR_getegid = Constant('__NR_getegid',108) __NR_setpgid = Constant('__NR_setpgid',109) __NR_getppid = Constant('__NR_getppid',110) __NR_getpgrp = Constant('__NR_getpgrp',111) __NR_setsid = Constant('__NR_setsid',112) __NR_setreuid = Constant('__NR_setreuid',113) __NR_setregid = Constant('__NR_setregid',114) __NR_getgroups = Constant('__NR_getgroups',115) __NR_setgroups = Constant('__NR_setgroups',116) __NR_setresuid = Constant('__NR_setresuid',117) __NR_getresuid = Constant('__NR_getresuid',118) __NR_setresgid = Constant('__NR_setresgid',119) __NR_getresgid = Constant('__NR_getresgid',120) __NR_getpgid = Constant('__NR_getpgid',121) __NR_setfsuid = Constant('__NR_setfsuid',122) __NR_setfsgid = Constant('__NR_setfsgid',123) __NR_getsid = Constant('__NR_getsid',124) __NR_capget = Constant('__NR_capget',125) __NR_capset = Constant('__NR_capset',126) __NR_rt_sigpending = Constant('__NR_rt_sigpending',127) __NR_rt_sigtimedwait = Constant('__NR_rt_sigtimedwait',128) __NR_rt_sigqueueinfo = Constant('__NR_rt_sigqueueinfo',129) __NR_rt_sigsuspend = Constant('__NR_rt_sigsuspend',130) __NR_sigaltstack = Constant('__NR_sigaltstack',131) __NR_utime = Constant('__NR_utime',132) __NR_mknod = Constant('__NR_mknod',133) __NR_uselib = Constant('__NR_uselib',134) __NR_personality = Constant('__NR_personality',135) __NR_ustat = Constant('__NR_ustat',136) __NR_statfs = Constant('__NR_statfs',137) 
__NR_fstatfs = Constant('__NR_fstatfs',138) __NR_sysfs = Constant('__NR_sysfs',139) __NR_getpriority = Constant('__NR_getpriority',140) __NR_setpriority = Constant('__NR_setpriority',141) __NR_sched_setparam = Constant('__NR_sched_setparam',142) __NR_sched_getparam = Constant('__NR_sched_getparam',143) __NR_sched_setscheduler = Constant('__NR_sched_setscheduler',144) __NR_sched_getscheduler = Constant('__NR_sched_getscheduler',145) __NR_sched_get_priority_max = Constant('__NR_sched_get_priority_max',146) __NR_sched_get_priority_min = Constant('__NR_sched_get_priority_min',147) __NR_sched_rr_get_interval = Constant('__NR_sched_rr_get_interval',148) __NR_mlock = Constant('__NR_mlock',149) __NR_munlock = Constant('__NR_munlock',150) __NR_mlockall = Constant('__NR_mlockall',151) __NR_munlockall = Constant('__NR_munlockall',152) __NR_vhangup = Constant('__NR_vhangup',153) __NR_modify_ldt = Constant('__NR_modify_ldt',154) __NR_pivot_root = Constant('__NR_pivot_root',155) __NR__sysctl = Constant('__NR__sysctl',156) __NR_prctl = Constant('__NR_prctl',157) __NR_arch_prctl = Constant('__NR_arch_prctl',158) __NR_adjtimex = Constant('__NR_adjtimex',159) __NR_setrlimit = Constant('__NR_setrlimit',160) __NR_chroot = Constant('__NR_chroot',161) __NR_sync = Constant('__NR_sync',162) __NR_acct = Constant('__NR_acct',163) __NR_settimeofday = Constant('__NR_settimeofday',164) __NR_mount = Constant('__NR_mount',165) __NR_umount2 = Constant('__NR_umount2',166) __NR_swapon = Constant('__NR_swapon',167) __NR_swapoff = Constant('__NR_swapoff',168) __NR_reboot = Constant('__NR_reboot',169) __NR_sethostname = Constant('__NR_sethostname',170) __NR_setdomainname = Constant('__NR_setdomainname',171) __NR_iopl = Constant('__NR_iopl',172) __NR_ioperm = Constant('__NR_ioperm',173) __NR_create_module = Constant('__NR_create_module',174) __NR_init_module = Constant('__NR_init_module',175) __NR_delete_module = Constant('__NR_delete_module',176) __NR_get_kernel_syms = 
Constant('__NR_get_kernel_syms',177) __NR_query_module = Constant('__NR_query_module',178) __NR_quotactl = Constant('__NR_quotactl',179) __NR_nfsservctl = Constant('__NR_nfsservctl',180) __NR_getpmsg = Constant('__NR_getpmsg',181) __NR_putpmsg = Constant('__NR_putpmsg',182) __NR_afs_syscall = Constant('__NR_afs_syscall',183) __NR_tuxcall = Constant('__NR_tuxcall',184) __NR_security = Constant('__NR_security',185) __NR_gettid = Constant('__NR_gettid',186) __NR_readahead = Constant('__NR_readahead',187) __NR_setxattr = Constant('__NR_setxattr',188) __NR_lsetxattr = Constant('__NR_lsetxattr',189) __NR_fsetxattr = Constant('__NR_fsetxattr',190) __NR_getxattr = Constant('__NR_getxattr',191) __NR_lgetxattr = Constant('__NR_lgetxattr',192) __NR_fgetxattr = Constant('__NR_fgetxattr',193) __NR_listxattr = Constant('__NR_listxattr',194) __NR_llistxattr = Constant('__NR_llistxattr',195) __NR_flistxattr = Constant('__NR_flistxattr',196) __NR_removexattr = Constant('__NR_removexattr',197) __NR_lremovexattr = Constant('__NR_lremovexattr',198) __NR_fremovexattr = Constant('__NR_fremovexattr',199) __NR_tkill = Constant('__NR_tkill',200) __NR_time = Constant('__NR_time',201) __NR_futex = Constant('__NR_futex',202) __NR_sched_setaffinity = Constant('__NR_sched_setaffinity',203) __NR_sched_getaffinity = Constant('__NR_sched_getaffinity',204) __NR_set_thread_area = Constant('__NR_set_thread_area',205) __NR_io_setup = Constant('__NR_io_setup',206) __NR_io_destroy = Constant('__NR_io_destroy',207) __NR_io_getevents = Constant('__NR_io_getevents',208) __NR_io_submit = Constant('__NR_io_submit',209) __NR_io_cancel = Constant('__NR_io_cancel',210) __NR_get_thread_area = Constant('__NR_get_thread_area',211) __NR_lookup_dcookie = Constant('__NR_lookup_dcookie',212) __NR_epoll_create = Constant('__NR_epoll_create',213) __NR_epoll_ctl_old = Constant('__NR_epoll_ctl_old',214) __NR_epoll_wait_old = Constant('__NR_epoll_wait_old',215) __NR_remap_file_pages = Constant('__NR_remap_file_pages',216) 
# --- Machine-generated syscall-number table (amd64 '__NR_*', then i386 'SYS32_*'). ---
# NOTE(review): this region appears to be auto-generated (pwntools-style Constant
# objects) and has been whitespace-mangled: many assignments are run together on
# single physical lines and some straddle line breaks. Do not hand-edit values;
# regenerate from the kernel headers instead.
# NOTE(review): '__NR_vserver' is assigned twice identically below — harmless at
# runtime but should be deduplicated when regenerating.
# NOTE(review): parenthesized values such as (222+1) for SYS32_timer_settime
# (evaluates to 223) and (240+1) for SYS32_mq_unlink (evaluates to 241) disagree
# with the neighboring i386 numbering (timer_create=259, mq_open=277); they look
# like casualties of the same text corruption — TODO verify against the generator
# and the kernel's unistd_32.h before relying on them.
__NR_getdents64 = Constant('__NR_getdents64',217) __NR_set_tid_address = Constant('__NR_set_tid_address',218) __NR_restart_syscall = Constant('__NR_restart_syscall',219) __NR_semtimedop = Constant('__NR_semtimedop',220) __NR_fadvise64 = Constant('__NR_fadvise64',221) __NR_timer_create = Constant('__NR_timer_create',222) __NR_timer_settime = Constant('__NR_timer_settime',223) __NR_timer_gettime = Constant('__NR_timer_gettime',224) __NR_timer_getoverrun = Constant('__NR_timer_getoverrun',225) __NR_timer_delete = Constant('__NR_timer_delete',226) __NR_clock_settime = Constant('__NR_clock_settime',227) __NR_clock_gettime = Constant('__NR_clock_gettime',228) __NR_clock_getres = Constant('__NR_clock_getres',229) __NR_clock_nanosleep = Constant('__NR_clock_nanosleep',230) __NR_exit_group = Constant('__NR_exit_group',231) __NR_epoll_wait = Constant('__NR_epoll_wait',232) __NR_epoll_ctl = Constant('__NR_epoll_ctl',233) __NR_tgkill = Constant('__NR_tgkill',234) __NR_utimes = Constant('__NR_utimes',235) __NR_vserver = Constant('__NR_vserver',236) __NR_vserver = Constant('__NR_vserver',236) __NR_mbind = Constant('__NR_mbind',237) __NR_set_mempolicy = Constant('__NR_set_mempolicy',238) __NR_get_mempolicy = Constant('__NR_get_mempolicy',239) __NR_mq_open = Constant('__NR_mq_open',240) __NR_mq_unlink = Constant('__NR_mq_unlink',241) __NR_mq_timedsend = Constant('__NR_mq_timedsend',242) __NR_mq_timedreceive = Constant('__NR_mq_timedreceive',243) __NR_mq_notify = Constant('__NR_mq_notify',244) __NR_mq_getsetattr = Constant('__NR_mq_getsetattr',245) __NR_kexec_load = Constant('__NR_kexec_load',246) __NR_waitid = Constant('__NR_waitid',247) __NR_add_key = Constant('__NR_add_key',248) __NR_request_key = Constant('__NR_request_key',249) __NR_keyctl = Constant('__NR_keyctl',250) __NR_ioprio_set = Constant('__NR_ioprio_set',251) __NR_ioprio_get = Constant('__NR_ioprio_get',252) __NR_inotify_init = Constant('__NR_inotify_init',253) __NR_inotify_add_watch =
Constant('__NR_inotify_add_watch',254) __NR_inotify_rm_watch = Constant('__NR_inotify_rm_watch',255) __NR_migrate_pages = Constant('__NR_migrate_pages',256) __NR_openat = Constant('__NR_openat',257) __NR_mkdirat = Constant('__NR_mkdirat',258) __NR_mknodat = Constant('__NR_mknodat',259) __NR_fchownat = Constant('__NR_fchownat',260) __NR_futimesat = Constant('__NR_futimesat',261) __NR_newfstatat = Constant('__NR_newfstatat',262) __NR_unlinkat = Constant('__NR_unlinkat',263) __NR_renameat = Constant('__NR_renameat',264) __NR_linkat = Constant('__NR_linkat',265) __NR_symlinkat = Constant('__NR_symlinkat',266) __NR_readlinkat = Constant('__NR_readlinkat',267) __NR_fchmodat = Constant('__NR_fchmodat',268) __NR_faccessat = Constant('__NR_faccessat',269) __NR_pselect6 = Constant('__NR_pselect6',270) __NR_ppoll = Constant('__NR_ppoll',271) __NR_unshare = Constant('__NR_unshare',272) __NR_set_robust_list = Constant('__NR_set_robust_list',273) __NR_get_robust_list = Constant('__NR_get_robust_list',274) __NR_splice = Constant('__NR_splice',275) __NR_tee = Constant('__NR_tee',276) __NR_sync_file_range = Constant('__NR_sync_file_range',277) __NR_vmsplice = Constant('__NR_vmsplice',278) __NR_move_pages = Constant('__NR_move_pages',279) __NR_utimensat = Constant('__NR_utimensat',280) __NR_epoll_pwait = Constant('__NR_epoll_pwait',281) __NR_signalfd = Constant('__NR_signalfd',282) __NR_timerfd = Constant('__NR_timerfd',283) __NR_eventfd = Constant('__NR_eventfd',284) __NR_fallocate = Constant('__NR_fallocate',285) __NR_timerfd_settime = Constant('__NR_timerfd_settime',286) __NR_timerfd_gettime = Constant('__NR_timerfd_gettime',287) __NR_accept4 = Constant('__NR_accept4',288) __NR_signalfd4 = Constant('__NR_signalfd4',289) __NR_eventfd2 = Constant('__NR_eventfd2',290) __NR_epoll_create1 = Constant('__NR_epoll_create1',291) __NR_dup3 = Constant('__NR_dup3',292) __NR_pipe2 = Constant('__NR_pipe2',293) __NR_inotify_init1 = Constant('__NR_inotify_init1',294) __NR_preadv =
Constant('__NR_preadv',295) __NR_pwritev = Constant('__NR_pwritev',296) __NR_rt_tgsigqueueinfo = Constant('__NR_rt_tgsigqueueinfo',297) __NR_perf_event_open = Constant('__NR_perf_event_open',298) __NR_recvmmsg = Constant('__NR_recvmmsg',299) __NR_fanotify_init = Constant('__NR_fanotify_init',300) __NR_fanotify_mark = Constant('__NR_fanotify_mark',301) __NR_prlimit64 = Constant('__NR_prlimit64',302) SYS32_restart_syscall = Constant('SYS32_restart_syscall',0) SYS32_exit = Constant('SYS32_exit',1) SYS32_fork = Constant('SYS32_fork',2) SYS32_read = Constant('SYS32_read',3) SYS32_write = Constant('SYS32_write',4) SYS32_open = Constant('SYS32_open',5) SYS32_close = Constant('SYS32_close',6) SYS32_waitpid = Constant('SYS32_waitpid',7) SYS32_creat = Constant('SYS32_creat',8) SYS32_link = Constant('SYS32_link',9) SYS32_unlink = Constant('SYS32_unlink',10) SYS32_execve = Constant('SYS32_execve',11) SYS32_chdir = Constant('SYS32_chdir',12) SYS32_time = Constant('SYS32_time',13) SYS32_mknod = Constant('SYS32_mknod',14) SYS32_chmod = Constant('SYS32_chmod',15) SYS32_lchown = Constant('SYS32_lchown',16) SYS32_break = Constant('SYS32_break',17) SYS32_oldstat = Constant('SYS32_oldstat',18) SYS32_lseek = Constant('SYS32_lseek',19) SYS32_getpid = Constant('SYS32_getpid',20) SYS32_mount = Constant('SYS32_mount',21) SYS32_umount = Constant('SYS32_umount',22) SYS32_setuid = Constant('SYS32_setuid',23) SYS32_getuid = Constant('SYS32_getuid',24) SYS32_stime = Constant('SYS32_stime',25) SYS32_ptrace = Constant('SYS32_ptrace',26) SYS32_alarm = Constant('SYS32_alarm',27) SYS32_oldfstat = Constant('SYS32_oldfstat',28) SYS32_pause = Constant('SYS32_pause',29) SYS32_utime = Constant('SYS32_utime',30) SYS32_stty = Constant('SYS32_stty',31) SYS32_gtty = Constant('SYS32_gtty',32) SYS32_access = Constant('SYS32_access',33) SYS32_nice = Constant('SYS32_nice',34) SYS32_ftime = Constant('SYS32_ftime',35) SYS32_sync = Constant('SYS32_sync',36) SYS32_kill = Constant('SYS32_kill',37) SYS32_rename =
Constant('SYS32_rename',38) SYS32_mkdir = Constant('SYS32_mkdir',39) SYS32_rmdir = Constant('SYS32_rmdir',40) SYS32_dup = Constant('SYS32_dup',41) SYS32_pipe = Constant('SYS32_pipe',42) SYS32_times = Constant('SYS32_times',43) SYS32_prof = Constant('SYS32_prof',44) SYS32_brk = Constant('SYS32_brk',45) SYS32_setgid = Constant('SYS32_setgid',46) SYS32_getgid = Constant('SYS32_getgid',47) SYS32_signal = Constant('SYS32_signal',48) SYS32_geteuid = Constant('SYS32_geteuid',49) SYS32_getegid = Constant('SYS32_getegid',50) SYS32_acct = Constant('SYS32_acct',51) SYS32_umount2 = Constant('SYS32_umount2',52) SYS32_lock = Constant('SYS32_lock',53) SYS32_ioctl = Constant('SYS32_ioctl',54) SYS32_fcntl = Constant('SYS32_fcntl',55) SYS32_mpx = Constant('SYS32_mpx',56) SYS32_setpgid = Constant('SYS32_setpgid',57) SYS32_ulimit = Constant('SYS32_ulimit',58) SYS32_oldolduname = Constant('SYS32_oldolduname',59) SYS32_umask = Constant('SYS32_umask',60) SYS32_chroot = Constant('SYS32_chroot',61) SYS32_ustat = Constant('SYS32_ustat',62) SYS32_dup2 = Constant('SYS32_dup2',63) SYS32_getppid = Constant('SYS32_getppid',64) SYS32_getpgrp = Constant('SYS32_getpgrp',65) SYS32_setsid = Constant('SYS32_setsid',66) SYS32_sigaction = Constant('SYS32_sigaction',67) SYS32_sgetmask = Constant('SYS32_sgetmask',68) SYS32_ssetmask = Constant('SYS32_ssetmask',69) SYS32_setreuid = Constant('SYS32_setreuid',70) SYS32_setregid = Constant('SYS32_setregid',71) SYS32_sigsuspend = Constant('SYS32_sigsuspend',72) SYS32_sigpending = Constant('SYS32_sigpending',73) SYS32_sethostname = Constant('SYS32_sethostname',74) SYS32_setrlimit = Constant('SYS32_setrlimit',75) SYS32_getrlimit = Constant('SYS32_getrlimit',76) SYS32_getrusage = Constant('SYS32_getrusage',77) SYS32_gettimeofday = Constant('SYS32_gettimeofday',78) SYS32_settimeofday = Constant('SYS32_settimeofday',79) SYS32_getgroups = Constant('SYS32_getgroups',80) SYS32_setgroups = Constant('SYS32_setgroups',81) SYS32_select = Constant('SYS32_select',82)
SYS32_symlink = Constant('SYS32_symlink',83) SYS32_oldlstat = Constant('SYS32_oldlstat',84) SYS32_readlink = Constant('SYS32_readlink',85) SYS32_uselib = Constant('SYS32_uselib',86) SYS32_swapon = Constant('SYS32_swapon',87) SYS32_reboot = Constant('SYS32_reboot',88) SYS32_readdir = Constant('SYS32_readdir',89) SYS32_mmap = Constant('SYS32_mmap',90) SYS32_munmap = Constant('SYS32_munmap',91) SYS32_truncate = Constant('SYS32_truncate',92) SYS32_ftruncate = Constant('SYS32_ftruncate',93) SYS32_fchmod = Constant('SYS32_fchmod',94) SYS32_fchown = Constant('SYS32_fchown',95) SYS32_getpriority = Constant('SYS32_getpriority',96) SYS32_setpriority = Constant('SYS32_setpriority',97) SYS32_profil = Constant('SYS32_profil',98) SYS32_statfs = Constant('SYS32_statfs',99) SYS32_fstatfs = Constant('SYS32_fstatfs',100) SYS32_ioperm = Constant('SYS32_ioperm',101) SYS32_socketcall = Constant('SYS32_socketcall',102) SYS32_syslog = Constant('SYS32_syslog',103) SYS32_setitimer = Constant('SYS32_setitimer',104) SYS32_getitimer = Constant('SYS32_getitimer',105) SYS32_stat = Constant('SYS32_stat',106) SYS32_lstat = Constant('SYS32_lstat',107) SYS32_fstat = Constant('SYS32_fstat',108) SYS32_olduname = Constant('SYS32_olduname',109) SYS32_iopl = Constant('SYS32_iopl',110) SYS32_vhangup = Constant('SYS32_vhangup',111) SYS32_idle = Constant('SYS32_idle',112) SYS32_vm86old = Constant('SYS32_vm86old',113) SYS32_wait4 = Constant('SYS32_wait4',114) SYS32_swapoff = Constant('SYS32_swapoff',115) SYS32_sysinfo = Constant('SYS32_sysinfo',116) SYS32_ipc = Constant('SYS32_ipc',117) SYS32_fsync = Constant('SYS32_fsync',118) SYS32_sigreturn = Constant('SYS32_sigreturn',119) SYS32_clone = Constant('SYS32_clone',120) SYS32_setdomainname = Constant('SYS32_setdomainname',121) SYS32_uname = Constant('SYS32_uname',122) SYS32_modify_ldt = Constant('SYS32_modify_ldt',123) SYS32_adjtimex = Constant('SYS32_adjtimex',124) SYS32_mprotect = Constant('SYS32_mprotect',125) SYS32_sigprocmask =
Constant('SYS32_sigprocmask',126) SYS32_create_module = Constant('SYS32_create_module',127) SYS32_init_module = Constant('SYS32_init_module',128) SYS32_delete_module = Constant('SYS32_delete_module',129) SYS32_get_kernel_syms = Constant('SYS32_get_kernel_syms',130) SYS32_quotactl = Constant('SYS32_quotactl',131) SYS32_getpgid = Constant('SYS32_getpgid',132) SYS32_fchdir = Constant('SYS32_fchdir',133) SYS32_bdflush = Constant('SYS32_bdflush',134) SYS32_sysfs = Constant('SYS32_sysfs',135) SYS32_personality = Constant('SYS32_personality',136) SYS32_afs_syscall = Constant('SYS32_afs_syscall',137) SYS32_setfsuid = Constant('SYS32_setfsuid',138) SYS32_setfsgid = Constant('SYS32_setfsgid',139) SYS32__llseek = Constant('SYS32__llseek',140) SYS32_getdents = Constant('SYS32_getdents',141) SYS32__newselect = Constant('SYS32__newselect',142) SYS32_flock = Constant('SYS32_flock',143) SYS32_msync = Constant('SYS32_msync',144) SYS32_readv = Constant('SYS32_readv',145) SYS32_writev = Constant('SYS32_writev',146) SYS32_getsid = Constant('SYS32_getsid',147) SYS32_fdatasync = Constant('SYS32_fdatasync',148) SYS32__sysctl = Constant('SYS32__sysctl',149) SYS32_mlock = Constant('SYS32_mlock',150) SYS32_munlock = Constant('SYS32_munlock',151) SYS32_mlockall = Constant('SYS32_mlockall',152) SYS32_munlockall = Constant('SYS32_munlockall',153) SYS32_sched_setparam = Constant('SYS32_sched_setparam',154) SYS32_sched_getparam = Constant('SYS32_sched_getparam',155) SYS32_sched_setscheduler = Constant('SYS32_sched_setscheduler',156) SYS32_sched_getscheduler = Constant('SYS32_sched_getscheduler',157) SYS32_sched_yield = Constant('SYS32_sched_yield',158) SYS32_sched_get_priority_max = Constant('SYS32_sched_get_priority_max',159) SYS32_sched_get_priority_min = Constant('SYS32_sched_get_priority_min',160) SYS32_sched_rr_get_interval = Constant('SYS32_sched_rr_get_interval',161) SYS32_nanosleep = Constant('SYS32_nanosleep',162) SYS32_mremap = Constant('SYS32_mremap',163) SYS32_setresuid =
Constant('SYS32_setresuid',164) SYS32_getresuid = Constant('SYS32_getresuid',165) SYS32_vm86 = Constant('SYS32_vm86',166) SYS32_query_module = Constant('SYS32_query_module',167) SYS32_poll = Constant('SYS32_poll',168) SYS32_nfsservctl = Constant('SYS32_nfsservctl',169) SYS32_setresgid = Constant('SYS32_setresgid',170) SYS32_getresgid = Constant('SYS32_getresgid',171) SYS32_prctl = Constant('SYS32_prctl',172) SYS32_rt_sigreturn = Constant('SYS32_rt_sigreturn',173) SYS32_rt_sigaction = Constant('SYS32_rt_sigaction',174) SYS32_rt_sigprocmask = Constant('SYS32_rt_sigprocmask',175) SYS32_rt_sigpending = Constant('SYS32_rt_sigpending',176) SYS32_rt_sigtimedwait = Constant('SYS32_rt_sigtimedwait',177) SYS32_rt_sigqueueinfo = Constant('SYS32_rt_sigqueueinfo',178) SYS32_rt_sigsuspend = Constant('SYS32_rt_sigsuspend',179) SYS32_pread64 = Constant('SYS32_pread64',180) SYS32_pwrite64 = Constant('SYS32_pwrite64',181) SYS32_chown = Constant('SYS32_chown',182) SYS32_getcwd = Constant('SYS32_getcwd',183) SYS32_capget = Constant('SYS32_capget',184) SYS32_capset = Constant('SYS32_capset',185) SYS32_sigaltstack = Constant('SYS32_sigaltstack',186) SYS32_sendfile = Constant('SYS32_sendfile',187) SYS32_getpmsg = Constant('SYS32_getpmsg',188) SYS32_putpmsg = Constant('SYS32_putpmsg',189) SYS32_vfork = Constant('SYS32_vfork',190) SYS32_ugetrlimit = Constant('SYS32_ugetrlimit',191) SYS32_mmap2 = Constant('SYS32_mmap2',192) SYS32_truncate64 = Constant('SYS32_truncate64',193) SYS32_ftruncate64 = Constant('SYS32_ftruncate64',194) SYS32_stat64 = Constant('SYS32_stat64',195) SYS32_lstat64 = Constant('SYS32_lstat64',196) SYS32_fstat64 = Constant('SYS32_fstat64',197) SYS32_lchown32 = Constant('SYS32_lchown32',198) SYS32_getuid32 = Constant('SYS32_getuid32',199) SYS32_getgid32 = Constant('SYS32_getgid32',200) SYS32_geteuid32 = Constant('SYS32_geteuid32',201) SYS32_getegid32 = Constant('SYS32_getegid32',202) SYS32_setreuid32 = Constant('SYS32_setreuid32',203) SYS32_setregid32 =
Constant('SYS32_setregid32',204) SYS32_getgroups32 = Constant('SYS32_getgroups32',205) SYS32_setgroups32 = Constant('SYS32_setgroups32',206) SYS32_fchown32 = Constant('SYS32_fchown32',207) SYS32_setresuid32 = Constant('SYS32_setresuid32',208) SYS32_getresuid32 = Constant('SYS32_getresuid32',209) SYS32_setresgid32 = Constant('SYS32_setresgid32',210) SYS32_getresgid32 = Constant('SYS32_getresgid32',211) SYS32_chown32 = Constant('SYS32_chown32',212) SYS32_setuid32 = Constant('SYS32_setuid32',213) SYS32_setgid32 = Constant('SYS32_setgid32',214) SYS32_setfsuid32 = Constant('SYS32_setfsuid32',215) SYS32_setfsgid32 = Constant('SYS32_setfsgid32',216) SYS32_pivot_root = Constant('SYS32_pivot_root',217) SYS32_mincore = Constant('SYS32_mincore',218) SYS32_madvise = Constant('SYS32_madvise',219) SYS32_madvise1 = Constant('SYS32_madvise1',219) SYS32_getdents64 = Constant('SYS32_getdents64',220) SYS32_fcntl64 = Constant('SYS32_fcntl64',221) SYS32_gettid = Constant('SYS32_gettid',224) SYS32_readahead = Constant('SYS32_readahead',225) SYS32_setxattr = Constant('SYS32_setxattr',226) SYS32_lsetxattr = Constant('SYS32_lsetxattr',227) SYS32_fsetxattr = Constant('SYS32_fsetxattr',228) SYS32_getxattr = Constant('SYS32_getxattr',229) SYS32_lgetxattr = Constant('SYS32_lgetxattr',230) SYS32_fgetxattr = Constant('SYS32_fgetxattr',231) SYS32_listxattr = Constant('SYS32_listxattr',232) SYS32_llistxattr = Constant('SYS32_llistxattr',233) SYS32_flistxattr = Constant('SYS32_flistxattr',234) SYS32_removexattr = Constant('SYS32_removexattr',235) SYS32_lremovexattr = Constant('SYS32_lremovexattr',236) SYS32_fremovexattr = Constant('SYS32_fremovexattr',237) SYS32_tkill = Constant('SYS32_tkill',238) SYS32_sendfile64 = Constant('SYS32_sendfile64',239) SYS32_futex = Constant('SYS32_futex',240) SYS32_sched_setaffinity = Constant('SYS32_sched_setaffinity',241) SYS32_sched_getaffinity = Constant('SYS32_sched_getaffinity',242) SYS32_set_thread_area = Constant('SYS32_set_thread_area',243)
SYS32_get_thread_area = Constant('SYS32_get_thread_area',244) SYS32_io_setup = Constant('SYS32_io_setup',245) SYS32_io_destroy = Constant('SYS32_io_destroy',246) SYS32_io_getevents = Constant('SYS32_io_getevents',247) SYS32_io_submit = Constant('SYS32_io_submit',248) SYS32_io_cancel = Constant('SYS32_io_cancel',249) SYS32_fadvise64 = Constant('SYS32_fadvise64',250) SYS32_exit_group = Constant('SYS32_exit_group',252) SYS32_lookup_dcookie = Constant('SYS32_lookup_dcookie',253) SYS32_epoll_create = Constant('SYS32_epoll_create',254) SYS32_epoll_ctl = Constant('SYS32_epoll_ctl',255) SYS32_epoll_wait = Constant('SYS32_epoll_wait',256) SYS32_remap_file_pages = Constant('SYS32_remap_file_pages',257) SYS32_set_tid_address = Constant('SYS32_set_tid_address',258) SYS32_timer_create = Constant('SYS32_timer_create',259) SYS32_timer_settime = Constant('SYS32_timer_settime',(222+1)) SYS32_timer_gettime = Constant('SYS32_timer_gettime',(222+2)) SYS32_timer_getoverrun = Constant('SYS32_timer_getoverrun',(222+3)) SYS32_timer_delete = Constant('SYS32_timer_delete',(222+4)) SYS32_clock_settime = Constant('SYS32_clock_settime',(222+5)) SYS32_clock_gettime = Constant('SYS32_clock_gettime',(222+6)) SYS32_clock_getres = Constant('SYS32_clock_getres',(222+7)) SYS32_clock_nanosleep = Constant('SYS32_clock_nanosleep',(222+8)) SYS32_statfs64 = Constant('SYS32_statfs64',268) SYS32_fstatfs64 = Constant('SYS32_fstatfs64',269) SYS32_tgkill = Constant('SYS32_tgkill',270) SYS32_utimes = Constant('SYS32_utimes',271) SYS32_fadvise64_64 = Constant('SYS32_fadvise64_64',272) SYS32_vserver = Constant('SYS32_vserver',273) SYS32_mbind = Constant('SYS32_mbind',274) SYS32_get_mempolicy = Constant('SYS32_get_mempolicy',275) SYS32_set_mempolicy = Constant('SYS32_set_mempolicy',276) SYS32_mq_open = Constant('SYS32_mq_open',277) SYS32_mq_unlink = Constant('SYS32_mq_unlink',(240+1)) SYS32_mq_timedsend = Constant('SYS32_mq_timedsend',(240+2)) SYS32_mq_timedreceive = Constant('SYS32_mq_timedreceive',(240+3))
SYS32_mq_notify = Constant('SYS32_mq_notify',(240+4)) SYS32_mq_getsetattr = Constant('SYS32_mq_getsetattr',(240+5)) SYS32_kexec_load = Constant('SYS32_kexec_load',283) SYS32_waitid = Constant('SYS32_waitid',284) SYS32_add_key = Constant('SYS32_add_key',286) SYS32_request_key = Constant('SYS32_request_key',287) SYS32_keyctl = Constant('SYS32_keyctl',288) SYS32_ioprio_set = Constant('SYS32_ioprio_set',289) SYS32_ioprio_get = Constant('SYS32_ioprio_get',290) SYS32_inotify_init = Constant('SYS32_inotify_init',291) SYS32_inotify_add_watch = Constant('SYS32_inotify_add_watch',292) SYS32_inotify_rm_watch = Constant('SYS32_inotify_rm_watch',293) SYS32_migrate_pages = Constant('SYS32_migrate_pages',294) SYS32_openat = Constant('SYS32_openat',295) SYS32_mkdirat = Constant('SYS32_mkdirat',296) SYS32_mknodat = Constant('SYS32_mknodat',297) SYS32_fchownat = Constant('SYS32_fchownat',298) SYS32_futimesat = Constant('SYS32_futimesat',299) SYS32_fstatat64 = Constant('SYS32_fstatat64',300) SYS32_unlinkat = Constant('SYS32_unlinkat',301) SYS32_renameat = Constant('SYS32_renameat',302) SYS32_linkat = Constant('SYS32_linkat',303) SYS32_symlinkat = Constant('SYS32_symlinkat',304) SYS32_readlinkat = Constant('SYS32_readlinkat',305) SYS32_fchmodat = Constant('SYS32_fchmodat',306) SYS32_faccessat = Constant('SYS32_faccessat',307) SYS32_pselect6 = Constant('SYS32_pselect6',308) SYS32_ppoll = Constant('SYS32_ppoll',309) SYS32_unshare = Constant('SYS32_unshare',310) SYS32_set_robust_list = Constant('SYS32_set_robust_list',311) SYS32_get_robust_list = Constant('SYS32_get_robust_list',312) SYS32_splice = Constant('SYS32_splice',313) SYS32_sync_file_range = Constant('SYS32_sync_file_range',314) SYS32_tee = Constant('SYS32_tee',315) SYS32_vmsplice = Constant('SYS32_vmsplice',316) SYS32_move_pages = Constant('SYS32_move_pages',317) SYS32_getcpu = Constant('SYS32_getcpu',318) SYS32_epoll_pwait = Constant('SYS32_epoll_pwait',319) SYS32_utimensat = Constant('SYS32_utimensat',320) SYS32_signalfd = 
Constant('SYS32_signalfd',321) SYS32_timerfd_create = Constant('SYS32_timerfd_create',322) SYS32_eventfd = Constant('SYS32_eventfd',323) SYS32_fallocate = Constant('SYS32_fallocate',324) SYS32_timerfd_settime = Constant('SYS32_timerfd_settime',325) SYS32_timerfd_gettime = Constant('SYS32_timerfd_gettime',326) SYS32_signalfd4 = Constant('SYS32_signalfd4',327) SYS32_eventfd2 = Constant('SYS32_eventfd2',328) SYS32_epoll_create1 = Constant('SYS32_epoll_create1',329) SYS32_dup3 = Constant('SYS32_dup3',330) SYS32_pipe2 = Constant('SYS32_pipe2',331) SYS32_inotify_init1 = Constant('SYS32_inotify_init1',332) SYS32_preadv = Constant('SYS32_preadv',333) SYS32_pwritev = Constant('SYS32_pwritev',334) SYS32_rt_tgsigqueueinfo = Constant('SYS32_rt_tgsigqueueinfo',335) SYS32_perf_event_open = Constant('SYS32_perf_event_open',336) SYS32_recvmmsg
# NOTE(review): this region is the whitespace-mangled interior of a tkinter
# image-canvas class: tool-selection setters (each stores the shape id, shows the
# shape, and records the tool in both 'active_tool' and 'current_tool'), followed
# by _set_image_from_pil_image, the shape-property get/set helpers (keyed by
# str(shape_id)), and the start of _pan. Statement boundaries were lost when the
# newlines were collapsed, so the lines are kept byte-identical here.
# NOTE(review): the leading fragment 'self.variables.select_rect_id' is a bare
# expression with no effect — the tail of a method whose 'def' lies before this
# chunk; an assignment was probably lost in the corruption. TODO recover from VCS.
self.variables.select_rect_id self.variables.active_tool = TOOLS.SELECT_TOOL self.variables.current_tool = TOOLS.SELECT_TOOL def set_current_tool_to_draw_line_by_dragging(self, line_id=None): self.variables.current_shape_id = line_id self.show_shape(line_id) self.variables.active_tool = TOOLS.DRAW_LINE_BY_DRAGGING self.variables.current_tool = TOOLS.DRAW_LINE_BY_DRAGGING def set_current_tool_to_draw_line_by_clicking(self, line_id=None): self.variables.current_shape_id = line_id self.show_shape(line_id) self.variables.active_tool = TOOLS.DRAW_LINE_BY_CLICKING self.variables.current_tool = TOOLS.DRAW_LINE_BY_CLICKING def set_current_tool_to_draw_arrow_by_dragging(self, arrow_id=None): self.variables.current_shape_id = arrow_id self.show_shape(arrow_id) self.variables.active_tool = TOOLS.DRAW_ARROW_BY_DRAGGING self.variables.current_tool = TOOLS.DRAW_ARROW_BY_DRAGGING def set_current_tool_to_draw_arrow_by_clicking(self, line_id=None): self.variables.current_shape_id = line_id self.show_shape(line_id) self.variables.active_tool = TOOLS.DRAW_ARROW_BY_CLICKING self.variables.current_tool = TOOLS.DRAW_ARROW_BY_CLICKING def set_current_tool_to_draw_polygon_by_clicking(self, polygon_id=None): self.variables.current_shape_id = polygon_id self.show_shape(polygon_id) self.variables.active_tool = TOOLS.DRAW_POLYGON_BY_CLICKING self.variables.current_tool = TOOLS.DRAW_POLYGON_BY_CLICKING def set_current_tool_to_draw_point(self, point_id=None): self.variables.current_shape_id = point_id self.show_shape(point_id) self.variables.active_tool = TOOLS.DRAW_POINT_BY_CLICKING self.variables.current_tool = TOOLS.DRAW_POINT_BY_CLICKING def set_current_tool_to_translate_shape(self): self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL self.variables.current_tool = TOOLS.TRANSLATE_SHAPE_TOOL def set_current_tool_to_none(self): self.variables.active_tool = None self.variables.current_tool = None def set_current_tool_to_edit_shape(self): self.variables.active_tool = TOOLS.EDIT_SHAPE_TOOL
# NOTE(review): next line continues set_current_tool_to_edit_shape, then defines
# the pan/edit setters and the shape-property helpers; _pan clamps the panned
# canvas rectangle so its upper-left never maps to negative image coordinates.
self.variables.current_tool = TOOLS.EDIT_SHAPE_TOOL def set_current_tool_to_edit_shape_coords(self): self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL self.variables.current_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL def set_current_tool_to_pan(self): self.variables.active_tool = TOOLS.PAN_TOOL self.variables.current_tool = TOOLS.PAN_TOOL def _set_image_from_pil_image(self, pil_image): nx_pix, ny_pix = pil_image.size self.config(scrollregion=(0, 0, nx_pix, ny_pix)) self.variables._tk_im = ImageTk.PhotoImage(pil_image) self.variables.image_id = self.create_image(0, 0, anchor="nw", image=self.variables._tk_im) self.tag_lower(self.variables.image_id) def _get_shape_property(self, shape_id, # type: int shape_property, # type: str ): properties = self.variables.shape_properties[str(shape_id)] return properties[shape_property] def _set_shape_property(self, shape_id, # type: int shape_property, # type: str val, ): if not str(shape_id) in self.variables.shape_properties.keys(): self.variables.shape_properties[str(shape_id)] = {} self.variables.shape_properties[str(shape_id)][shape_property] = val def _update_shape_properties(self, shape_id, # type: int properties, # type: dict ): for key in properties.keys(): val = properties[key] self._set_shape_property(shape_id, key, val) def _pan(self, event): new_canvas_x_ul = self.variables.pan_anchor_point_xy[0] - event.x new_canvas_y_ul = self.variables.pan_anchor_point_xy[1] - event.y new_canvas_x_br = new_canvas_x_ul + self.variables.canvas_width new_canvas_y_br = new_canvas_y_ul + self.variables.canvas_height canvas_coords = (new_canvas_x_ul, new_canvas_y_ul, new_canvas_x_br, new_canvas_y_br) image_coords = self.variables.canvas_image_object.canvas_coords_to_full_image_yx(canvas_coords) image_y_ul = image_coords[0] image_x_ul = image_coords[1] image_y_br = image_coords[2] image_x_br = image_coords[3] if image_y_ul < 0: new_canvas_y_ul = 0 new_canvas_y_br = self.variables.canvas_height if image_x_ul < 0: new_canvas_x_ul = 0
new_canvas_x_br = self.variables.canvas_width if image_y_br > self.variables.canvas_image_object.image_reader.full_image_ny: image_y_br = self.variables.canvas_image_object.image_reader.full_image_ny new_canvas_x_br, new_canvas_y_br = self.variables.canvas_image_object.full_image_yx_to_canvas_coords( (image_y_br, image_x_br)) new_canvas_x_ul, new_canvas_y_ul = int(new_canvas_x_br - self.variables.canvas_width), int( new_canvas_y_br - self.variables.canvas_height) if image_x_br > self.variables.canvas_image_object.image_reader.full_image_nx: image_x_br = self.variables.canvas_image_object.image_reader.full_image_nx new_canvas_x_br, new_canvas_y_br = self.variables.canvas_image_object.full_image_yx_to_canvas_coords( (image_y_br, image_x_br)) new_canvas_x_ul, new_canvas_y_ul = int(new_canvas_x_br - self.variables.canvas_width), int( new_canvas_y_br - self.variables.canvas_height) canvas_rect = (new_canvas_x_ul, new_canvas_y_ul, new_canvas_x_br, new_canvas_y_br) self.zoom_to_selection(canvas_rect, self.variables.animate_pan) self.hide_shape(self.variables.zoom_rect_id) def config_do_not_scale_image_to_fit(self): self.sbarv=tkinter.Scrollbar(self, orient=tkinter.VERTICAL) self.sbarh=tkinter.Scrollbar(self, orient=tkinter.HORIZONTAL) self.sbarv.config(command=self.yview) self.sbarh.config(command=self.xview) self.config(yscrollcommand=self.sbarv.set) self.config(xscrollcommand=self.sbarh.set) self.sbarv.grid(row=0, column=1, stick=tkinter.N+tkinter.S) self.sbarh.grid(row=1, column=0, sticky=tkinter.E+tkinter.W) def save_full_canvas_as_png(self, output_fname, # type: str ): # put a sleep in here in case there is a dialog covering the screen before this method is called. 
# Tail of save_full_canvas_as_png (sleeps 0.2s so any covering dialog can close,
# grabs the screen region of the canvas, and writes it to output_fname), then:
# - save_currently_displayed_canvas_to_numpy_array: despite its name it returns a
#   PIL Image (ImageGrab.grab() cropped to the canvas's on-screen rectangle) —
#   misleading name, but callers here rely on .save(), so it is left unchanged.
# - activate_color_selector: opens a tk color chooser and recolors the current shape.
# - find_closest_shape_coord (head): for RECT shapes, normalizes the rectangle so
#   the vertex nearest (canvas_x, canvas_y) becomes a stored corner, mutating the
#   shape via modify_existing_shape_using_canvas_coords.
# NOTE(review): lines kept byte-identical; the inline '# type:' comment after
# 'shape_id,' swallows the rest of this physical line in the mangled text.
time.sleep(0.2) im = self.save_currently_displayed_canvas_to_numpy_array() im.save(output_fname) def save_currently_displayed_canvas_to_numpy_array(self): x_ul = self.winfo_rootx() + 1 y_ul = self.winfo_rooty() + 1 x_lr = x_ul + self.variables.canvas_width y_lr = y_ul + self.variables.canvas_height im = ImageGrab.grab() im = im.crop((x_ul, y_ul, x_lr, y_lr)) return im def activate_color_selector(self, event): color = colorchooser.askcolor()[1] self.variables.foreground_color = color self.change_shape_color(self.variables.current_shape_id, color) def find_closest_shape_coord(self, shape_id, # type: int canvas_x, # type: int canvas_y, # type: int ): # type: (...) -> int shape_type = self.get_shape_type(self.variables.current_shape_id) coords = self.get_shape_canvas_coords(shape_id) if shape_type == SHAPE_TYPES.RECT: select_x1, select_y1, select_x2, select_y2 = coords select_xul = min(select_x1, select_x2) select_xlr = max(select_x1, select_x2) select_yul = min(select_y1, select_y2) select_ylr = max(select_y1, select_y2) ul = (select_xul, select_yul) ur = (select_xlr, select_yul) lr = (select_xlr, select_ylr) ll = (select_xul, select_ylr) rect_coords = [(select_x1, select_y1), (select_x2, select_y2)] all_coords = [ul, ur, lr, ll] squared_distances = [] for corner_coord in all_coords: coord_x, coord_y = corner_coord d = (coord_x - canvas_x)**2 + (coord_y - canvas_y)**2 squared_distances.append(d) closest_coord_index = numpy.where(squared_distances == numpy.min(squared_distances))[0][0] closest_coord = all_coords[closest_coord_index] if closest_coord not in rect_coords: if closest_coord == ul: self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ul[0], ul[1], lr[0], lr[1])) if closest_coord == ur: self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ur[0], ur[1], ll[0], ll[1])) if closest_coord == lr: self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ul[0], ul[1], lr[0], lr[1])) if
# Tail of find_closest_shape_coord (re-reads the possibly-normalized coords and
# returns the index of the vertex pair nearest the click, comparing squared
# distances via numpy.where/numpy.min), then:
# - find_closest_shape: nearest non-tool shape by minimum corner distance (the
#   existing TODO comments note the corner-only limitation).
# - get_non_tool_shape_ids / get_tool_shape_ids: set difference of all shape ids
#   against the zoom/select tool rectangles, via numpy.setdiff1d.
# - class CanvasImage header with class-level attribute declarations (cut off
#   mid-assignment at the end of this physical line).
# NOTE(review): 'drop_bands = []' is a mutable class attribute — if an instance
# ever appends to it rather than rebinding, the list is shared across all
# instances. TODO confirm intended; per-instance init would be safer.
# NOTE(review): everything after the first '#' on this physical line is dead
# (commented out) in the mangled text; kept byte-identical.
closest_coord == ll: self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ll[0], ll[1], ur[0], ur[1])) coords = self.get_shape_canvas_coords(shape_id) squared_distances = [] coord_indices = numpy.arange(0, len(coords), step=2) for i in coord_indices: coord_x, coord_y = coords[i], coords[i+1] d = (coord_x - canvas_x)**2 + (coord_y - canvas_y)**2 squared_distances.append(d) closest_coord_index = numpy.where(squared_distances == numpy.min(squared_distances))[0][0] return closest_coord_index # TODO: improve this. Right now it finds closest shape just based on distance to corners. Improvements should # TODO: include finding a closest point if the x/y coordinate is inside a polygon, and also finding closest # TODO: distance to each line of a polygon, not just the corners. def find_closest_shape(self, canvas_x, canvas_y): non_tool_shape_ids = self.get_non_tool_shape_ids() closest_distances = [] for shape_id in non_tool_shape_ids: coords = self.get_shape_canvas_coords(shape_id) squared_distances = [] coord_indices = numpy.arange(0, len(coords), step=2) for i in coord_indices: coord_x, coord_y = coords[i], coords[i + 1] d = (coord_x - canvas_x) ** 2 + (coord_y - canvas_y) ** 2 squared_distances.append(d) closest_distances.append(numpy.min(squared_distances)) closest_shape_id = non_tool_shape_ids[numpy.where(closest_distances == numpy.min(closest_distances))[0][0]] return closest_shape_id def get_non_tool_shape_ids(self): all_shape_ids = self.variables.shape_ids tool_shape_ids = self.get_tool_shape_ids() return list(numpy.setdiff1d(all_shape_ids, tool_shape_ids)) def get_tool_shape_ids(self): tool_shape_ids = [self.variables.zoom_rect_id, self.variables.select_rect_id] return tool_shape_ids class CanvasImage(object): image_reader = None # type: ImageReader canvas_decimated_image = None # type: ndarray display_image = None # type: ndarray decimation_factor = 1 # type: int display_rescaling_factor = 1 # type: float canvas_full_image_upper_left_yx =
# Continuation of the CanvasImage class-attribute block ('(0, 0)' completes the
# canvas_full_image_upper_left_yx assignment started on the previous line), then:
# - __init__: stores the reader and canvas dimensions and builds the initial
#   display image from the full image.
# - get_decimated_image_data_in_full_image_rect: strided slice of the reader,
#   [y_start:y_end:decimation, x_start:x_end:decimation].
# - get_scaled_display_data: scales the decimated image to fit the canvas; when
#   drop_bands is non-empty it zeroes those bands IN PLACE in the passed array
#   before resizing via PIL. Note it mixes both 'numpy' and 'np' aliases —
#   presumably both are imported in the original module; verify before reuse.
# - decimated<->display coordinate converters (multiply/divide by scale factor).
# NOTE(review): everything after the first '#' on this physical line is dead
# (commented out) in the mangled text; kept byte-identical.
(0, 0) # type: (int, int) canvas_ny = None # type: int canvas_nx = None # type: int scale_to_fit_canvas = True # type: bool drop_bands = [] # type: [] def __init__(self, image_reader, # type: ImageReader canvas_nx, # type: int canvas_ny, # type: int ): self.image_reader = image_reader self.canvas_nx = canvas_nx self.canvas_ny = canvas_ny self.update_canvas_display_image_from_full_image() def get_decimated_image_data_in_full_image_rect(self, full_image_rect, # type: (int, int, int, int) decimation, # type: int ): y_start = full_image_rect[0] y_end = full_image_rect[2] x_start = full_image_rect[1] x_end = full_image_rect[3] decimated_data = self.image_reader[y_start:y_end:decimation, x_start:x_end:decimation] return decimated_data def get_scaled_display_data(self, decimated_image): scale_factor = self.compute_display_scale_factor(decimated_image) new_nx = int(decimated_image.shape[1] * scale_factor) new_ny = int(decimated_image.shape[0] * scale_factor) if new_nx > self.canvas_nx: new_nx = self.canvas_nx if new_ny > self.canvas_ny: new_ny = self.canvas_ny if self.drop_bands != []: zeros_image = numpy.zeros_like(decimated_image[:, :, 0]) for drop_band in self.drop_bands: decimated_image[:, :, drop_band] = zeros_image pil_image = PIL.Image.fromarray(decimated_image) display_image = pil_image.resize((new_nx, new_ny)) return np.array(display_image) def decimated_image_coords_to_display_image_coords(self, decimated_image_yx_cords, # type: list ): scale_factor = self.compute_display_scale_factor(self.canvas_decimated_image) display_coords = [] for coord in decimated_image_yx_cords: display_coord_y = coord[0] * scale_factor display_coord_x = coord[1] * scale_factor display_coords.append((display_coord_y, display_coord_x)) return display_coords def display_image_coords_to_decimated_image_coords(self, display_image_yx_coords, # type: list ): scale_factor = self.compute_display_scale_factor(self.canvas_decimated_image) decimated_coords = [] for coord in display_image_yx_coords:
display_coord_y = coord[0] / scale_factor display_coord_x = coord[1] / scale_factor decimated_coords.append((display_coord_y, display_coord_x)) return decimated_coords @staticmethod def display_image_coords_to_canvas_coords(display_image_yx_coords, # type: list ): canvas_coords = [] for yx in display_image_yx_coords: canvas_coords.append((yx[1], yx[0])) return canvas_coords def compute_display_scale_factor(self, decimated_image): decimated_image_nx = decimated_image.shape[1] decimated_image_ny = decimated_image.shape[0] scale_factor_1 = self.canvas_nx / decimated_image_nx scale_factor_2 = self.canvas_ny / decimated_image_ny scale_factor = np.min((scale_factor_1, scale_factor_2)) return scale_factor def get_decimated_image_data_in_canvas_rect(self, canvas_rect, # type: (int, int, int, int) decimation=None, # type: int ): full_image_rect = self.canvas_rect_to_full_image_rect(canvas_rect) if decimation is None: decimation = self.get_decimation_from_canvas_rect(canvas_rect) return self.get_decimated_image_data_in_full_image_rect(full_image_rect, decimation) def update_canvas_display_image_from_full_image(self): full_image_rect = (0, 0, self.image_reader.full_image_ny, self.image_reader.full_image_nx) self.update_canvas_display_image_from_full_image_rect(full_image_rect) def update_canvas_display_image_from_full_image_rect(self, full_image_rect): self.set_decimation_from_full_image_rect(full_image_rect) decimated_image_data = self.get_decimated_image_data_in_full_image_rect(full_image_rect, self.decimation_factor) self.update_canvas_display_from_numpy_array(decimated_image_data) self.canvas_full_image_upper_left_yx = (full_image_rect[0], full_image_rect[1]) def update_canvas_display_image_from_canvas_rect(self, canvas_rect): full_image_rect = self.canvas_rect_to_full_image_rect(canvas_rect) full_image_rect = (int(round(full_image_rect[0])), int(round(full_image_rect[1])), int(round(full_image_rect[2])), int(round(full_image_rect[3]))) 
self.update_canvas_display_image_from_full_image_rect(full_image_rect) def update_canvas_display_from_numpy_array(self, image_data, # type: ndarray ): if self.drop_bands != []: zeros_image = numpy.zeros_like(image_data[:, :, 0]) for drop_band in self.drop_bands: image_data[:, :, drop_band] = zeros_image self.canvas_decimated_image = image_data if self.scale_to_fit_canvas: scale_factor = self.compute_display_scale_factor(image_data) self.display_rescaling_factor = scale_factor self.display_image = self.get_scaled_display_data(image_data) else: self.display_image = image_data def get_decimation_factor_from_full_image_rect(self, full_image_rect): ny = full_image_rect[2] - full_image_rect[0] nx = full_image_rect[3] - full_image_rect[1] decimation_y = ny / self.canvas_ny decimation_x = nx / self.canvas_nx decimation_factor = max(decimation_y, decimation_x) decimation_factor = int(decimation_factor) if decimation_factor < 1: decimation_factor = 1 return decimation_factor def get_decimation_from_canvas_rect(self, canvas_rect): full_image_rect = self.canvas_rect_to_full_image_rect(canvas_rect) return self.get_decimation_factor_from_full_image_rect(full_image_rect) def set_decimation_from_full_image_rect(self, full_image_rect): decimation_factor = self.get_decimation_factor_from_full_image_rect(full_image_rect) self.decimation_factor = decimation_factor def canvas_coords_to_full_image_yx(self, canvas_coords, # type: [int] ): x_coords = canvas_coords[0::2] y_coords = canvas_coords[1::2] xy_coords = zip(x_coords, y_coords) image_yx_coords = [] for xy in xy_coords: decimation_factor = self.decimation_factor if self.scale_to_fit_canvas: decimation_factor = decimation_factor / self.display_rescaling_factor image_x = xy[0] * decimation_factor + self.canvas_full_image_upper_left_yx[1] image_y = xy[1] * decimation_factor + self.canvas_full_image_upper_left_yx[0] image_yx_coords.append(image_y) image_yx_coords.append(image_x) return image_yx_coords def 
canvas_rect_to_full_image_rect(self, canvas_rect, # type: (int, int, int, int) ): # type: (...) ->[float] image_y1, image_x1 = self.canvas_coords_to_full_image_yx((canvas_rect[0], canvas_rect[1])) image_y2, image_x2 = self.canvas_coords_to_full_image_yx((canvas_rect[2], canvas_rect[3])) if image_x1 < 0: image_x1 = 0 if image_y1 < 0: image_y1 = 0 if image_x2 > self.image_reader.full_image_nx: image_x2 = self.image_reader.full_image_nx if image_y2 > self.image_reader.full_image_ny: image_y2 = self.image_reader.full_image_ny return image_y1,
# NOTE(review): the statements below are the tail of a function whose `def` line is
# above this chunk. The `func_<hex>` functions that follow look machine-generated
# variants of one roulette-betting solver (Python 2: `xrange`). `get_expected`, and
# in several functions `ret` / `cc`, are not defined in this chunk — presumably
# defined elsewhere; verify. The constant 37 matches the 37 numbers of a
# European roulette wheel — TODO confirm.
queue = sorted(set(queue))
seen = set(queue)
while queue:
    lowest = queue.pop()
    if lowest == 0:
        continue
    # Budget needed to raise every bet (and each empty slot) up to `lowest`.
    needed_budget = (37 - len(placed)) * lowest
    for p in placed:
        needed_budget += max(0, lowest - p)
    if budget < needed_budget:
        continue
    remaining_budget = budget - needed_budget
    partial = len([p for p in placed if p <= lowest])
    lowest_cnt = 37 - len(placed) + partial
    if lowest_cnt == 0:
        continue
    larger = [p for p in placed if p > lowest]
    if larger:
        next_larger = min(larger)
        can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
    else:
        can_replicate = remaining_budget / lowest_cnt
    if can_replicate > 0:
        if lowest + can_replicate not in seen:
            seen.add(lowest + can_replicate)
            queue.append(lowest + can_replicate)
        if lowest + can_replicate - 1 not in seen:
            seen.add(lowest + can_replicate - 1)
            queue.append(lowest + can_replicate - 1)
    for exclude in xrange(0, min(remaining_budget, partial) + 1):
        cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
        # NOTE(review): `max(...)` returns a float, so the trailing
        # ('Case #%d: %.10lf' % ...) call would raise TypeError if executed —
        # looks like a mangled `print` statement; confirm against the original.
        ret = max(ret, cand)('Case #%d: %.10lf' % (cc + 1, ret))
return lowest


def func_fc176ac0bfb34c2691613ec2a5fdc75c(budget, cc, placed):
    """Variant: runs the full search loop, then returns `next_larger`
    (unbound if `larger` was never non-empty — NOTE(review))."""
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
        else:
            can_replicate = remaining_budget / lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in xrange(0, min(remaining_budget, partial) + 1):
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            # NOTE(review): same mangled print-as-call defect as above.
            ret = max(ret, cand)('Case #%d: %.10lf' % (cc + 1, ret))
    return next_larger


def func_2d49266271214aaa95d35f24b76ec595(infile):
    """Variant: parses one case from `infile`, builds the candidate queue, returns `seen`."""
    budget, bets = map(int, infile.readline().split())
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    return seen


def func_f2b717277015480aa81aead5107e1714(infile):
    """Variant: same parse/setup; returns the sorted candidate `queue`."""
    budget, bets = map(int, infile.readline().split())
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    return ret


def func_55991d4d81b847b5ab9c935dd4740b43(infile):
    """Variant: same parse/setup; returns the initial `ret` (always 0.0 here)."""
    budget, bets = map(int, infile.readline().split())
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    return ret


def func_ea807d1ab8ce4b54bebc0e342495390d(infile):
    """Variant: same parse/setup; returns the parsed bet count `bets`."""
    budget, bets = map(int, infile.readline().split())
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    return bets


def func_afec952bb37f4eb28ab4c1177da659b1(infile):
    """Variant: same parse/setup; returns `p`.

    NOTE(review): `p` is only bound by the list comprehensions — this works solely
    via Python 2 list-comprehension scope leakage and NameErrors on Python 3.
    """
    budget, bets = map(int, infile.readline().split())
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    return p


def func_542440f6c97e475186a629cf67c30bd9(infile):
    """Variant: same parse/setup; returns the parsed `budget`."""
    budget, bets = map(int, infile.readline().split())
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    return budget


def func_a58808f936184171adf4ca127385aa47(infile):
    """Variant: same parse/setup; returns the sorted `placed` bets."""
    budget, bets = map(int, infile.readline().split())
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    return placed


def func_69269cfcb1df46e5bce82c1642a1d1c6(budget, infile):
    """Variant: full search loop (no print defect); returns best expected value `ret`."""
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
        else:
            can_replicate = remaining_budget / lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in xrange(0, min(remaining_budget, partial) + 1):
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)
    return ret


def func_217e5e81e67d4bc592872b6e60df7c0f(budget, infile):
    """Variant: full search loop; returns the last candidate `cand`
    (unbound if the inner loop never ran — NOTE(review))."""
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
        else:
            can_replicate = remaining_budget / lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in xrange(0, min(remaining_budget, partial) + 1):
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)
    return cand


def func_e9246604588c46948de89237bdd32275(budget, infile):
    """Variant: full search loop; returns the last `needed_budget` examined."""
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
        else:
            can_replicate = remaining_budget / lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in xrange(0, min(remaining_budget, partial) + 1):
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)
    return needed_budget


def func_dae21bdf565e4f90aae2973ca9e7486f(budget, infile):
    """Variant: same setup; NOTE(review): this chunk is truncated mid-statement
    ("for p in") — the remainder of the function is past the visible text."""
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        needed_budget = (37 - len(placed)) * lowest
        for p in
""" Double entry accounting system: A debit is an accounting entry that either increases an asset or expense account, or decreases a liability or equity account. It is positioned to the left in an accounting entry. Debit means "left", dividends/expenses/assets/losses increased with debit. A credit is an accounting entry that either increases a liability or equity account, or decreases an asset or expense account. Credit means "right", gains/income/revenues/liabilities/equity increased with credit. """ from datetime import datetime, timedelta from decimal import Decimal from typing import Optional, Type from math import floor from django.contrib.auth.models import User from django.core.exceptions import ValidationError from jacc.helpers import sum_queryset from django.conf import settings from django.db import models, transaction from django.db.models import QuerySet, Q from django.utils.timezone import now from jutil.cache import CachedFieldsMixin from django.utils.translation import gettext_lazy as _ from jutil.format import choices_label from jutil.modelfields import SafeCharField, SafeTextField CATEGORY_ANY = "" CATEGORY_DEBIT = "D" # "left", dividends/expenses/assets/losses increased with debit CATEGORY_CREDIT = "C" # "right", gains/income/revenues/liabilities/equity increased with credit CATEGORY_TYPE = ( (CATEGORY_ANY, ""), (CATEGORY_DEBIT, _("Debit")), (CATEGORY_CREDIT, _("Credit")), ) CURRENCY_TYPE = ( ("EUR", "EUR"), ("USD", "USD"), ) INVOICE_NOT_DUE_YET = "N" INVOICE_DUE = "D" INVOICE_LATE = "L" INVOICE_PAID = "P" INVOICE_STATE = ( (INVOICE_NOT_DUE_YET, _("Not due yet")), (INVOICE_DUE, _("Due")), (INVOICE_LATE, _("Late")), (INVOICE_PAID, _("Paid")), ) INVOICE_DEFAULT = "I1" INVOICE_CREDIT_NOTE = "I2" INVOICE_TYPE = ( (INVOICE_DEFAULT, _("Invoice")), (INVOICE_CREDIT_NOTE, _("Credit Note")), ) class AccountEntrySourceFile(models.Model): """ Account entry source is set for entries based on some event like payment file import """ name = 
SafeCharField(verbose_name=_("name"), max_length=255, db_index=True, blank=True, default="") created = models.DateTimeField(verbose_name=_("created"), default=now, db_index=True, editable=False, blank=True) last_modified = models.DateTimeField(verbose_name=_("last modified"), auto_now=True, db_index=True, editable=False, blank=True) class Meta: verbose_name = _("account entry source file") verbose_name_plural = _("account entry source files") def __str__(self): return "[{}] {}".format(self.id, self.name) class EntryType(models.Model): code = SafeCharField(verbose_name=_("code"), max_length=64, db_index=True, unique=True) identifier = SafeCharField(verbose_name=_("identifier"), max_length=40, db_index=True, blank=True, default="") name = SafeCharField(verbose_name=_("name"), max_length=128, db_index=True, blank=True, default="") created = models.DateTimeField(verbose_name=_("created"), default=now, db_index=True, editable=False, blank=True) last_modified = models.DateTimeField(verbose_name=_("last modified"), auto_now=True, db_index=True, editable=False, blank=True) payback_priority = models.SmallIntegerField(verbose_name=_("payback priority"), default=0, blank=True, db_index=True) is_settlement = models.BooleanField(verbose_name=_("is settlement"), default=False, blank=True, db_index=True) is_payment = models.BooleanField(verbose_name=_("is payment"), default=False, blank=True, db_index=True) class Meta: verbose_name = _("entry type") verbose_name_plural = _("entry types") def __str__(self): return "{} ({})".format(self.name, self.code) class AccountEntryNote(models.Model): account_entry = models.ForeignKey("AccountEntry", verbose_name=_("account entry"), related_name="note_set", on_delete=models.CASCADE) created = models.DateTimeField(verbose_name=_("created"), default=now, db_index=True, editable=False, blank=True) created_by = models.ForeignKey( User, verbose_name=_("created by"), editable=False, blank=True, related_name="accountentrynote_set", 
on_delete=models.CASCADE, ) last_modified = models.DateTimeField(verbose_name=_("last modified"), auto_now=True, editable=False, blank=True) note = models.TextField(_("note")) class Meta: verbose_name = _("account entry note") verbose_name_plural = _("account entry notes") class AccountEntryManager(models.Manager): pass class AccountEntry(models.Model): """ Single mutation in account state. """ objects: models.Manager = AccountEntryManager() account = models.ForeignKey( "Account", verbose_name=_("record account"), related_name="accountentry_set", db_index=True, on_delete=models.PROTECT, ) created = models.DateTimeField(verbose_name=_("created"), default=now, db_index=True, editable=False, blank=True) last_modified = models.DateTimeField(verbose_name=_("last modified"), auto_now=True, editable=False, blank=True) timestamp = models.DateTimeField(verbose_name=_("timestamp"), default=now, db_index=True, blank=True) type = models.ForeignKey( EntryType, verbose_name=_("type"), related_name="+", on_delete=models.PROTECT, null=True, default=None, blank=True, ) description = SafeCharField(verbose_name=_("description"), max_length=256, default="", blank=True) amount = models.DecimalField(verbose_name=_("amount"), max_digits=10, decimal_places=2, blank=True, default=None, null=True, db_index=True) source_file = models.ForeignKey( AccountEntrySourceFile, verbose_name=_("account entry source file"), related_name="+", null=True, default=None, blank=True, on_delete=models.CASCADE, help_text=_("entry.source.file.help.text"), ) source_invoice = models.ForeignKey( "Invoice", verbose_name=_("source invoice"), null=True, related_name="+", default=None, blank=True, on_delete=models.CASCADE, help_text=_("entry.source.invoice.help.text"), ) settled_invoice = models.ForeignKey( "Invoice", verbose_name=_("settled invoice"), null=True, related_name="+", default=None, blank=True, on_delete=models.PROTECT, help_text=_("entry.settled.invoice.help.text"), ) settled_item = models.ForeignKey( 
"AccountEntry", verbose_name=_("settled item"), null=True, related_name="settlement_set", default=None, blank=True, on_delete=models.PROTECT, help_text=_("entry.settled.item.help.text"), ) parent = models.ForeignKey( "AccountEntry", verbose_name=_("account.entry.parent"), related_name="child_set", db_index=True, on_delete=models.CASCADE, null=True, default=None, blank=True, ) archived = models.BooleanField(_("archived"), default=False, blank=True) class Meta: verbose_name = _("account entry") verbose_name_plural = _("account entries") def __str__(self): return "[{}] {} {} {}".format( self.id, self.timestamp.date().isoformat() if self.timestamp else "", self.type if self.type else "", self.amount, ) def clean(self): if self.source_invoice and self.settled_invoice: raise ValidationError( "Both source_invoice ({}) and settled_invoice ({}) cannot be set same time for account entry ({})".format( self.source_invoice, self.settled_invoice, self ) ) @property def is_parent(self) -> bool: """ True if this is a parent of some other account entry. :return: bool """ return AccountEntry.objects.filter(parent=self).exists() @property def balance(self) -> Decimal: """ Returns account balance after this entry. 
:return: Decimal """ return sum_queryset(AccountEntry.objects.filter(account=self.account, timestamp__lte=self.timestamp).exclude(timestamp=self.timestamp, id__gt=self.id)) balance.fget.short_description = _("balance") # type: ignore # pytype: disable=attribute-error class AccountType(models.Model): code = SafeCharField(verbose_name=_("code"), max_length=32, db_index=True, unique=True) name = SafeCharField(verbose_name=_("name"), max_length=64, db_index=True, unique=True) is_asset = models.BooleanField(verbose_name=_("asset")) created = models.DateTimeField(verbose_name=_("created"), default=now, db_index=True, editable=False, blank=True) last_modified = models.DateTimeField(verbose_name=_("last modified"), auto_now=True, db_index=True, editable=False, blank=True) class Meta: verbose_name = _("account type") verbose_name_plural = _("account types") def __str__(self): return str(self.name) @property def is_liability(self) -> bool: return not self.is_asset is_liability.fget.short_description = _("liability") # type: ignore # pytype: disable=attribute-error class Account(models.Model): """ Collects together accounting entries and provides summarizing functionality. 
""" type = models.ForeignKey(AccountType, verbose_name=_("type"), related_name="+", on_delete=models.PROTECT) name = SafeCharField(verbose_name=_("name"), max_length=64, blank=True, default="", db_index=True) currency = SafeCharField(verbose_name=_("currency"), max_length=3, default="EUR", choices=CURRENCY_TYPE, blank=True) created = models.DateTimeField(verbose_name=_("created"), default=now, db_index=True, editable=False, blank=True) last_modified = models.DateTimeField(verbose_name=_("last modified"), auto_now=True, db_index=True, editable=False, blank=True) notes = models.TextField(_("notes"), blank=True, default="") class Meta: verbose_name = _("account") verbose_name_plural = _("accounts") def __str__(self): return "[{}] {}".format(self.id, self.name or self.type.name) def is_asset(self) -> bool: return self.type.is_asset is_asset.boolean = True # type: ignore is_asset.short_description = _("asset") # type: ignore def is_liability(self) -> bool: return self.type.is_liability is_liability.boolean = True # type: ignore is_liability.short_description = _("liability") # type: ignore @property def balance(self) -> Decimal: return sum_queryset(self.accountentry_set.all()) balance.fget.short_description = _("balance") # type: ignore # pytype: disable=attribute-error def get_balance(self, t: datetime): """ Returns account balance before specified datetime (excluding entries on the datetime). 
:param t: datetime :return: Decimal """ return sum_queryset(self.accountentry_set.all().filter(timestamp__lt=t)) def needs_settling(self, e: AccountEntry) -> bool: """ Returns True if all of following conditions are True: a) entry has valid amount set b) entry type is settlement c) entry has been recorded to this account d) invoice to be settled has been set e) entry has not been settled (=child set empty) :param e: AccountEntry (settlement) :return: bool """ return bool(e.amount is not None and e.type and e.type.is_settlement and e.account.id == self.id and e.settled_invoice and not e.is_parent) class InvoiceManager(models.Manager): @transaction.atomic def update_cached_fields(self, **kw): for obj in self.filter(**kw): obj.update_cached_fields() def get_default_due_date(): return now() + timedelta(days=settings.DEFAULT_DUE_DATE_DAYS) if hasattr(settings, "DEFAULT_DUE_DATE_DAYS") else None class Invoice(models.Model, CachedFieldsMixin): """ Invoice model. Typically used as base model for actual app-specific invoice model. Convention for naming date/time variables: 1) date fields are suffixed with _date if they are either plain date fields or interpreted as such (due_date) 2) natural datetime fields are in past tense, e.g. created, sent (instead of create_date, send_date) Note: It is useful sometimes to have full datetime with timezone even for plain dates like due_date, because this to be processing to be independent of server, client and invoice time zones. 
""" objects: models.Manager = InvoiceManager() type = SafeCharField(verbose_name=_("type"), max_length=2, db_index=True, default=INVOICE_DEFAULT, blank=True, choices=INVOICE_TYPE) number = SafeCharField(verbose_name=_("invoice number"), max_length=32, default="", blank=True, db_index=True) created = models.DateTimeField(verbose_name=_("created"), default=now, db_index=True, editable=False, blank=True) last_modified = models.DateTimeField(verbose_name=_("last modified"), auto_now=True, db_index=True, editable=False, blank=True) sent = models.DateTimeField(verbose_name=_("sent"), db_index=True, default=None, blank=True, null=True) due_date = models.DateTimeField(verbose_name=_("due date"), db_index=True, default=get_default_due_date) notes = SafeTextField(verbose_name=_("notes"), blank=True, default="") filename = SafeCharField(verbose_name=_("filename"), max_length=255, blank=True, default="", db_index=True) amount = models.DecimalField(verbose_name=_("amount"), max_digits=10, decimal_places=2, default=0, blank=True) paid_amount = models.DecimalField( verbose_name=_("paid amount"), max_digits=10, decimal_places=2, editable=False, blank=True, null=True, default=None, db_index=True, ) unpaid_amount = models.DecimalField( verbose_name=_("unpaid amount"), max_digits=10, decimal_places=2, editable=False, blank=True, null=True, default=None, db_index=True, ) overpaid_amount = models.DecimalField( verbose_name=_("overpaid amount"), max_digits=10, decimal_places=2, editable=False, blank=True, null=True, default=None, db_index=True, ) close_date = models.DateTimeField(verbose_name=_("close date"), default=None, null=True, blank=True, db_index=True) late_days = models.SmallIntegerField(verbose_name=_("late days"), default=None, null=True, blank=True, db_index=True) state = SafeCharField(verbose_name=_("state"), max_length=1, blank=True, default="", db_index=True, choices=INVOICE_STATE) cached_receivables_account: Optional[Account] = None cached_fields = [ "amount", 
"paid_amount", "unpaid_amount", "overpaid_amount", "close_date", "late_days", "state", ] class Meta: verbose_name = _("invoice") verbose_name_plural = _("invoices") def __str__(self): return "[{}] {} {}".format(self.id, self.due_date.date().isoformat() if self.due_date else "", self.amount) @property def receivables_account(self) -> Optional[Account]: """ Returns receivables account. Receivables account is assumed to be the one were invoice rows were recorded. :return: Account or None """ if self.cached_receivables_account is None: row = AccountEntry.objects.filter(source_invoice=self).order_by("id").first() if row is not None: assert isinstance(row, AccountEntry) self.cached_receivables_account = row.account return self.cached_receivables_account @property def currency(self) -> str: recv = self.receivables_account return recv.currency if recv else "" def get_entries(self, acc: Account, cls: Type[AccountEntry] = AccountEntry) -> QuerySet: """ Returns entries related to this invoice on specified account. :param acc: Account :param cls: AccountEntry class :return: QuerySet """ return cls.objects.filter(Q(account=acc) & (Q(source_invoice=self) | Q(settled_invoice=self))) if acc else cls.objects.none() def get_balance(self, acc: Account) -> Decimal: """ Returns balance of this invoice on specified account. :param
__all__ = () from os import listdir as list_directory from os.path import ( basename as base_name, exists, isabs as is_absolute_path_name, isdir as is_directory, isfile as is_file, join as join_paths ) from sys import path as route_paths from scarletio import CallableAnalyzer, HybridValueDictionary from .constants import ABSOLUTE_PATH_EXTENSION_NAME_PREFIX from .extension_root import register_extension_root def _validate_entry_or_exit(point): """ Validates the given entry or exit point, returning `True`, if they passed. Parameters ---------- point : `None`, `str`, `callable` The point to validate. Raises ------ TypeError If `point` was given as `callable`, but accepts less or more positional parameters, as would be given. """ if point is None: return True if isinstance(point, str): return True if callable(point): analyzer = CallableAnalyzer(point) min_, max_ = analyzer.get_non_reserved_positional_parameter_range() if min_ > 1: raise TypeError( f'`{point!r}` excepts at least `{min_!r}` non reserved parameters, meanwhile the event ' f'expects to pass `1`, got {point!r}' ) if min_ == 1: return True #min<expected if max_ >= 1: return True if analyzer.accepts_args(): return True raise TypeError( f'`{point!r}` expects maximum `{max_!r}` non reserved parameters meanwhile the event expects ' f'to pass `1`., got {point!r}' ) return False def validate_extension_parameters(entry_point=None, exit_point=None, extend_default_variables=True, locked=False, take_snapshot_difference=True, **variables): """ Validates extension parameters. Parameters ---------- entry_point : `None`, `str`, `callable`, = `None` Optional Extension specific entry point, to use over the extension loader's default. exit_point : `None`, `str`, `callable` = `None`, Optional Extension specific exit point, to use over the extension loader's default. extend_default_variables : `bool` = `True`, Optional Whether the extension should use the loader's default variables or just it's own. 
locked : `bool` = `False`, Optional Whether the given extension(s) should not be affected by `.{}_all` methods. take_snapshot_difference : `bool` = `True`, Optional Whether snapshot feature should be used. **variables : Keyword parameters Variables to assign to an extension(s)'s module before they are loaded. Raises ------ TypeError - If `entry_point` was not given as `None`, `str`, `callable`. - If `entry_point` was given as `callable`, but accepts less or more positional parameters, as would be given. - If `exit_point` was not given as `None`, `str`, `callable`. - If `exit_point` was given as `callable`, but accepts less or more positional parameters, as would be given. - If `extend_default_variables` was not given as `bool`. - If `locked` was not given as `bool`. - If `name` was not given as `str`, `iterable` of `str`. ValueError If a variable name is would be used, what is `module` attribute. Returns ------- entry_point : `None`, `str`, `callable` Extension specific entry point, to use over the extension loader's default. exit_point : `None`, `str`, `callable` Extension specific exit point, to use over the extension loader's default. extend_default_variables : `bool` Whether the extension should use the loader's default variables or just it's own. locked : `bool` Whether the given extension(s) should not be affected by `.{}_all` methods. take_snapshot_difference : `bool` Whether snapshot feature should be used. default_variables : `None`, `HybridValueDictionary` of (`str`, `Any`) items An optionally weak value dictionary to store objects for assigning them to modules before loading them. If would be empty, is set as `None` instead. """ if not _validate_entry_or_exit(entry_point): raise TypeError( f'`validate_extension_parameters` expected `None`, `str` or a `callable` as `entry_point`, got ' f'{entry_point.__class__.__name__}; {entry_point!r}.' 
)
    
    if not _validate_entry_or_exit(exit_point):
        raise TypeError(
            f'`validate_extension_parameters` expected `None`, `str` or a `callable` as `exit_point`, got '
            f'{exit_point.__class__.__name__}; {exit_point!r}.'
        )
    
    if variables:
        # Copy the user supplied variables, refusing any name that would shadow a module attribute.
        default_variables = HybridValueDictionary(variables)
        for key, value in variables.items():
            if key in PROTECTED_NAMES:
                raise ValueError(
                    f'The passed {key!r} is a protected variable name of module type.'
                )
            default_variables[key] = value
    else:
        default_variables = None
    
    # Accept any `int` subtype as a flag value, but normalise it to `bool`.
    extend_default_variables_type = extend_default_variables.__class__
    if extend_default_variables_type is bool:
        pass
    elif issubclass(extend_default_variables_type, int):
        extend_default_variables = bool(extend_default_variables)
    else:
        raise TypeError(
            f'`extend_default_variables` can be `bool`, got {extend_default_variables_type.__name__}; '
            f'{extend_default_variables!r}.'
        )
    
    # Same normalisation for `locked`.
    locked_type = type(locked)
    if locked_type is bool:
        pass
    elif issubclass(locked_type, int):
        locked = bool(locked)
    else:
        raise TypeError(
            f'`locked` can be `bool`, got {locked_type.__name__}; {locked!r}.'
        )
    
    return entry_point, exit_point, extend_default_variables, locked, take_snapshot_difference, default_variables


# Module type attribute names that user supplied extension variables must not shadow.
PROTECTED_NAMES = frozenset((
    '__class__',
    '__delattr__',
    '__dict__',
    '__dir__',
    '__doc__',
    '__eq__',
    '__format__',
    '__ge__',
    '__getattribute__',
    '__gt__',
    '__hash__',
    '__init__',
    '__init_subclass__',
    '__le__',
    '__lt__',
    '__module__',
    '__ne__',
    '__new__',
    '__reduce__',
    '__reduce_ex__',
    '__repr__',
    '__setattr__',
    '__sizeof__',
    '__str__',
    '__subclasshook__',
    '__weakref__',
    '_cached',
    '_set_fileattr',
    'cached',
    'has_location',
    'loader',
    'loader_state',
    'name',
    'origin',
    'parent',
    'submodule_search_locations',
    '__path__',
    '__spec__',
))

# File suffixes recognised as importable Python extension files.
PYTHON_EXTENSION_NAMES = frozenset(('.py', '.pyd', '.pyc', '.so'))


def _get_extension_name_and_path(name):
    """
    Fetches the name and the path of the first matched extension. If none is matched, raises `ImportError`.
    
    Parameters
    ----------
    name : `str`
        The name to fetch.
    
    Returns
    -------
    extension_name : `None`, `str`
        Extension's name.
    extension_path : `str`
        Path of the extension file.
    
    Raises
    ------
    ImportError
        - Could not resolve the given `name`.
        - If `name` could not be detected as an extension.
    TypeError
        - If `name` is not `str` nor an `iterable` of `str`.
    """
    if not isinstance(name, str):
        raise TypeError(
            f'`name` can be `str`, got {name.__class__.__name__}; {name!r}.'
        )
    
    generator = _iter_extension_names_and_paths(name)
    try:
        # Pull only the first match; the generator is closed right after.
        extension_pair = generator.send(None)
    except StopIteration:
        raise ImportError(
            f'No extensions found with the given name: {name!r}.'
        ) from None
    else:
        generator.close()
    
    return extension_pair


def _iter_extension_names_and_paths(name, *, register_directories_as_roots=False):
    """
    Fetches the names and the paths of the given extension.
    
    This function is a generator.
    
    Parameters
    ----------
    name : `str`, `iterable` of `str`
        The name(s) to fetch.
    register_directories_as_roots : `bool` = `False`, Optional (Keyword only)
        Whether directory roots should be registered.
    
    Yields
    ------
    extension_name : `None`, `str`
        Extension's name.
    extension_path : `str`
        Path of the extension file.
    
    Raises
    ------
    ImportError
        - Could not resolve the given `name`.
        - If `name` could not be detected as an extension.
    TypeError
        - If `name` is not `str` nor an `iterable` of `str`.
    """
    for name in _iter_name_maybe_iterable(name):
        if name.startswith(ABSOLUTE_PATH_EXTENSION_NAME_PREFIX):
            # NOTE(review): this branch yields the bare name (not a `(name, path)` pair as the
            # docstring describes) and stops iterating the remaining names — confirm against
            # the consumers of this generator.
            yield name
            return
        
        yield from _lookup_path(name, register_directories_as_roots)


def _iter_name_maybe_iterable(name):
    """
    Flattens the given name(s) to single strings.
    
    This function is a generator.
    
    Parameters
    ----------
    name : `str`, `iterable` of `str`
        The name to fetch to single strings.
    
    Yields
    ------
    name : `str`
        The single extension names.
    
    Raises
    ------
    ImportError
        - Could not resolve the given `name`.
    TypeError
        - If `name` is not `str` nor an `iterable` of `str`.
    """
    name_type = type(name)
    if name_type is str:
        yield name
    
    elif issubclass(name_type, str):
        # Normalise `str` subclasses to plain `str`.
        yield str(name)
    
    elif hasattr(name_type, '__iter__'):
        for sub_name in name:
            sub_name_type = type(sub_name)
            if sub_name_type is str:
                yield sub_name
            
            elif issubclass(sub_name_type, str):
                yield str(sub_name)
            
            else:
                raise TypeError(
                    f'`name` contains a non `str` element, got {sub_name_type.__name__}; {sub_name!r}; name={name!r}.'
                )
    
    else:
        raise TypeError(
            f'`name` can be `str`, `iterable` of `str`, got {name_type.__name__}; {name!r}.'
        )


def _lookup_path(import_name_or_path, register_directories_as_roots):
    """
    Detects the root of the given name.
    
    This function is a generator.
    
    Parameters
    ----------
    import_name_or_path : `str`
        An extension's import name, or it's absolute path.
    register_directories_as_roots : `bool`
        Whether directory roots should be registered.
    
    Yields
    ------
    extension_name : `None`, `str`
        Import name to an extension file. `None` when only the absolute path is known.
    extension_path : `str`
        Path of the file.
    
    Raises
    ------
    ImportError
        If `import_name_or_path` could not be detected as an extension.
    """
    if is_absolute_path_name(import_name_or_path):
        if exists(import_name_or_path):
            if is_directory(import_name_or_path):
                yield from _iter_directory(None, import_name_or_path)
                return
            
            if is_file(import_name_or_path):
                yield None, import_name_or_path
                return
    
    else:
        # Translate the dotted import name to a relative file system path and try it
        # against every registered lookup root.
        path_end = join_paths(*import_name_or_path.split('.'))
        for base_path in route_paths:
            path = join_paths(base_path, path_end)
            if exists(path) and is_directory(path):
                if register_directories_as_roots:
                    register_extension_root(import_name_or_path)
                
                yield from _iter_directory(import_name_or_path, path)
                return
            
            # Not a package directory; try each recognised file suffix.
            for python_extension_name in PYTHON_EXTENSION_NAMES:
                file_path = path + python_extension_name
                if exists(file_path) and is_file(file_path):
                    yield import_name_or_path, file_path
                    return
    
    raise ImportError(
        f'The given `import_name_or_path` could not be detected as an extension nor an absolute path, '
        f'got {import_name_or_path!r}.'
    )


def _iter_directory(import_name, directory_path):
    """
    Iterates over a directory's import names.
    
    Parameters
    ----------
    import_name : `None`, `str`
        The name of the extension if we would import it.
    directory_path : `str`
        Path to the directory
    
    Yields
    ------
    extension_name : `None`, `str`
        Detected import names for each applicable file in the directory.
    extension_path : `str`
        Path of the file.
    """
    for python_extension_name in PYTHON_EXTENSION_NAMES:
        file_path = join_paths(directory_path,
<gh_stars>0
# Unit tests for the project-local `packet` module (PLUS protocol packets):
# field accessors, magic detection, parsing and serialization.
import unittest

import packet


class TestDummy(unittest.TestCase):
    def test_dummy(self):
        # Sanity check that the test harness itself runs.
        self.assertEqual(True, True)


class TestBasicPacket(unittest.TestCase):
    """
    Basic packet & helpers tests.
    """

    def test_too_small_packet(self):
        """
        Tests if too small packets are rejected due to the minimum length requirement.
        """
        with self.assertRaises(ValueError):
            packet.parse_packet([10])

    def test_get_cat(self):
        """
        Tests if the CAT is read correctly.
        """
        # CAT is the 64-bit big-endian field at byte offset 4.
        buf = bytes([
            0x00, 0x00, 0x00, 0x00,
            0x01, 0x02, 0x03, 0x04,
            0x05, 0x06, 0x07, 0x08])
        expected = 0x0102030405060708
        self.assertEqual(packet.get_cat(buf), expected)

    def test_get_magic(self):
        """
        Tests if the Magic is read correctly.
        """
        # Magic is the top 28 bits of the first 32-bit word.
        buf = bytes([
            0x12, 0x23, 0x34, 0x81])
        expected = 0x1223348
        self.assertEqual(packet.get_magic(buf), expected)

    def test_get_flags(self):
        """
        Tests if the Flags are read correctly.
        """
        # Flags are the low 4 bits of the first 32-bit word.
        buf = bytes([
            0x12, 0x23, 0x34, 0x8B])
        expected = 0xB
        self.assertEqual(packet.get_flags(buf), expected)

    def test_get_psn(self):
        """
        Tests if the PSN is read correctly.
        """
        # PSN is the 32-bit big-endian field at byte offset 12.
        buf = bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x12, 0x34, 0x56, 0x78])
        expected = 0x12345678
        self.assertEqual(packet.get_psn(buf), expected)

    def test_get_pse(self):
        """
        Tests if the PSE is read correctly.
        """
        # PSE is the 32-bit big-endian field at byte offset 16.
        buf = bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x12, 0x34, 0x56, 0x78])
        expected = 0x12345678
        self.assertEqual(packet.get_pse(buf), expected)

    def test_get_lrsx(self):
        """
        Tests if get_l/r/s/x work correctly.
        """
        # Flags nibble 0xB = 1011b -> L=1, R=0, S=1, X=1.
        buf = bytes([
            0x00, 0x00, 0x00, 0xB])
        self.assertEqual(packet.get_l(buf), True)
        self.assertEqual(packet.get_r(buf), False)
        self.assertEqual(packet.get_s(buf), True)
        self.assertEqual(packet.get_x(buf), True)

        # Flags nibble 0x4 = 0100b -> every bit inverted w.r.t. the case above.
        buf = bytes([
            0x00, 0x00, 0x00, 0x04])
        self.assertEqual(packet.get_l(buf), not True)
        self.assertEqual(packet.get_r(buf), not False)
        self.assertEqual(packet.get_s(buf), not True)
        self.assertEqual(packet.get_x(buf), not True)

    def test_detect_plus(self):
        """
        Tests if detect_plus works correctly.
        """
        # magic := 0xd8007ff
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFB,
            0x11, 0x11, 0x11, 0x11,
            0x11, 0x11, 0x11, 0x11,
            0x22, 0x22, 0x22, 0x22,
            0x33, 0x33, 0x33, 0x33])
        self.assertEqual(packet.detect_plus(buf), True)

        # Wrong magic (0xD7...) -> not PLUS.
        buf = bytes([
            0xD7, 0x00, 0x7F, 0xFB,
            0x11, 0x11, 0x11, 0x11,
            0x11, 0x11, 0x11, 0x11,
            0x22, 0x22, 0x22, 0x22,
            0x33, 0x33, 0x33, 0x33])
        self.assertEqual(packet.detect_plus(buf), False)

        # Correct magic but only 19 bytes (one short of the minimum) -> not PLUS.
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFB,
            0x11, 0x11, 0x11, 0x11,
            0x11, 0x11, 0x11,
            0x22, 0x22, 0x22, 0x22,
            0x33, 0x33, 0x33, 0x33])
        self.assertEqual(packet.detect_plus(buf), False)

    def test_detect_plus_in_udp(self):
        """
        Tests if detect_plus_in_udp works correctly.
        """
        # 8 bytes of UDP header in front of a valid PLUS packet.
        buf = bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0xD8, 0x00, 0x7F, 0xFB,
            0x11, 0x11, 0x11, 0x11,
            0x11, 0x11, 0x11, 0x11,
            0x22, 0x22, 0x22, 0x22,
            0x33, 0x33, 0x33, 0x33])
        self.assertEqual(packet.detect_plus_in_udp(buf), True)

        # No UDP header -> the magic is not where it is expected.
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFB,
            0x11, 0x11, 0x11, 0x11,
            0x11, 0x11, 0x11, 0x11,
            0x22, 0x22, 0x22, 0x22,
            0x33, 0x33, 0x33, 0x33])
        self.assertEqual(packet.detect_plus_in_udp(buf), False)

        # Shorter than a UDP header -> must raise.
        buf = bytes([
            0x00, 0x00, 0x00, 0x00])
        with self.assertRaises(ValueError):
            self.assertEqual(packet.detect_plus_in_udp(buf), False)

    def test_parse_packet_1(self):
        """
        Tests parsing a basic packet.
        """
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFA,  # magic + flags
            0x12, 0x34, 0x56, 0x78,  # cat (high half)
            0x21, 0x43, 0x65, 0x87,  # cat (low half)
            0x87, 0x65, 0x43, 0x21,  # psn
            0x11, 0x22, 0x33, 0x44,  # pse
            0x01, 0x02, 0x03, 0x04,  # payload
            0x10, 0x20, 0x30, 0x40,  # payload
            0x99, 0x90, 0x99, 0x90])  # payload
        # Flags nibble 0xA = 1010b -> L=1, R=0, S=1, X=0.
        l = True
        r = False
        s = True
        cat = 0x1234567821436587
        psn = 0x87654321
        pse = 0x11223344
        payload = bytes([
            0x01, 0x02, 0x03, 0x04,
            0x10, 0x20, 0x30, 0x40,
            0x99, 0x90, 0x99, 0x90])

        plus_packet = packet.parse_packet(buf)

        self.assertEqual(plus_packet.l, l)
        self.assertEqual(plus_packet.r, r)
        self.assertEqual(plus_packet.s, s)
        self.assertEqual(plus_packet.x, False)
        self.assertEqual(plus_packet.cat, cat)
        self.assertEqual(plus_packet.psn, psn)
        self.assertEqual(plus_packet.pse, pse)
        self.assertEqual(plus_packet.payload, payload)
        self.assertEqual(plus_packet.is_valid(), True)


class TestExtendedPacket(unittest.TestCase):
    """
    Tests for extended packets.
    """

    def test_parse_packet_1(self):
        """
        Tests parsing an extended packet.
        """
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFF,  # magic + flags (x bit set)
            0x12, 0x34, 0x56, 0x78,  # cat
            0x12, 0x34, 0x56, 0x78,  # cat..
            0x13, 0x11, 0x11, 0x11,  # psn
            0x23, 0x22, 0x22, 0x22,  # pse
            0x01,  # PCF Type := 0x01
            0x1B,  # PCF Len 6, PCF I = 11b
            0x01, 0x02, 0x03, 0x04, 0x05, 0x06,  # 6 bytes PCF value
            0x99, 0x98, 0x97, 0x96])  # 4 bytes payload
        l = True
        r = True
        s = True
        cat = 0x1234567812345678
        psn = 0x13111111
        pse = 0x23222222
        pcf_type = 0x01
        pcf_len = 0x06  # implied by len(pcf_value); not asserted directly
        pcf_integrity = 0x03
        pcf_value = bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06])
        payload = bytes([0x99, 0x98, 0x97, 0x96])

        plus_packet = packet.parse_packet(buf)

        self.assertEqual(plus_packet.l, l)
        self.assertEqual(plus_packet.r, r)
        self.assertEqual(plus_packet.s, s)
        self.assertEqual(plus_packet.x, True)
        self.assertEqual(plus_packet.cat, cat)
        self.assertEqual(plus_packet.psn, psn)
        self.assertEqual(plus_packet.pse, pse)
        self.assertEqual(plus_packet.payload, payload)
        self.assertEqual(plus_packet.pcf_type, pcf_type)
        self.assertEqual(plus_packet.pcf_value, pcf_value)
        self.assertEqual(plus_packet.pcf_integrity, pcf_integrity)
        self.assertEqual(plus_packet.is_valid(), True)

    def test_parse_packet_2(self):
        """
        Tests parsing an extended packet.
        """
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFF,  # magic + flags (x bit set)
            0x12, 0x34, 0x56, 0x78,  # cat
            0x12, 0x34, 0x56, 0x78,  # cat..
            0x13, 0x11, 0x11, 0x11,  # psn
            0x23, 0x22, 0x22, 0x22,  # pse
            0x00, 0x01,  # PCF Type := 0x0100
            0x00,  # PCF Len := 0, PCF I := 00b
            0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
            0x99, 0x98, 0x97, 0x96])  # 10 bytes payload
        l = True
        r = True
        s = True
        cat = 0x1234567812345678
        psn = 0x13111111
        pse = 0x23222222
        pcf_type = 0x0100
        pcf_len = 0x00  # zero-length PCF -> empty pcf_value
        pcf_integrity = 0x00
        pcf_value = bytes([])
        payload = bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
                         0x99, 0x98, 0x97, 0x96])

        plus_packet = packet.parse_packet(buf)

        self.assertEqual(plus_packet.l, l)
        self.assertEqual(plus_packet.r, r)
        self.assertEqual(plus_packet.s, s)
        self.assertEqual(plus_packet.x, True)
        self.assertEqual(plus_packet.cat, cat)
        self.assertEqual(plus_packet.psn, psn)
        self.assertEqual(plus_packet.pse, pse)
        self.assertEqual(plus_packet.payload, payload)
        self.assertEqual(plus_packet.pcf_type, pcf_type)
        self.assertEqual(plus_packet.pcf_value, pcf_value)
        self.assertEqual(plus_packet.pcf_integrity, pcf_integrity)
        self.assertEqual(plus_packet.is_valid(), True)

    def test_parse_packet_3(self):
        """
        Tests parsing an extended packet.
        """
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFF,  # magic + flags (x bit set)
            0x12, 0x34, 0x56, 0x78,  # cat
            0x12, 0x34, 0x56, 0x78,  # cat..
            0x13, 0x11, 0x11, 0x11,  # psn
            0x23, 0x22, 0x22, 0x22,  # pse
            0xFF,  # PCF Type := 0xFF
            0x01, 0x00,  # not parsed as PCF for type 0xFF; lands in the payload
            0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
            0x99, 0x98, 0x97, 0x96])
        l = True
        r = True
        s = True
        cat = 0x1234567812345678
        psn = 0x13111111
        pse = 0x23222222
        # Type 0xFF carries no PCF fields at all.
        pcf_type = 0xFF
        pcf_len = None
        pcf_integrity = None
        pcf_value = None
        payload = bytes([0x01, 0x00,
                         0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
                         0x99, 0x98, 0x97, 0x96])

        plus_packet = packet.parse_packet(buf)

        self.assertEqual(plus_packet.l, l)
        self.assertEqual(plus_packet.r, r)
        self.assertEqual(plus_packet.s, s)
        self.assertEqual(plus_packet.x, True)
        self.assertEqual(plus_packet.cat, cat)
        self.assertEqual(plus_packet.psn, psn)
        self.assertEqual(plus_packet.pse, pse)
        self.assertEqual(plus_packet.payload, payload)
        self.assertEqual(plus_packet.pcf_type, pcf_type)
        self.assertEqual(plus_packet.pcf_value, pcf_value)
        self.assertEqual(plus_packet.pcf_integrity, pcf_integrity)
        self.assertEqual(plus_packet.is_valid(), True)


class TestSerialize(unittest.TestCase):
    """
    Serialization tests.
    """

    def test_serialize_1(self):
        """
        Tests serialization of a packet.
        """
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFA,  # magic + flags
            0x12, 0x34, 0x56, 0x78,  # cat (high half)
            0x21, 0x43, 0x65, 0x87,  # cat (low half)
            0x87, 0x65, 0x43, 0x21,  # psn
            0x11, 0x22, 0x33, 0x44,  # pse
            0x01, 0x02, 0x03, 0x04,  # payload
            0x10, 0x20, 0x30, 0x40,  # payload
            0x99, 0x90, 0x99, 0x90])  # payload
        l = True
        r = False
        s = True
        cat = 0x1234567821436587
        psn = 0x87654321
        pse = 0x11223344
        payload = bytes([
            0x01, 0x02, 0x03, 0x04,
            0x10, 0x20, 0x30, 0x40,
            0x99, 0x90, 0x99, 0x90])

        plus_packet = packet.new_basic_packet(l, r, s, cat, psn, pse, payload)
        self.assertEqual(plus_packet.to_bytes(), buf)

    def test_serialize_2(self):
        """
        Tests serialization of a packet.
        """
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFF,  # magic + flags (x bit set)
            0x12, 0x34, 0x56, 0x78,  # cat
            0x12, 0x34, 0x56, 0x78,  # cat..
            0x13, 0x11, 0x11, 0x11,  # psn
            0x23, 0x22, 0x22, 0x22,  # pse
            0x01,  # PCF Type := 0x01
            0x1B,  # PCF Len 6, PCF I = 11b
            0x01, 0x02, 0x03, 0x04, 0x05, 0x06,  # 6 bytes PCF value
            0x99, 0x98, 0x97, 0x96])  # 4 bytes payload
        l = True
        r = True
        s = True
        cat = 0x1234567812345678
        psn = 0x13111111
        pse = 0x23222222
        pcf_type = 0x01
        pcf_len = 0x06  # implied by len(pcf_value)
        pcf_integrity = 0x03
        pcf_value = bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06])
        payload = bytes([0x99, 0x98, 0x97, 0x96])

        plus_packet = packet.new_extended_packet(l, r, s, cat, psn, pse,
                                                 pcf_type, pcf_integrity, pcf_value, payload)
        self.assertEqual(plus_packet.to_bytes(), buf)

    def test_serialize_3(self):
        """
        Tests serialization of a packet.
        """
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFF,  # magic + flags (x bit set)
            0x12, 0x34, 0x56, 0x78,  # cat
            0x12, 0x34, 0x56, 0x78,  # cat..
            0x13, 0x11, 0x11, 0x11,  # psn
            0x23, 0x22, 0x22, 0x22,  # pse
            0x00, 0x01,  # PCF Type := 0x0100
            0x1B,  # PCF Len 6, PCF I = 11b
            0x01, 0x02, 0x03, 0x04, 0x05, 0x06,  # 6 bytes PCF value
            0x99, 0x98, 0x97, 0x96])  # 4 bytes payload
        l = True
        r = True
        s = True
        cat = 0x1234567812345678
        psn = 0x13111111
        pse = 0x23222222
        pcf_type = 0x0100
        pcf_len = 0x06  # implied by len(pcf_value)
        pcf_integrity = 0x03
        pcf_value = bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06])
        payload = bytes([0x99, 0x98, 0x97, 0x96])

        plus_packet = packet.new_extended_packet(l, r, s, cat, psn, pse,
                                                 pcf_type, pcf_integrity, pcf_value, payload)
        self.assertEqual(plus_packet.to_bytes(), buf)

    def test_serialize_4(self):
        """
        Tests serialization of a packet.
        """
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFF,  # magic + flags (x bit set)
            0x12, 0x34, 0x56, 0x78,  # cat
            0x12, 0x34, 0x56, 0x78,  # cat..
            0x13, 0x11, 0x11, 0x11,  # psn
            0x23, 0x22, 0x22, 0x22,  # pse
            0x00, 0x01,  # PCF Type := 0x0100
            0x00,  # PCF Len := 0, PCF I := 00b
            0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
            0x99, 0x98, 0x97, 0x96])  # 10 bytes payload
        l = True
        r = True
        s = True
        cat = 0x1234567812345678
        psn = 0x13111111
        pse = 0x23222222
        pcf_type = 0x0100
        pcf_len = 0x00  # zero-length PCF -> empty pcf_value
        pcf_integrity = 0x00
        pcf_value = bytes([])
        payload = bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
                         0x99, 0x98, 0x97, 0x96])

        plus_packet = packet.new_extended_packet(l, r, s, cat, psn, pse,
                                                 pcf_type, pcf_integrity, pcf_value, payload)
        self.assertEqual(plus_packet.to_bytes(), buf)

    def test_serialize_5(self):
        """
        Tests serialization of a packet.
        """
        buf = bytes([
            0xD8, 0x00, 0x7F, 0xFF,  # magic + flags (x bit set)
            0x12, 0x34, 0x56, 0x78,  # cat
            0x12, 0x34, 0x56, 0x78,  # cat..
            0x13, 0x11, 0x11, 0x11,  # psn
            0x23, 0x22, 0x22, 0x22,  # pse
            0xFF,  # PCF Type := 0xFF
            0x01, 0x00,  # serialized from the payload (type 0xFF carries no PCF)
            0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
            0x99, 0x98, 0x97, 0x96])
        l = True
        r = True
        s = True
        cat = 0x1234567812345678
        psn = 0x13111111
        pse = 0x23222222
        pcf_type = 0xFF
        pcf_len = None
        pcf_integrity = None
        pcf_value = None
        payload = bytes([0x01, 0x00,
                         0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
                         0x99, 0x98, 0x97, 0x96])

        plus_packet = packet.new_extended_packet(l, r, s, cat, psn, pse,
                                                 pcf_type, pcf_integrity, pcf_value, payload)
        self.assertEqual(plus_packet.to_bytes(), buf)


import random


class TestFuzzy(unittest.TestCase):
    """
    Fuzzy testing. Let's hope this detects things we didn't think of.
    """

    def _random_buf(self):
        """
        Randomly alter a valid buffer (== can be parsed) and returns it.
        """
        buf = ([
            0xD8, 0x00, 0x7F, 0xFF,  # magic + flags (x bit set)
            0x12, 0x34, 0x56, 0x78,  # cat
            0x12, 0x34, 0x56, 0x78,  # cat..
0x13, 0x11, 0x11, 0x11, # psn 0x23, 0x22, 0x22, 0x22, # pse 0x01, 0x1B, # PCF Type := 0x01, # PCF Len 6, PCF I = 11b, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, # 6 bytes PCF value 0x99, 0x98, 0x97, 0x96]) n = random.randint(1, 10) i = 0 while i < n: j = random.randint(0, len(buf)-1) k = random.randint(0,
of rmsg # attach reciept message to existing message with val's incept message vmsg.extend(rmsg) assert vmsg == bytearray(b'{"v":"KERI10JSON0000e6_","i":"EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY' b'_qrIZIicQg","s":"0","t":"icp","kt":"1","k":["D8KY1sKmgyjAiUDdUBP' b'NPyrSz_ad_Qf9yzhDNZlEKiMc"],"n":"EOWDAJvex5dZzDxeHBANyaIoUG3F4-i' b'c81G6GwtnC4f4","wt":"0","w":[],"c":[]}-AABAAll_W0_FsjUyJnYokSNPq' b'q7xdwIBs0ebq2eUez6RKNB-UG_y6fD0e6fb_nANvmNCWjsoFjWv3XP3ApXUabMgy' b'BA{"v":"KERI10JSON000105_","i":"EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlR' b'YyB-6n4WDi7w","s":"0","t":"vrc","d":"EEnwxEm5Bg5s5aTLsgQCNpubIYz' b'wlvMwZIzdOM0Z3u7o","a":{"i":"EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY_' b'qrIZIicQg","s":"0","d":"EGFSGYH2BjtKwX1osO0ZvLw98nuuo3lMkveRoPIJ' b'zupo"}}-AABAAb6S-RXeAqUKl8UuNwYpiaFARhMj-95elxmr7uNU8m7buVSPVLbT' b'WcQYfI_04HoP_A_fvlU_b099fiEJyDSA2Cg') # Simulate send to coe of val's incept and val's receipt of coe's inception message coeKevery.process(ims=vmsg) # coe process val's incept and receipt # check if val Kever in coe's .kevers assert valpre in coeKevery.kevers # check if receipt quadruple from val in receipt database result = coeKevery.db.getVrcs(key=dgKey(pre=coeKever.prefixer.qb64, dig=coeKever.serder.diger.qb64)) assert bytes(result[0]) == (valKever.prefixer.qb64b + Seqner(sn=valKever.sn).qb64b + valKever.serder.diger.qb64b + siger.qb64b) assert bytes(result[0]) == (b'EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY_qrIZIicQg0AAAAAAAAAAAAAAAAAAAAAAAEGFSGYH2' b'BjtKwX1osO0ZvLw98nuuo3lMkveRoPIJzupoAAb6S-RXeAqUKl8UuNwYpiaFARhMj-95elxmr7uN' b'U8m7buVSPVLbTWcQYfI_04HoP_A_fvlU_b099fiEJyDSA2Cg') # create receipt to escrow use invalid dig and sn so not in coe's db fake = reserder.dig # some other dig reserder = chit(pre=coeK.prefixer.qb64, sn=10, dig=fake, seal=seal) # sign event not receipt counter = Counter(CtrDex.ControllerIdxSigs) siger = valSigners[vesn].sign(ser=coeIcpRaw, index=0) # return Siger if index # create message vmsg = bytearray(reserder.raw) vmsg.extend(counter.qb64b) 
vmsg.extend(siger.qb64b) assert vmsg == bytearray(b'{"v":"KERI10JSON000105_","i":"EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYy' b'B-6n4WDi7w","s":"a","t":"vrc","d":"EiRvswmIbhsbdz95TuwZSZkKL5jLn' b'R-kM0qwQ6PXH0hs","a":{"i":"EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY_qr' b'IZIicQg","s":"0","d":"EGFSGYH2BjtKwX1osO0ZvLw98nuuo3lMkveRoPIJzu' b'po"}}-AABAAb6S-RXeAqUKl8UuNwYpiaFARhMj-95elxmr7uNU8m7buVSPVLbTWc' b'QYfI_04HoP_A_fvlU_b099fiEJyDSA2Cg') coeKevery.process(ims=vmsg) # coe process the escrow receipt from val # check if receipt quadruple in escrow database result = coeKevery.db.getVres(key=snKey(pre=coeKever.prefixer.qb64, sn=10)) assert bytes(result[0]) == (fake.encode("utf-8") + valKever.prefixer.qb64b + Seqner(sn=valKever.sn).qb64b + valKever.serder.diger.qb64b + siger.qb64b) # Send receipt from coe to val # create receipt of val's inception # create seal of coe's last est event seal = SealEvent(i=coepre, s="{:x}".format(coeKever.lastEst.s), d=coeKever.lastEst.d) valK = coeKevery.kevers[valpre] # lookup valKever from coe's .kevers # create validator receipt reserder = chit(pre=valK.prefixer.qb64, sn=valK.sn, dig=valK.serder.diger.qb64, seal=seal) # sign vals's event not receipt # look up event to sign from coe's kever for val valIcpDig = bytes(coeKevery.db.getKeLast(key=snKey(pre=valpre, sn=vsn))) assert valIcpDig == valK.serder.diger.qb64b == b'EGFSGYH2BjtKwX1osO0ZvLw98nuuo3lMkveRoPIJzupo' valIcpRaw = bytes(coeKevery.db.getEvt(key=dgKey(pre=valpre, dig=valIcpDig))) assert valIcpRaw == (b'{"v":"KERI10JSON0000e6_","i":"EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY_qrIZIicQg",' b'"s":"0","t":"icp","kt":"1","k":["D8KY1sKmgyjAiUDdUBPNPyrSz_ad_Qf9yzhDNZlEKiM' b'c"],"n":"EOWDAJvex5dZzDxeHBANyaIoUG3F4-ic81G6GwtnC4f4","wt":"0","w":[],"c":[' b']}') counter = Counter(CtrDex.ControllerIdxSigs) assert counter.qb64 == '-AAB' siger = coeSigners[vesn].sign(ser=valIcpRaw, index=0) # return Siger if index assert siger.qb64 == 
'AAZqxNTt_LDZnmwEIaJX0cK9VKkCGq1UieEx6881MKKOtlRirvs_4pzFgmw3aRwAaIM2XV0biQ7xHeOoXglluDCA' # create receipt message cmsg = bytearray(reserder.raw) cmsg.extend(counter.qb64b) cmsg.extend(siger.qb64b) assert cmsg == bytearray(b'{"v":"KERI10JSON000105_","i":"EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY' b'_qrIZIicQg","s":"0","t":"vrc","d":"EGFSGYH2BjtKwX1osO0ZvLw98nuuo' b'3lMkveRoPIJzupo","a":{"i":"EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYyB-6' b'n4WDi7w","s":"0","d":"EEnwxEm5Bg5s5aTLsgQCNpubIYzwlvMwZIzdOM0Z3u' b'7o"}}-AABAAZqxNTt_LDZnmwEIaJX0cK9VKkCGq1UieEx6881MKKOtlRirvs_4pz' b'Fgmw3aRwAaIM2XV0biQ7xHeOoXglluDCA') # coe process own receipt in own Kevery so have copy in own log coeKevery.processOne(ims=bytearray(cmsg)) # make copy # Simulate send to val of coe's receipt of val's inception message valKevery.process(ims=cmsg) # coe process val's incept and receipt # check if receipt quadruple from coe in val's receipt database result = valKevery.db.getVrcs(key=dgKey(pre=valKever.prefixer.qb64, dig=valKever.serder.diger.qb64)) assert bytes(result[0]) == (coeKever.prefixer.qb64b + Seqner(sn=coeKever.sn).qb64b + coeKever.serder.diger.qb64b + siger.qb64b) assert bytes(result[0]) == (b'EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYyB-6n4WDi7w0AAAAAAAAAAAAAAAAAAAAAAAEEnwxEm5' b'Bg5s5aTLsgQCNpubIYzwlvMwZIzdOM0Z3u7oAAZqxNTt_LDZnmwEIaJX0cK9VKkCGq1UieEx6881' b'MKKOtlRirvs_4pzFgmw3aRwAaIM2XV0biQ7xHeOoXglluDCA') # Coe Event 1 RotationTransferable csn += 1 cesn += 1 assert csn == cesn == 1 coeSerder = rotate(pre=coeKever.prefixer.qb64, keys=[coeSigners[cesn].verfer.qb64], dig=coeKever.serder.diger.qb64, nxt=Nexter(keys=[coeSigners[cesn+1].verfer.qb64]).qb64, sn=csn) coe_event_digs.append(coeSerder.dig) # create sig counter counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1 # sign serialization siger = coeSigners[cesn].sign(coeSerder.raw, index=0) # returns siger # create serialized message cmsg = bytearray(coeSerder.raw) cmsg.extend(counter.qb64b) cmsg.extend(siger.qb64b) assert cmsg == 
bytearray(b'{"v":"KERI10JSON000122_","i":"EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYy' b'B-6n4WDi7w","s":"1","t":"rot","p":"EEnwxEm5Bg5s5aTLsgQCNpubIYzwl' b'vMwZIzdOM0Z3u7o","kt":"1","k":["DVcuJOOJF1IE8svqEtrSuyQjGTd2HhfA' b'kt9y2QkUtFJI"],"n":"E-dapdcC6XR1KWmWDsNl4J_OxcGxNZw1Xd95JH5a34fI' b'","wt":"0","wr":[],"wa":[],"a":[]}-AABAAEuHTj2jo-QgGg1FP0tq_q2Mj' b'CeJnzYoJY1Iw2h4ov3J4ki82aHDWxYhxMiXX-E8b0vRDfr3-EB11ofd_zx3cBQ') # update coe's key event verifier state coeKevery.processOne(ims=bytearray(cmsg)) # make copy # verify coe's copy of coe's event stream is updated assert coeKever.sn == csn assert coeKever.serder.diger.qb64 == coeSerder.dig # simulate send message from coe to val valKevery.process(ims=cmsg) # verify val's copy of coe's event stream is updated assert coeK.sn == csn assert coeK.serder.diger.qb64 == coeSerder.dig # create receipt of coe's rotation # create seal of val's last est event seal = SealEvent(i=valpre, s="{:x}".format(valKever.lastEst.s), d=valKever.lastEst.d) # create validator receipt reserder = chit(pre=coeK.prefixer.qb64, sn=coeK.sn, dig=coeK.serder.diger.qb64, seal=seal) # sign coe's event not receipt # look up event to sign from val's kever for coe coeRotDig = bytes(valKevery.db.getKeLast(key=snKey(pre=coepre, sn=csn))) assert coeRotDig == coeK.serder.diger.qb64b == b'Enrq74_Q11S2vHx1gpK_46Ik5Q7Yy9K1zZ5BavqGDKnk' coeRotRaw = bytes(valKevery.db.getEvt(key=dgKey(pre=coepre, dig=coeRotDig))) assert coeRotRaw == (b'{"v":"KERI10JSON000122_","i":"EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYyB-6n4WDi7w",' b'"s":"1","t":"rot","p":"EEnwxEm5Bg5s5aTLsgQCNpubIYzwlvMwZIzdOM0Z3u7o","kt":"1' b'","k":["<KEY>"],"n":"<KEY>' b'DsNl4J_OxcGxNZw1Xd95JH5a34fI","wt":"0","wr":[],"wa":[],"a":[]}') counter = Counter(CtrDex.ControllerIdxSigs) siger = valSigners[vesn].sign(ser=coeRotRaw, index=0) # return Siger if index assert siger.qb64 == 'AAb1BJLLTkcTlefF1DOPKiOixLgQqnqxRsqEqGaaADLNwQ-uDeb2nNTQBB6SeclaihimPg9QwLnulUbdgYxI5ADg' # val create receipt message vmsg = 
bytearray(reserder.raw) vmsg.extend(counter.qb64b) vmsg.extend(siger.qb64b) assert vmsg == bytearray(b'{"v":"KERI10JSON000105_","i":"EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYy' b'B-6n4WDi7w","s":"1","t":"vrc","d":"Enrq74_Q11S2vHx1gpK_46Ik5Q7Yy' b'9K1zZ5BavqGDKnk","a":{"i":"EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY_qr' b'IZIicQg","s":"0","d":"EGFSGYH2BjtKwX1osO0ZvLw98nuuo3lMkveRoPIJzu' b'po"}}-AABAAb1BJLLTkcTlefF1DOPKiOixLgQqnqxRsqEqGaaADLNwQ-uDeb2nNT' b'QBB6SeclaihimPg9QwLnulUbdgYxI5ADg') # val process own receipt in own kevery so have copy in own log valKevery.processOne(ims=bytearray(vmsg)) # make copy # Simulate send to coe of val's receipt of coe's rotation message coeKevery.process(ims=vmsg) # coe process val's incept and receipt # check if receipt quadruple from val in receipt database result = coeKevery.db.getVrcs(key=dgKey(pre=coeKever.prefixer.qb64, dig=coeKever.serder.diger.qb64)) assert bytes(result[0]) == (valKever.prefixer.qb64b + Seqner(sn=valKever.sn).qb64b + valKever.serder.diger.qb64b + siger.qb64b) assert bytes(result[0]) == (b'EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY_qrIZIicQg0AAAAAAAAAAAAAAAAAAAAAAAEGFSGYH2' b'BjtKwX1osO0ZvLw98nuuo3lMkveRoPIJzupoAAb1BJLLTkcTlefF1DOPKiOixLgQqnqxRsqEqGaa' b'ADLNwQ-uDeb2nNTQBB6SeclaihimPg9QwLnulUbdgYxI5ADg') # Next Event 2 Coe Interaction csn += 1 # do not increment esn assert csn == 2 assert cesn == 1 coeSerder = interact(pre=coeKever.prefixer.qb64, dig=coeKever.serder.diger.qb64, sn=csn) coe_event_digs.append(coeSerder.dig) # create sig counter counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1 # sign serialization siger = coeSigners[cesn].sign(coeSerder.raw, index=0) # create msg cmsg = bytearray(coeSerder.raw) cmsg.extend(counter.qb64b) cmsg.extend(siger.qb64b) assert cmsg == bytearray(b'{"v":"KERI10JSON000098_","i":"EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYy' b'B-6n4WDi7w","s":"2","t":"ixn","p":"Enrq74_Q11S2vHx1gpK_46Ik5Q7Yy' b'9K1zZ5BavqGDKnk","a":[]}-AABAARxj7iqT5m3wQIPOfCPFkeGEw1j5QY-lXbR' 
b'GaRSVxzW9SZIX-mXJfIjs7m6MlaYFEIJs3fiCWCj9JdUz0BHlRDA') # update coe's key event verifier state coeKevery.processOne(ims=bytearray(cmsg)) # make copy # verify coe's copy of coe's event stream is updated assert coeKever.sn == csn assert coeKever.serder.diger.qb64 == coeSerder.dig # simulate send message from coe to val valKevery.process(ims=cmsg) # verify val's copy of coe's event stream is updated assert coeK.sn == csn assert coeK.serder.diger.qb64 == coeSerder.dig # create receipt of coe's interaction # create seal of val's last est event seal = SealEvent(i=valpre, s="{:x}".format(valKever.lastEst.s), d=valKever.lastEst.d) # create validator receipt reserder = chit(pre=coeK.prefixer.qb64, sn=coeK.sn, dig=coeK.serder.diger.qb64, seal=seal) # sign coe's event not receipt # look up event to sign from val's kever for coe coeIxnDig = bytes(valKevery.db.getKeLast(key=snKey(pre=coepre, sn=csn))) assert coeIxnDig == coeK.serder.diger.qb64b == b'E-5RimdY_OWoreR-Z-Q5G81-I4tjASJCaP_MqkBbtM2w' coeIxnRaw = bytes(valKevery.db.getEvt(key=dgKey(pre=coepre, dig=coeIxnDig))) assert coeIxnRaw == (b'{"v":"KERI10JSON000098_","i":"EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYyB-6n4WDi7w",' b'"s":"2","t":"ixn","p":"Enrq74_Q11S2vHx1gpK_46Ik5Q7Yy9K1zZ5BavqGDKnk","a":[]}') counter = Counter(CtrDex.ControllerIdxSigs) siger = valSigners[vesn].sign(ser=coeIxnRaw, index=0) # return Siger if index assert siger.qb64 == '<KEY>' # create receipt message vmsg = bytearray(reserder.raw) vmsg.extend(counter.qb64b) vmsg.extend(siger.qb64b) assert vmsg == bytearray(b'{"v":"KERI10JSON000105_","i":"EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYy' b'B-6n4WDi7w","s":"2","t":"vrc","d":"E-5RimdY_OWoreR-Z-Q5G81-I4tjA' b'SJCaP_MqkBbtM2w","a":{"i":"EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY_qr' b'IZIicQg","s":"0","d":"EGFSGYH2BjtKwX1osO0ZvLw98nuuo3lMkveRoPIJzu' b'po"}}-AABAA71XY3Y7gt3FQ3RkRDN2JN5wsKVFSqxc55yBl3PecKEpSSn_tjjtKx' b'hvZZgWtvUxHiaSt94h8huBZ0jVdWeM6DA') # val process own receipt in own kevery so have copy in own log 
valKevery.processOne(ims=bytearray(vmsg)) # make copy # Simulate send to coe of val's receipt of coe's rotation message coeKevery.process(ims=vmsg) # coe process val's incept and receipt # check if receipt quadruple from val in receipt database result = coeKevery.db.getVrcs(key=dgKey(pre=coeKever.prefixer.qb64, dig=coeKever.serder.diger.qb64)) assert bytes(result[0]) == (valKever.prefixer.qb64b + Seqner(sn=valKever.sn).qb64b + valKever.serder.diger.qb64b + siger.qb64b) assert bytes(result[0]) == (b'EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY_qrIZIicQg0AAAAAAAAAAAAAAAAAAAAAAAEGFSGYH2' b'BjtKwX1osO0ZvLw98nuuo3lMkveRoPIJzupoAA71XY3Y7gt3FQ3RkRDN2JN5wsKVFSqxc55yBl3P' b'ecKEpSSn_tjjtKxhvZZgWtvUxHiaSt94h8huBZ0jVdWeM6DA') # verify final coe event state assert coeKever.verfers[0].qb64 == coeSigners[cesn].verfer.qb64 assert coeKever.sn == coeK.sn == csn db_digs = [bytes(v).decode("utf-8") for v in coeKever.baser.getKelIter(coepre)] assert len(db_digs) == len(coe_event_digs) == csn+1 assert db_digs == coe_event_digs == ['EEnwxEm5Bg5s5aTLsgQCNpubIYzwlvMwZIzdOM0Z3u7o', 'Enrq74_Q11S2vHx1gpK_46Ik5Q7Yy9K1zZ5BavqGDKnk', 'E-5RimdY_OWoreR-Z-Q5G81-I4tjASJCaP_MqkBbtM2w'] db_digs = [bytes(v).decode("utf-8") for v in valKever.baser.getKelIter(coepre)] assert len(db_digs) == len(coe_event_digs) == csn+1 assert db_digs == coe_event_digs # verify final val event state assert valKever.verfers[0].qb64 == valSigners[vesn].verfer.qb64 assert valKever.sn == valK.sn == vsn db_digs = [bytes(v).decode("utf-8") for v in valKever.baser.getKelIter(valpre)] assert len(db_digs) == len(val_event_digs) == vsn+1 assert db_digs == val_event_digs == ['EGFSGYH2BjtKwX1osO0ZvLw98nuuo3lMkveRoPIJzupo'] db_digs = [bytes(v).decode("utf-8") for v in coeKever.baser.getKelIter(valpre)] assert len(db_digs) == len(val_event_digs) == vsn+1 assert db_digs == val_event_digs assert not os.path.exists(valKevery.db.path) assert not os.path.exists(coeKever.baser.path) """ Done Test """ def test_direct_mode_cbor_mgpk(): """ Test direct 
mode with transverable validator event receipts but using cbor and mspk serializations """ # manual process to generate a list of secrets # root = pysodium.randombytes(pysodium.crypto_pwhash_SALTBYTES) # secrets = generateSecrets(root=root, count=8) # Direct Mode initiated by coe is controller, val is validator # but goes both ways once initiated. # set of secrets (seeds for private keys) coeSecrets = [ '<KEY>', '<KEY>', '<KEY>', 'Alntkt3u6dDgiQxTATr01dy8M72uuaZEf9eTdM-70Gk8', 'A1-QxDkso9-MR1A8rZz_Naw6fgaAtayda8hrbkRVVu1E', 'AKuYMe09COczwf2nIoD5AE119n7GLFOVFlNLxZcKuswc', 'AxFfJTcSuEE11FINfXMqWttkZGnUZ8KaREhrnyAXTsjw', 'ALq-w1UKkdrppwZzGTtz4PWYEeWm0-sDHzOv5sq96xJY' ] # create coe signers coeSigners = [Signer(qb64=secret) for secret in coeSecrets] assert [signer.qb64 for signer in coeSigners] == coeSecrets # set of secrets (seeds for private keys) valSecrets = ['<KEY>', '<KEY>', '<KEY>', '<KEY>', '<KEY>', '<KEY>', '<KEY>', 'ADW3o9m3udwEf0aoOdZLLJdf1aylokP0lwwI_M2J9h0s'] # create val signers valSigners = [Signer(qb64=secret) for secret in valSecrets] assert [signer.qb64 for signer in valSigners] == valSecrets with openDB("controller") as coeLogger, openDB("validator") as valLogger: # init Keverys coeKevery = Kevery(db=coeLogger) valKevery = Kevery(db=valLogger) coe_event_digs = [] # list of coe's own event log digs to verify against database val_event_digs = [] # list of val's own event log digs to verify against database # init sequence numbers for both coe and val csn = cesn = 0 # sn and last establishment sn = esn vsn = vesn = 0 # sn and last establishment sn
import sys
import time

import numpy as np

import readers.utils as utils
from readers.Mention import Mention
from readers.config import Config
from readers.vocabloader import VocabLoader

import ccg_nlpy
from ccg_nlpy.core.text_annotation import TextAnnotation

start_word = "<s>"
end_word = "<eos>"


# Reader for Text Annotations
class TextAnnoTestReader(object):
    """Inference-time reader that turns a ccg_nlpy TextAnnotation into
    batches of (context, coherence, candidate) tensors for entity linking.

    Loads word/label/WID vocabularies, the pruned crosswikis candidate
    dictionary and (optionally) pretrained Glove vectors at construction;
    documents are supplied afterwards via new_test_file / new_ta.
    """

    def __init__(
        self,
        config,
        vocabloader,
        num_cands,
        batch_size,
        strict_context=True,
        pretrain_wordembed=True,
        coherence=True,
        nerviewname="NER_CONLL",
    ):
        # config: readers.config.Config with vocab/crosswikis pickle paths.
        # vocabloader: VocabLoader supplying the vocab dictionaries below.
        # num_cands: fixed number of candidate WIDs emitted per mention.
        # batch_size: mentions per batch from _next_batch.
        # strict_context: exclude the mention surface from left/right context.
        # pretrain_wordembed: keep raw tokens (embedded later via word2vec)
        #   instead of converting tokens to vocab indices.
        # nerviewname: name of the NER view to read from the TextAnnotation.
        self.typeOfReader = "inference"
        self.start_word = start_word
        self.end_word = end_word
        self.unk_word = "unk"  # In tune with word2vec
        self.unk_wid = "<unk_wid>"
        self.tr_sup = "tr_sup"
        self.tr_unsup = "tr_unsup"
        self.pretrain_wordembed = pretrain_wordembed
        self.coherence = coherence
        self.nerviewname = nerviewname

        # Word Vocab
        (self.word2idx, self.idx2word) = vocabloader.getGloveWordVocab()
        self.num_words = len(self.idx2word)

        # Label Vocab
        (self.label2idx, self.idx2label) = vocabloader.getLabelVocab()
        self.num_labels = len(self.idx2label)

        # Known WID Vocab
        (self.knwid2idx, self.idx2knwid) = vocabloader.getKnwnWidVocab()
        self.num_knwn_entities = len(self.idx2knwid)

        # Wid2Wikititle Map
        self.wid2WikiTitle = vocabloader.getWID2Wikititle()

        # Coherence String Vocab
        print("Loading Coherence Strings Dicts ... ")
        (self.cohG92idx, self.idx2cohG9) = utils.load(config.cohstringG9_vocab_pkl)
        self.num_cohstr = len(self.idx2cohG9)

        # Crosswikis
        print("Loading Crosswikis dict. (takes ~2 mins to load)")
        self.crosswikis = utils.load(config.crosswikis_pruned_pkl)
        print("Crosswikis loaded. Size: {}".format(len(self.crosswikis)))

        if self.pretrain_wordembed:
            stime = time.time()
            self.word2vec = vocabloader.loadGloveVectors()
            print("[#] Glove Vectors loaded!")
            # NOTE(review): ttime is computed but never used or printed.
            ttime = (time.time() - stime) / float(60)

        # print("[#] Test Mentions File : {}".format(test_mens_file))
        # print("[#] Loading test file and preprocessing ... ")
        # with open(test_mens_file, 'r') as f:
        #     tajsonstr = f.read()
        # ta = TextAnnotation(json_str=tajsonstr)
        #
        # (sentences_tokenized, modified_ner_cons_list) = self.processTestDoc(ta)
        #
        # self.mention_lines = self.convertSent2NerToMentionLines(
        #     sentences_tokenized, modified_ner_cons_list)
        #
        # self.mentions = []
        # for line in self.mention_lines:
        #     m = Mention(line)
        #     self.mentions.append(m)

        self.men_idx = 0
        # self.num_mens = len(self.mentions)
        self.epochs = 0
        # print( "[#] Test Mentions : {}".format(self.num_mens))

        self.batch_size = batch_size
        print("[#] Batch Size: %d" % self.batch_size)
        self.num_cands = num_cands
        self.strict_context = strict_context

        print("\n[#]LOADING COMPLETE")

    # ******************* END __init__ *********************************

    def new_test_file(self, test_mens_file):
        """Load a TextAnnotation JSON file and (re)build the mention list,
        resetting the batch cursor and epoch counter."""
        self.test_mens_file = test_mens_file
        with open(test_mens_file, "r") as f:
            tajsonstr = f.read()
        ta = TextAnnotation(json_str=tajsonstr)
        self.textanno = ta

        (sentences_tokenized, modified_ner_cons_list) = self.processTestDoc(ta)

        self.mention_lines = self.convertSent2NerToMentionLines(
            sentences_tokenized, modified_ner_cons_list
        )

        self.mentions = []
        for line in self.mention_lines:
            m = Mention(line)
            self.mentions.append(m)

        self.men_idx = 0
        self.num_mens = len(self.mentions)
        self.epochs = 0

    def new_tajsonstr(self, tajsonstr):
        """ tajsonstr is a json str of a TA; parse it and load via new_ta. """
        ta = TextAnnotation(json_str=tajsonstr)
        self.new_ta(ta)

    def new_ta(self, ta):
        """Load an already-parsed TextAnnotation and rebuild the mention
        list (same post-processing as new_test_file)."""
        self.textanno = ta
        (sentences_tokenized, modified_ner_cons_list) = self.processTestDoc(ta)
        self.mention_lines = self.convertSent2NerToMentionLines(
            sentences_tokenized, modified_ner_cons_list
        )
        self.mentions = []
        for line in self.mention_lines:
            m = Mention(line)
            self.mentions.append(m)

        self.men_idx = 0
        self.num_mens = len(self.mentions)
        self.epochs = 0

    def get_vector(self, word):
        """Return the pretrained vector for word, falling back to "unk"."""
        if word in self.word2vec:
            return self.word2vec[word]
        else:
            return self.word2vec["unk"]

    def reset_test(self):
        """Rewind the mention cursor and epoch counter."""
        self.men_idx = 0
        self.epochs = 0

    def processTestDoc(self, ccgdoc):
        """Split the doc into tokenized sentences and re-index each NER
        constituent to sentence-local token offsets.

        Returns (sentences_tokenized, modified_ner_cons_list) where each
        modified ner dict has sentence-local 'start'/'end' (end inclusive)
        and a 'sent_idx'.
        """
        doc_tokens = ccgdoc.get_tokens
        # sent_end_token_indices : contains index for the starting of the
        # next sentence.
        sent_end_token_indices = ccgdoc.get_sentence_end_token_indices
        # List of tokenized sentences
        sentences_tokenized = []
        for i in range(0, len(sent_end_token_indices)):
            start = sent_end_token_indices[i - 1] if i != 0 else 0
            end = sent_end_token_indices[i]
            sent_tokens = doc_tokens[start:end]
            sentences_tokenized.append(sent_tokens)

        # List of ner dicts from ccg pipeline
        ner_cons_list = []
        try:
            ner_cons_list = ccgdoc.get_view(self.nerviewname).cons_list
            if ner_cons_list is None:
                ner_cons_list = []
        # NOTE(review): bare except silently swallows every error (including
        # typos/AttributeError), and despite the message nothing exits —
        # processing continues with an empty list. Narrow the exception.
        except:
            print("NO NAMED ENTITIES IN THE DOC. EXITING")

        modified_ner_cons_list = []
        for orig_ner in ner_cons_list:
            # copy so the TextAnnotation's own view dicts are not mutated
            ner = orig_ner.copy()
            # ner['end'] = ner['end'] + 1
            # ner['tokens'] = ' '.join(doc_tokens[ner['start']:ner['end']])
            found = False
            # idx = sentIdx, j = sentEndTokenIdx
            for idx, j in enumerate(sent_end_token_indices):
                sent_start_token = sent_end_token_indices[idx - 1] if idx != 0 else 0
                # ner['end'] is the idx of the token after ner
                if ner["end"] <= j:
                    ner["start"] = ner["start"] - sent_start_token
                    ner["end"] = ner["end"] - sent_start_token - 1
                    ner["sent_idx"] = idx
                    modified_ner_cons_list.append(ner)
                    found = True
                if found:
                    break
        return (sentences_tokenized, modified_ner_cons_list)

    def convertSent2NerToMentionLines(
        self, sentences_tokenized, modified_ner_cons_list
    ):
        """Convert NERs from document to list of mention strings.

        Each line is tab-separated: mid, wid, wikititle, start, end, surface,
        sentence, types, coherence-string (unknown fields get unk placeholders).
        """
        mentions = []
        # Make Document Context String for whole document
        cohStr = ""
        # for sent_idx, s_nerDicts in sentidx2ners.items():
        #     for s, ner in s_nerDicts:
        #         cohStr += ner['tokens'].replace(' ', '_') + ' '
        for ner_men in modified_ner_cons_list:
            cohStr += ner_men["tokens"].replace(" ", "_") + " "
        cohStr = cohStr.strip()

        for ner_men in modified_ner_cons_list:
            idx = ner_men["sent_idx"]
            sentence = " ".join(sentences_tokenized[idx])
            mention = "%s\t%s\t%s" % ("unk_mid", "unk_wid", "unkWT")
            mention = mention + "\t" + str(ner_men["start"])
            mention = mention + "\t" + str(ner_men["end"])
            mention = mention + "\t" + str(ner_men["tokens"])
            mention = mention + "\t" + sentence
            mention = mention + "\t" + "UNK_TYPES"
            mention = mention + "\t" + cohStr
            mentions.append(mention)
        return mentions

    def bracketMentionInSentence(self, s, nerDict):
        """Return the sentence with '[[' / ']]' inserted around the mention
        span given by nerDict['start']/['end'] (token offsets)."""
        tokens = s.split(" ")
        start = nerDict["start"]
        end = nerDict["end"]
        tokens.insert(start, "[[")
        tokens.insert(end + 2, "]]")
        return " ".join(tokens)

    def _read_mention(self):
        """Return the next Mention, cycling and bumping self.epochs at the
        end of the list; None when the document has no mentions."""
        if self.num_mens == 0:
            return None
        mention = self.mentions[self.men_idx]
        self.men_idx += 1
        if self.men_idx == self.num_mens:
            self.men_idx = 0
            self.epochs += 1
        return mention

    def _next_batch(self):
        """ Data : wikititle \t mid \t wid \t start \t end \t tokens \t labels
        start and end are inclusive

        Returns (left_batch, right_batch, coherence_batch, wid_idxs_batch,
        wid_cprobs_batch), or five Nones once the mention list is exhausted.
        """
        # Sentence = s1 ... m1 ... mN, ... sN.
        # Left Batch = s1 ... m1 ... mN
        # Right Batch = sN ... mN ... m1
        (left_batch, right_batch) = ([], [])

        coh_indices = []
        coh_values = []
        if self.coherence:
            coh_matshape = [self.batch_size, self.num_cohstr]
        else:
            coh_matshape = []

        # Candidate WID idxs and their cprobs
        # First element is always true wid
        (wid_idxs_batch, wid_cprobs_batch) = ([], [])

        while len(left_batch) < self.batch_size:
            batch_el = len(left_batch)
            m = self._read_mention()
            if m is None:
                return None, None, None, None, None

            # for label in m.types:
            #     if label in self.label2idx:
            #         labelidx = self.label2idx[label]
            #         labels_batch[batch_el][labelidx] = 1.0

            cohFound = False  # If no coherence mention is found, add unk
            if self.coherence:
                cohidxs = []  # Indexes in the [B, NumCoh] matrix
                cohvals = []  # 1.0 to indicate presence
                for cohstr in m.coherence:
                    if cohstr in self.cohG92idx:
                        cohidx = self.cohG92idx[cohstr]
                        cohidxs.append([batch_el, cohidx])
                        cohvals.append(1.0)
                        cohFound = True
                if cohFound:
                    coh_indices.extend(cohidxs)
                    coh_values.extend(cohvals)
                else:
                    cohidx = self.cohG92idx[self.unk_word]
                    coh_indices.append([batch_el, cohidx])
                    coh_values.append(1.0)

            # Left and Right context includes mention surface
            # NOTE(review): this first assignment is dead code — it is always
            # overwritten by the if/else just below.
            left_tokens = m.sent_tokens[0 : m.end_token + 1]
            right_tokens = m.sent_tokens[m.start_token :][::-1]

            # Strict left and right context
            if self.strict_context:
                left_tokens = m.sent_tokens[0 : m.start_token]
                right_tokens = m.sent_tokens[m.end_token + 1 :][::-1]
            # Left and Right context includes mention surface
            else:
                left_tokens = m.sent_tokens[0 : m.end_token + 1]
                right_tokens = m.sent_tokens[m.start_token :][::-1]

            if not self.pretrain_wordembed:
                left_idxs = [self.convert_word2idx(word) for word in left_tokens]
                right_idxs = [self.convert_word2idx(word) for word in right_tokens]
            else:
                left_idxs = left_tokens
                right_idxs = right_tokens

            left_batch.append(left_idxs)
            right_batch.append(right_idxs)

            # wids : [true_knwn_idx, cand1_idx, cand2_idx, ..., unk_idx]
            # wid_cprobs : [cwikis probs or 0.0 for unks]
            (wid_idxs, wid_cprobs) = self.make_candidates_cprobs(m)
            wid_idxs_batch.append(wid_idxs)
            wid_cprobs_batch.append(wid_cprobs)

        coherence_batch = (coh_indices, coh_values, coh_matshape)

        return (
            left_batch,
            right_batch,
            coherence_batch,
            wid_idxs_batch,
            wid_cprobs_batch,
        )

    def print_test_batch(self, mention, wid_idxs, wid_cprobs):
        """Debug helper: print a mention and its candidate titles/probs."""
        print(
            "Surface : {} WID : {} WT: {}".format(
                mention.surface, mention.wid, self.wid2WikiTitle[mention.wid]
            )
        )
        print(mention.wid in self.knwid2idx)
        for (idx, cprob) in zip(wid_idxs, wid_cprobs):
            print(
                "({} : {:0.5f})".format(self.wid2WikiTitle[self.idx2knwid[idx]], cprob),
                end=" ",
            )
        print("\n")

    def make_candidates_cprobs(self, m):
        """Look up candidate WIDs (as knwid indices) and their crosswikis
        prior probabilities for mention m, padded to self.num_cands with
        index 0 / prob 0.0."""
        # Fill num_cands now
        surface = utils._getLnrm(m.surface)
        wid_idxs = []
        wid_cprobs = []
        # print(surface)
        if surface in self.crosswikis:
            # Pruned crosswikis has only known wids and 30 cands at max
            # NOTE(review): the slice keeps at most num_cands - 1 candidates,
            # so the last slot is always padding — confirm whether the - 1 is
            # intentional (e.g. a reserved unk slot) or an off-by-one.
            candwids_cprobs = self.crosswikis[surface][0 : self.num_cands - 1]
            (wids, wid_cprobs) = candwids_cprobs
            wid_idxs = [self.knwid2idx[wid] for wid in wids]

        # All possible candidates added now. Pad with unks
        # assert len(wid_idxs) == len(wid_cprobs)
        remain = self.num_cands - len(wid_idxs)
        wid_idxs.extend([0] * remain)
        remain = self.num_cands - len(wid_cprobs)
        wid_cprobs.extend([0.0] * remain)

        return (wid_idxs, wid_cprobs)

    def embed_batch(self, batch):
        """ Input is a padded batch of left or right contexts containing words
        Dimensions should be [B, padded_length]
        Output: Embed the word idxs using pretrain word embedding
        """
        output_batch = []
        for sent in batch:
            word_embeddings = [self.get_vector(word) for word in sent]
            output_batch.append(word_embeddings)
        return output_batch

    def embed_mentions_batch(self, mentions_batch):
        """ Input is batch of mention tokens as a list of list of tokens.
        Output: For each mention, average word embeddings
        """
        embedded_mentions_batch = []
        for m_tokens in mentions_batch:
            # assumes 300-dim Glove vectors — TODO confirm against loader
            outvec = np.zeros(300, dtype=float)
            for word in m_tokens:
                outvec += self.get_vector(word)
            outvec = outvec / len(m_tokens)
            embedded_mentions_batch.append(outvec)
        return embedded_mentions_batch

    def pad_batch(self, batch):
        # Padding token: the unk vocab index when tokens were converted to
        # indices, otherwise the raw unk word string.
        if not self.pretrain_wordembed:
            pad_unit = self.word2idx[self.unk_word]
        else:
            pad_unit = self.unk_word
bytes out - takes pid - InRaw<8,8,8>, InRaw<4,4,0>'), ('nn::hid::IHidServer', 101, '8 bytes in - 4 bytes out - takes pid - InRaw<8,8,0>, OutRaw<4,4,0>'), ('nn::hid::IHidServer', 102, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>, Buffer<0,9,0>'), ('nn::hid::IHidServer', 103, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidServer', 104, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidServer', 106, '0x18 bytes in - 0 bytes out - takes pid - InRaw<8,8,8>, OutHandle<0,1>, InRaw<4,4,0>, InRaw<8,8,0x10>'), ('nn::hid::IHidServer', 107, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,8>, InRaw<4,4,0>'), ('nn::hid::IHidServer', 108, '4 bytes in - 8 bytes out - OutRaw<8,8,0>, InRaw<4,4,0>'), ('nn::hid::IHidServer', 120, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>, InRaw<8,8,8>'), ('nn::hid::IHidServer', 121, '8 bytes in - 8 bytes out - takes pid - InRaw<8,8,0>, OutRaw<8,8,0>'), ('nn::hid::IHidServer', 122, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,8>, InRaw<4,4,0>'), ('nn::hid::IHidServer', 123, '0x18 bytes in - 0 bytes out - takes pid - InRaw<8,8,8>, InRaw<4,4,0>, InRaw<8,8,0x10>'), ('nn::hid::IHidServer', 124, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,8>, InRaw<4,4,0>'), ('nn::hid::IHidServer', 125, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,8>, InRaw<4,4,0>, InRaw<4,4,4>'), ('nn::hid::IHidServer', 126, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidServer', 127, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidServer', 128, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>, InRaw<8,8,8>'), ('nn::hid::IHidServer', 129, '8 bytes in - 8 bytes out - takes pid - InRaw<8,8,0>, OutRaw<8,8,0>'), ('nn::hid::IHidServer', 130, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,8>, InRaw<4,4,0>, InRaw<4,4,4>'), ('nn::hid::IHidServer', 131, '0x10 bytes in - 1 bytes out - takes pid - OutRaw<1,1,0>, InRaw<8,8,8>, InRaw<4,4,0>'), 
('nn::hid::IHidServer', 132, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,8>, InRaw<4,4,4>, InRaw<1,1,0>'), ('nn::hid::IHidServer', 200, '4 bytes in - 8 bytes out - OutRaw<8,4,0>, InRaw<4,4,0>'), ('nn::hid::IHidServer', 201, '0x20 bytes in - 0 bytes out - takes pid - InRaw<8,8,0x18>, InRaw<4,4,0>, InRaw<0x10,4,4>'), ('nn::hid::IHidServer', 202, '0x10 bytes in - 0x10 bytes out - takes pid - OutRaw<0x10,4,0>, InRaw<8,8,8>, InRaw<4,4,0>'), ('nn::hid::IHidServer', 203, '0 bytes in - 0 bytes out - OutObject<0,0>'), ('nn::hid::IHidServer', 204, '1 bytes in - 0 bytes out - InRaw<1,1,0>'), ('nn::hid::IHidServer', 205, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'), ('nn::hid::IHidServer', 206, '8 bytes in - 0 bytes out - InRaw<8,8,0>, Buffer<0,9,0>, Buffer<1,9,0>'), ('nn::hid::IHidServer', 300, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidServer', 301, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,8>, InRaw<4,4,0>'), ('nn::hid::IHidServer', 302, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,8>, InRaw<4,4,0>'), ('nn::hid::IHidServer', 400, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'), ('nn::hid::IHidServer', 401, '1 bytes in - 0 bytes out - InRaw<1,1,0>'), ('nn::hid::IHidServer', 402, '4 bytes in - 1 bytes out - OutRaw<1,1,0>, InRaw<4,4,0>'), ('nn::hid::IHidServer', 1000, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>, InRaw<8,8,8>'), ('nn::hid::IHidServer', 1001, '0 bytes in - 8 bytes out - OutRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 31, '4 bytes in - 0 bytes out - InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 101, '8 bytes in - 0 bytes out - takes pid - OutHandle<0,1>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 111, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 121, '8 bytes in - 0 bytes out - takes pid - OutHandle<0,1>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 131, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 141, '8 bytes in - 
0 bytes out - takes pid - OutHandle<0,1>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 151, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 210, '0 bytes in - 0 bytes out - OutHandle<0,1>'), ('nn::hid::IHidSystemServer', 211, '0 bytes in - 8 bytes out - OutRaw<8,8,0>, Buffer<0,0xA,0>'), ('nn::hid::IHidSystemServer', 212, '4 bytes in - 0 bytes out - OutHandle<0,1>, InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 213, '0x10 bytes in - 0 bytes out - takes pid - InRaw<4,4,4>, InRaw<1,1,0>, InRaw<8,8,8>'), ('nn::hid::IHidSystemServer', 230, '4 bytes in - 0 bytes out - OutHandle<0,1>, InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 231, '0x10 bytes in - 0 bytes out - takes pid - InRaw<4,4,4>, InRaw<1,1,0>, InRaw<8,8,8>'), ('nn::hid::IHidSystemServer', 301, '4 bytes in - 0 bytes out - InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 303, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 304, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 305, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 306, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 307, '4 bytes in - 0x10 bytes out - OutRaw<8,8,0>, OutRaw<8,8,8>, InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 311, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,8>, InRaw<4,4,0>, InRaw<4,4,4>'), ('nn::hid::IHidSystemServer', 321, '4 bytes in - 8 bytes out - OutRaw<8,8,0>, Buffer<0,0xA,0>, InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 322, '0x10 bytes in - 8 bytes out - takes pid - InRaw<4,4,0>, OutRaw<8,8,0>, InRaw<8,8,8>'), ('nn::hid::IHidSystemServer', 323, '0x10 bytes in - 8 bytes out - takes pid - InRaw<4,4,0>, OutRaw<8,8,0>, InRaw<8,8,8>'), ('nn::hid::IHidSystemServer', 500, '8 bytes in - 0 bytes out - InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 501, '0x10 bytes in - 0 bytes out - InRaw<8,8,8>, InRaw<1,1,0>'), ('nn::hid::IHidSystemServer', 502, '8 
bytes in - 0 bytes out - InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 503, '0x10 bytes in - 0 bytes out - InRaw<8,8,8>, InRaw<1,1,0>'), ('nn::hid::IHidSystemServer', 504, '0x10 bytes in - 0 bytes out - InRaw<8,8,8>, InRaw<1,1,0>'), ('nn::hid::IHidSystemServer', 505, '0x10 bytes in - 0 bytes out - InRaw<8,8,8>, InRaw<1,1,0>'), ('nn::hid::IHidSystemServer', 510, '4 bytes in - 0 bytes out - InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 511, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 512, '8 bytes in - 0 bytes out - InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 513, '0 bytes in - 0 bytes out'), ('nn::hid::IHidSystemServer', 520, '0 bytes in - 0 bytes out'), ('nn::hid::IHidSystemServer', 521, '0 bytes in - 0 bytes out'), ('nn::hid::IHidSystemServer', 540, '0 bytes in - 0 bytes out - OutHandle<0,1>'), ('nn::hid::IHidSystemServer', 541, '0 bytes in - 8 bytes out - OutRaw<8,8,0>, Buffer<0,0xA,0>'), ('nn::hid::IHidSystemServer', 542, '0 bytes in - 0 bytes out - OutHandle<0,1>'), ('nn::hid::IHidSystemServer', 543, '0 bytes in - 8 bytes out - OutRaw<8,8,0>, Buffer<0,0xA,0>'), ('nn::hid::IHidSystemServer', 544, '0 bytes in - 0 bytes out - OutHandle<0,1>'), ('nn::hid::IHidSystemServer', 545, '6 bytes in - 0 bytes out - InRaw<6,1,0>'), ('nn::hid::IHidSystemServer', 546, '0 bytes in - 0 bytes out - OutHandle<0,1>'), ('nn::hid::IHidSystemServer', 547, '0 bytes in - 8 bytes out - OutRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 700, '0x10 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>, InRaw<8,8,8>'), ('nn::hid::IHidSystemServer', 702, '0 bytes in - 0 bytes out - OutHandle<0,1>'), ('nn::hid::IHidSystemServer', 703, '0 bytes in - 8 bytes out - OutRaw<8,8,0>, Buffer<0,0xA,0>'), ('nn::hid::IHidSystemServer', 751, '8 bytes in - 0 bytes out - takes pid - OutHandle<0,1>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 800, '8 bytes in - 8 bytes out - OutRaw<8,8,0>, Buffer<0,0xA,0>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 801, '4 bytes in - 1 bytes 
out - OutRaw<1,1,0>, InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 802, '4 bytes in - 0 bytes out - InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 803, '4 bytes in - 0 bytes out - InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 804, '4 bytes in - 0 bytes out - InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 805, '8 bytes in - 6 bytes out - OutRaw<6,1,0>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 806, '8 bytes in - 0 bytes out - InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 821, '0x10 bytes in - 0 bytes out - InRaw<8,8,0>, InRaw<8,8,8>'), ('nn::hid::IHidSystemServer', 822, '0x10 bytes in - 0 bytes out - InRaw<8,8,0>, InRaw<8,8,8>'), ('nn::hid::IHidSystemServer', 823, '0x10 bytes in - 0 bytes out - InRaw<8,8,0>, InRaw<8,8,8>'), ('nn::hid::IHidSystemServer', 824, '0x10 bytes in - 0 bytes out - InRaw<8,8,0>, InRaw<8,8,8>'), ('nn::hid::IHidSystemServer', 850, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'), ('nn::hid::IHidSystemServer', 851, '1 bytes in - 0 bytes out - InRaw<1,1,0>'), ('nn::hid::IHidSystemServer', 852, '8 bytes in - 1 bytes out - OutRaw<1,1,0>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 900, '8 bytes in - 0 bytes out - takes pid - InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 901, '4 bytes in - 0 bytes out - InRaw<4,4,0>'), ('nn::hid::IHidSystemServer', 1000, '0 bytes in - 0 bytes out'), ('nn::hid::IHidSystemServer', 1001, '8 bytes in - 0x10 bytes out - OutRaw<0x10,1,0>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 1002, '8 bytes in - 0x10 bytes out - OutRaw<0x10,1,0>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 1003, '8 bytes in - 1 bytes out - OutRaw<1,1,0>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 1004, '8 bytes in - 8 bytes out - OutRaw<8,8,0>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 1005, '8 bytes in - 8 bytes out - OutRaw<8,8,0>, InRaw<8,8,0>'), ('nn::hid::IHidSystemServer', 1006, '0 bytes in - 0 bytes out'), ('nn::hid::IHidSystemServer', 1007, '8 bytes in - 4 bytes out - OutRaw<4,1,0>, InRaw<8,8,0>'), ('nn::hid::IHidTemporaryServer', 
0, '0x10 bytes in - 0x18 bytes out - takes pid - OutRaw<0x18,2,0>, InRaw<8,8,8>, InRaw<4,4,0>'), ('nn::htc::tenv::IService', 0, '0x40 bytes
DefaultJobs.append(['DARPin', '', 'GACGTTAACGCT', 'GGATCC', 'AAGCTT', 2, [['ACTCCGCTGCACCTGGCTGCT', 6, 6, 0], ['GGTCACCTGGAAATCGTTGAAGTTCTGCTGAAGTACGGTGCT', 2, 2, 0], ['', 4, 6, 2], ['', 4, 6, 2], ['', 4, 6, 2], ['', 4, 6, 2], ['', 4, 6, 2], ['', 4,6, 2]]]) DefaultJobs.append(['Fibronectin_Fn3HP','High_affinity.fasta','TCCTCCGACTCTCCGCGTAACCTGGAGGTTACCAACGCAACTCCGAACTCTCTGACTATTTCTTGG','GCTAGC','GGATCC',3,[['TACCGTATCACCTACGGCGAAACTGGTGGTAACTCCCCGAGCCAGGAATTCACTGTTCCG',6,10,3],['GCGACCATCAGCGGTCTGAAACCGGGCCAGGATTATACCATTACCGTGTACGCTGTA',3,7,1],['CCAATCAGCATCAATTATCGCACCGAAATCGACAAACCGTCTCAG',6,12,3]]+[['',4,6,2] for j in xrange(5)]]) DefaultJobs.append(['Gene-2-Protein_Gp2', 'Gp2_evolved_binders.fasta', 'AAATTTTGGGCGACTGTA', 'GCTAGC', 'GGATCC', 2, [['TTCGAGGTTCCGGTTTATGCTGAAACCCTGGACGAAGCACTGGAACTGGCCGAATGGCAGTAC', 6, 8, 6], ['GTGACCCGCGTGCGTCCG', 6, 8, 6], ['', 4, 6, 2], ['', 4, 6, 2], ['', 4, 6, 2], ['', 4, 6, 2], ['', 4, 6, 2], ['', 4, 6, 2]]]) DefaultJobs.append(['Knottin', '', 'GGCCAGTCTGGCCAGGGCACCTGCAACACCCCGGGCTGCACCTGCAGCTGGCCGGTGTGC', 'TGACTAGCAATGCTGACTGA', 'TCTGGTGACTACAACAAAAAC', 1, [['TGCGGCGAAACCTGCGTGGGCGGAGGGCAGTCTGGGCAG', 7, 7, 0], ['', 4, 6, 2], ['',4, 6, 2], ['', 4, 6, 2], ['', 4, 6, 2], ['', 4, 6, 2], ['', 4, 6, 2], ['', 4, 6, 2]]]) saved = open( "SavedJobs.p", "wb" ) pickle.dump(DefaultJobs, saved) saved.close() DefaultJobs.append(job+[divs]) saved = open( "SavedJobs.p", "wb" ) pickle.dump(DefaultJobs, saved) saved.close() print '\n'*10+'Saved!'.center(cc)+'\n'*18 __getch() elif ss == limit-1: return True, False elif ss == 5: clear() if key == 'Left': job[5] = (job[5]-2)%8 + 1 if key == 'Right': job[5] = (job[5])%8 + 1 elif ss == 6: if key == 'Left': rs = (rs-1)%job[5] if key == 'Right': rs = (rs+1)%job[5] elif ss == 8: if key == 'Left' and divs[rs][1] > 1: divs[rs][1] -= 1 if key == 'Right' and divs[rs][1] < divs[rs][2]: divs[rs][1] += 1 elif ss == 9: if key == 'Left' and divs[rs][2] > divs[rs][1]: divs[rs][2] -= 1 if key == 'Right': 
divs[rs][2] += 1 elif ss == 10: if key == 'Left' and divs[rs][3] > 0: divs[rs][3] -= 1 if key == 'Right' and divs[rs][3] < divs[rs][1]: divs[rs][3] += 1 elif key == 'Left' or key == 'Right' or key == 'Enter': pass elif ss <= len(JobMenu): if key == 'Delete': job[ss] = job[ss][:-1] else: job[ss] += key elif ss == 7: if key == 'Delete': divs[rs][ss-len(JobMenu)-1] = divs[rs][ss-len(JobMenu)-1][:-1] else: divs[rs][ss-len(JobMenu)-1] += key ''' Interface Menu (Settings Menu) Purpose: Allows user to modify the behind the scenes settings for the analysis Note: ''' def MainSettings(settings): SettingsMenu = ["Sequence Similarity Threshold","Frequency Dampening Power","Maximum Sequence Count","Assay Background Filter","Pairwise Analysis",["Filter Coefficient","Hard Cap Filter"],""] cc,ss,limit,switch,pairwise_analysis = 79,0,len(SettingsMenu),['On','Off'],['On','Off'] while True: clear() print '\n'*2 print '-- System Settings --'.center(cc) + '\n' for i in xrange(len(SettingsMenu)): if i == 5: display = SettingsMenu[i][switch.index(settings[3])] else: display = SettingsMenu[i] if i == ss: print (display+' > '+settings[i]+' <').center(cc) else: print (display+' '+settings[i]+' ').center(cc) print '\n'*10 key = __getch() if key == 'Up': ss = (ss-1)%len(SettingsMenu) elif key == 'Down': ss = (ss+1)%len(SettingsMenu) elif key == 'Left': if ss == 3: settings[3] = switch[1-switch.index(settings[3])] if ss == 4: settings[4] = pairwise_analysis[1-pairwise_analysis.index(settings[4])] elif key == 'Right': if ss == 3: settings[3] = switch[1-switch.index(settings[3])] if ss == 4: settings[4] = pairwise_analysis[1-pairwise_analysis.index(settings[4])] elif ss != limit-3 and ss != limit-1: if key == 'Delete': settings[ss] = settings[ss][:-1] else: settings[ss] += key elif key == 'Enter' and ss == len(SettingsMenu)-1: try: [float(i) for i in settings[0:3]+[settings[5]]] return settings except: print '\n'*30 print 'Invalid entry in settings'.center(cc)+'\n'+'Be sure all values are 
numbers'.center(cc) print '\n'*19 __getch() def Silent_Load(fname): text = open(fname, 'r') lines = text.readlines() JobMenu = ['Job Name','FASTA/FASTQ File','Gene Start',"5' Anchor","3' Anchor",'# of Diversified Regions'] RegionMenu = ['DNA After Region','Minimum Region Length','Maximum Region Length','Insert after # Position'] SettingMenu = ["Sequence Similarity Threshold","Frequency Dampening Power","Maximum Sequence Count","Assay Background Filter","Pairwise Analysis","Filter Coefficient","Hard Cap Filter"] Jobs,Regions,Settings = ['' for i in xrange(len(JobMenu))],['' for i in xrange(len(RegionMenu))],['' for i in xrange(len(SettingMenu))] for i in xrange(len(JobMenu)): for line in lines: if JobMenu[i] in line: Jobs[i] = line[line.index(JobMenu[i])+len(JobMenu[i]):].replace(' ','').replace('\n','').replace(':','') for i in xrange(len(RegionMenu)): for line in lines: if RegionMenu[i] in line: Regions[i] = line[line.index(RegionMenu[i])+len(RegionMenu[i]):].replace(' ','').replace('\n','').replace(':','') for i in xrange(len(SettingMenu)): for line in lines: if SettingMenu[i] in line: Settings[i] = line[line.index(SettingMenu[i])+len(SettingMenu[i]):].replace(' ','').replace('\n','').replace(':','') temp1 = Jobs[:-1]+[int(Jobs[-1])] temp2 = [[a,int(b),int(c),int(d)] for a,b,c,d in zip(Regions[0].split(','),Regions[1].split(','),Regions[2].split(','),Regions[3].split(','))] temp3 = [float(Settings[0]),float(Settings[1]),int(Settings[2])]+Settings[3:5]+[int(Settings[5])] return temp3,temp1,temp2 ############################## ### Start of Execution ### ############################## ''' Program Start Purpose: Start the application, choosing between silent and non-silent modes Note: You can change the default settings below if so desired ''' if silentmode: settings,job,divs = Silent_Load(job_file) else: clear() settings = ['.8','0.5','10000','On','On','10','Return to Main Menu '] job,divs,settings = Main(settings) [thresh, damp, maxSeqCount, use_bck, pairwise, 
bead_ratio] = settings[0:6] [thresh, damp, maxSeqCount, use_bck, pairwise, bead_ratio] = [float(thresh), float(damp), int(maxSeqCount), use_bck, pairwise, float(bead_ratio)] [filename,files],FRAMEsplit = job[0:2],[] adaptors = job [3:5] FRAMEsplit.append(job[2]) looplength,diversityspacing = [],[] for i in xrange(job[5]): looplength.append(divs[i][1:3]) diversityspacing.append(divs[i][3]) FRAMEsplit.append(divs[i][0]) ################################################################### ### The above section is devoted to the user interface ### ################################################################### ################################################################### ### The below section is devoted to the algorithm ### ################################################################### #Generate empty count matrices for later counting LOOPcnt = [[[[0.*22] for j in xrange(i)] for i in range(pos[0],pos[1]+1)] for pos in looplength] ### Set up variables to be used for sequence sorting ''' Sequence Acquisition (Adaptor Alignment) Purpose: This function allows you to check for where the adaptors match the sequence, and output the identified sequence/length Note: You can change tolerance by modifying global variable "adaptor_tolerance" ''' def Adaptor_Alignment(seq,adaptors): if adaptor_tolerance == 0: length = len(seq[seq.find(adaptors[0]):seq.find(adaptors[1])]) - len(adaptors[0]) protein = seq[seq.find(adaptors[0])+len(adaptors[0]):seq.find(adaptors[1])] return length,protein else: adaptor_index = [] for i in xrange(2): temp = [] for j in xrange(len(seq) - len(adaptors[i])+1): temp.append(FamCrit(seq[j:j+len(adaptors[i])],adaptors[i])*(len(adaptors[i]))) print 'Seq:',seq[j:j+len(adaptors[i])],adaptors[i] print temp[-1] print len(adaptors[i]) - max(temp) print adaptors if len(adaptors[i]) - max(temp) <= adaptor_tolerance: print temp.index(max(temp)) adaptor_index.append(temp.index(max(temp))) else: return 0,'' length = 
len(seq[adaptor_index[0]:adaptor_index[1]]) - len(adaptors[0]) protein = seq[adaptor_index[0]+len(adaptors[0]):adaptor_index[1]] return length,protein ''' Program Start ''' loadtime = time.time()-start print 'Job has started. \nScaffoldSeq is evaluating the data set...' time1 = time.time() ProteinTotal,FULLPROT,timeCnt = 0,[],[] timeCnt.append(time.time()) ''' Sequence Acquisition (Ruler Generation/Search) Purpose: Creates a 'blank scaffold' of a certain size, uses the 5'/3' boundaries to identify sequences that fit the size restrictions FULLPROT --> Stores list of all diversified regions originating from the same sequence, but only when all diversified regions are accounted for. ''' while not os.path.isfile(files): files = raw_input('\nError! \nSequence data file must exist within the current directory. \nPlease enter the sequence file name: \n') with open(files, 'r') as Mainfile: LOWlmt = len(''.join(FRAMEsplit)) + 3*(sum([i[0] for i in looplength])) HIGHlmt = len(''.join(FRAMEsplit)) + 3*(sum([i[1] for i in looplength])) SeqCnt = 0 LOOPseqs = [[[] for i in xrange(j[1]-j[0]+1)] for j in looplength] for line in Mainfile: SeqCnt += 1 matchFn, tempLOOPS, buildFULL = 0, [], [] if SeqCnt >= (maxSeqCount): break length,protein = Adaptor_Alignment(line,adaptors) if length >= LOWlmt and length <= HIGHlmt and length%3 == 0: LOOPstart = 0 ### Process sequences that lie within an appropriate size range ''' Sequence Acquisition (Region Alignment) Purpose: Goes through each matching sequence at tries to align sections of framework in order to identify diversified regions Note: ''' for (i,loop) in zip(xrange(len(looplength)),looplength): protFrag,protFragRat,protFragaa = [],[],[] LOOPstart += len(FRAMEsplit[i])+loop[0]*3 for x in range(LOOPstart,LOOPstart+(loop[1]-loop[0]+1)*3,3): protFrag.append(protein[x:x+len(FRAMEsplit[i+1])]) for x in protFrag: protFragRat.append(FamCrit(FRAMEsplit[i+1],x)) tag = protFragRat.index(max(protFragRat)) LOOPseq = 
protein[LOOPstart-loop[0]*3:LOOPstart+tag*3] if max(protFragRat) >= framework_match_threshold: matchFn += 1 LOOPseqs[i][tag].append(translate_dna(LOOPseq)) tempLOOPS.append(translate_dna(LOOPseq)) # DRW edit on 2016-02-01 to exclude all cysteine containing clones. # if max(protFragRat) >= framework_match_threshold and 'C' not in translate_dna(LOOPseq): # matchFn += 1 # LOOPseqs[i][tag].append(translate_dna(LOOPseq)) # tempLOOPS.append(translate_dna(LOOPseq)) LOOPstart += 3*tag #Reset loopstart variable to prepare for next interation ### Take into account loop length diversity (-1...+1) if len(tempLOOPS) == len(looplength): FULLPROT.append(tempLOOPS) if matchFn > 0: # Keep track of the number of identified proteins of interest ProteinTotal += 1 if SeqCnt%100000 == 0: # Give us some kind of meter for data processing print 'Scanned next 100k entries in %.1f sec' % (time.time() - timeCnt[0]) # Remind user how long the algorithm took to identify all relavent proteins scantime = time.time() - time1 print 'Scanned full data set in %.1f sec' % (scantime) time1b = time.time() ################################################################### ## ## ## Operate on AA sequences ## ## List, Tally Occurrences and Sort Unique Sequences ## ## ## ################################################################### ### Enumerate and Remove Duplicates by Loop Length UNIQUELOOPseqs = [[[[str(x), LOOPseq.count(x)] for x in list(set(LOOPseq))] for LOOPseq in LOOPset] for LOOPset in LOOPseqs] UNIQUELOOPseqs = [[sorted(LOOPseq,key=lambda x: -x[1]) for LOOPseq in LOOPset] for LOOPset in UNIQUELOOPseqs] ################################### ## Time Point ## ################################### scantime1 = time.time() - time1b print 'Organized Diversified Regions in %.1f sec' % (scantime1) time2 = time.time() ####################################### ## Remove Background (Rare Events) ## ####################################### FREQcntr = [[i[1] for FREQset in UNIQUELOOPseq for i in FREQset] for 
UNIQUELOOPseq in UNIQUELOOPseqs] seqcnt = [[[x, cntr.count(x)] for x in list(set(cntr))] for cntr in FREQcntr] # Removes repeats and stores as [sequence,frequency] instead seqcnt = [sorted(sc,key=lambda
        # Compute the release hash on save only if it hasn't already been done
        # and the build is complete.
        if self.build.status == BUILD_SUCCESS and not self.hash:
            self.hash = self.compute_hash()
        validate_config_marshaling(self)
        super(Release, self).save()


@reversion.register
@six.python_2_unicode_compatible
class Host(models.Model):
    """A machine that can run procs; wraps a lower-level common_models.Host."""
    name = models.CharField(max_length=200, unique=True)
    # It might be hard to delete host records if there are things linked
    # to them in the DB, but we should be able to mark them inactive.
    active = models.BooleanField(default=True)
    squad = models.ForeignKey('Squad', null=True, blank=True,
                              related_name='hosts')

    def __str__(self):
        return self.name

    def get_used_ports(self):
        # Ports currently held by running procs on this host.
        return set(p.port for p in self.get_procs())

    def get_next_port(self):
        all_ports = range(settings.PORT_RANGE_START, settings.PORT_RANGE_END)
        used_ports = self.get_used_ports()

        # Return the first port in our configured range that's not already in
        # use.
        def free(port):
            # A PortLock row means another deploy has reserved this port even
            # though no proc is running on it yet.
            if PortLock.objects.filter(host=self, port=port):
                return False
            return port not in used_ports

        # Raises StopIteration if the whole range is taken (unlike
        # get_free_port below, which raises ValueError).
        return next(x for x in all_ports if free(x))

    def get_free_port(self):
        used_ports = self.get_used_ports()
        locked_ports = {p.port for p in PortLock.objects.filter(host=self)}
        occupied_ports = used_ports.union(locked_ports)

        all_ports = range(settings.PORT_RANGE_START, settings.PORT_RANGE_END)

        # Shuffle, so that we pick a random port
        # NOTE(review): random.shuffle mutates its argument in place; on
        # Python 3 `range(...)` is immutable and this raises TypeError.  Fine
        # on Python 2 where range() returns a list — confirm target runtime.
        random.shuffle(all_ports)
        for port in all_ports:
            if port not in occupied_ports:
                return port

        # No free ports!
        raise ValueError(
            'No free ports found on host {}: used={}, locked={}'.format(
                self, used_ports, locked_ports))

    def get_proc(self, name, check_cache=False):
        """
        Given a name of a proc, get its information from supervisord and
        return a Proc instance.
""" return self.raw_host.get_proc(name, check_cache) def get_procs(self, check_cache=False): return self.raw_host.get_procs(check_cache) def shortname(self): return self.raw_host.shortname() class Meta: ordering = ('name',) db_table = 'deployment_host' def __init__(self, *args, **kwargs): super(Host, self).__init__(*args, **kwargs) user = getattr(settings, 'SUPERVISOR_USERNAME', None) pwd = getattr(settings, 'SUPERVISOR_PASSWORD', None) self.raw_host = common_models.Host( self.name, settings.SUPERVISOR_PORT, redis_or_url=events_redis, supervisor_username=user, supervisor_password=<PASSWORD>, ) @reversion.register @six.python_2_unicode_compatible class Squad(models.Model): """ A Squad is a group of identical hosts. When deploying a swarm, its procs will be load balanced across the specified squad. A host may only be in one squad. """ name = models.CharField(max_length=50, unique=True) def __str__(self): return self.name class Meta: ordering = ('name',) db_table = 'deployment_squad' config_name_help = ( "Short name like 'prod' or 'europe' to distinguish between " "deployments of the same app. Must be filesystem-safe, " "with no dashes or spaces." ) def release_eq(release, config, env, volumes): """ Given a release, see if its config, env, and volumes match those passed in. Note that this function does *not* check whether the app and version are the same. You should do that in SQL to narrow the list of relevant releases before using this function to check the equality of the YAML-formatted fields. """ # For settings and env vars, treat None and '' the same as {} r_config = release.config_yaml or {} if r_config != config: return False r_env = release.env_yaml or {} if r_env != env: return False r_volumes = release.volumes or [] volumes = volumes or [] if r_volumes != volumes: return False return True @reversion.register @six.python_2_unicode_compatible class Swarm(models.Model): """ This is the payoff. 
Save a swarm record and then you can tell Velociraptor to 'make it so'. """ app = models.ForeignKey(App, null=True) release = models.ForeignKey(Release) config_name = models.CharField(max_length=50, help_text=config_name_help) proc_name = models.CharField(max_length=50) squad = models.ForeignKey(Squad) size = models.IntegerField(help_text='The number of procs in the swarm', default=1) pool_help = "The name of the pool in the load balancer (omit prefix)" pool = models.CharField(max_length=50, help_text=pool_help, blank=True, null=True) # Select which balancer should be used for this squad, from # settings.BALANCERS _balancer_choices = [(k, k) for k in settings.BALANCERS] balancer = models.CharField(max_length=50, choices=_balancer_choices, blank=True, null=True) config_yaml = YAMLDictField(help_text=config_help, blank=True, null=True) env_yaml = YAMLDictField(help_text=env_help, blank=True, null=True) volumes = YAMLListField(help_text=volumes_help, null=True, blank=True) run_as = models.CharField(max_length=32, default='nobody', blank=True) mem_limit = models.CharField( max_length=32, null=True, blank=True, help_text=mem_limit_help) memsw_limit = models.CharField( max_length=32, null=True, blank=True, help_text=memsw_limit_help) ing_help = "Optional config shared with other swarms." 
config_ingredients = models.ManyToManyField(ConfigIngredient, help_text=ing_help, blank=True) def save(self): if self.pool and not self.balancer: msg = 'Swarms that specify a pool must specify a balancer' raise ValidationError(msg) validate_config_marshaling(self) super(Swarm, self).save() class Meta: unique_together = ('app', 'config_name', 'squad', 'proc_name') ordering = ['app__name', 'config_name', 'proc_name'] db_table = 'deployment_swarm' def __str__(self): # app-version-swarm_config_name-release_hash-procname return u'-'.join(str(x) for x in [ self.release.build, self.config_name, self.release.hash, self.proc_name, ]) def shortname(self): return u'%(build)s-%(configname)s-%(proc)s' % { 'build': self.release.build, 'configname': self.config_name, 'proc': self.proc_name } def get_memory_limit_str(self): s = [] if self.mem_limit: s.append('RAM=%s' % self.mem_limit) if self.memsw_limit: s.append('RAM+Swap=%s' % self.memsw_limit) return '/'.join(s) def get_procs(self, check_cache=False): """ Return all running procs on the squad that share this swarm's name and proc name. """ if not self.release: return [] procs = [] for host in self.squad.hosts.all(): procs += host.get_procs(check_cache=check_cache) def is_mine(proc): return ( proc.config_name == self.config_name and proc.proc_name == self.proc_name and proc.app_name == self.app.name ) return [p for p in procs if is_mine(p)] def get_prioritized_hosts(self): """ Return list of hosts in the squad sorted first by number of procs from this swarm, then by total number of procs. """ # Make list of all hosts in the squad. Then we'll sort it. squad_hosts = list(self.squad.hosts.filter(active=True)) if not squad_hosts: raise ValidationError( u'No active hosts in squad {}'.format(self.squad)) for h in squad_hosts: # Set a couple temp attributes on each host in the squad, for # sorting by. 
h.all_procs = h.get_procs() h.swarm_procs = [ p for p in h.all_procs if p.hash == self.release.hash and p.proc_name == self.proc_name ] # On each host, set a tuple in form (x, y), where: # x = number of procs running on this host that belong to the swarm # y = total number of procs running on this host h.sortkey = (len(h.swarm_procs), len(h.all_procs)) squad_hosts.sort(key=lambda h: h.sortkey) return squad_hosts def get_next_host(self): return self.get_prioritized_hosts()[0] def get_config(self): """ Pull the swarm's config_ingredients' config dicts. Update with the swarm's own config dict. Return the result. Used to create the yaml dict that gets stored with a release. """ config = {} # Only bother checking the m:m if we've been saved, since it's not # possible for m:ms to exist on a Swarm that's already been saved. if self.id: for ing in self.config_ingredients.all(): config.update(ing.config_yaml or {}) config.update(self.config_yaml or {}) return config def get_env(self, build=None): """ Pull the build's env var dict. Update with the swarm's config_ingredients' env var dicts. Finally update with the swarm's own env var dict. Return the result. Used to create the yaml dict that gets stored with a release. """ env = dict(build.env_yaml or {}) if build else {} # Only bother checking the m:m if we've been saved, since it's not # possible for m:ms to exist on a Swarm that's already been saved. if self.id: for ing in self.config_ingredients.all(): env.update(ing.env_yaml or {}) env.update(self.env_yaml or {}) return env def get_current_release(self, tag): """ Retrieve or create a Release that has current config and a successful or pending build with the specified OS image and tag. 
""" os_image = self.app.get_os_image() build = Build.get_current(self.app, os_image, tag) if build is None: build = Build(app=self.app, os_image=os_image, tag=tag) build.save() env = self.get_env(build) config = self.get_config() # If there's a release with the build and config we need, re-use it. # First filter by build in the DB query... releases = Release.objects.filter( build=build, run_as=self.run_as, mem_limit=self.mem_limit, memsw_limit=self.memsw_limit, ).order_by('-id') # ...then filter in Python for equivalent config (identical keys/values # in different order are treated as the same, since we're comparing # dicts here instead of serialized yaml) try: release = next(r for r in releases if release_eq(r, config, env, self.volumes)) # If we have a complete build but release is not yet hashed, hash # it. if release.build.file and not release.hash: release.save() log.info("Found existing release %s", release.hash) return release except StopIteration: pass # We didn't find a release with the right build+config+env+volumes. Go # ahead and make one. release = Release( build=build, config_yaml=config, env_yaml=env, volumes=self.volumes, run_as=self.run_as, mem_limit=self.mem_limit, memsw_limit=self.memsw_limit, ) release.save() log.info("Created new release %s", release.hash) return release def get_version(self): return self.release.build.tag def set_version(self, version): self.release = self.get_current_release(version) version = property(get_version, set_version) def get_latest_reversion(self): try: return reversion.get_for_object(self)[0] except IndexError: return None @six.python_2_unicode_compatible class PortLock(models.Model): """ The presence of one of these records indicates that a port is reserved for a particular proc that's probably still in the process of being deployed. Port
("report_date_stamp", pymongo.ASCENDING), ("f_ann_date_stamp", pymongo.ASCENDING)]) if fields: df = pd.DataFrame(cursor).drop(columns="_id")[fields].set_index("code") df.report_date = pd.to_datetime(df.report_date) df.ann_date = pd.to_datetime(df.ann_date) df.f_ann_date = pd.to_datetime(df.f_ann_date) else: df = pd.DataFrame(cursor).drop(columns="_id").set_index("code") df.report_date = pd.to_datetime(df.report_date) df.ann_date = pd.to_datetime(df.ann_date) df.f_ann_date = pd.to_datetime(df.f_ann_date) return df def QA_fetch_last_financial( code: Union[str, List, Tuple] = None, cursor_date: Union[str, datetime.datetime, pd.Timestamp] = None, report_label: Union[int, str] = None, report_type: Union[int, str, List, Tuple] = None, sheet_type: str = "income", fields: Union[str, List, Tuple] = None) -> pd.DataFrame: """获取距离指定日期 (cursor_date) 最近的原始数据 (不包含在 cursor_date 发布的财务数据), 当同时输入 cursor_date 与 report_date 时,以 report_date 作为查询标准 注意: 这里的 report_type 仅支持 (1,4, 5) 三种类型,以避免混淆合并数据和单季数据等 说明: 柳工 (000528) 在 2018 年 8 月 30 日发布半年报,之后在 2018 年 9 月 29 日发布修正报告, - 如果输入的 cursor_date 为 2018-08-31, 那么获取到的就是原始半年报,对应 report_type == 5 - 如果输入的 cursor_date 为 2018-09-30,那么获取到的就是最新合并报表,对应 report_type == 1 - 如果对应的 cursor_date 为 2019-08-31,需要获取 2018 年半年报,那么就返回柳工在 2019 年 8 月 29 日发布的上年同期基准,对应 report_type == 4 Args: code (Union[str, List, Tuple], optional): 股票代码或股票列表,默认为 None, 查询所有股票 cursor_date (Union[str, datetime.datetime, pd.Timestamp]): 查询截面日期 (一般指调仓日), 默认为 None report_label (Union[str, int], optional): 指定报表类型,这里的类型分类为一季报,半年报,三季报,年报, 默认为 None,即选择距离 cursor_date 最近的报表类型 report_type (Union[str, List, Tuple], optional): [description]. 报表类型,默认为 None. 即距离 cursor_date 最近的财报,不指定类型,避免引入未来数据 (1 合并报表 上市公司最新报表(默认)| 2 单季合并报表 4 调整合并报表 本年度公布上年同期的财务报表数据,报告期为上年度 | 5 调整前合并报表 数据发生变更,将原数据进行保留,即调整前的原数据) sheet_type (str, optional): 报表类型,默认为 "income". 
fields (Union[str, List, Tuple], optional): 字段, 默认为 None, 返回所有字段 Returns: pd.DataFrame: 复合条件的财务数据 """ def _trans_financial_type(x): if x.empty: return x if sheet_type == "balancesheet": # 资产负债表属于时点信息,直接返回 return x else: if x.iloc[0].report_date[4:] in ['0331', '1231']: # 一季报而言,单季合并与普通合并没有区别,直接返回 # 年报而言,不存在单季概念 return x.iloc[0] if x.iloc[0].report_type in ['1', '4', '5']: return x.iloc[0] if x.iloc[0].report_type == '2': # 尝试查找同一报告期报告类型为 '1' 或 '4' 的报表数据 # try: # if (x.shape[0] > 1) & (x.iloc[1].report_date == x.iloc[0].report_date) & (x.iloc[1].report_type in ['1', '4']): # return x.iloc[1] # except: # return pd.Series() # 尝试直接利用单季数据进行拼接 cursor_x = x.loc[x.report_date.map(str).str.slice( 0, 4) == x.iloc[0].report_date[:4]] cursor_x = cursor_x.drop_duplicates(subset = ['report_date'], keep='first') cursor_x = cursor_x.loc[cursor_x.report_date <= x.iloc[0].report_date] cursor_x = cursor_x.fillna(0) non_numeric_columns = sorted(["f_ann_date", "f_ann_date_stamp", "ann_date", "ann_date_stamp", "report_date", "report_date_stamp", "update_flag", "report_type", "code", "report_label"]) columns = sorted(list(set(cursor_x.columns) - set(non_numeric_columns))) rtn_se = cursor_x[columns].sum(axis=0) rtn_se = rtn_se.append(cursor_x[non_numeric_columns].iloc[0]) return rtn_se if isinstance(code, str): code = (code,) if not report_type: report_type = ["1", "2", "4", "5"] else: if isinstance(report_type, int): report_type = str(report_type) if isinstance(report_type, str): if report_type not in ["1", "4", "5"]: raise ValueError("[REPORT_TYPE ERROR]") report_type = (report_type,) else: report_type = list(set(report_type) & set('1', '2', '4', '5')) if sheet_type not in SHEET_TYPE: raise ValueError(f"[SHEET_TYPE ERROR]") if report_label: report_label = str(report_label) if isinstance(fields, str): fields = list( set([fields, "code", "ann_date", "report_date", "f_ann_date", "report_type"])) elif fields: fields = list( set(fields + ["code", "ann_date", "report_date", "f_ann_date", 
"report_type"])) coll = eval(f"DATABASE.{sheet_type}") if (not code) and (not report_label): # 为了加快检索速度,从当前日期往前至多回溯一季度,实际调仓时,仅考虑当前能拿到的最新数据,调仓周期一般以月, 季为单位, # 最长一般为年报,而修正报表如果超过 1 个季度,基本上怼调仓没有影响,这里以 1 年作为回溯基准 qry = { "f_ann_date_stamp": { "$gt": QA_util_date_stamp((pd.Timestamp(cursor_date) - pd.Timedelta(days=400)).strftime("%Y-%m-%d")), "$lt": QA_util_date_stamp(cursor_date) }, "report_type": { "$in": report_type }} cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.DESCENDING), ("f_ann_date_stamp", pymongo.DESCENDING)]) try: if not fields: df = pd.DataFrame(cursor).drop(columns="_id") else: df = pd.DataFrame(cursor).drop(columns="_id")[fields] except: raise ValueError("[QRY ERROR]") if sheet_type == "balancesheet": return df.groupby("code").apply(lambda x: x.iloc[0]) return df.groupby("code").apply(_trans_financial_type).unstack() if not report_label: qry = { "code": { "$in": code }, "f_ann_date_stamp": { "$gt": QA_util_date_stamp((pd.Timestamp(cursor_date) - pd.Timedelta(days=400)).strftime("%Y-%m-%d")), "$lt": QA_util_date_stamp(cursor_date) }, "report_type": {"$in": report_type}} cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.DESCENDING), ("f_ann_date_stamp", pymongo.DESCENDING)]) try: if not fields: df = pd.DataFrame(cursor).drop(columns="_id") else: df = pd.DataFrame(cursor).drop(columns="_id")[fields] except: raise ValueError("[QRY ERROR]") if sheet_type == "balancesheet": return df.groupby("code").apply(lambda x: x.iloc[0]) return df.groupby("code").apply(_trans_financial_type).unstack() if not code: qry = { "f_ann_date_stamp": { "$gt": QA_util_date_stamp((pd.Timestamp(cursor_date) - pd.Timedelta(days=400)).strftime("%Y-%m-%d")), "$lt": QA_util_date_stamp(cursor_date) }, "report_type": { "$in": report_type }, "report_label": report_label } cursor = coll.find(qry, batch_size=10000).sort([ ("report_date_stamp", pymongo.DESCENDING), ("f_ann_date_stamp", pymongo.DESCENDING)]) try: if not fields: df = 
pd.DataFrame(cursor).drop(columns="_id")
            else:
                df = pd.DataFrame(cursor).drop(columns="_id")[fields]
        # NOTE(review): bare except hides the real error (empty cursor,
        # missing fields, ...); narrow it when touching this code.
        except:
            raise ValueError("[QRY ERROR]")
        if sheet_type == "balancesheet":
            # Balance sheet: point-in-time data, take newest row per code.
            return df.groupby("code").apply(lambda x: x.iloc[0])
        return df.groupby("code").apply(_trans_financial_type).unstack()
    else:
        # Both code list and report_label given: filter on all criteria.
        qry = {
            "code": {
                "$in": code
            },
            "f_ann_date_stamp": {
                # Look back at most ~400 days from cursor_date to keep the
                # query small (covers annual reports plus slack).
                "$gt": QA_util_date_stamp(
                    (pd.Timestamp(cursor_date) -
                     pd.Timedelta(days=400)).strftime("%Y-%m-%d")),
                "$lt": QA_util_date_stamp(cursor_date)
            },
            "report_type": {
                "$in": report_type
            },
            "report_label": report_label
        }
        # Newest report/announcement first, so iloc[0] per group is the
        # most recent record.
        cursor = coll.find(qry, batch_size=10000).sort([
            ("report_date_stamp", pymongo.DESCENDING),
            ("f_ann_date_stamp", pymongo.DESCENDING)])
        try:
            if not fields:
                df = pd.DataFrame(cursor).drop(columns="_id")
            else:
                df = pd.DataFrame(cursor).drop(columns="_id")[fields]
        except:
            raise ValueError("[QRY ERROR]")
        # df.report_date = pd.to_datetime(df.report_date)
        # df.ann_date = pd.to_datetime(df.ann_date)
        # df.f_ann_date = pd.to_datetime(df.f_ann_date)
        if sheet_type == "balancesheet":
            return df.groupby("code").apply(lambda x: x.iloc[0])
        return df.groupby("code").apply(_trans_financial_type).unstack()


def QA_fetch_stock_basic(
        code: Union[str, List, Tuple] = None,
        status: Union[str, List, Tuple] = 'L') -> pd.DataFrame:
    """Fetch basic per-stock information.

    Args:
        code: stock code or list of codes; None (default) fetches all stocks.
        status: listing-status filter, default 'L' (still listed); None
            returns stocks regardless of status.

    Returns:
        pd.DataFrame: basic stock information indexed by code (empty frame
        when nothing matches).
    """
    coll = DATABASE.stock_basic
    # Normalize scalars to one-element containers for the $in queries below.
    if isinstance(code, str):
        code = (code,)
    if isinstance(status, str):
        status = (status,)
    qry = {}
    if not status:
        if not code:
            qry = {}
        else:
            qry = {
                "code": {
                    "$in": code
                }
            }
    else:
        if not code:
            qry = {
                "status": {
                    "$in": status
                }
            }
        else:
            qry = {
                "code": {
                    "$in": code
                },
                "status": {
                    "$in": status
                }
            }
    cursor = coll.find(qry)
    res = pd.DataFrame(cursor)
    if res.empty:
        return res
    else:
        res.list_date = pd.to_datetime(res.list_date)
        return res.drop(columns="_id").set_index("code")


def
QA_fetch_stock_name(
    code: Union[str, List, Tuple] = None,
    cursor_date: Union[str, datetime.datetime, pd.Timestamp] = None
) -> pd.DataFrame:
    """Fetch the historical names a stock has traded under.

    Args:
        code: stock code or list of codes; None (default) queries all stocks.
        cursor_date: cut-off date; returns the name in effect at that date
            (the record whose start/end date range contains it).

    Returns:
        pd.DataFrame: historical name records indexed by code (empty frame
        when nothing matches).
    """
    coll = DATABASE.namechange
    if isinstance(code, str):
        code = [code]
    qry = {}
    if not code:
        if not cursor_date:
            qry = {}
        else:
            # Name record whose validity window contains cursor_date.
            qry = {
                "start_date_stamp": {
                    "$lte": QA_util_date_stamp(cursor_date)
                },
                "end_date_stamp": {
                    "$gte": QA_util_date_stamp(cursor_date)
                }
            }
    else:
        if not cursor_date:
            qry = {
                "code": {
                    "$in": code
                }
            }
        else:
            qry = {
                "code": {
                    "$in": code
                },
                "start_date_stamp": {
                    "$lte": QA_util_date_stamp(cursor_date)
                },
                "end_date_stamp": {
                    "$gte": QA_util_date_stamp(cursor_date)
                }
            }
    cursor = coll.find(qry)
    res = pd.DataFrame(cursor)
    if res.empty:
        return res
    else:
        res.start_date = pd.to_datetime(res.start_date)
        res.end_date = pd.to_datetime(res.end_date)
        # Keep the newest name per code (last after sorting by start date).
        return res.drop(columns="_id").set_index("code").sort_values(
            by="start_date_stamp").drop_duplicates(keep="last").sort_index()


def QA_fetch_industry_adv(
    code: Union[str, List, Tuple] = None,
    cursor_date: Union[str, datetime.datetime] = None,
    start: Union[str, datetime.datetime] = None,
    end: Union[str, datetime.datetime] = None,
    levels: Union[str, List, Tuple] = None,
    src: str = "sw"
) -> pd.DataFrame:
    """本地获取指定股票或股票列表的行业

    Args:
        code (Union[str, List, Tuple], optional): 股票代码或列表,默认为 None, 查询所有股票代码.
        cursor_date (Union[str, datetime.datetime], optional): 一般指调仓日,此时不需要再设置 start 与 end
        start(Union[str, datetime.datetime], optional): 起始时间,默认为 None.
        end(Union[str, datetime.datetime], optional): 截止时间, 默认为 None.
        levels (Union[str, List, Tuple], optional): [description]. 对应行业分级级别,默认为 None,查询所有行业分级数据
        src (str, optional): 分级来源,默认为 "sw"(目前仅支持申万行业分类).
Returns: pd.DataFrame: 行业信息 """ coll = DATABASE.industry if not code: code = QA_fetch_stock_list().index.tolist() if isinstance(code, str): code = [code] if isinstance(levels, str): levels = [levels, ] if not levels: levels = ["l1", "l2", "l3"] levels = list(map(lambda x: x.lower(), levels)) df_tmp = pd.DataFrame() if not cursor_date: if not start: qry = { "code": { "$in": code }, "level": { "$in": levels }, "src": src.lower() } else: qry = { "code": { "$in": code }, "level": { "$in": levels }, "src": src.lower(), "in_date_stamp": { "$lte": QA_util_date_stamp(pd.Timestamp(start).strftime("%Y-%m-%d")) } } if coll.count_documents(filter=qry) < 1: print("找不到对应行业数据") return pd.DataFrame() cursor = coll.find(qry) df_tmp = pd.DataFrame(cursor).drop(columns="_id") if end: df_tmp = df_tmp.loc[df_tmp.out_date_stamp > QA_util_date_stamp( pd.Timestamp(end).strftime("%Y-%m-%d"))] else: qry = { "code": { "$in": code }, "level": { "$in": levels }, "src": src.lower(), "in_date_stamp": { "$lte": QA_util_date_stamp(pd.Timestamp(cursor_date).strftime("%Y-%m-%d")) } } if coll.count_documents(filter=qry) < 1: print("找不到对应行业数据") return pd.DataFrame() cursor = coll.find(qry) df_tmp = pd.DataFrame(cursor).drop(columns="_id") df_tmp.loc[df_tmp.out_date_stamp > QA_util_date_stamp( pd.Timestamp(cursor_date).strftime("%Y-%m-%d"))] df_tmp.in_date = pd.to_datetime(df_tmp.in_date) df_tmp.out_date = pd.to_datetime(df_tmp.out_date) return df_tmp.drop(columns=["in_date_stamp", "out_date_stamp"]) def QA_fetch_daily_basic( code: Union[str, List, Tuple] = None, start: Union[str, pd.Timestamp, datetime.datetime] = None, end: Union[str, pd.Timestamp, datetime.datetime] = None, cursor_date: Union[str, pd.Timestamp, datetime.datetime] = None, fields: Union[str, Tuple, List]= None ) -> pd.DataFrame: """获取全部股票每日重要的基本面指标,可用于选股分析、报表展示等 Args: code (Union[str, List, Tuple], optional): 指定股票代码或列表, 默认为 None,获取全市场 start (Union[str, pd.Timestamp, datetime.datetime], optional): 起始日期,默认为 None end (Union[str, 
pd.Timestamp, datetime.datetime], optional): 结束日期,默认为 None cursor_date (Union[str, pd.Timestamp, datetime.datetime], optional): 指定日期,与 start 和 end 冲突,只能选择 cursor_date 或者 start, end fields (Union[str, Tuple, List], optional): 指定 fields Returns: pd.DataFrame: 以日期,股票名为 Multiindex 的基本信息 """ if isinstance(code, str): code = (code,) if not code: if (not start) and (not cursor_date): raise ValueError( "[ERROR]\tstart and end and cursor_date cannot all be none!") if not cursor_date: if not end: end_stamp = QA_util_date_stamp(datetime.date.today()) else: end_stamp = QA_util_date_stamp(end) start_stamp = QA_util_date_stamp(start) qry = { "trade_date_stamp": { "$gte": start_stamp, "$lte": end_stamp } } else: real_trade_date = QA_util_get_real_date(cursor_date) trade_date_stamp = QA_util_date_stamp(real_trade_date) qry = { "trade_date_stamp": trade_date_stamp } else: if (not
'|' ^ bitwise_xor_expr pass char_literal211 = self.match(self.input, 133, self.FOLLOW_133_in_bitwise_or_expr1783) if self._state.backtracking == 0: char_literal211_tree = self._adaptor.createWithPayload(char_literal211) root_0 = self._adaptor.becomeRoot(char_literal211_tree, root_0) self._state.following.append(self.FOLLOW_bitwise_xor_expr_in_bitwise_or_expr1786) bitwise_xor_expr212 = self.bitwise_xor_expr() self._state.following.pop() if self._state.backtracking == 0: self._adaptor.addChild(root_0, bitwise_xor_expr212.tree) else: break #loop46 retval.stop = self.input.LT(-1) if self._state.backtracking == 0: retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException, re: self.reportError(re) self.recover(self.input, re) retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) finally: pass return retval # $ANTLR end "bitwise_or_expr" class bitwise_xor_expr_return(ParserRuleReturnScope): def __init__(self): super(ExprParser.bitwise_xor_expr_return, self).__init__() self.tree = None # $ANTLR start "bitwise_xor_expr" # Expr.g:288:1: bitwise_xor_expr : bitwise_and_expr ( '^' ^ bitwise_and_expr )* ; def bitwise_xor_expr(self, ): retval = self.bitwise_xor_expr_return() retval.start = self.input.LT(1) root_0 = None char_literal214 = None bitwise_and_expr213 = None bitwise_and_expr215 = None char_literal214_tree = None try: try: # Expr.g:289:2: ( bitwise_and_expr ( '^' ^ bitwise_and_expr )* ) # Expr.g:289:4: bitwise_and_expr ( '^' ^ bitwise_and_expr )* pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_bitwise_and_expr_in_bitwise_xor_expr1798) bitwise_and_expr213 = self.bitwise_and_expr() self._state.following.pop() if self._state.backtracking == 0: self._adaptor.addChild(root_0, bitwise_and_expr213.tree) # Expr.g:289:21: ( '^' ^ bitwise_and_expr )* while True: #loop47 alt47 = 2 LA47_0 = self.input.LA(1) if (LA47_0 == 102) 
: alt47 = 1 if alt47 == 1: # Expr.g:289:22: '^' ^ bitwise_and_expr pass char_literal214 = self.match(self.input, 102, self.FOLLOW_102_in_bitwise_xor_expr1801) if self._state.backtracking == 0: char_literal214_tree = self._adaptor.createWithPayload(char_literal214) root_0 = self._adaptor.becomeRoot(char_literal214_tree, root_0) self._state.following.append(self.FOLLOW_bitwise_and_expr_in_bitwise_xor_expr1804) bitwise_and_expr215 = self.bitwise_and_expr() self._state.following.pop() if self._state.backtracking == 0: self._adaptor.addChild(root_0, bitwise_and_expr215.tree) else: break #loop47 retval.stop = self.input.LT(-1) if self._state.backtracking == 0: retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException, re: self.reportError(re) self.recover(self.input, re) retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) finally: pass return retval # $ANTLR end "bitwise_xor_expr" class bitwise_and_expr_return(ParserRuleReturnScope): def __init__(self): super(ExprParser.bitwise_and_expr_return, self).__init__() self.tree = None # $ANTLR start "bitwise_and_expr" # Expr.g:291:1: bitwise_and_expr : relation_expr ( '&' ^ relation_expr )* ; def bitwise_and_expr(self, ): retval = self.bitwise_and_expr_return() retval.start = self.input.LT(1) root_0 = None char_literal217 = None relation_expr216 = None relation_expr218 = None char_literal217_tree = None try: try: # Expr.g:292:2: ( relation_expr ( '&' ^ relation_expr )* ) # Expr.g:292:4: relation_expr ( '&' ^ relation_expr )* pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_relation_expr_in_bitwise_and_expr1816) relation_expr216 = self.relation_expr() self._state.following.pop() if self._state.backtracking == 0: self._adaptor.addChild(root_0, relation_expr216.tree) # Expr.g:292:18: ( '&' ^ relation_expr )* while True: #loop48 alt48 = 2 LA48_0 = self.input.LA(1) if (LA48_0 
== 73) : alt48 = 1 if alt48 == 1: # Expr.g:292:19: '&' ^ relation_expr pass char_literal217 = self.match(self.input, 73, self.FOLLOW_73_in_bitwise_and_expr1819) if self._state.backtracking == 0: char_literal217_tree = self._adaptor.createWithPayload(char_literal217) root_0 = self._adaptor.becomeRoot(char_literal217_tree, root_0) self._state.following.append(self.FOLLOW_relation_expr_in_bitwise_and_expr1822) relation_expr218 = self.relation_expr() self._state.following.pop() if self._state.backtracking == 0: self._adaptor.addChild(root_0, relation_expr218.tree) else: break #loop48 retval.stop = self.input.LT(-1) if self._state.backtracking == 0: retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException, re: self.reportError(re) self.recover(self.input, re) retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) finally: pass return retval # $ANTLR end "bitwise_and_expr" class relation_expr_return(ParserRuleReturnScope): def __init__(self): super(ExprParser.relation_expr_return, self).__init__() self.tree = None # $ANTLR start "relation_expr" # Expr.g:294:1: relation_expr : add_expr ( ( '<' | '>' | '<=' | '>=' | '==' | '!=' ) ^ add_expr )? ; def relation_expr(self, ): retval = self.relation_expr_return() retval.start = self.input.LT(1) root_0 = None set220 = None add_expr219 = None add_expr221 = None set220_tree = None try: try: # Expr.g:295:2: ( add_expr ( ( '<' | '>' | '<=' | '>=' | '==' | '!=' ) ^ add_expr )? ) # Expr.g:295:4: add_expr ( ( '<' | '>' | '<=' | '>=' | '==' | '!=' ) ^ add_expr )? pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_add_expr_in_relation_expr1834) add_expr219 = self.add_expr() self._state.following.pop() if self._state.backtracking == 0: self._adaptor.addChild(root_0, add_expr219.tree) # Expr.g:295:13: ( ( '<' | '>' | '<=' | '>=' | '==' | '!=' ) ^ add_expr )? 
alt49 = 2 LA49_0 = self.input.LA(1) if (LA49_0 == 69 or (93 <= LA49_0 <= 94) or LA49_0 == 96 or (98 <= LA49_0 <= 99)) : alt49 = 1 if alt49 == 1: # Expr.g:295:14: ( '<' | '>' | '<=' | '>=' | '==' | '!=' ) ^ add_expr pass set220 = self.input.LT(1) set220 = self.input.LT(1) if self.input.LA(1) == 69 or (93 <= self.input.LA(1) <= 94) or self.input.LA(1) == 96 or (98 <= self.input.LA(1) <= 99): self.input.consume() if self._state.backtracking == 0: root_0 = self._adaptor.becomeRoot(self._adaptor.createWithPayload(set220), root_0) self._state.errorRecovery = False else: if self._state.backtracking > 0: raise BacktrackingFailed mse = MismatchedSetException(None, self.input) raise mse self._state.following.append(self.FOLLOW_add_expr_in_relation_expr1852) add_expr221 = self.add_expr() self._state.following.pop() if self._state.backtracking == 0: self._adaptor.addChild(root_0, add_expr221.tree) retval.stop = self.input.LT(-1) if self._state.backtracking == 0: retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException, re: self.reportError(re) self.recover(self.input, re) retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) finally: pass return retval # $ANTLR end "relation_expr" class add_expr_return(ParserRuleReturnScope): def __init__(self): super(ExprParser.add_expr_return, self).__init__() self.tree = None # $ANTLR start "add_expr" # Expr.g:297:1: add_expr : mul_expr ( ( '+' | '-' ) ^ mul_expr )* ; def add_expr(self, ): retval = self.add_expr_return() retval.start = self.input.LT(1) root_0 = None set223 = None mul_expr222 = None mul_expr224 = None set223_tree = None try: try: # Expr.g:298:2: ( mul_expr ( ( '+' | '-' ) ^ mul_expr )* ) # Expr.g:298:4: mul_expr ( ( '+' | '-' ) ^ mul_expr )* pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_mul_expr_in_add_expr1864) mul_expr222 = self.mul_expr() 
self._state.following.pop() if self._state.backtracking == 0: self._adaptor.addChild(root_0, mul_expr222.tree) # Expr.g:298:13: ( ( '+' | '-' ) ^ mul_expr )* while True: #loop50 alt50 = 2 LA50_0 = self.input.LA(1) if (LA50_0 == 79 or LA50_0 == 83) : alt50 = 1 if alt50 == 1: # Expr.g:298:14: ( '+' | '-' ) ^ mul_expr pass set223 = self.input.LT(1) set223 = self.input.LT(1) if self.input.LA(1) == 79 or self.input.LA(1) == 83: self.input.consume() if self._state.backtracking == 0: root_0 = self._adaptor.becomeRoot(self._adaptor.createWithPayload(set223), root_0) self._state.errorRecovery = False else: if self._state.backtracking > 0: raise BacktrackingFailed mse = MismatchedSetException(None, self.input) raise mse self._state.following.append(self.FOLLOW_mul_expr_in_add_expr1874) mul_expr224 = self.mul_expr() self._state.following.pop() if self._state.backtracking == 0: self._adaptor.addChild(root_0, mul_expr224.tree) else: break #loop50 retval.stop = self.input.LT(-1) if self._state.backtracking == 0: retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException, re: self.reportError(re) self.recover(self.input, re) retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) finally: pass return retval # $ANTLR end "add_expr" class mul_expr_return(ParserRuleReturnScope): def __init__(self): super(ExprParser.mul_expr_return, self).__init__() self.tree = None # $ANTLR start "mul_expr" # Expr.g:300:1: mul_expr : not_expr ( ( '*' | '/' | '%' ) ^ not_expr )* ; def mul_expr(self, ): retval = self.mul_expr_return() retval.start = self.input.LT(1) root_0 = None set226 = None not_expr225 = None not_expr227 = None set226_tree = None try: try: # Expr.g:301:2: ( not_expr ( ( '*' | '/' | '%' ) ^ not_expr )* ) # Expr.g:301:4: not_expr ( ( '*' | '/' | '%' ) ^ not_expr )* pass root_0 = self._adaptor.nil() 
self._state.following.append(self.FOLLOW_not_expr_in_mul_expr1886) not_expr225 = self.not_expr() self._state.following.pop() if self._state.backtracking == 0: self._adaptor.addChild(root_0, not_expr225.tree) # Expr.g:301:13: ( ( '*' | '/' | '%' ) ^ not_expr )* while True: #loop51 alt51 = 2 LA51_0 = self.input.LA(1) if (LA51_0 == 70 or LA51_0 == 77 or LA51_0 == 89) : alt51 = 1 if alt51 == 1: # Expr.g:301:14: ( '*' | '/' | '%' ) ^ not_expr pass set226 = self.input.LT(1) set226 = self.input.LT(1) if self.input.LA(1) == 70 or self.input.LA(1) == 77 or self.input.LA(1) == 89: self.input.consume() if self._state.backtracking == 0: root_0 = self._adaptor.becomeRoot(self._adaptor.createWithPayload(set226), root_0) self._state.errorRecovery = False else: if self._state.backtracking > 0: raise BacktrackingFailed mse = MismatchedSetException(None, self.input) raise mse self._state.following.append(self.FOLLOW_not_expr_in_mul_expr1898) not_expr227 = self.not_expr() self._state.following.pop() if self._state.backtracking == 0: self._adaptor.addChild(root_0, not_expr227.tree) else: break #loop51 retval.stop = self.input.LT(-1) if self._state.backtracking == 0: retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException, re: self.reportError(re) self.recover(self.input, re) retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re) finally: pass return retval # $ANTLR end "mul_expr" class not_expr_return(ParserRuleReturnScope): def __init__(self): super(ExprParser.not_expr_return, self).__init__() self.tree = None # $ANTLR start "not_expr" # Expr.g:303:1: not_expr : (op= '!' )? negative_expr -> {$op != None}? ^( '!' 
negative_expr ) -> negative_expr ; def not_expr(self, ): retval = self.not_expr_return() retval.start = self.input.LT(1) root_0 = None op = None negative_expr228 = None op_tree = None stream_68 = RewriteRuleTokenStream(self._adaptor, "token 68") stream_negative_expr = RewriteRuleSubtreeStream(self._adaptor, "rule negative_expr") try: try: # Expr.g:304:2: ( (op= '!' )? negative_expr -> {$op != None}? ^( '!' negative_expr ) -> negative_expr ) # Expr.g:304:4:
pair should have normal behavior for that (user, stream) pair. # A normal muted topic. No notifications and unreads hidden. MUTED = 1 # This topic will behave like an unmuted topic in an unmuted stream even if it # belongs to a muted stream. UNMUTED = 2 # This topic will behave like `UNMUTED`, plus will also always trigger notifications. FOLLOWED = 3 visibility_policy_choices = ( (MUTED, "Muted topic"), (UNMUTED, "Unmuted topic in muted stream"), (FOLLOWED, "Followed topic"), ) visibility_policy: int = models.SmallIntegerField( choices=visibility_policy_choices, default=MUTED ) class Meta: unique_together = ("user_profile", "stream", "topic_name") indexes = [ # This index is designed to optimize queries fetching the # set of users who have special policy for a stream, # e.g. for the send-message code paths. models.Index( fields=("stream", "topic_name", "visibility_policy", "user_profile"), name="zerver_usertopic_stream_topic_user_visibility_idx", ), # This index is useful for handling API requests fetching the # muted topics for a given user or user/stream pair. 
            models.Index(
                fields=("user_profile", "visibility_policy", "stream", "topic_name"),
                name="zerver_usertopic_user_visibility_idx",
            ),
        ]

    def __str__(self) -> str:
        return f"<UserTopic: ({self.user_profile.email}, {self.stream.name}, {self.topic_name}, {self.last_updated})>"


class MutedUser(models.Model):
    """Records that user_profile has muted muted_user.

    One row per (muter, mutee) pair; `date_muted` records when the mute
    was created.
    """

    user_profile = models.ForeignKey(UserProfile, related_name="+", on_delete=CASCADE)
    muted_user = models.ForeignKey(UserProfile, related_name="+", on_delete=CASCADE)
    date_muted: datetime.datetime = models.DateTimeField(default=timezone_now)

    class Meta:
        # A user can mute another user at most once.
        unique_together = ("user_profile", "muted_user")

    def __str__(self) -> str:
        return f"<MutedUser: {self.user_profile.email} -> {self.muted_user.email}>"


# Invalidate the muting-users cache whenever a MutedUser row is created,
# updated, or deleted, so reads never see stale mute state.
post_save.connect(flush_muting_users_cache, sender=MutedUser)
post_delete.connect(flush_muting_users_cache, sender=MutedUser)


class Client(models.Model):
    """A named API client (user agent); rows are created on demand by
    get_client_remote_cache and looked up by unique `name`."""

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    name: str = models.CharField(max_length=30, db_index=True, unique=True)

    def __str__(self) -> str:
        return f"<Client: {self.name}>"


# Per-process, in-memory cache mapping a prefixed client name to its
# Client row; sits in front of the remote (memcached) cache layer used
# by get_client_remote_cache.
get_client_cache: Dict[str, Client] = {}


def clear_client_cache() -> None:  # nocoverage
    """Drop the per-process Client cache (e.g. between tests)."""
    global get_client_cache
    get_client_cache = {}


def get_client(name: str) -> Client:
    """Fetch (or create) the Client with this name, via a two-level cache."""
    # Accessing KEY_PREFIX through the module is necessary
    # because we need the updated value of the variable.
    # First consult the per-process dict; on a miss, fall through to the
    # remote cache (which itself falls through to the database).
    cache_name = cache.KEY_PREFIX + name
    if cache_name not in get_client_cache:
        result = get_client_remote_cache(name)
        get_client_cache[cache_name] = result
    return get_client_cache[cache_name]


def get_client_cache_key(name: str) -> str:
    """Remote-cache key for get_client_remote_cache; the name is hashed
    via make_safe_digest so arbitrary client names produce safe keys."""
    return f"get_client:{make_safe_digest(name)}"


# Cached remotely for one week; creates the Client row on first use.
@cache_with_key(get_client_cache_key, timeout=3600 * 24 * 7)
def get_client_remote_cache(name: str) -> Client:
    (client, _) = Client.objects.get_or_create(name=name)
    return client


# Case-insensitive stream lookup by (name, realm_id), cached remotely for
# one week under get_stream_cache_key.  Raises Stream.DoesNotExist on a miss.
@cache_with_key(get_stream_cache_key, timeout=3600 * 24 * 7)
def get_realm_stream(stream_name: str, realm_id: int) -> Stream:
    return Stream.objects.select_related().get(name__iexact=stream_name.strip(), realm_id=realm_id)


def get_active_streams(realm: Realm) -> QuerySet:
    # TODO: Change return type to QuerySet[Stream]
    # NOTE: Return value is used as a QuerySet, so cannot currently be Sequence[QuerySet]
    """
    Return all streams (including invite-only streams)
    that have not been deactivated.
    """
    return Stream.objects.filter(realm=realm, deactivated=False)


def get_linkable_streams(realm_id: int) -> QuerySet:
    """
    This returns the streams that we are allowed to linkify using
    something like "#frontend" in our markup. For now the business
    rule is that you can link any stream in the realm that hasn't
    been deactivated (similar to how get_active_streams works).
    """
    return Stream.objects.filter(realm_id=realm_id, deactivated=False)


def get_stream(stream_name: str, realm: Realm) -> Stream:
    """
    Callers that don't have a Realm object already available
    should use get_realm_stream directly, to avoid
    unnecessarily fetching the Realm object.
""" return get_realm_stream(stream_name, realm.id) def get_stream_by_id_in_realm(stream_id: int, realm: Realm) -> Stream: return Stream.objects.select_related().get(id=stream_id, realm=realm) def bulk_get_streams(realm: Realm, stream_names: STREAM_NAMES) -> Dict[str, Any]: def fetch_streams_by_name(stream_names: List[str]) -> Sequence[Stream]: # # This should be just # # Stream.objects.select_related().filter(name__iexact__in=stream_names, # realm_id=realm_id) # # But chaining __in and __iexact doesn't work with Django's # ORM, so we have the following hack to construct the relevant where clause where_clause = ( "upper(zerver_stream.name::text) IN (SELECT upper(name) FROM unnest(%s) AS name)" ) return ( get_active_streams(realm) .select_related() .extra(where=[where_clause], params=(list(stream_names),)) ) def stream_name_to_cache_key(stream_name: str) -> str: return get_stream_cache_key(stream_name, realm.id) def stream_to_lower_name(stream: Stream) -> str: return stream.name.lower() return bulk_cached_fetch( stream_name_to_cache_key, fetch_streams_by_name, [stream_name.lower() for stream_name in stream_names], id_fetcher=stream_to_lower_name, ) def get_huddle_recipient(user_profile_ids: Set[int]) -> Recipient: # The caller should ensure that user_profile_ids includes # the sender. Note that get_huddle hits the cache, and then # we hit another cache to get the recipient. We may want to # unify our caching strategy here. huddle = get_huddle(list(user_profile_ids)) return huddle.recipient def get_huddle_user_ids(recipient: Recipient) -> List[int]: assert recipient.type == Recipient.HUDDLE return ( Subscription.objects.filter( recipient=recipient, ) .order_by("user_profile_id") .values_list("user_profile_id", flat=True) ) def bulk_get_huddle_user_ids(recipients: List[Recipient]) -> Dict[int, List[int]]: """ Takes a list of huddle-type recipients, returns a dict mapping recipient id to list of user ids in the huddle. 
""" assert all(recipient.type == Recipient.HUDDLE for recipient in recipients) if not recipients: return {} subscriptions = Subscription.objects.filter( recipient__in=recipients, ).order_by("user_profile_id") result_dict: Dict[int, List[int]] = {} for recipient in recipients: result_dict[recipient.id] = [ subscription.user_profile_id for subscription in subscriptions if subscription.recipient_id == recipient.id ] return result_dict class AbstractMessage(models.Model): sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE) recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE) # The message's topic. # # Early versions of Zulip called this concept a "subject", as in an email # "subject line", before changing to "topic" in 2013 (commit dac5a46fa). # UI and user documentation now consistently say "topic". New APIs and # new code should generally also say "topic". # # See also the `topic_name` method on `Message`. subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True) content: str = models.TextField() rendered_content: Optional[str] = models.TextField(null=True) rendered_content_version: Optional[int] = models.IntegerField(null=True) date_sent: datetime.datetime = models.DateTimeField("date sent", db_index=True) sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE) last_edit_time: Optional[datetime.datetime] = models.DateTimeField(null=True) # A JSON-encoded list of objects describing any past edits to this # message, oldest first. 
edit_history: Optional[str] = models.TextField(null=True) has_attachment: bool = models.BooleanField(default=False, db_index=True) has_image: bool = models.BooleanField(default=False, db_index=True) has_link: bool = models.BooleanField(default=False, db_index=True) class Meta: abstract = True def __str__(self) -> str: display_recipient = get_display_recipient(self.recipient) return f"<{self.__class__.__name__}: {display_recipient} / {self.subject} / {self.sender}>" class ArchiveTransaction(models.Model): id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID") timestamp: datetime.datetime = models.DateTimeField(default=timezone_now, db_index=True) # Marks if the data archived in this transaction has been restored: restored: bool = models.BooleanField(default=False, db_index=True) type: int = models.PositiveSmallIntegerField(db_index=True) # Valid types: RETENTION_POLICY_BASED = 1 # Archiving was executed due to automated retention policies MANUAL = 2 # Archiving was run manually, via move_messages_to_archive function # ForeignKey to the realm with which objects archived in this transaction are associated. # If type is set to MANUAL, this should be null. realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE) def __str__(self) -> str: return "ArchiveTransaction id: {id}, type: {type}, realm: {realm}, timestamp: {timestamp}".format( id=self.id, type="MANUAL" if self.type == self.MANUAL else "RETENTION_POLICY_BASED", realm=self.realm.string_id if self.realm else None, timestamp=self.timestamp, ) class ArchivedMessage(AbstractMessage): """Used as a temporary holding place for deleted messages before they are permanently deleted. This is an important part of a robust 'message retention' feature. 
""" id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID") archive_transaction: ArchiveTransaction = models.ForeignKey( ArchiveTransaction, on_delete=CASCADE ) class Message(AbstractMessage): id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID") def topic_name(self) -> str: """ Please start using this helper to facilitate an eventual switch over to a separate topic table. """ return self.subject def set_topic_name(self, topic_name: str) -> None: self.subject = topic_name def is_stream_message(self) -> bool: """ Find out whether a message is a stream message by looking up its recipient.type. TODO: Make this an easier operation by denormalizing the message type onto Message, either explicitly (message.type) or implicitly (message.stream_id is not None). """ return self.recipient.type == Recipient.STREAM def get_realm(self) -> Realm: return self.sender.realm def save_rendered_content(self) -> None: self.save(update_fields=["rendered_content", "rendered_content_version"]) @staticmethod def need_to_render_content( rendered_content: Optional[str], rendered_content_version: Optional[int], markdown_version: int, ) -> bool: return ( rendered_content is None or rendered_content_version is None or rendered_content_version < markdown_version ) def sent_by_human(self) -> bool: """Used to determine whether a message was sent by a full Zulip UI style client (and thus whether the message should be treated as sent by a human and automatically marked as read for the sender). The purpose of this distinction is to ensure that message sent to the user by e.g. a Google Calendar integration using the user's own API key don't get marked as read automatically. 
""" sending_client = self.sending_client.name.lower() return ( sending_client in ( "zulipandroid", "zulipios", "zulipdesktop", "zulipmobile", "zulipelectron", "zulipterminal", "snipe", "website", "ios", "android", ) ) or ("desktop app" in sending_client) @staticmethod def is_status_message(content: str, rendered_content: str) -> bool: """ "status messages" start with /me and have special rendering: /me loves chocolate -> Full Name loves chocolate """ if content.startswith("/me "): return True return False def get_context_for_message(message: Message) -> Sequence[Message]: # TODO: Change return type to QuerySet[Message] return Message.objects.filter( recipient_id=message.recipient_id, subject=message.subject, id__lt=message.id, date_sent__gt=message.date_sent - timedelta(minutes=15), ).order_by("-id")[:10] post_save.connect(flush_message, sender=Message) class AbstractSubMessage(models.Model): # We can send little text messages that are associated with a regular # Zulip message. These can
""" Cisco_IOS_XR_ncs5500_coherent_node_oper This module contains a collection of YANG definitions for Cisco IOS\-XR ncs5500\-coherent\-node package operational data. This module contains definitions for the following management objects\: coherent\: Coherent node operational data Copyright (c) 2013\-2016 by Cisco Systems, Inc. All rights reserved. """ import re import collections from enum import Enum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk.errors import YPYError, YPYModelError class Coherent(object): """ Coherent node operational data .. attribute:: nodes Coherent list of nodes **type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes>` """ _prefix = 'ncs5500-coherent-node-oper' _revision = '2015-11-09' def __init__(self): self.nodes = Coherent.Nodes() self.nodes.parent = self class Nodes(object): """ Coherent list of nodes .. attribute:: node Coherent discovery operational data for a particular node **type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node>` """ _prefix = 'ncs5500-coherent-node-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.node = YList() self.node.parent = self self.node.name = 'node' class Node(object): """ Coherent discovery operational data for a particular node .. attribute:: node_name <key> The node name **type**\: str **pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+) .. attribute:: coherent_time_stats Coherent driver performace information **type**\: :py:class:`CoherentTimeStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats>` .. attribute:: coherenthealth Coherent node data for driver health **type**\: :py:class:`Coherenthealth <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.Coherenthealth>` .. 
attribute:: devicemapping Coherent node data for device \_mapping **type**\: :py:class:`Devicemapping <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.Devicemapping>` .. attribute:: port_mode_all_info PortMode all operational data **type**\: :py:class:`PortModeAllInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.PortModeAllInfo>` """ _prefix = 'ncs5500-coherent-node-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.node_name = None self.coherent_time_stats = Coherent.Nodes.Node.CoherentTimeStats() self.coherent_time_stats.parent = self self.coherenthealth = Coherent.Nodes.Node.Coherenthealth() self.coherenthealth.parent = self self.devicemapping = Coherent.Nodes.Node.Devicemapping() self.devicemapping.parent = self self.port_mode_all_info = Coherent.Nodes.Node.PortModeAllInfo() self.port_mode_all_info.parent = self class CoherentTimeStats(object): """ Coherent driver performace information .. attribute:: device_created device created **type**\: str **length:** 0..255 .. attribute:: driver_init driver init **type**\: str **length:** 0..255 .. attribute:: driver_operational driver operational **type**\: str **length:** 0..255 .. attribute:: dsp_controllers_created dsp controllers created **type**\: str **length:** 0..255 .. attribute:: dsp_ea_bulk_create dsp ea bulk create **type**\: :py:class:`DspEaBulkCreate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.DspEaBulkCreate>` .. attribute:: dsp_ea_bulk_update dsp ea bulk update **type**\: :py:class:`DspEaBulkUpdate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.DspEaBulkUpdate>` .. attribute:: eth_intf_created eth intf created **type**\: str **length:** 0..255 .. attribute:: optics_controllers_created optics controllers created **type**\: str **length:** 0..255 .. 
attribute:: opts_ea_bulk_create opts ea bulk create **type**\: :py:class:`OptsEaBulkCreate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.OptsEaBulkCreate>` .. attribute:: opts_ea_bulk_update opts ea bulk update **type**\: :py:class:`OptsEaBulkUpdate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.OptsEaBulkUpdate>` .. attribute:: port_stat port stat **type**\: list of :py:class:`PortStat <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.PortStat>` """ _prefix = 'ncs5500-coherent-node-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.device_created = None self.driver_init = None self.driver_operational = None self.dsp_controllers_created = None self.dsp_ea_bulk_create = Coherent.Nodes.Node.CoherentTimeStats.DspEaBulkCreate() self.dsp_ea_bulk_create.parent = self self.dsp_ea_bulk_update = Coherent.Nodes.Node.CoherentTimeStats.DspEaBulkUpdate() self.dsp_ea_bulk_update.parent = self self.eth_intf_created = None self.optics_controllers_created = None self.opts_ea_bulk_create = Coherent.Nodes.Node.CoherentTimeStats.OptsEaBulkCreate() self.opts_ea_bulk_create.parent = self self.opts_ea_bulk_update = Coherent.Nodes.Node.CoherentTimeStats.OptsEaBulkUpdate() self.opts_ea_bulk_update.parent = self self.port_stat = YList() self.port_stat.parent = self self.port_stat.name = 'port_stat' class OptsEaBulkCreate(object): """ opts ea bulk create .. attribute:: end end **type**\: str **length:** 0..255 .. attribute:: start start **type**\: str **length:** 0..255 .. attribute:: time_taken time taken **type**\: str **length:** 0..255 .. 
attribute:: worst_time worst time **type**\: str **length:** 0..255 """ _prefix = 'ncs5500-coherent-node-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.end = None self.start = None self.time_taken = None self.worst_time = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-ncs5500-coherent-node-oper:opts-ea-bulk-create' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.end is not None: return True if self.start is not None: return True if self.time_taken is not None: return True if self.worst_time is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs5500_coherent_node_oper as meta return meta._meta_table['Coherent.Nodes.Node.CoherentTimeStats.OptsEaBulkCreate']['meta_info'] class OptsEaBulkUpdate(object): """ opts ea bulk update .. attribute:: end end **type**\: str **length:** 0..255 .. attribute:: start start **type**\: str **length:** 0..255 .. attribute:: time_taken time taken **type**\: str **length:** 0..255 .. attribute:: worst_time worst time **type**\: str **length:** 0..255 """ _prefix = 'ncs5500-coherent-node-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.end = None self.start = None self.time_taken = None self.worst_time = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-ncs5500-coherent-node-oper:opts-ea-bulk-update' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.end is not None: return True if self.start is not None: return True if self.time_taken is not None: return True if self.worst_time is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs5500_coherent_node_oper as meta return meta._meta_table['Coherent.Nodes.Node.CoherentTimeStats.OptsEaBulkUpdate']['meta_info'] class DspEaBulkCreate(object): """ dsp ea bulk create .. attribute:: end end **type**\: str **length:** 0..255 .. attribute:: start start **type**\: str **length:** 0..255 .. attribute:: time_taken time taken **type**\: str **length:** 0..255 .. attribute:: worst_time worst time **type**\: str **length:** 0..255 """ _prefix = 'ncs5500-coherent-node-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.end = None self.start = None self.time_taken = None self.worst_time = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-ncs5500-coherent-node-oper:dsp-ea-bulk-create' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.end is not None: return True if self.start is not None: return True if self.time_taken is not None: return True if self.worst_time is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs5500_coherent_node_oper as meta return meta._meta_table['Coherent.Nodes.Node.CoherentTimeStats.DspEaBulkCreate']['meta_info'] class DspEaBulkUpdate(object): """ dsp ea bulk update .. attribute:: end end **type**\: str **length:** 0..255 .. attribute:: start start **type**\: str **length:** 0..255 .. attribute:: time_taken time taken **type**\: str **length:** 0..255 .. attribute:: worst_time worst time **type**\: str **length:** 0..255 """ _prefix = 'ncs5500-coherent-node-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.end = None self.start = None self.time_taken = None self.worst_time = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-ncs5500-coherent-node-oper:dsp-ea-bulk-update' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.end is not None: return True if self.start is not None: return True if self.time_taken is not None: return True if self.worst_time is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs5500_coherent_node_oper as meta return meta._meta_table['Coherent.Nodes.Node.CoherentTimeStats.DspEaBulkUpdate']['meta_info'] class PortStat(object): """ port stat .. attribute:: cd_max cd max **type**\: int **range:** 0..4294967295 .. attribute:: cd_min cd min **type**\: int **range:** 0..4294967295 .. attribute:: cdmax_op_stats cdmax op stats **type**\: :py:class:`CdmaxOpStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.PortStat.CdmaxOpStats>` .. attribute:: cdmin_op_stats cdmin op stats **type**\: :py:class:`CdminOpStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.PortStat.CdminOpStats>` .. attribute:: laser_off_stats laser off stats **type**\: :py:class:`LaserOffStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.PortStat.LaserOffStats>` .. attribute:: laser_on_stats laser on stats **type**\: :py:class:`LaserOnStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.PortStat.LaserOnStats>` .. attribute:: laser_state laser state **type**\: bool .. attribute:: traffic_type traffic type **type**\: int **range:** 0..4294967295 .. 
attribute:: traffictype_op_stats traffictype op stats **type**\: :py:class:`TraffictypeOpStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.PortStat.TraffictypeOpStats>` .. attribute:: tx_power tx power **type**\: int **range:** 0..4294967295 .. attribute:: txpwr_op_stats txpwr op stats **type**\: :py:class:`TxpwrOpStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.PortStat.TxpwrOpStats>` .. attribute:: wavelength wavelength **type**\: int **range:** 0..4294967295 .. attribute:: wl_op_stats wl op stats **type**\: :py:class:`WlOpStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_node_oper.Coherent.Nodes.Node.CoherentTimeStats.PortStat.WlOpStats>` """ _prefix = 'ncs5500-coherent-node-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.cd_max = None
te: result.te = te oprot.writeMessageBegin("get", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_get_slice(self, seqid, iprot, oprot): args = get_slice_args() args.read(iprot) iprot.readMessageEnd() result = get_slice_result() try: result.success = self._handler.get_slice(args.key, args.column_parent, args.predicate, args.consistency_level) except InvalidRequestException, ire: result.ire = ire except UnavailableException, ue: result.ue = ue except TimedOutException, te: result.te = te oprot.writeMessageBegin("get_slice", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_get_count(self, seqid, iprot, oprot): args = get_count_args() args.read(iprot) iprot.readMessageEnd() result = get_count_result() try: result.success = self._handler.get_count(args.key, args.column_parent, args.predicate, args.consistency_level) except InvalidRequestException, ire: result.ire = ire except UnavailableException, ue: result.ue = ue except TimedOutException, te: result.te = te oprot.writeMessageBegin("get_count", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_multiget_slice(self, seqid, iprot, oprot): args = multiget_slice_args() args.read(iprot) iprot.readMessageEnd() result = multiget_slice_result() try: result.success = self._handler.multiget_slice(args.keys, args.column_parent, args.predicate, args.consistency_level) except InvalidRequestException, ire: result.ire = ire except UnavailableException, ue: result.ue = ue except TimedOutException, te: result.te = te oprot.writeMessageBegin("multiget_slice", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_multiget_count(self, seqid, iprot, oprot): args = multiget_count_args() args.read(iprot) iprot.readMessageEnd() result = multiget_count_result() try: result.success = self._handler.multiget_count(args.keys, 
args.column_parent, args.predicate, args.consistency_level) except InvalidRequestException, ire: result.ire = ire except UnavailableException, ue: result.ue = ue except TimedOutException, te: result.te = te oprot.writeMessageBegin("multiget_count", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_get_range_slices(self, seqid, iprot, oprot): args = get_range_slices_args() args.read(iprot) iprot.readMessageEnd() result = get_range_slices_result() try: result.success = self._handler.get_range_slices(args.column_parent, args.predicate, args.range, args.consistency_level) except InvalidRequestException, ire: result.ire = ire except UnavailableException, ue: result.ue = ue except TimedOutException, te: result.te = te oprot.writeMessageBegin("get_range_slices", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_get_indexed_slices(self, seqid, iprot, oprot): args = get_indexed_slices_args() args.read(iprot) iprot.readMessageEnd() result = get_indexed_slices_result() try: result.success = self._handler.get_indexed_slices(args.column_parent, args.index_clause, args.column_predicate, args.consistency_level) except InvalidRequestException, ire: result.ire = ire except UnavailableException, ue: result.ue = ue except TimedOutException, te: result.te = te oprot.writeMessageBegin("get_indexed_slices", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_insert(self, seqid, iprot, oprot): args = insert_args() args.read(iprot) iprot.readMessageEnd() result = insert_result() try: self._handler.insert(args.key, args.column_parent, args.column, args.consistency_level) except InvalidRequestException, ire: result.ire = ire except UnavailableException, ue: result.ue = ue except TimedOutException, te: result.te = te oprot.writeMessageBegin("insert", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() 
oprot.trans.flush() def process_remove(self, seqid, iprot, oprot): args = remove_args() args.read(iprot) iprot.readMessageEnd() result = remove_result() try: self._handler.remove(args.key, args.column_path, args.timestamp, args.consistency_level) except InvalidRequestException, ire: result.ire = ire except UnavailableException, ue: result.ue = ue except TimedOutException, te: result.te = te oprot.writeMessageBegin("remove", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_batch_mutate(self, seqid, iprot, oprot): args = batch_mutate_args() args.read(iprot) iprot.readMessageEnd() result = batch_mutate_result() try: self._handler.batch_mutate(args.mutation_map, args.consistency_level) except InvalidRequestException, ire: result.ire = ire except UnavailableException, ue: result.ue = ue except TimedOutException, te: result.te = te oprot.writeMessageBegin("batch_mutate", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_truncate(self, seqid, iprot, oprot): args = truncate_args() args.read(iprot) iprot.readMessageEnd() result = truncate_result() try: self._handler.truncate(args.cfname) except InvalidRequestException, ire: result.ire = ire except UnavailableException, ue: result.ue = ue oprot.writeMessageBegin("truncate", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_describe_schema_versions(self, seqid, iprot, oprot): args = describe_schema_versions_args() args.read(iprot) iprot.readMessageEnd() result = describe_schema_versions_result() try: result.success = self._handler.describe_schema_versions() except InvalidRequestException, ire: result.ire = ire oprot.writeMessageBegin("describe_schema_versions", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_describe_keyspaces(self, seqid, iprot, oprot): args = describe_keyspaces_args() args.read(iprot) 
iprot.readMessageEnd() result = describe_keyspaces_result() try: result.success = self._handler.describe_keyspaces() except InvalidRequestException, ire: result.ire = ire oprot.writeMessageBegin("describe_keyspaces", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_describe_cluster_name(self, seqid, iprot, oprot): args = describe_cluster_name_args() args.read(iprot) iprot.readMessageEnd() result = describe_cluster_name_result() result.success = self._handler.describe_cluster_name() oprot.writeMessageBegin("describe_cluster_name", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_describe_version(self, seqid, iprot, oprot): args = describe_version_args() args.read(iprot) iprot.readMessageEnd() result = describe_version_result() result.success = self._handler.describe_version() oprot.writeMessageBegin("describe_version", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_describe_ring(self, seqid, iprot, oprot): args = describe_ring_args() args.read(iprot) iprot.readMessageEnd() result = describe_ring_result() try: result.success = self._handler.describe_ring(args.keyspace) except InvalidRequestException, ire: result.ire = ire oprot.writeMessageBegin("describe_ring", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_describe_partitioner(self, seqid, iprot, oprot): args = describe_partitioner_args() args.read(iprot) iprot.readMessageEnd() result = describe_partitioner_result() result.success = self._handler.describe_partitioner() oprot.writeMessageBegin("describe_partitioner", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_describe_snitch(self, seqid, iprot, oprot): args = describe_snitch_args() args.read(iprot) iprot.readMessageEnd() result = describe_snitch_result() result.success = 
self._handler.describe_snitch() oprot.writeMessageBegin("describe_snitch", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_describe_keyspace(self, seqid, iprot, oprot): args = describe_keyspace_args() args.read(iprot) iprot.readMessageEnd() result = describe_keyspace_result() try: result.success = self._handler.describe_keyspace(args.keyspace) except NotFoundException, nfe: result.nfe = nfe except InvalidRequestException, ire: result.ire = ire oprot.writeMessageBegin("describe_keyspace", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_describe_splits(self, seqid, iprot, oprot): args = describe_splits_args() args.read(iprot) iprot.readMessageEnd() result = describe_splits_result() result.success = self._handler.describe_splits(args.cfName, args.start_token, args.end_token, args.keys_per_split) oprot.writeMessageBegin("describe_splits", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_system_add_column_family(self, seqid, iprot, oprot): args = system_add_column_family_args() args.read(iprot) iprot.readMessageEnd() result = system_add_column_family_result() try: result.success = self._handler.system_add_column_family(args.cf_def) except InvalidRequestException, ire: result.ire = ire oprot.writeMessageBegin("system_add_column_family", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_system_drop_column_family(self, seqid, iprot, oprot): args = system_drop_column_family_args() args.read(iprot) iprot.readMessageEnd() result = system_drop_column_family_result() try: result.success = self._handler.system_drop_column_family(args.column_family) except InvalidRequestException, ire: result.ire = ire oprot.writeMessageBegin("system_drop_column_family", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def 
process_system_add_keyspace(self, seqid, iprot, oprot): args = system_add_keyspace_args() args.read(iprot) iprot.readMessageEnd() result = system_add_keyspace_result() try: result.success = self._handler.system_add_keyspace(args.ks_def) except InvalidRequestException, ire: result.ire = ire oprot.writeMessageBegin("system_add_keyspace", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_system_drop_keyspace(self, seqid, iprot, oprot): args = system_drop_keyspace_args() args.read(iprot) iprot.readMessageEnd() result = system_drop_keyspace_result() try: result.success = self._handler.system_drop_keyspace(args.keyspace) except InvalidRequestException, ire: result.ire = ire oprot.writeMessageBegin("system_drop_keyspace", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_system_update_keyspace(self, seqid, iprot, oprot): args = system_update_keyspace_args() args.read(iprot) iprot.readMessageEnd() result = system_update_keyspace_result() try: result.success = self._handler.system_update_keyspace(args.ks_def) except InvalidRequestException, ire: result.ire = ire oprot.writeMessageBegin("system_update_keyspace", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_system_update_column_family(self, seqid, iprot, oprot): args = system_update_column_family_args() args.read(iprot) iprot.readMessageEnd() result = system_update_column_family_result() try: result.success = self._handler.system_update_column_family(args.cf_def) except InvalidRequestException, ire: result.ire = ire oprot.writeMessageBegin("system_update_column_family", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() # HELPER FUNCTIONS AND STRUCTURES class login_args: """ Attributes: - auth_request """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'auth_request', (AuthenticationRequest, AuthenticationRequest.thrift_spec), 
None, ), # 1 ) def __init__(self, auth_request=None,): self.auth_request = auth_request def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.auth_request = AuthenticationRequest() self.auth_request.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('login_args') if self.auth_request != None: oprot.writeFieldBegin('auth_request', TType.STRUCT, 1) self.auth_request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.auth_request is None: raise TProtocol.TProtocolException(message='Required field auth_request is unset!') return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class login_result: """ Attributes: - authnx - authzx """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'authnx', (AuthenticationException, AuthenticationException.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'authzx', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2 ) def __init__(self, authnx=None, authzx=None,): self.authnx = authnx self.authzx = authzx def 
read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.authnx = AuthenticationException() self.authnx.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.authzx = AuthorizationException() self.authzx.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('login_result') if self.authnx != None: oprot.writeFieldBegin('authnx', TType.STRUCT, 1) self.authnx.write(oprot) oprot.writeFieldEnd() if self.authzx != None: oprot.writeFieldBegin('authzx', TType.STRUCT, 2) self.authzx.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class set_keyspace_args: """ Attributes: - keyspace """ thrift_spec = ( None, # 0 (1, TType.STRING, 'keyspace', None, None, ), # 1 ) def __init__(self, keyspace=None,): self.keyspace = keyspace def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None 
and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.keyspace
output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) output[1, :] = (params[0, :] * 1.0) / 1.0 output[2, :] = (params[1, :] * 3.0) / 3.0 ``` Raises: TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is neither `None` nor `SparseTensor`. ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}. """ if combiner is None: logging.warn("The default value of combiner will change from \"mean\" " "to \"sqrtn\" after 2016/11/01.") combiner = "mean" if combiner not in ("mean", "sqrtn", "sum", "tile"): raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'") if isinstance(params, variables.PartitionedVariable): params = list(params) # Iterate to get the underlying Variables. if not isinstance(params, list): params = [params] if not isinstance(sp_ids, sparse_tensor.SparseTensor): raise TypeError("sp_ids must be SparseTensor") ignore_weights = sp_weights is None if not ignore_weights: if not isinstance(sp_weights, sparse_tensor.SparseTensor): raise TypeError("sp_weights must be either None or SparseTensor") sp_ids.values.get_shape().assert_is_compatible_with( sp_weights.values.get_shape()) sp_ids.indices.get_shape().assert_is_compatible_with( sp_weights.indices.get_shape()) sp_ids.dense_shape.get_shape().assert_is_compatible_with( sp_weights.dense_shape.get_shape()) # TODO(yleon): Add enhanced node assertions to verify that sp_ids and # sp_weights have equal indices and shapes. 
with ops.name_scope(name, "embedding_lookup_sparse", params + [sp_ids]) as name: segment_ids = sp_ids.indices[:, 0] if segment_ids.dtype != dtypes.int32: segment_ids = math_ops.cast(segment_ids, dtypes.int32) ids = sp_ids.values if isinstance(params[0], kv_variable_ops.EmbeddingVariable) and params[0]._filter_freq == 0: ids, idx = array_ops.unique(ids) counts = None else: ids, idx, counts = array_ops.unique_with_counts(ids) uniqued_blocknums = None if blocknums is not None: if idx is None: raise ValueError("blocknums now require unqiue index to be generagted") else: uniqued_blocknums = math_ops.unsorted_segment_max(blocknums, idx, array_ops.squeeze(array_ops.shape(ids), 0)) embeddings = embedding_lookup( params, ids, partition_strategy=partition_strategy, max_norm=max_norm, blocknums=uniqued_blocknums, counts = counts) if embeddings.dtype in (dtypes.float16, dtypes.bfloat16): embeddings = math_ops.cast(embeddings, dtypes.float32) if not ignore_weights: weights = sp_weights.values if weights.dtype != embeddings.dtype: weights = math_ops.cast(weights, embeddings.dtype) embeddings = array_ops.gather(embeddings, idx) # Reshape weights to allow broadcast ones = array_ops.fill( array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1) bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones], 0) orig_weights_shape = weights.get_shape() weights = array_ops.reshape(weights, bcast_weights_shape) # Set the weight shape, since after reshaping to bcast_weights_shape, # the shape becomes None. 
if embeddings.get_shape().ndims is not None: weights.set_shape( orig_weights_shape.concatenate( [1 for _ in range(embeddings.get_shape().ndims - 1)])) embeddings *= weights if combiner == "sum": embeddings = math_ops.segment_sum(embeddings, segment_ids, name=name) elif combiner == "mean": embeddings = math_ops.segment_sum(embeddings, segment_ids) weight_sum = math_ops.segment_sum(weights, segment_ids) embeddings = math_ops.div(embeddings, weight_sum, name=name) elif combiner == "sqrtn": embeddings = math_ops.segment_sum(embeddings, segment_ids) weights_squared = math_ops.pow(weights, 2) weight_sum = math_ops.segment_sum(weights_squared, segment_ids) weight_sum_sqrt = math_ops.sqrt(weight_sum) embeddings = math_ops.div(embeddings, weight_sum_sqrt, name=name) elif combiner == "tile": column_ids = sp_ids.indices[:, 1] embeddings = _tile_combine_embedding(embeddings, segment_ids, column_ids, sp_ids.dense_shape) else: assert False, "Unrecognized combiner" else: assert idx is not None if combiner == "sum": embeddings = math_ops.sparse_segment_sum( embeddings, idx, segment_ids, name=name) elif combiner == "mean": embeddings = math_ops.sparse_segment_mean( embeddings, idx, segment_ids, name=name) elif combiner == "sqrtn": embeddings = math_ops.sparse_segment_sqrt_n( embeddings, idx, segment_ids, name=name) elif combiner == "tile": embeddings = array_ops.gather(embeddings, idx) column_ids = sp_ids.indices[:, 1] embeddings = _tile_combine_embedding(embeddings, segment_ids, column_ids, sp_ids.dense_shape) else: assert False, "Unrecognized combiner" return embeddings @tf_export(v1=["nn.adaptive_embedding_lookup_sparse"]) def adaptive_embedding_lookup_sparse(hash_params, ev_params, sp_ids, hash_ev_ids, sp_weights, partition_strategy="mod", name=None, combiner=None, max_norm=None, bucket_size=None, adaptive_mask_tensor=None, blocknums=None): """Computes embeddings for the given ids and weights. 
This op assumes that there is at least one id for each row in the dense tensor represented by sp_ids (i.e. there are no rows with empty features), and that all the indices of sp_ids are in canonical row-major order. It also assumes that all id values lie in the range [0, p0), where p0 is the sum of the size of params along dimension 0. Args: hash_params: A single tensor representing the complete embedding tensor, by normal Variable. or a list of P tensors all of same shape except for the first dimension, representing sharded embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. Each element must be appropriately sized for the given `partition_strategy`. ev_params: A single tensor representing the complete embedding tensor, by EmbeddingVariable or a list of P tensors all of same shape except for the first dimension, representing sharded embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. Each element must be appropriately sized for the given `partition_strategy`. sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId), where N is typically batch size and M is arbitrary. sp_weights: either a SparseTensor of float / double weights, or None to indicate all weights should be taken to be 1. If specified, sp_weights must have exactly the same shape and indices as sp_ids. partition_strategy: A string specifying the partitioning strategy, relevant if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: Optional name for the op. combiner: A string specifying the reduction op. Currently "mean", "sqrtn" and "sum" are supported. "sum" computes the weighted sum of the embedding results for each row. "mean" is the weighted sum divided by the total weight. "sqrtn" is the weighted sum divided by the square root of the sum of the squares of the weights. 
max_norm: If provided, each embedding is normalized to have l2 norm equal to max_norm before combining. Returns: A dense tensor representing the combined embeddings for the sparse ids. For each row in the dense tensor represented by sp_ids, the op looks up the embeddings for all ids in that row, multiplies them by the corresponding weight, and combines these embeddings as specified. In other words, if shape(combined params) = [p0, p1, ..., pm] and shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn] then shape(output) = [d0, d1, ..., dn-1, p1, ..., pm]. For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are [0, 0]: id 1, weight 2.0 [0, 1]: id 3, weight 0.5 [1, 0]: id 0, weight 1.0 [2, 3]: id 1, weight 3.0 with `combiner`="mean", then the output will be a 3x20 matrix where output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) output[1, :] = params[0, :] * 1.0 output[2, :] = params[1, :] * 3.0 Raises: TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither None nor SparseTensor. ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}. """ if combiner is None: #logging.warn("The default value of combiner will change from \"mean\" " # "to \"sqrtn\" after 2016/11/01.") combiner = "mean" if combiner not in ("mean", "sqrtn", "sum"): raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'") # convert hash and ev to list if isinstance(hash_params, variables.PartitionedVariable): hash_params = list(hash_params) # Iterate to get the underlying Variables. if not isinstance(hash_params, list): hash_params = [hash_params] if isinstance(ev_params, variables.PartitionedVariable): ev_params = list(ev_params) # Iterate to get the underlying Variables. if not isinstance(ev_params, list): ev_params = [ev_params] if len(hash_params) < 1 or len(ev_params) < 1: raise ValueError("Missing hash_params: %s, ev_params:." 
% hash_params, ev_params) if not isinstance(sp_ids, sparse_tensor.SparseTensor): raise TypeError("sp_ids must be SparseTensor") ignore_weights = sp_weights is None if not ignore_weights: if not isinstance(sp_weights, sparse_tensor.SparseTensor): raise TypeError("sp_weights must be either None or SparseTensor") sp_ids.values.get_shape().assert_is_compatible_with( sp_weights.values.get_shape()) sp_ids.indices.get_shape().assert_is_compatible_with( sp_weights.indices.get_shape()) sp_ids.dense_shape.get_shape().assert_is_compatible_with( sp_weights.dense_shape.get_shape()) # TODO(yleon): Add enhanced node assertions to verify that sp_ids and # sp_weights have equal indices and shapes. if not ignore_weights: raise ValueError("AdaptiveEmbedding lookup not support not ignore weights") if adaptive_mask_tensor is None: raise ValueError("AdaptiveEmbedding lookup not support not ignore weights") with ops.name_scope(name, "embedding_lookup_sparse", ev_params + [sp_ids]) as name: segment_ids = sp_ids.indices[:, 0] if segment_ids.dtype != dtypes.int32: segment_ids = math_ops.cast(segment_ids, dtypes.int32) ids = sp_ids.values flat_ids = array_ops.reshape(ids, [-1]) original_indices = math_ops.range(array_ops.size(flat_ids)) parts = data_flow_ops.dynamic_partition(original_indices, adaptive_mask_tensor, 2) spids_part = data_flow_ops.dynamic_partition(flat_ids, adaptive_mask_tensor, 2) hash_ids, hash_idx = array_ops.unique(spids_part[0]) #ev_ids, ev_idx = array_ops.unique(spids_part[1]) hash_embeddings = embedding_lookup( hash_params, hash_ids, partition_strategy=partition_strategy, max_norm=max_norm, blocknums=None) ev_init_value = embedding_lookup( hash_params, hash_ev_ids, partition_strategy=partition_strategy, max_norm=max_norm, blocknums=None) ev_embeddings = embedding_lookup( ev_params, spids_part[1], partition_strategy=partition_strategy, max_norm=max_norm, ev_init_value=ev_init_value, blocknums=None) if (hash_idx is not None): hash_segment_ids = math_ops.range(0, 
array_ops.squeeze(array_ops.shape(hash_idx)), 1) #ev_segment_ids = math_ops.range(0, array_ops.squeeze(array_ops.shape(spids_part[1])), 1) if combiner == "sum": hash_embeddings = math_ops.sparse_segment_sum( hash_embeddings, hash_idx, hash_segment_ids, name=name+"_hash") #ev_embeddings = math_ops.sparse_segment_sum( #
twist. This option has meaning only when *dimensionality*=AXISYMMETRIC. Possible values are ON and OFF. The default value is OFF. Returns ------- part: Part A Part object """ pass def PartFromOdb(self, name: str, odb: str, fileName: str = '', instance: str = '', elementSet: str = '', shape: SymbolicConstant = UNDEFORMED, step: int = None, frame: int = None, twist: Boolean = OFF): """This method creates an orphan mesh Part object by reading an output database. The new part is placed in the parts repository. Notes ----- This function can be accessed by: .. code-block:: python mdb.models[name].Part Parameters ---------- name A String specifying the repository key. odb An output database object. fileName A String specifying the name of the output database file from which to create the part. The default value is an empty string. instance A String specifying the part instance in the output database from which to create the part. If no instance name is specified, Abaqus creates an orphan mesh part from the first part instance in the output database. elementSet A String specifying an element set defined on the output database. Only elements from this set will be imported. The default is to import all element sets. shape A SymbolicConstant specifying the configuration state. Possible values are UNDEFORMED and DEFORMED. The default value is UNDEFORMED. step An Int specifying the step number for reading deformed coordinates. 0≤step≤N−10≤step≤N-1 where NN is the number of available steps. The default value is the last available step. You should specify the *step* argument only when *shape*=DEFORMED. frame An Int specifying the frame number for reading deformed coordinates. 0≤frame≤N−10≤frame≤N-1 where NN is the number of available frames. The default value is the last available frame. You should specify the *frame* argument only when *shape*=DEFORMED. 
twist A Boolean specifying whether to include a twist DEGREE OF FREEDOM in the part (only available when *dimensionality*=AXISYMMETRIC and *type*=DEFORMABLE_BODY). The default value is OFF. Returns ------- part: Part A Part object - If the output database contains elements of more than one dimensionality or type: File contains both axisymmetric and nonaxisymmetric elements.File contains both 2D and 3D elements.File contains both rigid and deformable elements. - If more than one part is found on the output database: PartError: importing of more than one part is not currently supported - If the output database does not contain any valid results for the specified step: Error. File does not contain any valid frames. - If the specified step and frame do not contain any displacements: Error. Specified frame does not contain nodal displacements. - If the specified element set is not found on the output database: Error. Specified element set is not defined in the ODB. - If the step number is invalid: OdiError: Invalid step index: i. Available step indices: 0 - j. - If the frame number is invalid: OdiError: Invalid frame index: i. Available frame indices: 0 - j. """ pass def PartFromSection3DMeshByPlane(self, name: str, part: 'PartBase', point1: float, point2: float, point3: tuple): """This method creates a Part object by cutting an existing three-dimensional orphan mesh Part object by a plane and places it in the parts repository. This method is valid only for orphan mesh parts composed of 8-node brick elements. Notes ----- This function can be accessed by: .. code-block:: python mdb.models[name].Part Parameters ---------- name A String specifying the repository key. part A Part object specifying an existing three-dimensional orphan mesh part. point1 A Sequence of three Floats specifying a point on the cutting plane. This point is the local origin in the local system of the plane. 
point2 A Sequence of three Floats specifying a point in the direction of the normal to the cutting plane. This point must not be coincident with *point1*. point3 A sequence of three Floats specifying the direction of the local 1-axis in the local system of the plane. This point must not project onto *point1*. Returns ------- part: Part A Part object - If the specified part is not an orphan mesh part: Cannot reduce dimension of a geometric part. - If the specified part is not three-dimensional: Cannot reduce dimension of a 2D part. - If the specified part is a rigid body: Cannot change dimension of a rigid body. - If *point1* and *point2* are coincident: Cutting plane director has zero length. - If *point3* projects onto *point1*: Local axis point projects to origin. - If no elements are cut by the specified plane: Cannot reduce part dimension. """ pass def PartFromSubstructure(self, name: str, substructureFile: str, odbFile: str): """This method creates a substructure Part object by reading a substructure sim file and places it in the parts repository. Notes ----- This function can be accessed by: .. code-block:: python mdb.models[name].Part Parameters ---------- name A String specifying the repository key. substructureFile A substructure sim file. odbFile The output database file corresponding to the substructure sim file. Returns ------- part: Part A Part object - If the specified part is not a substructure: File specified does not contain a substructure. - If the specified part already exists: A part with the same name already exists. - If the substructure cannot be imported: The output database is missing nodes and elements.Nested substructures are not supported.The substructure sim file was generated using a version that is different from the current version. 
""" pass def Part2DGeomFrom2DMesh(self, name: str, part: 'PartBase', featureAngle: float, splineCurvatureLimit: float = 90, twist: Boolean = OFF): """This method creates a geometric Part object from the outline of an existing two-dimensional orphan mesh Part object and places it in the parts repository. If the Part2DGeomFrom2DMesh method cannot create a valid two-dimensional shell section from the two-dimensional mesh, the method fails and creates an empty geometry part with a failed base shell feature. Notes ----- This function can be accessed by: .. code-block:: python mdb.models[name].Part Parameters ---------- name A String specifying the repository key. part A Part object specifying an existing two-dimensional orphan mesh Part object. featureAngle A Float specifying the angle (in degrees) between line segments that triggers a break in the geometry. splineCurvatureLimit A Float specifying the traversal angle in degrees of the spline that triggers a break in the geometry. The default value is 90. twist A Boolean specifying whether to include a twist DEGREE OF FREEDOM in the part (only available when *dimensionality*=AXISYMMETRIC and *type*=DEFORMABLE_BODY). The default value is OFF. Returns ------- part: Part A Part object If the specified part is not an orphan mesh part: Specified part must be an orphan mesh. If the Part2DGeomFrom2DMesh method cannot create a valid two-dimensional shell section from the two-dimensional mesh: Planar shell feature failed If the specified part is not two-dimensional: Cannot create a geometry from a 3D part. If the specified part is a rigid body: Cannot create a geometry from a rigid body. """ pass def setValues(self, *args, **kwargs): """This method modifies the Part object. Raises ------ RangeError """ pass def addGeomToSketch(self, sketch: ConstrainedSketch): """This method converts a part into a sketch by projecting all of the edges of the part onto the X-Y plane of the sketch. 
You can use addGeomToSketch with a part of any modeling space. Parameters ----------
<gh_stars>0
# Unit tests for rpython.rlib.rstrategies (Python 2 / RPython toolchain).
# Builds a tiny object model (W_*) plus a family of storage strategies for
# W_List, then exercises initialization, store, insert, delete and the
# automatic strategy transitions performed by the Factory.
import py
from rpython.rlib.rstrategies import rstrategies as rs
from rpython.rlib.objectmodel import import_from_mixin
from rpython.compat import with_metaclass

# === Define small model tree

class W_AbstractObject(object):
    pass

class W_Object(W_AbstractObject):
    pass

class W_Integer(W_AbstractObject):
    # Boxed integer; equality compares the unboxed value.
    def __init__(self, value):
        self.value = value
    def __eq__(self, other):
        return isinstance(other, W_Integer) and self.value == other.value

class W_List(W_AbstractObject):
    # Thin wrapper: every operation delegates to the currently installed
    # strategy object (self.strategy).
    rs.make_accessors()
    def __init__(self, strategy=None, size=0, elements=None):
        self.strategy = None
        if strategy:
            factory.set_initial_strategy(self, strategy, size, elements)
    def fetch(self, i):
        assert self.strategy
        return self.strategy.fetch(self, i)
    def store(self, i, value):
        assert self.strategy
        return self.strategy.store(self, i, value)
    def size(self):
        assert self.strategy
        return self.strategy.size(self)
    def insert(self, index0, list_w):
        assert self.strategy
        return self.strategy.insert(self, index0, list_w)
    def delete(self, start, end):
        assert self.strategy
        return self.strategy.delete(self, start, end)
    def append(self, list_w):
        assert self.strategy
        return self.strategy.append(self, list_w)
    def pop(self, index0):
        assert self.strategy
        return self.strategy.pop(self, index0)
    def slice(self, start, end):
        assert self.strategy
        return self.strategy.slice(self, start, end)
    def fetch_all(self):
        assert self.strategy
        return self.strategy.fetch_all(self)
    def store_all(self, elements):
        assert self.strategy
        return self.strategy.store_all(self, elements)

# Singleton used as the 'nil' element throughout the tests.
w_nil = W_Object()

# === Define concrete strategy classes

@with_metaclass(rs.StrategyMetaclass)
class AbstractStrategy(object):
    import_from_mixin(rs.AbstractStrategy)
    import_from_mixin(rs.SafeIndexingMixin)
    def __init__(self, factory, w_self=None, size=0):
        self.factory = factory
    def strategy_factory(self):
        return self.factory

class Factory(rs.StrategyFactory):
    # Records every (old_strategy, new_strategy) switch so tests can assert
    # on the exact transition sequence. Class-level, shared across instances.
    switching_log = []

    def __init__(self, root_class):
        # Declare the allowed generalization transitions between strategies.
        self.decorate_strategies({
            EmptyStrategy: [NilStrategy, IntegerStrategy, IntegerOrNilStrategy, GenericStrategy],
            NilStrategy: [IntegerOrNilStrategy, GenericStrategy],
            GenericStrategy: [],
            IntegerStrategy: [IntegerOrNilStrategy, GenericStrategy],
            IntegerOrNilStrategy: [GenericStrategy],
        })
        rs.StrategyFactory.__init__(self, root_class)

    def instantiate_strategy(self, strategy_type, w_self=None, size=0):
        return strategy_type(self, w_self, size)

    def set_strategy(self, w_list, strategy):
        # Log the transition before delegating to the base implementation.
        old_strategy = self.get_strategy(w_list)
        self.switching_log.append((old_strategy, strategy))
        super(Factory, self).set_strategy(w_list, strategy)

    def clear_log(self):
        del self.switching_log[:]

class EmptyStrategy(AbstractStrategy):
    import_from_mixin(rs.EmptyStrategy)
    # TODO - implement and test transition from Generic back to Empty

class NilStrategy(AbstractStrategy):
    # All elements are w_nil.
    import_from_mixin(rs.SingleValueStrategy)
    def value(self): return w_nil

class GenericStrategy(AbstractStrategy):
    # Catch-all strategy; uses unsafe (assert-based) index checking.
    import_from_mixin(rs.GenericStrategy)
    import_from_mixin(rs.UnsafeIndexingMixin)
    def default_value(self): return w_nil

class WeakGenericStrategy(AbstractStrategy):
    import_from_mixin(rs.WeakGenericStrategy)
    def default_value(self): return w_nil

class IntegerStrategy(AbstractStrategy):
    # Stores only W_Integer values, unboxed.
    import_from_mixin(rs.SingleTypeStrategy)
    contained_type = W_Integer
    def wrap(self, value): return W_Integer(value)
    def unwrap(self, value): return value.value
    def default_value(self): return W_Integer(0)

class IntegerOrNilStrategy(AbstractStrategy):
    # Stores W_Integer values plus w_nil, with w_nil encoded as a tag value.
    import_from_mixin(rs.TaggingStrategy)
    contained_type = W_Integer
    def wrap(self, value): return W_Integer(value)
    def unwrap(self, value): return value.value
    def default_value(self): return w_nil
    def wrapped_tagged_value(self): return w_nil
    # sys.maxint is the unboxed representation reserved for the tag (Python 2).
    def unwrapped_tagged_value(self): import sys; return sys.maxint

@rs.strategy(generalize=[], singleton=False)
class NonSingletonStrategy(GenericStrategy):
    # One instance per list (singleton=False); remembers its list and size.
    def __init__(self, factory, w_list=None, size=0):
        super(NonSingletonStrategy, self).__init__(factory, w_list, size)
        self.w_list = w_list
        self.the_size = size

class NonStrategy(NonSingletonStrategy):
    # Not decorated with @rs.strategy, so _is_strategy stays False.
    pass

@rs.strategy(generalize=[])
class InefficientStrategy(GenericStrategy):
    # Forces the generic element-by-element storage conversion path.
    def _convert_storage_from(self, w_self, previous_strategy):
        return AbstractStrategy._convert_storage_from(self, w_self, previous_strategy)

factory = Factory(AbstractStrategy)

def check_contents(list, expected):
    # Assert list contents element-wise (note: 'list' shadows the builtin).
    assert list.size() == len(expected)
    for i, val in enumerate(expected):
        assert list.fetch(i) == val

def teardown():
    factory.clear_log()

# === Test Initialization and fetch

def test_setup():
    pass

def test_factory_setup():
    expected_strategies = 7
    assert len(factory.strategies) == expected_strategies
    assert len(set(factory.strategies)) == len(factory.strategies)
    for strategy in factory.strategies:
        assert isinstance(factory.strategy_singleton_instance(strategy), strategy)

def test_factory_setup_singleton_instances():
    # Each factory owns its own singleton strategy instances.
    new_factory = Factory(AbstractStrategy)
    s1 = factory.strategy_singleton_instance(GenericStrategy)
    s2 = new_factory.strategy_singleton_instance(GenericStrategy)
    assert s1 is not s2
    assert s1.strategy_factory() is factory
    assert s2.strategy_factory() is new_factory

def test_metaclass():
    assert NonStrategy._is_strategy == False
    assert IntegerOrNilStrategy._is_strategy == True
    assert IntegerOrNilStrategy._is_singleton == True
    assert NonSingletonStrategy._is_singleton == False
    assert NonStrategy._is_singleton == False
    assert NonStrategy.get_storage is not NonSingletonStrategy.get_storage

def test_singletons():
    def do_test_singletons(cls, expected_true):
        l1 = W_List(cls, 0)
        l2 = W_List(cls, 0)
        if expected_true:
            assert l1.strategy is l2.strategy
        else:
            assert l1.strategy is not l2.strategy
    do_test_singletons(EmptyStrategy, True)
    do_test_singletons(NonSingletonStrategy, False)
    do_test_singletons(NonStrategy, False)
    do_test_singletons(GenericStrategy, True)

def do_test_initialization(cls, default_value=w_nil, is_safe=True):
    # Fresh list of the given strategy must report the strategy's default
    # value everywhere and raise on out-of-bounds fetches.
    size = 10
    l = W_List(cls, size)
    s = l.strategy
    assert s.size(l) == size
    assert s.fetch(l,0) == default_value
    assert s.fetch(l,size/2) == default_value
    assert s.fetch(l,size-1) == default_value
    py.test.raises(IndexError, s.fetch, l, size)
    py.test.raises(IndexError, s.fetch, l, size+1)
    py.test.raises(IndexError, s.fetch, l, size+5)
    if is_safe:
        py.test.raises(IndexError, s.fetch, l, -1)
    else:
        # UnsafeIndexingMixin strategies fail the internal assert instead.
        py.test.raises(AssertionError, s.fetch, l, -1)

def test_init_Empty():
    l = W_List(EmptyStrategy, 0)
    s = l.strategy
    assert s.size(l) == 0
    py.test.raises(IndexError, s.fetch, l, 0)
    py.test.raises(IndexError, s.fetch, l, 10)
    py.test.raises(IndexError, s.delete, l, 0, 1)
    py.test.raises(AssertionError, W_List, EmptyStrategy, 2) # Only size 0 possible.

def test_init_Nil():
    do_test_initialization(NilStrategy)

def test_init_Generic():
    do_test_initialization(GenericStrategy, is_safe=False)

def test_init_WeakGeneric():
    do_test_initialization(WeakGenericStrategy)

def test_init_Integer():
    do_test_initialization(IntegerStrategy, default_value=W_Integer(0))

def test_init_IntegerOrNil():
    do_test_initialization(IntegerOrNilStrategy)

# === Test Simple store

def do_test_store(cls, stored_value=W_Object(), is_safe=True, is_varsize=False):
    # NOTE(review): default stored_value is evaluated once at definition time
    # and shared across calls — harmless here, every call compares identity-free.
    size = 10
    l = W_List(cls, size)
    s = l.strategy
    def store_test(index):
        s.store(l, index, stored_value)
        assert s.fetch(l, index) == stored_value
    store_test(0)
    store_test(size/2)
    store_test(size-1)
    if not is_varsize:
        py.test.raises(IndexError, s.store, l, size, stored_value)
        py.test.raises(IndexError, s.store, l, size+1, stored_value)
        py.test.raises(IndexError, s.store, l, size+5, stored_value)
    if is_safe:
        py.test.raises(IndexError, s.store, l, -1, stored_value)
    else:
        py.test.raises(AssertionError, s.store, l, -1, stored_value)

def test_store_Nil():
    do_test_store(NilStrategy, stored_value=w_nil)

def test_store_Generic():
    do_test_store(GenericStrategy, is_safe=False)

def test_store_WeakGeneric():
    do_test_store(WeakGenericStrategy, stored_value=w_nil)

def test_store_Integer():
    do_test_store(IntegerStrategy, stored_value=W_Integer(100))

def test_store_IntegerOrNil():
    do_test_store(IntegerOrNilStrategy, stored_value=W_Integer(100))
    do_test_store(IntegerOrNilStrategy, stored_value=w_nil)

# === Test Insert

def do_test_insert(cls, values):
    # Interleaved inserts must reassemble 'values' in order.
    l = W_List(cls, 0)
    assert len(values) >= 6
    values0 = values[0:1]
    values1 = values[1:2]
    values2 = values[2:4]
    values3 = values[4:6]
    l.insert(3, values0) # Will still be inserted at the very beginning
    check_contents(l, values0)
    l.insert(1, values1+values3)
    check_contents(l, values0+values1+values3)
    l.insert(2, values2)
    check_contents(l, values)
    return l

def test_insert_Nil():
    do_test_insert(NilStrategy, [w_nil]*6)

def test_insert_Generic():
    do_test_insert(GenericStrategy, [W_Object() for _ in range(6)])

def test_insert_WeakGeneric():
    do_test_insert(WeakGenericStrategy, [W_Object() for _ in range(6)])

def test_insert_Integer():
    do_test_insert(IntegerStrategy, [W_Integer(x) for x in range(6)])

def test_insert_IntegerOrNil():
    do_test_insert(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil])
    do_test_insert(IntegerOrNilStrategy, [w_nil]*6)

# === Test Delete

def do_test_delete(cls, values, indexing_unsafe=False):
    assert len(values) >= 6
    l = W_List(cls, len(values), values)
    if not indexing_unsafe:
        # end < start must be rejected by safe-indexing strategies.
        py.test.raises(IndexError, l.delete, 2, 1)
    l.delete(2, 4)
    del values[2: 4]
    check_contents(l, values)
    l.delete(1, 2)
    del values[1: 2]
    check_contents(l, values)

def test_delete_Nil():
    do_test_delete(NilStrategy, [w_nil]*6)

def test_delete_Generic():
    do_test_delete(GenericStrategy, [W_Object() for _ in range(6)], indexing_unsafe=True)

def test_delete_WeakGeneric():
    do_test_delete(WeakGenericStrategy, [W_Object() for _ in range(6)])

def test_delete_Integer():
    do_test_delete(IntegerStrategy, [W_Integer(x) for x in range(6)])

def test_delete_IntegerOrNil():
    do_test_delete(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil])
    do_test_delete(IntegerOrNilStrategy, [w_nil]*6)

# === Test Transitions

def test_CheckCanHandle():
    def assert_handles(cls, good, bad):
        s = cls(0)
        for val in good:
            assert s._check_can_handle(val)
        for val in bad:
            assert not s._check_can_handle(val)
    obj = W_Object()
    i = W_Integer(0)
    nil = w_nil
    assert_handles(EmptyStrategy, [], [nil, obj, i])
    assert_handles(NilStrategy, [nil], [obj, i])
    assert_handles(GenericStrategy, [nil, obj, i], [])
    assert_handles(WeakGenericStrategy, [nil, obj, i], [])
    assert_handles(IntegerStrategy, [i], [nil, obj])
    assert_handles(IntegerOrNilStrategy, [nil, i], [obj])

def do_test_transition(OldStrategy, value, NewStrategy, initial_size=10):
    # Storing a value the old strategy cannot handle must trigger exactly
    # one generalization switch, recorded in the factory's log.
    w = W_List(OldStrategy, initial_size)
    old = w.strategy
    w.store(0, value)
    assert isinstance(w.strategy, NewStrategy)
    assert factory.switching_log == [(None, old), (old, w.strategy)]

def test_AllNil_to_Generic():
    do_test_transition(NilStrategy, W_Object(), GenericStrategy)

def test_AllNil_to_IntegerOrNil():
    do_test_transition(NilStrategy, W_Integer(0), IntegerOrNilStrategy)

def test_IntegerOrNil_to_Generic():
    do_test_transition(IntegerOrNilStrategy, W_Object(), GenericStrategy)

def test_Integer_to_IntegerOrNil():
    do_test_transition(IntegerStrategy, w_nil, IntegerOrNilStrategy)

def test_Generic_to_AllNil():
    w = W_List(GenericStrategy, 5)
    old = w.strategy
    factory.switch_strategy(w, NilStrategy)
    assert isinstance(w.strategy, NilStrategy)
    assert factory.switching_log == [(None, old), (old, w.strategy)]

def test_Integer_Generic():
    do_test_transition(IntegerStrategy, W_Object(), GenericStrategy)

def test_TaggingValue_not_storable():
    tag = IntegerOrNilStrategy(10).unwrapped_tagged_value() # sys.maxint
    # The reserved tag value cannot be stored unboxed, so this generalizes.
    do_test_transition(IntegerOrNilStrategy, W_Integer(tag), GenericStrategy)

def test_insert_StrategySwitch_IntOrNil():
    o = W_Object()
    l = do_test_insert(IntegerOrNilStrategy, [W_Integer(1), w_nil, o, o, w_nil, W_Integer(3)])
    assert isinstance(l.strategy, GenericStrategy)

def test_insert_StrategySwitch_AllNil():
    o = W_Object()
    l = do_test_insert(NilStrategy, [w_nil, w_nil, o, o, w_nil, w_nil])
    assert isinstance(l.strategy, GenericStrategy)

def test_transition_to_nonSingleton():
    # Non-singleton strategies create a new instance on every switch.
    l = W_List(NilStrategy, 5)
    factory.switch_strategy(l, NonSingletonStrategy)
    strategy1 = l.strategy
    assert isinstance(strategy1, NonSingletonStrategy)
    factory.switch_strategy(l, NonSingletonStrategy)
    assert strategy1 != l.strategy

def test_generic_convert_storage():
    l = W_List(NilStrategy, 5)
    # This triggers AbstractStrategy._convert_storage_from
    factory.switch_strategy(l, InefficientStrategy)
    assert isinstance(l.strategy, InefficientStrategy)
    assert l.fetch_all() == [w_nil] * 5

def test_Empty_store():
    l = W_List(EmptyStrategy, 0)
    o = W_Object()
    py.test.raises(IndexError, l.store, 0, o)
    py.test.raises(IndexError, l.store, 1, o)

def test_Empty_insert():
    # Inserting into an empty list picks the most specific strategy for
    # the inserted value.  (default_element is currently unused.)
    def do_insert(obj, expected_strategy, default_element=w_nil):
        l = W_List(EmptyStrategy, 0)
        l.insert(0, [obj])
        assert l.size() == 1
        assert isinstance(l.strategy, expected_strategy)
        assert l.fetch_all() == [obj]
        # Also test insert with too-high index
        l = W_List(EmptyStrategy, 0)
        l.insert(5, [obj])
        assert l.fetch_all() == [obj]
    do_insert(W_Object(), GenericStrategy)
    do_insert(w_nil, NilStrategy)
    do_insert(W_Integer(1), IntegerStrategy)

# === Test helper methods

def generic_list():
    values = [W_Object() for _ in range(6)]
    return W_List(GenericStrategy, len(values), values), values

def test_slice():
    l, v = generic_list()
    assert l.slice(2, 4) == v[2:4]

def test_fetch_all():
    l, v = generic_list()
    assert l.fetch_all() == v

def test_append():
    l, v = generic_list()
    o1 = W_Object()
    o2 = W_Object()
    l.append([o1])
    assert l.fetch_all() == v + [o1]
    l.append([o1, o2])
    assert l.fetch_all() == v + [o1, o1, o2]

def test_pop():
    l, v = generic_list()
    o = l.pop(3)
    del v[3]
    assert l.fetch_all() == v
    o = l.pop(3)
    del v[3]
    assert l.fetch_all() == v

def test_store_all():
    # store_all overwrites a prefix; longer input than the list raises.
    l, v = generic_list()
    v2 = [W_Object() for _ in range(4) ]
    v3 = [W_Object() for _ in range(l.size()) ]
    assert v2 != v
    assert v3 != v
    l.store_all(v2)
    assert l.fetch_all() == v2+v[4:]
    l.store_all(v3)
    assert l.fetch_all() == v3
    py.test.raises(IndexError, l.store_all, [W_Object() for _ in range(8) ])

# === Test Weak Strategy
# TODO

# === Other tests

def test_optimized_strategy_switch(monkeypatch):
    # Verify the specialized _convert_storage_from_NilStrategy shortcut is
    # preferred over the generic conversion routine.
    l = W_List(NilStrategy, 5)
    s = l.strategy
    s.copied = 0
    def convert_storage_from_default(self, w_self, other):
        assert False, "The default convert_storage_from() should not be called!"
    def convert_storage_from_special(self, w_self, other):
        s.copied += 1
    monkeypatch.setattr(AbstractStrategy, "_convert_storage_from_NilStrategy", convert_storage_from_special)
    monkeypatch.setattr(AbstractStrategy, "_convert_storage_from", convert_storage_from_default)
    try:
        factory.switch_strategy(l, IntegerOrNilStrategy)
    finally:
        monkeypatch.undo()
    assert s.copied == 1, "Optimized switching routine not called exactly one time."

def test_strategy_type_for(monkeypatch):
    assert factory.strategy_type_for([w_nil, w_nil]) == NilStrategy
    assert factory.strategy_type_for([W_Integer(2), W_Integer(1)]) == IntegerStrategy
    assert factory.strategy_type_for([w_nil, W_Integer(2), w_nil]) == IntegerOrNilStrategy
    assert factory.strategy_type_for([w_nil, W_Integer(2), W_Object()]) == GenericStrategy
    assert factory.strategy_type_for([W_Integer(2), w_nil, W_Object()]) == GenericStrategy
    assert factory.strategy_type_for([W_Object(), W_Integer(2), w_nil]) == GenericStrategy
    assert factory.strategy_type_for([]) == EmptyStrategy
    monkeypatch.setattr(GenericStrategy, '_check_can_handle', lambda self, o: False)
blobUrl=self.sapRfcSdkBlobUrl, storageAccount=blobStorageAccount) if (not doesPackageExist): self.tracer.error("%s User provided RFC SDK blob does not exist %s, skipping...", self.logTag, self.sapRfcSdkBlobUrl) return False self.tracer.info("%s user provided RFC SDK blob exists for download %s, lastModified=%s", self.logTag, self.sapRfcSdkBlobUrl, packageLastModifiedTime) # the user provided sdk blob exists, so before we download compare the last_modified timestamp # with the last modified time of the last download attempt. If nothing has changed, # then no need to try and download the package again # TODO: confirm, should we go ahead and try to re-download previously failed packages # once every 30 minutes anyway? just in case failure was something external? lastInstallPackageModifiedTime = installer.getLastSdkInstallPackageModifiedTime() if (packageLastModifiedTime == lastInstallPackageModifiedTime): self.tracer.info("%s rfc sdk download package has not been modified since last download " + "attempt (last_modified=%s), will not download again", self.logTag, lastInstallPackageModifiedTime) return False self.tracer.info("%s user provided rfc sdk package last_modified (%s) has changed " + "since last install attempt (%s), attempting to re-download and install", self.logTag, packageLastModifiedTime, lastInstallPackageModifiedTime) # try to download user provided RFC SDK blob, install to local system and configure necessary # environment variables and system settings so that it can be usable by pyrfc module if (not installer.downloadAndInstallRfcSdk(blobUrl=self.sapRfcSdkBlobUrl, storageAccount=blobStorageAccount)): self.tracer.error("%s failed to download and install rfc sdk package, RFC calls will not be enabled...", self.logTag) return False # on Linux pyrfc module may not be usable upon first install attempt, as it appears that unpacking # libraries to the LD_LIBRARY_PATH env variable after the python process starts may not pick up the change. 
            # The module should be usable on the next sapmon process run.
            if (not installer.isPyrfcModuleUsable()):
                self.tracer.error("%s pyrfc module still not usable after RFC SDK install (might require process restart), " +
                                  "RFC calls will not be enabled...",
                                  self.logTag)
                return False

            self.tracer.info("%s pyrfc module is usable after RFC SDK install, RFC calls will be enabled...", self.logTag)
            return True

        except Exception as e:
            self.tracer.error("%s exception trying to setup and validate RFC SDK, RFC calls will be disabled: %s",
                              self.logTag, e, exc_info=True)
            return False

###########################
class sapNetweaverProviderCheck(ProviderCheck):
    # most recently collected result rows (shared default; overwritten per run)
    lastResult = []

    # hard-coded set of action names that require RFC SDK to be usable
    # and can override runtime isEnabled() check if RFC is not usable
    rfcCheckNames = {'SMON_Metrics', 'SWNC_Workload_Metrics', 'SDF_Short_Dumps_Metrics', 'Sys_Log_Metrics',
                     'Failed_Updates_Metrics', 'Batch_Jobs_Metrics', 'Inbound_Queues_Metrics',
                     'Outbound_Queues_Metrics', 'Enqueue_Read_Metrics'}

    def __init__(self,
                 provider: ProviderInstance,
                 **kwargs):
        super().__init__(provider, **kwargs)
        self.lastRunLocal = None
        self.lastRunServer = None

        # provider check common logging prefix
        self.logTag = "[%s][%s]" % (self.fullName, self.providerInstance.sapSid)

    """
    return flag indicating whether this check instances requires the SAP RFC SDK to be installed and usable
    """
    def doesCheckRequireRfcSdk(self) -> bool:
        return self.name in sapNetweaverProviderCheck.rfcCheckNames

    """
    override base ProviderCheck implementation to allow RFC metric collection methods enabled in
    the default Provider JSON configuration yet treated as disabled at runtime if RFC SDK
    is not configured (to reduce log spam)
    """
    def isEnabled(self) -> bool:
        if not self.state["isEnabled"]:
            return False

        # if this check requires RFC and RFC is not installed, then treat as disabled
        if (self.doesCheckRequireRfcSdk()):
            if (not self.providerInstance.areRfcMetricsEnabled()):
                return False

        return True

    # current UTC wall-clock time as an ISO-8601 string
    def _getFormattedTimestamp(self) -> str:
        return datetime.utcnow().isoformat()

    # serialize a single SOAP result object into a one-element list of dicts
    def _parseResult(self, result: object) -> list:
        return [helpers.serialize_object(result, dict)]

    # serialize a list of SOAP result objects into a list of dicts
    def _parseResults(self, results: list) -> list:
        return helpers.serialize_object(results, dict)

    # Ask the SAP message servers for the current server-side time; falls back
    # to local UTC time if no message server returns a usable 'date' header.
    def _getServerTimestamp(self) -> datetime:
        self.tracer.info("%s fetching current timestamp from message server", self.logTag)
        message_server_instances = self.providerInstance.getInstances(filterFeatures=['MESSAGESERVER'],
                                                                      filterType='include',
                                                                      useCache=True)
        date = datetime.fromisoformat(self._getFormattedTimestamp())

        # Get timestamp from the first message server that returns a valid date
        for instance in message_server_instances:
            hostname = instance['hostname']
            instanceNr = str(instance['instanceNr']).zfill(2)
            port = self.providerInstance.getMessageServerPortFromInstanceNr(instanceNr)
            hostname = self.providerInstance.getFullyQualifiedDomainName(hostname)
            message_server_endpoint = "http://%s:%s/" % (hostname, port)

            try:
                # We only care about the date in the response header. so we ignore the response body
                # 'Thu, 04 Mar 2021 05:02:12 GMT'
                # NOTE: we don't need to follow redirects because the redirect response itself 300-3XX
                # will have the 'date' header as well.  In some cases we were following a chain
                # of redirects that would terminate in a 404, which would not have the 'date' header
                response = requests.get(message_server_endpoint, allow_redirects=False)

                if ('date' not in response.headers):
                    raise Exception("no 'date' response header found for response status:%s/%s from:%s"
                                    % (response.status_code, response.reason, message_server_endpoint))

                date = datetime.strptime(response.headers['date'], '%a, %d %b %Y %H:%M:%S %Z')
                self.tracer.info("%s received message server %s header: %s, parsed time: %s",
                                 self.logTag, message_server_endpoint, response.headers['date'], date)
                break
            except Exception as e:
                self.tracer.info("%s suppressing expected error while fetching server time during HTTP GET request to url %s: %s ",
                                 self.logTag, message_server_endpoint, e)
        return date

    # Refresh the cached list of SAP system instances and publish it as this
    # check's result, stamping each entry with timestamps, SID and subdomain.
    def _actionGetSystemInstanceList(self) -> None:
        self.tracer.info("%s refreshing list of system instances", self.logTag)
        self.lastRunLocal = datetime.utcnow()

        # when performing the actual provider check action, always fetch fresh instance list snapshot and refresh the cache
        instanceList = self.providerInstance.getInstances(useCache=False)
        self.lastRunServer = self._getServerTimestamp()

        # Update host config, if new list is fetched
        # Parse dictionary and add current timestamp and SID to data and log it
        if len(instanceList) != 0:
            currentTimestamp = self._getFormattedTimestamp()
            for instance in instanceList:
                instance['timestamp'] = currentTimestamp
                instance['serverTimestamp'] = self.lastRunServer.isoformat()
                instance['SID'] = self.providerInstance.sapSid
                instance['subdomain'] = self.providerInstance.sapSubdomain

        self.lastResult = instanceList

        # Update internal state
        if not self.updateState():
            raise Exception("%s failed to update state" % self.logTag)

        self.tracer.info("%s successfully fetched system instance list", self.logTag)

    # Invoke a SOAP API against every instance that supports it, sanitize and
    # parse the results, stamp them with metadata, and store as lastResult.
    def _executeWebServiceRequest(self, apiName: str, filterFeatures: list, filterType: str,
                                  parser: Callable[[Any], list] = None) -> None:
        self.tracer.info("[%s] executing web service request: %s" % (self.fullName, apiName))
        self.lastRunLocal = datetime.utcnow()

        # track latency of entire method execution with dependencies
        startTime = time()

        if parser is None:
            parser = self._parseResults

        # Use cached list of instances if available since they don't change that frequently; else fetch afresh.
        # filter down to just the instances we need for this SOAP API type
        sapInstances = self.providerInstance.getInstances(useCache=True, filterFeatures=filterFeatures, filterType=filterType)

        self.lastRunServer = self._getServerTimestamp()

        if len(sapInstances) == 0:
            self.tracer.info("%s no instances found that support this API: %s", self.logTag, apiName)

        # Call web service
        all_results = []
        currentTimestamp = self._getFormattedTimestamp()
        for instance in sapInstances:
            # default to https unless the httpsPort was not defined, in which case fallback to http
            httpProtocol = "https"
            port = instance['httpsPort']
            if ((not port) or port == "0"):
                # fallback to http port instead
                httpProtocol = "http"
                port = instance['httpPort']

            results = []
            try:
                client = self.providerInstance.getClient(instance['hostname'], httpProtocol, port)
                results = self.providerInstance.callSoapApi(client, apiName)
                if(apiName == "GetProcessList"):
                    results = self._sanitizeGetProcessList(results)
                elif(apiName == "ABAPGetWPTable"):
                    results = self._sanitizeABAPGetWPTable(results)
            except Exception as e:
                # keep going: one unreachable instance should not fail the whole check
                self.tracer.error("%s unable to call the Soap Api %s - %s://%s:%s, %s",
                                  self.logTag, apiName, httpProtocol, instance['hostname'], port, e, exc_info=True)
                continue

            if len(results) != 0:
                parsed_results = parser(results)
                for result in parsed_results:
                    result['hostname'] = instance['hostname']
                    result['instanceNr'] = instance['instanceNr']
                    result['subdomain'] = self.providerInstance.sapSubdomain
                    result['timestamp'] = currentTimestamp
                    result['serverTimestamp'] = self.lastRunServer.isoformat()
                    result['SID'] = self.providerInstance.sapSid
                all_results.extend(parsed_results)

        if len(all_results) == 0:
            self.tracer.info("%s no results found for: %s", self.logTag, apiName)

        self.lastResult = all_results

        # Update internal state
        if not self.updateState():
            raise Exception("[%s] failed to update state for web service request: %s [%d ms]" % \
                (self.logTag, apiName, TimeUtils.getElapsedMilliseconds(startTime)))

        self.tracer.info("%s successfully processed web service request: %s [%d ms]",
                         self.logTag, apiName, TimeUtils.getElapsedMilliseconds(startTime))

    # generic multi-row web service action (list parser)
    def _actionExecuteGenericWebServiceRequest(self, apiName: str, filterFeatures: list, filterType: str) -> None:
        self._executeWebServiceRequest(apiName, filterFeatures, filterType, self._parseResults)

    # EnqGetStatistic returns a single object, so use the single-result parser
    def _actionExecuteEnqGetStatistic(self, apiName: str, filterFeatures: list, filterType: str) -> None:
        self._executeWebServiceRequest(apiName, filterFeatures, filterType, self._parseResult)

    """
    Method to parse the value based on the key provided and set the values with None value to empty string ''
    """
    def _getKeyValue(self, dictionary, key, apiName):
        if key not in dictionary:
            raise ValueError("Result received for api %s does not contain key: %s"% (apiName, key))
        if(dictionary[key] == None):
            dictionary[key] = ""
        return dictionary[key]

    """
    Method to parse the results from ABAPGetWPTable and set the strings with None value to empty string ''
    """
    def _sanitizeABAPGetWPTable(self, records: list) -> list:
        apiName = "ABAPGetWPTable"
        processed_results = list()
        for record in records:
            processed_result = {
                "Action": self._getKeyValue(record, 'Action', apiName),
                "Client": self._getKeyValue(record, 'Client', apiName),
                "Cpu": self._getKeyValue(record, 'Cpu', apiName),
                "Err": self._getKeyValue(record, 'Err', apiName),
                "No": self._getKeyValue(record, 'No', apiName),
                "Pid": self._getKeyValue(record, 'Pid', apiName),
                "Program": self._getKeyValue(record, 'Program', apiName),
                "Reason": self._getKeyValue(record, 'Reason', apiName),
                "Sem": self._getKeyValue(record, 'Sem', apiName),
                "Start": self._getKeyValue(record,
{ "depth":2, "flags":0, \n' ' "label":"C:\\Users\\athomas\\Documents\\Audacity\\Spoken ' 'Tones\\Gigs\\Highball\\2020-02-16\\Raw Tracks\\TRK15.WAV", "accel":"" ' '},\n' ' { "depth":2, "flags":0, \n' ' "label":"C:\\Users\\athomas\\Documents\\Audacity\\Spoken ' 'Tones\\Gigs\\Highball\\2020-02-16\\Raw Tracks\\TRK14.WAV", "accel":"" ' '},\n' ' { "depth":2, "flags":0, \n' ' "label":"C:\\Users\\athomas\\Documents\\Audacity\\Spoken ' 'Tones\\Gigs\\Highball\\2020-02-16\\Raw Tracks\\TRK13.WAV", "accel":"" ' '},\n' ' { "depth":2, "flags":0, \n' ' "label":"C:\\Users\\athomas\\Documents\\Audacity\\Spoken ' 'Tones\\Gigs\\Highball\\2020-02-16\\Raw Tracks\\TRK12.WAV", "accel":"" ' '},\n' ' { "depth":2, "flags":0, \n' ' "label":"C:\\Users\\athomas\\Documents\\Audacity\\Spoken ' 'Tones\\Gigs\\Highball\\2020-02-16\\Raw Tracks\\TRK11.WAV", "accel":"" ' '},\n' ' { "depth":2, "flags":0, \n' ' "label":"C:\\Users\\athomas\\Documents\\Audacity\\Spoken ' 'Tones\\Gigs\\Highball\\2020-02-16\\Raw Tracks\\TRK10.WAV", "accel":"" ' '},\n' ' { "depth":2, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Clear", "accel":"" },\n' ' { "depth":1, "flags":0, "label":"Close", "accel":"Ctrl+W", ' '"id":"Close" },\n' ' { "depth":1, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":1, "flags":1, "label":"Save Project", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Save Project", "accel":"Ctrl+S", ' '"id":"Save" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Save Project As...", "accel":"", "id":"SaveAs" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Save Lossless Copy of Project...", "accel":"", ' '"id":"SaveCopy" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Save Compressed Copy of Project...", "accel":"", ' '"id":"SaveCompressed" },\n' ' { "depth":1, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":1, "flags":1, "label":"Export", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Export as MP3", "accel":"", ' '"id":"ExportMp3" },\n' ' { "depth":2, 
"flags":0, "label":"Export as WAV", "accel":"", ' '"id":"ExportWav" },\n' ' { "depth":2, "flags":0, "label":"Export as OGG", "accel":"", ' '"id":"ExportOgg" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Export Audio...", "accel":"Ctrl+Shift+E", "id":"Export" ' '},\n' ' { "depth":2, "flags":0, \n' ' "label":"Export Selected Audio...", "accel":"", "id":"ExportSel" ' '},\n' ' { "depth":2, "flags":0, \n' ' "label":"Export Labels...", "accel":"", "id":"ExportLabels" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Export Multiple...", "accel":"Ctrl+Shift+L", ' '"id":"ExportMultiple" },\n' ' { "depth":2, "flags":0, "label":"Export MIDI...", "accel":"", ' '"id":"ExportMIDI" },\n' ' { "depth":1, "flags":1, "label":"Import", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Audio...", "accel":"Ctrl+Shift+I", ' '"id":"ImportAudio" },\n' ' { "depth":2, "flags":0, "label":"Labels...", "accel":"", ' '"id":"ImportLabels" },\n' ' { "depth":2, "flags":0, "label":"MIDI...", "accel":"", ' '"id":"ImportMIDI" },\n' ' { "depth":2, "flags":0, "label":"Raw Data...", "accel":"", ' '"id":"ImportRaw" },\n' ' { "depth":1, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":1, "flags":0, "label":"Page Setup...", "accel":"", ' '"id":"PageSetup" },\n' ' { "depth":1, "flags":0, "label":"Print...", "accel":"", "id":"Print" ' '},\n' ' { "depth":1, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":1, "flags":0, "label":"Exit", "accel":"Ctrl+Q", "id":"Exit" ' '},\n' ' { "depth":0, "flags":0, "label":"Edit", "accel":"" },\n' ' { "depth":1, "flags":0, "label":"Undo", "accel":"Ctrl+Z", "id":"Undo" ' '},\n' ' { "depth":1, "flags":0, "label":"Redo", "accel":"Ctrl+Y", "id":"Redo" ' '},\n' ' { "depth":1, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":1, "flags":0, "label":"Cut", "accel":"Ctrl+X", "id":"Cut" ' '},\n' ' { "depth":1, "flags":0, "label":"Delete", "accel":"Ctrl+K", ' '"id":"Delete" },\n' ' { "depth":1, "flags":0, "label":"Copy", "accel":"Ctrl+C", "id":"Copy" ' '},\n' ' { 
"depth":1, "flags":0, "label":"Paste", "accel":"Ctrl+V", ' '"id":"Paste" },\n' ' { "depth":1, "flags":0, "label":"Duplicate", "accel":"Ctrl+D", ' '"id":"Duplicate" },\n' ' { "depth":1, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":1, "flags":1, "label":"Remove Special", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Split Cut", "accel":"Ctrl+Alt+X", ' '"id":"SplitCut" },\n' ' { "depth":2, "flags":0, "label":"Split Delete", "accel":"Ctrl+Alt+K", ' '"id":"SplitDelete" },\n' ' { "depth":2, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Silence Audio", "accel":"Ctrl+L", ' '"id":"Silence" },\n' ' { "depth":2, "flags":0, "label":"Trim Audio", "accel":"Ctrl+T", ' '"id":"Trim" },\n' ' { "depth":1, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":1, "flags":1, \n' ' "label":"Clip Boundaries", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Split", "accel":"Ctrl+I", ' '"id":"Split" },\n' ' { "depth":2, "flags":0, "label":"Split New", "accel":"", ' '"id":"SplitNew" },\n' ' { "depth":2, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Join", "accel":"", "id":"Join" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Detach at Silences", "accel":"", "id":"Disjoin" },\n' ' { "depth":1, "flags":1, "label":"Labels", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Edit Labels...", "accel":"", ' '"id":"EditLabels" },\n' ' { "depth":2, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Add Label at Selection", "accel":"Ctrl+B", "id":"AddLabel" ' '},\n' ' { "depth":2, "flags":0, \n' ' "label":"Add Label at Playback Position", "accel":"Ctrl+M", \n' ' "id":"AddLabelPlaying" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Paste Text to New Label", "accel":"", "id":"PasteNewLabel" ' '},\n' ' { "depth":2, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":2, "flags":2, \n' ' "label":"Type to Create a Label (on/off)", "accel":"", \n' ' "id":"TypeToCreateLabel" },\n' ' 
{ "depth":1, "flags":1, "label":"Labeled Audio", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Cut", "accel":"", "id":"CutLabels" ' '},\n' ' { "depth":2, "flags":0, "label":"Delete", "accel":"", ' '"id":"DeleteLabels" },\n' ' { "depth":2, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Split Cut", "accel":"Alt+Shift+X", ' '"id":"SplitCutLabels" },\n' ' { "depth":2, "flags":0, "label":"Split Delete", ' '"accel":"Alt+Shift+K", \n' ' "id":"SplitDeleteLabels" },\n' ' { "depth":2, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Silence Audio", "accel":"", ' '"id":"SilenceLabels" },\n' ' { "depth":2, "flags":0, "label":"Copy", "accel":"Alt+Shift+C", ' '"id":"CopyLabels" },\n' ' { "depth":2, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Split", "accel":"", ' '"id":"SplitLabels" },\n' ' { "depth":2, "flags":0, "label":"Join", "accel":"", "id":"JoinLabels" ' '},\n' ' { "depth":2, "flags":0, \n' ' "label":"Detach at Silences", "accel":"Alt+Shift+J", ' '"id":"DisjoinLabels" },\n' ' { "depth":1, "flags":0, "label":"Metadata...", "accel":"", ' '"id":"EditMetaData" },\n' ' { "depth":1, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":1, "flags":0, "label":"Preferences...", "accel":"Ctrl+P", ' '"id":"Preferences" },\n' ' { "depth":0, "flags":0, "label":"Select", "accel":"" },\n' ' { "depth":1, "flags":0, "label":"All", "accel":"Ctrl+A", ' '"id":"SelectAll" },\n' ' { "depth":1, "flags":0, "label":"None", "accel":"", "id":"SelectNone" ' '},\n' ' { "depth":1, "flags":1, "label":"Tracks", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"In All Tracks", ' '"accel":"Ctrl+Shift+K", "id":"SelAllTracks" },\n' ' { "depth":2, "flags":0, \n' ' "label":"In All Sync-Locked Tracks", "accel":"Ctrl+Shift+Y", \n' ' "id":"SelSyncLockTracks" },\n' ' { "depth":1, "flags":1, "label":"Region", "accel":"" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Left at Playback Position", "accel":"[", 
\n' ' "id":"SetLeftSelection" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Right at Playback Position", "accel":"]", \n' ' "id":"SetRightSelection" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Track Start to Cursor", "accel":"Shift+J", \n' ' "id":"SelTrackStartToCursor" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Cursor to Track End", "accel":"Shift+K", \n' ' "id":"SelCursorToTrackEnd" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Track Start to End", "accel":"", \n' ' "id":"SelTrackStartToEnd" },\n' ' { "depth":2, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Store Selection", "accel":"", "id":"SelSave" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Retrieve Selection", "accel":"", "id":"SelRestore" },\n' ' { "depth":1, "flags":1, "label":"Spectral", "accel":"" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Toggle Spectral Selection", "accel":"Q", \n' ' "id":"ToggleSpectralSelection" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Next Higher Peak Frequency", "accel":"", \n' ' "id":"NextHigherPeakFrequency" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Next Lower Peak Frequency", "accel":"", \n' ' "id":"NextLowerPeakFrequency" },\n' ' { "depth":1, "flags":1, \n' ' "label":"Clip Boundaries", "accel":"" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Previous Clip Boundary to Cursor", "accel":"", \n' ' "id":"SelPrevClipBoundaryToCursor" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Cursor to Next Clip Boundary", "accel":"", \n' ' "id":"SelCursorToNextClipBoundary" },\n' ' { "depth":2, "flags":0, "label":"Previous Clip", "accel":"Alt+,", ' '"id":"SelPrevClip" },\n' ' { "depth":2, "flags":0, "label":"Next Clip", "accel":"Alt+.", ' '"id":"SelNextClip" },\n' ' { "depth":1, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":1, "flags":0, \n' ' "label":"Cursor to Stored Cursor Position", "accel":"", \n' ' "id":"SelCursorStoredCursor" },\n' ' { "depth":1, "flags":0, \n' ' "label":"Store Cursor Position", "accel":"", \n' ' 
"id":"StoreCursorPosition" },\n' ' { "depth":1, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":1, "flags":0, \n' ' "label":"At Zero Crossings", "accel":"Z", "id":"ZeroCross" },\n' ' { "depth":0, "flags":0, "label":"View", "accel":"" },\n' ' { "depth":1, "flags":1, "label":"Zoom", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Zoom In", "accel":"Ctrl+1", ' '"id":"ZoomIn" },\n' ' { "depth":2, "flags":0, "label":"Zoom Normal", "accel":"Ctrl+2", ' '"id":"ZoomNormal" },\n' ' { "depth":2, "flags":0, "label":"Zoom Out", "accel":"Ctrl+3", ' '"id":"ZoomOut" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Zoom to Selection", "accel":"Ctrl+E", "id":"ZoomSel" },\n' ' { "depth":2, "flags":0, "label":"Zoom Toggle", "accel":"Shift+Z", ' '"id":"ZoomToggle" },\n' ' { "depth":2, "flags":0, "label":"----", "accel":"" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Advanced Vertical Zooming", "accel":"", ' '"id":"AdvancedVZoom" },\n' ' { "depth":1, "flags":1, "label":"Track Size", "accel":"" },\n' ' { "depth":2, "flags":0, "label":"Fit to Width", "accel":"Ctrl+F", ' '"id":"FitInWindow" },\n' ' { "depth":2, "flags":0, "label":"Fit to Height", ' '"accel":"Ctrl+Shift+F", "id":"FitV" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Collapse All Tracks", "accel":"Ctrl+Shift+C", \n' ' "id":"CollapseAllTracks" },\n' ' { "depth":2, "flags":0, \n' ' "label":"Expand Collapsed Tracks", "accel":"Ctrl+Shift+X", \n' ' "id":"ExpandAllTracks" },\n' '
<filename>cellpainter/moves.py ''' robotarm moves ''' from __future__ import annotations from dataclasses import * from typing import * from pathlib import Path import abc import re import textwrap from . import utils class Move(abc.ABC): def to_dict(self) -> dict[str, Any]: res = utils.to_json(self) assert isinstance(res, dict) return res @classmethod def from_dict(cls, d: dict[str, Any]) -> Move: return utils.from_json(d) @abc.abstractmethod def to_script(self) -> str: raise NotImplementedError def try_name(self) -> str: if hasattr(self, 'name'): return getattr(self, 'name') else: return "" def is_gripper(self) -> bool: return isinstance(self, (GripperMove, GripperCheck)) def is_close(self) -> bool: if isinstance(self, GripperMove): return self.pos == 255 else: return False def is_open(self) -> bool: if isinstance(self, GripperMove): return self.pos != 255 else: return False def call(name: str, *args: Any, **kwargs: Any) -> str: strs = [str(arg) for arg in args] strs += [k + '=' + str(v) for k, v in kwargs.items()] return name + '(' + ', '.join(strs) + ')' def keep_true(**kvs: Any) -> dict[str, Any]: return {k: v for k, v in kvs.items() if v} @dataclass(frozen=True) class MoveLin(Move): ''' Move linearly to an absolute position in the room reference frame. xyz in mm rpy is roll-pitch-yaw in degrees, with: roll: gripper twist. 0°: horizontal pitch: gripper incline. 0°: horizontal, -90° pointing straight down yaw: gripper rotation in room XY, CCW. 0°: to x+, 90°: to y+ ''' xyz: list[float] rpy: list[float] tag: str | None = None name: str = "" slow: bool = False def to_script(self) -> str: return call('MoveLin', *self.xyz, *self.rpy, **keep_true(slow=self.slow)) @dataclass(frozen=True) class MoveRel(Move): ''' Move linearly to a position relative to the current position. 
xyz in mm rpy in degrees xyz applied in rotation of room reference frame, unaffected by any rpy, so: xyz' = xyz + Δxyz rpy' = rpy + Δrpy ''' xyz: list[float] rpy: list[float] tag: str | None = None name: str = "" slow: bool = False def to_script(self) -> str: return call('MoveRel', *self.xyz, *self.rpy, **keep_true(slow=self.slow)) @dataclass(frozen=True) class MoveJoint(Move): ''' Joint rotations in degrees ''' joints: list[float] name: str = "" slow: bool = False def to_script(self) -> str: return call('MoveJoint', *self.joints, **keep_true(slow=self.slow)) @dataclass(frozen=True) class GripperMove(Move): pos: int soft: bool = False def to_script(self) -> str: return call('GripperMove', self.pos, **keep_true(soft=self.soft)) @dataclass(frozen=True) class GripperCheck(Move): def to_script(self) -> str: return call('GripperCheck') @dataclass(frozen=True) class Section(Move): sections: list[str] def to_script(self) -> str: return textwrap.indent(', '.join(self.sections), '# ') @dataclass(frozen=True) class RawCode(Move): ''' Send a raw piece of code, used only in the gui and available at the cli. ''' code: str def to_script(self) -> str: return self.code def is_gripper(self) -> bool: raise ValueError def is_open(self) -> bool: raise ValueError def is_close(self) -> bool: raise ValueError HotelLocs = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21] class MoveList(list[Move]): ''' Utility class for dealing with moves in a list ''' @staticmethod def read_jsonl(filename: str | Path) -> MoveList: return MoveList(utils.serializer.read_jsonl(filename)) def write_jsonl(self, filename: str | Path) -> None: utils.serializer.write_jsonl(self, filename) def adjust_tagged(self, tag: str, *, dname: str, dz: float) -> MoveList: ''' Adjusts the z in room reference frame for all MoveLin with the given tag. 
''' out = MoveList() for m in self: if isinstance(m, MoveLin) and m.tag == tag: x, y, z = list(m.xyz) out += [replace(m, name=dname + ' ' + m.name, tag=None, xyz=[x, y, round(z + dz, 1)])] elif hasattr(m, 'tag') and getattr(m, 'tag') == tag: raise ValueError('Tagged move must be MoveLin for adjust_tagged') else: out += [m] return out def tags(self) -> list[str]: out: list[str] = [] for m in self: if hasattr(m, 'tag'): tag = getattr(m, 'tag') if tag is not None: out += [tag] return out def expand_hotels(self, name: str) -> dict[str, MoveList]: ''' If there is a tag like 19/21 then expand to all heights 1/21, 3/21, .., 21/21 The first occurence of 19 in the name is replaced with 1, 3, .., 21, so "lid_B19_put" becomes "lid_B1_put" and so on. ''' hotel_dist: float = 70.94 out: dict[str, MoveList] = {} for tag in set(self.tags()): if m := re.match(r'(\d+)/21$', tag): ref_h = int(m.group(1)) assert str(ref_h) in name assert ref_h in HotelLocs for h in HotelLocs: dz = (h - ref_h) / 2 * hotel_dist name_h = name.replace(str(ref_h), str(h), 1) out[name_h] = self.adjust_tagged(tag, dname=str(h), dz=dz) return out def with_sections(self, include_Section: bool=False) -> list[tuple[tuple[str, ...], Move]]: out: list[tuple[tuple[str, ...], Move]] = [] active: tuple[str, ...] 
= tuple() for _i, move in enumerate(self): if isinstance(move, Section): active = tuple(move.sections) if include_Section: out += [(active, move)] else: out += [(active, move)] return out def expand_sections(self, base_name: str, include_self: bool=True) -> dict[str, MoveList]: with_section = self.with_sections() sections: set[tuple[str, ...]] = { sect for sect, _move in with_section if sect } out: dict[str, MoveList] = {} if include_self: out[base_name] = self for section in sections: pos = {i for i, (active, _) in enumerate(with_section) if section == active[:len(section)]} maxi = max(pos) assert all(i == maxi or i + 1 in pos for i in pos), f'section {section} not contiguous' name = ' '.join([base_name, *section]) out[name] = MoveList(m for active, m in with_section if section == active[:len(section)]) return out def describe(self) -> str: return '\n'.join([ m.__class__.__name__ + ' ' + (m.try_name() or utils.catch(lambda: str(getattr(m, 'pos')), '')) for m in self ]) def has_open(self) -> bool: return any(m.is_open() for m in self) def has_close(self) -> bool: return any(m.is_close() for m in self) def has_gripper(self) -> bool: return any(m.is_gripper() for m in self) def split_on(self, pred: Callable[[Move], bool]) -> tuple[MoveList, Move, MoveList]: for i, move in enumerate(self): if pred(move): return MoveList(self[:i]), move, MoveList(self[i+1:]) raise ValueError def split(self) -> MoveListParts: before_pick, close, after_pick = self.split_on(Move.is_close) mid, open, after_drop = after_pick.split_on(Move.is_open) return MoveListParts.init( before_pick=before_pick, close=close, transfer_inner=mid, open=open, after_drop=after_drop, ) @dataclass(frozen=True) class MoveListParts: prep: MoveList transfer: MoveList ret: MoveList @staticmethod def init( before_pick: MoveList, close: Move, transfer_inner: MoveList, open: Move, after_drop: MoveList, ): assert not any(m.is_gripper() for m in before_pick) assert not any(m.is_gripper() for m in transfer_inner) assert 
not any(m.is_gripper() for m in after_drop) assert close.is_close() assert open.is_open() *to_pick_neu, pick_neu, pick_pos = before_pick drop_neu, *from_drop_neu = after_drop assert pick_neu.try_name().endswith("neu"), f'{pick_neu.try_name()} needs a neu before pick' assert pick_pos.try_name().endswith("pick"), f'{pick_pos.try_name()} needs a pick move before gripper pick close' assert drop_neu.try_name().endswith("neu"), f'{drop_neu.try_name()} needs a neu after drop' return MoveListParts( prep = MoveList([*to_pick_neu, pick_neu]), transfer = MoveList([ pick_neu, pick_pos, close, *transfer_inner, open, drop_neu]), ret = MoveList([ drop_neu, *from_drop_neu]), ) HasMoveList = TypeVar('HasMoveList') def sleek_movements( xs: list[HasMoveList], get_movelist: Callable[[HasMoveList], MoveList | None], pair_ok: Callable[[HasMoveList, HasMoveList], bool], ) -> list[HasMoveList]: ''' if program A ends by B21 neu and program B by B21 neu then run: program A to B21 neu program B from B21 neu ''' ms: list[tuple[int, MoveList]] = [] for i, x in enumerate(xs): if m := get_movelist(x): ms += [(i, m)] rm: set[int] = set() for (i, a), (j, b) in zip(ms, ms[1:]): a_first = a[0].try_name() b_last = b[-1].try_name() if a.has_gripper() or b.has_gripper(): continue if a_first and a_first == b_last and pair_ok(xs[i], xs[j]): rm |= {i, j} return [ x for i, x in enumerate(xs) if i not in rm ] @dataclass(frozen=True) class TaggedMoveList: ''' Extra information about a move list that is used for resumption ''' base: str kind: Literal[ 'full', 'prep', 'transfer', 'return', 'transfer to drop neu', 'transfer from drop neu', ] movelist: MoveList prep: list[str] = field(default_factory=list) is_ret: bool = False @property def name(self): if self.kind == 'full': return self.base else: return self.base + ' ' + self.kind def read_and_expand(filename: Path) -> dict[str, MoveList]: ml = MoveList.read_jsonl(filename) name = filename.stem expanded = ml.expand_sections(name, include_self=name == 
'wash_to_disp') for k, v in list(expanded.items()): expanded |= v.expand_hotels(k) return expanded def read_movelists() -> dict[str, TaggedMoveList]: expanded: dict[str, MoveList] = {} for filename in Path('./movelists').glob('*.jsonl'): expanded |= read_and_expand(filename) if not expanded: import sys print(f''' No movelists found. You need to start this program in the repo root directory so that ./movelists/ is a direct child. If you installed with pip install --editable, you probably want to be in {Path(__file__).parent.parent} but you're in {Path.cwd()} ''', file=sys.stderr) sys.exit(-1) out: list[TaggedMoveList] = [] for base, v in expanded.items(): if 'put-prep' in base or 'put-return' in base: assert 'incu_A21' in base # these are used to
<reponame>nyuvis/explanation_explorer #!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import print_function from __future__ import division import os import csv import sys import json import math import time import random import numpy as np from sklearn.tree import DecisionTreeClassifier from scipy.sparse import coo_matrix, csr_matrix class _Explanation_v0(object): def __init__(self, features, pred, obj_up, obj_down): self._features = features self._pred = pred def convert(obj): return [ ( int(e[0]), float(e[1]), frozenset([ int(f) for f in e[2] ]) ) for e in obj ] self._expl_up = convert(obj_up) self._expl_down = convert(obj_down) def get_explanation(self, score): l, r = score up = self._pred < r if not up and self._pred < l: return [] th = l if up else r expl = self._expl_up if up else self._expl_down def get_e(): fs = [] for e in expl: fs.append(e[0]) p = e[1] if up: if p >= th: return fs, e[2] else: if p < th: return fs, e[2] return [], frozenset() def convert(fs, minus): return [ self._features[f] for f in fs if f not in minus ] return convert(*get_e()) class _Explanation_v1(object): def __init__(self, expl, features, postfixes, th, msg): self._th = th def get_feature(fix): if fix < 0: return "" return features[fix] self._expl = [ "{0}{1}{2}".format(e[1], get_feature(e[0]), postfixes[e[0]]) for e in expl["expl"] ] def get_explanation(self, score): l, r = score th = self._th if l != th or r != th: raise ValueError("expected threshold {0} got {1}".format((th, th), score)) return self._expl def _get_int_type(maxsize): for dt in [ np.int8, np.int16, np.int32, np.int64 ]: if maxsize <= np.iinfo(dt).max: return dt raise ValueError("array too large: {0}".format(maxsize)) def _optimize_lookup(lookup): if min(lookup.keys()) < 0: raise ValueError("no index lookup optimization possible") high = max(lookup.keys()) high_v = max(lookup.values()) return np.array([ lookup[ix] if ix in lookup else -1 for ix in range(high + 1) ], dtype=_get_int_type(high_v)) 
class _DataMatrix_v0(object): def __init__(self, csvfile, ixs, train_ixs, labels, features, cache, msg): with cache.get_hnd({ "function": "dm", "csv": csvfile, "ixs": ixs, "tixs": train_ixs, }, "explainer") as c: if c.has(): load_time = time.clock() msg("loading matrix from cache..") matrix, ix_map, train_labels, train_ix_map = c.read() msg("loading matrix from cache took {0:0.4f}s", time.clock() - load_time) else: matrix, ix_map, train_labels, train_ix_map = c.write(self._load(csvfile, ixs, train_ixs, labels, features, msg)) self._matrix = matrix self._ix_map = ix_map self._features = features self._train_labels = train_labels self._train_ix_map = train_ix_map def _load(self, csvfile, ixs, train_ixs, labels, features, msg): load_time = time.clock() msg("loading matrix..") temp_labels = [] features_checked = False temp_rows = [] temp_cols = [] ix_map = _optimize_lookup(dict([ (ix, pos) for (pos, ix) in enumerate(ixs) ])) skip = frozenset([ "label", "pred", "meta" ]) with open(csvfile, 'r') as f_in: for (rix, row) in enumerate(csv.DictReader(f_in)): temp_labels.append(int(row["label"]) > 0) if not features_checked: fset = set(features) tfset = set([ f for f in row.keys() if f not in skip ]) if not tfset.issubset(fset): raise ValueError("missing features in explanations: {0}".format(tfset.difference(fset))) if not fset.issubset(tfset): msg("superfluous features in explanations: {0}", fset.difference(tfset)) features_checked = True for (fix, f) in enumerate(features): v = row[f] if int(v) and ix_map[rix] >= 0: temp_rows.append(ix_map[rix]) temp_cols.append(fix) def where_not(xs, ys): if len(xs) != len(ys): return "length: {0} != {1}".format(len(xs), len(ys)) res = [] for (pos, x) in enumerate(xs): y = ys[pos] if x != y: res.append("{0}: '{1}' != '{2}'".format(pos, x, y)) return "at\n{0}".format("\n".join(res)) train_labels = [ temp_labels[ix] for ix in train_ixs ] temp_labels = [ temp_labels[ix] for ix in ixs ] if labels != temp_labels: raise ValueError("inconsistent 
labels {0}".format(where_not(labels, temp_labels))) train_ix_map = _optimize_lookup(dict([ (ix, pos) for (pos, ix) in enumerate(train_ixs) ])) matrix = coo_matrix((np.repeat(1, len(temp_rows)), (temp_rows, temp_cols)), shape=(len(ixs), len(features)), dtype=np.int8) msg("loading matrix took {0:0.4f}s", time.clock() - load_time) return matrix.tocsr(), ix_map, train_labels, train_ix_map def get_train_labels(self, train_ixs): return [ "T" if self._train_labels[tix] else "F" for tix in self._train_ix_map[train_ixs] ] def get_vecs(self, ixs): return self._matrix[self._ix_map[ixs], :] def get_vec(self, ix): _, nz = self._matrix[self._ix_map[ix], :].nonzero() return [ self._features[pos] for pos in nz ] def get_counts(self, ixs): fcounts = self._matrix[self._ix_map[ixs], :].sum(axis=0).tolist()[0] return dict([ (self._features[pos], fcounts[pos]) for pos in range(len(self._features)) if fcounts[pos] > 0 ]) def _process_rows(self, ixs, handle): rixs, fixs = self._matrix[self._ix_map[ixs], :].nonzero() def process(from_pos, to_pos): if to_pos <= from_pos: return handle(ixs[rixs[from_pos]], [ fixs[p] for p in range(from_pos, to_pos) ]) pos = 0 last_pos = 0 while pos < rixs.shape[0]: if rixs[last_pos] != rixs[pos]: process(last_pos, pos) last_pos = pos pos += 1 process(last_pos, pos) def get_groups(self, ixs, ignore_fixs=set()): groups = {} def hnd(ix, key): key = tuple([ k for k in key if k not in ignore_fixs ]) if key not in groups: groups[key] = [] groups[key].append(ix) self._process_rows(ixs, hnd) return dict([ (tuple([ self._features[k] for k in ks ]), vs) for (ks, vs) in groups.items() ]) class _DataMatrix_v1(object): def __init__(self, csvfile, features, expls, cache, msg): with cache.get_hnd({ "function": "dmv1", "csv": csvfile, "features": features, }, "explainer") as c: if c.has(): load_time = time.clock() msg("loading matrix from cache..") matrix, mins, diffs = c.read() msg("loading matrix from cache took {0:0.4f}s", time.clock() - load_time) else: matrix, mins, 
diffs = c.write(self._load(csvfile, features, expls, msg)) self._features = features self._matrix = matrix self._mins = mins self._diffs = diffs def _load(self, data_file, features, expls, msg): if data_file.endswith(".csr"): return self._load_csr(data_file, features, expls, msg) load_time = time.clock() msg("loading CSV data..") skip = frozenset([ "label" ]) labels = [] values = [] features_lookup = dict([ (f, ix) for (ix, f) in enumerate(features) ]) maxs = np.zeros((len(features),), dtype=np.float64) mins = np.zeros((len(features),), dtype=np.float64) def set_value(cur_row, f, val): fix = features_lookup[f] if val > maxs[fix]: maxs[fix] = val mm = mins[fix] if val < mm: if val < 0.0 and mm == 0.0: msg("WARNING: negative value found -- slow mode for feature {0}!", features[fix]) # we have to fix all missing values in previous rows now :( for cr in values: if fix not in cr: cr[fix] = mm mins[fix] = val mm = mins[fix] if mm != 0.0 or val > 0.0: cur_row[fix] = val with open(data_file, 'r') as data_in: for row in csv.DictReader(data_in): labels.append(int(row["label"]) != 0) cur_row = {} for (k, v) in row.items(): if k in skip: continue v = np.float64(v.strip()) set_value(cur_row, k, v) values.append(cur_row) diffs = maxs - mins diffs[np.isclose(0, diffs)] = 1.0 def prepare(val, fix): return (val - mins[fix]) / diffs[fix] coords = [ (prepare(val, fix), rix, fix) for (rix, cur_row) in enumerate(values) for (fix, val) in cur_row.items() ] vals, rows, cols = zip(*coords) matrix = coo_matrix((vals, (rows, cols)), shape=(len(values), len(features)), dtype=np.float64) matrix = matrix.tocsr() matrix.sort_indices() for (pos, l) in enumerate(labels): if expls[pos]["label"] != l: raise ValueError("inconsistent label at index {0}".format(pos)) msg("loading data took {0:0.4f}s", time.clock() - load_time) return matrix, mins, diffs def _load_csr(self, data_file, features, expls, msg): load_time = time.clock() msg("loading CSR data..") labels = [] data = [] indices = [] indptr = [ 
0 ] feature_map = None with open(data_file, "r") as f_in: for row in csv.reader(f_in): if feature_map is None: own_features = row[1:] features_lookup = dict((f, ix) for (ix, f) in enumerate(features)) feature_map = dict((fix, features_lookup[f]) for (fix, f) in enumerate(own_features)) continue labels.append(int(row[0]) > 0) for fix in row[1:]: data.append(True) indices.append(feature_map[int(fix)]) indptr.append(len(data)) labels = np.array(labels, dtype=np.bool) matrix = csr_matrix((data, indices, indptr), shape=(len(indptr) - 1, len(features)), dtype=np.bool) matrix.sort_indices() for (pos, l) in enumerate(labels): if expls[pos]["label"] != l: raise ValueError("inconsistent label at index {0}".format(pos)) mins = np.zeros((len(features),), dtype=np.float64) diffs = np.ones((len(features),), dtype=np.float64) msg("loading data took {0:0.4f}s", time.clock() - load_time) return matrix, mins, diffs def _unprepare(self, X): return X * self._diffs + self._mins def get_train_labels(self, train_ixs): raise NotImplementedError("protocol 1 does not support training inspection") def get_vecs(self, ixs): return self._matrix[ixs, :] def get_vec(self, ix): # in case of empty explanations # TODO think about better solution _, nz = self._matrix[ix, :].nonzero() return [ self._features[pos] for pos in nz ] def get_counts(self, ixs): # TODO think about better solution fcounts = self._matrix[ixs, :].sum(axis=0).tolist()[0] return dict([ (self._features[pos], fcounts[pos]) for pos in range(len(self._features)) if fcounts[pos] > 0 ]) def _process_rows(self, ixs, handle): # TODO think about better solution rixs, fixs = self._matrix[ixs, :].nonzero() def process(from_pos, to_pos): if to_pos <= from_pos: return handle(ixs[rixs[from_pos]], [ fixs[p] for p in range(from_pos, to_pos) ]) pos = 0 last_pos = 0 while pos < rixs.shape[0]: if rixs[last_pos] != rixs[pos]: process(last_pos, pos) last_pos = pos pos += 1 process(last_pos, pos) def get_groups(self, ixs, ignore_fixs=set()): #
status of {}".format(service)) if thread is None: # no thread means the service is not running # self.logger.info("{} is stopped".format(service)) reply = 'stopped' elif thread.is_alive(): # self.logger.info("{} is running".format(service)) reply = 'running' else: # self.logger.info("{} is stopped".format(service)) reply = 'stopped' return status, reply def get_queue(self, service): """ Get control queue corresponding to a service :param str service: Service to get queue for :return: queue (Queue) """ if service == 'amber_listener': queue = self.amber_listener_queue elif service == 'amber_clustering': queue = self.amber_trigger_queue elif service == 'voevent_generator': queue = self.voevent_generator_queue elif service == 'status_website': queue = self.status_website_queue elif service == 'offline_processing': queue = self.offline_queue elif service == 'dada_trigger': queue = self.dadatrigger_queue elif service == 'lofar_trigger': queue = self.lofar_trigger_queue elif service == 'processor': queue = self.processor_queue else: queue = None return queue def start_service(self, service): """ Start a service :param str service: service to start :return: status, reply """ if service not in self.service_mapping.keys(): self.logger.error('Unknown service: {}'.format(service)) status = 'Error' reply = "Unknown service" return status, reply # get thread thread = self.threads[service] # check if a new thread has to be generated if thread is None or not thread.is_alive(): self.logger.info("Creating new thread for service {}".format(service)) self.create_thread(service) thread = self.threads[service] # start the specified service self.logger.info("Starting service: {}".format(service)) # check if already running if thread.is_alive(): status = 'Success' reply = 'already running' self.logger.warning("Service already running: {}".format(service)) else: # start thread.start() # check status if not thread.is_alive(): status = 'Error' reply = "failed" self.logger.error("Failed to 
start service: {}".format(service)) else: status = 'Success' reply = "started" self.logger.info("Started service: {}".format(service)) return status, reply def stop_service(self, service): """ Stop a service :param str service: service to stop :return: status, reply """ # settings for specific services if service not in self.services: status = 'Error' reply = "Unknown service" self.logger.error("Unknown service: {}".format(service)) return status, reply # get thread and event thread = self.threads[service] # check is it was running at all if thread is None or not thread.is_alive(): # is_alive is false if thread died # remove thread if that is the case if thread is not None and not thread.is_alive(): self.threads[service] = None self.logger.info("Service not running: {}".format(service)) reply = 'Success' status = 'Stopped service' return status, reply # stop the specified service self.logger.info("Stopping service: {}".format(service)) queue = self.get_queue(service) if not queue: status = 'Error' reply = f"No queue to stop {service}" return status, reply queue.put('stop') tstart = time() while thread.is_alive() and time() - tstart < self.stop_timeout: sleep(.1) if thread.is_alive(): status = 'error' reply = "Failed to stop service before timeout" self.logger.error("Failed to stop service before timeout: {}".format(service)) else: status = 'Success' reply = 'stopped' self.logger.info("Stopped service: {}".format(service)) # remove thread self.threads[service] = None return status, reply def restart_service(self, service): """ Restart a service :param str service: service to restart :return: status, reply """ status = 'Success' _status, reply_stop = self.stop_service(service) if _status != 'Success': status = _status _status, reply_start = self.start_service(service) if _status != 'Success': status = _status reply = {'stop': reply_stop, 'start': reply_start} return status, reply def create_thread(self, service): """ Initialise a service thread :param str service: 
service to create a new thread for """ # settings for specific services source_queue = self.get_queue(service) second_target_queue = None if service == 'amber_listener': target_queue = self.amber_trigger_queue # only output to processor if this is not the master if self.hostname != MASTER: second_target_queue = self.processor_queue elif service == 'amber_clustering': target_queue = self.dadatrigger_queue elif service == 'voevent_generator': target_queue = None elif service == 'status_website': target_queue = None elif service == 'offline_processing': target_queue = None elif service == 'dada_trigger': target_queue = None elif service == 'lofar_trigger': target_queue = None elif service == 'processor': target_queue = None else: self.logger.error("Cannot create thread for {}".format(service)) return # Instantiate a new instance of the class self.threads[service] = self.service_mapping[service](source_queue=source_queue, target_queue=target_queue, second_target_queue=second_target_queue, control_queue=self.control_queue, config_file=self.config_file) def stop(self): """ Stop all services and exit :return: status, reply """ self.logger.info("Stopping all services") for service in self.services: self.stop_service(service) self.stop_event.set() status = 'Success' reply = "Stopping master" return status, reply def start_observation(self, config_file): """ Start an observation :param str config_file: Path to observation config file :return: status, reply """ self.logger.info("Received start_observation command with config file {}".format(config_file)) # check if config file exists if not os.path.isfile(config_file): self.logger.error("File not found: {}".format(config_file)) return "Error", "Failed: config file not found" # load config if config_file.endswith('.yaml'): config = self._load_yaml(config_file) elif config_file.endswith('.parset'): config = self._load_parset(config_file) else: self.logger.error("Failed to determine config file type from {}".format(config_file)) 
return "Error", "Failed: unknown config file type" # check if process_triggers is enabled if not config['proctrigger']: self.logger.info("Process triggers is disabled; not starting observation") return "Success", "Process triggers disabled - not starting" # store the config for future reference config_output_dir = os.path.join(self.parset_dir, config['datetimesource']) try: util.makedirs(config_output_dir) except Exception as e: raise DARCMasterException("Cannot create config output directory: {}".format(e)) try: copy2(config_file, config_output_dir) except Exception as e: self.logger.error("Could not store config file: {}".format(e)) # initialize observation # ensure services are running for service in self.services: self.start_service(service) # check host type if self.hostname == MASTER: host_type = 'master' elif self.hostname in WORKERS: host_type = 'worker' else: self.logger.error("Running on unknown host: {}".format(self.hostname)) return "Error", "Failed: running on unknown host" # create command command = {'command': 'start_observation', 'obs_config': config, 'host_type': host_type} # wait until start time utc_start = Time(config['startpacket'] / TIME_UNIT, format='unix') utc_end = utc_start + TimeDelta(config['duration'], format='sec') # if end time is in the past, only start offline processing if utc_end < Time.now(): self.logger.warning("End time in past! 
Only starting offline processing and processor") self.offline_queue.put(command) self.processor_queue.put(command) return "Warning", "Only offline processing and processor started" t_setup = utc_start - TimeDelta(self.setup_time, format='sec') self.logger.info("Starting observation at {}".format(t_setup.isot)) util.sleepuntil_utc(t_setup) # clear queues, then send command for queue in self.all_queues: util.clear_queue(queue) for queue in self.all_queues: queue.put(command) return "Success", "Observation started" def stop_observation(self, config_file, abort=False): """ Stop an observation :param str config_file: path to observation config file :param bool abort: whether to abort the observation :return: status, reply message """ self.logger.info("Received stop_observation command with config file {}".format(config_file)) # check if config file exists if not os.path.isfile(config_file): self.logger.error("File not found: {}".format(config_file)) return "Error", "Failed: config file not found" # load config if config_file.endswith('.yaml'): config = self._load_yaml(config_file) elif config_file.endswith('.parset'): config = self._load_parset(config_file) else: self.logger.error("Failed to determine config file type from {}".format(config_file)) return "Error", "Failed: unknown config file type" # call stop_observation for all relevant services through their queues for queue in self.all_queues: # in mixed mode, skip stopping offline_processing, unless abort is True if (self.mode == 'mixed') and (queue == self.offline_queue) and not abort: self.logger.info("Skipping stopping offline processing in mixed mode") continue queue.put({'command': 'stop_observation', 'obs_config': config}) status = 'Success' reply = "Stopped observation" self.logger.info("Stopped observation") return status, reply def _load_yaml(self, config_file): """ Load yaml file and convert to observation config :param str config_file: Path to yaml file :return: observation config dict """ 
self.logger.info("Loading yaml config {}".format(config_file)) if not os.path.isfile(config_file): self.logger.error("Yaml file not found: {}".format(config_file)) return {} with open(config_file) as f: config = yaml.load(f, Loader=yaml.SafeLoader) return config def _load_parset(self, config_file): """ Load parset file and convert to observation config :param str config_file: Path to parset file :return: observation configuration """ self.logger.info("Loading parset {}".format(config_file)) if not os.path.isfile(config_file): self.logger.error("Parset not found: {}".format(config_file)) # no parset - do not process this observation return {'proctrigger': False} # Read raw parset with open(config_file) as f: parset = f.read().strip() # Convert to dict config = util.parse_parset(parset) return config def _switch_cmd(self, command): """ Check status of LOFAR trigger system / VOEvent generator, or enable/disable them :param str command: command to run :return: status, reply """ if command.startswith('lofar_'): service = self.threads['lofar_trigger'] name = 'LOFAR triggering' queue = self.lofar_trigger_queue elif command.startswith('voevent_'): service = self.threads['voevent_generator'] name = 'VOEvent generator' queue = self.voevent_generator_queue else: self.logger.info("Unknown command: {}".format(command)) return 'Error', "Failed: Unknown command {}".format(command) if self.hostname != MASTER: return 'Error', "Failed: should run on master node" # check if service is running if (service is None) or (not service.is_alive()): return 'Error', f"{name} service is not running" # send command to service queue.put(command) # retrieve reply
@param resp: The string response returned from the SMTP Server
        @param numOK: the number of addresses accepted by the remote host.
        @param addresses: is a list of tuples (address, code, resp) listing
                          the response to each RCPT command.
        @param log: is the SMTP session log
        """
        raise NotImplementedError

    def _disconnectFromServer(self):
        # Accept any response code to QUIT, then move to the disconnect state.
        self._expected = xrange(0, 1000)
        self._okresponse = self.smtpState_disconnect
        self.sendLine('QUIT')


# NOTE(review): Python 2-era Twisted code (xrange, dict.iteritems,
# base64.decodestring, str.encode('base64')) — do not port piecemeal.
class ESMTPClient(SMTPClient):
    """SMTP client with ESMTP support: EHLO, STARTTLS and SASL authentication."""

    # Fall back to HELO if the server does not support EHLO
    heloFallback = True
    # Refuse to proceed if authentication cannot be performed
    requireAuthentication = False
    # Refuse to proceed if TLS is not available
    requireTransportSecurity = False
    # Indicate whether or not our transport can be considered secure.
    tlsMode = False
    # ClientContextFactory to use for STARTTLS
    context = None

    def __init__(self, secret, contextFactory=None, *args, **kw):
        SMTPClient.__init__(self, *args, **kw)
        self.authenticators = []
        self.secret = secret
        self.context = contextFactory
        self.tlsMode = False

    # --- error reporters: each closes the session with a specific exception ---

    def esmtpEHLORequired(self, code=-1, resp=None):
        self.sendError(EHLORequiredError(502, "Server does not support ESMTP Authentication", self.log.str()))

    def esmtpAUTHRequired(self, code=-1, resp=None):
        tmp = []
        for a in self.authenticators:
            tmp.append(a.getName().upper())
        auth = "[%s]" % ', '.join(tmp)
        self.sendError(AUTHRequiredError(502, "Server does not support Client Authentication schemes %s" % auth,
                                         self.log.str()))

    def esmtpTLSRequired(self, code=-1, resp=None):
        self.sendError(TLSRequiredError(502, "Server does not support secure communication via TLS / SSL",
                                        self.log.str()))

    def esmtpTLSFailed(self, code=-1, resp=None):
        self.sendError(TLSError(code, "Could not complete the SSL/TLS handshake", self.log.str()))

    def esmtpAUTHDeclined(self, code=-1, resp=None):
        self.sendError(AUTHDeclinedError(code, resp, self.log.str()))

    def esmtpAUTHMalformedChallenge(self, code=-1, resp=None):
        # NOTE(review): local name 'str' shadows the builtin — kept as-is.
        str = "Login failed because the SMTP Server returned a malformed Authentication Challenge"
        self.sendError(AuthenticationError(501, str, self.log.str()))

    def esmtpAUTHServerError(self, code=-1, resp=None):
        self.sendError(AuthenticationError(code, resp, self.log.str()))

    def registerAuthenticator(self, auth):
        """Registers an Authenticator with the ESMTPClient. The ESMTPClient
        will attempt to login to the SMTP Server in the order the
        Authenticators are registered. The most secure Authentication
        mechanism should be registered first.

        @param auth: The Authentication mechanism to register
        @type auth: class implementing C{IClientAuthentication}
        """
        self.authenticators.append(auth)

    def connectionMade(self):
        SMTPClient.connectionMade(self)
        self._okresponse = self.esmtpState_ehlo

    def esmtpState_ehlo(self, code, resp):
        # Send EHLO; on failure either fall back to plain HELO or abort.
        self._expected = SUCCESS
        self._okresponse = self.esmtpState_serverConfig
        self._failresponse = self.esmtpEHLORequired
        if self.heloFallback:
            self._failresponse = self.smtpState_helo
        self.sendLine('EHLO ' + self.identity)

    def esmtpState_serverConfig(self, code, resp):
        # Parse the EHLO response into {EXTENSION: argument-or-None}.
        items = {}
        for line in resp.splitlines():
            e = line.split(None, 1)
            if len(e) > 1:
                items[e[0]] = e[1]
            else:
                items[e[0]] = None
        if self.tlsMode:
            self.authenticate(code, resp, items)
        else:
            self.tryTLS(code, resp, items)

    def tryTLS(self, code, resp, items):
        # Negotiate STARTTLS when we have a context and the server offers it;
        # otherwise fail hard only if transport security is required.
        if self.context and 'STARTTLS' in items:
            self._expected = [220]
            self._okresponse = self.esmtpState_starttls
            self._failresponse = self.esmtpTLSFailed
            self.sendLine('STARTTLS')
        elif self.requireTransportSecurity:
            self.tlsMode = False
            self.esmtpTLSRequired()
        else:
            self.tlsMode = False
            self.authenticate(code, resp, items)

    def esmtpState_starttls(self, code, resp):
        try:
            self.transport.startTLS(self.context)
            self.tlsMode = True
        except:
            log.err()
            self.esmtpTLSFailed(451)
        # Send another EHLO once TLS has been started to
        # get the TLS / AUTH schemes. Some servers only allow AUTH in TLS mode.
        self.esmtpState_ehlo(code, resp)

    def authenticate(self, code, resp, items):
        # Pick the first registered authenticator whose scheme the server
        # advertised in the AUTH extension.
        if self.secret and items.get('AUTH'):
            schemes = items['AUTH'].split()
            tmpSchemes = {}
            #XXX: May want to come up with a more efficient way to do this
            for s in schemes:
                tmpSchemes[s.upper()] = 1
            for a in self.authenticators:
                auth = a.getName().upper()
                if auth in tmpSchemes:
                    self._authinfo = a
                    # Special condition handled
                    if auth == "PLAIN":
                        self._okresponse = self.smtpState_from
                        self._failresponse = self._esmtpState_plainAuth
                        self._expected = [235]
                        challenge = encode_base64(self._authinfo.challengeResponse(self.secret, 1), eol="")
                        self.sendLine('AUTH ' + auth + ' ' + challenge)
                    else:
                        self._expected = [334]
                        self._okresponse = self.esmtpState_challenge
                        # If some error occurs here, the server declined the AUTH
                        # before the user / password phase. This would be
                        # a very rare case
                        self._failresponse = self.esmtpAUTHServerError
                        self.sendLine('AUTH ' + auth)
                    return
        if self.requireAuthentication:
            self.esmtpAUTHRequired()
        else:
            self.smtpState_from(code, resp)

    def _esmtpState_plainAuth(self, code, resp):
        # Second attempt at AUTH PLAIN (different challengeResponse variant).
        self._okresponse = self.smtpState_from
        self._failresponse = self.esmtpAUTHDeclined
        self._expected = [235]
        challenge = encode_base64(self._authinfo.challengeResponse(self.secret, 2), eol="")
        self.sendLine('AUTH PLAIN ' + challenge)

    def esmtpState_challenge(self, code, resp):
        self._authResponse(self._authinfo, resp)

    def _authResponse(self, auth, challenge):
        self._failresponse = self.esmtpAUTHDeclined
        try:
            challenge = base64.decodestring(challenge)
        except binascii.Error:
            # Illegal challenge, give up, then quit
            self.sendLine('*')
            self._okresponse = self.esmtpAUTHMalformedChallenge
            self._failresponse = self.esmtpAUTHMalformedChallenge
        else:
            resp = auth.challengeResponse(self.secret, challenge)
            self._expected = [235, 334]
            self._okresponse = self.smtpState_maybeAuthenticated
            self.sendLine(encode_base64(resp, eol=""))

    def smtpState_maybeAuthenticated(self, code, resp):
        """
        Called to handle the next message from the server after sending a
        response to a SASL challenge.  The server response might be another
        challenge or it might indicate authentication has succeeded.
        """
        if code == 235:
            # Yes, authenticated!
            del self._authinfo
            self.smtpState_from(code, resp)
        else:
            # No, not authenticated yet.  Keep trying.
            self._authResponse(self._authinfo, resp)


class ESMTP(SMTP):
    """Server-side SMTP protocol with ESMTP extensions (STARTTLS, AUTH)."""

    ctx = None
    canStartTLS = False
    startedTLS = False

    authenticated = False

    def __init__(self, chal = None, contextFactory = None):
        SMTP.__init__(self)
        if chal is None:
            chal = {}
        self.challengers = chal
        self.authenticated = False
        self.ctx = contextFactory

    def connectionMade(self):
        SMTP.connectionMade(self)
        # STARTTLS is only offered when the transport supports it AND we
        # were given a TLS context factory.
        self.canStartTLS = ITLSTransport.providedBy(self.transport)
        self.canStartTLS = self.canStartTLS and (self.ctx is not None)

    def greeting(self):
        return SMTP.greeting(self) + ' ESMTP'

    def extensions(self):
        # Map of extension name -> argument list (or None for no arguments).
        ext = {'AUTH': self.challengers.keys()}
        if self.canStartTLS and not self.startedTLS:
            ext['STARTTLS'] = None
        return ext

    def lookupMethod(self, command):
        # Fall back to ext_<COMMAND> handlers for extension verbs.
        m = SMTP.lookupMethod(self, command)
        if m is None:
            m = getattr(self, 'ext_' + command.upper(), None)
        return m

    def listExtensions(self):
        r = []
        for (c, v) in self.extensions().iteritems():
            if v is not None:
                if v:
                    # Intentionally omit extensions with empty argument lists
                    r.append('%s %s' % (c, ' '.join(v)))
            else:
                r.append(c)
        return '\n'.join(r)

    def do_EHLO(self, rest):
        peer = self.transport.getPeer().host
        self._helo = (rest, peer)
        self._from = None
        self._to = []
        self.sendCode(
            250,
            '%s Hello %s, nice to meet you\n%s' % (
                self.host, peer,
                self.listExtensions(),
            )
        )

    def ext_STARTTLS(self, rest):
        if self.startedTLS:
            self.sendCode(503, 'TLS already negotiated')
        elif self.ctx and self.canStartTLS:
            self.sendCode(220, 'Begin TLS negotiation now')
            self.transport.startTLS(self.ctx)
            self.startedTLS = True
        else:
            self.sendCode(454, 'TLS not available')

    def ext_AUTH(self, rest):
        if self.authenticated:
            self.sendCode(503, 'Already authenticated')
            return
        parts = rest.split(None, 1)
        chal = self.challengers.get(parts[0].upper(), lambda: None)()
        if not chal:
            self.sendCode(504, 'Unrecognized authentication type')
            return

        self.mode = AUTH
        self.challenger = chal

        if len(parts) > 1:
            chal.getChallenge() # Discard it, apparently the client does not
                                # care about it.
            rest = parts[1]
        else:
            rest = None
        self.state_AUTH(rest)

    def _cbAuthenticated(self, loginInfo):
        """
        Save the state resulting from a successful cred login and mark this
        connection as authenticated.
        """
        result = SMTP._cbAnonymousAuthentication(self, loginInfo)
        self.authenticated = True
        return result

    def _ebAuthenticated(self, reason):
        """
        Handle cred login errors by translating them to the SMTP authenticate
        failed.  Translate all other errors into a generic SMTP error code and
        log the failure for inspection.  Stop all errors from propagating.
        """
        self.challenge = None
        if reason.check(cred.error.UnauthorizedLogin):
            self.sendCode(535, 'Authentication failed')
        else:
            log.err(reason, "SMTP authentication failure")
            self.sendCode(
                451,
                'Requested action aborted: local error in processing')

    def state_AUTH(self, response):
        """
        Handle one step of challenge/response authentication.

        @param response: The text of a response. If None, this
        function has been called as a result of an AUTH command with
        no initial response. A response of '*' aborts authentication,
        as per RFC 2554.
        """
        if self.portal is None:
            self.sendCode(454, 'Temporary authentication failure')
            self.mode = COMMAND
            return

        if response is None:
            challenge = self.challenger.getChallenge()
            encoded = challenge.encode('base64')
            self.sendCode(334, encoded)
            return

        if response == '*':
            self.sendCode(501, 'Authentication aborted')
            self.challenger = None
            self.mode = COMMAND
            return

        try:
            uncoded = response.decode('base64')
        except binascii.Error:
            self.sendCode(501, 'Syntax error in parameters or arguments')
            self.challenger = None
            self.mode = COMMAND
            return

        self.challenger.setResponse(uncoded)
        if self.challenger.moreChallenges():
            challenge = self.challenger.getChallenge()
            # [:-1] strips the trailing newline base64-encoding appends.
            coded = challenge.encode('base64')[:-1]
            self.sendCode(334, coded)
            return

        self.mode = COMMAND
        result = self.portal.login(
            self.challenger, None,
            IMessageDeliveryFactory, IMessageDelivery)
        result.addCallback(self._cbAuthenticated)
        result.addCallback(lambda ign: self.sendCode(235, 'Authentication successful.'))
        result.addErrback(self._ebAuthenticated)


class SenderMixin:
    """Utility class for sending emails easily.

    Use with SMTPSenderFactory or ESMTPSenderFactory.
    """
    done = 0

    def getMailFrom(self):
        # Only supply the sender once; returning None ends the transaction.
        if not self.done:
            self.done = 1
            return str(self.factory.fromEmail)
        else:
            return None

    def getMailTo(self):
        return self.factory.toEmail

    def getMailData(self):
        return self.factory.file

    def sendError(self, exc):
        # Call the base class to close the connection with the SMTP server
        SMTPClient.sendError(self, exc)
        # Do not retry to connect to SMTP Server if:
        # 1. No more retries left (This allows the correct error to be returned to the errorback)
        # 2. retry is false
        # 3. The error code is not in the 4xx range (Communication Errors)
        if (self.factory.retries >= 0 or
            (not exc.retry and not (exc.code >= 400 and exc.code < 500))):
            self.factory.sendFinished = 1
            self.factory.result.errback(exc)

    def sentMail(self, code, resp,
must be nonnegative' i = from_index v = self.vector l = len(v) o = i >> 4 s = i & 0x0F i = o << 4 while o < l: h = v[o] if h: i += s m = 1 << s while m != (1 << 0x10): if h & m: return i m <<= 1 i += 1 else: i += 0x10 s = 0 o += 1 return -1 def rank_of_bit_set_at_index(self, position): ''' For a bit that is set at the argument 'position', this method returns how many bits are set to the left of that bit. For example, in the bit pattern 000101100100, a call to this method with position set to 9 will return 4. ''' assert self[position] == 1, 'the arg bit not set' bv = self[0:position+1] return bv.count_bits() def is_power_of_2( self ): ''' Determines whether the integer value of a bit vector is a power of 2. ''' if self.intValue() == 0: return False bv = self & BitVector( intVal = self.intValue() - 1 ) if bv.intValue() == 0: return True return False isPowerOf2 = is_power_of_2 def is_power_of_2_sparse(self): ''' Faster version of is_power_of2() for sparse bit vectors ''' if self.count_bits_sparse() == 1: return True return False isPowerOf2_sparse = is_power_of_2_sparse def reverse(self): ''' Returns a new bit vector by reversing the bits in the bit vector on which the method is invoked. ''' reverseList = [] i = 1 while ( i < self.size + 1 ): reverseList.append( self[ -i ] ) i += 1 return BitVector( bitlist = reverseList ) def gcd(self, other): ''' Using Euclid's Algorithm, returns the greatest common divisor of the integer value of the bit vector on which the method is invoked and the integer value of the argument bit vector. ''' a = self.intValue(); b = other.intValue() if a < b: a,b = b,a while b != 0: a, b = b, a % b return BitVector( intVal = a ) def multiplicative_inverse(self, modulus): ''' Calculates the multiplicative inverse of a bit vector modulo the bit vector that is supplied as the argument. Code based on the Extended Euclid's Algorithm. 
''' MOD = mod = modulus.intValue(); num = self.intValue() x, x_old = 0, 1 y, y_old = 1, 0 while mod: quotient = num // mod num, mod = mod, num % mod x, x_old = x_old - x * quotient, x y, y_old = y_old - y * quotient, y if num != 1: return None else: MI = (x_old + MOD) % MOD return BitVector( intVal = MI ) def length(self): return self.size def gf_multiply(self, b): ''' In the set of polynomials defined over GF(2), multiplies the bitvector on which the method is invoked with the bitvector b. Returns the product bitvector. ''' a = self.deep_copy() b_copy = b.deep_copy() a_highest_power = a.length() - a.next_set_bit(0) - 1 b_highest_power = b.length() - b_copy.next_set_bit(0) - 1 result = BitVector( size = a.length()+b_copy.length() ) a.pad_from_left( result.length() - a.length() ) b_copy.pad_from_left( result.length() - b_copy.length() ) for i,bit in enumerate(b_copy): if bit == 1: power = b_copy.length() - i - 1 a_copy = a.deep_copy() a_copy.shift_left( power ) result ^= a_copy return result def gf_divide(self, mod, n): ''' Carries out modular division of a bitvector by the modulus bitvector mod in GF(2^n) finite field. Returns both the quotient and the remainder. 
''' num = self if mod.length() > n+1: raise ValueError("Modulus bit pattern too long") quotient = BitVector( intVal = 0, size = num.length() ) remainder = num.deep_copy() i = 0 while 1: i = i+1 if (i==num.length()): break mod_highest_power = mod.length()-mod.next_set_bit(0)-1 if remainder.next_set_bit(0) == -1: remainder_highest_power = 0 else: remainder_highest_power = remainder.length() - remainder.next_set_bit(0) - 1 if (remainder_highest_power < mod_highest_power) or int(remainder)==0: break else: exponent_shift = remainder_highest_power - mod_highest_power quotient[quotient.length()-exponent_shift-1] = 1 quotient_mod_product = mod.deep_copy(); quotient_mod_product.pad_from_left(remainder.length() - mod.length()) quotient_mod_product.shift_left(exponent_shift) remainder = remainder ^ quotient_mod_product if remainder.length() > n: remainder = remainder[remainder.length()-n:] return quotient, remainder def gf_multiply_modular(self, b, mod, n): ''' Multiplies a bitvector with the bitvector b in GF(2^n) finite field with the modulus bit pattern set to mod ''' a = self a_copy = a.deep_copy() b_copy = b.deep_copy() product = a_copy.gf_multiply(b_copy) quotient, remainder = product.gf_divide(mod, n) return remainder def gf_MI(self, mod, n): ''' Returns the multiplicative inverse of a vector in the GF(2^n) finite field with the modulus polynomial set to mod ''' num = self NUM = num.deep_copy(); MOD = mod.deep_copy() x = BitVector( size=mod.length() ) x_old = BitVector( intVal=1, size=mod.length() ) y = BitVector( intVal=1, size=mod.length() ) y_old = BitVector( size=mod.length() ) while int(mod): quotient, remainder = num.gf_divide(mod, n) num, mod = mod, remainder x, x_old = x_old ^ quotient.gf_multiply(x), x y, y_old = y_old ^ quotient.gf_multiply(y), y if int(num) != 1: return "NO MI. 
However, the GCD of ", str(NUM), " and ", \ str(MOD), " is ", str(num) else: z = x_old ^ MOD quotient, remainder = z.gf_divide(MOD, n) return remainder def runs(self): ''' Returns a list of the consecutive runs of 1's and 0's in the bit vector. Each run is either a string of all 1's or a string of all 0's. ''' if self.size == 0: raise ValueError('''An empty vector has no runs''') allruns = [] run = '' previous_bit = self[0] if previous_bit == 0: run = '0' else: run = '1' for bit in list(self)[1:]: if bit == 0 and previous_bit == 0: run += '0' elif bit == 1 and previous_bit == 0: allruns.append( run ) run = '1' elif bit == 0 and previous_bit == 1: allruns.append( run ) run = '0' else: run += '1' previous_bit = bit allruns.append( run ) return allruns def test_for_primality(self): ''' Check if the integer value of the bitvector is a prime through the Miller-Rabin probabilistic test of primality. If not found to be a composite, estimate the probability of the bitvector being a prime using this test. ''' p = int(self) probes = [2,3,5,7,11,13,17] for a in probes: if a == p: return 1 if any([p % a == 0 for a in probes]): return 0 k, q = 0, p-1 while not q&1: q >>= 1 k += 1 for a in probes: a_raised_to_q = pow(a, q, p) if a_raised_to_q == 1 or a_raised_to_q == p-1: continue a_raised_to_jq = a_raised_to_q primeflag = 0 for j in range(k-1): a_raised_to_jq = pow(a_raised_to_jq, 2, p) if a_raised_to_jq == p-1: primeflag = 1 break if not primeflag: return 0 probability_of_prime = 1 - 1.0/(4 ** len(probes)) return probability_of_prime def gen_rand_bits_for_prime(self, width): ''' The bulk of the work here is done by calling random.getrandbits( width) which returns an integer whose binary code representation will not be larger than the argument 'width'. However, when random numbers are generated as candidates for primes, you often want to make sure that the random number thus created spans the full width specified
<reponame>ValentinSilvestri/cammesa import os import re import pymongo import pandas as pd import numpy as np import streamlit as st from bokeh.plotting import figure from bokeh.palettes import Set1_9, Set3_12, Inferno256 @st.cache(suppress_st_warning=True, allow_output_mutation=True) def get_caudales(): """Function to obtain the rivers basin flows from MongoDB Atlas. Returns: DataFrame: Pandas DataFrame with the query result. """ st.spinner("Obteniendo los datos de caudales...") client = pymongo.MongoClient(os.environ['MONGO']) try: collection_name = client['publicaciones-especiales']['cuencas-datos-hidraulicos'] project={ '_id': 0, 'fecha': 1, 'situacionCuencaComahue': { '<NAME>': 1, 'Caudal Neuquen': 1, 'Caudal Limay': 1, 'Caudal Río Negro': 1, 'Caudal Limay despues desembocadura de <NAME>': 1 }, 'situacionYacyretaSaltoGrande': { 'Caudal Río Uruguay': 1, 'Caudal Río Paraná': 1 }, 'situacionCuencaPatagonica': { 'Caudal Río Chubut': 1, 'Caudal Río Futaleufu': 1 }, 'situacionCuencaRioGrande': { 'Caudal Río Grande': 1 }, 'situacionCuencaRioSanJuan': { 'Caudal Inicial Río San Juan': 1, 'Caudal Final Río San Juan': 1 } } df = pd.DataFrame(collection_name.find(projection=project)) return df except Exception as e: st.error(f'Opps, algo fallo\n{e}') finally: client.close() @st.cache(suppress_st_warning=True, allow_output_mutation=True) def get_cotas(): """Function to obtai the rivers basin levels from MongoDB Atlas. Returns: DataFrame: Pandas DataFrame with the query result. 
""" st.spinner("Obteniendo los datos de cotas...") client = pymongo.MongoClient(os.environ['MONGO']) try: collection_name = client['publicaciones-especiales']['cuencas-datos-hidraulicos'] project={ '_id': 0, 'fecha': 1, 'situacionCuencaComahue': { 'Cota Hoy Alicura': 1, 'Cota Min Alicura': 1, 'Cota Max Alicura': 1, 'Cota Hoy Mari Menuco': 1, 'Cota Min Mari Menuco': 1, 'Cota Max Mari Menuco': 1, 'Cota Hoy Piedra del Aguila': 1, 'Cota Min Piedra del Aguila': 1, 'Cota Max Piedra del Aguila': 1, 'Cota Hoy Planicie Banderita Barreales': 1, 'Cota Min Planicie Banderita Barreales': 1, 'Cota Max Planicie Banderita Barreales': 1, 'Cota Hoy Arroyito': 1, 'Cota Min Arroyito': 1, 'Cota Max Arroyito': 1, 'Cota Hoy El Chocon': 1, 'Cota Min El Chocon': 1, 'Cota Max El Chocon': 1, 'Cota Hoy P': { 'P': { 'Leufu': 1 } } }, 'situacionYacyretaSaltoGrande': { 'Cota Hoy Yacyreta': 1, 'Cota Min Yacyreta': 1, 'Cota Max Yacyreta': 1, 'Cota Hoy Salto Grande': 1, 'Cota Min Salto Grande': 1, 'Cota Max Salto Grande': 1 }, 'situacionCuencaPatagonica': { 'Cota Hoy Futaleufu': 1, 'Cota Min Futaleufu': 1, 'Cota Max Futaleufu': 1, 'Cota Hoy Ameghino': 1, 'Cota Min Ameghino': 1, 'Cota Max Ameghino': 1 }, 'situacionCuencaRioGrande': { 'Cota Hoy Río Grande': 1, 'Cota Min Río Grande': 1, 'Cota Max Río Grande': 1 }, 'situacionCuencaRioSanJuan': { 'Cota Hoy Quebrada de Ullum': 1, 'Cota Min Quebrada de Ullum': 1, 'Cota Max Quebrada de Ullum': 1, 'Cota Hoy Los Caracole': 1, 'Cota Min Los Caracoles': 1, 'Cota Max Los Caracoles': 1, 'Cota Hoy Punta Negra': 1, 'Cota Min Punta Negra': 1, 'Cota Max Punta Negra': 1 } } df = pd.DataFrame(collection_name.find(projection=project)) return df except Exception as e: st.error(f'Opps, algo fallo\n{e}') finally: client.close() @st.cache(suppress_st_warning=True, allow_output_mutation=True) def get_turbinado(): """Function to obtain the rivers basin turbinate from MongoDB Atlas. Returns: DataFrame: Pandas DataFrame with the query result. 
""" st.spinner("Obteniendo los datos de turbinado...") client = pymongo.MongoClient(os.environ['MONGO']) try: collection_name = client['publicaciones-especiales']['cuencas-datos-hidraulicos'] project={ '_id': 0, 'fecha': 1, 'situacionCuencaComahue': { 'Turbinado Alicura': 1, 'Turbinado Piedra del Aguila': 1, 'Turbinado Arroyito': 1, 'Turbinado El Chocon': 1, 'Turbinado Mari Menuco': 1, 'Turbinado P': { 'P': { 'Leufu': 1 } } }, 'situacionYacyretaSaltoGrande': { 'Turbinado Salto Grande': 1, 'Turbinado Yacyreta': 1 }, 'situacionCuencaPatagonica': { 'Turbinado Futaleufu': 1, 'Turbinado Ameghino': 1 }, 'situacionCuencaRioGrande': { 'Turbinado Río Grande': 1 }, 'situacionCuencaRioSanJuan': { 'Turbinado Punta Negra': 1, 'Turbinado Ullum': 1, 'Turbinado Los Caracoles': 1, 'Turbinado Quebrada de Ullum': 1 } } df = pd.DataFrame(collection_name.find(projection=project)) return df except Exception as e: st.error(f'Opps, algo fallo\n{e}') finally: client.close() @st.cache(suppress_st_warning=True, allow_output_mutation=True) def get_vertido(): """Function to obtain the rivers basin discharge from MongoDB Atlas. Returns: DataFrame: Pandas DataFrame with the query result. 
""" st.spinner("Obteniendo los datos de turbinado...") client = pymongo.MongoClient(os.environ['MONGO']) try: collection_name = client['publicaciones-especiales']['cuencas-datos-hidraulicos'] project={ '_id': 0, 'fecha': 1, 'situacionCuencaComahue': { 'Vertido El Chañar': 1, 'Vertido Arroyito': 1, 'Vertido Piedra del Aguila': 1, 'Vertido P': { 'P': { 'Leufu': 1 } } }, 'situacionYacyretaSaltoGrande': { 'Vertido Salto Grande': 1, 'Vertido Yacyreta': 1 }, 'situacionCuencaPatagonica': { 'Vertido Futaleufu': 1, 'Vertido Ameghino': 1 }, 'situacionCuencaRioGrande': { 'Bombeo Río Grande': 1 }, 'situacionCuencaRioSanJuan': { 'Vertido Punta Negra': 1, 'Vertido Los Caracoles': 1, 'Vertido Quebrada de Ullum': 1 } } df = pd.DataFrame(collection_name.find(projection=project)) return df except Exception as e: st.error(f'Opps, algo fallo\n{e}') finally: client.close() def caudales(): """Get the rivers basin flows and process this data. Returns: Figure: Bokeh plotting figure. DataFrame: Pandas DataFrame with the query result. 
""" df = get_caudales() df = pd.concat([ df['fecha'], pd.json_normalize(df['situacionCuencaComahue']), pd.json_normalize(df['situacionYacyretaSaltoGrande']), pd.json_normalize(df['situacionCuencaPatagonica']), pd.json_normalize(df['situacionCuencaRioGrande']), pd.json_normalize(df['situacionCuencaRioSanJuan']) ], axis=1, join="inner") df.rename(columns={ "fecha": "Fecha", "Caudal Collon Cura": "Cuenca Comahue - Caudal Collon Cura", "Caudal Neuquen": "Cuenca Comahue - Caudal Neuquen", "Caudal Limay": "Cuenca Comahue - Caudal Limay", "Caudal Río Negro": "Cuenca Comahue - Caudal Río Negro", "Caudal Limay despues desembocadura de Collon Cura": "Cuenca Comahue - Caudal Limay despues desembocadura de Collon Cura", "Caudal Río Uruguay": "Yacyreta Salto Grande - Caudal Río Uruguay", "Caudal Río Paraná": "Yacyreta Salto Grande - Caudal Río Paraná", "Caudal Río Chubut": "Cuenca Patagónica - Caudal Río Chubut", "Caudal Río Futaleufu": "Cuenca Patagónica - Caudal Río Futaleufu", "Caudal Río Grande": "Cuenca Río Grande - Caudal Río Grande", "Caudal Inicial Río San Juan": "Cuenca Río San Juan - Caudal Inicial Río San Juan", "Caudal Final Río San Juan": "Cuenca Río San Juan - Caudal Final Río San Juan" }, inplace=True) df['Fecha'] = pd.to_datetime(df['Fecha'], format='%Y/%m/%d').dt.date df = df.drop_duplicates().sort_values('Fecha', ascending=False).reset_index(drop=True) df = df.replace(0, np.nan) p = figure(x_axis_type="datetime", title="Caudales cuencas", sizing_mode="stretch_both") p.grid.grid_line_alpha=0.3 p.xaxis.axis_label = 'Fecha' p.yaxis.axis_label = 'Caudal [m\u00b3/s]' p.legend.location = "top_left" return p, df def cotas(): """Get the rivers basin levels and process this data. Returns: Figure: Bokeh plotting figure. DataFrame: Pandas DataFrame with the query result. 
""" df = get_cotas() df = pd.concat([ df['fecha'], pd.json_normalize(df['situacionCuencaComahue']), pd.json_normalize(df['situacionYacyretaSaltoGrande']), pd.json_normalize(df['situacionCuencaPatagonica']), pd.json_normalize(df['situacionCuencaRioGrande']), pd.json_normalize(df['situacionCuencaRioSanJuan']) ], axis=1, join="inner") df.rename(columns={ 'fecha': 'Fecha', 'Cota Hoy Alicura': 'Cuenca Comahue - Alicura', 'Cota Min Alicura': 'Cuenca Comahue - Min Alicura', 'Cota Max Alicura': 'Cuenca Comahue - Max Alicura', 'Cota Hoy Piedra del Aguila': 'Cuenca Comahue - Piedra del Aguil', 'Cota Min Piedra del Aguila': 'Cuenca Comahue - Min Piedra del Aguila', 'Cota Max Piedra del Aguila': 'Cuenca Comahue - Max Piedra del Aguila', 'Cota Hoy Arroyito': 'Cuenca Comahue - Arroyito', 'Cota Min Arroyito': 'Cuenca Comahue - Min Arroyito', 'Cota Max Arroyito': 'Cuenca Comahue - Max Arroyito', 'Cota Hoy Mari Menuco': 'Cuenca Comahue - Mari Menuco', 'Cota Min Mari Menuco': 'Cuenca Comahue - Min Mari Menuco', 'Cota Max Mari Menuco': 'Cuenca Comahue - Max Mari Menuco', 'Cota Hoy Planicie Banderita Barreales': 'Cuenca Comahue - Planicie Banderita Barreales', 'Cota Min Planicie Banderita Barreales': 'Cuenca Comahue - Min Planicie Banderita Barreales', 'Cota Max Planicie Banderita Barreales': 'Cuenca Comahue - Max Planicie Banderita Barreales', 'Cota Hoy El Chocon': 'Cuenca Comahue - El Chocon', 'Cota Min El Chocon': 'Cuenca Comahue - Min El Chocon', 'Cota Max El Chocon': 'Cuenca Comahue - Max El Chocon', 'Cota Hoy P.P.Leufu': 'Cuenca Comahue - Leufu', 'Cota Hoy Yacyreta': 'Cuenca Yacyreta - Yacyreta', 'Cota Min Yacyreta': 'Cuenca Yacyreta - Min Yacyreta', 'Cota Max Yacyreta': 'Cuenca Yacyreta - Max Yacyreta', 'Cota Hoy Salto Grande': 'Cuenca Yacyreta - Salto Grande', 'Cota Min Salto Grande': 'Cuenca Yacyreta - Min Salto Grande', 'Cota Max Salto Grande': 'Cuenca Yacyreta - Max Salto Grande', 'Cota Hoy Futaleufu': 'Cuenca Patagónica - Futaleufu', 'Cota Min Futaleufu': 'Cuenca 
Patagónica - Min Futaleufu', 'Cota Max Futaleufu': 'Cuenca Patagónica - Max Futaleufu', 'Cota Hoy Ameghino': 'Cuenca Patagónica - Ameghino', 'Cota Min Ameghino': 'Cuenca Patagónica - Min Ameghino', 'Cota Max Ameghino': 'Cuenca Patagónica - Max Ameghino', 'Cota Hoy Río Grande': 'Cuenca Río Grande - Río Grande', 'Cota Min Río Grande': 'Cuenca Río Grande - Min Río Grande', 'Cota Max Río Grande': 'Cuenca Río Grande - Max Río Grande', 'Cota Hoy Quebrada de Ullum': 'Cuenca Río San Juan - Quebrada de Ullum', 'Cota Min Quebrada de Ullum': 'Cuenca Río
ndata and edata are maintained by Frames. So to maintain compatibility
# with older code, DGLHeteroGraphs and other graph storages are handled separately: (1)
# DGLHeteroGraphs will preserve the lazy feature slicing for subgraphs. (2) Other graph storages
# will not have lazy feature slicing; all feature slicing will be eager.
def remove_parent_storage_columns(item, g):
    """Removes the storage objects in the given graphs' Frames if it is a sub-frame of the
    given parent graph, so that the storages are not serialized during IPC from PyTorch
    DataLoader workers.
    """
    if not isinstance(item, DGLHeteroGraph) or not isinstance(g, DGLHeteroGraph):
        return item

    for subframe, frame in zip(
            itertools.chain(item._node_frames, item._edge_frames),
            itertools.chain(g._node_frames, g._edge_frames)):
        for key in list(subframe.keys()):
            subcol = subframe._columns[key]   # directly get the column object
            if isinstance(subcol, LazyFeature):
                continue
            col = frame._columns.get(key, None)
            if col is None:
                continue
            # Same storage object as the parent frame's column -> shared with
            # the parent; drop the reference so it is not pickled over IPC.
            if col.storage is subcol.storage:
                subcol.storage = None
    return item


def restore_parent_storage_columns(item, g):
    """Restores the storage objects in the given graphs' Frames if it is a sub-frame of the
    given parent graph (i.e. when the storage object is None).
    """
    if not isinstance(item, DGLHeteroGraph) or not isinstance(g, DGLHeteroGraph):
        return item

    for subframe, frame in zip(
            itertools.chain(item._node_frames, item._edge_frames),
            itertools.chain(g._node_frames, g._edge_frames)):
        for key in subframe.keys():
            subcol = subframe._columns[key]
            if isinstance(subcol, LazyFeature):
                continue
            col = frame._columns.get(key, None)
            if col is None:
                continue
            # Inverse of remove_parent_storage_columns: re-attach the parent's
            # storage where it was stripped before IPC.
            if subcol.storage is None:
                subcol.storage = col.storage
    return item


class _PrefetchingIter(object):
    """Iterator wrapper that prefetches features for each batch, optionally on a
    background thread and/or a separate CUDA stream.
    """
    def __init__(self, dataloader, dataloader_it, use_thread=False,
                 use_alternate_streams=True, num_threads=None):
        # Queue of size 1: the producer thread stays at most one batch ahead.
        self.queue = Queue(1)
        self.dataloader_it = dataloader_it
        self.dataloader = dataloader
        self.graph_sampler = self.dataloader.graph_sampler
        self.pin_prefetcher = self.dataloader.pin_prefetcher
        self.num_threads = num_threads

        self.use_thread = use_thread
        self.use_alternate_streams = use_alternate_streams
        self._shutting_down = False
        if use_thread:
            self._done_event = threading.Event()
            thread = threading.Thread(
                target=_prefetcher_entry,
                args=(dataloader_it, dataloader, self.queue, num_threads,
                      use_alternate_streams, self._done_event),
                daemon=True)
            thread.start()
            self.thread = thread

    def __iter__(self):
        return self

    def _shutdown(self):
        # Sometimes when Python is exiting complicated operations like
        # self.queue.get_nowait() will hang.  So we set it to no-op and let Python handle
        # the rest since the thread is daemonic.
        # PyTorch takes the same solution.
        if PYTHON_EXIT_STATUS is True or PYTHON_EXIT_STATUS is None:
            return
        if not self._shutting_down:
            try:
                self._shutting_down = True
                self._done_event.set()

                try:
                    self.queue.get_nowait()     # In case the thread is blocking on put().
                except:     # pylint: disable=bare-except
                    pass

                self.thread.join()
            except:     # pylint: disable=bare-except
                pass

    def __del__(self):
        if self.use_thread:
            self._shutdown()

    def _next_non_threaded(self):
        # Synchronous path: fetch the batch, restore shared storages stripped
        # for IPC, prefetch features (optionally on a side stream), then move
        # the batch to the target device.
        batch = next(self.dataloader_it)
        batch = recursive_apply(batch, restore_parent_storage_columns, self.dataloader.graph)
        device = self.dataloader.device
        if self.use_alternate_streams:
            stream = torch.cuda.Stream(device=device) if device.type == 'cuda' else None
        else:
            stream = None
        feats = _prefetch(batch, self.dataloader, stream)
        batch = recursive_apply(batch, lambda x: x.to(device, non_blocking=True))
        stream_event = stream.record_event() if stream is not None else None
        return batch, feats, stream_event

    def _next_threaded(self):
        try:
            batch, feats, stream_event, exception = self.queue.get(timeout=prefetcher_timeout)
        except Empty:
            raise RuntimeError(
                f'Prefetcher thread timed out at {prefetcher_timeout} seconds.')
        if batch is None:
            # A None batch is the producer's end-of-iteration/error sentinel.
            self.thread.join()
            if exception is None:
                raise StopIteration
            exception.reraise()
        return batch, feats, stream_event

    def __next__(self):
        batch, feats, stream_event = \
            self._next_non_threaded() if not self.use_thread else self._next_threaded()
        # Attach the prefetched feature tensors onto the batch structure.
        batch = recursive_apply_pair(batch, feats, _assign_for)
        if stream_event is not None:
            stream_event.wait()
        return batch


# Make them classes to work with pickling in mp.spawn
class CollateWrapper(object):
    """Wraps a collate function with :func:`remove_parent_storage_columns` for serializing
    from PyTorch DataLoader workers.
    """
    def __init__(self, sample_func, g, use_uva, device):
        self.sample_func = sample_func
        self.g = g
        self.use_uva = use_uva
        self.device = device

    def __call__(self, items):
        if self.use_uva or (self.g.device != torch.device('cpu')):
            # Only copy the indices to the given device if in UVA mode or the graph
            # is not on CPU.
            items = recursive_apply(items, lambda x: x.to(self.device))
        batch = self.sample_func(self.g, items)
        return recursive_apply(batch, remove_parent_storage_columns, self.g)


class WorkerInitWrapper(object):
    """Wraps the :attr:`worker_init_fn` argument of the DataLoader to set the number of DGL
    OMP threads to 1 for PyTorch DataLoader workers.
    """
    def __init__(self, func):
        self.func = func

    def __call__(self, worker_id):
        set_num_threads(1)
        if self.func is not None:
            self.func(worker_id)


def create_tensorized_dataset(indices, batch_size, drop_last, use_ddp, ddp_seed):
    """Converts a given indices tensor to a TensorizedDataset, an IterableDataset
    that returns views of the original tensor, to reduce overhead from having
    a list of scalar tensors in default PyTorch DataLoader implementation.
    """
    if use_ddp:
        return DDPTensorizedDataset(indices, batch_size, drop_last, ddp_seed)
    else:
        return TensorizedDataset(indices, batch_size, drop_last)


def _get_device(device):
    # Normalize 'cuda' without an index to the current CUDA device.
    device = torch.device(device)
    if device.type == 'cuda' and device.index is None:
        device = torch.device('cuda', torch.cuda.current_device())
    return device


class DataLoader(torch.utils.data.DataLoader):
    """Sampled graph data loader. Wrap a :class:`~dgl.DGLGraph` and a
    :class:`~dgl.dataloading.Sampler` into an iterable over mini-batches of samples.

    DGL's ``DataLoader`` extends PyTorch's ``DataLoader`` by handling creation
    and transmission of graph samples.  It supports iterating over a set of nodes,
    edges or any kinds of indices to get samples in the form of ``DGLGraph``, message
    flow graphs (MFGS), or any other structures necessary to train a graph neural
    network.

    Parameters
    ----------
    graph : DGLGraph
        The graph.
    indices : Tensor or dict[ntype, Tensor]
        The set of indices.  It can either be a tensor of integer indices or a
        dictionary of types and indices.

        The actual meaning of the indices is defined by the :meth:`sample` method of
        :attr:`graph_sampler`.
    graph_sampler : dgl.dataloading.Sampler
        The subgraph sampler.
    device : device context, optional
        The device of the generated MFGs in each iteration, which should be a
        PyTorch device object (e.g., ``torch.device``).

        By default this value is None. If :attr:`use_uva` is True, MFGs and graphs will
        generated in torch.cuda.current_device(), otherwise generated in the same device
        of :attr:`g`.
    use_ddp : boolean, optional
        If True, tells the DataLoader to split the training set for each
        participating process appropriately using
        :class:`torch.utils.data.distributed.DistributedSampler`.

        Overrides the :attr:`sampler` argument of
        :class:`torch.utils.data.DataLoader`.
    ddp_seed : int, optional
        The seed for shuffling the dataset in
        :class:`torch.utils.data.distributed.DistributedSampler`.

        Only effective when :attr:`use_ddp` is True.
    use_uva : bool, optional
        Whether to use Unified Virtual Addressing (UVA) to directly sample the graph
        and slice the features from CPU into GPU.  Setting it to True will pin the
        graph and feature tensors into pinned memory.

        If True, requires that :attr:`indices` must have the same device as the
        :attr:`device` argument.

        Default: False.
    use_prefetch_thread : bool, optional
        (Advanced option)
        Spawns a new Python thread to perform feature slicing
        asynchronously.  Can make things faster at the cost of GPU memory.

        Default: True if the graph is on CPU and :attr:`device` is CUDA.
        False otherwise.
    use_alternate_streams : bool, optional
        (Advanced option)
        Whether to slice and transfers the features to GPU on a non-default stream.

        Default: True if the graph is on CPU, :attr:`device` is CUDA, and
        :attr:`use_uva` is False.  False otherwise.
    pin_prefetcher : bool, optional
        (Advanced option)
        Whether to pin the feature tensors into pinned memory.

        Default: True if the graph is on CPU and :attr:`device` is CUDA.
        False otherwise.
    kwargs : dict
        Key-word arguments to be passed to the parent PyTorch
        :py:class:`torch.utils.data.DataLoader` class. Common arguments are:

          - ``batch_size`` (int): The number of indices in each batch.
- ``drop_last`` (bool): Whether to drop the last incomplete batch. - ``shuffle`` (bool): Whether to randomly shuffle the indices at each epoch. Examples -------- To train a 3-layer GNN for node classification on a set of nodes ``train_nid`` on a homogeneous graph where each node takes messages from all neighbors (assume the backend is PyTorch): >>> sampler = dgl.dataloading.MultiLayerNeighborSampler([15, 10, 5]) >>> dataloader = dgl.dataloading.DataLoader( ... g, train_nid, sampler, ... batch_size=1024, shuffle=True, drop_last=False, num_workers=4) >>> for input_nodes, output_nodes, blocks in dataloader: ... train_on(input_nodes, output_nodes, blocks) **Using with Distributed Data Parallel** If you are using PyTorch's distributed training (e.g. when using :mod:`torch.nn.parallel.DistributedDataParallel`), you can train the model by turning on the `use_ddp` option: >>> sampler = dgl.dataloading.MultiLayerNeighborSampler([15, 10, 5]) >>> dataloader = dgl.dataloading.DataLoader( ... g, train_nid, sampler, use_ddp=True, ... batch_size=1024, shuffle=True, drop_last=False, num_workers=4) >>> for epoch in range(start_epoch, n_epochs): ... for input_nodes, output_nodes, blocks in dataloader: ... train_on(input_nodes, output_nodes, blocks) Notes ----- Please refer to :doc:`Minibatch Training Tutorials <tutorials/large/L0_neighbor_sampling_overview>` and :ref:`User Guide Section 6 <guide-minibatch>` for usage. **Tips for selecting the proper device** * If the input graph :attr:`g` is on GPU, the output device :attr:`device` must be the same GPU and :attr:`num_workers` must be zero. In this case, the sampling and subgraph construction will take place on the GPU. This is the recommended setting when using
dref[rid]['val'].shape[0]==dref[otherid]['val'].size if c0: dref[rid]['indother'] = None else: assert dref[rid]['indother'].ndim == 1 # Get nn val = dref[rid]['val'] # Check if is2d ltypes = [] for ax in dax.keys(): if rid in dax[ax]['ref'].keys(): ltypes.append(dax[ax]['ref'][rid]) ltypes = sorted(set(ltypes)) assert ltypes == ['2d'] or not '2d' in ltypes is2d = '2d' in ltypes dref[rid]['is2d'] = is2d # Get functions otherid = dref[rid]['otherid'] indother = dref[rid]['indother'] df_ind_pos, df_ind_key, df_pos_ind = {}, {}, {} for ax in dax.keys(): if rid in dax[ax]['ref'].keys(): typ = dax[ax]['ref'][rid] if typ == '2d': assert '2d' in dref[rid].keys() val2 = dref[rid]['2d'] is2d = True nn = (val2[0].size, val2[1].size) else: val2 = None is2d = False if isinstance(val, np.ndarray): nn = val.shape elif isinstance(val, list): nn = (len(val),) else: raise Exception("Unknown val type !") df_ind_pos[ax] = get_ind_frompos(typ, val, val2, otherid = otherid, indother = indother) dmovkeys = dax[ax]['dmovkeys'][rid] df_ind_key[ax] = get_ind_fromkey(dmovkeys, is2d = is2d, nn = nn) df_pos_ind[ax] = get_pos_fromind(val, val2, otherid = otherid, indother = indother) dref[rid]['df_ind_pos'] = df_ind_pos dref[rid]['df_ind_key'] = df_ind_key dref[rid]['df_pos_ind'] = df_pos_ind # lobj lobj = [oo for oo in dobj.keys() if rid in dobj[oo]['drefid'].keys()] dref[rid]['lobj'] = lobj # dind lref = list(dref.keys()) nref = len(lref) ancur = np.zeros((2,nref),dtype=int) ancur[1,:] = [dgroup[dref[rid]['group']]['nMax'] for rid in lref] cumsum0 = np.r_[0, np.cumsum(ancur[1,:])] arefind = np.zeros((np.sum(ancur[1,:]),),dtype=int) dind = {'lrefid':lref, 'anMaxcur':ancur, 'arefind':arefind, 'cumsum0':cumsum0} # dobj for oo in dobj.keys(): dobj[oo]['vis'] = False # get functions to update lrefid = list(dobj[oo]['drefid'].keys()) for k, v in dobj[oo]['dupdate'].items(): # Check consistency with ddata if v['id'] not in ddata.keys(): if not v['id'] in dref.keys(): msg = "Missing id in ddata or 
dref " msg += "(vs dobj[idobj]['dupdate'][k]['id']) !\n" msg += " idobj: %s\n"%oo msg += " k : %s\n"%k msg += " id : %s"%v['id'] raise Exception(msg) ddata[v['id']] = {'val':dref[v['id']]['val']} if dref[v['id']]['otherid'] is None: ddata[v['id']]['refids'] = [v['id']] else: ddata[v['id']]['refids'] = [dref[v['id']]['otherid'],v['id']] # get update tools val = ddata[v['id']]['val'] lrefs = ddata[v['id']]['refids'] linds = v['lrid'] fgetval = get_valf(val, lrefs, linds) lkv = [(kk,vv) for kk,vv in dobj[oo]['dupdate'][k].items() if kk not in ['id','lrid']] fupdate = get_fupdate(oo, k, **dict(lkv)) dobj[oo]['dupdate'][k]['fgetval'] = fgetval dobj[oo]['dupdate'][k]['fupdate'] = fupdate indrefind = get_indrefind(dind, linds, dobj[oo]['drefid']) dobj[oo]['dupdate'][k]['indrefind'] = indrefind # linds not necessarily identical ! dobj[oo]['aindvis'] = np.array([dobj[oo]['drefid'][rid] for rid in lrefid], dtype=int) indncurind = get_indncurind(dind, lrefid) dobj[oo]['indncurind'] = indncurind return dgroup, dref, dax, dobj, dind, ddata def _get_dkeys(self): dkeys = {'shift':{'val':False, 'action':'generic'}, 'control':{'val':False, 'action':'generic'}, 'ctrl':{'val':False, 'action':'generic'}, 'alt':{'val':False, 'action':'generic'}, 'left':{'val':False, 'action':'move'}, 'right':{'val':False, 'action':'move'}, 'up':{'val':False, 'action':'move'}, 'down':{'val':False, 'action':'move'}} # 'pageup':{'val':False, 'action':'move'}, # 'pagedown':{'val':False, 'action':'move'}} dkeys.update(dict([(v['key'],{'group':k, 'val':False, 'action':'group'}) for k,v in self.dgroup.items()])) nMax = np.max([v['nMax'] for v in self.dgroup.values()]) dkeys.update(dict([(str(ii),{'ind':ii, 'val':False, 'action':'indices'}) for ii in range(0,nMax)])) return dkeys def disconnect_old(self, force=False): if self._warn_ifnotInteractive(): return if force: self.can.mpl_disconnect(self.can.manager.key_press_handler_id) else: lk = [kk for kk in list(plt.rcParams.keys()) if 'keymap' in kk] 
self.store_rcParams = {} for kd in self.dkeys.keys(): self.store_rcParams[kd] = [] for kk in lk: if kd in plt.rcParams[kk]: self.store_rcParams[kd].append(kk) plt.rcParams[kk].remove(kd) self.can.mpl_disconnect(self.can.button_pick_id) def reconnect_old(self): if self._warn_ifnotInteractive(): return if self.store_rcParams is not None: for kd in self.store_rcParams.keys(): for kk in self.store_rcParams[kk]: if kd not in plt.rcParams[kk]: plt.rcParams[kk].append(kd) def connect(self): if self._warn_ifnotInteractive(): return keyp = self.can.mpl_connect('key_press_event', self.onkeypress) keyr = self.can.mpl_connect('key_release_event', self.onkeypress) butp = self.can.mpl_connect('button_press_event', self.mouseclic) res = self.can.mpl_connect('resize_event', self.resize) #butr = self.can.mpl_connect('button_release_event', self.mouserelease) #if not plt.get_backend() == "agg": self.can.manager.toolbar.release = self.mouserelease self._cid = {'keyp':keyp, 'keyr':keyr, 'butp':butp, 'res':res}#, 'butr':butr} def disconnect(self): if self._warn_ifnotInteractive(): return for kk in self._cid.keys(): self.can.mpl_disconnect(self._cid[kk]) self.can.manager.toolbar.release = lambda event: None def resize(self, event): self._set_dBck(self.dax.keys()) def _set_dBck(self, lax): # Make all invisible for ax in lax: for obj in self.dax[ax]['lobj']: obj.set_visible(False) # Draw and reset Bck self.can.draw() for ax in lax: #ax.draw(self.can.renderer) self.dax[ax]['Bck'] = self.can.copy_from_bbox(ax.bbox) # Redraw for ax in lax: for obj in self.dax[ax]['lobj']: obj.set_visible(self.dobj[obj]['vis']) #ax.draw(self.can.renderer) self.can.draw() def init(self, dgroup=None, ngroup=None, dobj=None): pass def update(self, excluderef=True): self._update_dcur() # 0.4 ms self._update_dref(excluderef=excluderef) # 0.1 ms self._update_dobj() # 0.2 s def _update_dcur(self): group = self.dcur['group'] refid = self.dcur['refid'] assert self.dref[refid]['group'] == group assert refid in 
self.dax[self.dcur['ax']]['graph'].keys() # Update also dind ! an = [self.dgroup[self.dref[rid]['group']]['ncur'] for rid in self.dind['lrefid']] self.dind['anMaxcur'][0,:] = an # Update array ncur for obj in self.dgroup[group]['lobj']: a0 = self.dind['anMaxcur'][0,self.dobj[obj]['indncurind']] a1 = self.dobj[obj]['aindvis'] self.dobj[obj]['vis'] = np.all( a0 >= a1 ) def _update_dref(self, excluderef=True): group = self.dcur['group'] ind = self.dgroup[group]['indcur'] val = self.dgroup[group]['valind'][ind,:] if excluderef and len(self.dgroup[group]['lrefid'])>1: for rid in self.dgroup[group]['lrefid']: if rid == self.dcur['refid']: continue if self.dref[rid]['otherid'] is None: indother = None else: group2 = self.dref[self.dref[rid]['otherid']]['group'] ind2 = self.dgroup[group2]['indcur'] indother = self.dref[self.dref[rid]['otherid']]['ind'][ind2] lax = list(self.dref[rid]['df_ind_pos'].keys()) if len(lax) == 0: msg = "A ref has no associated ax !\n" msg += " - group: %s\n"%group msg += " - rid : %s"%rid raise Exception(msg) ii = self.dref[rid]['df_ind_pos'][lax[0]](val, indother) if self._follow: self.dref[rid]['ind'][ind:] = ii else: self.dref[rid]['ind'][ind] = ii else: for rid in self.dgroup[group]['lrefid']: if self.dref[rid]['otherid'] is None: indother = None else: group2 = self.dref[self.dref[rid]['otherid']]['group'] ind2 = self.dgroup[group2]['indcur'] indother = self.dref[self.dref[rid]['otherid']]['ind'][ind2] lax = list(self.dref[rid]['df_ind_pos'].keys()) if len(lax) == 0: msg = "A ref has no associated ax !\n" msg += " - group: %s\n"%group msg += " - rid : %s"%rid raise Exception(msg) ii = self.dref[rid]['df_ind_pos'][lax[0]](val, indother) if self._follow: self.dref[rid]['ind'][ind:] = ii else: self.dref[rid]['ind'][ind] = ii # Update dind['arefind'] for ii in range(0,len(self.dind['lrefid'])): rid = self.dind['lrefid'][ii] i0 = self.dind['cumsum0'][ii] i1 = i0 + self.dgroup[self.dref[rid]['group']]['nMax'] self.dind['arefind'][i0:i1] = 
self.dref[rid]['ind'] def _update_dobj(self): # --- Prepare ----- 2 us group = self.dcur['group'] refid = self.dcur['refid'] indcur = self.dgroup[group]['indcur'] lax = self.dgroup[group]['lax'] # ---- Restore backgrounds ---- 1 ms for aa in lax: self.can.restore_region(self.dax[aa]['Bck']) # ---- update data of group objects ---- 0.15 s for obj in self.dgroup[group]['d2obj'][indcur]: for k in self.dobj[obj]['dupdate'].keys(): ii = self.dobj[obj]['dupdate'][k]['indrefind'] # 20 us li = self.dind['arefind'][ii] # 50 us val = self.dobj[obj]['dupdate'][k]['fgetval']( li ) # 0.0001 s self.dobj[obj]['dupdate'][k]['fupdate']( val ) # 2 ms # --- Redraw all objects (due to background restore) --- 25 ms for obj in self.dobj.keys(): obj.set_visible(self.dobj[obj]['vis']) self.dobj[obj]['ax'].draw_artist(obj) # ---- blit axes ------ 5 ms for aa in lax: self.can.blit(aa.bbox) def mouseclic(self,event): # Check click is relevant C0 = event.inaxes is not None and event.button == 1 if not C0: return self.curax_panzoom = event.inaxes # DB ? # Check axes is relevant and toolbar not active c_activeax = 'fix' not in self.dax[event.inaxes].keys() c_toolbar = self.can.manager.toolbar._active in [None,False] if not all([c_activeax,c_toolbar]): return # Set self.dcur self.dcur['ax'] = event.inaxes lrid = list(self.dax[event.inaxes]['graph'].keys()) if len(lrid)>1: lg = [self.dref[rid]['group'] for rid in lrid] if self.dcur['group'] in lg: rid = lrid[lg.index(self.dcur['group'])] else: rid = self.dax[event.inaxes]['defrefid'] else: rid= lrid[0] self.dcur['refid'] = rid self.dcur['group'] = self.dref[self.dcur['refid']]['group'] group = self.dcur['group'] ax = self.dcur['ax'] refid = self.dcur['refid'] # Check max number of occurences not reached if shift c0 = (self.dkeys['shift']['val'] and self.dgroup[group]['indcur'] == self.dgroup[group]['nMax']-1) if c0: msg = "Max nb. 
of plots reached ({0}) for group {1}" msg = msg.format(self.dgroup[group]['nMax'], group) print(msg) return # Update indcur ctrl = self.dkeys['control']['val'] or self.dkeys['ctrl']['val'] if ctrl: nn = 0 ii = 0 elif self.dkeys['shift']['val']: nn = int(self.dgroup[group]['ncur'])+1 ii = nn else: nn = int(self.dgroup[group]['ncur']) ii = int(self.dgroup[group]['indcur']) # Update dcur self.dgroup[group]['ncur'] = nn self.dgroup[group]['indcur'] = ii # Update group val val = (event.xdata, event.ydata) if self._follow: self.dgroup[group]['valind'][ii:,:] = val else: self.dgroup[group]['valind'][ii,:] = val self.update(excluderef=False) def mouserelease(self, event): msg = "Make sure you release the mouse button on an axes !" msg += "\n Otherwise the background plot cannot be properly updated !" c0 = self.can.manager.toolbar._active == 'PAN' c1 = self.can.manager.toolbar._active == 'ZOOM' if c0 or c1: ax = self.curax_panzoom assert ax is not None, msg lax = ax.get_shared_x_axes().get_siblings(ax) lax += ax.get_shared_y_axes().get_siblings(ax) lax = list(set(lax)) self._set_dBck(lax) def onkeypress(self, event): lkey = event.key.split('+') c0 = self.can.manager.toolbar._active is not None c1 = len(lkey) not in [1,2] c2 = [ss not in self.dkeys.keys() for ss in lkey] if c0 or c1 or any(c2): return lgen = [k for k in self.dkeys_r['generic'] if k in lkey] lmov = [k for k in self.dkeys_r['move'] if k in lkey] lgrp = [k for k in self.dkeys_r['group'] if k in lkey] lind = [k for k in self.dkeys_r['indices'] if k in lkey] ngen, nmov, ngrp, nind = len(lgen),
""" Harness for GP Bandit Optimisation. -- <EMAIL> """ from __future__ import division import numpy as np from argparse import Namespace from . import gpb_acquisitions from .blackbox_optimiser import blackbox_opt_args, BlackboxOptimiser, \ CalledMFOptimiserWithSFCaller # Local imports from ..exd import domains from ..exd.cp_domain_utils import get_processed_func_from_raw_func_for_cp_domain, \ load_cp_domain_from_config_file, load_config_file from ..exd.exd_core import mf_exd_args from ..exd.exd_utils import get_euclidean_initial_qinfos, get_cp_domain_initial_qinfos from ..exd.experiment_caller import CPFunctionCaller, get_multifunction_caller_from_config from ..exd.worker_manager import SyntheticWorkerManager from ..gp.cartesian_product_gp import cartesian_product_gp_args, \ cartesian_product_mf_gp_args, \ CPGPFitter, CPMFGPFitter from ..gp.euclidean_gp import EuclideanGPFitter, euclidean_gp_args, \ EuclideanMFGPFitter, euclidean_mf_gp_args from ..utils.general_utils import block_augment_array, get_idxs_from_list_of_lists from ..utils.option_handler import get_option_specs, load_options from ..utils.reporters import get_reporter # pylint: disable=invalid-name # pylint: disable=no-member # pylint: disable=redefined-builtin # pylint: disable=unbalanced-tuple-unpacking gp_bandit_args = [ \ get_option_specs('acq', False, 'default', \ 'Which acquisition to use: ts, ucb, ei, ttei, bucb. If using multiple ' + \ 'give them as a hyphen separated list e.g. ucb-ts-ei-ttei'), get_option_specs('acq_probs', False, 'adaptive', \ 'With what probability should we choose each strategy given in acq. If "uniform" ' + \ 'we we will use uniform probabilities and if "adaptive" we will use adaptive ' + \ 'probabilities which weight acquisitions according to how well they do.'), get_option_specs('acq_opt_method', False, 'default', \ 'Which optimiser to use when maximising the acquisition function.'), get_option_specs('handle_parallel', False, 'halluc', \ 'How to handle parallelisations. 
Should be halluc or naive.'), get_option_specs('acq_opt_max_evals', False, -1, \ 'Number of evaluations when maximising acquisition. If negative uses default value.'), # The following are for managing GP hyper-parameters. They override hp_tune_criterion # and ml_hp_tune_opt from the GP args. get_option_specs('gpb_hp_tune_criterion', False, 'ml-post_sampling', 'Which criterion to use when tuning hyper-parameters. Other ' + 'options are post_sampling and post_mean.'), get_option_specs('gpb_hp_tune_probs', False, '0.3-0.7', \ 'With what probability should we choose each strategy given in hp_tune_criterion.' + \ 'If "uniform" we we will use uniform probabilities and if "adaptive" we will use ' + \ 'adaptive probabilities which weight acquisitions according to how well they do.'), get_option_specs('gpb_ml_hp_tune_opt', False, 'default', 'Which optimiser to use when maximising the tuning criterion.'), get_option_specs('gpb_post_hp_tune_method', False, 'slice', 'Which sampling to use when maximising the tuning criterion. Other ' + 'option is nuts'), get_option_specs('gpb_post_hp_tune_burn', False, -1, 'How many initial samples to ignore during sampling.'), get_option_specs('gpb_post_hp_tune_offset', False, 25, 'How many samples to ignore between samples.'), get_option_specs('rand_exp_sampling_replace', False, False, \ 'Whether to replace already sampled values or not in rand_exp_sampling.'), # For multi-fidelity BO get_option_specs('mf_strategy', False, 'boca', 'Which multi-fidelity strategy to use. Should be one of {boca}.'), # Mean of the GP get_option_specs('gpb_prior_mean', False, None, 'The prior mean of the GP for the model.'), # The following are perhaps not so important. Some have not been implemented yet. 
get_option_specs('shrink_kernel_with_time', False, 0, 'If True, shrinks the kernel with time so that we don\'t get stuck.'), get_option_specs('perturb_thresh', False, 1e-4, \ ('If the next point chosen is too close to an exisiting point by this times the ' \ 'diameter, then we will perturb the point a little bit before querying. This is ' \ 'mainly to avoid numerical stability issues.')), get_option_specs('track_every_time_step', False, 0, ('If 1, it tracks every time step.')), get_option_specs('next_pt_std_thresh', False, 0.005, ('If the std of the queried point queries below this times the kernel scale ', \ 'frequently we will reduce the bandwidth range')), # Miscellanneous get_option_specs('nn_report_results_every', False, 1, ('If NN variables are present, report results more frequently')), ] mf_gp_bandit_args = [ \ get_option_specs('target_fidel_to_opt_query_frac_max', False, 0.5, ('A target to maintain on the number of queries to fidel_to_opt.')), get_option_specs('target_fidel_to_opt_query_frac_min', False, 0.25, ('A target to maintain on the number of queries to fidel_to_opt.')), get_option_specs('boca_thresh_window_length', False, 20, \ ('The window length to keep checking if the target fidel_to_opt is achieved.')), get_option_specs('boca_thresh_coeff_init', False, 0.01, ('The coefficient to use in determining the threshold for boca.')), get_option_specs('boca_thresh_multiplier', False, 1.1, \ ('The amount by which to multiply/divide the threshold coeff for boca.')), get_option_specs('boca_max_low_fidel_cost_ratio', False, 0.90, \ ('If the fidel_cost_ratio is larger than this, just query at fidel_to_opt.')), \ ] euclidean_specific_gp_bandit_args = [ \ get_option_specs('euc_init_method', False, 'latin_hc', \ 'Method to obtain initial queries. Is used if get_initial_qinfos is None.'), \ ] def get_all_gp_bandit_args(additional_args): """ Returns the GP bandit arguments from the arguments for the GP. 
""" return additional_args + blackbox_opt_args + gp_bandit_args def get_all_mf_gp_bandit_args(additional_args): """ Returns the GP bandit arguments from the arguments for the GP. """ return additional_args + blackbox_opt_args + gp_bandit_args + mf_exd_args + \ mf_gp_bandit_args def get_all_euc_gp_bandit_args(additional_args=None): """ Returns all GP bandit arguments. """ if additional_args is None: additional_args = [] return get_all_gp_bandit_args(additional_args) + euclidean_gp_args + \ euclidean_specific_gp_bandit_args def get_all_mf_euc_gp_bandit_args(additional_args=None): """ Returns all GP bandit arguments. """ if additional_args is None: additional_args = [] return get_all_mf_gp_bandit_args(additional_args) + euclidean_mf_gp_args + \ euclidean_specific_gp_bandit_args def get_all_cp_gp_bandit_args(additional_args=None): """ Returns all Cartesian Product GP bandit arguments. """ if additional_args is None: additional_args = [] return get_all_gp_bandit_args(additional_args) + cartesian_product_gp_args def get_all_mf_cp_gp_bandit_args(additional_args=None): """ Returns all Cartesian Product GP bandit arguments. """ if additional_args is None: additional_args = [] return get_all_mf_gp_bandit_args(additional_args) + cartesian_product_mf_gp_args def get_default_acquisition_for_domain(domain): """ Returns the default acquisition for the domain. """ if domain.get_type() == 'euclidean': return 'ei-ucb-ttei-add_ucb' else: return 'ei-ucb-ttei' def get_default_acq_opt_method_for_domain(domain): """ Returns the default acquisition optimisation method for the domain. 
""" if domain.get_type() == 'euclidean': if domain.get_dim() > 60: return 'pdoo' else: return 'direct' elif domain.get_type() == 'cartesian_product': if all([dom.get_type() == 'euclidean' for dom in domain.list_of_domains]) and \ (not domain.has_constraints()): if domain.get_dim() > 60: return 'pdoo' else: return 'direct' else: return 'ga' # The GPBandit Class # ======================================================================================== class GPBandit(BlackboxOptimiser): """ GPBandit Class. """ # pylint: disable=attribute-defined-outside-init # Constructor. def __init__(self, func_caller, worker_manager=None, is_mf=False, options=None, reporter=None, ask_tell_mode=False): """ Constructor. """ self._is_mf = is_mf if is_mf and not func_caller.is_mf(): raise CalledMFOptimiserWithSFCaller(self, func_caller) super(GPBandit, self).__init__(func_caller, worker_manager, None, options=options, reporter=reporter, ask_tell_mode=ask_tell_mode) def is_an_mf_method(self): """ Returns Truee since this is a MF method. """ return self._is_mf def _get_method_str(self): """ Returns a string describing the method. """ gpb_str = 'mfbo-%s' % (self.options.mf_strategy) if self.is_an_mf_method() else 'bo' return '%s(%s)' % (gpb_str, '-'.join(self.acqs_to_use)) def _opt_method_set_up(self): """ Some set up for the GPBandit class. 
""" # Set up acquisition optimisation self.gp = None # Set up for acquisition optimisation and then acquisition self._set_up_acq_opt() self._set_up_for_acquisition() # Override options for hp_tune_criterion and ml_hp_tune_opt self.options.hp_tune_criterion = self.options.gpb_hp_tune_criterion self.options.hp_tune_probs = self.options.gpb_hp_tune_probs self.options.ml_hp_tune_opt = self.options.gpb_ml_hp_tune_opt self.options.post_hp_tune_method = self.options.gpb_post_hp_tune_method self.options.post_hp_tune_burn = self.options.gpb_post_hp_tune_burn self.options.post_hp_tune_offset = self.options.gpb_post_hp_tune_offset # To store in history self.history.query_acqs = [] self.to_copy_from_qinfo_to_history['curr_acq'] = 'query_acqs' # For multi-fidelity if self.is_an_mf_method(): self.mf_params_for_anc_data = {} if self.options.mf_strategy == 'boca': self.mf_params_for_anc_data['boca_thresh_coeff'] = \ self.options.boca_thresh_coeff_init self.mf_params_for_anc_data['boca_max_low_fidel_cost_ratio'] = \ self.options.boca_max_low_fidel_cost_ratio self._child_opt_method_set_up() def _set_up_for_acquisition(self): """ Set up for acquisition. 
""" if self.options.acq == 'default': acq = self._get_default_acquisition_for_domain(self.domain) else: acq = self.options.acq self.acqs_to_use = [elem.lower() for elem in acq.split('-')] self.acqs_to_use_counter = {key: 0 for key in self.acqs_to_use} if self.options.acq_probs == 'uniform': self.acq_probs = np.ones(len(self.acqs_to_use)) / float(len(self.acqs_to_use)) elif self.options.acq_probs == 'adaptive': self.acq_uniform_sampling_prob = 0.05 self.acq_sampling_weights = {key: 1.0 for key in self.acqs_to_use} self.acq_probs = self._get_adaptive_ensemble_acq_probs() else: self.acq_probs = np.array([float(x) for x in self.options.acq_probs.split('-')]) self.acq_probs = self.acq_probs / self.acq_probs.sum() assert len(self.acq_probs) == len(self.acqs_to_use) @classmethod def _get_default_acquisition_for_domain(cls, domain): """ Return default acqusition for domain. """ return get_default_acquisition_for_domain(domain) def _child_opt_method_set_up(self): """ Set up for child class. Override this method in child class""" pass def _get_adaptive_ensemble_acq_probs(self): """ Computes the adaptive ensemble acqusitions probs. """ num_acqs = len(self.acqs_to_use) uniform_sampling_probs = self.acq_uniform_sampling_prob * \ np.ones((num_acqs,)) / num_acqs acq_succ_counter = np.array([self.acq_sampling_weights[key] for key in self.acqs_to_use]) acq_use_counter = np.array([self.acqs_to_use_counter[key] for key in self.acqs_to_use]) acq_weights = acq_succ_counter / np.sqrt(1 + acq_use_counter) acq_norm_weights = acq_weights / acq_weights.sum() adaptive_sampling_probs = (1 - self.acq_uniform_sampling_prob) * acq_norm_weights ret = uniform_sampling_probs + adaptive_sampling_probs return ret / ret.sum() def _set_up_acq_opt(self): """ Sets up optimisation for acquisition. """ # First set up function to get maximum evaluations. 
if isinstance(self.options.acq_opt_max_evals, int): if self.options.acq_opt_max_evals > 0: self.get_acq_opt_max_evals = lambda t: self.options.acq_opt_max_evals else: self.get_acq_opt_max_evals = None else: # In this case, the user likely passed a function here. self.get_acq_opt_max_evals = self.options.acq_opt_max_evals # Additional set up based on the specific optimisation procedure if self.options.acq_opt_method == 'default': acq_opt_method = get_default_acq_opt_method_for_domain(self.domain) else: acq_opt_method = self.options.acq_opt_method self.acq_opt_method = acq_opt_method self._domain_specific_acq_opt_set_up() def _opt_method_update_history(self, qinfo): """ Update history for GP bandit specific statistics. """ if hasattr(qinfo, 'curr_acq'): self.acqs_to_use_counter[qinfo.curr_acq] += 1 if self.options.acq_probs == 'adaptive' and \ (len(self.history.curr_opt_vals) >= 2 and self.history.curr_opt_vals[-1] > self.history.curr_opt_vals[-2]): self.acq_sampling_weights[qinfo.curr_acq] += 1 if hasattr(self, 'gp_processor') and hasattr(qinfo, 'hp_tune_method') and \ (len(self.history.curr_opt_vals) >= 2 and \
# <gh_stars>1-10  (scraper artifact preserved from the source fragment)
# -*- coding: utf-8 -*-
"""
.. module:: volume
    :synopsis: Volume Indicators.

.. moduleauthor:: <NAME> (Bukosabino)
"""
import numpy as np
import pandas as pd

from .utils import get_drift, get_offset, signed_series, verify_series
from .momentum import roc
from .overlap import hl2, hlc3, ema


def ad(high, low, close, volume, open_=None, offset=None, **kwargs):
    """Indicator: Accumulation/Distribution (AD)"""
    # Validate Arguments
    high = verify_series(high)
    low = verify_series(low)
    close = verify_series(close)
    volume = verify_series(volume)
    offset = get_offset(offset)

    # Calculate Result
    if open_ is not None:
        open_ = verify_series(open_)
        ad = close - open_  # AD with Open
    else:
        ad = 2 * close - high - low  # AD with High, Low, Close

    # NOTE(review): bars with high == low produce a zero range and hence
    # inf/NaN here; behavior kept as-is.
    hl_range = high - low
    ad *= volume / hl_range
    ad = ad.cumsum()

    # Offset
    if offset != 0:
        ad = ad.shift(offset)

    # Handle fills
    if 'fillna' in kwargs:
        ad.fillna(kwargs['fillna'], inplace=True)
    if 'fill_method' in kwargs:
        ad.fillna(method=kwargs['fill_method'], inplace=True)

    # Name and Categorize it
    ad.name = f"AD"
    ad.category = 'volume'

    return ad


def adosc(high, low, close, volume, open_=None, fast=None, slow=None, offset=None, **kwargs):
    """Indicator: Accumulation/Distribution Oscillator"""
    # Validate Arguments
    high = verify_series(high)
    low = verify_series(low)
    close = verify_series(close)
    volume = verify_series(volume)
    fast = int(fast) if fast and fast > 0 else 12
    slow = int(slow) if slow and slow > 0 else 26
    if slow < fast:
        # Keep fast < slow so the oscillator's sign convention holds.
        fast, slow = slow, fast
    offset = get_offset(offset)

    # Calculate Result
    ad_ = ad(high=high, low=low, close=close, volume=volume, open_=open_)
    fast_ad = ema(close=ad_, length=fast)
    slow_ad = ema(close=ad_, length=slow)
    adosc = fast_ad - slow_ad

    # Offset
    if offset != 0:
        adosc = adosc.shift(offset)

    # Handle fills
    if 'fillna' in kwargs:
        adosc.fillna(kwargs['fillna'], inplace=True)
    if 'fill_method' in kwargs:
        adosc.fillna(method=kwargs['fill_method'], inplace=True)

    # Name and Categorize it
    adosc.name = f"ADOSC_{fast}_{slow}"
    adosc.category = 'volume'

    return adosc


def cmf(high, low, close, volume, open_=None, length=None, offset=None, **kwargs):
    """Indicator: Chaikin Money Flow (CMF)"""
    # Validate Arguments
    high = verify_series(high)
    low = verify_series(low)
    close = verify_series(close)
    volume = verify_series(volume)
    length = int(length) if length and length > 0 else 20
    min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
    offset = get_offset(offset)

    # Calculate Result
    if open_ is not None:
        open_ = verify_series(open_)
        ad = close - open_  # AD with Open
    else:
        ad = 2 * close - high - low  # AD with High, Low, Close

    hl_range = high - low
    ad *= volume / hl_range
    cmf = ad.rolling(length, min_periods=min_periods).sum() / \
          volume.rolling(length, min_periods=min_periods).sum()

    # Offset
    if offset != 0:
        cmf = cmf.shift(offset)

    # Handle fills
    if 'fillna' in kwargs:
        cmf.fillna(kwargs['fillna'], inplace=True)
    if 'fill_method' in kwargs:
        cmf.fillna(method=kwargs['fill_method'], inplace=True)

    # Name and Categorize it
    cmf.name = f"CMF_{length}"
    cmf.category = 'volume'

    return cmf


def efi(close, volume, length=None, drift=None, mamode=None, offset=None, **kwargs):
    """Indicator: Elder's Force Index (EFI)"""
    # Validate arguments
    close = verify_series(close)
    volume = verify_series(volume)
    length = int(length) if length and length > 0 else 13
    min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
    drift = get_drift(drift)
    mamode = mamode.lower() if mamode else None
    offset = get_offset(offset)

    # Calculate Result
    pv_diff = close.diff(drift) * volume
    if mamode == 'sma':
        efi = pv_diff.rolling(length, min_periods=min_periods).mean()
    else:
        # Default smoothing is exponential.
        efi = pv_diff.ewm(span=length, min_periods=min_periods).mean()

    # Offset
    if offset != 0:
        efi = efi.shift(offset)

    # Handle fills
    if 'fillna' in kwargs:
        efi.fillna(kwargs['fillna'], inplace=True)
    if 'fill_method' in kwargs:
        efi.fillna(method=kwargs['fill_method'], inplace=True)

    # Name and Categorize it
    efi.name = f"EFI_{length}"
    efi.category = 'volume'

    return efi


def eom(high, low, close, volume, length=None, divisor=None, drift=None, offset=None, **kwargs):
    """Indicator: Ease of Movement (EOM)"""
    # Validate arguments
    high = verify_series(high)
    low = verify_series(low)
    close = verify_series(close)
    volume = verify_series(volume)
    length = int(length) if length and length > 0 else 14
    min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
    divisor = divisor if divisor and divisor > 0 else 100000000
    drift = get_drift(drift)
    offset = get_offset(offset)

    # Calculate Result
    hl_range = high - low
    distance = hl2(high=high, low=low) - hl2(high=high.shift(drift), low=low.shift(drift))
    box_ratio = (volume / divisor) / hl_range
    eom = distance / box_ratio
    eom = eom.rolling(length, min_periods=min_periods).mean()

    # Offset
    if offset != 0:
        eom = eom.shift(offset)

    # Handle fills
    if 'fillna' in kwargs:
        eom.fillna(kwargs['fillna'], inplace=True)
    if 'fill_method' in kwargs:
        eom.fillna(method=kwargs['fill_method'], inplace=True)

    # Name and Categorize it
    eom.name = f"EOM_{length}_{divisor}"
    eom.category = 'volume'

    return eom


def mfi(high, low, close, volume, length=None, drift=None, offset=None, **kwargs):
    """Indicator: Money Flow Index (MFI)"""
    # Validate arguments
    high = verify_series(high)
    low = verify_series(low)
    close = verify_series(close)
    volume = verify_series(volume)
    length = int(length) if length and length > 0 else 14
    drift = get_drift(drift)
    offset = get_offset(offset)

    # Calculate Result
    typical_price = hlc3(high=high, low=low, close=close)
    raw_money_flow = typical_price * volume

    # Split raw money flow into positive / negative components based on the
    # direction of the typical price.
    tdf = pd.DataFrame({'diff': 0, 'rmf': raw_money_flow, '+mf': 0, '-mf': 0})
    tdf.loc[(typical_price.diff(drift) > 0), 'diff'] = 1
    tdf.loc[tdf['diff'] == 1, '+mf'] = raw_money_flow

    tdf.loc[(typical_price.diff(drift) < 0), 'diff'] = -1
    tdf.loc[tdf['diff'] == -1, '-mf'] = raw_money_flow

    psum = tdf['+mf'].rolling(length).sum()
    nsum = tdf['-mf'].rolling(length).sum()
    tdf['mr'] = psum / nsum
    mfi = 100 * psum / (psum + nsum)
    tdf['mfi'] = mfi

    # Offset
    if offset != 0:
        mfi = mfi.shift(offset)

    # Handle fills
    if 'fillna' in kwargs:
        mfi.fillna(kwargs['fillna'], inplace=True)
    if 'fill_method' in kwargs:
        mfi.fillna(method=kwargs['fill_method'], inplace=True)

    # Name and Categorize it
    # NOTE(review): category is 'momentum' although this lives in the volume
    # module; kept as-is since downstream code may key on it.
    mfi.name = f"MFI_{length}"
    mfi.category = 'momentum'

    return mfi


def nvi(close, volume, length=None, initial=None, offset=None, **kwargs):
    """Indicator: Negative Volume Index (NVI)"""
    # Validate arguments
    close = verify_series(close)
    volume = verify_series(volume)
    length = int(length) if length and length > 0 else 1
    min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
    initial = int(initial) if initial and initial > 0 else 1000
    offset = get_offset(offset)

    # Calculate Result
    roc_ = roc(close=close)
    signed_volume = signed_series(volume, initial=1)
    # Only bars where volume decreased contribute to the index.
    nvi = signed_volume[signed_volume < 0].abs() * roc_
    nvi.fillna(0, inplace=True)
    nvi.iloc[0] = initial
    nvi = nvi.cumsum()

    # Offset
    if offset != 0:
        nvi = nvi.shift(offset)

    # Handle fills
    if 'fillna' in kwargs:
        nvi.fillna(kwargs['fillna'], inplace=True)
    if 'fill_method' in kwargs:
        nvi.fillna(method=kwargs['fill_method'], inplace=True)

    # Name and Categorize it
    nvi.name = f"NVI_{length}"
    nvi.category = 'volume'

    return nvi


def obv(close, volume, offset=None, **kwargs):
    """Indicator: On Balance Volume (OBV)"""
    # Validate arguments
    close = verify_series(close)
    volume = verify_series(volume)
    offset = get_offset(offset)

    # Calculate Result
    signed_volume = signed_series(close, initial=1) * volume
    obv = signed_volume.cumsum()

    # Offset
    if offset != 0:
        obv = obv.shift(offset)

    # Handle fills
    if 'fillna' in kwargs:
        obv.fillna(kwargs['fillna'], inplace=True)
    if 'fill_method' in kwargs:
        obv.fillna(method=kwargs['fill_method'], inplace=True)

    # Name and Categorize it
    # NOTE(review): the source fragment is truncated mid-assignment at
    # ``obv.name``; completed per the uniform pattern of the sibling
    # indicators above -- confirm against the original file.
    obv.name = f"OBV"
    obv.category = 'volume'

    return obv
= f"OBV" obv.category = 'volume' return obv def pvol(close, volume, signed=True, offset=None, **kwargs): """Indicator: Price-Volume (PVOL)""" # Validate arguments close = verify_series(close) volume = verify_series(volume) offset = get_offset(offset) # Calculate Result if signed: pvol = signed_series(close, 1) * close * volume else: pvol = close * volume # Offset if offset != 0: pvol = pvol.shift(offset) # Handle fills if 'fillna' in kwargs: pvol.fillna(kwargs['fillna'], inplace=True) if 'fill_method' in kwargs: pvol.fillna(method=kwargs['fill_method'], inplace=True) # Name and Categorize it pvol.name = f"PVOL" pvol.category = 'volume' return pvol def pvt(close, volume, drift=None, offset=None, **kwargs): """Indicator: Price-Volume Trend (PVT)""" # Validate arguments close = verify_series(close) volume = verify_series(volume) drift = get_drift(drift) offset = get_offset(offset) # Calculate Result pv = roc(close=close, length=drift) * volume pvt = pv.cumsum() # Offset if offset != 0: pvt = pvt.shift(offset) # Handle fills if 'fillna' in kwargs: pvt.fillna(kwargs['fillna'], inplace=True) if 'fill_method' in kwargs: pvt.fillna(method=kwargs['fill_method'], inplace=True) # Name and Categorize it pvt.name = f"PVT" pvt.category = 'volume' return pvt # Legacy Code def acc_dist_index_depreciated(high, low, close, volume, fillna=False): """Accumulation/Distribution Index (ADI) Acting as leading indicator of price movements. https://en.wikipedia.org/wiki/Accumulation/distribution_index Args: high(pandas.Series): dataset 'High' column. low(pandas.Series): dataset 'Low' column. close(pandas.Series): dataset 'Close' column. volume(pandas.Series): dataset 'Volume' column. fillna(bool): if True, fill nan values. Returns: pandas.Series: New feature generated. 
""" clv = ((close - low) - (high - close)) / (high - low) clv = clv.fillna(0.0) # float division by zero ad = clv * volume ad = ad + ad.shift(1) if fillna: ad = ad.replace([np.inf, -np.inf], np.nan).fillna(0) return pd.Series(ad, name='adi') def on_balance_volume_depreciated(close, volume, fillna=False): """On-balance volume (OBV) It relates price and volume in the stock market. OBV is based on signed cumulative volume. https://en.wikipedia.org/wiki/On-balance_volume Args: close(pandas.Series): dataset 'Close' column. volume(pandas.Series):
train[['loan_status']] x_test = test.iloc[(slice(0, None, None), slice(1, 34, None))] return x_test #============= # Function 57 def cleaning_func_37(df): # additional context code from user definitions def status_binary(text): if (text == 'Fully Paid'): return 0 elif ((text == 'Current') or (text == 'Issued')): return (- 1) else: return 1 # core cleaning code import pandas as pd import pandas as pd # df = pd.read_csv('../input/loan.csv') df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1) df = df.dropna(thresh=(len(df) / 2), axis=1) df = df.dropna() df['loan_status'] = df['loan_status'].apply(status_binary) df = df[(df['loan_status'] != (- 1))] dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']]) df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1) df = pd.concat([df, dummy_df], axis=1) mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}} df = df.replace(mapping_dict) cols = list(df) df = df.ix[(slice(None, None, None), cols)] from sklearn.model_selection import train_test_split (train, test) = train_test_split(df, test_size=0.3) x_train = train.iloc[(slice(0, None, None), slice(1, 34, None))] y_train = train[['loan_status']] x_test = test.iloc[(slice(0, None, None), slice(1, 34, None))] y_test = test[['loan_status']] method = ['Decision Tree', 'Random 
Forests', 'Logistic Regression'] false_paid = pd.DataFrame([[0, 0, 0], [0, 0, 0]], columns=method, index=['train', 'test']) default_identified = pd.DataFrame([[0, 0, 0], [0, 0, 0]], columns=method, index=['train', 'test']) from sklearn.tree import DecisionTreeClassifier from sklearn import tree model = tree.DecisionTreeClassifier(max_depth=5, criterion='entropy', class_weight={0: 0.15, 1: 0.85}) from sklearn.metrics import confusion_matrix import numpy as np p_train = model.predict(x_train) p_test = model.predict(x_test) (fully_paid, default) = confusion_matrix(p_train, np.array(y_train)) false_paid.loc[('train', 'Decision Tree')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1])) default_identified.loc[('train', 'Decision Tree')] = ((100 * default[1]) / (default[1] + fully_paid[1])) (fully_paid, default) = confusion_matrix(p_test, np.array(y_test)) false_paid.loc[('test', 'Decision Tree')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1])) default_identified.loc[('test', 'Decision Tree')] = ((100 * default[1]) / (default[1] + fully_paid[1])) from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier(max_depth=6, n_estimators=10, class_weight={0: 0.15, 1: 0.85}) from sklearn.metrics import confusion_matrix p_train = model.predict(x_train) p_test = model.predict(x_test) (fully_paid, default) = confusion_matrix(p_train, np.array(y_train)) false_paid.loc[('train', 'Random Forests')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1])) default_identified.loc[('train', 'Random Forests')] = ((100 * default[1]) / (default[1] + fully_paid[1])) (fully_paid, default) = confusion_matrix(p_test, np.array(y_test)) false_paid.loc[('test', 'Random Forests')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1])) return false_paid #============= # Function 58 def cleaning_func_39(df): # additional context code from user definitions def status_binary(text): if (text == 'Fully Paid'): return 0 elif ((text == 'Current') or (text 
== 'Issued')): return (- 1) else: return 1 # core cleaning code import pandas as pd import pandas as pd # df = pd.read_csv('../input/loan.csv') df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1) df = df.dropna(thresh=(len(df) / 2), axis=1) df = df.dropna() df['loan_status'] = df['loan_status'].apply(status_binary) df = df[(df['loan_status'] != (- 1))] dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']]) df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1) df = pd.concat([df, dummy_df], axis=1) mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}} df = df.replace(mapping_dict) cols = list(df) df = df.ix[(slice(None, None, None), cols)] from sklearn.model_selection import train_test_split (train, test) = train_test_split(df, test_size=0.3) x_train = train.iloc[(slice(0, None, None), slice(1, 34, None))] y_train = train[['loan_status']] x_test = test.iloc[(slice(0, None, None), slice(1, 34, None))] y_test = test[['loan_status']] method = ['Decision Tree', 'Random Forests', 'Logistic Regression'] false_paid = pd.DataFrame([[0, 0, 0], [0, 0, 0]], columns=method, index=['train', 'test']) default_identified = pd.DataFrame([[0, 0, 0], [0, 0, 0]], columns=method, index=['train', 'test']) from sklearn.tree import DecisionTreeClassifier from sklearn import tree model = 
tree.DecisionTreeClassifier(max_depth=5, criterion='entropy', class_weight={0: 0.15, 1: 0.85}) from sklearn.metrics import confusion_matrix import numpy as np p_train = model.predict(x_train) p_test = model.predict(x_test) (fully_paid, default) = confusion_matrix(p_train, np.array(y_train)) false_paid.loc[('train', 'Decision Tree')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1])) default_identified.loc[('train', 'Decision Tree')] = ((100 * default[1]) / (default[1] + fully_paid[1])) (fully_paid, default) = confusion_matrix(p_test, np.array(y_test)) false_paid.loc[('test', 'Decision Tree')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1])) default_identified.loc[('test', 'Decision Tree')] = ((100 * default[1]) / (default[1] + fully_paid[1])) from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier(max_depth=6, n_estimators=10, class_weight={0: 0.15, 1: 0.85}) from sklearn.metrics import confusion_matrix p_train = model.predict(x_train) p_test = model.predict(x_test) (fully_paid, default) = confusion_matrix(p_train, np.array(y_train)) false_paid.loc[('train', 'Random Forests')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1])) default_identified.loc[('train', 'Random Forests')] = ((100 * default[1]) / (default[1] + fully_paid[1])) (fully_paid, default) = confusion_matrix(p_test, np.array(y_test)) false_paid.loc[('test', 'Random Forests')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1])) default_identified.loc[('test', 'Random Forests')] = ((100 * default[1]) / (default[1] + fully_paid[1])) from sklearn.linear_model import LogisticRegression import numpy as np model = LogisticRegression(class_weight={0: 0.15, 1: 0.85}) from sklearn.metrics import confusion_matrix p_train = model.predict(x_train) (fully_paid, default) = confusion_matrix(p_train, np.array(y_train)) false_paid.loc[('train', 'Logistic Regression')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1])) return false_paid 
#=============

# Function 59
def cleaning_func_0(loan):
    """Flag rows that have any major-derogatory history (1) vs none (0)."""
    import numpy as np
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['90day_worse_rating'] = np.where(loan['mths_since_last_major_derog'].isnull(), 0, 1)
    return loan


#=============

# Function 60
def cleaning_func_1(loan):
    """Median-impute total collection amount."""
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['tot_coll_amt'] = loan['tot_coll_amt'].fillna(loan['tot_coll_amt'].median())
    return loan


#=============

# Function 61
def cleaning_func_2(loan):
    """Median-impute revolving utilization."""
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['revol_util'] = loan['revol_util'].fillna(loan['revol_util'].median())
    return loan


#=============

# Function 62
def cleaning_func_3(loan):
    """Zero-impute total account count."""
    import numpy as np
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['total_acc'] = np.where(loan['total_acc'].isnull(), 0, loan['total_acc'])
    return loan


#=============

# Function 63
def cleaning_func_4(loan):
    """Median-impute total current balance."""
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['tot_cur_bal'] = loan['tot_cur_bal'].fillna(loan['tot_cur_bal'].median())
    return loan


#=============

# Function 64
def cleaning_func_5(loan):
    """Zero-impute open account count."""
    import numpy as np
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['open_acc'] = np.where(loan['open_acc'].isnull(), 0, loan['open_acc'])
    return loan


#=============

# Function 65
def cleaning_func_6(loan):
    """Zero-impute 12-month collections (excluding medical)."""
    import numpy as np
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['collections_12_mths_ex_med'] = np.where(loan['collections_12_mths_ex_med'].isnull(), 0, loan['collections_12_mths_ex_med'])
    return loan


#=============

# Function 66
def cleaning_func_7(loan):
    """Median-impute total revolving high limit."""
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['total_rev_hi_lim'] = loan['total_rev_hi_lim'].fillna(loan['total_rev_hi_lim'].median())
    return loan


#=============

# Function 67
def cleaning_func_8(loan):
    """Zero-impute the loan title."""
    import numpy as np
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['title'] = np.where(loan['title'].isnull(), 0, loan['title'])
    return loan


#=============

# Function 68
def cleaning_func_9(loan):
    """Median-impute annual income."""
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['annual_inc'] = loan['annual_inc'].fillna(loan['annual_inc'].median())
    return loan


#=============

# Function 69
def cleaning_func_10(loan):
    """Zero-impute 2-year delinquency count."""
    import numpy as np
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['delinq_2yrs'] = np.where(loan['delinq_2yrs'].isnull(), 0, loan['delinq_2yrs'])
    return loan


#=============

# Function 70
def cleaning_func_11(loan):
    """Zero-impute currently-delinquent account count."""
    import numpy as np
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['acc_now_delinq'] = np.where(loan['acc_now_delinq'].isnull(), 0, loan['acc_now_delinq'])
    return loan


#=============

# Function 71
def cleaning_func_12(loan):
    """Zero-impute 6-month inquiry count."""
    import numpy as np
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['inq_last_6mths'] = np.where(loan['inq_last_6mths'].isnull(), 0, loan['inq_last_6mths'])
    return loan


#=============

# Function 72
def cleaning_func_13(loan):
    """Zero-impute public-record count."""
    import numpy as np
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['pub_rec'] = np.where(loan['pub_rec'].isnull(), 0, loan['pub_rec'])
    return loan


#=============

# Function 73
def cleaning_func_14(loan):
    """Fill missing employer titles with a placeholder string."""
    import numpy as np
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['emp_title'] = np.where(loan['emp_title'].isnull(), 'Job title not given', loan['emp_title'])
    return loan


#=============

# Function 74
def cleaning_func_15(loan):
    """Fill missing months-since-last-delinquency with the sentinel 188."""
    import numpy as np
    import pandas as pd
    # loan = pd.read_csv('../input/loan.csv', low_memory=False)
    loan['mths_since_last_delinq'] = np.where(loan['mths_since_last_delinq'].isnull(), 188, loan['mths_since_last_delinq'])
    return loan


#=============

# Function 75
def cleaning_func_0(ld):
    """Keep mostly-complete columns and add the fraction of principal paid."""
    import pandas as pd
    # ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
    pct_full = (ld.count() / len(ld))
    names = list(pct_full[(pct_full > 0.75)].index)
    loan = ld[names]
    loan['pct_paid'] = (loan.out_prncp / loan.loan_amnt)
    return loan


#=============

# Function 76
def cleaning_func_1(ld):
    """Keep mostly-complete columns and extract the issue month abbreviation."""
    import pandas as pd
    # ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
    pct_full = (ld.count() / len(ld))
    names = list(pct_full[(pct_full > 0.75)].index)
    loan = ld[names]
    loan['issue_mo'] = loan.issue_d.str[0:3]
    return loan


#=============

# Function 77
def cleaning_func_2(ld):
    """Keep mostly-complete columns and extract the issue year suffix."""
    import pandas as pd
    # ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
    pct_full = (ld.count() / len(ld))
    names = list(pct_full[(pct_full > 0.75)].index)
    loan = ld[names]
    loan['issue_year'] = loan.issue_d.str[4:]
    return loan


#=============

# Function 78
def cleaning_func_0(df_loan):
    """Shorten the two 'Does not meet the credit policy' status labels."""
    import pandas as pd
    # df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
    df_loan.loc[((df_loan.loan_status == 'Does not meet the credit policy. Status:Fully Paid'), 'loan_status')] = 'NMCP Fully Paid'
    df_loan.loc[((df_loan.loan_status == 'Does not meet the credit policy. Status:Charged Off'), 'loan_status')] = 'NMCP Charged Off'
    return df_loan


#=============

# Function 79
def cleaning_func_1(df_loan):
    """Split issue_d ('Mon-Year') into month and year columns."""
    import pandas as pd
    # df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
    (df_loan['issue_month'], df_loan['issue_year']) = df_loan['issue_d'].str.split('-', 1).str
    return df_loan


#=============

# Function 80
def cleaning_func_2(df_loan):
    """Add the interest rate rounded to the nearest whole percent."""
    import pandas as pd
    # df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
    df_loan['int_round'] = df_loan['int_rate'].round(0).astype(int)
    return df_loan


#=============

# Function 81
def cleaning_func_3(df_loan):
    """Split issue_d and make the month an ordered categorical."""
    import pandas as pd
    # df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
    (df_loan['issue_month'], df_loan['issue_year']) = df_loan['issue_d'].str.split('-', 1).str
    months_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    df_loan['issue_month'] = pd.Categorical(df_loan['issue_month'], categories=months_order, ordered=True)
    return df_loan


#=============

# Function 82
def cleaning_func_0(df):
    """Median-impute months-since-last-delinquency."""
    import pandas as pd
    # df = pd.read_csv('../input/loan.csv', low_memory=False)
    df.mths_since_last_delinq = df.mths_since_last_delinq.fillna(df.mths_since_last_delinq.median())
    return df


#=============

# Function 83
def cleaning_func_1(df):
    # core cleaning code
    import numpy as np
    import pandas as pd
    # df  -- NOTE(review): source truncated here; the body ends abruptly.
<filename>aminator/util/linux.py # -*- coding: utf-8 -*- # # # Copyright 2013 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # """ aminator.util.linux =================== Linux utility functions """ import errno import io import logging import os import shutil import stat import string import sys from collections import namedtuple from contextlib import contextmanager from copy import copy from fcntl import fcntl, F_GETFL, F_SETFL, LOCK_EX, LOCK_UN, LOCK_NB from fcntl import flock as _flock from os import O_NONBLOCK, environ, makedirs from os.path import isdir, dirname from select import select from signal import signal, alarm, SIGALRM from subprocess import Popen, PIPE from decorator import decorator log = logging.getLogger(__name__) MountSpec = namedtuple('MountSpec', 'dev fstype mountpoint options') CommandResult = namedtuple('CommandResult', 'success result') Response = namedtuple('Response', ['command', 'std_err', 'std_out', 'status_code']) # need to scrub anything not in this list from AMI names and other metadata SAFE_AMI_CHARACTERS = string.ascii_letters + string.digits + '().-/_' def command(timeout=None, data=None, *cargs, **ckwargs): """ decorator used to define shell commands to be executed via envoy.run decorated function should return a list or string representing the command to be executed decorated function should return None if a guard fails """ @decorator def _run(f, *args, **kwargs): _cmd = f(*args, **kwargs) assert _cmd is not None, "null 
command passed to @command decorator" return monitor_command(_cmd, timeout) return _run def set_nonblocking(stream): fl = fcntl(stream.fileno(), F_GETFL) fcntl(stream.fileno(), F_SETFL, fl | O_NONBLOCK) def monitor_command(cmd, timeout=None): cmdStr = cmd shell = True if isinstance(cmd, list): cmdStr = " ".join(cmd) shell = False assert cmdStr, "empty command passed to monitor_command" log.debug('command: {0}'.format(cmdStr)) # sanitize PATH if we are running in a virtualenv env = copy(environ) if hasattr(sys, "real_prefix"): env["PATH"] = string.replace(env["PATH"], "{0}/bin:".format(sys.prefix), "") proc = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True, shell=shell, env=env) set_nonblocking(proc.stdout) set_nonblocking(proc.stderr) stdout = io.open( proc.stdout.fileno(), encoding='utf-8', errors='replace', closefd=False) stderr = io.open( proc.stderr.fileno(), encoding='utf-8', errors='replace', closefd=False) if timeout: alarm(timeout) def handle_sigalarm(*_): proc.terminate() signal(SIGALRM, handle_sigalarm) io_streams = [stdout, stderr] std_out = u'' std_err = u'' with stdout, stderr: while io_streams: reads, _, _ = select(io_streams, [], []) for fd in reads: buf = fd.read(4096) if buf is None or len(buf) == 0: # got eof io_streams.remove(fd) else: if fd == stderr: log.debug(u'STDERR: {0}'.format(buf)) std_err = u''.join([std_err, buf]) else: if buf[-1] == u'\n': log.debug(buf[:-1]) else: log.debug(buf) std_out = u''.join([std_out, buf]) proc.wait() std_out = std_out.encode('utf-8') std_err = std_err.encode('utf-8') alarm(0) status_code = proc.returncode log.debug("status code: {0}".format(status_code)) return CommandResult(status_code == 0, Response(cmdStr, std_err, std_out, status_code)) def mounted(path): pat = path.strip() + ' ' with open('/proc/mounts') as mounts: return any(pat in mount for mount in mounts) def fsck(dev): cmd = monitor_command(['fsck', '-y', '-f', dev]) # e2fsck will exit 1 if it finds and corrects filesystem problems. 
# consider that a success but fail all other exits as they should be legitimate # problems that prevent a bake. if not cmd.success and cmd.result.status_code == 1: cmd = CommandResult(True, cmd.result) return cmd def resize2fs(dev): return monitor_command(['resize2fs', dev]) def mount(mountspec): if not any((mountspec.dev, mountspec.mountpoint)): log.error('Must provide dev or mountpoint') return None fstype_arg = options_arg = '' mountpoint = mountspec.mountpoint if mountspec.fstype: if mountspec.fstype == 'bind': fstype_flag = '-o' # we may need to create the mountpoint if it does not exist if not isdir(mountspec.dev): mountpoint = dirname(mountspec.mountpoint) else: fstype_flag = '-t' fstype_arg = '{0} {1}'.format(fstype_flag, mountspec.fstype) if not isdir(mountpoint): makedirs(mountpoint) if mountspec.options: options_arg = '-o ' + mountspec.options return monitor_command('mount {0} {1} {2} {3}'.format(fstype_arg, options_arg, mountspec.dev, mountspec.mountpoint)) def unmount(dev): return monitor_command(['umount', dev]) def busy_mount(mountpoint): return monitor_command(['lsof', '-X', mountpoint]) def sanitize_metadata(word): chars = list(word) for index, char in enumerate(chars): if char not in SAFE_AMI_CHARACTERS: chars[index] = '_' return ''.join(chars) def keyval_parse(record_sep='\n', field_sep=':'): """decorator for parsing CommandResult stdout into key/value pairs returned in a dict """ @decorator def _parse(f, *args, **kwargs): return result_to_dict(f(*args, **kwargs), record_sep, field_sep) return _parse def result_to_dict(commandResult, record_sep='\n', field_sep=':'): metadata = {} if commandResult.success: for record in commandResult.result.std_out.split(record_sep): try: key, val = record.split(field_sep, 1) except ValueError: continue metadata[key.strip()] = val.strip() else: log.debug('failure:{0.command} :{0.std_err}'.format(commandResult.result)) return metadata class Chroot(object): def __init__(self, path): self.path = path 
log.debug('Chroot path: {0}'.format(self.path)) def __enter__(self): log.debug('Configuring chroot at {0}'.format(self.path)) self.real_root = os.open('/', os.O_RDONLY) self.cwd = os.getcwd() os.chroot(self.path) os.chdir('/') log.debug('Inside chroot') return self def __exit__(self, typ, exc, trc): if typ: log.debug('Exception encountered in Chroot', exc_info=(typ, exc, trc)) log.debug('Leaving chroot') os.fchdir(self.real_root) os.chroot('.') os.chdir(self.cwd) log.debug('Outside chroot') return False def lifo_mounts(root=None): """return list of mount points mounted on 'root' and below in lifo order from /proc/mounts.""" with open('/proc/mounts') as proc_mounts: # grab the mountpoint for each mount where we MIGHT match mount_entries = [line.split(' ')[1] for line in proc_mounts if root in line] if not mount_entries: # return an empty list if we didn't match return mount_entries return [entry for entry in reversed(mount_entries) if entry == root or entry.startswith(root + '/')] def copy_image(src=None, dst=None): """dd like utility for copying image files. eg. copy_image('/dev/sdf1','/mnt/bundles/ami-name.img') """ try: src_fd = os.open(src, os.O_RDONLY) dst_fd = os.open(dst, os.O_WRONLY | os.O_CREAT, 0644) blks = 0 blksize = 64 * 1024 log.debug("copying {0} to {1}".format(src, dst)) while True: buf = os.read(src_fd, blksize) if len(buf) <= 0: log.debug("{0} {1} blocks written.".format(blks, blksize)) os.close(src_fd) os.close(dst_fd) break out = os.write(dst_fd, buf) if out < blksize: log.debug('wrote {0} bytes.'.format(out)) blks += 1 except OSError as e: log.debug("{0}: errno[{1}]: {2}.".format(e.filename, e.errno, e.strerror)) return False return True @contextmanager def flock(filename=None): """simple blocking exclusive file locker eg: with flock(lockfilepath): ... """ with open(filename, 'a') as fh: _flock(fh, LOCK_EX) yield _flock(fh, LOCK_UN) def locked(filename=None): """ :param filename: :return: True if file is locked. 
""" with open(filename, 'a') as fh: try: _flock(fh, LOCK_EX | LOCK_NB) ret = False except IOError as e: if e.errno == errno.EAGAIN: log.debug('{0} is locked: {1}'.format(filename, e)) ret = True else: ret = False return ret def root_check(): """ Simple root gate :return: errno.EACCESS if not running as root, None if running as root """ if os.geteuid() != 0: return errno.EACCES return None def native_device_prefix(prefixes): log.debug('Getting the OS-native device prefix from potential prefixes: {0}'.format(prefixes)) for prefix in prefixes: if any(device.startswith(prefix) for device in os.listdir('/sys/block')): log.debug('Native prefix is {0}'.format(prefix)) return prefix log.debug('{0} contains no native device prefixes'.format(prefixes)) return None def device_prefix(source_device): log.debug('Getting prefix for device {0}'.format(source_device)) # strip off any incoming /dev/ foo source_device_name = os.path.basename(source_device) # if we have a subdevice/partition... if source_device_name[-1].isdigit(): # then its prefix is the name minus the last TWO chars log.debug('Device prefix for {0} is {1}'.format(source_device, source_device_name[:-2:])) return source_device_name[:-2:] else: # otherwise, just strip the last one log.debug('Device prefix for {0} is {1}'.format(source_device, source_device_name[:-1:])) return source_device_name[:-1:] def native_block_device(source_device, native_prefix): source_device_prefix = device_prefix(source_device) if source_device_prefix == native_prefix: # we're okay, using the right name already, just return the same name return source_device else: # sub out the bad prefix for the good return source_device.replace(source_device_prefix, native_prefix) def os_node_exists(dev): try: mode = os.stat(dev).st_mode except OSError: return False return stat.S_ISBLK(mode) def install_provision_config(src, dstpath, backup_ext='_aminator'): if os.path.isfile(src) or os.path.isdir(src): log.debug('Copying {0} from the aminator host to 
{1}'.format(src, dstpath)) dst = os.path.join(dstpath.rstrip('/'), src.lstrip('/')) log.debug('copying src: {0} dst: {1}'.format(src, dst)) try: if os.path.isfile(dst) or os.path.islink(dst) or os.path.isdir(dst): backup = '{0}{1}'.format(dst, backup_ext) log.debug('Making backup of {0}'.format(dst)) try: if os.path.isdir(dst) or os.path.islink(dst): try: os.rename(dst, backup) except OSError as e: if e.errno == 18: # EXDEV Invalid cross-device link # need to copy across devices if os.path.isdir(dst): shutil.copytree(dst, backup, symlinks=True) shutil.rmtree(dst) elif os.path.islink(dst): link = os.readlink(dst) os.remove(dst) os.symlink(link, backup) elif os.path.isfile(dst): shutil.copy(dst, backup) except Exception: errstr = 'Error encountered while copying {0} to {1}'.format(dst, backup) log.critical(errstr) log.debug(errstr, exc_info=True) return False if os.path.isdir(src): shutil.copytree(src, dst, symlinks=True) else: shutil.copy(src, dst) except Exception: errstr = 'Error encountered while copying {0} to {1}'.format(src, dst) log.critical(errstr) log.debug(errstr, exc_info=True) return False log.debug('{0} copied from aminator host to {1}'.format(src, dstpath)) return True else: log.critical('File not found: {0}'.format(src)) return True def install_provision_configs(files, dstpath, backup_ext='_aminator'): for filename in files: if not install_provision_config(filename, dstpath, backup_ext): return False return True def remove_provision_config(src, dstpath, backup_ext='_aminator'): dst = os.path.join(dstpath.rstrip('/'), src.lstrip('/')) backup = '{0}{1}'.format(dst, backup_ext) try: if os.path.isfile(dst) or os.path.islink(dst) or
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, List, Optional, Sequence, Type import abc import re from . import exceptions, printer, sketch __all__ = [ "Statement", "Root", "Block", "BaseOutput", "builtin_stmt_classes", "IndentMixIn", "AppendMixIn", "UnindentMixIn", ] _VALID_FN_NAME_RE = re.compile(r"^[a-zA-Z]([a-zA-Z0-9\_]+)?$") def _is_valid_fn_name(maybe_fn_name: str) -> bool: """ Check if this is a valid function name. 
""" return re.fullmatch(_VALID_FN_NAME_RE, maybe_fn_name) is not None class IndentMixIn(abc.ABC): @abc.abstractmethod def append_stmt(self, stmt: "AppendMixIn") -> None: # pragma: no cover raise NotImplementedError @property @abc.abstractmethod def line_no(self) -> int: # pragma: no cover raise NotImplementedError class AppendMixIn(abc.ABC): @property @abc.abstractmethod def line_no(self) -> int: # pragma: no cover raise NotImplementedError @abc.abstractmethod def print_code( self, py_printer: printer.PythonPrinter ) -> None: # pragma: no cover raise NotImplementedError class UnindentMixIn(abc.ABC): pass class Statement(abc.ABC): @classmethod @abc.abstractmethod def try_match( cls, stmt_str: str, skt: sketch.Sketch, line_no: int ) -> Optional["Statement"]: # pragma: no cover raise NotImplementedError class Root(Statement, AppendMixIn): def __init__(self, skt: sketch.Sketch) -> None: self._skt = skt self._stmts: List[AppendMixIn] = [] self._block_stmts: Dict[str, Block] = {} @property def line_no(self) -> int: return 0 def append_stmt(self, stmt: AppendMixIn) -> None: self._stmts.append(stmt) def append_block(self, block_stmt: "Block") -> None: if block_stmt.block_name in self._block_stmts: raise exceptions.BlockNameConflictError( f"The name of the block at {block_stmt.line_no}" "conflicts with the block at " f"{self._block_stmts[block_stmt.block_name].line_no} " f"in file {self._skt._path}. You cannot define two blocks " "with the same name in the one file." 
            )
        self._block_stmts[block_stmt.block_name] = block_stmt

    @classmethod
    def try_match(
        cls, stmt_str: str, skt: sketch.Sketch, line_no: int
    ) -> Optional["Statement"]:  # pragma: no cover
        raise NotImplementedError("This does not apply to Root.")

    def print_code(self, py_printer: printer.PythonPrinter) -> None:
        # Emit the compiled module: a block-runtime registry, one runtime class
        # per named block, then the sketch runtime whose _draw_body replays the
        # top-level statements.
        # NOTE(review): the first two writeline calls omit the `self` source
        # argument that every later call passes — presumably deliberate (no
        # source line to map those to), but worth confirming.
        py_printer.writeline("import sketchbook")
        py_printer.writeline("_SKT_BLOCK_RUNTIMES = {}")
        for block_stmt in self._block_stmts.values():
            block_stmt.print_block_code(py_printer)
        py_printer.writeline(
            "class _SktCurrentRuntime(sketchbook.SketchRuntime):", self
        )
        with py_printer.indent_block():
            py_printer.writeline("_BLOCK_RUNTIMES = _SKT_BLOCK_RUNTIMES", self)
            py_printer.writeline("async def _draw_body(self) -> None:", self)
            with py_printer.indent_block():
                for stmt in self._stmts:
                    stmt.print_code(py_printer)


class Block(Statement, IndentMixIn, AppendMixIn):
    """A `block <name>` statement: a named, re-drawable region of the sketch."""

    def __init__(
        self, block_name: str, skt: sketch.Sketch, line_no: int
    ) -> None:
        self._block_name = block_name
        self._skt = skt
        self._line_no = line_no
        self._stmts: List[AppendMixIn] = []

    def append_stmt(self, stmt: AppendMixIn) -> None:
        self._stmts.append(stmt)

    @property
    def block_name(self) -> str:
        return self._block_name

    @property
    def line_no(self) -> int:
        return self._line_no

    @classmethod
    def try_match(
        cls, stmt_str: str, skt: sketch.Sketch, line_no: int
    ) -> Optional["Statement"]:
        # Statement shape: "block <name>". Split once so the name keeps spaces
        # (then rejected by the identifier check below).
        splitted_stmt = stmt_str.split(" ", 1)

        if splitted_stmt[0] != "block":
            return None

        if len(splitted_stmt) != 2 or (not splitted_stmt[1].strip()):
            raise exceptions.SketchSyntaxError("Block name cannot be empty.")

        block_name = splitted_stmt[1].strip()

        if not _is_valid_fn_name(block_name):
            raise exceptions.SketchSyntaxError(
                "Invalid Block Statement. Block name expected, "
                f"got: {repr(block_name)}."
            )

        return cls(block_name=block_name, skt=skt, line_no=line_no)

    def print_code(self, py_printer: printer.PythonPrinter) -> None:
        # Where the block appears inline, emit a call that draws it.
        # NOTE(review): `self.blocks[<name>, True]()` indexes with a tuple key —
        # presumably a runtime API detail; confirm against SketchRuntime.blocks.
        py_printer.writeline(
            f"self.write(await self.blocks[{repr(self.block_name)}, True](), "
            'escape="raw")',
            self,
        )

    def print_block_code(self, py_printer: printer.PythonPrinter) -> None:
        # Emit the block's runtime class and register it in the module-level
        # registry created by Root.print_code.
        py_printer.writeline(
            "class _SktCurrentBlockRuntime(sketchbook.BlockRuntime):", self
        )
        with py_printer.indent_block():
            py_printer.writeline("async def _draw_block(self) -> None:", self)
            with py_printer.indent_block():
                for stmt in self._stmts:
                    stmt.print_code(py_printer)
        py_printer.writeline(
            f"_SKT_BLOCK_RUNTIMES[{self.block_name!r}] = "
            "_SktCurrentBlockRuntime"
        )


class Plain(Statement, AppendMixIn):
    """Literal (non-statement) template text, written out escaped as raw."""

    def __init__(self, plain_str: str) -> None:
        self._plain_str = plain_str

    @property
    def line_no(self) -> int:  # pragma: no cover
        raise NotImplementedError("This does not apply to Plain.")

    @classmethod
    def try_match(
        cls, stmt_str: str, skt: sketch.Sketch, line_no: int
    ) -> Optional["Statement"]:  # pragma: no cover
        raise NotImplementedError("This does not apply to Plain.")

    def print_code(self, py_printer: printer.PythonPrinter) -> None:
        py_printer.writeline(
            f'self.write({repr(self._plain_str)}, escape="raw")'
        )


class BaseOutput(Statement, AppendMixIn):
    """An output statement `<filter>= <expr>`; subclasses declare the
    accepted filter names via _filter_fn_names."""

    # Filter names a subclass accepts; empty here so the base matches nothing.
    _filter_fn_names: List[str] = []

    def __init__(
        self,
        output_filter: str,
        output_exp: str,
        skt: sketch.Sketch,
        line_no: int,
    ) -> None:
        self._output_filter = output_filter
        self._output_exp = output_exp
        self._skt = skt
        self._line_no = line_no

    @property
    def line_no(self) -> int:
        return self._line_no

    @classmethod
    def try_match(
        cls, stmt_str: str, skt: sketch.Sketch, line_no: int
    ) -> Optional["Statement"]:
        splitted_stmt = stmt_str.split(" ", 1)

        stmt_keyword = splitted_stmt[0]

        # Output statements are keyed by a keyword ending in "=",
        # e.g. "=", "r=", "h=". A bare "=" means the default filter.
        if not stmt_keyword.endswith("="):
            return None

        stmt_output_filter = stmt_keyword[:-1] or "default"

        if stmt_output_filter not in cls._filter_fn_names:
            return None

        if len(splitted_stmt) != 2:
            raise exceptions.SketchSyntaxError(
                (
                    "Output content is 
empty " f"in file {skt._path} at line {line_no}." ) ) stmt_output_exp = splitted_stmt[1].strip() if not stmt_output_exp: raise exceptions.SketchSyntaxError( ( "The expression to be output is empty " f"in file {skt._path} at line {line_no}." ) ) return cls( output_filter=stmt_output_filter, output_exp=stmt_output_exp, skt=skt, line_no=line_no, ) def print_code(self, py_printer: printer.PythonPrinter) -> None: py_printer.writeline( f"self.write({self._output_exp}, " f"escape={self._output_filter!r})" ) class _Include(Statement, AppendMixIn): def __init__( self, target_path: str, skt: sketch.Sketch, line_no: int ) -> None: self._target_path = target_path self._skt = skt self._line_no = line_no @property def line_no(self) -> int: return self._line_no @classmethod def try_match( cls, stmt_str: str, skt: sketch.Sketch, line_no: int ) -> Optional["Statement"]: splitted_stmt = stmt_str.split(" ", 1) if splitted_stmt[0] != "include": return None if len(splitted_stmt) < 2: raise exceptions.SketchSyntaxError( f"Invalid syntax in file {skt._path} at line {line_no}, " "you must provide the path to be included." ) return cls(target_path=splitted_stmt[1], skt=skt, line_no=line_no) def print_code(self, py_printer: printer.PythonPrinter) -> None: py_printer.writeline( "self.write(await " f'self._include_sketch({self._target_path}), escape="raw")', self, ) class _Inherit(Statement, AppendMixIn): def __init__( self, target_path: str, skt: sketch.Sketch, line_no: int ) -> None: self._target_path = target_path self._skt = skt self._line_no = line_no @property def line_no(self) -> int: return self._line_no @classmethod def try_match( cls, stmt_str: str, skt: sketch.Sketch, line_no: int ) -> Optional["Statement"]: splitted_stmt = stmt_str.split(" ", 1) if splitted_stmt[0] != "inherit": return None if len(splitted_stmt) < 2: raise exceptions.SketchSyntaxError( f"Invalid syntax in file {skt._path} at line {line_no}, " "you must provide the path to be inherited." 
) return cls(target_path=splitted_stmt[1], skt=skt, line_no=line_no) def print_code(self, py_printer: printer.PythonPrinter) -> None: py_printer.writeline( f"await self._add_parent({self._target_path})", self ) class _Indent(Statement, IndentMixIn, AppendMixIn): def __init__( self, stmt_str: str, skt: sketch.Sketch, line_no: int ) -> None: self._stmt_str = stmt_str self._skt = skt self._line_no = line_no self._stmts: List[AppendMixIn] = [] def append_stmt(self, stmt: AppendMixIn) -> None: self._stmts.append(stmt) @property def line_no(self) -> int: return self._line_no @classmethod def try_match( cls, stmt_str: str, skt: sketch.Sketch, line_no: int ) -> Optional["Statement"]: if stmt_str.split(" ", 1)[0] not in ( "if", "with", "for", "while", "try", "async", ): return None return cls(stmt_str=stmt_str.strip(), skt=skt, line_no=line_no) def print_code(self, py_printer: printer.PythonPrinter) -> None: py_printer.writeline(f"{self._stmt_str}:", self) with py_printer.indent_block(): for stmt in self._stmts: stmt.print_code(py_printer) py_printer.writeline("pass", self) class _Unindent(Statement, UnindentMixIn): @classmethod def try_match( cls, stmt_str: str, skt: sketch.Sketch, line_no: int ) -> Optional["Statement"]: if stmt_str.split(" ", 1)[0] != "end": return None return cls() class _HalfIndent(Statement, IndentMixIn, AppendMixIn, UnindentMixIn): def __init__( self, stmt_str: str, skt: sketch.Sketch, line_no: int ) -> None: self._stmt_str = stmt_str self._skt = skt self._line_no = line_no self._stmts: List[AppendMixIn] = [] def append_stmt(self, stmt: AppendMixIn) -> None: self._stmts.append(stmt) @property def line_no(self) -> int: return self._line_no @classmethod def try_match( cls, stmt_str: str, skt: sketch.Sketch, line_no: int ) -> Optional["Statement"]: if stmt_str.split(" ", 1)[0] not in ( "else", "elif", "except", "finally", ): return None return cls(stmt_str=stmt_str.strip(), skt=skt, line_no=line_no) def print_code(self, py_printer: 
printer.PythonPrinter) -> None: py_printer.writeline(f"{self._stmt_str}:", self) with py_printer.indent_block(): for stmt in self._stmts: stmt.print_code(py_printer) py_printer.writeline("pass", self) class _Inline(Statement, AppendMixIn): def __init__( self, stmt_str: str, skt: sketch.Sketch, line_no: int ) -> None: self._stmt_str = stmt_str self._skt = skt self._line_no = line_no @property def line_no(self) -> int: return self._line_no @classmethod def try_match( cls, stmt_str: str, skt: sketch.Sketch, line_no: int ) -> Optional["Statement"]: if stmt_str.split(" ", 1)[0] not in ( "break", "continue", "import", "raise", "from", "nonlocal", "global", "assert", ): return None return cls(stmt_str=stmt_str.strip(), skt=skt, line_no=line_no) def print_code(self, py_printer: printer.PythonPrinter) -> None: py_printer.writeline(self._stmt_str, self) class _Comment(Statement, AppendMixIn): def __init__( self, cmnt_str: str, skt: sketch.Sketch, line_no: int ) -> None: self._cmnt_str = cmnt_str self._skt = skt self._line_no = line_no @property def line_no(self) -> int: return self._line_no @classmethod def try_match( cls, stmt_str: str, skt: sketch.Sketch, line_no: int ) -> Optional["Statement"]: if not stmt_str.startswith("#"): return None return cls(cmnt_str=stmt_str[1:].strip(), skt=skt, line_no=line_no) def print_code(self, py_printer: printer.PythonPrinter) -> None: for cmnt_line in self._cmnt_str.splitlines(): py_printer.writeline(f"# {cmnt_line}", self) class _Assign(Statement, AppendMixIn): def __init__( self, target_lst: str, exp: str, skt: sketch.Sketch, line_no: int ) -> None: self._target_lst = target_lst self._exp = exp self._skt = skt self._line_no = line_no @property def line_no(self) -> int: return self._line_no
<gh_stars>0 import warnings import math from pythermalcomfort.psychrometrics import p_sat warnings.simplefilter("always") def transpose_sharp_altitude(sharp, altitude): altitude_new = math.degrees( math.asin( math.sin(math.radians(abs(sharp - 90))) * math.cos(math.radians(altitude)) ) ) sharp = math.degrees( math.atan(math.sin(math.radians(sharp)) * math.tan(math.radians(90 - altitude))) ) sol_altitude = altitude_new return round(sharp, 3), round(sol_altitude, 3) def check_standard_compliance(standard, **kwargs): params = dict() params["standard"] = standard for key, value in kwargs.items(): params[key] = value if params["standard"] == "utci": for key, value in params.items(): if key == "v" and (value > 17 or value < 0.5): warnings.warn( "UTCI wind speed applicability limits between 0.5 and 17 m/s", UserWarning, ) if params["standard"] == "ankle_draft": for key, value in params.items(): if key == "met" and value > 1.3: warnings.warn( "The ankle draft model is only valid for met <= 1.3", UserWarning, ) if key == "clo" and value > 0.7: warnings.warn( "The ankle draft model is only valid for clo <= 0.7", UserWarning, ) elif params["standard"] == "ashrae": # based on table 7.3.4 ashrae 55 2017 for key, value in params.items(): if key in ["tdb", "tr"]: if key == "tdb": parameter = "dry-bulb" else: parameter = "mean radiant" if value > 40 or value < 10: warnings.warn( f"ASHRAE {parameter} temperature applicability limits between 10 and 40 °C", UserWarning, ) if key in ["v", "vr"] and (value > 2 or value < 0): warnings.warn( "ASHRAE air speed applicability limits between 0 and 2 m/s", UserWarning, ) if key == "met" and (value > 2 or value < 1): warnings.warn( "ASHRAE met applicability limits between 1.0 and 2.0 met", UserWarning, ) if key == "clo" and (value > 1.5 or value < 0): warnings.warn( "ASHRAE clo applicability limits between 0.0 and 1.5 clo", UserWarning, ) if key == "v_limited" and value > 0.2: raise ValueError( "This equation is only applicable for air speed 
lower than 0.2 m/s" ) elif params["standard"] == "fan_heatwaves": # based on table 7.3.4 ashrae 55 2017 for key, value in params.items(): if key in ["tdb", "tr"]: if key == "tdb": parameter = "dry-bulb" else: parameter = "mean radiant" if value > 50 or value < 30: warnings.warn( f"{parameter} temperature applicability limits between 30 and 50 °C", UserWarning, ) if key in ["v", "vr"] and (value > 4.5 or value < 0.1): warnings.warn( "Air speed applicability limits between 0.4 and 4.5 m/s", UserWarning, ) if key == "met" and (value > 2 or value < 0.7): warnings.warn( "Met applicability limits between 0.7 and 2.0 met", UserWarning, ) if key == "clo" and (value > 1.0 or value < 0): warnings.warn( "Clo applicability limits between 0.0 and 1.0 clo", UserWarning, ) elif params["standard"] == "iso": # based on ISO 7730:2005 page 3 for key, value in params.items(): if key == "tdb" and (value > 30 or value < 10): warnings.warn( "ISO air temperature applicability limits between 10 and 30 °C", UserWarning, ) if key == "tr" and (value > 40 or value < 10): warnings.warn( "ISO mean radiant temperature applicability limits between 10 and 40 °C", UserWarning, ) if key in ["v", "vr"] and (value > 1 or value < 0): warnings.warn( "ISO air speed applicability limits between 0 and 1 m/s", UserWarning, ) if key == "met" and (value > 4 or value < 0.8): warnings.warn( "ISO met applicability limits between 0.8 and 4.0 met", UserWarning, ) if key == "clo" and (value > 2 or value < 0): warnings.warn( "ISO clo applicability limits between 0.0 and 2 clo", UserWarning, ) elif params["standard"] == "ISO7933": # based on ISO 7933:2004 Annex A if params["tdb"] > 50 or params["tdb"] < 15: warnings.warn( "ISO 7933:2004 air temperature applicability limits between 15 and 50 °C", UserWarning, ) p_a = p_sat(params["tdb"]) / 1000 * params["rh"] / 100 rh_max = 4.5 * 100 * 1000 / p_sat(params["tdb"]) if p_a > 4.5 or p_a < 0: warnings.warn( f"ISO 7933:2004 t_r - t_db applicability limits between 0 and 
{rh_max} %", UserWarning, ) if params["tr"] - params["tdb"] > 50 or params["tr"] - params["tdb"] < 0: warnings.warn( "ISO 7933:2004 t_r - t_db applicability limits between 0 and 60 °C", UserWarning, ) if params["v"] > 3 or params["v"] < 0: warnings.warn( "ISO 7933:2004 air speed applicability limits between 0 and 3 m/s", UserWarning, ) if params["met"] > 450 or params["met"] < 100: warnings.warn( "ISO 7933:2004 met applicability limits between 100 and 450 met", UserWarning, ) if params["clo"] > 1 or params["clo"] < 0.1: warnings.warn( "ISO 7933:2004 clo applicability limits between 0.1 and 1 clo", UserWarning, ) def body_surface_area(weight, height, formula="dubois"): """ Returns the body surface area in square meters. Parameters ---------- weight : float body weight, [kg] height : float height, [m] formula : {"dubois"}, default="dubois" formula used to calculate the body surface area Returns ------- body_surface_area : float body surface area, [m2] """ if formula == "dubois": return 0.202 * (weight ** 0.425) * (height ** 0.725) def f_svv(w, h, d): """Calculates the sky-vault view fraction Parameters ---------- w : float width of the window, [m] h : float height of the window, [m] d : float distance between the occupant and the window, [m] Returns ------- f_svv : float sky-vault view fraction ranges between 0 and 1 """ return ( math.degrees(math.atan(h / (2 * d))) * math.degrees(math.atan(w / (2 * d))) / 16200 ) def v_relative(v, met): """Estimates the relative air speed which combines the average air speed of the space plus the relative air speed caused by the body movement. 
Vag is assumed to be 0 for metabolic rates equal and lower than 1 met and otherwise equal to Vag = 0.3 (M – 1) (m/s) Parameters ---------- v : float air speed measured by the sensor, [m/s] met : float metabolic rate, [met] Returns ------- vr : float relative air speed, [m/s] """ if met > 1: return round(v + 0.3 * (met - 1), 3) else: return v def clo_dynamic(clo, met, standard="ASHRAE"): """Estimates the dynamic clothing insulation of a moving occupant. The activity as well as the air speed modify the insulation characteristics of the clothing and the adjacent air layer. Consequently the ISO 7730 states that the clothing insulation shall be corrected [2]_. The ASHRAE 55 Standard corrects for the effect of the body movement for met equal or higher than 1.2 met using the equation clo = Icl × (0.6 + 0.4/met) Parameters ---------- clo : float clothing insulation, [clo] met : float metabolic rate, [met] standard: str (default="ASHRAE") - If "ASHRAE", uses Equation provided in Section 5.2.2.2 of ASHRAE 55 2017 Returns ------- clo : float dynamic clothing insulation, [clo] """ if standard.lower() not in ["ashrae"]: raise ValueError( "clo dynamic calculation can only be performed in compliance ASHRAE Standard" ) if met > 1.2: return round(clo * (0.6 + 0.4 / met), 3) else: return clo def running_mean_outdoor_temperature(temp_array, alpha=0.8, units="SI"): """Estimates the running mean temperature also known as prevailing mean outdoor temperature. Parameters ---------- temp_array: list array containing the mean daily temperature in descending order (i.e. from newest/yesterday to oldest) :math:`[\Theta_{day-1}, \Theta_{day-2}, \dots , \Theta_{day-n}]`. Where :math:`\Theta_{day-1}` is yesterday's daily mean temperature. The EN 16798-1 2019 [3]_ states that n should be equal to 7 alpha : float constant between 0 and 1. 
The EN 16798-1 2019 [3]_ recommends a value of 0.8, while the ASHRAE 55 2017 recommends to choose values between 0.9 and 0.6, corresponding to a slow- and fast- response running mean, respectively. Adaptive comfort theory suggests that a slow-response running mean (alpha = 0.9) could be more appropriate for climates in which synoptic-scale (day-to- day) temperature dynamics are relatively minor, such as the humid tropics. units: str default="SI" select the SI (International System of Units) or the IP (Imperial Units) system. Returns ------- t_rm : float running mean outdoor temperature """ if units.lower() == "ip": for ix, x in enumerate(temp_array): temp_array[ix] = units_converter(tdb=temp_array[ix])[0] coeff = [alpha ** ix for ix, x in enumerate(temp_array)] t_rm = sum([a *
characteristics.""" subclass = None superclass = None def __init__(self, Machine=None, Number_Of_Sections=None, Time_Date_Stamp=None, Pointer_To_Symbol_Table=None, Number_Of_Symbols=None, Size_Of_Optional_Header=None, Characteristics=None, Hashes=None): self.Machine = Machine self.Number_Of_Sections = Number_Of_Sections self.Time_Date_Stamp = Time_Date_Stamp self.Pointer_To_Symbol_Table = Pointer_To_Symbol_Table self.Number_Of_Symbols = Number_Of_Symbols self.Size_Of_Optional_Header = Size_Of_Optional_Header self.Characteristics = Characteristics self.Hashes = Hashes def factory(*args_, **kwargs_): if PEFileHeaderType.subclass: return PEFileHeaderType.subclass(*args_, **kwargs_) else: return PEFileHeaderType(*args_, **kwargs_) factory = staticmethod(factory) def get_Machine(self): return self.Machine def set_Machine(self, Machine): self.Machine = Machine def validate_HexBinaryObjectPropertyType(self, value): # Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None. pass def get_Number_Of_Sections(self): return self.Number_Of_Sections def set_Number_Of_Sections(self, Number_Of_Sections): self.Number_Of_Sections = Number_Of_Sections def validate_NonNegativeIntegerObjectPropertyType(self, value): # Validate type cybox_common.NonNegativeIntegerObjectPropertyType, a restriction on None. 
pass def get_Time_Date_Stamp(self): return self.Time_Date_Stamp def set_Time_Date_Stamp(self, Time_Date_Stamp): self.Time_Date_Stamp = Time_Date_Stamp def get_Pointer_To_Symbol_Table(self): return self.Pointer_To_Symbol_Table def set_Pointer_To_Symbol_Table(self, Pointer_To_Symbol_Table): self.Pointer_To_Symbol_Table = Pointer_To_Symbol_Table def get_Number_Of_Symbols(self): return self.Number_Of_Symbols def set_Number_Of_Symbols(self, Number_Of_Symbols): self.Number_Of_Symbols = Number_Of_Symbols def get_Size_Of_Optional_Header(self): return self.Size_Of_Optional_Header def set_Size_Of_Optional_Header(self, Size_Of_Optional_Header): self.Size_Of_Optional_Header = Size_Of_Optional_Header def get_Characteristics(self): return self.Characteristics def set_Characteristics(self, Characteristics): self.Characteristics = Characteristics def get_Hashes(self): return self.Hashes def set_Hashes(self, Hashes): self.Hashes = Hashes def hasContent_(self): if ( self.Machine is not None or self.Number_Of_Sections is not None or self.Time_Date_Stamp is not None or self.Pointer_To_Symbol_Table is not None or self.Number_Of_Symbols is not None or self.Size_Of_Optional_Header is not None or self.Characteristics is not None or self.Hashes is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEFileHeaderType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEFileHeaderType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, 
level, already_processed, namespace_='WinExecutableFileObj:', name_='PEFileHeaderType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEFileHeaderType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Machine is not None: self.Machine.export(lwrite, level, 'WinExecutableFileObj:', name_='Machine', pretty_print=pretty_print) if self.Number_Of_Sections is not None: self.Number_Of_Sections.export(lwrite, level, 'WinExecutableFileObj:', name_='Number_Of_Sections', pretty_print=pretty_print) if self.Time_Date_Stamp is not None: self.Time_Date_Stamp.export(lwrite, level, 'WinExecutableFileObj:', name_='Time_Date_Stamp', pretty_print=pretty_print) if self.Pointer_To_Symbol_Table is not None: self.Pointer_To_Symbol_Table.export(lwrite, level, 'WinExecutableFileObj:', name_='Pointer_To_Symbol_Table', pretty_print=pretty_print) if self.Number_Of_Symbols is not None: self.Number_Of_Symbols.export(lwrite, level, 'WinExecutableFileObj:', name_='Number_Of_Symbols', pretty_print=pretty_print) if self.Size_Of_Optional_Header is not None: self.Size_Of_Optional_Header.export(lwrite, level, 'WinExecutableFileObj:', name_='Size_Of_Optional_Header', pretty_print=pretty_print) if self.Characteristics is not None: self.Characteristics.export(lwrite, level, 'WinExecutableFileObj:', name_='Characteristics', pretty_print=pretty_print) if self.Hashes is not None: self.Hashes.export(lwrite, level, 'WinExecutableFileObj:', name_='Hashes', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Machine': obj_ = 
cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Machine(obj_) elif nodeName_ == 'Number_Of_Sections': obj_ = cybox_common.NonNegativeIntegerObjectPropertyType.factory() obj_.build(child_) self.set_Number_Of_Sections(obj_) elif nodeName_ == 'Time_Date_Stamp': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Time_Date_Stamp(obj_) elif nodeName_ == 'Pointer_To_Symbol_Table': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Pointer_To_Symbol_Table(obj_) elif nodeName_ == 'Number_Of_Symbols': obj_ = cybox_common.NonNegativeIntegerObjectPropertyType.factory() obj_.build(child_) self.set_Number_Of_Symbols(obj_) elif nodeName_ == 'Size_Of_Optional_Header': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Size_Of_Optional_Header(obj_) elif nodeName_ == 'Characteristics': obj_ = cybox_common.HexBinaryObjectPropertyType.factory() obj_.build(child_) self.set_Characteristics(obj_) elif nodeName_ == 'Hashes': obj_ = cybox_common.HashListType.factory() obj_.build(child_) self.set_Hashes(obj_) # end class PEFileHeaderType class PEOptionalHeaderType(GeneratedsSuper): """The PEOptionalHeaderType type describes the PE Optional Header structure. 
Additional computed metadata, e.g., hashes of the header, are also included.""" subclass = None superclass = None def __init__(self, Magic=None, Major_Linker_Version=None, Minor_Linker_Version=None, Size_Of_Code=None, Size_Of_Initialized_Data=None, Size_Of_Uninitialized_Data=None, Address_Of_Entry_Point=None, Base_Of_Code=None, Base_Of_Data=None, Image_Base=None, Section_Alignment=None, File_Alignment=None, Major_OS_Version=None, Minor_OS_Version=None, Major_Image_Version=None, Minor_Image_Version=None, Major_Subsystem_Version=None, Minor_Subsystem_Version=None, Win32_Version_Value=None, Size_Of_Image=None, Size_Of_Headers=None, Checksum=None, Subsystem=None, DLL_Characteristics=None, Size_Of_Stack_Reserve=None, Size_Of_Stack_Commit=None, Size_Of_Heap_Reserve=None, Size_Of_Heap_Commit=None, Loader_Flags=None, Number_Of_Rva_And_Sizes=None, Data_Directory=None, Hashes=None): self.Magic = Magic self.Major_Linker_Version = Major_Linker_Version self.Minor_Linker_Version = Minor_Linker_Version self.Size_Of_Code = Size_Of_Code self.Size_Of_Initialized_Data = Size_Of_Initialized_Data self.Size_Of_Uninitialized_Data = Size_Of_Uninitialized_Data self.Address_Of_Entry_Point = Address_Of_Entry_Point self.Base_Of_Code = Base_Of_Code self.Base_Of_Data = Base_Of_Data self.Image_Base = Image_Base self.Section_Alignment = Section_Alignment self.File_Alignment = File_Alignment self.Major_OS_Version = Major_OS_Version self.Minor_OS_Version = Minor_OS_Version self.Major_Image_Version = Major_Image_Version self.Minor_Image_Version = Minor_Image_Version self.Major_Subsystem_Version = Major_Subsystem_Version self.Minor_Subsystem_Version = Minor_Subsystem_Version self.Win32_Version_Value = Win32_Version_Value self.Size_Of_Image = Size_Of_Image self.Size_Of_Headers = Size_Of_Headers self.Checksum = Checksum self.Subsystem = Subsystem self.DLL_Characteristics = DLL_Characteristics self.Size_Of_Stack_Reserve = Size_Of_Stack_Reserve self.Size_Of_Stack_Commit = Size_Of_Stack_Commit 
# --- Fragment: auto-generated CybOX binding, class PEOptionalHeaderType ---
# This region is generateDS-style generated code for the CybOX
# "WinExecutableFileObj" schema: a tail of __init__ field assignments, a
# `factory` staticmethod, get_X/set_X accessor pairs for every PE optional
# header field (Magic, linker/OS/image/subsystem versions, section/file
# alignment, sizes, checksum, DLL characteristics, stack/heap reserve and
# commit, loader flags, data directory, hashes), a `hasContent_` predicate
# that ORs all fields, and XML `export` / `exportAttributes` /
# `exportChildren` serializers.
# NOTE(review): the original line breaks were lost in this paste — the text
# below is not valid Python as-is and is kept byte-identical for reference.
# NOTE(review): the fragment is cut at both edges — it begins mid-__init__
# (the enclosing `class` header is above this view) and ends mid-way through
# `exportChildren` (dangling `if self.Minor_Linker_Version is not None:`).
# Do not hand-edit: regenerate from the schema instead.
self.Size_Of_Heap_Reserve = Size_Of_Heap_Reserve self.Size_Of_Heap_Commit = Size_Of_Heap_Commit self.Loader_Flags = Loader_Flags self.Number_Of_Rva_And_Sizes = Number_Of_Rva_And_Sizes self.Data_Directory = Data_Directory self.Hashes = Hashes def factory(*args_, **kwargs_): if PEOptionalHeaderType.subclass: return PEOptionalHeaderType.subclass(*args_, **kwargs_) else: return PEOptionalHeaderType(*args_, **kwargs_) factory = staticmethod(factory) def get_Magic(self): return self.Magic def set_Magic(self, Magic): self.Magic = Magic def validate_HexBinaryObjectPropertyType(self, value): # Validate type cybox_common.HexBinaryObjectPropertyType, a restriction on None. pass def get_Major_Linker_Version(self): return self.Major_Linker_Version def set_Major_Linker_Version(self, Major_Linker_Version): self.Major_Linker_Version = Major_Linker_Version def get_Minor_Linker_Version(self): return self.Minor_Linker_Version def set_Minor_Linker_Version(self, Minor_Linker_Version): self.Minor_Linker_Version = Minor_Linker_Version def get_Size_Of_Code(self): return self.Size_Of_Code def set_Size_Of_Code(self, Size_Of_Code): self.Size_Of_Code = Size_Of_Code def get_Size_Of_Initialized_Data(self): return self.Size_Of_Initialized_Data def set_Size_Of_Initialized_Data(self, Size_Of_Initialized_Data): self.Size_Of_Initialized_Data = Size_Of_Initialized_Data def get_Size_Of_Uninitialized_Data(self): return self.Size_Of_Uninitialized_Data def set_Size_Of_Uninitialized_Data(self, Size_Of_Uninitialized_Data): self.Size_Of_Uninitialized_Data = Size_Of_Uninitialized_Data def get_Address_Of_Entry_Point(self): return self.Address_Of_Entry_Point def set_Address_Of_Entry_Point(self, Address_Of_Entry_Point): self.Address_Of_Entry_Point = Address_Of_Entry_Point def get_Base_Of_Code(self): return self.Base_Of_Code def set_Base_Of_Code(self, Base_Of_Code): self.Base_Of_Code = Base_Of_Code def get_Base_Of_Data(self): return self.Base_Of_Data def set_Base_Of_Data(self, Base_Of_Data): self.Base_Of_Data = 
Base_Of_Data def get_Image_Base(self): return self.Image_Base def set_Image_Base(self, Image_Base): self.Image_Base = Image_Base def get_Section_Alignment(self): return self.Section_Alignment def set_Section_Alignment(self, Section_Alignment): self.Section_Alignment = Section_Alignment def get_File_Alignment(self): return self.File_Alignment def set_File_Alignment(self, File_Alignment): self.File_Alignment = File_Alignment def get_Major_OS_Version(self): return self.Major_OS_Version def set_Major_OS_Version(self, Major_OS_Version): self.Major_OS_Version = Major_OS_Version def get_Minor_OS_Version(self): return self.Minor_OS_Version def set_Minor_OS_Version(self, Minor_OS_Version): self.Minor_OS_Version = Minor_OS_Version def get_Major_Image_Version(self): return self.Major_Image_Version def set_Major_Image_Version(self, Major_Image_Version): self.Major_Image_Version = Major_Image_Version def get_Minor_Image_Version(self): return self.Minor_Image_Version def set_Minor_Image_Version(self, Minor_Image_Version): self.Minor_Image_Version = Minor_Image_Version def get_Major_Subsystem_Version(self): return self.Major_Subsystem_Version def set_Major_Subsystem_Version(self, Major_Subsystem_Version): self.Major_Subsystem_Version = Major_Subsystem_Version def get_Minor_Subsystem_Version(self): return self.Minor_Subsystem_Version def set_Minor_Subsystem_Version(self, Minor_Subsystem_Version): self.Minor_Subsystem_Version = Minor_Subsystem_Version def get_Win32_Version_Value(self): return self.Win32_Version_Value def set_Win32_Version_Value(self, Win32_Version_Value): self.Win32_Version_Value = Win32_Version_Value def get_Size_Of_Image(self): return self.Size_Of_Image def set_Size_Of_Image(self, Size_Of_Image): self.Size_Of_Image = Size_Of_Image def get_Size_Of_Headers(self): return self.Size_Of_Headers def set_Size_Of_Headers(self, Size_Of_Headers): self.Size_Of_Headers = Size_Of_Headers def get_Checksum(self): return self.Checksum def set_Checksum(self, Checksum): 
self.Checksum = Checksum def get_Subsystem(self): return self.Subsystem def set_Subsystem(self, Subsystem): self.Subsystem = Subsystem def get_DLL_Characteristics(self): return self.DLL_Characteristics def set_DLL_Characteristics(self, DLL_Characteristics): self.DLL_Characteristics = DLL_Characteristics def get_Size_Of_Stack_Reserve(self): return self.Size_Of_Stack_Reserve def set_Size_Of_Stack_Reserve(self, Size_Of_Stack_Reserve): self.Size_Of_Stack_Reserve = Size_Of_Stack_Reserve def get_Size_Of_Stack_Commit(self): return self.Size_Of_Stack_Commit def set_Size_Of_Stack_Commit(self, Size_Of_Stack_Commit): self.Size_Of_Stack_Commit = Size_Of_Stack_Commit def get_Size_Of_Heap_Reserve(self): return self.Size_Of_Heap_Reserve def set_Size_Of_Heap_Reserve(self, Size_Of_Heap_Reserve): self.Size_Of_Heap_Reserve = Size_Of_Heap_Reserve def get_Size_Of_Heap_Commit(self): return self.Size_Of_Heap_Commit def set_Size_Of_Heap_Commit(self, Size_Of_Heap_Commit): self.Size_Of_Heap_Commit = Size_Of_Heap_Commit def get_Loader_Flags(self): return self.Loader_Flags def set_Loader_Flags(self, Loader_Flags): self.Loader_Flags = Loader_Flags def get_Number_Of_Rva_And_Sizes(self): return self.Number_Of_Rva_And_Sizes def set_Number_Of_Rva_And_Sizes(self, Number_Of_Rva_And_Sizes): self.Number_Of_Rva_And_Sizes = Number_Of_Rva_And_Sizes def get_Data_Directory(self): return self.Data_Directory def set_Data_Directory(self, Data_Directory): self.Data_Directory = Data_Directory def get_Hashes(self): return self.Hashes def set_Hashes(self, Hashes): self.Hashes = Hashes def hasContent_(self): if ( self.Magic is not None or self.Major_Linker_Version is not None or self.Minor_Linker_Version is not None or self.Size_Of_Code is not None or self.Size_Of_Initialized_Data is not None or self.Size_Of_Uninitialized_Data is not None or self.Address_Of_Entry_Point is not None or self.Base_Of_Code is not None or self.Base_Of_Data is not None or self.Image_Base is not None or self.Section_Alignment is not None 
or self.File_Alignment is not None or self.Major_OS_Version is not None or self.Minor_OS_Version is not None or self.Major_Image_Version is not None or self.Minor_Image_Version is not None or self.Major_Subsystem_Version is not None or self.Minor_Subsystem_Version is not None or self.Win32_Version_Value is not None or self.Size_Of_Image is not None or self.Size_Of_Headers is not None or self.Checksum is not None or self.Subsystem is not None or self.DLL_Characteristics is not None or self.Size_Of_Stack_Reserve is not None or self.Size_Of_Stack_Commit is not None or self.Size_Of_Heap_Reserve is not None or self.Size_Of_Heap_Commit is not None or self.Loader_Flags is not None or self.Number_Of_Rva_And_Sizes is not None or self.Data_Directory is not None or self.Hashes is not None ): return True else: return False def export(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEOptionalHeaderType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(lwrite, level, pretty_print) lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PEOptionalHeaderType') if self.hasContent_(): lwrite('>%s' % (eol_, )) self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(lwrite, level, pretty_print) lwrite('</%s%s>%s' % (namespace_, name_, eol_)) else: lwrite('/>%s' % (eol_, )) def exportAttributes(self, lwrite, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEOptionalHeaderType'): pass def exportChildren(self, lwrite, level, namespace_='WinExecutableFileObj:', name_='PEOptionalHeaderType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Magic is not None: self.Magic.export(lwrite, level, 'WinExecutableFileObj:', name_='Magic', pretty_print=pretty_print) if self.Major_Linker_Version is not 
None: self.Major_Linker_Version.export(lwrite, level, 'WinExecutableFileObj:', name_='Major_Linker_Version', pretty_print=pretty_print) if self.Minor_Linker_Version is not None:
CHINESE RESTAURANT', 'RAFAELLA CAFE', 'DUKES', 'JOEY PEPPERONI PIZZA', 'WGC SOLUTIONS', "BONNIE'S GRILL", 'SUSHI OF GARG46', 'ROBERT SILMAN ASSOC STRUCT ENG DPC', 'ZHENG & ASSOCIATES LLC', 'BUILDING ENGINEERING SYSTEMS PLLC', '<NAME>', "MAMA'S FAMOUS PIZZA", 'MG NEW YORK ARCHITECTS,PLLC', 'PLAN B ENGINEERING', 'C.M. RABINOVITCH ARCHITECTS', 'CO ADAPTIVE ARCHITECTURE PLLC', 'ARTUROS PIZZA', 'BIER INTERNATIONAL HARLEM', 'IL FORNO', 'DYNAMIC ENGINEERING CONSULTANTS PC', '810 DELI & CAFE', 'BBSTUDIO ARCHITECTS', 'CAFFE NAPOLI', 'MCKAY ARCHITECTURE AND DESIGN PLLC', '<NAME>', 'NIU', 'AVISHAY I MAZOR, P.E.', 'AMOIA CODY ARCHITECTURE', 'NEW TING HUA CHINESE RESTAURANT', 'BAJA FRESH', 'IZUMI', 'HAVANA ALMA DEGUMA', 'BISTRO TEN 18', "S'MAC", '<NAME>', '<NAME>', 'CAPPUCINO CAFE', 'LEAN CRUST', 'HAPPY FEET PET', 'GEORGE E. BERGER & ASSOCIATES', 'STEWART ENGINEERING SERVICES, PLLC', 'ISLA RESTAURANT', 'BRIGITTE', 'SUNG HO SHIN ARCHITECT, PC', 'GOLDMAN COPELAND ASSOCIATES', '696 GOURMET', 'FLAG-CLEAN ISLAND LTD', "PERRY'S DINER", 'TIFFIN WALLAH', 'HL SPRING GARDEN', 'RAFAEL VINOLY ARCHITECTS PC', '27 MORTON', 'B&A E, PC', 'MEI WEI RESTAURANT', 'CHOLULITA DELI & RESTAURANT', 'MAJ3STY PIZZA & DINER', 'PESCATORE', 'JI LI ASAIN FOOD INC', 'BLACKSTONE BAR & GRILL', 'DRAGON GARDEN INC.', 'MELS BURGER BAR', "DAVID'S BRISKET HOUSE", 'BANGKOKZ', 'MASAKI TERIYAKI & SUSHI', 'MERCADITO', 'EL NUEVO CARIDAD RESTAURANT', 'CORONA RESTURANT', 'DAVID TURNER ARCHITECT, P.C', 'HOP HAP THAI', 'TANDOOR OVEN', 'CAFE PLYMOUTH', '<NAME>', 'ADS & Y INC.', 'AROQA', 'DELI GRILL RESTAURANT', 'MEZZALUNA', 'MEXICOZINA', 'VEZZO', "CALLIE'S TAQUERIA & TEQUILA BAR", '<NAME>', 'EVEREST', 'MIKE DUE PIZZA', 'D ZIGN STREAM INC', 'CONSTRUCTION MANAGEMENT & SAFETY CO', 'CHUNG HING', 'HIGHLINE PIZZERIA', 'SHARON ENGINEERING, P.C.', 'SEA STAR', 'MEXICAN RESTAURANT', 'CURRY IN A HURRY', 'OMAI', 'EAST HARLEM BOTTLING CO.', 'CHELSEA RISTORANTE', 'BUTCHER BAR SMOKEHOUSE', 'BARNYARD CHEESE SHOP', 'HOMEMADE TANGERIA', 'LA 
PEQUENA', 'PETRUS FORTUNE, PE', "JENNY'S MARKET PLACE", 'GIRASOL BAKERY', 'LEDER-LUIS ARCHITECTURAL DESIGN, PL', 'SAW SHACK', 'TAQUERIA GUADALUPE', 'ROSAMUNDE', 'DELI & GRILL', 'SALUGGI', 'EGNINEERING GROUP ASSOCIATES PC', 'CORE STATES GROUP', 'MONICA EXPEDITOR', 'CAFE MINGALA', 'HAPPY GARDEN RESTAURANT', 'DAMA FALAFEL', 'COLALA CHINESE & JAPANESE RESTAURANT', 'INGA PIRCU RESTAURANT', 'RAVAGH PERSIAN GRILL RESTAURANT', 'CAFE 86', 'REILLY TARANTINO ENGINEERING', 'SHE SHE PIZZERIA', 'BEIJING CHINESE RESTAURANT', 'HAPPY WOK CHINESE RESTAURANT', 'YONG SHENG RESTAURANT', "CHARLY'S", 'EDWARDS & ZUCK CONSULTING ENGINEERS', 'GREEBAY', 'SEVERUD ASSOCIATES CONSULTING ENGIN', 'PIZZA SUPREMA', 'NA', 'TASTE OF TOKYO', 'R.G. VANDERWEIL ENGINEERS, PC', 'DAR 525', 'AINSWORTH', 'METRO COFFEE SHOP', 'ARCHETYPE CONSULTANTS INC', 'TORTUGA MEXICAN FOOD', 'GOOD TASTE RESTAURANT', 'PLUS GROUP CONSULTING ENGINEERING,', 'LENA LATIN GRILL', 'BANGKOK GRAND PALACE', 'TARTIME', 'NIRVANA', "BUBBA'S BISTRO", 'KORONET PIZZA', 'YUM YUM', 'PANCA', 'C.WALL ARCHITECTURE', 'LIBRETTOS', 'PIETRA SANTA', 'PATH ARCHITECTURE PC', 'LUCK THAI', 'BIG ORANGE CONSULTING INC', 'HO MEI CHINESE RESTAURANT', 'RAMEN THUKPA', 'KAM WAH RESTAURANT', 'MALINGO RESTAURANT & BAR', 'AKT ARCHITECTS', 'THREE E THAI', 'NOUVELLE', 'VESELKA', 'INSIGHT STUDIO ARCHITECTURE PC', 'JUDGE ARCHITECTURE, PC', 'BAY LEAF INDIAN FOOD', 'DINASTIA CHINA INC', '<NAME>, RA', '<NAME>', 'KCE CONSULTING ENGINEERING PLLC', 'NU SUSHI', 'BAGATELLE', 'THE STEPHEN B JACOBS GROUP PC', 'FRANK COLASURDO ARCHITECTS, INC', 'RAMAKER & ASSOCIATES, INC.', 'ORIENTAL KITCHEN', 'ACIES ENGINEERING', 'TURKISH KITCHEN', "ANNIE'S", '<NAME>', 'AINSWORTH PARK', 'THAI @ LEX', 'SAKETUMI', 'OLLIES TO GO', '<NAME>', 'LOS CUENCANITOS', 'YAMASHIRO', 'BROTHER JIMMYS BBQ', 'MILK BURGER', 'OVELIA', 'AKO JAPANESE CUISINE', "MARIA'S BISTRO MEXICANO", 'NO.1 RESTAURANT', 'PUNJABI TASKA INDIAN FOOD', 'MANHATTAN DINER', '<NAME>', 'INFINITY DESIGN CONSULTANT INC.', 'MAOZ VEGETARIAN', 
'NEW CHINA RESTAURANT', 'SULLIVAN BISTRO', 'WAVENUE THAI', "ELLIE'S DINER", 'THE MALT HOUSE', 'ROCKING HORSE CAF\xc9', 'CAFE GUSTO', 'PATZERIA', 'CIRCLE PIZZERIA', 'KI SUSHI', 'LANDMARC', 'M. ARTHUR GENSLER JR. & ASSOCIATES,', 'ETCETERA', 'BONCHON', 'HAAB', 'YORKSHIRE WINES', 'BOTTINO TAKE OUT', 'IN & OUT CHICKEN', 'STEP<NAME>', "D'ALESSANDRO & ASSOCIATES", 'CAFFE CAFE', 'BLIND PIG', 'WALTER T. GORMAN, PE, PC', 'PATRON MEXICAN GRILL', 'SH ENGINEERING PLLC', 'CABRINI CAFE', 'COBA', 'TECTONIC ENGINEERING', 'SUN FLOWER DINER', 'STATION CAFE', 'MATCH', 'CHANG PAI', 'BAR 6', 'D<NAME>', 'GENSLER', 'PONGAL', "OTTO'S TACOS", 'EL RINCONCITO', 'THOMAS VELTRE. P.E., P.C.', 'CHINA ONE FRESH TACO', 'COTTA', '5TH FLOOR DESIGN', 'QAZ2', "HARRIET'S KITCHEN", 'MIKAKU', 'ANDREW FORMICHELLA', 'BEACH CAFE', 'CANTINA TAQUERIA & TEQUILA BAR', 'DIG INN SEASONAL MARKET', 'CHARLIE BIRD', "DANIELLO'S PIZZERIA", 'OFF SHORE RESTAURANT', 'CHINA KING RESTAURANT', 'PAUL CHRISTAKOS ARCHITECTURE', 'FEINGOLD AND GREGORY ARCHITECTS. PC', 'TRIMBLE ARCHITECTURE PC', 'ATAMI', 'MARKET DINER', 'LYCHEE HOUSE', 'ASTORIA PIZZERIA', 'ACELUCK THAI CUISINE', 'DONA BELLA PIZZA', 'GRAND SICHUAN EASTERN', 'JOSEPHS', 'EMPANADAS MONUMENTAL', 'PETER F. FARINELLA ARCHITECT, P.C.', "BABALUCCI'S", 'BRAVO KOSHER PIZZA', 'MOTORINO PIZZERIA', 'BEERZAAR NYC', 'BRAVO PIZZA', 'LINE BAGEL', 'THE RAIL LINE DINER', 'GU<NAME>', 'TSS ENGINEERS', 'SANARK DESIGN & ENGINEERING D.P.C', '200 FIFTH', 'FRANK R. 
NORA, P.E.', 'E2 PROJECT MANAGEMENT, LLC', 'UMI SUSHI', 'NEW YORK ENGINEERING ASSOCIATES, P.', 'LA ISLA', 'PET ARK', 'SANTA CLARITA RESTAURANT', 'BORU BORU', 'CHATEAU 49', 'SHUN LEE', 'CREATE & GO', 'MINGS', '<NAME>', "NEW KING'S FOOD RESTAURANT", 'BAGELS & MORE', 'AZOGUENITA BAKERY & RESTAURANT', 'ENGINEERING PROFESSIONAL SERVICE P.', "GENA'S GRILL", 'AWASH ETHIOPIAN RESTAURANT', 'OUR EVERGREEN', 'GHANDI', 'LA DELICIA TROPICAL', 'BELLA VIA RESTAURANT', 'CORNER CAFE & BAKERY', 'MAR<NAME> PIZZA', 'ANZALONE ARCHITECTURE, PLLC', '<NAME>', 'MEKONG', 'GRAND AVENUE PIZZA', 'SUSHI OF GARI TRIBECA', 'SANKAR K MANDAL P.E.', 'ARCHITECTURE PLUS INFORMATION', 'NORMAN C. LOK, MBA, P.E.', 'LCHIRO', 'KAM MAN RESTAURANT', '<NAME>', 'THE MANCHESTER', 'AMITY HALL', 'ANCHOR HEIGHTS WINE BAR', "CAFE D' ALSACE", 'GYRO UNO PIZZA', "MANCINI'S BRICK OVEN PIZZA", 'ESCOLAS CORP', 'BETTO LEIVA', 'STAND 4', 'LANGAN ENGINEERING', 'PERSEPOLIS', 'GOOD BURGER', 'OI ARCHITECTURE + DESIGN PLLC', 'AMCOOK FUSION CUISINE', 'TABRIZ GROUP DESIGN', 'UFC CHICKEN', 'GANDHI INDIAN RESTAURANT', "CLEM'S", 'NEW MEXICO PLACE', 'AZ PROFESSIONAL ENGINEERING PC', 'LEGEND 72', 'PIZZA BOY', 'LA FLOR DE IZUCAR', 'NEW CAPITAL RESTAURANT', 'OMONIA CAFE', '3RD AVE ALE HOUSE', 'BEARDED LADY', 'ANDREW BERMAN ARCHITECT PLLC', 'FATTY FISH', 'ESTACIA', 'EMPIRE ASIAN', 'B<NAME>', 'TASTY KING', 'LEGRANNE CAFE', 'BURRITO', 'FRESCO 57', 'ANTOJITOS MEXICANOS', 'LA NOSTRA PIZZERIA', 'MAX RESTAURANT', '<NAME> & SOUL', 'SOHO CAFE & GRILL', "CARACA'S BROOKLYN", 'LOMA #1 RESTAURANT', "JANELLE'S CARIBBEAN AMERICAN CUISINE & BAR", 'KMP DESIGN & ENGINEERING PLLC', '<NAME>, AIA', 'EMPIRE SZECHUAN', 'ADRIATIC PIZZA', 'BBQ DALLAS', 'MAMMA MIA', 'EL PUERTO MEXICANO', "ROCCO'S CALAMARI", 'BURGERFI', 'D & D COFFEE SHOP', 'KNF SERVICES CROP', "FRATELLI'S", 'MAD DOG', 'EL MIO CID', 'FIFTH AVENUE', 'MR. BROADWAY KOSHER DELI', 'MICHAEL KANG ARCHITECT, P.C.', 'NOODLE VILLAGE', 'NEW KIM WEI', 'BEDMOUNT TENT', 'EXCELLENT PORK CHOP', 'P.S. 
BURGERS', 'PATSYE', 'GREAT WALL RESTAURANT', 'MI CASA', 'TKA STUDIO', 'RASA', "ELLARY'S GREENS", 'CENTEK ENGINEERING, INC.', 'CHINA MOON', 'CUCINA BENE', "SAMMY'S NOODLE SHOP", 'M B KIM ARCHITECT P C', 'BLUESTONE LANE', 'BRICK OVEN PIZZA 33', 'SHUN LEE PALACE', 'MA-N-POP SOUL FOOD', 'HOP WON', 'SEO ARCHITECT PLLC', 'VERMICELLI', 'TAMAYO ARCHITECTS, P.C.', 'EAT', 'COSENTINI ASSOCIATES', 'BISTRO 61', 'COW GIRL', 'REYES MEXICAN DELI', 'IMA ENGINEERING PC', '1 AYAMA', 'BAGELSMITH', 'FRESCO TORTILLAS & LLC', 'WOK WOK', 'LA GRANJA', 'KAYMIL', 'ANTHONY MORALI ARCHITECT PLLC', 'FOOD SING', "ROSELLA'S PIZZERIA", 'BUFF PATTY RESTAURANT & BAKERY', 'BRIDE VIEW DINER', 'LA ESQUINA', 'KING ROYAL FRIED CHICKEN', 'VINNYS PIZZERIA', 'CAFE HIMALAYA', 'EL POLLITO MEXICANO', 'JG NEUKOMM ARCHITECTURE', 'DAL H CHUN ENGINEER PC', 'STAGE DOOR DELI', 'ROYAL CONST. PROF. SERVI. P.C.', 'RAJAKARUNA & ETTLINGER P.C.', "SAHARA'S", 'GF55 PARTNERS', "FRANK'S TRATTORIA/FELIPE'S PIZZA", "EL NUEVO POLLO'S", "GEORGIA'S EAST SIDE BBQ", 'BUILDING ON BOND', 'DELICIAS CUENCANAS RESTAURANT', 'MKM LANDSCAPE ARCHITECTURE', 'GEORGIOS COUNTRY GRILL', 'LENOX HILL PIZZA', 'FRIENDS PIZZA CAFE', 'ASHOKA', 'DAVID PIZZA', "LUKE'S PIZZA", 'OKI', 'ATMOSPHERE DESIGN & ARCH PLLC', 'MEI WEI HONG KONG RESTAURANT', "COSMO'S", 'Z & T ENGINEERING, P.C.', 'SONGKRAN', 'BONSIGNOUR CAFE', 'GREEN LEAVES CHINESE RESTAURANT', 'SAUL DAVID HAYUTIN ARCHITECTS', 'SHANGHAI CHINESE', 'BIBLIO', 'JOHNS RESTAURANT', "DOMINIO'S", 'DOMANI TECHNICAL & DESIGN SERVICES', 'MOORE ASSOCIATE, LLC', '<NAME>', 'STONEFIELD ENGINEERING & DESIGN, LL', 'FURMAN & FURMAN ARCHITECTS', 'DOKEBI BAR & GRILL', 'AKELIUS REAL ESTATE MANAGEMENT LLC', 'MOON CAKE FOODS', 'FLUSHING ENGINEERING SERVICE', 'FMC ENGINEERING, PC', 'HAPPY RESTAURANT', '<NAME>', 'EL VIEJO YAYO', 'KOREAN EXPRESS', 'LIDO', 'CITY BUILDING ARCHITECT P.C.', '<NAME>', 'AGE ENGINEERING, LLC', 'SILVER GARDEN', 'RAIN', 'JAPANESE NOMADO 33', 'CUONO ENGINEERING', 'EAST BROADWAY RESTAURANT', 'BLT 
PRIME', "GIOVANNI'S PIZZA", 'SEA BREEZE', 'BAGELS & SCHMEAR', 'PDMS DESIGN GROUP', 'SANTE FE', 'FRS CONSTRUCTION LLC', 'ASYA', 'BLACK FOREST BROOKLYN', 'EDWARDS BRUNCH', 'TIMES BUILDINGS ENGINEERING SER PC', "LUU'S BAGUETTE", 'EL PEQUE\xd1O COFFEE SHOP', 'ONE STOP PIZZA', 'HAMILTON', 'FLAVORS HEALTHY', 'BRONZINO ENGINEERING, P.C.', 'IDEAL MINI MART', 'FATOOSH', 'OFFICE OF XIAO YE ZHENG, P.E.', "ARTIE'S (LLC CORN BEEF EXPRESS)", 'B CUP CAFE', 'BLONDIES SPORTS', 'GREAT CHINA', "LIN'S GARDEN RESTAURANT", 'CAFE ALTRO PARADISO',
""" A commandline tool for semi-automatically converting CSV to RDF try: ``csv2rdf --help`` """ from __future__ import print_function import sys import re import csv import getopt import fileinput import codecs import time import datetime import warnings import configparser from urllib.parse import quote import rdflib from rdflib import RDF, RDFS from rdflib.namespace import split_uri __all__ = ["CSV2RDF"] HELP = """ csv2rdf.py \ -b <instance-base> \ -p <property-base> \ [-D <default>] \ [-c <classname>] \ [-i <identity column(s)>] \ [-l <label columns>] \ [-s <N>] [-o <output>] \ [-f configfile] \ [--col<N> <colspec>] \ [--prop<N> <property>] \ <[-d <delim>] \ [-C] [files...]" Reads csv files from stdin or given files if -d is given, use this delimiter if -s is given, skips N lines at the start Creates a URI from the columns given to -i, or automatically by numbering if none is given Outputs RDFS labels from the columns given to -l if -c is given adds a type triple with the given classname if -C is given, the class is defined as rdfs:Class Outputs one RDF triple per column in each row. Output is in n3 format. Output is stdout, unless -o is specified Long options also supported: \ --base, \ --propbase, \ --ident, \ --class, \ --label, \ --out, \ --defineclass Long options --col0, --col1, ... can be used to specify conversion for columns. Conversions can be: ignore, float(), int(), split(sep, [more]), uri(base, [class]), date(format) Long options --prop0, --prop1, ... 
can be used to use specific properties, rather than ones auto-generated from the headers -D sets the default conversion for columns not listed -f says to read config from a .ini/config file - the file must contain one section called csv2rdf, with keys like the long options, i.e.: [csv2rdf] out=output.n3 base=http://example.org/ col0=split(";") col1=split(";", uri("http://example.org/things/", "http://xmlns.com/foaf/0.1/Person")) col2=float() col3=int() col4=date("%Y-%b-%d %H:%M:%S") """ # bah - ugly global uris = {} def toProperty(label): """ CamelCase + lowercase inital a string FIRST_NM => firstNm firstNm => firstNm """ label = re.sub("[^\w]", " ", label) label = re.sub("([a-z])([A-Z])", "\\1 \\2", label) label = label.split(" ") return "".join([label[0].lower()] + [x.capitalize() for x in label[1:]]) def toPropertyLabel(label): if not label[1:2].isupper(): return label[0:1].lower() + label[1:] return label def index(l_, i): """return a set of indexes from a list >>> index([1,2,3],(0,2)) (1, 3) """ return tuple([l_[x] for x in i]) def csv_reader(csv_data, dialect=csv.excel, **kwargs): csv_reader = csv.reader(csv_data, dialect=dialect, **kwargs) for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: yield [str(cell, "utf-8", errors="replace") for cell in row] def prefixuri(x, prefix, class_=None): if prefix: r = rdflib.URIRef(prefix + quote(x.encode("utf8").replace(" ", "_"), safe="")) else: r = rdflib.URIRef(x) uris[x] = (r, class_) return r # meta-language for config class NodeMaker(object): def range(self): return rdflib.RDFS.Literal def __call__(self, x): return rdflib.Literal(x) class NodeUri(NodeMaker): def __init__(self, prefix, class_): self.prefix = prefix if class_: self.class_ = rdflib.URIRef(class_) else: self.class_ = None def __call__(self, x): return prefixuri(x, self.prefix, self.class_) def range(self): return self.class_ or rdflib.RDF.Resource class NodeLiteral(NodeMaker): def __init__(self, f=None): self.f = f class 
NodeFloat(NodeLiteral): def __call__(self, x): if not self.f: return rdflib.Literal(float(x)) if callable(self.f): return rdflib.Literal(float(self.f(x))) raise Exception("Function passed to float is not callable") def range(self): return rdflib.XSD.double class NodeInt(NodeLiteral): def __call__(self, x): if not self.f: return rdflib.Literal(int(x)) if callable(self.f): return rdflib.Literal(int(self.f(x))) raise Exception("Function passed to int is not callable") def range(self): return rdflib.XSD.int class NodeBool(NodeLiteral): def __call__(self, x): if not self.f: return rdflib.Literal(bool(x)) if callable(self.f): return rdflib.Literal(bool(self.f(x))) raise Exception("Function passed to bool is not callable") def range(self): return rdflib.XSD.bool class NodeReplace(NodeMaker): def __init__(self, a, b): self.a = a self.b = b def __call__(self, x): return x.replace(self.a, self.b) class NodeDate(NodeLiteral): def __call__(self, x): return rdflib.Literal(datetime.datetime.strptime(x, self.f)) def range(self): return rdflib.XSD.dateTime class NodeSplit(NodeMaker): def __init__(self, sep, f): self.sep = sep self.f = f def __call__(self, x): if not self.f: self.f = rdflib.Literal if not callable(self.f): raise Exception("Function passed to split is not callable!") return [self.f(y.strip()) for y in x.split(self.sep) if y.strip() != ""] def range(self): if self.f and isinstance(self.f, NodeMaker): return self.f.range() return NodeMaker.range(self) default_node_make = NodeMaker() def _config_ignore(*args, **kwargs): return "ignore" def _config_uri(prefix=None, class_=None): return NodeUri(prefix, class_) def _config_literal(): return NodeLiteral() def _config_float(f=None): return NodeFloat(f) def _config_replace(a, b): return NodeReplace(a, b) def _config_int(f=None): return NodeInt(f) def _config_bool(f=None): return NodeBool(f) def _config_date(format_): return NodeDate(format_) def _config_split(sep=None, f=None): return NodeSplit(sep, f) config_functions = { 
"ignore": _config_ignore, "uri": _config_uri, "literal": _config_literal, "float": _config_float, "int": _config_int, "date": _config_date, "split": _config_split, "replace": _config_replace, "bool": _config_bool, } def column(v): """Return a function for column mapping""" return eval(v, config_functions) class CSV2RDF(object): def __init__(self): self.CLASS = None self.BASE = None self.PROPBASE = None self.IDENT = "auto" self.LABEL = None self.DEFINECLASS = False self.SKIP = 0 self.DELIM = "," self.DEFAULT = None self.COLUMNS = {} self.PROPS = {} self.OUT = codecs.getwriter("utf-8")(sys.stdout, errors="replace") self.triples = 0 def triple(self, s, p, o): self.OUT.write("%s %s %s .\n" % (s.n3(), p.n3(), o.n3())) self.triples += 1 def convert(self, csvreader): start = time.time() if self.OUT: sys.stderr.write("Output to %s\n" % self.OUT.name) if self.IDENT != "auto" and not isinstance(self.IDENT, tuple): self.IDENT = (self.IDENT,) if not self.BASE: warnings.warn("No base given, using http://example.org/instances/") self.BASE = rdflib.Namespace("http://example.org/instances/") if not self.PROPBASE: warnings.warn("No property base given, using http://example.org/property/") self.PROPBASE = rdflib.Namespace("http://example.org/props/") # skip lines at the start for x in range(self.SKIP): next(csvreader) # read header line header_labels = list(csvreader.next()) headers = dict(enumerate([self.PROPBASE[toProperty(x)] for x in header_labels])) # override header properties if some are given for k, v in self.PROPS.items(): headers[k] = v header_labels[k] = split_uri(v)[1] if self.DEFINECLASS: # output class/property definitions self.triple(self.CLASS, RDF.type, RDFS.Class) for i in range(len(headers)): h, l_ = headers[i], header_labels[i] if h == "" or l_ == "": continue if self.COLUMNS.get(i, self.DEFAULT) == "ignore": continue self.triple(h, RDF.type, RDF.Property) self.triple(h, RDFS.label, rdflib.Literal(toPropertyLabel(l_))) self.triple(h, RDFS.domain, self.CLASS) 
self.triple( h, RDFS.range, self.COLUMNS.get(i, default_node_make).range() ) rows = 0 for l_ in csvreader: try: if self.IDENT == "auto": uri = self.BASE["%d" % rows] else: uri = self.BASE[ "_".join( [ quote(x.encode("utf8").replace(" ", "_"), safe="") for x in index(l_, self.IDENT) ] ) ] if self.LABEL: self.triple( uri, RDFS.label, rdflib.Literal(" ".join(index(l_, self.LABEL))) ) if self.CLASS: # type triple self.triple(uri, RDF.type, self.CLASS) for i, x in enumerate(l_): x = x.strip() if x != "": if self.COLUMNS.get(i, self.DEFAULT) == "ignore": continue try: o = self.COLUMNS.get(i, rdflib.Literal)(x) if isinstance(o, list): for _o in o: self.triple(uri, headers[i], _o) else: self.triple(uri, headers[i], o) except Exception as e: warnings.warn( "Could not process value for column " + "%d:%s in row %d, ignoring: %s " % (i, headers[i], rows, e.message) ) rows += 1 if rows % 100000 == 0: sys.stderr.write( "%d rows, %d triples, elapsed %.2fs.\n" % (rows, self.triples, time.time() - start) ) except: sys.stderr.write("Error processing line: %d\n" % rows) raise # output types/labels for generated URIs classes = set() for l_, x in uris.items(): u, c = x self.triple(u, RDFS.label, rdflib.Literal(l_)) if c: c = rdflib.URIRef(c) classes.add(c) self.triple(u, RDF.type, c) for c in classes: self.triple(c, RDF.type, RDFS.Class) self.OUT.close() sys.stderr.write("Converted %d rows into %d triples.\n" % (rows, self.triples)) sys.stderr.write("Took %.2f seconds.\n" % (time.time() - start)) def main(): csv2rdf = CSV2RDF() opts, files = getopt.getopt( sys.argv[1:], "hc:b:p:i:o:Cf:l:s:d:D:", [ "out=", "base=", "delim=", "propbase=", "class=", "default=" "ident=", "label=", "skip=", "defineclass", "help", ], ) opts = dict(opts) if "-h" in opts or "--help" in opts: print(HELP) sys.exit(-1) if "-f" in opts: config = configparser.ConfigParser() config.readfp(open(opts["-f"])) for k, v in config.items("csv2rdf"): if k == "out": csv2rdf.OUT = codecs.open(v, "w", "utf-8") elif k == 
"base": csv2rdf.BASE = rdflib.Namespace(v) elif k == "propbase": csv2rdf.PROPBASE = rdflib.Namespace(v) elif k == "class": csv2rdf.CLASS = rdflib.URIRef(v) elif k == "defineclass": csv2rdf.DEFINECLASS = bool(v) elif k == "ident": csv2rdf.IDENT = eval(v) elif k == "label": csv2rdf.LABEL = eval(v) elif k == "delim": csv2rdf.DELIM = v elif k == "skip": csv2rdf.SKIP = int(v) elif k == "default": csv2rdf.DEFAULT = column(v) elif k.startswith("col"): csv2rdf.COLUMNS[int(k[3:])] = column(v) elif k.startswith("prop"): csv2rdf.PROPS[int(k[4:])] = rdflib.URIRef(v) if "-o" in opts: csv2rdf.OUT = codecs.open(opts["-o"], "w", "utf-8") if "--out" in opts: csv2rdf.OUT = codecs.open(opts["--out"], "w", "utf-8") if "-b" in opts: csv2rdf.BASE = rdflib.Namespace(opts["-b"]) if "--base" in opts: csv2rdf.BASE = rdflib.Namespace(opts["--base"]) if "-d" in opts: csv2rdf.DELIM = opts["-d"] if "--delim" in opts: csv2rdf.DELIM = opts["--delim"] if "-D" in opts: csv2rdf.DEFAULT = column(opts["-D"]) if "--default" in opts: csv2rdf.DEFAULT = column(opts["--default"]) if "-p" in opts: csv2rdf.PROPBASE = rdflib.Namespace(opts["-p"]) if "--propbase" in opts: csv2rdf.PROPBASE = rdflib.Namespace(opts["--propbase"]) if "-l" in opts: csv2rdf.LABEL = eval(opts["-l"]) if "--label" in opts: csv2rdf.LABEL = eval(opts["--label"]) if "-i" in opts: csv2rdf.IDENT = eval(opts["-i"]) if "--ident" in opts: csv2rdf.IDENT =
# --- Fragment: semantic-labeling REST service, class Server ---
# NOTE(review): the leading `<gh_stars>0` token is a dataset-scraping
# artifact, not valid Python — it must be removed from the real file.
# Layout (line breaks were lost in this paste; text kept byte-identical):
# module-level Elasticsearch indexer/searcher setup, then class Server with:
#   __init__            - MongoDB collection handle + MyRandomForest classifier,
#                         trained immediately on an empty source list.
#   _create_column      - insert a column document for a semantic type;
#                         returns 409 "Column already exists" unless force=True,
#                         in which case the old documents are deleted first.
#   _predict_column     - wraps values in a Column, marks it "to_predict", and
#                         ranks candidate types via the searcher + classifier.
#   _update_bulk_add_model - re-predicts learned semantic types for each named
#                         column node of a bulk-add model graph and persists
#                         the updated model document.
#   predict_post        - strips/filters input data, defaults source_names to
#                         every source in the db, predicts, then intersects
#                         allowed type ids by namespace and/or column model
#                         before building the sorted response body.
#   semantic_types_get  - filters semantic-type documents by class/property/
#                         namespaces and by column-level attributes, optionally
#                         attaching column (and column data) listings.
#   semantic_types_post_put - cut off mid-method at the end of this view
#                         (stops right after deriving `namespace` from class_).
# NOTE(review): constants like ID, DATA_TYPE_COLUMN, get_column_id etc. come
# from the `service` star-import — presumably the service's shared schema
# helpers; verify against that module.
<gh_stars>0 import validators from elasticsearch import Elasticsearch from pymongo import MongoClient import random from semantic_labeling.lib.column import Column from semantic_labeling.lib.source import Source from semantic_labeling.main.random_forest import MyRandomForest from semantic_labeling.search.indexer import Indexer from semantic_labeling.search.searcher import Searcher from service import * elastic_search = Elasticsearch() indexer = Indexer(elastic_search) searcher = Searcher(elastic_search) class Server(object): def __init__(self): self.db = MongoClient().data.service self.classifier = MyRandomForest({}, {}, DATA_MODEL_PATH) self.classifier.train([]) ################ Stuff for use in this file ################ def _create_column(self, column, type_id, column_name, source_name, model, force=False): """ Create a column in a semantic type and return the column's id if it was created successfully. Notes: If the column already exists and force is not set to true, a 409 will be returned and no data will be modified. 
:param type_id: Id of the semantic type this column belongs to :param column_name: Name of the column to be created :param source_name: Name of the source of the column to be created :param model: Model of the column to be created :param data: Data which will be added to the column on creation :param force: Force create the column, if this is true and the column exists the old column will be deleted (with all of its data) before creation :return: The id of the new column and a response code of 201 if the creation was successful, otherwise it will be an error message with the appropriate error code """ column_id = get_column_id(type_id, column_name, source_name, model) db_body = {ID: column_id, DATA_TYPE: DATA_TYPE_COLUMN, TYPE_ID: type_id, COLUMN_NAME: column_name, SOURCE_NAME: source_name, MODEL: model} if self.db.find_one(db_body): if force: self.db.delete_many(db_body) else: return "Column already exists", 409 db_body.update(column.to_json()) self.db.insert_one(db_body) return column_id, 201 def _predict_column(self, column_name, source_names, data): """ Predicts the semantic type of a column. :param column_name: Name of the column :param source_names: List of source names :param data: The data to predict based opon :return: A list of dictionaries which each contain the semantic type and confidence score """ att = Column(column_name, source_names[0]) # print(data) for value in data: att.add_value(value) att.semantic_type = "to_predict" att.prepare_data() return att.predict_type(searcher.search_types_data(INDEX_NAME, source_names), searcher.search_similar_text_data(INDEX_NAME, att.value_text, source_names), self.classifier) def _update_bulk_add_model(self, model, column_model): """ Updates the bulk add model in the db and also returns it. 
:param model: The current bulk add model :param column_model: The model of the columns which are being updated against :return: The updated bulk add model """ for n in model[BAC_GRAPH][BAC_NODES]: if n.get(BAC_COLUMN_NAME): if n[BAC_COLUMN_NAME] == BAC_COLUMN_NAME_FILE_NAME: continue column_id = get_column_id(get_type_id(n[BAC_USER_SEMANTIC_TYPES][0][BAC_CLASS][BAC_URI], n[BAC_USER_SEMANTIC_TYPES][0][BAC_PROPERTY][BAC_URI]), n[BAC_COLUMN_NAME], model[BAC_NAME], column_model) prediction = self._predict_column(n[BAC_COLUMN_NAME], [model[BAC_NAME]], self.db.find_one({DATA_TYPE: DATA_TYPE_COLUMN, ID: column_id})[DATA]) n[BAC_LEARNED_SEMANTIC_TYPES] = [] for t in prediction: type_info = decode_type_id(t[SL_SEMANTIC_TYPE]) od = collections.OrderedDict() od[BAC_CLASS] = {BAC_URI: type_info[0]} od[BAC_PROPERTY] = {BAC_URI: type_info[1]} od[BAC_CONFIDENCE_SCORE] = t[SL_CONFIDENCE_SCORE] n[BAC_LEARNED_SEMANTIC_TYPES].append(od) self.db.update_one({DATA_TYPE: DATA_TYPE_MODEL, ID: model[BAC_ID]}, {"$set": {BULK_ADD_MODEL_DATA: model}}) return model ################ Predict ################ def predict_post(self, data, namespaces=None, column_names=None, source_names=None, models=None): """ Predicts the semantic type of the given data. :param namespaces: List of allowed namespaces :param column_names: List of allowed column names :param source_names: List of allowed source names :param models: List of allowed column models :param data: List of the data values to predict. 
:return: A return message (if it is successful this will be a list of the predicted types) and a return code """ data = [x.strip() for x in data] data = [x for x in data if x] if not data: return "Predicting data cannot be empty", 500 if source_names is None: # If no source names are given just use all of the source names in the db source_names = set() for col in self.db.find({DATA_TYPE: DATA_TYPE_COLUMN}): source_names.add(col[SOURCE_NAME]) source_names = list(source_names) if len(source_names) < 1: return "You must have columns to be able to predict", 400 #### Predict the types ## Do the actual predicting using the semantic labeler predictions = self._predict_column(column_names[0], source_names, data) if len(predictions) < 1: return "No matches found", 404 ## Filter the results allowed_ids_namespaces = None allowed_ids_models = None all_allowed_ids = None if namespaces is not None: allowed_ids_namespaces = set() current_allowed_types = list( self.db.find({DATA_TYPE: DATA_TYPE_SEMANTIC_TYPE, NAMESPACE: {"$in": namespaces}})) for prediction in current_allowed_types: allowed_ids_namespaces.add(prediction[ID]) if models: allowed_ids_models = set() current_allowed_types = list(self.db.find({DATA_TYPE: DATA_TYPE_COLUMN, MODEL: {"$in": models}})) for c in current_allowed_types: allowed_ids_models.add(c[TYPE_ID]) if allowed_ids_namespaces is not None and allowed_ids_models is not None: all_allowed_ids = allowed_ids_namespaces & allowed_ids_models elif allowed_ids_namespaces is not None and allowed_ids_models is None: all_allowed_ids = allowed_ids_namespaces elif allowed_ids_namespaces is None and allowed_ids_models is not None: all_allowed_ids = allowed_ids_models return_body = [] for prediction in predictions: print(prediction) for type_id, exact_score in prediction[1]: if all_allowed_ids is not None: if prediction[SL_SEMANTIC_TYPE] not in all_allowed_ids: continue obj_dict = {TYPE_ID_PATH: type_id, SCORE: exact_score} type_class_property = decode_type_id(type_id) 
obj_dict[CLASS] = type_class_property[0] obj_dict[PROPERTY] = type_class_property[1] return_body.append(obj_dict) return_body.sort(key=lambda x: x[SCORE], reverse=True) return json_response(return_body, 200) ################ SemanticTypes ################ def semantic_types_get(self, class_=None, property_=None, namespaces=None, source_names=None, column_names=None, column_ids=None, models=None, return_columns=False, return_column_data=False): """ Returns all of the semantic types (and optionally their columns and columns' data) filtered by the given parameters. :param class_: The class of the semantic types to get :param property_: The property of the semantic types to get :param namespaces: The possible namespaces of the semantic types to get :param source_names: The possible source names of at least one column of a semantic type must have :param column_names: The possible column names of at least one column of a semantic type must have :param column_ids: The possible column ids of at least one column of a semantic type must have :param models: The possible column model of at least one column of a semantic type must have :param return_columns: True if all of the columns (but not the data in the columns) should be returned with the semantic types :param return_column_data: True if all of the columns and their data should be returned with the semantic types :return: All of the semantic types which fit the following parameters """ # Find all of the type ids that satisfy the class, property, and namespaces db_body = {DATA_TYPE: DATA_TYPE_SEMANTIC_TYPE} if class_ is not None: db_body[CLASS] = class_ if property_ is not None: db_body[PROPERTY] = property_ if namespaces is not None: db_body[NAMESPACE] = {"$in": namespaces} possible_result = list(self.db.find(db_body)) possible_type_ids = set() for t in possible_result: possible_type_ids.add(t[ID]) # Find all of the type ids from the columns which satisfy the other parameters if source_names or column_names or column_ids 
or models: db_body = {DATA_TYPE: DATA_TYPE_COLUMN} if source_names is not None: db_body[SOURCE_NAME] = {"$in": source_names} if column_names is not None: db_body[COLUMN_NAME] = {"$in": column_names} if column_ids is not None: db_body[ID] = {"$in": column_ids} if models is not None: db_body[MODEL] = {"$in": models} other_possible_ids = set() for col in self.db.find(db_body): other_possible_ids.add(col[TYPE_ID]) possible_type_ids = possible_type_ids & other_possible_ids # Construct the return body return_body = [] for t in possible_result: if t[ID] in possible_type_ids: o = collections.OrderedDict() o[TYPE_ID_PATH] = t[ID] o[CLASS] = t[CLASS] o[PROPERTY] = t[PROPERTY] o[NAMESPACE] = t[NAMESPACE] return_body.append(o) # Add the column data if requested if return_columns: db_body = {DATA_TYPE: DATA_TYPE_COLUMN} for type_ in return_body: db_body[TYPE_ID] = type_[TYPE_ID_PATH] type_[COLUMNS] = clean_columns_output(self.db.find(db_body), return_column_data) if len(return_body) < 1: return "No Semantic types matching the given parameters were found", 404 return json_response(return_body, 200) def semantic_types_post_put(self, class_, property_, force=False): """ Creates a semantic type and returns the id if it was successful. 
Notes: If the type already exists and force is not set to true a 409 will be returned and no data will be modified :param class_: The class of the semantic type, note that this must be a valid URL :param property_: The property of the semantic type :param force: Force create the semantic type, if this is true and the type already exists the existing type (and all of its columns and data) will be deleted before creation :return: The id of the new semantic type and a response code of 201 if the creation was successful, otherwise it will be an error message with the appropriate error code """ class_ = class_.rstrip("/") property_ = property_.rstrip("/") ## Verify that class is a valid uri and namespace is a valid uri namespace = "/".join(class_.replace("#", "/").split("/")[:-1]) ##
from s3db.pr import pr_descendants if current.deployment_settings.get_auth_user_realms_include_persons(): exclude_persons = False else: exclude_persons = True descendants = pr_descendants(entities, exclude_persons = exclude_persons) # Add the subsidiaries to the realms for group_id in realms: realm = realms[group_id] if realm is None: continue append = realm.append for entity in list(realm): if entity in descendants: for subsidiary in descendants[entity]: if subsidiary not in realm: append(subsidiary) # Administrators have all permissions if sr.ADMIN in realms: info("==> user is ADMIN") info("*** GRANTED ***") return True # Fall back to current request c = c or self.controller f = f or self.function if not self.use_cacls: info("==> simple authorization") # Fall back to simple authorization if logged_in: info("*** GRANTED ***") return True else: if self.page_restricted(c=c, f=f): permitted = racl == self.READ else: info("==> unrestricted page") permitted = True if permitted: info("*** GRANTED ***") else: info("*** DENIED ***") return permitted # Do we need to check the owner role (i.e. table+record given)? 
if t is not None and record is not None: owners = self.get_owners(t, record) is_owner = self.is_owner(t, record, owners = owners, user_id = user_id, realms = realms, ) entity = owners[0] else: owners = [] is_owner = True entity = None # Get the applicable ACLs acls = self.applicable_acls(racl, realms = realms, c = c, f = f, t = t, entity = entity ) permitted = None if acls is None: info("==> no ACLs defined for this case") permitted = True elif not acls: info("==> no applicable ACLs") permitted = False else: if entity: if entity in acls: uacl, oacl = acls[entity] elif "ANY" in acls: uacl, oacl = acls["ANY"] else: info("==> Owner entity outside realm") permitted = False else: uacl, oacl = self.most_permissive(acls.values()) info("==> uacl: %04X, oacl: %04X" % (uacl, oacl)) if permitted is None: if uacl & racl == racl: permitted = True elif oacl & racl == racl: if is_owner and record: info("==> User owns the record") elif record: info("==> User does not own the record") permitted = is_owner else: permitted = False if permitted is None: raise self.error("Cannot determine permission.") elif permitted and \ t is not None and record is not None and \ self.requires_approval(t): # Approval possible for this table? 
if not hasattr(t, "_tablename"): table = current.s3db.table(t) if not table: raise AttributeError("undefined table %s" % t) else: table = t if "approved_by" in table.fields: approval_methods = ("approve", "review", "reject") access_approved = not all([m in approval_methods for m in method]) access_unapproved = any([m in method for m in approval_methods]) if access_unapproved: if not access_approved: permitted = self.unapproved(table, record) if not permitted: info("==> Record already approved") else: permitted = self.approved(table, record) or \ self.is_owner(table, record, owners, strict=True, user_id=user_id, realms=realms) or \ self.check_permission(user_id, "review", t=table, record=record) if not permitted: info("==> Record not approved") info("==> is owner: %s" % is_owner) else: # Approval not possible for this table => no change pass if permitted: info("*** GRANTED ***") else: info("*** DENIED ***") return permitted # ------------------------------------------------------------------------- def accessible_query(self, method, table, c=None, f=None, deny=True): """ Returns a query to select the accessible records for method in table. Args: method: the method as string or a list of methods (AND) table: the database table or table name c: controller name (falls back to current request) f: function name (falls back to current request) """ # Get the table if not hasattr(table, "_tablename"): tablename = table error = AttributeError("undefined table %s" % tablename) table = current.s3db.table(tablename, db_only = True, default = error, ) if not isinstance(method, (list, tuple)): method = [method] #_debug("\naccessible_query(%s, '%s')", table, ",".join(method)) # Defaults ALL_RECORDS = (table._id > 0) NO_RECORDS = (table._id == 0) if deny else None # Record approval required? 
if self.requires_approval(table) and \ "approved_by" in table.fields: requires_approval = True APPROVED = (table.approved_by != None) UNAPPROVED = (table.approved_by == None) else: requires_approval = False APPROVED = ALL_RECORDS UNAPPROVED = NO_RECORDS # Approval method? approval_methods = ("review", "approve", "reject") unapproved = any([m in method for m in approval_methods]) approved = not all([m in approval_methods for m in method]) # What does ALL RECORDS mean? ALL_RECORDS = ALL_RECORDS if approved and unapproved \ else UNAPPROVED if unapproved \ else APPROVED # Auth override, system roles and login auth = self.auth if auth.override: #_debug("==> auth.override") #_debug("*** ALL RECORDS ***") return ALL_RECORDS sr = auth.get_system_roles() logged_in = auth.s3_logged_in() self.check_settings() # Get realms and delegations user = auth.user if not logged_in: realms = Storage({sr.ANONYMOUS: None}) else: realms = user.realms # Don't filter out unapproved records owned by the user if requires_approval and not unapproved and \ "owned_by_user" in table.fields: ALL_RECORDS = (table.approved_by != None) if user: owner_query = (table.owned_by_user == user.id) else: owner_query = self.owner_query(table, None) if owner_query is not None: ALL_RECORDS |= owner_query # Administrators have all permissions if sr.ADMIN in realms: #_debug("==> user is ADMIN") #_debug("*** ALL RECORDS ***") return ALL_RECORDS # Multiple methods? if len(method) > 1: query = None for m in method: q = self.accessible_query(m, table, c=c, f=f, deny=False) if q is not None: if query is None: query = q else: query |= q if query is None: query = NO_RECORDS return query key = "%s/%s/%s/%s/%s" % (method, table, c, f, deny) query_cache = self.query_cache if key in query_cache: query = query_cache[key] return query # Required ACL racl = self.required_acl(method) #_debug("==> required permissions: %04X", racl) # Use ACLs? 
if not self.use_cacls: #_debug("==> simple authorization") # Fall back to simple authorization if logged_in: #_debug("*** ALL RECORDS ***") return ALL_RECORDS else: permitted = racl == self.READ if permitted: #_debug("*** ALL RECORDS ***") return ALL_RECORDS else: #_debug("*** ACCESS DENIED ***") return NO_RECORDS # Fall back to current request c = c or self.controller f = f or self.function # Get the applicable ACLs acls = self.applicable_acls(racl, realms = realms, c = c, f = f, t = table ) if acls is None: #_debug("==> no ACLs defined for this case") #_debug("*** ALL RECORDS ***") query = query_cache[key] = ALL_RECORDS return query elif not acls: #_debug("==> no applicable ACLs") #_debug("*** ACCESS DENIED ***") query = query_cache[key] = NO_RECORDS return query oacls = [] uacls = [] for entity in acls: acl = acls[entity] if acl[0] & racl == racl: uacls.append(entity) elif acl[1] & racl == racl and entity not in uacls: oacls.append(entity) query = None no_realm = [] check_owner_acls = True if "ANY" in uacls: #_debug("==> permitted for any records") query = ALL_RECORDS check_owner_acls = False elif uacls: query = self.realm_query(table, uacls) if query is None: #_debug("==> permitted for any records") query = ALL_RECORDS check_owner_acls = False else: #_debug("==> permitted for records owned by entities %s", str(uacls)) no_realm = uacls if check_owner_acls: use_realm = "ANY" not in oacls owner_query = self.owner_query(table, user, use_realm = use_realm, realm = oacls, no_realm = no_realm, ) if owner_query is not None: #_debug("==> permitted for owned records (limit to realms=%s)", use_realm) if query is not None: query |= owner_query else: query = owner_query elif use_realm: #_debug("==> permitted for any records owned by entities %s", str(uacls+oacls)) query = self.realm_query(table, uacls+oacls) if query is not None and requires_approval: base_filter = None if approved and unapproved else \ UNAPPROVED if unapproved else APPROVED if base_filter is not None: 
query = base_filter & query # Fallback if query is None: query = NO_RECORDS #_debug("*** Accessible Query ***") #_debug(str(query)) query_cache[key] = query return query # ------------------------------------------------------------------------- def accessible_url(self, c = None, f = None, p = None, t = None, a = None, args = None, vars = None, anchor = "", extension = None, env = None ): """ Return a URL only if accessible by the user, otherwise False - used for Navigation Items Args: c: the controller f: the function p: the permission (defaults to READ) t: the tablename (defaults to <c>_<f>) a: the application name args: the URL arguments vars: the URL variables anchor: the anchor (#) of the URL extension: the request format
<reponame>byteface/domonic<filename>domonic/__init__.py """ domonic ==================================== API for creating and loading .pyml """ __version__ = "0.8.3" __license__ = 'MIT' __author__ = "@byteface" ''' __uri__ = "https://github.com/byteface/domonic" # https://domonic.readthedocs.io/ __title__ = 'domonic' __description__ = 'Generate HTML with python 3' __all__ = ( '__version__', '__license__', '__uri__', '__title__', '__description__' ) ''' # TITLE = __title__ VERSION = __version__ # LICENSE = __license__ import requests import re from domonic.svg import * from domonic.html import * from domonic.html import html_attributes as attributes from domonic.javascript import * from domonic.utils import Utils from domonic.components import Input class domonic: JS_MASTER = "assets/js/master.js" CSS_STYLE = "assets/css/style.css" @staticmethod def get(url: str): """ downloads html and converts to domonic """ r = requests.get(url) return domonic.parse(r.content.decode("utf-8")) # TODO - param to eval @staticmethod def loads(path: str, *args, **kwargs): """ [ given a path to a file will return the .pyml as a python object if you have variables in the template they can be pass as kwargs ] """ with open(path, "r") as pyml_string: content = pyml_string.read() # print("++++",content, type(content) ) prog = domonic.domonify(str(content), *args, **kwargs) if type(prog) is tuple: prog = prog[0] return prog @staticmethod def load(pyml: str, *args, **kwargs): """ [ turns a pyml string into a python object ] """ if not isinstance(pyml, str): raise ValueError("load requires a string not:", type(pyml)) page = domonic.parse(pyml) prog = domonic.domonify(page, *args, **kwargs) if type(prog) is tuple: if len(prog) < 2: prog = prog[0] elif prog[1] == None: prog = prog[0] return prog @staticmethod def domonify(pyml: str, *args, **kwargs): """ [ attempts to fix pyml ] Args: pyml (str): [a string in the form div(_class="123")] Returns: a python object Note: returns a potentially 
edited working program. (not the string) if it was ammeneded, render the returned object to get the new string """ # print(pyml) if not isinstance(pyml, str): raise ValueError("domonify requires a string not:", type(pyml)) # print("HI>>", pyml) s = domonic.evaluate(pyml, *args, **kwargs) # NOTE - valid chunks of pyml can still not eval if they are not wrapped # i.e. a list not in aa ul or ol. when on single line evaulate will fix # but on mulitple lines it will not. try: p = eval(s, {**kwargs, **globals()}) except Exception as e: print("Failed to evaluate as mulitline trying again:", e) pyml = ''.join(pyml.splitlines()).strip(',') # try again on a single line s = domonic.evaluate(pyml, *args, **kwargs) p = eval(s, {**kwargs, **globals()}) return p LAST_ERR = None # to stop re-eval @staticmethod def evaluate(pyml: str, *args, **kwargs): """ [ attempts to fix pyml by using eval to make sure we can contruct nodes. be careful. ] Args: pyml (str): [a string in the form div(_class="123")] Returns: a python object Note: returns a potentially edited working program. (not the string) if it was ammeneded, render the returned object to get the new string """ # print(pyml) if not isinstance(pyml, str): raise ValueError("evaluate requires a string not:", type(pyml)) try: # TODO - strip any potentially bad/dangerous code before eval. p = eval(pyml, {**kwargs, **globals()}) domonic.LAST_ERR = None return pyml # ???? except Exception as e: # import sys # old_log = sys.stdout # log_file = open("fail.log","w") # sys.stdout = log_file # print(e) # sys.stdout = old_log # if end of file err. 
add a closed curly if "EOF" in str(e): # unexpected EOF while parsing (<string>, line 471) err = str(e) if str(len(pyml.splitlines())) in err: pyml += ")" return domonic.evaluate(pyml) # try again if "positional argument follows keyword argument" in str(e): ''' # print(Utils.digits(e)) if str(e) == domonic.LAST_ERR: # only allow 1 error per line # raise ValueError("Recursion limit exceeded") domonic.LAST_ERR = None # raise # Exception("Recursion limit exceeded") # TODO - cant raise as called by self try: return except Exception as e: raise Exception("Recursion limit exceeded") else: domonic.LAST_ERR = str(e) # return ''' num = int(Utils.digits(str(e))) # go backwards from this line. to the one before it opened pyml = pyml.splitlines() # NOTE - working backwards from the error line. we try to wrap any content. # if already wrapped, we don't want to wrap again. so move back 1 line until we can wrap again # this is because a node may take several lines. countback = 2 start_line = pyml[num-countback] while '_' not in start_line: countback += 1 line = pyml[num-countback] if 'html' not in line: start_line = line pyml[num-countback] = start_line + ").html(" # need to know when to close tag comma vs wrap # pyml[num - 2] = pyml[num - 2] + ").html(" + str(num) # need to know when to close tag comma vs wrap pyml = '\n'.join(pyml) return domonic.evaluate(pyml) # try again # TODO - if " does not match opening parenthesis '{' (<string>, line 9) # TODO - keyword argument repeated (<string>, line 617) # keyword argument repeated (<string>, line 3) # TODO - invalid syntax (<string>, line 615) print('Eval failed! 
you will have to modify the output manually') return pyml @staticmethod def _is_valid_pyml(line): """ tests a line returns True or False with replacement """ try: test_line = line.strip('\n').strip() if '(' in line: test_line = line + ')' if line[0] in ['"', "_", "*"]: test_line = "div(" + line if test_line[len(test_line) - 1] != ')': test_line = test_line + ')' if line == "),": return True, line # print(test_line) l = eval(test_line) # print('PASS:', line) return True, line except Exception as e: print(test_line) print('FAIL:', line, e) # print(e) # rety fix_hyphen_tags if ')' in line: # if there was a bracket return that at least return False, "" return False, "" return False, "" @staticmethod def dent(pyml, use_tabs=False): """ [ proper dentage ] """ tabs_or_spaces = " " if use_tabs: tabs_or_spaces = "\t" dentage = 0 lastchar = "" dented = "" for count, char in enumerate(pyml): if char == "(": dentage += 1 if char == ")": dentage -= 1 if lastchar == "\n": # TODO - if file doesn't have newlines already char = tabs_or_spaces * dentage + char lastchar = char dented += char if dentage < 0: dentage = 0 return dented @staticmethod def parse( page: str, script_tags=False, style_tags=False, parse_svg=False, minify=False, # indent=True, remove_broken_lines=False): """ HTML as input and formats to a domonic_string : the pony he comes the result will NOT always be valid .pyml . often params will be in wrong order. evaluate can be used to try and resolve param order. 
""" if not isinstance(page, str): raise ValueError("Parse requires a string required not:", type(page)) # print('parsing parsing parsing!!') page = ''.join(page.split('<!DOCTYPE HTML>')) page = ''.join(page.split('<!DOCTYPE html>')) page = ''.join(page.split('<!doctype html>')) page = ''.join(page.split('<!doctype HTML>')) page = ''.join(page.split('<?xml version="1.0" encoding="utf-8"?>')) page = ''.join(page.split('<?xml version="1.0" encoding="utf-8" ?>')) page = ''.join(page.split('<?xml version="1.0" encoding="UTF-8" ?>')) # fully strip inline css and js if not script_tags: # scripts = re.compile(r'<(script).*?</\1>(?s)') # TODO fix #DeprecationWarning: Flags not at the start of the expression scripts = re.compile(r'<(script).*?</\1>') page = scripts.sub('', page) if not style_tags: # css = re.compile(r'<(style).*?</\1>(?s)') # TODO fix #DeprecationWarning: Flags not at the start of the expression css = re.compile(r'<(style).*?</\1>', re.DOTALL) page = css.sub('', page) # fully strip svg and code tags # svg = re.compile(r'<(svg).*?</\1>(?s)') # page = svg.sub('', page) # code = re.compile(r'<(code).*?</\1>(?s)') # TODO fix #DeprecationWarning: Flags not at the start of the expression code = re.compile(r'<(code).*?</\1>') page = code.sub('', page) comments = re.compile(r'<!--(.|\s)*?-->') page = comments.sub('', page) # page = page.strip('\n').strip() # remove abnormal spacing between tag attributes (TODO- maybe 2 spaces is valid somewhere?) page = page.replace(' ', ' ') page = page.replace(' ', ' ') page = page.replace(' ', ' ') # special quotes page = page.replace('“', '&ldquo;') page = page.replace('”', '&rdquo;') # REPLACE ANY STRINGS WE MATCH ON (NOT CONTAINED IN TAGS. 
REGY = re.compile(r'(\u0028)(?![^<>]*>)') page = REGY.sub('$LEFTPARENTHESIS$', page) REGY = re.compile(r'(\u0029)(?![^<>]*>)') page = REGY.sub('$RIGHTPARENTHESIS$', page) REGY = re.compile(r'(\u005F)(?![^<>]*>)') page = REGY.sub('$UNDERSCORE$', page) REGY = re.compile(r'(U+002D)(?![^<>]*>)') page = REGY.sub('$HYPHEN$', page) REGY = re.compile(r'(U+002D)(?![^<>]*>)') page = REGY.sub('$QUOTE$', page) REGY = re.compile(r'(U+u005B)(?![^<>]*>)') page = REGY.sub('$LEFTSQUARE$', page) REGY = re.compile(r'(U+u005D)(?![^<>]*>)') page = REGY.sub('$RIGHTSQUARE$', page) REGY
and namespaceURI and prefix: # filename is label, prefix is language thisDoc.extensionRoleLabels[namespaceURI].add( (filename, prefix) ) elif filetype == "schema-version" and filename: thisDoc.extensionSchemaVersion = filename elif filetype == "table-style" and filename == "xbrl-us": isUSGAAP = True elif filetype == "elements": genElementsDoc = thisDoc elif action == "meta" and filetype == "table-style" and filename == "xbrl-us": isUSGAAP = True elif action == "meta" and filetype == "generate-style" and filename == "import-separately": isGenerateAndImport = False elif action == "workbook" and filename: importFileName = filename elif action == "worksheet" and filename: importSheetNames.append(filename) elif action == "colheader" and filename and namespaceURI: if namespaceURI == "split": splitString = filename else: importColHeaderMap[filename].append(namespaceURI) if namespaceURI not in importColumnHeaders: fatalLoadingErrors.append("colheader {} definition {} not recognized.".format(filename, namespaceURI)) elif action == "skip rows" and filename: fromRow, _sep, toRow = filename.partition("-") try: skipRows.append((int(fromRow), int(toRow) if toRow else int(fromRow))) except (ValueError, TypeError): fatalLoadingErrors.append("Exception (at skip rows): {error}, Excel sheet: {excelSheet} row: {excelRow}" .format(error=err, excelSheet=dtsSheet, excelRow=iRow)) except Exception as err: fatalLoadingErrors.append("Exception: {error}, Excel sheet: {excelSheet} row: {excelRow}, Traceback: {traceback}" .format(error=err, excelSheet=dtsSheet, excelRow=iRow, traceback=traceback.format_tb(sys.exc_info()[2]))) # remove any imported linkbaseRefs that are also generated for thisDoc in genDocs.values(): linkbaseRefsToRemove = [i for i, (lbType, filename, generate) in enumerate(thisDoc.linkbaseRefs) if not generate and (lbType, filename, True) in thisDoc.linkbaseRefs] while len(linkbaseRefsToRemove): i = linkbaseRefsToRemove.pop() thisDoc.linkbaseRefs.pop(i) dtsWs = None # 
dereference genOrder = [] for name, doc in genDocs.items(): insertPos = len(genOrder) for i, otherDoc in enumerate(genOrder): if doc.name in otherDoc.imports: insertPos = i # put this doc before any firstr doc that imports it break genOrder.insert(insertPos, doc) if importFileName: # alternative workbook importExcelBook = load_workbook(importFileName, read_only=True, data_only=True) sheetNames = importExcelBook.get_sheet_names() if importSheetNames: for importSheetName in importSheetNames: if importSheetName not in sheetNames: fatalLoadingErrors.append("Worksheet {} specified for Excel importing, but not present in workbook.".format(importSheetName)) else: for s in sheetNames: if s.endswith("Concepts"): importSheetNames.append(s) if not importSheetNames: for s in sheetNames: if "xbrl" in s.lower() and "dts" not in s: importSheetNames.append(s) if not importSheetNames: fatalLoadingErrors.append("Worksheet {} specified for Excel importing, but not present in workbook.".format(importSheetName)) if not isUSGAAP and genOrder: # need extra namespace declaration genOrder[0].importXmlns["iod"] = "http://disclosure.edinet-fsa.go.jp/taxonomy/common/2013-03-31/iod" # find column headers row headerCols = OrderedDict() headerColsAllElrs = set() hasLinkroleSeparateRow = True hasPreferredLabelTextColumn = False hasConceptAttributeColumn = False hasDepthColumn = False hasPresentationParentColumn = False hasRelationshipToCol = False hasrelationshipAttributeColumn = False headerRows = set() topDepth = 999999 for importSheetName in importSheetNames: if importSheetName not in sheetNames: continue headerCols.clear() headerRows.clear() hasConceptAttributeColumn = False hasDepthColumn = False hasPresentationParentColumn = False hasRelationshipToCol = False hasrelationshipAttributeColumn = False conceptsWs = importExcelBook[importSheetName] def setHeaderCols(row): headerCols.clear() for iCol, colCell in enumerate(row): v = xlValue(colCell) if isinstance(v,str): v = v.strip() if v in 
importColHeaderMap: for hdr in importColHeaderMap[v]: if hdr in importColumnHeaders: headerCols[importColumnHeaders[hdr]] = iCol elif v in importColumnHeaders: headerCols[importColumnHeaders[v]] = iCol elif isinstance(v,str): if any(v.startswith(r) for r in ("label,", "labels,", "reference,", "references,", "relationship to,")): # custom/extension label/reference m = resourceParsePattern.match(v) if m: _resourceType = m.group(1) _resourceRole = "/" + m.group(2) # last path seg of role _resourceLangOrPart = m.group(4) # lang or part headerCols[(_resourceType, _resourceRole, _resourceLangOrPart)] = iCol else: # custom/extension non-label/reference value column headerCols[v] = iCol # find out which rows are header rows for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()): if any(fromRow <= iRow+1 <= toRow for fromRow,toRow in skipRows): continue #for iCol, colCell in enumerate(row): setHeaderCols(row) # must have some of these to be a header col if (sum(1 for h in headerCols if h in ("name", "type", "depth", "periodType")) >= 3 or sum(1 for h in headerCols if h == "name" or (isinstance(h, tuple) and h[0] == "relationship to")) >= 2): # it's a header col headerRows.add(iRow+1) if 'linkrole' in headerCols: hasLinkroleSeparateRow = False if 'preferredLabel' in headerCols and any(isinstance(h, tuple) and h[0] == 'label' and h[1] == '/preferredLabel' for h in headerCols): hasPreferredLabelTextColumn = True if 'depth' in headerCols: hasDepthColumn = True if 'presentationParent' in headerCols: hasPresentationParentColumn = True if not hasDepthColumn and hasPresentationParentColumn: topDepth = 0 hasRelationshipToCol = any(h[0] == "relationship to" for h in headerCols if isinstance(h, tuple)) headerCols.clear() def cellHasValue(row, header, _type): if header in headerCols: iCol = headerCols[header] return iCol < len(row) and isinstance(row[iCol].value, _type) return False def cellValue(row, header, strip=False, nameChars=False, default=None): if header in 
headerCols: iCol = headerCols[header] if iCol < len(row): v = xlValue(row[iCol]) if strip and isinstance(v, str): v = v.strip() if nameChars and isinstance(v, str): v = ''.join(c for c in v if c.isalnum() or c in ('.', '_', '-')) if v is None: return default return v return default def valueNameChars(v): return ''.join(c for c in v if c.isalnum() or c in ('.', '_', '-')) def rowPrefixNameValues(row): prefix = cellValue(row, 'prefix', nameChars=True) if cellHasValue(row, 'name', str): if not prefix: # maybe name is a qname prefix, _sep, _name = cellValue(row, 'name').partition(":") if not _sep: # no prefix at all, whole string is name prefix = "" name = cellValue(row, 'name', nameChars=True)[len(prefix):] else: name = cellValue(row, 'name', nameChars=True) else: name = None if not prefix and "prefix" not in headerCols and genElementsDoc is not None: prefix = genElementsDoc.extensionSchemaPrefix return prefix, name def checkImport(thisDoc, qname): prefix, sep, localName = qname.partition(":") if sep: if prefix not in thisDoc.imports: if prefix == "xbrldt": thisDoc.imports["xbrldt"] = ("namespace", XbrlConst.xbrldt), ("schemaLocation", "http://www.xbrl.org/2005/xbrldt-2005.xsd") elif prefix == "nonnum": thisDoc.imports["nonnum"] = ("namespace", "http://www.xbrl.org/dtr/type/non-numeric"), ("schemaLocation", "http://www.xbrl.org/dtr/type/nonNumeric-2009-12-16.xsd") elif prefix != thisDoc.extensionSchemaPrefix and prefix != "xs": cntlr.addToLog("Warning: prefix schema file is not imported for: {qname}" .format(qname=qname), messageCode="importExcel:warning", file=thisDoc.extensionSchemaFilename) # find top depth for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()): if (iRow + 1) in headerRows: setHeaderCols(row) hasConceptAttributeColumn = any(v.startswith("attribute, ") for v in headerCols if isinstance(v,str)) hasRelationshipAttributeColumn = any(v.startswith("relationship attribute, ") for v in headerCols if isinstance(v,str)) elif not 
(hasLinkroleSeparateRow and (iRow + 1) in headerRows) and 'depth' in headerCols: depth = cellValue(row, 'depth') if isinstance(depth, int) and depth < topDepth: topDepth = depth # find header rows currentELR = currentELRdefinition = None for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()): useLabels = False eltEnumRefsParts = None if any(fromRow <= iRow+1 <= toRow for fromRow,toRow in skipRows): continue if (all(col.value is None for col in row) or all(isinstance(row[i].value, str) and row[i].value.strip() == "n/a" for i in (headerCols.get("name"), headerCols.get("type"), headerCols.get("value")) if i is not None)): continue # skip blank row try: isHeaderRow = (iRow + 1) in headerRows isELRrow = hasLinkroleSeparateRow and (iRow + 2) in headerRows if isHeaderRow: setHeaderCols(row) headerColsAllElrs |= _DICT_SET(headerCols.keys()) # accumulate all header cols for role checks elif isELRrow: currentELR = currentELRdefinition = None for colCell in row: v = str(xlValue(colCell) or '') if v.startswith("http://"): currentELR = v elif not currentELRdefinition and v.endswith(" 科目一覧"): currentELRdefinition = v[0:-5] elif not currentELRdefinition: currentELRdefinition = v if currentELR or currentELRdefinition: if hasPreLB: preLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) ) if hasPresentationParentColumn: preRels = set() if hasDefLB: defLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) ) if hasCalLB: calLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) ) calRels = set() # prevent duplications when same rel in different parts of tree if hasGenLB: genLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) ) elif headerCols: if "linkrole" in headerCols and cellHasValue(row, 'linkrole', str): v = cellValue(row, 'linkrole', strip=True) _trialELR = _trialELRdefinition = None if v.startswith("http://"): _trialELR = v elif v.endswith(" 科目一覧"): _trialELRdefinition = v[0:-5] 
else: _trialELRdefinition = v if (_trialELR and _trialELR != currentELR) or (_trialELRdefinition and _trialELRdefinition != currentELRdefinition): currentELR = _trialELR currentELRdefinition = _trialELRdefinition if currentELR or currentELRdefinition: if hasPreLB: preLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) ) if hasDefLB: defLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) ) if hasCalLB: calLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) ) calRels = set() # prevent duplications when same rel in different parts of tree if hasGenLB: genLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) ) prefix, name = rowPrefixNameValues(row) if cellHasValue(row, 'depth', int): depth = cellValue(row, 'depth') elif hasDepthColumn: depth = None # non-ELR section, no depth else: # depth provided by parent reference depth = 0 subsGrp = cellValue(row, 'substitutionGroup') isConcept
        # NOTE(review): this span begins mid-docstring — the text below is the tail of
        # the `load_collection` wrapper's docstring, opened outside the visible chunk.
        the data by metadata properties to include only data in the data cube which all given conditions
        return `true` for (AND operation). Specify key-value-pairs with the key being the name of the
        metadata property, which can be retrieved with the openEO Data Discovery for Collections. The value
        must a condition (user-defined process) to be evaluated against the collection metadata, see the
        example.

        :return: A data cube for further processing. The dimensions and dimension properties (name, type,
            labels, reference system and resolution) correspond to the collection's metadata, but the
            dimension labels are restricted as specified in the parameters.
        """
        # Each method below is a thin delegator: `self` (the current ProcessBuilder node)
        # is passed as the primary argument of the module-level process function.
        return load_collection(id=self, spatial_extent=spatial_extent, temporal_extent=temporal_extent, bands=bands, properties=properties)

    def load_result(self) -> 'ProcessBuilder':
        """
        Load batch job results

        :param self: The id of a batch job with results.

        :return: A data cube for further processing.
        """
        return load_result(id=self)

    def load_uploaded_files(self, format, options=UNSET) -> 'ProcessBuilder':
        """
        Load files from the user workspace

        :param self: The files to read. Folders can't be specified, instead specify all files. An error is
            thrown if a file can't be read.
        :param format: The file format to read from. It must be one of the values that the server reports
            as supported input file formats, which usually correspond to the short GDAL/OGR codes. If the
            format is not suitable for loading the data, a `FormatUnsuitable` exception will be thrown.
            This parameter is *case insensitive*.
        :param options: The file format parameters to be used to read the files. Must correspond to the
            parameters that the server reports as supported parameters for the chosen `format`. The
            parameter names and valid values usually correspond to the GDAL/OGR format options.

        :return: A data cube for further processing.
        """
        return load_uploaded_files(paths=self, format=format, options=options)

    def log(self, base) -> 'ProcessBuilder':
        """
        Logarithm to a base

        :param self: A number to compute the logarithm for.
        :param base: The numerical base.

        :return: The computed logarithm.
        """
        return log(x=self, base=base)

    def lt(self, y) -> 'ProcessBuilder':
        """
        Less than comparison

        :param self: First operand.
        :param y: Second operand.

        :return: `true` if `x` is strictly less than `y`, `null` if any operand is `null`, otherwise `false`.
        """
        return lt(x=self, y=y)

    def lte(self, y) -> 'ProcessBuilder':
        """
        Less than or equal to comparison

        :param self: First operand.
        :param y: Second operand.

        :return: `true` if `x` is less than or equal to `y`, `null` if any operand is `null`, otherwise `false`.
        """
        return lte(x=self, y=y)

    def mask(self, mask, replacement=UNSET) -> 'ProcessBuilder':
        """
        Apply a raster mask

        :param self: A raster data cube.
        :param mask: A mask as raster data cube. Every pixel in `data` must have a corresponding element in
            `mask`.
        :param replacement: The value used to replace masked values with.

        :return: A masked raster data cube with the same dimensions. The dimension properties (name, type,
            labels, reference system and resolution) remain unchanged.
        """
        # `self` becomes the `data` argument of the process; `mask` shadows the method
        # name here but is just forwarded.
        return mask(data=self, mask=mask, replacement=replacement)

    def mask_polygon(self, mask, replacement=UNSET, inside=UNSET) -> 'ProcessBuilder':
        """
        Apply a polygon mask

        :param self: A raster data cube.
        :param mask: A GeoJSON object containing a polygon. The provided feature types can be one of the
            following: * A `Polygon` geometry, * a `GeometryCollection` containing Polygons, * a `Feature`
            with a `Polygon` geometry or * a `FeatureCollection` containing `Feature`s with a `Polygon`
            geometry.
        :param replacement: The value used to replace masked values with.
        :param inside: If set to `true` all pixels for which the point at the pixel center **does**
            intersect with any polygon are replaced.

        :return: A masked raster data cube with the same dimensions. The dimension properties (name, type,
            labels, reference system and resolution) remain unchanged.
        """
        return mask_polygon(data=self, mask=mask, replacement=replacement, inside=inside)

    def max(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
        """
        Maximum value

        :param self: An array of numbers.
        :param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
            Setting this flag to `false` considers no-data values so that `null` is returned if any value
            is such a value.

        :return: The maximum value.
        """
        return max(data=self, ignore_nodata=ignore_nodata)

    def mean(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
        """
        Arithmetic mean (average)

        :param self: An array of numbers.
        :param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
            Setting this flag to `false` considers no-data values so that `null` is returned if any value
            is such a value.

        :return: The computed arithmetic mean.
        """
        return mean(data=self, ignore_nodata=ignore_nodata)

    def median(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
        """
        Statistical median

        :param self: An array of numbers.
        :param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
            Setting this flag to `false` considers no-data values so that `null` is returned if any value
            is such a value.

        :return: The computed statistical median.
        """
        return median(data=self, ignore_nodata=ignore_nodata)

    def merge_cubes(self, cube2, overlap_resolver=UNSET, context=UNSET) -> 'ProcessBuilder':
        """
        Merging two data cubes

        :param self: The first data cube.
        :param cube2: The second data cube.
        :param overlap_resolver: A reduction operator that resolves the conflict if the data overlaps. The
            reducer must return a value of the same data type as the input values are. The reduction
            operator may be a single process such as ``multiply()`` or consist of multiple sub-processes.
            `null` (the default) can be specified if no overlap resolver is required.
        :param context: Additional data to be passed to the overlap resolver.

        :return: The merged data cube. See the process description for details regarding the dimensions and
            dimension properties (name, type, labels, reference system and resolution).
        """
        return merge_cubes(cube1=self, cube2=cube2, overlap_resolver=overlap_resolver, context=context)

    def min(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
        """
        Minimum value

        :param self: An array of numbers.
        :param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
            Setting this flag to `false` considers no-data values so that `null` is returned if any value
            is such a value.

        :return: The minimum value.
        """
        return min(data=self, ignore_nodata=ignore_nodata)

    def mod(self, y) -> 'ProcessBuilder':
        """
        Modulo

        :param self: A number to be used as dividend.
        :param y: A number to be used as divisor.

        :return: The remainder after division.
        """
        return mod(x=self, y=y)

    def multiply(self, y) -> 'ProcessBuilder':
        """
        Multiplication of two numbers

        :param self: The multiplier.
        :param y: The multiplicand.

        :return: The computed product of the two numbers.
        """
        return multiply(x=self, y=y)

    # NOTE(review): `ndvi` continues beyond the visible chunk — its docstring is
    # unterminated here (the remainder, including the closing quotes and body, is
    # outside this span).
    def ndvi(self, nir=UNSET, red=UNSET, target_band=UNSET) -> 'ProcessBuilder':
        """
        Normalized Difference Vegetation Index

        :param self: A raster data cube with two bands that have the common names `red` and `nir` assigned.
        :param nir: The name of the NIR band. Defaults to the band that has the common name `nir` assigned.
            Either the unique band name (metadata field `name` in bands) or one of the common band names
            (metadata field `common_name` in bands) can be specified. If unique band name and common name
            conflict, the unique band name has higher priority.
        :param red: The name of the red band. Defaults to the band that has the common name `red` assigned.
Either the unique band name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands) can be specified. If unique band name and common name conflict, the unique band name has higher priority. :param target_band: By default, the dimension of type `bands` is dropped. To keep the dimension specify a new band name in this parameter so that a new dimension label with the specified name will be added for the computed values. :return: A raster data cube containing the computed NDVI values. The structure of the data cube differs depending on the value passed to `target_band`: * `target_band` is `null`: The data cube does not contain the dimension of type `bands` any more, the number of dimensions decreases by one. The dimension properties (name, type, labels, reference system and resolution) for all other dimensions remain unchanged. * `target_band` is a string: The data cube keeps the same dimensions. The dimension properties remain unchanged, but the number of dimension labels for the dimension of type `bands`
# Re-type the MAIN / GPUQ / PRIM / VID function symbols: for each address,
# clear any stale byte/item definitions first, then apply the demangled
# C prototype.  The (address, prototype) table keeps each del_items/SetType
# pairing explicit; iteration order preserves the original call sequence.
_TYPED_SYMBOLS = [
    (0x8007B664, "int GetTpY__FUs(unsigned short tpage)"),
    (0x8007B680, "int GetTpX__FUs(unsigned short tpage)"),
    (0x8007B68C, "void Remove96__Fv()"),
    (0x8007B6C4, "void AppMain()"),
    (0x8007B7DC, "void MAIN_RestartGameTask__Fv()"),
    (0x8007B808, "void GameTask__FP4TASK(struct TASK *T)"),
    (0x8007B8F0, "void MAIN_MainLoop__Fv()"),
    (0x8007B938, "void CheckMaxArgs__Fv()"),
    (0x8007B96C, "unsigned char GPUQ_InitModule__Fv()"),
    (0x8007B978, "void GPUQ_FlushQ__Fv()"),
    (0x8007BAEC, "void GPUQ_LoadImage__FP4RECTli(struct RECT *Rect, long ImgHandle, int Offset)"),
    (0x8007BBA0, "void GPUQ_DiscardHandle__Fl(long hnd)"),
    (0x8007BC40, "void GPUQ_LoadClutAddr__FiiiPv(int X, int Y, int Cols, void *Addr)"),
    (0x8007BCDC, "void GPUQ_MoveImage__FP4RECTii(struct RECT *R, int x, int y)"),
    (0x8007BD7C, "unsigned char PRIM_Open__FiiiP10SCREEN_ENVUl(int Prims, int OtSize, int Depth, struct SCREEN_ENV *Scr, unsigned long MemType)"),
    (0x8007BE98, "unsigned char InitPrimBuffer__FP11PRIM_BUFFERii(struct PRIM_BUFFER *Pr, int Prims, int OtSize)"),
    (0x8007BF74, "void PRIM_Clip__FP4RECTi(struct RECT *R, int Depth)"),
    (0x8007C09C, "unsigned char PRIM_GetCurrentScreen__Fv()"),
    (0x8007C0A8, "void PRIM_FullScreen__Fi(int Depth)"),
    (0x8007C0E4, "void PRIM_Flush__Fv()"),
    (0x8007C2F8, "unsigned long *PRIM_GetCurrentOtList__Fv()"),
    (0x8007C304, "void ClearPbOnDrawSync(struct PRIM_BUFFER *Pb)"),
    (0x8007C340, "unsigned char ClearedYet__Fv()"),
    (0x8007C34C, "void PrimDrawSycnCallBack()"),
    (0x8007C36C, "void SendDispEnv__Fv()"),
    (0x8007C390, "struct POLY_F4 *PRIM_GetNextPolyF4__Fv()"),
    (0x8007C3A8, "struct POLY_FT4 *PRIM_GetNextPolyFt4__Fv()"),
    (0x8007C3C0, "struct POLY_GT4 *PRIM_GetNextPolyGt4__Fv()"),
    (0x8007C3D8, "struct POLY_G4 *PRIM_GetNextPolyG4__Fv()"),
    (0x8007C3F0, "struct POLY_F3 *PRIM_GetNextPolyF3__Fv()"),
    (0x8007C408, "struct DR_MODE *PRIM_GetNextDrArea__Fv()"),
    (0x8007C420, "bool ClipRect__FRC4RECTR4RECT(struct RECT *ClipRect, struct RECT *RectToClip)"),
    (0x8007C534, "bool IsColiding__FRC4RECTT0(struct RECT *ClipRect, struct RECT *NewRect)"),
    (0x8007C59C, "void VID_AfterDisplay__Fv()"),
    (0x8007C5BC, "void VID_ScrOn__Fv()"),
    (0x8007C5E4, "void VID_DoThisNextSync__FPFv_v(void (*Func)())"),
    (0x8007C63C, "unsigned char VID_NextSyncRoutHasExecuted__Fv()"),
    (0x8007C648, "unsigned long VID_GetTick__Fv()"),
    (0x8007C654, "void VID_DispEnvSend()"),
    (0x8007C690, "void VID_SetXYOff__Fii(int x, int y)"),
    (0x8007C6A0, "int VID_GetXOff__Fv()"),
    (0x8007C6AC, "int VID_GetYOff__Fv()"),
    (0x8007C6B8, "void VID_SetDBuffer__Fb(bool DBuf)"),
    (0x8007C828, "void MyFilter__FUlUlPCc(unsigned long MemType, unsigned long Size, char *Name)"),
    (0x8007C830, "void SlowMemMove__FPvT0Ul(void *Dest, void *Source, unsigned long size)"),
]
for _addr, _ctype in _TYPED_SYMBOLS:
    del_items(_addr)
    SetType(_addr, _ctype)
# Re-type the file-system layer symbols (SYSI helpers, FileIO base class,
# PCIO implementation): clear stale items at each address, then set the
# demangled C prototype.  Table-driven form preserves the original
# del_items/SetType call order exactly.
_TYPED_SYMBOLS = [
    (0x8007C850, "int GetTpY__FUs_addr_8007C850(unsigned short tpage)"),
    (0x8007C86C, "int GetTpX__FUs_addr_8007C86C(unsigned short tpage)"),
    (0x8007C878, "struct FileIO *SYSI_GetFs__Fv()"),
    (0x8007C884, "struct FileIO *SYSI_GetOverlayFs__Fv()"),
    (0x8007C890, "void SortOutFileSystem__Fv()"),
    (0x8007C9CC, "void MemCb__FlPvUlPCcii(long hnd, void *Addr, unsigned long Size, char *Name, int Users, int TimeStamp)"),
    (0x8007C9EC, "void Spanker__Fv()"),
    (0x8007CA2C, "void GaryLiddon__Fv()"),
    (0x8007CA34, "void ReadPad__Fi(int NoDeb)"),
    (0x8007CBF0, "void DummyPoll__Fv()"),
    (0x8007CBF8, "void DaveOwens__Fv()"),
    (0x8007CC20, "unsigned short GetDown__C4CPad(struct CPad *this)"),
    (0x8007CC48, "unsigned short GetCur__C4CPad(struct CPad *this)"),
    (0x8007CC70, "unsigned char CheckActive__4CPad(struct CPad *this)"),
    (0x8007CC7C, "int GetTpY__FUs_addr_8007CC7C(unsigned short tpage)"),
    (0x8007CC98, "int GetTpX__FUs_addr_8007CC98(unsigned short tpage)"),
    (0x8007CCA4, "void TimSwann__Fv()"),
    (0x8007CCAC, "void stub__FPcPv(char *e, void *argptr)"),
    (0x8007CCB4, "void eprint__FPcT0i(char *Text, char *File, int Line)"),
    (0x8007CCE8, "void leighbird__Fv()"),
    (0x8007CD10, "struct FileIO *__6FileIOUl(struct FileIO *this, unsigned long OurMemId)"),
    (0x8007CD60, "void ___6FileIO(struct FileIO *this, int __in_chrg)"),
    (0x8007CDB4, "long Read__6FileIOPCcUl(struct FileIO *this, char *Name, unsigned long RamId)"),
    (0x8007CF1C, "int FileLen__6FileIOPCc(struct FileIO *this, char *Name)"),
    (0x8007CF80, "void FileNotFound__6FileIOPCc(struct FileIO *this, char *Name)"),
    (0x8007CFA0, "bool StreamFile__6FileIOPCciPFPUciib_bii(struct FileIO *this, char *Name, int Slice, bool (*Func)(), int Offset, int Size)"),
    (0x8007D080, "bool ReadAtAddr__6FileIOPCcPUci(struct FileIO *this, char *Name, unsigned char *Dest, int Len)"),
    (0x8007D144, "void DumpOldPath__6FileIO(struct FileIO *this)"),
    (0x8007D1A8, "void SetSearchPath__6FileIOPCc(struct FileIO *this, char *Path)"),
    (0x8007D284, "bool FindFile__6FileIOPCcPc(struct FileIO *this, char *Name, char *Buffa)"),
    (0x8007D398, "char *CopyPathItem__6FileIOPcPCc(struct FileIO *this, char *Dst, char *Src)"),
    (0x8007D440, "void LockSearchPath__6FileIO(struct FileIO *this)"),
    (0x8007D498, "void UnlockSearchPath__6FileIO(struct FileIO *this)"),
    (0x8007D4F0, "bool SearchPathExists__6FileIO(struct FileIO *this)"),
    (0x8007D504, "bool Save__6FileIOPCcPUci(struct FileIO *this, char *Name, unsigned char *Addr, int Len)"),
    (0x8007D540, "struct PCIO *__4PCIOUl(struct PCIO *this, unsigned long OurMemId)"),
    (0x8007D5A8, "void ___4PCIO(struct PCIO *this, int __in_chrg)"),
    (0x8007D600, "bool FileExists__4PCIOPCc(struct PCIO *this, char *Name)"),
    (0x8007D644, "bool LoReadFileAtAddr__4PCIOPCcPUci(struct PCIO *this, char *Name, unsigned char *Dest, int Len)"),
    (0x8007D708, "int GetFileLength__4PCIOPCc(struct PCIO *this, char *Name)"),
]
for _addr, _ctype in _TYPED_SYMBOLS:
    del_items(_addr)
    SetType(_addr, _ctype)
# Re-type the DatIO / SysObj / TextDat rendering symbols: clear stale items
# at each address, then apply the demangled C prototype.  Table-driven form
# preserves the original del_items/SetType call order exactly.
_TYPED_SYMBOLS = [
    (0x8007D7C0, "bool LoSave__4PCIOPCcPUci(struct PCIO *this, char *Name, unsigned char *Addr, int Len)"),
    (0x8007D894, "bool LoStreamFile__4PCIOPCciPFPUciib_bii(struct PCIO *this, char *Name, int Slice, bool (*Func)(), int Offset, int Size)"),
    (0x8007DAA4, "struct SysObj *__6SysObj(struct SysObj *this)"),
    (0x8007DABC, "void *__nw__6SysObji(int Amount)"),
    (0x8007DAE8, "void *__nw__6SysObjiUl(int Amount, unsigned long RamID)"),
    (0x8007DB64, "void __dl__6SysObjPv(void *ptr)"),
    (0x8007DBD0, "struct DatIO *__5DatIOUl(struct DatIO *this, unsigned long OurMemId)"),
    (0x8007DC0C, "void ___5DatIO(struct DatIO *this, int __in_chrg)"),
    (0x8007DC64, "bool FileExists__5DatIOPCc(struct DatIO *this, char *Name)"),
    (0x8007DCA4, "bool LoReadFileAtAddr__5DatIOPCcPUci(struct DatIO *this, char *Name, unsigned char *Dest, int Len)"),
    (0x8007DD64, "int GetFileLength__5DatIOPCc(struct DatIO *this, char *Name)"),
    (0x8007DE18, "bool LoSave__5DatIOPCcPUci(struct DatIO *this, char *Name, unsigned char *Addr, int Len)"),
    (0x8007DEC0, "bool LoStreamFile__5DatIOPCciPFPUciib_bii(struct DatIO *this, char *Name, int Slice, bool (*Func)(), int Offset, int Size)"),
    (0x8007E0CC, "struct TextDat *__7TextDat(struct TextDat *this)"),
    (0x8007E10C, "void ___7TextDat(struct TextDat *this, int __in_chrg)"),
    (0x8007E154, "void Use__7TextDat(struct TextDat *this)"),
    (0x8007E348, "bool TpLoadCallBack__FPUciib(unsigned char *Mem, int ReadSoFar, int Size, bool LastChunk)"),
    (0x8007E418, "void StreamLoadTP__7TextDat(struct TextDat *this)"),
    (0x8007E4D0, "void FinishedUsing__7TextDat(struct TextDat *this)"),
    (0x8007E52C, "void MakeBlockOffsetTab__7TextDat(struct TextDat *this)"),
    (0x8007E59C, "long MakeOffsetTab__C9CBlockHdr(struct CBlockHdr *this)"),
    (0x8007E6C8, "void SetUVTp__7TextDatP9FRAME_HDRP8POLY_FT4ii(struct TextDat *this, struct FRAME_HDR *Fr, struct POLY_FT4 *FT4, int XFlip, int YFlip)"),
    (0x8007E7C4, "struct POLY_FT4 *PrintMonster__7TextDatiiibi(struct TextDat *this, int Frm, int X, int Y, bool XFlip, int OtPos)"),
    (0x8007EBCC, "void PrepareFt4__7TextDatP8POLY_FT4iiiii(struct TextDat *this, struct POLY_FT4 *FT4, int Frm, int X, int Y, int XFlip, int YFlip)"),
    (0x8007EE38, "unsigned char *GetDecompBufffer__7TextDati(struct TextDat *this, int Size)"),
    (0x8007EF98, "void SetUVTpGT4__7TextDatP9FRAME_HDRP8POLY_GT4ii(struct TextDat *this, struct FRAME_HDR *Fr, struct POLY_GT4 *FT4, int XFlip, int YFlip)"),
    (0x8007F094, "void PrepareGt4__7TextDatP8POLY_GT4iiiii(struct TextDat *this, struct POLY_GT4 *GT4, int Frm, int X, int Y, int XFlip, int YFlip)"),
    (0x8007F2F0, "void SetUVTpGT3__7TextDatP9FRAME_HDRP8POLY_GT3(struct TextDat *this, struct FRAME_HDR *Fr, struct POLY_GT3 *GT3)"),
    (0x8007F370, "void PrepareGt3__7TextDatP8POLY_GT3iii(struct TextDat *this, struct POLY_GT3 *GT3, int Frm, int X, int Y)"),
    (0x8007F534, "struct POLY_FT4 *PrintFt4__7TextDatiiiiii(struct TextDat *this, int Frm, int X, int Y, int XFlip, int OtPos, int YFlip)"),
    (0x8007F688, "struct POLY_GT4 *PrintGt4__7TextDatiiiiii(struct TextDat *this, int Frm, int X, int Y, int XFlip, int OtPos, int YFlip)"),
]
for _addr, _ctype in _TYPED_SYMBOLS:
    del_items(_addr)
    SetType(_addr, _ctype)
# Trailing del_items whose matching SetType call lies beyond this span.
del_items(0x8007F7DC)
# Leading SetType that pairs with the del_items call immediately preceding
# this span.
SetType(0x8007F7DC, "struct POLY_GT3 *PrintGt3__7TextDatiiii(struct TextDat *this, int Frm, int X, int Y, int OtPos)")
# Re-type the TextDat decompression / creature-table / CScreen symbols:
# clear stale items at each address, then apply the demangled C prototype.
# Table-driven form preserves the original del_items/SetType call order.
_TYPED_SYMBOLS = [
    (0x8007F8C0, "void DecompFrame__7TextDatP9FRAME_HDR(struct TextDat *this, struct FRAME_HDR *Fr)"),
    (0x8007FA14, "void MakeCreatureOffsetTab__7TextDat(struct TextDat *this)"),
    (0x8007FB54, "void MakePalOffsetTab__7TextDat(struct TextDat *this)"),
    (0x8007FC50, "void InitData__7TextDat(struct TextDat *this)"),
    (0x8007FC7C, "void DumpData__7TextDat(struct TextDat *this)"),
    (0x8007FDC4, "struct TextDat *GM_UseTexData__Fi(int Id)"),
    (0x8007FEE4, "void GM_FinishedUsing__FP7TextDat(struct TextDat *Fin)"),
    (0x8007FF38, "void SetPal__7TextDatP9FRAME_HDRP8POLY_FT4(struct TextDat *this, struct FRAME_HDR *Fr, struct POLY_FT4 *FT4)"),
    (0x80080000, "int GetFrNum__7TextDatiiii(struct TextDat *this, int Creature, int Action, int Direction, int Frame)"),
    (0x80080054, "bool IsDirAliased__7TextDatiii(struct TextDat *this, int Creature, int Action, int Direction)"),
    (0x800800AC, "void DoDecompRequests__7TextDat(struct TextDat *this)"),
    (0x800801D0, "void FindDecompArea__7TextDatR4RECT(struct TextDat *this, struct RECT *R)"),
    (0x800802A4, "struct CTextFileInfo *GetFileInfo__7TextDati(struct TextDat *this, int Id)"),
    (0x800802F4, "int GetSize__C15CCreatureAction(struct CCreatureAction *this)"),
    (0x8008031C, "int GetFrNum__C15CCreatureActionii(struct CCreatureAction *this, int Direction, int Frame)"),
    (0x800803C4, "void InitDirRemap__15CCreatureAction(struct CCreatureAction *this)"),
    (0x80080484, "int GetFrNum__C12CCreatureHdriii(struct CCreatureHdr *this, int Action, int Direction, int Frame)"),
    (0x800804C8, "struct CCreatureAction *GetAction__C12CCreatureHdri(struct CCreatureHdr *this, int ActNum)"),
    (0x80080558, "void InitActionDirRemaps__12CCreatureHdr(struct CCreatureHdr *this)"),
    (0x800805C8, "int GetSize__C12CCreatureHdr(struct CCreatureHdr *this)"),
    (0x80080634, "long LoadDat__C13CTextFileInfo(struct CTextFileInfo *this)"),
    (0x80080684, "long LoadHdr__C13CTextFileInfo(struct CTextFileInfo *this)"),
    (0x800806AC, "long GetFile__C13CTextFileInfoPc(struct CTextFileInfo *this, char *Ext)"),
    (0x80080748, "bool HasFile__C13CTextFileInfoPc(struct CTextFileInfo *this, char *Ext)"),
    (0x800807B0, "void Un64__FPUcT0l(unsigned char *Src, unsigned char *Dest, long SizeBytes)"),
    (0x80080884, "struct CScreen *__7CScreen(struct CScreen *this)"),
    (0x800808B8, "void Load__7CScreeniii(struct CScreen *this, int Id, int tpx, int tpy)"),
    (0x80080B34, "void Unload__7CScreen(struct CScreen *this)"),
    (0x80080B58, "void Display__7CScreeniiii(struct CScreen *this, int Id, int tpx, int tpy, int fadeval)"),
    (0x80080E38, "void SetRect__5CPartR7TextDatR4RECT(struct CPart *this, struct TextDat *TDat, struct RECT *R)"),
    (0x80080EB0, "void GetBoundingBox__6CBlockR7TextDatR4RECT(struct CBlock *this, struct TextDat *TDat, struct RECT *R)"),
    (0x8008100C, "void _GLOBAL__D_DatPool()"),
    (0x80081064, "void _GLOBAL__I_DatPool()"),
    (0x800810B8, "void PRIM_GetPrim__FPP8POLY_GT3(struct POLY_GT3 **Prim)"),
]
for _addr, _ctype in _TYPED_SYMBOLS:
    del_items(_addr)
    SetType(_addr, _ctype)
# Trailing del_items whose matching SetType call lies beyond this span.
del_items(0x80081134)
SetType(0x80081134, "void PRIM_GetPrim__FPP8POLY_GT4(struct POLY_GT4 **Prim)") del_items(0x800811B0) SetType(0x800811B0, "void PRIM_GetPrim__FPP8POLY_FT4(struct POLY_FT4 **Prim)") del_items(0x8008122C) SetType(0x8008122C, "bool CanXferFrame__C7TextDat(struct TextDat *this)") del_items(0x80081254) SetType(0x80081254, "bool CanXferPal__C7TextDat(struct TextDat *this)") del_items(0x8008127C) SetType(0x8008127C, "bool IsLoaded__C7TextDat(struct TextDat *this)") del_items(0x80081288) SetType(0x80081288, "int GetTexNum__C7TextDat(struct TextDat *this)") del_items(0x80081294) SetType(0x80081294, "struct CCreatureHdr *GetCreature__7TextDati(struct TextDat *this, int Creature)") del_items(0x8008130C) SetType(0x8008130C, "int GetNumOfCreatures__7TextDat(struct TextDat *this)") del_items(0x80081320) SetType(0x80081320, "void SetFileInfo__7TextDatPC13CTextFileInfoi(struct TextDat *this, struct CTextFileInfo *NewInfo, int NewTexNum)") del_items(0x8008132C) SetType(0x8008132C, "int GetNumOfFrames__7TextDat(struct TextDat *this)") del_items(0x80081340) SetType(0x80081340, "struct PAL *GetPal__7TextDati(struct TextDat *this, int PalNum)") del_items(0x8008135C) SetType(0x8008135C, "struct FRAME_HDR *GetFr__7TextDati(struct TextDat *this, int FrNum)") del_items(0x80081378) SetType(0x80081378, "char *GetName__C13CTextFileInfo(struct CTextFileInfo *this)") del_items(0x80081384) SetType(0x80081384, "bool HasDat__C13CTextFileInfo(struct CTextFileInfo *this)") del_items(0x800813AC) SetType(0x800813AC, "bool HasTp__C13CTextFileInfo(struct CTextFileInfo *this)") del_items(0x800813D4) SetType(0x800813D4, "int GetSize__C6CBlock(struct CBlock *this)") del_items(0x800813E8) SetType(0x800813E8, "struct CdIO *__4CdIOUl(struct CdIO *this, unsigned long OurMemId)") del_items(0x8008142C) SetType(0x8008142C, "void ___4CdIO(struct CdIO *this, int __in_chrg)") del_items(0x80081484) SetType(0x80081484, "bool FileExists__4CdIOPCc(struct CdIO *this, char *Name)") del_items(0x800814A8) SetType(0x800814A8, 
"bool LoReadFileAtAddr__4CdIOPCcPUci(struct CdIO *this, char *Name, unsigned char *Dest, int Len)") del_items(0x800814D0) SetType(0x800814D0, "int GetFileLength__4CdIOPCc(struct CdIO *this, char *Name)") del_items(0x800814F4) SetType(0x800814F4, "bool LoSave__4CdIOPCcPUci(struct CdIO *this, char *Name, unsigned char *Addr, int Len)") del_items(0x800815D4) SetType(0x800815D4, "void LoStreamCallBack__Fi(int handle)") del_items(0x800815E4) SetType(0x800815E4, "bool CD_GetCdlFILE__FPCcP7CdlFILE(char *Name, struct CdlFILE *RetFile)") del_items(0x80081730) SetType(0x80081730, "bool LoStreamFile__4CdIOPCciPFPUciib_bii(struct CdIO *this, char *Name, int Slice, bool (*Func)(), int Offset, int Size)") del_items(0x800819E8) SetType(0x800819E8, "bool LoAsyncStreamFile__4CdIOPCciPFPUciib_bii(struct CdIO *this, char *Name, int Slice, bool (*Func)(), int Offset, int Size)") del_items(0x80081B24) SetType(0x80081B24, "void BL_InitEAC__Fv()") del_items(0x80081C10) SetType(0x80081C10, "long BL_ReadFile__FPcUl(char *Name, unsigned long RamId)") del_items(0x80081D3C) SetType(0x80081D3C, "long BL_AsyncReadFile__FPcUl(char *Name, unsigned long RamId)") del_items(0x80081EB0) SetType(0x80081EB0, "void BL_LoadDirectory__Fv()") del_items(0x8008201C) SetType(0x8008201C, "void BL_LoadStreamDir__Fv()") del_items(0x800822FC) SetType(0x800822FC, "struct STRHDR *BL_MakeFilePosTab__FPUcUl(unsigned char *BL_DirPtr, unsigned long NoStreamFiles)") del_items(0x800823FC) SetType(0x800823FC, "struct STRHDR *BL_FindStreamFile__FPcc(char *Name, char LumpID)") del_items(0x80082598) SetType(0x80082598, "bool BL_FileExists__FPcc(char *Name, char LumpID)") del_items(0x800825BC) SetType(0x800825BC, "int BL_FileLength__FPcc(char *Name, char LumpID)") del_items(0x800825F0) SetType(0x800825F0, "bool BL_LoadFileAtAddr__FPcPUcc(char *Name, unsigned char *Dest, char LumpID)") del_items(0x800826D8) SetType(0x800826D8, "bool BL_AsyncLoadDone__Fv()") del_items(0x800826E4) SetType(0x800826E4, "void 
BL_AsyncLoadTASK__FP4TASK(struct TASK *T)") del_items(0x80082750) SetType(0x80082750, "long BL_LoadFileAsync__FPcc(char *Name, char LumpID)") del_items(0x800828F4) SetType(0x800828F4, "bool BL_AsyncLoadFileAtAddr__FPcPUcc(char *Name, unsigned char *Dest, char LumpID)") del_items(0x800829BC) SetType(0x800829BC, "struct STRHDR *BL_OpenStreamFile__FPcc(char *Name, char LumpID)") del_items(0x800829E8) SetType(0x800829E8, "bool BL_CloseStreamFile__FP6STRHDR(struct STRHDR *StreamHDR)") del_items(0x80082A20) SetType(0x80082A20, "int LZNP_Decode__FPUcT0(unsigned char *in, unsigned char *out)") del_items(0x80082AF4) SetType(0x80082AF4, "void *Tmalloc__Fi(int MemSize)") del_items(0x80082C18) SetType(0x80082C18, "void Tfree__FPv(void *Addr)") del_items(0x80082CC8) SetType(0x80082CC8, "void InitTmalloc__Fv()") del_items(0x80082CF0) SetType(0x80082CF0, "void strupr__FPc(char *Buffa)") del_items(0x80082D44) SetType(0x80082D44, "void PauseTask__FP4TASK(struct TASK *T)") del_items(0x80082D90) SetType(0x80082D90, "int GetPausePad__Fv()") del_items(0x80082E7C) SetType(0x80082E7C, "bool TryPadForPause__Fi(int PadNum)") del_items(0x80082EA8) SetType(0x80082EA8, "void DoPause__14CPauseMessagesi(struct CPauseMessages *this, int nPadNum)") del_items(0x80083128) SetType(0x80083128, "bool DoPausedMessage__14CPauseMessages(struct CPauseMessages *this)") del_items(0x80083440) SetType(0x80083440, "int DoQuitMessage__14CPauseMessages(struct CPauseMessages *this)") del_items(0x80083560) SetType(0x80083560, "bool AreYouSureMessage__14CPauseMessages(struct CPauseMessages *this)") del_items(0x80083664) SetType(0x80083664, "bool PA_SetPauseOk__Fb(bool NewPause)") del_items(0x80083674) SetType(0x80083674, "bool PA_GetPauseOk__Fv()") del_items(0x80083680) SetType(0x80083680, "void MY_PausePrint__17CTempPauseMessageiPciP4RECT(struct CTempPauseMessage *this, int s, char *Txt, int Menu, struct RECT *PRect)") del_items(0x800837CC) SetType(0x800837CC, "void InitPrintQuitMessage__17CTempPauseMessage(struct 
CTempPauseMessage *this)") del_items(0x800837D4) SetType(0x800837D4, "void PrintQuitMessage__17CTempPauseMessagei(struct CTempPauseMessage *this, int Menu)") del_items(0x800838E4) SetType(0x800838E4, "void LeavePrintQuitMessage__17CTempPauseMessagei(struct CTempPauseMessage *this, int Menu)") del_items(0x800838EC) SetType(0x800838EC, "void InitPrintAreYouSure__17CTempPauseMessage(struct CTempPauseMessage *this)") del_items(0x800838F4) SetType(0x800838F4, "void PrintAreYouSure__17CTempPauseMessagei(struct CTempPauseMessage *this, int Menu)") del_items(0x80083A04) SetType(0x80083A04, "void LeavePrintAreYouSure__17CTempPauseMessagei(struct CTempPauseMessage *this, int Menu)") del_items(0x80083A0C) SetType(0x80083A0C, "void InitPrintPaused__17CTempPauseMessage(struct CTempPauseMessage *this)") del_items(0x80083A14) SetType(0x80083A14, "void PrintPaused__17CTempPauseMessage(struct CTempPauseMessage *this)") del_items(0x80083B40) SetType(0x80083B40, "void LeavePrintPaused__17CTempPauseMessage(struct CTempPauseMessage *this)") del_items(0x80083B48) SetType(0x80083B48, "void ___17CTempPauseMessage(struct CTempPauseMessage *this, int __in_chrg)") del_items(0x80083B70) SetType(0x80083B70, "void _GLOBAL__D_DoPause__14CPauseMessagesi()") del_items(0x80083B98) SetType(0x80083B98, "void _GLOBAL__I_DoPause__14CPauseMessagesi()") del_items(0x80083BC0) SetType(0x80083BC0, "struct CTempPauseMessage *__17CTempPauseMessage(struct
<filename>botbot/apps/logs/views.py<gh_stars>1-10 import datetime import json import math import random import re from django.conf import settings from django.contrib.humanize.templatetags import humanize from django.core.cache import cache from django.db.models import Q from django.http import Http404, HttpResponse from django.shortcuts import redirect, get_object_or_404 from django.utils.functional import cached_property from django.utils.translation import ugettext as _ from django.views.generic import ListView, TemplateView, View from django.views.decorators.cache import patch_cache_control import pytz from botbot.apps.bots.utils import reverse_channel from botbot.apps.bots.views import ChannelMixin from . import forms from botbot.apps.logs.models import Log from botbot.apps.kudos.models import KudosTotal class Help(ChannelMixin, TemplateView): """ Help page for a channel. """ template_name = 'logs/help.html' class PaginatorPageLinksMixin(object): def paginate_queryset(self, queryset, page_size): paginator, page, object_list, has_other_pages = super( PaginatorPageLinksMixin, self).paginate_queryset( queryset, page_size) self.next_page = self.get_next_page_link(page) self.prev_page = self.get_previous_page_link(page) self.current_page = self.get_current_page_link(page) return paginator, page, object_list, has_other_pages def get_next_page_link(self, page): url = self.request.path params = self.request.GET.copy() if not page.has_next(): return "" else: params['page'] = page.next_page_number() return '{0}?{1}'.format(url, params.urlencode()) def get_previous_page_link(self, page): url = self.request.path params = self.request.GET.copy() if not page.has_previous(): return "" else: params['page'] = page.previous_page_number() return '{0}?{1}'.format(url, params.urlencode()) def get_current_page_link(self, page): url = self.request.path params = self.request.GET.copy() params['page'] = page.number return '{0}?{1}'.format(url, params.urlencode()) class 
LogDateMixin(object): def _get_base_queryset(self): return self.channel.filtered_logs() def channel_date_url(self, date=None): if not date: date = self.date viewname = self.format == 'text' and 'log_day_text' or 'log_day' return reverse_channel( self.channel, viewname, kwargs=self._kwargs_with_date(date)) def _kwargs_with_date(self, date): kwargs = { 'year': date.year, 'month': "%02d" % date.month, 'day': "%02d" % date.day } return kwargs def _local_date_at_midnight(self, timestamp): # cast timestamp into local timezone localized = timestamp.astimezone(self.request_timezone) # create a new date object starting at midnight in that timezone return datetime.datetime(localized.year, localized.month, localized.day, tzinfo=localized.tzinfo) def _get_previous_date(self): """ Find the previous day, that has content. """ date = None try: ts = (self._get_base_queryset() .filter(timestamp__lt=self.date)[0].timestamp) date = self._local_date_at_midnight(ts) except IndexError: pass return date def _get_next_date(self): """ Find the next day, that has content. 
""" date = None try: ts = (self._get_base_queryset() .filter(timestamp__gte=datetime.timedelta(days=1) + self.date) .order_by('timestamp')[0].timestamp) date = self._local_date_at_midnight(ts) except IndexError: pass return date def _date_query_set(self, date): qs = self._get_base_queryset() return qs.filter(timestamp__gte=date, timestamp__lt=date + datetime.timedelta(days=1)) class LogStream(ChannelMixin, View): def get(self, request, channel_slug, bot_slug): response = HttpResponse() response['X-Accel-Redirect'] = '/internal-channel-stream/{}'.format( self.channel.pk) if 'HTTP_LAST_EVENT_ID' in request.META: response['Last-Event-ID'] = request.META['HTTP_LAST_EVENT_ID'] return response def _utc_now(): return datetime.datetime.now(tz=pytz.timezone('UTC')) def _find_pk(pk, queryset): """Find a PK in a queryset in memory""" found = None try: pk = int(pk) found = next(obj for obj in queryset if obj.pk == pk) except (ValueError, StopIteration): pass return found def _timeline_context(timeline): """ Context (template) vars needed for timeline display. 
""" if not timeline: return {} today = _utc_now().date() last_monday = today - datetime.timedelta(days=today.weekday()) last_week = last_monday - datetime.timedelta(days=7) # the last month in the timeline needs special treatment so it # doesn't get ordered ahead of the last/current weeks last_month = timeline[timeline.keyOrder[-1]].pop() if last_month >= last_week: last_month_adjusted = (last_week - datetime.timedelta(days=1)) elif last_month >= last_monday: last_month_adjusted = (last_monday - datetime.timedelta(days=1)) else: last_month_adjusted = last_month result = { 'timeline': timeline, 'this_week': last_monday, 'last_week': last_week, 'last_month': {'real': last_month, 'adjusted': last_month_adjusted}, } return result class LogViewer(ChannelMixin, object): context_object_name = "message_list" newest_first = False show_first_header = False # Display date header above first line paginate_by = 150 format = '' def __init__(self, *args, **kwargs): super(LogViewer, self).__init__(*args, **kwargs) self.next_page = "" self.prev_page = "" self.current_page = "" def dispatch(self, request, *args, **kwargs): self._setup_response_format() return super(LogViewer, self).dispatch(request, *args, **kwargs) def _setup_response_format(self): if self.format == 'text': self.include_timeline = False self.template_name = 'logs/logs.txt' self.content_type = 'text/plain; charset=utf-8' elif self.request.is_ajax(): self.format = 'ajax' self.include_timeline = False self.template_name = 'logs/log_display.html' # Default to HTML view else: self.format = 'html' self.include_timeline = True self.template_name = "logs/logs.html" def get_ordered_queryset(self, queryset): order = 'timestamp' if self.newest_first: order = '-timestamp' return queryset.order_by(order) def get_context_data(self, **kwargs): context = super(LogViewer, self).get_context_data(**kwargs) if self.include_timeline: context.update( _timeline_context(self.channel.get_months_active())) if self.format == 'html': 
            context.update({
                'is_current': getattr(self, 'is_current', False),
                'search_form': forms.SearchForm(),
                'show_first_header': self.show_first_header,
                'newest_first': self.newest_first,
                'show_kudos': self.channel.user_can_access_kudos(
                    self.request.user),
            })
        size = self.channel.current_size()
        context.update({
            'size': size,
            'big': (size >= settings.BIG_CHANNEL),
            'prev_page': self.prev_page,
            'next_page': self.next_page,
            'current_page': self.current_page,
        })
        return context

    def render_to_response(self, context, **response_kwargs):
        # Attach pagination info (a Link header for HTML, X-*Page headers
        # otherwise) and set cache-control based on anonymity.
        response = super(LogViewer, self).render_to_response(
            context, **response_kwargs)
        has_next_page = False
        if self.format == 'html':
            # Official SEO header
            links = []
            if self.next_page:
                links.append('{0}; rel="next"'.format(self.next_page))
                has_next_page = True
            if self.prev_page:
                links.append('{0}; rel="prev"'.format(self.prev_page))
            response['Link'] = ','.join(links)
        else:
            # No HTML, pass page info in easily parseable headers
            if self.next_page:
                response['X-NextPage'] = self.next_page
                has_next_page = True
            if self.prev_page:
                response['X-PrevPage'] = self.prev_page
        # Only non-final pages viewed anonymously are publicly cacheable;
        # everything else stays private.
        if has_next_page and self.request.user.is_anonymous():
            patch_cache_control(
                response, public=True,
                max_age=settings.CACHE_MIDDLEWARE_SECONDS)
        else:
            patch_cache_control(response, private=True)
        return response

    def _pages_for_queryset(self, queryset):
        # Total page count at self.paginate_by lines per page.
        return int(math.ceil(queryset.count() / float(self.paginate_by)))


class DayLogViewer(PaginatorPageLinksMixin, LogDateMixin, LogViewer, ListView):
    # One day of channel logs, paginated, with prev/next day navigation.
    show_first_header = False
    allow_empty = True

    def get(self, request, *args, **kwargs):
        self.date = self.set_view_date()
        self.object_list = self.get_queryset()
        # Redirect to nearby logs if this queryset is empty to avoid a 404
        if not self.get_allow_empty() and not self.object_list.exists():
            url = self._nearby_log_url()
            if url:
                return redirect(url)
            raise Http404(_("Empty list and '%(class_name)s.allow_empty' is False.")
                          % {'class_name': self.__class__.__name__})
        context = self.get_context_data()
        return
        self.render_to_response(context)

    def _nearby_log_url(self):
        """Find a date-based log URL that will not be empty"""
        # NOTE(review): 'closet_qs' is presumably a typo for 'closest_qs'.
        # First check if there is anything in the past
        closet_qs = self.channel.filtered_logs().order_by(
            "-timestamp").filter(timestamp__lte=self.date)
        # If not go to the future
        if not closet_qs.exists():
            closet_qs = self.channel.filtered_logs().order_by(
                "timestamp").filter(
                timestamp__gte=self.date)
        # Return the URL where the first log line found will be
        try:
            return self.channel_date_url(closet_qs[0].timestamp)
        except IndexError:
            pass
        return None

    def get_context_data(self):
        context = super(DayLogViewer, self).get_context_data()
        try:
            # Highlight a single message when ?msg=<pk> is supplied.
            context.update({
                'highlight': int(self.request.GET.get('msg')),
            })
        except (TypeError, ValueError):
            # Missing or non-numeric ?msg -- no highlight.
            pass
        return context

    def get_queryset(self):
        # All visible log lines in the 24 hours starting at self.date.
        qs = self.channel.filtered_logs()
        qs = self.get_ordered_queryset(qs)
        start = self.date
        end = start + datetime.timedelta(days=1)
        return qs.filter(timestamp__gte=start, timestamp__lt=end)

    def _date_paginator(self, date):
        # Paginator over the given day's queryset.
        qs = self._date_query_set(date)
        return self.get_paginator(qs, self.get_paginate_by(qs))

    def paginate_queryset(self, queryset, page_size):
        paginator, page, object_list, has_other_pages = super(
            DayLogViewer, self).paginate_queryset(queryset, page_size)
        # No next page means we are looking at the newest logs.
        if not self.next_page:
            self.is_current = True
        return paginator, page, object_list, has_other_pages

    def get_previous_page_link(self, page):
        """ Generate a link to the next page, from the current one. """
        url = self.channel_date_url()
        # copy, to maintain any params that came in to original request.
        params = self.request.GET.copy()
        if not page.has_previous():
            date = self._get_previous_date()
            if not date:
                # We have no more logs!
                return None
            # Use new paginator to get dates max page number.
            paginator = self._date_paginator(date)
            params['page'] = paginator.num_pages
            url = self.channel_date_url(date)
        else:
            params['page'] = page.previous_page_number()
        return '{0}?{1}'.format(url, params.urlencode())

    def get_next_page_link(self, page):
        """ Generate a link to the next page, from the current one. """
        url = self.channel_date_url()
        # copy, to maintain any params that came in to original request.
        params = self.request.GET.copy()
        if not page.has_next():
            # Last page of this day: jump to the next day with content.
            date = self._get_next_date()
            if date:
                url = self.channel_date_url(date)
                params['page'] = 1  # If new date, always start at page 1.
            else:
                return ""
        else:
            params['page'] = page.next_page_number()
        return '{0}?{1}'.format(url, params.urlencode())

    def get_current_page_link(self, page):
        # Link to the same page number on today's (UTC) log view.
        # copy, to maintain any params that came in to original request.
        params = self.request.GET.copy()
        date = _utc_now()
        url = self.channel_date_url(date)
        params['page'] = page.number
        return '{0}?{1}'.format(url, params.urlencode())

    @cached_property
    def request_timezone(self):
        """ Read timezone in from GET param otherwise use UTC """
        try:
            tz = pytz.timezone(self.request.GET.get('tz', ''))
        except pytz.UnknownTimeZoneError:
            # Unknown or missing ?tz -- fall back to UTC.
            tz = pytz.timezone('UTC')
        return tz

    def set_view_date(self):
        """Determine start date for queryset"""
        if all([field in self.kwargs for field in ['year', 'month', 'day']]):
            # localize date so logs start at local time
            try:
                return datetime.datetime(year=int(self.kwargs['year']),
                                         month=int(self.kwargs['month']),
                                         day=int(self.kwargs['day']),
                                         tzinfo=self.request_timezone)
            except ValueError:
                raise Http404("Invalid date.")
        # Use the last page.
        # No explicit date in the URL: show today (UTC), newest page.
        self.kwargs['page'] = 'last'
        return _utc_now().date()


class SearchLogViewer(PaginatorPageLinksMixin, LogViewer, ListView):
    # Full-text search over a channel's logs, newest results first.
    show_first_header = True
    newest_first = True
    allow_empty = True
    include_timeline = False

    def get(self, request, *args, **kwargs):
        self.form = forms.SearchForm(request.GET)
        return super(SearchLogViewer, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """
        Add the search term to the context data.
        """
        context = super(SearchLogViewer, self).get_context_data(**kwargs)
        context.update({
            'q': self.search_term,
            'search_form': self.form,
        })
        return context

    def get_queryset(self):
        """
        Use search results rather than the standard queryset.
        """
        self.form = forms.SearchForm(self.request.GET)
        if self.form.is_valid():
            self.search_term = self.form.cleaned_data.get("q", "")
        else:
            self.search_term = ""
        # Double '%' -- presumably so the term survives later
        # %-interpolation in the search backend; confirm.
        self.search_term = self.search_term.replace('%', '%%')
        filter_args = self.channel.visible_commands_filter
        # If a user is mentioned, filter those users first
        matches = re.search(r'(\bnick:([\w\-]+)\b)', self.search_term)
        if matches:
            # Strip the 'nick:<name>' token from the term and constrain
            # the result set to that nick instead.
            self.search_term = self.search_term.replace(matches.groups()[0], '')
            filter_args = filter_args & Q(nick__icontains=matches.groups()[1])
        return self.channel.log_set.search(self.search_term).filter(filter_args)


class SingleLogViewer(DayLogViewer):
    """
    Find a single log line and redirect to a permalink to it.

    This inherits from DayLogViewer because it needs to use same queryset
    and pagination methods to ensure the page is found in the same place.
    """
    def
<filename>src/robot/parsing/model.py
#  Copyright 2008-2010 Nokia Siemens Networks Oyj
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.

import os

from robot.errors import DataError
from robot.variables import is_var
from robot.output import LOGGER
from robot import utils

from settings import (Documentation, Fixture, Timeout, Tags, Metadata,
                      Library, Resource, Variables, Arguments, Return,
                      Template)
from populators import FromFilePopulator, FromDirectoryPopulator


def TestData(parent=None, source=None, include_suites=[]):
    # Factory: a directory source yields a TestDataDirectory, anything
    # else a TestCaseFile.  NOTE(review): the mutable default
    # `include_suites=[]` is shared across calls; harmless only while it
    # is never mutated downstream.
    if os.path.isdir(source):
        return TestDataDirectory(parent, source, include_suites)
    return TestCaseFile(parent, source)


class _TestData(object):
    # Common base for parsed test data (files and directories).

    def __init__(self, parent=None, source=None):
        self.parent = parent
        self.source = os.path.abspath(source) if source else None
        self.children = []
        self._tables = None

    def _get_tables(self):
        # Lazily build the normalized table-name -> table mapping.  Both
        # singular and plural header spellings are accepted.
        if not self._tables:
            self._tables = utils.NormalizedDict({'Setting': self.setting_table,
                                                 'Settings': self.setting_table,
                                                 'Metadata': self.setting_table,
                                                 'Variable': self.variable_table,
                                                 'Variables': self.variable_table,
                                                 'Keyword': self.keyword_table,
                                                 'Keywords': self.keyword_table,
                                                 'User Keyword': self.keyword_table,
                                                 'User Keywords': self.keyword_table,
                                                 'Test Case': self.testcase_table,
                                                 'Test Cases': self.testcase_table})
        return self._tables

    def start_table(self, header_row):
        # Resolve the table for a parsed header row.  Returns None for an
        # unknown name or a table the subclass disallows.
        table_name = header_row[0]
        try:
            table = self._valid_table(self._get_tables()[table_name])
        except KeyError:
            return None
        else:
            if table is not None:
                table.set_header(header_row)
            return table

    @property
    def name(self):
        # Suite name derived from the base file/directory name: strip the
        # extension, drop an optional 'NN__' ordering prefix, replace
        # underscores with spaces, and title-case all-lowercase names.
        if not self.source:
            return None
        name = os.path.splitext(os.path.basename(self.source))[0]
        name = name.split('__', 1)[-1]  # Strip possible prefix
        name = name.replace('_', ' ').strip()
        if name.islower():
            name = ' '.join(w[0].upper() + w[1:] for w in name.split())
        return name

    @property
    def keywords(self):
        return self.keyword_table.keywords

    @property
    def imports(self):
        return self.setting_table.imports

    def report_invalid_syntax(self, table, message, level='ERROR'):
        # Log a syntax error; for directories the reported path includes
        # the init file when one exists.
        initfile = getattr(self, 'initfile', None)
        path = os.path.join(self.source, initfile) if initfile else self.source
        LOGGER.write("Invalid syntax in file '%s' in table '%s': %s"
                     % (path, table, message), level)


class TestCaseFile(_TestData):
    # A single parsed test case file; must contain a test case table.

    def __init__(self, parent=None, source=None):
        _TestData.__init__(self, parent, source)
        self.directory = os.path.dirname(self.source) if self.source else None
        self.setting_table = TestCaseFileSettingTable(self)
        self.variable_table = VariableTable(self)
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        if source:
            # FIXME: model should be decoupled from populating
            FromFilePopulator(self).populate(source)
            self._validate()

    def _validate(self):
        if not self.testcase_table.is_started():
            raise DataError('File has no test case table.')

    def _valid_table(self, table):
        # Every table type is allowed in a test case file.
        return table

    def has_tests(self):
        return True

    def __iter__(self):
        for table in [self.setting_table, self.variable_table,
                      self.testcase_table, self.keyword_table]:
            yield table


class ResourceFile(_TestData):
    # A parsed resource file: settings, variables and keywords, no tests.

    def __init__(self, source=None):
        _TestData.__init__(self, source=source)
        self.directory = os.path.dirname(self.source) if self.source else None
        self.setting_table = ResourceFileSettingTable(self)
        self.variable_table = VariableTable(self)
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        if self.source:
            FromFilePopulator(self).populate(source)
            self._report_status()

    def _report_status(self):
        # Log the import result: info when non-empty, warning when empty.
        if self.setting_table or self.variable_table or self.keyword_table:
            LOGGER.info("Imported resource file '%s' (%d keywords)."
                        % (self.source, len(self.keyword_table.keywords)))
        else:
            LOGGER.warn("Imported resource file '%s' is empty." % self.source)

    def _valid_table(self, table):
        # Resource files may not contain test cases.
        if table is self.testcase_table:
            raise DataError('Test case table not allowed in resource file.')
        return table

    def __iter__(self):
        for table in [self.setting_table, self.variable_table,
                      self.keyword_table]:
            yield table


class TestDataDirectory(_TestData):
    # A parsed test suite directory (optionally with an init file).

    def __init__(self, parent=None, source=None, include_suites=[]):
        _TestData.__init__(self, parent, source)
        self.directory = self.source
        self.initfile = None
        self.setting_table = InitFileSettingTable(self)
        self.variable_table = VariableTable(self)
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        if self.source:
            FromDirectoryPopulator().populate(self.source, self, include_suites)
            # Keep only child suites that actually contain tests.
            self.children = [ ch for ch in self.children if ch.has_tests() ]

    def _valid_table(self, table):
        # An init file must not define test cases; report but keep parsing.
        if table is self.testcase_table:
            LOGGER.error('Test case table not allowed in test suite init file.')
            return None
        return table

    def add_child(self, path, include_suites):
        self.children.append(TestData(parent=self,source=path,
                                      include_suites=include_suites))

    def has_tests(self):
        return any(ch.has_tests() for ch in self.children)

    def __iter__(self):
        for table in [self.setting_table, self.variable_table,
                      self.keyword_table]:
            yield table


class _Table(object):
    # Base class for the table types; delegates source/location info to
    # the owning test data object.

    def __init__(self, parent):
        self.parent = parent
        self.header = None

    def set_header(self, header):
        self.header = header

    @property
    def name(self):
        return self.header[0]

    @property
    def source(self):
        return self.parent.source

    @property
    def directory(self):
        return self.parent.directory

    def report_invalid_syntax(self, message, level='ERROR'):
        self.parent.report_invalid_syntax(self.name, message, level)


class _WithSettings(object):
    # Mixin resolving setting names to populate callbacks via self._setters
    # (built by each concrete class's _get_setters()).

    def get_setter(self, setting_name):
        if setting_name in
self._setters:
            return self._setters[setting_name]
        # Unknown setting: report it; implicitly returns None.
        self.report_invalid_syntax("Non-existing setting '%s'." % setting_name)

    def is_setting(self, setting_name):
        return setting_name in self._setters


class _SettingTable(_Table, _WithSettings):
    # Base for setting tables: fixed set of setting objects plus lists of
    # metadata and import entries.
    type = 'setting'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.doc = Documentation('Documentation', self)
        self.suite_setup = Fixture('Suite Setup', self)
        self.suite_teardown = Fixture('Suite Teardown', self)
        self.test_setup = Fixture('Test Setup', self)
        self.test_teardown = Fixture('Test Teardown', self)
        self.force_tags = Tags('Force Tags', self)
        self.default_tags = Tags('Default Tags', self)
        self.test_template = Template('Test Template', self)
        self.test_timeout = Timeout('Test Timeout', self)
        self.metadata = []
        self.imports = []
        self._setters = self._get_setters()

    def _get_adder(self, adder_method):
        # Wrap an add_* method as a populate callback: the first data cell
        # is the name, the remaining cells are arguments.
        def adder(value, comment):
            name = value[0] if value else ''
            adder_method(name, value[1:], comment)
        return adder

    def add_metadata(self, name, value='', comment=None):
        self.metadata.append(Metadata('Metadata', self, name, value, comment))
        return self.metadata[-1]

    def add_library(self, name, args=None, comment=None):
        self.imports.append(Library(self, name, args, comment=comment))
        return self.imports[-1]

    def add_resource(self, name, invalid_args=None, comment=None):
        self.imports.append(Resource(self, name, invalid_args, comment=comment))
        return self.imports[-1]

    def add_variables(self, name, args=None, comment=None):
        self.imports.append(Variables(self, name, args, comment=comment))
        return self.imports[-1]

    def __nonzero__(self):
        # Truthy when any setting has been set (Python 2 bool protocol).
        return any(setting.is_set() for setting in self)


class TestCaseFileSettingTable(_SettingTable):

    def _get_setters(self):
        # All settings (including legacy aliases such as 'Precondition')
        # allowed in a test case file.
        return utils.NormalizedDict({'Documentation': self.doc.populate,
                                     'Document': self.doc.populate,
                                     'Suite Setup': self.suite_setup.populate,
                                     'Suite Precondition': self.suite_setup.populate,
                                     'Suite Teardown': self.suite_teardown.populate,
                                     'Suite Postcondition':
                                         self.suite_teardown.populate,
                                     'Test Setup': self.test_setup.populate,
                                     'Test Precondition': self.test_setup.populate,
                                     'Test Teardown': self.test_teardown.populate,
                                     'Test Postcondition': self.test_teardown.populate,
                                     'Force Tags': self.force_tags.populate,
                                     'Default Tags': self.default_tags.populate,
                                     'Test Template': self.test_template.populate,
                                     'Test Timeout': self.test_timeout.populate,
                                     'Library': self._get_adder(self.add_library),
                                     'Resource': self._get_adder(self.add_resource),
                                     'Variables': self._get_adder(self.add_variables),
                                     'Metadata': self._get_adder(self.add_metadata)})

    def __iter__(self):
        for setting in [self.doc, self.suite_setup, self.suite_teardown,
                        self.test_setup, self.test_teardown, self.force_tags,
                        self.default_tags, self.test_template,
                        self.test_timeout] \
                        + self.metadata + self.imports:
            yield setting


class ResourceFileSettingTable(_SettingTable):

    def _get_setters(self):
        # Resource files only support documentation and imports.
        return utils.NormalizedDict({'Documentation': self.doc.populate,
                                     'Document': self.doc.populate,
                                     'Library': self._get_adder(self.add_library),
                                     'Resource': self._get_adder(self.add_resource),
                                     'Variables': self._get_adder(self.add_variables)})

    def __iter__(self):
        for setting in [self.doc] + self.imports:
            yield setting


class InitFileSettingTable(_SettingTable):

    def _get_setters(self):
        # Init files allow fixtures, force tags, imports and metadata, but
        # no default tags, template or timeout.
        return utils.NormalizedDict({'Documentation': self.doc.populate,
                                     'Document': self.doc.populate,
                                     'Suite Setup': self.suite_setup.populate,
                                     'Suite Precondition': self.suite_setup.populate,
                                     'Suite Teardown': self.suite_teardown.populate,
                                     'Suite Postcondition': self.suite_teardown.populate,
                                     'Test Setup': self.test_setup.populate,
                                     'Test Precondition': self.test_setup.populate,
                                     'Test Teardown': self.test_teardown.populate,
                                     'Test Postcondition': self.test_teardown.populate,
                                     'Force Tags': self.force_tags.populate,
                                     'Library': self._get_adder(self.add_library),
                                     'Resource': self._get_adder(self.add_resource),
                                     'Variables': self._get_adder(self.add_variables),
                                     'Metadata': self._get_adder(self.add_metadata)})

    def
__iter__(self):
        for setting in [self.doc, self.suite_setup, self.suite_teardown,
                        self.test_setup, self.test_teardown, self.force_tags] \
                        + self.metadata + self.imports:
            yield setting


class VariableTable(_Table):
    # Holds the parsed variable definitions.
    type = 'variable'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.variables = []

    def add(self, name, value, comment=None):
        self.variables.append(Variable(name, value, comment))

    def __iter__(self):
        return iter(self.variables)

    def __nonzero__(self):
        return bool(self.variables)


class TestCaseTable(_Table):
    # Holds the parsed test cases.
    type = 'testcase'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.tests = []

    def add(self, name):
        self.tests.append(TestCase(self, name))
        return self.tests[-1]

    def __iter__(self):
        return iter(self.tests)

    def __nonzero__(self):
        return bool(self.tests)

    def is_started(self):
        # True once a test case table header has been seen.
        return bool(self.header)


class KeywordTable(_Table):
    # Holds the parsed user keywords.
    type = 'keyword'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.keywords = []

    def add(self, name):
        self.keywords.append(UserKeyword(self, name))
        return self.keywords[-1]

    def __iter__(self):
        return iter(self.keywords)

    def __nonzero__(self):
        return bool(self.keywords)


class Variable(object):
    # One parsed variable row (scalar or list).

    def __init__(self, name, value, comment=None):
        # Strip a trailing '=' (and spaces) from the variable name.
        self.name = name.rstrip('= ')
        if name.startswith('$') and value == []:
            value = ''
        if isinstance(value, basestring):
            value = [value]  # Need to support scalar lists until RF 2.6
        self.value = value
        self.comment = comment

    def as_list(self):
        # Row representation for serialization; comment goes last.
        ret = [self.name] + self.value
        if self.comment:
            ret.append('# %s' % self.comment)
        return ret

    def is_set(self):
        return True

    def is_for_loop(self):
        return False


class _WithSteps(object):
    # Mixin for objects that carry a list of Step instances.

    def add_step(self, content, comment=None):
        self.steps.append(Step(content, comment))
        return self.steps[-1]


class TestCase(_WithSteps, _WithSettings):
    # One parsed test case: its settings plus a list of steps.

    def __init__(self, parent, name):
        self.parent = parent
        self.name = name
        self.doc = Documentation('[Documentation]', self)
        self.template = Template('[Template]', self)
        self.tags = Tags('[Tags]', self)
        self.setup = Fixture('[Setup]', self)
        self.teardown = Fixture('[Teardown]', self)
        self.timeout = Timeout('[Timeout]', self)
        self.steps = []
        self._setters = self._get_setters()

    def _get_setters(self):
        # Test-level settings ('[Documentation]' etc.) and their aliases.
        return utils.NormalizedDict({'Documentation': self.doc.populate,
                                     'Document': self.doc.populate,
                                     'Template': self.template.populate,
                                     'Setup': self.setup.populate,
                                     'Precondition': self.setup.populate,
                                     'Teardown': self.teardown.populate,
                                     'Postcondition': self.teardown.populate,
                                     'Tags': self.tags.populate,
                                     'Timeout': self.timeout.populate})

    @property
    def source(self):
        return self.parent.source

    @property
    def directory(self):
        return self.parent.directory

    def add_for_loop(self, data):
        self.steps.append(ForLoop(data))
        return self.steps[-1]

    def report_invalid_syntax(self, message, level='ERROR'):
        # Shared by tests and user keywords (UserKeyword subclasses this).
        type_ = 'test case' if type(self) is TestCase else 'keyword'
        message = "Invalid syntax in %s '%s': %s" % (type_, self.name, message)
        self.parent.report_invalid_syntax(message, level)

    def __iter__(self):
        for element in [self.doc, self.tags, self.setup,
                        self.template, self.timeout] \
                        + self.steps + [self.teardown]:
            yield element


class UserKeyword(TestCase):
    # A user keyword; reuses TestCase's machinery with its own settings.

    def __init__(self, parent, name):
        self.parent = parent
        self.name = name
        self.doc = Documentation('[Documentation]', self)
        self.args = Arguments('[Arguments]', self)
        self.return_ = Return('[Return]', self)
        self.timeout = Timeout('[Timeout]', self)
        self.steps = []
        self._setters = self._get_setters()

    def _get_setters(self):
        return utils.NormalizedDict({'Documentation': self.doc.populate,
                                     'Document': self.doc.populate,
                                     'Arguments': self.args.populate,
                                     'Return': self.return_.populate,
                                     'Timeout': self.timeout.populate})

    def __iter__(self):
        for element in [self.args, self.doc, self.timeout] \
                        + self.steps + [self.return_]:
            yield element


class ForLoop(_WithSteps):
    # A for-loop construct: loop variables, items and nested steps.

    def __init__(self, content):
        self.range, index = self._get_range_and_index(content)
        self.vars = content[:index]
        self.items = content[index+1:]
        self.steps = []

    def
_get_range_and_index(self, content):
        # Locate the 'IN' / 'IN RANGE' separator cell; returns the pair
        # (is_range, index).  With no separator, every cell is treated as
        # a loop variable and the item list is empty.
        for index, item in enumerate(content):
            item = item.upper().replace(' ', '')
            if item in ['IN', 'INRANGE']:
                return item == 'INRANGE', index
        return False, len(content)

    def is_comment(self):
        return False

    def is_for_loop(self):
        return True

    def apply_template(self, template):
        # For loops are not affected by test templates.
        return self
"Searchlight": 0xEFF0BF, "Seared Earth": 0x9A5633, "Seared Grey": 0x495157, "Searing Gorge Brown": 0x6B3B23, "Seascape Blue": 0xA6BAD1, "Seascape Green": 0xB5E4E4, "Seashell": 0xFFF5EE, "Seashell Cove": 0x104C77, "Seashell Peach": 0xFFF6DE, "Seashell Pink": 0xF7C8C2, "Seashore Dreams": 0xB5DCEF, "Seaside Sand": 0xF2E9D7, "Seaside Villa": 0xE9D5C9, "Season Finale": 0xBEA27B, "Seasonal Beige": 0xE6B99F, "Seasoned Acorn": 0x7F6640, "Seasoned Apple Green": 0x8DB600, "Seasoned Salt": 0xCEC2A1, "Seattle Red": 0x7D212A, "Seawashed Glass": 0xA9C095, "Seaweed": 0x18D17B, "Seaweed Green": 0x35AD6B, "Seaweed Salad": 0x7D7B55, "Seaweed Tea": 0x5D7759, "Seaweed Wrap": 0x4D473D, "Seaworld": 0x125459, "Seaworthy": 0x314D58, "Sebright Chicken": 0xBD5701, "Secluded Canyon": 0xC6876F, "Secluded Green": 0x6F6D56, "Secluded Woods": 0x495A52, "Second Nature": 0x585642, "Second Pour": 0x887CA4, "Second Wind": 0xDFECE9, "Secrecy": 0x50759E, "Secret Blush": 0xE1D2D5, "Secret Cove": 0x68909D, "Secret Crush": 0xD7DFD6, "Secret Garden": 0x11AA66, "Secret Glade": 0xB5B88D, "Secret Journal": 0x7C6055, "Secret Meadow": 0x72754F, "Secret of Mana": 0x4166F5, "Secret Passageway": 0x6D695E, "Secret Path": 0x737054, "Secret Safari": 0xC6BB68, "Secret Scent": 0xE3D7DC, "Secret Society": 0x464E5A, "Secret Story": 0xFF1493, "Secure Blue": 0x5389A1, "Security": 0xD6E1C2, "Sedate Gray": 0xD1CDBF, "Sedge": 0xB1A591, "Sedge Green": 0x707A68, "Sedia": 0xB0A67E, "Sedona": 0xE7E0CF, "Sedona at Sunset": 0xBF7C45, "Sedona Pink": 0xD6B8A7, "Sedona Sage": 0x686D6C, "Sedona Shadow": 0x665F70, "Seduction": 0xFBF2BF, "Seductive Thorns": 0xA2C748, "Seed Pearl": 0xE6DAC4, "Seedless Grape": 0xD3C3D4, "Seedling": 0xC0CBA1, "Seeress": 0xA99BA9, "Sefid White": 0xFFF1F1, "Seiheki Green": 0x3A6960, "Seiji Green": 0x819C8B, "Sekichiku Pink": 0xE5ABBE, "Sekkasshoku Brown": 0x683F36, "Selago": 0xE6DFE7, "Selective Yellow": 0xFFBA00, "Self Powered": 0x8C7591, "Self-Destruct": 0xC2B398, "Seljuk Blue": 0x4488EE, "Sell Gold": 
0xD4AE5E, "Sell Out": 0x90A2B7, "Semi Opal": 0xAB9649, "Semi Sweet": 0x6B5250, "Semi Sweet Chocolate": 0x6B4226, "Semi-Precious": 0x659B97, "Semolina": 0xCEB899, "Semolina Pudding": 0xFFE8C7, "Sēn Lín Lǜ Forest": 0x4CA973, "Senate": 0x4A515F, "Sencha Brown": 0x824B35, "Seneca Rock": 0x9A927F, "Senior Moment": 0xFDECC7, "Sensai Brown": 0x494A41, "Sensaicha brown": 0x3B3429, "Sensaimidori Green": 0x374231, "Sensational Sand": 0xBFA38D, "Sensible Hue": 0xEAD7B4, "Sensitive Scorpion": 0xCC2266, "Sensitive Tint": 0xCEC9CC, "Sensitivity": 0xA1B0BE, "Sensual Climax": 0xDA3287, "Sensual Fumes": 0xCD68E2, "Sensual Peach": 0xFFD2B6, "Sensuous": 0xB75E6B, "Sensuous Gray": 0x837D7F, "Sentimental": 0xE6D8D2, "Sentimental Beige": 0xE0D8C5, "Sentimental Lady": 0xC4D3DC, "Sentimental Pink": 0xF8EEF4, "Sentinel": 0xD2E0D6, "Sephiroth Grey": 0x8C92AC, "Sepia": 0x704214, "Sepia Black": 0x2B0202, "Sepia Brown": 0x4B3526, "Sepia Filter": 0xCBB499, "Sepia Rose": 0xD4BAB6, "Sepia Skin": 0x9F5C42, "Sepia Tint": 0x897560, "Sepia Tone": 0xB8A88A, "Sepia Wash": 0x995915, "Sepia Yellow": 0x8C7340, "September Gold": 0x8D7548, "September Morn": 0xEDE6B3, "September Morning": 0xFFE9BB, "September Song": 0xD5D8C8, "September Sun": 0xFDD7A2, "Sequesta": 0xD4D158, "Sequin": 0xE1C28D, "Sequoia": 0x804839, "Sequoia Dusk": 0x795953, "Sequoia Fog": 0xC5BBAF, "Sequoia Grove": 0x935E4E, "Sequoia Lake": 0x506C6B, "Sequoia Redwood": 0x763F3D, "Serape": 0xD88B4D, "Seraphim Sepia": 0xD7824B, "Seraphinite": 0x616F65, "Serbian Green": 0x3E644F, "Serena": 0xCFD0C1, "Serenade": 0xFCE9D7, "Serendibite Black": 0x4A4354, "Serendipity": 0xBDE1D8, "Serene": 0xDCE3E4, "Serene Blue": 0x1199BB, "Serene Breeze": 0xBDD9D0, "Serene Journey": 0xCFD8D1, "Serene Peach": 0xF5D3B7, "Serene Scene": 0xD2C880, "Serene Sea": 0x78A7C3, "Serene Setting": 0xC5D2D9, "Serene Sky": 0xC3E3EB, "Serene Stream": 0x819DAA, "Serene Thought": 0xC5C0AC, "Serenely": 0xCED7D5, "Serengeti Dust": 0xE7DBC9, "Serengeti Grass": 0xAB9579, "Serengeti 
Green": 0x77CC88, "Serengeti Sand": 0xFCE7D0, "Sereni Teal": 0x76BAA8, "Serenity": 0x91A8D0, "Serious Gray": 0x7D848B, "Serious Grey": 0xCEC9C7, "Seriously Sand": 0xDCCCB4, "Serpent": 0x817F6D, "Serpentine": 0x9B8E54, "Serpentine Green": 0xA2B37A, "Serpentine Shadow": 0x003300, "Serrano Pepper": 0x556600, "Seryi Grey": 0x9CA9AD, "Sesame": 0xBAA38B, "Sesame Crunch": 0xC26A35, "Sesame Seed": 0xE1D9B8, "Sesame Street Green": 0x00A870, "Settlement": 0x7E7970, "Settler": 0x8B9CAC, "Seven Days of Rain": 0xD3DAE1, "Seven Seas": 0x4A5C6A, "Seven Veils": 0xE3B8BD, "Severe Seal": 0xEEE7DE, "Seville Scarlet": 0x955843, "Shabby Chic": 0xBB8A8E, "Shabby Chic Pink": 0xEFDDD6, "Shade of Amber": 0xFF7E00, "Shade of Bone Marrow": 0x889988, "Shade of Marigold": 0xB88A3D, "Shade of Mauve": 0xAE7181, "Shade of Violet": 0x8601AF, "Shade-Grown": 0x4E5147, "Shaded Fern": 0x786947, "Shaded Fuchsia": 0x664348, "Shaded Glen": 0x8E824A, "Shaded Hammock": 0x859C9B, "Shaded Spruce": 0x00585E, "Shaded Sun": 0xF3EBA5, "Shades On": 0x605F5F, "Shadow": 0x837050, "Shadow Azalea Pink": 0xE96A97, "Shadow Blue": 0x778BA5, "Shadow Cliff": 0x7A6F66, "Shadow Dance": 0x877D83, "Shadow Effect": 0x788788, "Shadow Gargoyle": 0x686767, "Shadow Green": 0x9AC0B6, "Shadow Grey": 0xBBA5A0, "Shadow Leaf": 0x395648, "Shadow Lime": 0xCFE09D, "Shadow Mountain": 0x585858, "Shadow of the Colossus": 0xA3A2A1, "Shadow Planet": 0x221144, "Shadow Purple": 0x4E334E, "Shadow Ridge": 0x5B5343, "Shadow Warrior": 0x1A2421, "Shadow White": 0xEEF1EA, "Shadow Wood": 0x5E534A, "Shadow Woods": 0x8A795D, "Shadowdancer": 0x111155, "Shadowed Steel": 0x4B4B4B, "Shadows": 0x6B6D6A, "Shady": 0xDBD6CB, "Shady Blue": 0x42808A, "Shady Character": 0x4C4B4C, "Shady Glade": 0x006E5B, "Shady Green": 0x635D4C, "Shady Grey": 0x849292, "Shady Lady": 0x9F9B9D, "Shady Neon Blue": 0x5555FF, "Shady Oak": 0x73694B, "Shady Pink": 0xC4A1AF, "Shady White": 0xF0E9DF, "Shady Willow": 0x939689, "Shagbark Olive": 0x645D41, "Shaggy Barked": 0xB3AB98, 
"Shagreen": 0xCBC99D, "Shaker Blue": 0x748C96, "Shaker Grey": 0x6C6556, "Shaker Peg": 0x886A3F, "Shakespeare": 0x609AB8, "Shakker Red": 0x7F4340, "Shakshuka": 0xAA3311, "Shaku-Do Copper": 0x752100, "Shale": 0x4A3F41, "Shale Green": 0x739072, "Shale Grey": 0x899DA3, "Shalimar": 0xF8F6A8, "Shallot Bulb": 0x7B8D73, "Shallot Leaf": 0x505C3A, "Shallow End": 0xC5F5E8, "Shallow Sea": 0x9AB8C2, "Shallow Shoal": 0x9DD6D4, "Shallow Shore": 0xB0DEC8, "Shallow Water": 0x8AF1FE, "Shallow Water Ground": 0x8CAEAC, "Shamanic Journey": 0xCC855A, "Shampoo": 0xFFCFF1, "Shamrock": 0x009E60, "Shamrock Field": 0x358D52, "Shamrock Green": 0x4EA77D, "Shān Hú Hóng Coral": 0xFA9A85, "Shandy": 0xFFE670, "Shanghai Jade": 0xAAD9BB, "Shanghai Peach": 0xD79A91, "Shanghai Silk": 0xC8DFC3, "Shangri La": 0xECD4D2, "Shani Purple": 0x4C1050, "Shank": 0xA18B5D, "Sharbah Fizz": 0x9BE3D7, "Sharegaki Persimmon": 0xFFA26B, "Shark": 0xCADCDE, "Shark Bait": 0xEE6699, "Shark Fin": 0x969795, "Shark Tooth": 0xE4E1D3, "Sharknado": 0x35524A, "Sharkskin": 0x838487, "Sharp Blue": 0x2B3D54, "Sharp Green": 0xC6EC7A, "Sharp Grey": 0xC9CAD1, "Sharp Pebbles": 0xDBD6D8, "Sharp Yellow": 0xECC043, "Sharp-Rip Drill": 0xEAE1D6, "Shasta Lake": 0x355C74, "Shattan Gold": 0xBB5577, "Shattell": 0xB5A088, "Shattered Ice": 0xDAEEE6, "Shattered Porcelain": 0xEEE2E0, "Shattered Sky": 0xD0DDE9, "Shattered White": 0xF1F1E5, "Shaved Chocolate": 0x543B35, "Shaved Ice": 0xA9B4BA, "Shaving Cream": 0xE1E5E5, "Shawarma": 0xDD9955, "She Loves Pink": 0xE39B96, "Shea": 0xF8F1EB, "Shearwater Black": 0x5B5B6C, "Shebang": 0x81876F, "Sheen Green": 0x8FD400, "Sheepskin": 0xDAB58F, "Sheepskin Gloves": 0xAD9E87, "Sheer Apricot": 0xF3C99D, "Sheer Green": 0xB0C69A, "Sheer Lavender": 0xEFE2F2, "Sheer Lilac": 0xB793C0, "Sheer Peach": 0xFFF7E7, "Sheer Pink": 0xF6E5DB, "Sheer Rosebud": 0xFFE8E5, "Sheer Scarf": 0xE3D6CA, "Sheer Sunlight": 0xFFFEDF, "Sheet Blue": 0x52616F, "Sheet Metal": 0x5E6063, "Sheffield": 0x638F7B, "Sheffield Grey": 0x6B7680, "Sheikh 
Zayed White": 0xE6EFEF, "Shell": 0xE1CFC6, "Shell Brook": 0xEEE7E6, "Shell Brown": 0x56564B, "Shell Coral": 0xEA9575, "Shell Ginger": 0xF9E4D6, "Shell Haven": 0xEBDFC0, "Shell Pink": 0xF88180, "Shell Tint": 0xFDD7CA, "Shell White": 0xF0EBE0, "Shelter": 0xB8986C, "Sheltered Bay": 0x758F9A, "Shēn Chéng Orange": 0xC03F20, "Shēn Hóng Red": 0xBE0620, "Shepherd's Warning": 0xC06F68, "Sheraton Sage": 0x8F8666, "Sherbet Fruit": 0xF8C8BB, "Sheriff": 0xEBCFAA, "Sheringa Rose": 0x735153, "Sherpa Blue": 0x00494E, "Sherry Cream": 0xF9E4DB, "Sherwood Forest": 0x555A4C, "Sherwood Green": 0x1B4636, "Shetland Lace": 0xDFD0C0, "<NAME>": 0xE29F31, "Shiffurple": 0x9900AA, "Shifting Sand": 0xD8C0AD, "Shiitake": 0xA5988A, "Shiitake Mushroom": 0x736253, "Shikon": 0x2B2028, "Shilo": 0xE6B2A6, "Shimmer": 0x88C7E9, "Shimmering Blue": 0x82DBCC, "Shimmering Blush": 0xD98695, "Shimmering Brook": 0x64B3D3, "Shimmering Champagne": 0xF3DEBC, "Shimmering Expanse Cyan": 0x45E9FD, "Shimmering Glade": 0xA4943A, "Shimmering Love": 0xFF88CC, "Shimmering Pool": 0xD2EFE6, "Shimmering Sea": 0x2B526A, "Shimmering Sky": 0xDBD1E8, "Shin Godzilla": 0x9A373F, "Shinbashi": 0x59B9C6, "Shinbashi Azure": 0x006C7F, "Shindig": 0x00A990, "Shine Baby Shine": 0xA85E6E, "Shiner": 0x773CA7, "Shingle Fawn": 0x745937, "Shining Armor": 0x908B8E, "Shining Gold": 0xBAD147, "Shining Knight": 0x989EA7, "Shining Silver": 0xC7C7C9, "Shinkansen White": 0xDACDCD, "Shinshu": 0x8F1D21, "Shiny Armor": 0xA1A9A8, "Shiny Gold": 0xAE9F65, "Shiny Kettle": 0xCEA190, "Shiny Luster": 0xDBDDDB, "Shiny Nickel": 0xCCD3D8, "Shiny Rubber": 0x3A363B, "Shiny Shamrock": 0x5FA778, "Shiny Silk": 0xF7ECCA, "Ship Cove": 0x7988AB, "Ship Grey": 0x3E3A44, "Ship Steering Wheel": 0x62493B, "Ship's Harbour": 0x4F84AF, "Ship's Officer": 0x2D3A49, "Shipwreck": 0x968772, "Shipyard": 0x4F6F85, "Shiracha Brown": 0xC48E69, "Shiraz": 0x842833, "Shire": 0x646B59, "Shire Green": 0x68E52F, "Shiroi White": 0xEBF5F0, "Shironeri Silk": 0xFEDDCB, "Shirt Blue": 0x6598AF, 
"Shisha Coal": 0x3C3B3C, "Shishi Pink": 0xEFAB93, "Shishito Pepper Green": 0xBBF90F, "Shiso Green": 0x63A950, "Shiva Blue": 0x99DBFE, "Shock Jockey": 0xBB88AA, "Shocking": 0xE899BE, "Shocking Pink": 0xFE02A2, "Shockwave": 0x72C8B8, "Shoe Wax": 0x2B2B2B, "Shoelace": 0xEAE4D9, "Shoelace Beige": 0xF6EBD3, "Shōji": 0xDED5C7, "Shoji White": 0xE6DFD3, "Shojo's Blood": 0xE2041B, "Shōjōhi Red": 0xDC3023, "Shooting Star": 0xECF0EB, "Shopping Bag": 0x5A4743, "Shore Water": 0x6797A2, "Shoreland": 0xEAD9CB, "Shoreline Green": 0x58C6AB, "Shoreline Haze": 0xD2CBBC, "Short and Sweet": 0xEDD1D3, "Short Phase": 0xBBDFD5, "Shortbread": 0xF5E6D3, "Shortbread Cookie": 0xEACEB0, "Shortcake": 0xEEDAAC, "Shortgrass Prairie": 0x9E957C, "Shot Over": 0x4A5C69, "Shot-Put": 0x716B63, "Shovel Knight": 0x37C4FA, "Show Business": 0xDD835B, "Show Stopper": 0xA42E37, "Shower": 0x9FADB7, "Showstopper": 0x7F607F, "Shrimp": 0xE29A86, "Shrimp Boat": 0xF5BE9D, "Shrimp Boudin": 0xDBBFA3, "Shrimp Cocktail": 0xF4A460, "Shrimp Toast": 0xF7C5A0, "Shrine of Pleasures": 0xCC3388, "Shrinking Violet": 0x5D84B1, "Shrub Green": 0x003636, "Shrubbery": 0xA9C08A, "Shrubby Lichen": 0xB5D1DB, "Shu Red": 0xEB6101, "Shǔi Cǎo Lǜ Green": 0x40826D, "Shui Jiao Dumpling": 0xDCCCA3, "Shukra Blue": 0x2B64AD, "Shuriken": 0x333344, "Shutter Blue": 0x666E7F, "Shutter Copper": 0xBB6622, "Shutter Grey": 0x797F7D, "Shutterbug": 0xBBA262, "Shutters": 0x6C705E, "Shuttle Grey": 0x61666B, "Shy Beige": 0xE2DED6, "Shy Blunt": 0xD3D8DE, "Shy Candela": 0xD6DDE6, "Shy Cupid": 0xF0D6CA, "Shy Denim": 0xD7DADD, "Shy Girl":
# NOTE(review): this chunk begins mid-function — the lines below are the tail
# of a preceding raw_index build routine (its final INSERT, the raw_index
# indexes, and the commit); its def line is outside this view.
= %s ) )'
    txt = fix_query( txt )
    execute( c, txt, data )

    # --------------------------------------------------------------
    # Index raw_index on its lookup columns; DDL differs per backend.
    if MYSQL:
        txt = "ALTER TABLE raw_index ADD INDEX( src ), ADD INDEX( title_id ), ADD INDEX( local )"
        execute( c, txt )

    if SQLITE:
        txt = "CREATE INDEX raw_index_index ON raw_index( src, local, title_id )"
        execute( c, txt )

    # --------------------------------------------------------------
    conn.commit()
    return 0

# --------------------------------------------------------------------------

def build_titles_distinct_from_one_index_source ( src, **kwargs ):
    """Collect titles from every book of one index source.

    Delegates to fb.get_music_index_data_by_src(), which invokes
    proc_one_book_for_titles_distinct() once per book; kwargs carries the
    cursor and the shared accumulators through to that callback.
    """
    fb.get_music_index_data_by_src( src, proc_one_book_for_titles_distinct, **kwargs )

# --------------------------------------------------------------------------
# Add each title to titles_distinct set().

# WRW 22 Feb 2022 - Was getting a lot of duplicates in titles_distinct.
#   Sets do not contain duplicates. Looks like capitalization problem
#   but that should have been done at add() time. Problem with diacriticals.
#   Corrected in fb_title_corrections().
#   Only do corrections in fb_title_corrections() at add time.

def proc_one_book_for_titles_distinct ( src, data, file, **kwargs ):
    """Per-book callback: add each title to the titles_distinct set and
    append one row per title to the raw_index list.

    Expected kwargs: 'c' (db cursor — fetched but not used in this body),
    'titles_distinct' (set), 'raw_index' (list); both accumulators are
    mutated in place.
    """
    c = kwargs[ 'c' ]
    titles_distinct = kwargs[ 'titles_distinct' ]   # WRW 27 Mar 2022 - Now passing titles_distinct through kwargs[]
    raw_index = kwargs[ 'raw_index' ]               # WRW 1 Apr 2022 - Add raw_index

    contents = data[ 'contents' ]
    local = data[ 'local' ]                         # WRW 5 Apr 2022 - Need this, too.

    for line, content in enumerate( contents ):
        title = content[ 'title' ]
        if title:
            titles_distinct.add( title )
        else:
            print( f"WARNING: falsey title on line {line+1} of {file}", file=sys.stderr, flush=True )

        # NOTE(review): the assignments below rebind 'file' (the parameter)
        # and 'line' (the enumerate counter), so subsequent warnings in this
        # iteration report content-supplied values rather than positions —
        # presumably intentional, but worth confirming.
        if 'file' in content:
            file = content[ 'file' ]
        else:
            print( f"WARNING: 'file' not found on line {line+1} of {file}", file=sys.stderr, flush=True )

        if 'line' in content:
            line = content[ 'line' ]
        else:
            print( f"WARNING: 'line' not found on line {line+1} of {file}", file=sys.stderr, flush=True )

        raw_index.append( {'title' : title, 'src' : src, 'local' : local, 'file' : file, 'line' : line } )

    # Sentinel entries; presumably bracket the sorted title list — TODO confirm.
    titles_distinct.add( "_TitleFirst" )
    titles_distinct.add( "_TitleLast" )

# ----------------------------------------------------------------------------------

def build_title2youtube( dc, c, conn, show_found, show_not_found ):
    """(Re)build the title2youtube table mapping title_id -> YouTube links.

    Reads the gzipped JSON file named by conf.val('youtube_index'); each
    entry's title is resolved to a title_id via fb.get_title_id_from_title(),
    and every link of a resolved entry becomes one table row.  show_found /
    show_not_found enable per-title diagnostics on stderr.  Returns 0.
    """
    print( "\nBuilding titles2youtube", file=sys.stderr, flush=True )

    txt = 'DROP TABLE IF EXISTS title2youtube;'
    execute( c, txt )

    if FULLTEXT and SQLITE:
        txt = 'DROP TABLE IF EXISTS title2youtube_fts;'
        execute( c, txt )

    if MYSQL:
        txt = """CREATE TABLE title2youtube( title_id MEDIUMINT(8) UNSIGNED, ytitle VARCHAR(255), duration VARCHAR(255), yt_id VARCHAR(255), id MEDIUMINT UNSIGNED AUTO_INCREMENT, PRIMARY KEY(id) ) ENGINE = MYISAM CHARACTER SET 'utf8mb4' """

    if SQLITE:
        txt = """CREATE TABLE title2youtube( title_id MEDIUMINT(8), ytitle VARCHAR(255), duration VARCHAR(255), yt_id VARCHAR(255), id MEDIUMINT AUTO_INCREMENT, PRIMARY KEY(id) ) """

    execute( c, txt )

    if FULLTEXT and SQLITE:
        # fts5 external-content table shadowing title2youtube (content=/content_rowid=).
        txt = """CREATE VIRTUAL TABLE title2youtube_fts USING fts5( title, ytitle UNINDEXED, title_id UNINDEXED, duration UNINDEXED, yt_id UNINDEXED, content='title2youtube', content_rowid='id' ) """
        execute( c, txt )

    count_titles_total = count_titles_found = count_titles_not_found = 0

    with gzip.open( conf.val( 'youtube_index'), 'rt') as ifd:
        data = json.load( ifd )

        for content in data[ 'contents' ]:
            count_titles_total += 1
            title = content[ 'title' ]
            links = content[ 'links' ]
            title_id = fb.get_title_id_from_title( dc, title )
            if title_id:
                if show_found:
                    print( title, file=sys.stderr, flush=True )
                count_titles_found += 1

                for link in links:
                    ytitle = link[ 'ytitle' ]
                    duration = link[ 'duration' ]

                    # url: https://www.youtube.com/watch?v=DMo6Ju8SJ8o
                    # url = link[ 'url' ]
                    # yt_id = re.sub( 'https://www\.youtube\.com/watch\?v=', '', url )

                    yt_id = link[ 'id' ]

                    # NOTE(review): 'data' is rebound here, shadowing the parsed
                    # JSON above; the outer loop already holds its iterator so
                    # this appears harmless — confirm.
                    data = ( title_id, ytitle, duration, yt_id )
                    txt = 'INSERT INTO title2youtube ( title_id, ytitle, duration, yt_id ) VALUES( %s, %s, %s, %s )'
                    txt = fix_query( txt )
                    execute( c, txt, data )

                    if FULLTEXT and SQLITE:
                        txt = 'INSERT INTO title2youtube_fts ( title_id, ytitle, duration, yt_id ) VALUES( ?, ?, ?, ? )'
                        execute( c, txt, data )

            else:
                count_titles_not_found += 1
                if show_not_found:
                    print( title, file=sys.stderr, flush=True )

    # ----------------------
    # Index the new table; DDL differs per backend.
    if MYSQL:
        txt = "ALTER TABLE title2youtube ADD INDEX( title_id ), ADD INDEX( ytitle ), ADD FULLTEXT( ytitle )"
        execute( c, txt )

    if SQLITE:
        txt = "CREATE INDEX title2youtube_index ON title2youtube ( title_id, ytitle )"
        execute( c, txt )

    # ----------------------
    print( f""" Titles processed from {conf.val( 'youtube_index')}: {count_titles_total} Found in titles_distinct: {count_titles_found} Not found in titles_distinct: {count_titles_not_found}""", file=sys.stderr, flush=True )

    conn.commit()
    return 0

# --------------------------------------------------------------------------
# Format: local book name | (starting page, offset)

# WRW 5 Apr 2022 - get_page_from_sheet() was not working using sqlite. Looks
#   like problem is trying to use primary key 'id'. Add separate counter 'offset_id'.
def build_sheet_offsets( c, conn ):
    """(Re)build the sheet_offsets table: per (src, local) book, the
    sheet_start/sheet_offset pairs used to correct page numbering.

    Rows are inserted by int_build_sheet_offsets(), run once per source via
    fb.traverse_sources().  Returns 0.
    """
    print( "\nBuilding sheet_offsets", file=sys.stderr, flush=True )

    txt = 'DROP TABLE IF EXISTS sheet_offsets;'
    execute( c, txt )

    if MYSQL:
        txt = """CREATE TABLE sheet_offsets( id MEDIUMINT UNSIGNED AUTO_INCREMENT, src VARCHAR(255), local VARCHAR(255), sheet_start SMALLINT, sheet_offset SMALLINT, offset_id MEDIUMINT UNSIGNED, PRIMARY KEY(id) ) ENGINE = MYISAM CHARACTER SET 'utf8mb4' """

    if SQLITE:
        txt = """CREATE TABLE sheet_offsets( id MEDIUMINT AUTO_INCREMENT, src VARCHAR(255), local VARCHAR(255), sheet_start SMALLINT, sheet_offset SMALLINT, offset_id MEDIUMINT, PRIMARY KEY(id) ) """

    execute( c, txt )

    # Populate the table: one worker call per source.
    fb.traverse_sources( int_build_sheet_offsets, c=c )

    if MYSQL:
        txt = "ALTER TABLE sheet_offsets ADD INDEX( src ), ADD INDEX( local ), ADD INDEX( sheet_start)"

    if SQLITE:
        txt = "CREATE INDEX sheet_offsets_index ON sheet_offsets( src, local, sheet_start )"

    execute( c, txt )

    conn.commit()
    return 0

# --------------------------------------------------------------------------
# Example use of the table:
#   select *, page + sheet_offset as corrected
#   from titles
#   join sheet_offsets using(local, src)
#   where page >= sheet_start limit 10;

def int_build_sheet_offsets( src, **kwargs ):
    """Per-source worker: parse the source's sheetoffsets file and insert one
    sheet_offsets row per "(start, offset)" pair found on each data line.

    Input line format: local book name | (starting page, offset) ...
    Lines beginning with '#' and blank lines are skipped; offset_id is a
    monotonically increasing counter across all rows of this source.
    """
    c = kwargs[ 'c' ]
    print( f" {src} {fb.get_source_from_src( src )}", file=sys.stderr, flush=True )
    source = fb.get_source_from_src( src )

    # WRW 5 Mar 2022 - Removed sheetoffsets from config file.
    #   in lieu of hard-wired file name.
    # ifile = conf.val( 'sheetoffsets', source )

    ifile = Path( conf.get_source_path( source ), conf.val( 'sheetoffsets', source ))

    with open( ifile ) as fd:
        offset_id = 0
        for line in fb_utils.continuation_lines(fd):
            line = line.strip()
            if line.startswith( '#' ):
                continue
            if not len(line):
                continue

            local, offsets = line.split( '|' )
            local = local.strip()
            offsets = offsets.strip()

            # Each "(start, offset)" pair becomes one row.
            # NOTE(review): the pattern is a non-raw string; '\(' works today
            # but a raw string r'\(...\)' would be conventional.
            for mo in re.finditer( '\((.*?),(.*?)\)', offsets ):
                sheet_start = int( mo.group(1).strip() )
                sheet_offset = int( mo.group(2).strip() )
                data = ( src, local, sheet_start, sheet_offset, offset_id )
                txt = 'INSERT INTO sheet_offsets ( src, local, sheet_start, sheet_offset, offset_id ) VALUES( %s, %s, %s, %s, %s )'
                txt = fix_query( txt )
                execute( c, txt, data )
                offset_id += 1

# --------------------------------------------------------------------------
# os.walk( folder ) returns generator that returns list of folders and list of files
# in 'folder'.

def old_listfiles( folder ):
    """Yield (root, filename) for every file beneath 'folder'."""
    for root, folders, files in os.walk(folder):
        for file in files:
            yield (root, file)
            # yield os.path.join(root, file)

# -----------------------------------------------------------------------

def build_music_files( c, conn ):
    """(Re)build the music_files table (rpath, file, fb_flag) from the
    configured music folders.

    NOTE(review): this function is cut off at the end of this chunk; its
    body continues past the lines documented here.
    """
    print( "\nBuilding music_files", file=sys.stderr, flush=True )

    txt = 'DROP TABLE IF EXISTS music_files;'
    execute( c, txt )

    if FULLTEXT and SQLITE:
        txt = 'DROP TABLE IF EXISTS music_files_fts;'
        execute( c, txt )

    if MYSQL:
        txt = """CREATE TABLE music_files( rpath VARCHAR(255), file VARCHAR(255), fb_flag CHAR(1), id MEDIUMINT UNSIGNED AUTO_INCREMENT, PRIMARY KEY(id) ) ENGINE = MYISAM CHARACTER SET 'utf8mb4' """

    if SQLITE:
        txt = """CREATE TABLE music_files( rpath VARCHAR(255), file VARCHAR(255), fb_flag VARCHAR(1), id MEDIUMINT AUTO_INCREMENT, PRIMARY KEY(id) ) """

    execute( c, txt )

    if FULLTEXT and SQLITE:
        txt = """CREATE VIRTUAL TABLE music_files_fts USING fts5( rpath, file, fb_flag, content='music_files', content_rowid='id' ) """
        execute( c, txt )

    file_count = 0
    file_count_by_ext = {}

    #
------------------------------------------------------------------------ # WRW 2 Mar 2022 - Recode this using Path() and add fakebook_folder flag. # A bit cleaner and understandable. Added fb_flag to limit files for canon2file editing. music_folders = [ x for x in conf.v.music_file_folders.split('\n') ] fakebook_folders = [ x for x in conf.v.c2f_editable_music_folders.split('\n') ] # Starting point for canon2file editing. root = Path( conf.v.music_file_root ).expanduser() for folder in music_folders: if folder in fakebook_folders: fb_flag = 'y' else: fb_flag = 'n' for file in Path( root, folder ).glob( '**/*.[pP][dD][fF]' ): rpath = file.relative_to( root ).parent.as_posix() data = (rpath, file.name, fb_flag ) txt = 'INSERT INTO music_files ( rpath, file, fb_flag ) VALUES( %s, %s, %s )' txt = fix_query( txt ) execute( c, txt, data ) if FULLTEXT and SQLITE: txt = 'INSERT INTO music_files_fts ( rpath, file, fb_flag ) VALUES( ?, ?, ? )' execute( c, txt, data ) file_count += 1 file_count_by_ext[ file.suffix ] =
[107.58996,-6.97689], [107.58992,-6.97698], [107.5899,-6.97705], [107.58988,-6.9771], [107.58987,-6.97725], [107.58988,-6.97729], [107.5899,-6.9774], [107.58992,-6.97745], [107.58994,-6.9775], [107.58998,-6.97758], [107.59,-6.97761], [107.59006,-6.97769], [107.59009,-6.97772], [107.59014,-6.97778], [107.5902,-6.97784], [107.59036,-6.978], [107.59038,-6.97802], [107.59056,-6.97824], [107.59062,-6.97831], [107.59073,-6.97845], [107.59097,-6.97875], [107.59106,-6.97891], [107.59168,-6.97943], [107.59189,-6.97966], [107.59197,-6.97975], [107.59201,-6.97979], [107.59205,-6.97984], [107.59211,-6.97991], [107.59217,-6.98], [107.59219,-6.98005], [107.59221,-6.98008], [107.59223,-6.98016], [107.59225,-6.98022], [107.59226,-6.9803], [107.59226,-6.98052], [107.59224,-6.98074], [107.59224,-6.98089], ]]) self.jalan.record(nama) self.jalan.line([[ [107.5884,-6.97232], [107.58852,-6.97234], [107.58869,-6.97237], [107.58954,-6.9725], [107.58958,-6.97251], [107.58979,-6.97254], [107.59009,-6.97262], [107.59041,-6.97269], [107.59066,-6.97274], [107.59072,-6.97275], [107.59082,-6.97277], [107.59095,-6.97278], [107.59125,-6.97283], [107.59162,-6.97288], [107.59202,-6.97297], [107.59213,-6.97299], [107.59228,-6.97301], [107.59241,-6.97302], [107.59252,-6.97303], [107.59264,-6.97305], [107.59284,-6.97307], [107.59306,-6.9731], [107.5932,-6.97313], [107.59329,-6.97314], [107.59331,-6.97315], [107.59339,-6.97317], [107.59343,-6.9732], [107.59345,-6.97322], [107.59347,-6.97325], [107.59348,-6.97329], [107.59348,-6.97333], [107.59349,-6.97335], [107.59349,-6.97353], [107.59349,-6.97356], [107.5935,-6.97358], [107.59351,-6.97359], [107.59352,-6.97361], [107.59354,-6.97362], [107.59356,-6.97363], [107.59359,-6.97364], [107.59366,-6.97365], [107.59386,-6.97368], [107.594,-6.97371], [107.5941,-6.97373], [107.59418,-6.97375], [107.59422,-6.97377], [107.59426,-6.97378], [107.59432,-6.97381], [107.59434,-6.97382], [107.59437,-6.97384], [107.59443,-6.97387], [107.59447,-6.97389], 
[107.59452,-6.97392], [107.59456,-6.97395], [107.59461,-6.97399], [107.59465,-6.97402], [107.59468,-6.97406], [107.5947,-6.97409], [107.59472,-6.97414], [107.59483,-6.97433], [107.59511,-6.97469], [107.59519,-6.97474], [107.59526,-6.97478], [107.59528,-6.97479], [107.59533,-6.97482], [107.59539,-6.97485], [107.59542,-6.97487], [107.59543,-6.97487], [107.59545,-6.97487], [107.59552,-6.9749], [107.59555,-6.9749], [107.59559,-6.97491], [107.59576,-6.97495], [107.59603,-6.97502], [107.59604,-6.97502], [107.59614,-6.97503], [107.5962,-6.97504], [107.5963,-6.97505], [107.59638,-6.97505], [107.59644,-6.97503], [107.59649,-6.97502], [107.59659,-6.97497], [107.59665,-6.97493], [107.59675,-6.97485], [107.59676,-6.97484], [107.59686,-6.97476], [107.59693,-6.97472], [107.59701,-6.97467], [107.59703,-6.97466], [107.59711,-6.97461], [107.59714,-6.97459], [107.59727,-6.97451], [107.59733,-6.97445], [107.59736,-6.97442], [107.59737,-6.97441], [107.59744,-6.97435], [107.59755,-6.97422], [107.59771,-6.97404], [107.59776,-6.97395], [107.5978,-6.97388], [107.59801,-6.97361], [107.59803,-6.97359], [107.59804,-6.97359], [107.59806,-6.97358], [107.59809,-6.97358], [107.59811,-6.97358], [107.59813,-6.97358], [107.59815,-6.97359], [107.59822,-6.97364], [107.59824,-6.97365], [107.59837,-6.97374], [107.59848,-6.97382], [107.59855,-6.97388], [107.59861,-6.97391], [107.59864,-6.97392], [107.59871,-6.97395], [107.59886,-6.97399], [107.59901,-6.974], [107.59919,-6.97402], [107.5993,-6.97403], [107.59942,-6.97403], [107.59947,-6.97403], [107.59951,-6.97404], [107.59954,-6.97406], [107.59959,-6.9741], [107.59965,-6.97416], [107.5997,-6.97423], [107.59974,-6.97428], [107.5998,-6.97433], [107.59984,-6.97435], ]]) self.jalan.record(nama) self.jalan.line([[ [107.59339,-6.96286], [107.5935,-6.96292], [107.59385,-6.96309], [107.59403,-6.96318], [107.59417,-6.96323], [107.59426,-6.96325], [107.59445,-6.96329], [107.59446,-6.9633], [107.59457,-6.96331], [107.59468,-6.96332], [107.5953,-6.96334], 
[107.5953,-6.96333], [107.5953,-6.96332], [107.59531,-6.96332], [107.59531,-6.96331], [107.59531,-6.9633], [107.59532,-6.9633], [107.59533,-6.9633], [107.59534,-6.96329], [107.59535,-6.96329], [107.59536,-6.9633], [107.59537,-6.9633], [107.59538,-6.9633], [107.59538,-6.96331], [107.59539,-6.96331], [107.59539,-6.96332], [107.5954,-6.96332], [107.5954,-6.96333], [107.5954,-6.96334], [107.5954,-6.96335], [107.5957,-6.96333], [107.59578,-6.96333], [107.59602,-6.96333], [107.59618,-6.96333], [107.59633,-6.96332], [107.59646,-6.96332], [107.59697,-6.96329], [107.59708,-6.96328], [107.59728,-6.96327], [107.5973,-6.96327], [107.59836,-6.96326], [107.59847,-6.96325], [107.59975,-6.96322], [107.60004,-6.9632], [107.60007,-6.96321], [107.60037,-6.96322], [107.60046,-6.96321], ]]) self.jalan.record(nama) self.jalan.line([[ [107.58863,-6.96472], [107.58886,-6.96477], [107.58888,-6.96478], [107.58902,-6.96482], [107.58905,-6.96483], [107.58916,-6.96487], [107.58923,-6.9649], [107.58929,-6.96493], [107.58938,-6.965], [107.58944,-6.96505], [107.58958,-6.96521], [107.58961,-6.96524], [107.58973,-6.96537], [107.58977,-6.96541], [107.5898,-6.96544], [107.58982,-6.96548], [107.58985,-6.96552], [107.58986,-6.96552], [107.58995,-6.96565], [107.5902,-6.96601], [107.59027,-6.96609], [107.59038,-6.96619], [107.59048,-6.9663], [107.59056,-6.96638], [107.59057,-6.96639], [107.59065,-6.96646], [107.59078,-6.96656], [107.59084,-6.9666], [107.59093,-6.96666], [107.59096,-6.96667], [107.59099,-6.96669], [107.59103,-6.9667], [107.59106,-6.96671], [107.59171,-6.9669], ]]) self.jalan.record(nama) self.jalan.line([[ [107.59646,-6.96332], [107.59647,-6.96369], [107.59647,-6.9637], [107.59648,-6.96396], [107.59649,-6.96428], [107.59649,-6.96458], [107.59648,-6.9648], [107.59648,-6.96482], [107.59647,-6.96485], [107.59647,-6.96506], [107.59647,-6.96511], [107.59617,-6.9651], [107.59616,-6.96524], [107.59615,-6.9655], [107.59614,-6.96578], [107.59614,-6.9658], [107.59613,-6.96604], 
[107.59611,-6.96623], [107.59611,-6.96629], [107.59642,-6.96634], [107.59643,-6.96634], [107.5966,-6.96637], [107.59668,-6.96638], [107.59672,-6.96639], [107.59676,-6.9664], [107.59708,-6.96651], [107.59721,-6.96656], [107.59714,-6.96688], [107.59701,-6.96687], [107.59681,-6.96686], [107.59662,-6.96684], [107.59652,-6.96683], [107.59651,-6.96724], [107.59648,-6.96752], ]]) self.jalan.record(nama) self.jalan.line([[ [107.59984,-6.97435], [107.59992,-6.97438], [107.59999,-6.97439], [107.60051,-6.97442], [107.60067,-6.97443], [107.60074,-6.97443], [107.60082,-6.97442], [107.60087,-6.97442], [107.60091,-6.97441], [107.60097,-6.97441], [107.60103,-6.97441], [107.60111,-6.97442], [107.60122,-6.97445], [107.60136,-6.97448], [107.60146,-6.9745], [107.60154,-6.97451], [107.60171,-6.97453], [107.60196,-6.97456], [107.60203,-6.97458], [107.60208,-6.97459], [107.60222,-6.97462], [107.60229,-6.97463], [107.60235,-6.97463], [107.60241,-6.97462], [107.60246,-6.97461], [107.60251,-6.97459], [107.60252,-6.97459], [107.60254,-6.97456], [107.60257,-6.97453], [107.6026,-6.9745], [107.60264,-6.97447], [107.60271,-6.97444], [107.60285,-6.9744], [107.60312,-6.97443], [107.60335,-6.97446], [107.60359,-6.97448], [107.60365,-6.97448], [107.60371,-6.97448], [107.60372,-6.97447], [107.60376,-6.97446], [107.60379,-6.97445], [107.60385,-6.97442], [107.60397,-6.97437], [107.60412,-6.97431], [107.60421,-6.97428], [107.60428,-6.97425], [107.60438,-6.9742], [107.6045,-6.97416], [107.60456,-6.97414], [107.60459,-6.97414], [107.60463,-6.97413], [107.60466,-6.97413], [107.60468,-6.97413], [107.60472,-6.97413], [107.60476,-6.97413], [107.60481,-6.97414], [107.6049,-6.97417], [107.605,-6.9742], [107.60539,-6.97431], [107.60569,-6.97439], [107.60575,-6.9744], [107.60591,-6.97442], [107.60622,-6.97448], [107.60672,-6.97459], [107.60682,-6.97461], [107.6069,-6.97463], [107.60707,-6.97466], [107.60718,-6.97467], [107.6078,-6.9747], [107.60792,-6.9747], [107.60819,-6.97469], [107.60832,-6.97469], 
[107.60842,-6.9747], [107.60856,-6.97471], [107.60866,-6.97472], ]]) self.jalan.record(nama) self.jalan.line([[ [107.60379,-6.97974], [107.60391,-6.9797], [107.6041,-6.97965], [107.6043,-6.97962], [107.60441,-6.97961], [107.60451,-6.9796], [107.60465,-6.97963], [107.60466,-6.97963], [107.60482,-6.97964], [107.60498,-6.97965], [107.60506,-6.97967], [107.60519,-6.9797], [107.60521,-6.9797], [107.60533,-6.97975], [107.60552,-6.97984], [107.60576,-6.97997], [107.60585,-6.98001], [107.60628,-6.9802], [107.60662,-6.98034], [107.6067,-6.98036], [107.6068,-6.98039], [107.60685,-6.9804], [107.60689,-6.98042], [107.60696,-6.98042], [107.6071,-6.98041], [107.60727,-6.98036], [107.60743,-6.98032], [107.60758,-6.98024], [107.6078,-6.98007], [107.60802,-6.97986], [107.60815,-6.97969], [107.60838,-6.97936], [107.60852,-6.97917], [107.60853,-6.97914], [107.60847,-6.97914], [107.60835,-6.97912], [107.60825,-6.97908], [107.60811,-6.97903], [107.60804,-6.97903], [107.60791,-6.97902], [107.60775,-6.97898], [107.60761,-6.97897], [107.60761,-6.97891], [107.60761,-6.97885], [107.60762,-6.97882], [107.60761,-6.97876], [107.6076,-6.97872], [107.60759,-6.97868], [107.60758,-6.97866], [107.60756,-6.97864], [107.60754,-6.97863], [107.60752,-6.97862], [107.60748,-6.9786], [107.60747,-6.97859], [107.60746,-6.97858], [107.60745,-6.97858], [107.60745,-6.97857], [107.60745,-6.97856], [107.60745,-6.97854], [107.60746,-6.97851], [107.60747,-6.97845], [107.60748,-6.97837], [107.60747,-6.97832], [107.60746,-6.97828], [107.60744,-6.97823], [107.60744,-6.97822], [107.60743,-6.97818], [107.60744,-6.97813], [107.60745,-6.9781], [107.60745,-6.97807], [107.60748,-6.97798], [107.60748,-6.97796], [107.6075,-6.97732], [107.6075,-6.97728], [107.6075,-6.97723], [107.6075,-6.97719], [107.60749,-6.97716], [107.60748,-6.97713], [107.60747,-6.97712], [107.60745,-6.9771], [107.60744,-6.97709], [107.60742,-6.97708], [107.6074,-6.97707], [107.60738,-6.97706], [107.60736,-6.97705], [107.60734,-6.97704], 
# NOTE(review): chunk starts mid-method — the coordinates below complete the
# final self.jalan.line() call of the preceding road method, whose def line
# lies outside this view.
[107.60733,-6.97703], [107.60732,-6.97701], [107.60732,-6.97697], [107.60733,-6.97656], [107.60732,-6.9762], [107.60731,-6.97613], [107.60731,-6.97611], [107.6073,-6.9761], [107.6073,-6.97609], [107.60728,-6.97608], [107.60727,-6.97607], [107.60725,-6.97607], [107.60714,-6.97606], [107.60712,-6.97606], [107.60711,-6.97606], [107.6071,-6.97606], [107.6071,-6.97605], [107.60709,-6.97605], [107.60709,-6.97604], [107.60708,-6.97603], [107.60708,-6.97601], [107.60708,-6.97599], [107.60708,-6.97596], [107.60709,-6.97574], [107.60709,-6.97573], [107.60711,-6.97537], [107.60711,-6.97521], [107.60712,-6.97505], [107.6071,-6.97497], [107.60708,-6.9749], [107.60707,-6.97484], [107.60707,-6.97483], [107.60707,-6.97472], [107.60707,-6.9747], [107.60707,-6.97466], [107.6069,-6.97463], [107.60682,-6.97461], [107.60672,-6.97459], [107.60622,-6.97448], [107.60591,-6.97442], [107.60575,-6.9744], [107.60575,-6.97442], [107.60543,-6.97554],
        ]])

    # Jalan
    def jalanKelurahanCangkuangKulon(self, nama):
        """Record the road ('jalan') polylines for Kelurahan Cangkuang Kulon.

        Each record(nama)/line([[...]]) pair adds one named polyline to
        self.jalan; coordinate pairs appear to be [longitude, latitude]
        (~107.6E, ~6.96-6.97S) — confirm against the shapefile writer.
        """
        self.jalan.record(nama)
        self.jalan.line([[ [107.60539,-6.97431], [107.605,-6.9742], [107.6049,-6.97417], [107.60481,-6.97414], [107.60476,-6.97413], [107.60472,-6.97413], [107.60468,-6.97413], [107.60466,-6.97413], [107.60463,-6.97413], [107.60459,-6.97414], [107.60456,-6.97414], [107.6045,-6.97416], [107.60438,-6.9742], [107.60428,-6.97425], [107.60421,-6.97428], [107.60412,-6.97431], [107.60397,-6.97437], [107.60397,-6.97429], [107.60397,-6.97424], [107.60398,-6.9742], [107.604,-6.97414], [107.60403,-6.97408], [107.60414,-6.97395], [107.60425,-6.97382], [107.60429,-6.97378], [107.60436,-6.9737], [107.60451,-6.97351], [107.60455,-6.97346], [107.60461,-6.97336], [107.60466,-6.97324], [107.60468,-6.97319], [107.60469,-6.97314], [107.60469,-6.97304], [107.6047,-6.97241], [107.6047,-6.97216], [107.6047,-6.97196], [107.6047,-6.97187], [107.60471,-6.97184], [107.60472,-6.9718], [107.60476,-6.97172], [107.60481,-6.97165], [107.60488,-6.97158], [107.60493,-6.97151], [107.60499,-6.97145], [107.60505,-6.97141], [107.60511,-6.97139], [107.60516,-6.97137], [107.60521,-6.97137], [107.60525,-6.97138], [107.60541,-6.97145], [107.60567,-6.97157], [107.60576,-6.9716], [107.6058,-6.97161], [107.60582,-6.97161], [107.60584,-6.9716], [107.60585,-6.97159], [107.60587,-6.97158], [107.60598,-6.97136], [107.6061,-6.97105], [107.60647,-6.9701], [107.6065,-6.97002], [107.60656,-6.96982], [107.60655,-6.96975], [107.60651,-6.96969], [107.60634,-6.96962], [107.60629,-6.96959], [107.60629,-6.96958], [107.60626,-6.96957], [107.60646,-6.96908], ]])

        self.jalan.record(nama)
        self.jalan.line([[ [107.60626,-6.96957], [107.60587,-6.96943], [107.60559,-6.9693], [107.6052,-6.96912], [107.605,-6.96899], [107.60498,-6.96898], [107.60495,-6.96895], [107.60494,-6.96892], [107.60494,-6.96889], [107.60495,-6.96884], [107.60496,-6.9688], [107.60497,-6.96872], [107.60498,-6.96866], [107.60498,-6.96861], [107.60497,-6.96857], [107.60497,-6.96855], [107.60499,-6.96851], [107.605,-6.96849], [107.60501,-6.96847], [107.60502,-6.96845], [107.60503,-6.96841], [107.60504,-6.96832], [107.60505,-6.96828], [107.60505,-6.96826], [107.60505,-6.96824], [107.60505,-6.96822], [107.60504,-6.96821], [107.60501,-6.96818], [107.60499,-6.96815], [107.60499,-6.96812], [107.60499,-6.96809], [107.605,-6.96802], [107.60501,-6.96788], [107.60502,-6.96778], [107.60502,-6.96771], [107.60501,-6.96768], [107.60501,-6.96767], [107.60502,-6.96766], [107.60503,-6.96765], [107.60504,-6.96763], [107.60506,-6.96762], [107.60508,-6.9676], [107.60509,-6.96757], [107.60509,-6.96753], [107.60509,-6.96748], [107.60509,-6.96745], [107.60509,-6.96744], [107.60509,-6.96743], [107.60518,-6.96729], [107.60524,-6.96717], [107.60527,-6.96711], [107.60533,-6.96693], [107.60534,-6.96684], [107.60535,-6.96683], [107.60536,-6.96683], [107.60538,-6.96683], [107.60539,-6.96684], [107.60545,-6.96686], [107.60548,-6.96687], [107.60549,-6.96688], [107.60555,-6.96674], [107.60558,-6.96666], [107.60559,-6.96665], [107.6056,-6.96663], [107.60559,-6.96661], [107.60557,-6.9666], [107.60556,-6.96659], [107.60555,-6.96658], [107.60555,-6.96655], [107.60558,-6.96648], [107.60562,-6.96633], [107.60566,-6.96609], [107.60568,-6.9659], [107.60568,-6.96578], [107.60565,-6.96571], ]])

        self.jalan.record(nama)
        self.jalan.line([[ [107.60431,-6.96375], [107.60426,-6.96395], [107.60423,-6.96401], [107.60421,-6.96406], [107.60413,-6.96439], [107.60397,-6.96434], [107.60379,-6.96429], [107.6037,-6.96449], [107.60357,-6.96482], [107.60342,-6.96524], [107.60326,-6.96565], [107.60321,-6.96582], [107.60315,-6.966], [107.60313,-6.96618], [107.60309,-6.96642], [107.60307,-6.96659], [107.60304,-6.96673], [107.60297,-6.96694], [107.60291,-6.96717], [107.60277,-6.9678], [107.60261,-6.96854], [107.6025,-6.96908], [107.60239,-6.96985], [107.60237,-6.96999], [107.60236,-6.97005], [107.60234,-6.97011], [107.60232,-6.9702], [107.6023,-6.97029], [107.60229,-6.97041], [107.60228,-6.97057], [107.60222,-6.97109], [107.60212,-6.9719], [107.60206,-6.97242], [107.60204,-6.97251], [107.60199,-6.97265], [107.60195,-6.97272], [107.60193,-6.97276], [107.60191,-6.9728], [107.6019,-6.97286], [107.60186,-6.97302], [107.60183,-6.97316], [107.60179,-6.97337], [107.60178,-6.97341], [107.60177,-6.97355], [107.60176,-6.97359], [107.60174,-6.97377], [107.6017,-6.97415], [107.6017,-6.9742], [107.60171,-6.97424], [107.60174,-6.97427], [107.60174,-6.97433], [107.60171,-6.97453], ]])

        self.jalan.record(nama)
        self.jalan.line([[ [107.60601,-6.96166], [107.606,-6.96169], [107.60597,-6.96193], [107.60595,-6.96213], [107.60595,-6.96214], [107.60592,-6.96229], [107.60589,-6.96241], [107.60586,-6.96249], [107.60584,-6.96253], [107.6058,-6.96264], [107.60575,-6.96274], [107.60566,-6.96286], [107.60554,-6.96301], [107.60544,-6.96309], [107.60539,-6.96313], [107.60531,-6.96319], [107.6052,-6.96326], [107.60513,-6.96329], [107.60506,-6.96332], [107.60493,-6.96337], [107.60488,-6.96338], [107.60484,-6.96339], [107.60477,-6.9634], [107.60468,-6.96342], [107.60461,-6.96342], [107.60455,-6.96342], [107.60438,-6.96341], [107.60429,-6.96339], [107.6042,-6.96337], [107.60408,-6.96333], [107.60392,-6.96326], [107.60379,-6.96321], [107.60372,-6.96316], [107.60362,-6.96308], [107.6035,-6.96296], [107.60343,-6.96288], [107.60338,-6.96283], [107.6033,-6.96274], [107.6032,-6.96262], [107.60309,-6.96253], [107.60303,-6.96248], [107.60297,-6.96243], [107.60288,-6.96237], [107.60282,-6.96234], [107.60272,-6.96228], [107.60267,-6.96225], [107.60257,-6.9622], [107.6025,-6.96217], [107.60246,-6.96216], [107.60241,-6.96213], [107.60234,-6.96211], [107.60225,-6.96208], [107.6021,-6.96205], [107.60197,-6.96202], [107.6019,-6.96201], [107.60182,-6.962], [107.60172,-6.96199], [107.60148,-6.96198], [107.6014,-6.96198], [107.60133,-6.96197], [107.60132,-6.96196], ]])

    # Jalan
    # NOTE(review): this method is cut off at the end of this chunk; its
    # first polyline continues on the following lines.
    def jalanKelurahanPasawahan(self, nama):
        self.jalan.record(nama)
        self.jalan.line([[ [107.61371,-6.96107], [107.61374,-6.96119], [107.61377,-6.96128], [107.61381,-6.96141], [107.61383,-6.96148], [107.61394,-6.9618], [107.61403,-6.96206], [107.61424,-6.96265], [107.61447,-6.96329], [107.6145,-6.96337], [107.61454,-6.96348], [107.61474,-6.96413], [107.61487,-6.96449], [107.61499,-6.96483], [107.61506,-6.96501], [107.61516,-6.96531], [107.61527,-6.96561], [107.61529,-6.96567], [107.61549,-6.96626], [107.61557,-6.9665], [107.61561,-6.96661], [107.61563,-6.96665], [107.61571,-6.96691], [107.61579,-6.96716], [107.61586,-6.96738], [107.61597,-6.96771], [107.61608,-6.96805], [107.61609,-6.9681], [107.6161,-6.96813], [107.61612,-6.9682], [107.61613,-6.96823], [107.6162,-6.96841], [107.61626,-6.9686], [107.61628,-6.96866], [107.61642,-6.96906], [107.61645,-6.96914], [107.61655,-6.96944], [107.61668,-6.96983], [107.61671,-6.9699], [107.61674,-6.97002], [107.61683,-6.97022], [107.61688,-6.97036], [107.61697,-6.97058], [107.61705,-6.97084], [107.61707,-6.97091], [107.6173,-6.97167], [107.61735,-6.97185], [107.61784,-6.97339], [107.61791,-6.97363], [107.61793,-6.97367],
[107.61793,-6.97368], [107.61815,-6.97425], [107.61827,-6.97455], [107.61835,-6.97479], [107.61845,-6.97508], ]]) self.jalan.record(nama) self.jalan.line([[ [107.60868,-6.97473], [107.60881,-6.97474], [107.60954,-6.97486], [107.61038,-6.975], [107.61097,-6.97508], [107.61161,-6.97515], [107.61219,-6.97521], [107.61262,-6.97528], [107.61303,-6.97532], [107.61348,-6.97537], [107.61359,-6.97538], [107.61368,-6.97538], [107.61378,-6.97538], [107.61389,-6.97535], [107.61403,-6.97533], [107.61443,-6.97525], [107.61458,-6.97521], [107.61462,-6.97519], [107.61466,-6.97517], [107.61469,-6.97514], [107.6147,-6.97513], [107.61472,-6.9751], [107.61473,-6.97506], [107.61474,-6.97501], [107.61475,-6.97471], [107.61476,-6.97437], [107.61478,-6.9743], [107.6148,-6.97426], [107.61483,-6.97422], [107.61486,-6.9742], [107.61487,-6.97419], [107.6149,-6.97419], [107.61494,-6.97418], [107.61513,-6.97418], [107.61545,-6.97421], [107.61552,-6.97421], [107.61569,-6.97422], [107.61591,-6.97423], [107.61608,-6.97423], [107.61619,-6.97423], [107.61634,-6.97423], [107.6164,-6.97423], [107.61647,-6.97421], [107.6166,-6.97418], [107.61696,-6.97403], [107.61716,-6.97395], [107.61724,-6.97392], [107.61761,-6.97379], [107.61793,-6.97368], [107.61793,-6.97367], [107.61791,-6.97363], [107.61794,-6.97361], [107.6181,-6.97356], [107.61821,-6.97353], [107.6185,-6.97347], [107.61859,-6.97345], [107.61864,-6.97345], ]]) self.jalan.record(nama) self.jalan.line([[ [107.62053,-6.96612], [107.6205,-6.9661], [107.62045,-6.96607], [107.6204,-6.96603], [107.62035,-6.96601], [107.62024,-6.96597], [107.6202,-6.96595], [107.62012,-6.9659], [107.62008,-6.96585], [107.62005,-6.96584], [107.61997,-6.96581], [107.61995,-6.96581], [107.61984,-6.96579], [107.61973,-6.96578], [107.61965,-6.96577], [107.61947,-6.96578], [107.61935,-6.96576], [107.61926,-6.96576], [107.61895,-6.9657], [107.61885,-6.96568], [107.61812,-6.96559], [107.61796,-6.96557], [107.61786,-6.96555], [107.61707,-6.96545], [107.61617,-6.96536], 
[107.61516,-6.96531], ]]) self.jalan.record(nama) self.jalan.line([[ [107.60923,-6.96365], [107.60937,-6.96356], [107.60942,-6.96352], [107.60952,-6.96342], [107.60978,-6.96317], [107.60982,-6.96315], [107.61007,-6.96295], [107.61011,-6.96291], [107.61023,-6.96281], [107.61033,-6.96271], [107.61045,-6.96261], [107.61048,-6.96259], [107.61051,-6.96258], [107.61052,-6.96258], [107.61054,-6.96257], [107.61061,-6.96256], [107.61067,-6.96256], [107.6107,-6.96255], [107.61077,-6.96255], [107.61082,-6.96255], [107.61087,-6.96255], [107.6109,-6.96255], [107.61091,-6.96255], [107.61092,-6.96255], [107.61093,-6.96254], [107.61094,-6.96252], [107.61095,-6.9625], [107.61096,-6.96248], [107.611,-6.96231], [107.61101,-6.9623], [107.61101,-6.96229], [107.61107,-6.96213], [107.61111,-6.96207], [107.61114,-6.96202], [107.61118,-6.96198], [107.61123,-6.96195], [107.61126,-6.96192], [107.6113,-6.96188], [107.61132,-6.96184], [107.61133,-6.96182], [107.61139,-6.96175], [107.61144,-6.96169], [107.61146,-6.96167], [107.6115,-6.96166], [107.61157,-6.96164], [107.61164,-6.96164], [107.61175,-6.96164], [107.61189,-6.96163], [107.61194,-6.96162], [107.61196,-6.96161], [107.61197,-6.9616], [107.61198,-6.96158], [107.61199,-6.96157], [107.61199,-6.96156], [107.61199,-6.96151], [107.61201,-6.96143], [107.61202,-6.96141], [107.61204,-6.9614], [107.61205,-6.96139], [107.61209,-6.96138], [107.61214,-6.96138], [107.6122,-6.9614], [107.61231,-6.96145], [107.61241,-6.96149], [107.6125,-6.96152], [107.61259,-6.96153], [107.61266,-6.96151], [107.61271,-6.96148], [107.61282,-6.96142], [107.61286,-6.96139], [107.6129,-6.96138], [107.613,-6.96136], [107.61305,-6.96135], [107.61317,-6.96134], [107.61338,-6.96133], [107.61373,-6.96128], [107.61377,-6.96128], [107.61374,-6.96119], [107.61406,-6.96115], [107.61451,-6.96114], [107.6146,-6.96114], [107.61464,-6.96113], [107.61492,-6.96111], [107.61506,-6.9611], [107.61513,-6.9611], [107.61526,-6.96111], [107.61541,-6.9611], [107.61555,-6.9611], 
[107.6156,-6.9611], [107.61567,-6.9611], [107.61576,-6.96111], [107.6159,-6.96111], [107.61594,-6.96112], [107.6161,-6.96113], [107.61637,-6.96116], [107.61647,-6.96116], ]]) self.jalan.record(nama) self.jalan.line([[ [107.61636,-6.96164], [107.61623,-6.96247], [107.61621,-6.96265], [107.61617,-6.96271], [107.61613,-6.96275], [107.6161,-6.96278], [107.61607,-6.9628], [107.61604,-6.96281], [107.61471,-6.9633], [107.6145,-6.96337], [107.61447,-6.96329], [107.61424,-6.96265], [107.61403,-6.96206], [107.61394,-6.9618], [107.61383,-6.96148], [107.61381,-6.96141], [107.61377,-6.96128], [107.61373,-6.96128], [107.61338,-6.96133], [107.61317,-6.96134], [107.61305,-6.96135], [107.613,-6.96136], [107.6129,-6.96138], [107.61286,-6.96139], [107.61282,-6.96142], [107.61271,-6.96148], [107.61266,-6.96151], [107.61259,-6.96153], [107.6125,-6.96152], [107.61241,-6.96149], [107.61231,-6.96145], [107.6122,-6.9614], [107.61214,-6.96138], [107.61209,-6.96138], [107.61205,-6.96139], [107.61204,-6.9614], [107.61202,-6.96141], [107.61201,-6.96143], [107.61199,-6.96151], [107.61199,-6.96156], [107.61199,-6.96157], [107.61198,-6.96158], [107.61197,-6.9616], [107.61196,-6.96161], [107.61194,-6.96162], [107.61189,-6.96163], [107.61175,-6.96164], [107.61164,-6.96164], [107.61157,-6.96164], [107.6115,-6.96166], [107.61146,-6.96167], [107.61144,-6.96169], [107.61139,-6.96175], [107.61133,-6.96182], [107.61132,-6.96184], [107.6113,-6.96188], [107.61126,-6.96192], [107.61123,-6.96195], [107.61118,-6.96198], [107.61114,-6.96202], [107.61111,-6.96207], [107.61107,-6.96213], [107.61101,-6.96229], [107.61101,-6.9623], [107.611,-6.96231], [107.61096,-6.96248], [107.61095,-6.9625], [107.61094,-6.96252], [107.61093,-6.96254], [107.61092,-6.96255], [107.61091,-6.96255], [107.6109,-6.96255], [107.61087,-6.96255], [107.61082,-6.96255], [107.61077,-6.96255], [107.6107,-6.96255], [107.61067,-6.96256], [107.61061,-6.96256], [107.61054,-6.96257], [107.61052,-6.96258], [107.61051,-6.96258], 
[107.61048,-6.96259], [107.61045,-6.96261], [107.61033,-6.96271], [107.61023,-6.96281], [107.61011,-6.96291], [107.61007,-6.96295], [107.60982,-6.96315], [107.60978,-6.96317], [107.60952,-6.96342], [107.60956,-6.96345], [107.60961,-6.96348], [107.60967,-6.96349], [107.60973,-6.96351], [107.60997,-6.96358], [107.61036,-6.96369], [107.61037,-6.9637], [107.61037,-6.96371], [107.61038,-6.96372], [107.61029,-6.96403], [107.60995,-6.96396], [107.60964,-6.96385], [107.6096,-6.96385], [107.60957,-6.96386], [107.60955,-6.96388], [107.60953,-6.96393], [107.60953,-6.96397], [107.60953,-6.96398], [107.60955,-6.96399], [107.60992,-6.96409], [107.6099,-6.9641], [107.60989,-6.96412], [107.60988,-6.96414], [107.60986,-6.96427], [107.60977,-6.96459], [107.60975,-6.96472], [107.60976,-6.96473], [107.60976,-6.96475], [107.60976,-6.96476], [107.60978,-6.96478], [107.61003,-6.96486], [107.60999,-6.96496], [107.60998,-6.965], [107.6101,-6.96501], [107.6101,-6.96499], [107.61013,-6.96488], [107.6108,-6.9651], [107.61104,-6.96516], [107.6112,-6.96521], [107.61136,-6.96525], [107.61147,-6.96527], [107.61155,-6.96528], [107.61169,-6.96528], [107.61184,-6.96528], [107.61189,-6.96528], [107.61195,-6.96528], [107.612,-6.96528], [107.61203,-6.96529], [107.61205,-6.96529], [107.61207,-6.96529], [107.61209,-6.96528], [107.61211,-6.96528], [107.61214,-6.96527], [107.61215,-6.96527], [107.61216,-6.96526], [107.61217,-6.96579], [107.61213,-6.96588], [107.61212,-6.96621], [107.6121,-6.96633], [107.61171,-6.96663], ]]) self.jalan.record(nama) self.jalan.line([[ [107.6133,-6.96697], [107.61326,-6.96708], [107.61322,-6.96715], [107.61317,-6.96718], [107.61314,-6.96718], [107.61309,-6.96718], [107.61303,-6.96717], [107.61297,-6.96716], [107.61292,-6.96718], [107.61285,-6.96721], [107.61281,-6.96726], [107.61274,-6.96738], [107.61272,-6.96748], [107.61272,-6.96754], [107.61273,-6.96761], [107.61279,-6.96765], [107.61286,-6.96768], [107.61323,-6.96777], [107.61442,-6.968], [107.6153,-6.96805], 
[107.61609,-6.9681], ]]) # Jalan def jalanKelurahanSukapura(self, nama): self.jalan.record(nama) self.jalan.line([[ [107.62053,-6.96612], [107.62055,-6.96614], [107.6206,-6.96619], [107.62065,-6.96625], [107.62069,-6.96628], [107.62074,-6.9663], [107.62075,-6.96631], [107.62114,-6.96637], [107.62126,-6.96639], [107.62129,-6.9664], [107.62149,-6.96643], [107.62274,-6.96664], [107.62273,-6.96678], [107.62269,-6.96717], [107.6229,-6.96721], [107.62295,-6.96721], [107.62299,-6.96722], [107.62304,-6.96722], [107.62315,-6.96723], [107.6232,-6.96723], [107.62339,-6.96723], [107.6234,-6.96723], [107.62351,-6.96722], [107.62356,-6.96722], [107.62362,-6.96719], [107.62363,-6.96719], [107.62365,-6.96718], [107.62369,-6.96717], [107.62378,-6.96716], [107.62381,-6.96716], [107.62384,-6.96716], [107.62388,-6.96716], [107.62403,-6.96716], [107.62409,-6.96717], [107.62413,-6.96717], [107.62418,-6.96718], [107.62422,-6.9672], [107.62424,-6.96724], [107.62426,-6.96729], [107.62433,-6.96768], [107.62436,-6.96778], [107.62438,-6.96787], [107.62439,-6.96794], [107.62441,-6.96798], [107.62442,-6.968], [107.62443,-6.96801], [107.62445,-6.96802], [107.62447,-6.96803], [107.62449,-6.96804], [107.62451,-6.96804], [107.6246,-6.96804], [107.62474,-6.96804], [107.62483,-6.96804], [107.62492,-6.96804], [107.62507,-6.96803], [107.62513,-6.96803], [107.62517,-6.96803], [107.62519,-6.96805], [107.62521,-6.96808], [107.62523,-6.96812], [107.62524,-6.96815], [107.62526,-6.96819], [107.62529,-6.96822], [107.62533,-6.96823], [107.62538,-6.96825],
['port:eth101/1/36'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/33'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/48'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/49'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/50'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/1'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/2'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/3'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/4'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/5'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/6'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/7'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/8'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/9'], 
hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/10'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/11'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/12'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/1'], hostname=hn202) aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/2'], hostname=hn202) aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/1'], hostname=hn201) aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/2'], hostname=hn201) metric_name = 'cisco_aci.fabric.port.egr_total.bytes.rate' aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101) aggregator.assert_metric(metric_name, value=12.69, tags=tags101 + ['port:eth1/1'], hostname=hn101) aggregator.assert_metric(metric_name, value=12.69, tags=tags101 + ['port:eth1/2'], hostname=hn101) aggregator.assert_metric(metric_name, value=57725.02, tags=tags101 + ['port:eth1/3'], hostname=hn101) aggregator.assert_metric(metric_name, value=4.057143, tags=tags101 + ['port:eth1/4'], hostname=hn101) aggregator.assert_metric(metric_name, value=4.057143, tags=tags101 + ['port:eth1/5'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/6'], hostname=hn101) 
aggregator.assert_metric(metric_name, value=905.104762, tags=tags101 + ['port:eth1/7'], hostname=hn101) aggregator.assert_metric(metric_name, value=455461817.719048, tags=tags101 + ['port:eth1/9'], hostname=hn101) aggregator.assert_metric(metric_name, value=854.752381, tags=tags101 + ['port:eth1/8'], hostname=hn101) aggregator.assert_metric(metric_name, value=536243.27619, tags=tags101 + ['port:eth1/10'], hostname=hn101) aggregator.assert_metric(metric_name, value=4.085714, tags=tags101 + ['port:eth1/11'], hostname=hn101) aggregator.assert_metric(metric_name, value=18.252381, tags=tags101 + ['port:eth1/12'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/15'], hostname=hn101) aggregator.assert_metric(metric_name, value=35.319048, tags=tags101 + ['port:eth1/19'], hostname=hn101) aggregator.assert_metric(metric_name, value=294.643478, tags=tags101 + ['port:eth1/33'], hostname=hn101) aggregator.assert_metric(metric_name, value=609002.547826, tags=tags101 + ['port:eth1/48'], hostname=hn101) aggregator.assert_metric(metric_name, value=17891.591304, tags=tags101 + ['port:eth1/49'], hostname=hn101) aggregator.assert_metric(metric_name, value=82382.26087, tags=tags101 + ['port:eth1/50'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/1'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/2'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/3'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/4'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/5'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/6'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/7'], hostname=hn101) 
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/18'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + 
['port:eth101/1/27'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101) aggregator.assert_metric(metric_name, value=19.111538, tags=tags102 + ['port:eth1/33'], hostname=hn102) aggregator.assert_metric(metric_name, value=76.155556, tags=tags102 + ['port:eth1/48'], hostname=hn102) aggregator.assert_metric(metric_name, value=43222.107407, tags=tags102 + ['port:eth1/49'], hostname=hn102) aggregator.assert_metric(metric_name, value=44725.892593, tags=tags102 + ['port:eth1/50'], hostname=hn102) 
aggregator.assert_metric(metric_name, value=91797.314815, tags=tags102 + ['port:eth1/1'], hostname=hn102) aggregator.assert_metric(metric_name, value=127111.525926, tags=tags102 + ['port:eth1/2'], hostname=hn102) aggregator.assert_metric(metric_name, value=14.1, tags=tags102 + ['port:eth1/3'], hostname=hn102) aggregator.assert_metric(metric_name, value=5.259259, tags=tags102 + ['port:eth1/4'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102) aggregator.assert_metric(metric_name, value=5.071429, tags=tags102 + ['port:eth1/5'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/6'], hostname=hn102) aggregator.assert_metric(metric_name, value=545.053571, tags=tags102 + ['port:eth1/7'], hostname=hn102) aggregator.assert_metric(metric_name, value=797.571429, tags=tags102 + ['port:eth1/8'], hostname=hn102) aggregator.assert_metric(metric_name, value=4.057143, tags=tags102 + ['port:eth1/9'], hostname=hn102) aggregator.assert_metric(metric_name, value=5.107143, tags=tags102 + ['port:eth1/10'], hostname=hn102) aggregator.assert_metric(metric_name, value=4.085714, tags=tags102 + ['port:eth1/11'], hostname=hn102) aggregator.assert_metric(metric_name, value=17.746429, tags=tags102 + ['port:eth1/12'], hostname=hn102) aggregator.assert_metric(metric_name, value=41358.029538, tags=tags202 + ['port:eth1/1'], hostname=hn202) aggregator.assert_metric(metric_name, value=21604.055876, tags=tags202 + ['port:eth1/2'], hostname=hn202) aggregator.assert_metric(metric_name, value=42458.313431, tags=tags201 + ['port:eth1/1'], hostname=hn201) aggregator.assert_metric(metric_name, value=89949.365917, tags=tags201 + ['port:eth1/2'], hostname=hn201) metric_name = 'cisco_aci.fabric.port.ingr_total.pkts.rate' aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + 
['port:eth101/1/44'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/1'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/2'], hostname=hn101) aggregator.assert_metric(metric_name, value=363.5, tags=tags101 + ['port:eth1/3'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/4'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/5'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/6'], hostname=hn101) aggregator.assert_metric(metric_name, value=4.154545, tags=tags101 + ['port:eth1/7'], hostname=hn101) aggregator.assert_metric(metric_name, value=7641.245455, tags=tags101 + ['port:eth1/9'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.618182, tags=tags101 + ['port:eth1/8'], hostname=hn101) aggregator.assert_metric(metric_name, value=310442.886364, tags=tags101 + ['port:eth1/10'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/11'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/12'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/15'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/19'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/33'], hostname=hn101) aggregator.assert_metric(metric_name, value=127.525532, 
tags=tags101 + ['port:eth1/48'], hostname=hn101) aggregator.assert_metric(metric_name, value=280.691304, tags=tags101 + ['port:eth1/49'], hostname=hn101) aggregator.assert_metric(metric_name, value=244.494589, tags=tags101 + ['port:eth1/50'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/1'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/2'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/3'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/4'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/5'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/6'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/7'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101) 
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/18'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/27'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 
+ ['port:eth101/1/37'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/33'], hostname=hn102) aggregator.assert_metric(metric_name, value=160.540741, tags=tags102 + ['port:eth1/48'], hostname=hn102) aggregator.assert_metric(metric_name, value=89.862963, tags=tags102 + ['port:eth1/49'], hostname=hn102) aggregator.assert_metric(metric_name, value=334.822222, tags=tags102 + ['port:eth1/50'], hostname=hn102) aggregator.assert_metric(metric_name, value=401.2, tags=tags102 + ['port:eth1/1'], hostname=hn102) aggregator.assert_metric(metric_name, value=583.388889, tags=tags102 + ['port:eth1/2'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/3'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/4'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/5'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/6'], hostname=hn102) aggregator.assert_metric(metric_name, value=3.353571, tags=tags102 + ['port:eth1/7'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.571429, tags=tags102 + ['port:eth1/8'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/9'], hostname=hn102) aggregator.assert_metric(metric_name, 
value=0.0, tags=tags102 + ['port:eth1/10'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/11'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/12'], hostname=hn102) aggregator.assert_metric(metric_name, value=61.586206, tags=tags202 + ['port:eth1/1'], hostname=hn202) aggregator.assert_metric(metric_name, value=306.708934, tags=tags202 + ['port:eth1/2'], hostname=hn202) aggregator.assert_metric(metric_name, value=312.642784, tags=tags201 + ['port:eth1/1'], hostname=hn201) aggregator.assert_metric(metric_name, value=255.925206, tags=tags201 + ['port:eth1/2'], hostname=hn201) metric_name = 'cisco_aci.fabric.port.ingr_bytes.unicast.cum' aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101) aggregator.assert_metric(metric_name, value=348576910354.0, tags=tags101 + ['port:eth1/1'], hostname=hn101) aggregator.assert_metric(metric_name, value=261593756336.0, tags=tags101 + ['port:eth1/2'], hostname=hn101) aggregator.assert_metric(metric_name, value=365920898063.0, tags=tags101 + ['port:eth1/3'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/4'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101
<reponame>revnav/sandbox
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.

from __future__ import absolute_import

from oci._vendor import requests  # noqa: F401
from oci._vendor import six

from oci import retry  # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel
from .models import data_catalog_type_mapping

# Sentinel used to distinguish "argument not supplied" from an explicit None.
missing = Sentinel("Missing")


class DataCatalogClient(object):
    """
    Use the Data Catalog APIs to collect, organize, find, access, understand, enrich, and activate technical, business, and operational metadata.
    """

    def __init__(self, config, **kwargs):
        """
        Creates a new service client

        :param dict config:
            Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
            The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate_config
            the dict using :py:meth:`~oci.config.validate_config`

        :param str service_endpoint: (optional)
            The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
            not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
            need to specify a service endpoint.

        :param timeout: (optional)
            The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
            as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
            a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
        :type timeout: float or tuple(float, float)

        :param signer: (optional)
            The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
            provided in the config parameter.

            One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
            by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
        :type signer: :py:class:`~oci.signer.AbstractBaseSigner`

        :param obj retry_strategy: (optional)
            A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
            Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
            Any value provided at the operation level will override whatever is specified at the client level.

            This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
            is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
        """
        validate_config(config, signer=kwargs.get('signer'))
        # An explicitly supplied signer (e.g. an instance-principals signer)
        # takes precedence; otherwise build a key-based Signer from the config.
        if 'signer' in kwargs:
            signer = kwargs['signer']
        else:
            signer = Signer(
                tenancy=config["tenancy"],
                user=config["user"],
                fingerprint=config["fingerprint"],
                private_key_file_location=config.get("key_file"),
                pass_phrase=get_config_value_or_default(config, "pass_phrase"),
                private_key_content=config.get("key_content")
            )

        base_client_init_kwargs = {
            'regional_client': True,
            'service_endpoint': kwargs.get('service_endpoint'),
            'timeout': kwargs.get('timeout'),
            'base_path': '/20190325',
            'service_endpoint_template': 'https://datacatalog.{region}.oci.{secondLevelDomain}',
            'skip_deserialization': kwargs.get('skip_deserialization', False)
        }
        self.base_client = BaseClient("data_catalog", config, signer, data_catalog_type_mapping, **base_client_init_kwargs)
        # Client-level retry strategy; individual operations may override it.
        self.retry_strategy = kwargs.get('retry_strategy')

    def change_catalog_compartment(self, change_catalog_compartment_details, catalog_id, **kwargs):
        """
        Moves a resource into a different compartment. When provided, 'If-Match' is checked against 'ETag' values of the resource.

        :param ChangeCatalogCompartmentDetails change_catalog_compartment_details: (required)
            Details for the target compartment.

        :param str catalog_id: (required)
            Unique catalog identifier.

        :param str if_match: (optional)
            For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the
            etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you
            provide matches the resource's current etag value.

        :param str opc_request_id: (optional)
            The client request ID for tracing.

        :param obj retry_strategy: (optional)
            A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

            This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
            is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.

            To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

        :return: A :class:`~oci.response.Response` object with data of type None
        :rtype: :class:`~oci.response.Response`
        """
        resource_path = "/catalogs/{catalogId}/actions/changeCompartment"
        method = "POST"

        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "if_match",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "change_catalog_compartment got unknown kwargs: {!r}".format(extra_kwargs))

        path_params = {
            "catalogId": catalog_id
        }

        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

        # Path parameters must be non-empty: an empty segment would produce a
        # malformed request URL.
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "if-match": kwargs.get("if_match", missing),
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        # Drop headers the caller did not supply (sentinel `missing`) or set to None.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

        # Operation-level retry strategy overrides the client-level default.
        retry_strategy = self.retry_strategy
        if kwargs.get('retry_strategy'):
            retry_strategy = kwargs.get('retry_strategy')

        if retry_strategy:
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=change_catalog_compartment_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=change_catalog_compartment_details)

    def create_attribute(self, catalog_id, data_asset_key, entity_key,
create_attribute_details, **kwargs): """ Creates a new entity attribute. :param str catalog_id: (required) Unique catalog identifier. :param str data_asset_key: (required) Unique data asset key. :param str entity_key: (required) Unique entity key. :param CreateAttributeDetails create_attribute_details: (required) The information used to create an entity attribute. :param str opc_request_id: (optional) The client request ID for tracing. :param str opc_retry_token: (optional) A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again. Retry tokens expire after 24 hours, but can be invalidated before then due to conflicting operations. For example, if a resource has been deleted and purged from the system, then a retry of the original creation request might be rejected. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. 
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.Attribute` :rtype: :class:`~oci.response.Response` """ resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/entities/{entityKey}/attributes" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_request_id", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_attribute got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "catalogId": catalog_id, "dataAssetKey": data_asset_key, "entityKey": entity_key } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-request-id": kwargs.get("opc_request_id", missing), "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_attribute_details, response_type="Attribute") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_attribute_details, response_type="Attribute") def create_attribute_tag(self, catalog_id, data_asset_key, 
entity_key, attribute_key, create_attribute_tag_details, **kwargs): """ Creates a new entity attribute tag. :param str catalog_id: (required) Unique catalog identifier. :param str data_asset_key: (required) Unique data asset key. :param str entity_key: (required) Unique entity key. :param str attribute_key: (required) Unique attribute key. :param CreateTagDetails create_attribute_tag_details: (required) The information used to create an entity attribute tag. :param str opc_request_id: (optional) The client request ID for tracing. :param str opc_retry_token: (optional) A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing
read') class InflowBoundaryField(BaseProperties): def __init__(self, casename, casedir='.', boundarydata_folder='boundaryData', avg_folder='Average', debug=False, **kwargs): self.casename, self.casedir = casename, casedir self.case_fullpath = casedir + '/' + casename + '/' + boundarydata_folder + '/' self.inflow_patches = os.listdir(self.case_fullpath) # Try remove "Average" folder from collected patch names try: self.inflow_patches.remove(avg_folder) except ValueError: pass self.avg_folder_path = self.case_fullpath + avg_folder + '/' # Patch folder paths in Average folder self.avg_folder_patchpaths, self.case_patchfullpath = [], [] for patch in self.inflow_patches: self.avg_folder_patchpaths.append(self.avg_folder_path + patch + '/') self.case_patchfullpath.append(self.case_fullpath + patch + '/') # Try making Average folder and its subfolder, if not already os.makedirs(self.avg_folder_path + patch + '/', exist_ok=True) self.data, self.data_mean = {}, {} # Exception for inheritance class DrivingPressureGradient try: self.times_all, self.times_all_raw = self._readTimes(**kwargs) except NotADirectoryError: pass self.debug = debug print('{} InflowBoundaryField object initialized'.format(casename)) def _readTimes(self, remove='points', **kwargs): times_all = os.listdir(self.case_patchfullpath[0]) try: times_all.remove(remove) except ValueError: pass # Raw all times that are string and can be integer and float mixed # Useful for locating time directories that can be integer times_all_raw = times_all # Numerical float all times and sort from low to high times_all = np.array([float(i) for i in times_all]) # Sort string all times by its float counterpart times_all_raw = [time_raw for time, time_raw in sorted(zip(times_all, times_all_raw))] # Use Numpy sort() to sort float all times times_all.sort() return times_all, times_all_raw @staticmethod def _trimBracketCharacters(data): # Get left and right column of U datacol0, datacol1, datacol2 = data['f0'], data['f1'], 
data['f2'] # New corrected data data_new = np.empty((data.shape[0], 3, data.shape[2])) # Go through each point then each time for l in range(data.shape[0]): # print(l) for m in range(data.shape[2]): newval0, newval2 = datacol0[l, 0, m].decode('utf-8'), datacol2[l, 0, m].decode('utf-8') data_new[l, 0, m] = float(newval0.lstrip('(')) data_new[l, 1, m] = datacol1[l, 0, m] # Right column doesn't need to strip ) since precision limit was 10 and not enough to reach ")" data_new[l, 2, m] = float(newval2.rstrip(')')) return data_new @timer def readPropertyData(self, filenames=('*',), skiprow=22, skipfooter=1, n_timesample=-1, lstr_precision=12, rstr_precision=20): # Ensure tuple inputs and interpret "*" as all files # ensemble_folderpath is a dummy variable here self.ensemble_folderpath = self.case_patchfullpath[0] + self.times_all_raw[0] + '/' self.filenames = self._ensureTupleInput(filenames) self.ensemble_folderpath = '' # Ensure same size as number of files specified skiprow = (skiprow,)*len(self.filenames) if isinstance(skiprow, int) else skiprow skipfooter = (skipfooter,)*len(self.filenames) if isinstance(skipfooter, int) else skipfooter # If n_timesample is -1 or sample interval < 1.5, then use all times sample_interval = 1 if n_timesample == -1 or n_timesample > len(self.times_all)/1.5 else int(np.ceil(len(self.times_all))/n_timesample) self.sample_times = [self.times_all[0]] if sample_interval > 1 else self.times_all # Go through all specified properties for i in range(len(self.filenames)): # String dtype for left and right column of U so that "(12345" can be read, precision is lstr_precision and rstr_precision dtype = ('|S' + str(lstr_precision), float, '|S' + str(rstr_precision)) if self.filenames[i] == 'U' else float # Go through all patches for j in range(len(self.inflow_patches)): print('\nReading {}'.format(self.filenames[i] + ' ' + self.inflow_patches[j])) filename_fullpath = self.case_patchfullpath[j] + self.times_all_raw[0] + '/' + self.filenames[i] 
property_dictkey = self.filenames[i] + '_' + self.inflow_patches[j] # Initialize first index in the 3rd dimension data = np.genfromtxt(filename_fullpath, skip_header=skiprow[i], skip_footer=skipfooter[i], dtype=dtype) # Then go through all times from 2nd time onward cnt, milestone = 0, 25 for k in range(sample_interval, len(self.times_all), sample_interval): # print(self.filenames[i] + ' ' + self.inflow_patches[j] + ' ' + str(self.times_all[k])) filename_fullpath = self.case_patchfullpath[j] + self.times_all_raw[k] + '/' + self.filenames[i] data_pertime = np.genfromtxt(filename_fullpath, skip_header=skiprow[i], skip_footer=skipfooter[i], dtype=dtype) data = np.dstack((data, data_pertime)) # Gauge progress cnt += sample_interval progress = cnt/(len(self.times_all) + 1)*100. if progress >= milestone: print(' ' + str(milestone) + '%...', end='') milestone += 25 # Some postprocessing after reading and dstacking data per time data = data.reshape((data.shape[1], data.shape[0], data.shape[2])) # If file is U, then strip "(" and ")" if self.filenames[i] == 'U': data_new = self._trimBracketCharacters(data) else: data_new = data # Finally, the property data self.data[property_dictkey] = data_new # Collect sample times self.sample_times = np.empty(data_new.shape[2]) i = 0 for k in range(0, len(self.times_all), sample_interval): self.sample_times[i] = self.times_all[k] i += 1 # Collect all property keys self.property_keys = tuple(self.data.keys()) # Numpy array treatment self.sample_times = np.array(self.sample_times) print('\n' + str(self.filenames) + ' read') def _readPoints(self, points_name='points', skiprow=18, skipfooter=1, lstr_precision=12, rstr_precision=20): self.points_name = points_name dtype = ('|S' + str(lstr_precision), float, '|S' + str(rstr_precision)) self.points = {} # Go through all patches for i in range(len(self.inflow_patches)): print('\nReading {}'.format(self.points_name + ' ' + self.inflow_patches[i])) filename_fullpath = self.case_patchfullpath[i] + 
'/' + self.points_name points_dictkey = self.points_name + '_' + self.inflow_patches[i] points = np.genfromtxt(filename_fullpath, skip_header=skiprow, skip_footer=skipfooter, dtype=dtype) # Strip "(" and ")". # Reshaping points to 3D as _trimBracketCharacters take data of shape (n_points, 1, n_times) points_new = self._trimBracketCharacters(points.reshape((points.shape[0], 1, 1))) self.points[points_dictkey] = points_new[..., 0] @timer def calculatePropertyMean(self, starttime=None, stoptime=None, get_tke_total=True, get_horizontal_mean=True, **kwargs): self.get_tke_total = get_tke_total self.get_horizontal_mean = get_horizontal_mean # Read points coordinates of each patch if also computing horizontal mean on top of temporal averaging if get_horizontal_mean: self._readPoints() # times_all in _selectTimes() should be sample_times in this case, thus temporarily change times_all to sample_times times_all_tmp = self.times_all.copy() self.times_all = self.sample_times # Find selected times and start, stop indices from sample_times self.times_selected, self.starttime_real, self.stoptime_real, istart, istop = self._selectTimes(starttime=starttime, stoptime=stoptime) # Switch times_all back self.times_all = times_all_tmp # Go through all properties and every patch for i in range(len(self.property_keys)): # Selected property data at selected times property_selected = self.data[self.property_keys[i]][:, :, istart:(istop + 1)] calc_tke_resolved = True if get_tke_total and 'U' in self.property_keys[i] else False # Keep in mind which patch current property lies in current_patch = self.property_keys[i].split('_')[1] # Property mean is sum(property_j*time_j)/sum(times) property_dot_dt_sum, dt_sum = 0., 0. tke_resolved_dot_dt_sum, dt_sum_tke_resolved = 0., 0. 
for j in range(1, len(self.times_selected)): # For subsequent times, dt = t_j - t_j-1 dt = self.times_selected[j] - self.times_selected[j - 1] # Linear interpolation between each value point property_interp = (property_selected[:, :, j - 1] + property_selected[:, :, j])/2. property_dot_dt = property_interp*dt property_dot_dt_sum += property_dot_dt dt_sum += dt # In case current property is U and total mean TKE is asked, # calculate it from U' = U - <U>, TKE_resolved = 0.5U'U' # <TKE_resolved> = 0.5<U'U'> if calc_tke_resolved: # <U> at current time correlation stage u_mean_stage = property_dot_dt_sum/dt_sum # U'U', instantaneous uuprime2 = (property_interp - u_mean_stage)**2. if self.debug: print("Instantaneous resolved TKE = {}".format(0.5*uuprime2)) # sum(TKE_resolved*dt) tke_resolved_dot_dt_sum += 0.5*np.sum(uuprime2, axis=1, keepdims=True)*dt # sum(dt) dt_sum_tke_resolved = dt_sum # Store in dictionary self.data_mean[self.property_keys[i]] = property_dot_dt_sum/dt_sum if calc_tke_resolved: self.data_mean['kResolved_' + current_patch] = tke_resolved_dot_dt_sum/dt_sum_tke_resolved # If also perform horizontal averaging if get_horizontal_mean: # To get horizontal mean, sort z and get its sorted index idx_sorted = np.argsort(self.points[self.points_name + '_' + current_patch][:, 2]) # Index to sort relevant arrays back to original order idx_revertsort = np.argsort(idx_sorted) # Sorted point coordinates of current patch points_sorted = self.points[self.points_name + '_' + current_patch][idx_sorted] # Then sort current property at current patch with this sorted index data_mean_sorted = self.data_mean[self.property_keys[i]][idx_sorted] # Do the same if resolved TKE is calculated if calc_tke_resolved: k_mean_sorted = self.data_mean['kResolved_' + current_patch][idx_sorted] n_points_per_lvl = len(points_sorted[points_sorted[:, 2] == points_sorted[0, 2]]) # Go through every height level and do averaging ih = 0 while ih < len(idx_sorted) - 1: data_mean_sorted[ih:ih + 
n_points_per_lvl] = np.mean(data_mean_sorted[ih:ih + n_points_per_lvl], axis=0) if calc_tke_resolved: k_mean_sorted[ih:ih + n_points_per_lvl] = np.mean(k_mean_sorted[ih:ih + n_points_per_lvl], axis=0) ih += n_points_per_lvl # Sort sorted arrays back to original order self.data_mean[self.property_keys[i]] = data_mean_sorted[idx_revertsort] if calc_tke_resolved: self.data_mean['kResolved_' + current_patch] = k_mean_sorted[idx_revertsort] if self.debug and get_horizontal_mean: self.data_mean_sorted = data_mean_sorted self.points_sorted = points_sorted # If accumulated time of TKE resolved is not 0, add it to the existing key called "k", a.k.a. SGS TKE if get_tke_total: # data_mean_keys has the addition of "kResolved_{patch}" that will merge with "k_{patch}" self.data_mean_keys = list(self.data_mean.keys()) # Go through properties to find mean resolved TKE of a patch for i in range(len(self.data_mean_keys)): if 'kResolved' in self.data_mean_keys[i]: # Go through properties again to find mean SGS TKE of the same patch for j in range(len(self.data_mean_keys)): if 'k' in self.data_mean_keys[j] \ and 'kResolved' not in self.data_mean_keys[j] \ and self.data_mean_keys[j].split('_')[1] == self.data_mean_keys[i].split('_')[1]: # data_mean_keys[j] is called k_{patch} # data_mean_keys[i] is called kResolved_{patch}
params["localid"] = "Y" # default value # Entry ID is a unique-enough string used w/ search and replace # params["defval"] = None if row[2].find( ".Entry_ID" ) >= 0 : params["defval"] = "NEED_ACC_NUM" elif row[24] is not None : params["defval"] = row[24].strip() if params["defval"] in ("?", ".") : params["defval"] = None # autoinsert codes # # code 8 is saveframe label tag that has a matching _ID tag w/ code 7. # it is "real data". other autoinsert codes are for automatically generated values that "aren't real" # becasue we can re-create them anytime # params["aditauto"] = "N" if row[25] is not None : if not str( params["aditauto"] ).isdigit() : pass if row[25] > 0 : if row[25] != 8 : params["aditauto"] = "Y" if self.verbose : sys.stdout.write( sql + "\n" ) pprint.pprint( params ) self._curs2.execute( sql, params ) # SF link flag # sql = "update tags set sflinkflag='Y' where tagname=:tag" self._curs.execute( "select tagname from tags where valtype='FRAMECODE'" ) while True : row = self._curs.fetchone() if row is None : break tag = row[0].replace( "_label", "_ID" ) self._curs2.execute( sql, { "tag" : tag } ) # fixup, just in case # self._curs2.execute( "update tags set aditauto='Y' where aditdefault is not null" ) #################################################################################################### # If mandatory is V or M, select mandatory from sfcats where sfcat = ? # if sfcat is optional, reset to R or C resp. I.e. # "mandatory if saveframe exists" (R,C) vs. 
"mandatory always (implies: saveframe must exist)" (V,M) # def fix_loopmandatory( self ) : if self._verbose : sys.stdout.write( self.__class__.__name__ + ".fix_loopmandatory()\n" ) sql = "update tags set mandatory=:man where seq=:seq" qry = "select mandatory from sfcats where sfcat=:sfcat" self._curs.execute( "select seq,sfcat,mandatory,tagname from tags where mandatory='V' or mandatory='M'" ) params = {} while True : params.clear() row = self._curs.fetchone() if row is None : break params["seq"] = row[0] params["sfcat"] = row[1] if self.verbose : pprint.pprint( row ) sys.stdout.write( qry + "\n" ) pprint.pprint( params ) self._curs2.execute( qry, params ) sfrow = self._curs2.fetchone() if sfrow is None : raise Exception( "Error: no saveframe category for tag # %s", row[0] ) if sfrow[0] == "O" : params["man"] = row[2] if params["man"] == "V" : params["man"] = "R" elif params["man"] == "M" : params["man"] = "C" self._curs2.execute( sql, params ) #################################################################################################### # # Mandatory overrides # def load_overrides( self ) : if self._verbose : sys.stdout.write( self.__class__.__name__ + ".load_overrides()\n" ) ovrsql = "insert into tagdeps (ctlseq,ctlvalue,seq,mandatory) values (:ctseq,:ctval,:seq,:man)" tagsql = "update tags set tagdepflag='Y' where seq=:seq" qry = "select t1.dictionaryseq,v.ctlvalue,t2.dictionaryseq,v.validateflags,t1.originaltag " \ + "from star.validationlinks v " \ + "join star.dict t1 on t1.originalcategory=v.ctlsfcategory and t1.originaltag=v.ctltag " \ + "join star.dict t2 on t2.originalcategory=v.depsfcategory and t2.originaltag=v.deptag" self._curs.execute( "select count(*) from star.validationlinks" ) row = self._curs.fetchone() if row[0] < 1 : raise Exception( "empty validationlinks table" ) self._curs.execute( "select count(*) from star.dict" ) row = self._curs.fetchone() if row[0] < 1 : raise Exception( "empty dict table" ) params = {} if self._verbose : 
sys.stdout.write( qry ) sys.stdout.write( "\n" ) self._curs.execute( qry ) while True : params.clear() row = self._curs.fetchone() if row is None : break if row[1] is not None : if row[1].strip() == "*" : continue # ADIT wildcard, not used by validator tag = row[4].strip() if tag in ("_Entry_interview.View_mode","_Entry_interview.PDB_deposition", "_Entry_interview.BMRB_deposition") : continue # ADIT view-only tags # let's not do that for now # if (tag == "_Entity.Number_of_monomers") and (row[1] == "polymer") : # Eldon's software can't 'V' this one # mandatory = "V" # else : params["man"] = row[3].strip().upper()[self._mode:self._mode+1] params["ctseq"] = row[0] params["ctval"] = row[1] params["seq"] = row[2] if self._verbose : sys.stdout.write( ovrsql ) pprint.pprint( params ) rc = self._curs2.execute( ovrsql, params ) if self._verbose : sys.stdout.write( "-- %s rows inserted\n" % (rc,) ) if self._verbose : sys.stdout.write( tagsql ) pprint.pprint( params ) rc = self._curs2.execute( tagsql, params ) if self._verbose : sys.stdout.write( "-- %s rows updated\n" % (rc,) ) #################################################################################################### # # Tag relationships # derived from foreign keys with no regard/support for compound keys # def load_parent_child( self ) : if self._verbose : sys.stdout.write( self.__class__.__name__ + ".load_parent_child()\n" ) sql = "insert into tagrels (chldseq,prntseq) values (:childseq,:parentseq)" self._curs.execute( "select t1.dictionaryseq,t2.dictionaryseq from dict t1 " \ + "join dict t2 on t2.tagcategory=t1.foreigntable and t2.tagfield=t1.foreigncolumn " \ + "where t1.foreigntable is not null and t1.foreigncolumn is not null " \ + "order by t2.dictionaryseq" ) while True : row = self._curs.fetchone() if row is None : break self._curs2.execute( sql, { "childseq" : row[0], "parentseq" : row[1] } ) #################################################################################################### # # Turn 
off "enumclosed" flag for tags whose parent is _Experiment.Name # # Must run after load_parent_child() # def fix_experiment_names( self ) : if self._verbose : sys.stdout.write( self.__class__.__name__ + ".fix_experiment_names()\n" ) sql = "update tags set enumclosedflag='N' where seq=:seq" self._curs.execute( "select r.chldseq from tagrels r join tags t on t.seq=r.prntseq " \ + "where t.tagname='_Experiment.Name'" ) while True : row = self._curs.fetchone() if row is None : break self._curs2.execute( sql, { "seq": row[0] } ) #################################################################################################### # # Saveframe link tags # def update_sf_links( self ) : if self._verbose : sys.stdout.write( self.__class__.__name__ + ".update_sflinks()\n" ) # match _label and _ID tags # sql = "update tags set sflinkflag='Y' where tagname=:tag" qry = "select seq from tags t join tagrels r on r.chldseq=t.seq where t.tagname=:tag" self._curs.execute( "select seq,tagname from tags where valtype='FRAMECODE'" ) while True : row = self._curs.fetchone() if row is None : break if row[1].find( "_label" ) < 0 : self.errors.append( "tag %s does not end in '_label'" % (row[1],) ) continue idtag = row[1].replace( "_label", "_ID" ) self._curs2.execute( qry, { "tag" : idtag } ) qrow = self._curs2.fetchone() if qrow is None : self.errors.append( "tag %s not found in related tags table. Missing foreign key?" 
% (idtag,) ) continue self._curs2.execute( sql, { "tag" : idtag } ) # add _label to .Sf_framecode parent-child links # sql = "insert into tagrels (prntseq,chldseq) values (:parent,:child)" qry = "select t1.tagcat from tags t1 join tagrels r on r.prntseq=t1.seq join tags t2 on t2.seq=r.chldseq " \ + "where t2.tagname=:tag" qry1 = "select seq from tags where tagname=:tag" qry2 = "select prntseq,chldseq from tagrels where prntseq=:parent and chldseq=:child" self._curs.execute( "select seq,tagname from tags where valtype='FRAMECODE'" ) while True : row = self._curs.fetchone() if row is None : break idtag = row[1].replace( "_label", "_ID" ) self._curs2.execute( qry, { "tag" : idtag } ) qrow = self._curs2.fetchone() if qrow is None : self.errors.append( "parent tag for %s (%s) not found" % (idtag,row[1],) ) continue fctag = "_" + qrow[0] + ".Sf_framecode" self._curs2.execute( qry1, { "tag" : fctag } ) qrow = self._curs2.fetchone() if qrow is None : self.errors.append( "framecode tag %s (%s) not found" % (fctag,row[1],) ) continue # only add if not already there # self._curs2.execute( qry2, { "parent" : qrow[0], "child" : row[0] } ) qrow = self._curs2.fetchone() if qrow is None : self._curs2.execute( sql, { "parent" : qrow[0], "child" : row[0] } ) #################################################################################################### # # Datum types # This was supposed to drive table template generator on the website: tables whose tablegen flag is "y" # can be generated by that code. The list is in the properties file. 
# def load_datum_types( self ) : if self._verbose : sys.stdout.write( self.__class__.__name__ + ".load_datum_types()\n" ) cats = self._props.get( "validict", "datum.categories" ).split() sql = "insert into datumtypes (tagcat,datumtype,tablegen) values (:table,:datum,:flag)" self._curs.execute( "select distinct tagcategory,datumcountflgs from star.dict " \ + "where datumcountflgs is not null" ) while True : row = self._curs.fetchone() if row is None : break flag = "N" if row[0] in cats : flag = "Y" self._curs2.execute( sql, { "table" : row[0], "datum" : row[1], "flag" : flag } ) #################################################################################################### # # this one's supposed to drive STARch PHP on the website. not that hard-coding it here is any # less work than hard-coding it there. # def load_starch_table( self ) : if self._verbose : sys.stdout.write( self.__class__.__name__ + ".load_starch_table()\n" ) sql = "insert into starch (tagname,displname,displseq,rowidx,seqid,compidxid," \ + "compid,atomid,atomtype,isotope,ambicode,val,minval,maxval,err,author," \ + "tablegen,groupid) values (:tag,:label,:order,:idx,'N','N','N','N','N','N','N','N','N'," \ + "'N','N','N','N',0)" qry = "select tagname,seq,rowidxflag from tags where metadata<>'Y' and tagname
# ---------------------------------------------------------------------------- # Copyright (c) 2020-2021, Pelion and affiliates. # # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ WebSocket notification channel related helpers. """ import base64 import datetime import json import logging import queue import threading from time import sleep from ws4py.client.threadedclient import WebSocketClient from ws4py.exc import WebSocketException from pelion_systest_lib.tools import build_random_string log = logging.getLogger(__name__) class WebsSocketNotificationChannel: def __init__(self, cloud_api, api_key, configuration=None): log.info('Register and open WebSocket notification channel') self.api_key = api_key self.cloud_api = cloud_api cloud_api.connect.register_websocket_channel(api_key, configuration=configuration, expected_status_code=[200, 201]) # IOTNS-205 sleep(5) # Get host part from api address host = cloud_api.rest_api.api_gw.split('//')[1] log.info('Opening WebSocket handler') self.ws = WebSocketRunner('wss://{}/v2/notification/websocket-connect'.format(host), api_key) self.handler = WebSocketHandler(self.ws) def close(self): try: self.ws.close() except BaseException as e: log.warning('Websocket closing error: {}'.format(e)) sleep(2) log.info('Deleting WebSocket channel') self.cloud_api.connect.delete_websocket_channel(self.api_key, expected_status_code=204) 
class WebSocketHandler:
    """
    Handle messages received from the notification service via WebSocket.

    Wraps a WebSocketRunner and exposes check/wait helpers over the event
    buckets the runner maintains ('registrations', 'de-registrations',
    'reg-updates', 'registrations-expired', 'notifications') and over its
    'async_responses' mapping.

    :param ws: WebSocket Runner class
    """

    def __init__(self, ws):
        self.ws = ws

    @property
    def api_key(self):
        """
        Make able to use exactly right apikey in other places
        :return: api key
        """
        return self.ws.api_key

    def _check_event(self, event_type, device_id):
        """
        Return the first stored event of the given type for the given device.

        :param event_type: one of the keys of ``self.ws.events``
        :param device_id: string
        :return: False / dict
        """
        for item in self.ws.events[event_type]:
            # If asked device_id is found return its data. Otherwise return False
            if item['ep'] == device_id:
                return item
        return False

    def check_registration(self, device_id):
        """
        Check if WebSocket has registration message(s) for given device id
        :param device_id: string
        :return: False / dict
        """
        return self._check_event('registrations', device_id)

    def check_deregistration(self, device_id):
        """
        Check if WebSocket has de-registration message(s) for given device id
        :param device_id: string
        :return: False / dict
        """
        return self._check_event('de-registrations', device_id)

    def check_registration_updates(self, device_id):
        """
        Check if WebSocket has registration updates message(s) for given device id
        :param device_id: string
        :return: False / dict
        """
        return self._check_event('reg-updates', device_id)

    def check_registration_expiration(self, device_id):
        """
        Check if WebSocket has registrations expired message(s) for given device id
        :param device_id: string
        :return: False / dict
        """
        return self._check_event('registrations-expired', device_id)

    def get_notifications(self):
        """
        Get all notifications from WebSocket data
        :return: list of dicts
        """
        return self.ws.events['notifications']

    def get_async_response(self, async_response_id):
        """
        Get async-response from WebSocket data for given async_id
        :param async_response_id: string
        :return: dict or None
        """
        return self.ws.async_responses.get(async_response_id)

    def wait_for_multiple_notification(self, device_id, expected_notifications, timeout=30, assert_errors=False):
        """
        Wait for given device id + resource path(s) + expected value(s) to appear in WebSocket data

        :param device_id: string
        :param expected_notifications: list of dicts of resource paths with expected values
            [{'resource_path': 'expected_value'}, {'resource_path_2': 'expected_value_2'}, ...]
        :param timeout: int
        :param assert_errors: boolean for user if to fail test case in case of expected notifications not received
        :return: False / list of received notifications or fail the test case if assert_errors=True
        """
        item_list = []
        for _ in range(timeout):
            # Rebuild the match list on every poll. The previous implementation
            # accumulated matches across polls, so one stored notification was
            # re-appended every second and could satisfy the expected count
            # with duplicates of the same event.
            item_list = []
            for item in self.get_notifications():
                if item['ep'] != device_id:
                    continue
                payload = base64.b64decode(item['payload']).decode('utf8')
                # Notification counts when its path AND decoded payload appear
                # in any of the expected {path: value} entries.
                if any(item['path'] in expect_item.keys() and payload in expect_item.values()
                       for expect_item in expected_notifications):
                    item_list.append(item)
                    if len(item_list) == len(expected_notifications):
                        return item_list
            sleep(1)
        log.debug('Expected {}, found only {}!'.format(expected_notifications, item_list))
        if assert_errors:
            assert False, 'Failed to receive all expected notifications from device on websocket channel by ' \
                          'timeout:{} seconds'.format(timeout)
        return False

    def wait_for_notification(self, device_id, resource_path, expected_value, timeout=30, assert_errors=False,
                              delay=1):
        """
        Wait for given device id + resource path + expected value to appear in WebSocket

        :param device_id: string
        :param resource_path: string
        :param expected_value: string (non-strings are converted with str())
        :param timeout: int
        :param assert_errors: boolean for user if to fail test case in case of expected notification not received
        :param delay: Delay to check notification
        :return: dict or fail the test case if assert_errors=True
        """
        wait = 0
        expected_value = str(expected_value)
        while wait <= timeout:
            for item in self.ws.events['notifications']:
                if item['ep'] == device_id and item['path'] == resource_path and \
                        base64.b64decode(item['payload']).decode('utf8') == expected_value:
                    return item
            sleep(delay)
            wait += delay
        if assert_errors:
            assert False, 'Failed to receive notification from device on websocket channel by timeout: {}'.format(
                timeout)
        return False

    def wait_for_resource_notifications(self, device_id, resource_path, timeout=30, assert_errors=False, delay=1):
        """
        Wait for given device id + resource path to appear in WebSocket (any payload value)

        :param device_id: string
        :param resource_path: string
        :param timeout: int
        :param assert_errors: boolean for user if to fail test case in case of expected notification not received
        :param delay: Delay to check notification
        :return: dict or fail the test case if assert_errors=True
        """
        wait = 0
        while wait <= timeout:
            for item in self.ws.events['notifications']:
                if item['ep'] == device_id and item['path'] == resource_path:
                    return item
            sleep(delay)
            wait += delay
        if assert_errors:
            assert False, 'Failed to receive notification from device on websocket channel by timeout: {}'.format(
                timeout)
        return False

    def wait_for_async_response(self, async_response_id, timeout=30, assert_errors=False):
        """
        Wait for given async-response to appear in WebSocket data

        :param async_response_id: string
        :param timeout: int
        :param assert_errors: boolean for user if to fail test case in case of expected response not received
        :return: dict or fail the test case if assert_errors=True
        """
        for _ in range(timeout):
            async_response = self.ws.async_responses.get(async_response_id)
            if async_response:
                return async_response
            sleep(1)
        if assert_errors:
            assert False, 'Failed to receive async response from device with async_id:{} on websocket channel by ' \
                          'timeout:{} seconds'.format(async_response_id, timeout)
        return False

    def wait_for_registration(self, device_id, timeout=30):
        """
        Wait for given device id registration to appear in WebSocket
        :param device_id: string
        :param timeout: int
        :return: False / dict
        """
        for _ in range(timeout):
            registration = self.check_registration(device_id)
            if registration:
                return registration
            sleep(1)
        return False

    def wait_for_registration_updates(self, device_id, timeout=30):
        """
        Wait for given device id registration update notification to appear in WebSocket
        :param device_id: string
        :param timeout: int
        :return: False / dict
        """
        for _ in range(timeout):
            registration = self.check_registration_updates(device_id)
            if registration:
                return registration
            sleep(1)
        return False

    def wait_for_registration_expiration(self, device_id, timeout=30):
        """
        Wait for given device id registration expiration notification to appear in WebSocket
        :param device_id: string
        :param timeout: int
        :return: False / dict
        """
        for _ in range(timeout):
            registration = self.check_registration_expiration(device_id)
            if registration:
                return registration
            sleep(1)
        return False

    def wait_for_deregistration(self, device_id, timeout=30):
        """
        Wait for given device id de-registration to appear in WebSocket
        :param device_id: string
        :param timeout: int
        :return: False / dict
        """
        for _ in range(timeout):
            deregistration = self.check_deregistration(device_id)
            if deregistration:
                return deregistration
            sleep(1)
        return False
{}'.format(e)) sleep(1) sleep(1) log.info('WebSocket input thread was stopped.') def _handle_thread(self): """ Runner's handle thread """ while self.run: data = self.message_queue.get() if data == {}: log.info('Received callback is empty') for notification_type,
#!/usr/bin/env python #TODO # add text colors and decorations # ############################################################################## # # Prerequisites # # ############################################################################## # # ############################################################################## # # Imports # # ############################################################################## import gmailConnect import dataManagement import sys import os import argparse import getpass import pymongo from itertools import chain from ConfigParser import SafeConfigParser # ############################################################################## # # Main # # ############################################################################## class spYDyishai(): def __init__(self): self.credentialslist_location = os.getcwd() self.credentialslist_filename = 'credentialslist.ini' self.db_location = os.getcwd() self.db_filename = 'resourceDB' self.credentialslist_dictionary = {} self.stdout_orig = sys.stdout class userInput(spYDyishai): def Credentials_Resorce(self): print ''' Please provide a set of username and password that are valid for Gmail login [!] user format > <EMAIL> ''' print '[-] Please type your username for the resource: ' resource_user = raw_input() print '[-] Please type your password for the resource: ' resource_pass = getpass.getpass() print '[-] Please retype your password for the resource:' resource_passRT = getpass.getpass() while resource_pass != resource_passRT: print '' print '[!] the passwords didn\'t match' print '[-] Please type your password for the resource: ' resource_pass = getpass.getpass() print '[-] Please retype your password for the resource:' resource_passRT = getpass.getpass() else: pass print 'Is the spelling correct for: ' +resource_user+ ' with password: '+resource_pass+' (Y/n)' resourceYN = raw_input().lower while resourceYN() not in ['y', 'Y', 'n', 'N', '']: print '[!] 
Please choose only "y", "n" or leave blank for default' resourceYN = raw_input().lower if resourceYN() in ['y','Y', '']: pass elif resourceYN() in ['n', 'N']: userInput.Credentials_Resorce(self) else: print 'You broke me :/' quit() self.credentialslist_dictionary.update({"username":resource_user, "password":resource_pass}) for i in str(resource_number): n = resource_number f = open(self.credentialslist_location+"/"+self.credentialslist_filename, 'a') sys.stdout = f print '[resource'+str(n)+']' n = n+1 for i in self.credentialslist_dictionary.keys(): print i +' = '+ self.credentialslist_dictionary[i] print '' sys.stdout = self.stdout_orig f.close() def Uresourcefile(self): print '[-] Please provide a full path for the credentialslist.ini file:' Ucredentialslist_location = raw_input() global userFilePath userFilePath = Ucredentialslist_location if os.path.isfile(Ucredentialslist_location+"/"+self.credentialslist_filename) == True: userInteraction.credentialslist_read(self) else: userInteraction.credentialslist_find(self) class userInteraction(spYDyishai, userInput): def wellcome(self): parser = argparse.ArgumentParser() parser.add_argument('--sh', help='print out a short help', action='store_true', default=False) parser.add_argument('--fh', help='print out the full extended help file', action='store_true', default=False) parser.add_argument('--p', help='set the current session proxy settings', action='store_true', default=False) parser.add_argument('--mi', help='manual input for new credentials and run', action='store_true', default=False) parser.add_argument('--fi', help='read new credentials from file and run', action='store_true', default=False) parser.add_argument('--dbi', help='run again from resource in th DB', action='store_true', default=False) parser.add_argument('--os', help='print the crwaler results only to screen', action='store_true', default=False) parser.add_argument('--of', help='write the crawler results to a file', action='store_true', 
default=False) parser.add_argument('--odb', help='write the crawler results to the DB', action='store_true', default=False) parser.add_argument('--rfdb', help='read results from the DB', action='store_true', default=False) parser.add_argument('--dfdb', help='delete results from the DB', action='store_true', default=False) parser.add_argument('--dadb', help='delete all the resources from the DB', action='store_true', default=False) parser.add_argument('--rff', help='read results from an output file', action='store_true', default=False) parser.add_argument('--dff', help='delete results from an output file', action='store_true', default=False) parser.add_argument('--daf', help='delete all results from an ouput file', action='store_true', default=False) args = parser.parse_args() if args.sh is True: userInteraction.short_help(self) elif args.fh is True: userInteraction.full_help(self) elif args.p is True: # TODO # set the current session proxy pass elif args.mi is True: userInput.Credentials_Resorce(self) # TODO # add run function elif args.fi is True: userInput.Uresourcefile(self) # TODO # add run function elif args.dbi is True: # TODO # add run function from DB pass elif args.os is True: # TODO # set ouput to screen pass elif args.of is True: # TODO # set output to file pass elif args.odb is True: # TODO # set output to DB pass elif args.rfdb is True: # TODO # call read from DB pass elif args.dfdb is True: # TODO # call delete from DB pass elif args.dadb is True: # TODO # call delete all DB pass elif args.rff is True: # TODO # call read from file pass elif args.dff is True: # TODO # call delete from file pass elif args.daf is True: # TODO # call delete all file pass def helpFile(self): print '~! spYDyisai helpfile !~'.center(75) print ''' For spYDyisai to work properly you'll need to supply some basic data: [*] - credentils - Are the username and password sets that the crawler will use to find additional credentials. 
[*] - # of resources - If you wnat to provide multiple resources just let spYDyisai know how may are you going to provide [*] - credentialslist - If you prefere you can provide all the configuration needed in a condig file which is super self explenatory instead useing the guided run. just edit credentialslist.ini If you choose to use the credentialslist.ini file to supply spYDyisai input, keep in mind that !!! after each tun the config file resets to it's default !!! in order to prevent credentials beeing saved in clear text. ''' def short_help(self): print '' print '' print '' print '~! Wellcome to spYDyisai !~' print ''' __ \ /|| \\\\ _ ___ ___ \/ || || |_|___ _ _ |__ |__| || || ||\/ | |__ |__| /_\ \/ __| | || ||_// || | __| | | | | || ''' print '[-] spYDyishai | The google based account credentials harverster [-]' print '' print ''' -- usage flags: -- ** run python spYDyishai.py with no flags for guided execution GENRAL * spYDyishai help --sh * spYDyishai help file --hf * set proxy --p USER INPUT * run manual input --mi * run from file --fi * run from DB --dbi SYSTEM OUTPUT * to screen --os * to flat file --of * to DB --odb MANAGE DATA * read from DB --rfdb * delete from DB --dfdb * delete all DB --dadb * read from file --rff * delete from file --dff * delete all file --daf ''' quit() def full_help(self): print '' print '[-] spYDyishai | The google based account credentials harverster [-]' print ''' -- Main help screen -- GENRAL ------ * set proxy --p | set the type:ip:port of your proxy USER INPUT ---------- [**] user and password format > <EMAIL>@domain.com:password [*] manual input --mi | takes a google username and password as an initial input [*] from file --fi | takes a list of google accounts from a ' ; ' separated file [*] from DB --dbi | takes a list of google accounts from an existing spYDyishai DB OUTPUT ------ [*] to screen --os | prints an ongoing status and results to STDOUT [*] to flat file --of | appends the results in to a flat file [*] to DB 
--odb | writes the results in to a mongoDB instance MANAGE DATA ----------- * read from DB --rfdb | read results from the DB * delete from DB --dfdb | delete results from the DB * delete all DB --dadb | delete all the resources from the DB * read from file --rff | read results from an output file * delete from file --dff | delete results from an output file * delete all file --daf | delete all results from an ouput file EXAMPLES -------- [*] python spYDyishai.py --mi <EMAIL>:password -os // this will start the credentials harvesting beginning with the manually supplied account // and will print all results directly to the screen (STDOUT) [*] python spYDyishai.py --mi <EMAIL>:password ; <EMAIL>:<PASSWORD> --of ~/spYDyishai_results // this will start the credentials harvesting beginning with the first manually supplied account // and will append all results to the given file [*] python spYDyishai.py --fi ~/accounts --fo ~/spYDyishai_results --p SOCKS:1.1.1.1:8080 // this will start the credentials harvesting form all listed accoutns form the flat file // and will append all results to the given file [*] python spYDyishai.py --dbi localhost:27017 --odb localhost:27018 // this will start the credentials harvesting form all listed accoutns form the database // and will write all results into a different database [*] python spYDyishai.py --rfdb localhost:27017 // this will read all the available resource from the DB and print them to the screen [*] python spYDyishai.py --dfdb localhost:27017 <EMAIL> // this will delete the <EMAIL> file from the DB [*] python spYDyishai.py --daf ~/spYDyishai_results // this will delete the content of the file ~/spYDyishai_results ''' quit() def guided_wellcome(self): print '' print '' print '' print '~! 
Wellcome to spYDyisai !~' print ''' __ \ /|| \\\\ _ ___ ___ \/ || || |_|___ _ _ |__ |__| || || ||\/ | |__ |__| /_\ \/ __| | || ||_// || | __| | | | | || ''' print ''' spYDishai is a gmail spider like crawler that seeks google saved credentials and allows pivoting from one account to another until the end of all times, just feed it with initial credentials and it will do the rest. spYDyisai will save and remember your links and credentials encrypted in a DB and reuse them when ever it's suposed to run. Just follow the instructions to get started.. ''' print '[?] Would you like to read the help file before we begin? (Y/n)' pHelp = raw_input().lower while pHelp() not in ['y', 'Y', 'n', 'N', '']: print '[!] Please choose only "y", "n" or leave blank for default' pHelp = raw_input().lower if pHelp() in ['y', 'Y', '']: userInteraction.helpFile(self) elif pHelp() in ['n', 'N']: userInteraction.guided_wellcome_options(self) else: print 'You broke me :/' quit() def guided_wellcome_options(self): print ''' [?] What would you like to do? [1] Provide crdentials [2] Read available resources from DB [3] Update DB resources [4] Delete resources from DB [5] Unleesh the spiders once again [6] Quit ''' base_option = raw_input().lower while base_option() not in ['1', '2', '3', '4', '5', '6']: print '[!] Please choone only from the available options 1-6' base_option = raw_input().lower if base_option() == '1': userInteraction.guide(self) # TODO # dataManagement.manageDB.findDB(self) # dataManagement.manageDB.writeToDB(self) # RESET MUST STAY LAST userInteraction.credentialslist_reset_Q(self) userInteraction.guided_wellcome_options(self) elif base_option() == '2': print '' print ''' [?] Would you like to list all the resources in you\'re DB? (N/tb/fd) Choose N - No, tb - Tables
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import multiprocessing import os from abc import ABC from copy import deepcopy from functools import partial from typing import Any, Callable, Collection, Dict, List, Optional, Tuple, Union from torch.utils.data import BatchSampler, DataLoader, RandomSampler, Sampler, SequentialSampler from torch.utils.data.dataset import IterableDataset from torch.utils.data.distributed import DistributedSampler import pytorch_lightning as pl from pytorch_lightning.accelerators import Accelerator from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper, UnrepeatedDistributedSampler from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector from pytorch_lightning.trainer.states import RunningStage from pytorch_lightning.trainer.supporters import CombinedLoader from pytorch_lightning.utilities import rank_zero_warn from pytorch_lightning.utilities.apply_func import apply_to_collection from pytorch_lightning.utilities.auto_restart import ( _capture_metadata_collate, CaptureIterableDataset, CaptureMapDataset, FastForwardSampler, ) from pytorch_lightning.utilities.data import has_iterable_dataset, has_len from pytorch_lightning.utilities.enums import DistributedType from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.imports import _fault_tolerant_training from 
pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.seed import pl_worker_init_function


class TrainerDataLoadingMixin(ABC):
    """Dataloader-related behavior mixed into the Trainer: worker-count sanity
    checks, worker seeding, and (distributed / fault-tolerant) sampler and
    dataloader re-instantiation."""

    # this is just a summary on variables used in this abstract class,
    # the proper values/initialisation should be done in child class
    val_check_interval: float
    tpu_local_core_rank: int
    train_dataloader: DataLoader
    num_training_batches: Union[int, float]
    val_check_batch: float
    val_dataloaders: Optional[List[DataLoader]]
    num_val_batches: List[Union[int, float]]
    test_dataloaders: Optional[List[DataLoader]]
    num_test_batches: List[Union[int, float]]
    limit_train_batches: Union[int, float]
    log_every_n_steps: int
    overfit_batches: Union[int, float]
    distributed_sampler_kwargs: dict
    accelerator: Accelerator
    accelerator_connector: AcceleratorConnector
    call_hook: Callable

    def _worker_check(self, dataloader: DataLoader, name: str) -> None:
        """Emit rank-zero warnings when ``num_workers`` is likely misconfigured
        for the current strategy (ddp_spawn interactions, too few workers)."""
        if not isinstance(dataloader, DataLoader):
            return

        using_spawn = self.accelerator_connector._distrib_type == DistributedType.DDP_SPAWN
        num_cpus = multiprocessing.cpu_count()

        # ddp_spawn + num_workers > 0 don't mix! tell the user
        if dataloader.num_workers > 0 and using_spawn:
            # checks for the attr persistent_workers available in pytorch >= 1.7
            if hasattr(dataloader, "persistent_workers"):
                if not dataloader.persistent_workers:
                    rank_zero_warn(
                        "num_workers>0, persistent_workers=False, and accelerator=ddp_spawn"
                        " may result in data loading bottlenecks."
                        " Consider setting persistent_workers=True"
                        " (this is a limitation of Python .spawn() and PyTorch)"
                    )
            else:
                rank_zero_warn(
                    "num_workers>0 and accelerator=ddp_spawn do not mix well"
                    " and may result in data loading bottlenecks."
                    " Consider setting accelerator=ddp to use num_workers>0"
                    " (this is a limitation of Python .spawn() and PyTorch)"
                )
        elif dataloader.num_workers == 0 and using_spawn:
            # checks for the attr persistent_workers available in pytorch >= 1.7
            if hasattr(dataloader, "persistent_workers"):
                if not dataloader.persistent_workers:
                    rank_zero_warn(
                        "accelerator=ddp_spawn and num_workers=0 may result in data loading bottlenecks."
                        " Consider setting num_workers>0 and persistent_workers=True"
                    )
            else:
                rank_zero_warn(
                    "accelerator=ddp_spawn and num_workers=0 may result in data loading bottlenecks."
                    " Consider setting accelerator=ddp and set num_workers>0"
                )
        elif dataloader.num_workers <= 2 < num_cpus and not using_spawn:
            rank_zero_warn(
                f"The dataloader, {name}, does not have many workers which may be a bottleneck."
                " Consider increasing the value of the `num_workers` argument`"
                f" (try {num_cpus} which is the number of cpus on this machine)"
                " in the `DataLoader` init to improve performance."
            )

    def auto_add_worker_init_fn(self, dataloader: DataLoader) -> None:
        """Install the rank-aware worker seeding function when PL_SEED_WORKERS
        is set and the user has not supplied their own ``worker_init_fn``."""
        if int(os.environ.get("PL_SEED_WORKERS", 0)) and dataloader.worker_init_fn is None:
            dataloader.worker_init_fn = partial(pl_worker_init_function, rank=self.global_rank)

    def _requires_distributed_sampler(self, dataloader) -> bool:
        """True when a DistributedSampler must be injected: sampler replacement
        is enabled, we run distributed, no DistributedSampler is set yet, and
        the dataset is not iterable-style."""
        return (
            self.accelerator_connector.replace_sampler_ddp
            and self.accelerator_connector.is_distributed
            and not isinstance(dataloader.sampler, DistributedSampler)
            and not has_iterable_dataset(dataloader)
        )

    def prepare_dataloader(self, dataloader: Any, shuffle: bool, mode: Optional[RunningStage] = None) -> Any:
        """This function handles to following functionalities:

        - Injecting a `DistributedDataSampler` into the `DataLoader` if on a distributed environment
        - Wrapping the datasets and samplers into fault-tolerant components
        """
        if isinstance(dataloader, CombinedLoader):
            # apply `prepare_dataloader` on all the collection of loaders
            dataloader.loaders = apply_to_collection(
                dataloader.loaders, DataLoader, self.prepare_dataloader, shuffle, mode=mode
            )
            return dataloader

        # don't do anything if it's not a dataloader
        if not isinstance(dataloader, DataLoader):
            return dataloader

        if (
            _fault_tolerant_training()  # injects components to track the state
            or self._requires_distributed_sampler(dataloader)  # sets the distributed sampler
            or mode == RunningStage.PREDICTING  # to track indices for the predictions
            or self.accelerator_connector.use_ipu  # IPUs use a custom `DataLoader`
        ):
            sampler = self._resolve_sampler(dataloader, shuffle=shuffle, mode=mode)
            dataloader = self._update_dataloader(dataloader, sampler, mode=mode)

        return dataloader

    def _resolve_sampler(self, dataloader: DataLoader, shuffle: bool, mode: Optional[RunningStage] = None) -> Sampler:
        """Return the sampler to use: a DistributedSampler when required (only
        default Sequential/Random samplers may be replaced), otherwise the
        dataloader's existing sampler."""
        if self._requires_distributed_sampler(dataloader):
            if not isinstance(dataloader.sampler, (SequentialSampler, RandomSampler)):
                raise MisconfigurationException(
                    "You seem to have configured a sampler in your DataLoader. This will be replaced "
                    " by `DistributedSampler` since `replace_sampler_ddp` is True and you are using"
                    " distributed training. Either remove the sampler from your DataLoader or set"
                    " `replace_sampler_ddp=False` if you want to use your custom sampler."
                )
            return self._get_distributed_sampler(
                dataloader, shuffle, mode=mode, overfit_batches=self.overfit_batches, **self.distributed_sampler_kwargs
            )

        return dataloader.sampler

    @staticmethod
    def _dataloader_init_kwargs_resolve_sampler(
        dataloader: DataLoader, sampler: Optional[Sampler], mode: Optional[RunningStage] = None
    ) -> Dict[str, Any]:
        """This function is used to handle the sampler, batch_sampler arguments associated within a DataLoader
        for its re-instantiation.

        If the dataloader is being used for prediction, the sampler will be wrapped into an `IndexBatchSamplerWrapper`,
        so Lightning can keep track of its indices. If fault tolerant training is enabled, the sampler will be wrapped
        into a `FastForwardSampler`.
        """
        batch_sampler = getattr(dataloader, "batch_sampler")
        is_predicting = mode == RunningStage.PREDICTING
        # checking the batch sampler type is different than PyTorch default.
        if (batch_sampler is not None and type(batch_sampler) is not BatchSampler) or is_predicting:
            # Re-instantiate the custom batch sampler around the new sampler.
            # NOTE(review): assumes the custom batch sampler's __init__ accepts
            # (sampler, batch_size=..., drop_last=...) like BatchSampler.
            batch_sampler = type(batch_sampler)(
                sampler,
                batch_size=batch_sampler.batch_size,
                drop_last=(False if is_predicting else batch_sampler.drop_last),
            )
            if is_predicting:
                batch_sampler = IndexBatchSamplerWrapper(batch_sampler)

            if _fault_tolerant_training():
                fast_forward_sampler = batch_sampler = FastForwardSampler(batch_sampler)
                fast_forward_sampler.setup(dataloader_batch_size=1)

            # batch_sampler mode: DataLoader requires sampler=None, batch_size=1, etc.
            return {
                "sampler": None,
                "shuffle": False,
                "batch_sampler": batch_sampler,
                "batch_size": 1,
                "drop_last": False,
            }

        if _fault_tolerant_training():
            fast_forward_sampler = sampler = FastForwardSampler(sampler)
            fast_forward_sampler.setup(dataloader_batch_size=dataloader.batch_size)

        return {"sampler": sampler, "shuffle": False, "batch_sampler": None}

    @staticmethod
    def _get_dataloader_init_kwargs(
        dataloader: DataLoader, sampler: Optional[Sampler], mode: Optional[RunningStage] = None
    ) -> Dict[str, Any]:
        """Reconstruct, via introspection, the keyword arguments needed to
        re-instantiate ``dataloader`` with ``sampler`` injected; raises
        MisconfigurationException when the class cannot be re-instantiated."""
        if not isinstance(dataloader, DataLoader):
            raise ValueError(f"The dataloader {dataloader} needs to subclass `torch.utils.data.DataLoader`")

        # get the dataloader instance attributes
        attrs = {k: v for k, v in vars(dataloader).items() if not k.startswith("_")}
        # not part of `vars`
        attrs["multiprocessing_context"] = dataloader.multiprocessing_context

        # get the dataloader instance `__init__` parameters
        params = dict(inspect.signature(dataloader.__init__).parameters)
        has_variadic_kwargs = any(p.kind is p.VAR_KEYWORD for p in params.values())
        if has_variadic_kwargs:
            # if the signature takes **kwargs, assume they will be passed down with `super().__init__(**kwargs)`
            params.update(inspect.signature(DataLoader.__init__).parameters)
            del params["self"]

        # keep only the params whose default is different to the current attr value
        non_defaults = {name for name, p in params.items() if name in attrs and p.default != attrs[name]}
        # add `dataset` as it might have been replaced with `*args`
        non_defaults.add("dataset")

        # kwargs to re-construct the dataloader
        dl_kwargs = {k: v for k, v in attrs.items() if k in non_defaults}
        dl_kwargs.update(
            TrainerDataLoadingMixin._dataloader_init_kwargs_resolve_sampler(dataloader, sampler, mode=mode)
        )

        required_args = {
            p.name
            for p in params.values()
            if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
            and p.default is p.empty
            and p.name not in dl_kwargs
        }
        # the dataloader has required args which we could not extract from the existing attributes
        if required_args:
            required_args = sorted(required_args)
            dataloader_cls_name = dataloader.__class__.__name__
            raise MisconfigurationException(
                f"Trying to inject `DistributedSampler` into the `{dataloader_cls_name}` instance. "
                "This would fail as some of the `__init__` arguments are not available as instance attributes. "
                f"The missing attributes are {required_args}. "
                f"HINT: If you wrote the `{dataloader_cls_name}` class, define `self.missing_arg_name` or "
                "manually add the `DistributedSampler` as: "
                f"`{dataloader_cls_name}(dataset, sampler=DistributedSampler(dataset))`."
            )

        if not has_variadic_kwargs:
            # the dataloader signature does not allow keyword arguments that need to be passed
            missing_kwargs = dl_kwargs.keys() - params.keys()
            if missing_kwargs:
                missing_kwargs = sorted(missing_kwargs)
                dataloader_cls_name = dataloader.__class__.__name__
                raise MisconfigurationException(
                    f"Trying to inject `DistributedSampler` into the `{dataloader_cls_name}` instance. "
                    "This would fail as it doesn't expose all its attributes in the `__init__` signature. "
                    f"The missing arguments are {missing_kwargs}. "
                    f"HINT: If you wrote the `{dataloader_cls_name}` class, add the `__init__` arguments or "
                    "manually add the `DistributedSampler` as: "
                    f"`{dataloader_cls_name}(dataset, sampler=DistributedSampler(dataset))`."
                )

        # iterable-style datasets cannot take a sampler / batch_sampler
        if isinstance(dl_kwargs["dataset"], IterableDataset):
            dl_kwargs["batch_sampler"] = None
            dl_kwargs["sampler"] = None

        if _fault_tolerant_training():
            if isinstance(dl_kwargs["dataset"], IterableDataset):
                # wrap the `IterableDataset` into a `CaptureIterableDataset` to record sampler states.
                dl_kwargs["dataset"] = CaptureIterableDataset(dataset=dl_kwargs["dataset"])
            elif len(dl_kwargs["dataset"]):
                dl_kwargs["dataset"] = CaptureMapDataset(dataset=dl_kwargs["dataset"])
            else:
                raise MisconfigurationException(
                    "This shouldn't happen, please open an issue on Lightning Github repository."
                )

        return dl_kwargs

    @staticmethod
    def _update_dataloader(dataloader: DataLoader, sampler: Sampler, mode: Optional[RunningStage] = None) -> DataLoader:
        """Re-instantiate ``dataloader`` (same class) with ``sampler`` injected."""
        dl_kwargs = TrainerDataLoadingMixin._get_dataloader_init_kwargs(dataloader, sampler, mode=mode)
        dl_cls = type(dataloader)
        dataloader = dl_cls(**dl_kwargs)
        return dataloader

    @staticmethod
    def _get_distributed_sampler(
        dataloader: DataLoader, shuffle: bool, overfit_batches:
tile in self.faces[LEFT].get_tiles(): output += c2f_map[tile] for tile in self.faces[BACK].get_tiles(): output += c2f_map[tile] return output # End def def print_cube(self): space = " " output = "" # Print Top face = self.faces[TOP] for row in range(self.dimension): output += space * self.dimension output += face.get_row_str(row) + "\n" # Print Middle for row in range(self.dimension): output += self.faces[LEFT].get_row_str(row) + self.faces[FRONT].get_row_str(row) + self.faces[RIGHT].get_row_str(row) + self.faces[BACK].get_row_str(row) + "\n" # Print Bottom face = self.faces[BOTTOM] for row in range(self.dimension): output += space * self.dimension output += face.get_row_str(row) + "\n" print(output) # End def def image_cube(self): color_dict = {'G': 'green', 'B': 'blue', 'O': 'orange', 'R': 'red', 'Y': 'yellow', 'W': 'white'} face_dict = {0 : TOP, 1 : LEFT, 2 : FRONT, 3 : RIGHT, 4 : BACK, 5 : BOTTOM} origx_dict = {0 : 250, 1 : 100, 2 : 250, 3 : 400, 4 : 550, 5 : 250} origy_dict = {0 : 100, 1 : 250, 2 : 250, 3 : 250, 4 : 250, 5 : 400} img = Image.new('RGB', (800, 800), color = 'white') d = ImageDraw.Draw(img) for i in range(6): face = self.faces[face_dict[i]] originx = origx_dict[i] originy = origy_dict[i] face_positions = [[originx, originy], [originx + 50, originy], [originx + 100, originy], [originx, originy + 50], [originx + 50, originy + 50], [originx + 100, originy + 50], [originx, originy + 100], [originx + 50, originy + 100], [originx + 100, originy + 100]] for row in range(self.dimension): block = face.get_row_str(row) d.rectangle(face_positions[row*3] + [x + 48 for x in face_positions[row*3]], fill=color_dict[block[1]], outline='black') d.rectangle(face_positions[row*3 + 1] + [x + 48 for x in face_positions[row*3 + 1]], fill=color_dict[block[3]], outline='black') d.rectangle(face_positions[row*3 + 2] + [x + 48 for x in face_positions[row*3 + 2]], fill=color_dict[block[5]], outline='black') img.save('pil_text_font.png') def verify_cube(self): ret_val = 
True if self.cube_complete(): counts = self.get_color_counts() if (counts[COLOR_UNKNOWN] != 0): ret_val = False for count in counts.keys(): if (count != COLOR_UNKNOWN): if (counts[count] != 9): ret_val = False else: ret_val = False if not ret_val: print("ERROR scanning cube. Please re-scan.") return ret_val # End def # End class #----------------------------------------------------------------------------- # Function Definitions #----------------------------------------------------------------------------- def capture_image(camera, width, height, debug=False, verbose=False): # Set image capture size # camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width) # camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height) camera.set(cv2.CAP_PROP_AUTOFOCUS, 1) # turn the autofocus off # os.system(uvcdynctrl -s "white balance temperature, auto" 0 ) # camera.set(cv2.CAP_PROP_BRIGHTNESS, 0.4) # camera.set(cv2.CAP_PROP_CONTRAST, 0.1) # camera.set(cv2.CAP_PROP_SATURATION, 0.4) # camera.set(cv2.CAP_PROP_HUE, 0) # camera.set(cv2.CAP_PROP_GAIN, 1) camera.set(cv2.CAP_PROP_FRAME_WIDTH, width) camera.set(cv2.CAP_PROP_FRAME_HEIGHT, height) #camera.set(cv2.CAP_PROP_AUTOFOCUS, 1) # turn the autofocus off time.sleep(2) # Capture image ret, frame = camera.read() # Wait for a bit # time.sleep(0.2) # Return image or None if (ret): return frame else: return None # End def def match_color(color, verbose=False): ret_val = COLOR_UNKNOWN '''#Eucledian distance method dist = [] for i in range(0, 6): dist.append([(color[0] - RGB_POINT[i][0]) ** 2 + (color[1] - RGB_POINT[i][1]) ** 2 + (color[2] - RGB_POINT[i][2]) ** 2]) ret_val = COLOR_DICT[dist.index(min(dist))]''' color_rgb = np.uint8([[[int(color[0]), int(color[1]), int(color[2])]]]) color_lab = cv2.cvtColor(color_rgb, cv2.COLOR_BGR2LAB) color_hsv = cv2.cvtColor(color_rgb, cv2.COLOR_BGR2HSV) for i in range(0, 6): if abs(color_lab[0][0][1] - LAB_POINT[i][1]) < 17 and abs(color_lab[0][0][2] - LAB_POINT[i][2]) < 17: ret_val = COLOR_DICT[i] if ret_val == ORANGE 
or ret_val == YELLOW or ret_val == RED: if abs(color_rgb[0][0][1] - RGB_POINT[4][1]) < 20: ret_val = ORANGE elif abs(color_rgb[0][0][1] - RGB_POINT[5][1]) < 20: ret_val = RED elif abs(color_rgb[0][0][1] - RGB_POINT[1][1]) < 20: ret_val = YELLOW break # Check color based on thresholds # if ((abs(color[0] - color[1]) < 40) and (abs(color[0] - color[2]) < 40) and # (abs(color[1] - color[2]) < 40)): '''if ((abs(color[0] - color[1]) < 40) and (abs(color[1] - color[2]) < 40)): if (color[1] > 170): ret_val = WHITE else: ret_val = BLUE elif ((color[0] < 30) and (color[2] > 100)): if (color[1] < 40): ret_val = RED elif (color[1] < 100): ret_val = ORANGE elif (color[2] > 140): ret_val = YELLOW elif ((color[0] < 100) and (color[1] > 100) and (color[2] > 140)): ret_val = YELLOW elif ((color[0] < 130) and (color[1] > 150) and (color[2] < 170)): ret_val = GREEN elif ((color[0] > 100) and (color[1] < 140) and (color[2] < 100)): ret_val = BLUE''' """ # Check color based on thresholds if ((color[0] > 100) and (color[1] > 100) and (color[2] > 100)): ret_val = WHITE if ((color[0] < 100) and (color[1] > 100) and (color[2] > 100)): ret_val = YELLOW if ((color[0] < 30) and (color[1] < 30) and (color[2] > 100)): ret_val = RED if ((color[0] < 30) and (color[1] < 100) and (color[2] > 100)): ret_val = ORANGE if ((color[0] < 120) and (color[1] > 100) and (color[2] > 80)): ret_val = GREEN if ((color[0] > 100) and (color[1] < 140) and (color[2] < 100)): ret_val = BLUE """ """ # Check color based on thresholds # NOTE: This method can have issues if there is any marks on the cube # Could add special case for WHITE center if (color[0] < BLUE_THRESHOLD_0): if (color[1] < GREEN_THRESHOLD_0): ret_val = RED elif (color[1] > GREEN_THRESHOLD_1): ret_val = YELLOW else: ret_val = ORANGE elif(color[0] > BLUE_THRESHOLD_1): if (color[2] < RED_THRESHOLD_0): ret_val = BLUE else: ret_val = WHITE else: if (color[2] < RED_THRESHOLD_1): ret_val = GREEN else: ret_val = YELLOW """ """ # This method is not that 
reliable as lighting conditions / cubes change # # Check each color for a match for c in COLORS.keys(): if ((color[0] > (COLORS[c][0] - COLOR_OFFSET)) and (color[0] < (COLORS[c][0] + COLOR_OFFSET)) and (color[1] > (COLORS[c][1] - COLOR_OFFSET)) and (color[1] < (COLORS[c][1] + COLOR_OFFSET)) and (color[2] > (COLORS[c][2] - COLOR_OFFSET)) and (color[2] < (COLORS[c][2] + COLOR_OFFSET))): return c """ # Print color choice if (verbose): print("Color RGB {0}: {1}, {2}, {3}".format(ret_val, color[0], color[1], color[2])) print("Color LAB {0}: {1}, {2}, {3}".format(ret_val, color_lab[0][0][0], color_lab[0][0][1], color_lab[0][0][2])) return ret_val # End def def show_cube_alignment(image): # Update image for tile in TILES: (x,y,w,h) = (tile[0], tile[1], tile[2], tile[3]) cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2) print('Cube Alignment') cv2.imwrite('cube_alignment.png', image) # End def def process_cube_image(cube, camera, image, debug=False, verbose=False): # Performance monitor if (verbose): print('Processing Image') start_time = time.time() # Write original image if (debug): print('Write original image') cv2.imwrite('cube_01_orig.png', image) # Convert image to Grayscale if (False): gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if (debug): print('Convert Grayscale image') cv2.imwrite('cube_02_gray.png', gray) else: gray = image # Blur image to reduce background noise if (False): blur = cv2.GaussianBlur(gray, (7, 7), 1.5, 1.5) if (debug): print('Bluring image') cv2.imwrite('cube_03_blur.png', blur) else: blur = gray # Perform edge detection if (False): edges = cv2.Canny(blur, 0, 30, 3) if (debug): print('Edges image') cv2.imwrite('cube_04_edge.png', edges) else: edges = blur # Perform edge dilation if (False): kernel = np.ones((2,2), np.uint8) dilated = cv2.dilate(edges, kernel, iterations=1) if (debug): print('Dilated Edges image') cv2.imwrite('cube_05_dilated.png', dilated) else: dilated = edges # Find contours if (False): squares = [] contours, 
hierarchy = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Test each contour for contour in contours: area = cv2.contourArea(contour) if ((area > AREA_MIN) and (area < AREA_MAX)): x, y, w, h = cv2.boundingRect(contour) # Contour must be a square if (abs(w - h) < HIGHT_WIDTH_DIFF_MAX): squares.append((x, y, w, h)) # Remove duplicates rectangles tiles = [] offset = DUPLICATE_OFFSET for square in squares: append = True for tile in tiles: if ((square[0] >= (tile[0] - offset)) and (square[0] <= (tile[0] + offset)) and (square[1] >= (tile[1] - offset)) and (square[1] <= (tile[1] + offset))): append = False if (append): tiles.append(square) if (debug): # Update image for tile in tiles: (x,y,w,h) = (tile[0], tile[1], tile[2], tile[3]) cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2) print("{0} {1} {2} {3}".format(x, y, w, h)) print('Countours image') cv2.imwrite('cube_06_tiles.png', image) else: tiles
<gh_stars>0 from copy import deepcopy from collections import namedtuple from contextlib import contextmanager try: import sqlite3 except ImportError: sqlite3 = None try: import psycopg2 except ImportError: psycopg2 = None try: import MySQLdb as mysql except ImportError: mysql = None __version__ = "0.4.4" __all__ = [ "Clause", "ClauseError", "Database", "DatabaseError", "EstoultError", "Field", "FieldError", "fn", "op", "Query", "QueryError", ] class EstoultError(Exception): pass class ClauseError(EstoultError): pass class FieldError(EstoultError): pass class QueryError(EstoultError): pass class DatabaseError(EstoultError): pass _sql_ops = { "eq": "=", "lt": "<", "le": "<=", "gt": ">", "ge": ">=", "ne": "<>", } def _parse_arg(arg): if isinstance(arg, Clause): return arg elif isinstance(arg, Field): return str(arg), () elif isinstance(arg, Query): return arg._query, arg._params elif isinstance(arg, list) or isinstance(arg, tuple): placeholders = ", ".join(["%s"] * len(arg)) return placeholders, tuple(arg) return "%s", (arg,) def _parse_args(func): def wrapper(*args): return func(*[_parse_arg(a) for a in args]) return wrapper def _strip(string): string = string.rstrip(" ,") if string.endswith("and"): string = string[:-3] return string def _make_op(operator): @_parse_args def wrapper(lhs, rhs): return Clause(f"({lhs[0]}) {operator} ({rhs[0]})", tuple(lhs[1] + rhs[1])) return wrapper def _make_fn(name): def wrapper(*args): return Clause(f"{name}({str(', '.join([str(a) for a in args]))})", ()) return wrapper class ClauseMetaclass(type): def __new__(cls, clsname, bases, attrs): # Add op overloading for name, operator in _sql_ops.items(): attrs[f"__{name}__"] = _make_op(operator) return super(ClauseMetaclass, cls).__new__(cls, clsname, bases, attrs) class Clause(namedtuple("Clause", ["clause", "params"]), metaclass=ClauseMetaclass): def __str__(self): return self.clause def __hash__(self): return hash(str(self)) def __eq__(self, comp): return str(self) == comp class 
OperatorMetaclass(type): def __new__(cls, clsname, bases, attrs): for name, operator in _sql_ops.items(): attrs[name] = _make_op(operator) return super(OperatorMetaclass, cls).__new__(cls, clsname, bases, attrs) class op(metaclass=OperatorMetaclass): @classmethod def add_op(cls, name, op): def func(lhs, rhs): fn = _make_op(op) return fn(lhs, rhs) setattr(cls, name, staticmethod(func)) @staticmethod @_parse_args def or_(lhs, rhs): return Clause(f"(({_strip(lhs[0])}) or ({_strip(rhs[0])}))", (lhs[1] + rhs[1])) @staticmethod @_parse_args def and_(lhs, rhs): return Clause(f"(({_strip(lhs[0])}) and ({_strip(rhs[0])}))", (lhs[1] + rhs[1])) @staticmethod @_parse_args def in_(lhs, rhs): return Clause(f"(({_strip(lhs[0])}) in ({_strip(rhs[0])}))", (lhs[1] + rhs[1])) @staticmethod @_parse_args def like(lhs, rhs): return Clause(f"({lhs[0]}) like ({rhs[0]})", (lhs[1] + rhs[1])) @staticmethod @_parse_args def ilike(lhs, rhs): # Does a case insensitive `like`. Only postgres has this operator, # but we can hack it together for the others if psycopg2: return Clause(f"({lhs[0]}) ilike ({rhs[0]})", (lhs[1] + rhs[1])) return Clause(f"lower({lhs[0]}) like lower({rhs[0]})", (lhs[1] + rhs[1])) @staticmethod @_parse_args def not_(arg): return Clause(f"not ({arg[0]})", (arg[1])) @staticmethod @_parse_args def is_null(arg): return Clause(f"({arg[0]}) is null", (arg[1])) @staticmethod @_parse_args def not_null(arg): return Clause(f"({arg[0]}) is not null", (arg[1])) class FunctionMetaclass(type): sql_fns = [ "count", "sum", "avg", "ceil", "distinct", "concat", ] def __new__(cls, clsname, bases, attrs): for f in cls.sql_fns: attrs[f] = _make_fn(f) return super(FunctionMetaclass, cls).__new__(cls, clsname, bases, attrs) class fn(metaclass=FunctionMetaclass): @classmethod def add_fn(cls, name, sql_fn): def func(*args): fn = _make_fn(sql_fn) return fn(*args) setattr(cls, name, staticmethod(func)) @staticmethod def alias(lhs, rhs): s, p = _parse_arg(lhs) return Clause(f"{s} as {rhs}", tuple(p)) 
@staticmethod def cast(lhs, rhs): s, p = _parse_arg(lhs) return Clause(f"cast({s} as {rhs})", tuple(p)) @staticmethod def wild(schema): return Clause(f"{schema.__tablename__}.*", ()) class FieldMetaclass(type): def __new__(cls, clsname, bases, attrs): # Add op overloading for name, operator in _sql_ops.items(): attrs[f"__{name}__"] = _make_op(operator) return super(FieldMetaclass, cls).__new__(cls, clsname, bases, attrs) class Field(metaclass=FieldMetaclass): def __init__(self, type, name, **kwargs): self.type = type self.name = name self.caster = kwargs.get("caster") self.null = kwargs.get("null", True) self.default = kwargs.get("default") self.primary_key = kwargs.get("primary_key") is True @property def full_name(self): return f"{self.schema.__tablename__}.{self.name}" def __str__(self): return self.full_name def __hash__(self): return hash(str(self)) def __eq__(self, comp): return str(self) == comp class SchemaMetaclass(type): def __new__(cls, clsname, bases, attrs): # Deepcopy inherited fields for base in bases: at = dir(base) for a in at: f = getattr(base, a) if isinstance(f, Field): attrs[a] = deepcopy(f) c = super(SchemaMetaclass, cls).__new__(cls, clsname, bases, attrs) # Add schema to fields for key in dir(c): f = getattr(c, key) if isinstance(f, Field): f.schema = c return c @property def fields(cls): return [ getattr(cls, key) for key in dir(cls) if isinstance(getattr(cls, key), Field) ] @property def pk(cls): pk = None for field in cls.fields: if field.primary_key is True: return field if field.name == "id": pk = field return pk def __getitem__(cls, item): return getattr(cls, item) class Schema(metaclass=SchemaMetaclass): _database_ = None __tablename__ = None @classmethod def _cast(cls, updating, row): # Allow you to use a Field as key for key, value in list(row.items()): if isinstance(key, Field): row[key.name] = value else: row[key] = value changeset = {} for field in cls.fields: value = None if field.default is not None: value = field.default try: 
value = row[field.name] except KeyError: if updating is True or field.name == cls.pk.name: continue if value is not None: value = ( field.type(value) if field.caster is None else field.caster(value) ) changeset[field.name] = value return changeset @classmethod def _validate(cls, updating, row): changeset = {} for field in cls.fields: try: value = row[field.name] except KeyError: continue if field.null is False and value is None and updating is True: raise FieldError(f"{str(field)} cannot be None") changeset[field.name] = value return changeset @classmethod def casval(cls, row, updating): changeset = cls._cast(updating, row) changeset = cls._validate(updating, changeset) # A user specified validation function validate_func = getattr(cls, "validate", lambda x: x) changeset = validate_func(changeset) return changeset @classmethod def insert(cls, obj): changeset = cls.casval(obj, updating=False) params = list(changeset.values()) fields = ", ".join(changeset.keys()) placeholders = ", ".join(["%s"] * len(changeset)) sql = f"insert into {cls.__tablename__} (%s) values (%s)\n" % ( fields, placeholders, ) if psycopg2 is not None: sql += f"returning {cls.pk.name}\n" return cls._database_.insert(_strip(sql), params) @classmethod def update(cls, old, new): # This updates a single row only, if you want to update several # use `update` in `Query` changeset = cls.casval({**old, **new}, updating=True) sql = f"update {cls.__tablename__} set " params = [] for key, value in changeset.items(): sql += f"{key} = %s, " params.append(value) sql = f"{_strip(sql)} where " for key, value in old.items(): sql += f"{key} = %s and " params.append(value) return cls._database_.sql(_strip(sql), params) @classmethod def update_by_pk(cls, id, new): return cls.update({cls.pk.name: id}, new) @classmethod def delete(cls, row): # Deletes single row - look at `Query` for batch sql = f"delete from {cls.__tablename__} where " params = [] for key, value in row.items(): sql += f"{key} = %s and " 
params.append(value) return cls._database_.sql(_strip(sql), params) @classmethod def delete_by_pk(cls, id, new): return cls.delete({cls.pk.name: id}, new) class QueryMetaclass(type): sql_joins = [ "inner join", "left join", "left outer join", "right join", "right outer join", "full join", "full outer join", ] @staticmethod def make_join_fn(join_type): def join_fn(self, schema, on): q = f"{str(on[0])} = {str(on[1])}" self._add_node(f"{join_type} {schema.__tablename__} on {q}", ()) return self return join_fn def __new__(cls, clsname, bases, attrs): for join_type in cls.sql_joins: attrs[join_type.replace(" ", "_")] = QueryMetaclass.make_join_fn(join_type) return super(QueryMetaclass, cls).__new__(cls, clsname, bases, attrs) Node = namedtuple("Node", ["node", "params"]) class Query(metaclass=QueryMetaclass): def __init__(self, schema): self.schema = schema self._method = None self._nodes = [] def _add_node(self, node, params): self._nodes.append(Node(_strip(node), params)) @property def _query(self): return " ".join([x.node for x in self._nodes]) @property def _params(self): return tuple([p for x in self._nodes for p in x.params]) def select(self, *args): self._method = "select" query = "" params = [] if len(args) < 1: query += "*" else: for arg in args: if isinstance(arg, Clause): string, p = arg query += f"{string}, " params.extend(p) else: query += f"{arg}, " self._add_node( f"select {_strip(query)} from {self.schema.__tablename__}", params ) return self def update(self, changeset): self._method = "sql" changeset = self.schema.casval(changeset, updating=True) query = "" params = [] for key, value in changeset.items(): query += f"{key} = %s, " params.append(value) self._add_node(f"update {self.schema.__tablename__} set {query}", params) return self def delete(self): self._method = "sql" self._add_node(f"delete from {self.schema.__tablename__}", ()) return self def get(self, *args): self.select(*args) self._method = "get" return self def get_or_none(self, *args): 
self.select(*args) self._method = "get_or_none" return self def union(self): self._add_node("union", ()) return self def where(self, *clauses): query = "" params = [] for clause in clauses: string, p = clause # We can always add an `and` to the end cus it get stripped off ;) query += f"{string} and " params.extend(p) self._add_node(f"where {query}", params) return self def limit(self, *args): # Example: .limit(1) or limit(1, 2) if len(args) == 1: self._add_node("limit %s", (args,)) elif len(args) == 2: # `offset` works in mysql and postgres self._add_node("limit %s offset %s", args) else: raise QueryError("`limit` has too many arguments") return self def order_by(self, *args): # Example: .order_by(Frog.id, {Frog.name: "desc"}) query = "order by " params = [] for a in args: v = None
'EncryptedID' c_namespace = NAMESPACE c_children = EncryptedElementType_.c_children.copy() c_attributes = EncryptedElementType_.c_attributes.copy() c_child_order = EncryptedElementType_.c_child_order[:] c_cardinality = EncryptedElementType_.c_cardinality.copy() def encrypted_id_from_string(xml_string): return saml2.create_class_from_xml_string(EncryptedID, xml_string) class Issuer(NameIDType_): """The urn:oasis:names:tc:SAML:2.0:assertion:Issuer element """ c_tag = 'Issuer' c_namespace = NAMESPACE c_children = NameIDType_.c_children.copy() c_attributes = NameIDType_.c_attributes.copy() c_child_order = NameIDType_.c_child_order[:] c_cardinality = NameIDType_.c_cardinality.copy() def issuer_from_string(xml_string): return saml2.create_class_from_xml_string(Issuer, xml_string) class AssertionIDRef(SamlBase): """The urn:oasis:names:tc:SAML:2.0:assertion:AssertionIDRef element """ c_tag = 'AssertionIDRef' c_namespace = NAMESPACE c_value_type = {'base': 'NCName'} c_children = SamlBase.c_children.copy() c_attributes = SamlBase.c_attributes.copy() c_child_order = SamlBase.c_child_order[:] c_cardinality = SamlBase.c_cardinality.copy() def assertion_id_ref_from_string(xml_string): return saml2.create_class_from_xml_string(AssertionIDRef, xml_string) class AssertionURIRef(SamlBase): """The urn:oasis:names:tc:SAML:2.0:assertion:AssertionURIRef element """ c_tag = 'AssertionURIRef' c_namespace = NAMESPACE c_value_type = {'base': 'anyURI'} c_children = SamlBase.c_children.copy() c_attributes = SamlBase.c_attributes.copy() c_child_order = SamlBase.c_child_order[:] c_cardinality = SamlBase.c_cardinality.copy() def assertion_uri_ref_from_string(xml_string): return saml2.create_class_from_xml_string(AssertionURIRef, xml_string) class SubjectConfirmationDataType_(SamlBase): """The urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmationDataType element """ c_tag = 'SubjectConfirmationDataType' c_namespace = NAMESPACE c_children = SamlBase.c_children.copy() c_attributes = 
SamlBase.c_attributes.copy() c_child_order = SamlBase.c_child_order[:] c_cardinality = SamlBase.c_cardinality.copy() c_attributes['NotBefore'] = ('not_before', 'dateTime', False) c_attributes['NotOnOrAfter'] = ('not_on_or_after', 'dateTime', False) c_attributes['Recipient'] = ('recipient', 'anyURI', False) c_attributes['InResponseTo'] = ('in_response_to', 'NCName', False) c_attributes['Address'] = ('address', 'string', False) c_any = {"namespace": "##any", "processContents": "lax", "minOccurs": "0", "maxOccurs": "unbounded"} c_any_attribute = {"namespace": "##other", "processContents": "lax"} def __init__(self, not_before=None, not_on_or_after=None, recipient=None, in_response_to=None, address=None, text=None, extension_elements=None, extension_attributes=None): SamlBase.__init__(self, text=text, extension_elements=extension_elements, extension_attributes=extension_attributes) self.not_before = not_before self.not_on_or_after = not_on_or_after self.recipient = recipient self.in_response_to = in_response_to self.address = address def subject_confirmation_data_type__from_string(xml_string): return saml2.create_class_from_xml_string(SubjectConfirmationDataType_, xml_string) class KeyInfoConfirmationDataType_(SamlBase): """The urn:oasis:names:tc:SAML:2.0:assertion:KeyInfoConfirmationDataType element """ c_tag = 'KeyInfoConfirmationDataType' c_namespace = NAMESPACE c_children = SamlBase.c_children.copy() c_attributes = SamlBase.c_attributes.copy() c_child_order = SamlBase.c_child_order[:] c_cardinality = SamlBase.c_cardinality.copy() c_children['{http://www.w3.org/2000/09/xmldsig#}KeyInfo'] = ('key_info', [ds.KeyInfo]) c_cardinality['key_info'] = {"min": 1} c_child_order.extend(['key_info']) def __init__(self, key_info=None, text=None, extension_elements=None, extension_attributes=None): SamlBase.__init__(self, text=text, extension_elements=extension_elements, extension_attributes=extension_attributes) self.key_info = key_info or [] def 
key_info_confirmation_data_type__from_string(xml_string): return saml2.create_class_from_xml_string(KeyInfoConfirmationDataType_, xml_string) class ConditionAbstractType_(SamlBase): """The urn:oasis:names:tc:SAML:2.0:assertion:ConditionAbstractType element """ c_tag = 'ConditionAbstractType' c_namespace = NAMESPACE c_children = SamlBase.c_children.copy() c_attributes = SamlBase.c_attributes.copy() c_child_order = SamlBase.c_child_order[:] c_cardinality = SamlBase.c_cardinality.copy() class Audience(SamlBase): """The urn:oasis:names:tc:SAML:2.0:assertion:Audience element """ c_tag = 'Audience' c_namespace = NAMESPACE c_value_type = {'base': 'anyURI'} c_children = SamlBase.c_children.copy() c_attributes = SamlBase.c_attributes.copy() c_child_order = SamlBase.c_child_order[:] c_cardinality = SamlBase.c_cardinality.copy() def audience_from_string(xml_string): return saml2.create_class_from_xml_string(Audience, xml_string) class OneTimeUseType_(ConditionAbstractType_): """The urn:oasis:names:tc:SAML:2.0:assertion:OneTimeUseType element """ c_tag = 'OneTimeUseType' c_namespace = NAMESPACE c_children = ConditionAbstractType_.c_children.copy() c_attributes = ConditionAbstractType_.c_attributes.copy() c_child_order = ConditionAbstractType_.c_child_order[:] c_cardinality = ConditionAbstractType_.c_cardinality.copy() def one_time_use_type__from_string(xml_string): return saml2.create_class_from_xml_string(OneTimeUseType_, xml_string) class ProxyRestrictionType_(ConditionAbstractType_): """The urn:oasis:names:tc:SAML:2.0:assertion:ProxyRestrictionType element """ c_tag = 'ProxyRestrictionType' c_namespace = NAMESPACE c_children = ConditionAbstractType_.c_children.copy() c_attributes = ConditionAbstractType_.c_attributes.copy() c_child_order = ConditionAbstractType_.c_child_order[:] c_cardinality = ConditionAbstractType_.c_cardinality.copy() c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Audience'] = ('audience', [Audience]) c_cardinality['audience'] = {"min": 0} 
c_attributes['Count'] = ('count', 'nonNegativeInteger', False) c_child_order.extend(['audience']) def __init__(self, audience=None, count=None, text=None, extension_elements=None, extension_attributes=None): ConditionAbstractType_.__init__( self, text=text, extension_elements=extension_elements, extension_attributes=extension_attributes) self.audience = audience or [] self.count = count def proxy_restriction_type__from_string(xml_string): return saml2.create_class_from_xml_string(ProxyRestrictionType_, xml_string) class EncryptedAssertion(EncryptedElementType_): """The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAssertion element """ c_tag = 'EncryptedAssertion' c_namespace = NAMESPACE c_children = EncryptedElementType_.c_children.copy() c_attributes = EncryptedElementType_.c_attributes.copy() c_child_order = EncryptedElementType_.c_child_order[:] c_cardinality = EncryptedElementType_.c_cardinality.copy() def encrypted_assertion_from_string(xml_string): return saml2.create_class_from_xml_string(EncryptedAssertion, xml_string) class StatementAbstractType_(SamlBase): """The urn:oasis:names:tc:SAML:2.0:assertion:StatementAbstractType element """ c_tag = 'StatementAbstractType' c_namespace = NAMESPACE c_children = SamlBase.c_children.copy() c_attributes = SamlBase.c_attributes.copy() c_child_order = SamlBase.c_child_order[:] c_cardinality = SamlBase.c_cardinality.copy() class SubjectLocalityType_(SamlBase): """The urn:oasis:names:tc:SAML:2.0:assertion:SubjectLocalityType element """ c_tag = 'SubjectLocalityType' c_namespace = NAMESPACE c_children = SamlBase.c_children.copy() c_attributes = SamlBase.c_attributes.copy() c_child_order = SamlBase.c_child_order[:] c_cardinality = SamlBase.c_cardinality.copy() c_attributes['Address'] = ('address', 'string', False) c_attributes['DNSName'] = ('dns_name', 'string', False) def __init__(self, address=None, dns_name=None, text=None, extension_elements=None, extension_attributes=None): SamlBase.__init__(self, text=text, 
extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.address = address
        self.dns_name = dns_name


def subject_locality_type__from_string(xml_string):
    return saml2.create_class_from_xml_string(SubjectLocalityType_, xml_string)


class AuthnContextClassRef(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextClassRef element """

    c_tag = 'AuthnContextClassRef'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'anyURI'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()


def authn_context_class_ref_from_string(xml_string):
    return saml2.create_class_from_xml_string(AuthnContextClassRef, xml_string)


class AuthnContextDeclRef(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextDeclRef element """

    c_tag = 'AuthnContextDeclRef'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'anyURI'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()


def authn_context_decl_ref_from_string(xml_string):
    return saml2.create_class_from_xml_string(AuthnContextDeclRef, xml_string)


class AuthnContextDecl(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextDecl element """

    c_tag = 'AuthnContextDecl'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'anyType'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()


def authn_context_decl_from_string(xml_string):
    return saml2.create_class_from_xml_string(AuthnContextDecl, xml_string)


class AuthenticatingAuthority(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:AuthenticatingAuthority element """

    c_tag = 'AuthenticatingAuthority'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'anyURI'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()


def authenticating_authority_from_string(xml_string):
    return saml2.create_class_from_xml_string(AuthenticatingAuthority, xml_string)


class DecisionType_(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:DecisionType element """

    c_tag = 'DecisionType'
    c_namespace = NAMESPACE
    # closed enumeration per the SAML 2.0 core schema for <AuthzDecisionStatement>
    c_value_type = {'base': 'string',
                    'enumeration': ['Permit', 'Deny', 'Indeterminate']}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()


def decision_type__from_string(xml_string):
    return saml2.create_class_from_xml_string(DecisionType_, xml_string)


class ActionType_(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:ActionType element """

    c_tag = 'ActionType'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'string'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # 'Namespace' XML attribute is required (third tuple element is True)
    c_attributes['Namespace'] = ('namespace', 'anyURI', True)

    def __init__(self, namespace=None, text=None, extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.namespace = namespace


def action_type__from_string(xml_string):
    return saml2.create_class_from_xml_string(ActionType_, xml_string)


class AttributeValue(AttributeValueBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:AttributeValue element """

    c_tag = 'AttributeValue'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'anyType'}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()


def attribute_value_from_string(xml_string):
    return saml2.create_class_from_xml_string(AttributeValue, xml_string)


class EncryptedAttribute(EncryptedElementType_):
    """The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAttribute element """

    c_tag = 'EncryptedAttribute'
    c_namespace = NAMESPACE
    c_children = EncryptedElementType_.c_children.copy()
    c_attributes = EncryptedElementType_.c_attributes.copy()
    c_child_order = EncryptedElementType_.c_child_order[:]
    c_cardinality = EncryptedElementType_.c_cardinality.copy()


def encrypted_attribute_from_string(xml_string):
    return saml2.create_class_from_xml_string(EncryptedAttribute, xml_string)


class BaseID(BaseIDAbstractType_):
    """The urn:oasis:names:tc:SAML:2.0:assertion:BaseID element """

    c_tag = 'BaseID'
    c_namespace = NAMESPACE
    c_children = BaseIDAbstractType_.c_children.copy()
    c_attributes = BaseIDAbstractType_.c_attributes.copy()
    c_child_order = BaseIDAbstractType_.c_child_order[:]
    c_cardinality = BaseIDAbstractType_.c_cardinality.copy()


def base_id_from_string(xml_string):
    return saml2.create_class_from_xml_string(BaseID, xml_string)


class NameID(NameIDType_):
    """The urn:oasis:names:tc:SAML:2.0:assertion:NameID element

    From the Oasis SAML2 Technical Overview:

    "The <NameID> element within a <Subject> offers the ability to provide
    name identifiers in a number of different formats. SAML's predefined
    formats include: Email address, X.509 subject name, Windows domain
    qualified name, Kerberos principal name, Entity identifier,
    Persistent identifier, Transient identifier."
    """

    c_tag = 'NameID'
    c_namespace = NAMESPACE
    c_children = NameIDType_.c_children.copy()
    c_attributes = NameIDType_.c_attributes.copy()
    c_child_order = NameIDType_.c_child_order[:]
    c_cardinality = NameIDType_.c_cardinality.copy()


def name_id_from_string(xml_string):
    return saml2.create_class_from_xml_string(NameID, xml_string)


class SubjectConfirmationData(SubjectConfirmationDataType_):
    """The urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmationData element """

    c_tag = 'SubjectConfirmationData'
    c_namespace = NAMESPACE
    c_children = SubjectConfirmationDataType_.c_children.copy()
    c_attributes = SubjectConfirmationDataType_.c_attributes.copy()
    c_child_order = SubjectConfirmationDataType_.c_child_order[:]
    c_cardinality = SubjectConfirmationDataType_.c_cardinality.copy()


def subject_confirmation_data_from_string(xml_string):
    return saml2.create_class_from_xml_string(SubjectConfirmationData,
                                              xml_string)


class Condition(ConditionAbstractType_):
    """The urn:oasis:names:tc:SAML:2.0:assertion:Condition element """

    c_tag = 'Condition'
    c_namespace = NAMESPACE
    c_children = ConditionAbstractType_.c_children.copy()
    c_attributes = ConditionAbstractType_.c_attributes.copy()
    c_child_order = ConditionAbstractType_.c_child_order[:]
    c_cardinality = ConditionAbstractType_.c_cardinality.copy()


def condition_from_string(xml_string):
    return saml2.create_class_from_xml_string(Condition, xml_string)


class AudienceRestrictionType_(ConditionAbstractType_):
    """The urn:oasis:names:tc:SAML:2.0:assertion:AudienceRestrictionType element """

    c_tag = 'AudienceRestrictionType'
    c_namespace = NAMESPACE
    c_children = ConditionAbstractType_.c_children.copy()
    c_attributes = ConditionAbstractType_.c_attributes.copy()
    c_child_order = ConditionAbstractType_.c_child_order[:]
    c_cardinality = ConditionAbstractType_.c_cardinality.copy()
    c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Audience'] = (
        'audience', [Audience])
    # at least one <Audience> child is required by the schema
    c_cardinality['audience'] = {"min": 1}
    c_child_order.extend(['audience'])

    def __init__(self, audience=None, text=None, extension_elements=None,
                 extension_attributes=None):
        ConditionAbstractType_.__init__(
            self, text=text, extension_elements=extension_elements,
            extension_attributes=extension_attributes)
        self.audience = audience or []


def audience_restriction_type__from_string(xml_string):
    return saml2.create_class_from_xml_string(AudienceRestrictionType_,
                                              xml_string)


class OneTimeUse(OneTimeUseType_):
    """The urn:oasis:names:tc:SAML:2.0:assertion:OneTimeUse element """

    c_tag = 'OneTimeUse'
    c_namespace = NAMESPACE
    c_children = OneTimeUseType_.c_children.copy()
    c_attributes = OneTimeUseType_.c_attributes.copy()
    c_child_order = OneTimeUseType_.c_child_order[:]
    c_cardinality = OneTimeUseType_.c_cardinality.copy()


def one_time_use_from_string(xml_string):
    return saml2.create_class_from_xml_string(OneTimeUse, xml_string)


class ProxyRestriction(ProxyRestrictionType_):
    """The urn:oasis:names:tc:SAML:2.0:assertion:ProxyRestriction element """

    c_tag = 'ProxyRestriction'
    c_namespace = NAMESPACE
    c_children = ProxyRestrictionType_.c_children.copy()
    c_attributes = ProxyRestrictionType_.c_attributes.copy()
    c_child_order = ProxyRestrictionType_.c_child_order[:]
    c_cardinality = ProxyRestrictionType_.c_cardinality.copy()


def proxy_restriction_from_string(xml_string):
    return saml2.create_class_from_xml_string(ProxyRestriction, xml_string)


class Statement(StatementAbstractType_):
    """The urn:oasis:names:tc:SAML:2.0:assertion:Statement element """

    c_tag = 'Statement'
    c_namespace = NAMESPACE
    c_children = StatementAbstractType_.c_children.copy()
    c_attributes = StatementAbstractType_.c_attributes.copy()
    c_child_order = StatementAbstractType_.c_child_order[:]
    c_cardinality = StatementAbstractType_.c_cardinality.copy()


def statement_from_string(xml_string):
    return saml2.create_class_from_xml_string(Statement, xml_string)


class SubjectLocality(SubjectLocalityType_):
    """The urn:oasis:names:tc:SAML:2.0:assertion:SubjectLocality element """

    c_tag = 'SubjectLocality'
    c_namespace = NAMESPACE
    c_children = SubjectLocalityType_.c_children.copy()
    c_attributes = SubjectLocalityType_.c_attributes.copy()
    c_child_order = SubjectLocalityType_.c_child_order[:]
    c_cardinality = SubjectLocalityType_.c_cardinality.copy()

    def verify(self):
        if self.address:
            # dotted-decimal IPv4 or RFC3513 IPv6 address
            if valid_ipv4(self.address) or valid_ipv6(self.address):
                pass
            else:
                raise ShouldValueError("Not an IPv4 or IPv6 address")
        elif self.dns_name:
            # valid_domain_name presumably raises on failure — its return
            # value is not checked here (NOTE(review): confirm in helper)
            valid_domain_name(self.dns_name)
        return SubjectLocalityType_.verify(self)


def subject_locality_from_string(xml_string):
    return saml2.create_class_from_xml_string(SubjectLocality, xml_string)


class AuthnContextType_(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextType element """

    c_tag = 'AuthnContextType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children[
        '{urn:oasis:names:tc:SAML:2.0:assertion}AuthnContextClassRef'] = (
        'authn_context_class_ref', AuthnContextClassRef)
    c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AuthnContextDecl'] = (
        'authn_context_decl', AuthnContextDecl)
    c_cardinality['authn_context_decl'] = {"min": 0, "max": 1}
    c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AuthnContextDeclRef'] = (
        'authn_context_decl_ref', AuthnContextDeclRef)
    c_cardinality['authn_context_decl_ref'] = {"min": 0, "max": 1}
    c_children[
        '{urn:oasis:names:tc:SAML:2.0:assertion}AuthenticatingAuthority'] = (
        'authenticating_authority', [AuthenticatingAuthority])
    c_cardinality['authenticating_authority'] = {"min": 0}
    c_child_order.extend(['authn_context_class_ref', 'authn_context_decl',
                          'authn_context_decl_ref',
                          'authenticating_authority'])

    def __init__(self, authn_context_class_ref=None, authn_context_decl=None,
                 authn_context_decl_ref=None, authenticating_authority=None,
                 text=None, extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self, text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.authn_context_class_ref = authn_context_class_ref
        self.authn_context_decl = authn_context_decl
        self.authn_context_decl_ref = authn_context_decl_ref
        self.authenticating_authority = authenticating_authority or []

    def verify(self):
        # an <AuthnContext> may carry a declaration OR a reference to one,
        # but never both at the same time
        if self.authn_context_decl and self.authn_context_decl_ref:
            raise Exception(
                "Invalid Response: "
                "Cannot have both <AuthnContextDecl> and <AuthnContextDeclRef>"
            )
        return SamlBase.verify(self)


def authn_context_type__from_string(xml_string):
    return saml2.create_class_from_xml_string(AuthnContextType_, xml_string)


class Action(ActionType_):
    """The urn:oasis:names:tc:SAML:2.0:assertion:Action element """

    c_tag = 'Action'
    c_namespace = NAMESPACE
    c_children = ActionType_.c_children.copy()
    c_attributes = ActionType_.c_attributes.copy()
    c_child_order = ActionType_.c_child_order[:]
    c_cardinality = ActionType_.c_cardinality.copy()


def action_from_string(xml_string):
    return saml2.create_class_from_xml_string(Action, xml_string)


class AttributeType_(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:assertion:AttributeType element """

    c_tag = 'AttributeType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue']
LoaderFactory(loaders_config)
    # parse all dataset interfaces and derive the single global task they share
    datasets, task = create_parsers(config, loader_factory.get_base_transforms())
    assert datasets and task is not None, "invalid dataset configuration (got empty list)"
    for dataset_name, dataset in datasets.items():
        logger.info(f"parsed dataset: {str(dataset)}")
    logger.info(f"task info: {str(task)}")
    logger.debug("splitting datasets and creating loaders...")
    train_idxs, valid_idxs, test_idxs = loader_factory.get_split(datasets, task)
    if save_dir is not None:
        # persist the session/task info so later runs can verify that the
        # split (and optionally the sample lists) did not silently change
        with open(os.path.join(data_logger_dir, "task.log"), "a+") as fd:
            fd.write(f"session: {session_name}-{logstamp}\n")
            fd.write(f"version: {repover}\n")
            fd.write(str(task) + "\n")
        for dataset_name, dataset in datasets.items():
            dataset_log_file = os.path.join(data_logger_dir, dataset_name + ".log")
            if not loader_factory.skip_verif and os.path.isfile(dataset_log_file):
                logger.info(f"verifying sample list for dataset '{dataset_name}'...")
                log_content = thelper.utils.load_config(dataset_log_file, as_json=True, add_name_if_missing=False)
                assert isinstance(log_content, dict), "old split data logs no longer supported for verification"
                samples_old, samples_new = None, None
                if "samples" in log_content:
                    assert isinstance(log_content["samples"], list), \
                        "unexpected dataset log content (bad 'samples' field, should be list)"
                    samples_old = log_content["samples"]
                    # only trust dataset.samples when its length matches the dataset itself
                    samples_new = dataset.samples if hasattr(dataset, "samples") and dataset.samples is not None \
                        and len(dataset.samples) == len(dataset) else []
                    if len(samples_old) != len(samples_new):
                        query_msg = f"old sample list for dataset '{dataset_name}' mismatch with current list; proceed?"
                        answer = thelper.utils.query_yes_no(query_msg, bypass="n")
                        if not answer:
                            logger.error("sample list mismatch with previous run; user aborted")
                            sys.exit(1)
                        break
                for set_name, idxs in zip(["train_idxs", "valid_idxs", "test_idxs"],
                                          [train_idxs[dataset_name], valid_idxs[dataset_name],
                                           test_idxs[dataset_name]]):
                    # index values were paired in tuples earlier, 0=idx, 1=label --- we unpack in the miniloop below
                    if not np.array_equal(np.sort(log_content[set_name]), np.sort([idx for idx, _ in idxs])):
                        query_msg = f"Old indices list for dataset '{dataset_name}' mismatch with current indices" \
                                    f"list ('{set_name}'); proceed anyway?"
                        answer = thelper.utils.query_yes_no(query_msg, bypass="n")
                        if not answer:
                            logger.error("indices list mismatch with previous run; user aborted")
                            sys.exit(1)
                        break
        printer = pprint.PrettyPrinter(indent=2)
        log_sample_metadata = thelper.utils.get_key_def(["log_samples", "log_samples_metadata"], config, default=False)
        for dataset_name, dataset in datasets.items():
            dataset_log_file = os.path.join(data_logger_dir, dataset_name + ".log")
            samples = dataset.samples if hasattr(dataset, "samples") and dataset.samples is not None \
                and len(dataset.samples) == len(dataset) else []
            log_content = {
                "metadata": {
                    "session_name": session_name,
                    "logstamp": logstamp,
                    "version": repover,
                    "dataset": str(dataset),
                },
                # index values were paired in tuples earlier, 0=idx, 1=label
                "train_idxs": [int(idx) for idx, _ in train_idxs[dataset_name]],
                "valid_idxs": [int(idx) for idx, _ in valid_idxs[dataset_name]],
                "test_idxs": [int(idx) for idx, _ in test_idxs[dataset_name]]
            }
            if log_sample_metadata:
                log_content["samples"] = [printer.pformat(sample) for sample in samples]
            # now, always overwrite, as it can get too big otherwise
            with open(dataset_log_file, "w") as fd:
                json.dump(log_content, fd, indent=4, sort_keys=False)
    train_loader, valid_loader, test_loader = loader_factory.create_loaders(datasets, train_idxs, valid_idxs, test_idxs)
    return task, train_loader, valid_loader,
test_loader


def create_parsers(config, base_transforms=None):
    """Instantiates dataset parsers based on a provided dictionary.

    This function will instantiate dataset parsers as defined in a name-type-param
    dictionary. If multiple datasets are instantiated, this function will also verify
    their task compatibility and return the global task. The dataset interfaces themselves
    should be derived from :class:`thelper.data.parsers.Dataset`, be compatible with
    :class:`thelper.data.parsers.ExternalDataset`, or should provide a 'task' field
    specifying all the information related to sample dictionary keys and model i/o.

    The provided configuration will be parsed for a 'datasets' dictionary entry. The keys
    in this dictionary are treated as unique dataset names and are used for lookups. The
    value associated to each key (or dataset name) should be a type-params dictionary that
    can be parsed to instantiate the dataset interface.

    An example configuration dictionary is given in :func:`thelper.data.utils.create_loaders`.

    Args:
        config: a dictionary that provides unique dataset names and parameters needed
            for instantiation under the 'datasets' field.
        base_transforms: the transform operation that should be applied to all loaded
            samples, and that will be provided to the constructor of all instantiated
            dataset parsers.

    Returns:
        A 2-element tuple that contains: 1) the list of dataset interfaces/parsers that
        were instantiated; and 2) a task object compatible with all of those (see
        :class:`thelper.tasks.utils.Task` for more information).

    .. seealso::
        | :func:`thelper.data.utils.create_loaders`
        | :class:`thelper.data.parsers.Dataset`
        | :class:`thelper.data.parsers.ExternalDataset`
        | :class:`thelper.tasks.utils.Task`
    """
    if not isinstance(config, dict):
        raise AssertionError("unexpected session config type")
    if "datasets" not in config or not config["datasets"]:
        raise AssertionError("config missing 'datasets' field (must contain dict or str value)")
    config = config["datasets"]  # no need to keep the full config here
    if isinstance(config, str):
        # a string value is treated as a path to a standalone datasets config file
        try:
            config = thelper.utils.load_config(config, add_name_if_missing=False)
        except Exception:
            raise AssertionError("'datasets' string should point to valid configuration file")
    logger.debug("loading datasets templates")
    if not isinstance(config, dict):
        raise AssertionError("invalid datasets config type (must be dictionary)")
    datasets = {}
    tasks = []
    for dataset_name, dataset_config in config.items():
        if isinstance(dataset_config, thelper.data.Dataset):
            # pre-instantiated dataset object: use it (and its task) directly
            dataset = dataset_config
            task = dataset.task
        else:
            logger.debug("loading dataset '%s' configuration..." % dataset_name)
            if "type" not in dataset_config:
                raise AssertionError("missing field 'type' for instantiation of dataset '%s'" % dataset_name)
            dataset_type = thelper.utils.import_class(dataset_config["type"])
            dataset_params = thelper.utils.get_key_def(["params", "parameters"], dataset_config, {})
            transforms = None
            if "transforms" in dataset_config and dataset_config["transforms"]:
                logger.debug("loading custom transforms for dataset '%s'..." % dataset_name)
                transforms = thelper.transforms.load_transforms(dataset_config["transforms"])
                if base_transforms is not None:
                    # custom transforms run first, then the shared base transforms
                    transforms = thelper.transforms.Compose([transforms, base_transforms])
            elif base_transforms is not None:
                transforms = base_transforms
            if issubclass(dataset_type, thelper.data.Dataset):
                # assume that the dataset is derived from thelper.data.parsers.Dataset (it is fully sampling-ready)
                dataset_sig = inspect.signature(dataset_type)
                if "config" in dataset_sig.parameters:  # pragma: no cover
                    # @@@@ for backward compatibility only, will be removed in v0.3
                    dataset = dataset_type(transforms=transforms, config=dataset_params)
                else:
                    dataset = dataset_type(transforms=transforms, **dataset_params)
                if "task" in dataset_config:
                    logger.warning("'task' field detected in dataset '%s' config; "
                                   "dataset's default task will be ignored" % dataset_name)
                    task = thelper.tasks.create_task(dataset_config["task"])
                else:
                    task = dataset.task
            else:
                if "task" not in dataset_config or not dataset_config["task"]:
                    raise AssertionError("external dataset '%s' must define task interface in "
                                         "its configuration dict" % dataset_name)
                task = thelper.tasks.create_task(dataset_config["task"])
                # assume that __getitem__ and __len__ are implemented, but we need to make it sampling-ready
                dataset = thelper.data.ExternalDataset(dataset_type, task, transforms=transforms, **dataset_params)
        if task is None:
            raise AssertionError("parsed task interface should not be None anymore (old code doing something strange?)")
        tasks.append(task)
        datasets[dataset_name] = dataset
    # merge the per-dataset tasks into a single global (compatible) task
    return datasets, thelper.tasks.create_global_task(tasks)


def create_hdf5(archive_path, task, train_loader, valid_loader, test_loader,
                compression=None, config_backup=None):
    """Saves the samples loaded from train/valid/test data loaders into an HDF5 archive.

    The loaded minibatches are decomposed into individual samples. The keys provided via
    the task interface are used to fetch elements (input, groundtruth, ...)
from the samples, and save them in the archive. The archive will contain three groups (`train`, `valid`, and `test`), and each group will contain a dataset for each element originally found in the samples. Note that the compression operates at the sample level, not at the dataset level. This means that elements of each sample will be compressed individually, not as an array. Therefore, if you are trying to compress very correlated samples (e.g. frames in a video sequence), this approach will be pretty bad. Args: archive_path: path pointing where the HDF5 archive should be created. task: task object that defines the input, groundtruth, and meta keys tied to elements that should be parsed from loaded samples and saved in the HDF5 archive. train_loader: training data loader (can be `None`). valid_loader: validation data loader (can be `None`). test_loader: testing data loader (can be `None`). compression: the compression configuration dictionary that will be parsed to determine how sample elements should be compressed. If a mapping is missing, that element will not be compressed. config_backup: optional session configuration file that should be saved in the HDF5 archive. Example compression configuration:: # the config is given as a dictionary { # each field is a key that corresponds to an element in each sample "key1": { # the 'type' identifies the compression approach to use # (see thelper.utils.encode_data for more information) "type": "jpg", # extra parameters might be needed to encode the data # (see thelper.utils.encode_data for more information) "encode_params": {} # these parameters are packed and kept for decoding # (see thelper.utils.decode_data for more information) "decode_params": {"flags": "cv.IMREAD_COLOR"} }, "key2": { # this explicitly means that no encoding should be performed "type": "none" }, ... # if a key is missing, its elements will not be compressed } .. 
seealso:: | :func:`thelper.cli.split_data` | :class:`thelper.data.parsers.HDF5Dataset` | :func:`thelper.utils.encode_data` | :func:`thelper.utils.decode_data` """ if compression is None: compression = {} if config_backup is None: config_backup = {} import h5py with h5py.File(archive_path, "w") as fd: fd.attrs["source"] = thelper.utils.get_log_stamp() fd.attrs["git_sha1"] = thelper.utils.get_git_stamp() fd.attrs["version"] = thelper.__version__ fd.attrs["task"] = str(task) fd.attrs["config"] = str(config_backup) fd.attrs["compression"] =
If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the ``Marker`` parameter and retrying the command. If the ``Marker`` field is empty, all response records have been retrieved for the request. - **Snapshots** *(list) --* A list of Snapshot instances. - *(dict) --* Describes a snapshot. - **SnapshotIdentifier** *(string) --* The snapshot identifier that is provided in the request. - **ClusterIdentifier** *(string) --* The identifier of the cluster for which the snapshot was taken. - **SnapshotCreateTime** *(datetime) --* The time (in UTC format) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time. - **Status** *(string) --* The snapshot status. The value of the status depends on the API operation used: * CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating". * DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed". * DeleteClusterSnapshot returns status as "deleted". - **Port** *(integer) --* The port that the cluster is listening on. - **AvailabilityZone** *(string) --* The Availability Zone in which the cluster was created. - **ClusterCreateTime** *(datetime) --* The time (UTC) when the cluster was originally created. - **MasterUsername** *(string) --* The master user name for the cluster. - **ClusterVersion** *(string) --* The version ID of the Amazon Redshift engine that is running on the cluster. - **SnapshotType** *(string) --* The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot are of type "manual". - **NodeType** *(string) --* The node type of the nodes in the cluster. - **NumberOfNodes** *(integer) --* The number of nodes in the cluster. - **DBName** *(string) --* The name of the database that was created when the cluster was created. 
- **VpcId** *(string) --* The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output. - **Encrypted** *(boolean) --* If ``true`` , the data in the snapshot is encrypted at rest. - **KmsKeyId** *(string) --* The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken. - **EncryptedWithHSM** *(boolean) --* A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. ``true`` indicates that the data is encrypted using HSM keys. - **AccountsWithRestoreAccess** *(list) --* A list of the AWS customer accounts authorized to restore the snapshot. Returns ``null`` if no accounts are authorized. Visible only to the snapshot owner. - *(dict) --* Describes an AWS customer account authorized to restore a snapshot. - **AccountId** *(string) --* The identifier of an AWS customer account authorized to restore a snapshot. - **AccountAlias** *(string) --* The identifier of an AWS support account authorized to restore a snapshot. For AWS support, the identifier is ``amazon-redshift-support`` . - **OwnerAccount** *(string) --* For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot. - **TotalBackupSizeInMegaBytes** *(float) --* The size of the complete set of backup data that would be used to restore the cluster. - **ActualIncrementalBackupSizeInMegaBytes** *(float) --* The size of the incremental backup. - **BackupProgressInMegaBytes** *(float) --* The number of megabytes that have been transferred to the snapshot backup. - **CurrentBackupRateInMegaBytesPerSecond** *(float) --* The number of megabytes per second being transferred to the snapshot backup. Returns ``0`` for a completed backup. 
- **EstimatedSecondsToCompletion** *(integer) --* The estimate of the time remaining before the snapshot backup will complete. Returns ``0`` for a completed backup. - **ElapsedTimeInSeconds** *(integer) --* The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish. - **SourceRegion** *(string) --* The source region from which the snapshot was copied. - **Tags** *(list) --* The list of tags for the cluster snapshot. - *(dict) --* A tag consisting of a name/value pair for a resource. - **Key** *(string) --* The key, or name, for the resource tag. - **Value** *(string) --* The value for the resource tag. - **RestorableNodeTypes** *(list) --* The list of node types that this cluster snapshot is able to restore into. - *(string) --* - **EnhancedVpcRouting** *(boolean) --* An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide. If this option is ``true`` , enhanced VPC routing is enabled. Default: false - **MaintenanceTrackName** *(string) --* The name of the maintenance track for the snapshot. - **ManualSnapshotRetentionPeriod** *(integer) --* The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely. The value must be either -1 or an integer between 1 and 3,653. - **ManualSnapshotRemainingDays** *(integer) --* The number of days until a manual snapshot will pass its retention period. - **SnapshotRetentionStartTime** *(datetime) --* A timestamp representing the start of the retention period for the snapshot. :type ClusterIdentifier: string :param ClusterIdentifier: The identifier of the cluster which generated the requested snapshots. 
:type SnapshotIdentifier: string :param SnapshotIdentifier: The snapshot identifier of the snapshot about which to return information. :type SnapshotType: string :param SnapshotType: The type of snapshots for which you are requesting information. By default, snapshots of all types are returned. Valid Values: ``automated`` | ``manual`` :type StartTime: datetime :param StartTime: A value that requests only snapshots created at or after the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the `ISO8601 Wikipedia page. <http://en.wikipedia.org/wiki/ISO_8601>`__ Example: ``2012-07-16T18:00:00Z`` :type EndTime: datetime :param EndTime: A time value that requests only snapshots created at or before the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the `ISO8601 Wikipedia page. <http://en.wikipedia.org/wiki/ISO_8601>`__ Example: ``2012-07-16T18:00:00Z`` :type MaxRecords: integer :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified ``MaxRecords`` value, a value is returned in a ``marker`` field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: ``100`` Constraints: minimum 20, maximum 100. :type Marker: string :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSnapshots request exceed the value specified in ``MaxRecords`` , AWS returns a value in the ``Marker`` field of the response. You can retrieve the next set of response records by providing the returned marker value in the ``Marker`` parameter and retrying the request. :type OwnerAccount: string :param OwnerAccount: The AWS customer account used to create or copy the snapshot. 
Use this field to filter the results to snapshots owned by a particular account. To describe snapshots you own, either specify your AWS customer account, or do not specify the parameter. :type TagKeys: list :param TagKeys: A tag key or keys for which you want to return all matching cluster snapshots that are associated with the specified key or keys. For example, suppose that you have snapshots that are tagged with keys called ``owner`` and ``environment``
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]), ) ### IDL colormap 4 :: BLUE ### color_map_luts['idl04'] = \ ( array([ 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 
0.0000000, 0.0000000, 0.0000000, 0.0273438, 0.0585938, 0.0859375, 0.1171875, 0.1445312, 0.1757812, 0.2031250, 0.2343750, 0.2617188, 0.2929688, 0.3203125, 0.3515625, 0.3789062, 0.4101562, 0.4375000, 0.4687500, 0.4882812, 0.5078125, 0.5273438, 0.5468750, 0.5664062, 0.5859375, 0.6054688, 0.6250000, 0.6445312, 0.6640625, 0.6835938, 0.7031250, 0.7226562, 0.7421875, 0.7617188, 0.7812500, 0.7812500, 0.7851562, 0.7851562, 0.7890625, 0.7890625, 0.7929688, 0.7929688, 0.7968750, 0.7968750, 0.8007812, 0.8007812, 0.8046875, 0.8046875, 0.8085938, 0.8085938, 0.8125000, 0.8125000, 0.8164062, 0.8164062, 0.8203125, 0.8203125, 0.8242188, 0.8242188, 0.8281250, 0.8281250, 0.8320312, 0.8320312, 0.8359375, 0.8359375, 0.8398438, 0.8398438, 0.8437500, 0.8437500, 0.8476562, 0.8476562, 0.8515625, 0.8515625, 0.8554688, 0.8554688, 0.8593750, 0.8593750, 0.8632812, 0.8632812, 0.8671875, 0.8671875, 0.8710938, 0.8710938, 0.8750000, 0.8750000, 0.8789062, 0.8789062, 0.8828125, 0.8828125, 0.8867188, 0.8867188, 0.8906250, 0.8906250, 0.8945312, 0.8945312, 0.8984375, 0.8984375, 0.9023438, 0.9023438, 0.9062500, 0.9062500, 0.9101562, 0.9101562, 0.9140625, 0.9140625, 0.9179688, 0.9179688, 0.9218750, 0.9218750, 0.9257812, 0.9257812, 0.9296875, 0.9296875, 0.9335938, 0.9335938, 0.9375000, 0.9375000, 0.9414062, 0.9414062, 0.9453125, 0.9453125, 0.9492188, 0.9492188, 0.9531250, 0.9531250, 0.9570312, 0.9570312, 0.9609375, 0.9609375, 0.9648438, 0.9648438, 0.9687500, 0.9687500, 0.9726562, 0.9726562, 0.9765625, 0.9765625, 0.9804688, 0.9804688, 0.9843750, 0.9843750, 0.9882812, 0.9882812, 0.9921875, 0.9921875, 0.9960938, 0.9960938]), array([ 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0117188, 
0.0234375, 0.0351562, 0.0468750, 0.0585938, 0.0703125, 0.0820312, 0.0976562, 0.1093750, 0.1210938, 0.1328125, 0.1445312, 0.1562500, 0.1679688, 0.1796875, 0.1953125, 0.2070312, 0.2187500, 0.2304688, 0.2421875, 0.2539062, 0.2656250, 0.2773438, 0.2929688, 0.3046875, 0.3164062, 0.3281250, 0.3398438, 0.3515625, 0.3632812, 0.3750000, 0.3906250, 0.4023438, 0.4140625, 0.4257812, 0.4375000, 0.4492188, 0.4609375, 0.4726562, 0.4882812, 0.5000000, 0.5117188, 0.5234375, 0.5351562, 0.5468750, 0.5585938, 0.5703125, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5859375, 0.5820312, 0.5781250, 0.5781250, 0.5742188, 0.5703125, 0.5703125, 0.5664062, 0.5664062, 0.5625000, 0.5585938, 0.5585938, 0.5546875, 0.5507812, 0.5507812, 0.5468750, 0.5468750, 0.5351562, 0.5273438, 0.5156250, 0.5078125, 0.4960938, 0.4882812, 0.4765625, 0.4687500, 0.4570312, 0.4492188, 0.4375000, 0.4296875, 0.4179688, 0.4101562, 0.3984375, 0.3906250, 0.3632812, 0.3398438, 0.3164062, 0.2929688, 0.2656250, 0.2421875, 0.2187500, 0.1953125, 0.1679688, 0.1445312, 0.1210938, 0.0976562, 0.0703125, 0.0468750, 0.0234375, 0.0000000, 0.0078125, 0.0156250, 0.0234375, 0.0351562, 0.0429688, 0.0507812, 0.0625000, 0.0703125, 0.0781250, 0.0898438, 0.0976562, 0.1054688, 0.1132812, 0.1250000, 0.1328125, 0.1406250, 0.1523438, 0.1601562, 0.1679688, 0.1796875, 0.1875000, 0.1953125, 0.2070312, 0.2148438, 0.2226562, 0.2304688, 0.2421875, 0.2500000, 0.2578125, 0.2695312, 0.2773438, 0.2851562, 0.2968750, 0.3046875, 0.3125000, 0.3242188, 0.3320312, 0.3398438, 0.3476562, 0.3593750, 0.3671875, 0.3750000, 0.3867188, 0.3945312, 0.4023438, 0.4140625, 0.4218750, 0.4296875, 0.4414062, 0.4492188, 0.4570312, 0.4648438, 0.4765625, 0.4843750, 0.4921875, 0.5039062, 0.5117188, 0.5195312, 0.5312500, 0.5390625, 0.5468750, 0.5546875, 0.5664062, 0.5742188, 0.5820312, 0.5937500, 0.6015625, 0.6093750, 0.6210938, 0.6289062, 
0.6367188, 0.6484375, 0.6562500, 0.6640625, 0.6718750, 0.6835938, 0.6914062, 0.6992188, 0.7109375, 0.7187500, 0.7265625, 0.7382812, 0.7460938, 0.7539062, 0.7656250, 0.7734375, 0.7812500, 0.7890625, 0.8007812, 0.8085938, 0.8164062, 0.8281250, 0.8359375, 0.8437500, 0.8554688, 0.8632812, 0.8710938, 0.8828125, 0.8906250, 0.8984375, 0.9062500, 0.9179688, 0.9257812, 0.9335938, 0.9453125, 0.9531250, 0.9609375, 0.9726562, 0.9804688, 0.9882812, 0.9960938]), array([ 0.0000000, 0.0078125, 0.0156250, 0.0234375, 0.0312500, 0.0390625, 0.0468750, 0.0546875, 0.0625000, 0.0703125, 0.0781250, 0.0859375, 0.0976562, 0.1054688, 0.1132812, 0.1210938, 0.1289062, 0.1367188, 0.1445312, 0.1523438, 0.1601562, 0.1679688, 0.1757812, 0.1835938, 0.1953125, 0.2031250, 0.2109375, 0.2187500, 0.2265625, 0.2343750, 0.2421875, 0.2500000, 0.2578125, 0.2656250, 0.2734375, 0.2812500, 0.2929688, 0.3007812, 0.3085938, 0.3164062, 0.3242188, 0.3320312, 0.3398438, 0.3476562, 0.3554688, 0.3632812, 0.3710938, 0.3789062, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3906250, 0.3750000, 0.3632812, 0.3515625, 0.3398438, 0.3281250, 0.3164062, 0.3046875, 0.2929688, 0.2773438, 0.2656250, 0.2539062, 0.2421875, 0.2304688, 0.2187500, 0.2070312, 0.1953125, 0.1796875, 0.1679688, 0.1562500, 0.1445312, 0.1328125, 0.1210938, 0.1093750, 0.0976562, 0.0820312, 0.0703125, 0.0585938, 0.0468750, 0.0351562, 0.0234375, 0.0117188, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 
0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000]), array([ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]), ) ### IDL colormap 5 :: STD GAMMA-II ### color_map_luts['idl05'] = \ ( array([ 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0156250, 0.0351562, 0.0546875, 0.0742188, 0.0898438, 0.1093750, 0.1289062, 0.1484375, 0.1640625, 0.1835938, 0.2031250, 0.2226562, 0.2382812, 0.2578125, 0.2773438, 0.2968750, 0.3164062, 0.3164062, 0.3164062, 0.3164062, 0.3164062, 0.3164062, 0.3164062, 0.3164062, 0.3125000, 0.3125000, 0.3125000, 0.3125000, 0.3125000, 0.3125000, 0.3125000, 0.3085938, 0.3281250, 0.3476562, 0.3671875, 0.3867188, 0.4062500, 0.4257812, 0.4453125, 0.4648438, 0.4843750, 0.5039062, 0.5234375, 0.5429688,
s: trace_id = (s.src,s.dst,1,s.type) else: trace_id = (s.src,s.dst,s.proto,0) trace = rt.get(trace_id,{}) ttl = IPv6 in s and s.hlim or s.ttl if not (ICMP in r and r[ICMP].type == 11) and not (IPv6 in r and ICMPv6TimeExceeded in r): if trace_id in ports_done: continue ports_done[trace_id] = None p = ports.get(r.src,[]) if TCP in r: p.append(r.sprintf("<T%ir,TCP.sport%> %TCP.sport% %TCP.flags%")) trace[ttl] = r.sprintf('"%r,src%":T%ir,TCP.sport%') elif UDP in r: p.append(r.sprintf("<U%ir,UDP.sport%> %UDP.sport%")) trace[ttl] = r.sprintf('"%r,src%":U%ir,UDP.sport%') elif ICMP in r: p.append(r.sprintf("<I%ir,ICMP.type%> ICMP %ICMP.type%")) trace[ttl] = r.sprintf('"%r,src%":I%ir,ICMP.type%') else: p.append(r.sprintf("{IP:<P%ir,proto%> IP %proto%}{IPv6:<P%ir,nh%> IPv6 %nh%}")) trace[ttl] = r.sprintf('"%r,src%":{IP:P%ir,proto%}{IPv6:P%ir,nh%}') ports[r.src] = p else: trace[ttl] = r.sprintf('"%r,src%"') rt[trace_id] = trace # Fill holes with unk%i nodes unknown_label = incremental_label("unk%i") blackholes = [] bhip = {} for rtk in rt: trace = rt[rtk] k = trace.keys() for n in range(min(k), max(k)): if not trace.has_key(n): trace[n] = unknown_label.next() if not ports_done.has_key(rtk): if rtk[2] == 1: #ICMP bh = "%s %i/icmp" % (rtk[1],rtk[3]) elif rtk[2] == 6: #TCP bh = "%s %i/tcp" % (rtk[1],rtk[3]) elif rtk[2] == 17: #UDP bh = '%s %i/udp' % (rtk[1],rtk[3]) else: bh = '%s %i/proto' % (rtk[1],rtk[2]) ips[bh] = None bhip[rtk[1]] = bh bh = '"%s"' % bh trace[max(k)+1] = bh blackholes.append(bh) # Find AS numbers ASN_query_list = dict.fromkeys(map(lambda x:x.rsplit(" ",1)[0],ips)).keys() if ASres is None: ASNlist = [] else: ASNlist = ASres.resolve(*ASN_query_list) ASNs = {} ASDs = {} for ip,asn,desc, in ASNlist: if asn is None: continue iplist = ASNs.get(asn,[]) if ip in bhip: if ip in ports: iplist.append(ip) iplist.append(bhip[ip]) else: iplist.append(ip) ASNs[asn] = iplist ASDs[asn] = desc backcolorlist=colgen("60","86","ba","ff") forecolorlist=colgen("a0","70","40","20") 
s = "digraph trace {\n" s += "\n\tnode [shape=ellipse,color=black,style=solid];\n\n" s += "\n#ASN clustering\n" for asn in ASNs: s += '\tsubgraph cluster_%s {\n' % asn col = backcolorlist.next() s += '\t\tcolor="#%s%s%s";' % col s += '\t\tnode [fillcolor="#%s%s%s",style=filled];' % col s += '\t\tfontsize = 10;' s += '\t\tlabel = "%s\\n[%s]"\n' % (asn,ASDs[asn]) for ip in ASNs[asn]: s += '\t\t"%s";\n'%ip s += "\t}\n" s += "#endpoints\n" for p in ports: s += '\t"%s" [shape=record,color=black,fillcolor=green,style=filled,label="%s|%s"];\n' % (p,p,"|".join(ports[p])) s += "\n#Blackholes\n" for bh in blackholes: s += '\t%s [shape=octagon,color=black,fillcolor=red,style=filled];\n' % bh if padding: s += "\n#Padding\n" pad={} for snd,rcv in self.res: if rcv.src not in ports and rcv.haslayer(Padding): p = rcv.getlayer(Padding).load if p != "\x00"*len(p): pad[rcv.src]=None for rcv in pad: s += '\t"%s" [shape=triangle,color=black,fillcolor=red,style=filled];\n' % rcv s += "\n\tnode [shape=ellipse,color=black,style=solid];\n\n" for rtk in rt: s += "#---[%s\n" % `rtk` s += '\t\tedge [color="#%s%s%s"];\n' % forecolorlist.next() trace = rt[rtk] k = trace.keys() for n in range(min(k), max(k)): s += '\t%s ->\n' % trace[n] s += '\t%s;\n' % trace[max(k)] s += "}\n"; self.graphdef = s def graph(self, ASres=None, padding=0, **kargs): """x.graph(ASres=conf.AS_resolver, other args): ASres=None : no AS resolver => no clustering ASres=AS_resolver() : default whois AS resolver (riswhois.ripe.net) ASres=AS_resolver_cymru(): use whois.cymru.com whois database ASres=AS_resolver(server="whois.ra.net") type: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option target: filename or redirect. 
Defaults pipe to Imagemagick's display program prog: which graphviz program to use""" if ASres is None: ASres = conf.AS_resolver if (self.graphdef is None or self.graphASres != ASres or self.graphpadding != padding): self.make_graph(ASres,padding) return do_graph(self.graphdef, **kargs) ############ ## Fields ## ############ class Field: """For more informations on how this work, please refer to http://www.secdev.org/projects/scapy/files/scapydoc.pdf chapter ``Adding a New Field''""" islist=0 holds_packets=0 def __init__(self, name, default, fmt="H"): self.name = name if fmt[0] in "@=<>!": self.fmt = fmt else: self.fmt = "!"+fmt self.default = self.any2i(None,default) self.sz = struct.calcsize(self.fmt) self.owners = [] def register_owner(self, cls): self.owners.append(cls) def i2len(self, pkt, x): """Convert internal value to a length usable by a FieldLenField""" return self.sz def i2count(self, pkt, x): """Convert internal value to a number of elements usable by a FieldLenField. Always 1 except for list fields""" return 1 def h2i(self, pkt, x): """Convert human value to internal value""" return x def i2h(self, pkt, x): """Convert internal value to human value""" return x def m2i(self, pkt, x): """Convert machine value to internal value""" return x def i2m(self, pkt, x): """Convert internal value to machine value""" if x is None: x = 0 return x def any2i(self, pkt, x): """Try to understand the most input values possible and make an internal value from them""" return self.h2i(pkt, x) def i2repr(self, pkt, x): """Convert internal value to a nice representation""" if x is None: x = 0 return repr(self.i2h(pkt,x)) def addfield(self, pkt, s, val): """Add an internal value to a string""" return s+struct.pack(self.fmt, self.i2m(pkt,val)) def getfield(self, pkt, s): """Extract an internal value from a string""" return s[self.sz:], self.m2i(pkt, struct.unpack(self.fmt, s[:self.sz])[0]) def do_copy(self, x): if hasattr(x, "copy"): return x.copy() if type(x) is list: x = x[:] 
for i in xrange(len(x)): if isinstance(x[i], Packet): x[i] = x[i].copy() return x def __repr__(self): return "<Field (%s).%s>" % (",".join(x.__name__ for x in self.owners),self.name) def copy(self): return copy.deepcopy(self) def randval(self): """Return a volatile object whose value is both random and suitable for this field""" fmtt = self.fmt[-1] if fmtt in "BHIQ": return {"B":RandByte,"H":RandShort,"I":RandInt, "Q":RandLong}[fmtt]() elif fmtt == "s": if self.fmt[0] in "0123456789": l = int(self.fmt[:-1]) else: l = int(self.fmt[1:-1]) return RandBin(l) else: warning("no random class for [%s] (fmt=%s)." % (self.name, self.fmt)) class Emph: fld = "" def __init__(self, fld): self.fld = fld def __getattr__(self, attr): return getattr(self.fld,attr) def __hash__(self): return hash(self.fld) def __eq__(self, other): return self.fld == other class ActionField: _fld = None def __init__(self, fld, action_method, **kargs): self._fld = fld self._action_method = action_method self._privdata = kargs def any2i(self, pkt, val): getattr(pkt, self._action_method)(val, self._fld, **self._privdata) return getattr(self._fld, "any2i")(pkt, val) def __getattr__(self, attr): return getattr(self._fld,attr) class ConditionalField: fld = None def __init__(self, fld, cond): self.fld = fld self.cond = cond def _evalcond(self,pkt): return self.cond(pkt) def getfield(self, pkt, s): if self._evalcond(pkt): return self.fld.getfield(pkt,s) else: return s,None def addfield(self, pkt, s, val): if self._evalcond(pkt): return self.fld.addfield(pkt,s,val) else: return s def __getattr__(self, attr): return getattr(self.fld,attr) class PadField: """Add bytes after the proxified field so that it ends at the specified alignment from its begining""" _fld = None def __init__(self, fld, align, padwith=None): self._fld = fld self._align = align self._padwith = padwith or "" def addfield(self, pkt, s, val): sval = self._fld.addfield(pkt, "", val) return s+sval+struct.pack("%is" % (-len(sval)%self._align), 
self._padwith) def __getattr__(self, attr): return getattr(self._fld,attr) class MACField(Field): def __init__(self, name, default): Field.__init__(self, name, default, "6s") def i2m(self, pkt, x): if x is None: return "\0\0\0\0\0\0" return mac2str(x) def m2i(self, pkt, x): return str2mac(x) def any2i(self, pkt, x): if type(x) is str and len(x) is 6: x = self.m2i(pkt, x) return x def i2repr(self, pkt, x): x = self.i2h(pkt, x) if self in conf.resolve: x = conf.manufdb._resolve_MAC(x) return x def randval(self): return RandMAC() class DestMACField(MACField): def __init__(self, name): MACField.__init__(self, name, None) def i2h(self, pkt, x): if x is None: dstip = None if isinstance(pkt.payload, IPv6): dstip = pkt.payload.dst elif isinstance(pkt.payload, IP): dstip = pkt.payload.dst elif isinstance(pkt.payload, ARP): dstip = pkt.payload.pdst if isinstance(dstip, Gen): dstip = dstip.__iter__().next() if dstip is not None: if isinstance(pkt.payload, IPv6): x = getmacbyip6(dstip, chainCC=1) else: x = getmacbyip(dstip, chainCC=1) if x is None: x = "ff:ff:ff:ff:ff:ff" warning("Mac address to reach %s not found\n"%dstip) return MACField.i2h(self, pkt, x) def i2m(self, pkt, x): return MACField.i2m(self, pkt, self.i2h(pkt, x)) class SourceMACField(MACField): def __init__(self, name): MACField.__init__(self, name, None) def i2h(self, pkt, x): if x is None: dstip = None if isinstance(pkt.payload, IPv6): dstip = pkt.payload.dst elif isinstance(pkt.payload, IP): dstip = pkt.payload.dst elif isinstance(pkt.payload, ARP): dstip = pkt.payload.pdst if isinstance(dstip, Gen): dstip = dstip.__iter__().next() if dstip is not None: if isinstance(pkt.payload, IPv6): iff,a,nh = conf.route6.route(dstip) else: iff,a,gw = conf.route.route(dstip) try: x = get_if_hwaddr(iff) except: pass if x is None: x = "00:00:00:00:00:00" return MACField.i2h(self, pkt, x) def i2m(self, pkt, x): return MACField.i2m(self, pkt, self.i2h(pkt, x)) class ARPSourceMACField(MACField): def __init__(self, name): 
MACField.__init__(self, name, None) def i2h(self, pkt, x): if x is None: dstip = pkt.pdst if isinstance(dstip, Gen): dstip = dstip.__iter__().next() if dstip is not None:
g = (special.erf((d+dx/2.)/n) - special.erf((d-dx/2.)/n))/2. return g/dx if density else g def find_largest_coherent_region(a): """ Find the largest coherent region in a 2D array. This is basically a wrapper for `scipy.ndimage.label`_ that associates adjacent pixels (including diagonally) into groups. The largest group is determined and a boolean array is returned that selects those pixels associated with that group. Args: a (`numpy.ndarray`_): A 2D array passed directly to `scipy.ndimage.label`_. Pulled from that documentation: "Any non-zero values in input are counted as features and zero values are considered the background." Perferrably this is an integer array. Returns: `numpy.ndarray`_: Boolean array with the same shape as the input that selects pixels that are part of the largest coherent group. """ labels, n = ndimage.label(a, structure=np.ones((3,3), dtype=int)) if n == 1: return labels == 1 # Only keep the largest coherent structure uniq_labels, npix = np.unique(labels, return_counts=True) indx = uniq_labels != 0 return labels == uniq_labels[indx][np.argmax(npix[indx])] def equal_shape(arr1, arr2, fill_value=0): ''' Take two 2D arrays and pad them to make them the same shape Args: arr1 (`numpy.ndarray`_): 2D arrays that will be padded to be the same shape arr2 (`numpy.ndarray`_): 2D arrays that will be padded to be the same shape fill_value (:obj:`float`, optional): Fill value for the padding Returns: :obj:`tuple`: Tuple of `numpy.ndarray`_ objects that are padded versions of the input arrays ''' #check for non 2D arrays if arr1.ndim != 2 or arr2.ndim != 2: raise ValueError('Can only accept 2D arrays') #trivial case if arr1.shape == arr2.shape: return arr1, arr2 #iterate through axes to pad each one appropriately for i in range(arr1.ndim): #figure out which array is smaller on this axis if arr1.shape[i] < arr2.shape[i]: smaller = arr1 bigger = arr2 order = 'fwd' elif arr1.shape[i] > arr2.shape[i]: smaller = arr2 bigger = arr1 order = 'rev' else: continue 
#add padding until appropriate size while smaller.shape[i] != bigger.shape[i]: fill = np.full((1,smaller.shape[1-i]), fill_value) if i: fill = fill.T #odd size difference if (bigger.shape[i] - smaller.shape[i])%2: smaller = np.concatenate([smaller, fill], axis=i) #even size difference else: smaller = np.concatenate([fill, smaller, fill], axis=i) if order == 'fwd': arr1, arr2 = [smaller, bigger] elif order == 'rev': arr2, arr1 = [smaller, bigger] return arr1, arr2 def trim_shape(arr1, arr2, fill_value=0): ''' Take one 2D array and make it the same shape as the other through trimming and padding Args: arr1 (`numpy.ndarray`_): 2D array to be reshaped arr2 (`numpy.ndarray`_): 2D array with target shape fill_value (:obj:`float`, optional): Fill value for the padding Returns: `numpy.ndarray`_: reshaped version of `arr1` with dimensions of `arr2` ''' #check for non 2D arrays if arr1.ndim != 2 or arr2.ndim != 2: raise ValueError('Can only accept 2D arrays') #trivial case if arr1.shape == arr2.shape: return arr1 #iterate through axes to figure out which need to be padded/trimmed for i in range(arr1.ndim): #if smaller, pad the array until appropriate size while arr1.shape[i] < arr2.shape[i]: fill = np.full((1, arr1.shape[1-i]), fill_value) if i: fill = fill.T #odd size difference if (arr2.shape[i] - arr1.shape[i])%2: arr1 = np.concatenate([arr1, fill], axis=i) #even size difference else: arr1 = np.concatenate([fill, arr1, fill], axis=i) #if bigger, trim down the outside while arr1.shape[i] > arr2.shape[i]: #odd size difference if (arr1.shape[i] - arr2.shape[i])%2: arr1 = arr1.take(range(arr1.shape[i]-1),i) #even size difference else: arr1 = arr1.take(range(arr1.shape[i]-1),i) return arr1 def gaussian_fill(img, sigma=1., mask=None, threshold=0.1, maxiter=None, debug=False): """ Fill masked image regions by Gaussian smoothing the valid pixels. Args: img (`numpy.ndarray`_, `numpy.ma.MaskedArray`_): Image to fill. 
If a `numpy.ndarray`_ and ``mask`` is None, a warning is issued and a copy of the input array is returned. sigma (:obj:`float`, optional): The sigma of the circular smoothing kernel. mask (`numpy.ndarray`_, optional): The image mask. Can be None if the input is a `numpy.ma.MaskedArray`_. If None and the input image is a `numpy.ndarray`_, a warning is issued and a copy of the input array is returned. threshold (:obj:`float`, optional): Minimum fraction of a pixel contributing to a masked pixel to be used when replacing the input value. maxiter (:obj:`int`, optional): Maximum number of smooth-replace iterations. If None, the iterations will continue until all masked pixels within the convex hull of the unmasked input pixels are filled. debug (:obj:`bool`, optional): Show plots as the function progresses for debugging Returns: `numpy.ndarray`_: The filled image. """ # Check input if maxiter is not None and maxiter < 1: raise ValueError('Provided maxiter must be None or >0.') # Set image _img = img.copy() if isinstance(img, np.ma.MaskedArray) \ else np.ma.MaskedArray(img.copy(), mask=mask) if mask is not None: _img[mask] = np.ma.masked if not np.any(np.ma.getmaskarray(_img)): # Nothing masked, so return warnings.warn('Input image is not masked. Returning copy of input image data.') return _img.data.copy() # Get the coordinates of the image pixels x, y = np.meshgrid(np.arange(_img.shape[1]), np.arange(_img.shape[0])) # Select the masked pixels bpm = np.ma.getmaskarray(_img) bcoo = np.column_stack((x[bpm], y[bpm])) # ... 
and the unmasked ones gpm = np.logical_not(bpm) gcoo = np.column_stack((x[gpm], y[gpm])) # Get the polygon defining the convex hull of the unmasked pixels hull = spatial.ConvexHull(gcoo).vertices if debug: pyplot.imshow(_img, origin='lower') pyplot.plot(gcoo[hull,0], gcoo[hull,1], color='C3') pyplot.title('Input') pyplot.show() # Iteratively fill the masked input pixels niter = 0 while np.any(geometry.point_inside_polygon(gcoo[hull], bcoo)) \ and (maxiter is None or niter < maxiter): niter += 1 # Convolve the image fimg = ndimage.gaussian_filter(_img.filled(0.0), sigma) # ... and its normalization mimg = ndimage.gaussian_filter(-np.ma.getmaskarray(_img).astype(float)+1, sigma) # Select the input masked pixels that have a normalization above the # threshold _gpm = np.ma.getmaskarray(_img) & (mimg > threshold) # Fill those pixels _img[_gpm] = fimg[_gpm] / mimg[_gpm] if debug: pyplot.imshow(_img, origin='lower') pyplot.plot(gcoo[hull,0], gcoo[hull,1], color='C3') pyplot.title(f'Iteration {niter}') pyplot.show() # Update the coordinates of the remaining masked pixels bpm = np.ma.getmaskarray(_img) bcoo = np.column_stack((x[bpm], y[bpm])) return _img.filled(0.0) def fig2data(fig): """ Convert a figure to an ARGB array. Stolen from somewhere on StackOverflow. Args: fig (`matplotlib.figure.Figure`): Figure to be converted into an ARGB array. Returns: `numpy.ndarray`_: ARGB array representing figure """ # draw the renderer fig.canvas.draw( ) # Get the RGBA buffer from the figure h,w = fig.canvas.get_width_height() buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) buf.shape = (w, h, 4) # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode buf = np.roll(buf, 3, axis=2) return buf def unpack(params, args, jump=None, bound=False, relative_pab=False): """ Utility function to carry around a bunch of values in the Bayesian fit. 
Takes all of the parameters that are being fit and turns them from a long and poorly organized tuple into an easily accessible dictionary that allows for much easier access to the values. Args: params (:obj:`tuple`): Tuple of parameters that are being fit. Assumes the standard order of parameters constructed in :func:`nirvana.fitting.fit`. args (:class:`~nirvana.data.fitargs.FitArgs`): Object containing all of the data and settings needed for the galaxy. jump (:obj:`int`, optional): How many indices to jump between different velocity components (i.e. how many bins there are). If not given, it will just determine this from `args.edges`. relative_pab (:obj:`bool`, optional): Whether to define the second order position angle relative to the first order position angle (better for fitting) or absolutely (better for output). Returns: :obj:`dict`: Dictionary with keys for inclination `inc`, first order position angle `pa`, second order position angle `pab`, systemic velocity `vsys`, x and y center coordinates `xc` and `yc`, `np.ndarray`_ of first order tangential velocities `vt`, `np.ndarray`_ objects of second order tangential and radial velocities `v2t` and `v2r`, and `np.ndarray`_ of velocity dispersions `sig`. Arrays have lengths that are the same as the number of bins (determined automatically or from `jump`). All angles are in degrees and all velocities must be in consistent units. """ paramdict = {} #global parameters with and without center paramdict['xc'], paramdict['yc'] = [0,0] if args.nglobs == 4: paramdict['inc'], paramdict['pa'], paramdict['pab'], paramdict['vsys'] = params[:args.nglobs] elif args.nglobs == 6: paramdict['inc'], paramdict['pa'], paramdict['pab'], paramdict['vsys'], paramdict['xc'], paramdict['yc'] = params[:args.nglobs] #adjust pab if necessary #if not relative_pab: # paramdict['pab'] = (paramdict['pab'] + paramdict['pa']) % 360 #figure out what indices to
<reponame>Matchoc/stockmarketpy import sys import os os.environ["path"] = os.path.dirname(sys.executable) + ";" + os.environ["path"] import glob import operator import datetime import dateutil.relativedelta import win32gui import win32ui import win32con import win32api import numpy import json import csv import xml.etree.ElementTree as ET import urllib.request import urllib.error import scipy.ndimage import multiprocessing import nltk import matplotlib.pyplot as plt from languageprocessing import * from datageneration import * from nltk.sentiment.vader import SentimentIntensityAnalyzer from sklearn.externals import joblib from time import strftime from time import sleep from PIL import Image from sklearn import svm from sklearn.neural_network import MLPRegressor from sklearn.preprocessing import StandardScaler from sklearn.metrics import label_ranking_average_precision_score #import feedparser # seem nice, doesn't import (crash on 'category' key doesn't exist error) MACHINE_NEWS = None SCALER_NEWS = None def set_skip_symbol(x): global SKIP_SYMBOL SKIP_SYMBOL = x PRINT_LEVEL=1 def myprint(msg, level=0): if (level >= PRINT_LEVEL): sys.stdout.buffer.write((str(msg) + "\n").encode('UTF-8')) class MLModelError(Exception): def __init__(self, error_msg, level): self.msg = error_msg self.lvl = level def sort_dict(v, asc=True): if asc: sorted_dict = sorted(v.items(), key=operator.itemgetter(1)) return sorted_dict else: pass def save_machine(): joblib.dump(MACHINE_NEWS, 'machine_news.save') joblib.dump(SCALER_NEWS, 'scaler_news.save') def load_machine(): global MACHINE_NEWS global SCALER_NEWS MACHINE_NEWS = joblib.load('machine_news.save') SCALER_NEWS = joblib.load('scaler_news.save') def get_news_date(news): if type(news) is dict: pubdatestr = news["pubDate"] else: pubdatestr = news result = datetime.datetime.strptime(pubdatestr, '%a, %d %b %Y %H:%M:%S %Z') return result def utc_to_local(utc_dt): return utc_dt.replace(tzinfo=datetime.timezone.utc).astimezone(tz=None) def 
# NOTE(review): this chunk was whitespace-mangled in the source; reformatted to
# conventional indentation. The leading "def" of process_news was cut off by the
# chunk boundary and has been reconstructed (body was fully visible).
# The trailing, truncated `updateTraining` fragment is omitted.

def process_news(news, stopwords, filename):
    """Tokenize a news title (plus downloaded article text, if any), drop
    stopwords and persist the word counts next to the article file.

    Args:
        news: headline text.
        stopwords: stopword collection consumed by remove_stopwords.
        filename: path of the downloaded article body; "" when none.
    """
    newscontent = " "
    # BUG FIX: original used `filename is not ""` (identity comparison with a
    # literal); use value inequality instead.
    if filename != "":
        newscontent += get_important_text_from_news(filename)
    word_dict = extract_words(news + newscontent)
    remove_stopwords(word_dict, stopwords)
    save_word_dict(word_dict, filename + ".words")


def process_all_news(symbol):
    """Run process_news over every article listed in the symbol's news JSON."""
    stop_words = load_stopwords('./stopwords.txt')
    with open(get_news_json_path(symbol), 'r') as jsonfile:
        newslist = json.load(jsonfile)
    for news in newslist:
        title = news["title"]
        content = ""
        if "contents" in news and news["contents"] is not None:
            content = news["contents"]
        process_news(title, stop_words, content)


def generate_word_counts():
    """Aggregate all *.words files under DATA_FOLDER into allwords.json."""
    wordglob = os.path.join(DATA_FOLDER, "**", "*.words")
    wordfiles = glob.glob(wordglob)
    all_words = count_all_words(wordfiles)
    cleanup_all_words(all_words)
    allwordspath = os.path.join(DATA_FOLDER, "allwords.json")
    with open(allwordspath, 'w') as fo:
        json.dump(all_words, fo, sort_keys=True, indent=4, separators=(',', ': '))
    return all_words


def gen_news_x(symbol, news):
    """Build the feature vector for a single news item.

    Returns a one-element list [x]: price-derived base features followed by
    per-word counts ordered by the sorted global vocabulary.

    Raises:
        MLModelError: when the downloaded article body is empty.
    """
    allwordspath = os.path.join(DATA_FOLDER, "allwords.json")
    with open(allwordspath, 'r') as jsonfile:
        allwords = json.load(jsonfile)
    newswordspath = news["contents"] + ".words"
    # skip news we couldn't download
    with open(news["contents"], 'rb') as testfo:
        text = testfo.read()
    if len(text) <= 0:
        raise MLModelError("[" + symbol + "] news " + news["contents"] +
                           " download empty for " + news["title"] +
                           " ( " + news["pubDate"] + " )", 1)
    with open(newswordspath, 'r') as jsonfile:
        newswords = json.load(jsonfile)
    sortedX = sorted(allwords.keys())
    x = get_base_X(symbol, news)
    for key in sortedX:
        count = 0
        if key in newswords:
            count += newswords[key]
        # (binary-presence variant kept disabled, as in the original)
        # if count > 0:
        #     x.append(1)
        # else:
        #     x.append(0)
        x.append(count)
    return [x]


def gen_allnews_x(symbol, allnews):
    """Build one feature vector for ALL news sharing a date: base features of
    the first item plus summed word counts across the group.

    Raises:
        MLModelError: when no item in the group has downloadable content.
    """
    allwordspath = os.path.join(DATA_FOLDER, "allwords.json")
    with open(allwordspath, 'r') as jsonfile:
        allwords = json.load(jsonfile)
    sortedX = sorted(allwords.keys())
    x = get_base_X(symbol, allnews[0])
    x_words = [0] * len(sortedX)
    valid_news = False
    for news in allnews:
        newswordspath = news["contents"] + ".words"
        # skip news we couldn't download
        with open(news["contents"], 'rb') as testfo:
            text = testfo.read()
        if len(text) <= 0:
            continue
        with open(newswordspath, 'r') as jsonfile:
            newswords = json.load(jsonfile)
        count = 0
        for key in sortedX:
            if key in newswords:
                x_words[count] += newswords[key]
                # x_words[count] = 1
            count += 1
        valid_news = True
    x = x + x_words
    if valid_news:
        return [x]
    raise MLModelError("[" + symbol + "] No news content for any of all " +
                       str(len(allnews)) + " news at date " +
                       allnews[0]["pubDate"], 1)


def get_base_X(symbol, news):
    """Compute the price-history features (averages, returns, std, streak)
    relative to the news date."""
    prices = get_price_json(symbol)
    prev_close_price = get_today_previous_close_price(symbol, news, prices)
    avg_price_week = calculate_average_price_over_time(symbol, news, datetime.timedelta(weeks=1), prices)
    avg_price_month = calculate_average_price_over_time(symbol, news, datetime.timedelta(weeks=4), prices)
    avg_price_year = calculate_average_price_over_time(symbol, news, datetime.timedelta(weeks=52), prices)
    days_up = get_num_days_up(symbol, news, prices)
    avg_return_week = calculate_return_over_time(symbol, news, datetime.timedelta(weeks=1), prices)
    avg_return_month = calculate_return_over_time(symbol, news, datetime.timedelta(weeks=4), prices)
    avg_return_year = calculate_return_over_time(symbol, news, datetime.timedelta(weeks=52), prices)
    std_week = calculate_std(symbol, news, datetime.timedelta(weeks=1), prices)
    std_month = calculate_std(symbol, news, datetime.timedelta(weeks=4), prices)
    std_year = calculate_std(symbol, news, datetime.timedelta(weeks=52), prices)
    x = [prev_close_price, avg_price_week, avg_price_month, avg_price_year,
         days_up, avg_return_week, avg_return_month, avg_return_year]
    x += [std_week, std_month, std_year]
    myprint(symbol + " : " + news["title"] +
            " prev close, avg week, avg month, avg year = " + str(x))
    return x


def get_price_date(pricejson, lookupdate):
    """Return the price record for a date, or None when absent."""
    pricedatefmt = lookupdate.strftime("%Y-%m-%d")
    if pricedatefmt in pricejson:
        return pricejson[pricedatefmt]
    return None


def get_num_days_up(symbol, news, pricejson=None):
    """Length of the up/down streak preceding the news date (negative when
    the streak is a down streak).

    Raises:
        MLModelError: when no valid market dates can be found.
    """
    newsdate = get_news_date(news)
    if pricejson is None:
        pricejson = get_price_json(symbol)
    prev_day2 = get_previous_valid_market_date(newsdate, pricejson)
    if prev_day2 is not None:
        price2 = get_price_date(pricejson, prev_day2)
    else:
        raise MLModelError("[" + symbol + "] Could not find any valid date in get_num_days_up for news : " + news["title"], 1)
    prev_day = get_previous_valid_market_date(prev_day2, pricejson)
    if prev_day is not None:
        price = get_price_date(pricejson, prev_day)
    else:
        raise MLModelError("[" + symbol + "] Could not find any valid date in get_num_days_up for news : " + news["title"], 1)
    if price is None or price2 is None:
        raise MLModelError("[" + symbol + "] Could not find any valid date in get_num_days_up for news : " + news["title"], 1)
    isPositive = price2["Adj Close"] - price["Adj Close"] >= 0
    curPositive = price2["Adj Close"] - price["Adj Close"] >= 0
    count = 1
    last_price = price
    # walk backwards (up to a year) while the daily direction matches
    while (price is None and (newsdate - prev_day).days < 365) or (isPositive == curPositive):
        prev_day = prev_day - datetime.timedelta(days=1)
        price = get_price_date(pricejson, prev_day)
        if price is not None:
            count += 1
            curPositive = last_price["Adj Close"] - price["Adj Close"] >= 0
            last_price = price
        else:
            curPositive = not isPositive
    if not isPositive:
        count = count * -1
    return count


def calculate_average_price_over_time(symbol, news, delta, prices=None):
    """Average Adj Close over the `delta` window preceding the news date.

    Raises:
        MLModelError: when the window contains no price data.
    """
    if prices is None:
        prices = get_price_json(symbol)
    news_date = get_news_date(news)
    start_date = news_date - delta
    cur_date = start_date
    avg_close_price = 0
    count = 0
    while cur_date < news_date:
        pricedatefmt = cur_date.strftime("%Y-%m-%d")
        if pricedatefmt in prices:
            avg_close_price += prices[pricedatefmt]["Adj Close"]
            count += 1
        cur_date += datetime.timedelta(days=1)
    if count > 0:
        avg_close_price = avg_close_price / count
    else:
        raise MLModelError("[" + symbol + "] No price data for average price before " +
                           news["title"] + " ( " + news["pubDate"] + " )", 1)
    return avg_close_price


def calculate_return_over_time(symbol, news, delta, prices=None):
    """Adj Close difference between the last and first trading days in the
    `delta` window preceding the news date.

    Raises:
        MLModelError: when no trading day falls inside the window.
    """
    if prices is None:
        prices = get_price_json(symbol)
    news_date = get_news_date(news)
    start_date = news_date - delta
    cur_date = start_date
    oldest_price = None
    newest_price = None
    while cur_date < news_date:
        pricedatefmt = cur_date.strftime("%Y-%m-%d")
        if pricedatefmt in prices:
            if oldest_price is None:
                oldest_price = prices[pricedatefmt]["Adj Close"]
            newest_price = prices[pricedatefmt]["Adj Close"]
        cur_date += datetime.timedelta(days=1)
    if oldest_price is None or newest_price is None:
        raise MLModelError("[" + symbol + "] Can't find oldest/newest price for delta of " + str(delta), 1)
    return newest_price - oldest_price


def calculate_std(symbol, news, delta, prices=None):
    """Population standard deviation of daily returns over the `delta`
    window preceding the news date.

    Raises:
        MLModelError: when the window contains no price data.
    """
    if prices is None:
        prices = get_price_json(symbol)
    news_date = get_news_date(news)
    start_date = news_date - delta
    cur_date = start_date
    num_dates = 0
    avg_return = 0
    while cur_date < news_date:
        pricedatefmt = cur_date.strftime("%Y-%m-%d")
        if pricedatefmt in prices:
            num_dates += 1
            # BUG FIX: original assigned (`=`) here, so avg_return always
            # stayed 0; accumulate the daily returns instead.
            avg_return += prices[pricedatefmt]["Adj Close"] - get_previous_close_price(cur_date, prices)
        cur_date += datetime.timedelta(days=1)
    if num_dates == 0:
        raise MLModelError("[" + symbol + "] Can't calculate STD, couldn't find any prices for " + news["title"], 1)
    avg_return = avg_return / num_dates
    cur_date = start_date
    sum_variation = 0
    while cur_date < news_date:
        pricedatefmt = cur_date.strftime("%Y-%m-%d")
        if pricedatefmt in prices:
            # BUG FIX: original overwrote (`=`) instead of accumulating, so
            # only the last day's squared deviation was kept.
            sum_variation += ((prices[pricedatefmt]["Adj Close"] -
                               get_previous_close_price(cur_date, prices)) - avg_return)**2
        cur_date += datetime.timedelta(days=1)
    stdvariation = (sum_variation / num_dates)**(0.5)
    return stdvariation


def get_valid_market_date(newsdate):
    """Map a news timestamp to the trading day it affects (after-close news
    rolls to the next day; weekends roll to Monday)."""
    offset = 0
    if newsdate.time().utcoffset() is not None:
        # BUG FIX: utcoffset() returns a timedelta; the original assigned it
        # directly to `offset` and then did `offset -= 5`, which raises
        # TypeError. Convert to whole hours first.
        offset = int(newsdate.time().utcoffset().total_seconds() // 3600)
    offset -= 5  # toronto stock time zone -- TODO confirm intended fixed offset
    finaldate = newsdate + datetime.timedelta(hours=offset)
    if finaldate.hour >= 16:
        finaldate = finaldate + datetime.timedelta(days=1)
        myprint(str(newsdate) + " is after 16 so going to use : " + str(finaldate), 0)
    if finaldate.weekday() == 5 or finaldate.weekday() == 6:
        myprint(str(finaldate) + " is a " + str(finaldate.weekday()), 0)
        finaldate = finaldate + dateutil.relativedelta.relativedelta(weekday=dateutil.relativedelta.MO(1))
    myprint("final date = " + str(finaldate), 0)
    return finaldate


def get_previous_valid_market_date(cur_date, prices):
    """Most recent date strictly before cur_date with a price entry, looking
    back at most a year; None when none exists."""
    prev_day = cur_date - datetime.timedelta(days=1)
    end_search = cur_date - datetime.timedelta(days=365)
    pricedatefmt = prev_day.strftime("%Y-%m-%d")
    while pricedatefmt not in prices and prev_day > end_search:
        prev_day = prev_day - datetime.timedelta(days=1)
        pricedatefmt = prev_day.strftime("%Y-%m-%d")
    if pricedatefmt not in prices:
        # Should error ?
        return None
    return prev_day


def get_today_previous_close_price(symbol, news, prices=None):
    """Adj Close of the trading day preceding the news' effective date.

    Raises:
        MLModelError: when no earlier priced day exists.
    """
    if prices is None:
        prices = get_price_json(symbol)
    result = get_news_date(news)
    result = get_valid_market_date(result)
    result = result - datetime.timedelta(days=1)
    if result.weekday() == 5 or result.weekday() == 6:
        result = result + dateutil.relativedelta.relativedelta(weekday=dateutil.relativedelta.FR(-1))
    pricedatefmt = result.strftime("%Y-%m-%d")
    try:
        while pricedatefmt not in prices:
            result = result - datetime.timedelta(days=1)
            pricedatefmt = result.strftime("%Y-%m-%d")
    except OverflowError as e:
        raise MLModelError("[" + symbol + "] No previous price day for " + news["title"] +
                           " ( " + news["pubDate"] + " )", 1)
    if pricedatefmt in prices:
        final_price = prices[pricedatefmt]["Adj Close"]
    else:
        raise MLModelError("[" + symbol + "] No previous price day for " + news["title"] +
                           " ( " + news["pubDate"] + " )", 1)
    return final_price


def gen_news_y(symbol, news):
    """Target value: Adj Close change on the news' effective trading day.

    Raises:
        MLModelError: when no price exists for that day.
    """
    # sample : "Fri, 16 Dec 2016 16:18:35 GMT"
    result = get_news_date(news)
    result = get_valid_market_date(result)
    csvpath = get_price_csv_path(symbol)
    jsonpath = csvpath.replace(".csv", ".json")
    with open(jsonpath, 'r') as jsonfile:
        prices = json.load(jsonfile)
    pricedatefmt = result.strftime("%Y-%m-%d")
    # pricedatefmt = str(year) + "-" + str(month) + "-" + str(day)
    if pricedatefmt in prices:
        price = prices[pricedatefmt]
        y = (price["Adj Close"] - get_previous_close_price(result, prices))  # / price["Open"]
        return y
    raise MLModelError("[" + symbol + "] price not found for " + news["title"] +
                       " ( " + pricedatefmt + " )", 1)


def group_news_by_date(allnews):
    """Bucket news items by their effective trading date (YYYY-MM-DD)."""
    results = {}
    for news in allnews:
        newsdate = get_news_date(news)
        newsdate = get_valid_market_date(newsdate)
        pricedatefmt = newsdate.strftime("%Y-%m-%d")
        if pricedatefmt not in results:
            results[pricedatefmt] = []
        results[pricedatefmt].append(news)
    return results


def updateTraining_by_date(symbol):
    """Build and persist the per-date training matrix (X, y) for a symbol."""
    newspath = get_news_json_path(symbol)
    with open(newspath, 'r') as jsonfile:
        allnews = json.load(jsonfile)
    all_x = []
    all_y = []
    all_news = []
    failedx = 0
    failedy = 0
    news_by_date = group_news_by_date(allnews)
    for key in news_by_date:
        try:
            y = gen_news_y(symbol, news_by_date[key][0])
            x = gen_allnews_x(symbol, news_by_date[key])
            all_x += x
            all_y.append(y)
            all_news.append(news_by_date[key])  # useful for debugging
        except MLModelError as e:
            failedx += 1
            myprint(e.msg, e.lvl)
        myprint("[" + symbol + "] processed " + news_by_date[key][0]["pubDate"] +
                " with " + str(len(news_by_date[key])) + " news", 0)
    myprint("Failed to load " + str(failedx) + " news on a list of " +
            str(len(news_by_date)) + " dates", 2)
    results = {}
    results["X"] = all_x
    results["y"] = all_y
    results["news"] = all_news
    with open(get_training_json(symbol), 'w') as fo:
        json.dump(results, fo, sort_keys=True, indent=4, separators=(',', ': '))
    return all_x, all_y

# NOTE(review): the original `updateTraining` was truncated mid-statement by the
# chunk boundary and is omitted here rather than guessed at.
import pytest
import os
from pathlib import Path
import shutil
import pandas as pd

from wrftamer.main import project, list_projects, list_unassociated_exp, reassociate

# -----------------------------------------------------------------------
# Trivial tests for nonexistent project
# -----------------------------------------------------------------------


def test_nonexisting_project(base_test_env):
    # Instantiating the class does not create anything on disk.
    test = project("some_random_name")

    # Every operation on a project that was never created must raise.
    with pytest.raises(FileNotFoundError):
        test.remove(force=True)

    with pytest.raises(FileNotFoundError):
        test.rename("some_random_name2")

    with pytest.raises(FileNotFoundError):
        test.disk_use()

    with pytest.raises(FileNotFoundError):
        test.exp_remove("Some_Random_Exp_Name", force=True)

    with pytest.raises(FileNotFoundError):
        test.exp_rename("Some_Random_Exp_Name", "Some_Random_Exp_Name2")

    with pytest.raises(FileNotFoundError):
        test.list_exp()

    # Adding an experiment is the one exception: the project is created
    # on the fly, so this must succeed.
    configfile = os.path.split(os.path.realpath(__file__))[0] + "/resources/configure_test.yaml"
    test.exp_create("Some_Random_Exp_Name", "Some_Random_Comment", configfile)
    test.remove(force=True, verbose=False)


# -----------------------------------------------------------------------
# Function Tests
# -----------------------------------------------------------------------


def test_list_projects(testprojects):
    # Listing all projects must not fail, even with zero projects.
    list_projects(verbose=True)


def test_list_unassociated_exp(testprojects):
    # With no unassociated experiments this is simply an empty list.
    res = list_unassociated_exp(verbose=True)
    assert len(res) == 0


# -----------------------------------------------------------------------
# Tests for real project
# -----------------------------------------------------------------------


def test_project(testprojects):
    testproject1 = testprojects[0]
    proj_name2 = "NEW_NAME"

    # Re-creating an existing project must fail.
    with pytest.raises(FileExistsError):
        testproject1.create()

    # ------------------------------------------------------
    # Renaming must work, and the object's attributes must follow the new
    # name (later tests rely on this).
    testproject1.rename(proj_name2)

    if not (
        testproject1.name == proj_name2
        and Path(testproject1.proj_path).stem == proj_name2
        and Path(testproject1.tamer_path).stem == proj_name2
    ):
        raise ValueError

    # The renamed directories must exist on disk.
    if not os.path.isdir(testproject1.proj_path):
        raise FileNotFoundError
    if not os.path.isdir(testproject1.tamer_path):
        raise FileNotFoundError

    # ------------------------------------------------------
    # Disk-usage query must work.
    testproject1.disk_use()

    # ------------------------------------------------------
    # runtimes() is not implemented yet.
    with pytest.raises(NotImplementedError):
        testproject1.runtimes()

    # ------------------------------------------------------
    # Listing experiments must work (none created yet).
    testproject1.list_exp()

    # Rewriting the xls sheet is a no-op without experiments, but must run.
    testproject1.rewrite_xls()

    # Damaged project: proj_path gone, tamer_path intact.
    shutil.rmtree(testproject1.proj_path)
    with pytest.raises(FileNotFoundError):
        testproject1.rename(proj_name2)

    # Removal must still work on the broken project...
    testproject1.remove(force=True)

    # ...but a second removal must fail.
    with pytest.raises(FileNotFoundError):
        testproject1.remove(force=True)

    # Recreate so the fixture teardown does not fail.
    testproject1.create(proj_name2)


# -----------------------------------------------------------------------
# Project-Project Interaction
# -----------------------------------------------------------------------


def test_proj_rename3(testprojects):
    testproject1, testproject2 = testprojects

    # Renaming onto an existing project name must fail.
    with pytest.raises(FileExistsError):
        testproject1.rename(testproject2.name)


def test_reassociate(testprojects):
    testproject1, testproject2 = testprojects
    exp_name1 = "TEST1"
    exp_name2 = "TEST2"
    exp_name3 = "TEST3"
    configfile = (
        os.path.split(os.path.realpath(__file__))[0]
        + "/resources/configure_test.yaml"
    )

    # ------------------------------------------------------
    # Populate both projects with experiments.
    testproject1.exp_create(exp_name1, "First Experiment", configfile)
    testproject1.exp_create(exp_name3, "Third Experiment", configfile)
    testproject2.exp_create(exp_name1, "First Experiment", configfile)
    testproject2.exp_create(exp_name2, "Second Experiment", configfile)

    # exp_name1 already lives in testproject2, so the move must fail.
    with pytest.raises(FileExistsError):
        reassociate(testproject1, testproject2, exp_name1)

    # Moving exp_name3 must work and change both listings.
    reassociate(testproject1, testproject2, exp_name3)

    list1 = testproject1.list_exp(verbose=False)
    list2 = testproject2.list_exp(verbose=False)

    assert len(list1) == 1
    assert len(list2) == 3
    if exp_name3 not in list2 or exp_name2 not in list2:
        raise ValueError


# =======================================================================
# Experiment Tests (with and without project)
# =======================================================================
# -----------------------------------------------------------------------
# Tests for experiment with and without project
# -----------------------------------------------------------------------


@pytest.mark.config_req
def test_experiment_creation_thoroughly(testprojects):
    """
    Also checks if all files linked and created are really there.
    """
    testproject1 = testprojects[0]
    proj_name1 = testproject1.name
    exp_name1 = "TEST1"
    configfile = os.path.split(os.path.realpath(__file__))[0] + "/resources/my_configure_test.yaml"

    proj = project(proj_name1)
    proj.exp_create(exp_name1, "some comment", configfile)

    # Verify the full experiment directory tree exists.
    exp_path = proj.proj_path / exp_name1
    list_of_expected_dirs = [
        exp_path,
        exp_path / "log",
        exp_path / "out",
        exp_path / "plot",
        exp_path / "wrf",
    ]
    if proj.make_submit:
        list_of_expected_files = [
            exp_path / "submit_real.sh",
            exp_path / "submit_wrf.sh",
            exp_path / "configure.yaml",
        ]
    else:
        list_of_expected_files = [exp_path / "configure.yaml"]

    missing = [tmp for tmp in list_of_expected_dirs if not os.path.isdir(tmp)]
    missing += [tmp for tmp in list_of_expected_files if not os.path.isfile(tmp)]

    if len(missing) > 0:
        print("test_create: Missing files or directories!")
        for item in missing:
            print(item)
        raise FileNotFoundError

    # Verify that every expected symlink exists and is in fact a link.
    expected_links = [
        "g1print.exe", "g2print.exe", "geogrid.exe", "metgrid.exe",
        "ndown.exe", "real.exe", "tc.exe", "ungrib.exe", "wrf.exe",
        "GENPARM.TBL", "GEOGRID.TBL", "HLC.TBL", "LANDUSE.TBL",
        "METGRID.TBL", "MPTABLE.TBL", "SOILPARM.TBL", "URBPARM.TBL",
        "VEGPARM.TBL", "ozone.formatted", "ozone_lat.formatted",
        "ozone_plev.formatted", "RRTM_DATA", "RRTMG_LW_DATA",
        "RRTMG_SW_DATA", "aux_file.txt", "link_grib.csh",
        "namelist.wps", "tslist", "Vtable",
    ]

    missing = [
        link
        for link in expected_links
        if not (os.path.exists(exp_path / f"wrf/{link}") and os.path.islink(exp_path / f"wrf/{link}"))
    ]

    if len(missing) > 0:
        print("test_create: Problems encounterd with links")
        for item in missing:
            print(item)
        raise FileNotFoundError


def experiment_checks(proj_name1, exp_name1):
    exp_name2 = "TEST2"
    exp_name3 = "TEST3"
    configfile = os.path.split(os.path.realpath(__file__))[0] + '/resources/configure_test.yaml'

    proj = project(proj_name1)
    proj.exp_create(exp_name1, "some comment", configfile)

    # ------------------------------------------------------
    # Creating the same experiment twice must fail.
    with pytest.raises(FileExistsError):
        proj.exp_create(exp_name1, "some comment", configfile)

    # ------------------------------------------------------
    # Copying from an experiment that never existed must fail.
    with pytest.raises(FileNotFoundError):
        proj.exp_copy(
            "some random name", exp_name2, "some comment"
        )  # exp_name2 exists now

    # Copying from a real experiment must work.
    workdir = proj.get_workdir(exp_name1)

    # Small testfile for code coverage.
    with open(workdir / "wrf/OBS_DOMAIN101", "w") as f:
        f.write("data")

    proj.exp_copy(exp_name1, exp_name2, "some comment")  # exp_name2 exists now

    # ------------------------------------------------------
    # Copying onto an existing name must fail.
    with pytest.raises(FileExistsError):
        proj.exp_copy(exp_name1, exp_name2, "some comment")  # exp_name2 exists now

    # Renaming an experiment must work...
    proj.exp_rename(exp_name1, exp_name3)

    # ...but renaming onto an existing name must fail.
    with pytest.raises(FileExistsError):
        proj.exp_rename(exp_name3, exp_name2)

    # The following queries must all run without error.
    proj.exp_runtime(exp_name1)
    proj.exp_du(exp_name1, verbose=True)
    proj.exp_start_end(exp_name1, verbose=True)
    proj.exp_list_tslocs(exp_name1, verbose=True)
    proj.exp_get_maxdom_from_config(exp_name1)

    # Database updates (both branches).
    proj.update_xlsx()
    proj.rewrite_xls()

    # Removing an experiment must work.
    proj.exp_remove(exp_name1, force=True)
    proj._determine_status(exp_name2)


def test_experiment_without_project(unassociated_exps):
    proj_name1 = None
    exp_name1 = "TEST1"

    # No unassociated experiments expected at this point.
    res = list_unassociated_exp(verbose=True)
    assert len(res) == 0

    experiment_checks(proj_name1, exp_name1)


def test_experiment_with_project(testprojects):
    testproject1 = testprojects[0]
    proj_name1 = testproject1.name
    exp_name1 = "TEST1"

    # ------------------------------------------------------
    # Listing projects must work.
    res = list_projects(verbose=True)
    assert len(res) == 2

    experiment_checks(proj_name1, exp_name1)


def test_experiment_with_project2(testprojects):
    # Same as above, but with make_submit=False.
    testproject1 = testprojects[0]
    testproject1.make_submit = False
    proj_name1 = testproject1.name
    exp_name1 = "TEST1"

    # ------------------------------------------------------
    # Listing projects must work.
    res = list_projects(verbose=True)
    assert len(res) == 2

    experiment_checks(proj_name1, exp_name1)


def test_postprocessing(test_env2):
    test_proj, exp_name1 = test_env2

    # These tests need an experiment that was created and run on the
    # cluster: data ready for moving, postprocessing, archiving,
    # restarting and runtime display.

    # Moving data must work; a second move only triggers a message.
    test_proj.exp_move(exp_name1, verbose=True)
    test_proj.exp_move(exp_name1, verbose=True)

    # A fake restart file is enough for testing.
    test_proj.exp_restart(exp_name1, "wrfrst_d01_2020-05-17_03:00:00")

    with pytest.raises(NameError):
        test_proj.exp_restart(exp_name1, "wrfrst_d01_2020-05-17_03_00_00")

    # Postprocessing of tslist data must work.
    test_proj.exp_process_tslist(
        exp_name1, None, None, ["10"], True
    )  # averaging does not work?

    # Extra check now that a namelist exists.
    test_proj.exp_start_end(exp_name1, verbose=True)

    # Info queries must work.
    test_proj.exp_provide_info()
    test_proj.exp_provide_info(exp_name1)
    test_proj.exp_provide_all_info()
    test_proj.exp_provide_all_info(exp_name1)
    test_proj.exp_runtime(exp_name1, verbose=True)
    test_proj.exp_get_maxdom_from_config(exp_name1)
    test_proj.cleanup_db(verbose=True)

    # Archiving must work.
    test_proj.exp_archive(exp_name1, keep_log=False, verbose=True)

    # This just returns; the error is caught internally.
    test_proj.exp_copy(
        exp_name1, "Some_Random_Name", "some random comment", verbose=True
    )

    test_proj.exp_rename(exp_name1, "Some_New_Name", verbose=True)
    test_proj.exp_runtime("Some_New_Name")

    # Removing an archived experiment.
    test_proj.exp_remove("Some_New_Name", force=True)

    # Renaming a project that holds archived experiments must work.
    test_proj.rename("some_random_new_name", verbose=True)


def test_postprocessing2(test_env2):
    test_proj, exp_name1 = test_env2

    # configure.yaml carries no ppp info, so this call does nothing.
    test_proj.exp_run_postprocessing_protocol("TEST1", verbose=True)

    cfg = dict()
    cfg["pp_protocol"] = dict()
    cfg["pp_protocol"]["move"] = 1
    cfg["pp_protocol"]["tslist_processing"] = 1
    cfg["pp_protocol"]["create_maps"] = 1

    test_proj.exp_run_postprocessing_protocol(
        "TEST1", verbose=True, cfg=cfg
    )  # use defaults


def test_postprocessing3(test_env2):
    test_proj, exp_name1 = test_env2

    cfg = dict()
    cfg["pp_protocol"] = dict()
    cfg["pp_protocol"]["move"] = 1
    cfg["pp_protocol"]["tslist_processing"] = dict()
    cfg["pp_protocol"]["tslist_processing"]["location"] = "FINO"
    cfg["pp_protocol"]["tslist_processing"]["domain"] = "d01"
    cfg["pp_protocol"]["tslist_processing"]["timeavg"] = [10]
    cfg["pp_protocol"]["create_maps"] = dict()
    cfg["pp_protocol"]["create_maps"]["list_of_domains"] = ["d01"]
    cfg["pp_protocol"]["create_maps"]["list_of_model_levels"] = [5]
    cfg["pp_protocol"]["create_maps"]["list_of_variables"] = ["WSP"]
    cfg["pp_protocol"]["create_maps"]["store"] = ["False"]

    test_proj.exp_run_postprocessing_protocol("TEST1", verbose=True, cfg=cfg)


def test_remove_with_correct_input1(base_test_env, monkeypatch):
    # Monkeypatch "input" to return "Yes", simulating the user typing
    # "Yes" in the terminal.
    monkeypatch.setattr("builtins.input", lambda _: "Yes")

    testproject = project("WRFTAMER_TEST1")
    exp_name1 = "TEST1"

    # ------------------------------------------------------
    # adding an exp to
# NOTE(review): the original first line was stray dataset metadata
# ("<filename>w_queues/reptod_wcancel.py<gh_stars>10-100") which is a syntax
# error; it is preserved here as a comment.
import numpy as np
from patch import *
from rvs import *
from howtorep_exp import *

# #############################################  Model  ########################################## #
ET_MAX = 10000


def ar_ub_reptod_wcancel(ns, J, S):
    """Upper bound on the system arrival rate: ns servers, mean job-size
    factor E[J] and mean service factor E[S]."""
    EJ, ES = J.mean(), S.mean()
    return ns/EJ/ES


# NOTE(review): a large commented-out (triple-quoted) legacy version of
# ET_reptotwo_wcancel was removed here; see version control for its text.


def ET_reptotwo_wcancel(ns, ar, J, S):
    """M/G/1 approximation of E[T] for replicate-to-two with cancellation.

    Service model: B ~ (1-ro)*J*S2:1 if Ts < Tp, Ts > X else J*S.
    Returns None when the computed E[T] exceeds ET_MAX (unstable regime).
    """
    ar = ar/ns
    EJ, EJ2 = J.moment(1), J.moment(2)
    ES, ES2 = S.moment(1), S.moment(2)
    EV, EV2 = EJ*ES, EJ2*ES2
    Pr_rep_makes_diff = lambda ro: (1 - ro**2)/2
    # fixed-point for the server utilization ro
    eq = lambda ro: ro - ar*(Pr_rep_makes_diff(ro)*(1-ro)*EV + (1-Pr_rep_makes_diff(ro))*EV)
    ro = scipy.optimize.brentq(eq, 0.0001, 1)
    alog("ar= {}, \nro= {}".format(ar, ro))

    EB = Pr_rep_makes_diff(ro)*(1-ro)*EV + (1-Pr_rep_makes_diff(ro))*EV
    EB2 = Pr_rep_makes_diff(ro)*(1-ro)**2*EV2 + (1-Pr_rep_makes_diff(ro))*EV2
    ar *= (1-Pr_rep_makes_diff(ro))
    # Pollaczek-Khinchine mean waiting formula
    ET = EB + ar*EB2/2/(1 - ar*EB)
    return ET if ET < ET_MAX else None


def ET_ED_reptotwo_wcancel_hyperexpJ(ns, ar, J, S):
    """Variant of ET_reptotwo_wcancel for hyper-exponential J; also sets up
    Laplace-transform machinery for the response-time distribution.

    Returns None (the transform part is exploratory / logging only).
    """
    ar = ar/ns
    EJ, EJ2 = J.moment(1), J.moment(2)
    ES, ES2 = S.moment(1), S.moment(2)
    EV, EV2 = EJ*ES, EJ2*ES2
    Pr_rep_makes_diff = lambda ro: (1 - ro**2)/2
    eq = lambda ro: ro - ar*(Pr_rep_makes_diff(ro)*(1-ro)*EV + (1-Pr_rep_makes_diff(ro))*EV)
    ro = scipy.optimize.brentq(eq, 0.0001, 1)
    alog("ar= {}, \nro= {}".format(ar, ro))

    EB = Pr_rep_makes_diff(ro)*(1-ro)*EV + (1-Pr_rep_makes_diff(ro))*EV
    # NOTE(review): uses (1-ro)*EV2 where ET_reptotwo_wcancel uses
    # (1-ro)**2*EV2 -- confirm which second-moment form is intended.
    EB2 = Pr_rep_makes_diff(ro)*(1-ro)*EV2 + (1-Pr_rep_makes_diff(ro))*EV2
    # alog("EB= {}, EB2= {}".format(EB, EB2) )
    ar *= (1-Pr_rep_makes_diff(ro))
    ET = EB + ar*EB2/2/(1 - ar*EB)
    alog("ET= {}".format(ET))
    # return ET if ET < ET_MAX else None

    # Y = HyperExp(J.p_l, [mu/(1-ro) for mu in J.mu_l] )
    B_laplace = lambda s: (Pr_rep_makes_diff(ro)*(1-ro) + (1 - Pr_rep_makes_diff(ro)))*J.laplace(s)
    B_pdf = lambda t: mpmath.invertlaplace(B_laplace, t, method='talbot')
    B_moment = lambda i: scipy.integrate.quad(lambda t: t**i * B_pdf(t), 0, np.inf)
    # EB_, EB2_ = B_moment(1), B_moment(2)
    # alog("EB_= {}, EB2_= {}".format(EB_, EB2_) )

    ro = ar*EB

    def T_laplace(s):
        Bs = B_laplace(s)
        return (1 - ro)*s/(s - ar + ar*Bs)*Bs
    T_pdf = lambda t: mpmath.invertlaplace(T_laplace, t, method='talbot')
    T_moment = lambda i: scipy.integrate.quad(lambda t: t**i * T_pdf(t), 0, 500)
    # mpmath.quad(lambda t: t**i * T_pdf(t), [0, mpmath.inf] )
    # ET_ = T_moment(1)
    # alog("ET_= {}".format(ET_) )
    # for t in np.linspace(0.01, 20, 100):
    #   print("T_pdf({})= {}".format(t, T_pdf(t) ) )


def ETlb_reptotwo_wcancel(ns, ar, J, S):
    """Lower bound on E[T]; service model B ~ J*S if Ts > X else J.

    Returns None when the computed E[T] exceeds ET_MAX.
    """
    ar = ar/ns
    EJ, EJ2 = J.moment(1), J.moment(2)
    ES, ES2 = S.moment(1), S.moment(2)
    ro = ar*EJ/(1 - ar*EJ*ES + ar*EJ)
    alog("ar= {}, ro= {}".format(ar, ro))
    EB = ro*EJ*ES + (1 - ro)*EJ
    EB2 = ro*EJ**2*ES**2 + (1 - ro)*EJ**2
    ET = EB + ar*EB2/2/(1 - ar*EB)
    return ET if ET < ET_MAX else None


# ##################################  Reptod-ifidle-wcancel  ##################################### #
def ET_reptod_ifidle(ns, d, J, S, ar):
    """E[T] model for replicate-to-d-if-idle: a job replicates onto the idle
    subset of d sampled servers; logs the fixed-point ro and the resulting ET."""
    def EB1_mth(m):
        # m-th moment of a single (unreplicated) service time
        return J.moment(m)*S.moment(m)

    ar_toidleq = ar/ns*d

    def Pr_Si1_g_s(i, s):
        # P{min of i replicas > s}
        return S.tail(s)**i

    def ESi1_mth(i, m):
        return scipy.integrate.quad(lambda s: m*s**(m-1) * Pr_Si1_g_s(i, s), 0, np.inf)[0]

    def EBi1_mth(i, m):
        return J.moment(m)*ESi1_mth(i, m)

    Pr_jobfindsidle = lambda ro: 1 - ro**d

    def ET_given_jobfindsidle(ro):
        return sum([EBi1_mth(i, 1) * (1-ro)**i * ro**(d-i) * binom(d, i) / (1 - ro**d)
                    for i in range(1, d+1)])

    ar_mg1efs = lambda ro: ar/ns * ro**(d-1)

    def ESe_mth(ro, m):
        return ro**(d-1) * S.moment(m) \
            + sum([ESi1_mth(i, m) * binom(d-1, i-1)*(1-ro)**(i-1)*ro**(d-i)
                   for i in range(2, d+1)])

    def EBe_mth(ro, m):
        return J.moment(m)*ESe_mth(ro, m)

    def ET_given_jobfindsnoidle(ro):
        # M/G/1 with exceptional first service
        ar_ = ar_mg1efs(ro)
        EB1, EB12 = EB1_mth(1), EB1_mth(2)
        EBe, EBe2 = EBe_mth(ro, 1), EBe_mth(ro, 2)
        return EB1 + ar_*EB12/2/(1 - ar_*EB1) + ar_*(EBe2 - EB12)/2/(1 - ar_*(EB1 - EBe))

    Elengthofbusyperiod = lambda ro: EBe_mth(ro, 1)/(1 - ar_mg1efs(ro)*EB1_mth(1))
    eq = lambda ro: ro - Elengthofbusyperiod(ro)/(Elengthofbusyperiod(ro) + 1/ar_toidleq)
    ro = scipy.optimize.brentq(eq, 0.0001, 1)
    alog("ro= {}".format(ro))
    alog("naive ro= {}".format(ar/ns*EB1_mth(1)))

    ET = Pr_jobfindsidle(ro)*ET_given_jobfindsidle(ro) + \
        (1 - Pr_jobfindsidle(ro))*ET_given_jobfindsnoidle(ro)
    alog("ET= {}".format(ET))


def plot_reptod_ifidle():
    """Sweep the arrival rate, comparing simulation against ET_reptod_ifidle."""
    ns, d = 100, 2
    J = HyperExp([0.9, 0.1], [1, 0.01])  # TPareto(1, 10**4, 1.1) # Exp(1) # DUniform(1, 1)
    S = Bern(1, 10, 0.1)  # Exp(1) # Pareto(1, 2)
    T = ns*4000  # 10000
    alog("ns= {}, d= {}, J= {}, S= {}, T= {}".format(ns, d, J, S, T))

    EJ, ES = J.mean(), S.mean()
    EB = EJ*ES
    print("EJ= {}, ES= {}, EB= {}".format(EJ, ES, EB))
    ar_ub = 0.99*ns/EB
    nf = 1
    for ar in np.linspace(0.01, ar_ub, 7):
        print("> ar= {}".format(ar))

        sching_m = {'name': 'reptod-ifidle', 'd': d, 's_len': d}
        rosim, ETsim, EDsim = sim_reptod(nf, ns, sching_m, J, S, ar, T, jg_type='poisson')
        print("sching_m= {}".format(sching_m))
        print("rosim= {}, ETsim= {}, EDsim= {}".format(rosim, ETsim, EDsim))
        ET_reptod_ifidle(ns, d, J, S, ar)
        print("\n")

        # sching_m = {'name': 'reptod-ifidle-wcancel', 'd': d, 's_len': d, 'L': 0}
        # rosim, ETsim, EDsim = sim_reptod(nf, ns, sching_m, J, S, ar, T, jg_type='poisson')
        # print("sching_m= {}".format(sching_m) )
        # print("rosim= {}, ETsim= {}, EDsim= {}".format(rosim, ETsim, EDsim) )
        # ET_reptod_ifle_wcancel(ns, d, J, S, ar)
        # print("\n\n")


# ###############################  Reptod-ifidle-wcancel  ############################### #
def ET_reptod_ifle_wcancel(ns, d, J, S, ar):
    """Like ET_reptod_ifidle, but the extra replicas may be cancelled; the
    replica tail is thinned by the idle-queue arrival rate."""
    def EB1_mth(m):
        return J.moment(m)*S.moment(m)

    ar_toidleq = lambda ro: sum([ar/ns*d/(1+i) * binom(d-1, i)*(1-ro)**i*ro**(d-1-i)
                                 for i in range(d)])

    def Pr_Sred_g_s(ro, s):
        # tail of a replica's effective service time under cancellation
        return 1 - scipy.integrate.quad(lambda v: S.pdf(v)*math.exp(-ar_toidleq(ro)*v), 0, s)[0]

    def Pr_Si1_g_s(ro, i, s):
        return S.tail(s) * Pr_Sred_g_s(ro, s)**(i-1)

    def ESi1_mth(ro, i, m):
        return scipy.integrate.quad(lambda s: m*s**(m-1) * Pr_Si1_g_s(ro, i, s), 0, np.inf)[0]

    def EBi1_mth(ro, i, m):
        return J.moment(m)*ESi1_mth(ro, i, m)

    Pr_jobfindsidle = lambda ro: 1 - ro**d

    def ET_given_jobfindsidle(ro):
        return sum([EBi1_mth(ro, i, 1) * (1-ro)**i * ro**(d-i) * binom(d, i) / (1 - ro**d)
                    for i in range(1, d+1)])

    ar_mg1efs = lambda ro: ar/ns * ro**(d-1)

    def ESe_mth(ro, m):
        return ro**(d-1) * S.moment(m) \
            + sum([ESi1_mth(ro, i, m) * binom(d-1, i-1)*(1-ro)**(i-1)*ro**(d-i)
                   for i in range(2, d+1)])

    def EBe_mth(ro, m):
        return J.moment(m)*ESe_mth(ro, m)

    def ET_given_jobfindsnoidle(ro):
        ar_ = ar_mg1efs(ro)
        EB1, EB12 = EB1_mth(1), EB1_mth(2)
        EBe, EBe2 = EBe_mth(ro, 1), EBe_mth(ro, 2)
        return EB1 + ar_*EB12/2/(1 - ar_*EB1) + ar_*(EBe2 - EB12)/2/(1 - ar_*(EB1 - EBe))

    Elengthofbusyperiod = lambda ro: EBe_mth(ro, 1)/(1 - ar_mg1efs(ro)*EB1_mth(1))
    eq = lambda ro: ro - Elengthofbusyperiod(ro)/(Elengthofbusyperiod(ro) + 1/ar_toidleq(ro))
    ro = scipy.optimize.brentq(eq, 0.0001, 1)
    alog("ro= {}".format(ro))
    alog("naive ro= {}".format(ar/ns*EB1_mth(1)))

    ET = Pr_jobfindsidle(ro)*ET_given_jobfindsidle(ro) + \
        (1 - Pr_jobfindsidle(ro))*ET_given_jobfindsnoidle(ro)
    alog("ET= {}".format(ET))

# NOTE(review): plot_reptod_ifidle_wcancel was truncated mid-statement by the
# chunk boundary and is omitted here rather than guessed at.
"""Plangym API implementation.""" from abc import ABC from typing import Any, Callable, Dict, Generator, Iterable, Optional, Tuple, Union import gym from gym.envs.registration import registry as gym_registry from gym.spaces import Space import numpy import numpy as np wrap_callable = Union[Callable[[], gym.Wrapper], Tuple[Callable[..., gym.Wrapper], Dict[str, Any]]] class BaseEnvironment(ABC): """Inherit from this class to adapt environments to different problems.""" STATE_IS_ARRAY = True RETURNS_GYM_TUPLE = True SINGLETON = False def __init__( self, name: str, frameskip: int = 1, autoreset: bool = True, delay_init: bool = False, ): """ Initialize a :class:`Environment`. Args: name: Name of the environment. frameskip: Number of times ``step`` will be called with the same action. autoreset: Automatically reset the environment when the OpenAI environment returns ``end = True``. delay_init: If ``True`` do not initialize the ``gym.Environment`` \ and wait for ``init_env`` to be called later. """ self._name = name self.frameskip = frameskip self.autoreset = autoreset self.delay_init = delay_init if not delay_init: self.init_env() @property def unwrapped(self) -> "BaseEnvironment": """ Completely unwrap this Environment. Returns: plangym.Environment: The base non-wrapped plangym.Environment instance """ return self @property def name(self) -> str: """Return is the name of the environment.""" return self._name @property def obs_shape(self) -> Tuple[int]: """Tuple containing the shape of the observations returned by the Environment.""" raise NotImplementedError() @property def action_shape(self) -> Tuple[int]: """Tuple containing the shape of the actions applied to the Environment.""" raise NotImplementedError() def __del__(self): """Teardown the Environment when it is no longer needed.""" return self.close() def step( self, action: Union[numpy.ndarray, int, float], state: numpy.ndarray = None, dt: int = 1, ) -> tuple: """ Step the environment applying the supplied action. 
Optionally set the state to the supplied state before stepping it. Take ``dt`` simulation steps and make the environment evolve in multiples \ of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps. Args: action: Chosen action applied to the environment. state: Set the environment to the given state before stepping it. dt: Consecutive number of times that the action will be applied. Returns: if state is None returns ``(observs, reward, terminal, info)`` else returns ``(new_state, observs, reward, terminal, info)`` """ if state is not None: self.set_state(state) obs, reward, terminal, info = self.step_with_dt(action=action, dt=dt) if state is not None: new_state = self.get_state() data = new_state, obs, reward, terminal, info else: data = obs, reward, terminal, info if terminal and self.autoreset: self.reset(return_state=False) return data def step_batch( self, actions: Union[numpy.ndarray, Iterable[Union[numpy.ndarray, int]]], states: Union[numpy.ndarray, Iterable] = None, dt: Union[int, numpy.ndarray] = 1, ) -> Tuple[numpy.ndarray, ...]: """ Vectorized version of the `step` method. It allows to step a vector of \ states and actions. The signature and behaviour is the same as `step`, but taking a list of \ states, actions and dts as input. Args: actions: Iterable containing the different actions to be applied. states: Iterable containing the different states to be set. dt: int or array containing the frameskips that will be applied. 
Returns: if states is None returns ``(observs, rewards, ends, infos)`` else returns ``(new_states, observs, rewards, ends, infos)`` """ dt = ( dt if isinstance(dt, (numpy.ndarray, Iterable)) else numpy.ones(len(actions), dtype=int) * dt ) no_states = states is None or states[0] is None states = [None] * len(actions) if no_states else states data = [self.step(action, state, dt=dt) for action, state, dt in zip(actions, states, dt)] return tuple(list(x) for x in zip(*data)) def init_env(self) -> None: """ Run environment initialization. Including in this function all the code which makes the environment impossible to serialize will allow to dispatch the environment to different workers and initialize it once it's copied to the target process. """ pass def close(self) -> None: """Tear down the current environment.""" pass def sample_action(self): """ Return a valid action that can be used to step the Environment. Implementing this method is optional, and it's only intended to make the testing process of the Environment easier. """ pass def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1) -> tuple: """ Take ``dt`` simulation steps and make the environment evolve in multiples \ of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps. Args: action: Chosen action applied to the environment. dt: Consecutive number of times that the action will be applied. Returns: tuple containing ``(observs, reward, terminal, info)``. """ raise NotImplementedError() def reset( self, return_state: bool = True, ) -> Union[numpy.ndarray, Tuple[numpy.ndarray, numpy.ndarray]]: """ Restart the environment. Args: return_state: If ``True`` it will return the state of the environment. Returns: ``obs`` if ```return_state`` is ``True`` else return ``(state, obs)``. """ raise NotImplementedError() def get_state(self) -> Any: """ Recover the internal state of the simulation. A state must completely describe the Environment at a given moment. 
""" raise NotImplementedError() def set_state(self, state: Any) -> None: """ Set the internal state of the simulation. Args: state: Target state to be set in the environment. Returns: None """ raise NotImplementedError() def get_image(self) -> Union[None, np.ndarray]: """ Return a numpy array containing the rendered view of the environment. Square matrices are interpreted as a greyscale image. Three-dimensional arrays are interpreted as RGB images with channels (Height, Width, RGB) """ return None def clone(self) -> "BaseEnvironment": """Return a copy of the environment.""" raise NotImplementedError() class PlanEnvironment(BaseEnvironment): """Base class for implementing OpenAI ``gym`` environments in ``plangym``.""" def __init__( self, name: str, frameskip: int = 1, episodic_live: bool = False, autoreset: bool = True, wrappers: Iterable[wrap_callable] = None, delay_init: bool = False, remove_time_limit=True, ): """ Initialize a :class:`PlanEnvironment`. Args: name: Name of the environment. Follows standard gym syntax conventions. frameskip: Number of times an action will be applied for each ``dt``. episodic_live: Return ``end = True`` when losing a live. autoreset: Automatically reset the environment when the OpenAI environment returns ``end = True``. wrappers: Wrappers that will be applied to the underlying OpenAI env. \ Every element of the iterable can be either a :class:`gym.Wrapper` \ or a tuple containing ``(gym.Wrapper, kwargs)``. delay_init: If ``True`` do not initialize the ``gym.Environment`` \ and wait for ``init_env`` to be called later. remove_time_limit: If True, remove the time limit from the environment. 
""" self._gym_env = None self.episodic_life = episodic_live self.remove_time_limit = remove_time_limit self._wrappers = wrappers super(PlanEnvironment, self).__init__( name=name, frameskip=frameskip, autoreset=autoreset, delay_init=delay_init, ) @property def gym_env(self): """Return the instance of the environment that is being wrapped by plangym.""" if self._gym_env is None and not self.SINGLETON: self.init_env() return self._gym_env @property def obs_shape(self) -> Tuple[int, ...]: """Tuple containing the shape of the observations returned by the Environment.""" return self.observation_space.shape @property def action_shape(self) -> Tuple[int, ...]: """Tuple containing the shape of the actions applied to the Environment.""" return self.action_space.shape @property def action_space(self) -> Space: """Return the action_space of the environment.""" return self.gym_env.action_space @property def observation_space(self) -> Space: """Return the observation_space of the environment.""" return self.gym_env.observation_space @property def reward_range(self): """Return the reward_range of the environment.""" if hasattr(self.gym_env, "reward_range"): return self.gym_env.reward_range @property def metadata(self): """Return the metadata of the environment.""" if hasattr(self.gym_env, "metadata"): return self.gym_env.metadata def init_env(self): """Initialize the target :class:`gym.Env` instance.""" self._gym_env = self.init_gym_env() if self._wrappers is not None: self.apply_wrappers(self._wrappers) def get_image(self) -> np.ndarray: """ Return a numpy array containing the rendered view of the environment. Square matrices are interpreted as a greyscale image. Three-dimensional arrays are interpreted as RGB images with channels (Height, Width, RGB) """ if hasattr(self.gym_env, "render"): return self.gym_env.render(mode="rgb_array") def reset( self, return_state: bool = True, ) -> Union[numpy.ndarray, Tuple[numpy.ndarray, numpy.ndarray]]: """ Restart the environment. 
Args: return_state: If ``True`` it will return the state of the environment. Returns: ``obs`` if ```return_state`` is ``True`` else return ``(state, obs)``. """ if self.gym_env is None and self.delay_init: self.init_env() obs = self.gym_env.reset() return (self.get_state(), obs) if return_state else obs def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1): """ Take ``dt`` simulation steps and make the environment evolve in multiples\ of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps. Args: action: Chosen action applied to the environment. dt: Consecutive number of times that the action will be applied. Returns: if
<filename>experimental/fitTest/templates_electron.py inputTemplates = { "M3": { "QCD": [ [ 0.0, 0.0004418671450315185, 0.01605229202018541, 0.05825334528354453, 0.08815209477081883, 0.1030093733105401, 0.10513254530282935, 0.09543913272896494, 0.08512520799056993, 0.07489480051367764, 0.06022183314704696, 0.05275434234820922, 0.04426859109280948, 0.03513151550399711, 0.029967498933300033, 0.024464081551639382, 0.020998482981290444, 0.015085519053806937, 0.013212552619472365, 0.010921640496635408, 0.010786019690654257, 0.0079331542797628, 0.007046335967650891, 0.0066796992713355876, 0.00484778962979943, 0.0043565342466071995, 0.0040794364492200835, 0.0027885993970205324, 0.002756423064350152, 0.002331355056268065, 0.0021416660945894994, 0.0017759566793966012, 0.0011629584663072486, 0.001009451124859111, 0.0012831188698206983, 0.0007967669328031654, 0.004698017985185181, 0.0, 0.0, 0.0 ], [ 0.0, 0.0004418671450315186, 0.016052292020185415, 0.05825334528354453, 0.08815209477081884, 0.10300937331054011, 0.10513254530282935, 0.09543913272896497, 0.08512520799056993, 0.07489480051367763, 0.06022183314704697, 0.05275434234820922, 0.04426859109280948, 0.03513151550399711, 0.029967498933300037, 0.024464081551639386, 0.020998482981290444, 0.015085519053806937, 0.013212552619472366, 0.010921640496635408, 0.010786019690654257, 0.0079331542797628, 0.007046335967650892, 0.006679699271335588, 0.00484778962979943, 0.0043565342466071995, 0.004079436449220084, 0.002788599397020533, 0.002756423064350152, 0.002331355056268065, 0.0021416660945894994, 0.0017759566793966015, 0.0011629584663072486, 0.0010094511248591113, 0.0012831188698206985, 0.0007967669328031655, 0.004698017985185181, 0.0, 0.0, 0.0 ], [ 0.0, 0.00044186714503151845, 0.016052292020185405, 0.058253345283544505, 0.0881520947708188, 0.10300937331054005, 0.10513254530282928, 0.09543913272896491, 0.08512520799056988, 0.0748948005136776, 0.06022183314704694, 0.0527543423482092, 0.044268591092809464, 0.035131515503997096, 
0.029967498933300023, 0.02446408155163937, 0.020998482981290433, 0.015085519053806931, 0.013212552619472361, 0.010921640496635406, 0.010786019690654252, 0.007933154279762796, 0.007046335967650889, 0.006679699271335586, 0.004847789629799428, 0.004356534246607198, 0.004079436449220083, 0.0027885993970205316, 0.002756423064350151, 0.0023313550562680647, 0.0021416660945894985, 0.0017759566793966006, 0.0011629584663072482, 0.0010094511248591109, 0.0012831188698206976, 0.0007967669328031652, 0.0046980179851851805, 0.0, 0.0, 0.0 ], [ 0.0, 0.00044186714503151856, 0.016052292020185415, 0.058253345283544526, 0.08815209477081883, 0.1030093733105401, 0.10513254530282934, 0.09543913272896495, 0.08512520799056993, 0.07489480051367763, 0.06022183314704697, 0.05275434234820922, 0.04426859109280948, 0.03513151550399711, 0.029967498933300033, 0.024464081551639382, 0.020998482981290444, 0.01508551905380694, 0.013212552619472365, 0.010921640496635408, 0.010786019690654257, 0.0079331542797628, 0.0070463359676508925, 0.006679699271335588, 0.0048477896297994295, 0.0043565342466071995, 0.004079436449220084, 0.002788599397020533, 0.002756423064350152, 0.0023313550562680655, 0.0021416660945894994, 0.0017759566793966015, 0.0011629584663072486, 0.0010094511248591113, 0.0012831188698206983, 0.0007967669328031656, 0.004698017985185182, 0.0, 0.0, 0.0 ], [ 0.0, 0.00044186714503151845, 0.016052292020185408, 0.05825334528354451, 0.08815209477081881, 0.10300937331054007, 0.10513254530282931, 0.09543913272896493, 0.0851252079905699, 0.07489480051367763, 0.060221833147046946, 0.05275434234820921, 0.044268591092809464, 0.0351315155039971, 0.02996749893330003, 0.02446408155163937, 0.02099848298129044, 0.015085519053806935, 0.013212552619472363, 0.010921640496635408, 0.010786019690654255, 0.007933154279762798, 0.00704633596765089, 0.006679699271335587, 0.004847789629799429, 0.004356534246607199, 0.0040794364492200835, 0.0027885993970205324, 0.002756423064350151, 0.0023313550562680647, 
0.002141666094589499, 0.001775956679396601, 0.0011629584663072484, 0.001009451124859111, 0.0012831188698206976, 0.0007967669328031653, 0.0046980179851851805, 0.0, 0.0, 0.0 ], [ 0.0, 0.00044186714503151845, 0.016052292020185408, 0.05825334528354452, 0.08815209477081881, 0.1030093733105401, 0.10513254530282932, 0.09543913272896495, 0.08512520799056991, 0.07489480051367763, 0.060221833147046946, 0.052754342348209214, 0.044268591092809464, 0.0351315155039971, 0.02996749893330003, 0.02446408155163937, 0.02099848298129044, 0.015085519053806935, 0.013212552619472363, 0.010921640496635408, 0.010786019690654253, 0.007933154279762798, 0.007046335967650891, 0.006679699271335587, 0.0048477896297994295, 0.004356534246607198, 0.0040794364492200835, 0.0027885993970205324, 0.0027564230643501515, 0.002331355056268065, 0.002141666094589499, 0.001775956679396601, 0.0011629584663072484, 0.001009451124859111, 0.0012831188698206979, 0.0007967669328031655, 0.0046980179851851805, 0.0, 0.0, 0.0 ] ], "SingleTop": [ [ 0.0, 0.0, 0.005832177218207422, 0.03178392883540604, 0.07169459952763149, 0.11287210836676197, 0.12176478715865051, 0.12613012243319208, 0.09279832309977645, 0.0794145472218926, 0.06006907882516122, 0.05255651613575753, 0.040665134899883736, 0.03624912116256268, 0.019772311111878084, 0.021835717802889197, 0.01593884467082636, 0.019743744021810564, 0.008520333396281287, 0.008972922567677826, 0.00866376260113745, 0.010388777243208618, 0.007465864279390942, 0.008541920826701573, 0.0069814633847972846, 0.004641107502972211, 0.005051657691380684, 0.003139862804695983, 0.0064010107328637145, 0.0018922562881085226, 0.000984504344510512, 0.0006270305039092417, 0.0, 0.0004977320603021565, 0.0004854616714298998, 0.0007746782412963946, 0.006848591367048002, 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.006464253844897605, 0.026118917653010134, 0.060112787514841975, 0.09584106586560479, 0.12925563138779625, 0.11715408034300653, 0.09440622815766804, 0.0716779573912037, 0.06916348777011985, 
0.05498302637465252, 0.042697413569360194, 0.03739191469243603, 0.030265823351673254, 0.02017805782885195, 0.020967376788849502, 0.018012820429670742, 0.01701231602942316, 0.011241488781432862, 0.013054428387246958, 0.011576167379020312, 0.006085157039789595, 0.008138644498565663, 0.003003338789354045, 0.0031858415089799684, 0.005792637555725393, 0.004107311320351441, 0.0009511534181744243, 0.00275249480381622, 0.004384640907243928, 0.0018999453119997332, 0.0023273554030316707, 0.0016001069427164683, 0.0010718452947245862, 0.0012111180899201381, 0.005913165574839969, 0.0, 0.0, 0.0 ], [ 0.0, 0.000730898105715262, 0.004345582215870544, 0.02798607884595784, 0.058042809009575966, 0.08283302044377722, 0.12365219544544459, 0.11925280674249097, 0.07900946342989748, 0.0729889060967615, 0.06555652145311269, 0.05500702243367231, 0.03727507362592039, 0.03497935430893025, 0.038758173666694595, 0.022026642610227798, 0.020289974876133793, 0.02390005152672489, 0.015996129394221792, 0.015502623414619431, 0.013383170539170194, 0.008222802588575384, 0.008923541477108209, 0.009493835160659504, 0.007416365558061059, 0.006450296693177907, 0.006153831272067389, 0.004196024662363883, 0.003400842304431063, 0.005252293744180164, 0.0039361210244639845, 0.004358739163202468, 0.00287066741824079, 0.002285628915640503, 0.002764779086954986, 0.0014452620379491693, 0.011312470708004386, 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0, 0.01600155485970672, 0.03639273911473297, 0.05968874574025049, 0.10598450262625334, 0.11388841443601717, 0.08307443249770091, 0.08639375480501353, 0.05741122320100601, 0.06314537056043235, 0.053346999957043284, 0.043790356322535307, 0.03638268872477183, 0.028617051623100895, 0.032593396108858065, 0.013798591164209608, 0.02230033822729821, 0.01580926917445075, 0.006236987296217145, 0.017497312659310434, 0.011933038792119726, 0.011202353000571373, 0.005562222992955815, 0.006945493185204689, 0.009975553245066583, 0.005022960250692474, 0.007047783899380394, 0.006433393500369129, 
0.006724854313022172, 0.004743829720451964, 0.004778845074539503, 0.005801395394700851, 0.003081124868615164, 0.0023767450175057143, 0.016016677645895455, 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0034855705626097616, 0.010025916336663884, 0.027364239400232984, 0.037694745308698235, 0.09663206840536868, 0.09970075477836253, 0.06106233761893696, 0.056412910319662585, 0.057912542580936144, 0.041328923907318675, 0.06704352646140234, 0.047223924190355336, 0.031021114902507748, 0.03758027943204309, 0.022834400563328236, 0.024808105880289092, 0.036654808787627724, 0.031893202006165335, 0.02739260061438384, 0.012395283525136105, 0.01054237296279351, 0.020567685747427896, 0.012583514912694046, 0.009008845505670094, 0.012527762077976097, 0.017938595283369566, 0.010789448025986654, 0.003632023138839643, 0.01816042267512064, 0.0015585333839998625, 0.0027675759622464006, 0.019484546123949304, 0.004103330254325486, 0.009631143922782703, 0.016236944440788555, 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0, 0.0, 0.007336647376339268, 0.02347544978530838, 0.06263222854439522, 0.06610613086026246, 0.035369146080455695, 0.04408531650826217, 0.043560791950211564, 0.09582421142939976, 0.024197640811710778, 0.05814140173848918, 0.025521248131010324, 0.053856110255041584, 0.03162162916096726, 0.07115652068544676, 0.016567788392908175, 0.020825518489763186, 0.02428868407680062, 0.031889226678125646, 0.013637606734355396, 0.029101557862528757, 0.0, 0.025475944986739763, 0.029517463743032837, 0.01538260011507948, 0.006017436893000935, 0.01865575381438486, 0.015824792234214888, 0.0030866067227976217, 0.013980448208807261, 0.0, 0.008943475556169688, 0.025516356476877198, 0.058404265697113315, 0.0, 0.0, 0.0 ] ], "TTJet": [ [ 0.0, 0.0, 0.005866260054832973, 0.032014476569687776, 0.07611743723353019, 0.12269913163544673, 0.204669959277149, 0.1780022148266413, 0.0902222568133845, 0.06720996262179303, 0.045621861833730876, 0.03585930513808567, 0.027308801181492228, 0.022550773464298984, 0.01697272151879135, 
0.014303918822958318, 0.009536675235431601, 0.008151062240943892, 0.006990013596176034, 0.005622428289760508, 0.005432733235420568, 0.004571707788875856, 0.0030247979877491816, 0.0025379990687445786, 0.0021322114740614994, 0.0018967639068011742, 0.001600198531484904, 0.001760907897112518, 0.0010930826959057282, 0.0012682896008723822, 0.0007330317183938927, 0.0007410646772215062, 0.000665137515949025, 0.0005346326125257873, 0.00039168832065960966, 0.00025771205206101376, 0.0016387805620258422, 0.0, 0.0, 0.0 ], [ 0.0, 7.262560201399413e-05, 0.005459965341619546, 0.03172214511310284, 0.07699353745814363, 0.12491126396204628, 0.20003559424615688, 0.17066165025335486, 0.08996890767405352, 0.06649813957714566, 0.05061011517828175, 0.03584184011202735, 0.027588795134809475, 0.023294770858547913, 0.01757932753487905, 0.013027107776830716, 0.012195905630129025, 0.009600558514058243, 0.006854961641894884, 0.0058081556303386135, 0.004698636898302152, 0.004593892903081976, 0.003409789788416189, 0.0021757282806029953, 0.002259041246620286, 0.00221698886107549, 0.0020069814624712992, 0.0014849074170667832, 0.0014821376683882836, 0.0009165103021072048, 0.0005584379824681107, 0.0007847520606728024, 0.0004956669967263232, 0.0007192863381122417, 0.00044359338561214204, 0.000283998679673084, 0.0027442824891683155, 0.0, 0.0, 0.0 ], [ 0.0, 0.00010999244899275781, 0.004524250010912452, 0.030522524117397948, 0.06765121697833965, 0.11925184104915117, 0.18208569394951088, 0.1678624854487025, 0.09332137586299127, 0.06492288997550127, 0.05287619322834948, 0.038590428081298624, 0.03148344300415997, 0.024806268142032767, 0.021888358435464855, 0.016696569370059054, 0.012486064354286668, 0.011756333188735998, 0.00958964683937508, 0.007049851662028582, 0.005738821121195343, 0.005400646562344024, 0.004162263767899146, 0.003934996784796343, 0.00356665539154776, 0.0026563631439693474, 0.0022957990527197767, 0.001734408902217747, 0.0018663108219949586, 0.0012663255620451552, 0.0008837863377775026, 
0.0013098166043183537, 0.0005873395717754564, 0.0008883902275125473, 0.0008014397012084036, 0.0008264688444718471, 0.004604741454914939, 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0023000264038757798, 0.01705439603921528, 0.04910070346416718, 0.09122001899630586, 0.17588293609181563, 0.18507124253611063, 0.09123159958610425, 0.05960162269734152, 0.05217691285173646, 0.04256359509315278, 0.03655345441422678, 0.03347575846500428, 0.024626447013499187, 0.020432978830003327, 0.018561221373061258, 0.015463103655853658,
        # because average line height is used when create docx. However, the contained lines
        # may be not reasonable after this step. So, this is just a pre-processing step focusing
        # on processing lines in horizontal direction, e.g. merging inline image to its text line.
        # Further steps, e.g. split back to original blocks, must be applied before further parsing.
        final_block.lines.join(line_overlap_threshold, line_merging_threshold)
        blocks.append(final_block)

        # add table blocks
        blocks.extend(self.table_blocks)

        self.reset(blocks)
        return self

    def split_back(self, *args):
        '''Split the joined lines back to original text block if possible.

        With preceding joining step, current text block may contain lines coming from
        various original blocks. Considering that different text block may have different
        line properties, e.g. height, spacing, this function is to split them back to
        original text block.

        .. note::
            Don't split block if the splitting breaks flow layout, e.g. two blocks
            (i.e. two paragraphs in docx) in same row.
        '''
        blocks = []  # type: list[TextBlock]
        lines = Lines()

        # collect lines for further step, or table block directly
        for block in self._instances:
            if block.is_text_image_block() and block.is_flow_layout(*args):
                lines.extend([line for line in block.lines if line.text.strip()])  # filter empty line
            else:
                blocks.append(block)

        # regroup lines
        for group_lines in lines.split_back():
            text_block = TextBlock()
            text_block.lines.reset(group_lines)
            blocks.append(text_block)

        self.reset(blocks).sort_in_reading_order()

    def join_vertically(self, block_merging_threshold):
        '''Merge adjacent blocks in vertical direction when the distance between blocks:

        * is smaller than average line distance when multi-lines; or
        * is smaller that a threshold * block height when single line.

        .. note::
            Blocks belonging to same paragraph might be split by ``PyMuPDF`` unreasonably.
        '''
        blocks = []  # type: list[TextBlock]
        ref = None  # type: TextBlock

        # check adjacent two text blocks
        for block in self._instances:
            merged = False
            # add block if previous isn't a text block
            if ref is None or not ref.is_text_image_block():
                blocks.append(block)
            # add block if this isn't a text block
            elif not block.is_text_image_block():
                blocks.append(block)
            # check two adjacent text blocks
            else:
                # block gap along the reading direction:
                # horizontal text -> vertical gap (y axis, idx=1); vertical text -> x axis
                idx = 1 if ref.is_horizontal_text else 0
                gap_block = block.bbox[idx] - ref.bbox[idx+2]
                # lines gap
                gap_line1, gap_line2 = ref.average_row_gap, block.average_row_gap
                # single line blocks: no intra-block row gap exists, so compare the
                # block gap against a fraction of the smaller block height instead
                if gap_line1==gap_line2==None:
                    # block height
                    h1 = ref.bbox[idx+2]-ref.bbox[idx]
                    h2 = block.bbox[idx+2]-block.bbox[idx]
                    merged = abs(gap_block-block_merging_threshold*min(h1, h2))<=constants.TINY_DIST
                # multi-lines block: merge when the inter-block gap matches the line gap
                else:
                    gap_line = gap_line1 if not gap_line1 is None else gap_line2
                    merged = abs(gap_block-gap_line) <= constants.TINY_DIST

                if merged:
                    ref.add(block.lines)
                else:
                    blocks.append(block)

            # NOTE: update ref block only no merging happens
            if not merged:
                ref = block

        self.reset(blocks)

    def parse_text_format(self, rects):
        '''Parse text format with style represented by stroke/fill shapes.

        Args:
            rects (Shapes): Potential styles applied on blocks.
        '''
        # parse text block style one by one
        for block in filter(lambda e: e.is_text_block(), self._instances):
            block.parse_text_format(rects)

    def make_docx(self, doc):
        '''Create page based on parsed block structure.

        Args:
            doc (Document, _Cell): The container to make docx content.
        '''
        def make_table(table_block, pre_table):
            # create dummy paragraph if table before space is set
            # - a minimum line height of paragraph is 0.7pt, so ignore before space if less than this value
            # - but two adjacent tables will be combined automatically, so adding a minimum dummy paragraph is required
            if table_block.before_space>=constants.MIN_LINE_SPACING or pre_table:
                h = int(10*table_block.before_space)/10.0  # round(x,1), but to lower bound
                p = doc.add_paragraph()
                reset_paragraph_format(p, line_spacing=Pt(h))

            # new table
            table = doc.add_table(rows=table_block.num_rows, cols=table_block.num_cols)
            table.autofit = False
            table.allow_autofit = False
            table_block.make_docx(table)

        pre_table = False
        cell_layout = isinstance(self.parent, Cell)
        for block in self._instances:
            # make paragraphs
            if block.is_text_image_block():
                # new paragraph
                p = doc.add_paragraph()
                block.make_docx(p)
                pre_table = False  # mark block type

            # make table
            elif block.is_table_block():
                make_table(block, pre_table)
                pre_table = True  # mark block type

                # NOTE: within a cell, there is always an empty paragraph after table,
                # so, delete it right here.
                # https://github.com/dothinking/pdf2docx/issues/76
                if cell_layout:
                    delete_paragraph(doc.paragraphs[-1])

        # NOTE: If a table is at the end of a page, a new paragraph will be automatically
        # added by the rending engine, e.g. MS Word, which resulting in an unexpected
        # page break. The solution is to never put a table at the end of a page, so add
        # an empty paragraph and reset its format, particularly line spacing, when a table
        # is created.
        for block in self._instances[::-1]:
            # ignore float image block
            if block.is_float_image_block():
                continue
            # nothing to do if not end with table block
            if not block.is_table_block():
                break
            # otherwise, add a small paragraph
            p = doc.add_paragraph()
            reset_paragraph_format(p, Pt(constants.MIN_LINE_SPACING))  # a small line height

    def plot(self, page):
        '''Plot blocks in PDF page for debug purpose.'''
        for block in self._instances:
            block.plot(page)

    @staticmethod
    def _assign_block_to_tables(block:Block, tables:list, blocks_in_tables:list, blocks:list):
        '''Collect blocks contained in table region ``blocks_in_tables`` and rest text blocks in ``blocks``.'''
        # lines in block for further check if necessary
        lines = block.lines if block.is_text_image_block() else Lines()

        # collect blocks contained in table region
        # NOTE: there is a probability that only a part of a text block is contained in table region,
        # while the rest is in normal block region.
        for table, blocks_in_table in zip(tables, blocks_in_tables):
            # fully contained in one table
            if table.bbox.contains(block.bbox):
                blocks_in_table.append(block)
                break
            # not possible in current table, then check next table
            elif not table.bbox.intersects(block.bbox):
                continue
            # deep into line level for text block
            elif block.is_text_image_block():
                table_lines, not_table_lines = lines.split_with_intersection(table.bbox, constants.FACTOR_MOST)
                # add lines to table
                text_block = TextBlock()
                text_block.add(table_lines)
                blocks_in_table.append(text_block)
                # lines not in table for further check
                if not_table_lines:
                    lines = not_table_lines
                else:
                    break  # no more lines
        # for-else: runs only when the loop above was NOT broken out of, i.e. some
        # part of the block remains outside every table region.
        # Now, this block (or part of it) belongs to previous layout
        else:
            if block.is_table_block():
                blocks.append(block)
            else:
                text_block = TextBlock()
                text_block.add(lines)
                blocks.append(text_block)

    def _parse_block_horizontal_spacing(self, *args):
        '''Calculate external horizontal space for text blocks, i.e.
        alignment mode and left spacing for paragraph in docx:

        - horizontal block -> take left boundary as reference
        - vertical block -> take bottom boundary as reference
        '''
        # bbox of blocks
        # - page level, e.g. blocks in top layout
        # - table level, e.g. blocks in table cell
        bbox = self.parent.working_bbox
        for block in self._instances:
            block.parse_horizontal_spacing(bbox, *args)

    def _parse_block_vertical_spacing(self):
        '''Calculate external vertical space for text blocks, i.e. before/after space in docx.

        The vertical spacing is determined by the vertical distance to previous block.
        For the first block, the reference position is top margin.

        It's easy to set before-space or after-space for a paragraph with ``python-docx``,
        so, if current block is a paragraph, set before-space for it; if current block is
        not a paragraph, e.g. a table, set after-space for previous block (generally,
        previous block should be a paragraph).
        '''
        # bbox of blocks
        # - page level, e.g. blocks in top layout
        # - table level, e.g. blocks in table cell
        bbox = self.parent.working_bbox

        # check text direction for vertical space calculation:
        # - normal reading direction (from left to right) -> the reference boundary is top border, i.e. bbox[1].
        # - vertical text direction, e.g. from bottom to top -> left border bbox[0] is the reference
        idx = 1 if self.is_horizontal_text else 0

        ref_block = self._instances[0]
        ref_pos = bbox[idx]

        for block in self._instances:
            # NOTE: the table bbox is counted on center-line of outer borders, so a half of top border
            # size should be excluded from the calculated vertical spacing
            if block.is_table_block():
                dw = block[0][0].border_width[0] / 2.0  # use top border of the first cell
            else:
                dw = 0.0

            start_pos = block.bbox[idx] - dw
            para_space = start_pos-ref_pos

            # modify vertical space in case the block is out of bottom boundary
            dy = max(block.bbox[idx+2]-bbox[idx+2], 0.0)
            para_space -= dy
            para_space = max(para_space, 0.0)  # ignore negative value

            # ref to current (paragraph): set before-space for paragraph
            if block.is_text_block():
                # spacing before this paragraph
                block.before_space = para_space

            # if ref to current (image): set before-space for
""" Class to create jira tickets for security group issues. """ import sys import logging import warnings import ipwhois from functools import lru_cache from ipwhois import IPWhois from collections import Counter from library.logger import set_logging, add_cw_logging from library.config import Config from library.jiraoperations import JiraReporting, JiraOperations from library.slack_utility import SlackNotification from library.aws.ec2 import EC2Operations from library.ddb_issues import IssueStatus, SecurityGroupIssue from library.ddb_issues import Operations as IssueOperations from library.utility import empty_converter, list_converter from library.aws.utility import Account from library.aws.security_groups import RestrictionStatus class CreateSecurityGroupsTickets(object): """ Class to create jira tickets for security group issues """ def __init__(self, config): self.config = config @staticmethod @lru_cache(maxsize=128) def get_registrant(cidr): ip = cidr.split("/")[0] with warnings.catch_warnings(): warnings.simplefilter("ignore") try: whois = IPWhois(ip).lookup_rdap() except ipwhois.exceptions.IPDefinedError: return "" registrants = [] for title, obj in whois.get('objects', {}).items(): if obj.get('contact') is None: continue if 'registrant' in obj.get('roles', []): registrants.append(f"{obj['contact'].get('name')} ({title})") break return ', '.join(registrants) def build_open_ports_table_jira(self, perms): open_partly = any([perm['status'] == 'open_partly' for perm in perms]) open_port_details = "||From Port||To Port||Protocol||CIDR||" if open_partly: open_port_details += "Registrant||" open_port_details += "\n" for open_port in perms: open_port_details += f"|{open_port['from_port']}|{open_port['to_port']}|{open_port['protocol']}|{open_port['cidr']}|" if open_partly: if open_port['status'] == 'open_partly': open_port_details += empty_converter(self.get_registrant(open_port['cidr'])) + "|" else: open_port_details += "-" open_port_details += "\n" return 
open_port_details def build_open_ports_table_slack(self, perms): open_port_details = "```\n" for open_port in perms: if open_port['from_port'] == open_port['to_port']: port_protocol = f"{open_port['to_port']}" else: port_protocol = f"{open_port['from_port']}-{open_port['to_port']}" port_protocol += f"/{open_port['protocol']}" open_port_details += f"{port_protocol:15}\t{open_port['cidr']}" if open_port['status'] == 'open_partly': open_port_details += " [" + empty_converter(self.get_registrant(open_port['cidr'])) + "]" open_port_details += "\n" open_port_details += "```" return open_port_details def build_instances_table(self, instances): instance_details = "" # security group has associated instances in_use = False # security group has associated instances with public ip in public subnet public = False # security group has associated instances with public ip in private subnet blind_public = False owners = [] bus = [] products = [] table_limit_reached = False if len(instances) > 0: in_use = True instance_details += ( f"||Instance ID||State" f"||Private Ip Address||Public Ip Address" f"||Owner||Business unit||Product||Component" f"||Subnet||\n") for ec2_instance in instances: if len(ec2_instance.public_ips) > 0: if ec2_instance.public_subnet: public = True else: blind_public = True owner = ec2_instance.tags.get('owner') bu = ec2_instance.tags.get('bu') product = ec2_instance.tags.get('product') component = ec2_instance.tags.get('component') if self.config.jira.text_field_character_limit == 0 or \ len(instance_details) < (self.config.jira.text_field_character_limit * 0.5): instance_details += ( f"|{ec2_instance.id}|{ec2_instance.state}" f"|{list_converter(ec2_instance.private_ips)}" f"|{list_converter(ec2_instance.public_ips)}" f"|{empty_converter(owner)}" f"|{empty_converter(bu)}" f"|{empty_converter(product)}" f"|{empty_converter(component)}" f"|{'public' if ec2_instance.public_subnet else 'private'}|\n" ) elif not table_limit_reached: table_limit_reached = True 
owners.append(owner) bus.append(bu) products.append(product) instance_details = f"*Instances{' (limited subset)' if table_limit_reached else ''}*:\n{instance_details}" # remove empty and count number of occurrences for each owner/bu/product owners = Counter([x for x in owners if x]) bus = Counter([x for x in bus if x]) products = Counter([x for x in products if x]) # find owner/bu/product with max occurrences owner = max(owners, key=lambda owner: owners[owner]) if owners else None bu = max(bus, key=lambda bu: bus[bu]) if bus else None product = max(products, key=lambda product: products[product]) if products else None # logging.debug(f"bu={bu}") # logging.debug(f"product={product}") return instance_details, in_use, public, blind_public, owner, bu, product def create_tickets_securitygroups(self): """ Class function to create jira tickets """ table_name = self.config.sg.ddb_table_name main_account = Account(region=self.config.aws.region) ddb_table = main_account.resource("dynamodb").Table(table_name) jira = JiraReporting(self.config) slack = SlackNotification(self.config) for account_id, account_name in self.config.aws.accounts.items(): logging.debug(f"Checking '{account_name} / {account_id}'") issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, SecurityGroupIssue) for issue in issues: group_id = issue.issue_id group_name = issue.issue_details.name group_region = issue.issue_details.region tags = issue.issue_details.tags # issue has been already reported if issue.timestamps.reported is not None: owner = issue.jira_details.owner bu = issue.jira_details.business_unit product = issue.jira_details.product if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]: logging.debug(f"Closing {issue.status.value} security group '{group_name} / {group_id}' issue") comment = (f"Closing {issue.status.value} security group '{group_name} / {group_id}' issue " f"in '{account_name} / {account_id}' account, '{group_region}' region") jira.close_issue( 
ticket_id=issue.jira_details.ticket, comment=comment ) slack.report_issue( msg=f"{comment}" f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}", owner=owner, account_id=account_id, bu=bu, product=product, ) IssueOperations.set_status_closed(ddb_table, issue) # issue.status != IssueStatus.Closed (should be IssueStatus.Open) elif issue.timestamps.updated > issue.timestamps.reported: logging.debug(f"Updating security group '{group_name} / {group_id}' issue") comment = "Issue details are changed, please check again.\n" comment += self.build_open_ports_table_jira(issue.issue_details.perms) comment += JiraOperations.build_tags_table(tags) jira.update_issue( ticket_id=issue.jira_details.ticket, comment=comment ) slack.report_issue( msg=f"Security group '{group_name} / {group_id}' issue is changed " f"in '{account_name} / {account_id}' account, '{group_region}' region" f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}" f"\n" f"{self.build_open_ports_table_slack(issue.issue_details.perms)}", owner=owner, account_id=account_id, bu=bu, product=product, ) IssueOperations.set_status_updated(ddb_table, issue) else: logging.debug(f"No changes for '{group_name} / {group_id}'") # issue has not been reported yet else: logging.debug(f"Reporting security group '{group_name} / {group_id}' issue") status = RestrictionStatus(issue.issue_details.status) # if owner/bu/product tags exist on security group - use it group_owner = tags.get("owner", None) group_bu = tags.get("bu", None) group_product = tags.get("product", None) open_port_details = self.build_open_ports_table_jira(issue.issue_details.perms) account_details = (f"*Risk*: High\n\n" f"*Account Name*: {account_name}\n" f"*Account ID*: {account_id}\n" f"*SG Name*: {group_name}\n" f"*SG ID*: {group_id}\n" f"*Region*: {group_region}\n\n") account = Account(id=account_id, name=account_name, region=group_region, 
role_name=self.config.aws.role_name_reporting) ec2_client = account.client("ec2") if account.session is not None else None sg_instance_details = ec2_owner = ec2_bu = ec2_product = None sg_in_use = sg_public = sg_blind_public = False if ec2_client is not None: ec2_instances = EC2Operations.get_instance_details_of_sg_associated(ec2_client, group_id) sg_instance_details, \ sg_in_use, sg_public, sg_blind_public, \ ec2_owner, ec2_bu, ec2_product = self.build_instances_table(ec2_instances) owner = group_owner if group_owner is not None else ec2_owner bu = group_bu if group_bu is not None else ec2_bu product = group_product if group_product is not None else ec2_product if bu is None: bu = self.config.get_bu_by_name(group_name) source_description = f"has {status.value} status" if status == RestrictionStatus.OpenCompletely: source_description = "allows access from any IP address (0.0.0.0/0, ::/0)" elif status == RestrictionStatus.OpenPartly: source_description = "allows access from some definite public ip addresses or networks" if sg_public: priority = "Critical" summary_status = "Internet" issue_description = (f"Security group has EC2 instances in public subnets " f"with public IP address attached and " f"{source_description} " f"for following ports:\n") threat = ( f"*Threat*: " f"Instances associated with this security group are accessible via public route over Internet and " f"have ingress rules which allows access to critical services which should be accessible " f"only from VPN or Direct Connect. Accessing these instances via Internet can lead to leakage " f"to third parties of login credentials for such services as databases/remote access." f"Open and Unrestricted access from Internet increases opportunities for " f"malicious activity from public internet which can potentially result into " f"hacking, denial-of-service attacks, loss of data, etc. 
This also provides " f"an ingress point to the attackers to gain backdoor access within the other " f"critical services.\n" ) elif sg_blind_public: priority = "Critical" summary_status = "Internet" issue_description = (f"Security group has EC2 instances in private subnets " f"with public IP address attached and " f"{source_description} " f"for following ports:\n") threat = (f"*Threat*: " f"Instances listed below can be probed by external attack vectors and " f"make them vulnerable to blind injection based attacks, as although " f"the EC2 instances is in a private subnet, if security group and NACL " f"are allowing access from the internet incoming, traffic will reach " f"instances when someone is probing the public IP of the instances. " f"However, there will be no return traffic due to the lack of an IGW.\n") elif not sg_in_use: priority = "Minor" summary_status = "Unused" issue_description = (f"Security group has no EC2 instances attached and " f"{source_description} " f"for following ports:\n") threat = (f"*Threat*: " f"An unused SG can be leveraged to gain control/access within the network " f"if attached to any exposed instance. 
This unrestricted access increases " f"opportunities for malicious activity (hacking, denial-of-service attacks, " f"loss of data).\n") else: priority = "Major" summary_status = "Intranet" issue_description = ( f"Security group has EC2 instances in in private subnets and " f"{source_description} " f"for following ports:\n") threat = (f"*Threat*: " f"Open access within the network not only provides unrestricted access to " f"other servers but increases opportunities for malicious activity (hacking, " f"denial-of-service attacks, loss of data) if attacker gains access to the " f"services within the network, thus providing lateral movement.\n") tags_table = JiraOperations.build_tags_table(tags) issue_description = ( f"{issue_description}" f"{open_port_details}" f"{threat}" f"{account_details}") # auto_remediation_date = (self.config.now + self.config.sg.issue_retention_date).date() # desc += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n" issue_description += f"{tags_table}" issue_description += f"{sg_instance_details if sg_instance_details else ''}" issue_description += ( f"*Recommendation*: " f"Restrict access to only those IP
<reponame>rajeshkumarkarra/strawberryfields # Copyright 2019 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" Unit tests for strawberryfields.gbs.similarity """ # pylint: disable=no-self-use,unused-argument,too-many-arguments import itertools from collections import Counter from unittest import mock import networkx as nx import numpy as np import pytest import strawberryfields as sf from strawberryfields.gbs import similarity pytestmark = pytest.mark.gbs all_orbits = { 3: [[1, 1, 1], [2, 1], [3]], 4: [[1, 1, 1, 1], [2, 1, 1], [3, 1], [2, 2], [4]], 5: [[1, 1, 1, 1, 1], [2, 1, 1, 1], [3, 1, 1], [2, 2, 1], [4, 1], [3, 2], [5]], } all_orbits_cumulative = [o for orbs in all_orbits.values() for o in orbs] all_events = { (3, 1): [3, None, None], (4, 1): [4, None, None, None, None], (5, 1): [5, None, None, None, None, None, None], (3, 2): [3, 3, None], (4, 2): [4, 4, None, 4, None], (5, 2): [5, 5, None, 5, None, None, None], } @pytest.mark.parametrize("dim", [3, 4, 5]) def test_sample_to_orbit(dim): """Test if function ``similarity.sample_to_orbit`` correctly returns the original orbit after taking all permutations over the orbit. 
The starting orbits are all orbits for a fixed photon number ``dim``.""" orb = all_orbits[dim] checks = [] for o in orb: sorted_sample = o.copy() sorted_sample_len = len(sorted_sample) if sorted_sample_len != dim: sorted_sample += [0] * sorted_sample_len permutations = itertools.permutations(sorted_sample) checks.append(all([similarity.sample_to_orbit(p) == o for p in permutations])) assert all(checks) @pytest.mark.parametrize("dim", [3, 4, 5]) class TestOrbits: """Tests for the function ``strawberryfields.gbs.similarity.orbits``""" def test_orbit_sum(self, dim): """Test if function generates orbits that are lists that sum to ``dim``.""" assert all([sum(o) == dim for o in similarity.orbits(dim)]) def test_orbit_sorted(self, dim): """Test if function generates orbits that are lists sorted in descending order.""" assert all([o == sorted(o, reverse=True) for o in similarity.orbits(dim)]) def test_orbits(self, dim): """Test if function returns all the integer partitions of 5. This test does not require ``similarity.orbits`` to return the orbits in any specified order.""" partition = all_orbits[dim] orb = similarity.orbits(dim) assert sorted(partition) == sorted(orb) @pytest.mark.parametrize("dim", [3, 4, 5]) @pytest.mark.parametrize("max_count_per_mode", [1, 2]) def test_sample_to_event(dim, max_count_per_mode): """Test if function ``similarity.sample_to_event`` gives the correct set of events when applied to all orbits with a fixed number of photons ``dim``. 
    This test ensures that orbits exceeding the ``max_count_per_mode`` value are attributed
    the ``None`` event and that orbits not exceeding the ``max_count_per_mode`` are
    attributed the event ``dim``."""
    orb = all_orbits[dim]
    target_events = all_events[(dim, max_count_per_mode)]
    # map every orbit of ``dim`` photons through sample_to_event and compare element-wise
    ev = [similarity.sample_to_event(o, max_count_per_mode) for o in orb]
    assert ev == target_events


class TestOrbitToSample:
    """Tests for the function ``strawberryfields.gbs.similarity.orbit_to_sample``"""

    def test_low_modes(self):
        """Test if function raises a ``ValueError`` if fed an argument for ``modes`` that does
        not exceed the length of the input orbit."""
        with pytest.raises(ValueError, match="Number of modes cannot"):
            similarity.orbit_to_sample([1, 2, 3], 2)

    @pytest.mark.parametrize("orb_dim", [3, 4, 5])
    @pytest.mark.parametrize("modes_dim", [6, 7])
    def test_sample_length(self, orb_dim, modes_dim):
        """Test if function returns a sample that is of correct length ``modes_dim`` when fed
        a collision-free event of ``orb_dim`` photons."""
        # all_orbits[orb_dim][0] is the all-ones (collision-free) orbit
        samp = similarity.orbit_to_sample(all_orbits[orb_dim][0], modes_dim)
        assert len(samp) == modes_dim

    def test_sample_composition(self):
        """Test if function returns a sample that corresponds to the input orbit. Input orbits
        are orbits from ``all_orbits_cumulative``, i.e., all orbits from 3-5 photons. 
This test checks if a sample corresponds to an orbit by counting the occurrence of elements in the sample and comparing to a count of elements in the orbit.""" modes = 5 all_orbits_zeros = [ [1, 1, 1, 0, 0], [2, 1, 0, 0, 0], [3, 0, 0, 0, 0], [1, 1, 1, 1, 0], [2, 1, 1, 0, 0], [3, 1, 0, 0, 0], [2, 2, 0, 0, 0], [4, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 1, 1, 1, 0], [3, 1, 1, 0, 0], [2, 2, 1, 0, 0], [4, 1, 0, 0, 0], [3, 2, 0, 0, 0], [5, 0, 0, 0, 0], ] # padding orbits with zeros at the end for comparison to samples counts = [Counter(similarity.orbit_to_sample(o, modes)) for o in all_orbits_cumulative] ideal_counts = [Counter(o) for o in all_orbits_zeros] assert counts == ideal_counts class TestEventToSample: """Tests for the function ``strawberryfields.gbs.similarity.event_to_sample``""" def test_low_count(self): """Test if function raises a ``ValueError`` if ``max_count_per_mode`` is negative.""" with pytest.raises(ValueError, match="Maximum number of photons"): similarity.event_to_sample(2, -1, 5) def test_high_photon(self): """Test if function raises a ``ValueError`` if ``photon_number`` is so high that it cannot correspond to a sample given the constraints of ``max_count_per_mode`` and ``modes``""" with pytest.raises(ValueError, match="No valid samples can be generated."): similarity.event_to_sample(5, 1, 4) @pytest.mark.parametrize("photon_num", [5, 6]) @pytest.mark.parametrize("modes_dim", [10, 11]) @pytest.mark.parametrize("count", [3, 4]) def test_sample_length(self, photon_num, modes_dim, count): """Test if function returns a sample that is of correct length ``modes_dim``.""" samp = similarity.event_to_sample(photon_num, count, modes_dim) assert len(samp) == modes_dim @pytest.mark.parametrize("photon_num", [5, 6]) @pytest.mark.parametrize("modes_dim", [10, 11]) @pytest.mark.parametrize("count", [3, 4]) def test_sample_sum(self, photon_num, modes_dim, count): """Test if function returns a sample that has the correct number of photons.""" samp = 
similarity.event_to_sample(photon_num, count, modes_dim) assert sum(samp) == photon_num @pytest.mark.parametrize("photon_num", [5, 6]) @pytest.mark.parametrize("modes_dim", [10, 11]) @pytest.mark.parametrize("count", [3, 4]) def test_sample_max_count(self, photon_num, modes_dim, count): """Test if function returns a sample that has maximum element not exceeding ``count``.""" samp = similarity.event_to_sample(photon_num, count, modes_dim) assert max(samp) <= count orbits = [ [(1, 1, 2), 4, 12], [(1, 1), 4, 6], [(1, 2, 3), 4, 24], [(1, 1, 1, 1), 5, 5], [(1, 1, 2), 5, 30], [(1, 2, 3), 5, 60], ] @pytest.mark.parametrize("orbit, max_photon, expected", orbits) def test_orbit_cardinality(orbit, max_photon, expected): """Test if function ``strawberryfields.gbs.similarity.orbit_cardinality`` returns the correct number of samples for some hard-coded examples.""" assert similarity.orbit_cardinality(list(orbit), max_photon) == expected events = [ [5, 3, 6, 216], [6, 3, 6, 336], [5, 2, 6, 126], [5, 3, 7, 413], [6, 3, 7, 728], [5, 2, 7, 266], ] @pytest.mark.parametrize("photons, max_count, modes, expected", events) def test_event_cardinality(photons, max_count, modes, expected): """Test if function ``strawberryfields.gbs.similarity.event_cardinality`` returns the correct number of samples for some hard-coded examples.""" assert similarity.event_cardinality(photons, max_count, modes) == expected class TestProbOrbitMC: """Tests for the function ``strawberryfields.gbs.similarity.prob_orbit_mc.``""" def test_invalid_samples(self): """Test if function raises a ``ValueError`` when a number of samples less than one is requested.""" g = nx.complete_graph(10) with pytest.raises(ValueError, match="Number of samples must be at least one"): similarity.prob_orbit_mc(g, [1, 1, 1, 1], samples=0) def test_invalid_n_mean(self): """Test if function raises a ``ValueError`` when the mean photon number is specified to be negative.""" g = nx.complete_graph(10) with pytest.raises(ValueError, 
match="Mean photon number must be non-negative"): similarity.prob_orbit_mc(g, [1, 1, 1, 1], n_mean=-1) def test_invalid_loss(self): """Test if function raises a ``ValueError`` when the loss parameter is specified outside of range.""" g = nx.complete_graph(10) with pytest.raises(ValueError, match="Loss parameter must take a value between zero and"): similarity.prob_orbit_mc(g, [1, 1, 1, 1], loss=2) def test_mean_computation_orbit(self, monkeypatch): """Tests if the calculation of the sample mean is performed correctly. The test monkeypatches the fock_prob function so that the probability is the same for each sample and is equal to 1/5, i.e., one over the number of samples in the orbit [1,1,1,1] for 5 modes.""" graph = nx.complete_graph(5) with monkeypatch.context() as m: m.setattr( "strawberryfields.backends.gaussianbackend.GaussianState.fock_prob", lambda *args, **kwargs: 0.2, ) assert np.allclose(similarity.prob_orbit_mc(graph, [1, 1, 1, 1]), 1.0) def test_prob_vacuum_orbit(self): """Tests if the function gives the right probability for the empty orbit when the GBS device has been configured to have zero mean photon number.""" graph = nx.complete_graph(10) assert similarity.prob_orbit_mc(graph, [], 0) == 1.0 def test_loss(self, monkeypatch): """Test if function correctly creates the SF program for lossy GBS.""" graph = nx.complete_graph(5) mock_eng_run = mock.MagicMock() with monkeypatch.context() as m: m.setattr(sf.LocalEngine, "run", mock_eng_run) similarity.prob_orbit_mc(graph, [1, 1, 1, 1], samples=1, loss=0.5) p_func = mock_eng_run.call_args[0][0] assert isinstance(p_func.circuit[1].op, sf.ops.LossChannel) def test_no_loss(self, monkeypatch): """Test if function correctly creates the SF program for GBS without loss.""" graph = nx.complete_graph(5) mock_eng_run = mock.MagicMock() with monkeypatch.context() as m: m.setattr(sf.LocalEngine, "run", mock_eng_run) similarity.prob_orbit_mc(graph, [1, 1, 1, 1], samples=1) p_func = mock_eng_run.call_args[0][0] assert 
not all([isinstance(op, sf.ops.LossChannel) for op in
# <reponame>alexaushev/LFIwithDGPs
"""
Kernel Means Learning Module.

Core methods for Bayesian learning of hyperparameters for likelihood-free inference with kernel means.
"""
import numpy as np
import tensorflow as tf

from kelfi.tensorflow_kernels import gaussian_kernel_gramix, gaussian_density_gramix_multiple, gaussian_density_gramix, convert_anisotropic, atleast_2d


def kernel_means_weights(y, x_sim, theta_sim, eps, beta, reg=None):
    """ Compute the weights of the kernel means likelihood.

    Parameters
    ----------
    y : tf.Tensor [size: (1, d)]
        Observed data or summary statistics
    x_sim : tf.Tensor [size: (m, s, d)]
        Simulated data or summary statistics
    theta_sim : tf.Tensor [size: (m, p)]
        Parameter values corresponding to the simulations
    eps : float or tf.Tensor [size: () or (1,) for isotropic; (d,) for anistropic]
        The simulator noise level(s) for the epsilon-kernel or epsilon-likelihood
    beta : float or tf.Tensor [size: () or (1,) for isotropic; (p,) for anistropic]
        The length scale(s) for the parameter kernel
    reg : float, optional
        The regularization parameter for the conditional kernel mean

    Returns
    -------
    tf.Tensor [size: (m, 1)]
        The weights of the kernel means likelihood
    """
    # size: (m, 1) -- dispatch on the rank of the simulated data
    if len(x_sim.get_shape().as_list()) == 3:
        data_epsilon_likelihood = tf.transpose(gaussian_density_gramix_multiple(y, x_sim, eps))
    elif len(x_sim.get_shape().as_list()) == 2:
        data_epsilon_likelihood = tf.transpose(gaussian_density_gramix(y, x_sim, eps))
    else:
        raise ValueError('Simulated dataset is neither 2D or 3D.')

    # The number of simulations
    m = theta_sim.get_shape().as_list()[0]

    # Set the regularization hyperparameter to some default value if not specified
    if reg is None:
        reg = 1e-3 * tf.reduce_min(beta)

    # Compute the weights at O(m^3)
    theta_sim_gramix = gaussian_kernel_gramix(theta_sim, theta_sim, beta)
    theta_sim_gramix_cholesky = tf.linalg.cholesky(theta_sim_gramix + m * reg * tf.eye(m))
    # NOTE(review): ``tf.cholesky_solve`` is the TF1 name; in TF2 this is
    # ``tf.linalg.cholesky_solve`` -- confirm the TensorFlow version this targets.
    weights = tf.cholesky_solve(theta_sim_gramix_cholesky, data_epsilon_likelihood)
    # size: (m, 1)
    return weights


def kernel_means_likelihood(theta_query, theta_sim, weights, beta):
    """ Query the kernel means likelihood.

    Parameters
    ----------
    theta_query : tf.Tensor [size: (n_query, p)]
        The parameters to query the likelihood at
    theta_sim : tf.Tensor [size: (m, p)]
        Parameter values corresponding to the simulations
    weights : tf.Tensor [size: (m, 1)]
        The weights of the kernel means likelihood
    beta : float or tf.Tensor [size: () or (1,) for isotropic; (p,) for anistropic]
        The length scale(s) for the parameter kernel

    Returns
    -------
    tf.Tensor [size: (n_query,)]
        The kernel means likelihood values at the query points
    """
    # size: (m, n_query)
    theta_evaluation_gramix = gaussian_kernel_gramix(theta_sim, theta_query, beta)
    # size: (n_query,)
    return tf.squeeze(tf.matmul(tf.transpose(theta_evaluation_gramix), weights))


def marginal_kernel_means_likelihood(theta_sim, weights, beta, prior_mean=None, prior_std=None):
    """ Compute the marginal kernel means likelihood under a diagonal Gaussian prior. 
Parameters ---------- theta_sim : tf.Tensor [size: (m, p)] Parameter values corresponding to the simulations weights : tf.Tensor [size: (m, 1)] The weights of the kernel means likelihood beta : float or tf.Tensor [size: () or (1,) for isotropic; (p,) for anistropic] The length scale(s) for the parameter kernel prior_mean : tf.Tensor [size: () or (1,) for isotropic; (p,) for anistropic] The mean(s) of the diagonal Gaussian prior prior_std : tf.Tensor [size: () or (1,) for isotropic; (p,) for anistropic] The standard deviation(s) of the diagonal Gaussian prior Returns ------- float The marginal kernel means likelihood """ # By defaut, the prior has zero mean if prior_mean is None: prior_mean = tf.zeros((1, theta_sim.get_shape().as_list()[-1])) # By default, the prior standard deviation is set to the same as the length scale of the parameter kernel if prior_std is None: prior_std = beta # Compute the final length scale and the ratio scalar coefficient of the resulting prior mean embedding prior_embedding_length_scale = tf.sqrt(beta ** 2 + prior_std ** 2) ratio = tf.reduce_prod(convert_anisotropic(beta / prior_embedding_length_scale, theta_sim.get_shape().as_list()[1])) # Compute the prior mean embedding [size: (m, 1)] prior_mean_embedding = ratio * gaussian_kernel_gramix(theta_sim, atleast_2d(prior_mean), prior_embedding_length_scale) # Compute the kernel means marginal likelihood return tf.reduce_sum(tf.multiply(prior_mean_embedding, weights)) def approximate_marginal_kernel_means_likelihood(theta_samples, theta_sim, weights, beta): """ Compute the approximate marginal kernel means likelihood using prior samples. 
Parameters ---------- theta_samples : tf.Tensor [size: (n_samples, p)] The prior parameter samples to marginalize over theta_sim : tf.Tensor [size: (m, p)] Parameter values corresponding to the simulations weights : tf.Tensor [size: (m, 1)] The weights of the kernel means likelihood beta : float or tf.Tensor [size: () or (1,) for isotropic; (p,) for anistropic] The length scale(s) for the parameter kernel Returns ------- float The approximate marginal kernel means likelihood """ return tf.reduce_mean(kernel_means_likelihood(theta_samples, theta_sim, weights, beta)) def kernel_means_hyperparameter_learning(y, x_sim, theta_sim, eps_tuple, beta_tuple, reg_tuple, eps_ratios=1., beta_ratios=1., offset=0., prior_samples=None, prior_mean=None, prior_std=None, learning_rate=0.01, n_iter=1000, display_steps=10): """ Bayesian hyperparameter learning for KELFI by maximizing the MKML. The API is written to take in numpy arrays. There is no need to pass in tensorflow tensors directly. If the prior is Gaussian, analytical forms exist. Specify its mean and standard deviation in 'prior_mean' and 'prior_std'. Otherwise, approximate forms exist by using prior samples. Provide the samples via 'prior_samples'. If neither is provided, the prior is assumed to be Gaussian with zero mean and standard deviation equal to the length scale of the parameter kernel. For anisotropic cases, it could be convenient to learn the multiplier on fixed length scale ratios. This is especially true for the parameter kernel, where the length scales can be set to a scaled multiple of the prior standard deviations. Provide these ratios in 'eps_ratios' and 'beta_ratios'. 
Parameters ---------- y : np.ndarray [size: (1, d)] Observed data or summary statistics x_sim : np.ndarray [size: (m, s, d)] Simulated data or summary statistics theta_sim : np.ndarray [size: (m, p)] Parameter values corresponding to the simulations eps_tuple : tuple (eps_init, learn_flag) eps_init: float or np.ndarray [size: () or (1,) for isotropic; (d,) for anistropic] The initial simulator noise level(s) for the epsilon-kernel or epsilon-likelihood learn_flag: str Indicate learning for this hyperparameter with 'learn' and use 'fix' otherwise beta_tuple : tuple (beta_init, learn_flag) beta_init: float or np.ndarray [size: () or (1,) for isotropic; (d,) for anistropic] The initial length scale(s) for the parameter kernel learn_flag: str Indicate learning for this hyperparameter with 'learn' and use 'fix' otherwise reg_tuple : tuple (reg_init, learn_flag) reg_init: float The initial regularization parameter for the conditional kernel mean learn_flag: str Indicate learning for this hyperparameter with 'learn' and use 'fix' otherwise eps_ratios: float or np.ndarray [size: () or (1,) for isotropic; (d,) for anistropic] Fixed ratios for the simulator noise level(s) for the epsilon-kernel or epsilon-likelihood beta_ratios: float or np.ndarray [size: () or (1,) for isotropic; (d,) for anistropic] Fixed ratios for the length scale(s) for the parameter kernel offset : float A positive offset in case approximate marginal kernel means likelihood values are slightly negative. 
prior_samples : np.ndarray [size: (n_samples, p)] The parameters samples to marginalize over prior_mean : np.ndarray [size: () or (1,) for isotropic; (p,) for anistropic] The mean(s) of the diagonal Gaussian prior prior_std : np.ndarray [size: () or (1,) for isotropic; (p,) for anistropic] The standard deviation(s) of the diagonal Gaussian prior learning_rate : float The learning rate for the gradient update n_iter : int Number of iterations display_steps : int Number of iterations before displaying the current optimization status """ # Short notation for converting into tensorflow constants or variables tfc = lambda array: tf.constant(array, dtype=tf.float32) tfv = lambda array: tf.Variable(array, dtype=tf.float32) # Convert hyperparameters to variable for learning or constants otherwise log_eps = tfv(np.log(eps_tuple[0])) if eps_tuple[1] == 'learn' else tfc(np.log(eps_tuple[0])) log_beta = tfv(np.log(beta_tuple[0])) if beta_tuple[1] == 'learn' else tfc(np.log(beta_tuple[0])) log_reg = tfv(np.log(reg_tuple[0])) if reg_tuple[1] == 'learn' else tfc(np.log(reg_tuple[0])) # Transform the hyperparameters into the actual hyperparameters if not already eps_ = tf.exp(log_eps) * eps_ratios beta_ = tf.exp(log_beta) * beta_ratios reg_ = tf.exp(log_reg) # Convert all data into constants y_, x_sim_, theta_sim_ = tfc(y), tfc(x_sim), tfc(theta_sim) # Compute the weights weights_ = kernel_means_weights(y_, x_sim_, theta_sim_, eps_, beta_, reg=reg_) # Compute the objective using either the analytical or empirical form if prior_samples is None: # By defaut, the prior has zero mean prior_mean_ = tf.zeros((1, theta_sim.get_shape().as_list()[-1])) if prior_mean is None else tfc(prior_mean) # By default, the prior standard deviation is set to the same as the length scale of the parameter kernel prior_std_ = beta if prior_std is
0xFD7C6E, "Hipsterfication": 0x88513E, "Hiroshima Aquamarine": 0x7FFFD4, "His Eyes": 0x9BB9E1, "Hisoku Blue": 0xABCED8, "Historic Cream": 0xFDF3E3, "Historic Shade": 0xADA791, "Historic Town": 0xA18A64, "Historic White": 0xEBE6D7, "Historical Grey": 0xA7A699, "Historical Ruins": 0xBFB9A7, "Hisui Kingfisher": 0x38B48B, "Hit Grey": 0xA1ADB5, "Hit Pink": 0xFDA470, "Hitchcock Milk": 0xEEFFA9, "Hitching Post": 0xC48D69, "Hitsujiyama Pink": 0xEE66FF, "Hive": 0xFFFF77, "Hobgoblin": 0x01AD8F, "Hockham Green": 0x59685F, "Hoeth Blue": 0x57A9D4, "Hog Bristle": 0xDCD1BB, "Hog-Maw": 0xFBE8E4, "Hog's Pudding": 0xDAD5C7, "Hokey Pokey": 0xBB8E34, "Hoki": 0x647D86, "Hokkaido Lavender": 0x7736D9, "Holbein Blue Grey": 0x547D86, "Hold Your Horses": 0x705446, "Hole In One": 0x4AAE97, "Holenso": 0x598069, "Holiday": 0x81C3B4, "Holiday Blue": 0x32BCD1, "Holiday Camp": 0x6D9E7A, "Holiday Road": 0xB1D1E2, "Holiday Turquoise": 0x8AC6BD, "Holland Red": 0xCB4543, "Holland Tile": 0xDD9789, "Holland Tulip": 0xF89851, "Hollandaise": 0xFFEE44, "Hollow Knight": 0x330055, "Holly": 0x25342B, "Holly Berry": 0xB44E5D, "Holly Bush": 0x355D51, "Holly Fern": 0x8CB299, "Holly Glen": 0xA2B7B5, "Holly Green": 0x0F9D76, "Holly Jolly Christmas": 0xB50729, "Holly Leaf": 0x2E5A50, "Hollyhock": 0x823270, "Hollyhock Bloom": 0xB7737D, "Hollyhock Blossom Pink": 0xBD79A5, "Hollyhock Pink": 0xC2A1B5, "Hollywood Asparagus": 0xDEE7D4, "Hollywood Cerise": 0xF400A0, "Hollywood Golden Age": 0xECD8B1, "Hollywood Starlet": 0xF2D082, "Holy Crow": 0x332F2C, "Holy Grail": 0xE8D720, "Holy Water": 0x466E77, "Holy White": 0xF5F5DC, "Homburg Gray": 0x666D69, "Home Body": 0xF3D2B2, "Home Brew": 0x897B66, "Home Plate": 0xF7EEDB, "Home Song": 0xF2EEC7, "Home Sweet Home": 0x9B7E65, "Homebush": 0x726E69, "Homeland": 0xB18D75, "Homeopathic": 0x5F7C47, "Homeopathic Blue": 0xDBE7E3, "Homeopathic Green": 0xE1EBD8, "Homeopathic Lavender": 0xE5E0EC, "Homeopathic Lilac": 0xE1E0EB, "Homeopathic Lime": 0xE9F6E2, "Homeopathic Mint": 0xE5EAD8, 
"Homeopathic Orange": 0xF2E6E1, "Homeopathic Red": 0xECDBE0, "Homeopathic Rose": 0xE8DBDD, "Homeopathic Yellow": 0xEDE7D7, "Homestead": 0xAC8674, "Homestead Brown": 0x6F5F52, "Homestead Red": 0x986E6E, "Homeworld": 0x2299DD, "Honed Soapstone": 0x9D9887, "Honed Steel": 0x867C83, "Honest": 0x9BB8E2, "Honest Blue": 0x5A839E, "Honesty": 0xDFEBE9, "Honey": 0xBA9238, "Honey and Thyme": 0xAAAA00, "Honey Baked Ham": 0xFFAA99, "Honey Bear": 0xE8C281, "Honey Bee": 0xFCDFA4, "Honey Beehive": 0xD39F5F, "Honey Bees": 0xFBD682, "Honey Beige": 0xF3E2C6, "Honey Bird": 0xFFD28D, "Honey Blush": 0xF5CF9B, "Honey Bunny": 0xDBB881, "Honey Butter": 0xF5D29B, "Honey Carrot Cake": 0xFF9955, "Honey Chili": 0x883344, "Honey Cream": 0xFAE8CA, "Honey Crusted Chicken": 0xFFBB55, "Honey Do": 0xEDEDC7, "Honey Flower": 0x5C3C6D, "Honey Fungus": 0xD18E54, "Honey Garlic Beef": 0x884422, "Honey Ginger": 0xA86217, "Honey Glow": 0xE8B447, "Honey Gold": 0xD1A054, "Honey Graham": 0xBC886A, "Honey Grove": 0xDCB149, "Honey Haven": 0xBC9263, "Honey Lime Chicken": 0xDDCCBB, "Honey Locust": 0xFFC367, "Honey Mist": 0xE5D9B2, "Honey Moth": 0xFBECCC, "Honey Mustard": 0xB68F52, "Honey N Cream": 0xF1DCB7, "Honey Nectar": 0xF1DDA2, "Honey Nougat": 0xE0BB96, "Honey Oat Bread": 0xFAEED9, "Honey Peach": 0xDCBD9E, "Honey Pink": 0xCC99AA, "Honey Pot": 0xFFC863, "Honey Robber": 0xDFBB86, "Honey Tea": 0xD8BE89, "Honey Teriyaki": 0xEE6611, "Honey Tone": 0xF8DC9B, "Honey Wax": 0xFFAA22, "Honey Yellow": 0xCA9456, "Honey Yellow Green": 0x937016, "Honey Yogurt Popsicles": 0xF3F0D9, "Honeycomb": 0xDDAA11, "Honeycomb Yellow": 0xDE9C52, "Honeydew": 0xF0FFF0, "Honeydew Melon": 0xE6ECCC, "Honeydew Peel": 0xD4FB79, "Honeypot": 0xF6DEB3, "Honeysuckle": 0xE8ED69, "Honeysuckle Blast": 0xB3833F, "Honeysuckle Vine": 0xFBF1C8, "Honeysuckle White": 0xF8ECD3, "Honeysweet": 0xE9CFC8, "Hóng Bǎo Shū Red": 0xE02006, "Hong Kong Mist": 0x948E90, "Hong Kong Skyline": 0x676E7A, "Hong Kong Taxi": 0xA8102A, "Hóng Lóu Mèng Red": 0xCF3F4F, "Hóng Sè 
Red": 0xFF0809, "Hóng Zōng Brown": 0x564A33, "Honied White": 0xFCEFD1, "Honky Tonk Blue": 0x446A8D, "Honolulu Blue": 0x007FBF, "Honorable Blue": 0x164576, "Hooked Mimosa": 0xFFC9C4, "Hooker's Green": 0x49796B, "Hooloovoo Blue": 0x4455FF, "Hopbush": 0xCD6D93, "Hope": 0xE581A0, "Hope Chest": 0x875942, "Hopeful": 0xF2D4E2, "Hopeful Blue": 0xA2B9BF, "Hopeful Dream": 0x95A9CD, "Hopi Blue Corn": 0x174871, "Hopi Moccasin": 0xFFE4B5, "Hopsack": 0x9E8163, "Hopscotch": 0xAFBB42, "Horchata": 0xF2E9D9, "Horenso Green": 0x789B73, "Horizon": 0x648894, "Horizon Blue": 0x289DBE, "Horizon Glow": 0xAD7171, "Horizon Grey": 0x9CA9AA, "Horizon Haze": 0x80C1E2, "Horizon Island": 0xCDD4C6, "Horizon Sky": 0xC2C3D3, "Hormagaunt Purple": 0x51576F, "Horn of Plenty": 0xBBA46D, "Hornblende": 0x332222, "Hornblende Green": 0x234E4D, "Horned Frog": 0xC2AE87, "Horned Lizard": 0xE8EAD5, "Hornet Nest": 0xD5DFD3, "Hornet Sting": 0xFF0033, "Hornet Yellow": 0xA67C08, "Horror Snob": 0xD34D4D, "Horse Liver": 0x543D37, "Horseradish": 0xE6DFC4, "Horseradish Cream": 0xEEEADD, "Horseradish Yellow": 0xFFDEA9, "Horses Neck": 0x6D562C, "Horsetail": 0x3D5D42, "Hortensia": 0x553B50, "Hosanna": 0xDBB8BF, "Hospital Green": 0x9BE5AA, "Hosta Flower": 0xDCDDE7, "Hostaleaf": 0x475A56, "Hot": 0xAC4362, "Hot and Spicy": 0xB35547, "Hot Aquarelle Pink": 0xFFB3DE, "Hot Beach": 0xFFF6D9, "Hot Bolognese": 0xCC5511, "Hot Butter": 0xE69D00, "Hot Cacao": 0xA5694F, "Hot Calypso": 0xFA8D7C, "Hot Chili": 0xAD756B, "Hot Chilli": 0xB7513A, "Hot Chocolate": 0x683B39, "Hot Cinnamon": 0xD1691C, "Hot Cocoa": 0x806257, "Hot Coral": 0xF35B53, "Hot Cuba": 0xBB0033, "Hot Curry": 0x815B28, "Hot Desert": 0xEAE4DA, "Hot Dog Relish": 0x717C3E, "Hot Embers": 0xF55931, "Hot Fever": 0xD40301, "Hot Flamingo": 0xB35966, "Hot Ginger": 0xA36736, "Hot Gossip": 0xE07C89, "Hot Green": 0x25FF29, "Hot Hazel": 0xDD6622, "Hot Hibiscus": 0xBB2244, "Hot Jazz": 0xBC3033, "Hot Lava": 0xAA0033, "Hot Lips": 0xC9312B, "Hot Magenta": 0xFF00CC, "Hot Mustard": 
0x735C12, "Hot Orange": 0xF4893D, "Hot Pepper Green": 0x598039, "Hot Pink": 0xFF028D, "Hot Purple": 0xCB00F5, "Hot Sand": 0xCCAA00, "Hot Sauce": 0xAB4F41, "Hot Sauna": 0x3F3F75, "Hot Spice": 0xCC2211, "Hot Spot": 0xFFE597, "Hot Stone": 0xABA89E, "Hot Sun": 0xF9B82B, "Hot Toddy": 0xA7752C, "Hothouse Orchid": 0x755468, "Hotot Bunny": 0xF1F3F2, "Hotspot": 0xFF4433, "Hotter Butter": 0xE68A00, "Hotter Than Hell": 0xFF4455, "Hottest Of Pinks": 0xFF80FF, "Hourglass": 0xE5E0D5, "House Martin Eggs": 0xE2E0DB, "House Sparrow's Egg": 0xD6D9DD, "House Stark Grey": 0x4D495B, "Houseplant": 0x58713F, "How Handsome": 0xA0AEB8, "How Now": 0x886150, "Howdy Neighbor": 0xF9E4C8, "Howdy Partner": 0xC6A698, "Howling Coyote": 0x9C7F5A, "Hú Lán Blue": 0x1DACD1, "Huáng Dì Yellow": 0xF8FF73, "Huáng Jīn Zhōu Gold": 0xFADA6A, "Huáng Sè Yellow": 0xF0F20C, "Hubbard Squash": 0xE9BF8C, "Hubert's Truck Green": 0x559933, "Huckleberry": 0x5B4349, "Huckleberry Brown": 0x71563B, "Hudson": 0xEADBD2, "Hudson Bee": 0xFDEF02, "Huelveño Horizon": 0x17A9E5, "Hugh's Hue": 0x9FA09F, "Hugo": 0xE6CFCC, "Hūi Sè Grey": 0xC1C6D3, "Hula Girl": 0x929264, "Hulett Ore": 0x726F6C, "Hulk": 0x008000, "Hull Red": 0x4D140B, "Humble Blush": 0xE3CDC2, "Humble Gold": 0xEDC796, "Humble Hippo": 0xAAAA99, "Humboldt Redwoods": 0x1F6357, "Humid Cave": 0xC9CCD2, "Hummingbird": 0xCEEFE4, "Hummingbird Green": 0x5B724A, "Hummus": 0xEECC99, "Humorous Green": 0xC6B836, "Humpback Whale": 0x473B3B, "Humus": 0xB7A793, "Hunky Hummingbird": 0xBB11FF, "Hunt Club": 0x2A4F43, "Hunt Club Brown": 0x938370, "Hunter Green": 0x0B4008, "Hunter's Hollow": 0x989A8D, "Hunter's Orange": 0xDB472C, "Huntington Garden": 0x96A782, "Huntington Woods": 0x46554C, "Hurricane": 0x8B7E77, "Hurricane Green Blue": 0x254D54, "Hurricane Haze": 0xBDBBAD, "Hurricane Mist": 0xEBEEE8, "Hush": 0xC4BDBA, "Hush Grey": 0xE1DED8, "Hush Pink": 0xF8E9E2, "Hush Puppy": 0xE4B095, "Hush White": 0xE5DAD4, "Hush-A-Bye": 0x5397B7, "Hushed Auburn": 0xA8857A, "Hushed Green": 0xD8E9E5, 
"Hushed Violet": 0xD1C0BF, "Hushed White": 0xF1F2E4, "Husk": 0xB2994B, "Husky": 0xE0EBFA, "Husky Orange": 0xBB613E, "Hutchins Plaza": 0xAE957C, "Hyacinth": 0x936CA7, "Hyacinth Arbor": 0x6C6783, "Hyacinth Dream": 0x807388, "Hyacinth Mauve": 0x6F729F, "Hyacinth Red": 0xA75536, "Hyacinth Tint": 0xB9C4D3, "Hyacinth Violet": 0x8D4687, "Hyacinth White Soft Blue": 0xC1C7D7, "Hybrid": 0xD0CDA9, "Hydra": 0x006995, "Hydra Turquoise": 0x007A73, "Hydrangea": 0x849BCC, "Hydrangea Blossom": 0xA6AEBE, "Hydrangea Bouquet": 0xCAA6A9, "Hydrangea Floret": 0xE6EAE0, "Hydrangea Pink": 0xE7B6C8, "Hydrangea Purple": 0xCAA0FF, "Hydrargyrum": 0x9B9B9B, "Hydro": 0x426972, "Hydrogen Blue": 0x33476D, "Hydrology": 0x89ACAC, "Hydroport": 0x5E9CA1, "Hygge Green": 0xE0E1D8, "Hygiene Green": 0x5DBCB4, "Hyper Blue": 0x015F97, "Hyper Green": 0x55FF00, "Hyper Light Drifter": 0xEDDBDA, "Hypnotic": 0x687783, "Hypnotic Sea": 0x00787F, "Hypnotism": 0x32584C, "Hypothalamus Grey": 0x415D66, "Hyssop": 0x6D4976, "I Heart Potion": 0xA97FB1, "I Love To Boogie": 0xFFA917, "I Miss You": 0xDDDBC5, "I Pink I Can": 0xD47F8D, "I R Dark Green": 0x404034, "I'm a Local": 0xEBBF5C, "Ibex Brown": 0x482400, "Ibis": 0xF4B3C2, "Ibis Mouse": 0xE4D2D8, "Ibis Rose": 0xCA628F, "Ibis White": 0xF2ECE6, "Ibis Wing": 0xF58F84, "Ibiza Blue": 0x007CB7, "Ice": 0xD6FFFA, "Ice Age": 0xC6E4E9, "Ice Ballet": 0xEADEE8, "Ice Blue": 0x739BD0, "Ice Blue Grey": 0x717787, "Ice Bomb": 0xCCE2DD, "Ice Boutique Turquoise": 0xA2CDCB, "Ice Breaker": 0xD4E7E7, "Ice Cap Green": 0xB9E7DD, "Ice Castle": 0xD5EDFB, "Ice Cave": 0xA0BEDA, "Ice Climber": 0x25E2CD, "Ice Cold": 0xD2EAF1, "Ice Cold Green": 0xD9EBAC, "Ice Cold Stare": 0xB1D1FC, "Ice Cream Cone": 0xE3D0BF, "Ice Cream Parlour": 0xF7D3AD, "Ice Crystal Blue": 0xA6E3E0, "Ice Cube": 0xAFE3D6, "Ice Dagger": 0xCEE5DF, "Ice Dark Turquoise": 0x005456, "Ice Dream": 0xEAEBE1, "Ice Drop": 0xD3E2EE, "Ice Effect": 0xBBEEFF, "Ice Fishing": 0xDCECF5, "Ice Floe": 0xD8E7E1, "Ice Flow": 0xC6D2D2, "Ice Flower": 
0xC3E7EC, "Ice Folly": 0xDBECE9, "Ice Glow": 0xFFFFE9, "Ice Green": 0x87D8C3, "Ice Grey": 0xCAC7C4, "Ice Gull Grey Blue": 0x9BB2BA, "Ice Hot Pink": 0xE4BDC2, "Ice Ice Baby": 0x00FFDD, "Ice Lemon": 0xFFFBC1, "Ice Mauve": 0xC9C2DD, "Ice Mist": 0xB6DBBF, "Ice Pack": 0xA5DBE3, "Ice Palace": 0xE2E4D7, "Ice Plant": 0xCF7EAD, "Ice Rink": 0xBBDDEE, "Ice Sculpture": 0xE1E6E5, "Ice Shard Soft Blue": 0xC1DEE2, "Ice Temple": 0x11FFEE, "Ice Water Green": 0xCDEBE1, "Ice Yellow": 0xFEFECD, "Ice-Cold White": 0xDFF0E2, "Iceberg": 0xDAE4EE, "Iceberg Green": 0x8C9C92, "Iced Aniseed": 0xCBD3C3, "Iced Apricot": 0xEFD6C0, "Iced Aqua": 0xABD3DB, "Iced Avocado": 0xC8E4B9, "Iced Cappuccino": 0x9C8866, "Iced Celery": 0xE5E9B7, "Iced Cherry": 0xE8C7BF, "Iced Coffee": 0xB18F6A, "Iced Copper": 0xD0AE9A, "Iced Espresso": 0x5A4A42, "Iced Green Apple": 0xECEBC9, "Iced Lavender": 0xC2C7DB, "Iced Mauve": 0xE8DCE3, "Iced Mocha": 0xA3846C, "Iced Orchid": 0x8E7D89,