input
stringlengths
2.65k
237k
output
stringclasses
1 value
> -1: aJet = jets[electron.jetIdx] aCSV = aJet.btagCSVV2 aDB = aJet.btagDeepB aDC = aJet.btagDeepC aDJ = aJet.btagDeepFlavB if electron.pt >= 15: self.hTree_bLepJetBtag.Fill(aCSV, ">15GeV e + CSVv2", 1.0) self.hTree_bLepJetBtag.Fill(aDB, ">15GeV e + Deep CSV B", 1.0) self.hTree_bLepJetBtag.Fill(aCSV, ">15GeV e + Deep CSV C", 1.0) self.hTree_bLepJetBtag.Fill(aDJ, ">15GeV e + DeepJet B", 1.0) else: self.hTree_bLepJetBtag.Fill(aCSV, "<15GeV e + CSVv2", 1.0) self.hTree_bLepJetBtag.Fill(aDB, "<15GeV e + Deep CSV B", 1.0) self.hTree_bLepJetBtag.Fill(aCSV, "<15GeV e + Deep CSV C", 1.0) self.hTree_bLepJetBtag.Fill(aDJ, "<15GeV e + DeepJet B", 1.0) for elTID in elTrueId: for elTIO in elTrueIso: self.hTree_bLepElIdIso.Fill(elTIO, elTID, 1.0) self.hTree_bLepElPtDz.Fill(electron.pt, electron.dz, 1.0) self.hTree_bLepElPtIp3d.Fill(electron.pt, electron.ip3d, 1.0) allTopLeps = [] ### All direct muons ### allMus = [treeMuon[m] for m in topMuons] allMus = [mu[0] for mu in allMus if len(mu) > 0] allTopLeps += [(m, muons[m].pt, "Muon") for m in allMus] ### All direct electrons ### allEls = [treeElectron[e] for e in topElectrons] allEls = [el[0] for el in allEls if len(el) > 0] allTopLeps += [(e, electrons[e].pt, "Electron") for e in allEls] ### All tau muons ### tauMus = [treeMuon[m] for m in topTauMuons] tauMus = [mu[0] for mu in tauMus if len(mu) > 0] tauMus.sort(key = lambda m : muons[m].pt, reverse=True) allTopLeps += [(m, muons[m].pt, "TauMuon") for m in tauMus] ### All tau electrons ### tauEls = [treeElectron[e] for e in topTauElectrons] tauEls = [el[0] for el in tauEls if len(el) > 0] allTopLeps += [(e, electrons[e].pt, "TauElectron") for e in tauEls] allTopLeps.sort(key = lambda l : l[1], reverse=True) #print("allTopLeps: " +str(allTopLeps)) ii = nLepTop - 1 #0-indexed counting of leptonic tops in the event #print("nLepTop: " + str(nLepTop)) for jj, lepTup in enumerate(allTopLeps): if lepTup[2] == "Electron": electron = electrons[lepTup[0]] #print("Found Ele") 
self.hTree_ElPtDz.Fill(electron.pt, electron.dz, 1.0) self.hTree_ElPtIp3d.Fill(electron.pt, electron.ip3d, 1.0) elTrueId = [] elTrueIso = [] elTrueId.append("basicSelection") elTrueIso.append("basicSelection") self.hTree_ElPtId[ii][jj].Fill(electron.pt, "basicSelection", 1.0) if electron.cutBased == 0: self.hTree_ElPtId[ii][jj].Fill(electron.pt, "cutBased_fail", 1.0) elTrueId.append("cutBased_fail") if electron.cutBased == 1: self.hTree_ElPtId[ii][jj].Fill(electron.pt, "cutBased_veto", 1.0) elTrueId.append("cutBased_veto") if electron.cutBased >= 2: self.hTree_ElPtId[ii][jj].Fill(electron.pt, "cutBased_loose", 1.0) elTrueId.append("cutBased_loose") if electron.cutBased >= 3: self.hTree_ElPtId[ii][jj].Fill(electron.pt, "cutBased_medium", 1.0) elTrueId.append("cutBased_medium") if electron.cutBased == 2: self.hTree_ElPtId[ii][jj].Fill(electron.pt, "cutBased_tight", 1.0) elTrueId.append("cutBased_tight") if electron.mvaFall17V2Iso_WP80: self.hTree_ElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2Iso_WP80", 1.0) elTrueId.append("mvaFall17V2Iso_WP80") if electron.mvaFall17V2Iso_WP90: self.hTree_ElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2Iso_WP90", 1.0) elTrueId.append("mvaFall17V2Iso_WP90") if electron.mvaFall17V2Iso_WPL: self.hTree_ElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2Iso_WPL", 1.0) elTrueId.append("mvaFall17V2Iso_WPL") if electron.mvaFall17V2noIso_WP80: self.hTree_ElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2noIso_WP80", 1.0) elTrueId.append("mvaFall17V2noIso_WP80") if electron.mvaFall17V2noIso_WP90: self.hTree_ElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2noIso_WP90", 1.0) elTrueId.append("mvaFall17V2noIso_WP90") if electron.mvaFall17V2noIso_WPL: self.hTree_ElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2noIso_WPL", 1.0) elTrueId.append("mvaFall17V2noIso_WPL") for elTID in elTrueId: for elTIO in elTrueIso: self.hTree_ElIdIso.Fill(elTIO, elTID, 1.0) #if electron.mvaFall17V2Iso_WP80: # self.hTree_ElPtIso[ii][jj].Fill(electron.pt, "mvaFall17V2Iso_WP80", 1.0) # 
elTrueIso.append("mvaFall17V2Iso_WP80") #if electron.mvaFall17V2Iso_WP90: # self.hTree_ElPtIso[ii][jj].Fill(electron.pt, "mvaFall17V2Iso_WP90", 1.0) # elTrueIso.append("mvaFall17V2Iso_WP90") #if electron.mvaFall17V2Iso_WPL: # self.hTree_ElPtIso[ii][jj].Fill(electron.pt, "mvaFall17V2Iso_WPL", 1.0) # elTrueIso.append("mvaFall17V2Iso_WPL") elif lepTup[2] == "TauElectron": electron = electrons[lepTup[0]] #print("Found Tau Ele") self.hTree_TauToElPtDz.Fill(electron.pt, electron.dz, 1.0) self.hTree_TauToElPtIp3d.Fill(electron.pt, electron.ip3d, 1.0) elTrueId = [] elTrueIso = [] elTrueId.append("basicSelection") elTrueIso.append("basicSelection") self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "basicSelection", 1.0) if electron.cutBased == 0: self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "cutBased_fail", 1.0) elTrueId.append("cutBased_fail") if electron.cutBased == 1: self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "cutBased_veto", 1.0) elTrueId.append("cutBased_veto") if electron.cutBased >= 2: self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "cutBased_loose", 1.0) elTrueId.append("cutBased_loose") if electron.cutBased >= 3: self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "cutBased_medium", 1.0) elTrueId.append("cutBased_medium") if electron.cutBased == 2: self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "cutBased_tight", 1.0) elTrueId.append("cutBased_tight") if electron.mvaFall17V2Iso_WP80: self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2Iso_WP80", 1.0) elTrueId.append("mvaFall17V2Iso_WP80") if electron.mvaFall17V2Iso_WP90: self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2Iso_WP90", 1.0) elTrueId.append("mvaFall17V2Iso_WP90") if electron.mvaFall17V2Iso_WPL: self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2Iso_WPL", 1.0) elTrueId.append("mvaFall17V2Iso_WPL") if electron.mvaFall17V2noIso_WP80: self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2noIso_WP80", 1.0) elTrueId.append("mvaFall17V2noIso_WP80") if 
electron.mvaFall17V2noIso_WP90: self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2noIso_WP90", 1.0) elTrueId.append("mvaFall17V2noIso_WP90") if electron.mvaFall17V2noIso_WPL: self.hTree_TauToElPtId[ii][jj].Fill(electron.pt, "mvaFall17V2noIso_WPL", 1.0) elTrueId.append("mvaFall17V2noIso_WPL") for elTID in elTrueId: for elTIO in elTrueIso: self.hTree_TauToElIdIso.Fill(elTIO, elTID, 1.0) #Reverse to make easier to read elif lepTup[2] == "Muon": muon = muons[lepTup[0]] #print("Found Mu") self.hTree_MuPtDz.Fill(muon.pt, muon.dz, 1.0) self.hTree_MuPtIp3d.Fill(muon.pt, muon.ip3d, 1.0) self.hTree_MuPtId[ii][jj].Fill(muon.pt, "looseId", 1.0) muTrueId = [] muTrueIso = [] muTrueId.append("looseId") muTrueIso.append("noIso") if muon.mediumId: self.hTree_MuPtId[ii][jj].Fill(muon.pt, "mediumId", 1.0) muTrueId.append("mediumId") if muon.mediumPromptId: self.hTree_MuPtId[ii][jj].Fill(muon.pt, "mediumPromptId", 1.0) muTrueId.append("mediumPromptId") if muon.tightId: self.hTree_MuPtId[ii][jj].Fill(muon.pt, "tightId", 1.0) muTrueId.append("tightId") if muon.triggerIdLoose: self.hTree_MuPtId[ii][jj].Fill(muon.pt, "triggerLooseId", 1.0) muTrueId.append("triggerLooseId") if muon.softId: self.hTree_MuPtId[ii][jj].Fill(muon.pt, "softId", 1.0) muTrueId.append("softId") if muon.softMvaId: self.hTree_MuPtId[ii][jj].Fill(muon.pt, "softMvaId", 1.0) muTrueId.append("softMvaId") if muon.mvaId >= 1: self.hTree_MuPtId[ii][jj].Fill(muon.pt, "mvaLooseId", 1.0) muTrueId.append("mvaLooseId") if muon.mvaId >= 2: self.hTree_MuPtId[ii][jj].Fill(muon.pt, "mvaMediumId", 1.0) muTrueId.append("mvaMediumId") if muon.mvaId == 3: self.hTree_MuPtId[ii][jj].Fill(muon.pt, "mvaTightId", 1.0) muTrueId.append("mvaTightId") if muon.highPtId == 2: self.hTree_MuPtId[ii][jj].Fill(muon.pt, "highPtId", 1.0) muTrueId.append("highPtId") #Iso variables (booleans) if muon.pfIsoId >= 1: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "PFIsoVeryLoose", 1.0) muTrueIso.append("PFIsoVeryLoose") if muon.pfIsoId >= 2: 
self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "PFIsoLoose", 1.0) muTrueIso.append("PFIsoLoose") if muon.pfIsoId >= 3: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "PFIsoMedium", 1.0) muTrueIso.append("PFIsoMedium") if muon.pfIsoId >= 4: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "PFIsoTight", 1.0) muTrueIso.append("PFIsoTight") if muon.pfIsoId >= 5: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "PFIsoVeryTight", 1.0) muTrueIso.append("PFIsoVeryTight") if muon.pfIsoId == 6: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "PFIsoVeryVeryTight", 1.0) muTrueIso.append("PFIsoVeryVeryTight") if muon.multiIsoId >= 1: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "MultiIsoLoose", 1.0) muTrueIso.append("MultiIsoLoose") if muon.multiIsoId == 2: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "MultiIsoMedium", 1.0) muTrueIso.append("MultiIsoMedium") if muon.tkIsoId >= 1: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "TkIsoLoose", 1.0) muTrueIso.append("TkIsoLoose") if muon.tkIsoId == 2: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "TkIsoTight", 1.0) muTrueIso.append("TkIsoTight") if muon.miniIsoId >= 1: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "MiniIsoLoose", 1.0) muTrueIso.append("MiniIsoLoose") if muon.miniIsoId >= 2: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "MiniIsoMedium", 1.0) muTrueIso.append("MiniIsoMedium") if muon.miniIsoId >= 3: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "MiniIsoTight", 1.0) muTrueIso.append("MiniIsoTight") if muon.miniIsoId == 4: self.hTree_MuIsoId[ii][jj].Fill(muon.pt, "MiniIsoVeryTight", 1.0) muTrueIso.append("MiniIsoVeryTight") for muTID in muTrueId: for muTIO in muTrueIso: self.hTree_MuIdIso.Fill(muTID, muTIO, 1.0) elif lepTup[2] == "TauMuon": muon = muons[lepTup[0]] #print("Found Tau Mu") self.hTree_TauToMuPtDz.Fill(muon.pt, muon.dz, 1.0) self.hTree_TauToMuPtIp3d.Fill(muon.pt, muon.ip3d, 1.0) self.hTree_TauToMuPtId[ii][jj].Fill(muon.pt, "looseId", 1.0) muTrueId = [] muTrueIso = [] muTrueId.append("looseId") muTrueIso.append("noIso") if muon.mediumId: 
self.hTree_TauToMuPtId[ii][jj].Fill(muon.pt, "mediumId", 1.0) muTrueId.append("mediumId") if muon.mediumPromptId: self.hTree_TauToMuPtId[ii][jj].Fill(muon.pt, "mediumPromptId", 1.0) muTrueId.append("mediumPromptId") if muon.tightId: self.hTree_TauToMuPtId[ii][jj].Fill(muon.pt, "tightId", 1.0) muTrueId.append("tightId") if muon.triggerIdLoose: self.hTree_TauToMuPtId[ii][jj].Fill(muon.pt, "triggerLooseId", 1.0) muTrueId.append("triggerLooseId") if muon.softId: self.hTree_TauToMuPtId[ii][jj].Fill(muon.pt, "softId", 1.0) muTrueId.append("softId") if muon.softMvaId: self.hTree_TauToMuPtId[ii][jj].Fill(muon.pt, "softMvaId", 1.0) muTrueId.append("softMvaId") if muon.mvaId >= 1: self.hTree_TauToMuPtId[ii][jj].Fill(muon.pt, "mvaLooseId", 1.0) muTrueId.append("mvaLooseId") if muon.mvaId >= 2: self.hTree_TauToMuPtId[ii][jj].Fill(muon.pt, "mvaMediumId", 1.0) muTrueId.append("mvaMediumId") if muon.mvaId == 3: self.hTree_TauToMuPtId[ii][jj].Fill(muon.pt, "mvaTightId", 1.0) muTrueId.append("mvaTightId") if muon.highPtId == 2: self.hTree_TauToMuPtId[ii][jj].Fill(muon.pt, "highPtId", 1.0) muTrueId.append("highPtId") #Iso variables (booleans) if muon.pfIsoId >= 1: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "PFIsoVeryLoose", 1.0) muTrueIso.append("PFIsoVeryLoose") if muon.pfIsoId >= 2: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "PFIsoLoose", 1.0) muTrueIso.append("PFIsoLoose") if muon.pfIsoId >= 3: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "PFIsoMedium", 1.0) muTrueIso.append("PFIsoMedium") if muon.pfIsoId >= 4: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "PFIsoTight", 1.0) muTrueIso.append("PFIsoTight") if muon.pfIsoId >= 5: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "PFIsoVeryTight", 1.0) muTrueIso.append("PFIsoVeryTight") if muon.pfIsoId == 6: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "PFIsoVeryVeryTight", 1.0) muTrueIso.append("PFIsoVeryVeryTight") if muon.multiIsoId >= 1: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "MultiIsoLoose", 1.0) 
muTrueIso.append("MultiIsoLoose") if muon.multiIsoId == 2: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "MultiIsoMedium", 1.0) muTrueIso.append("MultiIsoMedium") if muon.tkIsoId >= 1: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "TkIsoLoose", 1.0) muTrueIso.append("TkIsoLoose") if muon.tkIsoId == 2: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "TkIsoTight", 1.0) muTrueIso.append("TkIsoTight") if muon.miniIsoId >= 1: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "MiniIsoLoose", 1.0) muTrueIso.append("MiniIsoLoose") if muon.miniIsoId >= 2: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "MiniIsoMedium", 1.0) muTrueIso.append("MiniIsoMedium") if muon.miniIsoId >= 3: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "MiniIsoTight", 1.0) muTrueIso.append("MiniIsoTight") if muon.miniIsoId == 4: self.hTree_TauToMuIsoId[ii][jj].Fill(muon.pt, "MiniIsoVeryTight", 1.0) muTrueIso.append("MiniIsoVeryTight") for muTID in muTrueId: for muTIO in muTrueIso: self.hTree_TauToMuIdIso.Fill(muTID, muTIO, 1.0) for sidx in xrange(len(gens)): if len(treeElectron[sidx]) > 0: self.hScratch.Fill(len(treeElectron[sidx]), "Electron", 1.0) if len(treeMuon[sidx]) > 0: self.hScratch.Fill(len(treeMuon[sidx]), "Muon", 1.0) if len(treeTau[sidx]) > 0: self.hScratch.Fill(len(treeTau[sidx]), "Tau", 1.0) if len(treeJet[sidx]) > 0: self.hScratch.Fill(len(treeJet[sidx]), "Jet", 1.0) # treeJetDR[sidx].sort() # for drc, dr in enumerate(treeJetDR[sidx]): # self.hTree_DeltaR.Fill(dr, str(drc+1)+" Jet", 1.0) if len(treeGenJet[sidx]) > 0: self.hScratch.Fill(len(treeGenJet[sidx]), "GenJet", 1.0) # treeGenJetDR[sidx].sort() # for drc, dr in enumerate(treeGenJetDR[sidx]): # self.hTree_DeltaR.Fill(dr, str(drc+1)+" GenJet", 1.0) if len(treeFatJet[sidx]) > 0: self.hScratch.Fill(len(treeFatJet[sidx]), "FatJet", 1.0) # treeFatJetDR[sidx].sort() # for drc, dr in enumerate(treeFatJetDR[sidx]): # self.hTree_DeltaR.Fill(dr, str(drc+1)+" FatJet", 1.0) if len(treeGenJetAK8[sidx]) > 0: self.hScratch.Fill(len(treeGenJetAK8[sidx]), 
"GenJetAK8", 1.0) # treeGenJetAK8DR[sidx].sort() # for drc, dr in enumerate(treeGenJetAK8DR[sidx]): # self.hTree_DeltaR.Fill(dr, str(drc+1)+" GenJetAK8", 1.0) ############# ### Dumps ### ############# #dumpGenCollection(gens) #dumpMuonCollection(muons) #dumpElectronCollection(electrons) #dumpJetCollection(jets) ################################################ ### Initialize Branch Variables to be Filled ### ################################################ #Arrays # electrons_PES = [] # muons_PES = [] # jets_PES = [] # jets_Tagged = [] # jets_Untagged = [] # for i in xrange(len(electrons)): # electrons_PES.append(False) # for j in xrange(len(muons)): # muons_PES.append(False) # for k in xrange(len(jets)): # jets_PES.append(False) # jets_Tagged.append(False) # jets_Untagged.append(False) #genTop_VARS ############################################# ### Write out slimmed selection variables ### ############################################# #Make dictionary that makes this more automated, as in the branch creation # self.out.fillBranch("Electron_PES", electrons_PES) # self.out.fillBranch("Muon_PES", muons_PES) # self.out.fillBranch("Jet_PES", jets_PES) # self.out.fillBranch("Jet_Tagged", jets_Tagged) # self.out.fillBranch("Jet_Untagged", jets_Untagged) # self.out.fillBranch("EventVar_H", H) # self.out.fillBranch("EventVar_H2M", H2M) # self.out.fillBranch("EventVar_HT", HT) # self.out.fillBranch("EventVar_HT2M", HT2M) # self.out.fillBranch("EventVar_HTH", HTH) # self.out.fillBranch("EventVar_HTRat", HTRat) # self.out.fillBranch("EventVar_nBTagJet", nBJets) # self.out.fillBranch("EventVar_nTotJet", (nOthJets + nBJets)) # self.out.fillBranch("EventVar_Trig_MuMu", passMuMu) # self.out.fillBranch("EventVar_Trig_ElMu", passElMu) # self.out.fillBranch("EventVar_Trig_ElEl", passElEl) # self.out.fillBranch("EventVar_Trig_Mu", passMu) #print("\n===========\nFinished Event #" + str(event.event) + "\n\n") return True class MCTrees(Module): def __init__(self, verbose=False, 
makeHistos=False, maxevt=-1, probEvt=None): self.writeHistFile=False self.verbose=verbose self._verbose = verbose self.probEvt = probEvt if probEvt: #self.probEvt = probEvt self.verbose = True self.MADEHistos=False self.makeHistos = makeHistos #Bits for status flag checking self.bits = {'isPrompt':0b000000000000001, 'isDecayedLeptonHadron':0b000000000000010,
m.x626 - m.x630 == 0) m.e299 = Constraint(expr= m.x191 - m.x619 - m.x623 - m.x627 - m.x631 == 0) m.e300 = Constraint(expr= m.x192 - m.x620 - m.x624 - m.x628 - m.x632 == 0) m.e301 = Constraint(expr= m.x181 - m.x585 - m.x589 - m.x593 - m.x597 == 0) m.e302 = Constraint(expr= m.x182 - m.x586 - m.x590 - m.x594 - m.x598 == 0) m.e303 = Constraint(expr= m.x183 - m.x587 - m.x591 - m.x595 - m.x599 == 0) m.e304 = Constraint(expr= m.x184 - m.x588 - m.x592 - m.x596 - m.x600 == 0) m.e305 = Constraint(expr= m.x197 - m.x649 - m.x653 - m.x657 - m.x661 == 0) m.e306 = Constraint(expr= m.x198 - m.x650 - m.x654 - m.x658 - m.x662 == 0) m.e307 = Constraint(expr= m.x199 - m.x651 - m.x655 - m.x659 - m.x663 == 0) m.e308 = Constraint(expr= m.x200 - m.x652 - m.x656 - m.x660 - m.x664 == 0) m.e309 = Constraint(expr= m.x193 - m.x633 - m.x637 - m.x641 - m.x645 == 0) m.e310 = Constraint(expr= m.x194 - m.x634 - m.x638 - m.x642 - m.x646 == 0) m.e311 = Constraint(expr= m.x195 - m.x635 - m.x639 - m.x643 - m.x647 == 0) m.e312 = Constraint(expr= m.x196 - m.x636 - m.x640 - m.x644 - m.x648 == 0) m.e313 = Constraint(expr= m.x209 - m.x697 - m.x701 - m.x705 - m.x709 == 0) m.e314 = Constraint(expr= m.x210 - m.x698 - m.x702 - m.x706 - m.x710 == 0) m.e315 = Constraint(expr= m.x211 - m.x699 - m.x703 - m.x707 - m.x711 == 0) m.e316 = Constraint(expr= m.x212 - m.x700 - m.x704 - m.x708 - m.x712 == 0) m.e317 = Constraint(expr= m.x213 - m.x713 - m.x717 - m.x721 - m.x725 == 0) m.e318 = Constraint(expr= m.x214 - m.x714 - m.x718 - m.x722 - m.x726 == 0) m.e319 = Constraint(expr= m.x215 - m.x715 - m.x719 - m.x723 - m.x727 == 0) m.e320 = Constraint(expr= m.x216 - m.x716 - m.x720 - m.x724 - m.x728 == 0) m.e321 = Constraint(expr= m.x205 - m.x681 - m.x685 - m.x689 - m.x693 == 0) m.e322 = Constraint(expr= m.x206 - m.x682 - m.x686 - m.x690 - m.x694 == 0) m.e323 = Constraint(expr= m.x207 - m.x683 - m.x687 - m.x691 - m.x695 == 0) m.e324 = Constraint(expr= m.x208 - m.x684 - m.x688 - m.x692 - m.x696 == 0) m.e325 = Constraint(expr= 
m.x237 - m.x745 - m.x749 - m.x753 - m.x757 == 0) m.e326 = Constraint(expr= m.x238 - m.x746 - m.x750 - m.x754 - m.x758 == 0) m.e327 = Constraint(expr= m.x239 - m.x747 - m.x751 - m.x755 - m.x759 == 0) m.e328 = Constraint(expr= m.x240 - m.x748 - m.x752 - m.x756 - m.x760 == 0) m.e329 = Constraint(expr= m.x201 - m.x665 - m.x669 - m.x673 - m.x677 == 0) m.e330 = Constraint(expr= m.x202 - m.x666 - m.x670 - m.x674 - m.x678 == 0) m.e331 = Constraint(expr= m.x203 - m.x667 - m.x671 - m.x675 - m.x679 == 0) m.e332 = Constraint(expr= m.x204 - m.x668 - m.x672 - m.x676 - m.x680 == 0) m.e333 = Constraint(expr= m.x245 - m.x761 - m.x765 - m.x769 - m.x773 == 0) m.e334 = Constraint(expr= m.x246 - m.x762 - m.x766 - m.x770 - m.x774 == 0) m.e335 = Constraint(expr= m.x247 - m.x763 - m.x767 - m.x771 - m.x775 == 0) m.e336 = Constraint(expr= m.x248 - m.x764 - m.x768 - m.x772 - m.x776 == 0) m.e337 = Constraint(expr= m.x233 - m.x729 - m.x733 - m.x737 - m.x741 == 0) m.e338 = Constraint(expr= m.x234 - m.x730 - m.x734 - m.x738 - m.x742 == 0) m.e339 = Constraint(expr= m.x235 - m.x731 - m.x735 - m.x739 - m.x743 == 0) m.e340 = Constraint(expr= m.x236 - m.x732 - m.x736 - m.x740 - m.x744 == 0) m.e341 = Constraint(expr= m.x261 - m.x793 - m.x797 - m.x801 - m.x805 == 0) m.e342 = Constraint(expr= m.x262 - m.x794 - m.x798 - m.x802 - m.x806 == 0) m.e343 = Constraint(expr= m.x263 - m.x795 - m.x799 - m.x803 - m.x807 == 0) m.e344 = Constraint(expr= m.x264 - m.x796 - m.x800 - m.x804 - m.x808 == 0) m.e345 = Constraint(expr= m.x257 - m.x777 - m.x781 - m.x785 - m.x789 == 0) m.e346 = Constraint(expr= m.x258 - m.x778 - m.x782 - m.x786 - m.x790 == 0) m.e347 = Constraint(expr= m.x259 - m.x779 - m.x783 - m.x787 - m.x791 == 0) m.e348 = Constraint(expr= m.x260 - m.x780 - m.x784 - m.x788 - m.x792 == 0) m.e349 = Constraint(expr= m.x553 - 148.75 * m.b957 <= 0) m.e350 = Constraint(expr= m.x554 - 127.5 * m.b958 <= 0) m.e351 = Constraint(expr= m.x555 - 127.5 * m.b959 <= 0) m.e352 = Constraint(expr= m.x556 - 127.5 * m.b960 <= 0) 
m.e353 = Constraint(expr= m.x557 - 148.75 * m.b961 <= 0) m.e354 = Constraint(expr= m.x558 - 127.5 * m.b962 <= 0) m.e355 = Constraint(expr= m.x559 - 127.5 * m.b963 <= 0) m.e356 = Constraint(expr= m.x560 - 127.5 * m.b964 <= 0) m.e357 = Constraint(expr= m.x561 - 148.75 * m.b965 <= 0) m.e358 = Constraint(expr= m.x562 - 127.5 * m.b966 <= 0) m.e359 = Constraint(expr= m.x563 - 127.5 * m.b967 <= 0) m.e360 = Constraint(expr= m.x564 - 127.5 * m.b968 <= 0) m.e361 = Constraint(expr= m.x565 - 148.75 * m.b969 <= 0) m.e362 = Constraint(expr= m.x566 - 127.5 * m.b970 <= 0) m.e363 = Constraint(expr= m.x567 - 127.5 * m.b971 <= 0) m.e364 = Constraint(expr= m.x568 - 127.5 * m.b972 <= 0) m.e365 = Constraint(expr= m.x569 - 254.045833333333 * m.b973 <= 0) m.e366 = Constraint(expr= m.x570 - 218.468333333333 * m.b974 <= 0) m.e367 = Constraint(expr= m.x571 - 216.568333333333 * m.b975 <= 0) m.e368 = Constraint(expr= m.x572 - 211.216666666667 * m.b976 <= 0) m.e369 = Constraint(expr= m.x573 - 254.045833333333 * m.b977 <= 0) m.e370 = Constraint(expr= m.x574 - 218.468333333333 * m.b978 <= 0) m.e371 = Constraint(expr= m.x575 - 216.568333333333 * m.b979 <= 0) m.e372 = Constraint(expr= m.x576 - 211.216666666667 * m.b980 <= 0) m.e373 = Constraint(expr= m.x577 - 254.045833333333 * m.b981 <= 0) m.e374 = Constraint(expr= m.x578 - 218.468333333333 * m.b982 <= 0) m.e375 = Constraint(expr= m.x579 - 216.568333333333 * m.b983 <= 0) m.e376 = Constraint(expr= m.x580 - 211.216666666667 * m.b984 <= 0) m.e377 = Constraint(expr= m.x581 - 254.045833333333 * m.b985 <= 0) m.e378 = Constraint(expr= m.x582 - 218.468333333333 * m.b986 <= 0) m.e379 = Constraint(expr= m.x583 - 216.568333333333 * m.b987 <= 0) m.e380 = Constraint(expr= m.x584 - 211.216666666667 * m.b988 <= 0) m.e381 = Constraint(expr= m.x601 - 20.4166666666667 * m.b989 <= 0) m.e382 = Constraint(expr= m.x602 - 17.9666666666667 * m.b990 <= 0) m.e383 = Constraint(expr= m.x603 - 17.9666666666667 * m.b991 <= 0) m.e384 = Constraint(expr= m.x604 - 16.3333333333333 
* m.b992 <= 0) m.e385 = Constraint(expr= m.x605 - 20.4166666666667 * m.b993 <= 0) m.e386 = Constraint(expr= m.x606 - 17.9666666666667 * m.b994 <= 0) m.e387 = Constraint(expr= m.x607 - 17.9666666666667 * m.b995 <= 0) m.e388 = Constraint(expr= m.x608 - 16.3333333333333 * m.b996 <= 0) m.e389 = Constraint(expr= m.x609 - 20.4166666666667 * m.b997 <= 0) m.e390 = Constraint(expr= m.x610 - 17.9666666666667 * m.b998 <= 0) m.e391 = Constraint(expr= m.x611 - 17.9666666666667 * m.b999 <= 0) m.e392 = Constraint(expr= m.x612 - 16.3333333333333 * m.b1000 <= 0) m.e393 = Constraint(expr= m.x613 - 20.4166666666667 * m.b1001 <= 0) m.e394 = Constraint(expr= m.x614 - 17.9666666666667 * m.b1002 <= 0) m.e395 = Constraint(expr= m.x615 - 17.9666666666667 * m.b1003 <= 0) m.e396 = Constraint(expr= m.x616 - 16.3333333333333 * m.b1004 <= 0) m.e397 = Constraint(expr= m.x617 - 20.4166666666667 * m.b989 <= 0) m.e398 = Constraint(expr= m.x618 - 17.9666666666667 * m.b990 <= 0) m.e399 = Constraint(expr= m.x619 - 17.9666666666667 * m.b991 <= 0) m.e400 = Constraint(expr= m.x620 - 16.3333333333333 * m.b992 <= 0) m.e401 = Constraint(expr= m.x621 - 20.4166666666667 * m.b993 <= 0) m.e402 = Constraint(expr= m.x622 - 17.9666666666667 * m.b994 <= 0) m.e403 = Constraint(expr= m.x623 - 17.9666666666667 * m.b995 <= 0) m.e404 = Constraint(expr= m.x624 - 16.3333333333333 * m.b996 <= 0) m.e405 = Constraint(expr= m.x625 - 20.4166666666667 * m.b997 <= 0) m.e406 = Constraint(expr= m.x626 - 17.9666666666667 * m.b998 <= 0) m.e407 = Constraint(expr= m.x627 - 17.9666666666667 * m.b999 <= 0) m.e408 = Constraint(expr= m.x628 - 16.3333333333333 * m.b1000 <= 0) m.e409 = Constraint(expr= m.x629 - 20.4166666666667 * m.b1001 <= 0) m.e410 = Constraint(expr= m.x630 - 17.9666666666667 * m.b1002 <= 0) m.e411 = Constraint(expr= m.x631 - 17.9666666666667 * m.b1003 <= 0) m.e412 = Constraint(expr= m.x632 - 16.3333333333333 * m.b1004 <= 0) m.e413 = Constraint(expr= m.x649 - 18.75 * m.b1005 <= 0) m.e414 = Constraint(expr= m.x650 - 16.5 * 
m.b1006 <= 0) m.e415 = Constraint(expr= m.x651 - 16.5 * m.b1007 <= 0) m.e416 = Constraint(expr= m.x652 - 15 * m.b1008 <= 0) m.e417 = Constraint(expr= m.x653 - 18.75 * m.b1009 <= 0) m.e418 = Constraint(expr= m.x654 -
# Copyright (c) Microsoft Corporation # # All rights reserved. # # MIT License # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. from __future__ import print_function import datetime import io import os import time import azure.storage.blob as azureblob import azure.batch.models as batchmodels import logging _STANDARD_OUT_FILE_NAME = 'stdout.txt' _STANDARD_ERROR_FILE_NAME = 'stderr.txt' _SAMPLES_CONFIG_FILE_NAME = 'configuration.cfg' class TimeoutError(Exception): """An error which can occur if a timeout has expired. 
""" def __init__(self, message): self.message = message def decode_string(string, encoding=None): """Decode a string with specified encoding :type string: str or bytes :param string: string to decode :param str encoding: encoding of string to decode :rtype: str :return: decoded string """ if isinstance(string, str): return string if encoding is None: encoding = 'utf-8' if isinstance(string, bytes): return string.decode(encoding) raise ValueError('invalid string type: {}'.format(type(string))) def select_latest_verified_vm_image_with_node_agent_sku( batch_client, publisher, offer, sku_starts_with): """Select the latest verified image that Azure Batch supports given a publisher, offer and sku (starts with filter). :param batch_client: The batch client to use. :type batch_client: `batchserviceclient.BatchServiceClient` :param str publisher: vm image publisher :param str offer: vm image offer :param str sku_starts_with: vm sku starts with filter :rtype: tuple :return: (node agent sku id to use, vm image ref to use) """ # get verified vm image list and node agent sku ids from service node_agent_skus = batch_client.account.list_node_agent_skus() # pick the latest supported sku skus_to_use = [ (sku, image_ref) for sku in node_agent_skus for image_ref in sorted( sku.verified_image_references, key=lambda item: item.sku) if image_ref.publisher.lower() == publisher.lower() and image_ref.offer.lower() == offer.lower() and image_ref.sku.startswith(sku_starts_with) ] # skus are listed in reverse order, pick first for latest sku_to_use, image_ref_to_use = skus_to_use[0] return (sku_to_use.id, image_ref_to_use) def wait_for_tasks_to_complete(batch_client, job_id, timeout): """Waits for all the tasks in a particular job to complete. :param batch_client: The batch client to use. :type batch_client: `batchserviceclient.BatchServiceClient` :param str job_id: The id of the job to monitor. :param timeout: The maximum amount of time to wait. 
:type timeout: `datetime.timedelta` """ time_to_timeout_at = datetime.datetime.now() + timeout while datetime.datetime.now() < time_to_timeout_at: print("Checking if all tasks are complete...") try: tasks = batch_client.task.list(job_id) incomplete_tasks = [task for task in tasks if task.state != batchmodels.TaskState.completed] if not incomplete_tasks: return except Exception as e: print("Checking failed...") logging.error(e) time.sleep(5) raise TimeoutError("Timed out waiting for tasks to complete") def print_task_output(batch_client, job_id, task_ids, encoding=None): """Prints the stdout and stderr for each task specified. :param batch_client: The batch client to use. :type batch_client: `batchserviceclient.BatchServiceClient` :param str job_id: The id of the job to monitor. :param task_ids: The collection of tasks to print the output for. :type task_ids: `list` :param str encoding: The encoding to use when downloading the file. """ for task_id in task_ids: file_text = read_task_file_as_string( batch_client, job_id, task_id, _STANDARD_OUT_FILE_NAME, encoding) print("{} content for task {}: ".format( _STANDARD_OUT_FILE_NAME, task_id)) print(file_text) file_text = read_task_file_as_string( batch_client, job_id, task_id, _STANDARD_ERROR_FILE_NAME, encoding) print("{} content for task {}: ".format( _STANDARD_ERROR_FILE_NAME, task_id)) print(file_text) def print_configuration(config): """Prints the configuration being used as a dictionary :param config: The configuration. :type config: `configparser.ConfigParser` """ configuration_dict = {s: dict(config.items(s)) for s in config.sections() + ['DEFAULT']} print("Configuration is:") print(configuration_dict) def _read_stream_as_string(stream, encoding): """Read stream as string :param stream: input stream generator :param str encoding: The encoding of the file. The default is utf-8. :return: The file content. 
:rtype: str """ output = io.BytesIO() try: for data in stream: output.write(data) if encoding is None: encoding = 'utf-8' return output.getvalue().decode(encoding) finally: output.close() raise RuntimeError('could not write data to stream or decode bytes') def read_task_file_as_string( batch_client, job_id, task_id, file_name, encoding=None): """Reads the specified file as a string. :param batch_client: The batch client to use. :type batch_client: `batchserviceclient.BatchServiceClient` :param str job_id: The id of the job. :param str task_id: The id of the task. :param str file_name: The name of the file to read. :param str encoding: The encoding of the file. The default is utf-8. :return: The file content. :rtype: str """ stream = batch_client.file.get_from_task(job_id, task_id, file_name) return _read_stream_as_string(stream, encoding) def read_compute_node_file_as_string( batch_client, pool_id, node_id, file_name, encoding=None): """Reads the specified file as a string. :param batch_client: The batch client to use. :type batch_client: `batchserviceclient.BatchServiceClient` :param str pool_id: The id of the pool. :param str node_id: The id of the node. :param str file_name: The name of the file to read. :param str encoding: The encoding of the file. The default is utf-8 :return: The file content. :rtype: str """ stream = batch_client.file.get_from_compute_node( pool_id, node_id, file_name) return _read_stream_as_string(stream, encoding) def create_pool_if_not_exist(batch_client, pool): """Creates the specified pool if it doesn't already exist :param batch_client: The batch client to use. :type batch_client: `batchserviceclient.BatchServiceClient` :param pool: The pool to create. 
:type pool: `batchserviceclient.models.PoolAddParameter` """ try: print("Attempting to create pool:", pool.id) batch_client.pool.add(pool) print("Created pool:", pool.id) except batchmodels.BatchErrorException as e: if e.error.code != "PoolExists": raise else: print("Pool {!r} already exists".format(pool.id)) def create_job(batch_service_client, job_id, pool_id): """ Creates a job with the specified ID, associated with the specified pool. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The ID for the job. :param str pool_id: The ID for the pool. """ print('Creating job [{}]...'.format(job_id)) job = batchmodels.JobAddParameter( job_id, batchmodels.PoolInformation(pool_id=pool_id)) try: batch_service_client.job.add(job) except batchmodels.batch_error.BatchErrorException as err: print_batch_exception(err) if err.error.code != "JobExists": raise else: print("Job {!r} already exists".format(job_id)) def wait_for_all_nodes_state(batch_client, pool, node_state): """Waits for all nodes in pool to reach any specified state in set :param batch_client: The batch client to use. :type batch_client: `batchserviceclient.BatchServiceClient` :param pool: The pool containing the node. 
:type pool: `batchserviceclient.models.CloudPool` :param set node_state: node states to wait for :rtype: list :return: list of `batchserviceclient.models.ComputeNode` """ print('waiting for all nodes in pool {} to reach one of: {!r}'.format( pool.id, node_state)) i = 0 while True: # refresh pool to ensure that there is no resize error pool = batch_client.pool.get(pool.id) if pool.resize_error is not None: raise RuntimeError( 'resize error encountered for pool {}: {!r}'.format( pool.id, pool.resize_error)) nodes = list(batch_client.compute_node.list(pool.id)) if (len(nodes) >= pool.target_dedicated and all(node.state in node_state for node in nodes)): return nodes i += 1 if i % 3 == 0: print('waiting for {} nodes to reach desired state...'.format( pool.target_dedicated)) time.sleep(10) def create_container_and_create_sas( block_blob_client, container_name, permission, expiry=None, timeout=None): """Create a blob sas token :param block_blob_client: The storage block blob client to use. :type block_blob_client: `azure.storage.blob.BlockBlobService` :param str container_name: The name of the container to upload the blob to. :param expiry: The SAS expiry time. :type expiry: `datetime.datetime` :param int timeout: timeout in minutes from now for expiry, will only be used if expiry is not specified :return: A SAS token :rtype: str """ if expiry is None: if timeout is None: timeout = 30 expiry = datetime.datetime.utcnow() + datetime.timedelta( minutes=timeout) block_blob_client.create_container( container_name, fail_on_exist=False) return block_blob_client.generate_container_shared_access_signature( container_name=container_name, permission=permission, expiry=expiry) def create_sas_token( block_blob_client, container_name, blob_name, permission, expiry=None, timeout=None): """Create a blob sas token :param block_blob_client: The storage block blob client to use. 
:type block_blob_client: `azure.storage.blob.BlockBlobService` :param str container_name: The name of the container to upload the blob to. :param str blob_name: The name of the blob to upload the local file to. :param expiry: The SAS expiry time. :type expiry: `datetime.datetime` :param int timeout: timeout in minutes from now for expiry, will only be used if expiry is not specified :return: A SAS token :rtype: str """ if expiry is
# coding: utf-8
#
# Utilities for importing Python modules by name, file path, or HTTP URI,
# with explicit control over how `sys.modules` is consulted and updated.
# Written to be Python 2/3 compatible (hence the `exec_`/`raise_` shims).
from __future__ import absolute_import

# Standard-library imports
import imp
import os.path
import sys

try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2

__version__ = '0.3'


# -----
# Define `exec_` and `raise_` that are Python 2 *and* 3 compatible.
#
# Modified from `six`:
# https://bitbucket.org/gutworth/six/src/
# cc9fce6016db076497454f9352e55b4758ccc07c/six.py?at=default#cl-632
if sys.version_info[0] == 2:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals, like the py3 builtin.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    # py2-only `raise` syntax must be hidden inside a string, otherwise the
    # module would not even compile on py3.
    # NOTE(review): the line layout inside this string was reconstructed from
    # a whitespace-collapsed source — confirm against the upstream file.
    exec_("""def raise_(exc, tb=None):
    raise exc, None, tb
""")
else:
    # On py3, `exec` is an ordinary builtin; `eval` fetches it without the
    # py2 parser choking on the name.
    exec_ = eval('exec')

    def raise_(exc, tb=None):
        """Re-raise `exc`, attaching traceback `tb` when given."""
        if tb is not None and exc.__traceback__ is not tb:
            raise exc.with_traceback(tb)
        else:
            raise exc


# =====
def add_to_sys_modules(mod_name, mod_obj=None):
    """Add a module object to |sys.modules|.

    @param mod_name: module name, used as key to |sys.modules|.
    If |mod_name| is |a.b.c| while modules |a| and |a.b| are not existing,
    empty modules will be created for |a| and |a.b| as well.

    @param mod_obj: a module object.
    If None, an empty module object will be created.
    """
    mod_sname_s = mod_name.split('.')

    # Walk from the root package down, creating any missing ancestors and
    # wiring each child onto its parent as an attribute.
    parent_mod_name = ''
    parent_mod_obj = None
    for mod_sname in mod_sname_s:
        if parent_mod_name == '':
            cur_mod_name = mod_sname
        else:
            cur_mod_name = parent_mod_name + '.' + mod_sname

        if cur_mod_name == mod_name:
            # The leaf gets the caller-supplied object (may be None).
            cur_mod_obj = mod_obj
        else:
            cur_mod_obj = sys.modules.get(cur_mod_name, None)

        if cur_mod_obj is None:
            # create an empty module
            cur_mod_obj = imp.new_module(cur_mod_name)

        sys.modules[cur_mod_name] = cur_mod_obj

        if parent_mod_obj is not None:
            setattr(parent_mod_obj, mod_sname, cur_mod_obj)

        parent_mod_name = cur_mod_name
        parent_mod_obj = cur_mod_obj


def import_module_by_code(mod_code, mod_name, sys_add=True, sys_use=True):
    """Create a module object by code.

    @param mod_code: the code that the module contains.

    @param mod_name: module name.

    @param sys_use: whether use an existing module with the same name in
    |sys.modules|, instead of creating a new one.

    @param sys_add: whether add the module object to |sys.modules|.
    If |sys_add| is on, |mod_name| is used as key to |sys.modules|.
    If |sys_add| is on, and if |mod_name| is |a.b.c| while modules |a| and
    |a.b| are not existing, empty modules will be created for |a| and |a.b|
    as well.
    """
    mod_obj_old = sys.modules.get(mod_name, None)

    if mod_obj_old is not None and sys_use:
        return mod_obj_old

    mod_obj = imp.new_module(mod_name)

    # 3plQeic
    # Run the module code with the new module's dict as both globals and
    # locals, the same way a normal import executes a module body.
    exec_(mod_code, mod_obj.__dict__, mod_obj.__dict__)

    if sys_add:
        add_to_sys_modules(mod_name=mod_name, mod_obj=mod_obj)

    return mod_obj


def import_module_by_name(
    mod_name,
    ns_dir=None,
    sys_use=True,
    sys_add=True,
):
    """Import a module by module name.

    @param mod_name: module name in Python namespace.

    @param ns_dir: load from which namespace dir.
    Namespace dir means the dir is considered as if it's in |sys.path|.
    If |ns_dir| is specified, only load from that dir.
    Otherwise load from any namespace dirs in |sys.path|.
    """
    if ns_dir is None:
        try:
            mod_obj_old = sys.modules[mod_name]
        except KeyError:
            mod_obj_old = None

        if sys_use:
            if mod_obj_old is not None:
                return mod_obj_old
        # 3pRKQd1
        # if not want to use existing module in "sys.modules", need re-import
        # by calling "__import__" at 2eys2rL. But "__import__" will return
        # existing module in "sys.modules", so we must delete existing module
        # before calling "__import__".
        else:
            try:
                del sys.modules[mod_name]
            except KeyError:
                pass

        try:
            # 2eys2rL
            __import__(mod_name)
            # raise ImportError if the module not exists.
            # raise any error from the imported module.
        except Exception:
            if mod_obj_old is not None:
                # restore to "sys.modules" the old module deleted at 3pRKQd1
                sys.modules[mod_name] = mod_obj_old
            raise

        mod_obj = sys.modules[mod_name]

        if not sys_add:
            # The fresh import polluted "sys.modules" (and possibly the
            # parent package's attribute); undo that while keeping the newly
            # created module object to return.
            par_mod = None
            rdot_idx = mod_name.rfind('.')
            if rdot_idx != -1:
                par_mod_name = mod_name[0:rdot_idx]
                mod_sname = mod_name[rdot_idx + 1:]
                # can None
                par_mod = sys.modules.get(par_mod_name, None)

            if mod_obj_old is not None:
                # restore to "sys.modules" the old module deleted at 3pRKQd1
                sys.modules[mod_name] = mod_obj_old

                # restore to parent module's attribute the old module deleted
                # at 3pRKQd1
                if par_mod is not None \
                        and getattr(par_mod, mod_sname, None) is mod_obj:
                    try:
                        setattr(par_mod, mod_sname, mod_obj_old)
                    except AttributeError:
                        pass
            else:
                # delete from "sys.modules" the module newly loaded at 2eys2rL.
                try:
                    del sys.modules[mod_name]
                except KeyError:
                    pass

                if par_mod is not None \
                        and getattr(par_mod, mod_sname, None) is mod_obj:
                    # delete from parent module's attribute the module
                    # newly loaded at 2eys2rL.
                    try:
                        delattr(par_mod, mod_sname)
                    except AttributeError:
                        pass

        return mod_obj

    assert ns_dir is not None

    mod_file_name_s = mod_name.split('.')
    # |file_name| means the bare name, without extension.
    # E.g. 'a.b.c' to ['a', 'b', 'c']

    # Import each dotted component in turn, rooting the search at |ns_dir|.
    parent_mod_name = ''  # change in each iteration below
    mod_file_dir = ns_dir  # change in each iteration below
    for mod_file_name in mod_file_name_s:
        if parent_mod_name == '':
            parent_mod_obj = None
            mod_name = mod_file_name
        else:
            parent_mod_obj = sys.modules[parent_mod_name]
            mod_name = parent_mod_name + '.' + mod_file_name

        if parent_mod_obj:
            __import__(mod_name)
            mod_obj = sys.modules[mod_name]
        else:
            file_handle = None
            try:
                tup = imp.find_module(mod_file_name, [mod_file_dir])
                # raise ImportError
                mod_obj = imp.load_module(mod_name, *tup)
                # raise any error from the imported module.
                file_handle = tup[0]
            finally:
                # imp.find_module returns an open file handle; close it even
                # when load_module raises.
                if file_handle is not None:
                    file_handle.close()

        parent_mod_name = mod_name
        mod_file_dir = os.path.join(mod_file_dir, mod_file_name)

    return mod_obj


def import_module_by_path(mod_path, mod_name, sys_add=True, sys_use=True):
    """Import a module by module file path.

    @param mod_path: module file path.

    @param mod_name: module name to be imported as.

    @param sys_use: see func |import_module_by_code|'s same name arg.

    @param sys_add: see func |import_module_by_code|'s same name arg.
    """
    # NOTE(review): the file object from open() is never closed explicitly;
    # it is reclaimed by GC on CPython but would leak on other interpreters.
    mod_code = open(mod_path).read()
    # raise error

    mod_obj = import_module_by_code(
        mod_code=mod_code,
        mod_name=mod_name,
        sys_use=sys_use,
        sys_add=sys_add,
    )
    # raise error

    mod_obj.__file__ = mod_path

    return mod_obj


def import_module_by_http(uri, mod_name, sys_use=True, sys_add=True):
    """Download module code via HTTP and create the module object from the
    code.

    @param uri: HTTP URI of the module file.

    @param mod_name: module name to be imported as.

    @param sys_use: see func |import_module_by_code|'s same name arg.

    @param sys_add: see func |import_module_by_code|'s same name arg.
    """
    resp = urlopen(uri)
    # raise error

    mod_code = resp.read()
    # raise error

    mod_obj = import_module_by_code(
        mod_code=mod_code,
        mod_name=mod_name,
        sys_use=sys_use,
        sys_add=sys_add,
    )
    # raise error

    return mod_obj


def uri_split(uri, mod_attr_sep='::'):
    """Split |uri| into (protocol, module uri, attribute chain).

    Recognized protocols: 'http', 'https', 'py' (module name),
    'file' (file path).  With no protocol prefix, a '.py' suffix means
    a file path; anything else is treated as a module name.
    """
    uri_part_s = uri.split(mod_attr_sep, 2)
    # use |split| instead of |partition| to be compatible with Python 2.4-
    if len(uri_part_s) == 2:
        mod_uri, attr_chain = uri_part_s
    else:
        # NOTE(review): with maxsplit=2, a uri containing '::' twice yields
        # three parts and silently drops the attr chain here — confirm
        # whether that is intended.
        mod_uri = uri_part_s[0]
        attr_chain = None

    if uri.startswith('http://'):
        prot = 'http'
        # mod_uri is file url
        mod_uri = mod_uri
    elif uri.startswith('https://'):
        prot = 'https'
        # mod_uri is file url
        mod_uri = mod_uri
    elif mod_uri.startswith('py://'):
        prot = 'py'
        # mod_uri is module name; strip the 'py://' prefix
        mod_uri = mod_uri[5:]
    elif mod_uri.startswith('file://'):
        prot = 'file'
        # mod_uri is file path; strip the 'file://' prefix
        mod_uri = mod_uri[7:]
    # This means if no protocol prefix is present, and the uri ends with
    # |.py|, then consider the uri as module file path instead of module name.
    elif mod_uri.endswith('.py'):
        prot = 'file'
        # mod_uri is file path
        mod_uri = mod_uri
    else:
        prot = 'py'
        # mod_uri is module name
        mod_uri = mod_uri

    res = (prot, mod_uri, attr_chain)
    return res


def getattr_chain(obj, attr_chain, sep='.'):
    """Get the last attribute of a specified chain of attributes from a
    specified object.

    E.g. |getattr_chain(x, 'a.b.c')| is equivalent to |x.a.b.c|.

    @param obj: an object

    @param attr_chain: a chain of attribute names

    @param sep: separator for the chain of attribute names
    """
    if sep is None:
        sep = '.'

    attr_name_s = attr_chain.split(sep)

    new_obj = obj
    for attr_name in attr_name_s:
        new_obj = getattr(new_obj, attr_name)

    return new_obj


def load_obj(
    uri,
    mod_name=None,
    sys_use=True,
    sys_add=True,
    mod_attr_sep='::',
    attr_chain_sep='.',
    retn_mod=False,
    uri_parts=None,
):
    """Load an object from a module (specified by module name in Python
    namespace) or from a module file (specified by module file path).

    @param uri: an uri specifying which object to load.
    An |uri| consists of two parts: |module uri| and |attr chain|,
    e.g. |a/b/c.py::x.y.z| or |a.b.c::x.y.z|

    # module uri
    |a/b/c.py| or |a.b.c| is the |module uri| part.
    Can be either a file path or a module name in Python namespace.
    Whether it is a file path is determined
# Generated with SMOP 0.41-beta try: from smop.libsmop import * except ImportError: raise ImportError('File compiled with `smop3`, please install `smop3` to run it.') from None # PROJ_Cliquet_EIA_StochVol.m @function def PROJ_Cliquet_EIA_StochVol(numeric_param=None,M=None,r=None,q=None,T=None,psi_J=None,model=None,modparam=None,contract=None,contractParams=None,*args,**kwargs): varargin = PROJ_Cliquet_EIA_StochVol.varargin nargin = PROJ_Cliquet_EIA_StochVol.nargin ######################################################### # About: Pricing Function for American PUT Option using CTMC Approximation + PROJ method # Models Supported: Stochastic Volatility (including jumps) # Returns: price of contract # Author: <NAME> # References: (1) A unified approach to Bermudan and Barrier options under stochastic # volatility models with jumps. J. Economic Dynamics and Control, 2017 # (2) Robust barrier option pricing by Frame Projection under # exponential Levy Dynamics. Applied Mathematical Finance, 2018. # ---------------------- # Contract Params # ---------------------- # T : number of years (T = 2 is two years, T = .5 is half a year) # M : number of subintervals of [0,T] (total of M+1 points in time grid) # contract: 1 = sum of local caps # 2 = sum of local caps & floors # 3 = cliquet: local & global caps & floors # 4 = cliquet: local floor & cap, global floor, NO GLOBAL CAP # 5 = MPP: ie monthly point-to-point or Monthly Cap Sum (Bernard, Li) # contractParams: # K : Strike/Notional # C : Local Cap # CG : Global cap # F : Local Floor # FG : Global Floor # # ---------------------- # Model Params # ---------------------- # S_0: initial Underlying # r : interest rate # psi_J: characteristic exponenent of jump part... 
# function handdle: psi_J(xi) = lambda*(phi(xi) -1) # model: # 1 = HESTON: Sigmav, v0, rho, eta, theta # 2 = STEIN-STEIN: Sigmav, v0, rho, eta, theta # 3 = 3/2 MODEL: Sigmav, v0, rho, eta, theta # 4 = 4/2 MODEL: Sigmav, v0, rho, eta, theta, aa, bb # 5 = HULL-WHITE: Sigmav, v0, rho # 6 = SCOTT: Sigmav, v0, rho, eta, theta # 7 = ALPHA-HYPER: Sigmav, v0, rho, eta, theta # modparam: contains all necessary params for the specific model (see below during assingment which ones are needed) # ---------------------- # Numerical Params # ---------------------- # numeric_parm: container of numerical params # N : size of density grid (value grid is K:=N/2) # alph: density gridwith param, density on [-alph,alph]... value grid width = alph # m_0: number of states to approximate the Heston model with # gamma: var grid width parameter, grid is +/- gamma*stddev(variance process) # gridMethod: which type of var grid to use (typcially use 4) ######################################################### N=numeric_param.N # PROJ_Cliquet_EIA_StochVol.m:62 alph=numeric_param.alph # PROJ_Cliquet_EIA_StochVol.m:63 m_0=numeric_param.m_0 # PROJ_Cliquet_EIA_StochVol.m:64 gridMethod=numeric_param.gridMethod # PROJ_Cliquet_EIA_StochVol.m:65 gamma=numeric_param.gamma # PROJ_Cliquet_EIA_StochVol.m:66 varGridMult=numeric_param.gridMultParam # PROJ_Cliquet_EIA_StochVol.m:67 dx=dot(2,alph) / (N - 1) # PROJ_Cliquet_EIA_StochVol.m:69 a=1 / dx # PROJ_Cliquet_EIA_StochVol.m:69 dt=T / M # PROJ_Cliquet_EIA_StochVol.m:70 xmin=dot((1 - N / 2),dx) # PROJ_Cliquet_EIA_StochVol.m:72 ### Contract Parameters (Not all of these apply to every contact type) K=contractParams.K # PROJ_Cliquet_EIA_StochVol.m:75 C=contractParams.C # PROJ_Cliquet_EIA_StochVol.m:77 F=contractParams.F # PROJ_Cliquet_EIA_StochVol.m:78 CG=contractParams.CG # PROJ_Cliquet_EIA_StochVol.m:79 FG=contractParams.FG # PROJ_Cliquet_EIA_StochVol.m:80 lc=log(1 + C) # PROJ_Cliquet_EIA_StochVol.m:83 lf=log(1 + F) # PROJ_Cliquet_EIA_StochVol.m:84 ### Choose 
xmin so that CAP lc is a member klc=floor(dot(a,(lc - xmin))) + 1 # PROJ_Cliquet_EIA_StochVol.m:87 xklc=xmin + dot((klc - 1),dx) # PROJ_Cliquet_EIA_StochVol.m:88 xmin=xmin + (lc - xklc) # PROJ_Cliquet_EIA_StochVol.m:89 klf=floor(dot(a,(lf - xmin))) + 1 # PROJ_Cliquet_EIA_StochVol.m:91 #xklf = xmin + (klf - 1)*dx; #NOTE: defined with the new xmin if contract == 1 or contract == 5: hlocalCF=lambda x=None: multiply((exp(x) - 1),(x < lc)) + dot(C,(x >= lc)) # PROJ_Cliquet_EIA_StochVol.m:95 else: if contract == 2 or contract == 3 or contract == 4: #NOTE: we should then possibly stretch the grid so that lf is a member if klc != klf: dx=(lc - lf) / (klc - klf) # PROJ_Cliquet_EIA_StochVol.m:99 a=1 / dx # PROJ_Cliquet_EIA_StochVol.m:99 xmin=lf - dot((klf - 1),dx) # PROJ_Cliquet_EIA_StochVol.m:100 hlocalCF=lambda x=None: dot(F,(x <= lf)) + multiply(multiply((exp(x) - 1),(x < lc)),(x > lf)) + dot(C,(x >= lc)) # PROJ_Cliquet_EIA_StochVol.m:102 A=dot(32,a ** 4) # PROJ_Cliquet_EIA_StochVol.m:105 C_aN=A / N # PROJ_Cliquet_EIA_StochVol.m:106 ####//////////////////////////////////////////////////////// #### Intialize Q matrix and variance set ####//////////////////////////////////////////////////////// t=T / 2 # PROJ_Cliquet_EIA_StochVol.m:111 lx,v0,ux=get_variance_grid_boundaries(model,modparam,t,gamma,nargout=3) # PROJ_Cliquet_EIA_StochVol.m:112 mu_func,sig_func=get_SV_variance_grid_diffusion_funcs(model,modparam,nargout=2) # PROJ_Cliquet_EIA_StochVol.m:114 boundaryMethod=1 # PROJ_Cliquet_EIA_StochVol.m:115 center=copy(v0) # PROJ_Cliquet_EIA_StochVol.m:116 Q,v=Q_Matrix_AllForms(m_0,mu_func,sig_func,lx,ux,gridMethod,varGridMult,center,boundaryMethod,nargout=2) # PROJ_Cliquet_EIA_StochVol.m:118 ####//////////////////////////////////////////////////////// #### Populate the Matrix Exponentials ####//////////////////////////////////////////////////////// dxi=dot(dot(2,pi),a) / N # PROJ_Cliquet_EIA_StochVol.m:123 xi=dot(dxi,(arange(0,N - 1)).T) # PROJ_Cliquet_EIA_StochVol.m:124 
v1,v2,fv=get_SV_matrix_expo_inputs(model,modparam,psi_J,dt,v,dxi,r,nargout=3) # PROJ_Cliquet_EIA_StochVol.m:126 # Compute Matrix Exponentials for each xi(j) EXP_A=get_SV_matrix_exponential(Q,dt,xi,v1,v2,fv,psi_J,m_0,N) # PROJ_Cliquet_EIA_StochVol.m:128 # ################################################################### # ### PSI Matrix: 5-Point GAUSSIAN # ################################################################# if contract == 2 or contract == 3 or contract == 4: leftGridPoint=lf - dx # PROJ_Cliquet_EIA_StochVol.m:134 NNM=klc - klf + 3 # PROJ_Cliquet_EIA_StochVol.m:135 else: if contract == 1 or contract == 5: leftGridPoint=copy(xmin) # PROJ_Cliquet_EIA_StochVol.m:138 NNM=klc + 1 # PROJ_Cliquet_EIA_StochVol.m:139 else: #NOTE: this can be made more efficient by putting an upper bound, to reflect lc leftGridPoint=copy(xmin) # PROJ_Cliquet_EIA_StochVol.m:142 NNM=copy(N) # PROJ_Cliquet_EIA_StochVol.m:143 PSI=zeros(N - 1,NNM) # PROJ_Cliquet_EIA_StochVol.m:147 #### Sample Neta=dot(5,(NNM)) + 15 # PROJ_Cliquet_EIA_StochVol.m:150 Neta5=(NNM) + 3 # PROJ_Cliquet_EIA_StochVol.m:151 g2=sqrt(5 - dot(2,sqrt(10 / 7))) / 6 # PROJ_Cliquet_EIA_StochVol.m:152 g3=sqrt(5 + dot(2,sqrt(10 / 7))) / 6 # PROJ_Cliquet_EIA_StochVol.m:153 v1=dot(0.5,128) / 225 # PROJ_Cliquet_EIA_StochVol.m:154 v2=dot(0.5,(322 + dot(13,sqrt(70)))) / 900 # PROJ_Cliquet_EIA_StochVol.m:154 v3=dot(0.5,(322 - dot(13,sqrt(70)))) / 900 # PROJ_Cliquet_EIA_StochVol.m:154 thet=zeros(1,Neta) # PROJ_Cliquet_EIA_StochVol.m:156 thet[dot(5,(arange(1,Neta5))) - 2]=leftGridPoint - dot(1.5,dx) + dot(dx,(arange(0,Neta5 - 1))) # PROJ_Cliquet_EIA_StochVol.m:157 thet[dot(5,(arange(1,Neta5))) - 4]=leftGridPoint - dot(1.5,dx) + dot(dx,(arange(0,Neta5 - 1))) - dot(dx,g3) # PROJ_Cliquet_EIA_StochVol.m:158 thet[dot(5,(arange(1,Neta5))) - 3]=leftGridPoint - dot(1.5,dx) + dot(dx,(arange(0,Neta5 - 1))) - dot(dx,g2) # PROJ_Cliquet_EIA_StochVol.m:159 thet[dot(5,(arange(1,Neta5))) - 1]=leftGridPoint - dot(1.5,dx) + 
dot(dx,(arange(0,Neta5 - 1))) + dot(dx,g2) # PROJ_Cliquet_EIA_StochVol.m:160 thet[dot(5,(arange(1,Neta5)))]=leftGridPoint - dot(1.5,dx) + dot(dx,(arange(0,Neta5 - 1))) + dot(dx,g3) # PROJ_Cliquet_EIA_StochVol.m:161 #### Weights sig=concat([- 1.5 - g3,- 1.5 - g2,- 1.5,- 1.5 + g2,- 1.5 + g3,- 0.5 - g3,- 0.5 - g2,- 0.5,- 0.5 + g2,- 0.5 + g3]) # PROJ_Cliquet_EIA_StochVol.m:164 sig[arange(1,5)]=(sig(arange(1,5)) + 2) ** 3 / 6 # PROJ_Cliquet_EIA_StochVol.m:165 sig[arange(6,10)]=2 / 3 - dot(0.5,(sig(arange(6,10))) ** 3) - (sig(arange(6,10))) ** 2 # PROJ_Cliquet_EIA_StochVol.m:166 sig[concat([1,5,6,10])]=dot(v3,sig(concat([1,5,6,10]))) # PROJ_Cliquet_EIA_StochVol.m:168 sig[concat([2,4,7,9])]=dot(v2,sig(concat([2,4,7,9]))) # PROJ_Cliquet_EIA_StochVol.m:168 sig[concat([3,8])]=dot(v1,sig(concat([3,8]))) # PROJ_Cliquet_EIA_StochVol.m:168 ################################## ###NEW STEP: multiple sig by Upsilon_{a,N} sig=dot(C_aN,sig) # PROJ_Cliquet_EIA_StochVol.m:172 ################################## #### Fill Matrix #### NOTE: this can be made MORE EFFICIENT by using symmetery of x^2 #zz = exp(1i*dxi*log(1+exp(thet))); #zz = exp(1i*dxi*thet.^2); ## in general, 1i*dxh(thet) zz=exp(dot(dot(1j,dxi),hlocalCF(thet))) # PROJ_Cliquet_EIA_StochVol.m:180 thet=copy(zz) # PROJ_Cliquet_EIA_StochVol.m:181 for j in arange(1,N - 1).reshape(-1): PSI[j,arange()]=dot(sig(1),(thet(arange(1,Neta - 19,5)) + thet(arange(20,Neta,5)))) + dot(sig(2),(thet(arange(2,Neta - 18,5)) + thet(arange(19,Neta - 1,5)))) + dot(sig(3),(thet(arange(3,Neta - 17,5)) + thet(arange(18,Neta - 2,5)))) + dot(sig(4),(thet(arange(4,Neta - 16,5)) + thet(arange(17,Neta - 3,5)))) + dot(sig(5),(thet(arange(5,Neta - 15,5)) + thet(arange(16,Neta - 4,5)))) + dot(sig(6),(thet(arange(6,Neta - 14,5)) + thet(arange(15,Neta - 5,5)))) + dot(sig(7),(thet(arange(7,Neta - 13,5)) + thet(arange(14,Neta - 6,5)))) + dot(sig(8),(thet(arange(8,Neta - 12,5)) + thet(arange(13,Neta - 7,5)))) + dot(sig(9),(thet(arange(9,Neta - 11,5)) + 
thet(arange(12,Neta - 8,5)))) + dot(sig(10),(thet(arange(10,Neta - 10,5)) + thet(arange(11,Neta - 9,5)))) # PROJ_Cliquet_EIA_StochVol.m:184 thet=multiply(thet,zz) # PROJ_Cliquet_EIA_StochVol.m:195 # ################################################################### # ### Find phi_{Y_1} # ################################################################# xi=dot(dxi,(arange(1,N - 1)).T) # PROJ_Cliquet_EIA_StochVol.m:202 b0=1208 / 2520 # PROJ_Cliquet_EIA_StochVol.m:204 b1=1191 / 2520 # PROJ_Cliquet_EIA_StochVol.m:204 b2=120 / 2520 # PROJ_Cliquet_EIA_StochVol.m:204 b3=1 / 2520 # PROJ_Cliquet_EIA_StochVol.m:204 zeta=(sin(xi / (dot(2,a))) / xi) ** 4.0 / (b0 + dot(b1,cos(xi / a)) + dot(b2,cos(dot(2,xi) / a)) + dot(b3,cos(dot(3,xi) / a))) # PROJ_Cliquet_EIA_StochVol.m:205 hvec=multiply(exp(dot(dot(- 1j,xmin),xi)),zeta) # PROJ_Cliquet_EIA_StochVol.m:206 PHIY_old=zeros(N - 1,m_0) # PROJ_Cliquet_EIA_StochVol.m:208 PHIY_new=zeros(N - 1,m_0) # PROJ_Cliquet_EIA_StochVol.m:209 #BetaTemp = zeros(N,1); PHI=zeros(m_0,m_0,N - 1) # PROJ_Cliquet_EIA_StochVol.m:211 grand=zeros(N - 1,1) # PROJ_Cliquet_EIA_StochVol.m:212 expFxi=exp(dot(dot(1j,F),xi)) # PROJ_Cliquet_EIA_StochVol.m:214 expCxi=exp(dot(dot(1j,C),xi)) # PROJ_Cliquet_EIA_StochVol.m:215 if contract == 2 or contract == 3 or contract == 4: for j in arange(1,m_0).reshape(-1): #Step 1: characteristic function of log return for n in arange(1,N - 1).reshape(-1): PHIY_old[n,j]=sum(EXP_A(arange(1,m_0),j,n + 1)) # PROJ_Cliquet_EIA_StochVol.m:221 #Step 2: invert characteristic function of log return (ie this is beta) BetaTemp=real(fft(concat([[1 / A],[multiply(PHIY_old(arange(),j),hvec)]]))) # PROJ_Cliquet_EIA_StochVol.m:224 #Step 3: Phi_{Y_1}^j PHIY_new[arange(),j]=dot(PSI,BetaTemp(arange(klf - 1,klc + 1))) # PROJ_Cliquet_EIA_StochVol.m:227 sumBetaLeft=dot(C_aN,sum(BetaTemp(arange(1,klf - 2)))) # PROJ_Cliquet_EIA_StochVol.m:229 sumBetaRight=1 - sumBetaLeft - dot(C_aN,sum(BetaTemp(arange(klf - 1,klc + 1)))) # 
PROJ_Cliquet_EIA_StochVol.m:230 PHIY_new[arange(),j]=PHIY_new(arange(),j) + dot(expFxi,sumBetaLeft) + dot(expCxi,sumBetaRight) # PROJ_Cliquet_EIA_StochVol.m:231 # Define xiBig so that it can be added to a 3D matrix xiBigF=zeros(1,1,N - 1) # PROJ_Cliquet_EIA_StochVol.m:235 xiBigC=zeros(1,1,N - 1) # PROJ_Cliquet_EIA_StochVol.m:236 xiBigF[1,1,arange()]=expFxi # PROJ_Cliquet_EIA_StochVol.m:237 xiBigC[1,1,arange()]=expCxi # PROJ_Cliquet_EIA_StochVol.m:238 if M > 1: for j in arange(1,m_0).reshape(-1): for k in arange(1,m_0).reshape(-1): #First Invert chf to get p_{j,k} for n in arange(1,N - 1).reshape(-1): grand[n]=dot(hvec(n),EXP_A(k,j,n + 1)) # PROJ_Cliquet_EIA_StochVol.m:246 BetaTemp=real(fft(concat([[EXP_A(k,j,1) / A],[grand]]))) # PROJ_Cliquet_EIA_StochVol.m:248 PHI[j,k,arange()]=dot(PSI,BetaTemp(arange(klf - 1,klc + 1))) # PROJ_Cliquet_EIA_StochVol.m:250 sumBetaLeft=dot(C_aN,sum(BetaTemp(arange(1,klf - 2)))) # PROJ_Cliquet_EIA_StochVol.m:251 sumBetaRight=dot(C_aN,sum(BetaTemp(arange(klc + 2,N)))) # PROJ_Cliquet_EIA_StochVol.m:252 PHI[j,k,arange()]=PHI(j,k,arange()) + dot(xiBigF,sumBetaLeft) + dot(xiBigC,sumBetaRight) # PROJ_Cliquet_EIA_StochVol.m:254 else: if contract == 5: ### ADD CODE fprintf('-------------------------------\n') fprintf('NOTE: HAVENT ADDED CODE FOR THIS CONTRACT\n\n\n') fprintf('-------------------------------\n') #Main Recursion for m in arange(2,M).reshape(-1): for n in arange(1,N - 1).reshape(-1): PHIY_new[n,arange()]=dot(PHIY_new(n,arange()),PHI(arange(),arange(),n).T) # PROJ_Cliquet_EIA_StochVol.m:268 ########################################################################## ########################################################################## ### Redfine ymin for the final inversion #REDO FOR contract == 2 or ==3 if
<filename>hxl/scripts.py """ Console scripts <NAME> April 2015 This is a big, ugly module to support the libhxl console scripts, including (mainly) argument parsing. License: Public Domain Documentation: https://github.com/HXLStandard/libhxl-python/wiki """ from __future__ import print_function import argparse, json, logging, os, re, requests, sys # Do not import hxl, to avoid circular imports import hxl.converters, hxl.filters, hxl.io logger = logging.getLogger(__name__) # In Python2, sys.stdin is a byte stream; in Python3, it's a text stream STDIN = sys.stdin.buffer # Posix exit codes EXIT_OK = 0 EXIT_ERROR = 1 EXIT_SYNTAX = 2 # # Console script entry points # def hxladd(): """Console script for hxladd.""" run_script(hxladd_main) def hxlappend(): """Console script for hxlappend.""" run_script(hxlappend_main) def hxlclean(): """Console script for hxlclean""" run_script(hxlclean_main) def hxlcount(): """Console script for hxlcount.""" run_script(hxlcount_main) def hxlcut(): """Console script for hxlcut.""" run_script(hxlcut_main) def hxldedup(): """Console script for hxldedup.""" run_script(hxldedup_main) def hxlhash(): """Console script for hxlhash.""" run_script(hxlhash_main) def hxlmerge(): """Console script for hxlmerge.""" run_script(hxlmerge_main) def hxlrename(): """Console script for hxlrename.""" run_script(hxlrename_main) def hxlreplace(): """Console script for hxlreplace.""" run_script(hxlreplace_main) def hxlfill(): """Console script for hxlreplace.""" run_script(hxlfill_main) def hxlexpand(): """Console script for hxlexpand.""" run_script(hxlexpand_main) def hxlexplode(): """Console script for hxlexplode.""" run_script(hxlexplode_main) def hxlimplode(): """Console script for hxlimplode.""" run_script(hxlimplode_main) def hxlselect(): """Console script for hxlselect.""" run_script(hxlselect_main) def hxlsort(): """Console script for hxlsort.""" run_script(hxlsort_main) def hxlspec(): """Console script for hxlspec.""" run_script(hxlspec_main) def 
hxltag(): """Console script for hxltag.""" run_script(hxltag_main) def hxlvalidate(): """Console script for hxlvalidate.""" run_script(hxlvalidate_main) # # Main scripts for command-line tools. # def hxladd_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxladd with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Add new columns with constant values to a HXL dataset.') parser.add_argument( '-s', '--spec', help='Constant value to add to each row (may repeat option)', metavar='header#<tag>=<value>', action='append', required=True ) parser.add_argument( '-b', '--before', help='Add new columns before existing ones rather than after them.', action='store_const', const=True, default=False ) args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.AddColumnsFilter(source, specs=args.spec, before=args.before) hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags) return EXIT_OK def hxlappend_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlappend with command-line arguments. @param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Concatenate two HXL datasets') # repeatable argument parser.add_argument( '-a', '--append', help='HXL file to append (may repeat option).', metavar='file_or_url', action='append', default=[] ) parser.add_argument( '-l', '--list', help='URL or filename of list of URLs (may repeat option). 
Will appear after sources in -a options.', action='append', default=[] ) parser.add_argument( '-x', '--exclude-extra-columns', help='Don not add extra columns not in the original dataset.', action='store_const', const=True, default=False ) add_queries_arg(parser, 'From --append datasets, include only rows matching at least one query.') args = parser.parse_args(args) do_common_args(args) append_sources = [] for append_source in args.append: append_sources.append(hxl.data(append_source, True)) for list_source in args.list: for append_source in hxl.filters.AppendFilter.parse_external_source_list(hxl.data(list_source, True)): append_sources.append(hxl.data(append_source, True)) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.AppendFilter( source, append_sources=append_sources, add_columns=(not args.exclude_extra_columns), queries=args.query ) hxl.io.write_hxl(output.output, filter, show_headers=not args.remove_headers, show_tags=not args.strip_tags) return EXIT_OK def hxlclean_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlclean with command-line arguments. 
@param args A list of arguments, excluding the script name @param stdin Standard input for the script @param stdout Standard output for the script @param stderr Standard error for the script """ parser = make_args('Clean data in a HXL file.') parser.add_argument( '-w', '--whitespace', help='Comma-separated list of tag patterns for whitespace normalisation.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-u', '--upper', help='Comma-separated list of tag patterns for uppercase conversion.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-l', '--lower', help='Comma-separated list of tag patterns for lowercase conversion.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-d', '--date', help='Comma-separated list of tag patterns for date normalisation.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '--date-format', help='Date formatting string in strftime format (defaults to %%Y-%%m-%%d).', default=None, metavar='format', ) parser.add_argument( '-n', '--number', help='Comma-separated list of tag patternss for number normalisation.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '--number-format', help='Number formatting string in printf format (without leading %%).', default=None, metavar='format', ) parser.add_argument( '--latlon', help='Comma-separated list of tag patterns for lat/lon normalisation.', metavar='tag,tag...', type=hxl.model.TagPattern.parse_list ) parser.add_argument( '-p', '--purge', help='Purge unparseable dates, numbers, and lat/lon during cleaning.', action='store_const', const=True, default=False ) add_queries_arg(parser, 'Clean only rows matching at least one query.') args = parser.parse_args(args) do_common_args(args) with make_source(args, stdin) as source, make_output(args, stdout) as output: filter = hxl.filters.CleanDataFilter( source, whitespace=args.whitespace, 
        upper=args.upper, lower=args.lower, date=args.date, date_format=args.date_format,
        number=args.number, number_format=args.number_format, latlon=args.latlon,
        purge=args.purge, queries=args.query
    )
    hxl.io.write_hxl(output.output, filter, show_headers=not args.remove_headers, show_tags=not args.strip_tags)
    return EXIT_OK


def hxlcount_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlcount with command-line arguments.
    Counts aggregate occurrences of tagged values in a HXL dataset and
    writes the resulting counts as HXL to the chosen output.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """

    # Command-line arguments
    parser = make_args('Generate aggregate counts for a HXL dataset')
    parser.add_argument(
        '-t', '--tags',
        help='Comma-separated list of column tags to count.',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list,
        # default is a plain string; TagPattern.parse_list is applied by argparse
        default='loc,org,sector,adm1,adm2,adm3'
    )
    parser.add_argument(
        '-a', '--aggregator',
        help='Aggregator statement',
        metavar='statement',
        action='append',
        type=hxl.filters.Aggregator.parse,
        default=[]
    )
    add_queries_arg(parser, 'Count only rows that match at least one query.')

    args = parser.parse_args(args)

    do_common_args(args)

    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        filter = hxl.filters.CountFilter(
            source, patterns=args.tags, aggregators=args.aggregator, queries=args.query)
        hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)

    return EXIT_OK


def hxlcut_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    # Include/exclude columns from a HXL dataset by tag pattern.
    parser = make_args('Cut columns from a HXL dataset.')
    parser.add_argument(
        '-i', '--include',
        help='Comma-separated list of column tags to include',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '-x', '--exclude',
        help='Comma-separated list of column tags to exclude',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '-s', '--skip-untagged',
        help="Skip columns without HXL hashtags",
        action='store_const',
        const=True,
        default=False
    )

    args = parser.parse_args(args)

    do_common_args(args)

    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        filter = hxl.filters.ColumnFilter(source, args.include, args.exclude, args.skip_untagged)
        hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)

    return EXIT_OK


def hxldedup_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    # Remove duplicate rows, optionally keyed on a subset of columns.
    parser = make_args('Remove duplicate rows from a HXL dataset.')
    parser.add_argument(
        '-t', '--tags',
        help='Comma-separated list of column tags to use for deduplication (by default, use all values).',
        metavar='tag,tag...',
        type=hxl.model.TagPattern.parse_list
    )
    add_queries_arg(parser, 'Leave rows alone if they don\'t match at least one query.')

    args = parser.parse_args(args)

    do_common_args(args)

    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        filter = hxl.filters.DeduplicationFilter(source, args.tags, args.query)
        hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)

    return EXIT_OK


def hxlhash_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    # Print an MD5-style hash of the dataset (or only its headers) to stdout.
    # Note: no hxl output file is produced, hence hxl_output=False.
    parser = make_args(
        'Generate an MD5 hash for a HXL dataset (or just its header rows).',
        hxl_output=False
    )
    parser.add_argument(
        '-H', '--headers-only',
        help='Hash only the header and hashtag rows.',
        action='store_const',
        const=True,
        default=False
    )

    args = parser.parse_args(args)

    do_common_args(args)

    with make_source(args, stdin) as source:
        if args.headers_only:
            print(source.columns_hash)
        else:
            print(source.data_hash)

    return EXIT_OK


def hxlmerge_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlmerge with command-line arguments.
    Merges selected columns of one HXL dataset into another, joined on
    shared key tag(s).
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """
    parser = make_args('Merge part of one HXL dataset into another.')
    parser.add_argument(
        '-m', '--merge',
        help='HXL file to write (if omitted, use standard output).',
        metavar='filename',
        required=True
    )
    parser.add_argument(
        '-k', '--keys',
        help='HXL tag(s) to use as a shared key.',
        metavar='tag,tag...',
        required=True,
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '-t', '--tags',
        help='Comma-separated list of column tags to include from the merge dataset.',
        metavar='tag,tag...',
        required=True,
        type=hxl.model.TagPattern.parse_list
    )
    parser.add_argument(
        '-r', '--replace',
        help='Replace empty values in existing columns (when available) instead of adding new ones.',
        action='store_const',
        const=True,
        default=False
    )
    parser.add_argument(
        '-O', '--overwrite',
        help='Used with --replace, overwrite existing values.',
        action='store_const',
        const=True,
        default=False
    )
    add_queries_arg(parser, 'Merged data only from rows that match at least one query.')

    args = parser.parse_args(args)

    do_common_args(args)

    # NOTE(review): the conditional with-item below evaluates to None when
    # args.merge is falsy, and "None as merge_source" would raise at runtime;
    # this branch is dead in practice because --merge is required=True above.
    with make_source(args, stdin) as source, make_output(args, stdout) as output, hxl.io.data(args.merge, True) if args.merge else None as merge_source:
        filter = hxl.filters.MergeDataFilter(
            source, merge_source=merge_source,
            keys=args.keys, tags=args.tags, replace=args.replace, overwrite=args.overwrite,
            queries=args.query
        )
        hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)

    return EXIT_OK


def hxlrename_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
    """
    Run hxlrename with command-line arguments.
    Renames and/or retags columns in a HXL dataset.
    @param args A list of arguments, excluding the script name
    @param stdin Standard input for the script
    @param stdout Standard output for the script
    @param stderr Standard error for the script
    """
    parser = make_args('Rename and retag columns in a HXL dataset')
    parser.add_argument(
        '-r', '--rename',
        help='Rename an old tag to a new one, with an optional new text header (may repeat option).',
        action='append',
        metavar='#?<original_tag>:<Text header>?#?<new_tag>',
        default=[],
        type=hxl.filters.RenameFilter.parse_rename
    )

    args = parser.parse_args(args)

    do_common_args(args)

    with make_source(args, stdin) as source, make_output(args, stdout) as output:
        filter =
# infra/tools/antibody/compute_stats.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import itertools
import json


def ratio_calculator(numerator, denominator):
  """Computes the ratio of the counts in two lists.

  Args:
    numerator(list): list of lists with a year-month string as the first
                     index and a count as the second index
    denominator(list): list of lists with a year-month string as the first
                       index and a count as the second index

  Return:
    ratios(list): a list of lists with ratios rounded to three decimal
                  places e.g. [['2015-01', .251], ['2014-10', .742]]
  """
  ratios = []
  for num_month, num_count in numerator:
    for den_month, den_count in denominator:
      if num_month == den_month and den_count != 0:
        ratios.append([num_month, round(float(num_count) / den_count, 3)])
        break
      elif num_month == den_month:
        # Zero denominator: report a ratio of 0 for this month.
        # FIX: break here too, so duplicate timestamps in the denominator
        # cannot produce more than one entry per numerator month.
        ratios.append([num_month, 0])
        break
  return ratios


def totaled_ratio_calculator(numerator, denominator):
  """Computes the ratio of two totaled counts.

  Args:
    numerator(int): a totaled count
    denominator(int): a totaled count

  Return:
    ratio(float): a ratio rounded to three decimal places (0 when the
                  denominator is 0)
  """
  ratio = round(float(numerator) / denominator, 3) if denominator != 0 else 0
  return ratio


# functions that return stats calculated as lists of lists by month
def total_commits(cc):  # pragma: no cover
  """Counts all the git commits sorted by month and year.

  Args:
    cc(cursor)

  Return:
    results(list): a list of lists e.g. [['2014-01', 20], ['2014-02', 45]]
  """
  cc.execute("""SELECT DATE_FORMAT(git_commit.timestamp, '%Y-%m'), COUNT(*)
      FROM git_commit
      GROUP BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')
      ORDER BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')""")
  output = cc.fetchall()
  results = [[timestamp, int(count)] for timestamp, count in output]
  return results


def total_suspicious(cc):  # pragma: no cover
  """Counts the number of commits with no review url, TBRed with no lgtm,
  or with a blank tbr (TBR= ), sorted by month and year.

  Args:
    cc(cursor)

  Return:
    results(list): a list of lists
  """
  # FIX: the original WHERE clause read
  #   lgtm_count.c = 0 OR lgtm_count.c IS NULL AND commit_people.type = 'tbr'
  # which MySQL groups as  c = 0 OR (c IS NULL AND type = 'tbr')  because AND
  # binds tighter than OR, counting rows regardless of the tbr type.  The
  # parentheses below express the intended "(no lgtm) AND is a TBR commit".
  cc.execute("""SELECT DATE_FORMAT(git_commit.timestamp, '%Y-%m'), COUNT(*)
      FROM review
      INNER JOIN git_commit
          ON git_commit.review_url = review.review_url
      INNER JOIN commit_people
          ON commit_people.git_commit_hash = git_commit.hash
      LEFT JOIN (
          SELECT review_url, COUNT(*) AS c
          FROM review_people
          WHERE type = 'lgtm'
          GROUP BY review_url) lgtm_count
          ON review.review_url = lgtm_count.review_url
      WHERE (lgtm_count.c = 0 OR lgtm_count.c IS NULL)
          AND commit_people.type = 'tbr'
      GROUP BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')
      ORDER BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')""")
  no_lgtm = cc.fetchall()

  cc.execute("""SELECT DATE_FORMAT(git_commit.timestamp, '%Y-%m'), COUNT(*)
      FROM git_commit
      WHERE git_commit.review_url = ''
      GROUP BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')
      ORDER BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')""")
  no_review = cc.fetchall()

  cc.execute("""SELECT DATE_FORMAT(git_commit.timestamp, '%Y-%m'), COUNT(*)
      FROM commit_people
      INNER JOIN git_commit
          ON commit_people.git_commit_hash = git_commit.hash
      WHERE commit_people.people_email_address = 'NOBODY'
      GROUP BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')
      ORDER BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')""")
  no_person_tbr = cc.fetchall()

  # Merge the three per-month count lists and sum counts that share a month.
  counts_list = [[timestamp, int(count)] for timestamp, count in no_lgtm] + [
      [timestamp, int(count)] for timestamp, count in no_review] + [
      [timestamp, int(count)] for timestamp, count in no_person_tbr]
  # groupby requires its input sorted by the grouping key.
  values = sorted(counts_list, key=lambda x: x[0])
  results = []
  for timestamp, group in itertools.groupby(values, lambda x: x[0]):
    results.append([timestamp, sum(v[1] for v in group)])
  return results


def total_tbr(cc):  # pragma: no cover
  """Counts the number of commits with a TBR.

  Args:
    cc(cursor)

  Return:
    results(list): a list of lists
  """
  cc.execute("""SELECT DATE_FORMAT(git_commit.timestamp, '%Y-%m'),
      COUNT(DISTINCT git_commit_hash)
      FROM commit_people
      INNER JOIN git_commit
          ON commit_people.git_commit_hash = git_commit.hash
      WHERE commit_people.type = 'tbr'
      GROUP BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')
      ORDER BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')""")
  output = cc.fetchall()
  results = [[timestamp, int(count)] for timestamp, count in output]
  return results


def tbr_no_lgtm(cc):  # pragma: no cover
  """Counts the number of commits with a TBR that have not been lgtm'ed.

  Args:
    cc(cursor)

  Return:
    results(list): a list of lists
  """
  # FIX: parenthesized (c = 0 OR c IS NULL); see total_suspicious above.
  cc.execute("""SELECT DATE_FORMAT(git_commit.timestamp, '%Y-%m'),
      COUNT(DISTINCT git_commit.hash)
      FROM review
      INNER JOIN git_commit
          ON review.review_url = git_commit.review_url
      INNER JOIN commit_people
          ON commit_people.git_commit_hash = git_commit.hash
      LEFT JOIN (
          SELECT review_url, COUNT(*) AS c
          FROM review_people
          WHERE type = 'lgtm'
          GROUP BY review_url) lgtm_count
          ON review.review_url = lgtm_count.review_url
      WHERE (lgtm_count.c = 0 OR lgtm_count.c IS NULL)
          AND commit_people.type = 'tbr'
      GROUP BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')
      ORDER BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')""")
  output = cc.fetchall()
  results = [[timestamp, int(count)] for timestamp, count in output]
  return results


def nobody_tbr(cc):  # pragma: no cover
  """Counts the number of occurences of TBR= with no reviewer listed
  (indicated in the commit_people table with people_email_address = NOBODY).

  Args:
    cc(cursor)

  Return:
    results(list): a list of lists
  """
  cc.execute("""SELECT DATE_FORMAT(git_commit.timestamp, '%Y-%m'), COUNT(*)
      FROM commit_people
      INNER JOIN git_commit
          ON commit_people.git_commit_hash = git_commit.hash
      WHERE commit_people.people_email_address = 'NOBODY'
      GROUP BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')
      ORDER BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')""")
  output = cc.fetchall()
  results = [[timestamp, int(count)] for timestamp, count in output]
  return results


def no_review_url(cc):  # pragma: no cover
  """Counts the number of commits with no review url.

  Args:
    cc(cursor)

  Return:
    results(list): a list of lists
  """
  cc.execute("""SELECT DATE_FORMAT(git_commit.timestamp, '%Y-%m'), COUNT(*)
      FROM git_commit
      WHERE git_commit.review_url = ''
      GROUP BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')
      ORDER BY DATE_FORMAT(git_commit.timestamp, '%Y-%m')""")
  output = cc.fetchall()
  results = [[timestamp, int(count)] for timestamp, count in output]
  return results


# functions that return totaled stats for a set period back in time
#
# SECURITY NOTE(review): sql_time_specification is interpolated into the
# query text with %.  Callers must pass only trusted, programmatically
# generated clauses -- never user input -- or this is SQL injection.
def totaled_total_commits(cc, sql_time_specification):  # pragma: no cover
  """Counts all the git commits in a given timeframe.

  Args:
    cc(cursor)
    sql_time_specification(str): a sql command to limit the dates of the
                                 returned results

  Return:
    result(int): a count of all the commits
  """
  cc.execute("""SELECT COUNT(*)
      FROM git_commit
      WHERE %s""" % sql_time_specification)
  result = cc.fetchone()
  return int(result[0])


def totaled_total_suspicious(cc, sql_time_specification):  # pragma: no cover
  """Counts the number of commits with no review url or TBRed with no lgtm
  in a given timeframe.

  Args:
    cc(cursor)
    sql_time_specification(str): a sql command to limit the dates of the
                                 returned results

  Return:
    result(int): a count of all the suspicious commits
  """
  # FIX: parenthesized (c = 0 OR c IS NULL); see total_suspicious above.
  cc.execute("""SELECT COUNT(*)
      FROM review
      INNER JOIN git_commit
          ON review.review_url = git_commit.review_url
      INNER JOIN commit_people
          ON commit_people.git_commit_hash = git_commit.hash
      LEFT JOIN (
          SELECT review_url, COUNT(*) AS c
          FROM review_people
          WHERE type = 'lgtm'
          GROUP BY review_url) lgtm_count
          ON review.review_url = lgtm_count.review_url
      WHERE (lgtm_count.c = 0 OR lgtm_count.c IS NULL)
          AND commit_people.type = 'tbr'
          AND %s""" % sql_time_specification)
  no_lgtm = cc.fetchone()

  cc.execute("""SELECT COUNT(*)
      FROM git_commit
      WHERE review_url = ''
      AND %s""" % sql_time_specification)
  no_review = cc.fetchone()

  cc.execute("""SELECT COUNT(*)
      FROM commit_people
      INNER JOIN git_commit
          ON commit_people.git_commit_hash = git_commit.hash
      WHERE commit_people.people_email_address = 'NOBODY'
      AND %s""" % sql_time_specification)
  blank_tbr = cc.fetchone()

  result = int(no_lgtm[0]) + int(no_review[0]) + int(blank_tbr[0])
  return result


def totaled_total_tbr(cc, sql_time_specification):  # pragma: no cover
  """Counts the total number of commits with a TBR in a given timeframe.

  Args:
    cc(cursor)
    sql_time_specification(str): a sql command to limit the dates of the
                                 returned results

  Return:
    result(int): a count of all commits with a TBR
  """
  cc.execute("""SELECT COUNT(DISTINCT git_commit_hash)
      FROM commit_people
      INNER JOIN git_commit
          ON commit_people.git_commit_hash = git_commit.hash
      WHERE commit_people.type = 'tbr'
      AND %s""" % sql_time_specification)
  result = cc.fetchone()
  return int(result[0])


def totaled_tbr_no_lgtm(cc, sql_time_specification):
  """Counts the number of commits with a TBR that have not been lgtm'ed in a
  given timeframe.

  Args:
    cc(cursor)
    sql_time_specification(str): a sql command to limit the dates of the
                                 returned results

  Return:
    count(int): a count of all commits with a TBR and no lgtm
    results(list): a list of lists with all tbr'ed commits with no lgtm in
                   the format [rietveld_url, git_timestamp, git_subject,
                   git_hash]
  """
  # FIX: parenthesized (c = 0 OR c IS NULL); see total_suspicious above.
  # NOTE(review): this query filters on type = 'author' whereas the sibling
  # tbr_no_lgtm query filters on type = 'tbr' -- confirm which is intended.
  cc.execute("""SELECT git_commit.review_url, git_commit.timestamp,
      git_commit.subject, git_commit.hash
      FROM review
      INNER JOIN git_commit
          ON review.review_url = git_commit.review_url
      INNER JOIN commit_people
          ON commit_people.git_commit_hash = git_commit.hash
      LEFT JOIN (
          SELECT review_url, COUNT(*) AS c
          FROM review_people
          WHERE type = 'lgtm'
          GROUP BY review_url) lgtm_count
          ON review.review_url = lgtm_count.review_url
      WHERE (lgtm_count.c = 0 OR lgtm_count.c IS NULL)
          AND commit_people.type = 'author'
          AND %s""" % sql_time_specification)
  result = cc.fetchall()
  count = len(result)
  formatted_data = []
  for data in result:
    subject = data[2]
    formatted_data.append([data[0], data[1].strftime("%Y-%m-%d %H:%M:%S"),
                           subject.replace('-', ' '), data[3]])
  # Newest first, ordered by the formatted timestamp string.
  results = sorted(formatted_data, key=lambda x: x[1], reverse=True)
  return count, results


def totaled_blank_tbr(cc, sql_time_specification):  # pragma: no cover
  """Counts the number of occurences of TBR= with no reviewer listed in a
  given timeframe.

  Args:
    cc(cursor)
    sql_time_specification(str): a sql command to limit the dates of the
                                 returned results

  Return:
    count(int): a count of all blank TBRs (TBR=)
    results(list): a list of lists with all tbr'ed commits with no lgtm in
                   the format [rietveld_url, git_timestamp, git_subject,
                   git_hash]
  """
  cc.execute("""SELECT git_commit.review_url, git_commit.timestamp,
      git_commit.subject, git_commit.hash
      FROM commit_people
      INNER JOIN git_commit
          ON commit_people.git_commit_hash = git_commit.hash
      WHERE commit_people.people_email_address = 'NOBODY'
      AND %s""" % sql_time_specification)
  result = cc.fetchall()
  count = len(result)
  # NOTE(review): the original source was truncated at this point; the tail
  # below is reconstructed to mirror totaled_tbr_no_lgtm exactly (same
  # documented return contract) -- confirm against version history.
  formatted_data = []
  for data in result:
    subject = data[2]
    formatted_data.append([data[0], data[1].strftime("%Y-%m-%d %H:%M:%S"),
                           subject.replace('-', ' '), data[3]])
  results = sorted(formatted_data, key=lambda x: x[1], reverse=True)
  return count, results
    lag duration time [fig2.2]_.

    Parameters
    ----------
    model_fit : lmfit.model.ModelResult
        the result of a model fitting procedure
    params : lmfit.parameter.Parameters, optional
        if provided, these parameters will override `model_fit`'s parameters

    Returns
    -------
    lam : float
        the lag phase duration in the units of the `model_fit` ``Time`` variable (usually hours).

    References
    ----------
    .. [fig2.2] Fig. 2.2 pg. 19 in <NAME>., 2010. `Modelling and parameter estimation of bacterial growth with distributed lag time. <http://www2.sci.u-szeged.hu/fokozatok/PDF/Baranyi_Jozsef/Disszertacio.pdf>`_.

    See also
    --------
    find_lag_ci
    has_lag
    """
    if params is None:
        params = model_fit.params
    y0 = params['y0'].value
    K = params['K'].value

    # Evaluate the fitted model on an evenly spaced grid over the observed
    # time range (np.linspace default of 50 points).
    t = model_fit.userkws['t']
    t = np.linspace(t.min(), t.max())

    def f(t):
        return model_fit.model.eval(t=t, params=params)
    y = f(t)
    dfdt = derivative(f, t)

    # Only consider the part of the curve above K/e when locating the point
    # of maximum growth rate.
    idx = y > K / np.e
    if idx.sum() == 0:
        warn("All values are below K/e")
        return np.nan
    t = t[idx]
    y = y[idx]
    dfdt = dfdt[idx]

    # Tangent line y = a*t + b at the point of maximum slope; the lag is
    # where that tangent crosses the initial level y0.
    a = dfdt.max()
    i = dfdt.argmax()
    t1 = t[i]
    y1 = y[i]
    b = y1 - a * t1
    lam = (y0 - b) / a
    return lam


def find_lag_ci(model_fit, param_samples, ci=0.95):
    """Estimates a confidence interval for the lag duration from the model fit.

    The lag duration for each parameter sample is calculated.
    The confidence interval of the lag is the lower and higher
    percentiles such that `ci` percent of the random lag durations
    are within the confidence interval.

    Parameters
    ----------
    model_fit : lmfit.model.ModelResult
        the result of a model fitting procedure
    param_samples : pandas.DataFrame
        parameter samples, generated using :function:`sample_params` or :function:`bootstrap_params`
    ci : float, optional
        the fraction of lag durations that should be within the calculated limits. 0 < `ci` < 1, defaults to 0.95.

    Returns
    -------
    low, high : float
        the lower and the higher boundaries of the confidence interval of the lag phase duration in the units of the `model_fit` ``Time`` variable (usually hours).

    See also
    --------
    find_lag
    has_lag
    """
    if not 0 <= ci <= 1:
        raise ValueError("ci must be between 0 and 1")
    nsamples = param_samples.shape[0]
    lags = np.zeros(nsamples)

    # Re-evaluate the lag for every sampled parameter set; only parameters
    # that were allowed to vary in the fit are overridden.
    for i in range(param_samples.shape[0]):
        sample = param_samples.iloc[i,:]
        params = model_fit.params.copy()
        for k,v in params.items():
            if v.vary:
                params[k].set(value=sample[k])
        lags[i] = find_lag(model_fit, params=params)

    margin = (1.0 - ci) * 50.0

    # Drop non-finite and negative lag estimates before taking percentiles.
    idx = np.isfinite(lags)
    if not idx.all():
        warn("Warning: omitting {0} non-finite lag values".format(len(lags) - idx.sum()))
        lags = lags[idx]
    idx = (lags >= 0)
    if not idx.all():
        warn("Warning: omitting {0} negative lag values".format(len(lags) - idx.sum()))
        if not idx.any():
            # no legal lag values left
            # NOTE(review): this returns THREE values while the normal path
            # (and the docstring) return two -- callers unpacking "low, high"
            # would raise here; likely should be (np.nan, np.nan).
            return np.nan, np.nan, np.nan
        lags = lags[idx]

    low = np.percentile(lags, margin)
    high = np.percentile(lags, ci * 100.0 + margin)
    assert high >= low, lags.tolist()
    return low, high


def has_lag(model_fits, alfa=0.05, PRINT=False):
    r"""Checks if if the best fit has statisticaly significant lag phase :math:`\lambda > 0`.

    If the best fitted model doesn't has a lag phase to begin with, return :const:`False`.
    This includes the logistic model and Richards model.

    Otherwise, a likelihood ratio test will be perfomed with nesting determined according to Figure 1.
    The null hypothesis of the test is that :math:`\frac{1}{v} = 0` , i.e. the adjustment rate :math:`v` is infinite
    and therefore there is no lag phase.
    The function will return :const:`True` if the null hypothesis is rejected,
    otherwise it will return :const:`False`.

    Parameters
    ----------
    model_fits : sequence of lmfit.model.ModelResult
        the results of several model fitting procedures, ordered by their statistical preference. Generated by :py:func:`fit_model`.
    alfa : float, optional
        test significance level, defaults to 0.05 = 5%.
    PRINT : bool, optional
        if :const:`True`, the function will print the result of the underlying statistical test; defaults to :const:`False`.

    Returns
    -------
    bool
        the result of the hypothesis test. :const:`True` if the null hypothesis was rejected and the data suggest that there is a significant lag phase.

    Raises
    ------
    ValueError
        raised if the fittest of the :py:class:`lmfit.model.ModelResult` objects in `model_fits` is of an unknown model.
    """
    m1 = model_fits[0]
    # Infinite q0 and v mean the best model effectively has no lag term.
    if np.isposinf(m1.best_values.get('q0', np.inf)) and np.isposinf(m1.best_values.get('v', np.inf)):
        if PRINT:
            print('H1 model has no lag')
        return False
    try:
        m0_model_class = m1.model.nested_models['lag']
    except KeyError:
        raise ValueError("The best fit model {0} has no nested model for testing lag".format(m1.model.name))
    try:
        m0 = [m for m in model_fits if isinstance(m.model, m0_model_class)][0]
    except IndexError:
        raise ValueError("No {0} in model results.".format(m0_model_class.name))

    prefer_m1, pval, D, ddf = lrtest(m0, m1, alfa=alfa)
    if PRINT:
        print("Tested H0: %s vs. H1: %s; D=%.2g, ddf=%d, p-value=%.2g" % (m0.model.name, m1.model.name, D, ddf, pval))
    return prefer_m1


def has_nu(model_fits, alfa=0.05, PRINT=False):
    r"""Checks if if the best fit has :math:`\nu \ne 1` and if so if that is statisticaly significant.

    If the best fitted model has :math:`\nu = 1` to begin with, return :const:`False`.
    This includes the logistic model.

    Otherwise, a likelihood ratio test will be perfomed with nesting determined according to Figure 1.
    The null hypothesis of the test is that :math:`\nu = 1`;
    if it is rejected than the function will return :const:`True`.
    Otherwise it will return :const:`False`.

    Parameters
    ----------
    model_fits : list lmfit.model.ModelResult
        the results of several model fitting procedures, ordered by their statistical preference. Generated by :py:func:`fit_model`.
    alfa : float, optional
        test significance level, defaults to 0.05 = 5%.
    PRINT : bool, optional
        if :const:`True`, the function will print the result of the underlying statistical test; defaults to :const:`False`.

    Returns
    -------
    bool
        the result of the hypothesis test. :const:`True` if the null hypothesis was rejected and the data suggest that :math:`\nu` is significantly different from one.

    Raises
    ------
    ValueError
        raised if the fittest of the :py:class:`lmfit.model.ModelResult` objects in `model_fits` is of an unknown model.
    """
    m1 = model_fits[0]
    if m1.best_values.get('nu', 1.0) == 1.0:
        return False
    try:
        m0_model_class = m1.model.nested_models['nu']
    except KeyError:
        raise ValueError("The best fit model {} has no nested model for testing nu".format(m1.model.name))
    try:
        m0 = [m for m in model_fits if isinstance(m.model, m0_model_class)][0]
    except IndexError:
        raise ValueError("No {} in model results.".format(m0_model_class.name))

    prefer_m1, pval, D, ddf = lrtest(m0, m1, alfa=alfa)
    if PRINT:
        msg = "Tested H0: %s (nu=%.2g) vs. H1: %s (nu=%.2g); D=%.2g, ddf=%d, p-value=%.2g"
        print(msg % (m0.model.name, m0.best_values.get('nu', 1), m1.model.name, m1.best_values.get('nu', 1), D, ddf, pval))
    return prefer_m1


def make_Dfun(model, params):
    # Build an analytic Jacobian function for `model` by symbolically
    # differentiating its sympy expression w.r.t. each varying parameter
    # and lambdifying the partial derivatives to numpy functions.
    expr, t, args = model.get_sympy_expr(params)
    partial_derivs = [None]*len(args)
    for i,x in enumerate(args):
        dydx = expr.diff(x)
        dydx = sympy.lambdify(args=(t,) + args, expr=dydx, modules="numpy")
        partial_derivs[i] = dydx

    def Dfun(params, y, a, t):
        # Evaluate every partial derivative at the current varying-parameter
        # values; result is (n_varying_params, n_timepoints).
        values = [ par.value for par in params.values() if par.vary ]
        res = np.array([dydx(t, *values) for dydx in partial_derivs])
        expected_shape = (len(values), len(t))
        if res.shape != expected_shape:
            raise TypeError("Dfun result shape for {0} is incorrect, expected {1} but it is {2}.".format(model.name, expected_shape, res.shape))
        return res
    return Dfun


def cooks_distance(df, model_fit, use_weights=True):
    """Calculates Cook's distance of each well given a specific model fit.

    Cook's distance is an estimate of the influence of a data curve when performing model fitting;
    it is used to find wells (growth curve replicates) that are suspicious as outliers.
    The higher the distance, the more suspicious the curve.

    Parameters
    ----------
    df : pandas.DataFrame
        growth curve data, see :py:mod:`curveball.ioutils` for a detailed definition.
    model_fit : lmfit.model.ModelResult
        result of model fitting procedure
    use_weights : bool, optional
        should the function use standard deviation across replicates as weights for the fitting procedure, defaults to :const:`True`.

    Returns
    -------
    dict
        a dictionary of Cook's distances: keys are wells (from the `Well` column in `df`), values are Cook's distances.

    Notes
    -----
    `Wikipedia <https://en.wikipedia.org/wiki/Cook's_distance>`_
    """
    p = model_fit.nvarys
    MSE = model_fit.chisqr / model_fit.ndata
    wells = df.Well.unique()
    D = {}
    # Leave-one-well-out refits: each well's distance is the refit chi-square
    # (without that well) scaled by p * MSE of the full fit.
    for well in wells:
        _df = df[df.Well != well]
        time = _df.Time.to_numpy()
        OD = _df.OD.to_numpy()
        weights = calc_weights(_df) if use_weights else None
        model_fit_i = copy.deepcopy(model_fit)
        model_fit_i.fit(data=OD, t=time, weights=weights)
        D[well] = model_fit_i.chisqr / (p * MSE)
    return D


def find_outliers(df, model_fit, deviations=2, use_weights=True, ax=None, PLOT=False):
    """Find outlier wells in growth curve data.

    Uses the Cook's distance approach (`cooks_distance`);
    values of Cook's distance that are `deviations` standard deviations **above** the mean
    are defined as outliers.

    Parameters
    ----------
    df : pandas.DataFrame
        growth curve data, see :py:mod:`curveball.ioutils` for
# -*- coding: utf-8 -*-

from __future__ import division

from collections import OrderedDict

from ..exceptions import CleoException
from .table_style import TableStyle
from .table_cell import TableCell
from .table_separator import TableSeparator
from .helper import Helper


class Table(object):
    """
    Provides helpers to display a table.
    """

    # Class-level registry of named TableStyle instances, shared by all
    # Table objects and lazily initialized on first construction.
    styles = None

    def __init__(self, output):
        """
        Constructor.

        :param output: An Output instance
        :type output: Output
        """
        self._output = output

        self._headers = []
        self._rows = []
        self._column_widths = {}
        self._number_of_columns = None
        self._style = None
        self._column_styles = {}

        if not self.__class__.styles:
            self.__class__.styles = self._init_styles()

        self.set_style('default')

    @classmethod
    def set_style_definition(cls, name, table_style):
        """
        Sets a style definition.

        :param name: The name of the style
        :type name: str

        :param table_style: A TableStyle instance
        :type table_style: TableStyle
        """
        if not cls.styles:
            cls.styles = cls._init_styles()

        cls.styles[name] = table_style

    def set_style(self, name):
        """
        Sets table style.

        :param name: The name of the style
        :type name: str
        """
        # Accept either a registered style name or a TableStyle instance.
        if isinstance(name, TableStyle):
            self._style = name
        elif name in self.styles:
            self._style = self.styles[name]
        else:
            raise CleoException('Style "%s" is not defined.' % name)

        return self

    def get_style(self):
        """
        :rtype: TableStyle
        """
        return self._style

    def set_column_style(self, column_index, name):
        """
        Sets table column style.

        :param column_index: Colun index
        :type column_index: int

        :param name: The name of the style
        :type name: str or TableStyle

        :rtype: Table
        """
        column_index = int(column_index)

        if isinstance(name, TableStyle):
            self._column_styles[column_index] = name
        elif name in self.styles:
            self._column_styles[column_index] = self.styles[name]
        else:
            raise CleoException('Style "%s" is not defined.' % name)

    def get_column_style(self, column_index):
        """
        Gets the current style for a column.

        If style was not set, it returns the global table style.

        :param column_index: Colun index
        :type column_index: int

        :rtype: TableStyle
        """
        if column_index in self._column_styles:
            return self._column_styles[column_index]

        return self._style

    def set_headers(self, headers):
        # A flat list is treated as a single header row.
        if headers and not isinstance(headers[0], list):
            headers = [headers]

        self._headers = headers

        return self

    def set_rows(self, rows):
        # Replace all existing rows with the given ones.
        self._rows = []

        self.add_rows(rows)

        return self

    def add_rows(self, rows):
        # Append several rows (lists or TableSeparator instances).
        for row in rows:
            self.add_row(row)

        return self

    def add_row(self, row):
        # Append one row; a TableSeparator is stored as-is.
        if isinstance(row, TableSeparator):
            self._rows.append(row)

            return self

        if not isinstance(row, list):
            raise CleoException('A row must be a list or a TableSeparator instance.')

        self._rows.append(row)

        return self

    def set_row(self, column, row):
        # Replace the row at the given index.
        self._rows[column] = row

        return self

    def render(self):
        """
        Renders table to output.

        Example:
        +---------------+-----------------------+------------------+
        | ISBN          | Title                 | Author           |
        +---------------+-----------------------+------------------+
        | 99921-58-10-7 | Divine Comedy         | <NAME>           |
        | 9971-5-0210-0 | A Tale of Two Cities  | <NAME>           |
        | 960-425-059-0 | The Lord of the Rings | <NAME>           |
        +---------------+-----------------------+------------------+
        """
        self._calculate_number_of_columns()
        rows = self._build_table_rows(self._rows)
        headers = self._build_table_rows(self._headers)

        self._calculate_columns_width(headers + rows)

        self._render_row_separator()

        if headers:
            for header in headers:
                self._render_row(header, self._style.cell_header_format)
                self._render_row_separator()

        for row in rows:
            if isinstance(row, TableSeparator):
                self._render_row_separator()
            else:
                self._render_row(row, self._style.cell_row_format)

        if rows:
            self._render_row_separator()

        self._cleanup()

    def _render_row_separator(self):
        """
        Renders horizontal header separator.

        Example: +-----+-----------+-------+
        """
        count = self._number_of_columns
        if not count:
            return

        # Nothing to draw when the style defines no separator characters.
        if not self._style.horizontal_border_char and not self._style.crossing_char:
            return

        markup = self._style.crossing_char
        for column in range(0, count):
            markup += self._style.horizontal_border_char * self._column_widths[column]\
                      + self._style.crossing_char

        self._output.writeln(self._style.border_format % markup)

    def _render_column_separator(self):
        """
        Renders vertical column separator.
        """
        self._output.write(self._style.border_format % self._style.vertical_border_char)

    def _render_row(self, row, cell_format):
        """
        Renders table row.

        Example: | 9971-5-0210-0 | A Tale of Two Cities  | <NAME>  |

        :param row: The row to render
        :type: row: list

        :param cell_format: The cell format
        :type cell_format: str
        """
        if not row:
            return

        self._render_column_separator()

        for column in self._get_row_columns(row):
            self._render_cell(row, column, cell_format)
            self._render_column_separator()

        self._output.writeln('')

    def _render_cell(self, row, column, cell_format):
        """
        Renders table cell with padding.

        :param row: The row to render
        :type: row: list

        :param column: The column to render

        :param cell_format: The cell format
        :type cell_format: str
        """
        try:
            cell = row[column]
        except IndexError:
            cell = ''

        width = self._column_widths[column]
        if isinstance(cell, TableCell) and cell.colspan > 1:
            # add the width of the following columns(numbers of colspan).
            for next_column in range(column + 1, column + cell.colspan):
                width += self._get_column_separator_width() + self._column_widths[next_column]

        # Encoding fix
        width += len(cell) - Helper.len(cell)

        style = self.get_column_style(column)

        if isinstance(cell, TableSeparator):
            self._output.write(style.border_format % (style.horizontal_border_char * width))
        else:
            # Pad to the visible width: formatting markup does not count.
            width += Helper.len(cell) - Helper.len_without_decoration(self._output.get_formatter(), cell)
            content = style.cell_row_content_format % cell
            self._output.write(cell_format % getattr(content, style.pad_type)(width, style.padding_char))

    def _calculate_number_of_columns(self):
        """
        Calculate number of columns for this table.
        """
        if self._number_of_columns is not None:
            return

        columns = [0]
        for row in self._headers + self._rows:
            if isinstance(row, TableSeparator):
                continue

            columns.append(self._get_number_of_columns(row))

        self._number_of_columns = max(columns)

    def _build_table_rows(self, rows):
        # Expand rowspans and split multi-line cells into extra rows;
        # unmerged_rows collects the overflow lines keyed by row/line/column.
        unmerged_rows = OrderedDict()

        row_key = 0
        while row_key < len(rows):
            rows = self._fill_next_rows(rows, row_key)

            # Remove any new line breaks and replace it with a new line
            for column, cell in enumerate(rows[row_key]):
                if '\n' not in cell:
                    continue

                lines = cell.split('\n')
                for line_key, line in enumerate(lines):
                    if isinstance(cell, TableCell):
                        line = TableCell(line, colspan=cell.colspan)

                    if 0 == line_key:
                        # First line stays in place; the rest become overflow.
                        rows[row_key][column] = line
                    else:
                        if row_key not in unmerged_rows:
                            unmerged_rows[row_key] = OrderedDict()

                        if line_key not in unmerged_rows[row_key]:
                            unmerged_rows[row_key][line_key] = OrderedDict()

                        unmerged_rows[row_key][line_key][column] = line

            row_key += 1

        table_rows = []
        for row_key, row in enumerate(rows):
            table_rows.append(self._fill_cells(row))
            if row_key in unmerged_rows:
                for line in unmerged_rows[row_key]:
                    if line <= len(table_rows):
                        # Append the overflow as a fresh row, blank-filling
                        # columns that had no overflow content.
                        new_row = []
                        for column, value in enumerate(row):
                            if column in unmerged_rows[row_key][line]:
                                new_row.append(unmerged_rows[row_key][line][column])
                            else:
                                new_row.append('')

                        table_rows.append(new_row)
                    else:
                        for column in unmerged_rows[row_key][line]:
                            table_rows[line][column] = unmerged_rows[row_key][line][column]

        return table_rows

    def _fill_next_rows(self, rows, line):
        """
        Fill rows that contains rowspan > 1.

        :param rows: The rows to fill
        :type rows: list

        :type line: int

        :rtype: list
        """
        unmerged_rows = OrderedDict()

        for column, cell in enumerate(rows[line]):
            if isinstance(cell, TableCell) and cell.rowspan > 1:
                nb_lines = cell.rowspan - 1
                lines = [cell]
                if '\n' in cell:
                    lines = cell.split('\n')
                    if len(lines) > nb_lines:
                        nb_lines = cell.count('\n')

                    rows[line][column] = TableCell(lines[0], colspan=cell.colspan)

                # Create a two dimensional array (rowspan x colspan)
                placeholder = OrderedDict([(k, OrderedDict()) for k in range(line + 1, line + 1 + nb_lines)])
                for k, v in unmerged_rows.items():
                    if k in placeholder:
                        for l, m in unmerged_rows[k].items():
                            if l in placeholder[k]:
                                placeholder[k][l].update(m)
                            else:
                                placeholder[k][l] = m
                    else:
                        placeholder[k] = v

                unmerged_rows = placeholder

                for unmerged_row_key, unmerged_row in unmerged_rows.items():
                    value = ''
                    if unmerged_row_key - line < len(lines):
                        value = lines[unmerged_row_key - line]

                    unmerged_rows[unmerged_row_key][column] = TableCell(value, colspan=cell.colspan)

        for unmerged_row_key, unmerged_row in unmerged_rows.items():
            # we need to know if unmerged_row will be merged or inserted into rows
            if (unmerged_row_key < len(rows)
                and isinstance(rows[unmerged_row_key], list)
                and (self._get_number_of_columns(rows[unmerged_row_key])
                     + self._get_number_of_columns(list(unmerged_rows[unmerged_row_key].values()))
                     <= self._number_of_columns)):
                # insert cell into row at cell_key position
                for cell_key, cell in unmerged_row.items():
                    rows[unmerged_row_key].insert(cell_key, cell)
            else:
                row = self._copy_row(rows, unmerged_row_key - 1)
                for column, cell in unmerged_row.items():
                    if len(cell):
                        row[column] = unmerged_row[column]

                rows.insert(unmerged_row_key, row)

        return rows

    def _fill_cells(self, row):
        """
        Fill cells for a row that contains colspan > 1.

        :type row: list

        :rtype: list
        """
        new_row = []
        for column, cell in enumerate(row):
            new_row.append(cell)
            if isinstance(cell, TableCell) and cell.colspan > 1:
                for position in range(column + 1, column + cell.colspan):
                    # insert empty value at column position
                    new_row.append('')

        if new_row:
            return new_row

        return row

    def _copy_row(self, rows, line):
        """
        Copy a row

        :type rows: list
        :type line: int

        :rtype: list
        """
        row = [x for x in rows[line]]

        for cell_key, cell_value in enumerate(row):
            row[cell_key] = ''
            if isinstance(cell_value, TableCell):
                row[cell_key] = TableCell('', colspan=cell_value.colspan)

        return row

    def _get_number_of_columns(self, row):
        """
        Gets number of columns by row.

        :param row: The row
        :type row: list

        :rtype: int
        """
        columns = len(row)
        for column in row:
            if isinstance(column, TableCell):
                columns += column.colspan - 1

        return columns

    def _get_row_columns(self, row):
        """
        Gets list of columns for the given row.

        :type row: list

        :rtype: list
        """
        columns = list(range(0, self._number_of_columns))

        for cell_key, cell in enumerate(row):
            if isinstance(cell, TableCell) and cell.colspan > 1:
                # exclude grouped columns.
                columns = [x for x in columns
                           if x not in list(range(cell_key + 1, cell_key + cell.colspan))]

        return columns

    def _calculate_columns_width(self, rows):
        """
        Calculates columns widths.
        """
        for column in range(0, self._number_of_columns):
            lengths = []
            for row in rows:
                if isinstance(row, TableSeparator):
                    continue

                lengths.append(self._get_cell_width(row, column))

            # Width = widest cell plus the padding implied by the content
            # format (its two "%s" characters are subtracted).
            self._column_widths[column] = max(lengths) + len(self._style.cell_row_content_format) - 2

    def _get_column_separator_width(self):
        return len(self._style.border_format % self._style.vertical_border_char)

    def _get_cell_width(self, row, column):
        """
        Gets cell width.

        :type row: list
        :type column: int

        :rtype: int
        """
        try:
            cell = row[column]
            cell_width = Helper.len_without_decoration(self._output.get_formatter(), cell)
            if isinstance(cell, TableCell)
def draw_range_ring(self):
    """ draw zeniths with 30 intervals

    Draws dashed concentric range rings every 30 units out to the full
    radius R (module-level constant — presumably the display radius in
    render units; TODO confirm), and labels each ring along the 135-degree
    radial.
    """
    zeniths = np.arange(0, R+1, 30)
    angle = 135.  # radial along which the ring labels are placed
    for r in zeniths:
        circ = plt.Circle((0, 0), radius=r, linestyle='dashed', color='lightgray', lw=0.8, fill=False)
        self.axes.add_patch(circ)
        # Label position: point on the 135-degree radial at distance r.
        x = R * np.cos(np.pi*angle/180.) * r/R
        y = R * np.sin(np.pi*angle/180.) * r/R
        print 'r=', r, x, y  # debug trace (Python 2 print statement)
        self.axes.annotate(int(r), xy=(x, y), xycoords='data', arrowprops=None, size=10)
def changeZoomerPointer(self, ind=None):
    """ method called when mouse button is pressed, changing zoomer pointer

    With ``ind is None`` this steps back one zoom level: re-applies the
    most recently saved zoom window and pops it off the stack. With any
    other value it jumps straight back to the oldest saved window and
    clears the whole zoom stack.

    NOTE(review): relies on ``self.zoomTo`` which is not visible in this
    chunk — presumably applies a zoom rectangle; confirm.
    """
    if ind is None:
        if len(self.zoomer) > 0:
            # undo last zoom: restore and discard the newest window
            zoomWindow = self.zoomer[-1]
            self.zoomTo(zoomWindow)
            self.zoomer.pop()
    else:
        if len(self.zoomer) > 0:
            # reset: restore the first (outermost) window, drop history
            zoomWindow = self.zoomer[0]
            self.zoomTo(zoomWindow)
            self.zoomer = []
self.rubberBand.hide() if not self.origins.isNull() and not self.bottomRight.isNull(): g = self.rubberBand.geometry() if g.width() <= 20: pass else: mywindow = core.QRect() mywindow.setRect(self._zoomWindow.x(), self._zoomWindow.y(), self._zoomWindow.width(), self._zoomWindow.height()) self.zoomer.append(mywindow) curr_x = self._zoomWindow.x() curr_y = self._zoomWindow.y() curr_width = self._zoomWindow.width() curr_height = self._zoomWindow.height() zoom_x = float(g.x()) * curr_width / self.origin[0] + curr_x zoom_y = -1. * (float(g.y()) * curr_height / self.origin[0] )+ curr_y zoom_width = float(g.width())/self.origin[0] * curr_width zoom_height = zoom_width self.setZoomWindow(zoom_x, zoom_y, zoom_width, zoom_height) #self.update_figure() def setZoomWindow(self,x,y,width,height): """ set current zoom window """ self._zoomWindow.setRect((int)(x), (int)(y), (int)(width), (int)(height)) self.adjustZoomWindow() def adjustZoomWindow(self): x1 = self._zoomWindow.x() y1 = self._zoomWindow.y() x2 = x1 + self._zoomWindow.width() y2 = y1 - self._zoomWindow.height() print 'adjustZoomWindow ---', x1,y1,x2,y2 if x1<x2: self.axes.set_xlim(x1,x2) else: self.axes.set_xlim(x2,x1) if y1<y2: self.axes.set_ylim(y1,y2) else: self.axes.set_ylim(y2,y1) self.fig.canvas.draw() ''' class MplCanvasBSCAN(MyMplCanvas): """ A class for displaying radar data in BSCAN mode. In this mode, the width and height of plot are not equal. Parameters ---------- title : string Plotting header label. colormap : ColorMap ColorMap object. Attributes ---------- figurecanvas : FigureCanvas The canvas for display. zoomer : list Storing zoom windows. _zoomWindow : QRectF Storing current zoom window. origin : list Storing the coordinates for onPress event. var_ : dict Storing variables for display. COLORBAR : boolean Flag for colorbar display. PICKER_LABEL : boolean Flag for picker label display. cb : ColorbarBase Colorbar object. cMap : ColorMap ColorMap object. pressEvent : event Press event. 
pressed : boolean Flag for press event. deltaX : float X change of rubberband. Zoom window only when the change is greater than ZOOM_WINDOW_PIXEL_LIMIT. deltaY : float Y change of rubberband. startX : float Rubberband start x value. startY : float Rubberband start y value. moveLabel : QLabel Picker label sweep : Sweep Sweep object. ranges : list Sweep ranges varName : string Storing current display variable name. x : list Storing sweep x values. y : list Storing sweep y values. label : string Storing header label and sweep time stamp y_limits : list Storing updated y-limits. v_limits : list Storing y-limits. No change. """ def __init__(self, title, colormap, parent=None, width=3, height=3, dpi=100): self.fig = Figure(figsize=[15,5]) plt.axis('off') self.axes = self.fig.add_subplot(111) self.fig.set_dpi( dpi ) self.headerLabel = title self.figurecanvas = FigureCanvas.__init__(self, self.fig) self.setParent(parent) FigureCanvas.setSizePolicy(self, gui.QSizePolicy.Expanding, gui.QSizePolicy.Expanding) FigureCanvas.updateGeometry(self) self.setWindow(core.QRectF(-1. * RENDER_PIXELS/2., 1. * RENDER_PIXELS/2., 1. * RENDER_PIXELS, -1. * RENDER_PIXELS)) self.ignorePaint = False self.rubberBand = gui.QRubberBand(gui.QRubberBand.Rectangle, self) self.zoomer = [] self.origin = [RENDER_PIXELS,RENDER_PIXELS] self.scaleFactor = 1.0 self.offsetX = 0.0 self.offsetY = 0.0 self.var_ = {} self.COLORBAR = True self.PICKER_LABEL = False self.cb = None self.cMap = colormap self.pressEvent = None self.pressed = False self.deltaX = 0. self.deltaY = 0. 
def setWindow(self, window):
    """Record *window* as this widget's full view rectangle.

    Also caches the window's width/height ratio so later redraws can
    keep the aspect correct.
    """
    self._aspectRatio = window.width() / window.height()
    self._zoomWindow = window
def update_figure(self):
    """ re-plot figure

    Clears the axes and redraws the BSCAN pseudocolor plot of the
    current variable, clamped to the user-adjustable ``y_limits`` color
    range, then refreshes titles/labels and the canvas. Does nothing
    when no variable data is loaded.
    """
    # Current color-scale limits (may have been changed via the slider).
    vmin = self.y_limits[0]
    vmax = self.y_limits[1]
    if len(self.var_) > 0:
        self.axes.clear()
        # var_ is transposed so time runs along x and range along y
        # (assumes var_ is indexed [time, gate] — TODO confirm).
        im = self.axes.pcolormesh(self.x, self.y, np.transpose(self.var_), vmin=vmin, vmax=vmax)
        if self.COLORBAR:
            self.draw_colorbar(im)
        self.axes.set_title(self.label, size=10)  ## TODO: change size to be adaptive
        self.axes.set_xlabel('time (seconds since ' + self.label[0] + ')', fontsize=10)
        self.axes.set_ylabel(self.varName + ' (log scale)', fontsize=10)
        self.axes.tick_params(axis='both', which='major', labelsize=10)
        self.fig.canvas.draw()
params, parent=None): """ create a new Radial Display widget """ super(RadialDisplay, self).__init__(parent) self.parent = parent self.PAINTED = False self._backgroundBrush = gui.QBrush((gui.QColor(params.background_color))) self.setWindow(core.QRect(-1 * RENDER_PIXELS, -1* RENDER_PIXELS, 2 * RENDER_PIXELS, 2 * RENDER_PIXELS)) # policy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, # QtGui.QSizePolicy.MinimumExpanding) # self.setSizePolicy(policy) new_palette = self.palette() new_palette.setColor(gui.QPalette.Dark, self._backgroundBrush.color()) self.setBackgroundRole(gui.QPalette.Dark) self.setAutoFillBackground(True) # self.setAttribute(Qt.WA_OpaquePaintEvent) self.setFocusPolicy(core.Qt.StrongFocus) self.rays = [] self.origin = [400,400] # hack the colors for now self._colors = [ gui.QColor(255, 0, 0), gui.QColor(0, 255, 0), gui.QColor(0, 0, 255), gui.QColor(50, 50, 0), gui.QColor(0, 50, 50), gui.QColor(50, 0, 50), gui.QColor(150, 150, 0), gui.QColor(0, 150, 150), gui.QColor(150, 0, 150) ] self.colorIndex = 0 self.drawIndex =
1) support_set = [neighbours[i][0] for i in xrange(support_size)] support_bbox = _find_limits(support_set) crop_aspect_ratio = np.random.uniform(self.crop_ratio_limits_[0], self.crop_ratio_limits_[1]) do_mirror = self.mirror_ and np.random.uniform(0.0, 1.0) < self.max_mirror_prob_ glob_param = TransformParams(crop=do_crop, support_bbox=support_bbox if do_crop else None, aspect_ratio=crop_aspect_ratio if do_crop else None, mirror=do_mirror, expand=do_expand, expand_ratio=expand_ratio if do_expand else None, expand_shift=expand_shift if do_expand else None, expand_fill=expand_fill if do_expand else None) return glob_param def _transform_image_with_objects(self, img, objects, transform, trg_height, trg_width): """Carry out random transformation of input image with annotation according the transformation parameters. :param img: Input image :param objects: Annottaion :param transform: Parameters of transformations :param trg_height: Target image height :param trg_width: Target image width :return: Transformed image and its annotation """ def _fit_bbox(src_bbox, trg_ratio, frame_size, delta_factor): """Fit input bounding box to the specified restrictions on aspect ratio and frame size. 
:param src_bbox: Input bounding box :param trg_ratio: Output aspect ratio of bounding box :param frame_size: Input frame sizes :param delta_factor: Scale to sample bounding box :return: Valid bounding box """ out_h = src_bbox.ymax - src_bbox.ymin out_w = src_bbox.xmax - src_bbox.xmin src_aspect_ratio = float(out_h) / float(out_w) if src_aspect_ratio > trg_ratio: out_h = out_h out_w = out_h / trg_ratio else: out_h = out_w * trg_ratio out_w = out_w delta_x = delta_factor * out_w delta_y = delta_factor * out_h center_x = src_bbox.xmin + 0.5 * out_w + np.random.uniform(-delta_x, delta_x) center_y = src_bbox.ymin + 0.5 * out_h + np.random.uniform(-delta_y, delta_y) out_xmin = np.maximum(0, int((center_x - 0.5 * out_w) * frame_size[1])) out_ymin = np.maximum(0, int((center_y - 0.5 * out_h) * frame_size[0])) out_xmax = np.minimum(int((center_x + 0.5 * out_w) * frame_size[1]), frame_size[1]) out_ymax = np.minimum(int((center_y + 0.5 * out_h) * frame_size[0]), frame_size[0]) return [out_xmin, out_ymin, out_xmax, out_ymax] if transform is None: return cv2.resize(img, (trg_width, trg_height)), objects augmented_img = img augmented_objects = objects if transform.expand: expanded_height = int(augmented_img.shape[0] * transform. 
expand_ratio) expanded_width = int(augmented_img.shape[1] * transform.expand_ratio) if transform.expand_fill == 0: expanded_img = np.zeros([expanded_height, expanded_width, 3], dtype=np.uint8) elif transform.expand_fill == 1: color = np.array([np.random.randint(0, 256)] * 3, dtype=np.uint8) expanded_img = np.full([expanded_height, expanded_width, 3], color, dtype=np.uint8) elif transform.expand_fill == 2: color = np.random.randint(0, 256, 3, dtype=np.uint8) expanded_img = np.full([expanded_height, expanded_width, 3], color, dtype=np.uint8) else: expanded_img = np.random.randint(0, 256, [expanded_height, expanded_width, 3], dtype=np.uint8) roi_xmin = transform.expand_shift[1] roi_ymin = transform.expand_shift[0] roi_xmax = roi_xmin + augmented_img.shape[1] roi_ymax = roi_ymin + augmented_img.shape[0] expanded_img[roi_ymin:roi_ymax, roi_xmin:roi_xmax] = augmented_img augmented_img = expanded_img expand_scale = 1.0 / transform.expand_ratio expand_shift = (float(transform.expand_shift[0]) / float(expanded_height), float(transform.expand_shift[1]) / float(expanded_width)) expanded_objects = [] for obj in augmented_objects: expanded_objects.append(BBox(track_id=obj.track_id, action=obj.action, xmin=expand_shift[1] + expand_scale * obj.xmin, ymin=expand_shift[0] + expand_scale * obj.ymin, xmax=expand_shift[1] + expand_scale * obj.xmax, ymax=expand_shift[0] + expand_scale * obj.ymax, occluded=obj.occluded)) augmented_objects = expanded_objects if transform.crop: src_height, src_width = augmented_img.shape[:2] crop_bbox = _fit_bbox(transform.support_bbox, transform.aspect_ratio, [src_height, src_width], delta_factor=self.crop_center_fraction_) crop_height = crop_bbox[3] - crop_bbox[1] crop_width = crop_bbox[2] - crop_bbox[0] augmented_img = augmented_img[crop_bbox[1]:crop_bbox[3], crop_bbox[0]:crop_bbox[2]] augmented_img = cv2.resize(augmented_img, (trg_width, trg_height)) cropped_objects = [] for obj in augmented_objects: obj_xmin = np.maximum(0, int(obj.xmin * src_width)) 
- crop_bbox[0] obj_ymin = np.maximum(0, int(obj.ymin * src_height)) - crop_bbox[1] obj_xmax = np.minimum(int(obj.xmax * src_width), src_width) - crop_bbox[0] obj_ymax = np.minimum(int(obj.ymax * src_height), src_height) - crop_bbox[1] if obj_xmin < 0 and obj_xmax > crop_width and obj_ymin < 0 and obj_ymax > crop_height or \ obj_xmax <= 0 or obj_ymax <= 0 or obj_xmin >= crop_width or obj_ymin >= crop_height: continue out_obj_xmin = float(np.maximum(0, obj_xmin)) / float(crop_width) out_obj_ymin = float(np.maximum(0, obj_ymin)) / float(crop_height) out_obj_xmax = float(np.minimum(obj_xmax, crop_width)) / float(crop_width) out_obj_ymax = float(np.minimum(obj_ymax, crop_height)) / float(crop_height) out_obj_height = out_obj_ymax - out_obj_ymin out_obj_width = out_obj_xmax - out_obj_xmin if out_obj_height < self.min_bbox_size_ or out_obj_width < self.min_bbox_size_: continue cropped_objects.append(BBox(track_id=obj.track_id, action=obj.action, xmin=out_obj_xmin, ymin=out_obj_ymin, xmax=out_obj_xmax, ymax=out_obj_ymax, occluded=obj.occluded)) augmented_objects = cropped_objects if augmented_img.shape[:2] != (trg_height, trg_width): augmented_img = cv2.resize(augmented_img, (trg_width, trg_height)) if transform.mirror: augmented_img = augmented_img[:, ::-1, :] mirrored_objects = [] for obj in augmented_objects: mirrored_objects.append(BBox(track_id=obj.track_id, action=obj.action, xmin=1.0 - obj.xmax, ymin=obj.ymin, xmax=1.0 - obj.xmin, ymax=obj.ymax, occluded=obj.occluded)) augmented_objects = mirrored_objects return augmented_img, augmented_objects def _augment_image(self, img): """Carry out augmentation of image. 
Maintainable types of single image augmentation: * Blur * Gamma * Brightness * Down- and Up-Scale * Gaussian noise * Salt and Pepper :param img: Input image :return: Augmented image """ augmented_img = img if self.blur_ and np.random.uniform(0.0, 1.0) < self.max_blur_prob_: filter_size = np.random.uniform(low=self.sigma_limits_[0], high=self.sigma_limits_[1]) augmented_img[:, :, 0] = gaussian_filter(augmented_img[:, :, 0], sigma=filter_size) augmented_img[:, :, 1] = gaussian_filter(augmented_img[:, :, 1], sigma=filter_size) augmented_img[:, :, 2] = gaussian_filter(augmented_img[:, :, 2], sigma=filter_size) if self.gamma_ and np.random.uniform(0.0, 1.0) < self.max_gamma_prob_: rand_val = np.random.uniform(-self.delta_, self.delta_) gamma = np.log(0.5 + (2 ** (-0.5)) * rand_val) / np.log(0.5 - (2 ** (-0.5)) * rand_val) float_image = augmented_img.astype(np.float32) * (1. / 255.) augmented_img = (np.power(float_image, gamma) * 255.0).astype(np.int32) augmented_img[augmented_img > 255] = 255 augmented_img[augmented_img < 0] = 0 augmented_img = augmented_img.astype(np.uint8) if self.brightness_ and np.random.uniform(0.0, 1.0) < self.max_brightness_prob_: if np.average(augmented_img) > self.min_pos_: alpha = np.random.uniform(self.pos_alpha_[0], self.pos_alpha_[1]) beta = np.random.randint(self.pos_beta_[0], self.pos_beta_[1]) else: alpha = np.random.uniform(self.neg_alpha_[0], self.neg_alpha_[1]) beta = np.random.randint(self.neg_beta_[0], self.neg_beta_[1]) augmented_img = (augmented_img.astype(np.float32) * alpha + beta).astype(np.int32) augmented_img[augmented_img > 255] = 255 augmented_img[augmented_img < 0] = 0 augmented_img = augmented_img.astype(np.uint8) if self.down_up_scale_ and np.random.uniform(0.0, 1.0) < self.down_up_scale_prob_: src_height, src_width = augmented_img.shape[:2] scale_factor = np.random.uniform(self.min_scale_, 1.0) aug_height = int(src_height * scale_factor) aug_width = int(src_width * scale_factor) augmented_img = cv2.resize(augmented_img, 
(aug_width, aug_height)) augmented_img = cv2.resize(augmented_img, (src_width, src_height)) if self.noise_ and np.random.uniform(0.0, 1.0) < self.noise_prob_: noise_scale = np.random.uniform(0.0, self.noise_max_scale_) * 255.0 augmented_img = augmented_img.astype(np.float32) + np.random.normal(0.0, noise_scale, augmented_img.shape) augmented_img[augmented_img < 0.0] = 0.0 augmented_img[augmented_img > 255.0] = 255.0 augmented_img = augmented_img.astype(np.uint8) if self.salt_pepper_ and np.random.uniform(0.0, 1.0) < self.salt_pepper_prob_: augmented_img[np.less(np.random.uniform(0.0, 1.0, augmented_img.shape), self.salt_pepper_p_)] = 0 augmented_img[np.less(np.random.uniform(0.0, 1.0, augmented_img.shape), self.salt_pepper_p_)] = 255 return augmented_img.astype(np.uint8) def _sample_annotated_image(self, frame_id): """Loads image from disk and augments it. :param frame_id: ID of loaded image :return: Image and its annotation """ image, objects = self._data_sampler.get_frame_with_annotation(frame_id) assert image is not None assert objects is not None transform_params = self._sample_params(image, objects) transformed_image, transformed_objects = \ self._transform_image_with_objects(image, objects, transform_params, self.height_, self.width_) augmented_image = self._augment_image(transformed_image) return transformed_objects, augmented_image def _sample_next_batch(self): """Generates next batch of images with annotation :return: Pair of images and its annotation """ images_blob = [] labels_blob = [] batch_frame_ids = [] item_id = 0 while item_id < self.batch_size_: frame_id, objects, augmented_image = self.annotated_images_queue.get(True) if frame_id in batch_frame_ids: continue labels_blob += self._objects_to_blob(item_id, objects) images_blob.append(self._image_to_blob(augmented_image, self.width_, self.height_)) batch_frame_ids.append(frame_id) item_id += 1 images_blob = np.array(images_blob, dtype=np.float32) labels_blob = np.array(labels_blob, 
dtype=np.float32).reshape([1, 1, -1, 8]) return images_blob, labels_blob def _set_data(self, data_sampler): """Sets loader of images. :param data_sampler: owner of loaded images """ self._data_sampler = data_sampler def _load_params(self, param_str): """Loads layer parameters. :param param_str: Input str of parameters """ layer_params = eval(param_str) assert 'tasks' in layer_params assert exists(layer_params['tasks']) assert 'batch' in layer_params assert 'height' in layer_params assert 'width' in layer_params assert 'valid_action_ids' in layer_params assert 'ignore_class_id' in layer_params self._valid_action_ids = layer_params['valid_action_ids'] assert len(self._valid_action_ids) > 0 self._ignore_class_id = layer_params['ignore_class_id'] self.ignore_occluded_ = layer_params['ignore_occluded'] if 'ignore_occluded' in layer_params else True data_sampler = SampleDataFromDisk(layer_params['tasks'], self.ignore_occluded_, ACTION_NAMES_MAP, self._valid_action_ids, self._ignore_class_id) self.batch_size_ = layer_params['batch'] self._set_data(data_sampler) self.height_ = layer_params['height'] self.width_ = layer_params['width'] self.num_data_fillers_ = layer_params['num_data_fillers'] if 'num_data_fillers' in layer_params else 3 self.data_queue_size_ = layer_params['data_queue_size'] if 'data_queue_size' in layer_params else 30 self.single_iter_ = layer_params['single_iter'] if 'single_iter' in layer_params else False if self.single_iter_: assert self.num_data_fillers_ == 1 self.blur_ = layer_params['blur'] if 'blur' in layer_params else False if self.blur_: self.sigma_limits_ = layer_params['sigma_limits'] if 'sigma_limits' in layer_params else [0.0, 0.5] self.max_blur_prob_ = layer_params['max_blur_prob'] if 'max_blur_prob' in layer_params else 0.5 assert 0.0 <= self.sigma_limits_[0] < self.sigma_limits_[1] assert 0.0 <= self.max_blur_prob_ <= 1.0 self.gamma_ = layer_params['gamma'] if 'gamma' in layer_params else False if self.gamma_: self.delta_ = 
layer_params['delta'] if 'delta' in layer_params else 0.15 self.max_gamma_prob_ = layer_params['max_gamma_prob'] if 'max_gamma_prob' in layer_params else 0.5 assert 0.0 < self.delta_ < 1.0 assert 0.0 <= self.max_gamma_prob_ <= 1.0 self.brightness_ = layer_params['brightness'] if 'brightness' in layer_params else False if self.brightness_: self.min_pos_ = layer_params['min_pos'] if 'min_pos' in layer_params else 100.0 self.pos_alpha_ = layer_params['pos_alpha'] if 'pos_alpha' in layer_params else [0.2, 1.5] self.pos_beta_ = layer_params['pos_beta'] if 'pos_beta' in layer_params else [-100.0, 50.0] self.neg_alpha_ = layer_params['neg_alpha'] if 'neg_alpha' in layer_params else [0.9, 1.5] self.neg_beta_ = layer_params['neg_beta'] if 'neg_beta' in layer_params else [-20.0, 50.0] self.max_brightness_prob_ = layer_params[ 'max_brightness_prob']
(4x4 mxs # in 1Q case) - whcih is what model.basis is. So, we just extract # a builtin basis name for the projection basis. if basis.name in ('pp', 'gm', 'std', 'qt'): proj_basis_name = basis.name else: proj_basis_name = 'pp' # model.basis is weird so just use paulis as projection basis if basis.name != targetModel.basis.name: raise ValueError("Basis mismatch between model (%s) and target (%s)!" % (model.basis.name, targetModel.basis.name)) # Note: set to "full" parameterization so we can set the gates below # regardless of what parameterization the original model had. gsDict = {}; NpDict = {} for p in projectiontypes: gsDict[p] = model.copy() gsDict[p].set_all_parameterizations("full") NpDict[p] = 0 errgens = [error_generator(model.operations[gl], targetModel.operations[gl], targetModel.basis, genType) for gl in opLabels] for gl, errgen in zip(opLabels, errgens): if ('H' in projectiontypes) or ('H+S' in projectiontypes): hamProj, hamGens = std_errgen_projections( errgen, "hamiltonian", proj_basis_name, basis, True) #ham_error_gen = _np.einsum('i,ijk', hamProj, hamGens) ham_error_gen = _np.tensordot(hamProj, hamGens, (0, 0)) ham_error_gen = _bt.change_basis(ham_error_gen, "std", basis) if ('S' in projectiontypes) or ('H+S' in projectiontypes): stoProj, stoGens = std_errgen_projections( errgen, "stochastic", proj_basis_name, basis, True) #sto_error_gen = _np.einsum('i,ijk', stoProj, stoGens) sto_error_gen = _np.tensordot(stoProj, stoGens, (0, 0)) sto_error_gen = _bt.change_basis(sto_error_gen, "std", basis) if ('LND' in projectiontypes) or ('LNDF' in projectiontypes): HProj, OProj, HGens, OGens = \ lindblad_errgen_projections( errgen, proj_basis_name, proj_basis_name, basis, normalize=False, return_generators=True) #Note: return values *can* be None if an empty/None basis is given #lnd_error_gen = _np.einsum('i,ijk', HProj, HGens) + \ # _np.einsum('ij,ijkl', OProj, OGens) lnd_error_gen = _np.tensordot(HProj, HGens, (0, 0)) + \ _np.tensordot(OProj, OGens, ((0, 1), 
(0, 1))) lnd_error_gen = _bt.change_basis(lnd_error_gen, "std", basis) targetOp = targetModel.operations[gl] if 'H' in projectiontypes: gsDict['H'].operations[gl] = operation_from_error_generator( ham_error_gen, targetOp, genType) NpDict['H'] += len(hamProj) if 'S' in projectiontypes: gsDict['S'].operations[gl] = operation_from_error_generator( sto_error_gen, targetOp, genType) NpDict['S'] += len(stoProj) if 'H+S' in projectiontypes: gsDict['H+S'].operations[gl] = operation_from_error_generator( ham_error_gen + sto_error_gen, targetOp, genType) NpDict['H+S'] += len(hamProj) + len(stoProj) if 'LNDF' in projectiontypes: gsDict['LNDF'].operations[gl] = operation_from_error_generator( lnd_error_gen, targetOp, genType) NpDict['LNDF'] += HProj.size + OProj.size if 'LND' in projectiontypes: evals, U = _np.linalg.eig(OProj) pos_evals = evals.clip(0, 1e100) # clip negative eigenvalues to 0 OProj_cp = _np.dot(U, _np.dot(_np.diag(pos_evals), _np.linalg.inv(U))) #OProj_cp is now a pos-def matrix #lnd_error_gen_cp = _np.einsum('i,ijk', HProj, HGens) + \ # _np.einsum('ij,ijkl', OProj_cp, OGens) lnd_error_gen_cp = _np.tensordot(HProj, HGens, (0, 0)) + \ _np.tensordot(OProj_cp, OGens, ((0, 1), (0, 1))) lnd_error_gen_cp = _bt.change_basis(lnd_error_gen_cp, "std", basis) gsDict['LND'].operations[gl] = operation_from_error_generator( lnd_error_gen_cp, targetOp, genType) NpDict['LND'] += HProj.size + OProj.size #Removed attempt to contract H+S to CPTP by removing positive stochastic projections, # but this doesn't always return the gate to being CPTP (maybe b/c of normalization)... #sto_error_gen_cp = _np.einsum('i,ijk', stoProj.clip(None,0), stoGens) # # (only negative stochastic projections OK) #sto_error_gen_cp = _tools.std_to_pp(sto_error_gen_cp) #gsHSCP.operations[gl] = _tools.operation_from_error_generator( # ham_error_gen, targetOp, genType) #+sto_error_gen_cp #DEBUG!!! 
#print("DEBUG: BEST sum neg evals = ",_tools.sum_of_negative_choi_evals(model)) #print("DEBUG: LNDCP sum neg evals = ",_tools.sum_of_negative_choi_evals(gsDict['LND'])) #Check for CPTP where expected #assert(_tools.sum_of_negative_choi_evals(gsHSCP) < 1e-6) #assert(_tools.sum_of_negative_choi_evals(gsDict['LND']) < 1e-6) #Collect and return requrested results: ret_gs = [gsDict[p] for p in projectiontypes] ret_Nps = [NpDict[p] for p in projectiontypes] return ret_gs, ret_Nps def get_a_best_case_gauge_transform(gate_mx, target_gate_mx, returnAll=False): """ Returns a gauge transformation that maps `gate_mx` into a matrix that is co-diagonal with `target_gate_mx`, i.e. they share a common set of eigenvectors. Gauge transformations effectively change the basis of all the gates in a model. From the perspective of a single gate a gauge transformation leaves it's eigenvalues the same and changes its eigenvectors. This function finds a *real* transformation that transforms the eigenspaces of `gate_mx` so that there exists a set of eigenvectors which diagonalize both `gate_mx` and `target_gate_mx`. Parameters ---------- gate_mx, target_gate_mx : numpy.ndarray The gate and target-gate matrices. returnAll : bool, optional If true, also return the matrices of eigenvectors for `Ugate` for gate_mx and `Utgt` for target_gate_mx such that `U = dot(Utgt, inv(Ugate))` is real. Returns ------- U : numpy.ndarray A gauge transformation such that if `epgate = U * gate_mx * U_inv`, then `epgate` (which has the same eigenalues as `gate_mx`), can be diagonalized with a set of eigenvectors that also diagonalize `target_gate_mx`. Furthermore, `U` is real. Ugate, Utgt : numpy.ndarray only if `returnAll == True`. See above. """ # A complication that must be dealt with is that # the eigenvalues of `target_gate_mx` can be degenerate, # and so matching up eigenvalues can't be done *just* based on value. 
# Our algorithm consists of two steps: # 1) match gate & target eigenvalues based on value, ensuring conjugacy # relationships between eigenvalues are preserved. # 2) for each eigenvalue/vector of `gate`, project the eigenvector onto # the eigenspace of `tgt_gate` corresponding to the matched eigenvalue. # (treat conj-pair eigenvalues of `gate` together). # we want a matrix that gauge-transforms gate_mx into a matrix as # close to target_gate_mx as possible, i.e. that puts gate_mx's # eigenvalues in the eigenspaces of target_gate_mx. This is done # by Ubest = _np.dot(Utgt, inv(Uop)), but there are often degrees # of freedom in Uop because of its degeneracies. Also, we want Ubest # to be *real*, so we need to ensure the conjugacy structure of Utgt # and Uop match... assert(_np.linalg.norm(gate_mx.imag) < 1e-8) assert(_np.linalg.norm(target_gate_mx.imag) < 1e-8) if True: # NEW approach that gives sorted eigenvectors def get_eigenspace_pairs(mx, TOL=1e-6): evals, U = _np.linalg.eig(mx) # so mx = U * evals * Uinv espace_pairs = {}; conj_pair_indices = [] #Pass 1: real evals and positive-imaginary-element-of-conjugate pair evals # (these are the representatives of "eigenspace pairs") for i, ev in enumerate(evals): if ev.imag < -TOL: conj_pair_indices.append(i); continue # save for pass2 #see if ev is already in espace_pairs for k, v in espace_pairs.items(): if abs(k - ev) < TOL: espace_pairs[k]['indices'].append(i) espace_pairs[k]['conj_pair_indices'].append(None) #espace_pairs[k]['evecs'].append(U[:,i]) break else: espace_pairs[ev] = {'indices': [i], 'conj_pair_indices': [None]} #Pass 2: negative-imaginary-part elements of evals that occur in conjugate pairs for i in conj_pair_indices: ev_pos = _np.conjugate(evals[i]) for k, v in espace_pairs.items(): # ev_pos *should* be in espace_pairs if abs(k - ev_pos) < TOL: #found the correct eigenspace-pair to add this eval & evec to, # now figure our where to put this index based on conjugacy relationships, # i.e. 
U[:,esp['indices'][i]] is always conjugate to U[:,esp['conj_pair_indices'][i]] for jj, j in enumerate(espace_pairs[k]['indices']): if espace_pairs[k]['conj_pair_indices'][jj] is None: # an empty slot espace_pairs[k]['conj_pair_indices'][jj] = i U[:, i] = U[:, j].conj() break else: raise ValueError("Nowhere to place a conjugate eigenvector %d-dim eigenbasis for %s!" % (len(espace_pairs[k]['indices']), str(k))) break else: raise ValueError("Expected to find %s as an espace-pair representative in %s" % (str(ev_pos), str(espace_pairs.keys()))) #if not (_np.allclose(mx, _np.dot(U, _np.dot(_np.diag(evals), _np.linalg.inv(U))))): # import bpdb; bpdb.set_trace() return evals, U, espace_pairs def standard_diag(mx, TOL=1e-6): evals, U, espairs = get_eigenspace_pairs(mx) std_evals = [] std_evecs = [] sorted_rep_evals = sorted(list(espairs.keys()), key=lambda x: (x.real, x.imag)) for ev in sorted_rep_evals: # iterate in sorted order just for definitiveness info = espairs[ev] dim = len(info['indices']) # dimension of this eigenspace (and it's pair, if there is one) #Ensure real eigenvalue blocks should have real eigenvectors if abs(ev.imag) < TOL: #find linear combinations of the eigenvectors that are real Usub = U[:, info['indices']] if _np.linalg.norm(Usub.imag) > TOL: # Im part of Usub * combo = Usub.real*combo.imag + Usub.imag*combo.real combo_real_imag = _mt.nullspace(_np.concatenate((Usub.imag, Usub.real), axis=1)) combos = combo_real_imag[0:dim, :] + 1j * combo_real_imag[dim:, :] if combos.shape[1] != dim: raise ValueError(("Can only find %d (< %d) *real* linear combinations of" " vectors in eigenspace for %s!") % (combos.shape[1], dim, str(ev))) U[:, info['indices']] = _np.dot(Usub, combos) assert(_np.linalg.norm(U[:, info['indices']].imag) < TOL) #Add real eigenvalues and vectors std_evals.extend([ev] * dim) std_evecs.extend([U[:, i] for i in info['indices']]) else: # complex eigenvalue case - should have conjugate pair info #Ensure blocks for conjugate-pairs of 
eigenvalues follow one after another and # corresponding eigenvectors (e.g. the first of each block) are conjugate pairs # (this is already done in the eigenspace construction) assert(len(info['conj_pair_indices']) == dim) std_evals.extend([ev] * dim) std_evals.extend([_np.conjugate(ev)] * dim) std_evecs.extend([U[:, i] for i in info['indices']]) std_evecs.extend([U[:, i] for i in info['conj_pair_indices']]) return _np.array(std_evals), _np.array(std_evecs).T #Create "gate_tilde" which has the eigenvectors of gate_mx around the matched eigenvalues of
year in range(1900, 2100):
            dt = date(year, 7, 9)
            self.assertIn(dt, self.holidays)
            # Adjacent days must NOT be holidays (no observed-day spillover).
            self.assertNotIn(dt + relativedelta(days=-1), self.holidays)
            self.assertNotIn(dt + relativedelta(days=+1), self.holidays)

    def test_san_martin_day(self):
        # San Martin Day (Aug 17). With `observed` disabled, the shifted
        # observance dates (Aug 10 here) must be absent.
        self.holidays.observed = False
        self.assertNotIn(date(1930, 8, 10), self.holidays)
        self.assertNotIn(date(2008, 8, 10), self.holidays)
        self.holidays.observed = True
        for year in range(1900, 2100):
            dt = date(year, 8, 17)
            self.assertIn(dt, self.holidays)
            # Neighbouring days are never holidays themselves.
            self.assertNotIn(dt + relativedelta(days=-1), self.holidays)
            self.assertNotIn(dt + relativedelta(days=+1), self.holidays)

    def test_cultural_day(self):
        # Cultural Day (Oct 12): fixed date every year 1900-2099; the
        # observed=False branch checks no extra Oct 12 entries appear
        # for the sampled years when observance shifting is off.
        self.holidays.observed = False
        self.assertNotIn(date(2014, 10, 12), self.holidays)
        self.assertNotIn(date(1913, 10, 12), self.holidays)
        self.holidays.observed = True
        for year in range(1900, 2100):
            dt = date(year, 10, 12)
            self.assertIn(dt, self.holidays)
            self.assertNotIn(dt + relativedelta(days=-1), self.holidays)
            self.assertNotIn(dt + relativedelta(days=+1), self.holidays)

    def test_national_sovereignty_day(self):
        # National Sovereignty Day (Nov 20) only exists from 2010 onwards.
        for year in range(1900, 2100):
            dt = date(year, 11, 20)
            if year < 2010:
                self.assertNotIn(dt, self.holidays)
            else:
                self.assertIn(dt, self.holidays)
                self.assertNotIn(dt + relativedelta(days=-1), self.holidays)
                self.assertNotIn(dt + relativedelta(days=+1), self.holidays)

    def test_inmaculate_conception_day(self):
        # Immaculate Conception (Dec 8), fixed date; same observed=False
        # spot checks as above.  (Method name typo "inmaculate" kept —
        # renaming would change the test ID.)
        self.holidays.observed = False
        self.assertNotIn(date(1940, 12, 8), self.holidays)
        self.assertNotIn(date(2013, 12, 8), self.holidays)
        self.holidays.observed = True
        for year in range(1900, 2100):
            dt = date(year, 12, 8)
            self.assertIn(dt, self.holidays)
            self.assertNotIn(dt + relativedelta(days=-1), self.holidays)
            self.assertNotIn(dt + relativedelta(days=+1), self.holidays)

    def test_christmas(self):
        # Christmas (Dec 25) is a holiday every year; Dec 24/26 are not.
        for year in range(1900, 2100):
            dt = date(year, 12, 25)
            self.assertIn(dt, self.holidays)
            self.assertNotIn(dt + relativedelta(days=-1), self.holidays)
            self.assertNotIn(dt + relativedelta(days=+1), self.holidays)


class TestIND(unittest.TestCase):
    """Sanity checks for the India holiday calendar and its state/province
    variants (selected 2018 dates)."""

    def setUp(self):
        self.holidays = holidays.IND()

    def test_2018(self):
        # Nationwide 2018 holidays.
        self.assertIn(date(2018, 1, 1), self.holidays)
        self.assertIn(date(2018, 10, 2), self.holidays)
        self.assertIn(date(2018, 8, 15), self.holidays)
        self.assertIn(date(2018, 1, 26), self.holidays)
        self.assertIn(date(2018, 12, 25), self.holidays)
        self.assertIn(date(2018, 5, 1), self.holidays)
        self.assertIn(date(2018, 1, 14), self.holidays)
        # Per-state calendars, keyed by the `prov` argument.
        gj_holidays = holidays.IND(prov="GJ")
        as_holidays = holidays.IND(prov="AS")
        tn_holidays = holidays.IND(prov="TN")
        wb_holidays = holidays.IND(prov="WB")
        cg_holidays = holidays.IND(prov="CG")
        sk_holidays = holidays.IND(prov="SK")
        ka_holidays = holidays.IND(prov="KA")
        br_holidays = holidays.IND(prov="BR")
        rj_holidays = holidays.IND(prov="RJ")
        od_holidays = holidays.IND(prov="OD")
        ap_holidays = holidays.IND(prov="AP")
        kl_holidays = holidays.IND(prov="KL")
        hr_holidays = holidays.IND(prov="HR")
        mh_holidays = holidays.IND(prov="MH")
        mp_holidays = holidays.IND(prov="MP")
        up_holidays = holidays.IND(prov="UP")
        uk_holidays = holidays.IND(prov="UK")
        for dt in ([date(2018, 1, 14), date(2018, 5, 1),
                    date(2018, 10, 31)]):
            self.assertIn(dt, gj_holidays)
        for dt in [date(2018, 4, 15), date(2018, 4, 14)]:
            self.assertIn(dt, tn_holidays)
            self.assertIn(dt, wb_holidays)
        # NOTE(review): this Gujarat loop is an exact duplicate of the one
        # above — looks like an accidental copy; harmless but removable.
        for dt in ([date(2018, 1, 14), date(2018, 5, 1),
                    date(2018, 10, 31)]):
            self.assertIn(dt, gj_holidays)
        self.assertIn(date(2018, 3, 22), br_holidays)
        self.assertIn(date(2018, 3, 30), rj_holidays)
        self.assertIn(date(2018, 6, 15), rj_holidays)
        self.assertIn(date(2018, 4, 1), od_holidays)
        self.assertIn(date(2018, 4, 15), od_holidays)
        self.assertIn(date(2018, 4, 14), od_holidays)
        self.assertIn(date(2018, 4, 14), br_holidays)
        self.assertIn(date(2018, 4, 14), kl_holidays)
        self.assertIn(date(2018, 4, 14), up_holidays)
        self.assertIn(date(2018, 4, 14), uk_holidays)
        self.assertIn(date(2018, 4, 14), hr_holidays)
        self.assertIn(date(2018, 4, 14), mh_holidays)
        self.assertIn(date(2018, 4, 14), wb_holidays)
        self.assertIn(date(2018, 5, 9), wb_holidays)
        self.assertIn(date(2018, 4, 15), as_holidays)
        self.assertIn(date(2018, 5, 1), mh_holidays)
        self.assertIn(date(2018, 5, 16), sk_holidays)
        self.assertIn(date(2018, 11, 1), ka_holidays)
        self.assertIn(date(2018, 11, 1), ap_holidays)
        self.assertIn(date(2018, 11, 1), hr_holidays)
        self.assertIn(date(2018, 11, 1), mp_holidays)
        self.assertIn(date(2018, 11, 1), kl_holidays)
        self.assertIn(date(2018, 11, 1), cg_holidays)


class TestBelarus(unittest.TestCase):
    """Checks for the Belarus (BY) holiday calendar."""

    def setUp(self):
        self.holidays = holidays.BY()

    def test_2018(self):
        # http://calendar.by/procal.php?year=2018
        # https://www.officeholidays.com/countries/belarus/index.php
        self.assertIn(date(2018, 1, 1), self.holidays)
        self.assertIn(date(2018, 1, 7), self.holidays)
        self.assertIn(date(2018, 3, 8), self.holidays)
        self.assertIn(date(2018, 4, 17), self.holidays)
        self.assertIn(date(2018, 5, 1), self.holidays)
        self.assertIn(date(2018, 5, 9), self.holidays)
        self.assertIn(date(2018, 7, 3), self.holidays)
        self.assertIn(date(2018, 11, 7), self.holidays)
        self.assertIn(date(2018, 12, 25), self.holidays)

    def test_radunitsa(self):
        # Radunitsa is Easter-relative (9th day after Orthodox Easter),
        # so every year's date is pinned individually.
        # http://calendar.by/content.php?id=20
        self.assertIn(date(2012, 4, 24), self.holidays)
        self.assertIn(date(2013, 5, 14), self.holidays)
        self.assertIn(date(2014, 4, 29), self.holidays)
        self.assertIn(date(2015, 4, 21), self.holidays)
        self.assertIn(date(2016, 5, 10), self.holidays)
        self.assertIn(date(2017, 4, 25), self.holidays)
        self.assertIn(date(2018, 4, 17), self.holidays)
        self.assertIn(date(2019, 5, 7), self.holidays)
        self.assertIn(date(2020, 4, 28), self.holidays)
        self.assertIn(date(2021, 5, 11), self.holidays)
        self.assertIn(date(2022, 5, 3), self.holidays)
        self.assertIn(date(2023, 4, 25), self.holidays)
        self.assertIn(date(2024, 5, 14), self.holidays)
        self.assertIn(date(2025, 4, 29), self.holidays)
        self.assertIn(date(2026, 4, 21), self.holidays)
        self.assertIn(date(2027, 5, 11), self.holidays)
        self.assertIn(date(2028, 4, 25), self.holidays)
        self.assertIn(date(2029, 4, 17), self.holidays)
        self.assertIn(date(2030, 5, 7), self.holidays)

    def test_before_1998(self):
        # July 3 (Independence Day) was not a holiday before 1998.
        self.assertNotIn(date(1997, 7, 3), self.holidays)


class TestCroatia(unittest.TestCase):
    """Checks for the Croatia (HR) holiday calendar."""

    def setUp(self):
        self.holidays = holidays.HR()

    def test_2018(self):
        self.assertIn(date(2018, 1, 1), self.holidays)
        self.assertIn(date(2018, 1, 6), self.holidays)
        self.assertIn(date(2018, 4, 1), self.holidays)
        self.assertIn(date(2018, 4, 2), self.holidays)
        self.assertIn(date(2018, 5, 1), self.holidays)
        self.assertIn(date(2018, 8, 15), self.holidays)
        self.assertIn(date(2018, 10, 8), self.holidays)
        self.assertIn(date(2018, 11, 1), self.holidays)
        self.assertIn(date(2018, 12, 25), self.holidays)
        self.assertIn(date(2018, 12, 26), self.holidays)


class TestUkraine(unittest.TestCase):
    """Checks for the Ukraine (UA) holiday calendar, including
    era-dependent (Soviet-period / pre-1918) holidays."""

    def setUp(self):
        self.holidays = holidays.UA()

    def test_before_1918(self):
        # The calendar starts in 1918; earlier dates yield nothing.
        self.assertNotIn(date(1917, 12, 31), self.holidays)

    def test_2018(self):
        # http://www.buhoblik.org.ua/kadry-zarplata/vremya/1676-1676-kalendar.html
        self.assertIn(date(2018, 1, 1), self.holidays)
        self.assertIn(date(2018, 1, 7), self.holidays)
        self.assertIn(date(2018, 12, 25), self.holidays)
        self.assertIn(date(2018, 4, 8), self.holidays)
        self.assertIn(date(2018, 5, 27), self.holidays)
        self.assertIn(date(2018, 5, 9), self.holidays)
        self.assertIn(date(2018, 6, 28), self.holidays)
        self.assertIn(date(2018, 8, 24), self.holidays)
        self.assertIn(date(2018, 10, 14), self.holidays)

    def test_old_holidays(self):
        # Holidays that only existed in certain historical periods.
        self.assertIn(date(2018, 5, 1), self.holidays)
        self.assertIn(date(2016, 5, 2), self.holidays)
        self.assertIn(date(1991, 7, 16), self.holidays)
        self.assertIn(date(1950, 1, 22), self.holidays)
        self.assertIn(date(1999, 11, 7), self.holidays)
        self.assertIn(date(1999, 11, 8), self.holidays)
        self.assertIn(date(1945, 5, 9), self.holidays)
        self.assertIn(date(1945, 9, 3), self.holidays)
        self.assertIn(date(1981, 10, 7), self.holidays)
        self.assertIn(date(1937, 12, 5), self.holidays)
        self.assertIn(date(1918, 3, 18), self.holidays)


class
TestBrazil(unittest.TestCase): def test_BR_holidays(self): self.holidays = holidays.BR(years=2018) self.assertIn("2018-01-01", self.holidays) self.assertEqual(self.holidays[date(2018, 1, 1)], "Ano novo") self.assertIn("2018-02-14", self.holidays) self.assertEqual(self.holidays[date(2018, 2, 14)], "Quarta-feira de cinzas (Início da Quaresma)") self.assertIn("2018-02-20", self.holidays) self.assertEqual(self.holidays[date(2018, 2, 20)], "Carnaval") self.assertIn("2018-04-01", self.holidays) self.assertEqual(self.holidays[date(2018, 4, 1)], "Páscoa") self.assertIn("2018-04-21", self.holidays) self.assertEqual(self.holidays[date(2018, 4, 21)], "Tiradentes") self.assertIn("2018-05-01", self.holidays) self.assertEqual(self.holidays[date(2018, 5, 1)], "Dia Mundial do Trabalho") self.assertIn("2018-05-31", self.holidays) self.assertEqual(self.holidays[date(2018, 5, 31)], "Corpus Christi") self.assertIn("2018-09-07", self.holidays) self.assertEqual(self.holidays[date(2018, 9, 7)], "Independência do Brasil") self.assertIn("2018-10-12", self.holidays) self.assertEqual(self.holidays[date(2018, 10, 12)], "Nossa Senhora Aparecida") self.assertIn("2018-11-02", self.holidays) self.assertEqual(self.holidays[date(2018, 11, 2)], "Finados") self.assertIn("2018-11-15", self.holidays) self.assertEqual(self.holidays[date(2018, 11, 15)], "Proclamação da República") self.assertIn("2018-12-25", self.holidays) self.assertEqual(self.holidays[date(2018, 12, 25)], "Natal") def test_AC_holidays(self): ac_holidays = holidays.BR(state="AC") self.assertIn("2018-01-23", ac_holidays) self.assertEqual(ac_holidays[date(2018, 1, 23)], "Dia do evangélico") self.assertIn("2018-06-15", ac_holidays) self.assertEqual(ac_holidays[date(2018, 6, 15)], "Aniversário do Acre") self.assertIn("2018-09-05", ac_holidays) self.assertEqual(ac_holidays[date(2018, 9, 5)], "Dia da Amazônia") self.assertIn("2018-11-17", ac_holidays) self.assertEqual(ac_holidays[date(2018, 11, 17)], "Assinatura do Tratado de Petrópolis") def 
test_AL_holidays(self): al_holidays = holidays.BR(state="AL") self.assertIn("2018-06-24", al_holidays) self.assertEqual(al_holidays[date(2018, 6, 24)], "São João") self.assertIn("2018-06-29", al_holidays) self.assertEqual(al_holidays[date(2018, 6, 29)], "São Pedro") self.assertIn("2018-09-16", al_holidays) self.assertEqual(al_holidays[date(2018, 9, 16)], "Emancipação política de Alagoas") self.assertIn("2018-11-20", al_holidays) self.assertEqual(al_holidays[date(2018, 11, 20)], "Consciência Negra") def test_AP_holidays(self): ap_holidays = holidays.BR(state="AP") self.assertIn("2018-03-19", ap_holidays) self.assertEqual(ap_holidays[date(2018, 3, 19)], "Dia de São José") self.assertIn("2018-07-25", ap_holidays) self.assertEqual(ap_holidays[date(2018, 7, 25)], "São Tiago") self.assertIn("2018-10-05", ap_holidays) self.assertEqual(ap_holidays[date(2018, 10, 5)], "Criação do estado") self.assertIn("2018-11-20", ap_holidays) self.assertEqual(ap_holidays[date(2018, 11, 20)], "Consciência Negra") def test_AM_holidays(self): am_holidays = holidays.BR(state="AM") self.assertIn("2018-09-05", am_holidays) self.assertEqual(am_holidays[date(2018, 9, 5)], "Elevação do Amazonas à categoria de província") self.assertIn("2018-11-20", am_holidays) self.assertEqual(am_holidays[date(2018, 11, 20)], "Consciência Negra") self.assertIn("2018-12-08", am_holidays) self.assertEqual(am_holidays[date(2018, 12, 8)], "Dia de Nossa Senhora da Conceição") def test_BA_holidays(self): ba_holidays = holidays.BR(state="BA") self.assertIn("2018-07-02", ba_holidays) self.assertEqual(ba_holidays[date(2018, 7, 2)], "Independência da Bahia") def test_CE_holidays(self): ce_holidays = holidays.BR(state="CE") self.assertIn("2018-03-19", ce_holidays) self.assertEqual(ce_holidays[date(2018, 3, 19)], "São José") self.assertIn("2018-03-25", ce_holidays) self.assertEqual(ce_holidays[date(2018, 3, 25)], "Data Magna do Ceará") def test_DF_holidays(self): df_holidays = holidays.BR(state="DF") 
self.assertIn("2018-04-21", df_holidays) self.assertEqual(df_holidays[date(2018, 4, 21)], "Fundação de Brasília, Tiradentes") self.assertIn("2018-11-30", df_holidays) self.assertEqual(df_holidays[date(2018, 11, 30)], "Dia do Evangélico") def test_ES_holidays(self): es_holidays = holidays.BR(state="ES") self.assertIn("2018-10-28", es_holidays) self.assertEqual( es_holidays[date(2018, 10, 28)], "Dia do Servidor Público") def test_GO_holidays(self): go_holidays = holidays.BR(state="GO") self.assertIn("2018-10-28", go_holidays) self.assertEqual( go_holidays[date(2018, 10, 28)], "Dia do Servidor Público") def test_MA_holidays(self): ma_holidays = holidays.BR(state="MA") self.assertIn("2018-07-28", ma_holidays) self.assertEqual(ma_holidays[date(2018, 7, 28)], "Adesão do Maranhão à independência do Brasil") self.assertIn("2018-12-08", ma_holidays) self.assertEqual(ma_holidays[date(2018, 12, 8)], "Dia de Nossa Senhora da Conceição") def test_MT_holidays(self): mt_holidays = holidays.BR(state="MT") self.assertIn("2018-11-20", mt_holidays) self.assertEqual(mt_holidays[date(2018, 11, 20)], "Consciência Negra") def test_MS_holidays(self): ms_holidays = holidays.BR(state="MS") self.assertIn("2018-10-11", ms_holidays) self.assertEqual(ms_holidays[date(2018, 10, 11)], "Criação do estado") def test_MG_holidays(self): mg_holidays = holidays.BR(state="MG") self.assertIn("2018-04-21", mg_holidays) self.assertEqual(mg_holidays[date(2018, 4, 21)], "Data Magna de MG, Tiradentes") def test_PA_holidays(self): pa_holidays = holidays.BR(state="PA") self.assertIn("2018-08-15", pa_holidays) self.assertEqual(pa_holidays[date(2018, 8, 15)], "Adesão do Grão-Pará à independência do Brasil") def test_PB_holidays(self): pb_holidays = holidays.BR(state="PB") self.assertIn("2018-08-05", pb_holidays) self.assertEqual(pb_holidays[date(2018, 8, 5)], "Fundação do Estado") def test_PE_holidays(self): pe_holidays = holidays.BR(state="PE") self.assertIn("2018-03-06", pe_holidays) 
self.assertEqual(pe_holidays[date(2018, 3, 6)], "Revolução Pernambucana (Data Magna)") self.assertIn("2018-06-24", pe_holidays) self.assertEqual(pe_holidays[date(2018, 6, 24)], "São João") def test_PI_holidays(self): pi_holidays = holidays.BR(state="PI") self.assertIn("2018-03-13", pi_holidays) self.assertEqual(pi_holidays[date(2018, 3, 13)], "Dia da Batalha do Jenipapo") self.assertIn("2018-10-19", pi_holidays) self.assertEqual(pi_holidays[date(2018, 10, 19)], "Dia do Piauí") def test_RJ_holidays(self): rj_holidays = holidays.BR(state="RJ") self.assertIn("2018-04-23", rj_holidays) self.assertEqual(rj_holidays[date(2018, 4, 23)], "Dia de São Jorge") self.assertIn("2018-10-28", rj_holidays) self.assertEqual(rj_holidays[date(2018, 10, 28)], "Dia do Funcionário Público") self.assertIn("2018-11-20", rj_holidays) self.assertEqual(rj_holidays[date(2018, 11, 20)], "Z<NAME> Palmares") def test_RN_holidays(self): rn_holidays = holidays.BR(state="RN") self.assertIn("2018-06-29", rn_holidays) self.assertEqual(rn_holidays[date(2018, 6, 29)], "Dia de São Pedro") self.assertIn("2018-10-03", rn_holidays) self.assertEqual(rn_holidays[date(2018, 10, 3)], "Mártires de Cunhaú e Uruaçuu") def test_RS_holidays(self): rs_holidays = holidays.BR(state="RS") self.assertIn("2018-09-20", rs_holidays) self.assertEqual( rs_holidays[date(2018, 9, 20)], "Revolução Farroupilha") def test_RO_holidays(self): ro_holidays = holidays.BR(state="RO") self.assertIn("2018-01-04", ro_holidays) self.assertEqual(ro_holidays[date(2018, 1, 4)], "Criação do estado") self.assertIn("2018-06-18", ro_holidays) self.assertEqual(ro_holidays[date(2018, 6, 18)], "Dia do Evangélico") def test_RR_holidays(self): rr_holidays = holidays.BR(state="RR") self.assertIn("2018-10-05", rr_holidays) self.assertEqual(rr_holidays[date(2018, 10, 5)], "Criação de Roraima") def test_SC_holidays(self): sc_holidays = holidays.BR(state="SC") self.assertIn("2018-08-11", sc_holidays) self.assertEqual(sc_holidays[date(2018, 8, 11)], "Criação 
da capitania, separando-se de SP") def test_SP_holidays(self): sp_holidays = holidays.BR(state="SP") self.assertIn("2018-07-09", sp_holidays) self.assertEqual(sp_holidays[date(2018, 7, 9)], "Revolução Constitucionalista de 1932") def test_SE_holidays(self): se_holidays =
'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')}, '861398648':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '861398649':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')}, '86145837':{'en': 'Jinan, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '86139773':{'en': 'Guilin, Guangxi', 'zh': u('\u5e7f\u897f\u6842\u6797\u5e02')}, '86139772':{'en': 'Liuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')}, '86139771':{'en': 'Nanning, Guangxi', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')}, '86139770':{'en': 'Fangchenggang, Guangxi', 'zh': u('\u5e7f\u897f\u9632\u57ce\u6e2f\u5e02')}, '86139777':{'en': 'Qinzhou, Guangxi', 'zh': u('\u5e7f\u897f\u94a6\u5dde\u5e02')}, '861395616':{'en': 'Wuhu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')}, '86139775':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')}, '86139774':{'en': 'Wuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u68a7\u5dde\u5e02')}, '86139779':{'en': 'Beihai, Guangxi', 'zh': u('\u5e7f\u897f\u5317\u6d77\u5e02')}, '86139778':{'en': 'Hechi, Guangxi', 'zh': u('\u5e7f\u897f\u6cb3\u6c60\u5e02')}, '86139809':{'en': 'Chengdu, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')}, '86139808':{'en': 'Chengdu, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')}, '86145836':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '861391348':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861391349':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861391342':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')}, '861391343':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')}, '861391340':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')}, '861391341':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')}, '861391346':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861391347':{'en': 'Xu<NAME>', 'zh': 
u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861391344':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')}, '861391345':{'en': 'Xu<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861454667':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')}, '861380519':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')}, '861380518':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')}, '861380849':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')}, '861380848':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')}, '861380843':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')}, '861380842':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')}, '861380841':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')}, '861380840':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')}, '861380847':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')}, '861380514':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')}, '861380517':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')}, '861380516':{'en': '<NAME>su', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')}, '861394837':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')}, '861394836':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')}, '861394835':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')}, '861394834':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')}, '861394833':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')}, '861394832':{'en': 'Baotou, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5305\u5934\u5e02')}, '861394831':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')}, '861394830':{'en': 'Hulun, Inner Mongolia', 'zh': 
u('\u5185\u8499\u53e4\u547c\u4f26\u8d1d\u5c14\u5e02')}, '861379659':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')}, '861454339':{'en': 'Xilin, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9521\u6797\u90ed\u52d2\u76df')}, '861399779':{'en': 'Enshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u6069\u65bd\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')}, '861394839':{'en': 'Bayannur, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5df4\u5f66\u6dd6\u5c14\u5e02')}, '861394838':{'en': 'Bayannur, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5df4\u5f66\u6dd6\u5c14\u5e02')}, '861379304':{'en': 'Heze, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')}, '861399778':{'en': 'Enshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u6069\u65bd\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')}, '861379305':{'en': 'Liaocheng, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')}, '861379306':{'en': 'Liaocheng, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')}, '861379307':{'en': 'Liaocheng, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')}, '861379300':{'en': 'Heze, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')}, '861379301':{'en': 'Heze, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')}, '861384838':{'en': 'Hinggan, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5174\u5b89\u76df')}, '861384839':{'en': 'Hinggan, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5174\u5b89\u76df')}, '861399771':{'en': 'Yichang, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')}, '861399770':{'en': 'Yichang, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')}, '86138648':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '86138649':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')}, '86138640':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '86138641':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')}, '86138642':{'en': '<NAME>', 'zh': 
u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '86138643':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')}, '86138644':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')}, '86138645':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')}, '86138646':{'en': 'Weifang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')}, '86138647':{'en': 'Dongying, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')}, '861454363':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '861454744':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')}, '861454745':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')}, '861454746':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')}, '861398588':{'en': 'Bijie, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')}, '861454740':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')}, '861454741':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')}, '861454742':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')}, '861454743':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')}, '861454748':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')}, '861454749':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')}, '861397205':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')}, '861397204':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')}, '861397207':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')}, '861397206':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')}, '861397201':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')}, '861452636':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861397200':{'en': '<NAME>', 'zh': 
u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')}, '861397203':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')}, '861397202':{'en': 'Y<NAME>', 'zh': u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')}, '861452968':{'en': 'Zh<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')}, '861452969':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')}, '861452962':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')}, '861452963':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')}, '861452960':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')}, '861452637':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')}, '861452966':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')}, '861452967':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')}, '861452964':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')}, '861452965':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')}, '861393443':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')}, '861393442':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')}, '861393441':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')}, '861393440':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')}, '861393447':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')}, '861393446':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')}, '861393445':{'en': 'Datong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861393444':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')}, '861393449':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')}, '861393448':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')}, '861387211':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')}, '86145238':{'en': 'Haikou, 
Hainan', 'zh': u('\u6d77\u5357\u7701\u6d77\u53e3\u5e02')}, '861452083':{'en': 'Wuhu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')}, '861452082':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')}, '86138740':{'en': 'Yueyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')}, '86145237':{'en': 'Chongqing', 'zh': u('\u91cd\u5e86\u5e02')}, '86145236':{'en': 'Chongqing', 'zh': u('\u91cd\u5e86\u5e02')}, '861452089':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')}, '861452088':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')}, '86139823':{'en': 'Panzhihua, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6500\u679d\u82b1\u5e02')}, '861394589':{'en': 'Yichun, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f0a\u6625\u5e02')}, '861394588':{'en': 'Yichun, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f0a\u6625\u5e02')}, '861394587':{'en': 'Yichun, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f0a\u6625\u5e02')}, '861394586':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')}, '861394585':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')}, '861394584':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')}, '861394583':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')}, '861394582':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')}, '861394581':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')}, '861394580':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')}, '86138946':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u767d\u57ce\u5e02')}, '861398581':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')}, '86139822':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')}, '861390932':{'en': '<NAME>', 'zh': 
u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')}, '861390933':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e73\u51c9\u5e02')}, '861390930':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u4e34\u590f\u56de\u65cf\u81ea\u6cbb\u5dde')}, '861390931':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')}, '861390936':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5f20\u6396\u5e02')}, '861390937':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')}, '861390934':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e86\u9633\u5e02')}, '861390935':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u6b66\u5a01\u5e02')}, '861390938':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5929\u6c34\u5e02')}, '861390939':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9647\u5357\u5e02')}, '861395728':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')}, '861395729':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')}, '86139345':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')}, '86139349':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')}, '861457005':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')}, '861380674':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')}, '861380675':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')}, '861380676':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')}, '861380677':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')}, '861380670':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')}, '861380671':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')}, '861380672':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')}, '861380673':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')}, '861454106':{'en': 'Anqing, Anhui', 'zh': 
u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')}, '861454107':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')}, '861454104':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')}, '861454105':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')}, '861380678':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')}, '861380679':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')}, '861454100':{'en': 'Huaibei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5317\u5e02')}, '861454101':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')}, '861399527':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')}, '861396578':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')}, '861396579':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')}, '861396570':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')}, '861396571':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')}, '861396572':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')}, '861396573':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')}, '861396574':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')}, '861396575':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')}, '861396576':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')}, '861396577':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')}, '861398024':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')}, '86138433':{'en': 'Yanbian, Jilin', 'zh': u('\u5409\u6797\u7701\u5ef6\u8fb9\u671d\u9c9c\u65cf\u81ea\u6cbb\u5dde')}, '86138432':{'en': 'Jilin, Jilin', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')}, '86138431':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u957f\u6625\u5e02')}, '86138430':{'en': 'Changchun, 
Jilin', 'zh': u('\u5409\u6797\u7701\u957f\u6625\u5e02')}, '86138437':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u8fbd\u6e90\u5e02')}, '86138436':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u767d\u57ce\u5e02')}, '86138435':{'en': 'Tonghua, Jilin', 'zh': u('\u5409\u6797\u7701\u901a\u5316\u5e02')}, '86138434':{'en': 'Siping, Jilin', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')}, '861398583':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')}, '861398582':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')}, '86138439':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u767d\u5c71\u5e02')}, '86138438':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u677e\u539f\u5e02')}, '861398587':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')}, '861398586':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')}, '861398585':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')}, '861398584':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')}, '861398025':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')}, '861452798':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u514b\u5b5c\u52d2\u82cf\u67ef\u5c14\u514b\u5b5c\u81ea\u6cbb\u5dde')}, '861452791':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u5854\u57ce\u5730\u533a')}, '861452790':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')}, '861452793':{'en': 'Hotan, Xinjiang', 'zh': u('\u65b0\u7586\u548c\u7530\u5730\u533a')}, '861452792':{'en': 'Hami, Xinjiang', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')}, '861452795':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')}, '861452794':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')}, '861452797':{'en': 'Urumchi, Xinjiang', 'zh': 
u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')}, '861452796':{'en': 'Altay, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')}, '861452556':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')}, '861450489':{'en': 'Benxi, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u672c\u6eaa\u5e02')}, '861450488':{'en': 'Fushun, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u629a\u987a\u5e02')}, '861450487':{'en': 'Fushun, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u629a\u987a\u5e02')}, '861450486':{'en': 'Fushun, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u629a\u987a\u5e02')}, '861450485':{'en': 'Anshan, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')}, '861450484':{'en': 'Anshan, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')}, '861450483':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')}, '861450482':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')}, '861450481':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')}, '861450480':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')}, '861399524':{'en': 'Guyuan, Ningxia', 'zh': u('\u5b81\u590f\u56fa\u539f\u5e02')}, '861453825':{'en': 'Aba, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u963f\u575d\u85cf\u65cf\u7f8c\u65cf\u81ea\u6cbb\u5dde')}, '861453826':{'en': 'Aba, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u963f\u575d\u85cf\u65cf\u7f8c\u65cf\u81ea\u6cbb\u5dde')}, '861453827':{'en': 'Aba, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u963f\u575d\u85cf\u65cf\u7f8c\u65cf\u81ea\u6cbb\u5dde')}, '861453820':{'en': 'YaAn, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u96c5\u5b89\u5e02')}, '861453821':{'en': 'YaAn, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u96c5\u5b89\u5e02')}, '861452429':{'en': 'Huludao, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u846b\u82a6\u5c9b\u5e02')}, '861452428':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')}, '861452427':{'en': 'Panjin, Liaoning', 'zh': 
u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')}, '861452426':{'en': 'Jinzhou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u9526\u5dde\u5e02')}, '861452425':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')}, '861452424':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '861452423':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '861452422':{'en': 'Anshan, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')}, '861452421':{'en': 'Chaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u671d\u9633\u5e02')}, '861452420':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')}, '861458300':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u9e70\u6f6d\u5e02')}, '861458301':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u9e70\u6f6d\u5e02')}, '861458302':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u9e70\u6f6d\u5e02')}, '861458303':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u9e70\u6f6d\u5e02')}, '861458304':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u9e70\u6f6d\u5e02')}, '861458305':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u65b0\u4f59\u5e02')}, '861458306':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u65b0\u4f59\u5e02')}, '861458307':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u65b0\u4f59\u5e02')}, '861458308':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u65b0\u4f59\u5e02')}, '861458309':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u65b0\u4f59\u5e02')}, '86139900':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')}, '861399151':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')}, '861384967':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')}, '861390475':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')}, '861381248':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861381249':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861381246':{'en': '<NAME>', 'zh': 
u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')}, '861381247':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')}, '861381244':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')}, '861381245':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')}, '861381242':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')}, '861381243':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')}, '861381240':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')}, '861381241':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')}, '861454489':{'en': 'Xi<NAME>i', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')}, '861454488':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')}, '861454483':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')}, '861454482':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')}, '861454481':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')}, '861454480':{'en': 'Shiyan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')}, '861454487':{'en': 'Jingzhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u5dde\u5e02')}, '861454486':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')}, '861454485':{'en': 'Yichang, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')}, '861454484':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')}, '861452889':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')}, '861452888':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e3d\u6c5f\u5e02')}, '861452885':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4fdd\u5c71\u5e02')}, '861452884':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')}, '861452887':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u8fea\u5e86\u85cf\u65cf\u81ea\u6cbb\u5dde')}, '861452886':{'en': 'Nujiang, Yunnan', 'zh': 
u('\u4e91\u5357\u7701\u6012\u6c5f\u5088\u50f3\u65cf\u81ea\u6cbb\u5dde')}, '861452881':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6587\u5c71\u58ee\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')}, '861452880':{'en': 'Dali, Yunnan', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')}, '861452883':{'en': 'Lincang,
# NOTE(review): this span resumes mid-method — the assignment target for the first
# pipeline call (presumably ``pipeline_response = ``) sits on the preceding line.
self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response

# Anything other than HTTP 200 is an error; map_error raises a mapped exception
# for status codes present in error_map, otherwise fall through to the generic one.
if response.status_code not in [200]:
    map_error(status_code=response.status_code, response=response, error_map=error_map)
    raise HttpResponseError(response=response)

# ``cls`` is the optional caller-supplied response-transform hook.
if cls:
    return cls(pipeline_response, None, {})

enum_valid.metadata = {"url": "/queries/enum/green%20color"}  # type: ignore

@distributed_trace
def enum_null(self, *, enum_query: Optional[str] = None, **kwargs: Any) -> None:
    """Get null (no query parameter in url).

    :keyword enum_query: null string value. Possible values are: "red color", "green color",
     and "blue color".
    :paramtype enum_query: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Optional hook that post-processes the pipeline response for the caller.
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    # Map well-known status codes to specific azure-core exception types; callers
    # may extend/override the mapping via the ``error_map`` keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    request = build_queries_enum_null_request(
        enum_query=enum_query,
    )
    # Resolve the relative URL template against the client's base endpoint.
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

enum_null.metadata = {"url": "/queries/enum/null"}  # type: ignore

@distributed_trace
def byte_multi_byte(self, *, byte_query: Optional[bytearray] = None, **kwargs: Any) -> None:
    """Get '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array.

    :keyword byte_query: '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array.
    :paramtype byte_query: bytearray
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    request = build_queries_byte_multi_byte_request(
        byte_query=byte_query,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

byte_multi_byte.metadata = {"url": "/queries/byte/multibyte"}  # type: ignore

@distributed_trace
def byte_empty(self, **kwargs: Any) -> None:
    """Get '' as byte array.

    :keyword byte_query: '' as byte array. The default value is bytearray("", encoding="utf-8").
     Note that overriding this default value may result in unsupported behavior.
    :paramtype byte_query: bytearray
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    # Constant query value expected by this test endpoint; overriding it via
    # kwargs is possible but unsupported (see docstring).
    byte_query = kwargs.pop("byte_query", bytearray("", encoding="utf-8"))  # type: bytearray

    request = build_queries_byte_empty_request(
        byte_query=byte_query,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

byte_empty.metadata = {"url": "/queries/byte/empty"}  # type: ignore

@distributed_trace
def byte_null(self, *, byte_query: Optional[bytearray] = None, **kwargs: Any) -> None:
    """Get null as byte array (no query parameters in uri).

    :keyword byte_query: null as byte array (no query parameters in uri).
    :paramtype byte_query: bytearray
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    request = build_queries_byte_null_request(
        byte_query=byte_query,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

byte_null.metadata = {"url": "/queries/byte/null"}  # type: ignore

@distributed_trace
def date_valid(self, **kwargs: Any) -> None:
    """Get '2012-01-01' as date.

    :keyword date_query: '2012-01-01' as date. The default value is "2012-01-01". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype date_query: ~datetime.date
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    # Constant query value expected by this test endpoint.
    date_query = kwargs.pop("date_query", "2012-01-01")  # type: datetime.date

    request = build_queries_date_valid_request(
        date_query=date_query,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

date_valid.metadata = {"url": "/queries/date/2012-01-01"}  # type: ignore

@distributed_trace
def date_null(self, *, date_query: Optional[datetime.date] = None, **kwargs: Any) -> None:
    """Get null as date - this should result in no query parameters in uri.

    :keyword date_query: null as date (no query parameters in uri).
    :paramtype date_query: ~datetime.date
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    request = build_queries_date_null_request(
        date_query=date_query,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

date_null.metadata = {"url": "/queries/date/null"}  # type: ignore

@distributed_trace
def date_time_valid(self, **kwargs: Any) -> None:
    """Get '2012-01-01T01:01:01Z' as date-time.

    :keyword date_time_query: '2012-01-01T01:01:01Z' as date-time. The default value is
     "2012-01-01T01:01:01Z". Note that overriding this default value may result in unsupported
     behavior.
    :paramtype date_time_query: ~datetime.datetime
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    # Constant query value expected by this test endpoint.
    date_time_query = kwargs.pop("date_time_query", "2012-01-01T01:01:01Z")  # type: datetime.datetime

    request = build_queries_date_time_valid_request(
        date_time_query=date_time_query,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

date_time_valid.metadata = {"url": "/queries/datetime/2012-01-01T01%3A01%3A01Z"}  # type: ignore

@distributed_trace
def date_time_null(self, *, date_time_query: Optional[datetime.datetime] = None, **kwargs: Any) -> None:
    """Get null as date-time, should result in no query parameters in uri.

    :keyword date_time_query: null as date-time (no query parameters).
    :paramtype date_time_query: ~datetime.datetime
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    request = build_queries_date_time_null_request(
        date_time_query=date_time_query,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

date_time_null.metadata = {"url": "/queries/datetime/null"}  # type: ignore

@distributed_trace
def array_string_csv_valid(self, *, array_query: Optional[List[str]] = None, **kwargs: Any) -> None:
    """Get an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' , null, ''] using the
    csv-array format.

    :keyword array_query: an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' ,
     null, ''] using the csv-array format.
    :paramtype array_query: list[str]
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    request = build_queries_array_string_csv_valid_request(
        array_query=array_query,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

array_string_csv_valid.metadata = {"url": "/queries/array/csv/string/valid"}  # type: ignore

@distributed_trace
def array_string_csv_null(self, *, array_query: Optional[List[str]] = None, **kwargs: Any) -> None:
    """Get a null array of string using the csv-array format.

    :keyword array_query: a null array of string using the csv-array format.
    :paramtype array_query: list[str]
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    request = build_queries_array_string_csv_null_request(
        array_query=array_query,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

array_string_csv_null.metadata = {"url": "/queries/array/csv/string/null"}  # type: ignore

@distributed_trace
def array_string_csv_empty(self, *, array_query: Optional[List[str]] = None, **kwargs: Any) -> None:
    """Get an empty array [] of string using the csv-array format.

    :keyword array_query: an empty array [] of string using the csv-array format.
    :paramtype array_query: list[str]
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    request = build_queries_array_string_csv_empty_request(
        array_query=array_query,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

array_string_csv_empty.metadata = {"url": "/queries/array/csv/string/empty"}  # type: ignore

@distributed_trace
def array_string_no_collection_format_empty(
    self, *, array_query: Optional[List[str]] = None, **kwargs: Any
) -> None:
    """Array query has no defined collection format, should default to csv. Pass in ['hello',
    'nihao', 'bonjour'] for the 'arrayQuery' parameter to the service.

    :keyword array_query: Array-typed query parameter. Pass in ['hello', 'nihao', 'bonjour'].
    :paramtype array_query: list[str]
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    request = build_queries_array_string_no_collection_format_empty_request(
        array_query=array_query,
    )
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

array_string_no_collection_format_empty.metadata = {"url": "/queries/array/none/string/empty"}  # type: ignore

@distributed_trace
def array_string_ssv_valid(self, *, array_query: Optional[List[str]] = None, **kwargs: Any) -> None:
    # NOTE(review): definition truncated here — the docstring and body continue
    # beyond this span.
    """Get an
lists the names, descriptions, and values of the special request parameters that the
    CreateQueue action uses:

    DelaySeconds - The length of time, in seconds, for which the delivery of all
    messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds
    (15 minutes). The default is 0 (zero).

    MaximumMessageSize - The limit of how many bytes a message can contain before
    Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to
    262,144 bytes (256 KiB). The default is 262,144 (256 KiB).

    MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS
    retains a message. Valid values: An integer from 60 seconds (1 minute) to
    1,209,600 seconds (14 days). The default is 345,600 (4 days).

    Policy - The queue's policy. A valid AWS policy. For more information about
    policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

    ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a
    ``ReceiveMessage`` action waits for a message to arrive. Valid values: An
    integer from 0 to 20 (seconds). The default is 0 (zero).

    RedrivePolicy - The parameters for the dead letter queue functionality of the
    source queue. For more information about the redrive policy and dead letter
    queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer
    Guide.

    Note: The dead letter queue of a FIFO queue must also be a FIFO queue.
    Similarly, the dead letter queue of a standard queue must also be a standard
    queue.

    VisibilityTimeout - The visibility timeout for the queue. Valid values: An
    integer from 0 to 43,200 (12 hours). The default is 30. For more information
    about the visibility timeout, see Visibility Timeout in the Amazon SQS
    Developer Guide.

    The following attributes apply only to server-side-encryption:

    KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon
    SQS or a custom CMK. For more information, see Key Terms.
    While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs,
    the alias of a custom CMK can, for example, be alias/MyAlias. For more
    examples, see KeyId in the AWS Key Management Service API Reference.

    KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon
    SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS
    again. An integer representing seconds, between 60 seconds (1 minute) and
    86,400 seconds (24 hours). The default is 300 (5 minutes). A shorter time
    period provides better security but results in more calls to KMS which incur
    charges after Free Tier. For more information, see How Does the Data Key Reuse
    Period Work?.

    The following attributes apply only to FIFO (first-in-first-out) queues:

    FifoQueue - Designates a queue as FIFO. Valid values: true, false. You can
    provide this attribute only during queue creation. You can't change it for an
    existing queue. When you set this attribute, you must also provide the
    MessageGroupId for your messages explicitly. For more information, see FIFO
    Queue Logic in the Amazon SQS Developer Guide.

    ContentBasedDeduplication - Enables content-based deduplication. Valid values:
    true, false. For more information, see Exactly-Once Processing in the Amazon
    SQS Developer Guide.

    Every message must have a unique MessageDeduplicationId. You may provide a
    MessageDeduplicationId explicitly. If you aren't able to provide a
    MessageDeduplicationId and you enable ContentBasedDeduplication for your queue,
    Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the
    body of the message (but not the attributes of the message). If you don't
    provide a MessageDeduplicationId and the queue doesn't have
    ContentBasedDeduplication set, the action fails with an error. If the queue has
    ContentBasedDeduplication set, your MessageDeduplicationId overrides the
    generated one.

    When ContentBasedDeduplication is in effect, messages with identical content
    sent within the deduplication interval are treated as duplicates and only one
    copy of the message is delivered. If you send one message with
    ContentBasedDeduplication enabled and then another message with a
    MessageDeduplicationId that is the same as the one generated for the first
    MessageDeduplicationId, the two messages are treated as duplicates and only one
    copy of the message is delivered.

    Any other valid special request parameters (such as the following) are ignored:
    ApproximateNumberOfMessages
    ApproximateNumberOfMessagesDelayed
    ApproximateNumberOfMessagesNotVisible
    CreatedTimestamp
    LastModifiedTimestamp
    QueueArn

    (string) --
    (string) --

    :rtype: dict
    :return: {
        'QueueUrl': 'string'
    }

    :returns:
    If you don't provide a value for an attribute, the queue is created with the
    default value for the attribute.
    If you delete a queue, you must wait at least 60 seconds before creating a
    queue with the same name.
    """
    pass


def delete_message(QueueUrl=None, ReceiptHandle=None):
    """
    Deletes the specified message from the specified queue. You specify the message
    by using the message's receipt handle and not the MessageId you receive when
    you send the message. Even if the message is locked by another reader due to
    the visibility timeout setting, it is still deleted from the queue. If you
    leave a message in the queue for longer than the queue's configured retention
    period, Amazon SQS automatically deletes the message.

    See also: AWS API Documentation

    :example: response = client.delete_message(
        QueueUrl='string',
        ReceiptHandle='string'
    )

    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
    The URL of the Amazon SQS queue from which messages are deleted.
    Queue URLs are case-sensitive.

    :type ReceiptHandle: string
    :param ReceiptHandle: [REQUIRED]
    The receipt handle associated with the message to delete.
    """
    pass


def delete_message_batch(QueueUrl=None, Entries=None):
    """
    Deletes up to ten messages from the specified queue. This is a batch version of
    ``DeleteMessage``. The result of the action on each message is reported
    individually in the response.

    See also: AWS API Documentation

    :example: response = client.delete_message_batch(
        QueueUrl='string',
        Entries=[
            {
                'Id': 'string',
                'ReceiptHandle': 'string'
            },
        ]
    )

    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
    The URL of the Amazon SQS queue from which messages are deleted.
    Queue URLs are case-sensitive.

    :type Entries: list
    :param Entries: [REQUIRED]
    A list of receipt handles for the messages to be deleted.

    (dict) -- Encloses a receipt handle and an identifier for it.
    Id (string) -- [REQUIRED] An identifier for this particular receipt handle.
    This is used to communicate the result.
    Note: The Ids of a batch request need to be unique within a request.
    ReceiptHandle (string) -- [REQUIRED] A receipt handle.

    :rtype: dict
    :return: {
        'Successful': [
            {
                'Id': 'string'
            },
        ],
        'Failed': [
            {
                'Id': 'string',
                'SenderFault': True|False,
                'Code': 'string',
                'Message': 'string'
            },
        ]
    }
    """
    pass


def delete_queue(QueueUrl=None):
    """
    Deletes the queue specified by the QueueUrl, even if the queue is empty. If the
    specified queue doesn't exist, Amazon SQS returns a successful response.
    When you delete a queue, the deletion process takes up to 60 seconds. Requests
    you send involving that queue during the 60 seconds might succeed. For example,
    a ``SendMessage`` request might succeed, but after 60 seconds the queue and the
    message you sent no longer exist.
    When you delete a queue, you must wait at least 60 seconds before creating a
    queue with the same name.

    See also: AWS API Documentation

    :example: response = client.delete_queue(
        QueueUrl='string'
    )

    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
    The URL of the Amazon SQS queue to delete.
    Queue URLs are case-sensitive.
""" pass def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None): """ Generate a presigned url given a client, its method, and arguments :type ClientMethod: string :param ClientMethod: The client method to presign for :type Params: dict :param Params: The parameters normally passed to ClientMethod. :type ExpiresIn: int :param ExpiresIn: The number of seconds the presigned url is valid for. By default it expires in an hour (3600 seconds) :type HttpMethod:
# # Copyright (c) 2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """This is an example application for compressing image classification models. The application borrows its main flow code from torchvision's ImageNet classification training sample application (https://github.com/pytorch/examples/tree/master/imagenet). We tried to keep it similar, in order to make it familiar and easy to understand. Integrating compression is very simple: simply add invocations of the appropriate compression_scheduler callbacks, for each stage in the training. The training skeleton looks like the pseudo code below. The boiler-plate Pytorch classification training is speckled with invocations of CompressionScheduler. 
For each epoch: compression_scheduler.on_epoch_begin(epoch) train() validate() save_checkpoint() compression_scheduler.on_epoch_end(epoch) train(): For each training step: compression_scheduler.on_minibatch_begin(epoch) output = model(input) loss = criterion(output, target) compression_scheduler.before_backward_pass(epoch) loss.backward() optimizer.step() compression_scheduler.on_minibatch_end(epoch) This exmple application can be used with torchvision's ImageNet image classification models, or with the provided sample models: - ResNet for CIFAR: https://github.com/junyuseu/pytorch-cifar-models - MobileNet for ImageNet: https://github.com/marvis/pytorch-mobilenet """ import math import time import os import sys import random import traceback import logging from collections import OrderedDict from functools import partial import numpy as np import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.optim import torch.utils.data import torchnet.meter as tnt script_dir = os.path.dirname(__file__) module_path = os.path.abspath(os.path.join(script_dir, '..', '..')) try: import distiller except ImportError: sys.path.append(module_path) import distiller import apputils from distiller.data_loggers import * import distiller.quantization as quantization from models import ALL_MODEL_NAMES, create_model import parser # Logger handle msglogger = None def main(): global msglogger # Parse arguments prsr = parser.getParser() distiller.knowledge_distillation.add_distillation_args(prsr, ALL_MODEL_NAMES, True) args = prsr.parse_args() if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) msglogger = apputils.config_pylogger(os.path.join(script_dir, 'logging.conf'), args.name, args.output_dir) # Log various details about the execution environment. It is sometimes useful # to refer to past experiment executions and this information may be useful. 
apputils.log_execution_env_state(sys.argv, gitroot=module_path) msglogger.debug("Distiller: %s", distiller.__version__) start_epoch = 0 best_epochs = [distiller.MutableNamedTuple({'epoch': 0, 'top1': 0, 'sparsity': 0}) for i in range(args.num_best_scores)] if args.deterministic: # Experiment reproducibility is sometimes important. Pete Warden expounded about this # in his blog: https://petewarden.com/2018/03/19/the-machine-learning-reproducibility-crisis/ # In Pytorch, support for deterministic execution is still a bit clunky. if args.workers > 1: msglogger.error('ERROR: Setting --deterministic requires setting --workers/-j to 0 or 1') exit(1) # Use a well-known seed, for repeatability of experiments torch.manual_seed(0) random.seed(0) np.random.seed(0) cudnn.deterministic = True else: # This issue: https://github.com/pytorch/pytorch/issues/3659 # Implies that cudnn.benchmark should respect cudnn.deterministic, but empirically we see that # results are not re-produced when benchmark is set. So enabling only if deterministic mode disabled. 
cudnn.benchmark = True if args.cpu or not torch.cuda.is_available(): # Set GPU index to -1 if using CPU args.device = 'cpu' args.gpus = -1 else: args.device = 'cuda' if args.gpus is not None: try: args.gpus = [int(s) for s in args.gpus.split(',')] except ValueError: msglogger.error('ERROR: Argument --gpus must be a comma-separated list of integers only') exit(1) available_gpus = torch.cuda.device_count() for dev_id in args.gpus: if dev_id >= available_gpus: msglogger.error('ERROR: GPU device ID {0} requested, but only {1} devices available' .format(dev_id, available_gpus)) exit(1) # Set default device in case the first one on the list != 0 torch.cuda.set_device(args.gpus[0]) # Infer the dataset from the model name args.dataset = 'cifar10' if 'cifar' in args.arch else 'imagenet' args.num_classes = 10 if args.dataset == 'cifar10' else 1000 if args.earlyexit_thresholds: args.num_exits = len(args.earlyexit_thresholds) + 1 args.loss_exits = [0] * args.num_exits args.losses_exits = [] args.exiterrors = [] # Create the model model = create_model(args.pretrained, args.dataset, args.arch, parallel=not args.load_serialized, device_ids=args.gpus) compression_scheduler = None # Create a couple of logging backends. TensorBoardLogger writes log files in a format # that can be read by Google's Tensor Board. PythonLogger writes to the Python logger. 
tflogger = TensorBoardLogger(msglogger.logdir) pylogger = PythonLogger(msglogger) # capture thresholds for early-exit training if args.earlyexit_thresholds: msglogger.info('=> using early-exit threshold values of %s', args.earlyexit_thresholds) # We can optionally resume from a checkpoint if args.resume: model, compression_scheduler, start_epoch = apputils.load_checkpoint(model, chkpt_file=args.resume) model.to(args.device) # Define loss function (criterion) and optimizer criterion = nn.CrossEntropyLoss().to(args.device) optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) msglogger.info('Optimizer Type: %s', type(optimizer)) msglogger.info('Optimizer Args: %s', optimizer.defaults) if args.ADC: return automated_deep_compression(model, criterion, optimizer, pylogger, args) # This sample application can be invoked to produce various summary reports. if args.summary: return summarize_model(model, args.dataset, which_summary=args.summary) # Load the datasets: the dataset to load is inferred from the model name passed # in args.arch. The default dataset is ImageNet, but if args.arch contains the # substring "_cifar", then cifar10 is used. 
train_loader, val_loader, test_loader, _ = apputils.load_data( args.dataset, os.path.expanduser(args.data), args.batch_size, args.workers, args.validation_size, args.deterministic) msglogger.info('Dataset sizes:\n\ttraining=%d\n\tvalidation=%d\n\ttest=%d', len(train_loader.sampler), len(val_loader.sampler), len(test_loader.sampler)) activations_collectors = create_activation_stats_collectors(model, *args.activation_stats) if args.sensitivity is not None: sensitivities = np.arange(args.sensitivity_range[0], args.sensitivity_range[1], args.sensitivity_range[2]) return sensitivity_analysis(model, criterion, test_loader, pylogger, args, sensitivities) if args.evaluate: return evaluate_model(model, criterion, test_loader, pylogger, activations_collectors, args, compression_scheduler) if args.compress: # The main use-case for this sample application is CNN compression. Compression # requires a compression schedule configuration file in YAML. compression_scheduler = distiller.file_config(model, optimizer, args.compress, compression_scheduler) # Model is re-transferred to GPU in case parameters were added (e.g. 
PACTQuantizer) model.to(args.device) elif compression_scheduler is None: compression_scheduler = distiller.CompressionScheduler(model) if args.thinnify: #zeros_mask_dict = distiller.create_model_masks_dict(model) assert args.resume is not None, "You must use --resume to provide a checkpoint file to thinnify" distiller.remove_filters(model, compression_scheduler.zeros_mask_dict, args.arch, args.dataset, optimizer=None) apputils.save_checkpoint(0, args.arch, model, optimizer=None, scheduler=compression_scheduler, name="{}_thinned".format(args.resume.replace(".pth.tar", "")), dir=msglogger.logdir) print("Note: your model may have collapsed to random inference, so you may want to fine-tune") return args.kd_policy = None if args.kd_teacher: teacher = create_model(args.kd_pretrained, args.dataset, args.kd_teacher, device_ids=args.gpus) if args.kd_resume: teacher, _, _ = apputils.load_checkpoint(teacher, chkpt_file=args.kd_resume) dlw = distiller.DistillationLossWeights(args.kd_distill_wt, args.kd_student_wt, args.kd_teacher_wt) args.kd_policy = distiller.KnowledgeDistillationPolicy(model, teacher, args.kd_temp, dlw) compression_scheduler.add_policy(args.kd_policy, starting_epoch=args.kd_start_epoch, ending_epoch=args.epochs, frequency=1) msglogger.info('\nStudent-Teacher knowledge distillation enabled:') msglogger.info('\tTeacher Model: %s', args.kd_teacher) msglogger.info('\tTemperature: %s', args.kd_temp) msglogger.info('\tLoss Weights (distillation | student | teacher): %s', ' | '.join(['{:.2f}'.format(val) for val in dlw])) msglogger.info('\tStarting from Epoch: %s', args.kd_start_epoch) for epoch in range(start_epoch, start_epoch + args.epochs): # This is the main training loop. 
msglogger.info('\n') if compression_scheduler: compression_scheduler.on_epoch_begin(epoch) # Train for one epoch with collectors_context(activations_collectors["train"]) as collectors: train(train_loader, model, criterion, optimizer, epoch, compression_scheduler, loggers=[tflogger, pylogger], args=args) distiller.log_weights_sparsity(model, epoch, loggers=[tflogger, pylogger]) distiller.log_activation_statsitics(epoch, "train", loggers=[tflogger], collector=collectors["sparsity"]) if args.masks_sparsity: msglogger.info(distiller.masks_sparsity_tbl_summary(model, compression_scheduler)) # evaluate on validation set with collectors_context(activations_collectors["valid"]) as collectors: top1, top5, vloss = validate(val_loader, model, criterion, [pylogger], args, epoch) distiller.log_activation_statsitics(epoch, "valid", loggers=[tflogger], collector=collectors["sparsity"]) save_collectors_data(collectors, msglogger.logdir) stats = ('Peformance/Validation/', OrderedDict([('Loss', vloss), ('Top1', top1), ('Top5', top5)])) distiller.log_training_progress(stats, None, epoch, steps_completed=0, total_steps=1, log_freq=1, loggers=[tflogger]) if compression_scheduler: compression_scheduler.on_epoch_end(epoch, optimizer) # Update the list of top scores achieved so far, and save the checkpoint is_best = top1 > best_epochs[-1].top1 if top1 > best_epochs[0].top1: best_epochs[0].epoch = epoch best_epochs[0].top1 = top1 # Keep best_epochs sorted such that best_epochs[0] is the lowest top1 in the best_epochs list best_epochs = sorted(best_epochs, key=lambda score: score.top1) for score in reversed(best_epochs): if score.top1 > 0: msglogger.info('==> Best Top1: %.3f on Epoch: %d', score.top1, score.epoch) apputils.save_checkpoint(epoch, args.arch, model, optimizer, compression_scheduler, best_epochs[-1].top1, is_best, args.name, msglogger.logdir) # Finally run results on the test set test(test_loader, model, criterion, [pylogger], activations_collectors, args=args) OVERALL_LOSS_KEY 
= 'Overall Loss' OBJECTIVE_LOSS_KEY = 'Objective Loss' def train(train_loader, model, criterion, optimizer, epoch, compression_scheduler, loggers, args): """Training loop for one epoch.""" losses = OrderedDict([(OVERALL_LOSS_KEY, tnt.AverageValueMeter()), (OBJECTIVE_LOSS_KEY, tnt.AverageValueMeter())]) classerr = tnt.ClassErrorMeter(accuracy=True, topk=(1, 5)) batch_time = tnt.AverageValueMeter() data_time = tnt.AverageValueMeter() # For Early Exit, we define statistics for each exit # So exiterrors is analogous to classerr for the non-Early Exit case if args.earlyexit_lossweights: args.exiterrors = [] for exitnum in range(args.num_exits): args.exiterrors.append(tnt.ClassErrorMeter(accuracy=True, topk=(1, 5))) total_samples = len(train_loader.sampler) batch_size = train_loader.batch_size steps_per_epoch = math.ceil(total_samples / batch_size) msglogger.info('Training epoch: %d samples (%d per mini-batch)', total_samples, batch_size) # Switch to train mode model.train() end = time.time() for train_step, (inputs, target) in enumerate(train_loader): # Measure data loading time data_time.add(time.time() - end) inputs, target = inputs.to(args.device), target.to(args.device) # Execute the forward phase, compute the output and measure loss if compression_scheduler: compression_scheduler.on_minibatch_begin(epoch, train_step, steps_per_epoch, optimizer) if not hasattr(args, 'kd_policy') or args.kd_policy is None: output = model(inputs) else: output = args.kd_policy.forward(inputs) if not args.earlyexit_lossweights: loss =
g_tankActiveCamouflage['historical'].get(vDesc.type.compactDescr) if activeCamo is None: activeCamo = g_tankActiveCamouflage.get(vDesc.type.compactDescr, 0) camouflageID = vDesc.camouflages[activeCamo][0] camouflageDesc = customization['camouflages'].get(camouflageID) if camouflageDesc is not None: __componentIDs[vehicle.invID]['camouflageTexture'] = camouflageDesc['texture'] if vState == 'undamaged': __emblemsAlpha[vehicle.invID] = ClientHangarSpace._CFG['emblems_alpha_undamaged'] __isVehicleDestroyed[vehicle.invID] = False else: __emblemsAlpha[vehicle.invID] = ClientHangarSpace._CFG['emblems_alpha_damaged'] __isVehicleDestroyed[vehicle.invID] = True resources = __componentIDs[vehicle.invID].values() splineDesc = vDesc.chassis['splineDesc'] if splineDesc is not None: resources.extend(splineDesc.values()) BigWorld.loadResourceListBG(tuple(resources), partial(__onResourcesLoaded, __curBuildInd[vehicle.invID], vehicle.invID)) return def __onResourcesLoaded(buildInd, invID, resourceRefs): debug(str(6)+ ' resource load '+str(angarmodellist[invID]['vehicle'].name)+' in '+str(angarmodellist[invID]['spawn'])) if buildInd != __curBuildInd[invID]: return failedIDs = resourceRefs.failedIDs resources = __resources[invID] succesLoaded = True for resID, resource in resourceRefs.items(): if resID not in failedIDs: resources[resID] = resource else: LOG_ERROR('Could not load %s' % resID) succesLoaded = False if succesLoaded: __setupModel(buildInd,invID) def __setupModel(buildIdx,invID): debug(str(7)+ ' modeling '+str(angarmodellist[invID]['vehicle'].name)+' in '+str(angarmodellist[invID]['spawn'])) model[invID] = __assembleModel(invID) scale = angarmodellist[invID]['scale'] angles = (angarmodellist[invID]['angles']['yaw'],angarmodellist[invID]['angles']['pitch'],angarmodellist[invID]['angles']['rotate']) pos = (angarmodellist[invID]['pos']['x'],angarmodellist[invID]['pos']['z'],angarmodellist[invID]['pos']['y']) model[invID].addMotor(BigWorld.Servo(_createMatrix(scale, angles, 
pos))) BigWorld.addModel(model[invID]) BigWorld.callback(0.0, partial(__doFinalSetup, buildIdx, model[invID], True, invID)) def __assembleModel(invID): debug(str(8)+ ' modeling stage 2 '+str(angarmodellist[invID]['vehicle'].name)+' in '+str(angarmodellist[invID]['spawn'])) resources = __resources[invID] compIDs = __componentIDs[invID] chassis = resources[compIDs['chassis']] hull = resources[compIDs['hull']] turret = resources[compIDs['turret']] gun = resources[compIDs['gun']] __models[invID] = (chassis, hull, turret, gun) vehicle = angarmodellist[invID]['vehicle'] vehicle.turretMatrix = Math.WGAdaptiveMatrixProvider() vehicle.gunMatrix = Math.WGAdaptiveMatrixProvider() chassis.node('V').attach(hull) turretJointName = __vDesc[invID].hull['turretHardPoints'][0] hull.node(turretJointName, vehicle.turretMatrix).attach(turret) turret.node('HP_gunJoint', vehicle.gunMatrix).attach(gun) __setupEmblems(__vDesc[invID], invID) __vehicleStickers[invID].show = False if not __isVehicleDestroyed[invID]: fashion = BigWorld.WGVehicleFashion(False, angarmodellist[invID]['scale']) VehicleAppearance.setupTracksFashion(fashion, __vDesc[invID], __isVehicleDestroyed[invID]) chassis.wg_fashion = fashion fashion.initialUpdateTracks(1.0, 10.0) VehicleAppearance.setupSplineTracks(fashion, __vDesc[invID], chassis, __resources[invID]) for model in __models[invID]: model.visible = False model.visibleAttachments = False return chassis def _createMatrix(scale, angles, pos): mat = Math.Matrix() mat.setScale((scale, scale, scale)) mat2 = Math.Matrix() mat2.setTranslate(pos) mat3 = Math.Matrix() mat3.setRotateYPR(angles) mat.preMultiply(mat3) mat.postMultiply(mat2) return mat def __setupEmblems(vDesc,invID): debug(str(9)+ ' modeling setupEmblems '+str(angarmodellist[invID]['vehicle'].name)+' in '+str(angarmodellist[invID]['spawn'])) if __vehicleStickers[invID] is not None: __vehicleStickers[invID].detach() insigniaRank = 0 vehicleDossier = 
g_itemsCache.items.getVehicleDossier(vDesc.type.compactDescr) insigniaRank = vehicleDossier.getRandomStats().getAchievement(MARK_ON_GUN_RECORD).getValue() #print 0,invID, insigniaRank __vehicleStickers[invID] = VehicleStickers.VehicleStickers(vDesc, insigniaRank) #print 1,invID, __vehicleStickers[invID] __vehicleStickers[invID].alpha = __emblemsAlpha[invID] #print 2,invID, __vehicleStickers[invID].alpha chassis = __models[invID][0] hull = __models[invID][1] turret = __models[invID][2] gun = __models[invID][3] #print 3,invID,chassis,hull,turret,gun turretJointName = vDesc.hull['turretHardPoints'][0] #print 4,invID,turretJointName modelsWithParents = ((hull, chassis.node('V')), (turret, hull.node(turretJointName)), (gun, turret.node('HP_gunJoint'))) #print 5,invID,modelsWithParents __vehicleStickers[invID].attach(modelsWithParents, __isVehicleDestroyed[invID], False) #print 6,invID,__vehicleStickers[invID] BigWorld.player().stats.get('clanDBID', __onClanDBIDRetrieved) #print 7,invID,mYclanID __vehicleStickers[invID].setClanID(mYclanID) #print 8,invID,__vehicleStickers[invID] return def __onClanDBIDRetrieved(_, clanID): global mYclanID mYclanID = clanID #print 'clanid', clanID def __doFinalSetup(buildIdx, model, delModel,invID): debug(str(10)+ ' modeling Final Stage '+str(angarmodellist[invID]['vehicle'].name)+' in '+str(angarmodellist[invID]['spawn'])) if delModel: BigWorld.delModel(model) if model.attached: BigWorld.callback(0.0, partial(__doFinalSetup, buildIdx, model, False, invID)) return elif buildIdx != __curBuildInd[invID]: return else: entity = BigWorld.entity(vEntityId) if entity: for m in __models[invID]: m.visible = True m.visibleAttachments = True if __vehicleStickers[invID]: __vehicleStickers[invID].show = True entity.modele[invID] = model entity.modele[invID].delMotor(entity.modele[invID].motors[0]) scale = angarmodellist[invID]['scale'] angles = 
(angarmodellist[invID]['angles']['yaw'],angarmodellist[invID]['angles']['pitch'],angarmodellist[invID]['angles']['rotate']) pos = (angarmodellist[invID]['pos']['x'],angarmodellist[invID]['pos']['z'],angarmodellist[invID]['pos']['y']) #debug('10 scale = ' + str(scale) +' angles = ' + str(angles) +' pos = ' + str(pos)) entity.modele[invID].addMotor(BigWorld.Servo(_createMatrix(scale, angles, pos))) __isLoaded[invID] = True updateCamouflage(invID) #### turret_rotate(vehicle = angarmodellist[invID]['vehicle'], endPos = None) #### BigWorld.addModel(entity.modele[invID]) if SelfinvID == invID: entity.modele[invID].visible = False entity.modele[invID].visibleAttachments = False if __vDesc[invID] is not None and 'observer' in __vDesc[invID].type.tags: model[invID].visible = False model[invID].visibleAttachments = False return def __isOutOfLimits(angle, limits): if limits is None: return elif abs(limits[1] - angle) < 1e-05 or abs(limits[0] - angle) < 1e-05: return else: dpi = 2 * pi minDiff = fmod(limits[0] - angle + dpi, dpi) maxDiff = fmod(limits[1] - angle + dpi, dpi) if minDiff > maxDiff: return elif minDiff < dpi - maxDiff: return limits[0] return limits[1] return def updateCamouflage(invID, camouflageID = None): debug(str(11)+ ' modeling updateCamouflage '+str(angarmodellist[invID]['vehicle'].name)+' in '+str(angarmodellist[invID]['spawn'])) texture = '' colors = [0,0,0,0] gloss = 0 weights = Math.Vector4(1, 0, 0, 0) camouflagePresent = True vDesc = __vDesc[invID] if vDesc is None: return else: if camouflageID is None and vDesc.camouflages is not None: activeCamo = g_tankActiveCamouflage['historical'].get(vDesc.type.compactDescr) if activeCamo is None: activeCamo = g_tankActiveCamouflage.get(vDesc.type.compactDescr, 0) camouflageID = vDesc.camouflages[activeCamo][0] if camouflageID is None: for camouflageData in vDesc.camouflages: if camouflageData[0] is not None: camouflageID = camouflageData[0] break customization = 
items.vehicles.g_cache.customization(vDesc.type.customizationNationID) defaultTiling = None if camouflageID is not None and customization is not None: camouflage = customization['camouflages'].get(camouflageID) if camouflage is not None: camouflagePresent = True texture = camouflage['texture'] colors = camouflage['colors'] weights = Math.Vector4(*[ (c >> 24) / 255.0 for c in colors ]) defaultTiling = camouflage['tiling'].get(vDesc.type.compactDescr) if __isVehicleDestroyed[invID]: weights *= 0.1 if vDesc.camouflages is not None: _, camStartTime, camNumDays = vDesc.camouflages[g_tankActiveCamouflage.get(vDesc.type.compactDescr, 0)] if camNumDays > 0: timeAmount = (time.time() - camStartTime) / (camNumDays * 86400) if timeAmount > 1.0: weights *= 1.0 elif timeAmount > 0: weights *= (1.0 - timeAmount) * (1.0 - 1.0) + 1.0 for model in __models[invID]: exclusionMap = vDesc.type.camouflageExclusionMask tiling = defaultTiling if tiling is None: tiling = vDesc.type.camouflageTiling if model == __models[invID][0]: compDesc = vDesc.chassis elif model == __models[invID][1]: compDesc = vDesc.hull elif model == __models[invID][2]: compDesc = vDesc.turret elif model == __models[invID][3]: compDesc = vDesc.gun else: compDesc = None if compDesc is not None: coeff = compDesc.get('camouflageTiling') if coeff is not None: if tiling is not None: tiling = (tiling[0] * coeff[0], tiling[1] * coeff[1], tiling[2] * coeff[2], tiling[3] * coeff[3]) else: tiling = coeff if compDesc.get('camouflageExclusionMask'): exclusionMap = compDesc['camouflageExclusionMask'] useCamouflage = camouflagePresent and exclusionMap and texture fashion = None if hasattr(model, 'wg_fashion'): fashion = model.wg_fashion elif hasattr(model, 'wg_gunRecoil'): fashion = model.wg_gunRecoil elif useCamouflage: fashion = model.wg_baseFashion = BigWorld.WGBaseFashion() elif hasattr(model, 'wg_baseFashion'): delattr(model, 'wg_baseFashion') if fashion is not None: if useCamouflage: if BigWorld.wg_getProductVersion() == '0, 
9, 4, 0': fashion.setCamouflage(texture, exclusionMap, tiling, colors[0], colors[1], colors[2], colors[3], gloss, weights) if BigWorld.wg_getProductVersion() == '0, 9, 5, 0': fashion.setCamouflage(texture, exclusionMap, tiling, colors[0], colors[1], colors[2], colors[3], weights) else: fashion.removeCamouflage() return OLDremoveHangarShadowMap = ClientHangarSpace._VehicleAppearance._VehicleAppearance__removeHangarShadowMap OLDsetupHangarShadowMap = ClientHangarSpace._VehicleAppearance._VehicleAppearance__setupHangarShadowMap def NEWremoveHangarShadowMap(self): if angar_position_sorted: return else: OLDremoveHangarShadowMap(self) def NEWsetupHangarShadowMap(self): if angar_position_sorted: return else: OLDsetupHangarShadowMap(self) ClientHangarSpace._VehicleAppearance._VehicleAppearance__removeHangarShadowMap = NEWremoveHangarShadowMap ClientHangarSpace._VehicleAppearance._VehicleAppearance__setupHangarShadowMap = NEWsetupHangarShadowMap def turret_rotate(vehicle, endPos = None): ########################## invID = vehicle.invID if torsus_winter_mod == True: endPos = Math.Vector3(70,0,55) else: endPos = Math.Vector3(stdpos['v_start_pos']) pos = (angarmodellist[invID]['pos']['x'],angarmodellist[invID]['pos']['z'],angarmodellist[invID]['pos']['y']) scale = angarmodellist[invID]['scale'] angles = (angarmodellist[invID]['angles']['yaw'],angarmodellist[invID]['angles']['pitch'],angarmodellist[invID]['angles']['rotate']) startPos = _createMatrix(scale, angles, pos) __turretYaw, __gunPitch = getShotAngles(vehicle.descriptor, startPos, (0, 0), endPos, False) turretYawLimits = __vDesc[invID].gun['turretYawLimits'] closestLimit = __isOutOfLimits(__turretYaw, turretYawLimits) if closestLimit is not None: __turretYaw = closestLimit m = Math.Matrix() if angarmodellist[invID]['turret'] == True: m.setRotateYPR(Math.Vector3(__turretYaw, 0, 0)) else: m.setRotateYPR(Math.Vector3(0, 0, 0)) try: vehicle.turretMatrix.setStaticTransform(m) except: pass m1 = Math.Matrix() if 
angarmodellist[invID]['turret'] == True: m1.setRotateYPR(Math.Vector3(0, __gunPitch-0.2, 0)) else: m1.setRotateYPR(Math.Vector3(0, 0, 0)) try: vehicle.gunMatrix.setStaticTransform(m1) except: pass ##################### print '' oldFinalSetup = ClientHangarSpace._VehicleAppearance._VehicleAppearance__doFinalSetup def newFinalSetup(self, buildIdx, model, delModel): oldFinalSetup(self, buildIdx, model, delModel) entity = BigWorld.entity(self._VehicleAppearance__vEntityId) if entity: corx, corz, cory = ClientHangarSpace._CFG['v_start_pos'] chassis_ext = 0 hull_ext = 0 turret_ext = 0 gun_ext = 0 if spawned['model'] < g_xmlSetting['setup'].readInt('count_models'): debug('Model spawn count ='+str(g_xmlSetting['setup'].readInt('count_models'))) for m in xrange(1,g_xmlSetting['setup'].readInt('count_models')+1): spawned['model'] = m modelname = 'model' + str(m) entity.modelname = BigWorld.Model(g_xmlSetting['model' + str(m)].readString('path_model')) # Scale or Static if g_xmlSetting['model' + str(m)].readString('type_coord') == 'Static': coordins = (g_xmlSetting['model' + str(m)].readFloat('x'),g_xmlSetting['model' + str(m)].readFloat('z'),g_xmlSetting['model' + str(m)].readFloat('y')) else: coordins = (g_xmlSetting['model' + str(m)].readFloat('x')+corx,g_xmlSetting['model' + str(m)].readFloat('z')+corz,g_xmlSetting['model' + str(m)].readFloat('y')+cory) entity.modelname.addMotor(BigWorld.Servo(ClientHangarSpace._createMatrix(g_xmlSetting['model' + str(m)].readFloat('v_scale'), (g_xmlSetting['model' + str(m)].readFloat('anglesYaw'),g_xmlSetting['model' + str(m)].readFloat('anglesRotate'),g_xmlSetting['model' + str(m)].readFloat('anglesPitch')), coordins))) BigWorld.addModel(entity.modelname) debug('Resource '+str(modelname)+' spawned') if spawned['tank'] < g_xmlSetting['setup'].readInt('count_tanks'): debug('Tank spawn count ='+str(g_xmlSetting['setup'].readInt('count_tanks'))) for m in xrange(1,g_xmlSetting['setup'].readInt('count_tanks')+1): spawned['tank'] = m modelname 
= 'tank' + str(m) chassis = BigWorld.Model(str(g_xmlSetting['tank' + str(m)].readString('chassis'))) hull = BigWorld.Model(str(g_xmlSetting['tank' + str(m)].readString('hull'))) turret = BigWorld.Model(str(g_xmlSetting['tank' + str(m)].readString('turret'))) gun = BigWorld.Model(str(g_xmlSetting['tank' + str(m)].readString('gun'))) if g_xmlSetting['tank' + str(m)].readString('chassis_ext') is not None and g_xmlSetting['tank' + str(m)].readString('chassis_ext') != '' and g_xmlSetting['tank' + str(m)].readString('chassis_ext') != 'None': debug('Extended chassis to '+str(modelname)+' added') chassis_ext = BigWorld.Model(str(g_xmlSetting['tank' + str(m)].readString('chassis_ext'))) if g_xmlSetting['tank' + str(m)].readString('hull_ext') is not None and g_xmlSetting['tank' + str(m)].readString('hull_ext') != '' and g_xmlSetting['tank' + str(m)].readString('hull_ext') != 'None': debug('Extended hull to '+str(modelname)+' added') hull_ext = BigWorld.Model(str(g_xmlSetting['tank' + str(m)].readString('hull_ext'))) if g_xmlSetting['tank' + str(m)].readString('turret_ext') is not None and g_xmlSetting['tank' + str(m)].readString('turret_ext') != '' and g_xmlSetting['tank' + str(m)].readString('turret_ext') != 'None': debug('Extended turret to '+str(modelname)+' added') turret_ext = BigWorld.Model(str(g_xmlSetting['tank' + str(m)].readString('turret_ext'))) if g_xmlSetting['tank' + str(m)].readString('gun_ext') is not None and g_xmlSetting['tank' + str(m)].readString('gun_ext') != '' and g_xmlSetting['tank' + str(m)].readString('gun_ext') != 'None': debug('Extended gun to '+str(modelname)+' added') gun_ext = BigWorld.Model(str(g_xmlSetting['tank' + str(m)].readString('gun_ext'))) chassis.node('V').attach(hull) hull.node('HP_turretJoint').attach(turret) turret.node('HP_gunJoint').attach(gun) if chassis_ext != 0: chassis.node('V').attach(chassis_ext) if hull_ext != 0: hull.node('HP_turretJoint').attach(hull_ext) if turret_ext != 0: turret.node('HP_gunJoint').attach(turret_ext) 
if gun_ext != 0: gun.node('HP_gunFire').attach(gun_ext) entity.modelname = chassis if g_xmlSetting['tank' + str(m)].readString('type_coord') == 'Static': coordins = (g_xmlSetting['tank' + str(m)].readFloat('x'),g_xmlSetting['tank' + str(m)].readFloat('z'),g_xmlSetting['tank' + str(m)].readFloat('y')) else: coordins =
<reponame>NoSDante/micropython-esp32 import uasyncio as asyncio from machine import Pin, I2C, UART from time import sleep class Sensors(): I2C_SLOT = 1 I2C_SDA = 21 I2C_SCL = 22 I2C_FREQ = 400000 IMPORT_ERROR = "cannot import module" I2C_ERROR = "cannot initialize I2C" UART_ERROR = "cannot initialize UART" def __init__(self, i2c=None, debug=False): self.debug = debug self.i2c = None self.uart = None if isinstance(i2c, I2C): self.i2c = i2c else: self.init_I2C() def init_I2C(self): try: self.i2c = I2C(self.I2C_SLOT, scl=Pin(self.I2C_SCL), sda=Pin(self.I2C_SDA), freq=self.I2C_FREQ) #self.i2c = I2C(self.I2C_SLOT) if self.debug: print("\n----- I2C -----\n", self.i2c) except Exception as e: raise Exception(self.I2C_ERROR, e) def init_BH1750(self, i2c=None): self.bh1750 = None try: from lib.bh1750 import BH1750 except ImportError as e: print(self.IMPORT_ERROR, e) if isinstance(i2c, I2C): self.i2c = i2c self.bh1750 = BH1750(self.i2c) self.bh1750.data = {} def init_SPS30(self, port=1, rx=9, tx=10, start=True, clean=True, sample=60): self.sps30 = None try: from lib.sps30 import SPS30 except ImportError as e: print(self.IMPORT_ERROR, e) try: self.sps30 = SPS30(port=UART(port, baudrate=115200, bits=8, parity=None, stop=1, rx=rx, tx=tx), debug=self.debug, sample=sample) except Exception as e: print(self.UART_ERROR, e) self.sps30.Standby(debug=self.debug) if self.debug: print("\n----- SPS30 -----\n", self.sps30.device_info(debug=self.debug)) self.sps30.data = {} self.sps30.ready = True if clean: self.sps30.fan_clean(debug=self.debug) if start: self.sps30.start_measurement(debug=self.debug) def init_SCD30(self, i2c=None, start=True, auto_calibration=False, forced_co2=None, temp_offset=None, pause=1000): self.scd30 = None try: from lib.scd30 import SCD30 except Exception as e: print(self.IMPORT_ERROR, e) if isinstance(i2c, I2C): self.i2c = i2c #I2C address SCD30_I2C_ADDR = 0x61 try: self.scd30 = SCD30(self.i2c, SCD30_I2C_ADDR, pause=pause) except Exception as e: 
print(self.I2C_ERROR, e) self.scd30.data = {} self.scd30.started = False self.scd30.ready = True if start: self.scd30.start_continous_measurement() self.scd30.started = True # ASC only works in continuous measurement mode if self.scd30.get_automatic_recalibration() != auto_calibration: self.scd30.set_automatic_recalibration(auto_calibration) if forced_co2 is not None and isinstance(forced_co2, int): if self.debug: print("set forced recalibration {}ppm".format(forced_co2)) self.scd30.set_forced_recalibration(forced_co2) if temp_offset is not None and isinstance(temp_offset, int): if self.debug: print("set temperature offset {}°C".format(temp_offset)) self.scd30.set_temperature_offset(temp_offset) sleep(0.1) if self.debug: print("\n----- SCD30 -----") print("Firmware: {}".format(self.scd30.get_firmware_version())) print("Forced Recalibration: {}ppm".format(self.scd30.get_forced_recalibration())) print("Automatic Recalibration: {}".format(self.scd30.get_automatic_recalibration())) print("Measurement Interval: {}sec".format(self.scd30.get_measurement_interval())) print("Temperature Offset: {} ".format(self.scd30.get_temperature_offset())) print("Altitude Comp: {}".format(self.scd30.get_altitude_comp())) async def init_AS3935(self, i2c=None, pin_irq=None, capacitance=120, indoor=True, disturber=True): self.as3935 = None try: from lib.DFRobot_AS3935_Lib import DFRobot_AS3935 except ImportError as e: print(self.IMPORT_ERROR, e) #I2C address #AS3935_I2C_ADDR1 = 0X01 #AS3935_I2C_ADDR2 = 0X02 #AS3935_I2C_ADDR3 = 0X03 AS3935_I2C_ADDR = [0X01,0X02,0X03] AS3935_I2C_FREQ = 400000 if isinstance(i2c, I2C): self.i2c = i2c for addr in AS3935_I2C_ADDR: if self.debug: print("initializing as3935 with I2C address", hex(addr)) for retry in range(10): self.as3935 = DFRobot_AS3935(addr, self.i2c) if self.as3935.reset(): break if self.debug: print("error initializing as3935", retry) asyncio.sleep(0.5) if not self.as3935.reset(): self.as3935 = None return self.as3935.data = {} # configure 
sensor self.as3935.powerUp() # set indoors or outdoors models if indoor: self.as3935.setIndoors() else: self.as3935.setOutdoors() # disturber detection if disturber: self.as3935.disturberEn() else: self.as3935.disturberDis() self.as3935.setIrqOutputSource(0) asyncio.sleep(0.5) # set capacitance self.as3935.setTuningCaps(capacitance) # Connect the IRQ and GND pin to the oscilloscope. # uncomment the following sentences to fine tune the antenna for better performance. # This will dispaly the antenna's resonance frequency/16 on IRQ pin (The resonance frequency will be divided by 16 on this pin) # Tuning AS3935_CAPACITANCE to make the frequency within 500/16 kHz plus 3.5% to 500/16 kHz minus 3.5% # # self.as3935.setLcoFdiv(0) # self.as3935.setIrqOutputSource(3) # Set the noise level,use a default value greater than 7 self.as3935.setNoiseFloorLv1(2) #noiseLv = self.as3935.getNoiseFloorLv1() # used to modify WDTH,alues should only be between 0x00 and 0x0F (0 and 7) self.as3935.setWatchdogThreshold(2) #wtdgThreshold = self.as3935.getWatchdogThreshold() # used to modify SREJ (spike rejection),values should only be between 0x00 and 0x0F (0 and 7) self.as3935.setSpikeRejection(2) #spikeRejection = self.as3935.getSpikeRejection() self.as3935.trigger = pin_irq # view all register data if self.debug: self.as3935.printAllRegs() def init_MQ2(self, pin_analog=None, pin_trigger=None, baseVoltage=5.0, interval=2, calibrate=True): self.mq2 = None if pin_analog is None: raise ValueError('analog pin is not defined') try: from lib.MQ2 import MQ2 except ImportError as e: print(self.IMPORT_ERROR, e) ## This strategy measure values immideatly, so it might be inaccurate. Should be # suitable for tracking dynamics, raither than actual values STRATEGY_FAST = const(1) ## This strategy measure values separatelly. For a single measurement # MQ_SAMPLE_TIMES measurements are taken in interval MQ_SAMPLE_INTERVAL. # I.e. 
for multi-data sensors, like MQ2 it would take a while to receive full data STRATEGY_ACCURATE = const(2) self.mq2 = MQ2(pinData=pin_analog, baseVoltage=baseVoltage, measuringStrategy=STRATEGY_FAST) self.mq2.calibration = False self.mq2.data = {} # Calibration default sampletime 5000ms x (5) if calibrate: self.mq2.calibrate() self.mq2.calibration = True ## Set digital out of MQ2 as trigger # A low level signal triggers a gas detection => trigger.value() == 0 self.mq2.trigger = pin_trigger if self.debug: print("\n----- MQ2 -----") print("Calibrated: {}".format(self.mq2.calibration)) print("Base resistance: {}".format(self.mq2._ro)) print("Bas Voltage: {}".format(baseVoltage)) print("Pin Analog data: {}".format(pin_analog)) print("Pin Digital trigger: {}".format(pin_trigger)) class Scoring(object): DEFAULT_VALUE = "DEFAULT" def __init__(self, debug=False): self.debug = debug def co2(self, value): score = None if value is None: return score if value in range(0, 600): score = "GREAT" if value in range(600, 800): score = "GOOD" if value in range(800, 1000): score = "NORMAL" if value in range(1000, 1200): score = "BAD" if value in range(1200, 1600): score = "VERY BAD" if value in range(1600, 2000): score = "CRITICAL" if value >= 2000: score = "DANGER" return score def temperature(self, value): score = None if value is None: return score if value <= 0: score = "FREEZE" return score def heatindex(self, temp, hum): if temp is None or hum is None: return temp, status # Convert celcius to fahrenheit fahrenheit = ((temp * 9/5) + 32) hi, score = None, None if fahrenheit >= 80 and hum >= 40: # Creating multiples of 'fahrenheit' & 'hum' values for the coefficients T2 = pow(fahrenheit, 2) T3 = pow(fahrenheit, 3) H2 = pow(hum, 2) H3 = pow(hum, 3) # Coefficients for the calculations C1 = [ -42.379, 2.04901523, 10.14333127, -0.22475541, -6.83783e-03, -5.481717e-02, 1.22874e-03, 8.5282e-04, -1.99e-06] C2 = [ 0.363445176, 0.988622465, 4.777114035, -0.114037667, -0.000850208, 
-0.020716198, 0.000687678, 0.000274954, 0] C3 = [ 16.923, 0.185212, 5.37941, -0.100254, 0.00941695, 0.00728898, 0.000345372, -0.000814971, 0.0000102102, -0.000038646, 0.0000291583, 0.00000142721, 0.000000197483, -0.0000000218429, 0.000000000843296, -0.0000000000481975] # Calculating heat-indexes with 3 different formula heatindex1 = C1[0] + (C1[1] * fahrenheit) + (C1[2] * hum) + (C1[3] * fahrenheit * hum) + (C1[4] * T2) + (C1[5] * H2) + (C1[6] * T2 * hum) + (C1[7] * fahrenheit * H2) + (C1[8] * T2 * H2) heatindex2 = C2[0] + (C2[1] * fahrenheit) + (C2[2] * hum) + (C2[3] * fahrenheit * hum) + (C2[4] * T2) + (C2[5] * H2) + (C2[6] * T2 * hum) + (C2[7] * fahrenheit * H2) + (C2[8] * T2 * H2) heatindex3 = C3[0] + (C3[1] * fahrenheit) + (C3[2] * hum) + (C3[3] * fahrenheit * hum) + (C3[4] * T2) + (C3[5] * H2) + (C3[6] * T2 * hum) + (C3[7] * fahrenheit * H2) + (C3[8] * T2 * H2) + (C3[9] * T3) + (C3[10] * H3) + (C3[11] * T3 * hum) + (C3[12] * fahrenheit * H3) + (C3[13] * T3 * H2) + (C3[14] * T2 * H3) + (C3[15] * T3 * H3) hi = round(((((heatindex1+heatindex2+heatindex3)//3) - 32) * 5/9), 1) # score if hi: if hi in range(27, 32): score = "CAUTION" if hi in range(32, 41): score = "CRITCAL" if hi in range(41, 54): score = "DANGER" if hi >= 54: score = "EXTREME" return hi, score def humidity(self, value): score = None return score def dust(self, value): score = None return score def color(self, value): colors = { "EXCELLENT" : ("GREEN"), "GREAT" : ("GREEN"), "GOOD" : ("LIME"), "NORMAL" : ("YELLOW GREEN"), "BAD" : ("YELLOW"), "VERY BAD" : ("ORANGE"), "CAUTION" : ("DARK ORANGE"), "DANGER" : ("RED"), "EXTREME" : ("RED"), "CRITICAL" : ("ORANGE RED"), "HOT" : ("ORANGE"), "WARM" : ("YELLOW"), "FAIR" : ("DEEP SKY BLUE"), "COLD" : ("MEDIUM BLUE"), "FREEZE" : ("BLUE"), "DEFAULT" : ("WHITE"),
<gh_stars>0 # region [Imports] # * Standard Library Imports ----------------------------------------------------------------------------> import os import random from math import ceil import secrets from typing import List, TYPE_CHECKING, Tuple import asyncio from urllib.parse import quote as urlquote import re from typing import Optional # * Third Party Imports ---------------------------------------------------------------------------------> from discord.ext import commands from discord import AllowedMentions from pyfiglet import Figlet from PIL import Image, ImageDraw, ImageFont import discord # * Gid Imports -----------------------------------------------------------------------------------------> import gidlogger as glog # * Local Imports ---------------------------------------------------------------------------------------> from antipetros_discordbot.utility.misc import delete_message_if_text_channel, is_even from antipetros_discordbot.utility.checks import allowed_channel_and_allowed_role, log_invoker, owner_or_admin from antipetros_discordbot.init_userdata.user_data_setup import ParaStorageKeeper from antipetros_discordbot.utility.discord_markdown_helper.the_dragon import THE_DRAGON from antipetros_discordbot.utility.discord_markdown_helper.special_characters import ZERO_WIDTH from antipetros_discordbot.utility.discord_markdown_helper.discord_formating_helper import make_box from antipetros_discordbot.utility.gidtools_functions import loadjson, pathmaker, writejson from antipetros_discordbot.utility.exceptions import ParseDiceLineError from antipetros_discordbot.utility.converters import UrlConverter from antipetros_discordbot.utility.enums import RequestStatus, CogMetaStatus, UpdateTypus from antipetros_discordbot.engine.replacements import AntiPetrosBaseCog, AntiPetrosBaseGroup, CommandCategory, RequiredFile, auto_meta_info_command, auto_meta_info_group if TYPE_CHECKING: from antipetros_discordbot.engine.antipetros_bot import AntiPetrosBot # 
endregion[Imports] # region [TODO] # endregion [TODO] # region [AppUserData] # endregion [AppUserData] # region [Logging] log = glog.aux_logger(__name__) # endregion[Logging] # region [Constants] APPDATA = ParaStorageKeeper.get_appdata() BASE_CONFIG = ParaStorageKeeper.get_config('base_config') COGS_CONFIG = ParaStorageKeeper.get_config('cogs_config') # location of this file, does not work if app gets compiled to exe with pyinstaller THIS_FILE_DIR = os.path.abspath(os.path.dirname(__file__)) # endregion[Constants] class KlimBimCog(AntiPetrosBaseCog, command_attrs={'hidden': False, "categories": CommandCategory.GENERAL}): """ Collection of small commands that either don't fit anywhere else or are just for fun. """ # region [ClassAttributes] public = True meta_status = CogMetaStatus.WORKING long_description = "" extra_info = "" short_doc = "Mostly unessential fun commands." brief = "Mostly fun stuff" required_config_data = {'base_config': {}, 'cogs_config': {"coin_image_heads": "https://i.postimg.cc/XY4fhCf5/antipetros-coin-head.png", "coin_image_tails": "https://i.postimg.cc/HsQ0B2yH/antipetros-coin-tails.png"}} music_data_file = pathmaker(APPDATA["fixed_data"], 'youtube_music_links.json') required_folder = [] required_files = [RequiredFile(music_data_file, {}, RequiredFile.FileType.JSON)] dice_statement_regex = re.compile(r"(?P<amount>\d+)(?P<dice_type>d\d+)", re.IGNORECASE) # endregion [ClassAttributes] # region [Init] def __init__(self, bot: "AntiPetrosBot"): super().__init__(bot) self.dice_mapping = { 'd4': {'sides': 4}, 'd6': {'sides': 6}, 'd8': {'sides': 8}, 'd10': {'sides': 10}, 'd12': {'sides': 12}, 'd20': {'sides': 20}, 'd100': {'sides': 100} } self.color = 'green' # endregion [Init] # region [Properties] @property def youtube_links(self): return loadjson(self.music_data_file) # endregion [Properties] # region [Setup] async def on_ready_setup(self): await super().on_ready_setup() self.ready = True log.debug('setup for cog "%s" finished', str(self)) async 
def update(self, typus: UpdateTypus): await super().update(typus=typus) log.debug('cog "%s" was updated', str(self)) # endregion [Setup] # region [Loops] # endregion [Loops] # region [Listener] # endregion [Listener] # region [Commands] @ auto_meta_info_command() @ allowed_channel_and_allowed_role() @commands.cooldown(1, 5, commands.BucketType.channel) async def the_dragon(self, ctx: commands.Context): """ Posts and awesome ASCII Art Dragon! Example: @AntiPetros the_dragon """ suprise_dragon_check = secrets.randbelow(100) + 1 if suprise_dragon_check == 1: await ctx.send('https://i.redd.it/073kp5pr5ev11.jpg') elif suprise_dragon_check == 2: await ctx.send('https://www.sciencenewsforstudents.org/wp-content/uploads/2019/11/860-dragon-header-iStock-494839519.gif') else: await ctx.send(THE_DRAGON) @ auto_meta_info_group(case_insensitive=True, cls=AntiPetrosBaseGroup, invoke_without_command=True) @allowed_channel_and_allowed_role(in_dm_allowed=True) @commands.cooldown(1, 5, commands.BucketType.channel) async def flip_coin(self, ctx: commands.Context): """ Simulates a coin flip and posts the result as an image of a Petros Dollar. Example: @AntiPetros flip_coin """ async with ctx.typing(): result = (secrets.randbelow(2) + 1) coin = "heads" if is_even(result) is True else 'tails' color = "green" if coin == "heads" else "red" await asyncio.sleep(random.random() * random.randint(1, 2)) coin_image = COGS_CONFIG.retrieve(self.config_name, f"coin_image_{coin}", typus=str) nato_check_num = secrets.randbelow(100) + 1 if nato_check_num <= 1: coin = 'nato, you lose!' 
coin_image = "https://i.postimg.cc/cdL5Z0BH/nato-coin.png" color = "blue" embed = await self.bot.make_generic_embed(title=coin.title(), description=ZERO_WIDTH, image=coin_image, thumbnail='no_thumbnail', color=color) await ctx.reply(**embed, allowed_mentions=AllowedMentions.none()) return coin @commands.cooldown(1, 5, commands.BucketType.member) @flip_coin.command(name='text') async def flip_coin_text(self, ctx: commands.Context): """ Renders the `flip_coin` command as text only, without images. Subcommand of `flip_coin` Example: @AntiPetros flip_coin text """ async with ctx.typing(): result = (secrets.randbelow(2) + 1) coin = "heads" if is_even(result) is True else 'tails' color = "green" if coin == "heads" else "red" await asyncio.sleep(random.random() * random.randint(1, 2)) nato_check_num = secrets.randbelow(100) + 1 if nato_check_num <= 1: coin = 'nato, you lose!' color = "blue" embed = discord.Embed(description=f"{ctx.author.mention} flipped a Coin: **{coin.title()}**", color=self.bot.get_discord_color(color)) await ctx.reply(embed=embed, allowed_mentions=AllowedMentions.none()) @ auto_meta_info_command() @allowed_channel_and_allowed_role() @commands.cooldown(1, 5, commands.BucketType.user) async def urban_dictionary(self, ctx, term: str, entries: int = 1): """ Searches Urbandictionary for the search term and post the answer as embed Args: term (str): the search term entries (int, optional): How many UD entries for that term it should post, max is 5. Defaults to 1. 
Example: @AntiPetros urban_dictionary Petros 2 """ if entries > 5: await ctx.send('To many requested entries,max allowed return entries is 5') return urban_request_url = "https://api.urbandictionary.com/v0/define?term=" full_url = urban_request_url + urlquote(term) json_content = await self.bot.request_json(url=full_url) content_list = sorted(json_content.get('list'), key=lambda x: x.get('thumbs_up') + x.get('thumbs_down'), reverse=True) for index, item in enumerate(content_list): if index <= entries - 1: _embed_data = await self.bot.make_generic_embed(title=f"Definition for '{item.get('word')}'", description=item.get('definition').replace('[', '*').replace(']', '*'), fields=[self.bot.field_item(name='EXAMPLE:', value=item.get('example').replace('[', '*').replace(']', '*'), inline=False), self.bot.field_item(name='LINK:', value=item.get('permalink'), inline=False)], thumbnail="https://gamers-palace.de/wordpress/wp-content/uploads/2019/10/Urban-Dictionary-e1574592239378-820x410.jpg") await ctx.send(**_embed_data) await asyncio.sleep(1) @ auto_meta_info_command() @ allowed_channel_and_allowed_role() @ commands.cooldown(1, 5, commands.BucketType.channel) async def make_figlet(self, ctx, *, text: str): """ Posts an ASCII Art version of the input text. Args: text (str): text you want to see as ASCII Art. Example: @AntiPetros make_figlet The text to figlet Info: Your invoking message gets deleted! 
""" figlet = Figlet(font='gothic', width=300) new_text = figlet.renderText(text.upper()) await ctx.send(f"```fix\n{new_text}\n```") await ctx.message.delete() @staticmethod def paste_together(*images): amount = len(images) spacing = 25 dice_per_line = 10 if amount <= 10: b_image_size = ((images[0].size[0] * amount) + (spacing * amount), images[0].size[1]) else: b_image_size = ((images[0].size[0] * dice_per_line) + (spacing * dice_per_line), (images[0].size[1] * ceil(amount / dice_per_line)) + (spacing * ceil(amount / dice_per_line))) b_image = Image.new('RGBA', b_image_size, color=(0, 0, 0, 0)) current_x = 0 current_y = 0 for index, image in enumerate(images): b_image.paste(image, (current_x, current_y)) current_x += image.size[0] + spacing if (index + 1) % dice_per_line == 0: current_x = 0 current_y += image.size[1] + spacing return b_image async def parse_dice_line(self, dice_line: str) -> List[Tuple[int, str]]: """ Parses the input string for the `roll_dice` command into a tuple of "amounts" and "type of dice". Args: dice_line (str): input string Raises: ParseDiceLineError: If the format is not in the needed format (e.g. "1d6 6d8") or the type of dice does not exist. Returns: List[Tuple[int, str]]: list of tuples, that conist of the amount to role and the type of dice. """ _out = [] statements = dice_line.split() for statement in statements: statement_match = self.dice_statement_regex.search(statement) if statement_match: _out.append((int(statement_match.group('amount')), statement_match.group('dice_type'))) else: raise ParseDiceLineError(statement) return _out @staticmethod async def _roll_the_dice(sides): """ Roles the die via the `secrets` module. """ return secrets.randbelow(sides) + 1 @staticmethod def _get_dice_images(result_image_file_paths): """ Retrieves the images of the dice from the filesystem. 
""" images = [Image.open(dice_image) for dice_image in result_image_file_paths] return images @staticmethod def _sum_dice_results(in_result): """ Calculates the sum of the dice. """ result_dict = {key: sum(value) for key, value in in_result.items()} result_combined = sum(value for key, value in result_dict.items()) return result_combined @ auto_meta_info_group(case_insensitive=True, cls=AntiPetrosBaseGroup, invoke_without_command=True) @allowed_channel_and_allowed_role(True) @commands.cooldown(1, 5, commands.BucketType.member) async def roll_dice(self, ctx, *, dice_line: str): # @AntiPetros roll_dice 14d4 14d6 14d8 14d10 14d12 14d20 14d100 """ Roll Dice and get the result also as Image. All standard DnD Dice are available, d4, d6, d8, d10, d12, d20, d100. Args: dice_line (str): the dice you want to roll in the format `2d6`, first number is amount. Multiple different dice can be rolled, just seperate them by a space. -> 2d6 4d20 1d4. Example: @AntiPetros roll_dice 14d4 14d6 14d8 14d10 14d12 14d20 14d100 """ # TODO: Refractor this ugly mess dice_limit = 100 results = {} result_image_files = [] parsed_dice_line = await self.parse_dice_line(dice_line) if sum(item[0] for item in parsed_dice_line) > dice_limit: await ctx.send(f"Amount of overall dice `{sum(item[1] for item in parsed_dice_line)}` is over the limit of `{dice_limit}`, aborting!", delete_after=120) return for amount, type_of_dice in parsed_dice_line: mod_type_of_dice = type_of_dice.casefold() if mod_type_of_dice not in self.dice_mapping: await ctx.reply(f"I dont know dice of the type `{type_of_dice}`!", delete_after=120) return sides_of_die = self.dice_mapping[mod_type_of_dice].get('sides') if mod_type_of_dice not in results: results[mod_type_of_dice] = [] for i in range(amount): roll_result = await self._roll_the_dice(sides_of_die) results[mod_type_of_dice].append(roll_result) result_image_files.append(APPDATA[f"{mod_type_of_dice}_{roll_result}.png"]) await asyncio.sleep(0) # await 
asyncio.to_thread(random.shuffle, result_image_files) result_images = await asyncio.to_thread(self._get_dice_images, result_image_files) result_image = await asyncio.to_thread(self.paste_together, *result_images) result_combined = await asyncio.to_thread(self._sum_dice_results, results) fields = [self.bot.field_item(name="Sum", value='`' + str(result_combined) + '`', inline=False)] embed_data = await self.bot.make_generic_embed(title=f'{ctx.author.display_name} rolled:', fields=fields, thumbnail='no_thumbnail', image=result_image, color='random') await ctx.send(**embed_data) @commands.cooldown(1, 5, commands.BucketType.member) @roll_dice.command(name='text') async def roll_dice_text(self, ctx, *, dice_line: str): """ Renders the `roll_dice` command as text only, without images. Subcommand of `roll_dice` Example: @AntiPetros roll_dice text """ # TODO: Refractor this ugly mess dice_limit = 100 results = {} parsed_dice_line = await self.parse_dice_line(dice_line) if sum(item[0] for item in parsed_dice_line) > dice_limit: await ctx.send(f"Amount of overall dice `{sum(item[1] for item in parsed_dice_line)}` is over the
x_forward_list = [] for n in range(nsamples): _,(_,x_forward,_) = self.forward_sample(A[:,1:T_forward+1,:], T_forward-1, B = B, X0=X[:,0,:], A0=A[:,0,:], eps = eps) x_forward_list.append(x_forward[...,None]) x_forward = torch.cat(x_forward_list,-1).mean(-1) x_forward = torch.cat([X[:,[0],:], x_forward], 1) if T_condition != -1: x_forward_conditional_list = [] for n in range(nsamples): Z_t_cond, _ = self.inf_network(X[:,:T_condition,:], A[:,:T_condition,:], M[:,:T_condition,:], B) _,(_,x_forward_conditional,_) = self.forward_sample(A[:,T_condition:,:], T_forward, Z_start = Z_t_cond[:,-1,:], B = B, eps = eps) x_forward_conditional_list.append(x_forward_conditional[...,None]) x_forward_conditional = torch.cat(x_forward_conditional_list, -1).mean(-1) x_sample_conditional = torch.cat([X[:,:T_condition,:], x_forward_conditional],1) return neg_elbo, per_feat_nelbo, torch.ones_like(masked_kl_t), torch.ones_like(masked_kl_t), x_sample_conditional, x_forward, (B,X,A,M,Y,CE), idx_select return neg_elbo, per_feat_nelbo, torch.ones_like(masked_kl_t), torch.ones_like(masked_kl_t), x_forward, (B,X,A,M,Y,CE), idx_select def inspect_trt(self, B, X, A, M, Y, CE, nsamples=3): self.eval() m_t, _, lens = get_masks(M) idx_select = lens>1 B, X, A, M, Y, CE = B[lens>1], X[lens>1], A[lens>1], M[lens>1], Y[lens>1], CE[lens>1] x_conditional_list = [] for n in range(nsamples): x_conditionals_per_pt = [] for i in range(X.shape[0]): # np.unique(np.where(pt_numpy(A)[...,-2:] == 1.)[0]) T_condition = np.max(np.where(pt_numpy(A[i,:,-3]) == 1.)[0])+1 print(i) if i == 38: import pdb; pdb.set_trace() l = np.where(pt_numpy(A[i,:,-1]) == 1.)[0] if len(l) == 0: T_total = np.max(np.where(pt_numpy(A[i,:-2] == 1.))[0])+1 else: T_total = np.max(l)+1 T_forward = T_total - T_condition Z_t_cond, _ = self.inf_network(X[[i],:T_condition,:], A[[i],:T_condition,:], M[[i],:T_condition,:], B[[i]]) _, (_, x_forward_conditional, _) = self.forward_sample(A[[i],T_condition:,:], T_forward, Z_start=Z_t_cond[:,-1,:], B = 
B[[i]]) x_conditional = torch.cat((X[[i],:T_condition], x_forward_conditional, X[[i],T_total:]),1) x_conditionals_per_pt.append(x_conditional) x_conditional_list.append(torch.cat(x_conditionals_per_pt,0)[...,None]) x_final_conditional = torch.cat(x_conditional_list, -1).mean(-1) return x_final_conditional, (B,X,A,M,Y,CE), idx_select def predict(self, **kwargs): raise NotImplemented() @staticmethod def add_model_specific_args(parent_parser): parser = ArgumentParser(parents = [parent_parser], add_help=False) parser.add_argument('--dim_stochastic', type=int, default=48, help='stochastic dimension of state space model') parser.add_argument('--dim_hidden', type=int, default=300, help='hidden dimension for nonlinear model') parser.add_argument('--etype', type=str, default='lin', help='SSM emission function') parser.add_argument('--ttype', type=str, default='lin', help='SSM transition function') parser.add_argument('--inftype', type=str, default='rnn_relu', help='inference network type') parser.add_argument('--post_approx', type=str, default='diag', help='inference of approximate posterior distribution') parser.add_argument('--elbo_samples', type=int, default=1, help='number of samples to run through inference network') parser.add_argument('--augmented', type=strtobool, default=False, help='SSM augmented') parser.add_argument('--C', type=float, default=.01, help='regularization strength') parser.add_argument('--inf_noise', type=float, default=0., help='noise parameter on input') parser.add_argument('--nheads', type=int, default=1, help='number of heads for attention inference network and generative model') parser.add_argument('--rank', type=int, default=5, help='rank of matrix for low_rank posterior approximation') parser.add_argument('--combiner_type', type=str, default='pog', help='combiner function used in inference network') parser.add_argument('--reg_all', type=str, default='all', help='regularize all weights or only subset') parser.add_argument('--reg_type', 
type=str, default='l2', help='regularization type (l1 or l2)') parser.add_argument('--alpha1_type', type=str, default='linear', help='alpha1 parameterization in TreatExp IEF') parser.add_argument('--zmatrix', type=str, default='identity') parser.add_argument('--otype', type=str, default='linear', help='final layer of GroMOdE IEF (linear, identity, nl)') parser.add_argument('--add_stochastic', type=strtobool, default=False, help='conditioning alpha-1 of TEXP on S_[t-1]') parser.add_argument('--clock_ablation', type=strtobool, default=False, help='set to true to run without local clock') return parser class SSMAtt(SSM): def __init__(self, trial, **kwargs): super(SSMAtt, self).__init__(trial) self.save_hyperparameters() def init_model(self): ttype = 'attn_transition'; etype = self.hparams['etype'] dim_hidden = self.hparams['dim_hidden'] # dim_stochastic = self.hparams['dim_stochastic'] dim_stochastic = self.trial.suggest_int('dim_stochastic',16,64) num_heads = self.hparams['nheads'] dim_data = self.hparams['dim_data'] dim_base = self.hparams['dim_base'] dim_treat = self.hparams['dim_treat'] post_approx = self.hparams['post_approx'] inftype = self.hparams['inftype']; etype = self.hparams['etype']; ttype = self.hparams['ttype'] augmented = self.hparams['augmented']; alpha1_type = self.hparams['alpha1_type'] rank = self.hparams['rank']; combiner_type = self.hparams['combiner_type']; nheads = self.hparams['nheads'] add_stochastic = self.hparams['add_stochastic'] # Inference Network if inftype == 'rnn': self.inf_network = RNN_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, post_approx = post_approx, rank = rank, combiner_type = combiner_type) elif inftype == 'rnn_bn': self.inf_network = RNN_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, post_approx = post_approx, rank = rank, use_bn=True, combiner_type = combiner_type) elif inftype == 'rnn_relu': self.inf_network = RNN_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, 
                                             post_approx = post_approx, rank = rank, nl='relu', combiner_type = combiner_type)
        elif inftype == 'att':
            # Attention-based structured inference network over the observed sequence.
            self.inf_network = Attention_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic,
                                               nheads = num_heads, post_approx = post_approx, rank = rank)
        else:
            raise ValueError('Bad inference type')
        # Emission Function: maps latent state Z_t -> parameters of p(X_t | Z_t).
        if etype == 'lin':
            self.e_mu = nn.Linear(dim_stochastic, dim_data)
            self.e_sigma = nn.Linear(dim_stochastic, dim_data)
        elif etype == 'nl':
            # Hidden width tuned via Optuna trial; NOTE(review): this overwrites the
            # dim_hidden argument for everything constructed after this point — confirm intended.
            dim_hidden = self.trial.suggest_int('dim_hidden',100,500)
            # NOTE(review): e_mu and e_sigma share the same `emodel` trunk (weight sharing) —
            # presumably intentional; verify.
            emodel = nn.Sequential(nn.Linear(dim_stochastic, dim_hidden), nn.ReLU(True))
            self.e_mu = nn.Sequential(emodel, nn.Linear(dim_hidden, dim_data))
            self.e_sigma = nn.Sequential(emodel, nn.Linear(dim_hidden, dim_data))
        else:
            raise ValueError('bad etype')
        # Transition Function: p(Z_t | Z_{t-1}, treatment [, baseline]).
        if self.hparams['include_baseline'] == 'all':
            # Baseline covariates are concatenated onto the treatment vector.
            self.transition_fxn = TransitionFunction(dim_stochastic, dim_data, dim_treat+dim_base, dim_hidden, ttype, \
                augmented=augmented, alpha1_type=alpha1_type, add_stochastic=add_stochastic, num_heads=num_heads)
        elif self.hparams['include_baseline'] == 'none':
            self.transition_fxn = TransitionFunction(dim_stochastic, dim_data, dim_treat, dim_hidden, ttype, \
                augmented=augmented, alpha1_type=alpha1_type, add_stochastic=add_stochastic, num_heads=num_heads)
        else:
            # NOTE(review): any other include_baseline value silently leaves transition_fxn
            # undefined — confirm this case cannot be reached.
            pass
        # Prior over Z1: parameterized from (baseline, first observation, first treatment).
        self.prior_W = nn.Linear(dim_treat+dim_data+dim_base, dim_stochastic)
        self.prior_sigma = nn.Linear(dim_treat+dim_data+dim_base, dim_stochastic)
        # Attention over the treatment history, queried by a projection of the latent state.
        self.attn = MultiHeadedAttention(num_heads, dim_treat+dim_base)
        self.attn_lin = nn.Linear(dim_stochastic, dim_treat+dim_base)

    def p_Zt_Ztm1(self, Zt, A, B, X, A0, Am, eps = 0.):
        """Build the prior p(Z_t | Z_{t-1}) over the whole sequence.

        The first step uses a learned prior from (B, X_0, A_0); subsequent steps use the
        transition function driven by attention over the treatment sequence A.
        Returns an Independent Normal over the latent dimension.
        NOTE(review): assumes X is (batch, time, feat) and Am is a (batch, T, T)
        attention mask — confirm against the inference network.
        """
        X0 = X[:,0,:]; Xt = X[:,1:,:]
        inp_cat = torch.cat([B, X0, A0], -1)
        # Prior for the first latent state, shaped (batch, 1, dim_stochastic).
        mu1 = self.prior_W(inp_cat)[:,None,:]
        sig1 = torch.nn.functional.softplus(self.prior_sigma(inp_cat))[:,None,:]
        Tmax = Zt.shape[1]
        if self.hparams['augmented']:
            # Augmented transition also conditions on the observations.
            Zinp = torch.cat([Zt[:,:-1,:], Xt[:,:-1,:]], -1)
        else:
            Zinp = Zt[:,:-1,:]
        Aval = A[:,1:Tmax,:]; Am_res = Am[:,1:Tmax,1:Tmax]
        if self.hparams['include_baseline']:
            # Splice baseline B between the first treatment channel and the rest.
            Acat = torch.cat([Aval[...,[0]],B[:,None,:].repeat(1,Aval.shape[1],1), Aval[...,1:]],-1)
            res = self.attn(self.attn_lin(Zinp), Acat, Acat, mask=Am_res, use_matmul=True)
            mu2T, sig2T = self.transition_fxn(Zinp, res, eps = eps)
        else:
            res = self.attn(self.attn_lin(Zinp), Aval, Aval, mask=Am_res, use_matmul=True) # res
            mu2T, sig2T = self.transition_fxn(Zinp, res, eps = eps)
        # Concatenate the t=1 prior with the transition-based priors for t>1.
        mu, sig = torch.cat([mu1,mu2T],1), torch.cat([sig1,sig2T],1)
        return Independent(Normal(mu, sig), 1)

    def get_loss(self, B, X, A, M, Y, CE, Am, anneal = 1., return_reconstruction = False, with_pred = False):
        """Compute the (annealed) negative ELBO per sequence.

        Sequences of length <= 1 are dropped. Returns
        (neg_elbo, masked_nll, masked_kl, ones[, masked p_x_mu, masked p_x_std]).
        """
        _, _, lens = get_masks(M)
        # Keep only sequences with more than one observed step.
        B, X, A, M, Y, CE, Am = B[lens>1], X[lens>1], A[lens>1], M[lens>1], Y[lens>1], CE[lens>1], Am[lens>1]
        m_t, m_g_t, _ = get_masks(M[:,1:,:])
        Z_t, q_zt = self.inf_network(X, A, M, B)
        Tmax = Z_t.shape[1]
        p_x_mu, p_x_std = self.p_X_Z(Z_t, A[:,1:Tmax+1,[0]])
        p_zt = self.p_Zt_Ztm1(Z_t, A, B, X, A[:,0,:], Am)
        masked_nll = masked_gaussian_nll_3d(X[:,1:Tmax+1,:], p_x_mu, p_x_std, M[:,1:Tmax+1,:])
        # NOTE(review): full_masked_nll is never used below — dead assignment?
        full_masked_nll = masked_nll
        masked_nll = masked_nll.sum(-1).sum(-1)
        if with_pred:
            # Average reconstruction NLL with the NLL of the one-step-ahead prediction
            # obtained from the prior mean.
            p_x_mu_pred, p_x_std_pred = self.p_X_Z(p_zt.mean, A[:,:Z_t.shape[1],[0]])
            masked_nll_pred = masked_gaussian_nll_3d(X[:,1:Tmax+1,:], p_x_mu_pred, p_x_std_pred, M[:,1:Tmax+1,:])
            masked_nll_pred = masked_nll_pred.sum(-1).sum(-1)
            masked_nll = (masked_nll+masked_nll_pred)*0.5
        # Monte-Carlo KL estimate: log q(z|x) - log p(z).
        kl_t = q_zt.log_prob(Z_t)-p_zt.log_prob(Z_t)
        masked_kl_t = (m_t[:,:Tmax]*kl_t).sum(-1)
        neg_elbo = masked_nll + anneal*masked_kl_t
        if return_reconstruction:
            return (neg_elbo, masked_nll, masked_kl_t, torch.ones_like(masked_kl_t), p_x_mu*M[:,1:,:], p_x_std*M[:,1:,:])
        else:
            return (neg_elbo, masked_nll, masked_kl_t, torch.ones_like(masked_kl_t))

    def forward(self, B, X, A, M, Y, CE, Am, anneal = 1.):
        """Training forward pass: ELBO (optionally with multiple samples) + regularization.

        NOTE(review): if self.training is False, neg_elbo is never assigned and the
        torch.mean(neg_elbo) below raises NameError — confirm forward is only invoked
        in training mode.
        """
        if self.training:
            if self.hparams['elbo_samples']>1:
                # Replicate each sequence elbo_samples times for a multi-sample ELBO.
                # NOTE(review): Am is NOT repeat-interleaved here while B/X/A/M/Y/CE are —
                # looks like a shape mismatch inside get_loss; verify.
                B, X = torch.repeat_interleave(B, repeats=self.elbo_samples, dim=0), torch.repeat_interleave(X, repeats=self.elbo_samples, dim=0)
                A, M = torch.repeat_interleave(A, repeats=self.elbo_samples, dim=0), torch.repeat_interleave(M, repeats=self.elbo_samples, dim=0)
                Y, CE = torch.repeat_interleave(Y, repeats=self.elbo_samples, dim=0), torch.repeat_interleave(CE, repeats=self.elbo_samples, dim=0)
                neg_elbo, masked_nll, kl, _ = self.get_loss(B, X, A, M, Y, CE, Am, anneal = anneal, with_pred = True)
            else:
                neg_elbo, masked_nll, kl, _ = self.get_loss(B, X, A, M, Y, CE, Am, anneal = anneal, with_pred = False)
        reg_loss = torch.mean(neg_elbo)
        for name,param in self.named_parameters():
            if self.reg_all:
                # reg_loss += self.hparams['C']*apply_reg(param, reg_type=self.hparams['reg_type'])
                reg_loss += self.C*apply_reg(param, reg_type=self.reg_type)
            else:
                # Regularize everything except the attention parameters.
                if 'attn' not in name:
                    reg_loss += self.C*apply_reg(param, reg_type=self.reg_type)
        loss = torch.mean(reg_loss)
        return (torch.mean(neg_elbo), torch.mean(masked_nll), torch.mean(kl), torch.ones_like(kl)), loss

    def forward_sample(self, A, T_forward, Z_start = None, B=None, X0=None, A0=None, eps = 0.):
        # Not implemented for this model variant.
        pass

    def inspect(self, T_forward, T_condition, B, X, A, M, Y, CE, Am, restrict_lens = False, nsamples = 1, eps = 0.):
        # Not implemented for this model variant.
        pass

    def inspect_trt(self, B, X, A, M, Y, CE, Am, nsamples=3):
        # Not implemented for this model variant.
        pass

class TransitionFunction(nn.Module):
    """Latent transition p(Z_t | Z_{t-1}, treatment): several interchangeable
    parameterizations selected by `ttype` (linear, nonlinear, heterogeneous
    per line-of-therapy, gated, attention-based)."""
    def __init__(self, dim_stochastic, dim_data, dim_treat, dim_hidden, ttype,
                 augmented: bool = False, alpha1_type: str = 'linear', otype: str = 'linear',
                 add_stochastic: bool = False, num_heads: int = 1, zmatrix: str = 'identity'):
        super(TransitionFunction, self).__init__()
        self.dim_stochastic = dim_stochastic
        self.dim_treat = dim_treat
        self.dim_hidden = dim_hidden
        self.dim_data = dim_data
        # Number of different lines of therapy to multiplex on (only for heterogenous models)
        self.K = 3
        self.ttype = ttype
        dim_treat_mK = dim_treat-self.K
        if augmented:
            # augmented does not completely work for transition function other than ('gated','lin'), ('lin','lin')
            dim_input = dim_stochastic+dim_data
        else:
            dim_input = dim_stochastic
        if self.ttype == 'lin':
            self.t_mu = nn.Linear(dim_input+dim_treat, dim_stochastic)
            self.t_sigma = nn.Linear(dim_input+dim_treat, dim_stochastic)
        elif self.ttype == 'nl':
            # NOTE(review): mu and sigma heads share the `tmodel` trunk — presumably
            # intentional weight sharing; verify.
            tmodel = nn.Sequential(nn.Linear(dim_input+dim_treat, dim_hidden),nn.ReLU(True))
            self.t_mu = nn.Sequential(tmodel, nn.Linear(dim_hidden, dim_stochastic))
            self.t_sigma = nn.Sequential(tmodel, nn.Linear(dim_hidden, dim_stochastic))
        elif self.ttype == 'het_lin':
            # One linear head per line of therapy (K heads).
            self.t_mu = nn.ModuleList([nn.Linear(dim_input+dim_treat_mK, dim_stochastic) for k in range(self.K)])
            self.t_sigma = nn.ModuleList([nn.Linear(dim_input+dim_treat_mK, dim_stochastic) for k in range(self.K)])
        elif self.ttype == 'het_nl':
            # One nonlinear head per line of therapy (K heads), each with its own trunk.
            t_mu, t_sigma = [],[]
            for k in range(self.K):
                tmodel = nn.Sequential(nn.Linear(dim_input+dim_treat_mK, dim_hidden), nn.ReLU(True))
                t_mu.append(nn.Sequential(tmodel, nn.Linear(dim_hidden, dim_stochastic)))
                t_sigma.append(nn.Sequential(tmodel, nn.Linear(dim_hidden, dim_stochastic)))
            self.t_mu = nn.ModuleList(t_mu)
            self.t_sigma = nn.ModuleList(t_sigma)
        elif self.ttype == 'gated':
            # NOTE(review): the magic pair (16, 9) appears to be the dataset's native
            # (dim_data, dim_treat); any other shape skips the special initialization.
            avoid_init = False
            if self.dim_data != 16 or self.dim_treat != 9:
                avoid_init = True
            self.t_mu = GatedTransition(dim_input, dim_treat, avoid_init = avoid_init, dim_output=dim_stochastic, alpha1_type=alpha1_type, otype=otype, add_stochastic=add_stochastic)
            self.t_sigma = nn.Linear(dim_input+dim_treat, dim_stochastic)
        elif self.ttype == 'attn_transition':
            avoid_init = False
        colormap. For example, 0.95 translates to a min/max of mean(ratio) +/- (1.96*std(ratio))
    cmap
        The colormap used to display the ratio image. Diverging colormaps are a good choice here (default is RdBu_r).
    r_min
        The minimum value for the ratio colormap. If None, uses the `prob` parameter (see its description), and requires `profile_data`.
    r_max
        The maximum value for the ratio colormap. If None, uses the `prob` parameter (see its description), and requires `profile_data`.
    i_min
        The intensity to map to 0 in the value channel
    i_max
        The intensity to map to 1 in the value channel
    clip
        Whether or not the value channel should be clipped to [0, 1] before converting back to RGB. Leaving this as True is a sane default.
    ax
        If given, the image is plotted on this axis. If ``None``, this function uses the pyplot interface.
    colorbar
        show the colorbar or not
    imshow_args
        keyword arguments that will be passed along to the ``imshow`` function
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Convert ratio to RGB
        if profile_data is None:
            if (r_min is None) or (r_max is None):
                raise ValueError(
                    "r_min and r_max must be set if profile_data is not given"
                )
        else:
            # Derive colormap bounds from the profile distribution: mean +/- Z*std,
            # where Z is the normal quantile for the requested probability mass.
            r_mean = np.mean(profile_data)
            r_std = np.std(profile_data)
            Z = stats.norm.ppf(
                prob
            )  # this converts probability -> "Z" value (e.g. 0.95 -> 1.96)
            window = r_std * Z
            r_min_ = r_mean - window
            r_max_ = r_mean + window
            # Explicit r_min/r_max, if given, take precedence over the derived bounds.
            if r_min is None:
                r_min = r_min_
            if r_max is None:
                r_max = r_max_
        norm_ratio = colors.Normalize(vmin=r_min, vmax=r_max)
        cmap = cm.get_cmap(cmap)
        img_rgba = cmap(norm_ratio(ratio_img))
        # Now convert RGB to HSV, using intensity image as the "V" (value)
        if i_max is None:
            i_max = np.max(fl_img)
        norm_fl = colors.Normalize(vmin=i_min, vmax=i_max, clip=clip)
        hsv_img = colors.rgb_to_hsv(img_rgba[:, :, :3])  # ignore the "alpha" channel
        hsv_img[:, :, -1] = norm_fl(fl_img)
        if ax is None:
            ax = plt.gca()
        # Convert HSV back to RGB and plot
        img_rgba = colors.hsv_to_rgb(hsv_img)
        im = ax.imshow(img_rgba, **imshow_kwargs)
        # Stash the ratio colormap/norm on the artist so add_img_colorbar can reuse them.
        im.cmap = cmap
        im.norm = norm_ratio
        if colorbar:
            cbar = add_img_colorbar(ax, **colorbar_kwargs_dict)
            return im, cbar
        else:
            return im


def add_img_colorbar(ax, position="right", size="5%", pad=0.05, **colorbar_kwargs):
    """Attach a colorbar for the first AxesImage found in `ax`, using the
    cmap/norm stored on that image. Raises ValueError if the axis has no image."""
    try:
        axes_img = [
            obj for obj in ax.get_children() if isinstance(obj, image.AxesImage)
        ][0]
    except IndexError:
        raise ValueError(
            "No image found in axis children. This method only works for axes with images."
        )
    divider = make_axes_locatable(ax)
    cax = divider.append_axes(position, size=size, pad=pad)
    return plt.colorbar(
        cm.ScalarMappable(norm=axes_img.norm, cmap=axes_img.cmap),
        cax=cax,
        **colorbar_kwargs,
    )


def registration_diagnostic_plot_stack(fl, raw_prof, reg_prof, filepath, **reg_params):
    """Write a multi-page PDF with one registration diagnostic figure per animal."""
    with PdfPages(filepath) as pdf:
        for i in range(fl.spec.size):
            f = registration_diagnostic_plot(fl, raw_prof, reg_prof, i, **reg_params)
            pdf.savefig(f)
            plt.close(f)


def registration_diagnostic_plot(fl, raw_prof, reg_prof, idx, **params) -> plt.Figure:
    """Build a 4-row diagnostic figure for animal `idx`: ratio image, intensity
    profiles (+ derivatives on a twin axis), ratio profiles, and a parameter box.
    One column per pair when the data has a `pair` dimension."""
    if "pair" in fl.dims:
        n_pairs = fl.pair.size
    else:
        n_pairs = 1
    fig_width, fig_height = 12, 12
    fig_scale = 1
    fig, axes = plt.subplots(
        4, n_pairs, figsize=(fig_scale * fig_width, fig_scale * fig_height)
    )
    colors = {"410": "tab:blue", "470": "tab:orange"}
    ###############
    # RATIO IMAGE #
    ###############
    for pair in range(n_pairs):
        ax = axes[0, pair]
        # Trim the profile edges so outliers there don't skew the colormap bounds.
        colormap_profile_buffer = 10
        imshow_ratio_normed(
            fl.sel(wavelength="r", pair=pair)[idx],
            fl.sel(wavelength="410", pair=pair)[idx],
            profile_data=raw_prof.sel(wavelength="r", pair=pair)[idx][
                colormap_profile_buffer:-colormap_profile_buffer
            ],
            prob=0.999,
            i_max=2000,
            colorbar=True,
            ax=ax,
        )
        # NOTE(review): hard-coded view window — assumes the pharynx sits in this
        # pixel range; confirm for other imaging setups.
        ax.set_xlim(45, 120)
        ax.set_ylim(50, 80)
    #####################
    # INTENSITY PROFILE #
    #####################
    for pair in range(n_pairs):
        # Intensity Profile
        ax = axes[1, pair]
        xs = np.linspace(0, 1, raw_prof.position.size)
        # REG 410 ("unregistered, but smooth")
        ax.scatter(
            xs,
            raw_prof.sel(wavelength="410", pair=pair)[idx],
            color="k",
            label="raw 410",
            s=1,
        )
        # RAW 410 (to test hyper-fine smoothing)
        ax.plot(
            xs,
            reg_prof.sel(wavelength="410", pair=pair)[idx],
            lw=1,
            color=colors["410"],
            label="r410",
            linestyle="-",
        )
        # RAW 470
        ax.plot(
            xs,
            raw_prof.sel(wavelength="470", pair=pair)[idx],
            linestyle="-",
            lw=1,
            color=colors["470"],
            label="470",
        )
        # REG 470
        ax.plot(
            xs,
            reg_prof.sel(wavelength="470", pair=pair)[idx],
            linestyle="--",
            lw=1,
            color=colors["470"],
            label="r470",
        )
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 2.5e4)
        # UNREGISTERED d(I) PROFILE
        ax = ax.twinx()
        # RAW 410 (to test hyper-fine smoothing)
        ax.plot(
            xs,
            reg_prof.sel(wavelength="410", pair=pair)[idx].differentiate(
                coord="position"
            ),
            lw=1,
            color=colors["410"],
            label="r410",
            linestyle="-",
        )
        # RAW 470
        ax.plot(
            xs,
            raw_prof.sel(wavelength="470", pair=pair)[idx].differentiate(
                coord="position"
            ),
            linestyle="-",
            lw=1,
            color=colors["470"],
            label="470",
        )
        # REG 470
        ax.plot(
            xs,
            reg_prof.sel(wavelength="470", pair=pair)[idx].differentiate(
                coord="position"
            ),
            linestyle="--",
            lw=1,
            color=colors["470"],
            label="r470",
        )
        ax.axhline(0, linestyle="--", color="lightgray", lw=1)
        ax.set_xlim(0, 1)
        ax.set_ylim(-5e3, 1e3)
        ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    #################
    # RATIO PROFILE #
    #################
    # get appropriate y-limits for ratios
    buffer = int(raw_prof.position.size * 0.30)
    r_min = raw_prof.sel(wavelength="r")[..., buffer:-buffer].min()
    r_max = raw_prof.sel(wavelength="r")[..., buffer:-buffer].max()
    for pair in range(n_pairs):
        # Intensity Profile
        ax = axes[2, pair]
        xs = np.linspace(0, 1, raw_prof.position.size)
        ax.plot(
            xs, raw_prof.sel(wavelength="r", pair=pair)[idx], label="raw", color="k"
        )
        # NOTE(review): this second line plots the REGISTERED ratio but reuses
        # label="raw", so the legend shows two identical entries — confirm it
        # should read "reg".
        ax.plot(
            xs,
            reg_prof.sel(wavelength="r", pair=pair)[idx],
            label="raw",
            color="tab:red",
            linestyle="-",
        )
        ax.set_xlim(0, 1)
        autoscale_percent_buffer = 0.0
        ax.set_ylim(
            r_min - (r_min * autoscale_percent_buffer),
            r_max + (r_max * autoscale_percent_buffer),
        )
        ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    ax = axes[3, 0]
    ax.set_axis_off()
    #################
    # Parameter box #
    #################
    textstr = "\n".join(f"{k}={v}" for k, v in params.items())
    props = dict(boxstyle="round", facecolor="wheat", alpha=0.5)
    ax.text(
        0.01,
        0.97,
        textstr,
        transform=ax.transAxes,
        fontsize=10,
        verticalalignment="top",
        bbox=props,
    )
    axes[3, 1].set_axis_off()
    plt.tight_layout()
    return fig


def plot_pharynx_R_imgs(
    img: xr.DataArray,
    mask: xr.DataArray,
    crop: bool = True,
    crop_pad: int = 10,
    cmap_normalization: str = "frame",
    cmap: str = "coolwarm",
    fig_kwargs=None,
):
    """
    Generate a figure which has ratio images broken up by timepoint and pair

    Parameters
    ----------
    img : xr.DataArray
        The image to display. Should contain a single animal, and the `r` and `410` wavelengths.
    mask : xr.DataArray
        The mask with which a ROI will be used for calculated the average R value of the pharynx
    crop : bool, optional
        Whether the image should be cropped, by default True
    crop_pad : int, optional
        The padding for the crop, as number of pixels on each side of the bounding box surrounding the pharynx, by default 10
    cmap_normalization : str, optional
        How the colormap should be normalized, by default "frame". "frame" means each timepoint and pair will be normalized separately
    cmap : str, optional
        The colormap to use, by default "coolwarm"
    fig_kwargs : [type], optional
        Keyword arguments to be passed to `matplotlib.pyplot.figure`, by default None
        NOTE(review): currently unused in the body — confirm.

    Raises
    ------
    ValueError
        [description]
    ValueError
        [description]
    NotImplementedError
        [description]
    """
    if "animal" in img.dims:
        raise ValueError(
            f"Image must contain single animal. Given stack contains {img.animal.size} animals"
        )
    valid_cmap_normalizations = ["frame", "animal"]
    if cmap_normalization not in valid_cmap_normalizations:
        raise ValueError(
            f"`cmap_normaliztion` must be one of {valid_cmap_normalizations} (given <{cmap_normalization}>)"
        )
    fig = plt.figure(constrained_layout=True)
    gs = gridspec.GridSpec(ncols=img.pair.size, nrows=img.timepoint.size, figure=fig)
    for i, tp in enumerate(img.timepoint.values):
        for j, pair in enumerate(img.pair.values):
            ax = fig.add_subplot(gs[i, j])
            ax.set_title(f"timepoint = {tp} | Pair={pair}")
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
            R = img.sel(wavelength="r", timepoint=tp, pair=pair).values
            I = img.sel(wavelength="410", timepoint=tp, pair=pair).values
            M = mask.sel(timepoint=tp, pair=pair).astype(bool).values
            # Ratio values restricted to the pharynx ROI.
            P_r = R[M]
            if cmap_normalization == "frame":
                # Per-frame bounds: mean +/- 1.96*std of the ROI ratio.
                r_min = np.mean(P_r) - 1.96 * np.std(P_r)
                r_max = np.mean(P_r) + 1.96 * np.std(P_r)
                i_max = 0.30 * regionprops(label(M), intensity_image=I)[0].max_intensity
            if cmap_normalization == "animal":
                raise NotImplementedError
            if crop:
                # Zoom to the bounding box of the pharynx plus padding
                # (y-limits reversed to keep image orientation).
                rp = regionprops(label(M), intensity_image=R)[0]
                (min_row, min_col, max_row, max_col) = rp.bbox
                ax.set_ylim(max_row + crop_pad, min_row - crop_pad)
                ax.set_xlim(min_col - crop_pad, max_col + crop_pad)
            imshow_ratio_normed(
                R, I, cmap=cmap, i_min=0, i_max=i_max, r_min=r_min, r_max=r_max, ax=ax
            )
    return fig


def plot_multiple_pop_errors(
    data_and_labels,
    ylim=None,
    xlim=None,
    add_regions=True,
    figsize=(20, 10),
    dpi=100,
    regions=constants.untrimmed_regions,
):
    """Plot multiple error profiles and their corresponding labels

    Parameters
    ----------
    data_and_labels : List[Tuple(xr.DataArray, str)]
        A list of (data, label), one for each data set to plot
    ylim : Tuple(float, float), optional
        The y limits of the plot, by default None
    xlim : Tuple(float, float), optional
        The x-limits of the plot, by default None
    add_regions : bool, optional
        Whether or not to add regions to the plot, by default True
    figsize : Tuple(float, floa), optional
        The figure size, by default (20, 10)
    dpi : int, optional
        The DPI of the plot, by default 100
    regions : dict, optional
        The regions to plot, if enabled, by default constants.untrimmed_regions

    Returns
    -------
    fig, ax
        The figure and axis object
    """
    fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
    for data, label in data_and_labels:
        xs = np.linspace(0, 1, data.position.size)
        # NOTE(review): only timepoint 0 is plotted — confirm intended.
        plot_profile_avg_with_bounds(
            da.fold_error(data.sel(timepoint=0)), xs=xs, ax=ax, label=label
        )
    if ylim:
        ax.set_ylim(*ylim)
    if xlim:
        ax.set_xlim(*xlim)
    if add_regions:
        add_regions_to_axis(ax, regions, alpha=0.3, hide_labels=True)
    ax.set_ylabel("Absolute Error (%)")
    ax.set_xlabel("position along midline")
    ax.legend(loc="lower left")
    return fig, ax


def plot_multiple_pop_wvl(
    data_and_labels,
    wvl="r",
    ylim=None,
    xlim=None,
    add_regions=True,
    figsize=(20, 10),
    dpi=100,
    regions=constants.untrimmed_regions,
):
    """Plot multiple error profiles and their corresponding labels

    Parameters
    ----------
    data_and_labels : List[Tuple(xr.DataArray, str)]
        A list of (data, label), one for each data set to plot
    ylim : Tuple(float, float), optional
        The
set default font to %s" %myFont)
    try:
        # Push the Moneydance default font into every Swing UIManager key so all
        # widgets pick it up (see https://thebadprogrammer.com/swing-uimanager-keys/).
        UIManager.getLookAndFeelDefaults().put("defaultFont", myFont )
        # https://thebadprogrammer.com/swing-uimanager-keys/
        UIManager.put("CheckBoxMenuItem.acceleratorFont", myFont)
        UIManager.put("Button.font", myFont)
        UIManager.put("ToggleButton.font", myFont)
        UIManager.put("RadioButton.font", myFont)
        UIManager.put("CheckBox.font", myFont)
        UIManager.put("ColorChooser.font", myFont)
        UIManager.put("ComboBox.font", myFont)
        UIManager.put("Label.font", myFont)
        UIManager.put("List.font", myFont)
        UIManager.put("MenuBar.font", myFont)
        UIManager.put("Menu.acceleratorFont", myFont)
        UIManager.put("RadioButtonMenuItem.acceleratorFont", myFont)
        UIManager.put("MenuItem.acceleratorFont", myFont)
        UIManager.put("MenuItem.font", myFont)
        UIManager.put("RadioButtonMenuItem.font", myFont)
        UIManager.put("CheckBoxMenuItem.font", myFont)
        UIManager.put("OptionPane.buttonFont", myFont)
        UIManager.put("OptionPane.messageFont", myFont)
        UIManager.put("Menu.font", myFont)
        UIManager.put("PopupMenu.font", myFont)
        UIManager.put("OptionPane.font", myFont)
        UIManager.put("Panel.font", myFont)
        UIManager.put("ProgressBar.font", myFont)
        UIManager.put("ScrollPane.font", myFont)
        UIManager.put("Viewport.font", myFont)
        UIManager.put("TabbedPane.font", myFont)
        UIManager.put("Slider.font", myFont)
        UIManager.put("Table.font", myFont)
        UIManager.put("TableHeader.font", myFont)
        UIManager.put("TextField.font", myFont)
        UIManager.put("Spinner.font", myFont)
        UIManager.put("PasswordField.font", myFont)
        UIManager.put("TextArea.font", myFont)
        UIManager.put("TextPane.font", myFont)
        UIManager.put("EditorPane.font", myFont)
        UIManager.put("TabbedPane.smallFont", myFont)
        UIManager.put("TitledBorder.font", myFont)
        UIManager.put("ToolBar.font", myFont)
        UIManager.put("ToolTip.font", myFont)
        UIManager.put("Tree.font", myFont)
        UIManager.put("FormattedTextField.font", myFont)
        UIManager.put("IconButton.font", myFont)
        UIManager.put("InternalFrame.optionDialogTitleFont", myFont)
        UIManager.put("InternalFrame.paletteTitleFont", myFont)
        UIManager.put("InternalFrame.titleFont", myFont)
    except:
        # Best-effort: Swing may reject fonts on some platforms/LAFs; carry on.
        myPrint("B","Failed to set Swing default fonts to use Moneydance defaults... sorry")
    return

setDefaultFonts()

def who_am_i():
    # Returns the OS user name, or "???" if the JVM property is unavailable.
    try:
        username = System.getProperty("user.name")
    except:
        username = "???"
    return username

def getHomeDir():
    # Debug helper: logs every candidate "home directory" source (they can disagree).
    # Yup - this can be all over the place...
    myPrint("D", 'System.getProperty("user.dir")', System.getProperty("user.dir"))
    myPrint("D", 'System.getProperty("UserHome")', System.getProperty("UserHome"))
    myPrint("D", 'System.getProperty("user.home")', System.getProperty("user.home"))
    myPrint("D", 'os.path.expanduser("~")', os.path.expanduser("~"))
    myPrint("D", 'os.environ.get("HOMEPATH")', os.environ.get("HOMEPATH"))
    return

def amIaMac():
    # True when the JVM reports macOS ("Mac OS X").
    myPlat = System.getProperty("os.name")
    if myPlat is None: return False
    myPrint("DB", "Platform:", myPlat)
    myPrint("DB", "OS Version:", System.getProperty("os.version"))
    return myPlat == "Mac OS X"

myPrint("D", "I am user:", who_am_i())
if debug: getHomeDir()
lIamAMac = amIaMac()

def myDir():
    # Pick the best available home directory, falling back to the dataset's parent folder.
    global lIamAMac
    homeDir = None
    try:
        if lIamAMac:
            homeDir = System.getProperty("UserHome")  # On a Mac in a Java VM, the homedir is hidden
        else:
            # homeDir = System.getProperty("user.home")
            homeDir = os.path.expanduser("~")  # Should work on Unix and Windows
            if homeDir is None or homeDir == "":
                homeDir = System.getProperty("user.home")
            if homeDir is None or homeDir == "":
                homeDir = os.environ.get("HOMEPATH")
    except:
        pass
    if homeDir is None or homeDir == "":
        homeDir = moneydance_data.getRootFolder().getParent()  # Better than nothing!
    myPrint("DB", "Home Directory selected...:", homeDir)
    if homeDir is None: return ""
    return homeDir

# noinspection PyArgumentList
class JTextFieldLimitYN(PlainDocument):
    # Swing document filter: restricts text-field input to a character class
    # ("YN", delimiters, "1234", or any char for "CURR") and a maximum length.
    limit = 10  # Default
    toUpper = False
    what = ""
    def __init__(self, limit, toUpper, what):
        # NOTE(review): super(PlainDocument, self) skips JTextFieldLimitYN in the MRO —
        # usually this would be super(JTextFieldLimitYN, self); appears to work under
        # Jython/Java interop, confirm before changing.
        super(PlainDocument, self).__init__()
        self.limit = limit
        self.toUpper = toUpper
        self.what = what
    def insertString(self, myOffset, myString, myAttr):
        if (myString is None): return
        if self.toUpper: myString = myString.upper()
        # Accept the insert only if it matches the configured character class...
        if (self.what == "YN" and (myString in "YN")) \
                or (self.what == "DELIM" and (myString in ";|,")) \
                or (self.what == "1234" and (myString in "1234")) \
                or (self.what == "CURR"):
            # ...and keeps the document within the length limit.
            if ((self.getLength() + len(myString)) <= self.limit):
                super(JTextFieldLimitYN, self).insertString(myOffset, myString, myAttr)                         # noqa

def fix_delimiter( theDelimiter ):
    # Jython 2.7 workaround: coerce the delimiter to a plain str on interpreter
    # versions whose csv module rejects unicode delimiters.
    try:
        if sys.version_info.major >= 3: return theDelimiter
        if sys.version_info.major < 2: return str(theDelimiter)
        if sys.version_info.minor > 7: return theDelimiter
        if sys.version_info.minor < 7: return str(theDelimiter)
        if sys.version_info.micro >= 2: return theDelimiter
    except:
        pass
    return str( theDelimiter )

def get_StuWareSoftSystems_parameters_from_file():
    # Load the shared StuWareSoftSystems parameter pickle (unencrypted file in the
    # dataset folder; falls back to the legacy encrypted local-storage copy),
    # then push the values into module globals.
    global debug, myParameters, lPickle_version_warning, version_build, _resetParameters  # noqa

    myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()" )

    if _resetParameters:
        myPrint("B", "User has specified to reset parameters... keeping defaults and skipping pickle()")
        myParameters = {}
        return

    myFile = "StuWareSoftSystems.dict"
    old_dict_filename = os.path.join("..", myFile)

    # Pickle was originally encrypted, no need, migrating to unencrypted
    migratedFilename = os.path.join(moneydance_data.getRootFolder().getAbsolutePath(),myFile)

    myPrint("DB", "Now checking for parameter file:", migratedFilename)

    if os.path.exists( migratedFilename ):
        myPrint("DB", "loading parameters from non-encrypted Pickle file:", migratedFilename)
        myPrint("DB", "Parameter file", migratedFilename, "exists..")
        # Open the file
        try:
            istr = FileInputStream(migratedFilename)
            load_file = FileUtil.wrap(istr)
            # noinspection PyTypeChecker
            myParameters = pickle.load(load_file)
            load_file.close()
        except FileNotFoundException:
            myPrint("B", "Error: failed to find parameter file...")
            myParameters = None
        except EOFError:
            myPrint("B", "Error: reached EOF on parameter file....")
            myParameters = None
        except:
            myPrint("B","Error opening Pickle File (will try encrypted version) - Unexpected error ", sys.exc_info()[0])
            myPrint("B","Error opening Pickle File (will try encrypted version) - Unexpected error ", sys.exc_info()[1])
            myPrint("B","Error opening Pickle File (will try encrypted version) - Line Number: ", sys.exc_info()[2].tb_lineno)
            # OK, so perhaps from older version - encrypted, try to read
            try:
                local_storage = moneydance.getCurrentAccountBook().getLocalStorage()
                istr = local_storage.openFileForReading(old_dict_filename)
                load_file = FileUtil.wrap(istr)
                # noinspection PyTypeChecker
                myParameters = pickle.load(load_file)
                load_file.close()
                myPrint("B","Success loading Encrypted Pickle file - will migrate to non encrypted")
                lPickle_version_warning = True
            except:
                myPrint("B","Opening Encrypted Pickle File - Unexpected error ", sys.exc_info()[0])
                myPrint("B","Opening Encrypted Pickle File - Unexpected error ", sys.exc_info()[1])
                myPrint("B","Error opening Pickle File - Line Number: ", sys.exc_info()[2].tb_lineno)
                myPrint("B", "Error: Pickle.load() failed.... Is this a restored dataset? Will ignore saved parameters, and create a new file...")
                myParameters = None

        if myParameters is None:
            myParameters = {}
            myPrint("DB","Parameters did not load, will keep defaults..")
        else:
            myPrint("DB","Parameters successfully loaded from file...")
    else:
        myPrint("J", "Parameter Pickle file does not exist - will use default and create new file..")
        myPrint("D", "Parameter Pickle file does not exist - will use default and create new file..")
        myParameters = {}

    if not myParameters: return

    myPrint("DB","myParameters read from file contains...:")
    for key in sorted(myParameters.keys()):
        myPrint("DB","...variable:", key, myParameters[key])

    if myParameters.get("debug") is not None: debug = myParameters.get("debug")
    if myParameters.get("lUseMacFileChooser") is not None:
        myPrint("B", "Detected old lUseMacFileChooser parameter/variable... Will delete it...")
        myParameters.pop("lUseMacFileChooser", None)  # Old variable - not used - delete from parameter file

    myPrint("DB","Parameter file loaded if present and myParameters{} dictionary set.....")

    # Now load into memory!
    load_StuWareSoftSystems_parameters_into_memory()

    return

def save_StuWareSoftSystems_parameters_to_file():
    # Persist the shared parameter dictionary to the unencrypted pickle file.
    global debug, myParameters, lPickle_version_warning, version_build

    myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()" )

    if myParameters is None: myParameters = {}

    # Don't forget, any parameters loaded earlier will be preserved; just add changed variables....
    myParameters["__Author"] = "<NAME> - (c) StuWareSoftSystems"
    myParameters["debug"] = debug

    dump_StuWareSoftSystems_parameters_from_memory()

    myFile = "StuWareSoftSystems.dict"
    # Pickle was originally encrypted, no need, migrating to unencrypted
    migratedFilename = os.path.join(moneydance_data.getRootFolder().getAbsolutePath(),myFile)

    myPrint("DB","Will try to save parameter file:", migratedFilename)

    ostr = FileOutputStream(migratedFilename)

    myPrint("DB", "about to Pickle.dump and save parameters to unencrypted file:", migratedFilename)

    try:
        save_file = FileUtil.wrap(ostr)
        # noinspection PyTypeChecker
        pickle.dump(myParameters, save_file)
        save_file.close()

        myPrint("DB","myParameters now contains...:")
        for key in sorted(myParameters.keys()):
            myPrint("DB","...variable:", key, myParameters[key])

    except:
        myPrint("B", "Error - failed to create/write parameter file.. Ignoring and continuing.....")
        dump_sys_error_to_md_console_and_errorlog()
        return

    myPrint("DB","Parameter file written and parameters saved to disk.....")

    return

# END COMMON DEFINITIONS ###############################################################################################
# END COMMON DEFINITIONS ###############################################################################################
# END COMMON DEFINITIONS ###############################################################################################

# >>> CUSTOMISE & DO THIS FOR EACH SCRIPT
def load_StuWareSoftSystems_parameters_into_memory():
    # Copy loaded parameter values into this script's module-level globals
    # (only keys present in the dictionary overwrite the defaults).
    global debug, myParameters, lPickle_version_warning, version_build
    global __StockGlance2020, hideHiddenSecurities, hideInactiveAccounts, hideHiddenAccounts, lAllCurrency, filterForCurrency
    global lAllSecurity, filterForSecurity, lAllAccounts, filterForAccounts, lIncludeCashBalances, lStripASCII, csvDelimiter, scriptpath
    global lSplitSecuritiesByAccount, lExcludeTotalsFromCSV, lRoundPrice, _column_widths_SG2020
    global lWriteBOMToExportFile_SWSS, lIncludeFutureBalances_SG2020

    myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()" )
    myPrint("DB", "Loading variables into memory...")

    if myParameters is None: myParameters = {}

    if myParameters.get("__StockGlance2020") is not None: __StockGlance2020 = myParameters.get("__StockGlance2020")
    if myParameters.get("hideHiddenSecurities") is not None: hideHiddenSecurities = myParameters.get("hideHiddenSecurities")
    if myParameters.get("hideInactiveAccounts") is not None: hideInactiveAccounts = myParameters.get("hideInactiveAccounts")
    if myParameters.get("hideHiddenAccounts") is not None: hideHiddenAccounts = myParameters.get("hideHiddenAccounts")
    if myParameters.get("lAllCurrency") is not None: lAllCurrency = myParameters.get("lAllCurrency")
    if myParameters.get("filterForCurrency") is not None: filterForCurrency = myParameters.get("filterForCurrency")
    if myParameters.get("lAllSecurity") is not None: lAllSecurity = myParameters.get("lAllSecurity")
    if myParameters.get("filterForSecurity") is not None: filterForSecurity = myParameters.get("filterForSecurity")
    if myParameters.get("lAllAccounts") is not None: lAllAccounts = myParameters.get("lAllAccounts")
    if myParameters.get("filterForAccounts") is not None: filterForAccounts = myParameters.get("filterForAccounts")
    if myParameters.get("lIncludeCashBalances") is not None: lIncludeCashBalances = myParameters.get("lIncludeCashBalances")
    if myParameters.get("lSplitSecuritiesByAccount") is not None: lSplitSecuritiesByAccount = myParameters.get("lSplitSecuritiesByAccount")
    if myParameters.get("lExcludeTotalsFromCSV") is not None: lExcludeTotalsFromCSV = myParameters.get("lExcludeTotalsFromCSV")
    if myParameters.get("lIncludeFutureBalances_SG2020") is not None: lIncludeFutureBalances_SG2020 = myParameters.get("lIncludeFutureBalances_SG2020")
    # NOTE(review): the stored key is "lDontRoundPrice" but it is loaded into
    # lRoundPrice — confirm the inverted-sounding name is intentional.
    if myParameters.get("lDontRoundPrice") is not None: lRoundPrice = myParameters.get("lDontRoundPrice")
    if myParameters.get("lStripASCII") is not None: lStripASCII = myParameters.get("lStripASCII")
    if myParameters.get("csvDelimiter") is not None: csvDelimiter = myParameters.get("csvDelimiter")
    if myParameters.get("_column_widths_SG2020") is not None: _column_widths_SG2020 = myParameters.get("_column_widths_SG2020")
    if myParameters.get("lWriteBOMToExportFile_SWSS") is not None: lWriteBOMToExportFile_SWSS = myParameters.get("lWriteBOMToExportFile_SWSS")                                                                                  # noqa

    if myParameters.get("scriptpath") is not None:
        scriptpath = myParameters.get("scriptpath")
        if not os.path.isdir(scriptpath):
            myPrint("DB", "Warning: loaded parameter scriptpath does not appear to be a valid directory:", scriptpath, "will ignore")
            scriptpath = ""

    myPrint("DB","myParameters{} set into memory (as variables).....")

    return

# >>> CUSTOMISE & DO THIS FOR EACH SCRIPT
def dump_StuWareSoftSystems_parameters_from_memory():
    # Copy this script's module-level globals back into the shared parameter
    # dictionary prior to pickling.
    global debug, myParameters, lPickle_version_warning, version_build

    # >>> THESE ARE THIS SCRIPT's PARAMETERS TO SAVE
    global __StockGlance2020
    global hideHiddenSecurities, hideInactiveAccounts, hideHiddenAccounts, lAllCurrency, filterForCurrency
    global lAllSecurity, filterForSecurity, lAllAccounts, filterForAccounts, lIncludeCashBalances, lStripASCII, csvDelimiter, scriptpath
    global lSplitSecuritiesByAccount, lExcludeTotalsFromCSV, lRoundPrice
    global lDisplayOnly, _column_widths_SG2020, lIncludeFutureBalances_SG2020
    global lWriteBOMToExportFile_SWSS

    myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()" )

    # NOTE: Parameters were loaded earlier on... Preserve existing, and update any used ones...
    # (i.e. other StuWareSoftSystems programs might be sharing the same file)

    if myParameters is None: myParameters = {}

    myParameters["__StockGlance2020"] = version_build
    myParameters["hideHiddenSecurities"] = hideHiddenSecurities
    myParameters["hideInactiveAccounts"] = hideInactiveAccounts
    myParameters["hideHiddenAccounts"] = hideHiddenAccounts
    myParameters["lAllCurrency"] = lAllCurrency
    myParameters["filterForCurrency"] = filterForCurrency
    myParameters["lAllSecurity"] = lAllSecurity
    myParameters["filterForSecurity"] = filterForSecurity
    myParameters["lAllAccounts"] = lAllAccounts
    myParameters["filterForAccounts"] = filterForAccounts
    myParameters["lIncludeCashBalances"] = lIncludeCashBalances
    myParameters["lSplitSecuritiesByAccount"] = lSplitSecuritiesByAccount
# ---
# jupyter:
#   jupytext:
#     formats: jupyter_scripts//ipynb,scripts//py
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.3'
#       jupytext_version: 1.0.0
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # series_tools:
#
# set of tools that work with streamflow records.
# - Identify events.
# - Identify baseflow and runoff.
#

import pandas as pd
import numpy as np

# ## Digital filters
#
# Collection of functions to separate runoff from baseflow.

# +
def DigitalFilters(Q, tipo='Eckhart', a=0.98, BFI=0.8):
    '''Digital filters to separate baseflow from runoff in a continuous time series.
    Parameters:
        - tipo: type of filter to be used.
            - Eckhart o 1.
            - Nathan o 2.
            - Chapman o 3.
        - Q: pandas series with the streamflow records.
        - a: parameter for the filter.
            - Eckhart: 0.98.
            - Nathan: 0.8.
            - Chapman: 0.8.
        - BFI: 0.8 only applies for Eckhart filter.
    Returns:
        - Pandas DataFrame with the Runoff, Baseflow.
    Raises:
        - ValueError: if `tipo` is not one of the supported filters
          (previously an unknown `tipo` crashed with NameError).'''

    def Nathan1990(Q, a=0.8):
        '''One parameter digital filter of Nathan and McMahon (1990).'''
        R = np.zeros(Q.size)
        c = 1
        for q1, q2 in zip(Q[:-1], Q[1:]):
            R[c] = a * R[c - 1] + ((1 + a) / 2.) * (q2 - q1)
            # Constrain runoff to 0 <= R <= Q.
            if R[c] < 0:
                R[c] = 0
            elif R[c] > q2:
                R[c] = q2
            c += 1
        B = Q - R
        return R, B

    def Eckhart2005(Q, BFI=0.8, a=0.98):
        '''Two parameter Eckhart digital filter.
        Parameters:
            - Q: np.ndarray with the streamflow records.
            - BFI: The maximum amount of baseflow (%).
            - a: parameter alpha (0.98).
        Output:
            - R: total runoff.
            - B: total baseflow.'''
        # Separation: baseflow recursion initialised at the first record.
        B = np.zeros(Q.size)
        B[0] = Q[0]
        c = 1
        for q in Q[1:]:
            # Separation equation (Eckhardt, 2005).
            B[c] = ((1.0 - BFI) * a * B[c - 1] + (1.0 - a) * BFI * q) / (1.0 - a * BFI)
            # Constrain: baseflow cannot exceed total streamflow.
            if B[c] > q:
                B[c] = q
            c += 1
        R = Q - B
        return R, B

    def ChapmanMaxwell1996(Q, a=0.98):
        '''Digital filter proposed by Chapman and Maxwell (1996).'''
        B = np.zeros(Q.size)
        c = 1
        for q in Q[1:]:
            B[c] = (a / (2. - a)) * B[c - 1] + ((1. - a) / (2. - a)) * q
            c += 1
        R = Q - B
        return R, B

    # Apply the selected filter.
    if tipo == 'Eckhart' or tipo == 1:
        # BUGFIX: previously called Eckhart2005(Q.values, a, BFI), which passed
        # `a` into the BFI slot and `BFI` into the alpha slot (signature is
        # (Q, BFI, a)).  Keyword arguments make the mapping explicit.
        R, B = Eckhart2005(Q.values, BFI=BFI, a=a)
    elif tipo == 'Nathan' or tipo == 2:
        R, B = Nathan1990(Q.values, a)
    elif tipo == 'Chapman' or tipo == 3:
        R, B = ChapmanMaxwell1996(Q.values, a)
    else:
        raise ValueError('Unknown filter type: %s' % str(tipo))
    # Return both components aligned to the original index.
    return pd.DataFrame(np.vstack([R, B]).T, index=Q.index, columns=['Runoff', 'Baseflow'])
# -

# ## Events selection functions
#
# Collection of functions to identify peaks in a series and the end of each peak recession.

# +
def Events_Get_Peaks(Q, Qmin=None, tw=pd.Timedelta('12h')):
    '''Find the peak values of the hydrographs of a series.
    Params:
        - Q: Pandas series with the records.
        - Qmin: The minimum value of Q to be considered a peak.
          if None takes the 99th percentile of the series as the min
        - tw: size of the time window used to eliminate surrounding maximum values'''
    if Qmin is None:
        Qmin = np.percentile(Q.values[np.isfinite(Q.values)], 99)
    # Candidate peaks: all records above the threshold.
    Qmax = Q[Q > Qmin]
    QmaxCopy = Qmax.copy()
    # Iteratively take the global maximum, then blank out its +/- tw window so
    # nearby (lower) maxima of the same event are not selected again.
    Flag = True
    PosMax = []
    while Flag:
        MaxIdx = Qmax.idxmax()
        PosMax.append(MaxIdx)
        Qmax[MaxIdx - tw:MaxIdx + tw] = -9
        if Qmax.max() < Qmin:
            Flag = False
    return QmaxCopy[PosMax].sort_index()


def Events_Get_End(Q, Qmax, minDif=0.04, minDistance=None, maxSearch=10, Window='1h'):
    '''Find the end of each selected event in order to know the longitude of each recession event.
    Parameters:
        - Q: Pandas series with the records.
        - Qmax: Pandas series with the peak streamflows.
        - minDif: The minimum difference to consider that a recession is over.
    Optional:
        - minDistance: minimum temporal distance between the peak and the end.
        - maxSearch: maximum number of iterations to search for the end.
        - Window: Size of the temporal window used to smooth the streamflow
          records before the difference estimation (pandas format).
    Returns:
        - Qend: The point indicating the end of the recession.'''
    # Obtain the first difference of the (resampled) series.
    # BUGFIX: the resample interval was hard-coded to '1h'; it now honours the
    # documented `Window` parameter (default '1h' preserves previous behaviour).
    X = Q.resample(Window).mean()
    dX = X.values[1:] - X.values[:-1]
    dX = pd.Series(dX, index=X.index[:-1])
    # Locate, after each peak, the first rise larger than minDif (recession over).
    DatesEnds = []
    Correct = []
    for peakIndex in Qmax.index:
        try:
            a = dX[dX.index > peakIndex]
            if minDistance is None:
                DatesEnds.append(a[a > minDif].index[0])
            else:
                Dates = a[a > minDif].index
                flag = True
                c = 0
                while flag:
                    distancia = Dates[c] - peakIndex
                    if distancia > minDistance:
                        DatesEnds.append(Dates[c])
                        flag = False
                    c += 1
                    # NOTE(review): if maxSearch is exhausted no end date is
                    # appended for this peak, yet it is still marked correct below
                    # — confirm intended.
                    if c > maxSearch:
                        flag = False
            Correct.append(0)
        except:
            # No end found (e.g. peak at the end of the record): keep the peak
            # itself as a placeholder and flag it as incorrect.
            DatesEnds.append(peakIndex)
            Correct.append(1)
    # Returns the pandas series with the values and end dates.
    Correct = np.array(Correct)
    return pd.Series(Q[DatesEnds], index=DatesEnds), Qmax[Correct == 0]
# -

# ## Runoff analysis

# +
def Runoff_SeparateBaseflow(Qobs, Qsim):
    '''From observed records obtain the baseflow and runoff streamflow records.
    Parameters:
        - Qobs: Observed record dt < 1h.
        - Qsim: Simulated records dt < 1h.
    Returns:
        - Qh: Observed records at hourly scale.
        - Qsh: Simulated records at a hourly scale.
        - Qsep: Observed separated records at hourly scale'''
    # Observed series to hourly scale; fill gaps/negatives with the series mean.
    Qh = Qobs.resample('1h').mean()
    Qh[np.isnan(Qh)] = Qh.mean()
    Qh[Qh < 0] = Qh.mean()
    Qsep = DigitalFilters(Qh, tipo='Nathan', a=0.998)
    # Pre-process of simulated series to hourly scale (gaps become zero flow).
    Qsh = Qsim.resample('1h').mean()
    Qsh[np.isnan(Qsh)] = 0.0
    return Qh, Qsh, Qsep


def Runoff_FindEvents(Qobs, Qsim, minTime=1, minConcav=None, minPeak=None):
    '''Separates runoff from baseflow and finds the events.
    Parameters:
        - Qobs: Hourly observed streamflow.
        - Qsim: Hourly simulated streamflow.
        - minTime: minimum duration of the event.
        - minConcav: minimum concavity of the event.
        - minPeak: minimum value of the peakflows.
    Returns:
        - pos1: pandas index lists with the initial positions.
        - pos2: pandas index lists with the end positions.'''
    # Obtain the positions of the start and end of events.
    pos1, pos2 = __Runoff_Get_Events__(Qsim, np.percentile(Qobs, 20))
    # BUGFIX: minTime was previously hard-coded as minTime=1, silently
    # ignoring the caller's argument.
    pos1, pos2 = __Runoff_Del_Events__(Qobs, pos1, pos2, minTime=minTime,
                                       minConcav=minConcav, minPeak=minPeak)
    return pos1, pos2


def Runoff_CompleteAnalysis(Area, Qobs, Rain, Qsep, pos1, pos2, N=None, Nant=None):
    '''Obtains the DataFrame with the resume of the RC analysis.
    Parameters:
        - Area: the area of the basin in km2.
        - Qobs: Hourly observed streamflow.
        - Rain: Hourly rainfall.
        - Qsep: Hourly dataFrame with the separated flows.
        - pos1: pandas index lists with the initial positions.
        - pos2: pandas index lists with the end positions.
        - N: Number of days to eval the rainfall between p1-N: p2.
        - Nant: Number of antecedent days to eval the rainfall between p1-Nant : p1-N.
    Results:
        - DataFrame with the columns: RC, RainEvent, RainBefore, RainInt, Qmax'''
    # Search for N
    if N is None:
        # Time window based on the basin area, forced to an odd value in [3, 11].
        N = Area ** 0.2
        N = np.floor(N) // 2 * 2 + 1
        if N < 3: N = 3
        if N > 11: N = 11
        Ndays = pd.Timedelta(str(N) + 'd')
        if Nant is None:
            Nant = pd.Timedelta(str(N + 3) + 'd')
    else:
        Ndays = N
        if Nant is None:
            Nant = N + pd.Timedelta('3d')
    # Lists of data
    RC = []
    RainTot = []
    Date = []
    Qmax = []
    RainInt = []
    RainAnt = []
    # Get Values for events
    for pi, pf in zip(pos1, pos2):
        # General variables obtention: runoff volume (m3) and rainfall volume (m3).
        Runoff = Qsep['Runoff'][pi:pf + Ndays].sum() * 3600.
        Rainfall = (Rain[pi - Ndays:pf].sum() / 1000.) * (Area * 1e6)
        # Runoff and streamflow list updates.
        Qmax.append(Qobs[pi:pf].max())
        RC.append(Runoff / Rainfall)
        # Rainfall list updates.
        RainTot.append(Rain[pi - Ndays:pf].sum())
        RainInt.append(Rain[pi - Ndays:pf].max())
        RainAnt.append(Rain[pi - Ndays - Nant:pi - Ndays].sum())
        # Dates.
Date.append(pi) #Converts to arrays RC = np.array(RC) RainTot = np.array(RainTot) RainInt = np.array(RainInt) RainAnt = np.array(RainAnt) Date = np.array(Date) Qmax = np.array(Qmax) #Select the correct values p1 = np.where(np.isfinite(RC))[0] p2 = np.where((RC[p1]<=1.0) & (RC[p1]>0.0))[0] #Lo que es RC = RC[p1[p2]] RainTot = RainTot[p1[p2]] RainInt = RainInt[p1[p2]] RainAnt = RainAnt[p1[p2]] Date = Date[p1[p2]] Qmax = Qmax[p1[p2]] #Los malos pos = np.where((RC>0.04) & (RainTot<10))[0] #Depura de nuevo RC = np.delete(RC, pos) RainTot = np.delete(RainTot, pos) RainInt = np.delete(RainInt, pos) RainAnt = np.delete(RainAnt, pos) Date = np.delete(Date, pos) Qmax = np.delete(Qmax, pos) #Turns things into a DataFrame Data = pd.DataFrame( np.vstack([RC, RainTot, RainAnt, RainInt, Qmax]).T, index= Date, columns=['RC', 'RainEvent', 'RainBefore','RainInt','Qmax']) return Data def Runoff_groupByRain(D, groupby = 'RainEvent' , bins = None, Vmin=None, Vmax=None, Nb = 10, logx = True): '''Group the values of RC in function of a variable. Parameters: - D: pandas Dataframe with the results from the RC analysis. - groupby: name
"CUser *" : return _znc_core.CWebSession_GetUser(self) def IsLoggedIn(self) -> "bool" : return _znc_core.CWebSession_IsLoggedIn(self) def IsAdmin(self) -> "bool" : return _znc_core.CWebSession_IsAdmin(self) def SetUser(self, *args) -> "CUser *" : return _znc_core.CWebSession_SetUser(self, *args) def ClearMessageLoops(self) -> "void" : return _znc_core.CWebSession_ClearMessageLoops(self) def FillMessageLoops(self, *args) -> "void" : return _znc_core.CWebSession_FillMessageLoops(self, *args) def AddError(self, *args) -> "size_t" : return _znc_core.CWebSession_AddError(self, *args) def AddSuccess(self, *args) -> "size_t" : return _znc_core.CWebSession_AddSuccess(self, *args) CWebSession_swigregister = _znc_core.CWebSession_swigregister CWebSession_swigregister(CWebSession) class CWebSubPage(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, CWebSubPage, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, CWebSubPage, name) __repr__ = _swig_repr def __init__(self, *args): this = _znc_core.new_CWebSubPage(*args) try: self.this.append(this) except: self.this = this __swig_destroy__ = _znc_core.delete_CWebSubPage __del__ = lambda self : None; F_ADMIN = _znc_core.CWebSubPage_F_ADMIN def SetName(self, *args) -> "void" : return _znc_core.CWebSubPage_SetName(self, *args) def SetTitle(self, *args) -> "void" : return _znc_core.CWebSubPage_SetTitle(self, *args) def AddParam(self, *args) -> "void" : return _znc_core.CWebSubPage_AddParam(self, *args) def RequiresAdmin(self) -> "bool" : return _znc_core.CWebSubPage_RequiresAdmin(self) def GetName(self) -> "CString const &" : return _znc_core.CWebSubPage_GetName(self) def GetTitle(self) -> "CString const &" : return _znc_core.CWebSubPage_GetTitle(self) def GetParams(self) -> "VPair const &" : return _znc_core.CWebSubPage_GetParams(self) CWebSubPage_swigregister = _znc_core.CWebSubPage_swigregister CWebSubPage_swigregister(CWebSubPage) class 
CWebSessionMap(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, CWebSessionMap, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, CWebSessionMap, name) __repr__ = _swig_repr def __init__(self, uTTL : 'unsigned int'=5000): this = _znc_core.new_CWebSessionMap(uTTL) try: self.this.append(this) except: self.this = this def FinishUserSessions(self, *args) -> "void" : return _znc_core.CWebSessionMap_FinishUserSessions(self, *args) __swig_destroy__ = _znc_core.delete_CWebSessionMap __del__ = lambda self : None; CWebSessionMap_swigregister = _znc_core.CWebSessionMap_swigregister CWebSessionMap_swigregister(CWebSessionMap) class CWebSock(CHTTPSock): __swig_setmethods__ = {} for _s in [CHTTPSock]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{})) __setattr__ = lambda self, name, value: _swig_setattr(self, CWebSock, name, value) __swig_getmethods__ = {} for _s in [CHTTPSock]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{})) __getattr__ = lambda self, name: _swig_getattr(self, CWebSock, name) __repr__ = _swig_repr PAGE_NOTFOUND = _znc_core.CWebSock_PAGE_NOTFOUND PAGE_PRINT = _znc_core.CWebSock_PAGE_PRINT PAGE_DEFERRED = _znc_core.CWebSock_PAGE_DEFERRED PAGE_DONE = _znc_core.CWebSock_PAGE_DONE def __init__(self): this = _znc_core.new_CWebSock() try: self.this.append(this) except: self.this = this __swig_destroy__ = _znc_core.delete_CWebSock __del__ = lambda self : None; def ForceLogin(self) -> "bool" : return _znc_core.CWebSock_ForceLogin(self) def OnLogin(self, *args) -> "bool" : return _znc_core.CWebSock_OnLogin(self, *args) def OnPageRequest(self, *args) -> "void" : return _znc_core.CWebSock_OnPageRequest(self, *args) def PrintTemplate(self, *args) -> "CWebSock::EPageReqResult" : return _znc_core.CWebSock_PrintTemplate(self, *args) def PrintStaticFile(self, *args) -> "CWebSock::EPageReqResult" : return _znc_core.CWebSock_PrintStaticFile(self, *args) def 
FindTmpl(self, *args) -> "CString" : return _znc_core.CWebSock_FindTmpl(self, *args) def GetSession(self) -> "CSmartPtr< CWebSession >" : return _znc_core.CWebSock_GetSession(self) def GetSockObj(self, *args) -> "Csock *" : return _znc_core.CWebSock_GetSockObj(self, *args) __swig_getmethods__["GetSkinPath"] = lambda x: _znc_core.CWebSock_GetSkinPath if _newclass:GetSkinPath = staticmethod(_znc_core.CWebSock_GetSkinPath) def GetModule(self) -> "CModule *" : return _znc_core.CWebSock_GetModule(self) def GetAvailSkins(self, *args) -> "void" : return _znc_core.CWebSock_GetAvailSkins(self, *args) def GetSkinName(self) -> "CString" : return _znc_core.CWebSock_GetSkinName(self) def GetRequestCookie(self, *args) -> "CString" : return _znc_core.CWebSock_GetRequestCookie(self, *args) def SendCookie(self, *args) -> "bool" : return _znc_core.CWebSock_SendCookie(self, *args) __swig_getmethods__["FinishUserSessions"] = lambda x: _znc_core.CWebSock_FinishUserSessions if _newclass:FinishUserSessions = staticmethod(_znc_core.CWebSock_FinishUserSessions) CWebSock_swigregister = _znc_core.CWebSock_swigregister CWebSock_swigregister(CWebSock) def CWebSock_GetSkinPath(*args) -> "CString" : return _znc_core.CWebSock_GetSkinPath(*args) CWebSock_GetSkinPath = _znc_core.CWebSock_GetSkinPath def CWebSock_FinishUserSessions(*args) -> "void" : return _znc_core.CWebSock_FinishUserSessions(*args) CWebSock_FinishUserSessions = _znc_core.CWebSock_FinishUserSessions class CZNC(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, CZNC, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, CZNC, name) __repr__ = _swig_repr def __init__(self): this = _znc_core.new_CZNC() try: self.this.append(this) except: self.this = this __swig_destroy__ = _znc_core.delete_CZNC __del__ = lambda self : None; ECONFIG_NOTHING = _znc_core.CZNC_ECONFIG_NOTHING ECONFIG_NEED_REHASH = _znc_core.CZNC_ECONFIG_NEED_REHASH ECONFIG_NEED_WRITE = 
_znc_core.CZNC_ECONFIG_NEED_WRITE def DeleteUsers(self) -> "void" : return _znc_core.CZNC_DeleteUsers(self) def Loop(self) -> "void" : return _znc_core.CZNC_Loop(self) def WritePidFile(self, *args) -> "bool" : return _znc_core.CZNC_WritePidFile(self, *args) def DeletePidFile(self) -> "bool" : return _znc_core.CZNC_DeletePidFile(self) def WaitForChildLock(self) -> "bool" : return _znc_core.CZNC_WaitForChildLock(self) def IsHostAllowed(self, *args) -> "bool" : return _znc_core.CZNC_IsHostAllowed(self, *args) def AllowConnectionFrom(self, *args) -> "bool" : return _znc_core.CZNC_AllowConnectionFrom(self, *args) def InitDirs(self, *args) -> "void" : return _znc_core.CZNC_InitDirs(self, *args) def OnBoot(self) -> "bool" : return _znc_core.CZNC_OnBoot(self) def ExpandConfigPath(self, *args) -> "CString" : return _znc_core.CZNC_ExpandConfigPath(self, *args) def WriteNewConfig(self, *args) -> "bool" : return _znc_core.CZNC_WriteNewConfig(self, *args) def WriteConfig(self) -> "bool" : return _znc_core.CZNC_WriteConfig(self) def ParseConfig(self, *args) -> "bool" : return _znc_core.CZNC_ParseConfig(self, *args) def RehashConfig(self, *args) -> "bool" : return _znc_core.CZNC_RehashConfig(self, *args) def BackupConfigOnce(self, *args) -> "void" : return _znc_core.CZNC_BackupConfigOnce(self, *args) __swig_getmethods__["GetVersion"] = lambda x: _znc_core.CZNC_GetVersion if _newclass:GetVersion = staticmethod(_znc_core.CZNC_GetVersion) __swig_getmethods__["GetTag"] = lambda x: _znc_core.CZNC_GetTag if _newclass:GetTag = staticmethod(_znc_core.CZNC_GetTag) __swig_getmethods__["GetCompileOptionsString"] = lambda x: _znc_core.CZNC_GetCompileOptionsString if _newclass:GetCompileOptionsString = staticmethod(_znc_core.CZNC_GetCompileOptionsString) def GetUptime(self) -> "CString" : return _znc_core.CZNC_GetUptime(self) def ClearBindHosts(self) -> "void" : return _znc_core.CZNC_ClearBindHosts(self) def AddBindHost(self, *args) -> "bool" : return _znc_core.CZNC_AddBindHost(self, *args) 
def RemBindHost(self, *args) -> "bool" : return _znc_core.CZNC_RemBindHost(self, *args) def Broadcast(self, *args) -> "void" : return _znc_core.CZNC_Broadcast(self, *args) def AddBytesRead(self, *args) -> "void" : return _znc_core.CZNC_AddBytesRead(self, *args) def AddBytesWritten(self, *args) -> "void" : return _znc_core.CZNC_AddBytesWritten(self, *args) def BytesRead(self) -> "unsigned long long" : return _znc_core.CZNC_BytesRead(self) def BytesWritten(self) -> "unsigned long long" : return _znc_core.CZNC_BytesWritten(self) def GetTrafficStats(self, *args) -> "CZNC::TrafficStatsMap" : return _znc_core.CZNC_GetTrafficStats(self, *args) def AuthUser(self, *args) -> "void" : return _znc_core.CZNC_AuthUser(self, *args) def SetConfigState(self, *args) -> "void" : return _znc_core.CZNC_SetConfigState(self, *args) def SetSkinName(self, *args) -> "void" : return _znc_core.CZNC_SetSkinName(self, *args) def SetStatusPrefix(self, *args) -> "void" : return _znc_core.CZNC_SetStatusPrefix(self, *args) def SetMaxBufferSize(self, *args) -> "void" : return _znc_core.CZNC_SetMaxBufferSize(self, *args) def SetAnonIPLimit(self, *args) -> "void" : return _znc_core.CZNC_SetAnonIPLimit(self, *args) def SetServerThrottle(self, *args) -> "void" : return _znc_core.CZNC_SetServerThrottle(self, *args) def SetProtectWebSessions(self, *args) -> "void" : return _znc_core.CZNC_SetProtectWebSessions(self, *args) def SetConnectDelay(self, *args) -> "void" : return _znc_core.CZNC_SetConnectDelay(self, *args) def GetConfigState(self) -> "enum CZNC::ConfigState" : return _znc_core.CZNC_GetConfigState(self) def GetManager(self, *args) -> "CSockManager const &" : return _znc_core.CZNC_GetManager(self, *args) def GetModules(self) -> "CModules &" : return _znc_core.CZNC_GetModules(self) def FilterUncommonModules(self, *args) -> "size_t" : return _znc_core.CZNC_FilterUncommonModules(self, *args) def GetSkinName(self) -> "CString" : return _znc_core.CZNC_GetSkinName(self) def GetStatusPrefix(self) -> 
"CString const &" : return _znc_core.CZNC_GetStatusPrefix(self) def GetCurPath(self) -> "CString const &" : return _znc_core.CZNC_GetCurPath(self) def GetHomePath(self) -> "CString const &" : return _znc_core.CZNC_GetHomePath(self) def GetZNCPath(self) -> "CString const &" : return _znc_core.CZNC_GetZNCPath(self) def GetConfPath(self, bAllowMkDir : 'bool'=True) -> "CString" : return _znc_core.CZNC_GetConfPath(self, bAllowMkDir) def GetUserPath(self) -> "CString" : return _znc_core.CZNC_GetUserPath(self) def GetModPath(self) -> "CString" : return _znc_core.CZNC_GetModPath(self) def GetPemLocation(self) -> "CString" : return _znc_core.CZNC_GetPemLocation(self) def GetConfigFile(self) -> "CString const &" : return _znc_core.CZNC_GetConfigFile(self) def WritePemFile(self) -> "bool" : return _znc_core.CZNC_WritePemFile(self) def GetBindHosts(self) -> "VCString const &" : return _znc_core.CZNC_GetBindHosts(self) def GetListeners(self) -> "std::vector< CListener *,std::allocator< CListener * > > const &" : return _znc_core.CZNC_GetListeners(self) def TimeStarted(self) -> "time_t" : return _znc_core.CZNC_TimeStarted(self) def GetMaxBufferSize(self) -> "unsigned int" : return _znc_core.CZNC_GetMaxBufferSize(self) def GetAnonIPLimit(self) -> "unsigned int" : return _znc_core.CZNC_GetAnonIPLimit(self) def GetConnectDelay(self) -> "unsigned int" : return _znc_core.CZNC_GetConnectDelay(self) def GetProtectWebSessions(self) -> "bool" : return _znc_core.CZNC_GetProtectWebSessions(self) __swig_getmethods__["Get"] = lambda x: _znc_core.CZNC_Get if _newclass:Get = staticmethod(_znc_core.CZNC_Get) def FindUser(self, *args) -> "CUser *" : return _znc_core.CZNC_FindUser(self, *args) def FindModule(self, *args) -> "CModule *" : return _znc_core.CZNC_FindModule(self, *args) def UpdateModule(self, *args) -> "bool" : return _znc_core.CZNC_UpdateModule(self, *args) def DeleteUser(self, *args) -> "bool" : return _znc_core.CZNC_DeleteUser(self, *args) def AddUser(self, *args) -> "bool" : 
return _znc_core.CZNC_AddUser(self, *args) def GetUserMap(self) -> "std::map< CString,CUser *,std::less< CString >,std::allocator< std::pair< CString const,CUser * > > > const &" : return _znc_core.CZNC_GetUserMap(self) def FindListener(self, *args) -> "CListener *" : return _znc_core.CZNC_FindListener(self, *args) def AddListener(self, *args) -> "bool" : return _znc_core.CZNC_AddListener(self, *args) def DelListener(self, *args) -> "bool" : return _znc_core.CZNC_DelListener(self, *args) def SetMotd(self, *args) -> "void" : return _znc_core.CZNC_SetMotd(self, *args) def AddMotd(self, *args) -> "void" : return _znc_core.CZNC_AddMotd(self, *args) def ClearMotd(self) -> "void" : return _znc_core.CZNC_ClearMotd(self) def GetMotd(self) -> "VCString const &" : return _znc_core.CZNC_GetMotd(self) def AddServerThrottle(self, *args) -> "void" : return _znc_core.CZNC_AddServerThrottle(self, *args) def GetServerThrottle(self, *args) -> "bool" : return _znc_core.CZNC_GetServerThrottle(self, *args) def AddNetworkToQueue(self, *args) -> "void" : return _znc_core.CZNC_AddNetworkToQueue(self, *args) def GetConnectionQueue(self) -> "std::list< CIRCNetwork *,std::allocator< CIRCNetwork * > > &" : return _znc_core.CZNC_GetConnectionQueue(self) def EnableConnectQueue(self) -> "void" : return _znc_core.CZNC_EnableConnectQueue(self) def DisableConnectQueue(self) -> "void" : return _znc_core.CZNC_DisableConnectQueue(self) def PauseConnectQueue(self) -> "void" : return _znc_core.CZNC_PauseConnectQueue(self) def ResumeConnectQueue(self) -> "void" : return _znc_core.CZNC_ResumeConnectQueue(self) def LeakConnectQueueTimer(self, *args) -> "void" : return _znc_core.CZNC_LeakConnectQueueTimer(self, *args) __swig_getmethods__["DumpConfig"] = lambda x: _znc_core.CZNC_DumpConfig if _newclass:DumpConfig = staticmethod(_znc_core.CZNC_DumpConfig) CZNC_swigregister = _znc_core.CZNC_swigregister CZNC_swigregister(CZNC) def CZNC_GetVersion() -> "CString" : return _znc_core.CZNC_GetVersion() 
CZNC_GetVersion = _znc_core.CZNC_GetVersion def CZNC_GetTag(bIncludeVersion : 'bool'=True, bHTML : 'bool'=False) -> "CString" : return _znc_core.CZNC_GetTag(bIncludeVersion, bHTML) CZNC_GetTag = _znc_core.CZNC_GetTag def CZNC_GetCompileOptionsString() ->
vul_lineno) except Exception as e: logger.debug(traceback.format_exc()) def analysis_functioncall(node, back_node, vul_function, vul_lineno): """ 调用FunctionCall-->判断调用Function是否敏感-->get params获取所有参数-->开始递归判断 :param node: :param back_node: :param vul_function: :param vul_lineno :return: """ global scan_results try: if node.name == vul_function and int(node.lineno) == int(vul_lineno): # 定位到敏感函数 for param in node.params: if isinstance(param.node, php.Variable): analysis_variable_node(param.node, back_node, vul_function, vul_lineno) if isinstance(param.node, php.FunctionCall): analysis_functioncall_node(param.node, back_node, vul_function, vul_lineno) if isinstance(param.node, php.BinaryOp): analysis_binaryop_node(param.node, back_node, vul_function, vul_lineno) if isinstance(param.node, php.ArrayOffset): analysis_arrayoffset_node(param.node, vul_function, vul_lineno) except Exception as e: logger.debug(e) def analysis_binaryop_node(node, back_node, vul_function, vul_lineno, function_params=None, file_path=None): """ 处理BinaryOp类型节点-->取出参数-->回溯判断参数是否可控-->输出结果 :param file_path: :param node: :param back_node: :param vul_function: :param vul_lineno: :param function_params: :return: """ logger.debug('[AST] vul_function:{v}'.format(v=vul_function)) params = get_binaryop_params(node) params = export_list(params, export_params=[]) for param in params: param = php.Variable(param) param_lineno = node.lineno # is_co, cp, expr_lineno = parameters_back(param, back_node, function_params) if file_path is not None: is_co, cp, expr_lineno, chain = anlysis_params(param, file_path, param_lineno, vul_function=vul_function) else: count = 0 is_co, cp, expr_lineno = deep_parameters_back(node, back_node, function_params, count, file_path, vul_function=vul_function) set_scan_results(is_co, cp, expr_lineno, vul_function, param, vul_lineno) def analysis_objectproperry_node(node, back_node, vul_function, vul_lineno, function_params=None, file_path=None): """ 
处理_objectproperry类型节点-->取出参数-->回溯判断参数是否可控-->输出结果 :param file_path: :param node: :param back_node: :param vul_function: :param vul_lineno: :param function_params: :return: """ logger.debug('[AST] vul_function:{v}'.format(v=vul_function)) param = node param_lineno = node.lineno # is_co, cp, expr_lineno = parameters_back(param, back_node, function_params) if file_path is not None: # with open(file_path, 'r') as fi: # fi = codecs.open(file_path, 'r', encoding='utf-8', errors='ignore') # code_content = fi.read() is_co, cp, expr_lineno, chain = anlysis_params(param, file_path, param_lineno, vul_function=vul_function) else: count = 0 is_co, cp, expr_lineno = deep_parameters_back(node, back_node, function_params, count, vul_function=vul_function) set_scan_results(is_co, cp, expr_lineno, vul_function, param, vul_lineno) def analysis_arrayoffset_node(node, vul_function, vul_lineno): """ 处理ArrayOffset类型节点-->取出参数-->回溯判断参数是否可控-->输出结果 :param node: :param vul_function: :param vul_lineno: :return: """ logger.debug('[AST] vul_function:{v}'.format(v=vul_function)) param = get_node_name(node.node) expr_lineno = node.lineno is_co, cp = is_controllable(param) set_scan_results(is_co, cp, expr_lineno, vul_function, param, vul_lineno) def analysis_functioncall_node(node, back_node, vul_function, vul_lineno, function_params=None, file_path=None): """ 处理FunctionCall类型节点-->取出参数-->回溯判断参数是否可控-->输出结果 :param file_path: :param node: :param back_node: :param vul_function: :param vul_lineno: :param function_params: :return: """ logger.debug('[AST] vul_function:{v}'.format(v=vul_function)) params = get_all_params(node.params) function_name = get_node_name(node) if is_repair(function_name): logger.info("[AST] Function {} is repair func. 
fail control back.".format(function_name)) return False for param in params: param = php.Variable(param) param_lineno = node.lineno if file_path is not None: is_co, cp, expr_lineno, chain = anlysis_params(param, file_path, param_lineno, vul_function=vul_function) else: count = 0 is_co, cp, expr_lineno = deep_parameters_back(node, back_node, function_params, count, file_path, vul_function=vul_function) set_scan_results(is_co, cp, expr_lineno, vul_function, param, vul_lineno) def analysis_variable_node(node, back_node, vul_function, vul_lineno, function_params=None, file_path=None): """ 处理Variable类型节点-->取出参数-->回溯判断参数是否可控-->输出结果 :param file_path: :param node: :param back_node: :param vul_function: :param vul_lineno: :param function_params: :return: """ logger.debug('[AST] vul_function:{v}'.format(v=vul_function)) param = get_node_name(node) param_lineno = node.lineno if file_path is not None: is_co, cp, expr_lineno, chain = anlysis_params(param, file_path, param_lineno, vul_function=vul_function) else: count = 0 is_co, cp, expr_lineno = deep_parameters_back(node, back_node, function_params, count, file_path, vul_function=vul_function) set_scan_results(is_co, cp, expr_lineno, vul_function, param, vul_lineno) def analysis_ternaryop_node(node, back_node, vul_function, vul_lineno, function_params=None, file_path=None, repair_functions=[]): """ 处理三元提交判断语句,回溯双变量 :param node: :param back_node: :param vul_function: :param vul_lineno: :param function_params: :param file_path: :return: """ logger.debug('[AST] vul_function:{v}'.format(v=vul_function)) param = node.expr node1 = node.iftrue node2 = node.iffalse if type(node1) is int: node1 = php.Variable(node1) if type(node2) is int: node2 = php.Variable(node2) logger.debug('[AST] vul_param1: {}, vul_param2: {}'.format(node1, node2)) count = 0 is_co, cp, expr_lineno = deep_parameters_back(node1, back_node, function_params, count, file_path) set_scan_results(is_co, cp, expr_lineno, vul_function, param, vul_lineno) is_co, cp, 
expr_lineno = deep_parameters_back(node2, back_node, function_params, count, file_path) set_scan_results(is_co, cp, expr_lineno, vul_function, param, vul_lineno) def analysis_if_else(node, back_node, vul_function, vul_lineno, function_params=None, file_path=None): nodes = [] if isinstance(node.node, php.Block): # if语句中的sink点以及变量 analysis(node.node.nodes, vul_function, back_node, vul_lineno, file_path, function_params) else: analysis([node.node], vul_function, back_node, vul_lineno, file_path, function_params) if node.else_ is not None: # else语句中的sink点以及变量 if isinstance(node.else_.node, php.Block): analysis(node.else_.node.nodes, vul_function, back_node, vul_lineno, file_path, function_params) else: analysis([node.node], vul_function, back_node, vul_lineno, file_path, function_params) if len(node.elseifs) != 0: # elseif语句中的sink点以及变量 for i_node in node.elseifs: if i_node.node is not None: if isinstance(i_node.node, php.Block): analysis(i_node.node.nodes, vul_function, back_node, vul_lineno, file_path, function_params) else: nodes.append(i_node.node) analysis(nodes, vul_function, back_node, vul_lineno, file_path, function_params) def analysis_try(node, back_node, vul_function, vul_lineno, function_params=None, file_path=None): # for try analysis(node.nodes, vul_function, back_node, vul_lineno, file_path, function_params) if node.catches is not None: for catch in node.catches: analysis(catch.nodes, vul_function, back_node, vul_lineno, file_path, function_params) if getattr(node, 'finally') is not None: analysis(getattr(node, 'finally').nodes, vul_function, back_node, vul_lineno, file_path, function_params) def analysis_echo_print(node, back_node, vul_function, vul_lineno, function_params=None, file_path=None): """ 处理echo/print类型节点-->判断节点类型-->不同If分支回溯判断参数是否可控-->输出结果 :param file_path: :param node: :param back_node: :param vul_function: :param vul_lineno: :param function_params: :return: """ global scan_results if int(vul_lineno) == int(node.lineno): if isinstance(node, 
php.Print): if isinstance(node.node, php.FunctionCall) or isinstance(node.node, php.MethodCall) or isinstance(node.node, php.StaticMethodCall): analysis_functioncall_node(node.node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.node, php.Variable) and vul_function == 'print': # 直接输出变量信息 analysis_variable_node(node.node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.node, php.BinaryOp) and vul_function == 'print': analysis_binaryop_node(node.node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.node, php.ArrayOffset) and vul_function == 'print': analysis_arrayoffset_node(node.node, vul_function, vul_lineno) if isinstance(node.node, php.TernaryOp) and vul_function == 'print': analysis_ternaryop_node(node.node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) elif isinstance(node, php.Echo): for k_node in node.nodes: if isinstance(k_node, php.FunctionCall) or isinstance(k_node, php.MethodCall) or isinstance( k_node, php.StaticMethodCall): # 判断节点中是否有函数调用节点 analysis_functioncall_node(k_node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) # 将含有函数调用的节点进行分析 if isinstance(k_node, php.Variable) and vul_function == 'echo': analysis_variable_node(k_node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(k_node, php.BinaryOp) and vul_function == 'echo': analysis_binaryop_node(k_node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(k_node, php.ArrayOffset) and vul_function == 'echo': analysis_arrayoffset_node(k_node, vul_function, vul_lineno) if isinstance(k_node, php.TernaryOp) and vul_function == 'echo': analysis_ternaryop_node(k_node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) def analysis_return(node, back_node, vul_function, vul_lineno, function_params=None, 
file_path=None): """ 处理return节点 :param file_path: :param node: :param back_node: :param vul_function: :param vul_lineno: :param function_params: :return: """ global scan_results if int(vul_lineno) == int(node.lineno) and isinstance(node, php.Return): if isinstance(node.node, php.FunctionCall) or isinstance(node.node, php.MethodCall) or isinstance(node.node, php.StaticMethodCall): analysis_functioncall_node(node.node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.node, php.Variable): # 直接输出变量信息 analysis_variable_node(node.node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.node, php.BinaryOp): analysis_binaryop_node(node.node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.node, php.ArrayOffset): analysis_arrayoffset_node(node.node, vul_function, vul_lineno) if isinstance(node.node, php.TernaryOp): analysis_ternaryop_node(node.node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.node, php.Silence): nodes = get_silence_params(node.node) analysis(nodes, vul_function, back_node, vul_lineno, file_path) def analysis_eval(node, vul_function, back_node, vul_lineno, function_params=None, file_path=None): """ 处理eval类型节点-->判断节点类型-->不同If分支回溯判断参数是否可控-->输出结果 :param file_path: :param node: :param vul_function: :param back_node: :param vul_lineno: :param function_params: :return: """ global scan_results if vul_function == 'eval' and int(node.lineno) == int(vul_lineno): if isinstance(node.expr, php.Variable): analysis_variable_node(node.expr, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.expr, php.FunctionCall) or isinstance(node.expr, php.MethodCall) or isinstance(node.expr, php.StaticMethodCall): analysis_functioncall_node(node.expr, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.expr, php.BinaryOp): 
analysis_binaryop_node(node.expr, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.expr, php.ArrayOffset): analysis_arrayoffset_node(node.expr, vul_function, vul_lineno) if isinstance(node.expr, php.ObjectProperty): analysis_objectproperry_node(node.expr, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.expr, php.Silence): nodes = get_silence_params(node.expr) analysis(nodes, vul_function, back_node, vul_lineno, file_path) def analysis_file_inclusion(node, vul_function, back_node, vul_lineno, function_params=None, file_path=None): """ 处理include/require类型节点-->判断节点类型-->不同If分支回溯判断参数是否可控-->输出结果 :param file_path: :param node: :param vul_function: :param back_node: :param vul_lineno: :param function_params: :return: """ global scan_results include_fs = ['include', 'include_once', 'require', 'require_once'] if vul_function in include_fs and int(node.lineno) == int(vul_lineno): logger.debug('[AST-INCLUDE] {l}-->{r}'.format(l=vul_function, r=vul_lineno)) if isinstance(node.expr, php.Variable): analysis_variable_node(node.expr, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.expr, php.FunctionCall) or isinstance(node.expr, php.MethodCall) or isinstance(node.expr, php.StaticMethodCall): analysis_functioncall_node(node.expr, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.expr, php.BinaryOp): analysis_binaryop_node(node.expr, back_node, vul_function, vul_lineno, function_params, file_path=file_path) if isinstance(node.expr, php.ArrayOffset): analysis_arrayoffset_node(node.expr, vul_function, vul_lineno) if isinstance(node.expr, php.ObjectProperty): analysis_objectproperry_node(node.expr, back_node, vul_function, vul_lineno, function_params, file_path=file_path) def set_scan_results(is_co, cp, expr_lineno, sink, param, vul_lineno): """ 获取结果信息-->输出结果 :param is_co: :param cp: :param expr_lineno: 
:param sink: :param param: :param vul_lineno: :return: """ results = [] global scan_results, scan_chain result = { 'code': is_co, 'source': cp, 'source_lineno': expr_lineno, 'sink': sink, 'sink_param:': param, 'sink_lineno': vul_lineno, "chain": scan_chain, } if result['code'] > 0: # 查出来漏洞结果添加到结果信息中 results.append(result) scan_results += results def analysis(nodes, vul_function, back_node, vul_lineno, file_path=None, function_params=None): """ 调用FunctionCall-->analysis_functioncall分析调用函数是否敏感 :param nodes: 所有节点 :param vul_function: 要判断的敏感函数名 :param back_node: 各种语法结构里面的语句 :param vul_lineo: 漏洞函数所在行号 :param function_params: 自定义函数的所有参数列表 :param file_path: 当前分析文件的地址 :return: """ buffer_ = [] for node in nodes: # 检查line范围,以快速锁定参数 if vul_lineno < node.lineno: break if isinstance(node, php.FunctionCall) or isinstance(node, php.MethodCall) or isinstance(node, php.StaticMethodCall): # 函数直接调用,不进行赋值 anlysis_function(node, back_node, vul_function, function_params, vul_lineno, file_path=file_path) elif isinstance(node, php.Assignment): # 函数调用在赋值表达式中 if isinstance(node.expr, php.FunctionCall) or isinstance(node.expr, php.MethodCall) or isinstance(node.expr, php.StaticMethodCall): anlysis_function(node.expr, back_node, vul_function, function_params, vul_lineno, file_path=file_path) if isinstance(node.expr, php.Eval): analysis_eval(node.expr, vul_function, back_node, vul_lineno, function_params, file_path=file_path) if isinstance(node.expr, php.Silence): buffer_.append(node.expr) analysis(buffer_, vul_function, back_node, vul_lineno, file_path, function_params) elif isinstance(node, php.Return): analysis_return(node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) elif isinstance(node, php.Print) or isinstance(node, php.Echo): analysis_echo_print(node, back_node, vul_function, vul_lineno, function_params, file_path=file_path) elif isinstance(node, php.Silence): nodes = get_silence_params(node) analysis(nodes,
from machine import I2C
import utime
import math


class MinIMU_v5:
    """
    MicroPython I2C driver for a MinIMU v5 9-DOF board.

    The register map matches ST's LSM6DS3 (accel/gyro) and LIS3MDL (magnetometer)
    parts — presumably the Pololu MinIMU-9 v5; TODO confirm against the board docs.

    Init function
    Valid values for aFullScale are 2, 4, 8, and 16 [g]
                     gFullScale are 125, 245, 500, 1000, and 2000 [dps]
                     mFullScale are 4, 8, 12, and 16 [guass]
                     gOdr are 1 -> 8 (13Hz -> 1.666kHz)
                     aOdr (Hz) are 1 -> 10 (13Hz -> 6.666kHz)
                     mOdr (Hz) are 0 -> 7 (0.625Hz -> 80Hz)
    """
    def __init__(self, i2cBusNum = 0, aFullScale = 2, gFullScale = 500, mFullScale = 4, i2cFreq = 400000, gOdr = 8, aOdr=8, mOdr=4):
        # Accelerometer and Gyro Register addresses
        self.Accel_Gyro_REG = dict(
            FUNC_CFG_ACCESS = 0x01,
            FIFO_CTRL1 = 0x06, FIFO_CTRL2 = 0x07, FIFO_CTRL3 = 0x08, FIFO_CTRL4 = 0x09,
            FIFO_CTRL5 = 0x0A, ORIENT_CFG_G = 0x0B,
            INT1_CTRL = 0x0D, INT2_CTRL = 0x0E, WHO_AM_I = 0x0F,
            CTRL1_XL = 0x10, CTRL2_G = 0x11, CTRL3_C = 0x12, CTRL4_C = 0x13, CTRL5_C = 0x14,
            CTRL6_C = 0x15, CTRL7_G = 0x16, CTRL8_XL = 0x17, CTRL9_XL = 0x18, CTRL10_C = 0x19,
            WAKE_UP_SRC = 0x1B, TAP_SRC = 0x1C, D6D_SRC = 0x1D, STATUS_REG = 0x1E,
            OUT_TEMP_L = 0x20, OUT_TEMP_H = 0x21,
            OUTX_L_G = 0x22, OUTX_H_G = 0x23, OUTY_L_G = 0x24, OUTY_H_G = 0x25,
            OUTZ_L_G = 0x26, OUTZ_H_G = 0x27,
            OUTX_L_XL = 0x28, OUTX_H_XL = 0x29, OUTY_L_XL = 0x2A, OUTY_H_XL = 0x2B,
            OUTZ_L_XL = 0x2C, OUTZ_H_XL = 0x2D,
            FIFO_STATUS1 = 0x3A, FIFO_STATUS2 = 0x3B, FIFO_STATUS3 = 0x3C, FIFO_STATUS4 = 0x3D,
            FIFO_DATA_OUT_L = 0x3E, FIFO_DATA_OUT_H = 0x3F,
            TIMESTAMP0_REG = 0x40, TIMESTAMP1_REG = 0x41, TIMESTAMP2_REG = 0x42,
            STEP_TIMESTAMP_L = 0x49, STEP_TIMESTAMP_H = 0x4A,
            STEP_COUNTER_L = 0x4B, STEP_COUNTER_H = 0x4C,
            FUNC_SRC = 0x53,
            TAP_CFG = 0x58, TAP_THS_6D = 0x59, INT_DUR2 = 0x5A, WAKE_UP_THS = 0x5B,
            WAKE_UP_DUR = 0x5C, FREE_FALL = 0x5D, MD1_CFG = 0x5E, MD2_CFG = 0x5F)

        # Magnemometer addresses
        self.Mag_REG = dict(
            WHO_AM_I = 0x0F,
            CTRL_REG1 = 0x20, CTRL_REG2 = 0x21, CTRL_REG3 = 0x22, CTRL_REG4 = 0x23, CTRL_REG5 = 0x24,
            STATUS_REG = 0x27,
            OUT_X_L = 0x28, OUT_X_H = 0x29, OUT_Y_L = 0x2A, OUT_Y_H = 0x2B,
            OUT_Z_L = 0x2C, OUT_Z_H = 0x2D,
            TEMP_OUT_L = 0x2E, TEMP_OUT_H = 0x2F,
            INT_CFG = 0x30, INT_SRC = 0x31, INT_THS_L = 0x32, INT_THS_H = 0x33)

        # Unit scales — set by enableAccel_Gyro()/enableMag() below.
        self.aScale = 0  # default: aScale = 2g/2^15,
        self.gScale = 0  # default: gScale = 500dps/2^15
        self.mScale = 0  # default: mScale = 4guass/2^15

        # Variables for updateAngle and updateYaw (defined later in the file —
        # not visible in this chunk).
        self.prevAngle = [0,0,0]  # x, y, z (roll, pitch, yaw)
        self.prevGAngle = [0, 0, 0]
        self.prevYaw = 0
        self.tau = 0.04  # Want this roughly 10x the dt
        self.lastTimeAngle = [0]
        self.lastTimeYaw = [0]

        # i2c addresses
        self.mag = 0x1e  # 0011110 (from docs)
        self.accel_gyro = 0x6b

        # Connect i2c bus
        self.i2c = I2C(i2cBusNum, freq = i2cFreq)

        # Enable Mag, Accel, and Gyro
        self.enableMag(mFullScale, mOdr)
        self.enableAccel_Gyro(aFullScale, gFullScale, aOdr, gOdr)

    """Setup the needed registers for the Accelerometer and Gyro"""
    def enableAccel_Gyro(self, aFullScale, gFullScale, aOdr, gOdr):
        # Accelerometer
        g = 9.806  # the gravitational constant for a latitude of 45 degrees at sea level is 9.80665
        # g for altitude is g(6,371.0088 km / (6,371.0088 km + altitude))^2
        # 9.80600 is a good approximation

        # Output Data Rate (upper nibble of CTRL1_XL)
        b0_3 = aOdr

        # full-scale selection; 2**15 = 32768 (raw reading is a signed 16-bit value).
        # Bit patterns follow the FS_XL field encoding (16g is '01', not '11').
        if aFullScale == 4:
            b4_5 = 0b10
            self.aScale = 4*g/32768
        elif aFullScale == 8:
            b4_5 = 0b11
            self.aScale = 8*g/32768
        elif aFullScale == 16:
            b4_5 = 0b01
            self.aScale = 16*g/32768
        else:  # default to 2g if no valid value is given
            b4_5 = 0b00
            self.aScale = 2*g/32768

        b6_7 = 0b00  # 400Hz anti-aliasing filter bandwidth

        # write CTRL1_XL
        self.i2c.writeto_mem(self.accel_gyro, self.Accel_Gyro_REG['CTRL1_XL'], bytes([b0_3 << 4 | b4_5 << 2 | b6_7]))

        # Gyro
        # Output Data Rate (upper nibble of CTRL2_G)
        b0_3 = gOdr

        # full-scale selection
        # See table 3 page 15 of the data sheet for the gScale (converted from mdps to dps)
        if gFullScale == 245:
            b4_6 = 0b000
            self.gScale = 0.00875
        elif gFullScale == 1000:
            b4_6 = 0b100
            self.gScale = 0.035
        elif gFullScale == 2000:
            b4_6 = 0b110
            self.gScale = 0.07
        elif gFullScale == 125:
            b4_6 = 0b001
            self.gScale = 0.004375
        else:  # default to 500 dps if no valid value is given
            b4_6 = 0b010
            self.gScale = 0.0175

        # Write CTRL2_G
        self.i2c.writeto_mem(self.accel_gyro, self.Accel_Gyro_REG['CTRL2_G'], bytes([b0_3 << 4 | b4_6 << 1]))

        # Accelerometer and Gyro common control
        # default: 0b00000100 (IF_INC: auto-increment register address on multi-byte access)
        self.i2c.writeto_mem(self.accel_gyro, self.Accel_Gyro_REG['CTRL3_C'], bytes([0b00000100]))

    """Setup the needed registers for the Magnetometer"""
    def enableMag(self, mFullScale, mOdr):
        # Magnemometer
        # default: 0b01110000
        # Temp off, High-Performance, ODR = 300Hz, Self_test off
        bt = 0 << 7
        bperf = 0b10 << 5
        bOdr = mOdr << 2
        bst = 0
        self.i2c.writeto_mem(self.mag, self.Mag_REG['CTRL_REG1'], bytes([bt | bperf | bOdr | bst]))

        # default: 0b00000000
        # +/-4guass, reboot off, soft_reset off
        # full-scale selection; 2**15 = 32768
        if mFullScale == 8:
            b1_2 = 0b0100000  # '01'
            self.mScale = 8.0/32768
        elif mFullScale == 12:
            b1_2 = 0b1000000
            self.mScale = 12.0/32768
        elif mFullScale == 16:
            b1_2 = 0b1100000
            self.mScale = 16.0/32768
        else:  # default to 4 guass if no valid value is given
            b1_2 = 0b0000000
            self.mScale = 4.0/32768
        rebootMem = 0 << 3  # Reboot memory content
        softReset = 0 << 2  # Configuration registers and user register reset function
        self.i2c.writeto_mem(self.mag, self.Mag_REG['CTRL_REG2'], bytes([0 | b1_2 | rebootMem | softReset]))

        # default: 0b00000011
        # Low-power off, default SPI, continous convo mode
        # NOTE(review): writes 0b00000000 (continuous-conversion), not the 0b00000011
        # mentioned above — the value, not the comment, is what takes effect.
        self.i2c.writeto_mem(self.mag, self.Mag_REG['CTRL_REG3'], bytes([0b00000000]))

        # default: 0b00000000
        # High-Performance, data LSb at lower address
        self.i2c.writeto_mem(self.mag, self.Mag_REG['CTRL_REG4'], bytes([0b00001000]))

    """Read values from accelerometer and scale them to m/s^2, returns 0's if unable to read"""
    def readAccelerometer(self):
        # check to see if the status register shows that there is data to read.
        # NOTE(review): this sleeps 1us once and then reads regardless — it does not
        # re-poll, so a stale sample may be returned. Confirm whether a wait loop
        # was intended.
        if int.from_bytes(self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['STATUS_REG'], 1), "big") & 0b1 == 0:
            utime.sleep_us(1)

        # Reading low and high 8-bit register and converting the 16-bit two's complement number to decimal.
        # Read the registers as quickly as possible.
        try:
            AXL = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTX_L_XL'], 1)
            AXH = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTX_H_XL'], 1)
            AYL = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTY_L_XL'], 1)
            AYH = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTY_H_XL'], 1)
            AZL = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTZ_L_XL'], 1)
            AZH = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTZ_H_XL'], 1)
        except:
            # NOTE(review): bare except, and the failure path returns a *tuple*
            # (0, 0, 0) while success returns a *list* — callers unpacking by index
            # work either way, but the inconsistency is worth confirming/fixing.
            print("readAccelerometer Error!")
            return 0, 0, 0

        # byteToNumber is defined elsewhere in this file (not visible in this chunk);
        # presumably combines low/high bytes and sign-extends — TODO confirm.
        AX = self.byteToNumber(int.from_bytes(AXL, "big"), int.from_bytes(AXH, "big"))
        AY = self.byteToNumber(int.from_bytes(AYL, "big"), int.from_bytes(AYH, "big"))
        AZ = self.byteToNumber(int.from_bytes(AZL, "big"), int.from_bytes(AZH, "big"))

        # Scaling the decimal number to understandable units (m/s^2)
        AX *= self.aScale; AY *= self.aScale; AZ *= self.aScale;

        return [AX, AY, AZ]

    """Read values from gyro and scale them to dps, returns 0's if unable to read"""
    def readGyro(self):
        # check to see if the status register shows that there is data to read.
        # (same one-shot 1us wait caveat as readAccelerometer — bit 1 is gyro data-ready)
        if int.from_bytes(self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['STATUS_REG'], 1), "big") & 0b10 == 0:
            utime.sleep_us(1)

        # Reading low and high 8-bit register and converting the 16-bit two's complement number to decimal.
        # Read the registers as quickly as possible.
        try:
            GXL = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTX_L_G'], 1)
            GXH = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTX_H_G'], 1)
            GYL = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTY_L_G'], 1)
            GYH = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTY_H_G'], 1)
            GZL = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTZ_L_G'], 1)
            GZH = self.i2c.readfrom_mem(self.accel_gyro, self.Accel_Gyro_REG['OUTZ_H_G'], 1)
        except:
            print("readGyro Error!")
            return 0, 0, 0

        GX = self.byteToNumber(int.from_bytes(GXL, "big"), int.from_bytes(GXH, "big"))
        GY = self.byteToNumber(int.from_bytes(GYL, "big"), int.from_bytes(GYH, "big"))
        GZ = self.byteToNumber(int.from_bytes(GZL, "big"), int.from_bytes(GZH, "big"))

        # Scaling the decimal number to understandable units (dps)
        GX *= self.gScale; GY *= self.gScale; GZ *= self.gScale;

        return [GX, GY, GZ]

    """Read values from magnetometer and scale them to guass, returns 0's if unable to read"""
    def readMagnetometer(self):
        # check to see if the status register shows that there is data to read.
        # (bit 3 = ZYXDA on the magnetometer status register)
        if int.from_bytes(self.i2c.readfrom_mem(self.mag, self.Mag_REG['STATUS_REG'], 1), "big") & 0b1000 == 0:
            utime.sleep_us(1)

        # Reading low and high 8-bit register and converting the 16-bit two's complement number to decimal.
        # Read the registers as quickly as possible.
        try:
            MXL = self.i2c.readfrom_mem(self.mag, self.Mag_REG['OUT_X_L'], 1)
            MXH = self.i2c.readfrom_mem(self.mag, self.Mag_REG['OUT_X_H'], 1)
            MYL = self.i2c.readfrom_mem(self.mag, self.Mag_REG['OUT_Y_L'], 1)
            MYH = self.i2c.readfrom_mem(self.mag, self.Mag_REG['OUT_Y_H'], 1)
            MZL = self.i2c.readfrom_mem(self.mag, self.Mag_REG['OUT_Z_L'], 1)
            MZH = self.i2c.readfrom_mem(self.mag, self.Mag_REG['OUT_Z_H'], 1)
        except:
            print("readMagnetometer Error!")
            return 0, 0, 0

        MX = self.byteToNumber(int.from_bytes(MXL, "big"), int.from_bytes(MXH, "big"))
        MY = self.byteToNumber(int.from_bytes(MYL, "big"), int.from_bytes(MYH, "big"))
        MZ = self.byteToNumber(int.from_bytes(MZL, "big"), int.from_bytes(MZH, "big"))

        # Scaling the decimal number to understandable units (gauss)
        # NOTE(review): method truncated at the chunk boundary after "MZ" — the
        # remainder (presumably `MZ *= self.mScale` and the return) is not visible.
        MX *= self.mScale; MY *= self.mScale; MZ
<filename>lambda/lambda_function.py
# -*- coding: utf-8 -*-

# This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK for Python.
# Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
# session persistence, api calls, and more.
# This sample is built using the handler classes approach in skill builder.
#
# Rock-paper-scissors game skill (pt-BR). Per-user state (apelido/frase/time/arma/
# kills/deaths) is persisted in DynamoDB table 'dynamo_revolucao', keyed by the
# Alexa user id.
import logging
import random

import ask_sdk_core.utils as ask_utils
import boto3

from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_core.attributes_manager import AbstractPersistenceAdapter
from ask_sdk_core.utils.request_util import get_slot
from ask_sdk_model import Response
from ask_sdk_dynamodb.partition_keygen import user_id_partition_keygen
# NOTE(review): user_id_partition_keygen is imported a second time here — harmless
# but redundant.
from ask_sdk_dynamodb.adapter import DynamoDbAdapter, user_id_partition_keygen
from boto3.dynamodb.conditions import Key, Attr

from utils import get_story_frase

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# NOTE(review): RoleArn/RoleSessionName are empty placeholders — assume_role will
# fail until they are configured. Consider the Lambda execution role instead of
# embedding STS credentials at module import time.
boto_sts = boto3.client('sts')
stsresponse = boto_sts.assume_role(
    RoleArn="",
    RoleSessionName=''
)

# Save the details from assumed role into vars
newsession_id = stsresponse["Credentials"]["AccessKeyId"]
newsession_key = stsresponse["Credentials"]["SecretAccessKey"]
newsession_token = stsresponse["Credentials"]["SessionToken"]

ddb2 = boto3.resource(
    'dynamodb',
    region_name='sa-east-1',
    aws_access_key_id=newsession_id,
    aws_secret_access_key=newsession_key,
    aws_session_token=newsession_token
)

# Persistence adapter: one item per Alexa user id.
db = DynamoDbAdapter(table_name="dynamo_revolucao",partition_key_name="id",partition_keygen=user_id_partition_keygen,dynamodb_resource=ddb2)


class LaunchRequestHandler(AbstractRequestHandler):
    """Handler for Skill Launch.

    Also reused as the fallback response by other handlers (they call
    LaunchRequestHandler().handle(...) after updating/failing state), so the
    onboarding flow below doubles as the main menu.
    """
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_request_type("LaunchRequest")(handler_input)

    def handle(self, handler_input, ):
        # type: (HandlerInput) -> Response
        speak_output = ""
        try:  # checar se usuario existe (check whether the user already has a record)
            request_envelope = handler_input.request_envelope
            user = db.get_attributes(request_envelope=request_envelope)
            if not user.get('apelido'):
                # First contact: initialise counters and ask for a nickname.
                user['kills'] = 0
                user['deaths'] = 0
                db.save_attributes(request_envelope=request_envelope,attributes=user)
                speak_output = speak_output + """
                <speak>
                Pelo jeito você não tem uma conta ainda. Vamos começar com seu apelido <break/>
                para cadastrar um apelido fale <break/> meu apelido é e fale seu apelido <break/>
                </speak>
                """
            elif not user.get('frase'):
                speak_output = speak_output + """ <break/>Ok {}. Agora cadestre agora sa frase de vitória basta falar <break/>
                Minha frase de vitória é <break/> então diga sua frase. Essa frase será apresentada para o adiversario
                perdedor. """.format(user.get('apelido'))
            elif not user.get('time'):
                speak_output = speak_output + """ <break/>Tudo certo. Agora escolha entre time coxinha ou mortadela."""
            else:
                # Fully onboarded: main menu.
                speak_output = speak_output + "<speak>Ola {}. Vamos jogar, para atacar escolha pedra, papel ou tesoura. Caso queira ver a historia diga ver historia.</speak>".format(user.get('apelido'))
        except Exception as e:
            # NOTE(review): broad catch hides the real failure (e.g. DynamoDB errors).
            speak_output = "ocorreu um erro"

        return (
            handler_input.response_builder
                .speak(speak_output)
                .ask(speak_output)
                .response
        )


class CreateName(AbstractRequestHandler):
    """Handler for the CreateName intent: stores the 'apelido' slot on the user."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("CreateName")(handler_input)

    def handle(self, handler_input):
        apelido = ask_utils.get_slot_value(handler_input=handler_input, slot_name="apelido")
        speak_output = "Seu apelido agora é " + apelido
        request_envelope = handler_input.request_envelope
        try:  # checar se usuario existe
            user = db.get_attributes(request_envelope=request_envelope)
            user['apelido'] = apelido
            db.save_attributes(request_envelope=request_envelope,attributes=user)
            if not user.get('time'):
                # NOTE(review): self-assignment is a no-op — an extra prompt was
                # probably intended here.
                speak_output = speak_output
        except Exception as e:
            speak_output = "Ocorreu um problema."
        # Delegates the spoken response to the launch flow (which re-reads state).
        return LaunchRequestHandler().handle(handler_input)
        #return handler_input.response_builder.speak(speak_output).ask(speak_output).response


class CreateVictoryPhrase(AbstractRequestHandler):
    """Handler for the VictoryPhrase intent: stores the 'frase' slot on the user."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("VictoryPhrase")(handler_input)

    def handle(self, handler_input):
        try:  # checar se usuario existe
            frase = ask_utils.get_slot_value(handler_input=handler_input, slot_name="frase")
            request_envelope = handler_input.request_envelope
            speak_output = "Sua frase de vitória é " + frase
            user = db.get_attributes(request_envelope=request_envelope)
            user['frase'] = frase
            db.save_attributes(request_envelope=request_envelope,attributes=user)
        except Exception as e:
            #speak_output = str(e)
            speak_output = "Ocorreu um problema."
        return LaunchRequestHandler().handle(handler_input)


class CoxinhaTeam(AbstractRequestHandler):
    """Handler for the Coxinha intent: puts the user on team 'coxinha'."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("Coxinha")(handler_input)

    def handle(self, handler_input):
        try:  # checar se usuario existe
            speak_output = "Você agora é do time coxinha."
            request_envelope = handler_input.request_envelope
            user = db.get_attributes(request_envelope=request_envelope)
            user['time'] = "coxinha"
            db.save_attributes(request_envelope=request_envelope,attributes=user)
            if not user.get('frase'):
                # NOTE(review): appends the empty string — no-op; MortadelaTeam has
                # the real prompt here. Confirm this asymmetry is intentional.
                speak_output = speak_output + ""
        except Exception as e:
            speak_output = "Ocorreu um problema."
        return LaunchRequestHandler().handle(handler_input)
        #return handler_input.response_builder.speak(speak_output).ask(speak_output).response


class MortadelaTeam(AbstractRequestHandler):
    """Handler for the Mortadela intent: puts the user on team 'mortadela'."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("Mortadela")(handler_input)

    def handle(self, handler_input):
        try:  # checar se usuario existe
            speak_output = "Você agora é do time mortadela"
            request_envelope = handler_input.request_envelope
            user = db.get_attributes(request_envelope=request_envelope)
            user['time'] = "mortadela"
            db.save_attributes(request_envelope=request_envelope,attributes=user)
            if not user.get('frase'):
                speak_output = speak_output + "<break/> Agora cadestre agora sua frase de vitória basta falar <break/> Minha frase de vitória é <break/> então diga sua frase. Essa frase será apresentada para o adiversario perdedor."
        except Exception as e:
            speak_output = "Ocorreu um problema."
            # NOTE(review): this overwrites the friendly message with the raw
            # exception text — leaks internals to the user; likely a leftover debug
            # line (other handlers keep it commented out).
            speak_output = str(e)
        return LaunchRequestHandler().handle(handler_input)
        #return handler_input.response_builder.speak(speak_output).ask(speak_output).response


class AttackHandler(AbstractRequestHandler):
    """Handler for the Attack intent: resolves one rock-paper-scissors round.

    Reads the user's stored weapon ('arma'), scans the table for opponents on the
    opposite team, picks one at random and compares weapons; updates kills/deaths.
    """
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("Attack")(handler_input)

    def handle(self, handler_input):
        try:
            request_envelope = handler_input.request_envelope
            user = db.get_attributes(request_envelope=request_envelope)
            if not user.get('apelido'):
                speak_output = "Você ainda não tem seu apelido cadastrado. Faça isso falando meu apelido é <break/> e então diga seu apelido."
            elif not user.get('frase'):
                speak_output = "Você ainda não tem uma frase cadastrada. Faça isso falando minha frase é <break/> e então diga sua frase."
            elif not user.get('time'):
                speak_output = "Você ainda não esta em nenhum time. Quer ser do time coxinha ou dos mortadelas."
            else:
                # Opponents come from the opposite team.
                time = 'coxinha' if user.get('time') == 'mortadela' else 'mortadela'
                # NOTE(review): user['arma'] raises KeyError if no weapon was chosen
                # yet; the broad except below then falls back to the launch flow.
                weapon = user['arma']
                table = ddb2.Table('dynamo_revolucao')
                # NOTE(review): scan() with Limit=1000 filters *after* limiting and
                # ignores pagination — fine for a small table, revisit at scale.
                response = table.scan(
                    FilterExpression=Attr('attributes.time').eq(time),
                    Limit=1000
                )
                opponents = response["Items"]
                if not opponents:
                    speak_output = "Não temos usuarios cadastrados neo seu time cadastrado. Volte mais tarde para jogar. Fale sair para sair"
                    return handler_input.response_builder.speak(speak_output).ask(speak_output).response
                random_num = random.randint(0,len(opponents)-1)
                selected_opponent = opponents[random_num]['attributes']
                # win: True = player wins, False = loses, None = draw.
                win = False
                if weapon == selected_opponent['arma']:
                    win = None
                elif weapon == 'pedra':
                    if selected_opponent['arma'] == 'tesoura':
                        win = True
                elif weapon == 'papel':
                    if selected_opponent['arma'] == 'pedra':
                        win = True
                elif weapon == 'tesoura':
                    if selected_opponent['arma'] == 'papel':
                        win = True
                # NOTE(review): `win == None` should idiomatically be `win is None`.
                if win == None:
                    speak_output = "O jogo empatou seu oponente usou {}. Tente de novo. Pedra papel ou tesoura ?".format(weapon)
                elif win:
                    speak_output = """Parabens.
                    Você ganhou do {}. Ele deve ta chorando agora, ouvindo sua mensagem da vitória.<break/>
                    Vença mais oponentes, vamos de novo, vai querer o que ? pedra papel ou tesoura ?""".format(selected_opponent['apelido'])
                    user['kills'] = user['kills'] + 1
                else:
                    speak_output = """Vish seu oponente usou {}. Você perdeu pro {} do time {}.
                    A frase que ele gostaria de te falar é {}.<break/>
                    não deixe isso barato jogue de novo, vamos lá. Vai querer pedra papel ou tesoura ?""".format(selected_opponent['arma'],selected_opponent['apelido'], selected_opponent['time'], selected_opponent['frase'])
                    user['deaths'] = user['deaths'] + 1
                db.save_attributes(request_envelope=request_envelope,attributes=user)
        except Exception as e:
            #speak_output = str(e)
            return LaunchRequestHandler().handle(handler_input)
        return handler_input.response_builder.speak(speak_output).ask(speak_output).response


class PedraHandler(AbstractRequestHandler):
    """Handler for the Pedra intent: stores 'pedra' as the weapon, then attacks."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("Pedra")(handler_input)

    def handle(self, handler_input):
        try:  # checar se usuario existe
            speak_output = "Você escolheu pedra."
            request_envelope = handler_input.request_envelope
            user = db.get_attributes(request_envelope=request_envelope)
            user['arma'] = "pedra"
            db.save_attributes(request_envelope=request_envelope,attributes=user)
        except Exception as e:
            speak_output = "Ocorreu um problema."
        # Immediately resolve the round with the freshly stored weapon.
        return AttackHandler().handle(handler_input)
        #return handler_input.response_builder.speak(speak_output).ask(speak_output).response


class PapelHandler(AbstractRequestHandler):
    """Handler for the Papel intent: stores 'papel' as the weapon, then attacks."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("Papel")(handler_input)

    def handle(self, handler_input):
        try:  # checar se usuario existe
            speak_output = "Você escolheu papel."
            request_envelope = handler_input.request_envelope
            user = db.get_attributes(request_envelope=request_envelope)
            user['arma'] = "papel"
            db.save_attributes(request_envelope=request_envelope,attributes=user)
        except Exception as e:
            speak_output = "Ocorreu um problema."
        return AttackHandler().handle(handler_input)


class TesouraHandler(AbstractRequestHandler):
    """Handler for the Tesoura intent: stores 'tesoura' as the weapon, then attacks."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("Tesoura")(handler_input)

    def handle(self, handler_input):
        try:  # checar se usuario existe
            speak_output = "Você escolheu tesoura."
            request_envelope = handler_input.request_envelope
            user = db.get_attributes(request_envelope=request_envelope)
            user['arma'] = "tesoura"
            db.save_attributes(request_envelope=request_envelope,attributes=user)
        except Exception as e:
            speak_output = "Ocorreu um problema."
        return AttackHandler().handle(handler_input)


class StoryHandler(AbstractRequestHandler):
    """Handler for the Story intent: narrates story text based on team and kills."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("Story")(handler_input)

    def handle(self, handler_input):
        try:  # checar se usuario existe
            request_envelope = handler_input.request_envelope
            user = db.get_attributes(request_envelope=request_envelope)
            if not user.get('apelido'):
                speak_output = "Você ainda não tem seu apelido cadastrado. Faça isso falando meu apelido é <break/> então diga seu apelido."
            elif not user.get('frase'):
                speak_output = "Você ainda nnão tem uma frase cadastrada. Faça isso falando minha frase é <break/> então diga sua frase."
            elif not user.get('time'):
                speak_output = "Você ainda não esta em nenhum time. Quer ser do time coxinha ou dos mortadelas."
            else:
                kills = user.get('kills')
                time = user.get('time')
                # get_story_frase comes from the local utils module (not visible here).
                speak_output = get_story_frase(time,kills)
        except Exception as e:
            #speak_output = str(e)
            return LaunchRequestHandler().handle(handler_input)
        return handler_input.response_builder.speak(speak_output).ask(speak_output).response


class DidNotUnderstandHandler(AbstractRequestHandler):
    """Handler for the DidNotUnderstand intent: generic re-prompt."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("DidNotUnderstand")(handler_input)

    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        speak_output = '''Não te entendi. fale ajuda para você entender melhor os comando desse jogo'''
        return (
            handler_input.response_builder
                .speak(speak_output)
                .ask(speak_output)
                .response
        )


class HelpIntentHandler(AbstractRequestHandler):
    """Handler for AMAZON.HelpIntent."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input)

    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        # NOTE(review): string literal truncated at the chunk boundary — the
        # remainder of this handler is not visible here.
        speak_output = "Os comandos desse jogo são. pedra papel e tesoura para atacar. ver historia para ver a historia. Voce tambem pode alterar
# -*- encoding: utf-8 -*-
'''
HubbleStack Nova audit module: runs `secedit` on the local Windows machine and
audits the exported security policy against CIS-style yaml profiles.

NOTE(review): this module is Python 2 only (`dict.iteritems`, `StandardError`,
list-returning `filter`) — porting to Python 3 would require touching several
functions below.

:maintainer: HubbleStack / madchills
:maturity: 2016.7.0
:platform: Windows
:requires: SaltStack
'''
from __future__ import absolute_import
import copy
import fnmatch
import logging
import salt.utils

try:
    import codecs
    import uuid
    HAS_WINDOWS_MODULES = True
except ImportError:
    HAS_WINDOWS_MODULES = False

log = logging.getLogger(__name__)

__virtualname__ = 'win_secedit'


def __virtual__():
    # Only load on Windows with the required modules available.
    if not salt.utils.is_windows() or not HAS_WINDOWS_MODULES:
        return False, 'This audit module only runs on windows'
    return True


def audit(data_list, tags, debug=False):
    '''
    Runs secedit on the local machine and audits the return data
    with the CIS yaml processed by __virtual__

    :param data_list: list of (profile, yaml-data) pairs to merge and audit
    :param tags: fnmatch glob selecting which tags to audit
    :param debug: if True, log the merged data and tag map
    :return: dict with 'Success', 'Failure' and 'Controlled' lists of tag data
    '''
    __data__ = {}
    __secdata__ = _secedit_export()
    __sidaccounts__ = _get_account_sid()
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __tags__ = _get_tags(__data__)
    if debug:
        log.debug('secedit audit __data__:')
        log.debug(__data__)
        log.debug('secedit audit __tags__:')
        log.debug(__tags__)

    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                # Entries carrying a 'control' key are explicitly waived.
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                name = tag_data['name']
                audit_type = tag_data['type']
                output = tag_data['match_output'].lower()

                # Blacklisted audit (do not include)
                if audit_type == 'blacklist':
                    if 'no one' in output:
                        # "No one" means the setting must be absent entirely.
                        if name not in __secdata__:
                            ret['Success'].append(tag_data)
                        else:
                            ret['Failure'].append(tag_data)
                    else:
                        if name in __secdata__:
                            secret = _translate_value_type(__secdata__[name], tag_data['value_type'], tag_data['match_output'])
                            # For blacklists a match is a FAILURE.
                            if secret:
                                ret['Failure'].append(tag_data)
                            else:
                                ret['Success'].append(tag_data)

                # Whitelisted audit (must include)
                if audit_type == 'whitelist':
                    if name in __secdata__:
                        sec_value = __secdata__[name]
                        tag_data['found_value'] = sec_value
                        # Registry-backed values need their yaml wording translated
                        # into the 'type,value' form secedit exports.
                        if 'MACHINE\\' in name:
                            match_output = _reg_value_translator(tag_data['match_output'])
                        else:
                            match_output = tag_data['match_output']
                        if ',' in sec_value and '\\' in sec_value:
                            sec_value = sec_value.split(',')
                            match_output = match_output.split(',')
                        if 'account' in tag_data['value_type']:
                            secret = _translate_value_type(sec_value, tag_data['value_type'], match_output, __sidaccounts__)
                        else:
                            secret = _translate_value_type(sec_value, tag_data['value_type'], match_output)
                        if secret:
                            ret['Success'].append(tag_data)
                        else:
                            ret['Failure'].append(tag_data)
                    else:
                        log.error('name {} was not in __secdata__'.format(name))
                        ret['Failure'].append(tag_data)
    return ret


def _merge_yaml(ret, data, profile=None):
    '''
    Merge two yaml dicts together at the secedit:blacklist and
    secedit:whitelist level

    :param ret: accumulator dict, mutated in place and returned
    :param data: incoming yaml dict to merge
    :param profile: profile name recorded on each merged entry as 'nova_profile'
    '''
    if __virtualname__ not in ret:
        ret[__virtualname__] = {}
    for topkey in ('blacklist', 'whitelist'):
        if topkey in data.get(__virtualname__, {}):
            if topkey not in ret[__virtualname__]:
                ret[__virtualname__][topkey] = []
            for key, val in data[__virtualname__][topkey].iteritems():
                if profile and isinstance(val, dict):
                    val['nova_profile'] = profile
                ret[__virtualname__][topkey].append({key: val})
    return ret


def _get_tags(data):
    '''
    Retrieve all the tags for this distro from the yaml

    Builds {tag: [formatted_audit_data, ...]} by matching the grain 'osfullname'
    against the per-osfinger entries of each audit item.
    '''
    ret = {}
    distro = __grains__.get('osfullname')
    for toplist, toplevel in data.get(__virtualname__, {}).iteritems():
        # secedit:whitelist
        for audit_dict in toplevel:
            for audit_id, audit_data in audit_dict.iteritems():
                # secedit:whitelist:PasswordComplexity
                tags_dict = audit_data.get('data', {})
                # secedit:whitelist:PasswordComplexity:data
                tags = None
                for osfinger in tags_dict:
                    if osfinger == '*':
                        continue
                    osfinger_list = [finger.strip() for finger in osfinger.split(',')]
                    for osfinger_glob in osfinger_list:
                        if fnmatch.fnmatch(distro, osfinger_glob):
                            tags = tags_dict.get(osfinger)
                            break
                    if tags is not None:
                        break
                # If we didn't find a match, check for a '*'
                if tags is None:
                    tags = tags_dict.get('*', [])
                # secedit:whitelist:PasswordComplexity:data:Server 2012
                if isinstance(tags, dict):
                    # malformed yaml, convert to list of dicts
                    tmp = []
                    for name, tag in tags.iteritems():
                        tmp.append({name: tag})
                    tags = tmp
                for item in tags:
                    for name, tag in item.iteritems():
                        tag_data = {}
                        # Whitelist could have a dictionary, not a string
                        if isinstance(tag, dict):
                            tag_data = copy.deepcopy(tag)
                            tag = tag_data.pop('tag')
                        if tag not in ret:
                            ret[tag] = []
                        formatted_data = {'name': name,
                                          'tag': tag,
                                          'module': 'win_secedit',
                                          'type': toplist}
                        formatted_data.update(tag_data)
                        formatted_data.update(audit_data)
                        formatted_data.pop('data')
                        ret[tag].append(formatted_data)
    return ret


def _secedit_export():
    '''Helper function that will create(dump) a secedit inf file.  You can
    specify the location of the file and the file will persist, or let the
    function create it and the file will be deleted on completion.  Should
    only be called once.'''
    # NOTE(review): non-raw Windows path — '\P' and '\{' happen not to be escape
    # sequences, but a raw string (r"C:\ProgramData\...") would be safer.
    dump = "C:\ProgramData\{}.inf".format(uuid.uuid4())
    try:
        ret = __salt__['cmd.run']('secedit /export /cfg {0}'.format(dump))
        if ret:
            secedit_ret = _secedit_import(dump)
            ret = __salt__['file.remove'](dump)
            return secedit_ret
        # NOTE(review): if cmd.run returns falsy this falls through and returns
        # None, while the except path returns the tuple (False, None) — callers
        # must handle both; confirm which contract is intended.
    except StandardError:  # Python 2 only
        log.debug('Error occurred while trying to get / export secedit data')
        return False, None


def _secedit_import(inf_file):
    '''This function takes the inf file that SecEdit dumps
    and returns a dictionary'''
    sec_return = {}
    # secedit exports UTF-16 with CRLF line endings; section headers and the
    # 'Unicode' preamble are skipped.
    with codecs.open(inf_file, 'r', encoding='utf-16') as f:
        for line in f:
            line = str(line).replace('\r\n', '')
            if not line.startswith('[') and not line.startswith('Unicode'):
                if line.find(' = ') != -1:
                    k, v = line.split(' = ')
                    sec_return[k] = v
                else:
                    k, v = line.split('=')
                    sec_return[k] = v
    return sec_return


def _get_account_sid():
    '''This helper function will get all the users and groups on the computer
    and return a dictionary mapping account name -> SID'''
    win32 = __salt__['cmd.run']('Get-WmiObject win32_useraccount -Filter "localaccount=\'True\'"'
                                ' | Format-List -Property Name, SID', shell='powershell',
                                python_shell=True)
    win32 += '\n'
    win32 += __salt__['cmd.run']('Get-WmiObject win32_group -Filter "localaccount=\'True\'" | '
                                 'Format-List -Property Name, SID', shell='powershell',
                                 python_shell=True)
    if win32:
        dict_return = {}
        lines = win32.split('\n')
        lines = filter(None, lines)  # Py2: filter returns a list here
        if 'local:' in lines:
            lines.remove('local:')
        for line in lines:
            line = line.strip()
            if line != '' and ' : ' in line:
                k, v = line.split(' : ')
                if k.lower() == 'name':
                    key = v  # remember the Name; the following SID line pairs with it
                else:
                    dict_return[key] = v
        if dict_return:
            # Well-known service SIDs that WMI does not report.
            if 'LOCAL SERVICE' not in dict_return:
                dict_return['LOCAL SERVICE'] = 'S-1-5-19'
            if 'NETWORK SERVICE' not in dict_return:
                dict_return['NETWORK SERVICE'] = 'S-1-5-20'
            if 'SERVICE' not in dict_return:
                dict_return['SERVICE'] = 'S-1-5-6'
            return dict_return
        else:
            log.debug('Error parsing the data returned from powershell')
            return False
    else:
        log.debug('error occurred while trying to run powershell '
                  'get-wmiobject command')
        return False


def _translate_value_type(current, value, evaluator, __sidaccounts__=False):
    '''This will take a value type and convert it to what it needs to do.
    Under the covers you have conversion for more, less, and equal

    :param current: the exported secedit value (str or list)
    :param value: the yaml value_type ('more', 'less', 'equal', 'account', 'configured')
    :param evaluator: the expected value (already translated for registry keys)
    :param __sidaccounts__: name->SID map, required for 'account' checks
    :return: True/False, or the string 'Undefined' for an unknown value_type
    '''
    value = value.lower()
    if 'more' in value:
        # Registry-style values look like 'type,number' — compare the number part.
        if ',' in evaluator:
            evaluator = evaluator.split(',')[1]
        if ',' in current:
            current = current.split(',')[1]
        if '"' in current:
            current = current.replace('"', '')
        if '"' in evaluator:
            evaluator = evaluator.replace('"', '')
        if int(current) >= int(evaluator):
            return True
        else:
            return False
    elif 'less' in value:
        if ',' in evaluator:
            evaluator = evaluator.split(',')[1]
        if ',' in current:
            current = current.split(',')[1]
        if '"' in current:
            current = current.replace('"', '')
        if '"' in evaluator:
            evaluator = evaluator.replace('"', '')
        if int(current) <= int(evaluator):
            if current != '0':  # 0 typically means "never/disabled", which fails 'less'
                return True
            else:
                return False
        else:
            return False
    elif 'equal' in value:
        if ',' not in evaluator and type(evaluator) != list:
            evaluator = _evaluator_translator(evaluator)
        if type(current) == list:
            # Every element of the exported list must appear in the evaluator.
            ret_final = []
            for item in current:
                item = item.lower()
                if item in evaluator:
                    ret_final.append(True)
                else:
                    ret_final.append(False)
            if False in ret_final:
                return False
            else:
                return True
        if current.lower() == evaluator:
            return True
        else:
            return False
    elif 'account' in value:
        # Compare SID lists in both directions so extra or missing accounts fail.
        evaluator = _account_audit(evaluator, __sidaccounts__)
        evaluator_list = evaluator.split(',')
        current_list = current.split(',')
        list_match = False
        for list_item in evaluator_list:
            if list_item in current_list:
                list_match = True
            else:
                list_match = False
                break
        if list_match:
            for list_item in current_list:
                if list_item in evaluator_list:
                    list_match = True
                else:
                    list_match = False
                    break
        else:
            return False
        if list_match:
            return True
        else:
            return False
    elif 'configured' in value:
        if current == '':
            return False
        elif current == value:
            return True
        else:
            return False
    else:
        return 'Undefined'


def _evaluator_translator(input_string):
    '''This helper function takes words from the CIS yaml and replaces
    them with what you actually find in the secedit dump'''
    if type(input_string) == str:
        input_string = input_string.replace(' ','').lower()
    if 'enabled' in input_string:
        return '1'
    elif 'disabled' in input_string:
        return '0'
    elif 'success' in input_string:
        # NOTE(review): checked before 'failure', so 'success,failure' never
        # reaches the combined branch below — confirm ordering is intended.
        return '1'
    elif 'failure' in input_string:
        return '2'
    elif input_string == 'success,failure' or input_string == 'failure,success':
        return '3'
    elif input_string in ['0','1','2','3']:
        return input_string
    else:
        log.debug('error translating evaluator from enabled/disabled or success/failure.'
                  ' Could have received incorrect string')
        return 'undefined'


def _account_audit(current, __sidaccounts__):
    '''This helper function takes the account names from the cis yaml and
    replaces them with the account SID that you find in the secedit dump'''
    user_list = current.split(', ')
    ret_string = ''
    if __sidaccounts__:
        for usr in user_list:
            if usr == 'Guest':
                # Guest is referenced by name (not '*SID') in the secedit dump.
                if not ret_string:
                    ret_string = usr
                else:
                    ret_string += ',' + usr
            if usr in __sidaccounts__:
                if not ret_string:
                    ret_string = '*' + __sidaccounts__[usr]
                else:
                    ret_string += ',*' + __sidaccounts__[usr]
        return ret_string
    else:
        log.debug('getting the SIDs for each account failed')
        return False


def _reg_value_translator(input_string):
    # Translates CIS yaml wording into the 'type,value' registry form secedit
    # exports (e.g. REG_DWORD enabled -> '4,1').
    input_string = input_string.lower()
    if input_string == 'enabled':
        return '4,1'
    elif input_string == 'disabled':
        return '4,0'
    elif input_string == 'users cant add or log on with microsoft accounts':
        return '4,3'
    elif input_string == 'administrators':
        return '1,"0"'
    elif input_string == 'lock workstation':
        return '1,"1"'
    elif input_string == 'accept if provided by client':
        return '4,1'
    # NOTE(review): function truncated at the chunk boundary mid-branch — the
    # remaining elif arms are not visible here.
    elif input_string == 'classic - local users
# --- fragment: tail of Function_Base.execute; its `def` (and the start of
# this comment: "...variable, and validate params") lie before this chunk ---
variable = self._check_args(variable=variable,
                            context=context,
                            params=params,
                            target_set=target_set,
                            )
try:
    value = self._function(variable=variable, context=context, params=params, **kwargs)
except ValueError as err:
    # Re-raise with the owning Component's name to make the error traceable.
    err_msg = f"Problem with '{self}' in '{self.owner.name if self.owner else self.__class__.__name__}': {err}"
    raise FunctionError(err_msg)
self.most_recent_context = context
self.parameters.value._set(value, context=context)
self._reset_runtime_parameters(context)
return value

    @abc.abstractmethod
    def _function(
        self,
        variable=None,
        context=None,
        params=None,
    ):
        # Core computation; every concrete Function subclass must implement it.
        pass

    def _parse_arg_generic(self, arg_val):
        # Lists are normalized to numpy arrays; other values pass through.
        if isinstance(arg_val, list):
            return np.asarray(arg_val)
        else:
            return arg_val

    def _validate_parameter_spec(self, param, param_name, numeric_only=True):
        """Validates function param
        Replace direct call to parameter_spec in tc, which seems to not get called by Function __init__()'s
        """
        if not parameter_spec(param, numeric_only):
            owner_name = 'of ' + self.owner_name if self.owner else ""
            raise FunctionError(f"{param} is not a valid specification for "
                                f"the {param_name} argument of {self.__class__.__name__}{owner_name}.")

    def _get_current_parameter_value(self, param_name, context=None):
        # Accepts either a parameter name (str) or a Parameter object; the
        # TypeError path covers non-string lookups passed straight through.
        try:
            param = getattr(self.parameters, param_name)
        except TypeError:
            param = param_name
        except AttributeError:
            # don't accept strings that don't correspond to Parameters
            # on this function
            raise
        return super()._get_current_parameter_value(param, context)

    def get_previous_value(self, context=None):
        # temporary method until previous values are integrated for all parameters
        value = self.parameters.previous_value._get(context)
        return value

    def convert_output_type(self, value, output_type=None):
        # Coerce `value` to the requested FunctionOutputType (defaulting to
        # self.output_type when conversion is enabled); returns `value`
        # unchanged when conversion is disabled.
        if output_type is None:
            if not self.enable_output_type_conversion or self.output_type is None:
                return value
            else:
                output_type = self.output_type

        value = convert_to_np_array(value)

        # Type conversion (specified by output_type):

        # MODIFIED 6/21/19 NEW: [JDC]
        # Convert to same format as variable
        if isinstance(output_type, (list, np.ndarray)):
            shape = np.array(output_type).shape
            return np.array(value).reshape(shape)
        # MODIFIED 6/21/19 END

        # Convert to 2D array, irrespective of value type:
        if output_type is FunctionOutputType.NP_2D_ARRAY:
            # KDM 8/10/18: mimicking the conversion that Mechanism does to its values, because
            # this is what we actually wanted this method for. Can be changed to pure 2D np array in
            # future if necessary
            converted_to_2d = np.atleast_2d(value)
            # If return_value is a list of heterogenous elements, return as is
            # (satisfies requirement that return_value be an array of possibly multidimensional values)
            if converted_to_2d.dtype == object:
                pass
            # Otherwise, return value converted to 2d np.array
            else:
                value = converted_to_2d

        # Convert to 1D array, irrespective of value type:
        # Note: if 2D array (or higher) has more than two items in the outer dimension, generate exception
        elif output_type is FunctionOutputType.NP_1D_ARRAY:
            # If variable is 2D
            if value.ndim >= 2:
                # If there is only one item:
                if len(value) == 1:
                    value = value[0]
                else:
                    raise FunctionError(f"Can't convert value ({value}: 2D np.ndarray object "
                                        f"with more than one array) to 1D array.")
            elif value.ndim == 1:
                value = value
            elif value.ndim == 0:
                value = np.atleast_1d(value)
            else:
                raise FunctionError(f"Can't convert value ({value} to 1D array.")

        # Convert to raw number, irrespective of value type:
        # Note: if 2D or 1D array has more than two items, generate exception
        elif output_type is FunctionOutputType.RAW_NUMBER:
            if object_has_single_value(value):
                value = float(value)
            else:
                raise FunctionError(f"Can't convert value ({value}) with more than a single number to a raw number.")

        return value

    @property
    def owner_name(self):
        # Name of the owning Component, or a placeholder when unowned.
        try:
            return self.owner.name
        except AttributeError:
            return '<no owner>'

    def _is_identity(self, context=None):
        # should return True in subclasses if the parameters for context are such that
        # the Function's output will be the same
as its input # Used to bypass execute when unnecessary return False @property def _model_spec_parameter_blacklist(self): return super()._model_spec_parameter_blacklist.union({ 'multiplicative_param', 'additive_param', }) # ***************************************** EXAMPLE FUNCTION ******************************************************* PROPENSITY = "PROPENSITY" PERTINACITY = "PERTINACITY" class ArgumentTherapy(Function_Base): """ ArgumentTherapy( \ variable, \ propensity=Manner.CONTRARIAN, \ pertinacity=10.0 \ params=None, \ owner=None, \ name=None, \ prefs=None \ ) .. _ArgumentTherapist: Return `True` or :keyword:`False` according to the manner of the therapist. Arguments --------- variable : boolean or statement that resolves to one : default class_defaults.variable assertion for which a therapeutic response will be offered. propensity : Manner value : default Manner.CONTRARIAN specifies preferred therapeutic manner pertinacity : float : default 10.0 specifies therapeutic consistency params : Dict[param keyword: param value] : default None a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the function. Values specified for parameters in the dictionary override any assigned to those parameters in arguments of the constructor. owner : Component `component <Component>` to which to assign the Function. name : str : default see `name <Function.name>` specifies the name of the Function. prefs : PreferenceSet or specification dict : default Function.classPreferences specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details). Attributes ---------- variable : boolean assertion to which a therapeutic response is made. propensity : Manner value : default Manner.CONTRARIAN determines therapeutic manner: tendency to agree or disagree. pertinacity : float : default 10.0 determines consistency with which the manner complies with the propensity. 
owner : Component `component <Component>` to which the Function has been assigned. name : str the name of the Function; if it is not specified in the **name** argument of the constructor, a default is assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names). prefs : PreferenceSet or specification dict : Function.classPreferences the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences` for details). """ # Function componentName and type (defined at top of module) componentName = ARGUMENT_THERAPY_FUNCTION componentType = EXAMPLE_FUNCTION_TYPE classPreferences = { PREFERENCE_SET_NAME: 'ExampleClassPreferences', REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE), } # Mode indicators class Manner(Enum): OBSEQUIOUS = 0 CONTRARIAN = 1 # Parameter class defaults # These are used both to type-cast the params, and as defaults if none are assigned # in the initialization call or later (using either _instantiate_defaults or during a function call) def __init__(self, default_variable=None, propensity=10.0, pertincacity=Manner.CONTRARIAN, params=None, owner=None, prefs: tc.optional(is_pref_set) = None): super().__init__( default_variable=default_variable, propensity=propensity, pertinacity=pertincacity, params=params, owner=owner, prefs=prefs, ) def _validate_variable(self, variable, context=None): """Validates variable and returns validated value This overrides the class method, to perform more detailed type checking See explanation in class method. 
Note: this method (or the class version) is called only if the parameter_validation attribute is `True` :param variable: (anything but a dict) - variable to be validated: :param context: (str) :return variable: - validated """ if type(variable) == type(self.class_defaults.variable) or \ (isinstance(variable, numbers.Number) and isinstance(self.class_defaults.variable, numbers.Number)): return variable else: raise FunctionError(f"Variable must be {type(self.class_defaults.variable)}.") def _validate_params(self, request_set, target_set=None, context=None): """Validates variable and /or params and assigns to targets This overrides the class method, to perform more detailed type checking See explanation in class method. Note: this method (or the class version) is called only if the parameter_validation attribute is `True` :param request_set: (dict) - params to be validated :param target_set: (dict) - destination of validated params :return none: """ message = "" # Check params for param_name, param_value in request_set.items(): if param_name == PROPENSITY: if isinstance(param_value, ArgumentTherapy.Manner): # target_set[self.PROPENSITY] = param_value pass # This leaves param in request_set, clear to be assigned to target_set in call to super below else: message = "Propensity must be of type Example.Mode" continue # Validate param if param_name == PERTINACITY: if isinstance(param_value, numbers.Number) and 0 <= param_value <= 10: # target_set[PERTINACITY] = param_value pass # This leaves param in request_set, clear to be assigned to target_set in call to super below else: message += "Pertinacity must be a number between 0 and 10" continue if message: raise FunctionError(message) super()._validate_params(request_set, target_set, context) def _function(self, variable=None, context=None, params=None, ): """ Returns a boolean that is (or tends to be) the same as or opposite the one passed in. 
Arguments --------- variable : boolean : default class_defaults.variable an assertion to which a therapeutic response is made. params : Dict[param keyword: param value] : default None a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the function. Values specified for parameters in the dictionary override any assigned to those parameters in arguments of the constructor. Returns ------- therapeutic response : boolean """ # Compute the function statement = variable propensity = self._get_current_parameter_value(PROPENSITY, context) pertinacity = self._get_current_parameter_value(PERTINACITY, context) whim = np.random.randint(-10, 10) if propensity == self.Manner.OBSEQUIOUS: value = whim < pertinacity elif propensity == self.Manner.CONTRARIAN: value = whim
from. :type storage_account_id: str :param job_type: The type of job. :type job_type: str :param return_address: Specifies the return address information for the job. :type return_address: ~storage_import_export.models.ReturnAddress :param return_shipping: Specifies the return carrier and customer's account with the carrier. :type return_shipping: ~storage_import_export.models.ReturnShipping :param shipping_information: Contains information about the Microsoft datacenter to which the drives should be shipped. :type shipping_information: ~storage_import_export.models.ShippingInformation :param delivery_package: Contains information about the package being shipped by the customer to the Microsoft data center. :type delivery_package: ~storage_import_export.models.DeliveryPackageInformation :param return_package: Contains information about the package being shipped from the Microsoft data center to the customer to return the drives. The format is the same as the deliveryPackage property above. This property is not included if the drives have not yet been returned. :type return_package: ~storage_import_export.models.PackageInformation :param diagnostics_path: The virtual blob directory to which the copy logs and backups of drive manifest files (if enabled) will be stored. :type diagnostics_path: str :param log_level: Default value is Error. Indicates whether error logging or verbose logging will be enabled. :type log_level: str :param backup_drive_manifest: Default value is false. Indicates whether the manifest files on the drives should be copied to block blobs. :type backup_drive_manifest: bool :param state: Current state of the job. :type state: str :param cancel_requested: Indicates whether a request has been submitted to cancel the job. :type cancel_requested: bool :param percent_complete: Overall percentage completed for the job. 
:type percent_complete: long :param incomplete_blob_list_uri: A blob path that points to a block blob containing a list of blob names that were not exported due to insufficient drive space. If all blobs were exported successfully, then this element is not included in the response. :type incomplete_blob_list_uri: str :param drive_list: List of up to ten drives that comprise the job. The drive list is a required element for an import job; it is not specified for export jobs. :type drive_list: list[~storage_import_export.models.DriveStatus] :param export: A property containing information about the blobs to be exported for an export job. This property is included for export jobs only. :type export: ~storage_import_export.models.Export :param provisioning_state: Specifies the provisioning state of the job. :type provisioning_state: str :param encryption_key: Contains information about the encryption key. :type encryption_key: ~storage_import_export.models.EncryptionKeyDetails """ _attribute_map = { 'storage_account_id': {'key': 'storageAccountId', 'type': 'str'}, 'job_type': {'key': 'jobType', 'type': 'str'}, 'return_address': {'key': 'returnAddress', 'type': 'ReturnAddress'}, 'return_shipping': {'key': 'returnShipping', 'type': 'ReturnShipping'}, 'shipping_information': {'key': 'shippingInformation', 'type': 'ShippingInformation'}, 'delivery_package': {'key': 'deliveryPackage', 'type': 'DeliveryPackageInformation'}, 'return_package': {'key': 'returnPackage', 'type': 'PackageInformation'}, 'diagnostics_path': {'key': 'diagnosticsPath', 'type': 'str'}, 'log_level': {'key': 'logLevel', 'type': 'str'}, 'backup_drive_manifest': {'key': 'backupDriveManifest', 'type': 'bool'}, 'state': {'key': 'state', 'type': 'str'}, 'cancel_requested': {'key': 'cancelRequested', 'type': 'bool'}, 'percent_complete': {'key': 'percentComplete', 'type': 'long'}, 'incomplete_blob_list_uri': {'key': 'incompleteBlobListUri', 'type': 'str'}, 'drive_list': {'key': 'driveList', 'type': 
'[DriveStatus]'}, 'export': {'key': 'export', 'type': 'Export'}, 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKeyDetails'}, } def __init__( self, *, storage_account_id: Optional[str] = None, job_type: Optional[str] = None, return_address: Optional["ReturnAddress"] = None, return_shipping: Optional["ReturnShipping"] = None, shipping_information: Optional["ShippingInformation"] = None, delivery_package: Optional["DeliveryPackageInformation"] = None, return_package: Optional["PackageInformation"] = None, diagnostics_path: Optional[str] = None, log_level: Optional[str] = None, backup_drive_manifest: Optional[bool] = None, state: Optional[str] = None, cancel_requested: Optional[bool] = None, percent_complete: Optional[int] = None, incomplete_blob_list_uri: Optional[str] = None, drive_list: Optional[List["DriveStatus"]] = None, export: Optional["Export"] = None, provisioning_state: Optional[str] = None, encryption_key: Optional["EncryptionKeyDetails"] = None, **kwargs ): super(JobDetails, self).__init__(**kwargs) self.storage_account_id = storage_account_id self.job_type = job_type self.return_address = return_address self.return_shipping = return_shipping self.shipping_information = shipping_information self.delivery_package = delivery_package self.return_package = return_package self.diagnostics_path = diagnostics_path self.log_level = log_level self.backup_drive_manifest = backup_drive_manifest self.state = state self.cancel_requested = cancel_requested self.percent_complete = percent_complete self.incomplete_blob_list_uri = incomplete_blob_list_uri self.drive_list = drive_list self.export = export self.provisioning_state = provisioning_state self.encryption_key = encryption_key class JobResponse(msrest.serialization.Model): """Contains the job information. Variables are only populated by the server, and will be ignored when sending a request. 
:ivar system_data: SystemData of ImportExport Jobs. :vartype system_data: ~storage_import_export.models.SystemData :ivar id: Specifies the resource identifier of the job. :vartype id: str :ivar name: Specifies the name of the job. :vartype name: str :ivar type: Specifies the type of the job resource. :vartype type: str :param location: Specifies the Azure location where the job is created. :type location: str :param tags: A set of tags. Specifies the tags that are assigned to the job. :type tags: any :param properties: Specifies the job properties. :type properties: ~storage_import_export.models.JobDetails :param identity: Specifies the job identity details. :type identity: ~storage_import_export.models.IdentityDetails """ _validation = { 'system_data': {'readonly': True}, 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': 'object'}, 'properties': {'key': 'properties', 'type': 'JobDetails'}, 'identity': {'key': 'identity', 'type': 'IdentityDetails'}, } def __init__( self, *, location: Optional[str] = None, tags: Optional[Any] = None, properties: Optional["JobDetails"] = None, identity: Optional["IdentityDetails"] = None, **kwargs ): super(JobResponse, self).__init__(**kwargs) self.system_data = None self.id = None self.name = None self.type = None self.location = location self.tags = tags self.properties = properties self.identity = identity class ListJobsResponse(msrest.serialization.Model): """List jobs response. :param next_link: link to next batch of jobs. :type next_link: str :param value: Job list. 
:type value: list[~storage_import_export.models.JobResponse] """ _attribute_map = { 'next_link': {'key': 'nextLink', 'type': 'str'}, 'value': {'key': 'value', 'type': '[JobResponse]'}, } def __init__( self, *, next_link: Optional[str] = None, value: Optional[List["JobResponse"]] = None, **kwargs ): super(ListJobsResponse, self).__init__(**kwargs) self.next_link = next_link self.value = value class ListOperationsResponse(msrest.serialization.Model): """List operations response. :param value: operations. :type value: list[~storage_import_export.models.Operation] """ _attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'}, } def __init__( self, *, value: Optional[List["Operation"]] = None, **kwargs ): super(ListOperationsResponse, self).__init__(**kwargs) self.value = value class Location(msrest.serialization.Model): """Provides information about an Azure data center location. :param id: Specifies the resource identifier of the location. :type id: str :param name: Specifies the name of the location. Use List Locations to get all supported locations. :type name: str :param type: Specifies the type of the location. :type type: str :param recipient_name: The recipient name to use when shipping the drives to the Azure data center. :type recipient_name: str :param street_address1: The first line of the street address to use when shipping the drives to the Azure data center. :type street_address1: str :param street_address2: The second line of the street address to use when shipping the drives to the Azure data center. :type street_address2: str :param city: The city name to use when shipping the drives to the Azure data center. :type city: str :param state_or_province: The state or province to use when shipping the drives to the Azure data center. :type state_or_province: str :param postal_code: The postal code to use when shipping the drives to the Azure data center. 
:type postal_code: str :param country_or_region: The country or region to use when shipping the drives to the Azure data center. :type country_or_region: str :param phone: The phone number for the Azure data center. :type phone: str :param additional_shipping_information: Additional shipping information for customer, specific to datacenter to which customer should send their disks. :type additional_shipping_information: str :param supported_carriers: A list of carriers that are supported at this location. :type supported_carriers: list[str] :param alternate_locations: A list of location IDs that should be used to ship shipping drives to for jobs created against the current location. If the current location is active, it will be part of the list. If it is temporarily closed due to maintenance, this list may contain other locations. :type alternate_locations: list[str] """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'recipient_name': {'key': 'properties.recipientName', 'type': 'str'}, 'street_address1': {'key': 'properties.streetAddress1', 'type': 'str'}, 'street_address2': {'key': 'properties.streetAddress2', 'type': 'str'}, 'city': {'key': 'properties.city', 'type': 'str'}, 'state_or_province': {'key': 'properties.stateOrProvince', 'type': 'str'}, 'postal_code': {'key': 'properties.postalCode', 'type': 'str'}, 'country_or_region': {'key': 'properties.countryOrRegion', 'type': 'str'}, 'phone': {'key': 'properties.phone', 'type': 'str'}, 'additional_shipping_information': {'key': 'properties.additionalShippingInformation', 'type': 'str'}, 'supported_carriers': {'key': 'properties.supportedCarriers', 'type': '[str]'}, 'alternate_locations': {'key': 'properties.alternateLocations', 'type': '[str]'}, } def __init__( self, *, id: Optional[str]
= FloatRangeTrait() _default_value = 3.0 _good_values = [2.0, 3.0, 4.0, 5.0, 2.001, 4.999] _bad_values = [ 0, 1, 6, LONG_TYPE(0), LONG_TYPE(1), LONG_TYPE(6), 1.999, 6.01, "two", "0.999", "6.01", None, ] def coerce(self, value): try: return float(value) except: return float(LONG_TYPE(value)) # Old style class version: class OTraitTest1: pass class OTraitTest2(OTraitTest1): pass class OTraitTest3(OTraitTest2): pass class OBadTraitTest: pass otrait_test1 = OTraitTest1() class OldInstanceTrait(HasTraits): value = Trait(otrait_test1) class OldInstanceTest(AnyTraitTest): obj = OldInstanceTrait() _default_value = otrait_test1 _good_values = [ otrait_test1, OTraitTest1(), OTraitTest2(), OTraitTest3(), None, ] _bad_values = [ 0, LONG_TYPE(0), 0.0, 0j, OTraitTest1, OTraitTest2, OBadTraitTest(), "string", u"string", [otrait_test1], (otrait_test1,), {"data": otrait_test1}, ] # New style class version: class NTraitTest1(object): pass class NTraitTest2(NTraitTest1): pass class NTraitTest3(NTraitTest2): pass class NBadTraitTest: pass ntrait_test1 = NTraitTest1() class NewInstanceTrait(HasTraits): value = Trait(ntrait_test1) class NewInstanceTest(AnyTraitTest): obj = NewInstanceTrait() _default_value = ntrait_test1 _good_values = [ ntrait_test1, NTraitTest1(), NTraitTest2(), NTraitTest3(), None, ] _bad_values = [ 0, LONG_TYPE(0), 0.0, 0j, NTraitTest1, NTraitTest2, NBadTraitTest(), "string", u"string", [ntrait_test1], (ntrait_test1,), {"data": ntrait_test1}, ] class FactoryClass(HasTraits): pass class ConsumerClass(HasTraits): x = Instance(FactoryClass, ()) class ConsumerSubclass(ConsumerClass): x = FactoryClass() embedded_instance_trait = Trait( "", Str, Instance("traits.has_traits.HasTraits") ) class Dummy(HasTraits): x = embedded_instance_trait xl = List(embedded_instance_trait) class RegressionTest(unittest.TestCase): """ Check that fixed bugs stay fixed. 
""" def test_factory_subclass_no_segfault(self): """ Test that we can provide an instance as a default in the definition of a subclass. """ # There used to be a bug where this would segfault. obj = ConsumerSubclass() obj.x def test_trait_compound_instance(self): """ Test that a deferred Instance() embedded in a TraitCompound handler and then a list will not replace the validate method for the outermost trait. """ # Pass through an instance in order to make the instance trait resolve # the class. d = Dummy() d.xl = [HasTraits()] d.x = "OK" # Trait(using a function) that must be an odd integer: def odd_integer(object, name, value): try: float(value) if (value % 2) == 1: return int(value) except: pass raise TraitError class OddIntegerTrait(HasTraits): value = Trait(99, odd_integer) class OddIntegerTest(AnyTraitTest): obj = OddIntegerTrait() _default_value = 99 _good_values = [ 1, 3, 5, 7, 9, 999999999, LONG_TYPE(1), LONG_TYPE(3), LONG_TYPE(5), LONG_TYPE(7), LONG_TYPE(9), LONG_TYPE(999999999), 1.0, 3.0, 5.0, 7.0, 9.0, 999999999.0, -1, -3, -5, -7, -9, -999999999, LONG_TYPE(-1), LONG_TYPE(-3), LONG_TYPE(-5), LONG_TYPE(-7), LONG_TYPE(-9), LONG_TYPE(-999999999), -1.0, -3.0, -5.0, -7.0, -9.0, -999999999.0, ] _bad_values = [0, 2, -2, 1j, None, "1", [1], (1,), {1: 1}] class NotifierTraits(HasTraits): value1 = Int value2 = Int value1_count = Int value2_count = Int def _anytrait_changed(self, trait_name, old, new): if trait_name == "value1": self.value1_count += 1 elif trait_name == "value2": self.value2_count += 1 def _value1_changed(self, old, new): self.value1_count += 1 def _value2_changed(self, old, new): self.value2_count += 1 class NotifierTests(unittest.TestCase): obj = NotifierTraits() def __init__(self, value): unittest.TestCase.__init__(self, value) def setUp(self): obj = self.obj obj.value1 = 0 obj.value2 = 0 obj.value1_count = 0 obj.value2_count = 0 def tearDown(self): obj = self.obj obj.on_trait_change(self.on_value1_changed, "value1", remove=True) 
obj.on_trait_change(self.on_value2_changed, "value2", remove=True) obj.on_trait_change(self.on_anytrait_changed, remove=True) def on_anytrait_changed(self, object, trait_name, old, new): if trait_name == "value1": self.obj.value1_count += 1 elif trait_name == "value2": self.obj.value2_count += 1 def on_value1_changed(self): self.obj.value1_count += 1 def on_value2_changed(self): self.obj.value2_count += 1 def test_simple(self): obj = self.obj obj.value1 = 1 self.assertEqual(obj.value1_count, 2) self.assertEqual(obj.value2_count, 0) obj.value2 = 1 self.assertEqual(obj.value1_count, 2) self.assertEqual(obj.value2_count, 2) def test_complex(self): obj = self.obj obj.on_trait_change(self.on_value1_changed, "value1") obj.value1 = 1 self.assertEqual(obj.value1_count, 3) self.assertEqual(obj.value2_count, 0) obj.on_trait_change(self.on_value2_changed, "value2") obj.value2 = 1 self.assertEqual(obj.value1_count, 3) self.assertEqual(obj.value2_count, 3) obj.on_trait_change(self.on_anytrait_changed) obj.value1 = 2 self.assertEqual(obj.value1_count, 7) self.assertEqual(obj.value2_count, 3) obj.value1 = 2 self.assertEqual(obj.value1_count, 7) self.assertEqual(obj.value2_count, 3) obj.value2 = 2 self.assertEqual(obj.value1_count, 7) self.assertEqual(obj.value2_count, 7) obj.on_trait_change(self.on_value1_changed, "value1", remove=True) obj.value1 = 3 self.assertEqual(obj.value1_count, 10) self.assertEqual(obj.value2_count, 7) obj.on_trait_change(self.on_value2_changed, "value2", remove=True) obj.value2 = 3 self.assertEqual(obj.value1_count, 10) self.assertEqual(obj.value2_count, 10) obj.on_trait_change(self.on_anytrait_changed, remove=True) obj.value1 = 4 self.assertEqual(obj.value1_count, 12) self.assertEqual(obj.value2_count, 10) obj.value2 = 4 self.assertEqual(obj.value1_count, 12) self.assertEqual(obj.value2_count, 12) class RaisesArgumentlessRuntimeError(HasTraits): x = Int(0) def _x_changed(self): raise RuntimeError class TestRuntimeError(unittest.TestCase): def 
setUp(self):  # (fragment: the `def` keyword sits at the end of the previous line)
        # Install a no-op traits exception handler, but re-raise so the
        # assertRaises below can observe the RuntimeError.
        push_exception_handler(lambda *args: None, reraise_exceptions=True)

    def tearDown(self):
        pop_exception_handler()

    def test_runtime_error(self):
        f = RaisesArgumentlessRuntimeError()
        self.assertRaises(RuntimeError, setattr, f, "x", 5)


class DelegatedFloatTrait(HasTraits):
    # Root of the delegation chain: holds the actual default value.
    value = Trait(99.0)


class DelegateTrait(HasTraits):
    # `value` is looked up on `delegate` unless locally overridden.
    value = Delegate("delegate")
    delegate = Trait(DelegatedFloatTrait())


class DelegateTrait2(DelegateTrait):
    delegate = Trait(DelegateTrait())


class DelegateTrait3(DelegateTrait):
    delegate = Trait(DelegateTrait2())


class DelegateTests(unittest.TestCase):
    def test_delegation(self):
        # Walk a three-level delegation chain: reads fall through to the
        # nearest ancestor with a local value; deleting a local value
        # re-exposes the ancestor's value.
        obj = DelegateTrait3()
        self.assertEqual(obj.value, 99.0)
        parent1 = obj.delegate
        parent2 = parent1.delegate
        parent3 = parent2.delegate
        parent3.value = 3.0
        self.assertEqual(obj.value, 3.0)
        parent2.value = 2.0
        self.assertEqual(obj.value, 2.0)
        self.assertEqual(parent3.value, 3.0)
        parent1.value = 1.0
        self.assertEqual(obj.value, 1.0)
        self.assertEqual(parent2.value, 2.0)
        self.assertEqual(parent3.value, 3.0)
        obj.value = 0.0
        self.assertEqual(obj.value, 0.0)
        self.assertEqual(parent1.value, 1.0)
        self.assertEqual(parent2.value, 2.0)
        self.assertEqual(parent3.value, 3.0)
        del obj.value
        self.assertEqual(obj.value, 1.0)
        del parent1.value
        self.assertEqual(obj.value, 2.0)
        self.assertEqual(parent1.value, 2.0)
        del parent2.value
        self.assertEqual(obj.value, 3.0)
        self.assertEqual(parent1.value, 3.0)
        self.assertEqual(parent2.value, 3.0)
        del parent3.value
        # Uncommenting the following line allows
        # the last assertions to pass. However, this
        # may not be intended behavior, so keeping
        # the line commented.
        # del parent2.value
        self.assertEqual(obj.value, 99.0)
        self.assertEqual(parent1.value, 99.0)
        self.assertEqual(parent2.value, 99.0)
        self.assertEqual(parent3.value, 99.0)


# Complex(i.e. 'composite') Traits tests:

# Make a TraitCompound handler that does not have a fast_validate so we can
# check for a particular regression.
slow = Trait(1, TraitRange(1, 3), TraitRange(-3, -1)) try: del slow.handler.fast_validate except AttributeError: pass class complex_value(HasTraits): num1 = Trait(1, TraitRange(1, 5), TraitRange(-5, -1)) num2 = Trait( 1, TraitRange(1, 5), TraitPrefixList("one", "two", "three", "four", "five"), ) num3 = Trait( 1, TraitRange(1, 5), TraitPrefixMap({"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}), ) num4 = Trait(1, Trait(1, Tuple, slow), 10) num5 = Trait(1, 10, Trait(1, Tuple, slow)) class test_complex_value(test_base2): obj = complex_value() def test_num1(self): self.check_values( "num1", 1, [1, 2, 3, 4, 5, -1, -2, -3, -4, -5], [ 0, 6, -6, "0", "6", "-6", 0.0, 6.0, -6.0, [1], (1,), {1: 1}, None, ], [1, 2, 3, 4, 5, -1, -2, -3, -4, -5], ) def test_enum_exceptions(self): """ Check that enumerated values can be combined with nested TraitCompound handlers. """ self.check_values( "num4", 1, [1, 2, 3, -3, -2, -1, 10, ()], [0, 4, 5, -5, -4, 11] ) self.check_values( "num5", 1, [1, 2, 3, -3, -2, -1, 10, ()], [0, 4, 5, -5, -4, 11] ) class list_value(HasTraits): # Trait definitions: list1 = Trait([2], TraitList(Trait([1, 2, 3, 4]), maxlen=4)) list2 = Trait([2], TraitList(Trait([1, 2, 3, 4]), minlen=1, maxlen=4)) alist = List() class test_list_value(test_base2): obj = list_value() def setUp(self): test_base2.setUp(self) self.last_event = None def tearDown(self): del self.last_event def del_range(self, list, index1, index2): del list[index1:index2] def del_extended_slice(self, list, index1, index2, step): del list[index1:index2:step] def check_list(self, list): self.assertEqual(list, [2]) self.assertEqual(len(list), 1) list.append(3) self.assertEqual(len(list), 2) list[1] = 2 self.assertEqual(list[1], 2) self.assertEqual(len(list), 2) list[0] = 1 self.assertEqual(list[0], 1) self.assertEqual(len(list), 2) self.assertRaises(TraitError, self.indexed_assign, list, 0, 5) self.assertRaises(TraitError, list.append, 5) self.assertRaises(TraitError, list.extend, [1, 2, 3]) 
list.extend([3, 4]) self.assertEqual(list, [1, 2, 3, 4]) self.assertRaises(TraitError, list.append, 1) self.assertRaises( ValueError, self.extended_slice_assign, list, 0, 4, 2, [4, 5, 6] ) del list[1] self.assertEqual(list, [1, 3, 4]) del list[0] self.assertEqual(list, [3, 4]) list[:0] = [1, 2] self.assertEqual(list, [1, 2, 3, 4]) self.assertRaises( TraitError, self.indexed_range_assign, list, 0, 0, [1] ) del list[0:3] self.assertEqual(list, [4]) self.assertRaises( TraitError, self.indexed_range_assign, list, 0, 0, [4, 5] ) def test_list1(self): self.check_list(self.obj.list1) def test_list2(self): self.check_list(self.obj.list2) self.assertRaises(TraitError, self.del_range, self.obj.list2, 0, 1) self.assertRaises( TraitError, self.del_extended_slice, self.obj.list2, 4, -5, -1 ) def assertLastTraitListEventEqual(self, index, removed, added): self.assertEqual(self.last_event.index, index) self.assertEqual(self.last_event.removed, removed) self.assertEqual(self.last_event.added, added) def test_trait_list_event(self): """ Record TraitListEvent behavior. """ # FIXME: The behavior of TraitListEvent is suboptimal with # respect to extended slice changes. Previously, TraitListObject # used to have a __setitem__() and a separate __setslice__() to # handle non-extended slices. Extended slices were added to the # underlying list object later. The __setitem__() code handled # the new extended slices, but created the TraitListEvent in the # same way it did for an integer index; namely it wrapped the # value with a list. For simple slices, the `index` attribute of # the TraitListEvent is an integer, and the `added` list is just # the list of values added. For an extended slice, the `index` # attribute is the slice object and the `added` list is the list # of values wrapped in another list. 
self.obj.alist = [1, 2, 3, 4] self.obj.on_trait_change(self._record_trait_list_event, "alist_items") del self.obj.alist[0] self.assertLastTraitListEventEqual(0, [1], []) self.obj.alist.append(5) self.assertLastTraitListEventEqual(3, [], [5]) self.obj.alist[0:2] = [6, 7] self.assertLastTraitListEventEqual(0, [2, 3], [6, 7]) self.obj.alist[0:2:1] = [8, 9] self.assertLastTraitListEventEqual(0, [6, 7], [8, 9]) old_event = self.last_event self.obj.alist[0:2:1] = [8, 9] # If no values changed, no new TraitListEvent will be generated. self.assertIs(self.last_event, old_event) self.obj.alist[0:4:2] = [10, 11] self.assertLastTraitListEventEqual( slice(0, 4, 2), [[8, 4]], [[10,
<gh_stars>0 from py.path import local from src.utils.logger import Log, create_logger from typing import Dict, Union from unittest.mock import MagicMock import logging import os import pytest import re # Define the expected logging format for the `create_logger` function EXPECTED_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s: %(message)s" # Define the expected base regular expression pattern for the log messages EXPECTED_LOG_MESSAGE_BASE = r"\d{{4}}-\d{{2}}-\d{{2}} \d{{2}}:\d{{2}}:\d{{2}},\d{{3}} - {name} - {level}: `{function}`:" @pytest.mark.parametrize("test_input_filename", [None, "hello.log", "world.log", "foo.log", "bar.log"]) @pytest.mark.parametrize("test_input_name", [None, "hello", "world", "foo", "bar"]) class TestCreateLogger: """Test the `create_logger` function.""" def test_log_name(self, temporary_log_directory: local, patch_logging_getlogger: MagicMock, test_input_name: str, test_input_filename: str) -> None: """Test the function is assigned the correct name.""" # Create a temporary log file, and get its path, if `test_input_filename` is not None if test_input_filename: temporary_log_file = temporary_log_directory.join(test_input_filename) temporary_log_file_path = os.path.join(temporary_log_file.dirname, temporary_log_file.basename) else: temporary_log_file_path = None # Run the `create_logger` function _ = create_logger(test_input_name, temporary_log_file_path) # Assert the correct name is used patch_logging_getlogger.assert_called_with(test_input_name) def test_log_level(self, temporary_log_directory: local, patch_logging_getlogger: MagicMock, test_input_name: str, test_input_filename: str) -> None: """Test the correct logging level is set.""" # Create a temporary log file, and get its path, if `test_input_filename` is not None if test_input_filename: temporary_log_file = temporary_log_directory.join(test_input_filename) temporary_log_file_path = os.path.join(temporary_log_file.dirname, temporary_log_file.basename) else: 
temporary_log_file_path = None # Run the `create_logger` function _ = create_logger(test_input_name, temporary_log_file_path) # Assert the correct logging level is set for the log patch_logging_getlogger.return_value.setLevel.assert_called_once_with(logging.DEBUG) def test_log_format(self, temporary_log_directory: local, patch_logging_formatter: MagicMock, test_input_name: str, test_input_filename: str) -> None: """Test the format of the log.""" # Create a temporary log file, and get its path, if `test_input_filename` is not None if test_input_filename: temporary_log_file = temporary_log_directory.join(test_input_filename) temporary_log_file_path = os.path.join(temporary_log_file.dirname, temporary_log_file.basename) else: temporary_log_file_path = None # Run the `create_logger` function _ = create_logger(test_input_name, temporary_log_file_path) # Assert the correct logging format is applied for the log patch_logging_formatter.assert_called_once_with(EXPECTED_LOG_FORMAT) def test_streamhandler_level(self, temporary_log_directory: local, patch_logging_streamhandler: MagicMock, test_input_name: str, test_input_filename: str) -> None: """Test the correct logging level for the stream handler is used.""" # Create a temporary log file, and get its path, if `test_input_filename` is not None if test_input_filename: temporary_log_file = temporary_log_directory.join(test_input_filename) temporary_log_file_path = os.path.join(temporary_log_file.dirname, temporary_log_file.basename) else: temporary_log_file_path = None # Run the `create_logger` function _ = create_logger(test_input_name, temporary_log_file_path) # Assert the correct logging level for the stream handler is used patch_logging_streamhandler.return_value.setLevel.assert_called_once_with(logging.INFO) def test_streamhandler_format(self, temporary_log_directory: local, patch_logging_formatter: MagicMock, patch_logging_streamhandler: MagicMock, test_input_name: str, test_input_filename: str) -> None: """Test the 
correct log format is used for the stream handler.""" # Create a temporary log file, and get its path, if `test_input_filename` is not None if test_input_filename: temporary_log_file = temporary_log_directory.join(test_input_filename) temporary_log_file_path = os.path.join(temporary_log_file.dirname, temporary_log_file.basename) else: temporary_log_file_path = None # Run the `create_logger` function _ = create_logger(test_input_name, temporary_log_file_path) # Assert the correct log format is set for the stream handler patch_logging_streamhandler.return_value.setFormatter.assert_called_once_with( patch_logging_formatter.return_value ) def test_filehandler_filename(self, temporary_log_directory: local, patch_logging_filehandler: MagicMock, test_input_name: str, test_input_filename: str) -> None: """Test the file handler is set with the correct filename.""" # Create a temporary log file, and get its path, if `test_input_filename` is not None if test_input_filename: temporary_log_file = temporary_log_directory.join(test_input_filename) temporary_log_file_path = os.path.join(temporary_log_file.dirname, temporary_log_file.basename) else: temporary_log_file_path = None # Run the `create_logger` function _ = create_logger(test_input_name, temporary_log_file_path) # If a filename is given, check that the file handler is set with it. 
Otherwise check the file handler is not # called if test_input_filename: patch_logging_filehandler.assert_called_with(temporary_log_file_path) else: assert not patch_logging_filehandler.called def test_filehandler_level(self, temporary_log_directory: local, patch_logging_filehandler: MagicMock, test_input_name: str, test_input_filename: str) -> None: """Test the file handler is set with the correct logging level.""" # Create a temporary log file, and get its path, if `test_input_filename` is not None if test_input_filename: temporary_log_file = temporary_log_directory.join(test_input_filename) temporary_log_file_path = os.path.join(temporary_log_file.dirname, temporary_log_file.basename) else: temporary_log_file_path = None # Run the `create_logger` function _ = create_logger(test_input_name, temporary_log_file_path) # If a filename is given, check that the file handler is set with the correct logging level. Otherwise check # the file handler is not called if test_input_filename: patch_logging_filehandler.return_value.setLevel.assert_called_once_with(logging.INFO) else: assert not patch_logging_filehandler.called def test_filehandler_format(self, temporary_log_directory: local, patch_logging_formatter: MagicMock, patch_logging_filehandler: MagicMock, test_input_name: str, test_input_filename: str) -> None: """Test the file handler is set with the correct logging format.""" # Create a temporary log file, and get its path, if `test_input_filename` is not None if test_input_filename: temporary_log_file = temporary_log_directory.join(test_input_filename) temporary_log_file_path = os.path.join(temporary_log_file.dirname, temporary_log_file.basename) else: temporary_log_file_path = None # Run the `create_logger` function _ = create_logger(test_input_name, temporary_log_file_path) # If a filename is given, check the file handler is set with the correct logging format. 
Otherwise check that # the file handler is not called if test_input_filename: patch_logging_filehandler.return_value.setFormatter.assert_called_once_with( patch_logging_formatter.return_value ) else: assert not patch_logging_filehandler.called def test_add_handlers_to_log(self, mocker, temporary_log_directory: local, patch_logging_getlogger: MagicMock, patch_logging_streamhandler: MagicMock, patch_logging_filehandler: MagicMock, test_input_name: str, test_input_filename: str) -> None: """Test the stream handler is added to the log, as well as the file handler, if a filename is given.""" # Create a temporary log file, and get its path, if `test_input_filename` is not None if test_input_filename: temporary_log_file = temporary_log_directory.join(test_input_filename) temporary_log_file_path = os.path.join(temporary_log_file.dirname, temporary_log_file.basename) else: temporary_log_file_path = None # Run the `create_logger` function _ = create_logger(test_input_name, temporary_log_file_path) # If a filename is given, check that the last two handlers added to the log are stream and file handler. 
# Otherwise, check that only the stream handler is added if test_input_filename: # Define the last two expected calls as the stream and file handlers (in order) test_expected = [mocker.call(patch_logging_streamhandler.return_value), mocker.call(patch_logging_filehandler.return_value)] # Assert that the last two calls to `addHandler` are correct assert patch_logging_getlogger.return_value.addHandler.call_args_list[-2:] == test_expected else: patch_logging_getlogger.return_value.addHandler.assert_called_with(patch_logging_streamhandler.return_value) def test_log_output(self, temporary_log_directory: local, patch_logging_getlogger: MagicMock, test_input_name: str, test_input_filename: str) -> None: """Test the function outputs the expected log.""" # Create a temporary log file, and get its path, if `test_input_filename` is not None if test_input_filename: temporary_log_file = temporary_log_directory.join(test_input_filename) temporary_log_file_path = os.path.join(temporary_log_file.dirname, temporary_log_file.basename) else: temporary_log_file_path = None # Run the `create_logger` function test_output = create_logger(test_input_name, temporary_log_file_path) # Assert the output is as expected assert test_output == patch_logging_getlogger.return_value # Define test cases for test_input_level argument in the `TestLog` test class args_test_log_test_input_level = list(sum( [(L, L.lower()) for L in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]], () )) @pytest.mark.parametrize("test_input_level", args_test_log_test_input_level) class TestLog: def test_logger(self, patch_src_utils_logger_create_logger: MagicMock, test_input_level: str) -> None: """Test the `self.logger` instance attribute.""" test_output = Log(patch_src_utils_logger_create_logger, test_input_level).logger assert test_output == patch_src_utils_logger_create_logger def test_level(self, patch_src_utils_logger_create_logger: MagicMock, test_input_level: str) -> None: """Test the `self.level` instance 
attribute.""" assert Log(patch_src_utils_logger_create_logger, test_input_level).level == test_input_level.lower() @pytest.mark.parametrize("test_input_function_duration", range(1, 5)) def test_log_messages_correct_for_no_exceptions( self, patch_src_utils_logger_time: MagicMock, example_log_file: Dict[str, Union[logging.Logger, logging.RootLogger, str]], test_input_level: str, test_input_function_duration: int ) -> None: """Test the decorator creates the correct log messages, if the function it wraps raises no exceptions.""" # Set the `side_effect` of `patch_src_utils_logger_time` patch_src_utils_logger_time.side_effect = [0, test_input_function_duration] @Log(example_log_file["logger"], test_input_level) def example_function(): """Example function that raises no errors.""" pass # Define the base regular expression pattern of the log message. This will be a datetime stamp, followed by the # logger name, logging level, and wrapped function name log_base_pattern = EXPECTED_LOG_MESSAGE_BASE.format(name=example_log_file["logger"].name, level=test_input_level.upper(), function=example_function.__name__) # Define the complete log message expected test_expected_regex_pattern = fr"{log_base_pattern} Executing function\n{log_base_pattern} Executed in " \ fr"{test_input_function_duration:0.2f} s" # Execute the `example_function` _ = example_function() # Open the log file, and assert the message is as expected. 
If level is DEBUG, assert the log is empty with open(example_log_file["path"], "r") as f: if test_input_level.upper() == "DEBUG": assert f.read() == "" else: assert re.match(test_expected_regex_pattern, f.read()) @pytest.mark.parametrize("test_input_function_duration", range(1, 5)) def test_log_messages_correct_for_exceptions( self, patch_src_utils_logger_time: MagicMock, example_log_file: Dict[str, Union[logging.Logger, logging.RootLogger, str]], test_input_level: str, test_input_function_duration: int ) -> None: """Test the decorator creates the correct log messages, if the function it wraps raises exceptions.""" # Set the `side_effect` of `patch_src_utils_logger_time` patch_src_utils_logger_time.side_effect = [0, test_input_function_duration] @Log(example_log_file["logger"], test_input_level) def example_function(): """Example function that raises a ValueError.""" raise ValueError("Testing for errors") # Define the base regular expression pattern of the entry and error log messages entry_log_base_pattern = EXPECTED_LOG_MESSAGE_BASE.format(name=example_log_file["logger"].name, level=test_input_level.upper(), function=example_function.__name__) error_log_base_pattern = EXPECTED_LOG_MESSAGE_BASE.format(name=example_log_file["logger"].name, level="ERROR", function=example_function.__name__) # Define the expected entry and error log regular expression patterns test_expected_entry_log_regex_pattern = entry_log_base_pattern + r" Executing function\n" test_expected_error_log_regex_pattern = error_log_base_pattern + r" Raised an exception!" # Execute the `example_function`, which should raise a `ValueError` with pytest.raises(ValueError): _ = example_function() # Open the log file, and assert the message is as expected. If level is DEBUG, assert the log is empty with open(example_log_file["path"], "r") as f: if
<filename>mmtbx/regression/model/tst_model_biomt_mtrix.py from __future__ import absolute_import, division, print_function import iotbx.pdb import mmtbx.model import time """ Test multiplication of hierarchy and SS annotations in different combinations of MTRIX and BIOMT records presence. """ single_mtrix_txt = """ MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1 MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1 MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1 MTRIX1 2 0.479787 -0.038259 -0.876550 0.00000 MTRIX2 2 -0.530698 0.782918 -0.324654 0.00000 MTRIX3 2 0.698688 0.620947 0.355330 0.00000 """ mtrix_txt = """ MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1 MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1 MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1 MTRIX1 2 0.479787 -0.038259 -0.876550 0.00000 MTRIX2 2 -0.530698 0.782918 -0.324654 0.00000 MTRIX3 2 0.698688 0.620947 0.355330 0.00000 MTRIX1 3 -0.361936 -0.592602 -0.719600 0.00000 MTRIX2 3 -0.896947 0.431671 0.095646 0.00000 MTRIX3 3 0.253950 0.680060 -0.687769 0.00000 """ biomt_txt = """ REMARK 350 BIOMT1 1 1.000000 0.000000 0.000000 0.00000 REMARK 350 BIOMT2 1 0.000000 1.000000 0.000000 0.00000 REMARK 350 BIOMT3 1 0.000000 0.000000 1.000000 0.00000 REMARK 350 BIOMT1 2 0.500000 -0.809017 0.309017 0.00000 REMARK 350 BIOMT2 2 0.809017 0.309017 -0.500000 0.00000 REMARK 350 BIOMT3 2 0.309017 0.500000 0.809017 0.00000 REMARK 350 BIOMT1 3 -0.309017 -0.500000 0.809017 0.00000 REMARK 350 BIOMT2 3 0.500000 -0.809017 -0.309017 0.00000 REMARK 350 BIOMT3 3 0.809017 0.309017 0.500000 0.00000 """ ss_txt = """ HELIX 6 6 ARG A 316 LEU A 318 5 3 HELIX 7 7 SER A 335 ASN A 341 1 7 SHEET 1 E 2 TYR A 305 SER A 308 0 SHEET 2 E 2 GLN A 311 GLU A 314 -1 O ARG A 313 N PHE A 306 """ # 300 atoms atoms_txt = """ ATOM 2065 N GLY A 304 3.950 -35.449 102.015 1.00 21.30 N ATOM 2066 CA GLY A 304 4.631 -35.764 103.257 1.00 19.87 C ATOM 2067 C GLY A 304 6.074 -36.196 103.097 1.00 19.37 C ATOM 2068 O GLY A 304 6.642 -36.823 103.994 1.00 18.69 O ATOM 2069 N TYR A 
305 6.673 -35.863 101.957 1.00 19.21 N ATOM 2070 CA TYR A 305 8.065 -36.210 101.688 1.00 18.15 C ATOM 2071 C TYR A 305 8.975 -35.002 101.819 1.00 18.33 C ATOM 2072 O TYR A 305 8.673 -33.920 101.314 1.00 19.14 O ATOM 2073 CB TYR A 305 8.202 -36.803 100.289 1.00 17.45 C ATOM 2074 CG TYR A 305 7.826 -38.258 100.228 1.00 18.23 C ATOM 2075 CD1 TYR A 305 8.761 -39.250 100.515 1.00 18.89 C ATOM 2076 CD2 TYR A 305 6.526 -38.647 99.917 1.00 17.60 C ATOM 2077 CE1 TYR A 305 8.411 -40.595 100.493 1.00 18.65 C ATOM 2078 CE2 TYR A 305 6.166 -39.982 99.895 1.00 18.16 C ATOM 2079 CZ TYR A 305 7.112 -40.954 100.182 1.00 18.93 C ATOM 2080 OH TYR A 305 6.756 -42.284 100.149 1.00 20.67 O ATOM 2081 N PHE A 306 10.097 -35.200 102.498 1.00 18.08 N ATOM 2082 CA PHE A 306 11.061 -34.133 102.707 1.00 18.50 C ATOM 2083 C PHE A 306 12.458 -34.585 102.325 1.00 19.27 C ATOM 2084 O PHE A 306 12.737 -35.780 102.239 1.00 19.20 O ATOM 2085 CB PHE A 306 11.071 -33.720 104.181 1.00 18.35 C ATOM 2086 CG PHE A 306 9.784 -33.115 104.650 1.00 18.59 C ATOM 2087 CD1 PHE A 306 9.582 -31.744 104.580 1.00 18.76 C ATOM 2088 CD2 PHE A 306 8.765 -33.919 105.141 1.00 18.71 C ATOM 2089 CE1 PHE A 306 8.379 -31.180 104.994 1.00 19.56 C ATOM 2090 CE2 PHE A 306 7.558 -33.366 105.557 1.00 20.09 C ATOM 2091 CZ PHE A 306 7.365 -31.993 105.483 1.00 20.22 C ATOM 2092 N MET A 307 13.330 -33.615 102.088 1.00 20.68 N ATOM 2093 CA MET A 307 14.717 -33.899 101.773 1.00 21.16 C ATOM 2094 C MET A 307 15.423 -33.667 103.103 1.00 22.68 C ATOM 2095 O MET A 307 15.811 -32.544 103.417 1.00 23.48 O ATOM 2096 CB MET A 307 15.243 -32.920 100.729 1.00 20.61 C ATOM 2097 CG MET A 307 16.666 -33.197 100.265 1.00 21.45 C ATOM 2098 SD MET A 307 16.835 -34.726 99.306 1.00 21.53 S ATOM 2099 CE MET A 307 18.018 -35.567 100.270 1.00 21.98 C ATOM 2100 N SER A 308 15.551 -34.726 103.900 1.00 23.90 N ATOM 2101 CA SER A 308 16.196 -34.629 105.204 1.00 25.50 C ATOM 2102 C SER A 308 17.711 -34.736 105.044 1.00 26.51 C ATOM 2103 O SER A 308 18.287 
-35.817 105.177 1.00 26.25 O ATOM 2104 CB SER A 308 15.681 -35.736 106.127 1.00 25.33 C ATOM 2105 OG SER A 308 16.066 -35.503 107.471 1.00 28.15 O ATOM 2106 N ASN A 309 18.344 -33.602 104.754 1.00 27.44 N ATOM 2107 CA ASN A 309 19.787 -33.533 104.556 1.00 28.19 C ATOM 2108 C ASN A 309 20.235 -34.192 103.260 1.00 28.98 C ATOM 2109 O ASN A 309 20.106 -33.612 102.183 1.00 30.58 O ATOM 2110 CB ASN A 309 20.522 -34.170 105.737 1.00 28.18 C ATOM 2111 CG ASN A 309 20.631 -33.238 106.923 1.00 28.56 C ATOM 2112 OD1 ASN A 309 21.308 -32.212 106.855 1.00 28.58 O ATOM 2113 ND2 ASN A 309 19.963 -33.585 108.017 1.00 27.94 N ATOM 2114 N ASP A 310 20.753 -35.410 103.369 1.00 28.99 N ATOM 2115 CA ASP A 310 21.249 -36.144 102.212 1.00 28.59 C ATOM 2116 C ASP A 310 20.246 -37.125 101.612 1.00 27.76 C ATOM 2117 O ASP A 310 20.386 -37.526 100.457 1.00 28.39 O ATOM 2118 CB ASP A 310 22.514 -36.904 102.598 1.00 30.83 C ATOM 2119 CG ASP A 310 22.271 -37.900 103.717 1.00 33.72 C ATOM 2120 OD1 ASP A 310 21.917 -37.469 104.838 1.00 32.95 O ATOM 2121 OD2 ASP A 310 22.426 -39.118 103.473 1.00 34.80 O ATOM 2122 N GLN A 311 19.238 -37.512 102.388 1.00 26.40 N ATOM 2123 CA GLN A 311 18.239 -38.461 101.910 1.00 24.95 C ATOM 2124 C GLN A 311 16.812 -37.956 101.941 1.00 23.86 C ATOM 2125 O GLN A 311 16.499 -36.941 102.561 1.00 23.90 O ATOM 2126 CB GLN A 311 18.268 -39.739 102.741 1.00 27.17 C ATOM 2127 CG GLN A 311 19.513 -40.558 102.642 1.00 31.00 C ATOM 2128 CD GLN A 311 19.343 -41.880 103.340 1.00 32.73 C ATOM 2129 OE1 GLN A 311 18.986 -41.928 104.519 1.00 32.73 O ATOM 2130 NE2 GLN A 311 19.591 -42.969 102.618 1.00 35.40 N ATOM 2131 N ILE A 312 15.948 -38.706 101.272 1.00 22.64 N ATOM 2132 CA ILE A 312 14.529 -38.409 101.221 1.00 20.51 C ATOM 2133 C ILE A 312 13.903 -39.134 102.404 1.00 21.02 C ATOM 2134 O ILE A 312 14.209 -40.300 102.653 1.00 21.21 O ATOM 2135 CB ILE A 312 13.886 -38.956 99.933 1.00 19.13 C ATOM 2136 CG1 ILE A 312 14.452 -38.229 98.715 1.00 18.32 C ATOM 2137 CG2 ILE A 312 12.375 
-38.824 100.008 1.00 17.77 C ATOM 2138 CD1 ILE A 312 13.918 -38.745 97.395 1.00 17.16 C ATOM 2139 N ARG A 313 13.042 -38.442 103.140 1.00 21.29 N ATOM 2140 CA ARG A 313 12.363 -39.049 104.275 1.00 21.69 C ATOM 2141 C ARG A 313 10.872 -38.764 104.223 1.00 22.24 C ATOM 2142 O ARG A 313 10.445 -37.705 103.766 1.00 22.50 O ATOM 2143 CB ARG A 313 12.932 -38.539 105.601 1.00 21.74 C ATOM 2144 CG ARG A 313 14.216 -39.223 106.028 1.00 23.27 C ATOM 2145 CD ARG A 313 14.488 -39.007 107.511 1.00 24.27 C ATOM 2146 NE ARG A 313 15.647 -39.768 107.970 1.00 26.04 N ATOM 2147 CZ ARG A 313 16.906 -39.483 107.652 1.00 26.93 C ATOM 2148 NH1 ARG A 313 17.177 -38.443 106.873 1.00 26.76 N ATOM 2149 NH2 ARG A 313 17.895 -40.244 108.103 1.00 27.18 N ATOM 2150 N GLU A 314 10.085 -39.729 104.686 1.00 23.21 N ATOM 2151 CA GLU A 314 8.637 -39.591 104.709 1.00 23.33 C ATOM 2152 C GLU A 314 8.256 -39.096 106.107 1.00 23.56 C ATOM 2153 O GLU A 314 8.950 -39.370 107.084 1.00 23.95 O ATOM 2154 CB GLU A 314 7.990 -40.946 104.405 1.00 23.60 C ATOM 2155 CG GLU A 314 6.517 -40.906 104.006 1.00 25.54 C ATOM 2156 CD GLU A 314 5.571 -40.837 105.196 1.00 27.64 C ATOM 2157 OE1 GLU A 314 5.803 -41.568 106.184 1.00 27.37 O ATOM 2158 OE2 GLU A 314 4.586 -40.068 105.137 1.00 27.35 O ATOM 2159 N ARG A 315 7.162 -38.349 106.187 1.00 23.65 N ATOM 2160 CA ARG A 315 6.672 -37.790 107.443
from __future__ import division from __future__ import unicode_literals from __future__ import print_function from __future__ import absolute_import # Standard imports from future import standard_library standard_library.install_aliases() from builtins import str from builtins import * from past.utils import old_div import logging import attrdict as ad import numpy as np import datetime as pydt # Our imports import emission.analysis.point_features as pf import emission.analysis.intake.segmentation.trip_segmentation as eaist import emission.core.wrapper.location as ecwl import emission.analysis.intake.segmentation.restart_checking as eaisr class DwellSegmentationDistFilter(eaist.TripSegmentationMethod): def __init__(self, time_threshold, point_threshold, distance_threshold): """ Determines segmentation points for points that were generated using a distance filter (i.e. report points every n meters). This will *not* work for points generated using a distance filter because it expects to have a time gap between subsequent points to detect the trip end, and with a time filter, we get updates every n seconds. At least on iOS, we sometimes get points even when the phone is not in motion. This seems to be triggered by zigzagging between low quality points. """ self.time_threshold = time_threshold self.point_threshold = point_threshold self.distance_threshold = distance_threshold def segment_into_trips(self, timeseries, time_query): """ Examines the timeseries database for a specific range and returns the segmentation points. Note that the input is the entire timeseries and the time range. This allows algorithms to use whatever combination of data that they want from the sensor streams in order to determine the segmentation points. 
""" filtered_points_df = timeseries.get_data_df("background/filtered_location", time_query) transition_df = timeseries.get_data_df("statemachine/transition", time_query) if len(transition_df) > 0: logging.debug("transition_df = %s" % transition_df[["fmt_time", "transition"]]) else: logging.debug("no transitions found. This can happen for continuous sensing") self.last_ts_processed = None logging.info("Last ts processed = %s" % self.last_ts_processed) segmentation_points = [] last_trip_end_point = None curr_trip_start_point = None just_ended = True for idx, row in filtered_points_df.iterrows(): currPoint = ad.AttrDict(row) currPoint.update({"idx": idx}) logging.debug("-" * 30 + str(currPoint.fmt_time) + "-" * 30) if curr_trip_start_point is None: logging.debug("Appending currPoint because the current start point is None") # segmentation_points.append(currPoint) if just_ended: if self.continue_just_ended(idx, currPoint, filtered_points_df): # We have "processed" the currPoint by deciding to glom it self.last_ts_processed = currPoint.metadata_write_ts continue # else: # Here's where we deal with the start trip. At this point, the # distance is greater than the filter. sel_point = currPoint logging.debug("Setting new trip start point %s with idx %s" % (sel_point, sel_point.idx)) curr_trip_start_point = sel_point just_ended = False else: # Using .loc here causes problems if we have filtered out some points and so the index is non-consecutive. # Using .iloc just ends up including points after this one. # So we reset_index upstream and use it here. 
last10Points_df = filtered_points_df.iloc[max(idx-self.point_threshold, curr_trip_start_point.idx):idx+1] lastPoint = ad.AttrDict(filtered_points_df.iloc[idx-1]) if self.has_trip_ended(lastPoint, currPoint, timeseries): last_trip_end_point = lastPoint logging.debug("Appending last_trip_end_point %s with index %s " % (last_trip_end_point, idx-1)) segmentation_points.append((curr_trip_start_point, last_trip_end_point)) logging.info("Found trip end at %s" % last_trip_end_point.fmt_time) # We have processed everything up to the trip end by marking it as a completed trip self.last_ts_processed = currPoint.metadata_write_ts just_ended = True # Now, we have finished processing the previous point as a trip # end or not. But we still need to process this point by seeing # whether it should represent a new trip start, or a glom to the # previous trip if not self.continue_just_ended(idx, currPoint, filtered_points_df): sel_point = currPoint logging.debug("Setting new trip start point %s with idx %s" % (sel_point, sel_point.idx)) curr_trip_start_point = sel_point just_ended = False # Since we only end a trip when we start a new trip, this means that # the last trip that was pushed is ignored. Consider the example of # 2016-02-22 when I took kids to karate. We arrived shortly after 4pm, # so during that remote push, a trip end was not detected. And we got # back home shortly after 5pm, so the trip end was only detected on the # phone at 6pm. At that time, the following points were pushed: # ..., 2016-02-22T16:04:02, 2016-02-22T16:49:34, 2016-02-22T16:49:50, # ..., 2016-02-22T16:57:04 # Then, on the server, while iterating through the points, we detected # a trip end at 16:04, and a new trip start at 16:49. But we did not # detect the trip end at 16:57, because we didn't start a new point. 
# This has two issues: # - we won't see this trip until the next trip start, which may be on the next day # - we won't see this trip at all, because when we run the pipeline the # next time, we will only look at points from that time onwards. These # points have been marked as "processed", so they won't even be considered. # There are multiple potential fixes: # - we can mark only the completed trips as processed. This will solve (2) above, but not (1) # - we can mark a trip end based on the fact that we only push data # when a trip ends, so if we have data, it means that the trip has been # detected as ended on the phone. # This seems a bit fragile - what if we start pushing incomplete trip # data for efficiency reasons? Therefore, we also check to see if there # is a trip_end_detected in this timeframe after the last point. If so, # then we end the trip at the last point that we have. if not just_ended and len(transition_df) > 0: stopped_moving_after_last = transition_df[(transition_df.ts > currPoint.ts) & (transition_df.transition == 2)] logging.debug("stopped_moving_after_last = %s" % stopped_moving_after_last[["fmt_time", "transition"]]) if len(stopped_moving_after_last) > 0: logging.debug("Found %d transitions after last point, ending trip..." % len(stopped_moving_after_last)) segmentation_points.append((curr_trip_start_point, currPoint)) self.last_ts_processed = currPoint.metadata_write_ts else: logging.debug("Found %d transitions after last point, not ending trip..." % len(stopped_moving_after_last)) return segmentation_points def has_trip_ended(self, lastPoint, currPoint, timeseries): # So we must not have been moving for the last _time filter_ # points. So the trip must have ended # Since this is a distance filter, we detect that the last # trip has ended at the time that the new trip starts. So # if the last_trip_end_point is lastPoint, then # curr_trip_start_point should be currPoint. 
But then we will # have problems with the spurious, noisy points that are # generated until the geofence is turned on, if ever # So we will continue to defer new trip starting until we # have worked through all of those. timeDelta = currPoint.ts - lastPoint.ts distDelta = pf.calDistance(lastPoint, currPoint) logging.debug("lastPoint = %s, time difference = %s dist difference %s" % (lastPoint, timeDelta, distDelta)) if timeDelta > self.time_threshold: # We have been at this location for more than the time filter. # This could be because we have not been moving for the last # _time filter_ points, or because we didn't get points for # that duration, (e.g. because we were underground) if timeDelta > 0: speedDelta = old_div(distDelta, timeDelta) else: speedDelta = np.nan # this is way too slow. On ios, we use 5meters in 10 minutes. # On android, we use 10 meters in 5 mins, which seems to work better # for this kind of test speedThreshold = old_div(float(self.distance_threshold * 2), (old_div(self.time_threshold, 2))) if eaisr.is_tracking_restarted_in_range(lastPoint.ts, currPoint.ts, timeseries): logging.debug("tracking was restarted, ending trip") return True # In general, we get multiple locations between each motion activity. 
If we see a bunch of motion activities # between two location points, and there is a large gap between the last location and the first # motion activity as well, let us just assume that there was a restart ongoing_motion_check = len(eaisr.get_ongoing_motion_in_range(lastPoint.ts, currPoint.ts, timeseries)) > 0 if timeDelta > self.time_threshold and not ongoing_motion_check: logging.debug("lastPoint.ts = %s, currPoint.ts = %s, threshold = %s, large gap = %s, ongoing_motion_in_range = %s, ending trip" % (lastPoint.ts, currPoint.ts,self.time_threshold, currPoint.ts - lastPoint.ts, ongoing_motion_check)) return True # http://www.huffingtonpost.com/hoppercom/the-worlds-20-longest-non-stop-flights_b_5994268.html # Longest flight is 17 hours, which is the longest you can go without cell reception # And even if you split an air flight that long into two, you will get some untracked time in the # middle, so that's good. TWELVE_HOURS = 12 * 60
['MSIL1C'], 'include_s2_sr': 'sr_refl', 'keep_intermediate_data': 'intermediate_data' } # If nothing to do just return if self._xml_filename is None: return # Remove generated products that were not requested products_to_remove = [] if not options['include_customized_source_data']: products_to_remove.extend( order2product['source_data']) if not options['include_s2_sr']: products_to_remove.append( order2product['include_s2_sr']) if not options['keep_intermediate_data']: products_to_remove.append( order2product['keep_intermediate_data']) if products_to_remove is not None: # Create and load the metadata object espa_metadata = Metadata(xml_filename=self._xml_filename) # Search for and remove the items for band in espa_metadata.xml_object.bands.band: if band.attrib['product'] in products_to_remove: self.remove_band_from_xml(band) # Validate the XML espa_metadata.validate() # Write it to the XML file espa_metadata.write(xml_filename=self._xml_filename) del espa_metadata def cleanup_work_dir(self): """Cleanup all the intermediate non-products and the science products not requested """ product_id = self._parms['product_id'] options = self._parms['options'] # Define intermediate files that need to be removed before product # tarball generation intermediate_files = [ 'lndsr.*.txt', 'lndcal.*.txt', 'LogReport*', '*_elevation.*' ] # Define L1 source files that may need to be removed before product # tarball generation l1_source_files = [ # TODO perhaps eventually we will want to add the option to deliver these r'MTD_MSIL1C\.xml', r'MTD_TL\.xml', r'_B[0-9,A-Z]' ] # Change to the working directory current_directory = os.getcwd() os.chdir(self._work_dir) try: non_products = [] # Remove the intermediate non-product files if not options['keep_intermediate_data']: for item in intermediate_files: non_products.extend(glob.glob(item)) # Add level 1 source files if not requested if not options['include_source_data']: for item in l1_source_files: non_products.extend([f for f in 
os.listdir('.') if re.search(item, f)]) if len(non_products) > 0: cmd = ' '.join(['rm', '-rf'] + non_products) self._logger.info(' '.join(['REMOVING INTERMEDIATE DATA' ' COMMAND:', cmd])) output = '' try: output = utilities.execute_cmd(cmd) finally: if len(output) > 0: self._logger.info(output) self.remove_products_from_xml() finally: # Change back to the previous directory os.chdir(current_directory) def generate_statistics(self): """Generates statistics if required for the processor """ options = self._parms['options'] # Nothing to do if the user did not specify anything to build if not self._build_products or not options['include_statistics']: return # Generate the stats for each stat'able' science product # Hold the wild card strings in a type based dictionary files_to_search_for = dict() """ # These original L1C bands may be included at a later date s2_toa_bands = list() s2_toa_bands.extend([*_B[0-9,A-Z].img]) """ s2_sr_bands = list() s2_sr_bands.extend(['*_sr_band*.img']) # The types must match the types in settings.py files_to_search_for['SR'] = s2_sr_bands # files_to_search_for['TOA'] = s2_toa_bands files_to_search_for['INDEX'] = ['*_nbr.img', '*_nbr2.img', '*_ndmi.img', '*_ndvi.img', '*_evi.img', '*_savi.img', '*_msavi.img'] # Build a command line arguments list cmd = ['espa_statistics.py', '--work_directory', self._work_dir, "--files_to_search_for '{}'".format(json.dumps(files_to_search_for))] # Turn the list into a string cmd = ' '.join(cmd) self._logger.info(' '.join(['SUMMARY LANDSAT STATISTICS COMMAND:', cmd])) output = '' try: output = utilities.execute_cmd(cmd) finally: if len(output) > 0: self._logger.info(output) def distribute_statistics(self): """ Distributes statistics if required for the processor. Note: We override the CDRProcessor method when processing a Sentinel-2 scene because the product ID given by the ESPA API is different from the espa_product_formatter outputs for Sentinel-2. 
""" options = self._parms['options'] if options['include_statistics']: # regex pattern to match the espa-formatted Sentinel-2 naming convention pattern = r'S2[A,B]_\w{3}_[A-Z,0-9]{3}_[A-Z,0-9]{6}_[0-9]{8}_[0-9]{8}' p = re.compile(pattern) files = glob.glob(os.path.join(self._work_dir, '*')) s2_product_id = None for f in files: match = p.search(os.path.basename(f)) if match is not None: s2_product_id = match.group() break if s2_product_id is not None: s2_parms = copy.deepcopy(self._parms) s2_parms['product_id'] = s2_product_id else: msg = 'Unable to determine the ESPA Sentinel-2 Product ID' self._logger.exception(msg) raise ESPAException(msg) try: immutability = utilities.str2bool(self._cfg.get('immutable_distribution')) distribution.distribute_statistics(immutability, self._work_dir, self._output_dir, s2_parms, self._user, self._group) except (Exception, ESPAException): msg = 'An exception occurred delivering the stats' self._logger.exception(msg) raise ESPAException(msg) self._logger.info('*** Statistics Distribution Complete ***') def get_product_name(self): """Build the product name from the product information and current time """ # product_id = self._parms['product_id'] product_id = None # For sentinel-2 we want to work with the ESPA formatting # as opposed to the original product_id returned by M2M if self._product_name is None: prods = os.listdir(self._work_dir) for prod in prods: # Use the scene name taken from the XML filename if prod.startswith('S2') and prod.endswith('.xml'): product_id = os.path.splitext(prod)[0] break if product_id is None: msg = "Unable to determine ESPA-formatted product id" self._logger.exception(msg) raise ESPAException(msg) # Get the current time information ts = datetime.datetime.today() # Extract stuff from the product information product_prefix = sensor.info(product_id).product_prefix product_name = ('{0}-SC{1}{2}{3}{4}{5}{6}' .format(product_prefix, str(ts.year).zfill(4), str(ts.month).zfill(2), str(ts.day).zfill(2), 
str(ts.hour).zfill(2), str(ts.minute).zfill(2), str(ts.second).zfill(2))) self._product_name = product_name return self._product_name class VIIRSProcessor(CDRProcessor): """Implements the common processing between all of the VIIRS processors """ def __init__(self, cfg, parms): super(VIIRSProcessor, self).__init__(cfg, parms) self._h5_filename = None def validate_parameters(self): """Validates the parameters required for the processor """ # Call the base class parameter validation super(VIIRSProcessor, self).validate_parameters() self._logger.info('Validating [VIIRSProcessor] parameters') options = self._parms['options'] # Force these parameters to false if not provided # They are the required includes for product generation required_includes = ['include_customized_source_data', 'include_source_data', 'include_statistics'] for parameter in required_includes: if not parameters.test_for_parameter(options, parameter): self._logger.warning('[{}] parameter missing defaulting to' ' False'.format(parameter)) options[parameter] = False # Determine if we need to build products if not options['include_customized_source_data'] and not options['include_viirs_ndvi']: self._logger.info('***NO CUSTOMIZED PRODUCTS CHOSEN***') self._build_products = False else: self._build_products = True def stage_input_data(self): """Stages the input data required for the processor """ product_id = self._parms['product_id'] download_url = self._parms['download_url'] file_name = ''.join([product_id, settings.VIIRS_INPUT_FILENAME_EXTENSION]) staged_file = os.path.join(self._stage_dir, file_name) # Download the source data transfer.download_file_url(download_url, staged_file) self._h5_filename = os.path.basename(staged_file) work_file = os.path.join(self._work_dir, self._h5_filename) # Copy the staged data to the work directory shutil.copyfile(staged_file, work_file) os.unlink(staged_file) def convert_to_raw_binary(self): """Converts the Viirs input data to our internal raw binary format """ options 
= self._parms['options']

        # Build a command line arguments list
        cmd = ['convert_viirs_to_espa',
               '--hdf', self._h5_filename]
        if not options['include_source_data']:
            # Let the converter delete the source HDF5 once converted
            cmd.append('--del_src_files')

        # Turn the list into a string
        cmd = ' '.join(cmd)
        self._logger.info(' '.join(['CONVERT VIIRS TO ESPA COMMAND:', cmd]))

        output = ''
        try:
            output = utilities.execute_cmd(cmd)
        finally:
            # Log any converter output even when the command failed
            if len(output) > 0:
                self._logger.info(output)

    def generate_spectral_indices(self):
        """Generates the requested spectral indices

        Currently only NDVI is supported, driven by the
        'include_viirs_ndvi' order option.
        """

        options = self._parms['options']

        if options['include_viirs_ndvi']:
            cmd = ['spectral_indices.py',
                   '--xml', self._xml_filename,
                   '--ndvi']

            cmd = ' '.join(cmd)
            self._logger.info(' '.join(['SPECTRAL INDICES COMMAND:', cmd]))

            output = ''
            try:
                output = utilities.execute_cmd(cmd)
            finally:
                # Log any command output even when the command failed
                if len(output) > 0:
                    self._logger.info(output)

    def build_science_products(self):
        """Build the science products requested by the user

        Note: We get science products as the input, so the only thing
        really happening here is generating a customized product for the
        statistics generation.
        - Added option to request spectral indices but this should only be
          available for the VNP09GA products
        """

        self._logger.info('[ViirsProcessor] Building Science Products')

        # Change to the working directory
        current_directory = os.getcwd()
        os.chdir(self._work_dir)

        try:
            self.convert_to_raw_binary()
            self.generate_spectral_indices()
        finally:
            # Change back to the previous directory
            os.chdir(current_directory)

    def remove_products_from_xml(self):
        """Remove the specified products from the XML file

        The file is read into memory, processed, and written back out with
        out the specified products.

        Specific for Viirs XML
        """

        options = self._parms['options']

        # Map order options to the products in the XML files
        # For Viirs, the source data is surface reflectance...
order2product = { 'source_data': ['sr_refl'] } # If nothing to do just return if self._xml_filename is None: return # Remove source products that were not requested products_to_remove = [] if not options['include_customized_source_data']: products_to_remove.extend( order2product['source_data']) if products_to_remove is not None: # Create and load the metadata object espa_metadata = Metadata(xml_filename=self._xml_filename) # Search for and remove the items for band in espa_metadata.xml_object.bands.band: if band.attrib['product'] in products_to_remove: self.remove_band_from_xml(band) # Validate the XML espa_metadata.validate() # Write it to the XML file espa_metadata.write(xml_filename=self._xml_filename) del espa_metadata def cleanup_work_dir(self): """Cleanup source data if it was not requested """ # Change to the working directory current_directory = os.getcwd() os.chdir(self._work_dir) try: self.remove_products_from_xml() finally: # Change back to the previous directory os.chdir(current_directory) return def generate_statistics(self): """Generates statistics if required for the processor """ options = self._parms['options'] # Nothing to do if the user did not specify anything to build if not self._build_products or not options['include_statistics']: return # Generate the stats for each stat'able' science product # Hold the wild card strings in a type based dictionary files_to_search_for = dict() # VIIRS files # The types must match the types in settings.py files_to_search_for['SR'] = ['*SurfReflect_I*.img'] files_to_search_for['INDEX'] = ['*_sr_ndvi.img'] # Build a command line arguments list cmd = ['espa_statistics.py', '--work_directory', self._work_dir, "--files_to_search_for '{}'".format(json.dumps(files_to_search_for))] # Turn the list into a string cmd = ' '.join(cmd) self._logger.info('
<filename>code/Practica2.py #!/usr/bin/env python # coding: utf-8 # # Pràctica 2: Neteja i anàlisis de les dades # # El següent notebook esta orientat a resoldre la pràctica 2 de l'assignatura *M2.951 - Tipologia i cicle de vida de les dades* del màster en Data Science de la UOC. # # ### Nota important # # Per poder executar el notebook, es necessari la descàrrega dels fitxers *csv* que conforman el dataset de la pràctica: GlobalTemperatures, GlobalLandTemperaturesByCountry i GlobalLandTemperaturesByCity, i situarlos dins la carpeta **data/** del projecte de github. La raó per la qual no es troben actualment en el projecte de github es el tamany, ja que alguns d'aquests fitxers superen els *25MB* d'espai i Github no permet la seva càrrega. # # ### Llibreries # In[1]: import numpy as np import pandas as pd import warnings from scipy import stats import statsmodels.api as sm import matplotlib.pyplot as plt import seaborn as sns warnings.filterwarnings('ignore') get_ipython().run_line_magic('matplotlib', 'inline') # # Descripció del dataset # # El dataset utilitzat per realitzar aquesta practica tracta sobre el canvi climàtic en les temperatures de l'aire a la superficie de la Terra, es pot trobar a partir del següent enllaç: [**climate-change-earth-surface-temperature-data**](https://www.kaggle.com/berkeleyearth/climate-change-earth-surface-temperature-data). # Aquest dataset d'ús public a traves de la plataforma Kaggle, consta de la licencia *CC BY-NC-SA 4.0*. # # És tracta d'un dataset que conté registres de dades des de l'any 1750 fins al 2015 sobre la temperatura de l'aire a la superficie mesurada en diferents punts de la Terra. # # En aquesta pràctica es vol plantejar l'estudi de l'evolució de la temperatura en la superficíe terrestre, per compendre si es cert que hi ha hagut un augment de les temperatures en els ultims anys, i consequentment confirmar que el canvi climatic referent a la temperatura terrestre es real. 
# # El dataset constà de 4 fitxers de dades en format *csv*: # # # - GlobalTemperatures.csv # - GlobalLandTemperaturesByCountry.csv # - GlobalLandTemperaturesByState.csv # - GlobalLandTemperaturesByMajorCity.csv # - GlobalLandTemperaturesByCity.csv # # Per al cas d'estudi plantejat en aquesta pràctica, utilitzarem les dades dels fitxers *GlobalTemperatures*, *GlobalLandTemperaturesByCountry* i *GlobalLandTemperaturesByCity*. # # A continuació es detalla la informació que contenen cadascún d'aquests datasets, però primer, es llegirant aquests fitxers per poder obtindre un millor resum. # In[2]: global_temp=pd.read_csv('../data/GlobalTemperatures.csv') countries_temp=pd.read_csv('../data/GlobalLandTemperaturesByCountry.csv') cities_temp=pd.read_csv('../data/GlobalLandTemperaturesByCity.csv') # ## GlobalTemperatures # # Dataset info: # In[3]: global_temp.info() # ### Variables # # El dataset *GlobalTemperatures* conté 3192 registres i 9 columnes (no totes les columnes contenen informació en tots els registres i per tant més endavant s'hauràn de tractar aquests valors nuls), les quals es corresponen a cadascuna de les següents variables: # # - **Date**: data del registre, començant des de l'any 1750 on es registraba la temperatura mitjana en la terra, i a partira del 1850, es registraba també els maxims i minims de les temperatures a la superficie terrestre i la dels oceans. # # - **LandAverageTemperature**: promig global de la temperatura a la terra en graus celsius. # # - **LandAverageTemperatureUncertainty**: valor del 95% de l'interval de confiança sobre la variable de la mitjana. # # - **LandMaxTemperature**: promig global de la temperatura maxima en la terra en graus celsius. # # - **LandMaxTemperatureUncertainty**: valor del 95% de l'interval de confiança sobre la variable de la mitjana de la temperatura máxima. # # - **LandMinTemperature**: promig globla de la temperatura minima en la terra en graus celsius. 
# # - **LandMinTemperatureUncertainty**: valor del 95% de l'interval de confiança sobre la variable de la mitjana de la temperatura minima. # # - **LandAndOceanAverageTemperature**: promig global de la temperatura als oceans i a la terra en celsius. # # - **LandAndOceanAverageTemperatureUncertainty**: valor del 95% de l'interval de confiança sobre la variable de la mitjana de la temperatura als oceans i a la terra. # ## GlobalLandTemperaturesByCountry # # Dataset info: # In[4]: countries_temp.info() # ### Variables # # El dataset *GlobalLandTemperaturesByCountry* conté 577462 registres i 4 columnes, que es corresponen a les següents variables: # # - **dt**: data en la qual es va mesura la informació. # - **AverageTemperature**: promig de la temperatura terrestre en celsius. # - **AverateTemperatureUncertainty**: valor del 95% de l'interval de confiança de la mitjana. # - **Country**: Pais on es va obtindre el valor de la temperatura. # # ## GlobalLandTemperaturesByCity # # Dataset info: # In[5]: cities_temp.info() # ### Variables # # El dataset *GlobalLantTemperaturesByCity* conté 8588212 registre i 7 columnes que es corresponen a les següents variables: # # - **dt**: data en la qual es va mesura la informació. # - **AverageTemperature**: promig de la temperatura terrestre en celsius. # - **AverateTemperatureUncertainty**: valor del 95% de l'interval de confiança de la mitjana. # - **City**: Ciutat on es va realitzar la mesura de la temperatura registrada. # - **Country**: Pais on pertany la ciutat on es va realitzar la mesura. # - **Latitude**: Valor de la latitud de la localització de la ciutat en graus # - **Longitud**: Valor de la longitud de la localització de la ciutat en graus. # # Integració i selecció de les dades d'interes # Primerament, observarem les dades per aclarir quines dades ens poden ser d'interès i quines no per a l'estudi plantejat en la pràctica. 
# In[6]: global_temp.describe() # In[7]: global_temp.head() # In[8]: countries_temp.describe() # In[9]: cities_temp.describe() # ### Dades d'interes # # A partir de l'observació anterior, es pot determinar: # # - Els tres datasets contenent dades que s'hauran de netejar previament a l'estudi a realitzar. # - El dataset *GlobalTemperatures* conte la variable *LandAverageTemperature*, la qual es d'interes per l'estudi. # - Els datasets *GlobalLandTemperaturesByCountry* i *GlobalLandTemperaturesByCity* contenen també informació d'interès per l'estudi en les variables *AverageTemperature*. # # Neteja de dades # # ## Les dades contenen zeros o elements buits? Com gestionaries aquests casos? # Les dades contenen elements NaN (nulls). Aquests NaN en la seva majoria es corresponent a les dates entre 1750 i 1850 ja que com s'ha descrit abans en el dataset *GlobalTemperatures*, durant aquell periode nomes registrava la temperatura mitjana en terra i per altra banda, es comprensible que tractantse d'un registre de dades tant antic, hi haguin casos de dades perdudes. # # Tot i això, per al cas d'estudi no afecta ja que, com sabem, el canvi climàtic i l'augment de temperatures es un desastre humà que es va començar a esdevenir durant l'última meitat del segle XX, i per tant, el fet de no tindre alguns registres del segle XVIII, a priori, no a d'afectar. # # Aleshores, s'obtarà per eliminar els registres de dades amb valors nulls dels datasets. # In[10]: global_temp.dropna(inplace=True) countries_temp.dropna(inplace=True) cities_temp.dropna(inplace=True) # ## Identificació i tractament de valors extrems. 
# Next we remove the **outlier** values from the three loaded datasets.
#
# NOTE(fix): the original cells evaluated the z-score filter expressions
# without assigning the result, so the outlier rows were never actually
# removed. The filtered frames are now assigned back so the downstream
# analysis really works on outlier-free data.

# In[11]:

# Keep only rows whose LandAverageTemperature is within 3 standard deviations
global_temp = global_temp[(np.abs(stats.zscore(global_temp['LandAverageTemperature'])) < 3)]

# In[12]:

countries_temp = countries_temp[(np.abs(stats.zscore(countries_temp['AverageTemperature'])) < 3)]

# In[13]:

cities_temp = cities_temp[(np.abs(stats.zscore(cities_temp['AverageTemperature'])) < 3)]

# For the by-country temperature dataset we also take the opportunity to
# normalize some of the names used to record the country:

# In[14]:

countries_temp['Country'].replace({'Denmark (Europe)':'Denmark','France (Europe)':'France','Netherlands (Europe)':'Netherlands','United Kingdom (Europe)':'United Kingdom'},inplace=True)

# Per-country mean temperature over the whole period
temp_country1=countries_temp.groupby(['Country'])['AverageTemperature'].mean().reset_index()

# # Analysis of the data

# ## Selection of the groups of data to analyze.

# As described above, the data of interest to analyze are:
#
# - AverageTemperature from the GlobalLandTemperaturesByCity dataset, loaded into the variable *cities_average_temp*.
# - AverageTemperature from the GlobalLandTemperaturesByCountry dataset, loaded into the variable *country_average_temp*.
# - LandAverageTemperature from the GlobalTemperatures dataset, loaded into the variable *global_land_average*.
# In[15]: country_average_temp=countries_temp.groupby(['dt'])['AverageTemperature'].mean().reset_index() country_average_temp=countries_temp[['AverageTemperature']] country_average_temp.describe() # In[16]: cities_temp['year']=cities_temp['dt'].apply(lambda x: x[:4]) cities_temp['month']=cities_temp['dt'].apply(lambda x: x[5:7]) cities_temp.drop('dt',axis=1,inplace=True) cities_temp=cities_temp[['year','month','AverageTemperature','City','Country','Latitude','Longitude']] cities_temp['Latitude']=cities_temp['Latitude'].str.strip('N') cities_temp['Longitude']=cities_temp['Longitude'].str.strip('E') # In[17]: cities_average_temp=cities_temp.groupby(['year', 'month'])['AverageTemperature'].mean().reset_index() cities_average_temp=cities_temp[['AverageTemperature']] cities_average_temp.describe() # In[18]: global_temp['dt']=pd.to_datetime(global_temp.dt).dt.strftime('%d/%m/%Y') global_temp['dt']=global_temp['dt'].apply(lambda x:x[6:]) # In[19]: global_land_average=global_temp.groupby(['dt'])['LandAverageTemperature'].mean().reset_index() global_land_average=global_temp[['LandAverageTemperature']] global_land_average.describe() # ## Comprovació de la normalitat i homogeneïtat de la variància. # ### Shapiro # A continuació es realitzarà el test de Shapiro per comprovar la normalitat de les variables a estudiar # In[20]: stats.shapiro(global_land_average) # In[21]: stats.shapiro(country_average_temp) # In[22]: stats.shapiro(cities_average_temp) # ### QQplots # # A continuació, utilitzant la llibreria de *statsmodels*, es visualitzaran els qqplots de les diferents variables seleccionades utilitzant la funció *probplot*. 
# In[23]: ax1 = plt.subplot(221).set_title('Global Average Temperature') res = stats.probplot(global_temp['LandAverageTemperature'], plot=plt) ax2 = plt.subplot(222) ax2.set_title('Countries Average Temperature') res = stats.probplot(countries_temp['AverageTemperature'], plot=plt) ax3 = plt.subplot(223) ax3.set_title('Cities Average Temperature') res = stats.probplot(cities_temp['AverageTemperature'], plot=plt) plt.show() # ## Aplicació de proves estadístiques per comparar els grups de dades. # En funció de les dades i de l’objectiu de l’estudi, aplicar proves de contrast d’hipòtesis, correlacions, regressions, etc. Aplicar almenys tres mètodes d’anàlisi diferents. # ### Regressió lineal de les dades globals # # A continuació es realitzará un estudi de la regressió lineal de les dades del dataset *GlobalTemperatures* centrat en la variable *AverageTemperature*. # In[24]: glm_binom = sm.GLM(global_land_average.astype(float), global_temp.astype(float), family=sm.families.Binomial()) res
<gh_stars>1-10 from typing import Sequence import numpy as np import theano import theano.tensor as tt from artemis.experiments import ExperimentFunction, capture_created_experiments from artemis.general.checkpoint_counter import Checkpoints from artemis.general.duck import Duck from artemis.general.nested_structures import NestedType from artemis.general.numpy_helpers import get_rng from artemis.general.should_be_builtins import izip_equal from artemis.general.speedometer import Speedometer from artemis.general.test_mode import is_test_mode from artemis.ml.datasets.mnist import get_mnist_dataset from artemis.ml.tools.costs import percent_argmax_incorrect from artemis.ml.tools.iteration import minibatch_index_info_generator from init_eqprop.result_display import report_score_from_result, \ parse_eqprop_result, compare_learning_curves_new from plato.core import symbolic @symbolic def energy(params, states, x, beta=0., y=None): energy_per_sample = sum(.5*(s_post**2).sum(axis=1) -tt.batched_dot(rho(s_pre).dot(w), rho(s_post)) - rho(s_post).dot(b) for s_pre, s_post, (w, b) in izip_equal([x] + states[:-1], states, params)) if y is not None: energy_per_sample = energy_per_sample + beta * ((states[-1] - y) ** 2).sum(axis=1) return energy_per_sample @symbolic def rho(x): return tt.clip(x, 0, 1) @symbolic def forward_pass(params, x, nonlinearity, disconnect_grads = True): nonlinearity_func = lambda x: eval(nonlinearity, dict(np=np, rho=rho, sigm=tt.nnet.sigmoid), dict(x=x)) if disconnect_grads: return [s for s in [x] for w, b in params for s in [nonlinearity_func(theano.gradient.disconnected_grad(s).dot(w)+b)]] else: return [s for s in [x] for w, b in params for s in [nonlinearity_func(s.dot(w)+b)]] @symbolic def equilibriating_step(params, states: Sequence[np.ndarray], x: np.ndarray, y=None, y_pressure=0.5, epsilon=0.5) -> Sequence[np.ndarray]: # n_layers list of new layer states assert len(states)==len(params) state_grads = tt.grad(energy(params=params, states=states, 
x=x, beta=y_pressure, y=y).sum(), wrt=states) new_states = [tt.clip(s - epsilon*g, 0, 1) for s, g in izip_equal(states, state_grads)] return new_states @symbolic def do_param_update(update_function, loss, params, learning_rates): """ :param update_function: Has form new_param = update_function(param, param_grad, learning_rate) :param loss: A scalar loss :param params: A structure of parameters :param learning_rates: A learning rate or structure of learning rates which can be broadcast against params :return: A structure of new parameter values in the same format as params. """ param_structure = NestedType.from_data(params) flat_params = param_structure.get_leaves(params) flat_param_grads = tt.grad(loss, wrt=flat_params) new_flat_params = (update_function(p, pg, lr) for p, pg, lr in izip_equal(flat_params, flat_param_grads, param_structure.get_leaves(learning_rates, broadcast=True))) new_params = param_structure.expand_from_leaves(new_flat_params) return new_params @symbolic def update_eq_params(x, params, negative_states, positive_states, learning_rates, beta): loss = (energy(params=params, states=positive_states, x=x) - energy(params=params, states=negative_states, x=x)).mean() return do_param_update(lambda p, pg, lr: p-lr*pg/beta, loss=loss, params = params, learning_rates=learning_rates) def xavier_init(n_in, n_out, rng=None): rng = get_rng(rng) return rng.uniform(size=(n_in, n_out), low=-np.sqrt(6. / (n_in + n_out)), high=np.sqrt(6. / (n_in + n_out))) def initialize_params(layer_sizes, initial_weight_scale=1., rng=None): rng = get_rng(rng) params = [(initial_weight_scale*xavier_init(n_in, n_out, rng=rng), np.zeros(n_out)) for n_in, n_out in izip_equal(layer_sizes[:-1], layer_sizes[1:])] return params @symbolic def update_forward_params_with_energy(forward_params, eq_params, x, learning_rates, nonlinearity, disconnect_grads=True): # Note that disconnect makes no difference - as the loss is calculated given the states. 
states = forward_pass(params=forward_params, x=x, disconnect_grads=disconnect_grads, nonlinearity=nonlinearity) loss = energy(params=eq_params, states=states, x=x).mean() return do_param_update(lambda p, pg, lr: p-lr*pg, loss=loss, params = forward_params, learning_rates=learning_rates) @symbolic def update_forward_params_with_contrast(forward_params, eq_states, x, nonlinearity, learning_rates, disconnect_grads=True): states = forward_pass(params=forward_params, x=x, nonlinearity=nonlinearity, disconnect_grads=disconnect_grads) loss = sum((.5*(fs-es)**2).sum(axis=1) for fs, es in izip_equal(states, eq_states)).mean(axis=0) return do_param_update(lambda p, pg, lr: p-lr*pg, loss=loss, params = forward_params, learning_rates=learning_rates) def initialize_states(n_samples, noninput_layer_sizes): return [np.zeros((n_samples, dim)) for dim in noninput_layer_sizes] @ExperimentFunction(compare=compare_learning_curves_new, one_liner_function=report_score_from_result, is_root=True, result_parser = parse_eqprop_result) def demo_energy_based_initialize_eq_prop( n_epochs = 25, hidden_sizes = (500, ), minibatch_size = 20, beta = 0.5, epsilon = 0.5, learning_rate = (0.1, .05), n_negative_steps = 20, n_positive_steps = 4, initial_weight_scale = 1., epoch_checkpoint_period = {0: .25, 1: .5, 5: 1, 10: 2, 50: 4}, n_test_samples = 10000, skip_zero_epoch_test = False, train_with_forward = 'contrast', forward_nonlinearity = 'rho(x)', local_loss = True, random_flip_beta = True, seed = 1234,): print('Params:\n' + '\n'.join(list(f' {k} = {v}' for k, v in locals().items()))) assert train_with_forward in ('contrast', 'contrast+', 'energy', False) rng = get_rng(seed) n_in = 784 n_out = 10 dataset = get_mnist_dataset(flat=True, n_test_samples=None).to_onehot() x_train, y_train = dataset.training_set.xy x_test, y_test = dataset.test_set.xy # Their 'validation set' is our 'test set' if is_test_mode(): x_train, y_train, x_test, y_test = x_train[:100], y_train[:100], x_test[:100], y_test[:100] 
n_epochs=1 layer_sizes = [n_in] + list(hidden_sizes) + [n_out] eq_params = initialize_params(layer_sizes=layer_sizes, initial_weight_scale=initial_weight_scale, rng=rng) forward_params = initialize_params(layer_sizes=layer_sizes, initial_weight_scale=initial_weight_scale, rng=rng) y_train = y_train.astype(np.float32) sp = Speedometer(mode='last') is_epoch_checkpoint = Checkpoints(epoch_checkpoint_period, skip_first=skip_zero_epoch_test) f_negative_eq_step = equilibriating_step.compile() f_inference_eq_step = equilibriating_step.compile() f_positive_eq_step = equilibriating_step.compile() f_parameter_update = update_eq_params.compile() f_forward_pass = forward_pass.partial(nonlinearity=forward_nonlinearity).compile() f_forward_parameter_update = update_forward_params_with_energy.partial(disconnect_grads=local_loss, nonlinearity=forward_nonlinearity).compile() f_forward_parameter_contrast_update = update_forward_params_with_contrast.partial(disconnect_grads=local_loss, nonlinearity=forward_nonlinearity).compile() f_energy = energy.compile() def do_inference(forward_params_, eq_params_, x, n_steps): states_ = forward_states_ = f_forward_pass(x=x, params=forward_params_) if train_with_forward else initialize_states(n_samples=x.shape[0], noninput_layer_sizes=layer_sizes[1:]) for _ in range(n_steps): states_ = f_inference_eq_step(params=eq_params_, states = states_, x=x, epsilon=epsilon) return forward_states_[-1], states_[-1] results = Duck() for i, (ixs, info) in enumerate(minibatch_index_info_generator(n_samples=x_train.shape[0], minibatch_size=minibatch_size, n_epochs=n_epochs)): epoch = i*minibatch_size/x_train.shape[0] if is_epoch_checkpoint(epoch): n_samples = n_test_samples if n_test_samples is not None else len(x_test) (test_init_error, test_neg_error), (train_init_error, train_neg_error) = [ [percent_argmax_incorrect(prediction, y[:n_test_samples]) for prediction in do_inference(forward_params_=forward_params, eq_params_=eq_params, x=x[:n_test_samples], 
n_steps=n_negative_steps)] for x, y in [(x_test, y_test), (x_train, y_train)] ] print(f'Epoch: {epoch:.3g}, Iter: {i}, Test Init Error: {test_init_error:.3g}%, Test Neg Error: {test_neg_error:.3g}%, Train Init Error: {train_init_error:.3g}%, Train Neg Error: {train_neg_error:.3g}%, , Mean Rate: {sp(i):.3g}iter/s') results[next, :] = dict(iter=i, epoch=epoch, test_init_error=test_init_error, test_neg_error=test_neg_error, train_init_error=train_init_error, train_neg_error=train_neg_error) yield results if epoch>2 and train_neg_error>50: return # The Original training loop, just taken out here: x_data_sample, y_data_sample = x_train[ixs], y_train[ixs] states = forward_states = f_forward_pass(x=x_data_sample, params=forward_params) if train_with_forward else initialize_states(n_samples=minibatch_size, noninput_layer_sizes=layer_sizes[1:]) for t in range(n_negative_steps): # if i % 200 == 0: # with hold_dbplots(): # dbplot_collection(states, 'states', cornertext='NEG') # dbplot(f_energy(params = eq_params, states=states, x=x_data_sample).mean(), 'energies', plot_type=DBPlotTypes.LINE_HISTORY_RESAMPLED) states = f_negative_eq_step(params=eq_params, states = states, x=x_data_sample, epsilon=epsilon) negative_states = states this_beta = rng.choice([-beta, beta]) if random_flip_beta else beta for t in range(n_positive_steps): # if i % 200 == 0: # with hold_dbplots(): # dbplot_collection(states, 'states', cornertext='') # dbplot(f_energy(params = eq_params, states=states, x=x_data_sample).mean(), 'energies', plot_type=DBPlotTypes.LINE_HISTORY_RESAMPLED) states = f_positive_eq_step(params=eq_params, states = states, x=x_data_sample, y=y_data_sample, y_pressure=this_beta, epsilon=epsilon) positive_states = states eq_params = f_parameter_update(x=x_data_sample, params=eq_params, negative_states=negative_states, positive_states=positive_states, learning_rates=learning_rate, beta=this_beta) # with hold_dbplots(draw_every=50): # dbplot_collection([forward_params[0][0][:, 
:16].T.reshape(-1, 28, 28)] + [w for w, b in forward_params[1:]], '$\phi$') # dbplot_collection([eq_params[0][0][:, :16].T.reshape(-1, 28, 28)] + [w for w, b in eq_params[1:]], '$\\theta$') # dbplot_collection(forward_states, 'forward states') # dbplot_collection(negative_states, 'negative_states') # dbplot(np.array([f_energy(params = eq_params, states=forward_states, x=x_data_sample).mean(), f_energy(params = eq_params, states=negative_states, x=x_data_sample).mean()]), 'energies', plot_type=DBPlotTypes.LINE_HISTORY_RESAMPLED) if train_with_forward == 'contrast': forward_params = f_forward_parameter_contrast_update(x=x_data_sample, forward_params=forward_params, eq_states=negative_states, learning_rates=learning_rate) # forward_params = f_forward_parameter_contrast_update(x=x_data_sample, forward_params=forward_params, eq_states=negative_states, learning_rates=[lr/10 for lr in learning_rate]) elif train_with_forward == 'contrast+': forward_params = f_forward_parameter_contrast_update(x=x_data_sample, forward_params=forward_params, eq_states=positive_states, learning_rates=learning_rate) elif train_with_forward == 'energy': forward_params = f_forward_parameter_update(x=x_data_sample, forward_params = forward_params, eq_params=eq_params, learning_rates=learning_rate) else: assert train_with_forward is False # ====================================================================================================================== baseline_losstype = demo_energy_based_initialize_eq_prop.add_root_variant('losstype_baseline') for random_flip_beta in (False, True): for forward_nonlinearity in ('rho(x)', 'rho(x)+0.01*x', 'sigm(x)'): for train_with_forward in (False, 'contrast', 'contrast+', 'energy'): baseline_losstype.add_variant(random_flip_beta=random_flip_beta, forward_nonlinearity=forward_nonlinearity, train_with_forward=train_with_forward) # X.add_variant(train_with_forward = False) # X=demo_energy_based_initialize_eq_prop.add_root_variant(forward_leak=leak) # 
X.add_variant(train_with_forward = 'energy') # X.add_variant(train_with_forward = 'contrast') """ Things learned: - The rho(s)^2 term we were erroneously using earlier doesn't really matter. - "energy" method fails always - "contrast" seems to slightly help negative convergence - "random_flip_beta" can actually help but can cause instability in the initialization network training (however forward_leak or sigmoid can fix this). - "leaky rho" seems to be the most reliable nonlinearity, rho it seems CAN be unstable, sigm isn't quite as good. - contrast+ behaves pretty similarily to contrast. """ # ====================================================================================================================== with capture_created_experiments() as exps_reduced_steps: X = demo_energy_based_initialize_eq_prop.add_root_variant('stepsize_baseline', random_flip_beta=True) for n_negative_steps in (10, 5): X.add_variant(train_with_forward=False, n_negative_steps=n_negative_steps) XX=X.add_root_variant(train_with_forward='contrast', n_negative_steps=n_negative_steps) for forward_nonlinearity in ('rho(x)', 'rho(x)+0.01*x', 'sigm(x)'): XX.add_variant(forward_nonlinearity = forward_nonlinearity) """ Things learned: - With few steps the normal rho(x) forward nonlinearity can really mess up. Leaky-rho seems to be best. - Our init-network lets us train with fewer steps, and comes up with almost-as-good predicitons. - Very small degredation for 5-step case. - leaky-rho gets best performance but may be less stable than sigmoid based off learning curve. 
sigm is good.
"""

# ======================================================================================================================

with capture_created_experiments() as deeper_experiments:
    X = demo_energy_based_initialize_eq_prop.add_root_variant('large',
        hidden_sizes = [500, 500, 500],
        n_epochs = 500,
        minibatch_size = 20,
        n_negative_steps = 500,
        n_positive_steps = 8,
        epsilon= .5,
        beta = 1.,
        learning_rate = [.128, .032, .008, .002]
        )
    for train_with_forward in (False, 'contrast'):
        for n_negative_steps in (10, 20, 100, 500):
            X.add_variant(train_with_forward=train_with_forward, n_negative_steps=n_negative_steps, forward_nonlinearity='rho(x)+0.01*x')
            X.add_variant(train_with_forward=train_with_forward, n_negative_steps=n_negative_steps, forward_nonlinearity='sigm(x)')

"""
What we learned
- Initialization works great!  The forward pass scores similarly to the negative phase in the end.
- Initialization allows learning with low numbers of negative steps.
- sigmoid initialization may work slightly better than leaky-rho, but it is not a big difference.
- There are limits... the 10-step version always fails.
"""

# ======================================================================================================================

with capture_created_experiments() as local_loss_exp:
    X = demo_energy_based_initialize_eq_prop.add_root_variant('local_loss_baseline', train_with_forward='contrast', random_flip_beta=False, forward_nonlinearity='rho(x)+0.01*x')
    X.add_variant(local_loss = False)
    X.add_variant(local_loss = True)
    X = demo_energy_based_initialize_eq_prop.add_root_variant('local_loss_baseline_energy', train_with_forward='energy', random_flip_beta=False, forward_nonlinearity='rho(x)+0.01*x')
    X.add_variant(local_loss = False)
    X.add_variant(local_loss = True)

"""
Does using only local loss hurt?
- Local loss works almost identically to global loss.
- Local loss is not the reason the Energy approach is
<gh_stars>0
"""
------------------------------------------------------------------------------
 The MIT License (MIT)

 Copyright (c) 2016 Newcastle University

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
------------------------------------------------------------------------------
 Author
 <NAME>, School of Computing Science, Newcastle University
------------------------------------------------------------------------------
 Acknowledgement
 This Python module is based on the microbit API defined as part of the BBC
 microbit micropython project.
 See: https://github.com/bbcmicrobit/micropython for the project repository
 and licensing.

 This project is an implementation of the API in order to test microbit
 Python scripts without the need to execute them on the physical BBC
 microbit.

 There is no assertion of copyright over the microbit API (represented by
 the relevant class and function definitions, as opposed to their
 implementation).
------------------------------------------------------------------------------ """ import array import json import random import time STATE_FILE_DEFAULT = 'microbit_state.json' try: from microbit_settings import state_file except ImportError: state_file = STATE_FILE_DEFAULT __all__ = [ 'panic', 'reset', 'running_time', 'sleep', 'Accelerometer', 'accelerometer', 'compass', 'button_a', 'button_b', 'display', 'Image', 'i2c', 'pin0', 'pin1', 'pin2', 'pin3', 'pin4', 'pin5', 'pin6', 'pin7', 'pin8', 'pin9', 'pin10', 'pin11', 'pin12', 'pin13', 'pin14', 'pin15', 'pin16', 'pin19', 'pin20', 'uart', 'state', ] """ State ---------------------------------------------------------------- """ """ This is for the emulation not part of the microbit module ------------ """ class State: """Represents the state of the microbit buttons and pins. This is for emulation purposes - State is not part of the microbit API. There is a single state object. It is used to manage state during emulation. It can also be used to programmatically manipulate microbit state in a test program. 
""" __VALUE_MIN = 0 __VALUE_MAX = 1023 __RUNTIME_MAX_INCR = 100 __STATE_FILE_KEY = 'state_file' __POWER_KEY = 'power' __ACCELEROMETER_KEYS = ['accelerometer_x','accelerometer_y', 'accelerometer_z'] __PRESSES_KEYS = ['button_a_presses', 'button_b_presses'] def __init__(self): self.__running_time = 0 # not part of persistent state self.__data = { "accelerometer_x": 0, "accelerometer_y": 0, "accelerometer_z": 0, "button_a": 0, "button_a_presses": 0, "button_b": 0, "button_b_presses": 0, "state_file": state_file, "pin0": 0, "pin1": 0, "pin2": 0, "pin3": 0, "pin4": 0, "pin5": 0, "pin6": 0, "pin7": 0, "pin8": 0, "pin9": 0, "pin10": 0, "pin11": 0, "pin12": 0, "pin13": 0, "pin14": 0, "pin15": 0, "pin16": 0, "pin19": 0, "pin20": 0, "power": 1 } self.load() def __get_runtime(self): return self.__running_time def __incr_runtime(self, ms, randomise = False): if ms >= 0: if randomise: ms = random.randint(ms, State.__RUNTIME_MAX_INCR) self.__running_time = self.__running_time + ms def __valid_value(self, key, value): return key in State.__ACCELEROMETER_KEYS \ or (value >= State.__VALUE_MIN \ and (key in State.__PRESSES_KEYS \ or value <= State.__VALUE_MAX)) def get(self, key): """Returns the state associated with the named key. The key string can be one of: accelerometer_x, accelerometer_z, accelerometer_y buton_a, button_a_presses, button_b, button_b_presses state_file, pin0 to pin16, pin19, pin20 power If there is no state associated with the key, 0 is returned (note, 0 may be a valid value for the given key) This method is usually used through one of the corresponding, higher-level microbit objects (e.g. accelerometer, button, etc.). """ return self.__data.get(key.lower(), State.__VALUE_MIN) def set(self, key, value): """Sets the state associated with the named key to the given value. 
The key string can be one of: accelerometer_x, accelerometer_z, accelerometer_y buton_a, button_a_presses, button_b, button_b_presses state_file, pin0 to pin16, pin19, pin20 power This method has no effect if the key is unknown. The value is checked for validity: state_file can have any value, accelerometer values can be any integer, all other values must be positive and all but the button_presses values must be less than or equal to 1023. This method is usually used through one of the corresponding, higher-level microbit objects (e.g. accelerometer, button, etc.). It can also be used to update state in a test program. """ key = key.lower() if key in self.__data: self.load() if key != State.__STATE_FILE_KEY: if not self.__valid_value(key, value): raise ValueError( 'invalid value {0} for key {1}'.format(value, key)) value = int(value) self.__data[key] = value self.dump() def press(self, input): """Emulates pressing down on a button. This method will set the value for the named input to 1 and increment any associated presses counter. For a button press, the input can be either button_a or button_b. If the input is another valid state key, this method will set the associated value to 1. """ presses = input + '_presses' self.set(input, 1) self.set(presses, self.get(presses) + 1) def release(self, input): """Emulates releasing a button. This method will set the value for the named input to 0. For a button release, the input can be either button_a or button_b. If the input is another valid state key, this method will set the associated value to 0. """ self.set(input, 0) def press_and_release(self, input, delay=50): """Emulates pressing then releasing the named input with the given millisecond delay following press and release. This method will set the value for the named input to 1 and then to 0 with the given delay following both press and release. For a button, the input can be either button_a or button_b. 
If the input is another valid state key, this method will set the associated value to 1 then to 0. """ self.press(input) sleep(delay) self.release(input) sleep(delay) def power_on(self): """Emulates switching power off (meaning there will be output to the display). """ self.set(State.__POWER_KEY, 1) def power_off(self): """Emulates switching power off (meaning there will be no output to display). Note: unlike the real microbit, "powering off" does not stop the microbit stub program running. It is more a way of "silencing" the microbit stub program during the tests. """ self.__running_time = 0 self.set(State.__POWER_KEY, 0) def is_on(self): """Returns True if power is on, False otherwise. """ return self.get(State.__POWER_KEY) > 0 def load(self): """Load state from the current json format state file. The state file name is initialised from a microbit_settings.py file, if one exists. Otherwise the file microbit_state.json is used as the initial state file. Errors and exceptions during loading are ignored. This method has no effect if the state file is empty, has an invalid format, does not exist etc. """ try: with open(self.__data[State.__STATE_FILE_KEY]) as f: data = json.load(f) self.__data = data except: pass def dump(self): """Dump state to the current json format state file. The state file name is initialised from a microbit_settings.py file, if one exists. Otherwise the file microbit_state.json is used as the initial state file. Errors and exceptions during dumping are ignored. This method has no effect if the state file is empty, has an invalid format, does not exist etc. """ try: with open(self.__data[State.__STATE_FILE_KEY], 'w') as f: json.dump(self.__data, f, sort_keys=True, indent=4, ensure_ascii=False) except OSError: pass def reset(self): """Reset all state values and dump to the current state file. Reset values are 0 keys except for power, which is reset to 1, and state_file, which is set as the current state file name. 
""" filename = self.__data[State.__STATE_FILE_KEY] self.__data = { k:0 for k in self.__data.keys() } self.__data[State.__STATE_FILE_KEY] = filename self.__data[State.__POWER_KEY] = 1 self.dump() def __str__(self): return '\n'.join([str(k) + ':' \ + str(self.__data[k]) for k in
usage in percent of the mainloop time, (0%: 0, 100%: 1000) should be always below 1000 (uint16_t) voltage_battery : Battery voltage, in millivolts (1 = 1 millivolt) (uint16_t) current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t) battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot estimate the remaining battery (int8_t) drop_rate_comm : Communication drops in percent, (0%: 0, 100%: 10'000), (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t) errors_comm : Communication errors (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t) errors_count1 : Autopilot-specific errors (uint16_t) errors_count2 : Autopilot-specific errors (uint16_t) errors_count3 : Autopilot-specific errors (uint16_t) errors_count4 : Autopilot-specific errors (uint16_t) ''' msg = MAVLink_sys_status_message(onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4) msg.pack(self) return msg def sys_status_send(self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4): ''' The general system state. If the system is following the MAVLink standard, the system state is mainly defined by three orthogonal states/modes: The system mode, which is either LOCKED (motors shut down and locked), MANUAL (system under RC control), GUIDED (system with autonomous position control, position setpoint controlled manually) or AUTO (system guided by path/waypoint planner). 
The NAV_MODE defined the current flight state: LIFTOFF (often an open-loop maneuver), LANDING, WAYPOINTS or VECTOR. This represents the internal navigation state machine. The system status shows wether the system is currently active or not and if an emergency occured. During the CRITICAL and EMERGENCY states the MAV is still considered to be active, but should start emergency procedures autonomously. After a failure occured it should first move from active to critical to allow manual intervention and then move to emergency after a certain timeout. onboard_control_sensors_present : Bitmask showing which onboard controllers and sensors are present. Value of 0: not present. Value of 1: present. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t) onboard_control_sensors_enabled : Bitmask showing which onboard controllers and sensors are enabled: Value of 0: not enabled. Value of 1: enabled. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t) onboard_control_sensors_health : Bitmask showing which onboard controllers and sensors are operational or have an error: Value of 0: not enabled. Value of 1: enabled. 
Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t) load : Maximum usage in percent of the mainloop time, (0%: 0, 100%: 1000) should be always below 1000 (uint16_t) voltage_battery : Battery voltage, in millivolts (1 = 1 millivolt) (uint16_t) current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t) battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot estimate the remaining battery (int8_t) drop_rate_comm : Communication drops in percent, (0%: 0, 100%: 10'000), (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t) errors_comm : Communication errors (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t) errors_count1 : Autopilot-specific errors (uint16_t) errors_count2 : Autopilot-specific errors (uint16_t) errors_count3 : Autopilot-specific errors (uint16_t) errors_count4 : Autopilot-specific errors (uint16_t) ''' return self.send(self.sys_status_encode(onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4)) def system_time_encode(self, time_unix_usec, time_boot_ms): ''' The system time is the time of the master clock, typically the computer clock of the main onboard computer. time_unix_usec : Timestamp of the master clock in microseconds since UNIX epoch. 
(uint64_t) time_boot_ms : Timestamp of the component clock since boot time in milliseconds. (uint32_t) ''' msg = MAVLink_system_time_message(time_unix_usec, time_boot_ms) msg.pack(self) return msg def system_time_send(self, time_unix_usec, time_boot_ms): ''' The system time is the time of the master clock, typically the computer clock of the main onboard computer. time_unix_usec : Timestamp of the master clock in microseconds since UNIX epoch. (uint64_t) time_boot_ms : Timestamp of the component clock since boot time in milliseconds. (uint32_t) ''' return self.send(self.system_time_encode(time_unix_usec, time_boot_ms)) def ping_encode(self, time_usec, seq, target_system, target_component): ''' A ping message either requesting or responding to a ping. This allows to measure the system latencies, including serial port, radio modem and UDP connections. time_usec : Unix timestamp in microseconds (uint64_t) seq : PING sequence (uint32_t) target_system : 0: request ping from all receiving systems, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t) target_component : 0: request ping from all receiving components, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t) ''' msg = MAVLink_ping_message(time_usec, seq, target_system, target_component) msg.pack(self) return msg def ping_send(self, time_usec, seq, target_system, target_component): ''' A ping message either requesting or responding to a ping. This allows to measure the system latencies, including serial port, radio modem and UDP connections. 
time_usec : Unix timestamp in microseconds (uint64_t) seq : PING sequence (uint32_t) target_system : 0: request ping from all receiving systems, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t) target_component : 0: request ping from all receiving components, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t) ''' return self.send(self.ping_encode(time_usec, seq, target_system, target_component)) def change_operator_control_encode(self, target_system, control_request, version, passkey): ''' Request to control this MAV target_system : System the GCS requests control for (uint8_t) control_request : 0: request control of this MAV, 1: Release control of this MAV (uint8_t) version : 0: key as plaintext, 1-255: future, different hashing/encryption variants. The GCS should in general use the safest mode possible initially and then gradually move down the encryption level if it gets a NACK message indicating an encryption mismatch. (uint8_t) passkey : Password / Key, depending on version plaintext or encrypted. 25 or less characters, NULL terminated. The characters may involve A-Z, a-z, 0-9, and "!?,.-" (char) ''' msg = MAVLink_change_operator_control_message(target_system, control_request, version, passkey) msg.pack(self) return msg def change_operator_control_send(self, target_system, control_request, version, passkey): ''' Request to control this MAV target_system : System the GCS requests control for (uint8_t) control_request : 0: request control of this MAV, 1: Release control of this MAV (uint8_t) version : 0: key as plaintext, 1-255: future, different hashing/encryption variants. The GCS should in general use the safest mode possible initially and then gradually move down the encryption level if it gets a NACK message indicating an encryption mismatch. (uint8_t) passkey : Password / Key, depending on version plaintext or encrypted. 
25 or less characters, NULL terminated. The characters may involve A-Z, a-z, 0-9, and "!?,.-" (char) ''' return self.send(self.change_operator_control_encode(target_system, control_request, version, passkey)) def change_operator_control_ack_encode(self, gcs_system_id, control_request, ack): ''' Accept / deny control
<reponame>lemiceterieux/nilearn<filename>nilearn/input_data/nifti_labels_masker.py """ Transformer for computing ROI signals. """ import numpy as np import warnings from joblib import Memory from .. import _utils from .._utils import logger, CacheMixin, _compose_err_msg from .._utils.class_inspect import get_params from .._utils.niimg_conversions import _check_same_fov from .. import masking from .. import image from .base_masker import filter_and_extract, BaseMasker class _ExtractionFunctor(object): func_name = 'nifti_labels_masker_extractor' def __init__(self, _resampled_labels_img_, background_label, strategy, mask_img): self._resampled_labels_img_ = _resampled_labels_img_ self.background_label = background_label self.strategy = strategy self.mask_img = mask_img def __call__(self, imgs): from ..regions import signal_extraction return signal_extraction.img_to_signals_labels( imgs, self._resampled_labels_img_, background_label=self.background_label, strategy=self.strategy, mask_img=self.mask_img) class NiftiLabelsMasker(BaseMasker, CacheMixin): """Class for masking of Niimg-like objects. NiftiLabelsMasker is useful when data from non-overlapping volumes should be extracted (contrarily to NiftiMapsMasker). Use case: Summarize brain signals from clusters that were obtained by prior K-means or Ward clustering. Parameters ---------- labels_img : Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html Region definitions, as one image of labels. labels : list of str, optional Full labels corresponding to the labels image. This is used to improve reporting quality if provided. Warning: The labels must be consistent with the label values provided through `labels_img`. background_label : number, optional Label used in labels_img to represent background. Warning: This value must be consistent with label values and image provided. Default=0. 
mask_img : Niimg-like object, optional See http://nilearn.github.io/manipulating_images/input_output.html Mask to apply to regions before extracting signals. smoothing_fwhm : float, optional If smoothing_fwhm is not None, it gives the full-width half maximum in millimeters of the spatial smoothing to apply to the signal. standardize : {False, True, 'zscore', 'psc'}, optional Strategy to standardize the signal. 'zscore': the signal is z-scored. Timeseries are shifted to zero mean and scaled to unit variance. 'psc': Timeseries are shifted to zero mean value and scaled to percent signal change (as compared to original mean signal). True : the signal is z-scored. Timeseries are shifted to zero mean and scaled to unit variance. False : Do not standardize the data. Default=False. standardize_confounds : boolean, optional If standardize_confounds is True, the confounds are z-scored: their mean is put to 0 and their variance to 1 in the time dimension. Default=True. high_variance_confounds : boolean, optional If True, high variance confounds are computed on provided image with :func:`nilearn.image.high_variance_confounds` and default parameters and regressed out. Default=False. detrend : boolean, optional This parameter is passed to signal.clean. Please see the related documentation for details. Default=False. low_pass : None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details high_pass : None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details t_r : float, optional This parameter is passed to signal.clean. Please see the related documentation for details dtype : {dtype, "auto"}, optional Data type toward which the data should be converted. If "auto", the data will be converted to int32 if dtype is discrete and float32 if it is continuous. resampling_target : {"data", "labels", None}, optional Gives which image gives the final shape/size. 
For example, if `resampling_target` is "data", the atlas is resampled to the shape of the data if needed. If it is "labels" then mask_img and images provided to fit() are resampled to the shape and affine of maps_img. "None" means no resampling: if shapes and affines do not match, a ValueError is raised. Default="data". memory : joblib.Memory or str, optional Used to cache the region extraction process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level : int, optional Aggressiveness of memory caching. The higher the number, the higher the number of functions that will be cached. Zero means no caching. Default=1. verbose : integer, optional Indicate the level of verbosity. By default, nothing is printed Default=0. strategy : str, optional The name of a valid function to reduce the region with. Must be one of: sum, mean, median, minimum, maximum, variance, standard_deviation. Default='mean'. reports : boolean, optional If set to True, data is saved in order to produce a report. Default=True. See also -------- nilearn.input_data.NiftiMasker """ # memory and memory_level are used by CacheMixin. 
def __init__(self, labels_img, labels=None, background_label=0, mask_img=None, smoothing_fwhm=None, standardize=False, standardize_confounds=True, high_variance_confounds=False, detrend=False, low_pass=None, high_pass=None, t_r=None, dtype=None, resampling_target="data", memory=Memory(location=None, verbose=0), memory_level=1, verbose=0, strategy="mean", reports=True): self.labels_img = labels_img self.labels = labels self.background_label = background_label self.mask_img = mask_img # Parameters for _smooth_array self.smoothing_fwhm = smoothing_fwhm # Parameters for clean() self.standardize = standardize self.standardize_confounds = standardize_confounds self.high_variance_confounds = high_variance_confounds self.detrend = detrend self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r self.dtype = dtype # Parameters for resampling self.resampling_target = resampling_target # Parameters for joblib self.memory = memory self.memory_level = memory_level self.verbose = verbose self.reports = reports self._report_content = dict() self._report_content['description'] = ( 'This reports shows the regions ' 'defined by the labels of the mask.') self._report_content['warning_message'] = None available_reduction_strategies = {'mean', 'median', 'sum', 'minimum', 'maximum', 'standard_deviation', 'variance'} if strategy not in available_reduction_strategies: raise ValueError(str.format( "Invalid strategy '{}'. Valid strategies are {}.", strategy, available_reduction_strategies )) self.strategy = strategy if resampling_target not in ("labels", "data", None): raise ValueError("invalid value for 'resampling_target' " "parameter: " + str(resampling_target)) def generate_report(self): from nilearn.reporting.html_report import generate_report return generate_report(self) def _reporting(self): """ Returns ------- displays : list A list of all displays to be rendered. 
""" try: import matplotlib.pyplot as plt from nilearn import plotting except ImportError: with warnings.catch_warnings(): mpl_unavail_msg = ('Matplotlib is not imported! ' 'No reports will be generated.') warnings.filterwarnings('always', message=mpl_unavail_msg) warnings.warn(category=ImportWarning, message=mpl_unavail_msg) return [None] if self._reporting_data is not None: labels_image = self._reporting_data['labels_image'] else: labels_image = None if labels_image is not None: # Remove warning message in case where the masker was # previously fitted with no func image and is re-fitted if 'warning_message' in self._report_content: self._report_content['warning_message'] = None labels_image_data = image.get_data(labels_image) labels_image_affine = image.load_img(labels_image).affine # Number of regions excluding the background number_of_regions = np.sum(np.unique(labels_image_data) != self.background_label) # Basic safety check to ensure we have as many labels as we # have regions (plus background). 
if(self.labels is not None and len(self.labels) != number_of_regions + 1): raise ValueError(("Mismatch between the number of provided " "labels ({0}) and the number of regions " "in provided label image ({1})").format( len(self.labels), number_of_regions + 1)) self._report_content['number_of_regions'] = number_of_regions label_values = np.unique(labels_image_data) label_values = label_values[label_values != self.background_label] columns = ['label value', 'region name', 'size (in mm^3)', 'relative size (in %)'] if self.labels is None: columns.remove('region name') regions_summary = {c: [] for c in columns} for label in label_values: regions_summary['label value'].append(label) if self.labels is not None: regions_summary['region name'].append(self.labels[label]) size = len(labels_image_data[labels_image_data == label]) voxel_volume = np.abs(np.linalg.det( labels_image_affine[:3, :3])) regions_summary['size (in mm^3)'].append(round( size * voxel_volume)) regions_summary['relative size (in %)'].append(round( size / len( labels_image_data[labels_image_data != 0] ) * 100, 2)) self._report_content['summary'] = regions_summary img = self._reporting_data['img'] # If we have a func image to show in the report, use it if img is not None: dim = image.load_img(img).shape if len(dim) == 4: # compute middle image from 4D series for plotting img = image.index_img(img, dim[-1] // 2) display = plotting.plot_img(img, black_bg=False, cmap='CMRmap_r') plt.close() display.add_contours(labels_image, filled=False, linewidths=3) # Otherwise, simply plot the ROI of the label image # and give a warning to the user else: msg = ("No image provided to fit in NiftiLabelsMasker. 
" "Plotting ROIs of label image on the " "MNI152Template for reporting.") warnings.warn(msg) self._report_content['warning_message'] = msg display = plotting.plot_roi(labels_image) plt.close() # If we have a mask, show its contours if self._reporting_data['mask'] is not None: display.add_contours(self._reporting_data['mask'], filled=False, colors="g", linewidths=3) else: self._report_content['summary'] = None display = None return [display] def fit(self, imgs=None, y=None): """Prepare signal extraction from regions. All parameters are unused, they are for scikit-learn compatibility. """ logger.log("loading data from %s" % _utils._repr_niimgs(self.labels_img, shorten=(not self.verbose)), verbose=self.verbose) self.labels_img_ = _utils.check_niimg_3d(self.labels_img) if self.mask_img is not None: logger.log("loading data from %s" % _utils._repr_niimgs(self.mask_img, shorten=(not self.verbose)), verbose=self.verbose) self.mask_img_ = _utils.check_niimg_3d(self.mask_img) else: self.mask_img_ = None # Check shapes and affines or resample. if self.mask_img_ is not None: if self.resampling_target == "data": # resampling will be done at transform time pass elif self.resampling_target is None: if self.mask_img_.shape
import multiprocessing as mp import os import tempfile import shutil import dask.dataframe as dd import dask.diagnostics import genomepy from gimmemotifs.scanner import scan_regionfile_to_table from gimmemotifs.utils import pfmfile_location from loguru import logger import numpy as np import pandas as pd import pickle import pysam import qnorm from scipy import stats from sklearn.preprocessing import minmax_scale from ananse.utils import ( bed_sort, bed_merge, bam_index, bam_sort, mosdepth, ) from ananse.distributions import Distributions class CombineBedFiles: def __init__(self, genome, peakfiles, verbose=True): self.genome = genome self.list_of_peakfiles = ( peakfiles if isinstance(peakfiles, list) else [peakfiles] ) self.verbose = verbose @staticmethod def is_narrowpeak(bed, check_values=True): """ Check BED type by column count. Check if peak values are not all zeroes unless check_values is False. Accepts a BED file (including narrowPeak, broadPeak, etc.) Returns bool """ with open(bed) as b: for line in b: if line.startswith("#"): continue line = line.split("\t") cols = len(line) break # narrowPeak has 10 columns # and the peak column is >= 0 if cols != 10 or int(line[9]) < 0: return False if not check_values: return True # check if the peak values aren't all zeroes summit_values = 0 sample_size = 20 # check an arbitrary number of lines with open(bed) as b: for n, line in enumerate(b): if line.startswith("#"): continue line = line.split("\t") peak_val = int(line[9]) # value must be >=0 if peak_val < 0: return False summit_values += peak_val if n >= sample_size: break if summit_values > 0: return True return False @staticmethod def bed_resize( genome, bed_in, bed_out, width=200, narrowpeak=False, fix_outliers=False, output_bed3=True, verbose=True, ): """ Set bed region width. If the input bed is a narrowPeak file (narrowpeak=True), center region on the summit (start+peak). Otherwise center on the middle of the region. 
If fix_outliers is set to True, shift regions to fit their chromosomes. Otherwise drop these regions. If output_bed3 is set to False, output the whole bed file. """ half_seqlen = width // 2 chrom_sizes = genomepy.Genome(genome).sizes missing_chrm = [] if narrowpeak: def get_summit(_start, _, summit_offset): return _start + int(summit_offset) summit_col = 9 else: def get_summit(_start, _end, _): return (_start + _end) // 2 summit_col = 0 # unused with open(bed_in) as old, open(bed_out, "w") as new: for line in old: if line.startswith("#"): continue line = line.split("\t") chrm = str(line[0]) if chrm not in chrom_sizes.keys(): missing_chrm.append(chrm) continue start = int(line[1]) end = int(line[2]) rest = line[3:] if not output_bed3 else [] chrm_len = chrom_sizes[chrm] if width == end - start: nstart = str(start) nend = str(end) elif chrm_len <= width: if not fix_outliers: continue nstart = str(0) nend = str(chrm_len) else: summit = get_summit(start, end, line[summit_col]) if not fix_outliers: nstart = str(summit - half_seqlen) nend = str(summit + half_seqlen) if int(nstart) < 0 or int(nend) > chrm_len: continue else: # adjust the summit for the chromosome boundaries summit = max(summit, 0 + half_seqlen) summit = min(summit, chrm_len - half_seqlen) nstart = str(summit - half_seqlen) nend = str(summit + half_seqlen) new.write("\t".join([chrm, nstart, nend] + rest) + "\n") if missing_chrm and verbose: logger.warning( "The following contigs were present in " + f"'{os.path.basename(bed_in)}', " + "but were missing in the genome file: " + f"{', '.join(list(set(missing_chrm)))}\n" ) return bed_out def run(self, outfile, width=200, force=False): if force or not os.path.exists(outfile): if self.verbose: logger.info("Combining bed files") tmpdir = tempfile.mkdtemp(prefix="ANANSE_") try: list_of_beds = [] for peakfile in self.list_of_peakfiles: # use narrowPeak Peak location for region centering if possible is_np = self.is_narrowpeak(peakfile) resized_peakfile = 
os.path.join(tmpdir, os.path.basename(peakfile)) # resize each BED region to 200 BP self.bed_resize( genome=self.genome, bed_in=peakfile, bed_out=resized_peakfile, width=width, narrowpeak=is_np, verbose=self.verbose, ) bed_sort(resized_peakfile) list_of_beds.append(resized_peakfile) # merge resized beds into one merged_bed = os.path.join(tmpdir, "merged") bed_merge(list_of_beds=list_of_beds, merged_bed=merged_bed) shutil.copy2(merged_bed, outfile) finally: shutil.rmtree(tmpdir, ignore_errors=True) class ScorePeaks: def __init__(self, bams, bed, ncore=1, verbose=True): self.list_of_bams = bams if isinstance(bams, list) else [bams] self.bed = bed # one bed file with all putative enhancer binding regions self.verbose = verbose self.ncore = ncore def compatibility_check(self): """ Check if any chromosome in each bams file are found in the bed file. This filters out datasets mapped to different genomes. """ error = False bed_chromosomes = set( pd.read_csv(self.bed, sep="\t", header=None)[0].astype(str) ) for bam in self.list_of_bams: bam_header = pysam.view(bam, "-H").split("\n") # noqa: pysam bug for line in bam_header: if not line.startswith("@SQ"): continue # extract chrom (ex: '@SQ\tSN:chr11\tLN:100316') chrom = line.split("\tSN:")[1].split("\tLN:")[0] # if any chrom matches: next bam if chrom in bed_chromosomes: break else: logger.exception( f"Chromosomes in the peak file(s) do not match any in bam file '{os.path.basename(bam)}'!\n" f"Does {self.bed} contain any regions, and " "are both bam- and peak file(s) mapped to the same genome assembly?\n" ) error = True if error: exit(1) def peaks_count(self, outdir): """ count bam reads in the bed regions returns one bed file for each bam in outdir """ # linear script: # coverage_files = [] # for bam in self.list_of_bams: # bed_output = os.path.join(outdir, os.path.basename(bam).replace(".bam", ".regions.bed")) # coverage_files.append(bed_output) # mosdepth(self.bed, bam, bed_output, self.ncore) # return coverage_files # 
parallel script: nbams = len(self.list_of_bams) npool = min(self.ncore, nbams) ncore = min(4, self.ncore // npool) # 1-4 cores/bam # list with tuples. each tuple = one run mosdepth_params = [] coverage_files = [] for bam in self.list_of_bams: bed_output = os.path.join( outdir, os.path.basename(bam).replace(".bam", ".regions.bed") ) mosdepth_params.append((self.bed, bam, bed_output, ncore)) coverage_files.append(bed_output) pool = mp.Pool(npool) try: pool.starmap_async(mosdepth, mosdepth_params) finally: # To make sure processes are closed in the end, even if errors happen pool.close() pool.join() return coverage_files @staticmethod def peaks_merge(coverage_files, bed_output, ncore=1): """ averages all peaks_count outputs uses quantile normalization to normalize for read depth returns one BED 3+1 file """ ncore = min(4, ncore) bed = pd.read_csv(coverage_files[0], header=None, sep="\t") if len(coverage_files) > 1: for file in coverage_files[1:]: scores = pd.read_csv(file, header=None, sep="\t")[3] bed = pd.concat([bed, scores], axis=1) scores = bed.iloc[:, 3:] scores = qnorm.quantile_normalize(scores, axis=1, ncpus=ncore) scores = scores.mean(axis=1) bed = pd.concat([bed.iloc[:, :3], scores], axis=1) bed.to_csv(bed_output, sep="\t", header=False, index=False) @staticmethod def peaks_fit(bam_coverage, bed_output, dist_func="lognorm_dist", **kwargs): """ fit the peak scores to a distribution """ bed = pd.read_csv(bam_coverage, header=None, sep="\t") region = ( bed[0].astype(str) + ":" + bed[1].astype(str) + "-" + bed[2].astype(str) ) score = bed[3] # obtain a distribution dist_func = Distributions().set(dist_func) # with np.errstate(divide="ignore", invalid="ignore"): # dist = dist_func(score, **kwargs) dist = dist_func(score, **kwargs) # replace scores with distribution values ascending_dist = np.sort(dist) ascending_scores_index = np.searchsorted(np.sort(score), score) norm_score = np.array([ascending_dist[i] for i in ascending_scores_index]) logn_score = 
np.log(norm_score + 1) scaled_score = minmax_scale(logn_score) log10_score = np.log10(norm_score + 1) data = { "region": region, # ex: "chr1:0-200" "score": score, "norm_score": norm_score, "logn_score": logn_score, "scaled_score": scaled_score, "log10_score": log10_score, # used by the original function } bed = pd.DataFrame(data=data) bed.to_csv(bed_output, sep="\t", index=False) def run(self, outfile, dist_func="peak_rank_file_dist", force=False, **kwargs): # save the results as it takes ages to run raw_peak_scores = os.path.join(os.path.dirname(outfile), "raw_scoredpeaks.bed") if force or not os.path.exists(raw_peak_scores): self.compatibility_check() tmpdir = tempfile.mkdtemp(prefix="ANANSE_") try: if self.verbose: logger.info("Scoring peaks (slow)") try: # assumes sorted for bam in self.list_of_bams: bam_index(bam, force=False, ncore=self.ncore) coverage_files = self.peaks_count(tmpdir) except Exception: # sort, index & try again for bam in self.list_of_bams: bam_sort(bam, self.ncore) coverage_files = self.peaks_count(tmpdir) tmp_peak_scores = os.path.join(tmpdir, "raw_scoredpeaks.bed") self.peaks_merge(coverage_files, tmp_peak_scores, self.ncore) shutil.copy2(tmp_peak_scores, raw_peak_scores) finally: shutil.rmtree(tmpdir, ignore_errors=True) # fit bam read counts to specified distribution if force or not os.path.exists(outfile): self.peaks_fit(raw_peak_scores, outfile, dist_func=dist_func, **kwargs) class ScoreMotifs: def __init__(self, genome, bed, pfmfile=None, ncore=1, verbose=True): self.genome = genome self.bed = bed # putative enhancer regions in format chr:start-end (in column 0 with header) self.pfm_file = pfmfile_location(pfmfile) self.ncore = ncore self.verbose = verbose def motifs_get_scores(self, pfmscorefile, debug=False): """ Scan for TF binding motifs in potential enhancer regions. 
""" if not debug: df = scan_regionfile_to_table( input_table=self.bed, genome=self.genome, scoring="score", pfmfile=self.pfm_file, ncpus=self.ncore, zscore=True, gc=True, ) else: # test output df = pd.DataFrame( { "region": ["chr1:400-600", "chr1:2400-2600", "chr1:10003-10203"], "GM.5.0.Sox.0001": [-0.544, -2.496, -0.544], "GM.5.0.Homeodomain.0001": [-0.750, -0.377, -7.544], } ).set_index("region") df["motif"] = df.idxmax(axis=1) df["zscore"] = df.max(axis=1) df.reset_index(inplace=True) df.to_csv( pfmscorefile, sep="\t", header=True, index=False, columns=["motif", "region", "zscore"], # filter + order columns ) @staticmethod def motifs_normalize(bed_input, bed_output): """ Add normalized scores to the scored motifs """ bed = pd.read_csv(bed_input, sep="\t") bed["rank_zscore"] = minmax_scale(stats.rankdata(bed["zscore"])) bed.to_csv(bed_output, sep="\t", index=False) def run(self, outfile, force=False): # save the results as it takes ages to run raw_motif_scores = os.path.join( os.path.dirname(outfile),
-1), JzKet(1, -1), JzKet(1, 0))/3 + \ sqrt(3)*TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, -1))/3 + \ sqrt(3)*TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, -1))/3 assert uncouple(JzKetCoupled(3, -3, (1, 1, 1))) == \ TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, -1)) assert uncouple(JzKetCoupled(2, 2, (1, 1, 1))) == \ -sqrt(6)*TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 1))/6 - \ sqrt(6)*TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 1))/6 + \ sqrt(6)*TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, 0))/3 assert uncouple(JzKetCoupled(2, 1, (1, 1, 1))) == \ -sqrt(3)*TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 1))/6 - \ sqrt(3)*TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1))/3 + \ sqrt(3)*TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 0))/6 - \ sqrt(3)*TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 1))/6 + \ sqrt(3)*TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0))/6 + \ sqrt(3)*TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1))/3 assert uncouple(JzKetCoupled(2, 0, (1, 1, 1))) == \ -TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 1))/2 - \ TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 1))/2 + \ TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1))/2 + \ TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, -1))/2 assert uncouple(JzKetCoupled(2, -1, (1, 1, 1))) == \ -sqrt(3)*TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 1))/3 - \ sqrt(3)*TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 0))/6 + \ sqrt(3)*TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, -1))/6 - \ sqrt(3)*TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 0))/6 + \ sqrt(3)*TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, -1))/3 + \ sqrt(3)*TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1))/6 assert uncouple(JzKetCoupled(2, -2, (1, 1, 1))) == \ -sqrt(6)*TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 0))/3 + \ sqrt(6)*TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, -1))/6 + \ sqrt(6)*TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, -1))/6 assert 
uncouple(JzKetCoupled(1, 1, (1, 1, 1))) == \ sqrt(15)*TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 1))/30 + \ sqrt(15)*TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1))/15 - \ sqrt(15)*TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 0))/10 + \ sqrt(15)*TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 1))/30 - \ sqrt(15)*TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0))/10 + \ sqrt(15)*TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1))/5 assert uncouple(JzKetCoupled(1, 0, (1, 1, 1))) == \ sqrt(15)*TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 1))/10 - \ sqrt(15)*TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 0))/15 + \ sqrt(15)*TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 1))/10 - \ 2*sqrt(15)*TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 0))/15 + \ sqrt(15)*TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1))/10 - \ sqrt(15)*TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 0))/15 + \ sqrt(15)*TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, -1))/10 assert uncouple(JzKetCoupled(1, -1, (1, 1, 1))) == \ sqrt(15)*TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 1))/5 - \ sqrt(15)*TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 0))/10 + \ sqrt(15)*TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, -1))/30 - \ sqrt(15)*TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 0))/10 + \ sqrt(15)*TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, -1))/15 + \ sqrt(15)*TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1))/30 # Defined j13 # j1=1/2, j2=1/2, j3=1, j13=1/2 assert uncouple(JzKetCoupled(1, 1, (S(1)/2, S(1)/2, 1), ((1, 3, S(1)/2), (1, 2, 1)) )) == \ -sqrt(6)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1))/3 + \ sqrt(3)*TensorProduct( JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0))/3 assert uncouple(JzKetCoupled(1, 0, (S(1)/2, S(1)/2, 1), ((1, 3, S(1)/2), (1, 2, 1)) )) == \ -sqrt(3)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 1))/3 - \ sqrt(6)*TensorProduct(JzKet(S(1)/2, 
-S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0))/6 + \ sqrt(6)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 0))/6 + \ sqrt(3)*TensorProduct( JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1))/3 assert uncouple(JzKetCoupled(1, -1, (S(1)/2, S(1)/2, 1), ((1, 3, S(1)/2), (1, 2, 1)) )) == \ -sqrt(3)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 0))/3 + \ sqrt(6)*TensorProduct( JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, -1))/3 # j1=1/2, j2=1, j3=1, j13=1/2 assert uncouple(JzKetCoupled(S(3)/2, S(3)/2, (S(1)/2, 1, 1), ((1, 3, S(1)/2), (1, 2, S(3)/2)))) == \ -sqrt(6)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(1, 1), JzKet(1, 1))/3 + \ sqrt(3)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(1, 0))/3 assert uncouple(JzKetCoupled(S(3)/2, S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(1)/2), (1, 2, S(3)/2)))) == \ -2*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(1, 0), JzKet(1, 1))/3 - \ TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(1, 1), JzKet(1, 0))/3 + \ sqrt(2)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(1, 0))/3 + \ sqrt(2)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(1, -1))/3 assert uncouple(JzKetCoupled(S(3)/2, -S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(1)/2), (1, 2, S(3)/2)))) == \ -sqrt(2)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(1, -1), JzKet(1, 1))/3 - \ sqrt(2)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(1, 0), JzKet(1, 0))/3 + \ TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(1, 0))/3 + \ 2*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(1, -1))/3 assert uncouple(JzKetCoupled(S(3)/2, -S(3)/2, (S(1)/2, 1, 1), ((1, 3, S(1)/2), (1, 2, S(3)/2)))) == \ -sqrt(3)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(1, -1), JzKet(1, 0))/3 + \ sqrt(6)*TensorProduct( JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(1, -1))/3 # j1=1, j2=1, j3=1, j13=1 assert uncouple(JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 1), (1, 2, 2)))) == \ -sqrt(2)*TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 1))/2 + \ 
sqrt(2)*TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, 0))/2 assert uncouple(JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 2)))) == \ -TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 1))/2 - \ TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1))/2 + \ TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0))/2 + \ TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1))/2 assert uncouple(JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 2)))) == \ -sqrt(3)*TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 1))/3 - \ sqrt(3)*TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 0))/6 - \ sqrt(3)*TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 1))/6 + \ sqrt(3)*TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1))/6 + \ sqrt(3)*TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 0))/6 + \ sqrt(3)*TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, -1))/3 assert uncouple(JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 2)))) == \ -TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 1))/2 - \ TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 0))/2 + \ TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, -1))/2 + \ TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1))/2 assert uncouple(JzKetCoupled(2, -2, (1, 1, 1), ((1, 3, 1), (1, 2, 2)))) == \ -sqrt(2)*TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 0))/2 + \ sqrt(2)*TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, -1))/2 assert uncouple(JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 1)))) == \ TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 1))/2 - \ TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1))/2 + \ TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0))/2 - \ TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1))/2 assert uncouple(JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 1)))) == \ TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 0))/2 - \ TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 1))/2 - \ TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1))/2 + \ TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 
0))/2 assert uncouple(JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 1)))) == \ -TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 1))/2 + \ TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 0))/2 - \ TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, -1))/2 + \ TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1))/2 def test_uncouple_4_coupled_states_numerical(): # j1=1/2, j2=1/2, j3=1, j4=1, default coupling assert uncouple(JzKetCoupled(3, 3, (S(1)/2, S(1)/2, 1, 1))) == \ TensorProduct(JzKet( S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(1, 1)) assert uncouple(JzKetCoupled(3, 2, (S(1)/2, S(1)/2, 1, 1))) == \ sqrt(6)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(1, 1))/6 + \ sqrt(6)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 1), JzKet(1, 1))/6 + \ sqrt(3)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(1, 1))/3 + \ sqrt(3)*TensorProduct(JzKet(S( 1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(1, 0))/3 assert uncouple(JzKetCoupled(3, 1, (S(1)/2, S(1)/2, 1, 1))) == \ sqrt(15)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 1), JzKet(1, 1))/15 + \ sqrt(30)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(1, 1))/15 + \ sqrt(30)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(1, 0))/15 + \ sqrt(30)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 0), JzKet(1, 1))/15 + \ sqrt(30)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 1), JzKet(1, 0))/15 + \ sqrt(15)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(1, 1))/15 + \ 2*sqrt(15)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(1, 0))/15 + \ sqrt(15)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(1, -1))/15 assert uncouple(JzKetCoupled(3, 0, (S(1)/2, S(1)/2, 1, 1))) == \ 
sqrt(10)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 0), JzKet(1, 1))/10 + \ sqrt(10)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 1), JzKet(1, 0))/10 + \ sqrt(5)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(1, 1))/10 + \ sqrt(5)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(1, 0))/5 + \ sqrt(5)*TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(1, -1))/10 + \ sqrt(5)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, -1), JzKet(1, 1))/10 + \ sqrt(5)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 0), JzKet(1, 0))/5 + \ sqrt(5)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 1), JzKet(1, -1))/10 + \ sqrt(10)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1), JzKet(1, 0))/10 + \ sqrt(10)*TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2),
: ( 29086, 29088 ), "DR1fOns_5" : ( 29088, 29089 ), "DR1fAgeRec_5" : ( 29089, 29091 ), "DR1fRec_5" : ( 29091, 29092 ), "DR1f_6" : ( 29092, 29093 ), "DR1f1_6" : ( 29093, 29097 ), "DR1fAgeOns_6" : ( 29097, 29099 ), "DR1fOns_6" : ( 29099, 29100 ), "DR1fAgeRec_6" : ( 29100, 29102 ), "DR1fRec_6" : ( 29102, 29103 ), "DR1f_7" : ( 29103, 29104 ), "DR1f1_7" : ( 29104, 29108 ), "DR1fAgeOns_7" : ( 29108, 29110 ), "DR1fOns_7" : ( 29110, 29111 ), "DR1fAgeRec_7" : ( 29111, 29113 ), "DR1fRec_7" : ( 29113, 29114 ), "DR1f_8" : ( 29114, 29115 ), "DR1f1_8" : ( 29115, 29119 ), "DR1fAgeOns_8" : ( 29119, 29121 ), "DR1fOns_8" : ( 29121, 29122 ), "DR1fAgeRec_8" : ( 29122, 29124 ), "DR1fRec_8" : ( 29124, 29125 ), "DR1f_9" : ( 29125, 29126 ), "DR1f1_9" : ( 29126, 29130 ), "DR1fAgeOns_9" : ( 29130, 29132 ), "DR1fOns_9" : ( 29132, 29133 ), "DR1fAgeRec_9" : ( 29133, 29135 ), "DR1fRec_9" : ( 29135, 29136 ), "DR1iSPECIFYa" : ( 29136, 29391 ), "DR1iCODEa" : ( 29391, 29394 ), "DR1iSPECIFYb" : ( 29394, 29649 ), "DR1iCODEb" : ( 29649, 29652 ), "dr1i_ANOTHER" : ( 29652, 29653 ), "DR1iSPECIFYa2" : ( 29653, 29908 ), "DR1iCODEa2" : ( 29908, 29911 ), "DR1iSPECIFYb2" : ( 29911, 30166 ), "DR1iCODEb2" : ( 30166, 30169 ), "dr1i_ANOTHER2" : ( 30169, 30170 ), "DR1iSPECIFYa3" : ( 30170, 30425 ), "DR1iCODEa3" : ( 30425, 30428 ), "DR1iSPECIFYb3" : ( 30428, 30683 ), "DR1iCODEb3" : ( 30683, 30686 ), "dr1i_ANOTHER3" : ( 30686, 30687 ), "DR1iSPECIFYa4" : ( 30687, 30942 ), "DR1iCODEa4" : ( 30942, 30945 ), "DR1iSPECIFYb4" : ( 30945, 31200 ), "DR1iCODEb4" : ( 31200, 31203 ), "dr1i_ANOTHER4" : ( 31203, 31204 ), "DR1iSPECIFYa5" : ( 31204, 31459 ), "DR1iCODEa5" : ( 31459, 31462 ), "DR1iSPECIFYb5" : ( 31462, 31717 ), "DR1iCODEb5" : ( 31717, 31720 ), "dr1i_ANOTHER5" : ( 31720, 31721 ), "DR1f_aux" : ( 31721, 31722 ), "DR1g" : ( 31722, 31723 ), "DR1g1" : ( 31723, 31727 ), "DR1gAgeOns" : ( 31727, 31729 ), "DR1gOns" : ( 31729, 31730 ), "DR1gAgeRec" : ( 31730, 31732 ), "DR1gRec" : ( 31732, 31733 ), "DR1h_specify" : ( 31733, 31813 
), "DR1h_CODE" : ( 31813, 31816 ), "DR1i" : ( 31816, 31817 ), "DR2_specify" : ( 31817, 31897 ), "DR2_Code" : ( 31897, 31900 ), "DR2_NUM_" : ( 31900, 31902 ), "DR2_UNIT_" : ( 31902, 31903 ), "DR2A_" : ( 31903, 31904 ), "DR2B_" : ( 31904, 31906 ), "DR2B1_" : ( 31906, 31909 ), "DR2B2_" : ( 31909, 31911 ), "DR2AgeOns_" : ( 31911, 31913 ), "DR2_NUM_2" : ( 31913, 31915 ), "DR2_UNIT_2" : ( 31915, 31916 ), "DR2A_2" : ( 31916, 31917 ), "DR2B_2" : ( 31917, 31919 ), "DR2B1_2" : ( 31919, 31922 ), "DR2B2_2" : ( 31922, 31924 ), "DR2AgeOns_2" : ( 31924, 31926 ), "DR2_NUM_3" : ( 31926, 31928 ), "DR2_UNIT_3" : ( 31928, 31929 ), "DR2A_3" : ( 31929, 31930 ), "DR2B_3" : ( 31930, 31932 ), "DR2B1_3" : ( 31932, 31935 ), "DR2B2_3" : ( 31935, 31937 ), "DR2AgeOns_3" : ( 31937, 31939 ), "DR2_NUM_4" : ( 31939, 31941 ), "DR2_UNIT_4" : ( 31941, 31942 ), "DR2A_4" : ( 31942, 31943 ), "DR2B_4" : ( 31943, 31945 ), "DR2B1_4" : ( 31945, 31948 ), "DR2B2_4" : ( 31948, 31950 ), "DR2AgeOns_4" : ( 31950, 31952 ), "DR2_NUM_5" : ( 31952, 31954 ), "DR2_UNIT_5" : ( 31954, 31955 ), "DR2A_5" : ( 31955, 31956 ), "DR2B_5" : ( 31956, 31958 ), "DR2B1_5" : ( 31958, 31961 ), "DR2B2_5" : ( 31961, 31963 ), "DR2AgeOns_5" : ( 31963, 31965 ), "DR3_" : ( 31965, 31966 ), "DR3a_" : ( 31966, 31967 ), "DR3_2" : ( 31967, 31968 ), "DR3a_2" : ( 31968, 31969 ), "DR3_3" : ( 31969, 31970 ), "DR3a_3" : ( 31970, 31971 ), "DR3_4" : ( 31971, 31972 ), "DR3a_4" : ( 31972, 31973 ), "DR3_5" : ( 31973, 31974 ), "DR3a_5" : ( 31974, 31975 ), "DR5_" : ( 31975, 31976 ), "DR5a_" : ( 31976, 31977 ), "DR5AgeOns_" : ( 31977, 31979 ), "DR5Ons_" : ( 31979, 31980 ), "DR5AgeRec_" : ( 31980, 31982 ), "DR5Rec_" : ( 31982, 31983 ), "DR5_2" : ( 31983, 31984 ), "DR5a_2" : ( 31984, 31985 ), "DR5AgeOns_2" : ( 31985, 31987 ), "DR5Ons_2" : ( 31987, 31988 ), "DR5AgeRec_2" : ( 31988, 31990 ), "DR5Rec_2" : ( 31990, 31991 ), "DR5_3" : ( 31991, 31992 ), "DR5a_3" : ( 31992, 31993 ), "DR5AgeOns_3" : ( 31993, 31995 ), "DR5Ons_3" : ( 31995, 31996 ), "DR5AgeRec_3" : ( 
31996, 31998 ), "DR5Rec_3" : ( 31998, 31999 ), "DR5_4" : ( 31999, 32000 ), "DR5a_4" : ( 32000, 32001 ), "DR5AgeOns_4" : ( 32001, 32003 ), "DR5Ons_4" : ( 32003, 32004 ), "DR5AgeRec_4" : ( 32004, 32006 ), "DR5Rec_4" : ( 32006, 32007 ), "DR5_5" : ( 32007, 32008 ), "DR5a_5" : ( 32008, 32009 ), "DR5AgeOns_5" : ( 32009, 32011 ), "DR5Ons_5" : ( 32011, 32012 ), "DR5AgeRec_5" : ( 32012, 32014 ), "DR5Rec_5" : ( 32014, 32015 ), "DR6_" : ( 32015, 32016 ), "DR6_2" : ( 32016, 32017 ), "DR6_3" : ( 32017, 32018 ), "DR6_4" : ( 32018, 32019 ), "DR6_5" : ( 32019, 32020 ), "DR7_" : ( 32020, 32021 ), "DR7a_" : ( 32021, 32022 ), "DR7b_" : ( 32022, 32023 ), "DR7_2" : ( 32023, 32024 ), "DR7a_2" : ( 32024, 32025 ), "DR7b_2" : ( 32025, 32026 ), "DR7_3" : ( 32026, 32027 ), "DR7a_3" : ( 32027, 32028 ), "DR7b_3" : ( 32028, 32029 ), "DR7_4" : ( 32029, 32030 ), "DR7a_4" : ( 32030, 32031 ), "DR7b_4" : ( 32031, 32032 ), "DR7_5" : ( 32032, 32033 ), "DR7a_5" : ( 32033, 32034 ), "DR7b_5" : ( 32034, 32035 ), "DR8_" : ( 32035, 32036 ), "DR8_2" : ( 32036, 32037 ), "DR8_3" : ( 32037, 32038 ), "DR8_4" : ( 32038, 32039 ), "DR8_5" : ( 32039, 32040 ), "DR9_" : ( 32040, 32041 ), "DR9a_" : ( 32041, 32042 ), "DR9_2" : ( 32042, 32043 ), "DR9a_2" : ( 32043, 32044 ), "DR9_3" : ( 32044, 32045 ), "DR9a_3" : ( 32045, 32046 ), "DR9_4" : ( 32046, 32047 ), "DR9a_4" : ( 32047, 32048 ), "DR9_5" : ( 32048, 32049 ), "DR9a_5" : ( 32049, 32050 ), "DR10_" : ( 32050, 32051 ), "DR10_2" : ( 32051, 32052 ), "DR10_3" : ( 32052, 32053 ), "DR10_4" : ( 32053, 32054 ), "DR10_5" : ( 32054, 32055 ), "DR11a1_" : ( 32055, 32056 ), "DR11a2_" : ( 32056, 32057 ), "DR11a3_" : ( 32057, 32058 ), "DR11a4_" : ( 32058, 32059 ), "DR11a5_" : ( 32059, 32060 ), "DR11a6_" : ( 32060, 32061 ), "DR11a7_" : ( 32061, 32062 ), "DR11a8_" : ( 32062, 32063 ), "DR11a9_" : ( 32063, 32064 ), "DR11a10_" : ( 32064, 32065 ), "DR11a11_" : ( 32065, 32066 ), "DR11a12_" : ( 32066, 32067 ), "DR11a13_" : ( 32067, 32068 ), "DR11a14_" : ( 32068, 32069 ), "DR11a15_" : ( 32069, 
32070 ), "DR11a16_" : ( 32070, 32071 ), "DR11a17_" : ( 32071, 32072 ), "DR11a18_" : ( 32072, 32073 ), "DR11a19_" : ( 32073, 32074 ), "DR11a20_" : ( 32074, 32075 ), "DR11a21_" : ( 32075, 32076 ), "DR11a22_" : ( 32076, 32077 ), "DR11a23_" : ( 32077, 32078 ), "DR11a24_" : ( 32078, 32079 ), "DR11a25_" : ( 32079, 32080 ), "DR11a26_" : ( 32080, 32081 ), "DR11a27_" : ( 32081, 32082 ), "DR11a28_" : ( 32082, 32083 ), "DR11a29_" : ( 32083, 32084 ), "DR11b_" : ( 32084, 32085 ), "DR11b1_" : ( 32085, 32086 ), "DR11bAgeOns_" : ( 32086, 32088 ), "DR11bOns_" : ( 32088, 32089 ), "DR11bAgeRec_" : (
"""Python wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit. Original C++ source file: string_ops.cc """ import collections from tensorflow.python import pywrap_tfe as pywrap_tfe from tensorflow.python.eager import context as _context from tensorflow.python.eager import core as _core from tensorflow.python.eager import execute as _execute from tensorflow.python.framework import dtypes as _dtypes from tensorflow.python.framework import op_def_registry as _op_def_registry from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import op_def_library as _op_def_library from tensorflow.python.util.deprecation import deprecated_endpoints from tensorflow.python.util import dispatch as _dispatch from tensorflow.python.util.tf_export import tf_export from typing import TypeVar @_dispatch.add_dispatch_list @tf_export('strings.as_string', 'as_string', v1=['dtypes.as_string', 'strings.as_string', 'as_string']) @deprecated_endpoints('dtypes.as_string') def as_string(input, precision=-1, scientific=False, shortest=False, width=-1, fill="", name=None): r"""Converts each entry in the given tensor to strings. Supports many numeric types and boolean. For Unicode, see the [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode text) tutorial. Examples: >>> tf.strings.as_string([3, 2]) <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'3', b'2'], dtype=object)> >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() array([b'3.14', b'2.72'], dtype=object) Args: input: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `float32`, `float64`, `bool`, `variant`. precision: An optional `int`. Defaults to `-1`. The post-decimal precision to use for floating point numbers. Only used if precision > -1. scientific: An optional `bool`. Defaults to `False`. Use scientific notation for floating point numbers. shortest: An optional `bool`. 
Defaults to `False`. Use shortest representation (either scientific or standard) for floating point numbers. width: An optional `int`. Defaults to `-1`. Pad pre-decimal numbers to this width. Applies to both floating point and integer numbers. Only used if width > -1. fill: An optional `string`. Defaults to `""`. The value to pad if width > -1. If empty, pads with spaces. Another typical value is '0'. String cannot be longer than 1 character. name: A name for the operation (optional). Returns: A `Tensor` of type `string`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx, "AsString", name, input, "precision", precision, "scientific", scientific, "shortest", shortest, "width", width, "fill", fill) return _result except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) except _core._FallbackException: pass try: return as_string_eager_fallback( input, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( as_string, (), dict(input=input, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill, name=name) ) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise # Add nodes to the TensorFlow graph. 
if precision is None: precision = -1 precision = _execute.make_int(precision, "precision") if scientific is None: scientific = False scientific = _execute.make_bool(scientific, "scientific") if shortest is None: shortest = False shortest = _execute.make_bool(shortest, "shortest") if width is None: width = -1 width = _execute.make_int(width, "width") if fill is None: fill = "" fill = _execute.make_str(fill, "fill") try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "AsString", input=input, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( as_string, (), dict(input=input, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill, name=name) ) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "precision", _op._get_attr_int("precision"), "scientific", _op._get_attr_bool("scientific"), "shortest", _op._get_attr_bool("shortest"), "width", _op._get_attr_int("width"), "fill", _op.get_attr("fill")) _inputs_flat = _op.inputs _execute.record_gradient( "AsString", _inputs_flat, _attrs, _result) _result, = _result return _result AsString = tf_export("raw_ops.AsString")(_ops.to_raw_op(as_string)) def as_string_eager_fallback(input, precision, scientific, shortest, width, fill, name, ctx): if precision is None: precision = -1 precision = _execute.make_int(precision, "precision") if scientific is None: scientific = False scientific = _execute.make_bool(scientific, "scientific") if shortest is None: shortest = False shortest = _execute.make_bool(shortest, "shortest") if width is None: width = -1 width = _execute.make_int(width, "width") if fill is None: fill = "" fill = _execute.make_str(fill, "fill") _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, 
_dtypes.int64, _dtypes.complex64, _dtypes.complex128, _dtypes.float32, _dtypes.float64, _dtypes.bool, _dtypes.variant, ]) _inputs_flat = [input] _attrs = ("T", _attr_T, "precision", precision, "scientific", scientific, "shortest", shortest, "width", width, "fill", fill) _result = _execute.execute(b"AsString", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "AsString", _inputs_flat, _attrs, _result) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('io.decode_base64', v1=['io.decode_base64', 'decode_base64']) @deprecated_endpoints('decode_base64') def decode_base64(input, name=None): r"""Decode web-safe base64-encoded strings. Input may or may not have padding at the end. See EncodeBase64 for padding. Web-safe means that input must use - and _ instead of + and /. Args: input: A `Tensor` of type `string`. Base64 strings to decode. name: A name for the operation (optional). Returns: A `Tensor` of type `string`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx, "DecodeBase64", name, input) return _result except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) except _core._FallbackException: pass try: return decode_base64_eager_fallback( input, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( decode_base64, (), dict(input=input, name=name) ) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise # Add nodes to the TensorFlow graph. 
try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "DecodeBase64", input=input, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( decode_base64, (), dict(input=input, name=name) ) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = () _inputs_flat = _op.inputs _execute.record_gradient( "DecodeBase64", _inputs_flat, _attrs, _result) _result, = _result return _result DecodeBase64 = tf_export("raw_ops.DecodeBase64")(_ops.to_raw_op(decode_base64)) def decode_base64_eager_fallback(input, name, ctx): input = _ops.convert_to_tensor(input, _dtypes.string) _inputs_flat = [input] _attrs = None _result = _execute.execute(b"DecodeBase64", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "DecodeBase64", _inputs_flat, _attrs, _result) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('io.encode_base64', v1=['io.encode_base64', 'encode_base64']) @deprecated_endpoints('encode_base64') def encode_base64(input, pad=False, name=None): r"""Encode strings into web-safe base64 format. Refer to the following article for more information on base64 format: en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the end so that the encoded has length multiple of 4. See Padding section of the link above. Web-safe means that the encoder uses - and _ instead of + and /. Args: input: A `Tensor` of type `string`. Strings to be encoded. pad: An optional `bool`. Defaults to `False`. Bool whether padding is applied at the ends. name: A name for the operation (optional). Returns: A `Tensor` of type `string`. 
""" _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx, "EncodeBase64", name, input, "pad", pad) return _result except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) except _core._FallbackException: pass try: return encode_base64_eager_fallback( input, pad=pad, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( encode_base64, (), dict(input=input, pad=pad, name=name) ) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise # Add nodes to the TensorFlow graph. if pad is None: pad = False pad = _execute.make_bool(pad, "pad") try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "EncodeBase64", input=input, pad=pad, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( encode_base64, (), dict(input=input, pad=pad, name=name) ) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("pad", _op._get_attr_bool("pad")) _inputs_flat = _op.inputs _execute.record_gradient( "EncodeBase64", _inputs_flat, _attrs, _result) _result, = _result return _result EncodeBase64 = tf_export("raw_ops.EncodeBase64")(_ops.to_raw_op(encode_base64)) def encode_base64_eager_fallback(input, pad, name, ctx): if pad is None: pad = False pad = _execute.make_bool(pad, "pad") input = _ops.convert_to_tensor(input, _dtypes.string) _inputs_flat = [input] _attrs = ("pad", pad) _result = _execute.execute(b"EncodeBase64", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "EncodeBase64", _inputs_flat, _attrs, _result) _result, = _result return _result def reduce_join(inputs, reduction_indices, keep_dims=False, separator="", name=None): r"""Joins a string Tensor across the given 
dimensions. Computes the string join across dimensions in the given string Tensor of shape `[\\(d_0, d_1, ..., d_{n-1}\\)]`. Returns a new Tensor created by joining the input strings with the given separator (default: empty string). Negative indices are counted backwards from the end, with `-1` being equivalent to `n - 1`. If indices are not specified, joins across all dimensions beginning from `n - 1` through `0`. For example: ```python # tensor `a` is [["a", "b"], ["c", "d"]] tf.reduce_join(a, 0) ==> ["ac", "bd"] tf.reduce_join(a, 1) ==> ["ab", "cd"] tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] tf.reduce_join(a, [0, 1]) ==> "acbd" tf.reduce_join(a, [1, 0]) ==> "abcd" tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]] tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" ``` Args: inputs: A `Tensor` of type `string`. The
/ Number.POSITIVE_INFINITY) self.assertEqual(Number.NAN, Number.NEGATIVE_INFINITY / Number.NEGATIVE_INFINITY) def test_infinity_from_add_subtract(self): self.assertEqual(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY + Number.POSITIVE_INFINITY) self.assertEqual(Number.NEGATIVE_INFINITY, Number.NEGATIVE_INFINITY + Number.NEGATIVE_INFINITY) self.assertEqual(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY - Number.NEGATIVE_INFINITY) self.assertEqual(Number.NEGATIVE_INFINITY, Number.NEGATIVE_INFINITY - Number.POSITIVE_INFINITY) def test_infinity_from_multiply(self): self.assertEqual(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY * Number.POSITIVE_INFINITY) self.assertEqual(Number.NEGATIVE_INFINITY, Number.POSITIVE_INFINITY * Number.NEGATIVE_INFINITY) self.assertEqual(Number.NEGATIVE_INFINITY, Number.NEGATIVE_INFINITY * Number.POSITIVE_INFINITY) self.assertEqual(Number.POSITIVE_INFINITY, Number.NEGATIVE_INFINITY * Number.NEGATIVE_INFINITY) def test_zero_from_divide_by_infinity(self): self.assertEqual(Number(0), Number(1) / Number.POSITIVE_INFINITY) self.assertEqual(Number(0), Number(1.5) / Number.POSITIVE_INFINITY) self.assertEqual(Number(0), Number(-1.5) / Number.POSITIVE_INFINITY) self.assertEqual(Number(0), Number(-1.5) / Number.NEGATIVE_INFINITY) self.assertEqual(Number(0), Number(1.5) / Number.NEGATIVE_INFINITY) self.assertEqual(Number(0), Number(1) / Number.NEGATIVE_INFINITY) def test_controversial_math(self): """ Controversial computations: 0**0, 1**infinity, infinity**0 These computations on qiki Numbers all result in 1 -- the same as IEEE floating point A different school of thought is that they should result in NAN. You can use limits to justify almost any value for these computations. 
Here's why 0**0 is not so cut and dry: The limit of x**0 as x approaches 0 from the positive is 1 The limit of x**0 as x approaches 0 from the negative is -1 The limit of 0**x as x approaches 0 from the positive is 0 The limit of 0**x as x approaches 0 from the negative is division by zero, infinity maybe Here's why 1**infinity is not so cut and dry: The limit of 1**x as x approches infinity is 1 The limit of x**infinity as x approches 1 from the positive is infinity The limit of x**infinity as x approches 1 from the negative is 0 (For negative infinity the limits are 1, 0, positive infinity) Here's why infinity**0 is not so cut and dry: The limit of x**0 as x approaches infinity is 1 The limit of infinity**x as x approaches 0 from the positive is infinity The limit of infinity**x as x approaches 0 from the negative is 0 (For negative infinity the limits are -1, negative infinity, 0) """ self.assertEqual(Number(1), Number(0) ** Number(0)) self.assertEqual(Number(1), Number(1) ** Number.POSITIVE_INFINITY) self.assertEqual(Number(1), Number(1) ** Number.NEGATIVE_INFINITY) self.assertEqual(Number(1), Number.POSITIVE_INFINITY ** Number(0)) self.assertEqual(Number(1), Number.NEGATIVE_INFINITY ** Number(0)) def test_ludicrous_boundary(self): big_reasonable = Number(2**999-1) self.assertEqual(Number(2**1000-2), big_reasonable + big_reasonable) max_reasonable = Number(2**1000-1) if LUDICROUS_NUMBER_SUPPORT: self.assertEqual(Number(2**1001-2), max_reasonable + max_reasonable) else: with self.assertRaises(Number.LudicrousNotImplemented): _ = Number(2**1001-2) with self.assertRaises(Number.LudicrousNotImplemented): _ = max_reasonable + max_reasonable def test_zero_division(self): with self.assertRaises(ZeroDivisionError): _ = Number(0) / Number(0) with self.assertRaises(ZeroDivisionError): _ = Number(1) / Number(0) # white box: uses integer math with self.assertRaises(ZeroDivisionError): _ = Number(1.5) / Number(0) # white box: uses floating math with 
self.assertRaises(ZeroDivisionError): _ = Number(-0.0) / Number(0) with self.assertRaises(ZeroDivisionError): _ = Number.POSITIVE_INFINITY / Number(0) with self.assertRaises(ZeroDivisionError): _ = Number.NEGATIVE_INFINITY / Number(0) def test_infinite_constants(self): self.assertEqual('0qFF_81', Number.POSITIVE_INFINITY.qstring()) self.assertEqual('0q00_7F', Number.NEGATIVE_INFINITY.qstring()) self.assertEqual(float('+inf'), Number.POSITIVE_INFINITY) self.assertEqual(float('-inf'), Number.NEGATIVE_INFINITY) def test_infinitesimal_float(self): self.assertNotEqual(0.0, Number.POSITIVE_INFINITESIMAL) self.assertNotEqual(0.0, Number.NEGATIVE_INFINITESIMAL) self.assertEqual(0.0, float(Number.POSITIVE_INFINITESIMAL)) self.assertEqual(0.0, float(Number.NEGATIVE_INFINITESIMAL)) self.assertFloatSame(+0.0, float(Number.POSITIVE_INFINITESIMAL)) self.assertFloatSame(-0.0, float(Number.NEGATIVE_INFINITESIMAL)) def test_infinitesimal_int(self): self.assertNotEqual(0, Number.POSITIVE_INFINITESIMAL) self.assertNotEqual(0, Number.NEGATIVE_INFINITESIMAL) self.assertEqual(0, int(Number.POSITIVE_INFINITESIMAL)) self.assertEqual(0, int(Number.NEGATIVE_INFINITESIMAL)) def test_qantissa_max_qigits(self): qstring = '0q82_112233445566778899AABBCCDDEEFF' self.assertEqual((0x112233445566778899AABBCCDDEEFF, 15), Number(qstring).qan_int_len()) self.assertEqual((0x112233445566778899AABBCCDD , 13), Number(qstring).qan_int_len(max_qigits=13)) self.assertEqual((0x112233445566778899AABBCCDDEE , 14), Number(qstring).qan_int_len(max_qigits=14)) self.assertEqual((0x112233445566778899AABBCCDDEEFF, 15), Number(qstring).qan_int_len(max_qigits=15)) self.assertEqual((0x112233445566778899AABBCCDDEEFF, 15), Number(qstring).qan_int_len(max_qigits=16)) def test_qantissa_positive(self): self.assertEqual((0x03E8,2), Number('0q83_03E8').qan_int_len()) self.assertEqual((0x03E8,2), Number('0q83_03E8').qan_int_len()) self.assertEqual((0x0101,2), Number('0q83_0101').qan_int_len()) self.assertEqual(( 0x01,1), 
Number('0q83_01').qan_int_len()) self.assertEqual(( 0x00,0), Number('0q83').qan_int_len()) self.assertEqual(( 0xFF,1), Number('0q82_FF').qan_int_len()) self.assertEqual(( 0xFA,1), Number('0q7D_FA').qan_int_len()) def test_qantissa_negative(self): self.assertEqual((0xFE,1), Number('0q7D_FE').qan_int_len()) self.assertEqual((0x01,1), Number('0q7D_01').qan_int_len()) self.assertEqual((0xFEFFFFFA,4), Number('0q7A_FEFFFFFA').qan_int_len()) self.assertEqual((0x00000001,4), Number('0q7A_00000001').qan_int_len()) def test_qantissa_fractional(self): self.assertEqual( (0x80,1), Number('0q81FF_80').qan_int_len()) self.assertEqual( (0x40,1), Number('0q81FF_40').qan_int_len()) self.assertEqual((0x4220,2), Number('0q81FF_4220').qan_int_len()) def test_qantissa_fractional_neg(self): self.assertEqual( (0x01,1), Number('0q7E00_01').qan_int_len()) self.assertEqual( (0x80,1), Number('0q7E00_80').qan_int_len()) self.assertEqual( (0xC0,1), Number('0q7E00_C0').qan_int_len()) self.assertEqual( (0xFF,1), Number('0q7E00_FF').qan_int_len()) self.assertEqual( (0xFF,1), Number('0q7E01_FF').qan_int_len()) self.assertEqual((0xFF80,2), Number('0q7E01_FF80').qan_int_len()) def test_qantissa_unsupported(self): number_has_no_qantissa = Number(0) with self.assertRaises(Number.QanValueError): number_has_no_qantissa.qan_int_len() def test_qexponent_unsupported(self): number_has_no_qexponent = Number(0) with self.assertRaises(Number.QexValueError): number_has_no_qexponent.base_256_exponent() def test_qexponent_positive(self): self.assertEqual(1, Number('0q82_01').base_256_exponent()) self.assertEqual(1, Number('0q82_01000001').base_256_exponent()) self.assertEqual(1, Number('0q82_02').base_256_exponent()) self.assertEqual(1, Number('0q82_FF').base_256_exponent()) self.assertEqual(2, Number('0q83_01').base_256_exponent()) self.assertEqual(3, Number('0q84_01').base_256_exponent()) self.assertEqual(4, Number('0q85_01').base_256_exponent()) self.assertEqual(5, Number('0q86_01').base_256_exponent()) 
self.assertEqual(6, Number('0q87_01').base_256_exponent()) self.assertEqual(124, Number('0qFD_01').base_256_exponent()) self.assertEqual(125, Number('0qFE_01').base_256_exponent()) def test_qexponent_negative(self): self.assertEqual(6, Number('0q78').base_256_exponent()) self.assertEqual(5, Number('0q79').base_256_exponent()) self.assertEqual(4, Number('0q7A').base_256_exponent()) self.assertEqual(3, Number('0q7B').base_256_exponent()) self.assertEqual(2, Number('0q7C').base_256_exponent()) self.assertEqual(1, Number('0q7D').base_256_exponent()) self.assertEqual(125, Number('0q01').base_256_exponent()) self.assertEqual(124, Number('0q02').base_256_exponent()) def test_qexponent_fractional(self): self.assertEqual(0, Number('0q81FF_80').base_256_exponent()) self.assertEqual(0, Number('0q81FF_01').base_256_exponent()) self.assertEqual(-1, Number('0q81FE_01').base_256_exponent()) self.assertEqual(-2, Number('0q81FD_01').base_256_exponent()) self.assertEqual(-123, Number('0q8184_01').base_256_exponent()) self.assertEqual(-124, Number('0q8183_01').base_256_exponent()) def test_qexponent_fractional_neg(self): self.assertEqual(0, Number('0q7E00_01').base_256_exponent()) # -.996 self.assertEqual(0, Number('0q7E00_80').base_256_exponent()) # -.5 self.assertEqual(0, Number('0q7E00_FF').base_256_exponent()) # -.004 self.assertEqual(-1, Number('0q7E01_FF').base_256_exponent()) self.assertEqual(-2, Number('0q7E02_FF').base_256_exponent()) self.assertEqual(-123, Number('0q7E7B_FF').base_256_exponent()) self.assertEqual(-124, Number('0q7E7C_FF').base_256_exponent()) def test_alias_one(self): """ Redundant, invalid values near 1 should be interpreted as 1. NOTE: Every integral power of 256 (including negative exponents or significands) has a plateau of redundant, invalid values like this. 
""" self.assertEqual(1.0, float(Number('0q82_01'))) self.assertEqual(1.0, float(Number('0q82_00FFFFFF'))) self.assertEqual(1.0, float(Number('0q82_00C0'))) self.assertEqual(1.0, float(Number('0q82_0080'))) self.assertEqual(1.0, float(Number('0q82_0040'))) self.assertEqual(1.0, float(Number('0q82__0000__0000'))) self.assertEqual(1.0, float(Number('0q82__0000'))) self.assertEqual(1.0, float(Number('0q82'))) def test_alias_one_neg(self): self.assertEqual(-1.0, float(Number('0q7D_FF'))) self.assertEqual(-1.0, float(Number('0q7D_FF__0000'))) self.assertEqual(-1.0, float(Number('0q7D_FF3C7A38A1F250DE7E9071'))) self.assertEqual(-1.0, float(Number('0q7D_FF40'))) self.assertEqual(-1.0, float(Number('0q7D_FF80'))) self.assertEqual(-1.0, float(Number('0q7D_FFC0'))) self.assertEqual(-1.0, float(Number('0q7D_FFF0'))) self.assertEqual(-1.0, float(Number('0q7D_FFFF'))) self.assertEqual(-1.0, float(Number('0q7E'))) def test_alias_positive(self): self.assertEqual(256.0, float(Number('0q83_01'))) self.assertEqual(256.0, float(Number('0q83_00FFFFFF'))) self.assertEqual(256.0, float(Number('0q83_00C0'))) self.assertEqual(256.0, float(Number('0q83_0080'))) self.assertEqual(256.0, float(Number('0q83_0040'))) self.assertEqual(256.0, float(Number('0q83__0000__0000'))) self.assertEqual(256.0, float(Number('0q83__0000'))) self.assertEqual(256.0, float(Number('0q83'))) self.assertEqual(65536.0, float(Number('0q84_01'))) self.assertEqual(65536.0, float(Number('0q84_00FFFFFF'))) self.assertEqual(65536.0, float(Number('0q84_00C0'))) self.assertEqual(65536.0, float(Number('0q84_0080'))) self.assertEqual(65536.0, float(Number('0q84_0040'))) self.assertEqual(65536.0, float(Number('0q84__0000__0000'))) self.assertEqual(65536.0, float(Number('0q84__0000'))) self.assertEqual(65536.0, float(Number('0q84'))) def test_alias_negative(self): self.assertEqual(-256.0, float(Number('0q7C_FF'))) self.assertEqual(-256.0, float(Number('0q7C_FF__0000'))) self.assertEqual(-256.0, 
float(Number('0q7C_FF3C7A38A1F250DE7E9071'))) self.assertEqual(-256.0, float(Number('0q7C_FF40'))) self.assertEqual(-256.0, float(Number('0q7C_FF80'))) self.assertEqual(-256.0, float(Number('0q7C_FFC0'))) self.assertEqual(-256.0, float(Number('0q7C_FFF0'))) self.assertEqual(-256.0, float(Number('0q7C_FFFF'))) self.assertEqual(-256.0, float(Number('0q7D'))) self.assertEqual(-65536.0, float(Number('0q7B_FF'))) self.assertEqual(-65536.0, float(Number('0q7B_FF__0000'))) self.assertEqual(-65536.0, float(Number('0q7B_FF3C7A38A1F250DE7E9071'))) self.assertEqual(-65536.0, float(Number('0q7B_FF40'))) self.assertEqual(-65536.0, float(Number('0q7B_FF80'))) self.assertEqual(-65536.0, float(Number('0q7B_FFC0'))) self.assertEqual(-65536.0, float(Number('0q7B_FFF0'))) self.assertEqual(-65536.0, float(Number('0q7B_FFFF'))) self.assertEqual(-65536.0, float(Number('0q7C'))) def test_alias_positive_fractional(self): self.assertEqual(1.0/256.0, float(Number('0q81FF_01'))) self.assertEqual(1.0/256.0, float(Number('0q81FF_00FFFFFF'))) self.assertEqual(1.0/256.0, float(Number('0q81FF_00C0'))) self.assertEqual(1.0/256.0, float(Number('0q81FF_0080'))) self.assertEqual(1.0/256.0, float(Number('0q81FF_0040'))) self.assertEqual(1.0/256.0, float(Number('0q81FF__0000__0000'))) self.assertEqual(1.0/256.0, float(Number('0q81FF__0000'))) self.assertEqual(1.0/256.0, float(Number('0q81FF'))) self.assertEqual(1.0/65536.0, float(Number('0q81FE_01'))) self.assertEqual(1.0/65536.0, float(Number('0q81FE_00FFFFFF'))) self.assertEqual(1.0/65536.0, float(Number('0q81FE_00C0'))) self.assertEqual(1.0/65536.0, float(Number('0q81FE_0080'))) self.assertEqual(1.0/65536.0, float(Number('0q81FE_0040'))) self.assertEqual(1.0/65536.0, float(Number('0q81FE_00000000'))) self.assertEqual(1.0/65536.0, float(Number('0q81FE__0000'))) self.assertEqual(1.0/65536.0, float(Number('0q81FE'))) def test_alias_negative_fractional(self): self.assertEqual(-1.0/256.0, float(Number('0q7E00_FF'))) self.assertEqual(-1.0/256.0, 
float(Number('0q7E00_FF__0000'))) self.assertEqual(-1.0/256.0, float(Number('0q7E00_FF3C7A38A1F250DE7E9071'))) self.assertEqual(-1.0/256.0, float(Number('0q7E00_FF40'))) self.assertEqual(-1.0/256.0, float(Number('0q7E00_FF80'))) self.assertEqual(-1.0/256.0, float(Number('0q7E00_FFC0'))) self.assertEqual(-1.0/256.0, float(Number('0q7E00_FFF0'))) self.assertEqual(-1.0/256.0, float(Number('0q7E00_FFFF'))) self.assertEqual(-1.0/256.0, float(Number('0q7E01_0000'))) self.assertEqual(-1.0/256.0, float(Number('0q7E01'))) self.assertEqual(-1.0/65536.0, float(Number('0q7E01_FF'))) self.assertEqual(-1.0/65536.0, float(Number('0q7E01_FF__0000'))) self.assertEqual(-1.0/65536.0, float(Number('0q7E01_FF3C7A38A1F250DE7E9071'))) self.assertEqual(-1.0/65536.0, float(Number('0q7E01_FF40'))) self.assertEqual(-1.0/65536.0, float(Number('0q7E01_FF80'))) self.assertEqual(-1.0/65536.0, float(Number('0q7E01_FFC0'))) self.assertEqual(-1.0/65536.0, float(Number('0q7E01_FFF0'))) self.assertEqual(-1.0/65536.0, float(Number('0q7E01_FFFF'))) self.assertEqual(-1.0/65536.0, float(Number('0q7E02'))) def test_normalize_plateau_compact_256(self): # 256**1 self.assertEqual('0q83' , Number('0q83' ).qstring()) self.assertEqual('0q83' , Number('0q83', normalize=False).qstring()) self.assertEqual('0q83_01', Number('0q83', normalize=True).qstring()) self.assertEqual('0q83_01', Number('0q83').normalized().qstring()) def test_normalize_plateau_compact_one(self): # 256**0 self.assertEqual('0q82' , Number('0q82' ).qstring()) self.assertEqual('0q82' , Number('0q82', normalize=False).qstring()) self.assertEqual('0q82_01', Number('0q82', normalize=True).qstring()) self.assertEqual('0q82_01', Number('0q82').normalized().qstring()) def test_normalize_plateau_compact_positive_fractional(self): # 256**-1 self.assertEqual('0q81FF' , Number('0q81FF' ).qstring()) self.assertEqual('0q81FF' , Number('0q81FF', normalize=False).qstring()) self.assertEqual('0q81FF_01', Number('0q81FF', normalize=True).qstring()) 
self.assertEqual('0q81FF_01', Number('0q81FF').normalized().qstring()) def test_normalize_plateau_compact_negative_fractional(self): # -256**-1 self.assertEqual('0q7E01' , Number('0q7E01' ).qstring()) self.assertEqual('0q7E01' , Number('0q7E01', normalize=False).qstring()) self.assertEqual('0q7E00_FF', Number('0q7E01', normalize=True).qstring()) self.assertEqual('0q7E00_FF', Number('0q7E01').normalized().qstring()) def test_normalize_plateau_compact_one_negative(self): # -256**0 self.assertEqual('0q7E' , Number('0q7E' ).qstring()) self.assertEqual('0q7E' , Number('0q7E', normalize=False).qstring()) self.assertEqual('0q7D_FF', Number('0q7E', normalize=True).qstring()) self.assertEqual('0q7D_FF', Number('0q7E').normalized().qstring()) def test_normalize_plateau_compact_256_negative(self): # -256**1 self.assertEqual('0q7D' , Number('0q7D' ).qstring()) self.assertEqual('0q7D' , Number('0q7D', normalize=False).qstring()) self.assertEqual('0q7C_FF', Number('0q7D', normalize=True).qstring()) self.assertEqual('0q7C_FF', Number('0q7D').normalized().qstring()) def test_normalize_plateau_gibberish(self): self.assertEqual('0q82_00DEADBEEF', Number('0q82_00DEADBEEF' ).qstring()) self.assertEqual('0q82_00DEADBEEF', Number('0q82_00DEADBEEF', normalize=False).qstring()) self.assertEqual('0q82_01', Number('0q82_00DEADBEEF', normalize=True).qstring()) self.assertEqual('0q82_01', Number('0q82_00DEADBEEF').normalized().qstring()) self.assertEqual('0q81FF_00DEADBEEF', Number('0q81FF_00DEADBEEF' ).qstring()) self.assertEqual('0q81FF_00DEADBEEF', Number('0q81FF_00DEADBEEF', normalize=False).qstring()) self.assertEqual('0q81FF_01', Number('0q81FF_00DEADBEEF', normalize=True).qstring()) self.assertEqual('0q81FF_01', Number('0q81FF_00DEADBEEF').normalized().qstring()) self.assertEqual('0q7E00_FFDEADBEEF', Number('0q7E00_FFDEADBEEF' ).qstring()) self.assertEqual('0q7E00_FFDEADBEEF', Number('0q7E00_FFDEADBEEF', normalize=False).qstring()) self.assertEqual('0q7E00_FF', 
Number('0q7E00_FFDEADBEEF', normalize=True).qstring()) self.assertEqual('0q7E00_FF', Number('0q7E00_FFDEADBEEF').normalized().qstring()) self.assertEqual('0q7D_FFDEADBEEF', Number('0q7D_FFDEADBEEF' ).qstring()) self.assertEqual('0q7D_FFDEADBEEF', Number('0q7D_FFDEADBEEF', normalize=False).qstring()) self.assertEqual('0q7D_FF', Number('0q7D_FFDEADBEEF', normalize=True).qstring()) self.assertEqual('0q7D_FF', Number('0q7D_FFDEADBEEF').normalized().qstring()) def test_normalize_plateau_suffixed(self): self.assertEqual('0q83_01__7E0100', Number('0q83', Suffix(Suffix.Type.TEST)).normalized().qstring()) self.assertEqual('0q82_01__7E0100', Number('0q82', Suffix(Suffix.Type.TEST)).normalized().qstring()) self.assertEqual('0q81FF_01__7E0100', Number('0q81FF_00BEEF', Suffix(Suffix.Type.TEST)).normalized().qstring()) self.assertEqual('0q7E00_FF__7E0100', Number('0q7E00_FFBEEF', Suffix(Suffix.Type.TEST)).normalized().qstring()) self.assertEqual('0q7D_FF__7E0100', Number('0q7E', Suffix(Suffix.Type.TEST)).normalized().qstring()) self.assertEqual('0q7C_FF__7E0100', Number('0q7D', Suffix(Suffix.Type.TEST)).normalized().qstring()) def test_normalize_imaginary(self): n = Number(42, Suffix(Suffix.Type.IMAGINARY, Number(10))) self.assertEqual('0q82_2A__820A_690300', n.qstring()) n._normalize_imaginary() self.assertEqual('0q82_2A__820A_690300', n.qstring()) n = Number(42, Suffix(Suffix.Type.IMAGINARY, Number(0))) self.assertEqual('0q82_2A__80_690200', n.qstring()) n._normalize_imaginary() self.assertEqual('0q82_2A', n.qstring()) n = Number( 42, Suffix(Suffix.Type.IMAGINARY, Number(10)), Suffix(Suffix.Type.IMAGINARY, Number(10)) ) self.assertEqual('0q82_2A__820A_690300__820A_690300', n.qstring()) n._normalize_imaginary() self.assertEqual('0q82_2A__820A_690300__820A_690300', n.qstring()) n = Number( 42, Suffix(Suffix.Type.IMAGINARY, Number(10)), Suffix(Suffix.Type.IMAGINARY, Number(0)) ) self.assertEqual('0q82_2A__820A_690300__80_690200', n.qstring()) n._normalize_imaginary() 
self.assertEqual('0q82_2A__820A_690300__80_690200', n.qstring()) n = Number( 42, Suffix(Suffix.Type.IMAGINARY, Number(0)), Suffix(Suffix.Type.IMAGINARY, Number(10)) ) self.assertEqual('0q82_2A__80_690200__820A_690300', n.qstring()) n._normalize_imaginary() self.assertEqual('0q82_2A__80_690200__820A_690300', n.qstring()) n = Number( 42, Suffix(Suffix.Type.IMAGINARY, Number(0)), Suffix(Suffix.Type.IMAGINARY, Number(0)) ) self.assertEqual('0q82_2A__80_690200__80_690200', n.qstring()) n._normalize_imaginary() self.assertEqual('0q82_2A', n.qstring()) def test_int_plateau(self): self.assertEqual(65536, int(Number('0q84_01'))) self.assertEqual(65536, int(Number('0q84'))) self.assertEqual(256, int(Number('0q83_01'))) self.assertEqual(256, int(Number('0q83'))) self.assertEqual(1, int(Number('0q82_01'))) self.assertEqual(1, int(Number('0q82'))) self.assertEqual(-1, int(Number('0q7E'))) self.assertEqual(-1, int(Number('0q7D_FF'))) self.assertEqual(-256, int(Number('0q7D'))) self.assertEqual(-256, int(Number('0q7C_FF'))) self.assertEqual(-65536, int(Number('0q7C'))) self.assertEqual(-65536, int(Number('0q7B_FF'))) def test_normalize_less(self): self.assertFalse(Number('0q82') < Number('0q82_01')) self.assertFalse(Number('0q81FF') < Number('0q81FF_01')) self.assertFalse(Number('0q7E00_FF') < Number('0q7E01')) self.assertFalse(Number('0q7D_FF') < Number('0q7E')) def test_normalize_greater(self): self.assertFalse(Number('0q82_01') > Number('0q82')) self.assertFalse(Number('0q81FF_01') > Number('0q81FF')) self.assertFalse(Number('0q7E01') > Number('0q7E00_FF')) self.assertFalse(Number('0q7E') > Number('0q7D_FF')) def test_normalize_less_equal(self): self.assertTrue(Number('0q82_01') <= Number('0q82')) self.assertTrue(Number('0q81FF_01') <= Number('0q81FF')) self.assertTrue(Number('0q7E01') <= Number('0q7E00_FF')) self.assertTrue(Number('0q7E') <= Number('0q7D_FF')) def test_normalize_greater_equal(self): self.assertTrue(Number('0q82') >= Number('0q82_01')) 
self.assertTrue(Number('0q81FF') >= Number('0q81FF_01')) self.assertTrue(Number('0q7E00_FF') >= Number('0q7E01')) self.assertTrue(Number('0q7D_FF') >= Number('0q7E')) def test_alias_equality(self): """Test number plateaus at +/-256**+/-n for n=0,1,2.""" self.assertEqual(Number('0q84'), Number('0q84_01')) # 256**2 self.assertEqual(Number('0q83'), Number('0q83_01')) # 256**1 self.assertEqual(Number('0q82'), Number('0q82_01')) # 256**0 self.assertEqual(Number('0q81FF'), Number('0q81FF_01')) # 256**-1 self.assertEqual(Number('0q7E02'), Number('0q7E01_FF')) # 256**-2 self.assertEqual(Number('0q81FE'), Number('0q81FE_01')) # -256**-2 self.assertEqual(Number('0q7E01'), Number('0q7E00_FF')) # -256**-1 self.assertEqual(Number('0q7E'), Number('0q7D_FF')) # -256**0 self.assertEqual(Number('0q7D'), Number('0q7C_FF')) # -256**1 self.assertEqual(Number('0q7C'), Number('0q7B_FF')) # -256**2 def test_integers_and_qstrings(self): def i__q(i, q): """ Test the Number constructor on an integer and a qstring, converting in both directions. Very Short Version: assert Number(i) == Number(q) Less Short version: assert q == Number(i).qstring() assert i == int(Number(q)) Verify each integer and qstring is monotonic -- the values are tested in descending order. Why a buncha i__q() calls are superior to a list of test case data: so the stack trace identifies the line with the failing data.
<gh_stars>0 import FWCore.ParameterSet.Config as cms ########################reconstruction_step/ MC_Tracing_v0 path modules ###added for cmssw_11_1 TrackProducer = cms.EDProducer("TrackProducer", AlgorithmName = cms.string('undefAlgorithm'), Fitter = cms.string('KFFittingSmootherWithOutliersRejectionAndRK'), GeometricInnerState = cms.bool(False), MeasurementTracker = cms.string(''), MeasurementTrackerEvent = cms.InputTag("MeasurementTrackerEvent"), NavigationSchool = cms.string('SimpleNavigationSchool'), Propagator = cms.string('RungeKuttaTrackerPropagator'), SimpleMagneticField = cms.string(''), TTRHBuilder = cms.string('WithAngleAndTemplate'), # this will be changed in the main file for now to "WithTrackAngle" TrajectoryInEvent = cms.bool(False), alias = cms.untracked.string('ctfWithMaterialTracks'), beamSpot = cms.InputTag("offlineBeamSpot"), clusterRemovalInfo = cms.InputTag(""), src = cms.InputTag("ckfTrackCandidates"), useHitsSplitting = cms.bool(False), useSimpleMF = cms.bool(False) ) ###added for cmssw_10_6 SiStripClusterChargeCutNone = cms.PSet( value = cms.double( -1.0 ) ) SiStripClusterChargeCutLoose = cms.PSet( value = cms.double( 1620.0 ) ) SiStripClusterChargeCutTight = cms.PSet( value = cms.double( 1945.0 ) ) ######################## CkfBaseTrajectoryFilter_block = cms.PSet( ComponentType = cms.string('CkfBaseTrajectoryFilter'), chargeSignificance = cms.double(-1.0), constantValueForLostHitsFractionFilter = cms.double(2.0), extraNumberOfHitsBeforeTheFirstLoop = cms.int32(4), maxCCCLostHits = cms.int32(9999), maxConsecLostHits = cms.int32(1), maxLostHits = cms.int32(999), maxLostHitsFraction = cms.double(0.1), maxNumberOfHits = cms.int32(100), minGoodStripCharge = cms.PSet( refToPSet_ = cms.string('SiStripClusterChargeCutNone') ), minHitsMinPt = cms.int32(3), minNumberOfHitsForLoopers = cms.int32(13), minNumberOfHitsPerLoop = cms.int32(4), minPt = cms.double(0.9), minimumNumberOfHits = cms.int32(5), nSigmaMinPt = cms.double(5.0), pixelSeedExtension = 
cms.bool(False), seedExtension = cms.int32(0), seedPairPenalty = cms.int32(0), strictSeedExtension = cms.bool(False) ) bunchSpacingProducer = cms.EDProducer("BunchSpacingProducer") csc2DRecHits = cms.EDProducer("CSCRecHitDProducer", CSCDebug = cms.untracked.bool(False), CSCNoOfTimeBinsForDynamicPedestal = cms.int32(2), CSCStripClusterChargeCut = cms.double(25.0), CSCStripClusterSize = cms.untracked.int32(3), CSCStripPeakThreshold = cms.double(10.0), CSCStripxtalksOffset = cms.double(0.03), CSCUseCalibrations = cms.bool(True), CSCUseGasGainCorrections = cms.bool(False), CSCUseReducedWireTimeWindow = cms.bool(False), CSCUseStaticPedestals = cms.bool(False), CSCUseTimingCorrections = cms.bool(True), CSCWireClusterDeltaT = cms.int32(1), CSCWireTimeWindowHigh = cms.int32(15), CSCWireTimeWindowLow = cms.int32(0), CSCstripWireDeltaTime = cms.int32(8), ConstSyst_ME12 = cms.double(0.02), ConstSyst_ME13 = cms.double(0.03), ConstSyst_ME1a = cms.double(0.01), ConstSyst_ME1b = cms.double(0.02), ConstSyst_ME21 = cms.double(0.03), ConstSyst_ME22 = cms.double(0.03), ConstSyst_ME31 = cms.double(0.03), ConstSyst_ME32 = cms.double(0.03), ConstSyst_ME41 = cms.double(0.03), NoiseLevel_ME12 = cms.double(7.0), NoiseLevel_ME13 = cms.double(4.0), NoiseLevel_ME1a = cms.double(9.0), NoiseLevel_ME1b = cms.double(6.0), NoiseLevel_ME21 = cms.double(5.0), NoiseLevel_ME22 = cms.double(7.0), NoiseLevel_ME31 = cms.double(5.0), NoiseLevel_ME32 = cms.double(7.0), NoiseLevel_ME41 = cms.double(5.0), UseAverageTime = cms.bool(False), UseFivePoleFit = cms.bool(True), UseParabolaFit = cms.bool(False), XTasymmetry_ME12 = cms.double(0.015), XTasymmetry_ME13 = cms.double(0.02), XTasymmetry_ME1a = cms.double(0.023), XTasymmetry_ME1b = cms.double(0.01), XTasymmetry_ME21 = cms.double(0.023), XTasymmetry_ME22 = cms.double(0.023), XTasymmetry_ME31 = cms.double(0.023), XTasymmetry_ME32 = cms.double(0.023), XTasymmetry_ME41 = cms.double(0.023), readBadChambers = cms.bool(True), readBadChannels = cms.bool(False), 
stripDigiTag = cms.InputTag("muonCSCDigis","MuonCSCStripDigi"), wireDigiTag = cms.InputTag("muonCSCDigis","MuonCSCWireDigi") ) cscSegments = cms.EDProducer("CSCSegmentProducer", algo_psets = cms.VPSet( cms.PSet( algo_name = cms.string('CSCSegAlgoSK'), algo_psets = cms.VPSet( cms.PSet( chi2Max = cms.double(99999.0), dPhiFineMax = cms.double(0.025), dPhiMax = cms.double(0.003), dRPhiFineMax = cms.double(8.0), dRPhiMax = cms.double(8.0), minLayersApart = cms.int32(2), verboseInfo = cms.untracked.bool(True), wideSeg = cms.double(3.0) ), cms.PSet( chi2Max = cms.double(99999.0), dPhiFineMax = cms.double(0.025), dPhiMax = cms.double(0.025), dRPhiFineMax = cms.double(3.0), dRPhiMax = cms.double(8.0), minLayersApart = cms.int32(2), verboseInfo = cms.untracked.bool(True), wideSeg = cms.double(3.0) ) ), chamber_types = cms.vstring( 'ME1/a', 'ME1/b', 'ME1/2', 'ME1/3', 'ME2/1', 'ME2/2', 'ME3/1', 'ME3/2', 'ME4/1', 'ME4/2' ), parameters_per_chamber_type = cms.vint32( 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 ) ), cms.PSet( algo_name = cms.string('CSCSegAlgoTC'), algo_psets = cms.VPSet( cms.PSet( SegmentSorting = cms.int32(1), chi2Max = cms.double(6000.0), chi2ndfProbMin = cms.double(0.0001), dPhiFineMax = cms.double(0.02), dPhiMax = cms.double(0.003), dRPhiFineMax = cms.double(6.0), dRPhiMax = cms.double(1.2), minLayersApart = cms.int32(2), verboseInfo = cms.untracked.bool(True) ), cms.PSet( SegmentSorting = cms.int32(1), chi2Max = cms.double(6000.0), chi2ndfProbMin = cms.double(0.0001), dPhiFineMax = cms.double(0.013), dPhiMax = cms.double(0.00198), dRPhiFineMax = cms.double(3.0), dRPhiMax = cms.double(0.6), minLayersApart = cms.int32(2), verboseInfo = cms.untracked.bool(True) ) ), chamber_types = cms.vstring( 'ME1/a', 'ME1/b', 'ME1/2', 'ME1/3', 'ME2/1', 'ME2/2', 'ME3/1', 'ME3/2', 'ME4/1', 'ME4/2' ), parameters_per_chamber_type = cms.vint32( 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 ) ), cms.PSet( algo_name = cms.string('CSCSegAlgoDF'), algo_psets = cms.VPSet( cms.PSet( CSCSegmentDebug = 
cms.untracked.bool(False), Pruning = cms.untracked.bool(False), chi2Max = cms.double(5000.0), dPhiFineMax = cms.double(0.025), dRPhiFineMax = cms.double(8.0), dXclusBoxMax = cms.double(8.0), dYclusBoxMax = cms.double(8.0), maxDPhi = cms.double(999.0), maxDTheta = cms.double(999.0), maxRatioResidualPrune = cms.double(3.0), minHitsForPreClustering = cms.int32(10), minHitsPerSegment = cms.int32(3), minLayersApart = cms.int32(2), nHitsPerClusterIsShower = cms.int32(20), preClustering = cms.untracked.bool(False), tanPhiMax = cms.double(0.5), tanThetaMax = cms.double(1.2) ), cms.PSet( CSCSegmentDebug = cms.untracked.bool(False), Pruning = cms.untracked.bool(False), chi2Max = cms.double(5000.0), dPhiFineMax = cms.double(0.025), dRPhiFineMax = cms.double(12.0), dXclusBoxMax = cms.double(8.0), dYclusBoxMax = cms.double(12.0), maxDPhi = cms.double(999.0), maxDTheta = cms.double(999.0), maxRatioResidualPrune = cms.double(3.0), minHitsForPreClustering = cms.int32(10), minHitsPerSegment = cms.int32(3), minLayersApart = cms.int32(2), nHitsPerClusterIsShower = cms.int32(20), preClustering = cms.untracked.bool(False), tanPhiMax = cms.double(0.8), tanThetaMax = cms.double(2.0) ), cms.PSet( CSCSegmentDebug = cms.untracked.bool(False), Pruning = cms.untracked.bool(False), chi2Max = cms.double(5000.0), dPhiFineMax = cms.double(0.025), dRPhiFineMax = cms.double(8.0), dXclusBoxMax = cms.double(8.0), dYclusBoxMax = cms.double(8.0), maxDPhi = cms.double(999.0), maxDTheta = cms.double(999.0), maxRatioResidualPrune = cms.double(3.0), minHitsForPreClustering = cms.int32(30), minHitsPerSegment = cms.int32(3), minLayersApart = cms.int32(2), nHitsPerClusterIsShower = cms.int32(20), preClustering = cms.untracked.bool(False), tanPhiMax = cms.double(0.5), tanThetaMax = cms.double(1.2) ) ), chamber_types = cms.vstring( 'ME1/a', 'ME1/b', 'ME1/2', 'ME1/3', 'ME2/1', 'ME2/2', 'ME3/1', 'ME3/2', 'ME4/1', 'ME4/2' ), parameters_per_chamber_type = cms.vint32( 3, 1, 2, 2, 1, 2, 1, 2, 1, 2 ) ), cms.PSet( 
algo_name = cms.string('CSCSegAlgoST'), algo_psets = cms.VPSet( cms.PSet( BPMinImprovement = cms.double(10000.0), BrutePruning = cms.bool(True), CSCDebug = cms.untracked.bool(False), CorrectTheErrors = cms.bool(True), Covariance = cms.double(0.0), ForceCovariance = cms.bool(False), ForceCovarianceAll = cms.bool(False), NormChi2Cut2D = cms.double(20.0), NormChi2Cut3D = cms.double(10.0), Pruning = cms.bool(True), SeedBig = cms.double(0.0015), SeedSmall = cms.double(0.0002), curvePenalty = cms.double(2.0), curvePenaltyThreshold = cms.double(0.85), dPhiFineMax = cms.double(0.025), dRPhiFineMax = cms.double(8.0), dXclusBoxMax = cms.double(4.0), dYclusBoxMax = cms.double(8.0), hitDropLimit4Hits = cms.double(0.6), hitDropLimit5Hits = cms.double(0.8), hitDropLimit6Hits = cms.double(0.3333), maxDPhi = cms.double(999.0), maxDTheta = cms.double(999.0), maxRatioResidualPrune = cms.double(3), maxRecHitsInCluster = cms.int32(20), minHitsPerSegment = cms.int32(3), onlyBestSegment = cms.bool(False), preClustering = cms.bool(True), preClusteringUseChaining = cms.bool(True), prePrun = cms.bool(True), prePrunLimit = cms.double(3.17), tanPhiMax = cms.double(0.5), tanThetaMax = cms.double(1.2), useShowering = cms.bool(False), yweightPenalty = cms.double(1.5), yweightPenaltyThreshold = cms.double(1.0) ), cms.PSet( BPMinImprovement = cms.double(10000.0), BrutePruning = cms.bool(True), CSCDebug = cms.untracked.bool(False), CorrectTheErrors = cms.bool(True), Covariance = cms.double(0.0), ForceCovariance = cms.bool(False), ForceCovarianceAll = cms.bool(False), NormChi2Cut2D = cms.double(20.0), NormChi2Cut3D = cms.double(10.0), Pruning = cms.bool(True), SeedBig = cms.double(0.0015), SeedSmall = cms.double(0.0002), curvePenalty = cms.double(2.0), curvePenaltyThreshold = cms.double(0.85), dPhiFineMax = cms.double(0.025), dRPhiFineMax = cms.double(8.0), dXclusBoxMax = cms.double(4.0), dYclusBoxMax = cms.double(8.0), hitDropLimit4Hits = cms.double(0.6), hitDropLimit5Hits = cms.double(0.8), 
hitDropLimit6Hits = cms.double(0.3333), maxDPhi = cms.double(999.0), maxDTheta = cms.double(999.0), maxRatioResidualPrune = cms.double(3), maxRecHitsInCluster = cms.int32(24), minHitsPerSegment = cms.int32(3), onlyBestSegment = cms.bool(False), preClustering = cms.bool(True), preClusteringUseChaining = cms.bool(True), prePrun = cms.bool(True), prePrunLimit = cms.double(3.17), tanPhiMax = cms.double(0.5), tanThetaMax = cms.double(1.2), useShowering = cms.bool(False), yweightPenalty = cms.double(1.5), yweightPenaltyThreshold = cms.double(1.0) ) ), chamber_types = cms.vstring( 'ME1/a', 'ME1/b', 'ME1/2', 'ME1/3', 'ME2/1', 'ME2/2', 'ME3/1', 'ME3/2', 'ME4/1', 'ME4/2' ), parameters_per_chamber_type = cms.vint32( 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 ) ), cms.PSet( algo_name = cms.string('CSCSegAlgoRU'), algo_psets = cms.VPSet( cms.PSet( enlarge = cms.bool(False), #cmssw_10_6 chi2Max = cms.double(100.0), chi2Norm_2D_ = cms.double(35), chi2_str = cms.double(50.0), dPhiIntMax = cms.double(0.005), dPhiMax = cms.double(0.006), dRIntMax = cms.double(2.0), dRMax = cms.double(1.5), doCollisions = cms.bool(True), minLayersApart = cms.int32(1), wideSeg = cms.double(3.0) ), cms.PSet( enlarge = cms.bool(False), #cmssw_10_6 chi2Max = cms.double(100.0), chi2Norm_2D_ = cms.double(35), chi2_str = cms.double(50.0), dPhiIntMax = cms.double(0.004), dPhiMax = cms.double(0.005), dRIntMax = cms.double(2.0), dRMax = cms.double(1.5), doCollisions = cms.bool(True), minLayersApart = cms.int32(1), wideSeg = cms.double(3.0) ), cms.PSet( enlarge = cms.bool(False), #cmssw_10_6 chi2Max = cms.double(100.0), chi2Norm_2D_ = cms.double(35), chi2_str = cms.double(50.0), dPhiIntMax = cms.double(0.003), dPhiMax = cms.double(0.004), dRIntMax = cms.double(2.0), dRMax = cms.double(1.5), doCollisions = cms.bool(True), minLayersApart = cms.int32(1), wideSeg = cms.double(3.0) ), cms.PSet( enlarge = cms.bool(False), #cmssw_10_6 chi2Max = cms.double(60.0), chi2Norm_2D_ = cms.double(20), chi2_str = cms.double(30.0), dPhiIntMax 
= cms.double(0.002), dPhiMax = cms.double(0.003), dRIntMax = cms.double(2.0), dRMax = cms.double(1.5), doCollisions = cms.bool(True), minLayersApart = cms.int32(1), wideSeg = cms.double(3.0) ), cms.PSet( enlarge = cms.bool(False), #cmssw_10_6 chi2Max = cms.double(180.0), chi2Norm_2D_ = cms.double(60), chi2_str = cms.double(80.0), dPhiIntMax = cms.double(0.005), dPhiMax = cms.double(0.007), dRIntMax = cms.double(2.0), dRMax = cms.double(1.5), doCollisions = cms.bool(True), minLayersApart = cms.int32(1), wideSeg = cms.double(3.0) ), cms.PSet( enlarge = cms.bool(False), #cmssw_10_6 chi2Max = cms.double(100.0), chi2Norm_2D_ = cms.double(35), chi2_str = cms.double(50.0), dPhiIntMax = cms.double(0.004), dPhiMax = cms.double(0.006), dRIntMax = cms.double(2.0), dRMax = cms.double(1.5), doCollisions = cms.bool(True), minLayersApart = cms.int32(1), wideSeg = cms.double(3.0) ) ), chamber_types = cms.vstring( 'ME1/a', 'ME1/b', 'ME1/2', 'ME1/3', 'ME2/1', 'ME2/2', 'ME3/1', 'ME3/2', 'ME4/1', 'ME4/2' ), parameters_per_chamber_type = cms.vint32( 1, 2, 3, 4, 5, 6, 5, 6, 5, 6 ) ) ), algo_type = cms.int32(5), inputObjects =
<reponame>diogocs1/comps<gh_stars>0 # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import os import re import openerp from openerp import SUPERUSER_ID, tools from openerp.osv import fields, osv from openerp.tools.translate import _ from openerp.tools.safe_eval import safe_eval as eval from openerp.tools import image_resize_image class multi_company_default(osv.osv): """ Manage multi company default value """ _name = 'multi_company.default' _description = 'Default multi company' _order = 'company_id,sequence,id' _columns = { 'sequence': fields.integer('Sequence'), 'name': fields.char('Name', required=True, help='Name it to easily find a record'), 'company_id': fields.many2one('res.company', 'Main Company', required=True, help='Company where the user is connected'), 'company_dest_id': fields.many2one('res.company', 'Default Company', required=True, help='Company to store the current record'), 'object_id': fields.many2one('ir.model', 'Object', required=True, help='Object affected by this rule'), 'expression': fields.char('Expression', required=True, help='Expression, must be True to 
match\nuse context.get or user (browse)'), 'field_id': fields.many2one('ir.model.fields', 'Field', help='Select field property'), } _defaults = { 'expression': 'True', 'sequence': 100, } def copy(self, cr, uid, id, default=None, context=None): """ Add (copy) in the name when duplicate record """ if not context: context = {} if not default: default = {} company = self.browse(cr, uid, id, context=context) default = default.copy() default['name'] = company.name + _(' (copy)') return super(multi_company_default, self).copy(cr, uid, id, default, context=context) multi_company_default() class res_company(osv.osv): _name = "res.company" _description = 'Companies' _order = 'name' def _get_address_data(self, cr, uid, ids, field_names, arg, context=None): """ Read the 'address' functional fields. """ result = {} part_obj = self.pool.get('res.partner') for company in self.browse(cr, uid, ids, context=context): result[company.id] = {}.fromkeys(field_names, False) if company.partner_id: address_data = part_obj.address_get(cr, openerp.SUPERUSER_ID, [company.partner_id.id], adr_pref=['default']) if address_data['default']: address = part_obj.read(cr, openerp.SUPERUSER_ID, [address_data['default']], field_names, context=context)[0] for field in field_names: result[company.id][field] = address[field] or False return result def _set_address_data(self, cr, uid, company_id, name, value, arg, context=None): """ Write the 'address' functional fields. 
""" company = self.browse(cr, uid, company_id, context=context) if company.partner_id: part_obj = self.pool.get('res.partner') address_data = part_obj.address_get(cr, uid, [company.partner_id.id], adr_pref=['default']) address = address_data['default'] if address: part_obj.write(cr, uid, [address], {name: value or False}, context=context) else: part_obj.create(cr, uid, {name: value or False, 'parent_id': company.partner_id.id}, context=context) return True def _get_logo_web(self, cr, uid, ids, _field_name, _args, context=None): result = dict.fromkeys(ids, False) for record in self.browse(cr, uid, ids, context=context): size = (180, None) result[record.id] = image_resize_image(record.partner_id.image, size) return result def _get_companies_from_partner(self, cr, uid, ids, context=None): return self.pool['res.company'].search(cr, uid, [('partner_id', 'in', ids)], context=context) _columns = { 'name': fields.related('partner_id', 'name', string='Company Name', size=128, required=True, store=True, type='char'), 'parent_id': fields.many2one('res.company', 'Parent Company', select=True), 'child_ids': fields.one2many('res.company', 'parent_id', 'Child Companies'), 'partner_id': fields.many2one('res.partner', 'Partner', required=True), 'rml_header': fields.text('RML Header', required=True), 'rml_header1': fields.char('Company Tagline', help="Appears by default on the top right corner of your printed documents (report header)."), 'rml_header2': fields.text('RML Internal Header', required=True), 'rml_header3': fields.text('RML Internal Header for Landscape Reports', required=True), 'rml_footer': fields.text('Report Footer', help="Footer text displayed at the bottom of all reports."), 'rml_footer_readonly': fields.related('rml_footer', type='text', string='Report Footer', readonly=True), 'custom_footer': fields.boolean('Custom Footer', help="Check this to define the report footer manually. 
Otherwise it will be filled in automatically."), 'font': fields.many2one('res.font', string="Font", domain=[('mode', 'in', ('Normal', 'Regular', 'all', 'Book'))], help="Set the font into the report header, it will be used as default font in the RML reports of the user company"), 'logo': fields.related('partner_id', 'image', string="Logo", type="binary"), 'logo_web': fields.function(_get_logo_web, string="Logo Web", type="binary", store={ 'res.company': (lambda s, c, u, i, x: i, ['partner_id'], 10), 'res.partner': (_get_companies_from_partner, ['image'], 10), }), 'currency_id': fields.many2one('res.currency', 'Currency', required=True), 'currency_ids': fields.one2many('res.currency', 'company_id', 'Currency'), 'user_ids': fields.many2many('res.users', 'res_company_users_rel', 'cid', 'user_id', 'Accepted Users'), 'account_no':fields.char('Account No.'), 'street': fields.function(_get_address_data, fnct_inv=_set_address_data, size=128, type='char', string="Street", multi='address'), 'street2': fields.function(_get_address_data, fnct_inv=_set_address_data, size=128, type='char', string="Street2", multi='address'), 'zip': fields.function(_get_address_data, fnct_inv=_set_address_data, size=24, type='char', string="Zip", multi='address'), 'city': fields.function(_get_address_data, fnct_inv=_set_address_data, size=24, type='char', string="City", multi='address'), 'state_id': fields.function(_get_address_data, fnct_inv=_set_address_data, type='many2one', relation='res.country.state', string="Fed. 
State", multi='address'), 'bank_ids': fields.one2many('res.partner.bank','company_id', 'Bank Accounts', help='Bank accounts related to this company'), 'country_id': fields.function(_get_address_data, fnct_inv=_set_address_data, type='many2one', relation='res.country', string="Country", multi='address'), 'email': fields.related('partner_id', 'email', size=64, type='char', string="Email", store=True), 'phone': fields.related('partner_id', 'phone', size=64, type='char', string="Phone", store=True), 'fax': fields.function(_get_address_data, fnct_inv=_set_address_data, size=64, type='char', string="Fax", multi='address'), 'website': fields.related('partner_id', 'website', string="Website", type="char", size=64), 'vat': fields.related('partner_id', 'vat', string="Tax ID", type="char", size=32), 'company_registry': fields.char('Company Registry', size=64), 'rml_paper_format': fields.selection([('a4', 'A4'), ('us_letter', 'US Letter')], "Paper Format", required=True, oldname='paper_format'), } _sql_constraints = [ ('name_uniq', 'unique (name)', 'The company name must be unique !') ] def onchange_footer(self, cr, uid, ids, custom_footer, phone, fax, email, website, vat, company_registry, bank_ids, context=None): if custom_footer: return {} # first line (notice that missing elements are filtered out before the join) res = ' | '.join(filter(bool, [ phone and '%s: %s' % (_('Phone'), phone), fax and '%s: %s' % (_('Fax'), fax), email and '%s: %s' % (_('Email'), email), website and '%s: %s' % (_('Website'), website), vat and '%s: %s' % (_('TIN'), vat), company_registry and '%s: %s' % (_('Reg'), company_registry), ])) # second line: bank accounts res_partner_bank = self.pool.get('res.partner.bank') account_data = self.resolve_2many_commands(cr, uid, 'bank_ids', bank_ids, context=context) account_names = res_partner_bank._prepare_name_get(cr, uid, account_data, context=context) if account_names: title = _('Bank Accounts') if len(account_names) > 1 else _('Bank Account') res += 
'\n%s: %s' % (title, ', '.join(name for id, name in account_names)) return {'value': {'rml_footer': res, 'rml_footer_readonly': res}} def onchange_state(self, cr, uid, ids, state_id, context=None): if state_id: return {'value':{'country_id': self.pool.get('res.country.state').browse(cr, uid, state_id, context).country_id.id }} return {} def onchange_font_name(self, cr, uid, ids, font, rml_header, rml_header2, rml_header3, context=None): """ To change default header style of all <para> and drawstring. """ def _change_header(header,font): """ Replace default fontname use in header and setfont tag """ default_para = re.sub('fontName.?=.?".*"', 'fontName="%s"'% font, header) return re.sub('(<setFont.?name.?=.?)(".*?")(.)', '\g<1>"%s"\g<3>'% font, default_para) if not font: return True fontname = self.pool.get('res.font').browse(cr, uid, font, context=context).name return {'value':{ 'rml_header': _change_header(rml_header, fontname), 'rml_header2':_change_header(rml_header2, fontname), 'rml_header3':_change_header(rml_header3, fontname) }} def on_change_country(self, cr, uid, ids, country_id, context=None): res = {'domain': {'state_id': []}} currency_id = self._get_euro(cr, uid, context=context) if country_id: currency_id = self.pool.get('res.country').browse(cr, uid, country_id, context=context).currency_id.id res['domain'] = {'state_id': [('country_id','=',country_id)]} res['value'] = {'currency_id': currency_id} return res def name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100): context = dict(context or {}) if context.pop('user_preference', None): # We browse as superuser. Otherwise, the user would be able to # select only the currently visible companies (according to rules, # which are probably to allow to see the child companies) even if # she belongs to some other companies. 
user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context) cmp_ids = list(set([user.company_id.id] + [cmp.id for cmp in user.company_ids])) uid = SUPERUSER_ID args = (args or []) + [('id', 'in', cmp_ids)] return super(res_company, self).name_search(cr, uid, name=name, args=args, operator=operator, context=context, limit=limit) def _company_default_get(self, cr, uid, object=False, field=False, context=None): """ Check if the object for this company have a default value """ if not context: context = {} proxy = self.pool.get('multi_company.default') args = [ ('object_id.model', '=', object), ('field_id', '=', field), ] ids = proxy.search(cr, uid, args, context=context) user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context) for rule in proxy.browse(cr, uid, ids, context): if eval(rule.expression, {'context': context, 'user': user}): return rule.company_dest_id.id return user.company_id.id @tools.ormcache() def _get_company_children(self, cr, uid=None, company=None): if not company: return [] ids = self.search(cr, uid, [('parent_id','child_of',[company])]) return ids def _get_partner_hierarchy(self, cr, uid, company_id, context=None): if company_id: parent_id = self.browse(cr, uid, company_id)['parent_id'] if parent_id: return self._get_partner_hierarchy(cr, uid, parent_id.id, context) else: return self._get_partner_descendance(cr, uid, company_id, [], context) return [] def _get_partner_descendance(self, cr, uid, company_id, descendance, context=None): descendance.append(self.browse(cr, uid, company_id).partner_id.id) for child_id in self._get_company_children(cr, uid, company_id): if child_id != company_id: descendance = self._get_partner_descendance(cr, uid, child_id, descendance) return descendance # # This function restart the cache on the _get_company_children method # def cache_restart(self, cr): self._get_company_children.clear_cache(self) def create(self, cr, uid, vals, context=None): if not vals.get('name', 
False) or vals.get('partner_id', False): self.cache_restart(cr) return super(res_company, self).create(cr, uid, vals, context=context) obj_partner = self.pool.get('res.partner') partner_id = obj_partner.create(cr, uid,
would be transferred to federal detention .", 'DA Rosen\'s November 7 presentation claimed honoring ICE holds would "produce an undetermined amount of cost savings by reducing probation costs", as individuals otherwise on probation would be transferred to federal detention.', ], [ "Canberra 's response came from Norwood , who drew a foul and scored a point from a free throw .", "Canberra's response came from Norwood, who drew a foul and scored a point from a free throw.", ], [ "He displayed an interest in literature from a young age , and began reading Greek and Roman myths and the fables of the Grimm brothers which `` instilled in him a lifelong affinity with Europe '' .", 'He displayed an interest in literature from a young age, and began reading Greek and Roman myths and the fables of the Grimm brothers which "instilled in him a lifelong affinity with Europe".', ], [ "In the history of India-Pakistan bilateral relations , the leader of one country has not visited the swearing-in ceremony of a leader of the other since 1947 , when the two countries became independent .", "In the history of India-Pakistan bilateral relations, the leader of one country has not visited the swearing-in ceremony of a leader of the other since 1947, when the two countries became independent.", ], [ "On Wednesday open-access journal ZooKeys published their paper on two of the new species , Sturnira bakeri and Sturnira burtonlimi .", "On Wednesday open-access journal ZooKeys published their paper on two of the new species, Sturnira bakeri and Sturnira burtonlimi.", ], [ "These groups often argue for the recognition of obesity as a disability under the US Americans With Disabilities Act -LRB- ADA -RRB- .", "These groups often argue for the recognition of obesity as a disability under the US Americans With Disabilities Act (ADA).", ], [ "An art car , featuring what might be the largest collection of singing robotic lobsters anywhere in the world was on display , curiously titled the `` 
Sashimi Tabernacle Choir . ''", 'An art car, featuring what might be the largest collection of singing robotic lobsters anywhere in the world was on display, curiously titled the "Sashimi Tabernacle Choir."', ], [ "As with many of Rio de Janeiro 's cultural monuments , the library was originally off-limits to the general public .", "As with many of Rio de Janeiro's cultural monuments, the library was originally off-limits to the general public.", ], [ "The initial condition and the final condition of the system are respectively described by values in a configuration space , for example a position space , or some equivalent space such as a momentum space .", "The initial condition and the final condition of the system are respectively described by values in a configuration space, for example a position space, or some equivalent space such as a momentum space.", ], [ "Weber began his studies of the subject in The Protestant Ethic and the Spirit of Capitalism , in which he argued that the redefinition of the connection between work and piety in Protestantism and especially in ascetic Protestant denominations , particularly Calvinism , shifted human effort towards rational efforts aimed at achieving economic gain .", "Weber began his studies of the subject in The Protestant Ethic and the Spirit of Capitalism, in which he argued that the redefinition of the connection between work and piety in Protestantism and especially in ascetic Protestant denominations, particularly Calvinism, shifted human effort towards rational efforts aimed at achieving economic gain.", ], [ "Under the assumption of perfect competition , supply is determined by marginal cost .", "Under the assumption of perfect competition, supply is determined by marginal cost.", ], [ "De Beauvoir 's adopted daughter and literary heir <NAME> , unlike Elka\u00efm , published de Beauvoir 's unedited letters to both Sartre and Algren .", "De Beauvoir's adopted daughter and literary heir <NAME>, unlike 
Elka\u00efm, published de Beauvoir's unedited letters to both Sartre and Algren.", ], [ "There were some residents in the damaged house who were not accounted for until about 4:30 p.m. when emergency personnel confirmed that three people inside the house were also killed .", "There were some residents in the damaged house who were not accounted for until about 4:30 p.m. when emergency personnel confirmed that three people inside the house were also killed.", ], [ "`` I think from the very beginning , one of the challenges we 've had with Iran is that they have looked at this administration and felt that the administration was not as strong as it needed to be .", "\"I think from the very beginning, one of the challenges we've had with Iran is that they have looked at this administration and felt that the administration was not as strong as it needed to be.", ], [ "<NAME>\u00e9ndez , vice president of the Productive Economic Area , said Venezuelan President <NAME>\u00e1vez has yearned for the creation of this project to empower Venezuelan construction .", "<NAME>\u00e9ndez, vice president of the Productive Economic Area, said Venezuelan President <NAME>\u00e1vez has yearned for the creation of this project to empower Venezuelan construction.", ], [ "In ancient Roman culture , Sunday was the day of the Sun god .", "In ancient Roman culture, Sunday was the day of the Sun god.", ], [ "According to him from his autobiography `` Tezkiret\u00fc ' l B\u00fcnyan '' , his masterpiece is the Selimiye Mosque in Edirne .", 'According to him from his autobiography "Tezkiret\u00fc \'l B\u00fcnyan", his masterpiece is the Selimiye Mosque in Edirne.', ], [ "These groups appear to have had a common origin with Viridiplantae and the three groups form the clade Archaeplastida , whose name implies that their chloroplasts were derived from a single ancient endosymbiotic event .", "These groups appear to have had a common origin with Viridiplantae and the three groups form the clade 
Archaeplastida, whose name implies that their chloroplasts were derived from a single ancient endosymbiotic event.", ], [ "Yesterday , the US 's Obama administration said it has ordered an investigation into the appropriateness of military hardware being sold to and deployed by police forces in the United States .", "Yesterday, the US's Obama administration said it has ordered an investigation into the appropriateness of military hardware being sold to and deployed by police forces in the United States.", ], [ "The five primary classifications can be further divided into secondary classifications such as rain forest , monsoon , tropical savanna , humid subtropical , humid continental , oceanic climate , Mediterranean climate , desert , steppe , subarctic climate , tundra , and polar ice cap .", "The five primary classifications can be further divided into secondary classifications such as rain forest, monsoon, tropical savanna, humid subtropical, humid continental, oceanic climate, Mediterranean climate, desert, steppe, subarctic climate, tundra, and polar ice cap.", ], [ "Some of the pictures and video coming out of the country do n't show blood and dismembered bodies , but instead show , according to witnesses , those who died from apparent suffocation ; some were foaming at the mouth and others were having convulsions .", "Some of the pictures and video coming out of the country do n't show blood and dismembered bodies, but instead show, according to witnesses, those who died from apparent suffocation; some were foaming at the mouth and others were having convulsions.", ], [ "Its glyphs were formed by pressing the end of a reed stylus into moist clay , not by tracing lines in the clay with the stylus as had been done previously .", "Its glyphs were formed by pressing the end of a reed stylus into moist clay,
<reponame>matiscke/eccentricWarmJupiter<gh_stars>0 """ modified from <NAME> script. """ import numpy as np import juliet from math import log10, floor import decimal import aux # https://exoplanetarchive.ipac.caltech.edu/docs/poet_calculations.html G = 6.67408e-11 # m3 kg-1 s-2 solarrad2m = 6.957e8 # solar radii in meters solarmass2kg = 1.9891e30 # solar mass in kg earthrad2m = 6.371e6 # earth radii in meters jupiterrad2m = 69.911e6 # jupiter radii in meters earthmass2kg = 5.972e24 # earth mass in kg jupitermass2kg = 1.898e27 # jupiet mass in kg ergs2solarlum = 3.839e33 # erg s-1 = 1L solar AU2m = 149600000000 # 1 AU = 149600000000m #ms$^{-1}$ priors_dict = {\ 'P': {'units': 'd', 'description': 'Period'},\ 't0': {'units': 'd', 'description': 'Time of transit center'},\ 'a': {'units': '??', 'description': '??'},\ 'r1': {'units': '---', 'description': 'Parametrization for p and b'},\ 'r2': {'units': '---', 'description': 'Parametrization for p and b'},\ 'b': {'units': '---', 'description': 'Impact factor'},\ 'p': {'units': '---', 'description': 'Planet-to-star ratio'},\ 'K': {'units': 'm/s', 'description': 'Radial velocity semi-amplitude'},\ 'ecc': {'units': '---', 'description': 'Orbital eccentricity'},\ 'sesinomega': {'units': '---', 'description': 'Parametrization for $e$ and $\\omega$'},\ 'secosomega': {'units': '---', 'description': 'Parametrization for $e$ and $\\omega$'},\ 'esinomega': {'units': '---', 'description': 'Parametrization for $e$ and $\\omega$'},\ 'ecosomega': {'units': '---', 'description': 'Parametrization for $e$ and $\\omega$'},\ # RV instrumental 'mu': {'units': 'm/s', 'description': 'Systemic velocity for '},\ 'sigma_w': {'units': 'm/s', 'description': 'Extra jitter term for '},\ 'rv_quad': {'units': 'm/s/d$^2$', 'description': 'Quadratic term for the RVs'},\ 'rv_slope': {'units': 'm/s/d', 'description': 'Linear term for the RVs'},\ 'rv_intercept': {'units': 'm/s', 'description': 'Intercept term for the Rvs'},\ # Photometry instrumental 
'mdilution': {'units': '---', 'description': 'Dilution factor for '},\ 'mflux': {'units': 'ppm', 'description': 'Relative flux offset for '},\ # 'sigma_w': {'units': 'ppm', 'description': 'Extra jitter term for '},\ 'q1': {'units': '---', 'description': 'Linear limb-darkening parametrization'},\ 'q2': {'units': '---', 'description': 'Quadratic limb-darkening parametrization'},\ # Theta terms 'theta0': {'units': '', 'description': 'Offset value applied to \\textcolor{red}{add}'},\ 'theta1': {'units': '', 'description': 'Offset value applied to \\textcolor{red}{add}'},\ 'theta2': {'units': '', 'description': 'Offset value applied to \\textcolor{red}{add}'},\ 'theta3': {'units': '', 'description': 'Offset value applied to \\textcolor{red}{add}'},\ 'theta4': {'units': '', 'description': 'Offset value applied to \\textcolor{red}{add}'},\ 'theta5': {'units': '', 'description': 'Offset value applied to \\textcolor{red}{add}'},\ 'theta6': {'units': '', 'description': 'Offset value applied to \\textcolor{red}{add}'},\ # Other 'rho': {'units': 'kg/m$^3$', 'description': 'Stellar density'},\ # GP 'GP_Prot': {'units': '\\textcolor{red}{add}', 'description': 'Rotational period component for the GP'},\ 'GP_Gamma': {'units': '\\textcolor{red}{add}', 'description': 'Amplitude of periodic component for the GP'},\ 'GP_sigma': {'units': 'm/s', 'description': 'Amplitude of the GP component'},\ 'GP_alpha': {'units': '\\textcolor{red}{add}', 'description': 'Parametrization of the lengthscale of the GP'},\ } dists_types = {'normal': '\\mathcal{N}', \ 'uniform': '\\mathcal{U}', \ 'loguniform': '\\mathcal{J}', \ 'jeffreys': '\\mathcal{J}', \ 'beta': '\\mathcal{B}', \ 'exponential': '\\Gamma', \ 'truncatednormal': '\\mathcal{N_T}'}#, 'fixed'] order_planetary = ['P', 't0', 'a', 'r1', 'r2', 'b', 'p', 'K', 'ecc', 'omega', 'sesinomega', 'secosomega', 'esinomega', 'ecosomega'] planet_names = ['', 'b', 'c', 'd', 'e', 'f'] # the 0th index is empty just to keep indexing normal later on orderrv_lt = 
['rv_quad', 'rv_slope', 'rv_intercept'] thetaterms = ['theta0', 'theta1', 'theta2', 'theta3', 'theta4', 'theta5', 'theta6'] orderinst_lc = np.append(['mdilution','mflux', 'sigma_w', 'q1', 'q2'], thetaterms) orderinst_rv = np.append(['mu','sigma_w'], thetaterms) linend = '\\\\' # end of a line def print_data_table(dataset, type='rv'): out_folder = dataset.out_folder # if latex_fil = out_folder+'/data_table.tex' fout = open('data_table.tex', 'w') def produce_this(s_param, dataset, param, key, params_priors, inst): if dataset.priors[key]['distribution'] == 'fixed': s_param += '{} (fixed)'.format(dataset.priors[key]['hyperparameters']) elif dataset.priors[key]['distribution'] == 'exponential': s_param += '$'+dists_types[dataset.priors[key]['distribution']]+'('+str(dataset.priors[key]['hyperparameters'][0])+')' # s_param += ')$ & ' elif dataset.priors[key]['distribution'] == 'truncatednormal': s_param += '$'+dists_types[dataset.priors[key]['distribution']]+'('+\ str(dataset.priors[key]['hyperparameters'][0])+','+\ str(dataset.priors[key]['hyperparameters'][1])+','+\ str(dataset.priors[key]['hyperparameters'][2])+','+\ str(dataset.priors[key]['hyperparameters'][3])+')' # s_param += ')$ & ' else: if dataset.priors[key]['distribution'] not in dists_types: print('BUG_PRODUCE_THIS_FUNCTION') print(key) print(dataset.priors[key]) print() quit() s_param += '$'+dists_types[dataset.priors[key]['distribution']]+'('+str(dataset.priors[key]['hyperparameters'][0])+','+\ str(dataset.priors[key]['hyperparameters'][1]) if dataset.priors[key]['distribution'] in ['normal', 'loguniform', 'jeffreys']: s_param += '^2' s_param+=')' s_param += ' $ & ' if param == 'sigma_w': s_param += 'ppm & ' # there already is a sigma_w for RV with ms-1 else: s_param += priors_dict[param]['units']+' & ' if param == 'q1' and 'q2' in params_priors: s_param += priors_dict['q2']['description'] # change from linear to quad else: s_param += priors_dict[param]['description'] if 
priors_dict[param]['description'][-4:] == 'for ': s_param += inst s_param += linend return s_param def print_prior_table(dataset): out_folder = dataset.out_folder latex_fil = out_folder+'/prior_table.tex' fout = open(latex_fil, 'w') params_priors = np.array([i for i in dataset.priors.keys()]) # transform it to array in order to manipulate it within the function ## Beginning of the table tab_start = ['\\begin{table*}', '\\centering', '\\caption{Prior parameters}', '\\label{tab:priors}', '\\begin{tabular}{lccl}',\ '\\hline', '\\hline', '\\noalign{\\smallskip}', 'Parameter name & Prior & Units & Description \\\\',\ '\\noalign{\\smallskip}', '\\hline', '\\hline'] for elem in tab_start: fout.write(elem+'\n') ## Stellar parameters first if 'rho' in params_priors: fout.write('\\noalign{\\smallskip}\n') fout.write('Stellar Parameters '+linend+' \n') fout.write('\\noalign{\\smallskip}\n') s_param = '~~~' s_param += '$\\rho_{*}$' s_param += ' & ' s_param = produce_this(s_param=s_param, dataset=dataset, param='rho', key='rho', params_priors=params_priors, inst='') fout.write(s_param+'\n') params_priors = np.delete(params_priors, np.where(params_priors == 'rho')[0]) ## Planet part first # Save information stored in the prior: the dictionary, number of transiting planets, # number of RV planets, numbering of transiting and rv planets (e.g., if p1 and p3 transit # and all of them are RV planets, numbering_transit = [1,3] and numbering_rv = [1,2,3]). # Save also number of *free* parameters (FIXED don't count here). 
rv_planets = dataset.numbering_rv_planets transiting_planets = dataset.numbering_transiting_planets for pl in np.unique(np.append(rv_planets, transiting_planets)): fout.write('\\noalign{\\smallskip}\n') fout.write('Parameters for planet {} '.format(planet_names[pl])+linend+' \n') fout.write('\\noalign{\\smallskip}\n') for param in order_planetary: key = '{}_p{}'.format(param, pl) if key not in params_priors: continue s_param = '~~~' if param == 'P': s_param += '$P_{' + planet_names[pl] + '}$' elif param == 't0': s_param += '$t_{0,' + planet_names[pl] + '}$' elif param == 'a': s_param += '$a_{' + planet_names[pl] + '}/R_*$' elif param == 'r1': s_param += '$r_{1,' + planet_names[pl] + '}$' elif param == 'r2': s_param += '$r_{2,' + planet_names[pl] + '}$' elif param == 'p': s_param += '$R_{' + planet_names[pl] + '}/R_*$' elif param == 'b': s_param += '$b = (a_{' + planet_names[pl] + '}/R_*) \\cos (i_{'+ planet_names[pl] +'}) $' elif param == 'ecc': s_param += '$e_{' + planet_names[pl] + '}$' elif param == 'omega': s_param += '$\\omega_{' + planet_names[pl] + '}$' elif param == 'sesinomega': s_param += '$S_{1,' + planet_names[pl] + '} = \\sqrt{e_'+planet_names[pl]+'}\\sin \\omega_'+planet_names[pl]+'$' elif param == 'secosomega': s_param += '$S_{2,' + planet_names[pl] + '} = \\sqrt{e_'+planet_names[pl]+'}\\cos \\omega_'+planet_names[pl]+'$' elif param == 'esinomega': s_param += '$S_{1,' + planet_names[pl] + '} = e_'+planet_names[pl]+'\\sin \\omega_'+planet_names[pl]+'$' elif param == 'ecosomega': s_param += '$S_{1,' + planet_names[pl] + '} = e_'+planet_names[pl]+'\\cos \\omega_'+planet_names[pl]+'$' else: s_param += '$' + param + '_{' + planet_names[pl] + '}$' s_param += ' & ' s_param = produce_this(s_param=s_param, dataset=dataset, param=param, key=key, params_priors=params_priors, inst='') fout.write(s_param+'\n') params_priors = np.delete(params_priors, np.where(params_priors == key)[0]) # Instruments instruments_rv = dataset.inames_rv instruments_lc = 
dataset.inames_lc if instruments_rv is not None and len(instruments_rv) > 0: fout.write('\\noalign{\\smallskip}\n') fout.write('RV instrumental parameters'+linend+'\n') fout.write('\\noalign{\\smallskip}\n') for inst in instruments_rv: for param in orderinst_rv: key = '{}_{}'.format(param, inst) if key not in params_priors: continue s_param = '~~~' if param == 'mu': s_param += '$\\mu' elif param == 'sigma_w': s_param += '$\\sigma' elif param[:5] == 'theta': s_param += '$\\theta_{'+str(param[-1])+',' s_param += '_{\\textnormal{' + inst + '}}$ & ' s_param = produce_this(s_param=s_param, dataset=dataset, param=param, key=key, params_priors=params_priors, inst=inst) fout.write(s_param+'\n') params_priors = np.delete(params_priors, np.where(params_priors == key)[0]) # if there were linear/quadratic trends for the rv if 'rv_slope' in params_priors: for param in orderrv_lt: key = param if key not in params_priors: continue s_param = '~~~' if param == 'rv_quad': s_param += '$\\textnormal{RV}_{\\textnormal{quadratic}}$' elif param == 'rv_slope': s_param += '$\\textnormal{RV}_{\\textnormal{linear}}$' else: s_param += '$\\textnormal{RV}_{\\textnormal{intercept}}$' s_param += ' & ' s_param = produce_this(s_param=s_param, dataset=dataset, param=param, key=key, params_priors=params_priors, inst='') fout.write(s_param+'\n') params_priors = np.delete(params_priors, np.where(params_priors == key)[0]) # instruments_lc = ['TESSERACT+TESS'] if instruments_lc is not None and len(instruments_lc) > 0: fout.write('\\noalign{\\smallskip}\n') fout.write('Photometry instrumental parameters'+linend+'\n') fout.write('\\noalign{\\smallskip}\n') for inst in instruments_lc: for param in orderinst_lc: key = '{}_{}'.format(param, inst) if key not in params_priors: continue s_param = '~~~' print('lc param "{}"'.format(param)) if param == 'mdilution': s_param += '$D_{' elif param == 'mflux': s_param += '$M_{' elif param == 'sigma_w': s_param += '$\\sigma_{' elif param == 'q1': s_param += '$q_{1,' 
elif param == 'q2' and key in params_priors: s_param += '$q_{2,' elif param[:5] == 'theta' and key in params_priors: s_param += '$\\theta_{'+str(param[-1])+',' s_param += '\\textnormal{' + inst + '}}$ & ' s_param = produce_this(s_param=s_param, dataset=dataset, param=param, key=key, params_priors=params_priors, inst=inst) fout.write(s_param+'\n') params_priors = np.delete(params_priors, np.where(params_priors == key)[0]) fout.write('\\noalign{\\medskip}\n') # print('prior leftover params', params_priors) gp_names = ['sigma', 'alpha', 'Gamma', 'Prot', 'B', 'L', 'C', 'timescale', 'rho', 'S0', 'Q', 'omega0'] gp_names_latex = ['\\sigma', '\\alpha', '\\Gamma', 'P_{rot}', \ '\\textnormal{B}',
# String form: either a repository or remote location meta = {} url = urlparse.urlparse(file_data) if url.netloc: meta['remote'] = file_data else: path = file_data if not 'remote' in meta: location = os.path.expanduser(path) location_paths.append(os.path.dirname(location)) files_data.append(dict(path=path, location=location, meta=meta)) # Get the common prefix of all local paths if len(location_paths): if len(location_paths) > 1: common_prefix = common_directory(location_paths) else: common_prefix = location_paths[0] # Strip common prefix from all files with a location for file_data in files_data: if file_data['location'] != None: file_data['location'] = file_data['location'][len(common_prefix):] if file_data['location'][0] == '/': file_data['location'] = file_data['location'][1:] return files_data @classmethod def new(cls, name, files_data=None, metadata=None, do_bundle=False, publish=True): """ A convenience factory method that creates a new, unsaved Resource of the given name, using file information and metadata. The file data can be a single string filename or a dictionary of file metadata. The filename can either be a local path ('path') or a remote URL ('remote') that is either HTTP or FTP. For more than one file provide an array of these. The rest of the keyword arguments are used as Resource meta-data. The Resource and all its ResourceFile objects ready to be saved to a Repository. 
""" resource_files = [] bundle = None if files_data: if do_bundle: bundle_path = cls.bundle_temp_path(name) bundle = ResourceFile(bundle_path, resource=None, metadata={ 'location': posixpath.join(Repository.files_prefix, name, '.bundle', 'bundle.tar.gz')}) mkdir_p(os.path.dirname(bundle_path)) bundle.files = [] bundle_archive = tarfile.open(name=bundle_path, mode='w:gz') # resource_files.append(bundle) files_data = cls.__normalise_file_data(files_data) for file_data in files_data: path = file_data.pop('path', None) location = file_data.pop('location', None) meta = file_data.pop('meta', None) if 'remote' in meta: remote_url = urllib2.urlopen(urllib2.Request(meta['remote'])) keyset = set(k.lower() for k in meta) for header_name in [ 'etag', 'last-modified', 'content-length', 'content-type' ]: if not header_name in keyset and remote_url.info().has_key(header_name): meta[header_name] = remote_url.info().getheader(header_name) else: if do_bundle: meta['bundled'] = True meta['location'] = posixpath.join(Repository.files_prefix, name, location) if path: path = os.path.expanduser(path) if not 'md5sum' in meta: meta['md5sum'] = checksum(path) if not 'last-modified' in meta: meta['last-modified'] = time.strftime(TIME_FORMAT, time.gmtime(os.path.getmtime(path))) if not 'content-length' in meta: meta['content-length'] = os.stat(path).st_size if bundle: bundle_archive.add(name=path, arcname=location) else: raise ValueError("For Resource files, either a path to a local file or a remote URL is required") resource_file = ResourceFile(path, resource=None, metadata=meta) resource_files.append(resource_file) if do_bundle: bundle_archive.close() resource = cls(name, files=resource_files, metadata=metadata, publish=publish) if publish: missing_fields = resource.validate_mandatory_metadata() if missing_fields: raise MetadataException(missing_fields) if do_bundle: resource.bundle = bundle for resource_file in resource_files: resource_file.resource = resource return resource @classmethod 
def load(cls, local_resource_filename): """ Load a Resource from a local JSON file containing Resource meta-data. """ resource = cls(None, None) resource.reload(local_resource_filename) resource.path = local_resource_filename return resource def _process_files(self, files_data, resource_name=None, bundle_archive=None): """ Processes normalised file data """ if not resource_name: name = self.name else: name = resource_name resource_files = [] for file_data in files_data: path = file_data.pop('path', None) location = file_data.pop('location', None) meta = file_data.pop('meta', None) if 'remote' in meta: remote_url = urllib2.urlopen(urllib2.Request(meta['remote'])) keyset = set(k.lower() for k in meta) for header_name in [ 'etag', 'last-modified', 'content-length', 'content-type' ]: if not header_name in keyset and remote_url.info().has_key(header_name): meta[header_name] = remote_url.info().getheader(header_name) else: if bundle_archive: meta['bundled'] = True meta['location'] = posixpath.join(Repository.files_prefix, name, location) if path: path = os.path.expanduser(path) if not 'md5sum' in meta: meta['md5sum'] = checksum(path) if not 'last-modified' in meta: meta['last-modified'] = time.strftime(TIME_FORMAT, time.gmtime(os.path.getmtime(path))) if not 'content-length' in meta: meta['content-length'] = os.stat(path).st_size if bundle_archive: bundle_archive.add(name=path, arcname=location) else: raise ValueError("For Resource files, either a path to a local file or a remote URL is required") resource_file = ResourceFile(path, resource=None, metadata=meta) resource_files.append(resource_file) return resource_files def add_files(self, files=None, add_to_published=False, overwrite=False): if self.published and add_to_published == False: raise ValueError("Cannot add files to a published Resource unless override is specified.") if files: files = self.__normalise_file_data(files) resource_files = self._process_files(files) # Check if any of the files already exist 
conflicting_file_names = [] conflicting_files = [] for existing_file in self.files: for resource_file in resource_files: if existing_file.metadata['location'] == resource_file.metadata['location']: conflicting_file_names.append(resource_file.path) conflicting_files.append(resource_file.metadata['location']) if conflicting_file_names and not overwrite: raise AddFilesException(conflicting_file_names) if overwrite: non_conflicting_files = [] for existing_file in self.files: if existing_file.metadata['location'] not in conflicting_files: non_conflicting_files.append(existing_file) self.files = non_conflicting_files + resource_files else: self.files += resource_files return True else: return False def delete_files_from_remote(self, filenames, delete_from_published=False): if self.published and delete_from_published == False: raise ValueError("Cannot delete files from a published Resource unless override is specified.") matching_files = [] files_not_found = [] for filename in filenames: found = False for existing_file in self.files: if filename == existing_file.storage_location(): matching_files.append(existing_file) found = True break if not found: files_not_found.append(filename) if files_not_found: raise DeleteFilesException(non_existent_files=files_not_found) # Delete from files list for file in matching_files: self.files.remove(file) self.files_to_be_deleted = matching_files return True def validate_mandatory_metadata(self): """ Checks if mandatory fields are present, and the values are not None. Returns list of fields that are not found. 
""" fields_not_found = [] for field in Resource.mandatory_metadata_fields: if not field in self.metadata or self.metadata[field] is None: fields_not_found.append(field) return fields_not_found def add_files_from_storage_paths(self, file_paths): for path, (size, last_modified, md5sum) in file_paths.iteritems(): meta = {} meta['location'] = path meta['content-length'] = size dt = datetime.strptime(last_modified, ISO_8601_UTC_FORMAT) meta['last-modified'] = datetime.strftime(dt, TIME_FORMAT) + " UTC" meta['md5sum'] = md5sum resource_file = ResourceFile(path, resource=None, metadata=meta) # Check if resource of same name already exists (in case this is an overwrite) already_exists = False for i in range(len(self.files)): if self.files[i].location() == resource_file.location(): self.files[i] = resource_file already_exists = True break # Otherwise assume this is a new file if not already_exists: self.files.append(resource_file) def reload(self, local_resource_filename): """ Reload a Resource from a Resource metadata file (local). """ if local_resource_filename and os.path.exists(local_resource_filename): resource_files = [] with io.open(local_resource_filename, encoding='UTF-8') as fh: data = json.load(fh) files_data = data.pop('files', []) for file_data in files_data: resource_files.append(ResourceFile(None, resource=self, metadata=file_data)) bundle_data = data.pop('bundle', None) if bundle_data: self.bundle = ResourceFile(None, resource=self, metadata=bundle_data) self.name = data.pop('name', None) self.path = local_resource_filename self.metadata = data.get('metadata', dict()) self.files = resource_files self.published = data.pop('published', None) def to_json(self, **kwargs): """ Create a JSON string representation of the Resource: its files and meta-data. 
""" return Resource.ResourceJSONEncoder(ensure_ascii=False, encoding='UTF-8', **kwargs).encode(self) def write(self, dest_path, mod=stat.S_IRWXU): """ Write the JSON file representation of a Resource to a destination file. """ if os.path.exists(dest_path): os.remove(dest_path) else: mkdir_p(os.path.dirname(dest_path)) with io.open(dest_path, encoding='UTF-8', mode='w') as fh: logger.debug("Writing JSON serialised resource to %s", dest_path) fh.write(unicode(self.to_json())) os.chmod(dest_path, mod) self.path = dest_path def local_paths(self): """ Get a list of local filenames for all the File data associated with this Resource. (Note that this method will trigger a refresh of the Resource, ensuring that all locally-stored data is relatively up-to-date.) """ if self.repository: self.repository.refresh_resource(self, True) paths = [] do_refresh = True if self.bundle: self.bundle.unpack_bundle(do_refresh=True) for resource_file in self.files: paths.append(resource_file.local_path()) return paths def files_matching(self, pattern): """ Return a list of ResourceFile objects where the location or remote matches a given pattern. If no files match an empty array is returned. """ matches = [] for resource_file in self.files: if re.search(pattern, resource_file.location_or_remote()): matches.append(resource_file) return matches def file_ending(self, suffix): """ Returns the first ResourceFile ending with the given suffix. If no ResourceFiles match, None is returned. """ match = None for resource_file in self.files: if resource_file.location_or_remote().endswith(suffix): if match: warnings.warn("Found multiple files: also '" + match.location_or_remote() + "'", RuntimeWarning) match = resource_file return match def update_bundle(self): """ Update the bundle with any local file changes. 
""" if not self.bundle: return # no-op bundle_file = tarfile.open(self.bundle.local_path(), mode='w:gz') for resource_file in self.files: if resource_file.path and resource_file.location(): storage_location = resource_file.storage_location() bundle_file.add(resource_file.path, resource_file.storage_location()) bundle_file.close() def save(self): """ Helper method that saves the resource back to the repository that it was loaded from. Can only save if the resource was loaded from a repository, otherwise it throws. """ if not self.repository: raise ValueError("Cannot save a resource that is not loaded from a repository") # Always overwrite the existing one since it was loaded from the repository anyway. self.repository.save(self, overwrite=True) def delete(self): if not self.repository: raise ValueError("Cannot delete a resource that is not loaded from a repository") self.repository.delete(self) def is_bundled(self): return self.bundle != None def publish(self): if self.published: print "Nothing to do, resource is already 'Published'" return False missing_fields = self.validate_mandatory_metadata() if missing_fields: raise ValueError("Missing mandatory fields: '{0}'".format(missing_fields)) self.repository.rebuild_file_list(self) self.published=True self.repository.save(self, overwrite=True) self.repository.refresh_resource(resource=self, refresh_all=True) return True def unpublish(self): if not self.published: print "Nothing to
import re import string from urllib.parse import urlsplit from xml.sax.saxutils import unescape import nltk import numpy as np from twitter_bot_type_classification.features.utils import URL_SHORTER_REGEX, URL_REGEX, EMOJI_REGEX, HASHTAG_REGEX, \ USERNAME_REGEX, NUMBER_CHAR_REGEX, \ get_expanded_url, URL_PATH_SPLIT, COORDINATE_GROUPS, HTML_ESCAPE_TABLE # download punkt sentence tokenizer model nltk.download("punkt", quiet=True) # , download_dir=) TEXT_ENCODING = "UTF-8" PAGINATION_START_REGEX = re.compile(r"(^(\d+/\d+)|(?<=^\()\d+/\d+(?=\)))(?![.,]\d)") PAGINATION_END_REGEX = re.compile(r"(?<!\d[.,])((\d+/\d+)\Z|(?<=\()\d+/\d+(?=\)\Z))") # ISO 639 1 Language codes # The following changes have been made in comparison to ISO-639-1: # "iw" for Hebrew, "ckb" for Kurdish and "in" for Indonesian was added LANG_CODES_IDX = { "ab": 1, "aa": 2, "af": 3, "ak": 4, "sq": 5, "am": 6, "ar": 7, "an": 8, "hy": 9, "as": 10, "av": 11, "ae": 12, "ay": 13, "az": 14, "bm": 15, "ba": 16, "eu": 17, "be": 18, "bn": 19, "bh": 20, "bi": 21, "bs": 22, "br": 23, "bg": 24, "my": 25, "ca": 26, "ch": 27, "ce": 28, "ny": 29, "zh": 30, "cv": 31, "kw": 32, "co": 33, "cr": 34, "hr": 35, "cs": 36, "da": 37, "dv": 38, "nl": 39, "dz": 40, "en": 41, "eo": 42, "et": 43, "ee": 44, "fo": 45, "fj": 46, "fi": 47, "fr": 48, "ff": 49, "gl": 50, "ka": 51, "de": 52, "el": 53, "gn": 54, "gu": 55, "ht": 56, "ha": 57, "he": 58, "hz": 59, "hi": 60, "ho": 61, "hu": 62, "ia": 63, "id": 64, "ie": 65, "ga": 66, "ig": 67, "ik": 68, "io": 69, "is": 70, "it": 71, "iu": 72, "ja": 73, "jv": 74, "kl": 75, "kn": 76, "kr": 77, "ks": 78, "kk": 79, "km": 80, "ki": 81, "rw": 82, "ky": 83, "kv": 84, "kg": 85, "ko": 86, "ku": 87, "kj": 88, "la": 89, "lb": 90, "lg": 91, "li": 92, "ln": 93, "lo": 94, "lt": 95, "lu": 96, "lv": 97, "gv": 98, "mk": 99, "mg": 100, "ms": 101, "ml": 102, "mt": 103, "mi": 104, "mr": 105, "mh": 106, "mn": 107, "na": 108, "nv": 109, "nd": 110, "ne": 111, "ng": 112, "nb": 113, "nn": 114, "no": 115, "ii": 116, 
"nr": 117, "oc": 118, "oj": 119, "cu": 120, "om": 121, "or": 122, "os": 123, "pa": 124, "pi": 125, "fa": 126, "pl": 127, "ps": 128, "pt": 129, "qu": 130, "rm": 131, "rn": 132, "ro": 133, "ru": 134, "sa": 135, "sc": 136, "sd": 137, "se": 138, "sm": 139, "sg": 140, "sr": 141, "gd": 142, "sn": 143, "si": 144, "sk": 145, "sl": 146, "so": 147, "st": 148, "es": 149, "su": 150, "sw": 151, "ss": 152, "sv": 153, "ta": 154, "te": 155, "tg": 156, "th": 157, "ti": 158, "bo": 159, "tk": 160, "tl": 161, "tn": 162, "to": 163, "tr": 164, "ts": 165, "tt": 166, "tw": 167, "ty": 168, "ug": 169, "uk": 170, "ur": 171, "uz": 172, "ve": 173, "vi": 174, "vo": 175, "wa": 176, "cy": 177, "wo": 178, "fy": 179, "xh": 180, "yi": 181, "yo": 182, "za": 183, "zu": 184, "in": 185, "iw": 186, "ckb": 187, "und": 188 } TWEET_SOURCES_IDX = { "web": 1, "Twitter Web App": 2, "Twitter for Android": 3, "Twitter for Android Tablets": 4, "Twitter for iPhone": 5, "Twitter for iPad": 6, "Twitter for Mac": 7, "Twitter for BlackBerry®": 8, "Twitter for BlackBerry": 9, "Twitter for Windows": 10, "Twitter for Windows Phone": 11, "Twitter for Websites": 12, "Twitter for Google TV": 13, "TweetDeck": 14, "": 15 # empty source } TWEET_CUSTOM_SOURCES_IDX = { "url": 16, "other": 17 } # ISO 3166-1 alpha-2 codes for countries in tweets # https://developer.twitter.com/en/docs/tutorials/filtering-tweets-by-location # Note: the temporary country code for Kosovo (XK) was added COUNTRY_CODES_IDX = { "AD": 1, "AE": 2, "AF": 3, "AG": 4, "AI": 5, "AL": 6, "AM": 7, "AO": 8, "AQ": 9, "AR": 10, "AS": 11, "AT": 12, "AU": 13, "AW": 14, "AX": 15, "AZ": 16, "BA": 17, "BB": 18, "BD": 19, "BE": 20, "BF": 21, "BG": 22, "BH": 23, "BI": 24, "BJ": 25, "BL": 26, "BM": 27, "BN": 28, "BO": 29, "BQ": 30, "BR": 31, "BS": 32, "BT": 33, "BV": 34, "BW": 35, "BY": 36, "BZ": 37, "CA": 38, "CC": 39, "CD": 40, "CF": 41, "CG": 42, "CH": 43, "CI": 44, "CK": 45, "CL": 46, "CM": 47, "CN": 48, "CO": 49, "CR": 50, "CU": 51, "CV": 52, "CW": 53, "CX": 54, "CY": 
55, "CZ": 56, "DE": 57, "DJ": 58, "DK": 59, "DM": 60, "DO": 61, "DZ": 62, "EC": 63, "EE": 64, "EG": 65, "EH": 66, "ER": 67, "ES": 68, "ET": 69, "FI": 70, "FJ": 71, "FK": 72, "FM": 73, "FO": 74, "FR": 75, "GA": 76, "GB": 77, "GD": 78, "GE": 79, "GF": 80, "GG": 81, "GH": 82, "GI": 83, "GL": 84, "GM": 85, "GN": 86, "GP": 87, "GQ": 88, "GR": 89, "GS": 90, "GT": 91, "GU": 92, "GW": 93, "GY": 94, "HK": 95, "HM": 96, "HN": 97, "HR": 98, "HT": 99, "HU": 100, "ID": 101, "IE": 102, "IL": 103, "IM": 104, "IN": 105, "IO": 106, "IQ": 107, "IR": 108, "IS": 109, "IT": 110, "JE": 111, "JM": 112, "JO": 113, "JP": 114, "KE": 115, "KG": 116, "KH": 117, "KI": 118, "KM": 119, "KN": 120, "KP": 121, "KR": 122, "KW": 123, "KY": 124, "KZ": 125, "LA": 126, "LB": 127, "LC": 128, "LI": 129, "LK": 130, "LR": 131, "LS": 132, "LT": 133, "LU": 134, "LV": 135, "LY": 136, "MA": 137, "MC": 138, "MD": 139, "ME": 140, "MF": 141, "MG": 142, "MH": 143, "MK": 144, "ML": 145, "MM": 146, "MN": 147, "MO": 148, "MP": 149, "MQ": 150, "MR": 151, "MS": 152, "MT": 153, "MU": 154, "MV": 155, "MW": 156, "MX": 157, "MY": 158, "MZ": 159, "NA": 160, "NC": 161, "NE": 162, "NF": 163, "NG": 164, "NI": 165, "NL": 166, "NO": 167, "NP": 168, "NR": 169, "NU": 170, "NZ": 171, "OM": 172, "PA": 173, "PE": 174, "PF": 175, "PG": 176, "PH": 177, "PK": 178, "PL": 179, "PM": 180, "PN": 181, "PR": 182, "PS": 183, "PT": 184, "PW": 185, "PY": 186, "QA": 187, "RE": 188, "RO": 189, "RS": 190, "RU": 191, "RW": 192, "SA": 193, "SB": 194, "SC": 195, "SD": 196, "SE": 197, "SG": 198, "SH": 199, "SI": 200, "SJ": 201, "SK": 202, "SL": 203, "SM": 204, "SN": 205, "SO": 206, "SR": 207, "SS": 208, "ST": 209, "SV": 210, "SX": 211, "SY": 212, "SZ": 213, "TC": 214, "TD": 215, "TF": 216, "TG": 217, "TH": 218, "TJ": 219, "TK": 220, "TL": 221, "TM": 222, "TN": 223, "TO": 224, "TR": 225, "TT": 226, "TV": 227, "TW": 228, "TZ": 229, "UA": 230, "UG": 231, "UM": 232, "US": 233, "UY": 234, "UZ": 235, "VA": 236, "VC": 237, "VE": 238, "VG": 239, "VI": 240, 
"VN": 241, "VU": 242, "WF": 243, "WS": 244, "YE": 245, "YT": 246, "ZA": 247, "ZM": 248, "ZW": 249, "XK": 250 } COUNTRY_CODES_IDX_UNDEF = 0 TWEET_TEXT_SIMILARITY_FEATURES = list(range(14, 43)) TWEET_FEATURES_INDEX = { "retweet_count": 0, "likes_count": 1, "coordinates_group": 2, "country_code_encoded": 3, "source_encoded": 4, "is_retweet": 5, "is_answer": 6, "is_self_reply": 7, "contains_quote": 8, "number_of_withheld_countries": 9, "lang_encoded": 10, "number_of_photos": 11, "number_of_videos": 12, "number_of_gifs": 13, "contains_pagination": 14, "contains_only_emojis": 15, "contains_only_urls": 16, "number_of_urls": 17, "number_of_other_shortened_urls": 18, "number_of_url_domains_matches_username": 19, "number_of_url_domains_matches_profile_url_domain": 20, "number_of_urls_matches_tweet_text": 21, "number_of_hashtags": 22, "mean_hashtag_length": 23, "max_hashtag_length": 24, "min_hashtag_length": 25, "number_of_user_mentions": 26, "number_of_sentences": 27, "mean_sentence_length": 28, "number_of_numbers": 29, "max_number_length": 30, "min_number_length": 31, "max_word_length": 32, "min_word_length": 33, "number_of_words": 34, "mean_number_of_words_per_sentences": 35, "max_number_of_words_per_sentences": 36, "min_number_of_words_per_sentences": 37, "number_of_punctuations": 38, "number_of_uppercase_words": 39, "cleaned_tweet_text_length": 40, "tweet_text_length": 41, "number_emojis": 42 } N_FEATURES = len(TWEET_FEATURES_INDEX) class TweetFeatures(np.ndarray): def __new__(cls, tweet, user): """ Calculate all tweet features and return as a numpy vector :param tweet: tweepy tweet object :param user: tweepy user object :return: numpy 1 dim vector with all calculated tweet features """ #### tweet metadata features #### likes_count = 0 if getattr(tweet, "favorite_count", None) is None else tweet.favorite_count # longitude (-180 to 180), latitude (-90 to 90) coordinates_group = -1 if tweet.coordinates is not None: coords = tweet.coordinates.get("coordinates", []) if 
len(coords) > 1: coordinates_group = len(COORDINATE_GROUPS) for i, paths in enumerate(COORDINATE_GROUPS): if coordinates_group != len(COORDINATE_GROUPS): break for path in paths: if path.contains_point(coords[:2], radius=-1e-8): coordinates_group = i break country_code_encoded = COUNTRY_CODES_IDX_UNDEF place = tweet.place if place is not None: country_code_encoded = COUNTRY_CODES_IDX.get(place.country_code, COUNTRY_CODES_IDX_UNDEF) source_encoded = TWEET_SOURCES_IDX.get(tweet.source, None) if source_encoded is None: if URL_REGEX.match(tweet.source): source_encoded = TWEET_CUSTOM_SOURCES_IDX["url"] else: source_encoded = TWEET_CUSTOM_SOURCES_IDX["other"] is_retweet = int(getattr(tweet, "retweeted_status", None) is not None) is_answer = int(getattr(tweet, "in_reply_to_status_id", None) is not
                              filter_min_counts_per_sample))

    sample_names = r('''colnames(countsTable)''')
    nempty_samples = sum(empty_samples)

    if nempty_samples:
        # Report which samples fall below the per-sample count threshold,
        # then drop them from the R-side countsTable.
        E.warn("%i empty samples are being removed: %s" %
               (nempty_samples,
                ",".join([sample_names[x] for x, y in
                          enumerate(empty_samples) if y])))
        r('''countsTable <- countsTable[, max_counts >= %i]''' %
          filter_min_counts_per_sample)

    nobservations, nsamples = tuple(r('''dim(countsTable)'''))
    r('''groups = factor(colnames( countsTable ))''')
    E.debug("sample names: %s" % r('''colnames(countsTable)'''))
    nrows, ncolumns = tuple(r('''dim(countsTable)'''))

    outfile.write("metric\tvalue\tpercent\n")
    outfile.write("number of observations\t%i\t100\n" % nobservations)
    outfile.write("number of samples\t%i\t100\n" % nsamples)

    # Count windows with no data
    r('''max_counts = apply(countsTable,1,max)''')

    # output distribution of maximum number of counts per window
    outfilename = output_filename_pattern + "max_counts.tsv.gz"
    E.info("outputting maximum counts per window to %s" % outfilename)
    r('''write.table(table(max_counts), file=gzfile('%(outfilename)s'), sep="\t", row.names=FALSE, quote=FALSE)''' % locals())

    # removing empty rows
    E.info("removing rows with no counts in any sample")
    r('''countsTable = countsTable[max_counts>0,]''')

    if nrows > 0:
        # Cumulative histogram of rows whose maximum count is <= x.
        for x in range(0, 20):
            nempty = tuple(r('''sum(max_counts <= %i)''' % x))[0]
            outfile.write("max per row<=%i\t%i\t%f\n" %
                          (x, nempty, 100.0 * nempty / nrows))

    E.info("removed %i empty rows" % tuple(r('''sum(max_counts == 0)''')))
    observations, samples = tuple(r('''dim(countsTable)'''))
    E.info("trimmed data: %i observations for %i samples" %
           (observations, samples))

    # build correlation
    r('''correlations = cor(countsTable)''')
    outfilename = output_filename_pattern + "correlation.tsv"
    E.info("outputting sample correlations table to %s" % outfilename)
    r('''write.table(correlations, file='%(outfilename)s', sep="\t", row.names=TRUE, col.names=NA, quote=FALSE)''' % locals())

    # output scatter plots
    outfilename = output_filename_pattern + "scatter.png"
    E.info("outputting scatter plots to %s" % outfilename)
    R.png(outfilename, width=960, height=960)
    plotPairs()
    r['dev.off']()

    # output heatmap based on correlations
    outfilename = output_filename_pattern + "heatmap.svg"
    E.info("outputting correlation heatmap to %s" % outfilename)
    R.svg(outfilename)
    plotCorrelationHeatmap(method="correlation")
    r['dev.off']()

    # output PCA
    outfilename = output_filename_pattern + "pca.svg"
    E.info("outputting PCA plot to %s" % outfilename)
    R.svg(outfilename)
    plotPCA(groups=False)
    r['dev.off']()

    # output an MDS plot; limma's plotMDS can fail on degenerate input,
    # in which case only a warning is emitted.
    r('''suppressMessages(library('limma'))''')
    outfilename = output_filename_pattern + "mds.svg"
    E.info("outputting mds plot to %s" % outfilename)
    R.svg(outfilename)
    try:
        r('''plotMDS(countsTable)''')
    except RRuntimeError:
        E.warn("can not plot mds")
    r['dev.off']()


def dumpTagData(filename_tags, filename_design, outfile):
    '''output filtered tag table.

    Loads the tag counts and design, applies the standard filtering and
    writes the resulting R countsTable to *outfile* (empty filename when
    writing to stdout).
    '''
    if outfile == sys.stdout:
        outfilename = ""
    else:
        outfilename = outfile.name

    # load all tag data
    loadTagData(filename_tags, filename_design)

    # filter
    nobservations, nsamples = filterTagData()

    # output
    r('''write.table( countsTable, file='%(outfilename)s', sep='\t', quote=FALSE)''' % locals())


def runTTest(outfile, outfile_prefix, fdr=0.1, ref_group=None,
             ref_regex=None):
    '''apply a ttest on the data.

    For the T-test it is best to use FPKM values as this method
    does not perform any library normalization.
''' groups, pairs, has_replicates, has_pairs = groupTagData(ref_group, ref_regex) results = [] for combination in itertools.combinations(groups, 2): control, treatment = combination r = r('''r = apply(countsTable, 1, function(x) { t.test( x[groups == '%(treatment)s'], x[groups == '%(control)s']) } ) ''' % locals()) for test_id, ttest in zip(r.names, r): # TS, swapped order below as assignment was incorrect treatment_mean, control_mean = tuple(ttest.rx2('estimate')) fold_change = treatment_mean / control_mean pvalue = tuple(ttest.rx2('p.value'))[0] significant = (0, 1)[pvalue < fdr] results.append(GeneExpressionResult._make((test_id, treatment, treatment_mean, 0, control, control_mean, 0, pvalue, pvalue, numpy.log2(fold_change), fold_change, numpy.log2(fold_change), significant, "OK"))) writeExpressionResults(outfile, results) ##################################################################### # Pandas-based functions and matplotlib-based plotting functions #### ##################################################################### def loadTagDataPandas(tags_filename, design_filename): '''load tag data for deseq/edger analysis. *Infile* is a tab-separated file with counts. *design_file* is a tab-separated file with the experimental design with four columns:: track include group pair CW-CD14-R1 0 CD14 1 CW-CD14-R2 0 CD14 1 CW-CD14-R3 1 CD14 1 CW-CD4-R1 1 CD4 1 FM-CD14-R1 1 CD14 2 FM-CD4-R2 0 CD4 2 FM-CD4-R3 0 CD4 2 FM-CD4-R4 0 CD4 2 track name of track - should correspond to column header in *infile* include flag to indicate whether or not to include this data group group indicator - experimental group pair pair that sample belongs to (for paired tests) This method creates various R objects: countsTable : data frame with counts. 
groups : vector with groups pairs : vector with pairs ''' E.info("loading tag data from %s" % tags_filename) inf = iotools.open_file(tags_filename) counts_table = pandas.read_csv(inf, sep="\t", index_col=0, comment="#") inf.close() E.info("read data: %i observations for %i samples" % counts_table.shape) E.debug("sample names: %s" % list(counts_table.columns)) inf = iotools.open_file(design_filename) design_table = pandas.read_csv(inf, sep="\t", index_col=0) inf.close() E.debug("design names: %s" % list(design_table.index)) missing = set(counts_table.columns).difference(design_table.index) if missing: E.warn("missing samples from design file are ignored: %s" % missing) # remove unnecessary samples design_table = design_table[design_table["include"] != 0] E.debug("included samples: %s" % list(design_table.index)) counts_table = counts_table[list(design_table.index)] E.info("filtered data: %i observations for %i samples" % counts_table.shape) return counts_table, design_table def filterTagDataPandas(counts_table, design_table, filter_min_counts_per_row=1, filter_min_counts_per_sample=10, filter_percentile_rowsums=0): '''filter tag data. 
* remove rows with at least x number of counts * remove samples with a maximum of *min_sample_counts* * remove the lowest percentile of rows in the table, sorted by total tags per row ''' # Remove windows with no data max_counts_per_row = counts_table.max(1) counts_table = counts_table[ max_counts_per_row >= filter_min_counts_per_row] observations, samples = counts_table.shape E.info("trimmed data: %i observations for %i samples" % (observations, samples)) # remove samples without data max_counts_per_sample = counts_table.max() empty_samples = max_counts_per_sample < filter_min_counts_per_sample sample_names = counts_table.columns nempty_samples = sum(empty_samples) if nempty_samples: E.warn("%i empty samples are being removed: %s" % (nempty_samples, ",".join([sample_names[x] for x, y in enumerate(empty_samples) if y]))) raise NotImplementedError("removing empty samples needs to be done") # r('''countsTable <- countsTable[, max_counts >= %i]''' % filter_min_counts_per_sample) # r('''groups <- groups[max_counts >= %i]''' % filter_min_counts_per_sample) # r('''pairs <- pairs[max_counts >= %i]''' % filter_min_counts_per_sample) # observations, samples = tuple( r('''dim(countsTable)''')) # percentile filtering if filter_percentile_rowsums > 0: percentile = float(filter_percentile_rowsums) / 100.0 sum_counts = counts_table.sum(1) take = sum_counts > sum_counts.quantile(percentile) E.info("percentile filtering at level %f: keep=%i, discard=%i" % (filter_percentile_rowsums, sum(take), len(take) - sum(take))) counts_table = counts_table[take] return counts_table def identifyVariablesPandas(design_table): # design table should have been processed by loadTagDataPandas already # just in case, re-filter for not included samples here design_table = design_table[design_table["include"] != 0] conds = design_table['group'].tolist() pairs = design_table['pair'].tolist() # TS, adapted from JJ code for DESeq2 design tables: # if additional columns present, pass to 'factors' if 
len(design_table.columns) > 3: factors = design_table.iloc[:, 3:] else: factors = None return conds, pairs, factors def checkTagGroupsPandas(design_table, ref_group=None): '''compute groups and pairs from tag data table.''' conds, pairs, factors = identifyVariablesPandas(design_table) groups = list(set(conds)) # Relevel the groups so that the reference comes first # how to do this in python? # if ref_group is not None: # r('''groups <- relevel(groups, ref = "%s")''' % ref_group) # check this works, will need to make factors from normal df # TS adapted from JJ code for DESeq2 - # check whether there are additional factors in design file... if factors: E.warn("There are additional factors in design file that are ignored" " by groupTagData: ", factors) else: pass # Test if replicates exist - at least one group must have multiple samples max_per_group = max([conds.count(x) for x in groups]) has_replicates = max_per_group >= 2 # Test if pairs exist: npairs = len(set(pairs)) has_pairs = npairs == 2 # ..if so, at least two samples are required per pair if has_pairs: min_per_pair = min([pairs.count(x) for x in set(pairs)]) has_pairs = min_per_pair >= 2 return groups, pairs, conds, factors, has_replicates, has_pairs ResultColumns = ["test_id", "treatment_name", "treatment_mean", "treatment_std", "control_name", "control_mean", "control_std", "p_value", "p_value_adj", "l2fold", "fold", "transformed_l2fold", "significant", "status"] ResultColumns_dtype = {"test_id": object, "treatment_name": object, "treatment_mean": float, "treatment_std": float, "control_name": object, "control_mean": float, "control_std": float, "p_value": float, "p_value_adj": float, "l2fold": float, "fold": float, "transformed_l2fold": float, "significant": int, "status": object} def makeEmptyDataFrameDict(): return {key: [] for key in ResultColumns} def runTTestPandas(counts_table, design_table, outfile, outfile_prefix, fdr, ref_group=None): '''apply a ttest on the data. 
    For the T-test it is best to use FPKM values as this method
    does not perform any library normalization.
    Alternatively, perform normalisation on counts table using Counts.py
    '''
    # NOTE(review): ``stats`` is bound here but not used in the visible
    # portion of this function; presumably used further down — confirm.
    stats = importr('stats')

    (groups, pairs, conds, factors, has_replicates,
     has_pairs) = checkTagGroupsPandas(design_table, ref_group)

    df_dict = makeEmptyDataFrameDict()

    for combination in itertools.combinations(groups, 2):
        # as each combination may have different numbers of samples in control
        # and treatment, calculations have to be performed on a per
        # combination basis
        control, treatment = combination
        n_rows = counts_table.shape[0]
        # One result row per count-table row for this combination.
        df_dict["control_name"].extend((control,) * n_rows)
        df_dict["treatment_name"].extend((treatment,) * n_rows)
        df_dict["test_id"].extend(counts_table.index.tolist())

        # subset counts table for each combination
        c_keep = [x == control for x in conds]
        control_counts = counts_table.iloc[:, c_keep]
        t_keep = [x == treatment for x in conds]
        treatment_counts = counts_table.iloc[:, t_keep]

        # Per-row means and standard deviations over each group's samples.
        c_mean = control_counts.mean(axis=1)
        df_dict["control_mean"].extend(c_mean)
        df_dict["control_std"].extend(control_counts.std(axis=1))

        t_mean = treatment_counts.mean(axis=1)
        df_dict["treatment_mean"].extend(t_mean)
        df_dict["treatment_std"].extend(treatment_counts.std(axis=1))

        t, prob = ttest_ind(control_counts,
import json
import os
import pytest
import requests
from .common import ADMIN_TOKEN
from .common import AUTH_PROVIDER
from .common import AUTH_USER_PASSWORD
from .common import CATTLE_TEST_URL
from .common import cluster_cleanup
from .common import create_project_and_ns
from .common import get_admin_client
from .common import get_client_for_token
from .test_rke_cluster_provisioning import create_and_validate_custom_host

'''
Prerequisite:
1. testautoadmin as your admin user, if the fixture detects the auth
is disabled it will be enabled automatically.
2. Two clusters in your setup, if none or one are detected by the fixture
will create clusters to match two
'''

# Config Fields
HOSTNAME_OR_IP_ADDRESS = os.environ.get("RANCHER_HOSTNAME_OR_IP_ADDRESS")
PORT = os.environ.get("RANCHER_PORT", "")
CA_CERTIFICATE = os.environ.get("RANCHER_CA_CERTIFICATE", "")
OPENLDAP_CA_CERTIFICATE = os.environ.get("RANCHER_OPENLDAP_CA_CERTIFICATE", "")
FREEIPA_CA_CERTIFICATE = os.environ.get("RANCHER_FREEIPA_CA_CERTIFICATE", "")
CONNECTION_TIMEOUT = os.environ.get("RANCHER_CONNECTION_TIMEOUT", 5000)
SERVICE_ACCOUNT_NAME = os.environ.get("RANCHER_SERVICE_ACCOUNT_NAME")
# BUGFIX: this assignment had been mangled to "<PASSWORD>(...)" (a
# redaction artefact); restored to the os.environ.get pattern used by
# every other credential in this module.
SERVICE_ACCOUNT_PASSWORD = os.environ.get("RANCHER_SERVICE_ACCOUNT_PASSWORD")
DEFAULT_LOGIN_DOMAIN = os.environ.get("RANCHER_DEFAULT_LOGIN_DOMAIN")
USER_SEARCH_BASE = os.environ.get("RANCHER_USER_SEARCH_BASE")
GROUP_SEARCH_BASE = os.environ.get("RANCHER_GROUP_SEARCH_BASE")
AD_SPECIAL_CHAR_PASSWORD = os.environ.get("RANCHER_AD_SPECIAL_CHAR_PASSWORD")
OPENLDAP_SPECIAL_CHAR_PASSWORD = \
    os.environ.get("RANCHER_OPENLDAP_SPECIAL_CHAR_PASSWORD")
FREEIPA_SPECIAL_CHAR_PASSWORD = \
    os.environ.get("RANCHER_FREEIPA_SPECIAL_CHAR_PASSWORD")
OPENLDAP_HOSTNAME_OR_IP_ADDRESS = \
    os.environ.get("RANCHER_OPENLDAP_HOSTNAME_OR_IP_ADDRESS")
OPENLDAP_SERVICE_ACCOUNT_NAME = \
    os.environ.get("RANCHER_OPENLDAP_SERVICE_ACCOUNT_NAME")
OPENLDAP_SERVICE_ACCOUNT_PASSWORD = \
    os.environ.get("RANCHER_OPENLDAP_SERVICE_ACCOUNT_PASSWORD")
OPENLDAP_USER_SEARCH_BASE = os.environ.get("RANCHER_OPENLDAP_USER_SEARCH_BASE")
OPENLDAP_AUTH_USER_PASSWORD = \
    os.environ.get("RANCHER_OPENLDAP_AUTH_USER_PASSWORD")
FREEIPA_HOSTNAME_OR_IP_ADDRESS = \
    os.environ.get("RANCHER_FREEIPA_HOSTNAME_OR_IP_ADDRESS")
FREEIPA_SERVICE_ACCOUNT_NAME = \
    os.environ.get("RANCHER_FREEIPA_SERVICE_ACCOUNT_NAME")
FREEIPA_SERVICE_ACCOUNT_PASSWORD = \
    os.environ.get("RANCHER_FREEIPA_SERVICE_ACCOUNT_PASSWORD")
FREEIPA_USER_SEARCH_BASE = os.environ.get("RANCHER_FREEIPA_USER_SEARCH_BASE")
FREEIPA_GROUP_SEARCH_BASE = os.environ.get("RANCHER_FREEIPA_GROUP_SEARCH_BASE")
FREEIPA_AUTH_USER_PASSWORD = \
    os.environ.get("RANCHER_FREEIPA_AUTH_USER_PASSWORD")

# Default password for the regular (non-special-character) test users of
# the active auth provider.
# BUGFIX: the three provider branches had been redacted to "<PASSWORD>".
# Reconstructed from the per-provider *_AUTH_USER_PASSWORD constants —
# TODO(review): confirm against the original test suite.
PASSWORD = ""
if AUTH_PROVIDER == "activeDirectory":
    PASSWORD = AUTH_USER_PASSWORD
elif AUTH_PROVIDER == "openLdap":
    PASSWORD = OPENLDAP_AUTH_USER_PASSWORD
elif AUTH_PROVIDER == "freeIpa":
    PASSWORD = FREEIPA_AUTH_USER_PASSWORD

# Provider-specific Rancher API endpoints.
CATTLE_AUTH_URL = \
    CATTLE_TEST_URL + \
    "/v3-public/" + AUTH_PROVIDER + "Providers/" + \
    AUTH_PROVIDER.lower() + "?action=login"

CATTLE_AUTH_PROVIDER_URL = \
    CATTLE_TEST_URL + "/v3/" + AUTH_PROVIDER + "Configs/" +\
    AUTH_PROVIDER.lower()

CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"

CATTLE_AUTH_ENABLE_URL = CATTLE_AUTH_PROVIDER_URL + "?action=testAndApply"

CATTLE_AUTH_DISABLE_URL = CATTLE_AUTH_PROVIDER_URL + "?action=disable"

# Shared mutable test state; populated by fixtures in this module.
setup = {"cluster1": None, "project1": None, "ns1": None,
         "cluster2": None, "project2": None, "ns2": None,
         "auth_setup_data": {}, "permission_denied_code": 403}

# Per-provider JSON fixture describing the users/groups used by the tests.
auth_setup_fname = \
    os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/resource",
                 AUTH_PROVIDER.lower() + ".json")


def test_access_control_required_set_access_mode_required():
    access_mode = "required"
    validate_access_control_set_access_mode(access_mode)


def test_access_control_restricted_set_access_mode_required():
    access_mode = "restricted"
    validate_access_control_set_access_mode(access_mode)


def test_access_control_required_add_users_and_groups_to_cluster():
    access_mode = "required"
    validate_add_users_and_groups_to_cluster_or_project(
        access_mode, add_users_to_cluster=True)


def test_access_control_restricted_add_users_and_groups_to_cluster():
    access_mode = "restricted"
    validate_add_users_and_groups_to_cluster_or_project(
        access_mode, add_users_to_cluster=True)


def test_access_control_required_add_users_and_groups_to_project():
    access_mode = "required"
    validate_add_users_and_groups_to_cluster_or_project(
        access_mode, add_users_to_cluster=False)


def test_access_control_restricted_add_users_and_groups_to_project():
    access_mode = "restricted"
    validate_add_users_and_groups_to_cluster_or_project(
        access_mode, add_users_to_cluster=False)


def test_disable_and_enable_auth_set_access_control_required():
    access_mode = "required"
    validate_access_control_disable_and_enable_auth(access_mode)


def test_disable_and_enable_auth_set_access_control_restricted():
    access_mode = "restricted"
    validate_access_control_disable_and_enable_auth(access_mode)


# By default nestedgroup is disabled for ad and openldap, enabled for freeipa
def test_disable_and_enable_nestedgroups_set_access_control_required():
    access_mode = "required"
    validate_access_control_disable_and_enable_nestedgroups(access_mode)


def test_disable_and_enable_nestedgroup_set_access_control_restricted():
    access_mode = "restricted"
    validate_access_control_disable_and_enable_nestedgroups(access_mode)


def test_ad_service_account_login():
    # Verify the AD service account can still log in after a disable/enable
    # cycle of the auth provider. Only runs for Active Directory.
    delete_project_users()
    delete_cluster_users()
    auth_setup_data = setup["auth_setup_data"]
    admin_user = auth_setup_data["admin_user"]
    # admin_user here is the AD admin user
    if AUTH_PROVIDER == "activeDirectory":
        admin_token = login(admin_user, AUTH_USER_PASSWORD)
        disable_ad(admin_user, admin_token)
        enable_ad(admin_user, admin_token)
        login(SERVICE_ACCOUNT_NAME, SERVICE_ACCOUNT_PASSWORD)


def test_special_character_users_login_access_mode_required():
    access_mode = "required"
    special_character_users_login(access_mode)


def test_special_character_users_login_access_mode_restricted():
    access_mode = "restricted"
    special_character_users_login(access_mode)


def special_character_users_login(access_mode):
    """
    Verify that users/groups whose names, DNs or passwords contain special
    characters can be added to site access and can log in, for the active
    auth provider. The provider is disable/enable cycled first to start
    from a clean state.
    """
    delete_project_users()
    delete_cluster_users()
    auth_setup_data = setup["auth_setup_data"]
    admin_user = auth_setup_data["admin_user"]
    admin_token = login(admin_user, PASSWORD)
    allowed_principal_ids = []
    # Reset the provider configuration before exercising the logins.
    if AUTH_PROVIDER == "activeDirectory":
        disable_ad(admin_user, admin_token)
        enable_ad(admin_user, admin_token)
    if AUTH_PROVIDER == "openLdap":
        disable_openldap(admin_user, admin_token)
        enable_openldap(admin_user, admin_token)
    if AUTH_PROVIDER == "freeIpa":
        disable_freeipa(admin_user, admin_token)
        enable_freeipa(admin_user, admin_token)

    if AUTH_PROVIDER == "activeDirectory":
        # Collect the principal ids of every special-character user/group,
        # grant them site access, then verify each can log in.
        for user in auth_setup_data["specialchar_in_username"]:
            allowed_principal_ids.append(principal_lookup(user, admin_token))
        for user in auth_setup_data["specialchar_in_password"]:
            allowed_principal_ids.append(principal_lookup(user, admin_token))
        for user in auth_setup_data["specialchar_in_userdn"]:
            allowed_principal_ids.append(principal_lookup(user, admin_token))
        for group in auth_setup_data["specialchar_in_groupname"]:
            allowed_principal_ids.append(principal_lookup(group, admin_token))
        allowed_principal_ids.append(
            principal_lookup(admin_user, admin_token))
        add_users_to_site_access(
            admin_token, access_mode, allowed_principal_ids)
        for user in auth_setup_data["specialchar_in_username"]:
            login(user, PASSWORD)
        for user in auth_setup_data["specialchar_in_password"]:
            login(user, AD_SPECIAL_CHAR_PASSWORD)
        for user in auth_setup_data["specialchar_in_userdn"]:
            login(user, PASSWORD)
        for group in auth_setup_data["specialchar_in_groupname"]:
            for user in auth_setup_data[group]:
                login(user, PASSWORD)

    if AUTH_PROVIDER == "openLdap":
        # Same flow for OpenLDAP's special-character categories.
        for user in auth_setup_data["specialchar_in_user_cn_sn"]:
            allowed_principal_ids.append(principal_lookup(user, admin_token))
        for user in auth_setup_data["specialchar_in_uid"]:
            allowed_principal_ids.append(principal_lookup(user, admin_token))
        for user in auth_setup_data["specialchar_in_password"]:
            allowed_principal_ids.append(principal_lookup(user, admin_token))
        for group in auth_setup_data["specialchar_in_groupname"]:
            allowed_principal_ids.append(principal_lookup(group, admin_token))
        allowed_principal_ids.append(principal_lookup(admin_user,
                                                      admin_token))
        add_users_to_site_access(
            admin_token, access_mode, allowed_principal_ids)
        for user in auth_setup_data["specialchar_in_user_cn_sn"]:
            login(user, PASSWORD)
        for user in auth_setup_data["specialchar_in_uid"]:
            login(user, PASSWORD)
        for user in auth_setup_data["specialchar_in_password"]:
            login(user, OPENLDAP_SPECIAL_CHAR_PASSWORD)
        for group in auth_setup_data["specialchar_in_groupname"]:
            for user in auth_setup_data[group]:
                login(user, PASSWORD)

    if AUTH_PROVIDER == "freeIpa":
        # Same flow for FreeIPA's special-character categories.
        for user in auth_setup_data["specialchar_in_users"]:
            allowed_principal_ids.append(principal_lookup(user, admin_token))
        for user in auth_setup_data["specialchar_in_password"]:
            allowed_principal_ids.append(principal_lookup(user, admin_token))
        for group in auth_setup_data["specialchar_in_groupname"]:
            allowed_principal_ids.append(principal_lookup(group, admin_token))
        allowed_principal_ids.append(
            principal_lookup(admin_user, admin_token))
        add_users_to_site_access(
            admin_token, access_mode, allowed_principal_ids)
        for user in auth_setup_data["specialchar_in_users"]:
            login(user, PASSWORD)
        for user in auth_setup_data["specialchar_in_password"]:
            login(user, FREEIPA_SPECIAL_CHAR_PASSWORD)
        for group in auth_setup_data["specialchar_in_groupname"]:
            for user in auth_setup_data[group]:
                login(user, PASSWORD)


def validate_access_control_set_access_mode(access_mode):
    """
    Verify site-access enforcement for the given access mode: allowed
    users/groups can log in, disallowed ones are rejected, and membership
    changes take effect.
    """
    delete_cluster_users()
    auth_setup_data = setup["auth_setup_data"]
    admin_user = auth_setup_data["admin_user"]
    token = login(admin_user, PASSWORD)
    allowed_principal_ids = []
    for user in auth_setup_data["allowed_users"]:
        allowed_principal_ids.append(principal_lookup(user, token))
    for group in auth_setup_data["allowed_groups"]:
        allowed_principal_ids.append(principal_lookup(group, token))
    allowed_principal_ids.append(principal_lookup(admin_user, token))
    # Add users and groups in allowed list to access rancher-server
    add_users_to_site_access(token, access_mode, allowed_principal_ids)
    for user in auth_setup_data["allowed_users"]:
        login(user, PASSWORD)
    for group in auth_setup_data["allowed_groups"]:
        for user in auth_setup_data[group]:
            login(user, PASSWORD)
    # Users outside the allowed list must be denied.
    for user in auth_setup_data["dis_allowed_users"]:
        login(user, PASSWORD,
              expected_status=setup["permission_denied_code"])
    for group in auth_setup_data["dis_allowed_groups"]:
        for user in auth_setup_data[group]:
            login(user, PASSWORD,
                  expected_status=setup["permission_denied_code"])
    # Add users and groups from dis allowed list to access rancher-server
    for user in auth_setup_data["dis_allowed_users"]:
        allowed_principal_ids.append(principal_lookup(user, token))
    for group in auth_setup_data["dis_allowed_groups"]:
        for user in auth_setup_data[group]:
            allowed_principal_ids.append(principal_lookup(user, token))
    add_users_to_site_access(token, access_mode, allowed_principal_ids)
    # Now everyone should be able to log in.
    for user in auth_setup_data["allowed_users"]:
        login(user, PASSWORD)
    for group in auth_setup_data["allowed_groups"]:
        for user in auth_setup_data[group]:
            login(user, PASSWORD)
    for user in auth_setup_data["dis_allowed_users"]:
        login(user, PASSWORD)
    for group in auth_setup_data["dis_allowed_groups"]:
        for user in auth_setup_data[group]:
            login(user, PASSWORD)
    # Remove users and groups from allowed list to access rancher-server
    allowed_principal_ids = [principal_lookup(admin_user, token)]
    for user in auth_setup_data["dis_allowed_users"]:
        allowed_principal_ids.append(principal_lookup(user, token))
    for group in auth_setup_data["dis_allowed_groups"]:
        for user in auth_setup_data[group]:
            allowed_principal_ids.append(principal_lookup(user, token))
    add_users_to_site_access(token, access_mode, allowed_principal_ids)
    # The originally-allowed users were removed and must now be denied.
    for user in auth_setup_data["allowed_users"]:
        login(user, PASSWORD,
              expected_status=setup["permission_denied_code"])
    for group in auth_setup_data["allowed_groups"]:
        for user in auth_setup_data[group]:
            login(user, PASSWORD,
                  expected_status=setup["permission_denied_code"])
    for user in auth_setup_data["dis_allowed_users"]:
        login(user, PASSWORD)
    for group in auth_setup_data["dis_allowed_groups"]:
        for user in auth_setup_data[group]:
            login(user, PASSWORD)


def validate_add_users_and_groups_to_cluster_or_project(
        access_mode, add_users_to_cluster=True):
    """
    Verify that granting a cluster role (cluster1/cluster-owner) or a
    project role (project2/project-owner) changes whether the affected
    users/groups can log in, depending on the site access mode.
    """
    delete_cluster_users()
    client = get_admin_client()
    for project in client.list_project():
        delete_existing_users_in_project(client, project)
    auth_setup_data = setup["auth_setup_data"]
    admin_user = auth_setup_data["admin_user"]
    token = login(admin_user, PASSWORD)
    allowed_principal_ids = [principal_lookup(admin_user, token)]

    # Add users and groups in allowed list to access rancher-server
    add_users_to_site_access(token, access_mode, allowed_principal_ids)

    if add_users_to_cluster:
        groups_to_check = auth_setup_data["groups_added_to_cluster"]
        users_to_check = auth_setup_data["users_added_to_cluster"]
    else:
        groups_to_check = auth_setup_data["groups_added_to_project"]
        users_to_check = auth_setup_data["users_added_to_project"]

    # Before the role assignment nobody outside the site-access list may
    # log in.
    for group in groups_to_check:
        for user in auth_setup_data[group]:
            login(user, PASSWORD,
                  expected_status=setup["permission_denied_code"])
    for user in users_to_check:
        login(user, PASSWORD,
              expected_status=setup["permission_denied_code"])

    client = get_client_for_token(token)
    for group in groups_to_check:
        if add_users_to_cluster:
            assign_user_to_cluster(client, principal_lookup(group, token),
                                   setup["cluster1"], "cluster-owner")
        else:
            assign_user_to_project(client, principal_lookup(group, token),
                                   setup["project2"], "project-owner")
    for user in users_to_check:
        if add_users_to_cluster:
            assign_user_to_cluster(client, principal_lookup(user, token),
                                   setup["cluster1"], "cluster-owner")
        else:
            assign_user_to_project(client, principal_lookup(user, token),
                                   setup["project2"], "project-owner")

    # "restricted" mode grants login (201) to cluster/project members;
    # "required" mode still denies them.
    # NOTE(review): the first assignment below is redundant with the
    # "required" branch; kept as-is.
    expected_status = setup["permission_denied_code"]
    if access_mode == "required":
        expected_status = setup["permission_denied_code"]
    if access_mode == "restricted":
        expected_status = 201
    for group in groups_to_check:
        for user in auth_setup_data[group]:
            login(user, PASSWORD, expected_status)
    for user in users_to_check:
        login(user, PASSWORD, expected_status)


def validate_access_control_disable_and_enable_auth(access_mode):
    """
    Verify that only the admin can disable/re-enable the auth provider;
    the provider is cycled once as admin first.
    """
    delete_cluster_users()
    delete_project_users()
    auth_setup_data = setup["auth_setup_data"]
    # Login as admin user to disable auth, should be success, then enable it.
    admin_user = auth_setup_data["admin_user"]
    admin_token = login(admin_user, PASSWORD)
    if AUTH_PROVIDER == "activeDirectory":
        disable_ad(admin_user, admin_token)
        enable_ad(admin_user, admin_token)
    if AUTH_PROVIDER == "openLdap":
        disable_openldap(admin_user, admin_token)
        enable_openldap(admin_user, admin_token)
    if AUTH_PROVIDER == "freeIpa":
        disable_freeipa(admin_user, admin_token)
        enable_freeipa(admin_user, admin_token)
    # Login as users within allowed principal id list, which cannot perform
    # disable action.
allowed_principal_ids = [] for user in auth_setup_data["allowed_users"]: allowed_principal_ids.append(principal_lookup(user, admin_token)) allowed_principal_ids.append(principal_lookup(admin_user, admin_token)) # Add users in allowed list to access rancher-server add_users_to_site_access(admin_token, access_mode, allowed_principal_ids) for user in auth_setup_data["allowed_users"]: token = login(user, PASSWORD) if AUTH_PROVIDER == "activeDirectory": disable_ad(user, token, expected_status=setup["permission_denied_code"]) enable_ad(user, token, expected_status=setup["permission_denied_code"]) if AUTH_PROVIDER == "openLdap": disable_openldap(user, token, expected_status=setup["permission_denied_code"]) enable_openldap(user, token, expected_status=setup["permission_denied_code"]) if AUTH_PROVIDER == "freeIpa": disable_freeipa(user, token, expected_status=setup["permission_denied_code"]) enable_freeipa(user, token, expected_status=setup["permission_denied_code"]) def validate_access_control_disable_and_enable_nestedgroups(access_mode): delete_project_users() delete_cluster_users() auth_setup_data = setup["auth_setup_data"] admin_user = auth_setup_data["admin_user"] token = login(admin_user, PASSWORD) if AUTH_PROVIDER == "activeDirectory": enable_ad(admin_user, token) if AUTH_PROVIDER == "openLdap": enable_openldap(admin_user, token) if AUTH_PROVIDER == "freeIpa": enable_freeipa(admin_user, token) allowed_principal_ids = [] for group in auth_setup_data["allowed_nestedgroups"]: allowed_principal_ids.append(principal_lookup(group, token)) allowed_principal_ids.append(principal_lookup(admin_user, token)) # Add users in allowed list to access rancher-server add_users_to_site_access(token, access_mode, allowed_principal_ids) for group in auth_setup_data["allowed_nestedgroups"]: for user in auth_setup_data[group]: login(user, PASSWORD) if AUTH_PROVIDER == "freeIpa": for user in auth_setup_data["users_under_nestedgroups"]: login(user, PASSWORD) if AUTH_PROVIDER == 
"activeDirectory" or AUTH_PROVIDER == "openLdap": for user in auth_setup_data["users_under_nestedgroups"]: login(user, PASSWORD, expected_status=setup["permission_denied_code"]) # Enable nestedgroup feature, so users under nestedgroups can login # successfully if AUTH_PROVIDER == "activeDirectory": enable_ad(admin_user, token, nested=True) if AUTH_PROVIDER == "openLdap": enable_openldap(admin_user, token, nested=True) allowed_principal_ids = [] for group in auth_setup_data["allowed_nestedgroups"]: allowed_principal_ids.append(principal_lookup(group, token)) allowed_principal_ids.append(principal_lookup(admin_user, token)) # Add users in allowed list to access rancher-server add_users_to_site_access(token, access_mode, allowed_principal_ids) for group in auth_setup_data["allowed_nestedgroups"]: for user in auth_setup_data[group]: login(user, PASSWORD) for user in auth_setup_data["users_under_nestedgroups"]: login(user, PASSWORD) def login(username, password, expected_status=201): token = "" r = requests.post(CATTLE_AUTH_URL, json={ 'username': username, 'password': password, 'responseType': 'json', }, verify=False) assert r.status_code == expected_status print("Login request for " + username + " " + str(expected_status)) if expected_status == 201: token = r.json()['token'] return token def get_tls(certificate): if len(certificate) != 0: tls = True else: tls = False return tls def enable_openldap(username, token, enable_url=CATTLE_AUTH_ENABLE_URL, password=PASSWORD, nested=False, expected_status=200): headers = {'Authorization': 'Bearer ' + token} ldap_config = { "accessMode": "unrestricted", "connectionTimeout": CONNECTION_TIMEOUT, "certificate": OPENLDAP_CA_CERTIFICATE, "groupDNAttribute": "entryDN", "groupMemberMappingAttribute": "member", "groupMemberUserAttribute": "entryDN", "groupNameAttribute": "cn", "groupObjectClass": "groupOfNames", "groupSearchAttribute": "cn", "nestedGroupMembershipEnabled": nested, "enabled": True, "port": PORT, "servers": 
[OPENLDAP_HOSTNAME_OR_IP_ADDRESS],
# <reponame>IMULMUL/etl-parser  -- NOTE(review): scrape artifact from the
# original dataset, converted to a comment so the module stays importable.
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-WinNat
GUID : 66c07ecd-6667-43fc-93f8-05cf07f446ec

One ``construct.Struct`` parser per (event_id, version) pair of the WinNat
ETW provider.  Variable-length address fields are sized at parse time by the
preceding ``*AddrLen`` fields via ``Bytes(lambda this: ...)``.
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid


# Events 1001-1005 share one payload layout (internal/external address pairs
# plus Lifetime and TcpSessionState); version 1 appends InternalCompartmentId.

@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1001, version=0)
class Microsoft_Windows_WinNat_1001_0(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "Lifetime" / Int32ul,
        "TcpSessionState" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1001, version=1)
class Microsoft_Windows_WinNat_1001_1(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "Lifetime" / Int32ul,
        "TcpSessionState" / Int32ul,
        "InternalCompartmentId" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1002, version=0)
class Microsoft_Windows_WinNat_1002_0(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "Lifetime" / Int32ul,
        "TcpSessionState" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1002, version=1)
class Microsoft_Windows_WinNat_1002_1(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "Lifetime" / Int32ul,
        "TcpSessionState" / Int32ul,
        "InternalCompartmentId" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1003, version=0)
class Microsoft_Windows_WinNat_1003_0(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "Lifetime" / Int32ul,
        "TcpSessionState" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1003, version=1)
class Microsoft_Windows_WinNat_1003_1(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "Lifetime" / Int32ul,
        "TcpSessionState" / Int32ul,
        "InternalCompartmentId" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1004, version=0)
class Microsoft_Windows_WinNat_1004_0(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "Lifetime" / Int32ul,
        "TcpSessionState" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1004, version=1)
class Microsoft_Windows_WinNat_1004_1(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "Lifetime" / Int32ul,
        "TcpSessionState" / Int32ul,
        "InternalCompartmentId" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1005, version=0)
class Microsoft_Windows_WinNat_1005_0(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "Lifetime" / Int32ul,
        "TcpSessionState" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1005, version=1)
class Microsoft_Windows_WinNat_1005_1(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "Lifetime" / Int32ul,
        "TcpSessionState" / Int32ul,
        "InternalCompartmentId" / Int32ul
    )


# Events 1006-1008: single internal/external address pair with SessionCount
# and a Configured flag; version 1 again appends InternalCompartmentId.

@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1006, version=0)
class Microsoft_Windows_WinNat_1006_0(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "SessionCount" / Int32ul,
        "Configured" / Int8ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1006, version=1)
class Microsoft_Windows_WinNat_1006_1(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "SessionCount" / Int32ul,
        "Configured" / Int8ul,
        "InternalCompartmentId" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1007, version=0)
class Microsoft_Windows_WinNat_1007_0(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "SessionCount" / Int32ul,
        "Configured" / Int8ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1007, version=1)
class Microsoft_Windows_WinNat_1007_1(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "SessionCount" / Int32ul,
        "Configured" / Int8ul,
        "InternalCompartmentId" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1008, version=0)
class Microsoft_Windows_WinNat_1008_0(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "SessionCount" / Int32ul,
        "Configured" / Int8ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1008, version=1)
class Microsoft_Windows_WinNat_1008_1(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "SessionCount" / Int32ul,
        "Configured" / Int8ul,
        "InternalCompartmentId" / Int32ul
    )


# Event 1009: incoming/translated address pairs with ICMP detail fields.
@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1009, version=0)
class Microsoft_Windows_WinNat_1009_0(Etw):
    pattern = Struct(
        "IncomingAddrLen" / Int32ul,
        "IncomingSrcAddr" / Bytes(lambda this: this.IncomingAddrLen),
        "IncomingDstAddr" / Bytes(lambda this: this.IncomingAddrLen),
        "TranslatedAddrLen" / Int32ul,
        "TranslatedSrcAddr" / Bytes(lambda this: this.TranslatedAddrLen),
        "TranslatedDstAddr" / Bytes(lambda this: this.TranslatedAddrLen),
        "Identification" / Int32ul,
        "TransportProtocol" / Int32ul,
        "Status" / Int32ul,
        "IcmpType" / Int32ul,
        "IcmpCode" / Int32ul,
        "IcmpPayload" / Int8ul
    )


# Event 1010: named-instance timeout/behavior settings record.
@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1010, version=0)
class Microsoft_Windows_WinNat_1010_0(Etw):
    pattern = Struct(
        "InstanceName" / WString,
        "UdpIdleSessionTimeout" / Int32ul,
        "TcpTransientConnectionTimeout" / Int32ul,
        "TcpEstablishedConnectionTimeout" / Int32ul,
        "IcmpQueryTimeout" / Int32ul,
        "TcpFilteringBehavior" / Int32ul,
        "UdpFilteringBehavior" / Int32ul,
        "UdpInboundRefresh" / Int8ul,
        "Enabled" / Int8ul,
        "Status" / Int32ul,
        "Action" / Int32ul
    )


# Events 1011/1012: identical prefix/filter records.  NOTE(review): the field
# name "InternaDstlPrefix" is a typo shipped in the provider manifest; it is a
# runtime key and must be preserved verbatim.
@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1011, version=0)
class Microsoft_Windows_WinNat_1011_0(Etw):
    pattern = Struct(
        "InstanceName" / WString,
        "InternalPrefixAddrLength" / Int32ul,
        "InternalSrcPrefix" / Bytes(lambda this: this.InternalPrefixAddrLength),
        "InternalSrcPrefixLength" / Int32ul,
        "InternaDstlPrefix" / Bytes(lambda this: this.InternalPrefixAddrLength),
        "InternalDstPrefixLength" / Int32ul,
        "IPv4Prefix" / Int32ul,
        "IPv4PrefixLength" / Int32ul,
        "Nat64" / Int8ul,
        "InterfaceLuid" / Int64ul,
        "FilterId" / Int64ul,
        "Action" / Int32ul,
        "Status" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1012, version=0)
class Microsoft_Windows_WinNat_1012_0(Etw):
    pattern = Struct(
        "InstanceName" / WString,
        "InternalPrefixAddrLength" / Int32ul,
        "InternalSrcPrefix" / Bytes(lambda this: this.InternalPrefixAddrLength),
        "InternalSrcPrefixLength" / Int32ul,
        "InternaDstlPrefix" / Bytes(lambda this: this.InternalPrefixAddrLength),
        "InternalDstPrefixLength" / Int32ul,
        "IPv4Prefix" / Int32ul,
        "IPv4PrefixLength" / Int32ul,
        "Nat64" / Int8ul,
        "InterfaceLuid" / Int64ul,
        "FilterId" / Int64ul,
        "Action" / Int32ul,
        "Status" / Int32ul
    )


# Events 1013/1014: identical address/port-range records.
@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1013, version=0)
class Microsoft_Windows_WinNat_1013_0(Etw):
    pattern = Struct(
        "InstanceName" / WString,
        "Address" / Int32ul,
        "StartingPort" / Int16ul,
        "EndingPort" / Int16ul,
        "InterfaceLuid" / Int64ul,
        "Action" / Int32ul,
        "Status" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1014, version=0)
class Microsoft_Windows_WinNat_1014_0(Etw):
    pattern = Struct(
        "InstanceName" / WString,
        "Address" / Int32ul,
        "StartingPort" / Int16ul,
        "EndingPort" / Int16ul,
        "InterfaceLuid" / Int64ul,
        "Action" / Int32ul,
        "Status" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1015, version=0)
class Microsoft_Windows_WinNat_1015_0(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul,
        "Action" / Int32ul,
        "Status" / Int32ul
    )


# Event 1016: free-text description only.
@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1016, version=0)
class Microsoft_Windows_WinNat_1016_0(Etw):
    pattern = Struct(
        "Description" / WString
    )


# Events 1017/1018: identical src/dst address-pair records (no lifetime/state).
@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1017, version=0)
class Microsoft_Windows_WinNat_1017_0(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1018, version=0)
class Microsoft_Windows_WinNat_1018_0(Etw):
    pattern = Struct(
        "InternalAddrLen" / Int32ul,
        "InternalSrcAddr" / Bytes(lambda this: this.InternalAddrLen),
        "InternalDstAddr" / Bytes(lambda this: this.InternalAddrLen),
        "ExternalAddrLen" / Int32ul,
        "ExternalSrcAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "ExternalDstAddr" / Bytes(lambda this: this.ExternalAddrLen),
        "TransportProtocol" / Int32ul
    )


# Events 1019-1023: per-instance configuration records keyed by routing
# domain; 1019 v1 renames the prefix fields and appends InstanceType.
@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1019, version=0)
class Microsoft_Windows_WinNat_1019_0(Etw):
    pattern = Struct(
        "InstanceName" / WString,
        "InternalRoutingDomainId" / Guid,
        "ExternalIPInterfaceAddressPrefixLength" / Int32ul,
        "ExternalIPInterfaceAddressPrefix" / Int32ul,
        "CompartmentId" / Int32ul,
        "ExternalInterfaceIndex" / Int32ul,
        "UdpIdleSessionTimeout" / Int32ul,
        "TcpTransientConnectionTimeout" / Int32ul,
        "TcpEstablishedConnectionTimeout" / Int32ul,
        "IcmpQueryTimeout" / Int32ul,
        "TcpFilteringBehavior" / Int32ul,
        "UdpFilteringBehavior" / Int32ul,
        "UdpInboundRefresh" / Int8ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1019, version=1)
class Microsoft_Windows_WinNat_1019_1(Etw):
    pattern = Struct(
        "InstanceName" / WString,
        "InternalRoutingDomainId" / Guid,
        "PrefixLength" / Int32ul,
        "AddressPrefix" / Int32ul,
        "CompartmentId" / Int32ul,
        "ExternalInterfaceIndex" / Int32ul,
        "UdpIdleSessionTimeout" / Int32ul,
        "TcpTransientConnectionTimeout" / Int32ul,
        "TcpEstablishedConnectionTimeout" / Int32ul,
        "IcmpQueryTimeout" / Int32ul,
        "TcpFilteringBehavior" / Int32ul,
        "UdpFilteringBehavior" / Int32ul,
        "UdpInboundRefresh" / Int8ul,
        "InstanceType" / Int32ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1020, version=0)
class Microsoft_Windows_WinNat_1020_0(Etw):
    pattern = Struct(
        "InstanceName" / WString,
        "InternalRoutingDomainId" / Guid,
        "ExternalIPInterfaceAddressPrefixLength" / Int32ul,
        "ExternalIPInterfaceAddressPrefix" / Int32ul,
        "CompartmentId" / Int32ul,
        "ExternalInterfaceIndex" / Int32ul,
        "UdpIdleSessionTimeout" / Int32ul,
        "TcpTransientConnectionTimeout" / Int32ul,
        "TcpEstablishedConnectionTimeout" / Int32ul,
        "IcmpQueryTimeout" / Int32ul,
        "TcpFilteringBehavior" / Int32ul,
        "UdpFilteringBehavior" / Int32ul,
        "UdpInboundRefresh" / Int8ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1021, version=0)
class Microsoft_Windows_WinNat_1021_0(Etw):
    pattern = Struct(
        "InstanceName" / WString,
        "InternalRoutingDomainId" / Guid,
        "ExternalIPInterfaceAddressPrefixLength" / Int32ul,
        "ExternalIPInterfaceAddressPrefix" / Int32ul,
        "CompartmentId" / Int32ul,
        "ExternalInterfaceIndex" / Int32ul,
        "UdpIdleSessionTimeout" / Int32ul,
        "TcpTransientConnectionTimeout" / Int32ul,
        "TcpEstablishedConnectionTimeout" / Int32ul,
        "IcmpQueryTimeout" / Int32ul,
        "TcpFilteringBehavior" / Int32ul,
        "UdpFilteringBehavior" / Int32ul,
        "UdpInboundRefresh" / Int8ul
    )


@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1022, version=0)
class Microsoft_Windows_WinNat_1022_0(Etw):
    pattern = Struct(
        "InstanceName" / WString,
        "InternalRoutingDomainId" / Guid,
        "ExternalIPInterfaceAddressPrefixLength" / Int32ul,
        "ExternalIPInterfaceAddressPrefix" / Int32ul,
        "CompartmentId" / Int32ul,
        "ExternalInterfaceIndex" / Int32ul,
        "UdpIdleSessionTimeout" / Int32ul,
        "TcpTransientConnectionTimeout" / Int32ul,
        "TcpEstablishedConnectionTimeout" / Int32ul,
        "IcmpQueryTimeout" / Int32ul,
        "TcpFilteringBehavior" / Int32ul,
        "UdpFilteringBehavior" / Int32ul,
        "UdpInboundRefresh" / Int8ul
    )


# NOTE(review): event 1023 is truncated in this source view — the definition
# continues beyond the end of the chunk and is reproduced only as far as seen.
@declare(guid=guid("66c07ecd-6667-43fc-93f8-05cf07f446ec"), event_id=1023, version=0)
class Microsoft_Windows_WinNat_1023_0(Etw):
    pattern = Struct(
        "InstanceName" / WString,
        "InternalRoutingDomainId" / Guid,
        "ExternalIPInterfaceAddressPrefixLength" / Int32ul,
power = {'BUSES': {'Area': 1.33155, 'Bus/Area': 1.33155, 'Bus/Gate Leakage': 0.00662954, 'Bus/Peak Dynamic': 0.0, 'Bus/Runtime Dynamic': 0.0, 'Bus/Subthreshold Leakage': 0.0691322, 'Bus/Subthreshold Leakage with power gating': 0.0259246, 'Gate Leakage': 0.00662954, 'Peak Dynamic': 0.0, 'Runtime Dynamic': 0.0, 'Subthreshold Leakage': 0.0691322, 'Subthreshold Leakage with power gating': 0.0259246}, 'Core': [{'Area': 32.6082, 'Execution Unit/Area': 8.2042, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.0482207, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.240563, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.257869, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.122718, 'Execution Unit/Instruction Scheduler/Area': 2.17927, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.253493, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996, 'Execution 
Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.438957, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.251754, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.944204, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.211032, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 5.74465, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.048717, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0091893, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution 
Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0846066, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0679605, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.133324, 'Execution Unit/Register Files/Runtime Dynamic': 0.0771498, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.217655, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.541875, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155, 'Execution Unit/Runtime Dynamic': 2.20917, 'Execution Unit/Subthreshold Leakage': 1.83518, 'Execution Unit/Subthreshold Leakage with power gating': 0.709678, 'Gate Leakage': 0.372997, 'Instruction Fetch Unit/Area': 5.86007, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00167741, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch 
Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00167741, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00146641, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000570613, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction 
Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000976258, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00579749, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0158906, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0590479, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0653322, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.15569, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.193041, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.221898, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 6.57897, 'Instruction Fetch Unit/Runtime Dynamic': 0.501959, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932587, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0926151, 'L2/Runtime Dynamic': 0.0166103, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80969, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 3.41189, 'Load Store Unit/Data Cache/Runtime Dynamic': 1.06738, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0351387, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.070359, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.070359, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 3.7455, 'Load Store Unit/Runtime Dynamic': 1.48473, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.173493, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.346987, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store 
Unit/Subthreshold Leakage': 0.591622, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283406, 'Memory Management Unit/Area': 0.434579, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0615734, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0628899, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00813591, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.258385, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0318667, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.523332, 'Memory Management Unit/Runtime Dynamic': 0.0947567, 'Memory Management Unit/Subthreshold Leakage': 0.0769113, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462, 'Peak Dynamic': 21.2468, 'Renaming Unit/Area': 0.369768, 'Renaming Unit/FP Front End RAT/Area': 0.168486, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.169963, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925, 'Renaming Unit/Free List/Area': 0.0414755, 'Renaming Unit/Free List/Gate Leakage': 4.15911e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0401324, 'Renaming Unit/Free List/Runtime Dynamic': 0.0150074, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987, 'Renaming Unit/Gate Leakage': 0.00863632, 'Renaming Unit/Int Front End 
RAT/Area': 0.114751, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.128856, 'Renaming Unit/Int Front End RAT/Subthreshold
# -*- coding: utf-8 -*-

__author__ = "<NAME> and <NAME>"
__version__ = "2022.06.14"
__license__ = "MIT"


def removeHTMLTags(s):
    """Strip HTML tags from *s*, notably for use as a plain-text page title."""
    return re.sub('<[^<]+?>', '', s)


def finishWebPage(outbuffer):
    """Emit the page footer, close the HTML document and return the buffered page."""
    # Footer lines are printed in order; the inline script reports the elapsed
    # time measured between the header's startTime and endTime below.
    footer_lines = [
        '<div id="footer" class="content">',
        'Code version: ' + __version__ + ' (running on Python ' + platform.python_version() + ')<br/>',
        '<script type="text/javascript">var endTime = %g;' % time.perf_counter(),
        'document.write("Execution time: " + (endTime-startTime).toFixed(3) + " seconds<br/>");',
        'if(typeof isOrtho !== \'undefined\') document.write("Specific (faster) code for orthorhombic case was used.");',
        '</script></div>',
        '</div>',
        '</body></html>',
    ]
    for line in footer_lines:
        print(line)
    return outbuffer.getvalue()


def writeHeader(outbuffer, title="Elastic Tensor Analysis"):
    """Print the HTML head section, including the JSXGraph/Plotly/jQuery includes."""
    template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
    <title>%s</title>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
    <link rel="stylesheet" type="text/css" href="/default.css" />
    <link rel="stylesheet" type="text/css" href="https://cdn.jsdelivr.net/npm/jsxgraph@1.1.0/distrib/jsxgraph.css" />
    <script src="https://cdn.jsdelivr.net/npm/jsxgraph@1.1.0/distrib/jsxgraphcore.js"></script>
    <script src="http://cdn.plot.ly/plotly-latest.min.js"></script>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/1.12.4/jquery.min.js"></script>
</head>
"""
    print(template % (title))
def write3DPlotData(dataX, dataY, dataZ, dataR, n, opacity=1.0):
    """Print a Plotly surface-trace object literal (JS syntax) to stdout.

    Args:
        dataX, dataY, dataZ: 2D grids of Cartesian surface coordinates.
        dataR: 2D grid of hover-text strings (already JS-quoted).
        n: colorscale selector -- 1 = green, 2 = red, 3 = blue.
        opacity: surface opacity; contour lines are drawn only when fully opaque.
    """
    # Contour lines are hidden on translucent surfaces (used for overlaid traces).
    showcont = "true" if opacity == 1.0 else "false"
    colorscales = {
        1: "[['0','rgb(22,136,51)'],['0.125','rgb(61,153,85)'],['0.25','rgb(121,178,136)'],['0.375','rgb(181,204,187)'],['0.5','rgb(195,230,200)'],['0.625','rgb(181,204,187)'],['0.75','rgb(121,178,136)'],['0.875','rgb(61,153,85)'],['1','rgb(22,136,51)']]",
        2: "[['0','rgb(180,4,38)'],['0.125','rgb(222,96,77)'],['0.25','rgb(244,154,123)'],['0.375','rgb(245,196,173)'],['0.5','rgb(246,216,201)'],['0.625','rgb(245,196,173)'],['0.75','rgb(244,154,123)'],['0.875','rgb(222,96,77)'],['1','rgb(180,4,38)']]",
        3: "[['0','rgb(59,76,192)'],['0.125','rgb(98,130,234)'],['0.25','rgb(141,176,254)'],['0.375','rgb(184,208,249)'],['0.5','rgb(207,223,250)'],['0.625','rgb(184,208,249)'],['0.75','rgb(141,176,254)'],['0.875','rgb(98,130,234)'],['1','rgb(59,76,192)']]",
    }
    contours = ("{x :{ show:" + showcont + ", color: 'rgb(192,192,192)'},"
                "y :{ show:" + showcont + ", color: 'rgb(192,192,192)'},"
                "z :{ show:" + showcont + ", color: 'rgb(192,192,192)'}}")
    if n in colorscales:
        # Key order matters: json.dumps preserves OrderedDict insertion order
        # in the emitted literal.
        js = OrderedDict([
            ("x", dataX),
            ("y", dataY),
            ("z", dataZ),
            ("text", dataR),
            ("showscale", "false"),
            ("colorscale", colorscales[n]),
            ("zsmooth", "'fast'"),
            ("type", "'surface'"),
            ("hoverinfo", "'text'"),
            ("opacity", opacity),
            ("contours", contours),
        ])
    # Stripping the double quotes turns the JSON dump into a JS object literal.
    print(json.dumps(js, indent=3).replace('"', '') + ";")
def make3DPlotPosNeg(func, legend='', width=600, height=600, npoints=200):
    """Render a signed directional property (e.g. linear compressibility) as
    two overlaid Plotly surfaces: one for the positive part of func and one
    for the magnitude of its negative part.
    """
    u = np.linspace(0, np.pi, npoints)
    v = np.linspace(0, 2 * np.pi, 2 * npoints)

    def _sample(sign, label_prefix):
        # Evaluate max(0, sign*func) on the (theta, phi) grid, convert to
        # Cartesian coordinates and build the JS hover-text labels.
        xs = [[0.0 for _ in range(len(v))] for _ in range(len(u))]
        ys = [[0.0 for _ in range(len(v))] for _ in range(len(u))]
        zs = [[0.0 for _ in range(len(v))] for _ in range(len(u))]
        labels = [["0.0" for _ in range(len(v))] for _ in range(len(u))]
        for cu in range(len(u)):
            for cv in range(len(v)):
                r_tmp = max(0, sign * func(u[cu], v[cv]))
                zs[cu][cv] = r_tmp * np.cos(u[cu])
                xs[cu][cv] = r_tmp * np.sin(u[cu]) * np.cos(v[cv])
                ys[cu][cv] = r_tmp * np.sin(u[cu]) * np.sin(v[cv])
                labels[cu][cv] = ("'" + label_prefix + str(float(int(10 * r_tmp)) / 10.0)
                                  + " TPa'" + "+'-1'.sup()+"
                                  + "', \u03B8 = " + str(float(int(10 * u[cu] * 180 / np.pi)) / 10.0)
                                  + "\u00B0, " + "\u03c6 = " + str(float(int(10 * v[cv] * 180 / np.pi)) / 10.0)
                                  + "\u00B0'")
        return xs, ys, zs, labels

    dataX1, dataY1, dataZ1, dataR1 = _sample(1, "\u03B2 = ")
    dataX2, dataY2, dataZ2, dataR2 = _sample(-1, "\u03B2 = -")

    i = random.randint(0, 100000)
    print('<div class="plot3D">')
    print('<div id="box%d" style="width: %dpx; height: %dpx; display:block;"></div>' % (i, width, height))
    print('</div>')
    print('<script type="text/javascript">')
    print("var trace1 =")
    write3DPlotData(dataX1, dataY1, dataZ1, dataR1, 1)
    print("var trace2 =")
    write3DPlotData(dataX2, dataY2, dataZ2, dataR2, 2)
    print("var data = [trace1, trace2]")
    print("var layout =")
    layout = {"title": "'" + legend + "'", "width": "650", "height": "700",
              "autosize": "false", "autorange": "true",
              "margin": "{l: 65, r: 50, b: 65, t: 90}"}
    print(json.dumps(layout, indent=3).replace('\\\\', '\\').replace('"', '') + ";")
    print("Plotly.newPlot('box%d',data,layout);" % (i))
    print('</script>')
range(len(u))] dataR1 = [["0.0" for i in range(len(v))] for j in range(len(u))] dataX2 = [[0.0 for i in range(len(v))] for j in range(len(u))] dataY2 = [[0.0 for i in range(len(v))] for j in range(len(u))] dataZ2 = [[0.0 for i in range(len(v))] for j in range(len(u))] dataR2 = [["0.0" for i in range(len(v))] for j in range(len(u))] count = 0 r = [0.0,0.0,np.pi/2.0,np.pi/2.0] for cu in range(len(u)): for cv in range(len(v)): r = func(u[cu],v[cv],r[2],r[3]) z = np.cos(u[cu]) x = np.sin(u[cu]) * np.cos(v[cv]) y = np.sin(u[cu]) * np.sin(v[cv]) r1_tmp = r[0] z1 = r1_tmp * z x1 = r1_tmp * x y1 = r1_tmp * y dataX1[cu][cv] = x1 dataY1[cu][cv] = y1 dataZ1[cu][cv] = z1 dataR1[cu][cv] = "'"+"G'"+"+'min'.sub()+"+"' = "+str(float(int(10*r1_tmp))/10.0)+"GPa, "+"\u03B8 = "+str(float(int(10*u[cu]*180/np.pi))/10.0)+"\u00B0, "+"\u03c6 = "+str(float(int(10*v[cv]*180/np.pi))/10.0)+"\u00B0'" r2_tmp = r[1] z2 = r2_tmp * z x2 = r2_tmp * x y2 = r2_tmp * y dataX2[cu][cv] = x2 dataY2[cu][cv] = y2 dataZ2[cu][cv] = z2 dataR2[cu][cv] = "'"+"G'"+"+'max'.sub()+"+"' = "+str(float(int(10*r1_tmp))/10.0)+"GPa, "+"\u03B8 = "+str(float(int(10*u[cu]*180/np.pi))/10.0)+"\u00B0, "+"\u03c6 = "+str(float(int(10*v[cv]*180/np.pi))/10.0)+"\u00B0'" count = count+1 i = random.randint(0, 100000) print('<div class="plot3D">') print('<div id="box%d" style="width: %dpx; height: %dpx; display:block;"></div>' % (i, width, height)) print('</div>') print('<script type="text/javascript">') print("var trace1 =") write3DPlotData(dataX1, dataY1, dataZ1, dataR1, 1) print("var trace2 =") write3DPlotData(dataX2, dataY2, dataZ2, dataR2, 3, 0.5) print("var data = [trace1, trace2]") print("var layout =") layout = {"title": "\'"+legend+"\'", "width":"650", "height":"700" , "autosize":"false", "autorange":"true", "margin": "{l: 65, r: 50, b: 65, t: 90}"} print(json.dumps(layout, indent=3).replace('\\\\','\\').replace('\"','') + ";") print("Plotly.newPlot('box%d',data,layout);" % (i)) print('</script>') def make3DPlot3(func, 
legend='', width=600, height=600, npoints=50): str1 = legend.split("\'")[0] str2 = legend.split("\'")[1] u = np.linspace(0, np.pi, npoints) v = np.linspace(0, np.pi, npoints) w = [v[i]+np.pi for i in range(1,len(v))] v = np.append(v, w) dataX1 = [[0.0 for i in range(len(v))] for j in range(len(u))] dataY1 = [[0.0 for i in range(len(v))] for j in range(len(u))] dataZ1 = [[0.0 for i in range(len(v))] for j in range(len(u))] dataR1 = [["0.0" for i in range(len(v))] for j in range(len(u))] dataX2 = [[0.0 for i in range(len(v))] for j in range(len(u))] dataY2 = [[0.0 for i in range(len(v))] for j
enable the user to choose the tokens to consider when computing probabilities. Args: sequences : List of sequences, path of fasta file or path to a folder with msa to a3m format. batch_size: number of sequences to consider for the forward pass tokens_list: List of tokens to consider pass_mode: Mode of model evaluation ('forward' or 'masked') silent : display or not progress bar n_seqs_msa: number of sequence to consider in an msa file. Returns: List[Dict[int, Dict[str, float]]]: dictionaries of probabilities per seq """ tokens_list = NATURAL_AAS_LIST if tokens_list is None else tokens_list sequences, lengths = init_model_sequences( sequences=sequences, model_dir=self._model_dir, model_is_msa=self._language_model.is_msa, n_seqs_msa=n_seqs_msa, vocab_size=self._language_model.vocab_size, pass_mode=pass_mode, tokens_list=tokens_list, ) self.init_ray_workers() # Perform inference in model to compute the logits inputs = self._language_model.process_sequences_and_tokens(sequences) logits = self._compute_logits(inputs, batch_size, pass_mode, silent=silent) # Remove padded logits # Use transpose so that function works for MSA and sequence logits = [ torch.from_numpy(logit.numpy().transpose()[:, :length].transpose()) for logit, length in zip(list(logits), lengths) ] # Set to -inf logits that correspond to tokens that are not in tokens list vocabulary_mask = torch.from_numpy(self.get_vocabulary_mask(tokens_list)) # Avoid printing warnings np.seterr(divide="ignore") # Put -inf on character not in token list. 
Shape of this mask depends on masked_logits = [] for logit in logits: # repeat MSA dimension if self._language_model.is_msa: repeat_dim = (logit.shape[0], logit.shape[1], 1) # type: ignore else: repeat_dim = (logit.shape[0], 1) # type: ignore masked_logit = logit + torch.from_numpy(np.tile(np.log(vocabulary_mask), repeat_dim)) masked_logits.append(masked_logit) # Use softmax to compute probabilities from logits # Due to the -inf, probs of tokens that are not in token list will be zero softmax = torch.nn.Softmax(dim=-1) probabilities = [softmax(logits) for logits in masked_logits] def _get_probabilities_dict(probs: torch.Tensor) -> Dict[str, float]: return { token: float(probs[i].cpu().numpy()) for i, token in enumerate(self._language_model.model_vocabulary) if token in tokens_list } probabilities_dict_type = Union[sequence_probs_list, List[sequence_probs_list]] if self._language_model.is_msa: probabilities_dict: probabilities_dict_type = [ [ { key: _get_probabilities_dict(value) for key, value in dict(enumerate(probs)).items() } for probs in msa ] for msa in probabilities ] else: probabilities_dict = [ { key: _get_probabilities_dict(value) for key, value in dict(enumerate(probs)).items() } for probs in probabilities ] self.delete_ray_workers() return probabilities_dict # type: ignore def compute_loglikelihood( self, sequences: Union[List[str], str], batch_size: int = 1, tokens_list: List[str] = None, pass_mode: str = "forward", silent: bool = False, ) -> List[float]: """Function that computes loglikelihoods of sequences. It returns a list of float values. Both ProtBert and ESM models have more tokens than the 20 natural amino-acids (for instance MASK or PAD tokens). It might not be of interest to take these tokens into account when computing probabilities or log-likelihood. By default we remove them and compute probabilities only over the 20 natural amino-acids. 
This behavior can be overridden through the tokens_list argument that enable the user to choose the tokens to consider when computing probabilities. Args: sequences: List of sequences batch_size: Batch size tokens_list: List of tokens to consider pass_mode: Mode of model evaluation ('forward' or 'masked') silent : display or not progress bar Returns: List[float]: list of log-likelihoods, one per sequence """ self.init_ray_workers() if self._language_model.is_msa: raise NotImplementedError( "compute_loglikelihood for MSA transformers is not implemented." ) probabilities = self.compute_probabilities( sequences, batch_size, tokens_list, pass_mode, silent ) log_likelihoods = [] for sequence, probabilities_dict in zip(sequences, probabilities): log_likelihood = np.sum( [np.log(probabilities_dict[i][sequence[i]]) for i in range(len(sequence))] # type: ignore ) log_likelihoods.append(float(log_likelihood)) self.delete_ray_workers() return log_likelihoods def compute_embeddings( self, sequences: Union[List[str], str], batch_size: int = 1, pool_mode: Tuple[str, ...] = ("cls", "mean", "full"), silent: bool = False, n_seqs_msa: int = 6, ) -> Dict[str, Union[List[np.ndarray], np.ndarray]]: """Function that computes embeddings of sequences. The embedding of one sequence has a shape (sequence_length, embedding_size) where embedding_size equals 768 or 1024., thus we may want to use an aggregation function specified in pool_mode to aggregate the tensor on the num_tokens dimension. It might for instance avoid blowing the machine RAM when computing embeddings for a large number of sequences. 'mean' signifies that we take the mean over the num_tokens dimension. 'cls' means that only the class token embedding is used. This function returns a dictionary of lists. The dictionary will have one key per pool-mode that has been specified. The corresponding value is a list of embeddings, one per sequence in sequences. When working with MSA, an extra dimension is added to the final tensor. 
Args: sequences: List of sequences, path of fasta file or path to a folder with msa to a3m format. batch_size: batch size pool_mode: Mode of pooling ('cls', 'mean', 'full') silent : whereas to display or not progress bar n_seqs_msa: number of sequence to consider in an msa file. Returns: Dict[str, List[np.ndarray]]: dict matching pool-mode and list of embeddings """ sequences, lengths = init_model_sequences( sequences=sequences, model_dir=self._model_dir, model_is_msa=self._language_model.is_msa, n_seqs_msa=n_seqs_msa, vocab_size=self._language_model.vocab_size, pool_mode=pool_mode, ) self.init_ray_workers() # Compute a forward pass to get the embeddings inputs = self._language_model.process_sequences_and_tokens(sequences) _, embeddings = self._model_evaluation(inputs, batch_size=batch_size, silent=silent) embeddings = [emb.cpu().numpy() for emb in embeddings] # Remove class token and padding # Use tranpose to filter on the two last dimensions. Doing this, we don't have to manage # the first dimension of the tensor. It works for [dim1, dim2, token_size, emb_size] and # for [dim1, token_size, emb_size] filtered_embeddings = [ emb.transpose()[:, 1 : (length + 1)].transpose() for emb, length in zip(list(embeddings), lengths) ] # Keep class token only cls_embeddings = [emb.transpose()[:, 0].transpose() for emb in list(embeddings)] embeddings_dict = {} # Keep only what's necessary if "full" in pool_mode: embeddings_dict["full"] = filtered_embeddings if "cls" in pool_mode: embeddings_dict["cls"] = np.stack(cls_embeddings) if "mean" in pool_mode: # For msa embbedings, we average over tokens of each msa, we don't average over sequence. 
# We use transpose and average over axis 1 to not take in account msa dimension # esm model: [tokens , embedding] msa: [n_msa, tokens, embedding] embeddings_dict["mean"] = np.stack( [e.transpose().mean(1).transpose() for e in filtered_embeddings] ) self.delete_ray_workers() return embeddings_dict def compute_accuracy( self, sequences: Union[List[str], str], batch_size: int = 1, pass_mode: str = "forward", silent: bool = False, n_seqs_msa: int = 6, ) -> float: """Compute model accuracy from the input sequences When working with MSA, the accuracy is computed over all the tokens of the msa' sequences. Args: sequences (Union[List[str],str]): List of sequences, path of fasta file or path to a folder with msa to a3m format. batch_size ([type], optional): [description]. Defaults to 1. pass_mode ([type], optional): [description]. Defaults to "forward". silent : whereas to display or not progress bar n_seqs_msa: number of sequence to consider in an msa file. Returns: float: model's accuracy over the given sequences """ sequences, _ = init_model_sequences( sequences=sequences, model_dir=self._model_dir, model_is_msa=self._language_model.is_msa, n_seqs_msa=n_seqs_msa, vocab_size=self._language_model.vocab_size, pass_mode=pass_mode, ) self.init_ray_workers() # Perform inference in model to compute the logits inputs = self._language_model.process_sequences_and_tokens(sequences) logits = self._compute_logits(inputs, batch_size, pass_mode, silent=silent) # Get length of sequence labels = inputs["input_ids"] # Get the predicted labels predicted_labels = torch.argmax(logits, dim=-1) # Compute the accuracy accuracy = float(torch.mean(torch.eq(predicted_labels, labels).float())) self.delete_ray_workers() return accuracy def load_model(self, model_dir: str, map_location=None): """Load state_dict a finetune pytorch model ro a checkpoint directory More informations about how to load a model with map_location: 
https://pytorch.org/tutorials/beginner/saving_loading_models.html#saving-loading-model-for-inference Args: model_dir: path file of the pt model or checkpoint. the checkpoint should be a pytorch model checkpoint """ if not os.path.isfile(model_dir): raise FileNotFoundError if self._multi_gpus: self.init_ray_workers() ray.get( [worker._load_model.remote(model_dir, map_location) for worker in self._workers] # type: ignore ) pass else: self._language_model._load_model(model_dir, map_location) # type: ignore def _save_model(self, exp_path: str, lightning_model: pl.LightningModule): """Save pytorch model in pytorch-lightning logs directory Args: exp_path (str): path of the experiments directory in the logs """ version = get_logs_version(exp_path) model_dir = self._model_dir.replace("/", "_") if version is not None: save_name = os.path.join(exp_path, version, model_dir + "_finetuned.pt") else: save_name = os.path.join(exp_path, model_dir + "_finetuned.pt") torch.save(lightning_model.model.state_dict(), save_name) log.info("Model save at %s." % save_name) return save_name def finetune( self, train_sequences: Union[List[str], str], lr: float = 1.0e-5, warmup_updates: int = 1024, warmup_init_lr: float = 1e-7, epochs: int = 10, batch_size: int = 2, acc_batch_size: int = 256, masking_ratio: float = 0.025, masking_prob: float = 0.8, random_token_prob: float = 0.15, toks_per_batch: int = 2048, filter_len: Optional[int]
* from cloud_images) as i) as i where cloud_type='openstack';" rc, msg = config.db_execute(sql) image_list = [] for row in config.db_cursor: image_list.append(row) target_image = image_list[image_index] if image_name is not None: if str(target_image["name"]) != str(image_name): return HttpResponse(json.dumps({'response_code': 1, 'message': '%s %s' % (lno(MODID), "Image name (%s) from index (%s) does not match supplied image name (%s) please check your request and try again." % (target_image["name"], image_index, image_name))})) if cloud_name is not None: if str(target_image["cloud_name"]) != str(cloud_name): return HttpResponse(json.dumps({'response_code': 1, 'message': '%s %s' % (lno(MODID), "cloud name (%s) from index (%s) does not match supplied cloud name (%s) please check your request and try again." % (target_image["cloud_name"], image_index, cloud_name))})) else: # we have at least an image name, lets build the where clause where = "name='%s'" % image_name if image_checksum is not None: where = where + " and checksum='%s'" % image_checksum if image_date is not None: where = where + " and created_at like '%s'" % image_date if cloud_name is not None: where = where + " and cloud_name='%s'" % cloud_name sql = "select * from cloud_images where %s" % where rc, msg, image_list = config.db_query(IMAGES, where=where) if len(image_list) > 1: return HttpResponse(json.dumps({'response_code': 1, 'message': '%s %s' % (lno(MODID), "Unable to uniquely identify target image with given parameters, %s images matched." 
% len(image_list))})) elif len(image_list) == 0: return HttpResponse(json.dumps({'response_code': 1, 'message': '%s %s' % (lno(MODID), "No images matched sepcified parameters, please check your request and try again.")})) else: target_image = image_list[0] # we have target image, lets delete it where_clause = "group_name='%s' and cloud_name='%s'" % (target_image["group_name"], target_image["cloud_name"]) rc, msg, cloud_list = config.db_query(CLOUDS, where=where_clause) cloud = cloud_list[0] result = delete_image(cloud, target_image["id"]) if result[0] == 0: # Successful delete, now delete it from csv2 database - build delete statement sql ="delete from cloud_images where group_name='%s' and cloud_name='%s' and id='%s';" % (target_image["group_name"], target_image["cloud_name"], target_image["id"]) try: config.db_execute(sql) except Exction as exc: context = { "response_code": 1, "message": "Failed to execute delete for image %s: %s" % (target_image["name"], exc) } return HttpResponse(json.dumps(context)) context = { "response_code": 0, "message": "Image %s deleted successfully" % target_image["name"] } return HttpResponse(json.dumps(context)) else: return HttpResponse(json.dumps({'response_code': 1, 'message': '%s %s: %s' % (lno(MODID), "Delete failed...", result[1])})) else: #Not a post request, render image page again return None @silkp(name='Image Upload') @requires_csrf_token def upload(request, group_name=None): config.db_open() IMAGES = "cloud_images" IMAGE_TX = "csv2_image_transactions" CLOUDS = "csv2_clouds" CACHE_IMAGES = "csv2_image_cache" rc, qmsg, active_user = set_user_groups(config, request, super_user=False) if rc != 0: config.db_close() #return render(request, 'glintwebui/images.html', {'response_code': 1, 'message': '%s %s' % (lno(MODID), msg)}) return None try: image_file = request.FILES['myfile'] except Exception: # no file means it's not a POST or it's an upload by URL image_file = False if request.method == 'POST' and image_file: 
logger.info("File to upload: %s" % image_file.name) if group_name is None: # need to figure out where to get group name group_name = active_user.active_group #process image upload image_file = request.FILES['myfile'] file_path = config.categories["glintPoller.py"]["image_cache_dir"] + image_file.name # This file will have to be renamed with the checksum after uploading to a cloud #before we save it locally let us check if it is already in the repos cloud_name_list = request.POST.getlist('clouds') if len(cloud_name_list)==1 and "," in cloud_name_list[0]: # could be a cli command packaged as a string, cloud_name_list = cloud_name_list[0].replace(" ", "").split(",") where_clause = "name='%s' and group_name='%s'" % (image_file.name, group_name) rc, qmsg, image_list = config.db_query(IMAGES, where=where_clause) bad_clouds = [] if len(image_list) > 0: #we've got some images by this name already lets see if any are in the target clouds for image in image_list: if image["cloud_name"] in cloud_name_list: bad_clouds.append(image["cloud_name"]) if len(bad_clouds) > 0: for cloud in bad_clouds: if cloud in cloud_name_list: cloud_name_list.remove(cloud) msg = ("Upload failed for one or more projects because the image name was already in use.") if len(cloud_name_list) == 0: #if we have eliminated all the target clouds, return with error message msg = "Upload failed to all target projects because the image name was already in use." 
where_clause = "group_name='%s' and cloud_type='%s'" % (group_name, "openstack") rc, qmsg, cloud_list = config.db_query(CLOUDS, where=where_clause) context = { 'group_name': group_name, 'cloud_list': cloud_list, 'max_repos': len(cloud_list), 'redirect': "false", #view agnostic data 'active_user': active_user.username, 'active_group': active_user.active_group, 'user_groups': active_user.user_groups, 'response_code': 1, 'message': msg, 'is_superuser': active_user.is_superuser, 'version': config.get_version() } config.db_close() return render(request, 'glintwebui/upload_image.html', context) #And finally before we save locally double check that file doesn't already exist if os.path.exists(file_path): # Filename exists locally, lets add some random suffix and check again suffix = 1 new_path = file_path + str(suffix) while os.path.exists(new_path): suffix = suffix + 1 new_path = file_path + str(suffix) file_path = new_path disk_format = request.POST.get('disk_format') with open(file_path, 'wb+') as destination: for chunk in image_file.chunks(): destination.write(chunk) # Now we have a source file we need to upload it to one of the clouds to get a checksum so we can queue up transfer requests # get a cloud of of the list, first one is fine target_cloud_name = cloud_name_list[0] # get the cloud row for this cloud where_clause = "group_name='%s' and cloud_name='%s'" % (group_name, target_cloud_name) rc, qmsg, target_cloud_list = config.db_query(CLOUDS, where=where_clause) try: target_cloud = target_cloud_list[0] except IndexError: logger.error("Unable to find target cloud: %s" % target_cloud_name) msg = "Unable to find target cloud: %s" % target_cloud_name where_clause = "group_name='%s' and cloud_type='%s'" % (group_name, "openstack") rc, qmsg, cloud_list = config.db_query(CLOUDS, where=where_clause) context = { 'group_name': group_name, 'cloud_list': cloud_list, 'max_repos': len(cloud_list), 'redirect': "false", #view agnostic data 'active_user': active_user.username, 
'active_group': active_user.active_group, 'user_groups': active_user.user_groups, 'response_code': 1, 'message': msg, 'is_superuser': active_user.is_superuser, 'version': config.get_version() } config.db_close() return render(request, 'glintwebui/upload_image.html', context) logger.info("uploading image %s to cloud %s" % (image_file.name, target_cloud_name)) image = upload_image(target_cloud, None, image_file.name, file_path, disk_format=request.POST.get('disk_format')) if image is False: logger.error("Upload failed for image %s" % image_file.name) msg = "Upload failed for image: %s" % image_file.name where_clause = "group_name='%s' and cloud_type='%s'" % (group_name, "openstack") rc, qmsg, cloud_list = config.db_query(CLOUDS, where=where_clause) context = { 'group_name': group_name, 'cloud_list': cloud_list, 'max_repos': len(cloud_list), 'redirect': "false", #view agnostic data 'active_user': active_user.username, 'active_group': active_user.active_group, 'user_groups': active_user.user_groups, 'response_code': 1, 'message': msg, 'is_superuser': active_user.is_superuser, 'version': config.get_version() } config.db_close() return render(request, 'glintwebui/upload_image.html', context) logger.info("Upload result: %s" % image) # add it to the cloud_images table if image.size == "" or image.size is None: size = 0 else: size = image.size created_datetime = datetime.datetime.now() created_time = created_datetime.strftime("%Y-%m-%d %H:%M:%S") new_image_dict = { 'group_name': target_cloud["group_name"], 'cloud_name': target_cloud["cloud_name"], 'container_format': image.container_format, 'checksum': image.checksum, 'cloud_type': "openstack", 'disk_format': image.disk_format, 'min_ram': image.min_ram, 'id': image.id, 'size': size, 'visibility': image.visibility, 'min_disk': image.min_disk, 'name': image.name, 'created_at': created_time, 'last_updated': time.time() } img_dict, unmapped = map_attributes(src="os_images", dest="csv2", attr_dict=new_image_dict, config=config) 
if unmapped: logging.error("Unmapped attributes found during mapping, discarding:") logging.error(unmapped) config.db_merge(IMAGES, img_dict) config.db_commit() # now we have the os image object, lets rename the file and add it to out cache cache_path = config.categories["glintPoller.py"]["image_cache_dir"] + image_file.name + "---" + image.checksum os.rename(file_path, cache_path) cache_dict = { "image_name": image_file.name, "checksum": image.checksum, "container_format": image.container_format, "disk_format": image.disk_format } config.db_merge(CACHE_IMAGES, cache_dict) config.db_commit() #now we need to queue transfer requests for the remaining clouds cloud_name_list.remove(target_cloud_name) if len(cloud_name_list) == 0: # we are done and can return successfully where_clause = "group_name='%s' and cloud_type='%s'" % (group_name, "openstack") rc, qmsg, cloud_list = config.db_query(CLOUDS, where=where_clause) context = { 'group_name': group_name, 'cloud_list': cloud_list, 'max_repos': len(cloud_list), 'redirect': "true", #view agnostic data 'active_user': active_user.username, 'active_group': active_user.active_group, 'user_groups': active_user.user_groups, 'response_code': rc, 'message': "Upload Successful: image %s uploaded to %s-%s" % (image.name, group_name, target_cloud_name), 'is_superuser': active_user.is_superuser, 'version': config.get_version() } config.db_close() return render(request, 'glintwebui/upload_image.html', context) else: # loop over remaining clouds and queue transfers logger.info("Queuing additonal uploads...") for cloud in cloud_name_list: tx_id = generate_tx_id() tx_req = { "tx_id": tx_id, "status": "pending", "target_group_name": group_name, "target_cloud_name": cloud, "image_name": image_file.name, "image_id": image.id, "checksum": image.checksum, "requester": active_user.username, } config.db_merge(IMAGE_TX, tx_req) config.db_commit() logger.info("Transfer id:%s queued" % tx_id) tx_request.apply_async((tx_id,), queue='tx_requests') 
#return to project details page with message msg="Uploads successfully queued, returning to images..." where_clause = "group_name='%s' and cloud_type='%s'" % (group_name, "openstack") rc, qmsg, cloud_list = config.db_query(CLOUDS, where=where_clause) context = { 'group_name': group_name, 'cloud_list': cloud_list, 'max_repos': len(cloud_list), 'redirect': "true", #view agnostic data 'active_user': active_user.username, 'active_group': active_user.active_group, 'user_groups': active_user.user_groups, 'response_code': rc, 'message': msg, 'is_superuser':
1.29456))^Ts_Rank(correlation(IndNeutralize(close,IndClass.industry), adv50, 17.8256), 17.9171)) * -1) # Alpha#71 max(Ts_Rank(decay_linear(correlation(Ts_Rank(close, 3.43976), Ts_Rank(adv180,12.0647), 18.0175), 4.20501), 15.6948), Ts_Rank(decay_linear((rank(((low + open) - (vwap +vwap)))^2), 16.4662), 4.4388)) def alpha071(self): adv180 = sma(self.volume, 180) p1 = ts_rank(decay_linear(correlation(ts_rank(self.close, 3), ts_rank(adv180, 12), 18).to_frame(), 4).CLOSE, 16) p2 = ts_rank( decay_linear((rank(((self.low + self.open) - (self.vwap + self.vwap))).pow(2)).to_frame(), 16).CLOSE, 4) df = pd.DataFrame({'p1': p1, 'p2': p2}) df.at[df['p1'] >= df['p2'], 'max'] = df['p1'] df.at[df['p2'] >= df['p1'], 'max'] = df['p2'] return df['max'] # return max(ts_rank(decay_linear(correlation(ts_rank(self.close, 3), ts_rank(adv180,12), 18).to_frame(), 4).CLOSE, 16), ts_rank(decay_linear((rank(((self.low + self.open) - (self.vwap +self.vwap))).pow(2)).to_frame(), 16).CLOSE, 4)) # Alpha#72 (rank(decay_linear(correlation(((high + low) / 2), adv40, 8.93345), 10.1519)) /rank(decay_linear(correlation(Ts_Rank(vwap, 3.72469), Ts_Rank(volume, 18.5188), 6.86671),2.95011))) def alpha072(self): adv40 = sma(self.volume, 40) return (rank(decay_linear(correlation(((self.high + self.low) / 2), adv40, 9).to_frame(), 10).CLOSE) / rank( decay_linear(correlation(ts_rank(self.vwap, 4), ts_rank(self.volume, 19), 7).to_frame(), 3).CLOSE)) # Alpha#73 (max(rank(decay_linear(delta(vwap, 4.72775), 2.91864)),Ts_Rank(decay_linear(((delta(((open * 0.147155) + (low * (1 - 0.147155))), 2.03608) / ((open *0.147155) + (low * (1 - 0.147155)))) * -1), 3.33829), 16.7411)) * -1) def alpha073(self): p1 = rank(decay_linear(delta(self.vwap, 5).to_frame(), 3).CLOSE) p2 = ts_rank(decay_linear(((delta(((self.open * 0.147155) + (self.low * (1 - 0.147155))), 2) / ( (self.open * 0.147155) + (self.low * (1 - 0.147155)))) * -1).to_frame(), 3).CLOSE, 17) df = pd.DataFrame({'p1': p1, 'p2': p2}) df.at[df['p1'] >= df['p2'], 'max'] 
= df['p1'] df.at[df['p2'] >= df['p1'], 'max'] = df['p2'] return -1 * df['max'] # return (max(rank(decay_linear(delta(self.vwap, 5).to_frame(), 3).CLOSE),ts_rank(decay_linear(((delta(((self.open * 0.147155) + (self.low * (1 - 0.147155))), 2) / ((self.open *0.147155) + (self.low * (1 - 0.147155)))) * -1).to_frame(), 3).CLOSE, 17)) * -1) # Alpha#74 ((rank(correlation(close, sum(adv30, 37.4843), 15.1365)) <rank(correlation(rank(((high * 0.0261661) + (vwap * (1 - 0.0261661)))), rank(volume), 11.4791)))* -1) def alpha074(self): adv30 = sma(self.volume, 30) return ((rank(correlation(self.close, sma(adv30, 37), 15)) < rank( correlation(rank(((self.high * 0.0261661) + (self.vwap * (1 - 0.0261661)))), rank(self.volume), 11))) * -1) # Alpha#75 (rank(correlation(vwap, volume, 4.24304)) < rank(correlation(rank(low), rank(adv50),12.4413))) def alpha075(self): adv50 = sma(self.volume, 50) return (rank(correlation(self.vwap, self.volume, 4)) < rank(correlation(rank(self.low), rank(adv50), 12))) # Alpha#76 (max(rank(decay_linear(delta(vwap, 1.24383), 11.8259)),Ts_Rank(decay_linear(Ts_Rank(correlation(IndNeutralize(low, IndClass.sector), adv81,8.14941), 19.569), 17.1543), 19.383)) * -1) # Alpha#77 min(rank(decay_linear(((((high + low) / 2) + high) - (vwap + high)), 20.0451)),rank(decay_linear(correlation(((high + low) / 2), adv40, 3.1614), 5.64125))) def alpha077(self): adv40 = sma(self.volume, 40) p1 = rank( decay_linear(((((self.high + self.low) / 2) + self.high) - (self.vwap + self.high)).to_frame(), 20).CLOSE) p2 = rank(decay_linear(correlation(((self.high + self.low) / 2), adv40, 3).to_frame(), 6).CLOSE) df = pd.DataFrame({'p1': p1, 'p2': p2}) df.at[df['p1'] >= df['p2'], 'min'] = df['p2'] df.at[df['p2'] >= df['p1'], 'min'] = df['p1'] return df['min'] # return min(rank(decay_linear(((((self.high + self.low) / 2) + self.high) - (self.vwap + self.high)).to_frame(), 20).CLOSE),rank(decay_linear(correlation(((self.high + self.low) / 2), adv40, 3).to_frame(), 6).CLOSE)) # Alpha#78 
(rank(correlation(sum(((low * 0.352233) + (vwap * (1 - 0.352233))), 19.7428),sum(adv40, 19.7428), 6.83313))^rank(correlation(rank(vwap), rank(volume), 5.77492))) def alpha078(self): adv40 = sma(self.volume, 40) return (rank( correlation(ts_sum(((self.low * 0.352233) + (self.vwap * (1 - 0.352233))), 20), ts_sum(adv40, 20), 7)).pow( rank(correlation(rank(self.vwap), rank(self.volume), 6)))) # Alpha#79 (rank(delta(IndNeutralize(((close * 0.60733) + (open * (1 - 0.60733))),IndClass.sector), 1.23438)) < rank(correlation(Ts_Rank(vwap, 3.60973), Ts_Rank(adv150,9.18637), 14.6644))) # Alpha#80 ((rank(Sign(delta(IndNeutralize(((open * 0.868128) + (high * (1 - 0.868128))),IndClass.industry), 4.04545)))^Ts_Rank(correlation(high, adv10, 5.11456), 5.53756)) * -1) # Alpha#81 ((rank(Log(product(rank((rank(correlation(vwap, sum(adv10, 49.6054),8.47743))^4)), 14.9655))) < rank(correlation(rank(vwap), rank(volume), 5.07914))) * -1) def alpha081(self): adv10 = sma(self.volume, 10) return ((rank(log(product(rank((rank(correlation(self.vwap, ts_sum(adv10, 50), 8)).pow(4))), 15))) < rank( correlation(rank(self.vwap), rank(self.volume), 5))) * -1) # Alpha#82 (min(rank(decay_linear(delta(open, 1.46063), 14.8717)),Ts_Rank(decay_linear(correlation(IndNeutralize(volume, IndClass.sector), ((open * 0.634196) +(open * (1 - 0.634196))), 17.4842), 6.92131), 13.4283)) * -1) # Alpha#83 ((rank(delay(((high - low) / (sum(close, 5) / 5)), 2)) * rank(rank(volume))) / (((high -low) / (sum(close, 5) / 5)) / (vwap - close))) def alpha083(self): return ((rank(delay(((self.high - self.low) / (ts_sum(self.close, 5) / 5)), 2)) * rank(rank(self.volume))) / ( ((self.high - self.low) / (ts_sum(self.close, 5) / 5)) / (self.vwap - self.close))) # Alpha#84 SignedPower(Ts_Rank((vwap - ts_max(vwap, 15.3217)), 20.7127), delta(close,4.96796)) def alpha084(self): return pow(ts_rank((self.vwap - ts_max(self.vwap, 15)), 21), delta(self.close, 5)) # Alpha#85 (rank(correlation(((high * 0.876703) + (close * (1 - 0.876703))), 
adv30,9.61331))^rank(correlation(Ts_Rank(((high + low) / 2), 3.70596), Ts_Rank(volume, 10.1595),7.11408))) def alpha085(self): adv30 = sma(self.volume, 30) return (rank(correlation(((self.high * 0.876703) + (self.close * (1 - 0.876703))), adv30, 10)).pow( rank(correlation(ts_rank(((self.high + self.low) / 2), 4), ts_rank(self.volume, 10), 7)))) # Alpha#86 ((Ts_Rank(correlation(close, sum(adv20, 14.7444), 6.00049), 20.4195) < rank(((open+ close) - (vwap + open)))) * -1) def alpha086(self): adv20 = sma(self.volume, 20) return ((ts_rank(correlation(self.close, sma(adv20, 15), 6), 20) < rank( ((self.open + self.close) - (self.vwap + self.open)))) * -1) # Alpha#87 (max(rank(decay_linear(delta(((close * 0.369701) + (vwap * (1 - 0.369701))),1.91233), 2.65461)), Ts_Rank(decay_linear(abs(correlation(IndNeutralize(adv81,IndClass.industry), close, 13.4132)), 4.89768), 14.4535)) * -1) # Alpha#88 min(rank(decay_linear(((rank(open) + rank(low)) - (rank(high) + rank(close))),8.06882)), Ts_Rank(decay_linear(correlation(Ts_Rank(close, 8.44728), Ts_Rank(adv60,20.6966), 8.01266), 6.65053), 2.61957)) def alpha088(self): adv60 = sma(self.volume, 60) p1 = rank(decay_linear(((rank(self.open) + rank(self.low)) - (rank(self.high) + rank(self.close))).to_frame(), 8).CLOSE) p2 = ts_rank(decay_linear(correlation(ts_rank(self.close, 8), ts_rank(adv60, 21), 8).to_frame(), 7).CLOSE, 3) df = pd.DataFrame({'p1': p1, 'p2': p2}) df.at[df['p1'] >= df['p2'], 'min'] = df['p2'] df.at[df['p2'] >= df['p1'], 'min'] = df['p1'] return df['min'] # return min(rank(decay_linear(((rank(self.open) + rank(self.low)) - (rank(self.high) + rank(self.close))).to_frame(),8).CLOSE), ts_rank(decay_linear(correlation(ts_rank(self.close, 8), ts_rank(adv60,20.6966), 8).to_frame(), 7).CLOSE, 3)) # Alpha#89 (Ts_Rank(decay_linear(correlation(((low * 0.967285) + (low * (1 - 0.967285))), adv10,6.94279), 5.51607), 3.79744) - Ts_Rank(decay_linear(delta(IndNeutralize(vwap,IndClass.industry), 3.48158), 10.1466), 15.3012)) # Alpha#90 
((rank((close - ts_max(close, 4.66719)))^Ts_Rank(correlation(IndNeutralize(adv40,IndClass.subindustry), low, 5.38375), 3.21856)) * -1) # Alpha#91 ((Ts_Rank(decay_linear(decay_linear(correlation(IndNeutralize(close,IndClass.industry), volume, 9.74928), 16.398), 3.83219), 4.8667) -rank(decay_linear(correlation(vwap, adv30, 4.01303), 2.6809))) * -1) # Alpha#92 min(Ts_Rank(decay_linear(((((high + low) / 2) + close) < (low + open)), 14.7221),18.8683), Ts_Rank(decay_linear(correlation(rank(low), rank(adv30), 7.58555), 6.94024),6.80584)) def alpha092(self): adv30 = sma(self.volume, 30) p1 = ts_rank( decay_linear(((((self.high + self.low) / 2) + self.close) < (self.low + self.open)).to_frame(), 15).CLOSE, 19) p2 = ts_rank(decay_linear(correlation(rank(self.low), rank(adv30), 8).to_frame(), 7).CLOSE, 7) df = pd.DataFrame({'p1': p1, 'p2': p2}) df.at[df['p1'] >= df['p2'], 'min'] = df['p2'] df.at[df['p2'] >= df['p1'], 'min'] = df['p1'] return df['min'] # return min(ts_rank(decay_linear(((((self.high + self.low) / 2) + self.close) < (self.low + self.open)).to_frame(), 15).CLOSE,19), ts_rank(decay_linear(correlation(rank(self.low), rank(adv30), 8).to_frame(), 7).CLOSE,7)) # Alpha#93 (Ts_Rank(decay_linear(correlation(IndNeutralize(vwap, IndClass.industry), adv81,17.4193), 19.848), 7.54455) / rank(decay_linear(delta(((close * 0.524434) + (vwap * (1 -0.524434))), 2.77377), 16.2664))) # Alpha#94 ((rank((vwap - ts_min(vwap, 11.5783)))^Ts_Rank(correlation(Ts_Rank(vwap,19.6462), Ts_Rank(adv60, 4.02992), 18.0926), 2.70756)) * -1) def alpha094(self): adv60 = sma(self.volume, 60) return ((rank((self.vwap - ts_min(self.vwap, 12))).pow( ts_rank(correlation(ts_rank(self.vwap, 20), ts_rank(adv60, 4), 18), 3)) * -1)) # Alpha#95 (rank((open - ts_min(open, 12.4105))) < Ts_Rank((rank(correlation(sum(((high + low)/ 2), 19.1351), sum(adv40, 19.1351), 12.8742))^5), 11.7584)) def alpha095(self): adv40 = sma(self.volume, 40) return (rank((self.open - ts_min(self.open, 12))) < ts_rank( 
(rank(correlation(sma(((self.high + self.low) / 2), 19), sma(adv40, 19), 13)).pow(5)), 12)) # Alpha#96 (max(Ts_Rank(decay_linear(correlation(rank(vwap), rank(volume), 3.83878),4.16783), 8.38151), Ts_Rank(decay_linear(Ts_ArgMax(correlation(Ts_Rank(close, 7.45404),Ts_Rank(adv60, 4.13242), 3.65459), 12.6556), 14.0365), 13.4143)) * -1) def alpha096(self): adv60 = sma(self.volume, 60) p1 = ts_rank(decay_linear(correlation(rank(self.vwap), rank(self.volume).to_frame(), 4), 4).CLOSE, 8) p2 = ts_rank( decay_linear(ts_argmax(correlation(ts_rank(self.close, 7), ts_rank(adv60, 4), 4), 13).to_frame(), 14).CLOSE, 13) df = pd.DataFrame({'p1': p1, 'p2': p2}) df.at[df['p1'] >= df['p2'], 'max'] = df['p1'] df.at[df['p2'] >= df['p1'], 'max'] = df['p2'] return -1 * df['max'] # return (max(ts_rank(decay_linear(correlation(rank(self.vwap), rank(self.volume).to_frame(), 4),4).CLOSE, 8), ts_rank(decay_linear(ts_argmax(correlation(ts_rank(self.close, 7),ts_rank(adv60, 4), 4), 13).to_frame(), 14).CLOSE, 13)) * -1) # Alpha#97 ((rank(decay_linear(delta(IndNeutralize(((low * 0.721001) + (vwap * (1 - 0.721001))),IndClass.industry), 3.3705), 20.4523)) - Ts_Rank(decay_linear(Ts_Rank(correlation(Ts_Rank(low,7.87871), Ts_Rank(adv60, 17.255), 4.97547), 18.5925), 15.7152), 6.71659)) * -1) # Alpha#98 (rank(decay_linear(correlation(vwap, sum(adv5, 26.4719), 4.58418), 7.18088)) -rank(decay_linear(Ts_Rank(Ts_ArgMin(correlation(rank(open), rank(adv15), 20.8187), 8.62571),6.95668), 8.07206))) def alpha098(self): adv5 = sma(self.volume, 5) adv15 = sma(self.volume, 15) return (rank(decay_linear(correlation(self.vwap, sma(adv5, 26), 5).to_frame(), 7).CLOSE) - rank( decay_linear(ts_rank(ts_argmin(correlation(rank(self.open), rank(adv15), 21), 9), 7).to_frame(), 8).CLOSE)) # Alpha#99 ((rank(correlation(sum(((high + low) / 2), 19.8975), sum(adv60, 19.8975), 8.8136)) <rank(correlation(low, volume, 6.28259))) * -1) def alpha099(self): adv60 = sma(self.volume, 60) return ((rank(correlation(ts_sum(((self.high + 
self.low) / 2), 20), ts_sum(adv60, 20), 9)) < rank( correlation(self.low, self.volume, 6))) * -1) # Alpha#100 (0 - (1 * (((1.5 * scale(indneutralize(indneutralize(rank(((((close - low) - (high -close)) / (high - low)) * volume)), IndClass.subindustry), IndClass.subindustry))) -scale(indneutralize((correlation(close, rank(adv20), 5) - rank(ts_argmin(close, 30))),IndClass.subindustry))) * (volume / adv20)))) # Alpha#101 ((close - open) / ((high - low) + .001)) def alpha101(self): return (self.close - self.open) / ((self.high - self.low) + 0.001) def get_alpha(df): stock = Alphas(df) df['alpha001'] = stock.alpha001() df['alpha002'] = stock.alpha002() df['alpha003'] = stock.alpha003() df['alpha004'] = stock.alpha004() df['alpha005'] = stock.alpha005() df['alpha006'] = stock.alpha006() df['alpha007'] = stock.alpha007() df['alpha008'] = stock.alpha008() df['alpha009'] = stock.alpha009() df['alpha010'] = stock.alpha010() df['alpha011'] = stock.alpha011() df['alpha012'] = stock.alpha012() df['alpha013'] = stock.alpha013() df['alpha014'] = stock.alpha014() df['alpha015'] = stock.alpha015() df['alpha016'] = stock.alpha016()
from datetime import datetime from enum import IntEnum from typing import List, Optional, Union from .channel import Channel, ChannelType from .member import Member from .misc import MISSING, DictSerializerMixin, Snowflake from .team import Application from .user import User class MessageType(IntEnum): """An enumerable object representing the types of messages.""" DEFAULT = 0 RECIPIENT_ADD = 1 RECIPIENT_REMOVE = 2 CALL = 3 CHANNEL_NAME_CHANGE = 4 CHANNEL_ICON_CHANGE = 5 CHANNEL_PINNED_MESSAGE = 6 GUILD_MEMBER_JOIN = 7 USER_PREMIUM_GUILD_SUBSCRIPTION = 8 USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_1 = 9 USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_2 = 10 USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_3 = 11 CHANNEL_FOLLOW_ADD = 12 GUILD_DISCOVERY_DISQUALIFIED = 14 GUILD_DISCOVERY_REQUALIFIED = 15 GUILD_DISCOVERY_GRACE_PERIOD_INITIAL_WARNING = 16 GUILD_DISCOVERY_GRACE_PERIOD_FINAL_WARNING = 17 THREAD_CREATED = 18 REPLY = 19 APPLICATION_COMMAND = 20 THREAD_STARTER_MESSAGE = 21 GUILD_INVITE_REMINDER = 22 CONTEXT_MENU_COMMAND = 23 class MessageActivity(DictSerializerMixin): """A class object representing the activity state of a message. .. note:: ``party_id`` is ambiguous -- Discord poorly documented this. :) We assume it's for game rich presence invites? i.e. : Phasmophobia and Call of Duty. :ivar str type: The message activity type. :ivar Optional[Snowflake] party_id?: The party ID of the activity. """ __slots__ = ("_json", "type", "party_id") def __init__(self, **kwargs): super().__init__(**kwargs) self.party_id = Snowflake(self.party_id) if self._json.get("party_id") else None class MessageReference(DictSerializerMixin): """ A class object representing the "referenced"/replied message. .. note:: All of the attributes in this class are optionals because a message can potentially never be referenced. :ivar Optional[Snowflake] message_id?: The ID of the referenced message. :ivar Optional[Snowflake] channel_id?: The channel ID of the referenced message. 
:ivar Optional[Snowflake] guild_id?: The guild ID of the referenced message. :ivar Optional[bool] fail_if_not_exists?: Whether the message reference exists. """ __slots__ = ("_json", "message_id", "channel_id", "guild_id", "fail_if_not_exists") def __init__(self, **kwargs): super().__init__(**kwargs) self.message_id = Snowflake(self.message_id) if self._json.get("message_id") else None self.channel_id = Snowflake(self.channel_id) if self._json.get("channel_id") else None self.guild_id = Snowflake(self.guild_id) if self._json.get("guild_id") else None class Attachment(DictSerializerMixin): """ A class object representing an attachment in a message. .. note:: ``height`` and ``width`` have values based off of ``content_type``, which requires it to be a media file with viewabiltity as a photo, animated photo, GIF and/or video. If `ephemeral` is given, the attachments will automatically be removed after a set period of time. In the case of regular messages, they're available as long as the message associated with the attachment exists. :ivar int id: The ID of the attachment. :ivar str filename: The name of the attachment file. :ivar Optional[str] description?: The description of the file. :ivar Optional[str] content_type?: The type of attachment file. :ivar int size: The size of the attachment file. :ivar str url: The CDN URL of the attachment file. :ivar str proxy_url: The proxied/cached CDN URL of the attachment file. :ivar Optional[int] height?: The height of the attachment file. :ivar Optional[int] width?: The width of the attachment file. :ivar Optional[bool] ephemeral: Whether the attachment is ephemeral. 
""" __slots__ = ( "_client", "_json", "id", "filename", "content_type", "size", "url", "proxy_url", "height", "width", "ephemeral", ) def __init__(self, **kwargs): super().__init__(**kwargs) self.id = Snowflake(self.id) if self._json.get("id") else None class MessageInteraction(DictSerializerMixin): """ A class object that resembles the interaction used to generate the associated message. :ivar Snowflake id: ID of the interaction. :ivar int type: Type of interaction. :ivar str name: Name of the application command. :ivar User user: The user who invoked the interaction. """ # TODO: document member attr. __slots__ = ("_json", "id", "type", "name", "user", "member") def __init__(self, **kwargs): super().__init__(**kwargs) self.id = Snowflake(self.id) if self._json.get("id") else None self.user = User(**self.user) if self._json.get("user") else None class ChannelMention(DictSerializerMixin): """ A class object that resembles the mention of a channel in a guild. :ivar Snowflake id: The ID of the channel. :ivar Snowflake guild_id: The ID of the guild that contains the channel. :ivar int type: The channel type. :ivar str name: The name of the channel. """ __slots__ = ("_json", "id", "type", "name", "guild_id") def __init__(self, **kwargs): super().__init__(**kwargs) self.id = Snowflake(self.id) if self._json.get("id") else None self.guild_id = Snowflake(self.guild_id) if self._json.get("guild_id") else None self.type = ChannelType(self.type) class Message(DictSerializerMixin): """ A class object representing a message. :ivar Snowflake id: ID of the message. :ivar Snowflake channel_id: ID of the channel the message was sent in :ivar Optional[Snowflake] guild_id?: ID of the guild the message was sent in, if it exists. :ivar User author: The author of the message. :ivar Optional[Member] member?: The member object associated with the author, if any. :ivar str content: Message contents. :ivar datetime timestamp: Timestamp denoting when the message was sent. 
:ivar Optional[datetime] edited_timestamp?: Timestamp denoting when the message was edited, if any. :ivar bool tts: Status dictating if this was a TTS message or not. :ivar bool mention_everyone: Status dictating of this message mentions everyone :ivar Optional[List[Union[Member, User]]] mentions?: Array of user objects with an additional partial member field. :ivar Optional[List[str]] mention_roles?: Array of roles mentioned in this message :ivar Optional[List[ChannelMention]] mention_channels?: Channels mentioned in this message, if any. :ivar List[Attachment] attachments: An array of attachments :ivar List[Embed] embeds: An array of embeds :ivar Optional[List[ReactionObject]] reactions?: Reactions to the message. :ivar Union[int, str] nonce: Used for message validation :ivar bool pinned: Whether this message is pinned. :ivar Optional[Snowflake] webhook_id?: Webhook ID if the message is generated by a webhook. :ivar int type: Type of message :ivar Optional[MessageActivity] activity?: Message activity object that's sent by Rich Presence :ivar Optional[Application] application?: Application object that's sent by Rich Presence :ivar Optional[MessageReference] message_reference?: Data showing the source of a message (crosspost, channel follow, add, pin, or replied message) :ivar Optional[Any] allowed_mentions: The allowed mentions of roles attached in the message. :ivar int flags: Message flags :ivar Optional[MessageInteraction] interaction?: Message interaction object, if the message is sent by an interaction. :ivar Optional[Channel] thread?: The thread that started from this message, if any, with a thread member object embedded. :ivar Optional[Union[Component, List[Component]]] components?: Components associated with this message, if any. :ivar Optional[List[PartialSticker]] sticker_items?: An array of message sticker item objects, if sent with them. :ivar Optional[List[Sticker]] stickers?: Array of sticker objects sent with the message if any. Deprecated. 
""" __slots__ = ( "_json", "id", "channel_id", "guild_id", "author", "member", "content", "timestamp", "edited_timestamp", "tts", "mention_everyone", "mentions", "mention_roles", "mention_channels", "attachments", "embeds", "reactions", "nonce", "pinned", "webhook_id", "type", "activity", "application", "application_id", "message_reference", "allowed_mentions", "flags", "referenced_message", "interaction", "thread", "components", "sticker_items", "stickers", "_client", ) def __init__(self, **kwargs): super().__init__(**kwargs) self.id = Snowflake(self.id) if self._json.get("id") else None self.channel_id = Snowflake(self.channel_id) if self._json.get("channel_id") else None self.guild_id = Snowflake(self.guild_id) if self._json.get("guild_id") else None self.webhook_id = Snowflake(self.webhook_id) if self._json.get("webhook_id") else None self.application_id = ( Snowflake(self.application_id) if self._json.get("application_id") else None ) self.timestamp = ( datetime.fromisoformat(self._json.get("timestamp")) if self._json.get("timestamp") else datetime.utcnow() ) self.author = User(**self._json.get("author")) if self._json.get("author") else None self.member = ( Member( **self._json.get("member"), _client=self._client, user=self.author._json, ) if self._json.get("member") else None ) self.type = MessageType(self.type) if self._json.get("type") else None self.edited_timestamp = ( datetime.fromisoformat(self._json.get("edited_timestamp")) if self._json.get("edited_timestamp") else datetime.utcnow() ) self.mention_channels = ( [ChannelMention(**mention) for mention in self.mention_channels] if self._json.get("mention_channels") else None ) self.attachments = ( [Attachment(**attachment) for attachment in self.attachments] if self._json.get("attachments") else None ) self.embeds = ( [ Embed(**embed) if isinstance(embed, dict) else Embed(**embed._json) for embed in self.embeds ] if self._json.get("embeds") else None ) self.activity = MessageActivity(**self.activity) if 
self._json.get("activity") else None self.application = ( Application(**self.application) if self._json.get("application") else None ) self.message_reference = ( MessageReference(**self.message_reference) if self._json.get("message_reference") else None ) self.interaction = ( MessageInteraction(**self.interaction) if self._json.get("interaction") else None ) self.thread = Channel(**self.thread) if self._json.get("thread") else None async def get_channel(self) -> Channel: """ Gets the channel where the message was sent. :rtype: Channel """ if not self._client: raise AttributeError("HTTPClient not found!") res = await self._client.get_channel(channel_id=int(self.channel_id)) return Channel(**res, _client=self._client) async def get_guild(self): """ Gets the guild where the message was sent. :rtype: Guild """ if not self._client: raise AttributeError("HTTPClient not found!") from .guild import Guild res = await self._client.get_guild(guild_id=int(self.guild_id)) return Guild(**res, _client=self._client) async def delete(self, reason: Optional[str] = None) -> None: """ Deletes the message. :param reason: Optional reason to show up in the audit log. Defaults to `None`. :type reason: Optional[str] """ if not self._client: raise AttributeError("HTTPClient not found!") await self._client.delete_message( message_id=int(self.id), channel_id=int(self.channel_id), reason=reason ) async def edit( self, content: Optional[str] = MISSING, *, tts:
import unittest from typen._decorators import ( enforce_type_hints, strict_type_hints, ) from typen.exceptions import ( ParameterTypeError, ReturnTypeError, UnspecifiedParameterTypeError, UnspecifiedReturnTypeError, ) # Note: Type checking is found in test_enforcer.py class TestEnforceTypeHints(unittest.TestCase): def test_enforce_type_hints_vanilla(self): def example_function(a, b): return a+b new_func = enforce_type_hints(example_function) self.assertEqual(new_func(1, 2), 3) def test_enforce_type_hints_parameters_with_hints(self): def example_function(a: int, b: int) -> int: return a+b new_func = enforce_type_hints(example_function) self.assertEqual(new_func(1, 2), 3) with self.assertRaises(ParameterTypeError) as err: new_func(1.0, 2) self.assertEqual( "The 'a' parameter of 'example_function' must be <class 'int'>, " "but a value of 1.0 <class 'float'> was specified.", str(err.exception) ) with self.assertRaises(ParameterTypeError) as err: new_func(b=1.0, a=2) self.assertEqual( "The 'b' parameter of 'example_function' must be <class 'int'>, " "but a value of 1.0 <class 'float'> was specified.", str(err.exception) ) def test_enforce_type_hints_return_with_hints(self): def example_function(a) -> int: return a new_func = enforce_type_hints(example_function) self.assertEqual(new_func(1), 1) with self.assertRaises(ReturnTypeError) as err: new_func("a") self.assertEqual( "The return type of 'example_function' must be <class 'int'>, " "but a value of 'a' <class 'str'> was returned.", str(err.exception) ) self.assertEqual("a", err.exception.return_value) def test_enforce_type_hints_return_coercibility(self): def example_function(a: float, b: float) -> float: return a + b new_func = enforce_type_hints(example_function) # Result is not cast to a float result = new_func(2, 3) self.assertEqual(result, 5) self.assertIsInstance(result, int) def test_enforce_type_hints_defaults(self): def example_function(a: int = 5, b: int = 6) -> int: return a + b new_func = 
enforce_type_hints(example_function) self.assertEqual(new_func(), 11) def test_enforce_type_hints_invalid_defaults(self): def example_function(a: int = 0.5, b: int = 6) -> int: return a + b new_func = enforce_type_hints(example_function) with self.assertRaises(ParameterTypeError) as err: new_func() self.assertEqual( "The 'a' parameter of 'example_function' must be <class 'int'>, " "but a value of 0.5 <class 'float'> was specified.", str(err.exception) ) def test_enforce_type_hints_packed_args_vanilla(self): def example_function(a, *args, b=5, c=6): return sum([a, *args, b, c]) new_func = enforce_type_hints(example_function) result = new_func(1, 2, 3, b=3, c=2) self.assertEqual(result, 11) def test_enforce_type_hints_packed_args_hint(self): def example_function(a: int, b: float, *args: str): return str(a) + str(b) + "".join(str(arg) for arg in args) new_func = enforce_type_hints(example_function) result = new_func(1, 0.5, "a", "b", "c") self.assertEqual(result, "10.5abc") result = new_func(2, 3.0) self.assertEqual(result, "23.0") with self.assertRaises(ParameterTypeError) as err: new_func(1, 0.5, "a", "b", "c", 6) self.assertEqual( "The 'args' parameters of 'example_function' must be " "<class 'str'>, but a value of 6 <class 'int'> was specified.", str(err.exception) ) def test_enforce_type_hints_packed_kwargs_vanilla(self): def example_function(a, b, **kwargs): return (a, b, *kwargs.keys(), *kwargs.values()) new_func = enforce_type_hints(example_function) result = new_func(1, 2, y=2, z="v", x=4.5) self.assertEqual( (1, 2, "y", "z", "x", 2, "v", 4.5), result ) def test_enforce_type_hints_packed_kwargs_hint(self): def example_function(a: float, b: float, **kwargs: str): return (a, b, *kwargs.keys(), *kwargs.values()) new_func = enforce_type_hints(example_function) result = new_func(0.5, 1.0, y="a", word="bird", g="c") self.assertEqual( (0.5, 1.0, "y", "word", "g", "a", "bird", "c"), result ) result = new_func(0.5, 2.0) self.assertEqual((0.5, 2.0), result) with 
self.assertRaises(ParameterTypeError) as err: new_func(0.5, 1.0, g="r", word=10) self.assertEqual( "The 'kwargs' keywords of 'example_function' must have values of " "type <class 'str'>, but 'word':10 <class 'int'> was specified.", str(err.exception) ) def test_enforce_type_hints_packed_args_kwargs_vanilla(self): def example_function(*foos, **bars): return sum(foos) >= len(bars) new_func = enforce_type_hints(example_function) self.assertEqual(new_func(1, 2, 3, a="a", b="b", c="c"), True) def test_enforce_type_hints_packed_args_kwargs_hint(self): def example_function(*foos: int, **bars: str) -> bool: return sum(foos) >= len(bars) new_func = enforce_type_hints(example_function) self.assertEqual(new_func(1, 2, 3, a="a", b="b", c="c"), True) with self.assertRaises(ParameterTypeError): new_func(2, 3, 5, d=4) with self.assertRaises(ParameterTypeError): new_func(2, "three", 5, e="e") def test_enforce_type_hints_packed_args_kwargs_method(self): class ExClass: @enforce_type_hints def example_method(self, *foos: int, **bars: str) -> bool: return sum(foos) >= len(bars) inst = ExClass() self.assertEqual(inst.example_method(1, 2, 3, a="a", b="b", c="c"), True) with self.assertRaises(ParameterTypeError): inst.example_method(2, 3, 5, d=4) with self.assertRaises(ParameterTypeError): inst.example_method(2, "three", 5, e="e") def test_enforce_type_hints_on_init_method(self): class ExClass: @enforce_type_hints def __init__(self, a: int, b: str): self.a = a self.b = b ExClass(1, "b") with self.assertRaises(ParameterTypeError) as err: ExClass("a", "b") self.assertEqual( "The 'a' parameter of '__init__' must be <class 'int'>, " "but a value of 'a' <class 'str'> was specified.", str(err.exception) ) def test_enforce_type_hints_on_method(self): class ExClass: @enforce_type_hints def __init__(self, a: int, b: int): self.a = a self.b = b @enforce_type_hints def ex_method(self, c: int, d) -> int: return self.a + self.b + c + d inst = ExClass(1, 2) self.assertEqual( inst.ex_method(6, 0), 9 ) 
with self.assertRaises(ParameterTypeError) as err: inst.ex_method(1.0, 2) self.assertEqual( "The 'c' parameter of 'ex_method' must be <class 'int'>, but a " "value of 1.0 <class 'float'> was specified.", str(err.exception) ) with self.assertRaises(ReturnTypeError) as err: inst.ex_method(1, 1.0) self.assertEqual( "The return type of 'ex_method' must be <class 'int'>, " "but a value of 5.0 <class 'float'> was returned.", str(err.exception) ) self.assertEqual(5.0, err.exception.return_value) def test_enforce_type_hints_on_method_self_not_named_self_params(self): class ExClass: @enforce_type_hints def __init__(this, self): this.self = self @enforce_type_hints def ex_method(this, self: int) -> int: return this.self + self inst = ExClass(1) self.assertEqual(inst.ex_method(6), 7) inst = ExClass(self=1) self.assertEqual(inst.ex_method(self=6), 7) def test_enforce_type_hints_on_method_self_not_named_self_return(self): class ExClass: @enforce_type_hints def __init__(this, self: int): this.self = self @enforce_type_hints def ex_method(this, self) -> int: return this.self*self inst = ExClass(2) self.assertEqual(inst.ex_method(5), 10) with self.assertRaises(ReturnTypeError) as err: inst.ex_method(5.0) self.assertEqual( "The return type of 'ex_method' must be <class 'int'>, " "but a value of 10.0 <class 'float'> was returned.", str(err.exception) ) def test_enforce_type_hints_self_passed_as_kwarg(self): class ExClass: @enforce_type_hints def ex_method(self, a: int, b: float) -> float: return a + b inst = ExClass() self.assertEqual(ExClass.ex_method(a=1, b=2, self=inst), 3) def test_enforce_type_hints_on_class_method_params(self): class ExClass: @enforce_type_hints def __init__(self, a: int, b: int): self.a = a self.b = b @classmethod @enforce_type_hints def ex_method1(cls, a: int, c: int) -> int: return a + c @enforce_type_hints @classmethod def ex_method2(cls, a: int, c: int) -> int: return a + c result = ExClass.ex_method1(2, 4) self.assertEqual(result, 6) with 
self.assertRaises(ParameterTypeError) as err: ExClass.ex_method1("a", 4) self.assertEqual( "The 'a' parameter of 'ex_method1' must be <class 'int'>, " "but a value of 'a' <class 'str'> was specified.", str(err.exception) ) result = ExClass.ex_method2(5, 4) self.assertEqual(result, 9) with self.assertRaises(ParameterTypeError) as err: ExClass.ex_method2("b", 4) self.assertEqual( "The 'a' parameter of 'ex_method2' must be <class 'int'>, " "but a value of 'b' <class 'str'> was specified.", str(err.exception) ) inst = ExClass(1, 2) result = inst.ex_method1(5, 3) self.assertEqual(result, 8) with self.assertRaises(ParameterTypeError) as err: inst.ex_method1("c", 5) self.assertEqual( "The 'a' parameter of 'ex_method1' must be <class 'int'>, " "but a value of 'c' <class 'str'> was specified.", str(err.exception) ) result = inst.ex_method2(9, 3) self.assertEqual(result, 12) with self.assertRaises(ParameterTypeError) as err: inst.ex_method2("d", 5) self.assertEqual( "The 'a' parameter of 'ex_method2' must be <class 'int'>, " "but a value of 'd' <class 'str'> was specified.", str(err.exception) ) def test_enforce_type_hints_on_class_method_return(self): class ExClass: @classmethod @enforce_type_hints def ex_method1(cls, self) -> int: return self * 2 @enforce_type_hints @classmethod def ex_method2(cls, self) -> int: return self * 2 result = ExClass.ex_method1(4) self.assertEqual(result, 8) with self.assertRaises(ReturnTypeError) as err: ExClass.ex_method1(6.0) self.assertEqual( "The return type of 'ex_method1' must be <class 'int'>, " "but a value of 12.0 <class 'float'> was returned.", str(err.exception) ) self.assertEqual(12.0, err.exception.return_value) result = ExClass.ex_method2(5) self.assertEqual(result, 10) with self.assertRaises(ReturnTypeError) as err: ExClass.ex_method2(7.0) self.assertEqual( "The return type of 'ex_method2' must be <class 'int'>, " "but a value of 14.0 <class 'float'> was returned.", str(err.exception) ) self.assertEqual(14.0, 
err.exception.return_value) result = ExClass().ex_method1(6) self.assertEqual(result, 12) with self.assertRaises(ReturnTypeError) as err: ExClass().ex_method1(8.0) self.assertEqual( "The return type of 'ex_method1' must be <class 'int'>, " "but a value of 16.0 <class 'float'> was returned.", str(err.exception) ) self.assertEqual(16.0, err.exception.return_value) result = ExClass().ex_method2(7) self.assertEqual(result, 14) with self.assertRaises(ReturnTypeError) as err: ExClass().ex_method2(9.0) self.assertEqual( "The return type of 'ex_method2' must be <class 'int'>, " "but a value of 18.0 <class 'float'> was returned.", str(err.exception) ) self.assertEqual(18.0, err.exception.return_value) def test_enforce_type_hints_on_static_method_params(self): class ExClass: @enforce_type_hints def __init__(self, a: int, b: int): self.a = a self.b = b @staticmethod @enforce_type_hints def ex_method1(a: int, c: int) -> int: return a + c @enforce_type_hints @staticmethod def ex_method2(a: int, c: int) -> int: return a + c result = ExClass.ex_method1(2, 4) self.assertEqual(result, 6) with self.assertRaises(ParameterTypeError) as err: ExClass.ex_method1("a", 4) self.assertEqual( "The 'a' parameter of 'ex_method1' must be <class 'int'>, " "but a value of 'a' <class 'str'> was specified.", str(err.exception) ) result = ExClass.ex_method2(5, 4) self.assertEqual(result, 9) with self.assertRaises(ParameterTypeError) as err: ExClass.ex_method2("b", 4) self.assertEqual( "The 'a' parameter of 'ex_method2' must be <class 'int'>, " "but a value of 'b' <class 'str'> was specified.", str(err.exception) ) inst = ExClass(1, 2) result = inst.ex_method1(5, 3) self.assertEqual(result, 8) with self.assertRaises(ParameterTypeError) as err: inst.ex_method1("c", 5) self.assertEqual( "The 'a' parameter of 'ex_method1' must be <class 'int'>,
import matplotlib.pyplot as plt
import numpy as np
from CASutils import colormap_utils as mycolors
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.util import add_cyclic_point
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import matplotlib.ticker as mticker


def contourmap_bothcontinents_fill_nh_pos(fig, dat, lon, lat, ci, cmin, cmax, titlestr,
                                          x1, x2, y1, y2, labels=True, cmap="blue2red",
                                          fontsize=15):
    """ plot a contour map of 2D data dat with coordinates lon and lat
    (Northern Hemisphere only, PlateCarree projection, continents centered)

    Input:
       fig = the figure identifier
       dat = the data to be plotted
       lon = the longitude coordinate
       lat = the latitude coordinate
       ci = the contour interval
       cmin = the minimum of the contour range
       cmax = the maximum of the contour range
       titlestr = the title of the map
       x1 = position of the left edge
       x2 = position of the right edge
       y1 = position of the bottom edge
       y2 = position of the top edge
       labels = True/False (ticks and labels are plotted if true)
       cmap = color map (only set up for blue2red at the moment)

    Returns the cartopy GeoAxes that was created.
    """

    # set up contour levels and color map
    nlevs = (cmax-cmin)/ci + 1
    clevs = np.arange(cmin, cmax+ci, ci)
    if (cmap == "blue2red"):
        mymap = mycolors.blue2red_cmap(nlevs)
    # NOTE(review): `mymap` is only bound when cmap == "blue2red"; any other
    # value raises NameError at the contourf call below.

    # axes placed at [x1, y1] with width x2-x1 and height y2-y1 (figure coords)
    ax = fig.add_axes([x1, y1, x2-x1, y2-y1], projection=ccrs.PlateCarree())
    #ax.cmap.set_over(mymap(len(mymap)-1))
    ax.set_aspect('auto')
    ax.add_feature(cfeature.COASTLINE)
    ax.set_extent([-180,180,0,90], crs = ccrs.PlateCarree())

    if (labels):
        ax.set_xticks([-180, -120, -60, 0,60,120, 180], crs = ccrs.PlateCarree())
        ax.set_xticklabels(['180W','120W','60W','0','60E','120E','180E'], fontsize=fontsize-3)
        ax.set_yticks([0,30,60,90], crs = ccrs.PlateCarree())
        ax.set_yticklabels(['0','30N','60N','90N'], fontsize=fontsize-3)
        ax.xformatter = LongitudeFormatter()
        ax.yformatter = LatitudeFormatter()

    ax.set_title(titlestr, fontsize=fontsize)

    # add a cyclic longitude point so the contour field closes at the dateline
    dat, lon = add_cyclic_point(dat, coord=lon)
    ax.contourf(lon, lat, dat, levels=clevs, cmap = mymap, extend="max")

    return ax


def contourmap_bothcontinents_fill_pos(fig,
dat, lon, lat, ci, cmin, cmax, titlestr,
                                       x1, x2, y1, y2, labels=True, cmap="blue2red",
                                       fontsize=15):
    """ plot a contour map of 2D data dat with coordinates lon and lat
    (global extent, PlateCarree projection, continents centered)

    Input:
       fig = the figure identifier
       dat = the data to be plotted
       lon = the longitude coordinate
       lat = the latitude coordinate
       ci = the contour interval
       cmin = the minimum of the contour range
       cmax = the maximum of the contour range
       titlestr = the title of the map
       x1 = position of the left edge
       x2 = position of the right edge
       y1 = position of the bottom edge
       y2 = position of the top edge
       labels = True/False (ticks and labels are plotted if true)
       cmap = color map (only set up for blue2red at the moment)

    Returns the cartopy GeoAxes that was created.
    """

    # set up contour levels and color map
    nlevs = (cmax-cmin)/ci + 1
    clevs = np.arange(cmin, cmax+ci, ci)
    if (cmap == "blue2red"):
        mymap = mycolors.blue2red_cmap(nlevs)
    # NOTE(review): `mymap` is only bound when cmap == "blue2red"; any other
    # value raises NameError at the contourf call below.

    ax = fig.add_axes([x1, y1, x2-x1, y2-y1], projection=ccrs.PlateCarree())
    #ax.cmap.set_over(mymap(len(mymap)-1))
    ax.set_aspect('auto')
    ax.add_feature(cfeature.COASTLINE)
    ax.set_extent([-180,180,-90,90], crs = ccrs.PlateCarree())

    if (labels):
        ax.set_xticks([-180, -120, -60, 0,60,120, 180], crs = ccrs.PlateCarree())
        ax.set_xticklabels(['180W','120W','60W','0','60E','120E','180E'], fontsize=fontsize-3)
        ax.set_yticks([-90,-60,-30,0,30,60,90], crs = ccrs.PlateCarree())
        ax.set_yticklabels(['90S','60S','30S','0','30N','60N','90N'], fontsize=fontsize-3)
        ax.xformatter = LongitudeFormatter()
        ax.yformatter = LatitudeFormatter()

    ax.set_title(titlestr, fontsize=fontsize)

    # add a cyclic longitude point so the contour field closes at the dateline
    dat, lon = add_cyclic_point(dat, coord=lon)
    ax.contourf(lon, lat, dat, levels=clevs, cmap = mymap, extend="max")

    return ax


def contourmap_bothoceans_robinson_pos(fig, dat, lon, lat, ci, cmin, cmax, titlestr,
                                       x1, x2, y1, y2, labels=True, cmap="blue2red",
                                       fontsize=15):
    """ plot a contour map of 2D data dat with coordinates lon and lat

    Input:
       fig = the figure identifier
       dat = the data to be plotted
       lon = the longitude coordinate
       lat = the latitude coordinate
       ci = the contour interval
       cmin
= the minimum of the contour range
       cmax = the maximum of the contour range
       titlestr = the title of the map
       x1 = position of the left edge
       x2 = position of the right edge
       y1 = position of the bottom edge
       y2 = position of the top edge
       labels = True/False (ticks and labels are plotted if true)
       cmap = color map (only set up for blue2red at the moment)
    """

    # set up contour levels and color map
    nlevs = (cmax-cmin)/ci + 1
    clevs = np.arange(cmin, cmax+ci, ci)
    if (cmap == "blue2red"):
        mymap = mycolors.blue2red_cmap(nlevs)
    if (cmap == "precip"):
        mymap = mycolors.precip_cmap(nlevs)
    # NOTE(review): `mymap` is unbound for any other cmap value — NameError at
    # the contourf call below.

    # Robinson projection centered over the Pacific (210E)
    ax = fig.add_axes([x1, y1, x2-x1, y2-y1], projection=ccrs.Robinson(central_longitude=210))
    ax.set_aspect('auto')
    ax.add_feature(cfeature.COASTLINE)
    # ax.set_extent([-180,180,0,90], crs = ccrs.PlateCarree())
    # if (labels):
    #ax.set_xticks([-180, -120, -60, 0,60,120, 180], crs = ccrs.PlateCarree())
    #ax.set_xticklabels(['180W','120W','60W','0','60E','120E','180E'], fontsize=fontsize-3)
    #ax.set_yticks([0,30,60,90], crs = ccrs.PlateCarree())
    #ax.set_yticklabels(['0','30N','60N','90N'], fontsize=fontsize-3)
    #ax.xformatter = LongitudeFormatter()
    #ax.yformatter = LatitudeFormatter()

    ax.set_title(titlestr, fontsize=fontsize)

    # add a cyclic longitude point so the contour field closes at the dateline
    dat, lon = add_cyclic_point(dat, coord=lon)
    ax.contourf(lon, lat, dat, levels=clevs, cmap = mymap, extend="max",
                transform=ccrs.PlateCarree())

    return ax


def contourmap_bothcontinents_robinson_pos(fig, dat, lon, lat, ci, cmin, cmax, titlestr,
                                           x1, x2, y1, y2, labels=True, cmap="blue2red",
                                           fontsize=15):
    """ plot a contour map of 2D data dat with coordinates lon and lat

    Input:
       fig = the figure identifier
       dat = the data to be plotted
       lon = the longitude coordinate
       lat = the latitude coordinate
       ci = the contour interval
       cmin = the minimum of the contour range
       cmax = the maximum of the contour range
       titlestr = the title of the map
       x1 = position of the left edge
       x2 = position of the right edge
       y1 = position of the bottom edge
       y2 = position of the top edge
       labels = True/False (ticks and labels
are plotted if true)
       cmap = color map (only set up for blue2red at the moment)
    """

    # set up contour levels and color map
    nlevs = (cmax-cmin)/ci + 1
    clevs = np.arange(cmin, cmax+ci, ci)
    if (cmap == "blue2red"):
        mymap = mycolors.blue2red_cmap(nlevs)
    if (cmap == "precip"):
        mymap = mycolors.precip_cmap(nlevs)
    # NOTE(review): `mymap` is unbound for any other cmap value — NameError at
    # the contourf call below.

    # Robinson projection centered on the Greenwich meridian
    ax = fig.add_axes([x1, y1, x2-x1, y2-y1], projection=ccrs.Robinson(central_longitude=0))
    ax.set_aspect('auto')
    ax.add_feature(cfeature.COASTLINE)
    # ax.set_extent([-180,180,0,90], crs = ccrs.PlateCarree())
    # if (labels):
    #ax.set_xticks([-180, -120, -60, 0,60,120, 180], crs = ccrs.PlateCarree())
    #ax.set_xticklabels(['180W','120W','60W','0','60E','120E','180E'], fontsize=fontsize-3)
    #ax.set_yticks([0,30,60,90], crs = ccrs.PlateCarree())
    #ax.set_yticklabels(['0','30N','60N','90N'], fontsize=fontsize-3)
    #ax.xformatter = LongitudeFormatter()
    #ax.yformatter = LatitudeFormatter()

    ax.set_title(titlestr, fontsize=fontsize)

    # add a cyclic longitude point so the contour field closes at the dateline
    dat, lon = add_cyclic_point(dat, coord=lon)
    ax.contourf(lon, lat, dat, levels=clevs, cmap = mymap, extend="max",
                transform=ccrs.PlateCarree())

    return ax


def contourmap_bothcontinents_scatter_nh_pos(fig, dat, lon, lat, ci, cmin, cmax, titlestr,
                                             x1, x2, y1, y2, labels=True, cmap="blue2red"):
    """ plot a map plot of scatter points for the northern hemisphere with the
    greenwich meridian at the center.
Input:
       fig = the figure identifier
       dat = the data to be plotted
       lon = the longitude coordinate
       lat = the latitude coordinate
       ci = the contour interval
       cmin = the minimum of the contour range
       cmax = the maximum of the contour range
       titlestr = the title of the map
       x1 = position of the left edge
       x2 = position of the right edge
       y1 = position of the bottom edge
       y2 = position of the top edge
       labels = True/False (ticks and labels are plotted if true)
       cmap = color map (only set up for blue2red at the moment)
    """

    # set up contour levels and color map
    nlevs = (cmax-cmin)/ci + 1
    clevs = np.arange(cmin, cmax+ci, ci)
    if (cmap == "blue2red"):
        mymap = mycolors.blue2red_cmap(nlevs)
    # NOTE(review): `mymap` is only bound when cmap == "blue2red"; any other
    # value raises NameError at the scatter call below.

    ax = fig.add_axes([x1, y1, x2-x1, y2-y1], projection=ccrs.PlateCarree())
    ax.set_aspect('auto')
    ax.add_feature(cfeature.COASTLINE)
    ax.set_extent([-180,180,0,90], crs = ccrs.PlateCarree())

    if (labels):
        ax.set_xticks([-180, -120, -60, 0,60,120, 180], crs = ccrs.PlateCarree())
        ax.set_xticklabels(['180W','120W','60W','0','60E','120E','180E'], fontsize=12)
        ax.set_yticks([0,30,60,90], crs = ccrs.PlateCarree())
        ax.set_yticklabels(['0','30N','60N','90N'], fontsize=12)
        ax.xformatter = LongitudeFormatter()
        ax.yformatter = LatitudeFormatter()

    ax.set_title(titlestr, fontsize=16)

    # color each scatter point by dat, clipped to the [cmin, cmax] range
    ax.scatter(lon, lat, c=dat, marker="o", vmin=cmin, vmax=cmax, cmap = mymap)
    #ax.scatter(lon, lat, c=dat, marker="o", vmin=-170, vmax=170, cmap="RdYlBu_r")

    return ax


def contourmap_northamerica_scatter_pos(fig, dat, lon, lat, ci, cmin, cmax, titlestr,
                                        x1, x2, y1, y2, labels=True, cmap="blue2red"):
    """ plot a map plot of scatter points for the northern america

    Input:
       fig = the figure identifier
       dat = the data to be plotted
       lon = the longitude coordinate
       lat = the latitude coordinate
       ci = the contour interval
       cmin = the minimum of the contour range
       cmax = the maximum of the contour range
       titlestr = the title of the map
       x1 = position of the left edge
       x2 = position of the right edge
       y1 = position of the bottom edge
       y2 = position of the top edge
labels = True/False (ticks and labels are plotted if true)
       cmap = color map (only set up for blue2red at the moment)
    """

    # set up contour levels and color map
    nlevs = (cmax-cmin)/ci + 1
    clevs = np.arange(cmin, cmax+ci, ci)
    if (cmap == "blue2red"):
        mymap = mycolors.blue2red_cmap(nlevs)
    # NOTE(review): this chunk is truncated mid-statement here; the condition
    # below continues outside this view (presumably the "precip" branch —
    # TODO confirm against the full file).
    if (cmap
# NOTE(review): the lines below are the tail of a function whose header lies
# outside this chunk (it returns w); preserved verbatim.
- a) * s))
            x += dx
            a += 2 * w * dx + s * dx * dx
            cuts.append(x)
            w += s * dx
            a = 0
        else:
            dx = nx - x
            a += 2 * w * dx + s * dx * dx
            x = nx
            w += s * dx
        if nx == nxL:
            iL += 1
        if nx == nxU:
            iU += 1
        if iL >= len(L) - 1:
            break
        if iU >= len(U) - 1:
            break
    return w


def func_59ff2411202c4e71bc7fa20b6f3053f9(U, cuts, part, L):
    """Sweep the piecewise-linear lower envelope L and upper envelope U,
    accumulating swept area and appending a cut position to `cuts` each time
    `part` worth of area has been covered.  Returns the accumulator `a`.

    NOTE(review): appears machine-extracted — `iL`, `iU`, `w` and `a` are read
    before any assignment in this scope (and are made local by the augmented
    assignments below), so calling this as-is raises UnboundLocalError.
    `math` must also be imported by the enclosing module — neither the
    initialization nor the import is visible in this chunk.
    """
    x = 0
    while True:
        # slopes of the current lower/upper envelope segments
        sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
        sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
        s = sU - sL
        # next breakpoint on each envelope; advance to the nearer one
        nxL = L[iL + 1][0]
        nxU = U[iU + 1][0]
        nx = min(nxL, nxU)
        # area contributed by advancing from x to nx
        na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
        if a + na >= part:
            # the cut lands inside this segment: solve the quadratic for dx
            dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
            x += dx
            a += 2 * w * dx + s * dx * dx
            cuts.append(x)
            w += s * dx
            a = 0
        else:
            dx = nx - x
            a += 2 * w * dx + s * dx * dx
            x = nx
            w += s * dx
        if nx == nxL:
            iL += 1
        if nx == nxU:
            iU += 1
        if iL >= len(L) - 1:
            break
        if iU >= len(U) - 1:
            break
    return a


def func_01e8fb107ea846bf8a43e6faece93d47(U, cuts, part, L):
    """Identical machine-extracted sweep (see func_59ff2411... above, including
    the UnboundLocalError caveat), but returns the sweep position `x`."""
    x = 0
    while True:
        sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
        sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
        s = sU - sL
        nxL = L[iL + 1][0]
        nxU = U[iU + 1][0]
        nx = min(nxL, nxU)
        na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
        if a + na >= part:
            dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
            x += dx
            a += 2 * w * dx + s * dx * dx
            cuts.append(x)
            w += s * dx
            a = 0
        else:
            dx = nx - x
            a += 2 * w * dx + s * dx * dx
            x = nx
            w += s * dx
        if nx == nxL:
            iL += 1
        if nx == nxU:
            iU += 1
        if iL >= len(L) - 1:
            break
        if iU >= len(U) - 1:
            break
    return x


def func_2f7d2c27a6c94b1b96cd6d84499c4db4(U, cuts, part, L):
    """Identical machine-extracted sweep (see func_59ff2411... above, including
    the UnboundLocalError caveat), but returns the lower slope `sL`."""
    x = 0
    while True:
        sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
        sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
        s = sU - sL
        nxL = L[iL + 1][0]
        nxU = U[iU + 1][0]
        nx = min(nxL, nxU)
        na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
        if a + na >= part:
            dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
            x += dx
            a += 2 * w * dx + s * dx * dx
            cuts.append(x)
            w += s * dx
            a = 0
        else:
            dx = nx - x
            a += 2 * w * dx + s * dx * dx
            x = nx
            w += s * dx
        if nx == nxL:
            iL += 1
        if nx == nxU:
            iU += 1
        if iL >= len(L) - 1:
            break
        if iU >= len(U) - 1:
            break
    return sL


def func_36dbf0656a0542208508590edbd58e2d(U, cuts, part, L):
    """Identical machine-extracted sweep (see func_59ff2411... above, including
    the UnboundLocalError caveat), but returns the segment area `na`."""
    x = 0
    while True:
        sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
        sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
        s = sU - sL
        nxL = L[iL + 1][0]
        nxU = U[iU + 1][0]
        nx = min(nxL, nxU)
        na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
        if a + na >= part:
            dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
            x += dx
            a += 2 * w * dx + s * dx * dx
            cuts.append(x)
            w += s * dx
            a = 0
        else:
            dx = nx - x
            a += 2 * w * dx + s * dx * dx
            x = nx
            w += s * dx
        if nx == nxL:
            iL += 1
        if nx == nxU:
            iU += 1
        if iL >= len(L) - 1:
            break
        if iU >= len(U) - 1:
            break
    return na


def func_0d52a0c770374ec1a0593cf0188f8e44(U, cuts, part, L):
    """Identical machine-extracted sweep (see func_59ff2411... above, including
    the UnboundLocalError caveat), but returns the upper breakpoint `nxU`."""
    x = 0
    while True:
        sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
        sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
        s = sU - sL
        nxL = L[iL + 1][0]
        nxU = U[iU + 1][0]
        nx = min(nxL, nxU)
        na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
        if a + na >= part:
            dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
            x += dx
            a += 2 * w * dx + s * dx * dx
            cuts.append(x)
            w += s * dx
            a = 0
        else:
            dx = nx - x
            a += 2 * w * dx + s * dx * dx
            x = nx
            w += s * dx
        if nx == nxL:
            iL += 1
        if nx == nxU:
            iU += 1
        if iL >= len(L) - 1:
            break
        if iU >= len(U) - 1:
            break
    return nxU


def func_ae4212519b1c4cdf869452f01fc2364b(U, cuts, part, L):
    """Identical machine-extracted sweep (see func_59ff2411... above, including
    the UnboundLocalError caveat), but returns the upper slope `sU`."""
    x = 0
    while True:
        sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
        sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
        s = sU - sL
        nxL = L[iL + 1][0]
        nxU = U[iU + 1][0]
        nx = min(nxL, nxU)
        na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
        if a + na >= part:
            dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
            x += dx
            a += 2 * w * dx + s * dx * dx
            cuts.append(x)
            w += s * dx
            a = 0
        else:
            dx = nx - x
            a += 2 * w * dx + s * dx * dx
            x = nx
            w += s * dx
        if nx == nxL:
            iL += 1
        if nx == nxU:
            iU += 1
        if iL >= len(L) - 1:
            break
        if iU >= len(U) - 1:
            break
    return sU


def func_9a7e242f8d0f436fb83953f04affba94(U, cuts, part, L):
    # NOTE(review): truncated at the end of this chunk — the body continues
    # outside this view.
    x = 0
    while True:
        sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
        sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
        s = sU - sL
        nxL = L[iL + 1][0]
        nxU = U[iU + 1][0]
        nx = min(nxL, nxU)
        na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
        if a + na >= part:
            dx = (part - a) * 1.0
at the same time.") elif matrix is not None: if matrix.shape != (3, 3): raise ValueError("Invalid shape of transformation matrix.") self.params = matrix elif params: if translation is None: translation = (0, 0) if rotation is None: rotation = 0 self.params = np.array([ [np.cos(rotation), - np.sin(rotation), 0], [np.sin(rotation), np.cos(rotation), 0], [0, 0, 1] ]) self.params[0:2, 2] = translation else: # default to an identity transform self.params = np.eye(3) def estimate(self, src, dst): """ Set the transformation matrix with the explicit parameters. You can determine the over-, well- and under-determined parameters with the total least-squares method. Number of source and destination coordinates must match. The transformation is defined as:: X = a0 * x - b0 * y + a1 Y = b0 * x + a0 * y + b1 These equations can be transformed to the following form:: 0 = a0 * x - b0 * y + a1 - X 0 = b0 * x + a0 * y + b1 - Y which exist for each set of corresponding points, so we have a set of N * 2 equations. The coefficients appear linearly so we can write A x = 0, where:: A = [[x 1 -y 0 -X] [y 0 x 1 -Y] ... ... ] x.T = [a0 a1 b0 b1 c3] In case of total least-squares the solution of this homogeneous system of equations is the right singular vector of A which corresponds to the smallest singular value normed by the coefficient c3. Parameters ---------- src : (N, 2) array Source coordinates. dst : (N, 2) array Destination coordinates. Returns ------- success : bool True, if model estimation succeeds. 
""" try: src_matrix, src = _center_and_normalize_points(src) dst_matrix, dst = _center_and_normalize_points(dst) except ZeroDivisionError: self.params = np.nan * np.empty((3, 3)) return False xs = src[:, 0] ys = src[:, 1] xd = dst[:, 0] yd = dst[:, 1] rows = src.shape[0] # params: a0, a1, b0, b1 A = np.zeros((rows * 2, 5)) A[:rows, 0] = xs A[:rows, 2] = - ys A[:rows, 1] = 1 A[rows:, 2] = xs A[rows:, 0] = ys A[rows:, 3] = 1 A[:rows, 4] = xd A[rows:, 4] = yd _, _, V = np.linalg.svd(A) # solution is right singular vector that corresponds to smallest # singular value a0, a1, b0, b1 = - V[-1, :-1] / V[-1, -1] S = np.array([[a0, -b0, a1], [b0, a0, b1], [0, 0, 1]]) # De-center and de-normalize S = np.dot(np.linalg.inv(dst_matrix), np.dot(S, src_matrix)) self.params = S return True @staticmethod def _apply_mat(coords, matrix): """ Parameters ---------- coords matrix Returns ------- """ coords = np.array(coords, copy=False, ndmin=2) x, y = np.transpose(coords) src = np.vstack((x, y, np.ones_like(x))) dst = np.dot(src.transpose(), matrix.transpose()) # rescale to homogeneous coordinates dst[:, 0] /= dst[:, 2] dst[:, 1] /= dst[:, 2] return dst[:, :2] def __call__(self, coords): return self._apply_mat(coords, self.params) def inverse(self, coords): """ Apply inverse transformation. Parameters ---------- coords : (N, 2) array Source coordinates. Returns ------- coords : (N, 2) array Transformed coordinates. """ return self._apply_mat(coords, self._inv_matrix) def residuals(self, src, dst): """ Determine residuals of transformed destination coordinates. For each transformed source coordinate the euclidean distance to the respective destination coordinate is determined. Parameters ---------- src : (N, 2) array Source coordinates. dst : (N, 2) array Destination coordinates. Returns ------- residuals : (N, ) array Residual for coordinate. 
""" return np.sqrt(np.sum((self(src) - dst) ** 2, axis=1)) @property def _inv_matrix(self): return np.linalg.inv(self.params) @property def rotation(self): return np.atan2(self.params[1, 0], self.params[1, 1]) @property def translation(self): return self.params[0:2, 2] # Class to do geometric transformations. This is a wrapper on scikit-image functionality. # TODO: io operations for features and optical geometric transformations. class geoTransformerParallel(object): """ This object contains methods to perform geometric transformations on a sequence of images. Some of the capabilities are: + Homography by feature extraction. + Intensity-based image registration. + Projection Correction. """ def __init__(self): self.data = [] self.features = [] def clearData(self): """ This is a Method to clear the data from the object. """ del self.data self.data = [] def loadData(self, dataset): """ This is a Method that loads h5 Dataset to be corrected. Parameters ---------- dataset: h5py.dataset The dataset to be corrected """ if not isinstance(dataset, h5py.Dataset): warnings.warn('Error: Data must be an h5 Dataset object') else: self.data = dataset dim = int(np.sqrt(self.data.shape[-1])) self.data = self.data.reshape(-1, dim, dim) def loadFeatures(self, features): """ This is a Method that loads features to be used for homography etc ... Parameters ---------- features : tuple [keypoints, descriptors] These can come from FeatureExtractor.getFeatures() or elsewhere. The format is : keypoints = [np.ndarray([y_position, x_position])] descriptors = [np.ndarray()] """ self.features = features def matchFeatures(self, **kwargs): """ This is a Method that computes similarity between keypoints based on their descriptors. Currently only skimage.feature.match_descriptors is implemented. In the future will need to add opencv2.matchers. Parameters ---------- processors: int, optional Number of processors to use, default = 1. 
maximum_distance: int, optional maximum_distance (int) of misalignment, default = infinity. Used to filter the matches before optimizing the transformation. Returns ------- Matches """ desc = self.features[-1] keypts = self.features[0] processes = kwargs.get('processors', 1) maxDis = kwargs.get('maximum_distance', np.infty) def match(desc): desc1, desc2 = desc[0], desc[1] matches = match_descriptors(desc1, desc2, cross_check=True) return matches # start pool of workers pool = mp.Pool(processes) print('launching %i kernels...' % processes) tasks = [(desc1, desc2) for desc1, desc2 in zip(desc[:], desc[1:])] chunk = int(len(desc) / processes) jobs = pool.imap(match, tasks, chunksize=chunk) # get matches print('Extracting Matches From the Descriptors...') matches = [] for j in jobs: matches.append(j) # close the pool print('Closing down the kernels...\n') pool.close() # impose maximum_distance misalignment constraints on matches filt_matches = [] for match, key1, key2 in zip(matches, keypts[:], keypts[1:]): filteredMask = euclidMatch(match, key1, key2, maxDis) filt_matches.append(match[filteredMask]) return matches, filt_matches def findTransformation(self, transform, matches, processes, **kwargs): """ This is a Method that finds the optimal transformation between two images given matching features using a random sample consensus. Parameters ---------- transform: skimage.transform object matches : list matches found through match_features method. processes : int Number of processors to use. **kwargs are passed to skimage.transform.ransac Returns ------- Transformations """ keypts = self.features[0] def optimization(Pts): robustTrans, inliers = ransac((Pts[0], Pts[1]), transform, **kwargs) output = [robustTrans, inliers] return output # start pool of workers print('launching %i kernels...' 
% processes) pool = mp.Pool(processes) tasks = [(key1[match[:, 0]], key2[match[:, 1]]) for match, key1, key2 in zip(matches, keypts[:], keypts[1:])] chunk = int(len(keypts) / processes) jobs = pool.imap(optimization, tasks, chunksize=chunk) # get Transforms and inlier matches transforms, trueMatches = [], [] print('Extracting Inlier Matches with RANSAC...') try: for j in jobs: transforms.append(j[0]) trueMatches.append(j[1]) except np.linalg.LinAlgError: pass # close the pool pool.close() print('Closing down the kernels...\n') return transforms, trueMatches # TODO: Need parallel version for transforming stack of images. def applyTransformation(self, transforms, **kwargs): """ This is the method that takes the list of transformation found by findTransformation and applies them to the data set. Parameters ---------- transforms: (list of skimage.GeoemetricTransform objects). The objects must be inititated with the desired parameters. transformation : string, optional. The type of geometric transformation to use (i.e. translation, rigid, etc..) Currently, only translation is implemented. default, translation. origin : int, optional The position in the data to take as origin, i.e. don't transform. default, center image in the stack. processors : int, optional Number of processors to use, default = 1. Currently,only one processor is used. Returns ------- Transformed images, transformations """ dic = ['processors', 'origin', 'transformation'] for key in kwargs.keys(): if key not in dic: print('%s is not a parameter of this function' % (str(key))) processes = kwargs.get('processors', 1) origin = kwargs.get('origin', int(self.data.shape[0] / 2)) transformation = kwargs.get('transformation', 'translation') dset = self.data # For now restricting this to just translation... Straightforward to generalize to other transform objects. 
if transformation == 'translation': YTrans = np.array([trans.translation[0] for trans in transforms]) XTrans = np.array([trans.translation[1] for trans in transforms]) chainL = [] for
        to parse
        unescape_names : bool
            Remove extraneous quote marks around names. Sometimes other
            programs are sensitive to the characters used in names, and it
            is essential (at times) to quote node names for compatibility.

        Returns
        -------
        TreeNode
            The root of the parsed tree

        Raises
        ------
        RecordError
            The following three conditions will trigger a `RecordError`:
                * Unbalanced number of left and right parentheses
                * A malformed newick string. For instance, if a semicolon is
                  embedded within the string as opposed to at the end.
                * If a non-newick string is passed.

        See Also
        --------
        to_newick

        Examples
        --------
        >>> from skbio import TreeNode
        >>> TreeNode.from_newick("((a,b)c,(d,e)f)root;")
        <TreeNode, name: root, internal node count: 2, tips count: 4>
        >>> from StringIO import StringIO
        >>> s = StringIO("((a,b),c);")
        >>> TreeNode.from_newick(s)
        <TreeNode, name: unnamed, internal node count: 1, tips count: 3>

        References
        ----------
        [1] http://evolution.genetics.washington.edu/phylip/newicktree.html

        """
        def _new_child(old_node):
            """Returns new_node which has old_node as its parent."""
            new_node = cls()
            new_node.parent = old_node
            if old_node is not None:
                if new_node not in old_node.children:
                    old_node.children.append(new_node)
            return new_node

        if isinstance(lines, str):
            data = lines
        else:
            data = ''.join(lines)

        # skip arb comment stuff if present: start at first paren
        paren_index = data.find('(')
        data = data[paren_index:]
        left_count = data.count('(')
        right_count = data.count(')')
        if left_count != right_count:
            raise RecordError("Found %s left parens but %s right parens."
                              % (left_count, right_count))

        # Token-driven state machine over the newick string:
        # `state` tracks whether the previous token was ':' (so the next
        # token is a branch length); `state1` tracks whether the previous
        # token closed a clade with ')' (so a name applies to that clade).
        curr_node = None
        state = 'PreColon'
        state1 = 'PreClosed'
        last_token = None

        for t in _dnd_tokenizer(data):
            if t == ':':
                # expecting branch length
                state = 'PostColon'
                # prevent state reset
                last_token = t
                continue
            if t == ')' and last_token in ',(':
                # node without name
                new_node = _new_child(curr_node)
                new_node.name = None
                curr_node = new_node.parent
                state1 = 'PostClosed'
                last_token = t
                continue
            if t == ')':
                # closing the current node
                curr_node = curr_node.parent
                state1 = 'PostClosed'
                last_token = t
                continue
            if t == '(':
                # opening a new node
                curr_node = _new_child(curr_node)
            elif t == ';':
                # end of data
                last_token = t
                break
            elif t == ',' and last_token in ',(':
                # node without name
                new_node = _new_child(curr_node)
                new_node.name = None
                curr_node = new_node.parent
            elif t == ',':
                # separator: next node adds to this node's parent
                curr_node = curr_node.parent
            elif state == 'PreColon' and state1 == 'PreClosed':
                # data for the current node
                new_node = _new_child(curr_node)
                if unescape_name:
                    if t.startswith("'") and t.endswith("'"):
                        # strip all balanced layers of surrounding quotes
                        while t.startswith("'") and t.endswith("'"):
                            t = t[1:-1]
                    else:
                        if '_' in t:
                            t = t.replace('_', ' ')
                new_node.name = t
                curr_node = new_node
            elif state == 'PreColon' and state1 == 'PostClosed':
                # name applies to the clade just closed by ')'
                if unescape_name:
                    while t.startswith("'") and t.endswith("'"):
                        t = t[1:-1]
                curr_node.name = t
            elif state == 'PostColon':
                # length data for the current node
                curr_node.length = float(t)
            else:
                # can't think of a reason to get here
                raise RecordError("Incorrect PhyloNode state? %s" % t)

            state = 'PreColon'  # get here for any non-colon token
            state1 = 'PreClosed'
            last_token = t

        if curr_node is not None and curr_node.parent is not None:
            raise RecordError("Didn't get back to root of tree. The newick "
                              "string may be malformed.")

        if curr_node is None:  # no data -- return empty node
            return cls()
        return curr_node  # this should be the root of the tree

    def to_array(self, attrs=None):
        """Return an array representation of self

        Parameters
        ----------
        attrs : list of tuple or None
            The attributes and types to return. The expected form is
            [(attribute_name, type)]. If `None`, then `name`, `length`, and
            `id` are returned.

        Returns
        -------
        dict of array
            {id_index: {id: TreeNode},
             child_index: [(node_id, left_child_id, right_child_id)],
             attr_1: array(...),
             ...
             attr_N: array(...)}

        Notes
        -----
        Attribute arrays are in index order such that TreeNode.id can be used
        as a lookup into the array

        If `length` is an attribute, this will also record the length off the
        root which is `nan`. Take care when summing.

        Examples
        --------
        >>> from skbio import TreeNode
        >>> t = TreeNode.from_newick('(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7)')
        >>> res = t.to_array()
        >>> res.keys()
        ['child_index', 'length', 'name', 'id_index', 'id']
        >>> res['child_index']
        [(4, 0, 2), (5, 3, 3), (6, 4, 5), (7, 6, 6)]
        >>> for k, v in res['id_index'].items():
        ...     print(k, v)
        ...
        0 a:1.0;
        1 b:2.0;
        2 c:3.0;
        3 d:5.0;
        4 (a:1.0,b:2.0,c:3.0)x:4.0;
        5 (d:5.0)y:6.0;
        6 ((a:1.0,b:2.0,c:3.0)x:4.0,(d:5.0)y:6.0)z:7.0;
        7 (((a:1.0,b:2.0,c:3.0)x:4.0,(d:5.0)y:6.0)z:7.0);
        >>> res['id']
        array([0, 1, 2, 3, 4, 5, 6, 7])
        >>> res['name']
        array(['a', 'b', 'c', 'd', 'x', 'y', 'z', None], dtype=object)

        """
        if attrs is None:
            attrs = [('name', object), ('length', float), ('id', int)]
        else:
            # validate attribute names against the root before allocating
            for attr, dtype in attrs:
                if not hasattr(self, attr):
                    raise AttributeError("Invalid attribute '%s'."
% attr) id_index, child_index = self.index_tree() n = self.id + 1 # assign_ids starts at 0 tmp = [np.zeros(n, dtype=dtype) for attr, dtype in attrs] for node in self.traverse(include_self=True): n_id = node.id for idx, (attr, dtype) in enumerate(attrs): tmp[idx][n_id] = getattr(node, attr) results = {'id_index': id_index, 'child_index': child_index} results.update({attr: arr for (attr, dtype), arr in zip(attrs, tmp)}) return results def to_newick(self, with_distances=False, semicolon=True, escape_name=True): r"""Return the newick string representation of this tree. Please see `TreeNode.from_newick` for a further description of the Newick format. Parameters ---------- with_distances : bool If true, include lengths between nodes semicolon : bool If true, terminate the tree string with a semicolon escape_name : bool If true, wrap node names that include []'"(),:;_ in single quotes Returns ------- str A Newick string representation of the tree See Also -------- from_newick Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.from_newick("((a,b)c,(d,e)f)root;") >>> print(tree.to_newick()) ((a,b)c,(d,e)f)root; """ result = ['('] nodes_stack = [[self, len(self.children)]] node_count = 1 while nodes_stack: node_count += 1 # check the top node, any children left unvisited? 
top = nodes_stack[-1] top_node, num_unvisited_children = top if num_unvisited_children: # has any child unvisited top[1] -= 1 # decrease the #of children unvisited next_child = top_node.children[-num_unvisited_children] # pre-visit if next_child.children: result.append('(') nodes_stack.append([next_child, len(next_child.children)]) else: # no unvisited children nodes_stack.pop() # post-visit if top_node.children: result[-1] = ')' if top_node.name is None: name = '' else: name = str(top_node.name) if escape_name and not (name.startswith("'") and name.endswith("'")): if re.search("""[]['"(),:;_]""", name): name = "'%s'" % name.replace("'", "''") else: name = name.replace(' ', '_') result.append(name) if with_distances and top_node.length is not None: result[-1] = "%s:%s" % (result[-1], top_node.length) result.append(',') if len(result) <= 3: # single node with or without name if semicolon: return "%s;" % result[1] else: return result[1] else: if semicolon: result[-1] = ';' else: result.pop(-1) return ''.join(result) def _ascii_art(self, char1='-', show_internal=True, compact=False): LEN = 10 PAD = ' ' * LEN PA = ' ' * (LEN - 1) namestr = self.name or '' # prevents name of NoneType if self.children: mids = [] result = [] for c in self.children: if c is self.children[0]: char2 = '/' elif c is self.children[-1]: char2 = '\\' else: char2 = '-' (clines, mid) = c._ascii_art(char2, show_internal, compact) mids.append(mid + len(result)) result.extend(clines) if not compact: result.append('') if not compact: result.pop() (lo, hi, end) = (mids[0], mids[-1], len(result)) prefixes = [PAD] * (lo + 1) + [PA + '|'] * \ (hi - lo - 1) + [PAD] * (end - hi) mid = np.int(np.trunc((lo + hi) / 2)) prefixes[mid] = char1 + '-' * (LEN - 2) + prefixes[mid][-1] result = [p + l for (p, l) in zip(prefixes, result)] if show_internal: stem = result[mid] result[mid] = stem[0] + namestr + stem[len(namestr) + 1:] return (result, mid) else: return ([char1 + '-' + namestr], 0) def ascii_art(self, 
show_internal=True, compact=False): r"""Returns a string containing an ascii drawing of the tree Note, this method calls a private recursive function and is not safe for large trees. Parameters ---------- show_internal : bool includes internal edge names compact : bool use exactly one line per tip Returns ------- str an ASCII formatted version of the tree Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.from_newick("((a,b)c,(d,e)f)root;") >>> print(tree.ascii_art()) /-a /c-------| | \-b -root----| | /-d \f-------| \-e """ (lines, mid) = self._ascii_art(show_internal=show_internal, compact=compact) return '\n'.join(lines) def
* The container is named ``MY_CONTAINER``.
For example, if your key is an environment variable called ``OBJECT_STORE_KEY`` that is set to ``MY_KEY``, then you should set this parameter equal to ``OBJECT_STORE_KEY``. Composer will read the key like this: .. testsetup:: composer.utils.object_store.ObjectStoreHparams.__init__.key import os import functools from composer.utils import ObjectStoreHparams os.environ["OBJECT_STORE_KEY"] = "MY_KEY" ObjectStoreHparams = functools.partial(ObjectStoreHparams, provider="s3", container="container") .. doctest:: composer.utils.object_store.ObjectStoreHparams.__init__.key >>> import os >>> params = ObjectStoreHparams(key_environ="OBJECT_STORE_KEY") >>> key = os.environ[params.key_environ] >>> key 'MY_KEY' secret_environ (str, optional): The name of an environment variable containing the API secret or password to use for the provider. If no secret is required, then set this field to ``None``. (default: ``None``) For security reasons, composer requires that the secret be specified via an environment variable. For example, if your secret is an environment variable called ``OBJECT_STORE_SECRET`` that is set to ``MY_SECRET``, then you should set this parameter equal to ``OBJECT_STORE_SECRET``. Composer will read the secret like this: .. testsetup:: composer.utils.object_store.ObjectStoreHparams.__init__.secret import os import functools from composer.utils import ObjectStoreHparams original_secret = os.environ.get("OBJECT_STORE_SECRET") os.environ["OBJECT_STORE_SECRET"] = "MY_SECRET" ObjectStoreHparams = functools.partial(ObjectStoreHparams, provider="s3", container="container") .. doctest:: composer.utils.object_store.ObjectStoreHparams.__init__.secret >>> import os >>> params = ObjectStoreHparams(secret_environ="OBJECT_STORE_SECRET") >>> secret = os.environ[params.secret_environ] >>> secret 'MY_SECRET' region (str, optional): Cloud region to use for the cloud provider. Most providers do not require the region to be specified. 
(default: ``None``) host (str, optional): Override the hostname for the cloud provider. (default: ``None``) port (int, optional): Override the port for the cloud provider. (default: ``None``) extra_init_kwargs (Dict[str, Any], optional): Extra keyword arguments to pass into the constructor for the specified provider. (default: ``None``, which is equivalent to an empty dictionary) .. seealso:: :class:`libcloud.storage.base.StorageDriver` """ provider: str = hp.required("Cloud provider to use.") container: str = hp.required("The name of the container (i.e. bucket) to use.") key_environ: Optional[str] = hp.optional(textwrap.dedent("""\ The name of an environment variable containing an API key or username to use to connect to the provider."""), default=None) secret_environ: Optional[str] = hp.optional(textwrap.dedent("""\ The name of an environment variable containing an API secret or password to use to connect to the provider."""), default=None) region: Optional[str] = hp.optional("Cloud region to use", default=None) host: Optional[str] = hp.optional("Override hostname for connections", default=None) port: Optional[int] = hp.optional("Override port for connections", default=None) extra_init_kwargs: Dict[str, Any] = hp.optional( "Extra keyword arguments to pass into the constructor for the specified provider.", default_factory=dict) def get_provider_kwargs(self) -> Dict[str, Any]: """Returns the ``provider_kwargs`` argument, which is used to construct a :class:`.ObjectStore`. Returns: Dict[str, Any]: The ``provider_kwargs`` for use in constructing an :class:`.ObjectStore`. 
""" init_kwargs = {} for key in ("host", "port", "region"): kwarg = getattr(self, key) if getattr(self, key) is not None: init_kwargs[key] = kwarg init_kwargs["key"] = None if self.key_environ is None else os.environ[self.key_environ] init_kwargs["secret"] = None if self.secret_environ is None else os.environ[self.secret_environ] init_kwargs.update(self.extra_init_kwargs) return init_kwargs def initialize_object(self): """Returns an instance of :class:`.ObjectStore`. Returns: ObjectStore: The object_store. """ return ObjectStore( provider=self.provider, container=self.container, provider_kwargs=self.get_provider_kwargs(), ) class ObjectStore: """Utility for uploading to and downloading from object (blob) stores, such as Amazon S3. .. rubric:: Example Here's an example for an Amazon S3 bucket named ``MY_CONTAINER``: >>> from composer.utils import ObjectStore >>> object_store = ObjectStore( ... provider="s3", ... container="MY_CONTAINER", ... provider_kwargs={ ... "key": "AKIA...", ... "secret": "*********", ... } ... ) >>> object_store <composer.utils.object_store.ObjectStore object at ...> Args: provider (str): Cloud provider to use. Valid options are: * :mod:`~libcloud.storage.drivers.atmos` * :mod:`~libcloud.storage.drivers.auroraobjects` * :mod:`~libcloud.storage.drivers.azure_blobs` * :mod:`~libcloud.storage.drivers.backblaze_b2` * :mod:`~libcloud.storage.drivers.cloudfiles` * :mod:`~libcloud.storage.drivers.digitalocean_spaces` * :mod:`~libcloud.storage.drivers.google_storage` * :mod:`~libcloud.storage.drivers.ktucloud` * :mod:`~libcloud.storage.drivers.local` * :mod:`~libcloud.storage.drivers.minio` * :mod:`~libcloud.storage.drivers.nimbus` * :mod:`~libcloud.storage.drivers.ninefold` * :mod:`~libcloud.storage.drivers.oss` * :mod:`~libcloud.storage.drivers.rgw` * :mod:`~libcloud.storage.drivers.s3` .. seealso:: :doc:`Full list of libcloud providers <libcloud:storage/supported_providers>` container (str): The name of the container (i.e. bucket) to use. 
        provider_kwargs (Dict[str, Any], optional):  Keyword arguments to pass into the constructor
            for the specified provider. These arguments would usually include the cloud region
            and credentials.

            Common keys are:

            * ``key`` (str): API key or username to be used (required).
            * ``secret`` (str): Secret password to be used (required).
            * ``secure`` (bool): Whether to use HTTPS or HTTP. Note: Some providers only support HTTPS, and it is on by default.
            * ``host`` (str): Override hostname used for connections.
            * ``port`` (int): Override port used for connections.
            * ``api_version`` (str): Optional API version. Only used by drivers which support multiple API versions.
            * ``region`` (str): Optional driver region. Only used by drivers which support multiple regions.

            .. seealso:: :class:`libcloud.storage.base.StorageDriver`
    """

    def __init__(self, provider: str, container: str, provider_kwargs: Optional[Dict[str, Any]] = None) -> None:
        # Resolve the libcloud driver class for the named provider, then bind
        # to the container up front so bad credentials or a missing bucket
        # fail at construction time rather than on first use.
        provider_cls = get_driver(provider)
        if provider_kwargs is None:
            provider_kwargs = {}
        self._provider = provider_cls(**provider_kwargs)
        self._container = self._provider.get_container(container)

    @property
    def provider_name(self):
        """The name of the cloud provider."""
        return self._provider.name

    @property
    def container_name(self):
        """The name of the object storage container."""
        return self._container.name

    def upload_object(self,
                      file_path: str,
                      object_name: str,
                      verify_hash: bool = True,
                      extra: Optional[Dict] = None,
                      headers: Optional[Dict[str, str]] = None):
        """Upload an object currently located on a disk.

        .. seealso:: :meth:`libcloud.storage.base.StorageDriver.upload_object`.

        Args:
            file_path (str): Path to the object on disk.
            object_name (str): Object name (i.e. where the object will be stored in the container.)
            verify_hash (bool, optional): Whether to verify hashes (default: ``True``)
            extra (Optional[Dict], optional): Extra attributes to pass to the underlying provider driver.
                (default: ``None``, which is equivalent to an empty dictionary)
            headers (Optional[Dict[str, str]], optional): Additional request headers,
                such as CORS headers. (defaults: ``None``, which is equivalent to an empty dictionary)
        """
        self._provider.upload_object(file_path=file_path,
                                     container=self._container,
                                     object_name=object_name,
                                     extra=extra,
                                     verify_hash=verify_hash,
                                     headers=headers)

    def upload_object_via_stream(self,
                                 obj: Union[bytes, Iterator[bytes]],
                                 object_name: str,
                                 extra: Optional[Dict] = None,
                                 headers: Optional[Dict[str, str]] = None):
        """Upload an object.

        .. seealso:: :meth:`libcloud.storage.base.StorageDriver.upload_object_via_stream`.

        Args:
            obj (bytes | Iterator[bytes]): The object.
            object_name (str): Object name (i.e. where the object will be stored in the container.)
            verify_hash (bool, optional): Whether to verify hashes (default: ``True``)
            extra (Optional[Dict], optional): Extra attributes to pass to the underlying provider driver.
                (default: ``None``)
            headers (Optional[Dict[str, str]], optional): Additional request headers,
                such as CORS headers. (defaults: ``None``)
        """
        if isinstance(obj, bytes):
            # libcloud expects an iterator of chunks; this wraps the bytes as
            # an iterator of single-byte chunks.
            # NOTE(review): one chunk per byte is very slow for large
            # payloads; ``iter([obj])`` would stream in one chunk — confirm
            # whether per-byte chunking is intentional before changing.
            obj = iter(i.to_bytes(1, sys.byteorder) for i in obj)
        self._provider.upload_object_via_stream(iterator=obj,
                                                container=self._container,
                                                object_name=object_name,
                                                extra=extra,
                                                headers=headers)

    def _get_object(self, object_name: str):
        # Thin wrapper used by the public getters below.
        return self._provider.get_object(self._container.name, object_name)

    def get_object_size(self, object_name: str) -> int:
        """Get the size of an object, in bytes.

        Args:
            object_name (str): The name of the object.

        Returns:
            int: The object size, in bytes.
        """
        return self._get_object(object_name).size

    def download_object(self,
                        object_name: str,
                        destination_path: str,
                        overwrite_existing: bool = False,
                        delete_on_failure: bool = True):
        """Download an object to the specified destination path.

        .. seealso:: :meth:`libcloud.storage.base.StorageDriver.download_object`.

        Args:
            object_name (str): The name of the object to download.
            destination_path (str): Full path to a file or a directory where the incoming file will be saved.
            overwrite_existing (bool, optional): Set to ``True`` to overwrite an existing file. (default: ``False``)
            delete_on_failure (bool, optional): Set to ``True`` to delete a partially downloaded file if
<filename>gemstash.py<gh_stars>0 #!/usr/bin/env python3 # Copyright 2015 <NAME>. See LICENSE for details. """ Server-free memcached replacement. Gemstash provides a simple cache similar to memcached, appropriate for use in standalone applications which do not have access to a memcached server. Its usage should be familiar to users of python-memcached. Usage: >>> import gemstash >>> gs = gemstash.Client(gemstash.Stash()) Each stash stores its own independent set of keys and values. Two clients accessing the same stash will get the same values. >>> gs.set("foo", "bar", time=0) True >>> gs.get("foo") 'bar' >>> gs.set("spam", "eggs", 300) True >>> gs.get("spam") 'eggs' # five minutes later >>> print(gs.get("spam")) None Values can be stored with an expiry time, specified in seconds, after which they will be deleted from the cache (and not returned). Numbers greater than 60*60*24*30 (thirty days) will be interpreted instead as an absolute timestamp in seconds since January 1, 1970 (epoch time). If the time parameter is set to 0 or omitted, the item will never expire. """ import sys import collections import datetime import threading import uuid SERVER_MAX_KEY_LENGTH = 250 SERVER_MAX_VALUE_LENGTH = 1024*1024 _DEAD_RETRY = 30 # number of seconds before retrying a dead server. _SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout. 
class Stash(collections.MutableMapping): """A cache, taking place of a memcached server for a gemstash Client.""" CachedItem = collections.namedtuple('CachedItem', ['value', 'expires', 'cas_id']) def __init__(self, *args, **kwargs): """Create a new Stash.""" self.cache = dict() self.write_lock = threading.RLock() def __getitem__(self, key): try: item = self.cache[key] except KeyError: return None if item.expires and item.expires < datetime.datetime.now(): del self.cache[key] return None else: return item.value, item.cas_id def __setitem__(self, key, value): raise NotImplementedError("Add items to the stash using the set method.") def __delitem__(self, key): with self.write_lock: try: del self.cache[key] except KeyError: pass def __iter__(self): return iter(self.cache) def __len__(self): return len(self.cache) def incr(self, key, delta): with self.write_lock: try: value, _ = self[key] except TypeError: value = None if not value: return None if isinstance(value, str): value = str(int(value) + delta) elif isinstance(value, int): value = value + delta else: # not a str or int, can't increment raise ValueError("cannot increment or decrement non-numeric value") self.update(key, value) return int(value) def update(self, key, value, time=None): with self.write_lock: if key not in self.cache: return False else: return self.set(key, value, time) def set(self, key, value, time): with self.write_lock: now = datetime.datetime.now() if time and time > 60*60*24*30: expires = datetime.datetime.utcfromtimestamp(time) elif (not time) or time == 0: expires = None else: expires = now + datetime.timedelta(seconds=time) self.cache[key] = self.CachedItem(value, expires, uuid.uuid4()) return True def flush(self): with self.write_lock: self.cache = dict() def append(self, key, value, time): with self.write_lock: try: original, _ = self[key] except TypeError: original = None if not original: return False if isinstance(original, str): value = original + str(value) elif isinstance(original, 
int): try: value = int(str(original) + str(value)) except ValueError as e: raise ValueError("cannot append non-numeric value to int") from e elif isinstance(original, float): try: value = float(str(original) + str(value)) except ValueError as e: raise ValueError("cannot append non-numeric value to float") from e else: return False return self.set(key, value, time) def prepend(self, key, value, time): with self.write_lock: try: original, _ = self[key] except TypeError: original = None if not original: return False if isinstance(original, str): value = str(value) + original elif isinstance(original, int): try: value = int(str(value) + str(original)) except ValueError as e: raise ValueError("cannot prepend non-numeric value to int") from e elif isinstance(original, float): try: value = float(str(value) + str(original)) except ValueError as e: raise ValueError("cannot prepend non-numeric value to float") from e else: return False return self.set(key, value, time) def cas(self, key, value, time, cas_id): with self.write_lock: if key not in self.cache or not cas_id: return self.set(key, value, time) else: if cas_id == self.cache[key].cas_id: return self.set(key, value, time) else: return 0 def cleanup(self): """Remove expired items from the cache. Returns a list of keys removed. """ removed = [] for key in self.cache.keys(): with self.write_lock: try: item = self.cache[key] except KeyError: # the item vanished while we weren't looking! pass if item.expires and item.expires < datetime.datetime.now(): del self.cache[key] removed.append(key) return removed class MimicStash(collections.MutableMapping): """ A cache, mimicking a memcached server for a gemstash Client. This Stash behaves more like python-memcached + memcached. Use when closer correspondence between gemstash and memcache behvior is required. 
""" CachedItem = collections.namedtuple('CachedItem', ['value', 'expires', 'parse', 'cas_id']) def __init__(self, mimic=True, *args, **kwargs): """Create a new Stash.""" self.cache = dict() self.write_lock = threading.RLock() self.mimic = mimic def __getitem__(self, key): try: item = self.cache[key] except KeyError: return None if item.expires and item.expires < datetime.datetime.now(): del self.cache[key] return None else: return item.parse(item.value), item.cas_id def __setitem__(self, key, value): raise NotImplementedError("Add items to the stash using the set method.") def __delitem__(self, key): with self.write_lock: try: del self.cache[key] except KeyError: pass def __iter__(self): return iter(self.cache) def __len__(self): return len(self.cache) def incr(self, key, delta): with self.write_lock: try: value, _ = self[key] except TypeError: value= None if not value: return None if isinstance(value, str): value = str(int(value) + delta) elif isinstance(value, int): value = value + delta else: # not a str or int, can't increment raise ValueError("cannot increment or decrement non-numeric value") self.update(key, value) return int(value) def update(self, key, value, time=None): with self.write_lock: if key not in self.cache: return False else: return self.set(key, value, time) def set(self, key, value, time): with self.write_lock: expires = self._expires(time) if isinstance(value, int): parse = lambda x: int(x.decode("utf_8")) elif isinstance(value, float): parse = lambda x: float(x.decode("utf_8")) else: parse = lambda x: x.decode("utf_8") value = str(value).encode("utf_8") self.cache[key] = self.CachedItem(value, expires, parse, uuid.uuid4()) return True def flush(self): with self.write_lock: self.cache = dict() def append(self, key, value, time): with self.write_lock: try: original, _, parse, cas_id = self.cache[key] except KeyError: return False if isinstance(parse(original), float): return True value = original + str(value).encode("utf_8") self.cache[key] = 
self.CachedItem(value, self._expires(time), parse, uuid.uuid4()) return True def prepend(self, key, value, time): with self.write_lock: try: original, _, parse, _ = self.cache[key] except KeyError: return False if isinstance(parse(original), float): return True value = str(value).encode("utf_8") + original self.cache[key] = self.CachedItem(value, self._expires(time), parse, uuid.uuid4()) return True def cas(self, key, value, time, cas_id): with self.write_lock: if key not in self.cache or not cas_id: return self.set(key, value, time) else: if cas_id == self.cache[key].cas_id: return self.set(key, value, time) else: return 0 def cleanup(self): """Remove expired items from the cache. Returns a list of keys removed. """ removed = [] for key in self.cache.keys(): with self.write_lock: try: item = self.cache[key] except KeyError: # the item vanished while we weren't looking! pass if item.expires and item.expires < datetime.datetime.now(): del self.cache[key] removed.append(key) return removed @staticmethod def _expires(time): if time and time > 60*60*24*30: expires = datetime.datetime.utcfromtimestamp(time) elif (not time) or time == 0: expires = None else: expires = datetime.datetime.now() + datetime.timedelta(seconds=time) return expires class Client(object): """Client mimicking a memcached client.""" class MemcachedKeyError(Exception): pass class MemcachedKeyLengthError(MemcachedKeyError): pass class MemcachedKeyCharacterError(MemcachedKeyError): pass class MemcachedKeyNoneError(MemcachedKeyError): pass class MemcachedKeyTypeError(MemcachedKeyError): pass def __init__(self, servers, debug=0, pickleProtocol=0, pickler=None, unpickler=None, pload=None, pid=None, server_max_key_length=SERVER_MAX_KEY_LENGTH, server_max_value_length=SERVER_MAX_VALUE_LENGTH, dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT, cache_cas = False, flush_on_reconnect=0, check_keys=True): """Create a new Client attached to a specified Stash.""" self.stash = servers self.debug = debug 
self.server_max_key_length = server_max_key_length self.cache_cas = cache_cas self.cas_cache = {} def flush_all(self): """ Expires all data in the connected Stash, including data with no expiry time. """ self.stash.flush() def debuglog(self, str): """ Write a log entry to stderr. This method is provided purely for compatibility with python-memcached. """ if self.debug: sys.stderr.write("MemCached: {}\n".format(str)) def delete_multi(self, keys, time=0, key_prefix=''): """ Delete multiple keys from the connected Stash. If a key_prefix is specified, it is prepended to each of the keys in the list. So: delete_multi(['bar', 'baz'], key_prefix='foo') is equivalent to: delete('foobar') delete('foobaz') The full operation IS NOT atomic. """ # TODO: the time param does nothing for key in keys: self.delete(key_prefix + key, time) def delete(self, key, time=0): """Delete a key from the connected Stash.""" # TODO: the time param does nothing del self.stash[key] def incr(self, key, delta=1): """ Increment the value assigned to key by delta. This operation is performed by the stash with atomicity guaranteed. """ return self.stash.incr(key, delta) def decr(self, key, delta=1): """ Decrement the value assigned to
"""The picket fence module is meant for analyzing EPID images where a "picket fence" MLC pattern has been made.
Physicists regularly check MLC positioning through this test. This test can be done using film and one can
"eyeball" it, but this is the 21st century and we have numerous ways of quantifying such data. This module attains
to be one of them. It can load in an EPID dicom image (or superimpose multiple images) and determine the MLC peaks, error of each
MLC pair to the picket, and give a few visual indicators for passing/warning/failing.

Features:

* **Analyze either HD or regular MLCs** - Just pass a flag and tell pylinac whether it's HD or not.
* **Easy-to-read pass/warn/fail overlay** - Analysis gives you easy-to-read tools for determining the status of an MLC pair.
* **Any Source-to-Image distance** - Whatever your clinic uses as the SID for picket fence, pylinac can account for it.
* **Account for panel translation** - Have an off-CAX setup? No problem. Translate your EPID and pylinac knows.
* **Account for panel sag** - If your EPID sags at certain angles, just tell pylinac and the results will be shifted.
"""
# --- standard library ---
import io
import os.path as osp
# FIX: Sequence was imported from `collections`, which has been deprecated
# since Python 3.3 and removed in Python 3.10; the ABC lives in collections.abc.
from collections.abc import Sequence
from functools import lru_cache
from itertools import cycle
from tempfile import TemporaryDirectory
from typing import Union, Tuple, List

# --- third-party ---
import argue
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
# FIX: ndarray was imported from the private module numpy.core._multiarray_umath;
# the supported spelling is the public numpy namespace.
from numpy import ndarray

# --- pylinac-local ---
from pylinac.core.typing import NumberLike
from pylinac.core.utilities import open_path
from .core import image
from .core.geometry import Line, Rectangle, Point
from .core.io import get_url, retrieve_demo_file
from .core import pdf
from .core.profile import MultiProfile, SingleProfile
from .log_analyzer import load_log
from .settings import get_dicom_cmap

# possible orientations of the pickets.
# Possible orientations of the picket strips relative to the image edges.
UP_DOWN = 'Up-Down'
LEFT_RIGHT = 'Left-Right'


class PFDicomImage(image.LinacDicomImage):
    """A subclass of a DICOM image that checks for noise and inversion when instantiated. Can also adjust for EPID sag."""

    def __init__(self, path: str, **kwargs):
        super().__init__(path, **kwargs)
        # Clean up spurious signal and normalize image inversion before any
        # downstream analysis relies on the pixel data.
        self._check_for_noise()
        self.check_inversion_by_histogram()

    def _check_for_noise(self):
        """Check if the image has extreme noise (dead pixel, etc) by comparing min/max to 1/99 percentiles and smoothing if need be."""
        # Cap the number of median-filter passes so a pathological image
        # cannot cause an infinite smoothing loop.
        safety_stop = 5
        while self._has_noise() and safety_stop > 0:
            self.filter(size=3)
            safety_stop -= 1

    def _has_noise(self) -> bool:
        """Helper method to determine if there is spurious signal in the image."""
        # NOTE(review): `min`/`max` shadow the builtins inside this method.
        min = self.array.min()
        max = self.array.max()
        near_min, near_max = np.percentile(self.array, [0.5, 99.5])
        # A maximum well above the 99.5th percentile, or a minimum well below
        # the 0.5th percentile (by a margin relative to the percentile spread),
        # is taken as evidence of dead/hot pixels.
        max_is_extreme = max > near_max * 1.25
        min_is_extreme = (min < near_min * 0.75) and (abs(min - near_min) > 0.1 * (near_max - near_min))
        return max_is_extreme or min_is_extreme

    def adjust_for_sag(self, sag, orientation):
        """Roll the image to adjust for EPID sag.

        Parameters
        ----------
        sag
            Amount of sag; passed directly to ``self.roll``.
        orientation : str
            ``UP_DOWN`` or ``LEFT_RIGHT``; selects which axis to roll along.
        """
        direction = 'y' if orientation == UP_DOWN else 'x'
        self.roll(direction, sag)


class PicketFence:
    """A class used for analyzing EPID images where radiation strips have been formed by the MLCs.
    The strips are assumed to be parallel to one another and normal to the image edge; i.e. a "left-right"
    or "up-down" orientation is assumed. Further work could follow up by accounting for any angle.
Attributes ---------- pickets: :class:`~pylinac.picketfence.PicketHandler` image: :class:`~pylinac.core.image.DicomImage` Examples -------- Run the demo:: >>> PicketFence.run_demo() Load the demo image: >>> pf = PicketFence.from_demo_image() Load an image along with its machine log: >>> pf_w_log = PicketFence('my/pf.dcm', log='my/log.bin') Typical session: >>> img_path = r"C:/QA/June/PF.dcm" # the EPID image >>> mypf = PicketFence(img_path) >>> mypf.analyze(tolerance=0.5, action_tolerance=0.3) >>> print(mypf.results()) >>> mypf.plot_analyzed_image() """ def __init__(self, filename: str, filter: int=None, log: str=None, use_filename: bool=False): """ Parameters ---------- filename : str, None Name of the file as a string. If None, image must be loaded later. filter : int, None If None (default), no filtering will be done to the image. If an int, will perform median filtering over image of size ``filter``. log : str Path to a log file corresponding to the delivery. The expected fluence of the log file is used to construct the pickets. MLC peaks are then compared to an absolute reference instead of a fitted picket. use_filename : bool If False (default), no action will be performed. If True, the filename will be searched for keywords that describe the gantry and/or collimator angle. For example, if set to True and the file name was "PF_gantry45.dcm" the gantry would be interpreted as being at 45 degrees. 
""" if filename is not None: self.image = PFDicomImage(filename, use_filenames=use_filename) if isinstance(filter, int): self.image.filter(size=filter) if log is not None: self._load_log(log) else: self._log_fits = None self._is_analyzed = False @classmethod def from_url(cls, url: str, filter: int=None): """Instantiate from a URL.""" filename = get_url(url, progress_bar=True) return cls(filename, filter=filter) @classmethod def from_demo_image(cls, filter: int=None): """Construct a PicketFence instance using the demo image.""" demo_file = retrieve_demo_file(url='EPID-PF-LR.dcm') return cls(demo_file, filter=filter) @classmethod def from_multiple_images(cls, path_list: Sequence): """Load and superimpose multiple images and instantiate a Starshot object. Parameters ---------- path_list : iterable An iterable of path locations to the files to be loaded/combined. """ obj = cls.from_demo_image() # save a combined image to a temporary dir, then load it back in as a PFDicomImage with TemporaryDirectory() as tmp: filename = osp.join(tmp, 'mydcm.dcm') image.load_multiples(path_list, method='mean').save(filename) obj.image = PFDicomImage(filename) return obj @property def passed(self) -> bool: """Boolean specifying if all MLC positions were within tolerance.""" return self.pickets.passed @property def percent_passing(self) -> float: """Return the percentage of MLC positions under tolerance.""" num = 0 num_pass = 0 for picket in self.pickets: num += len(picket.error_array) num_pass += sum(picket.error_array < self.settings.tolerance) pct_pass = 100 * num_pass / num return pct_pass @property def max_error(self) -> float: """Return the maximum error found.""" return max(picket.max_error for picket in self.pickets) @property def max_std(self) -> float: pos, vals, err, leaf_nums = self.pickets.error_hist() return np.max(err) @property def max_error_picket(self) -> int: """Return the picket number where the maximum error occurred.""" return np.argmax([picket.max_error for picket 
in self.pickets]) @property def max_error_leaf(self) -> int: """Return the leaf that had the maximum error.""" picket = self.pickets[self.max_error_picket] return np.argmax(picket.error_array) @property @lru_cache() def abs_median_error(self) -> float: """Return the median error found.""" return np.median(np.hstack([picket.error_array for picket in self.pickets])) @property def num_pickets(self) -> int: """Return the number of pickets determined.""" return len(self.pickets) def _load_log(self, log: str): """Load a machine log that corresponds to the picket fence delivery. This log determines the location of the pickets. The MLC peaks are then compared to the expected log pickets, not a simple fit of the peaks.""" # load the log fluence image mlog = load_log(log) fl = mlog.fluence.expected.calc_map(equal_aspect=True) fli = image.load(fl, dpi=254) # 254 pix/in => 1 pix/0.1mm (default fluence calc) # equate them such that they're the same size & DPI fluence_img, self.image = image.equate_images(fli, self.image) # get picket fits from the modified fluence image pf = PicketFence.from_demo_image() pf.image = fluence_img pf.analyze() self._log_fits = cycle([p.fit for p in pf.pickets]) @staticmethod def run_demo(tolerance: float=0.5, action_tolerance: float=None): """Run the Picket Fence demo using the demo image. See analyze() for parameter info.""" pf = PicketFence.from_demo_image() pf.analyze(tolerance, action_tolerance=action_tolerance) print(pf.results()) pf.plot_analyzed_image(leaf_error_subplot=True) def analyze(self, tolerance: float=0.5, action_tolerance: float=None, hdmlc: bool=False, num_pickets: int=None, sag_adjustment: Union[float, int]=0, orientation: str=None, invert: bool=False): """Analyze the picket fence image. Parameters ---------- tolerance : int, float The tolerance of difference in mm between an MLC pair position and the picket fit line. action_tolerance : int, float, None If None (default), no action tolerance is set or compared to. 
If an int or float, the MLC pair measurement is also compared to this tolerance. Must be lower than tolerance. This value is usually meant to indicate that a physicist should take an "action" to reduce the error, but should not stop treatment. hdmlc : bool If False (default), a standard (5/10mm leaves) Millennium MLC model is assumed. If True, an HD (2.5/5mm leaves) Millennium is assumed. num_pickets : int, None .. versionadded:: 0.8 The number of pickets in the image. A helper parameter to limit the total number of pickets, only needed if analysis is catching more pickets than there really are. sag_adjustment : float, int .. versionadded:: 0.8 The amount of shift in mm to apply to the image to correct for EPID sag. For Up-Down picket images, positive moves the image down, negative up. For Left-Right picket images, positive moves the image left, negative right.
import ply.lex as lex import re from parse.errors import Error as our_error from parse.expressions.expressions_math import * from parse.expressions.expressions_base import * from parse.expressions.expressions_trig import * from parse.sql_common.sql_general import * from parse.sql_ddl.create import * from parse.sql_ddl.alter import * from parse.sql_dml.insert import * from parse.sql_ddl.drop import * from parse.sql_dml.select import * from parse.sql_dml.update import * from parse.sql_dml.delete import * from treeGraph import * from parse.symbol_table import * from parse.plpgsql.function import * from parse.plpgsql.declaration import * from parse.plpgsql.control import * # =========================================================================================== # ==================================== LEXICAL ANALYSIS ================================== # =========================================================================================== reserved = { 'smallint' : 'SMALLINT', 'integer' : 'INTEGER', 'bigint' : 'BIGINT', 'decimal' : 'DECIMAL', 'numeric' : 'NUMERIC', 'real' : 'REAL', 'double' : 'DOUBLE', 'precision' : 'PRECISION', 'money' : 'MONEY', 'caracter' : 'CARACTER', 'varying' : 'VARYING', 'varchar' : 'VARCHAR', 'character' : 'CHARACTER', 'char' : 'CHAR', 'text' : 'TEXT', 'timestamp' : 'TIMESTAMP', 'date' : 'DATE', 'time' : 'TIME', 'interval' : 'INTERVAL', 'year' : 'YEAR', 'month' : 'MONTH', 'day' : 'DAY', 'hour' : 'HOUR', 'minute' : 'MINUTE', 'second' : 'SECOND', 'extract' : 'EXTRACT', 'date_part' : 'DATE_PART', 'now' : 'NOW', 'current_date' : 'CURRENT_DATE', 'current_time' : 'CURRENT_TIME', 'boolean' : 'BOOLEAN', 'between' : 'BETWEEN', 'symmetric' : 'SYMMETRIC', 'in' : 'IN', 'like' : 'LIKE', 'ilike' : 'ILIKE', 'similar' : 'SIMILAR', 'is' : 'IS', 'null' : 'NULL', 'not' : 'NOT', 'and' : 'AND', 'or' : 'OR', 'select' : 'SELECT', 'from' : 'FROM', 'where' : 'WHERE', 'create' : 'CREATE', 'type' : 'TYPE', 'as' : 'AS', 'enum' : 'ENUM', 'replace' : 'REPLACE', 
'database' : 'DATABASE', 'if' : 'IF', 'exists' : 'EXISTS', 'owner' : 'OWNER', 'mode' : 'MODE', 'show' : 'SHOW', 'databases' : 'DATABASES', 'alter' : 'ALTER', 'rename' : 'RENAME', 'to' : 'TO', 'drop' : 'DROP', 'current_user' : 'CURRENT_USER', 'session_user' : 'SESSION_USER', 'table' : 'TABLE', 'default' : 'DEFAULT', 'constraint' : 'CONSTRAINT', 'unique' : 'UNIQUE', 'check' : 'CHECK', 'primary' : 'PRIMARY', 'key' : 'KEY', 'references' : 'REFERENCES', 'foreign' : 'FOREIGN', 'add' : 'ADD', 'column' : 'COLUMN', 'set' : 'SET', 'inherits' : 'INHERITS', 'insert' : 'INSERT', 'into' : 'INTO', 'values' : 'VALUES', 'update' : 'UPDATE', 'delete' : 'DELETE', 'distinct' : 'DISTINCT', 'group' : 'GROUP', 'by' : 'BY', 'having' : 'HAVING', 'unknown' : 'UNKNOWN', 'count' : 'COUNT', 'min' : 'MIN', 'max' : 'MAX', 'sum' : 'SUM', 'avg' : 'AVG', 'abs' : 'ABS', 'cbrt' : 'CBRT', 'ceil' : 'CEIL', 'ceiling' : 'CEILING', 'degrees' : 'DEGREES', 'div' : 'DIV', 'exp' : 'EXP', 'factorial' : 'FACTORIAL', 'floor' : 'FLOOR', 'gcd' : 'GCD', 'lcm' : 'LCM', 'ln' : 'LN', 'log' : 'LOG', 'log10' : 'LOG10', 'min_scale' : 'MIN_SCALE', 'mod' : 'MOD', 'pi' : 'PI', 'power' : 'POWER', 'radians' : 'RADIANS', 'round' : 'ROUND', 'scale' : 'SCALE', 'sign' : 'SIGN', 'sqrt' : 'SQRT', 'trim_scale' : 'TRIM_SCALE', 'trunc' : 'TRUNC', 'width_bucket' : 'WIDTH_BUCKET', 'random' : 'RANDOM', 'setseed' : 'SETSEED', 'acos' : 'ACOS', 'acosd' : 'ACOSD', 'asin' : 'ASIN', 'asind' : 'ASIND', 'atan' : 'ATAN', 'atand' : 'ATAND', 'atan2' : 'ATAN2', 'atan2d' : 'ATAN2D', 'cos' : 'COS', 'cosd' : 'COSD', 'cot' : 'COT', 'cotd' : 'COTD', 'sin' : 'SIN', 'sind' : 'SIND', 'tan' : 'TAN', 'tand' : 'TAND', 'sinh' : 'SINH', 'cosh' : 'COSH', 'tanh' : 'TANH', 'asinh' : 'ASINH', 'acosh' : 'ACOSH', 'atanh' : 'ATANH', 'length' : 'LENGTH', 'substring' : 'SUBSTRING', 'trim' : 'TRIM', 'get_byte' : 'GET_BYTE', 'md5' : 'MD5', 'set_byte' : 'SET_BYTE', 'sha256' : 'SHA256', 'substr' : 'SUBSTR', 'convert' : 'CONVERT', 'encode' : 'ENCODE', 'decode' : 'DECODE', 
'substring' : 'SUBSTRING', 'any' : 'ANY', 'all' : 'ALL', 'some' : 'SOME', 'asc' : 'ASC', 'desc' : 'DESC', 'case' : 'CASE', 'when' : 'WHEN', 'then' : 'THEN', 'else' : 'ELSE', 'end' : 'END', 'greatest' : 'GREATEST', 'least' : 'LEAST', 'order' : 'ORDER', 'limit' : 'LIMIT', 'offset' : 'OFFSET', 'union' : 'UNION', 'intersect' : 'INTERSECT', 'except' : 'EXCEPT', 'inner' : 'INNER', 'left' : 'LEFT', 'right' : 'RIGHT', 'full' : 'FULL', 'outer' : 'OUTER', 'join' : 'JOIN', 'on' : 'ON', 'using' : 'USING', 'natural' : 'NATURAL', 'first' : 'FIRST', 'last' : 'LAST', 'nulls' : 'NULLS', 'use' : 'USE', 'constant' : 'CONSTANT', 'collate' : 'COLLATE', 'function' : 'FUNCTION', 'begin' : 'BEGIN', 'return' : 'RETURN', 'returns' : 'RETURNS', 'alias' : 'ALIAS', 'for' : 'FOR', 'language' : 'LANGUAGE', 'out' : 'OUT', 'declare' : 'DECLARE', 'rowtype' : 'ROWTYPE', 'record' : 'RECORD', 'prepare' : 'PREPARE', 'perform' : 'PERFORM', 'found' : 'FOUND', 'raise' : 'RAISE', 'no_data_found' : 'NO_DATA_FOUND', 'too_many_rows' : 'TOO_MANY_ROWS', 'execute' : 'EXECUTE', 'get' : 'GET', 'notice' : 'NOTICE', 'elseif' : 'ELSEIF', 'exception' : 'EXCEPTION', 'plpgsql' : 'PLPGSQL', 'diagnostics' : 'DIAGNOSTICS', 'inout' : 'INOUT', 'cascade' : 'CASCADE', 'restrict' : 'RESTRICT', 'index' : 'INDEX', 'hash' : 'HASH', } tokens = [ 'PARA', 'PARC', 'CORCHA', 'CORCHC', 'PUNTO', 'COMA', 'PUNTOCOMA', 'DOSPUNTOS', 'MAS', 'MENOS', 'POR', 'DIAGONAL', 'EXPONENCIANCION', 'PORCENTAJE', 'MAYOR', 'MENOR', 'IGUAL', 'MAYORQ', 'MENORQ', 'DIFERENTE', 'ENTERO', 'FLOAT', 'TEXTO', 'FECHA_HORA', 'PATTERN_LIKE', 'BOOLEAN_VALUE', 'ID', 'SQUARE_ROOT', 'CUBE_ROOT', 'AMPERSON', 'NUMERAL', 'PRIME', 'SHIFT_L', 'SHIFT_R', 'DOLLAR', ] +list(reserved.values()) t_PARA = r'\(' t_PARC = r'\)' t_CORCHA = r'\[' t_CORCHC = r'\]' t_PUNTO = r'\.' 
# Simple literal tokens as PLY string rules. PLY sorts string rules by
# decreasing regex length, so two-character tokens ('>=', '<=', '<<', '>>',
# '||') are tried before their one-character prefixes ('>', '<', '|').
t_COMA = r'\,'
t_PUNTOCOMA = r'\;'
t_DOSPUNTOS = r'\:'
t_MAS = r'\+'
t_MENOS = r'\-'
t_POR = r'\*'
t_DIAGONAL = r'\/'
t_EXPONENCIANCION = r'\^'
t_PORCENTAJE = r'%'
t_DOLLAR = r'\$'
t_MAYOR = r'>'
t_MENOR = r'<'
t_IGUAL = r'='
t_MAYORQ = r'>='
t_MENORQ = r'<='
t_SQUARE_ROOT = r'\|'
t_CUBE_ROOT = r'\|\|'
t_AMPERSON = r'\&'
t_NUMERAL = r'\#'
t_PRIME = r'\~'
t_SHIFT_L = r'<<'
t_SHIFT_R = r'>>'

# Ignored input: whitespace, `--` line comments, and `/* ... */` or `//` comments.
t_ignore = " \t"
t_ignore_COMMENT = r'\-\-.*'
t_ignore_COMMENTMULTI = r'(/\*(.|\n)*?\*/)|(//.*)'

# Function rules below are tried in definition order; their docstrings ARE the
# token regexes (PLY reads them), so the docstrings must not be edited.

def t_DIFERENTE(t):
    r'((<>)|(!=))'
    t.type = reserved.get(t.value, 'DIFERENTE')
    return t

def t_FLOAT(t):
    r'((\d+\.\d*)((e[\+-]?\d+)?)|(\d*e[\+-]?\d+))'
    t.value = float(t.value)
    return t

def t_ENTERO(t):
    r'\d+'
    # NOTE(review): the float() round-trip is redundant for a `\d+` match.
    t.value = int(float(t.value))
    return t

def t_FECHA_HORA(t):
    # Quoted timestamp literal, e.g. '2021-01-31 23:59:59'; quotes are stripped.
    r'\'\d{4}-[0-1]?\d-[0-3]?\d [0-2]\d:[0-5]\d:[0-5]\d\''
    t.value = t.value[1:-1]
    t.type = reserved.get(t.value, 'FECHA_HORA')
    return t

def t_PATTERN_LIKE(t):
    # Quoted '%...%' LIKE pattern; the quote+% on each side is stripped.
    r'\'\%.*\%\''
    t.value = t.value[2:-2]
    t.type = reserved.get(t.value, 'PATTERN_LIKE')
    return t

def t_TEXTO(t):
    # General single-quoted string literal; quotes are stripped.
    r'\'([^\\\n]|(\\.))*?\''
    t.value = t.value[1:-1]
    t.type = 'TEXTO'
    return t

def t_BOOLEAN_VALUE(t):
    # NOTE(review): this rule runs before t_ID, so an identifier starting with
    # "true"/"false" (e.g. "falsehood") would be split into a boolean token
    # plus a remainder — confirm this is intended.
    r'((false)|(true))'
    t.value = t.value.lower()
    t.type = reserved.get(t.value, 'BOOLEAN_VALUE')
    return t

def t_ID(t):
    # Identifier; lowercased lookup maps reserved words to their keyword tokens.
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    t.type = reserved.get(t.value.lower(), 'ID')
    return t

def t_newline(t):
    # Track line numbers for error reporting.
    r'\n+'
    t.lexer.lineno += t.value.count("\n")

def t_error(t):
    # Record a lexical error and skip the offending character.
    # NOTE(review): "Ilegal" typo in the user-facing message is left as-is;
    # changing it would alter runtime output.
    err = Error(t.lineno, t.lexpos, ErrorType.LEXICAL, 'Ilegal character \'' + t.value[0] + '\'')
    errorsList.append(err)
    t.lexer.skip(1)

# Build the lexer; IGNORECASE makes keywords and literals case-insensitive.
lexer = lex.lex(debug=False, reflags=re.IGNORECASE)

# ===========================================================================================
# ==================================== SYNTACTIC ANALYSIS ==================================
# ===========================================================================================

start = 'init'

precedence = (
    # Arithmetic
    ('left', 'MAS', 'MENOS'),
    ('left', 'POR', 'DIAGONAL'),
('left', 'EXPONENCIANCION'), ('right', 'UMENOS'), ('right', 'UMAS'), # Relational ('left', 'MENOR', 'MAYOR', 'IGUAL', 'MENORQ', 'MAYORQ'), # logic ('left', 'OR'), ('left', 'AND'), ('right', 'NOT'), ('left', 'AS') ) def p_init(t): ''' init : statements''' t[0] = t[1] # ===================================== SQL ===================================== def p_statements(t): ''' statements : statements statement ''' t[1].append(t[2]) t[0] = t[1] def p_statements2(t): ''' statements : statement ''' t[0] = [t[1]] def p_statement(t): '''statement : stm_show PUNTOCOMA | stm_create PUNTOCOMA | stm_alter PUNTOCOMA | stm_use_db PUNTOCOMA | stm_select PUNTOCOMA | stm_insert PUNTOCOMA | stm_update PUNTOCOMA | stm_delete PUNTOCOMA | stm_drop PUNTOCOMA | stm_select UNION all_opt stm_select PUNTOCOMA | stm_select INTERSECT all_opt stm_select PUNTOCOMA | stm_select EXCEPT all_opt stm_select PUNTOCOMA | asig_basica PUNTOCOMA | stm_perform PUNTOCOMA | stm_begin PUNTOCOMA | stm_if PUNTOCOMA | stm_language PUNTOCOMA | stm_create_function PUNTOCOMA | stm_execute PUNTOCOMA | stm_get PUNTOCOMA | stm_drop_function PUNTOCOMA | stm_index PUNTOCOMA ''' # | stm_select PUNTOCOMA # | stm_select UNION all_opt stm_select # | stm_select INTERSECT all_opt stm_select # | stm_select EXCEPT all_opt try: if len(t) == 3: punteroinicio(t[1].graph_ref) except: print("falta parametro graph_ref") if len(t) == 3: t[0] = t[1] else: token_op = t.slice[2] graph_ref = None if token_op.type == 'UNION': childsProduction = addNotNoneChild(t, [1, 3, 4]) graph_ref = graph_node(str("stm_union"), [t[1], t[2], t[3], t[4]], childsProduction) punteroinicio(graph_ref) addCad("**\<STATEMENT>** ::= \<STM_SELECT> tUnion tAll \<STM_SELECT> ") t[0] = Union(t[1], t[4], True if t[3] is not None else False, token_op.lineno, token_op.lexpos, graph_ref) if token_op.type == 'INTERSECT': childsProduction = addNotNoneChild(t, [1, 3, 4]) graph_ref = graph_node(str("stm_intersect"), [t[1], t[2], t[3], t[4]], childsProduction) 
punteroinicio(graph_ref) addCad("**\<STATEMENT>** ::= \<STM_SELECT> tUnion tAll \<STM_SELECT> ") t[0] = Intersect(t[1], t[4], True if t[3] is not None else False, token_op.lineno, token_op.lexpos, graph_ref) if token_op.type == 'EXCEPT': childsProduction = addNotNoneChild(t, [1, 3, 4]) graph_ref = graph_node(str("stm_except"), [t[1], t[2], t[3], t[4]], childsProduction) punteroinicio(graph_ref) addCad("**\<STATEMENT>** ::= \<STM_SELECT> tUnion tAll \<STM_SELECT> ") t[0] = Except(t[1], t[4], True if t[3] is not None else False, token_op.lineno, token_op.lexpos, graph_ref) def p_statement_error(t): '''statement : error PUNTOCOMA ''' token = t.slice[1] t[0] = Error(token.lineno, token.lexpos, ErrorType.SYNTAX, 'Ilegal token ' + str(token.lineno)) ################# # Parte de PGSQL def p_asig_basica(t): '''asig_basica : ID DOSPUNTOS IGUAL expression | ID IGUAL expression ''' if len(t) == 5: childsProduction = addNotNoneChild(t,[4]) graph_ref = graph_node(str("asig_basica"),
[3, 2, 8, 10], [3, 1, 10, 12], [4, 4, 13, 15], [4, 3, 15, 17], [4, 2, 17, 19], [4, 1, 19, 21], [5, 4, 14, 16], [5, 3, 16, 18], [5, 2, 18, 20], [5, 1, 20, 22]] class mpiinf3d28j3d(): """Body joints from MPI-INF-3DHP.""" num_joints = 28 dim = 3 """Color map""" color = ['r'] cmap = num_joints * [0] links = [] class est34j3d(): """Elementary Skeleton Template.""" num_joints = 34 dim = 3 """Projections to other formats.""" map_to_mpii = [28, 26, 24, 25, 27, 29, 0, 4, 5, 9, 20, 18, 16, 17, 19, 21] map_to_mpii3dhp = [2, 3, 1, 1, 0, 5, 6, 9, 15, 17, 19, 21, 23, 14, 16, 18, 20, 22, 25, 27, 29, 31, 33, 24, 26, 28, 30, 32] mat_to_mpi3d_te = [0, 5, 6, 9, 16, 17, 18, 19, 20, 21, 24, 25, 26, 27, 28, 29, 1] map_to_h36m23j = [0, 1, 5, 6, 8, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] map_to_coco = \ [7, 11, 10, 13, 12, 17, 16, 19, 18, 21, 20, 25, 24, 27, 26, 29, 28] map_to_pa16j = [0, 5, 6, 8, 16, 17, 18, 19, 20, 21, 24, 25, 26, 27, 28, 29] map_to_pa17j = map_to_pa16j + [1] neighbors = np.array([ [24, 25], # 0 [0, 2], # 1 [1, 4], # 2 [1, 4], # 3 [3, 5], # 4 [4, 6], # 5 [5, 7], # 6 [5, 8], # 7 [7, 9], # 8 [8, 8], # 9 [8, 7], # 10 [8, 7], # 11 [7, 10], # 12 [7, 11], # 13 [5, 16], # 14 [5, 17], # 15 [14, 18], # 16 [15, 19], # 17 [16, 20], # 18 [17, 21], # 19 [18, 22], # 20 [19, 23], # 21 [20, 20], # 22 [21, 21], # 23 [0, 26], # 24 [0, 27], # 25 [24, 28], # 26 [25, 29], # 27 [26, 30], # 28 [27, 31], # 29 [28, 32], # 30 [29, 33], # 31 [28, 30], # 32 [29, 31], # 33 ]) """Horizontal flip mapping""" map_hflip = list(range(10)) + [pairflip(x) for x in range(10, 34)] """Skeleton or joint links""" color = [cnames['darkgreen'], cnames['cyan'], cnames['crimson'], cnames['royalblue'], cnames['gold'], cnames['fuchsia'], ] cmap = 5*[0] + 9*[1] + 5*[2, 3] + 5*[4, 5] links = pairlist(0, 4) + [[0, 24], [0, 25], [1, 24], [1, 25], [1, 16], [1, 17], [4, 16], [4, 17]] \ + pairlist(5, 9) + [[7, 10], [7, 11], [10, 12], [11, 13]] \ + pairlist(14, 22, 2) + pairlist(15, 23, 2) \ + 
pairlist(24, 32, 2) + pairlist(25, 33, 2) newlinks = [[1, 24, 25], # 0 [24, 25, 16, 17, 2], # 1 [3], # 2 [4], # 3 [5, 16, 17], # 4 [6, 14, 15], # 5 [7, 12, 13], # 6 [8, 10, 11], # 7 [9], # 8 [], # 9 [], # 10 [], # 11 [9, 10], # 12 [9, 11], # 13 [], # 14 [], # 15 [14], # 16 [15], # 17 [16], # 18 [17], # 19 [18], # 20 [19], # 21 [20], # 22 [21], # 23 [], # 24 [], # 25 [24, 0], # 26 [25, 0], # 27 [26], # 28 [27], # 29 [28], # 30 [29], # 31 [30], # 32 [31]] # 33 keypoint_size = np.array(34*[1.]) class dst68j3d(): """Dense Skeleton Template.""" num_joints = 68 dim = 3 """Projections to other formats.""" map_to_mpii = [62, 56, 48, 49, 57, 63, 0, 6, 7, 11, 32, 26, 20, 21, 27, 33] map_to_mpii3dhp = [4, 5, 3, 3, 0, 7, 8, 11, 16, 20, 26, 32, 36, 17, 21, 27, 33, 37, 48, 56, 62, 64, 66, 49, 57, 63, 65, 67] map_to_h36m23j = [0, 3, 7, 8, 10, 20, 21, 26, 27, 32, 33, 36, 37, 48, 49, 56, 57, 62, 63, 64, 65, 66, 67] map_to_coco = [9, 13, 12, 15, 14, 21, 20, 27, 26, 33, 32, 49, 48, 57, 56, 63, 62] map_to_pa16j = [0, 7, 8, 10, 20, 21, 26, 27, 32, 33, 48, 49, 56, 57, 62, 63] map_to_pa17j = map_to_pa16j + [3] """Horizontal flip mapping""" map_hflip = list(range(12)) + [pairflip(x) for x in range(12, 68)] neighbors = np.array([ [48, 49], # 0 [0, 2], # 1 [1, 3], # 2 [2, 4], # 3 [3, 5], # 4 [4, 6], # 5 [5, 7], # 6 [6, 8], # 7 [7, 9], # 8 [8, 10], # 9 [9, 11], # 10 [9, 10], # 11 [10, 14], # 12 [10, 15], # 13 [8, 12], # 14 [8, 13], # 15 [7, 18], # 16 [7, 19], # 17 [6, 20], # 18 [6, 21], # 19 [18, 22], # 20 [19, 23], # 21 [20, 24], # 22 [21, 25], # 23 [22, 26], # 24 [23, 27], # 25 [24, 28], # 26 [25, 29], # 27 [26, 30], # 28 [27, 31], # 29 [28, 32], # 30 [29, 33], # 31 [30, 34], # 32 [31, 35], # 33 [32, 36], # 34 [33, 37], # 35 [32, 34], # 36 [33, 35], # 37 [20, 40], # 38 [21, 41], # 39 [38, 3], # 40 [39, 3], # 41 [3, 40], # 42 [3, 41], # 43 [2, 3], # 44 [2, 3], # 45 [48, 1], # 46 [49, 1], # 47 [46, 50], # 48 [47, 51], # 49 [48, 54], # 50 [49, 55], # 51 [0, 48], # 52 [0, 49], # 53 [50, 56], # 
54 [51, 57], # 55 [54, 58], # 56 [55, 59], # 57 [56, 60], # 58 [57, 63], # 59 [58, 62], # 60 [59, 63], # 61 [60, 64], # 62 [61, 65], # 63 [62, 66], # 64 [63, 67], # 65 [62, 64], # 66 [63, 65], # 67 ]) softjoint_rules = [ # Additional points [1, [0, 3], [2/3, 1/3]], [2, [0, 3], [1/3, 2/3]], [18, [16, 20], [1/2, 1/2]], [19, [17, 21], [1/2, 1/2]], [22, [20, 26], [2/3, 1/3]], [23, [21, 27], [2/3, 1/3]], [24, [20, 26], [1/3, 2/3]], [25, [21, 27], [1/3, 2/3]], [28, [26, 32], [2/3, 1/3]], [29, [27, 33], [2/3, 1/3]], [30, [26, 32], [1/3, 2/3]], [31, [27, 33], [1/3, 2/3]], [34, [32, 36], [1/2, 1/2]], [35, [33, 37], [1/2, 1/2]], [38, [20, 3], [2/3, 1/3]], [39, [21, 3], [2/3, 1/3]], [40, [20, 3], [1/3, 2/3]], [41, [21, 3], [1/3, 2/3]], [42, [3, 20, 48], [1/5, 2/5, 2/5]], [43, [3, 21, 49], [1/5, 2/5, 2/5]], [44, [3, 48], [2/3, 1/3]], [45, [3, 49], [2/3, 1/3]], [46, [3, 48], [1/3, 2/3]], [47, [3, 49], [1/3, 2/3]], [50, [48, 56], [2/3, 1/3]], [51, [49, 57], [2/3, 1/3]], [52, [0, 56], [2/3, 1/3]], [53, [0, 57], [2/3, 1/3]], [54, [48, 56], [1/3, 2/3]], [55, [49, 57], [1/3, 2/3]], [58, [56, 62], [2/3, 1/3]], [59, [57, 63], [2/3, 1/3]], [60, [56, 62], [1/3, 2/3]], [61, [57, 63], [1/3, 2/3]], ] @staticmethod def compute_soft_joints(p, num_iter=1): assert (p.ndim == 2) and (p.shape == (dst68j3d.num_joints, 4)), ( 'Invalid pose, expected a pose + confidence ' 'tensor (%d,4), ' 'got %s' %(dst68j3d.num_joints, str(p.shape))) def _apply_rules(rules): for target, bases, weights in rules: try: assert abs(sum(weights) - 1) < 1e-4 if np.isnan(p[target, 0:3]).all(): # joint not computed yet for i in range(4): p[target, i] = np.sum(p[bases, i] * weights) except: warning('Invalid rule: {}, {}, {}'.format( target, bases, weights)) raise for _ in range(num_iter): _apply_rules(dst68j3d.softjoint_rules) return p """Skeleton or joint links""" color = [cnames['darkgreen'], cnames['cyan'], cnames['crimson'], cnames['royalblue'], cnames['gold'], cnames['fuchsia'], ] cmap = 7*[0] + 9*[1] + 11*[2, 3] + 
10*[0] + 10*[4, 5] links = pairlist(0, 7) + [[0, 49], [49,
'security')) self.ui.button_nat.clicked.connect(partial(self._load_rulebase, 'nat')) self.ui.button_reports.clicked.connect(partial(self._load_reports, 'reports')) self.ui.button_report_groups.clicked.connect(partial(self._load_reports, 'report-group')) ############################################## # COMBO BOX EVENTS ############################################## self.ui.combo_file.currentIndexChanged.connect(self._update_file_selected) self.ui.combo_from_dg.currentIndexChanged.connect(self._reset_flags_buttons) self.ui.combo_from_rulebase.currentIndexChanged.connect(self._reset_flags_buttons) self.ui.combo_to_dg.currentIndexChanged.connect(self._reset_flags_buttons) self.ui.combo_to_rulebase.currentIndexChanged.connect(self._reset_flags_buttons) self.ui.combo_from_vsys.currentIndexChanged.connect(self._reset_flags_buttons) self.ui.combo_to_vsys.currentIndexChanged.connect(self._reset_flags_buttons) ############################################################################ # LOAD REPORTS ############################################################################ def _load_reports(self, obj): self.obj = obj self.ui.progress_bar.setValue(0) self.ui.label_status.setText('Merging {obj}...'.format(obj=obj)) # validate user input if self._validate_user_input() is not True: return self._xpath_from = None self._xpath_to = None ######################################### # FROM ######################################### # build xpaths - if Panoramma if len(self.ui.combo_from_dg.currentText()) > 1: # shared if self.ui.combo_from_dg.currentText() == 'Shared': self._xpath_from = '/config/shared/' # DG else: self._xpath_from = "/config/devices/entry[<EMAIL>']/device-group/entry[@name='{dg}']/".format(dg=self.ui.combo_from_dg.currentText()) # PAN-OS else: self._xpath_from = '/config/shared/' ######################################### # TO ######################################### # Panorama if len(self.ui.combo_to_dg.currentText()) > 1: # Shared if 
# NOTE(review): this excerpt begins mid-method — the `if` that this condition
# belongs to, and the enclosing method's `def`, are above this view.
self.ui.combo_to_dg.currentText() == 'Shared':
    self._xpath_to = '/config/shared/'  # Panorama shared scope
# DG
else:
    # Panorama device-group scoped xpath
    self._xpath_to = "/config/devices/entry[<EMAIL>']/device-group/entry[@name='{dg}']/".format(dg=self.ui.combo_to_dg.currentText())
# PAN-OS
else:
    self._xpath_to = '/config/shared/'

# build out load config partial command
cmd = self._load_config_partial.format(
    file=self._from_file,
    xpath_from=self._xpath_from,
    xpath_to=self._xpath_to,
    obj_from=self.obj,
    obj_to=self.obj
)
# human-readable echo of the same command for the text browser
cmd_output = 'load config partial from {file} from-xpath {xpath_from}{obj_from} to-xpath {xpath_to}{obj_to} mode merge'.format(
    file=self._from_file,
    xpath_from=self._xpath_from,
    xpath_to=self._xpath_to,
    obj_from=self.obj,
    obj_to=self.obj
)

# output to text browser
self.ui.text_out.clear()
self.ui.text_out.append('> Type: <b><font color="yellow">{type}</font></b>'.format(type=self.obj))
self.ui.text_out.append('> Executing the following command...')
self.ui.text_out.append('\n')
self.ui.text_out.append(cmd_output)
self.ui.text_out.append('\n')
self.ui.progress_bar.setValue(50)

# run the API call on a worker thread; results arrive in _connect_values_thread
self.connect_api_thread = APIRequest(parent=None, api=self._api, url=self._url, cmd=cmd)
self.connect_api_thread.start()
self.connect_api_thread.api_values.connect(self._connect_values_thread)

############################################################################
# IRON SKILLET
############################################################################
def _iron_skillet(self):
    """Run the Iron Skillet day-one template load for the connected device.

    Prompts a platform-specific element-selection dialog (PANORAMA or PANOS),
    records the chosen elements, then hands the work to an IronSkillet worker
    thread whose signals feed _iron_skillet_output / _print_iron_skillet.
    """
    # check user input
    if not self._validate_user_input():
        return
    # check parameters are not null
    if self._from_file == 'Select a File' or len(self.ui.combo_file.currentText()) < 1:
        self._show_critical_error(['Error!', 'Select a "From" file'])
        return
    self.ui.progress_bar.setValue(0)
    self.ui.label_status.setText('Iron Skillet...')
    self.ui.text_out.clear()
    self.ui.text_out.append('> Starting Iron Skillet...')
    # element-selection dialog differs per platform
    d = QDialog()
    ui = PANORAMA() if self._model == 'Panorama' else PANOS()
    ui.setupUi(d)
    d.show()
    resp = d.exec_()
    self._process_iron_skillet = True if resp == QDialog.Accepted else False
    # if they hit 'cancel' -- break out of function
    if not self._process_iron_skillet:
        return
    # PANORAMA
    if self._model == 'Panorama':
        self._iron_skillet_panorama['system'] = False if not ui.checkbox_system.isChecked() else True
        self._iron_skillet_panorama['settings'] = False if not ui.checkbox_setting.isChecked() else True
        self._iron_skillet_panorama['log settings'] = False if not ui.checkbox_log.isChecked() else True
        self._iron_skillet_panorama['template'] = False if not ui.checkbox_template.isChecked() else True
        self._iron_skillet_panorama['device group'] = False if not ui.checkbox_dg.isChecked() else True
        self._iron_skillet_panorama['shared'] = False if not ui.checkbox_shared.isChecked() else True
        self._iron_skillet_panorama['log collector'] = False if not ui.checkbox_log_collector.isChecked() else True
        self.connect_thread_iron_skillet = IronSkillet(parent=None, elements=self._iron_skillet_panorama, os='Panorama', api=self._api, url=self._url, file=self._from_file, from_vsys=self.ui.combo_from_vsys.currentText(), to_vsys=self.ui.combo_to_vsys.currentText())
        self.connect_thread_iron_skillet.start()
        self.connect_thread_iron_skillet.values_iron_skillet.connect(self._iron_skillet_output)
        self.connect_thread_iron_skillet.done.connect(self._print_iron_skillet)
    # PAN-OS
    else:
        self._iron_skillet_panos['address'] = False if not ui.checkbox_address.isChecked() else True
        self._iron_skillet_panos['email schedule'] = False if not ui.checkbox_email_schedule.isChecked() else True
        self._iron_skillet_panos['external list'] = False if not ui.checkbox_ext_list.isChecked() else True
        self._iron_skillet_panos['log settings'] = False if not ui.checkbox_log.isChecked() else True
        self._iron_skillet_panos['profile group'] = False if not ui.checkbox_profile_group.isChecked() else True
        self._iron_skillet_panos['profiles'] = False if not ui.checkbox_profiles.isChecked() else True
        self._iron_skillet_panos['report group'] = False if not ui.checkbox_report_groups.isChecked() else True
        self._iron_skillet_panos['reports'] = False if not ui.checkbox_reports.isChecked() else True
        self._iron_skillet_panos['rulebase'] = False if not ui.checkbox_rulebase.isChecked() else True
        self._iron_skillet_panos['settings'] = False if not ui.checkbox_setting.isChecked() else True
        self._iron_skillet_panos['system'] = False if not ui.checkbox_system.isChecked() else True
        self._iron_skillet_panos['tag'] = False if not ui.checkbox_tag.isChecked() else True
        self._iron_skillet_panos['zone protection'] = False if not ui.checkbox_zone_protection.isChecked() else True
        self.connect_thread_iron_skillet = IronSkillet(parent=None, elements=self._iron_skillet_panos, os='PAN-OS', api=self._api, url=self._url, file=self._from_file, from_vsys=self.ui.combo_from_vsys.currentText(), to_vsys=self.ui.combo_to_vsys.currentText())
        self.connect_thread_iron_skillet.start()
        self.connect_thread_iron_skillet.values_iron_skillet.connect(self._iron_skillet_output)
        self.connect_thread_iron_skillet.done.connect(self._print_iron_skillet)

##############################################
# IRON SKILLET OUTPUT
##############################################
def _iron_skillet_output(self, output):
    """Stream one element's API response into the text browser and the
    accumulated HTML report (self._is_output).

    output: dict with keys 'element', 'response' (XML text) and 'pb_value'.
    """
    element = '> {}'.format(output['element'])
    self.ui.text_out.append(element)
    self._is_output += '<h2>{}</h2><ul>'.format(element)
    xml = lxml.fromstring(output['response'])
    # every <line> of the device response becomes one console line / one <li>
    for line in xml.xpath('.//line'):
        response = ' > {}'.format(line.text)
        self.ui.text_out.append(response)
        self._is_output += '<li>{}</li>'.format(line.text)
    self.ui.text_out.append('')
    self._is_output += '</ul>'
    self.ui.progress_bar.setValue(output['pb_value'])

##############################################
# IRON SKILLET DONE
##############################################
def _print_iron_skillet(self, flag):
    """Finish the Iron Skillet run; on user confirmation, write the HTML
    report to a temp file and open it in the default browser."""
    self._is_output += '</html>'
    self.ui.progress_bar.setValue(100)
    d = QDialog()
    ui = IRONSKILLET()
    ui.setupUi(d)
    resp = d.exec_()
    if resp == QDialog.Accepted:
        tmp = tempfile.NamedTemporaryFile(delete=True)
        path = tmp.name + '.html'
        f = open(path, 'w')
        f.write('<html><body>{}</body></html>'.format(self._is_output))
        f.close()
        webbrowser.open('file://' + path)

##############################################
# RESET FLAGS
##############################################
def _reset_flags(self):
    """Clear all object/group 'already loaded' flags and the connect flag."""
    self._flag_tags = False
    self._flag_address_objects = False
    self._flag_address_groups = False
    self._flag_service_objects = False
    self._flag_service_groups = False
    self._flag_connect_success = False

##############################################
# RESET FLAGS and BUTTONS
##############################################
def _reset_flags_buttons(self):
    """Reset flags and restore default button colors."""
    self._reset_flags()
    self._reset_button_color()

##############################################
# CONNECT
##############################################
def _connect(self):
    """Kick off a connection to the device on a worker thread and clear UI state."""
    # get/set IP and credentials (validate parameters)
    valid = False
    self.ui.progress_bar.setValue(0)
    self.ui.label_status.setText('Connecting, retrieving running-config, loading saved configs...')
    # clear all combo boxes
    self.ui.text_out.clear()
    self.ui.combo_to_dg.clear()
    self.ui.combo_from_dg.clear()
    self.ui.combo_to_vsys.clear()
    self.ui.combo_from_vsys.clear()
    self.ui.combo_from_rulebase.clear()
    self.ui.combo_to_rulebase.clear()
    # reset all flags & flags
    self._reset_flags_buttons()
    ####
    self.connect_thread = ConnectThread(parent=None, ip=self.ui.line_ip.text(), user=self.ui.line_user.text(), password=self.ui.line_password.text())
    self.connect_thread.start()
    self.connect_thread.connect_values.connect(self._set_connect_values)

##############################################
# SET CONNECT VALUES
##############################################
def _set_connect_values(self, values):
    """Slot for ConnectThread results.

    On success, caches credentials/URL, turns the connect button green and
    launches the combo-box-fill and SSH-setup worker threads; on failure,
    turns it red and shows the error.
    """
    if values['result']:
        self._api = values['api']
        self._ip = values['ip']
        self._user = values['user']
        self._password = values['password']
        self._url = values['url']
        self.ui.button_connect.setStyleSheet('background-color: green; color:white;')
        self.ui.button_connect.setText('Connected to: {ip}'.format(ip=self._ip))
        self.ui.progress_bar.setValue(25)
        # trigger functions to fill combo boxes
        self._system_info()
        self.connect_thread_combo_boxes = ToComboBoxes(parent=None, api=self._api, url=self._url)
        self.connect_thread_combo_boxes.start()
        self.connect_thread_combo_boxes.combo_box_values.connect(self._fill_to_combo_boxes)
        self.connect_thread_setup_ssh = SetupSSH(parent=None, ip=self._ip, user=self._user, password=self._password)
        self.connect_thread_setup_ssh.start()
        self.connect_thread_setup_ssh.output.connect(self._load_saved_configs)
    else:
        self.ui.button_connect.setStyleSheet('background-color: red; color:white;')
        self.ui.button_connect.setText('Connection Error: {ip}'.format(ip=values['ip']))
        self._show_critical_error([values['response'], values['error']])

##############################################
# IMPORT CONFIG
##############################################
def _import(self):
    """Upload a local XML config to the device via the import API, then
    refresh the saved-config list on success."""
    # if model isn't set, return
    if self._model is None:
        return
    # prompt user for import file
    self._import_file, _ = QFileDialog.getOpenFileName(parent=None, caption="Import Local Config (XML)", directory=os.getcwd(), filter="XML files (*.xml)")
    # if cancelled
    if len(self._import_file) == 0:
        return
    self.ui.progress_bar.setValue(0)
    self.ui.progress_bar.setValue(10)
    self.ui.label_status.setText('Importing {file}'.format(file=self._import_file))
    try:
        with open(self._import_file, 'rb') as f:
            response = requests.post(
                url=self._url + '/?type=import&category=configuration',
                data={'key': self._api},
                files={'file': f},
                verify=False,
                timeout=10).content
    # if any errors - display error message and return
    except (requests.ConnectionError, requests.ConnectTimeout, requests.HTTPError) as error_requests:
        self._show_critical_error(['Importing File', error_requests])
        return
    else:
        self.ui.progress_bar.setValue(50)
        # check if successful, if so, reload saved config files
        if lxml.fromstring(response).get('status') == 'success':
            self.ui.text_out.clear()
            self.ui.text_out.append('> {file} has been successfully imported!'.format(file=self._import_file))
            self.ui.text_out.append('> Refreshing...')
            self.ui.progress_bar.setValue(100)
            self.connect_thread_setup_ssh = SetupSSH(parent=None, ip=self._ip, user=self._user, password=self._password)
            self.connect_thread_setup_ssh.start()
            self.connect_thread_setup_ssh.output.connect(self._load_saved_configs)
        # if error, return
        else:
            self._show_critical_error(['Importing File', 'Unable to load {file}'.format(file=self._import_file)])
            return

##############################################
# VALIDATE USER INPUT
##############################################
def _validate_user_input(self):
    """Return True when a file, a from/to scope, and a model are all set.

    NOTE(review): the first guard returns None (bare return), not False —
    callers that compare with `is not True` still treat it as failure.
    """
    # did user select a file?
    if self.ui.combo_file.currentText() == 'Select a File':
        self._show_critical_error(['Error!', 'You must select a file!'])
        return
    # check if FROM Vsys and DG are None
    if len(self.ui.combo_from_vsys.currentText()) < 1 and len(self.ui.combo_from_dg.currentText()) < 1:
        return False
    # check if TO Vsys and DG are None
    if len(self.ui.combo_to_vsys.currentText()) < 1 and len(self.ui.combo_to_dg.currentText()) < 1:
        return False
    # validate model has been set
    if self._model is not None:
        return True
    else:
        return False

##############################################
# BUILD XPATH
##############################################
def _build_xpath(self):
    """Populate self._xpath_from / self._xpath_to from the UI combo boxes.

    Panorama selections produce device-group (or shared) xpaths; PAN-OS
    selections produce vsys xpaths.
    """
    panorama = "/<EMAIL>/devices/entry[<EMAIL>']/device-group/entry[@name='{dg}']/"
    shared = '/config/shared/'
    vsys = "/<EMAIL>/entry[<EMAIL>']/vsys/entry[@name='{vsys}']/"
    # init and clear out xpaths
    self._xpath_to = None
    self._xpath_from = None
    #########################################
    # FROM
    #########################################
    # Panorama
    if len(self.ui.combo_from_dg.currentText()) > 1 and len(self.ui.combo_from_rulebase.currentText()) > 1:
        # shared
        if self.ui.combo_from_dg.currentText() == 'Shared':
            try:
                self._xpath_from = shared
            except KeyError:
                self._show_critical_error(['Input Error', 'Select a valid "From" Rulebase'])
                return
        # device group
        else:
            try:
                self._xpath_from = panorama.format(dg=self.ui.combo_from_dg.currentText())
            except KeyError:
                self._show_critical_error(['Input Error', 'Select a valid "From" Rulebase'])
                return
    # PAN-OS (VSYS)
    elif len(self.ui.combo_from_vsys.currentText()) > 1:
        self._xpath_from = vsys.format(vsys=self.ui.combo_from_vsys.currentText())
    #########################################
    # TO
    #########################################
    # Panorama
    if len(self.ui.combo_to_dg.currentText()) > 1 and len(self.ui.combo_to_rulebase.currentText()) > 1:
        # shared
        if self.ui.combo_to_dg.currentText() == 'Shared':
            try:
                self._xpath_to = shared
            except KeyError:
                self._show_critical_error(['Input Error', 'Select a valid "TO" Rulebase'])
                return
        # device group
        else:
            try:
                self._xpath_to = panorama.format(dg=self.ui.combo_to_dg.currentText())
            except KeyError:
                self._show_critical_error(['Input Error', 'Select a valid "TO" Rulebase'])
                return
    # PAN-OS (VSYS)
    elif len(self.ui.combo_to_vsys.currentText()) > 1:
        self._xpath_to = vsys.format(vsys=self.ui.combo_to_vsys.currentText())

##############################################
# LOAD RULEBASE
##############################################
def _load_rulebase(self, rule):
    """Merge the given rulebase type ('rule') into the target scope.

    Warns the user about any objects/groups not yet loaded, then builds the
    from/to xpaths and rulebase path fragments.
    NOTE(review): this method continues past the end of this excerpt.
    """
    self.rule = rule
    self.ui.progress_bar.setValue(0)
    self.ui.label_status.setText('Merging {rule}...'.format(rule=self.rule))
    # validate user input
    if self._validate_user_input() is not True:
        return
    # valid rulebase?
    if self.ui.combo_from_rulebase.currentText() == 'Select Rulebase' or self.ui.combo_to_rulebase.currentText() == 'Select Rulebase':
        self._show_critical_error(['Error!', 'To/From Rulebase not selected'])
        return
    # should I prompt an info dialog?
    info = False
    info_msg = '<b>The following Objects/Groups have NOT been loaded:</b><ul>'
    # check flags
    if self._flag_tags is not True:
        info_msg += '<li>Tags</li>'
        info = True
    if self._flag_address_objects is not True:
        info_msg += '<li>Address Objects</li>'
        info = True
    if self._flag_address_groups is not True:
        info_msg += '<li>Address Groups</li>'
        info = True
    if self._flag_service_objects is not True:
        info_msg += '<li>Service Objects</li>'
        info = True
    if self._flag_service_groups is not True:
        info_msg += '<li>Service Groups</li>'
        info = True
    info_msg += '</ul>The command to add "{rule}" policies will still be executed; make sure to add the above Objects/Groups before committing (if necessary).<br>'.format(rule=rule)
    # prompt if True
    if info:
        # QMessageBox.information(self, 'Info', info_msg, QMessageBox.Ok)
        mbox = QMessageBox(self)
        mbox.setText(self.tr('Warning!'))
        mbox.setInformativeText(info_msg)
        mbox.resize(400, 200)
        mbox.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        mbox.show()
    # build xpaths
    self._build_xpath()
    # check rulebase
    from_rulebase = ''
    to_rulebase = ''
    rulebase = { 'Pre Rulebase': 'pre-rulebase', 'Post Rulebase': 'post-rulebase' }
    # from rulebase
    if len(self.ui.combo_from_rulebase.currentText()) > 1:
        from_rulebase += '{x}/{y}'.format(x=rulebase[self.ui.combo_from_rulebase.currentText()], y=self.rule)
    else:
        from_rulebase = 'rulebase/' + self.rule
    # to rulebase
    if len(self.ui.combo_to_rulebase.currentText()) > 1:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created: Aug 2017

@author: D34D9001@9R0GR4M13
"""
import kerri
import os
import time
import webbrowser
import random
import subprocess
import sys
import urllib
from bs4 import BeautifulSoup as bs
from termcolor import colored

###########
# STALKER #
###########

# palette used to randomize terminal output colors throughout the module
colors = ("green", "red", "blue", "white", "grey", "magenta", "cyan")


def get_permission(info, bypass=0):
    """Ask the user for explicit permission before continuing.

    Parameters:
        info: message describing what the user is consenting to; echoed
            to the terminal before prompting.
        bypass: when 1, skip the prompt entirely (non-interactive callers).

    Exits the process via sys.exit() when permission is denied; prints a
    confirmation and returns None when granted (or bypassed).
    """
    if bypass == 1:
        pass
    else:
        print(colored("[!] STALKER NEEDS YOUR PERMISSION TO CONTINUE!", 'red', attrs=['bold']))
        print(colored("%s" % info, 'yellow', attrs=['bold']))
        # BUG FIX: the original called raw_input(), which does not exist on
        # Python 3 (the declared interpreter in the shebang) and would raise
        # NameError before the user could answer. input() is the Python 3
        # equivalent.
        permission = input(colored("Do You Wish To Continue? (y)es/(no): ", 'blue', attrs=['bold']))
        if str(permission) != "y" and str(permission) != "yes":
            sys.stderr.write("[!] PERMISSION DENIED BY USER!")
            sys.exit()
        else:
            print(colored("[*] PERMISSION GRANTED", 'green', attrs=['bold']))


class Stalker(object):
    """ Returns Information On Target From Multiple Servers """
    # SOME OF THE DATABASES THAT STALKER WILL SEARCH REQUIRE AN ABREVIATED STATE
    # NAME.
# TO PREVENT THE USER FROM HAVING TO INPUT BOTH THE ACTUAL STATE NAME
# AND THE ABREVIATED VERSION, WE HAVE ADDED THE 'ABV' DICTIONARY TO CONVERT
# THE STATE NAME TO THE ABREVIATED VERSION AUTOMATICALLY WHEN NEEDED
def __init__(self):
    # NOTE(review): `abv` is published as a module-level global (full state
    # name -> USPS abbreviation) so the URL builders in stalk() can use it.
    global abv
    abv = {"Alabama": "AL", "Alaska": "AK", "Arizona": "AZ", "Arkansas": "AR",
           "California": "CA", "Colorado": "CO", "Connecticut": "CT",
           "Delaware": "DE", "Florida": "FL", "Georgia": "GA", "Hawaii": "HI",
           "Idaho": "ID", "Illinois": "IL", "Indiana": "IN", "Iowa": "IA",
           "Kansas": "KS", "Kentucky": "KY", "Louisiana": "LA", "Maine": "ME",
           "Maryland": "MD", "Massachusetts": "MA", "Michigan": "MI",
           "Minnesota": "MN", "Mississippi": "MS", "Missouri": "MO",
           "Montana": "MT", "Nebraska": "NE", "Nevada": "NV",
           "New Hampshire": "NH", "New Jersey": "NJ", "New Mexico": "NM",
           "New York": "NY", "North Carolina": "NC", "North Dakota": "ND",
           "Ohio": "OH", "Oklahoma": "OK", "Oregon": "OR",
           "Pennsylvania": "PA", "Rhode Island": "RI", "South Carolina": "SC",
           "South Dakota": "SD", "Tennessee": "TN", "Texas": "TX",
           "Utah": "UT", "Vermont": "VT", "Virginia": "VA",
           "Washington": "WA", "West Virginia": "WV", "Wisconsin": "WI",
           "Wyoming": "WY", "Washington DC": "DC"}

def __str__(self):
    return """ Turns Kaos Into Your Average Super Stalker """

def stalk(self, first=None, middle=None, last=None, city=None, state=None, save=1, gui=0, bypass=0, *args):
    """ Search Public Servers For Information On The Target. Data can be saved to a file using the \'save\' parameter. If the gui parameter is set to 1 (enabled), the data will not be printed to stdout or saved to a file, instead it will be opened in the default web browser."""
    # MAKE SURE USER DOESN'T ENTER TOO MANY ARGUMENTS
    if len(args) >= 1:
        raise kerri.ExcessArguments("staker()", 5)
    # MAKE SURE THE USER ENTERED A FIRST NAME
    elif first is None:
        raise kerri.InvalidInput("stalk()", """You Must At Least Specify A First And Last Name Along With A City And State""")
    else:
        # CHECK TO SEE IF THE USER INTENDS TO USE A TARGETS MIDDLE NAME IN A SEARCH
        # IF THE MIDDLE NAME IS PROVIDED BUT NOT THE LAST NAME, THE MIDDLE NAME IS REMOVED AND USED AS
        # THE TARGETS LAST NAME
        if last is None and middle is not None:
            last = str(middle)
            middle = None
        # MAKE SURE THE USER ENTERED MORE THAN JUST THE TARGETS FIRST NAME
        elif last == None and middle == None:
            raise kerri.InvalidInput("stalk()", "You Must At Least Specify A First And Last Name Along With A City And State")
    # NOTE(review): the try/except and if/elif nesting below is ambiguous in
    # this (whitespace-mangled) copy of the file; the token order is preserved
    # exactly, but the indentation chosen here needs confirming against the
    # original source.
    try:
        # DO NOT LET THE USER ATTEMPT TO SAVE THE DATA FROM THE SEARCH IF GUI MODE IS ENABLED
        if gui == 1 and save == 1:
            raise kerri.InvalidInput("stalk()", "If the gui option is enabled, the data can not be save to a file...")
        if gui == 0:
            # DEFINE URLS TO BE USED IF A MIDDLE NAME FOR THE TARGET WAS SUPPLIED
            if middle != None:
                urls = {'find_mugs':"https://www.findmugshots.com/arrests/%s_%s_%s" % (first, last, abv[state]),
                        'mugshots':"http://mugshots.com/search.html?q=%s+%s" % (first, last),
                        # 'zaba_search':"http://www.zabasearch.com/people/%s+%s/%s/" % (first, last, abv[state]),
                        'yandex':"https://www.yandex.com/search/smart/?text=%s+%s+%s" % (first, middle, last),
                        'google':"https://www.google.com/search?q=%s+%s+%s+%s+%s" % (first, middle, last, city, state),
                        'policearrests':"https://www.policearrests.com/arrests/%s_%s_%s/" % (first, last, abv[state]),
                        'mylife':"https://www.mylife.com/%s-%s/" % (first, last),
                        'peekyou':"https://www.peekyou.com/%s_%s" % (first, last),
                        'peoplesmart':"https://www.peoplesmart.com/find/name/%s-%s/%s/%s" % (first, last, city, abv[state].lower()),
                        'publicrecords360':"https://www.publicrecords360.com/%s/people-search/%s/%s?city=%s" % (state.lower(), last, first, city.lower()),
                        'topix':"http://www.topix.com/forumsearch/city/%s-%s?q=%s+%s+%s" % (city.lower(), abv[state].lower(), first, middle, last),
                        'truepeoplesearch':"https://www.truepeoplesearch.com/results?name="+first+"%20"+last+"&citystatezip="+city+",%20"+abv[state]}
            # DEFINE URLS TO BE USED IF THE USER DID NOT SUPPLY THE TARGETS MIDDLE NAME
            else:
                urls = {'find_mugs':"https://www.findmugshots.com/arrests/%s_%s_%s" % (first, last, abv[state]),
                        'mugshots':"http://mugshots.com/search.html?q=%s+%s" % (first, last),
                        # 'zaba_search':"http://www.zabasearch.com/people/%s+%s/%s/" % (first, last, abv[state]),
                        'yandex':"https://www.yandex.com/search/smart/?text=%s+%s" % (first, last),
                        'google':"https://www.google.com/search?q=%s+%s+%s+%s" % (first, last, city, state),
                        'policearrests':"https://www.policearrests.com/arrests/%s_%s_%s/" % (first, last, abv[state]),
                        'mylife':"https://www.mylife.com/%s-%s/" % (first, last),
                        'peekyou':"https://www.peekyou.com/%s_%s" % (first, last),
                        'peoplesmart':"https://www.peoplesmart.com/find/name/%s-%s/%s/%s" % (first, last, city, abv[state].lower()),
                        'publicrecords360':"https://www.publicrecords360.com/%s/people-search/%s/%s?city=%s" % (state.lower(), last, first, city.lower()),
                        'topix':"http://www.topix.com/forumsearch/city/%s-%s?q=%s+%s" % (city.lower(), abv[state].lower(), first, last),
                        'truepeoplesearch':"https://www.truepeoplesearch.com/results?name="+first+"%20"+last+"&citystatezip="+city+",%20"+abv[state]}
            try:
                # CHECK TO SEE IF THE USER INTENDS TO SAVE THE OUTPUT DATA
                # IF THE USER DOES NOT WANT TO SAVE THE DATA, PRINT IT TO STDOUT
                if save == 0:
                    try:
                        for url in urls:
                            if middle != None:
                                print(colored("[*] Searching %s For %s %s %s in %s, %s" % (url, first, middle, last, city, state), random.choice(colors), attrs=['bold']))
                            else:
                                print(colored("[*] Searching %s For %s %s in %s, %s" % (url, first, last, city, state), random.choice(colors), attrs=['bold']))
                            # STALKER USES URLLIB AND BEAUTIFULSOUP4 TO GET AND PARSE THE TEXT FROM WEBPAGES
                            html = urllib.urlopen(urls[url]).read()
                            # 'lxml' WAS ADDED TO PREVENT bs4 FROM DISPLAYING A WARNING MESSAGE
                            soup = bs(html, "lxml")
                            # REMOVE ALL STYLE AND SCRIPT BLOCKS FROM THE PAGE
                            for script in soup(["script", "style"]):
                                script.extract()
                            # GET THE REMAINING TEXT FROM THE PAGE
                            text = soup.get_text()
                            print(text.encode('utf-8'))
                            # SPLIT THE TEXT INTO MULTIPLE LINES
                            lines= (line.strip() for line in text.splitlines())
                            # LINES CONTAINING (2) OR MORE SPACES ARE SEPERATED INTO (2) (OR MORE) SEPERATE LINES
                            chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
                            # REJOIN LINES AND THEN PRINT THE TEXT
                            data = '\n'.join(chunk for chunk in chunks if chunk).encode('utf-8')
                            print(data)
                    except Exception as error:
                        print("%s FAILED WITH ERROR:\n%s" % (url, error))
                if save == 1:
                    try:
                        # IF THE USER WANTS TO SAVE THE OUTPUT DATA TO A SERIES OF FILES,
                        # FIRST CHECK TO MAKE SURE THE DATABASE DIRECTORY EXSITS
                        if not os.path.isdir("/root/Stalker"):
                            os.mkdir("/root/Stalker")
                            stalk_dir = "/root/Stalker"
                        else:
                            stalk_dir = "/root/Stalker"
                        # THE NEW DIRECTORY FOR THE TARGET WILL BE NAMED AFTER THE TARGET (/root/Stalker/[FIRST_LAST])
                        if middle == None:
                            out_dir = "%s/%s_%s" % (stalk_dir, first, last)
                        else:
                            out_dir = "%s/%s_%s_%s" % (stalk_dir, first, middle, last)
                        if not os.path.isdir(out_dir):
                            os.mkdir(out_dir)
                        else:
                            # IF THE TARGETS DIRECTORY ALREADY EXSISTS, ASK THE USER IF THEY WOULD LIKE TO OVERWRITE THE EXSISTING DATA
                            overwrite = raw_input(colored("[!] THIS TARGET ALREADY HAS A DIRECTORY!!!\nDo You Want To Overwrite It? [ (y)es or (n)o ]\n--> ", 'red', attrs=['bold']))
                            if overwrite == "yes" or overwrite == "y" or overwrite == "YES" or overwrite == "Yes" or overwrite == "Y":
                                subprocess.call(["/usr/bin/srm", "-r", out_dir])
                                os.mkdir(out_dir)
                            # IF THE USER DOES NOT WANT TO OVERWRITE THE DATA (or enters anything other than ["y", "Y", "yes", "Yes"
                            # or "YES"]). Stalker will
                            # terminate the data search)
                            else:
                                raise kerri.DuplicationError("stalk()", "%s ALREADY EXSISTS!" % out_dir)
                        # CREATE AN INTRODUCTION (.intro) FILE FOR THE TARGET. THIS FILE IS NOT CURRENTLY USED FOR ANY DATA
                        # BUT MAY BE USED IN THE FUTURE AS A 'CHEAT SHEET' FOR THE TARGET OR SOMETHING SIMILAR
                    except Exception as error:
                        print("%s FAILED WITH ERROR: \n%s" % (url, error))
                    intro = "%s/%s_%s.intro" % (out_dir, first, last)
                    with open(intro, 'w') as outfile:
                        outfile.write("################################## ")
                        outfile.write("Kaos.Stalker Intro For %s %s From %s %s" % (first, last, city, state))
                        outfile.write(" ##################################")
                        outfile.close()
                    for url in urls:
                        if middle != None:
                            print(colored("Searching %s For %s %s %s in %s, %s" % (url, first, middle, last, city, state), random.choice(colors), attrs=['bold']))
                        else:
                            print(colored("Searching %s For %s %s in %s, %s" % (url, first, last, city, state), random.choice(colors), attrs=['bold']))
                        # CREATE A FILE IN THE TARGETS DIRECTORY WITH THE NAMING FORMAT [${FIRSTNAME}_${LASTNAME}.${URL_NAME}]
                        # EXAMPLE: John_Doe.foogle
                        with open("%s/%s_%s.%s" % (out_dir, first, last, url), 'w') as out_file:
                            # [this process is explained above (lines: 93-110)]
                            html = urllib.urlopen(urls[url]).read()
                            soup = bs(html, "lxml")
                            for script in soup(["script", "style"]):
                                script.extract()
                            text = soup.get_text()
                            lines= (line.strip() for line in text.splitlines())
                            chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
                            data = '\n'.join(chunk for chunk in chunks if chunk).encode('utf-8')
                            out_file.write(data)
                            out_file.close()
                        time.sleep(1)
            except Exception as error:
                raise kerri.Unknown("stalker()", error)
        elif gui == 1:
            # THE SAME URL DECISION PROCESS IS USED FROM ABOVE (DETERMINED BY USERS INPUT OF A MIDDLE
<reponame>ryu-sw/alembic ##-***************************************************************************** ## ## Copyright (c) 2009-2011, ## <NAME>, Inc. and ## Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd. ## ## All rights reserved. ## ## Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are ## met: ## * Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## * Redistributions in binary form must reproduce the above ## copyright notice, this list of conditions and the following disclaimer ## in the documentation and/or other materials provided with the ## distribution. ## * Neither the name of Sony Pictures Imageworks, nor ## Industrial Light & Magic nor the names of their contributors may be used ## to endorse or promote products derived from this software without specific ## prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
##
##-*****************************************************************************

from maya import cmds as MayaCmds
import maya.OpenMaya as OpenMaya
import os
import math

# adds the current working directory so tools don't get confused about where we
# are storing files
def expandFileName(name):
    # Prefix `name` with the current working directory.
    return os.getcwd() + os.path.sep + name

# compare the two floating point values
def floatDiff(val1, val2, tolerance):
    # Return True when |val1 - val2| < 10**(-tolerance) (i.e. "close enough"),
    # False otherwise. Note the name is historical: True means "no difference".
    diff = math.fabs(val1 - val2)
    if diff < math.pow(10, -tolerance):
        return True
    return False

# function that returns a node object given a name
def getObjFromName(nodeName):
    selectionList = OpenMaya.MSelectionList()
    selectionList.add( nodeName )
    obj = OpenMaya.MObject()
    selectionList.getDependNode(0, obj)
    return obj

# function that finds a plug given a node object and plug name
def getPlugFromName(attrName, nodeObj):
    fnDepNode = OpenMaya.MFnDependencyNode(nodeObj)
    attrObj = fnDepNode.attribute(attrName)
    plug = OpenMaya.MPlug(nodeObj, attrObj)
    return plug

# meaning of return value:
# 0 if array1 = array2
# 1 if array1 and array2 are of the same length, array1[i] == array2[i] for 0<=i<m<len, and array1[m] < array2[m]
# -1 if array1 and array2 are of the same length, array1[i] == array2[i] for 0<=i<m<len, and array1[m] > array2[m]
# 2 if array1.length() < array2.length()
# -2 if array1.length() > array2.length()
def compareArray(array1, array2):
    # Lexicographic three-way (plus length) comparison; see table above.
    len1 = array1.length()
    len2 = array2.length()
    if len1 > len2 :
        return -2
    if len1 < len2 :
        return 2
    for i in range(0, len1):
        if array1[i] < array2[i] :
            return 1
        if array1[i] > array2[i] :
            return -1
    return 0

# return True if the two point arrays are exactly the same
def comparePointArray(array1, array2):
    # Element-wise MPoint comparison with a 1e-6 equivalence tolerance.
    len1 = array1.length()
    len2 = array2.length()
    if len1 != len2 :
        return False
    for i in range(0, len1):
        if not array1[i].isEquivalent(array2[i], 1e-6):
            return False
    return True

# return True if the two meshes are identical
def compareMesh( nodeName1, nodeName2 ):
    # basic error checking
    obj1 = getObjFromName(nodeName1)
    if not obj1.hasFn(OpenMaya.MFn.kMesh):
        return False
    obj2 = getObjFromName(nodeName2)
    if not obj2.hasFn(OpenMaya.MFn.kMesh):
        return False
    polyIt1 = OpenMaya.MItMeshPolygon( obj1 )
    polyIt2 = OpenMaya.MItMeshPolygon( obj2 )
    if polyIt1.count() != polyIt2.count():
        return False
    if polyIt1.polygonVertexCount() != polyIt2.polygonVertexCount():
        return False
    vertices1 = OpenMaya.MIntArray()
    vertices2 = OpenMaya.MIntArray()
    pointArray1 = OpenMaya.MPointArray()
    pointArray2 = OpenMaya.MPointArray()
    # walk both meshes polygon-by-polygon in lockstep
    while polyIt1.isDone()==False and polyIt2.isDone()==False :
        # compare vertex indices
        polyIt1.getVertices(vertices1)
        polyIt2.getVertices(vertices2)
        if compareArray(vertices1, vertices2) != 0:
            return False
        # compare vertex positions
        polyIt1.getPoints(pointArray1)
        polyIt2.getPoints(pointArray2)
        if not comparePointArray( pointArray1, pointArray2 ):
            return False
        polyIt1.next()
        polyIt2.next()
    # both iterators must be exhausted together
    if polyIt1.isDone() and polyIt2.isDone() :
        return True
    return False

# return True if the two Nurbs Surfaces are identical
def compareNurbsSurface(nodeName1, nodeName2):
    # basic error checking
    obj1 = getObjFromName(nodeName1)
    if not obj1.hasFn(OpenMaya.MFn.kNurbsSurface):
        return False
    obj2 = getObjFromName(nodeName2)
    if not obj2.hasFn(OpenMaya.MFn.kNurbsSurface):
        return False
    fn1 = OpenMaya.MFnNurbsSurface(obj1)
    fn2 = OpenMaya.MFnNurbsSurface(obj2)
    # degree
    if fn1.degreeU() != fn2.degreeU():
        return False
    if fn1.degreeV() != fn2.degreeV():
        return False
    # span
    if fn1.numSpansInU() != fn2.numSpansInU():
        return False
    if fn1.numSpansInV() != fn2.numSpansInV():
        return False
    # form
    if fn1.formInU() != fn2.formInU():
        return False
    if fn1.formInV() != fn2.formInV():
        return False
    # control points
    if fn1.numCVsInU() != fn2.numCVsInU():
        return False
    if fn1.numCVsInV() != fn2.numCVsInV():
        return False
    cv1 = OpenMaya.MPointArray()
    fn1.getCVs(cv1)
    cv2 = OpenMaya.MPointArray()
    fn2.getCVs(cv2)
    if not comparePointArray(cv1, cv2):
        return False
    # knots
    if fn1.numKnotsInU() != fn2.numKnotsInU():
        return False
    if fn1.numKnotsInV() != fn2.numKnotsInV():
        return False
    knotsU1 = OpenMaya.MDoubleArray()
    fn1.getKnotsInU(knotsU1)
    knotsV1 = OpenMaya.MDoubleArray()
    fn1.getKnotsInV(knotsV1)
    knotsU2 = OpenMaya.MDoubleArray()
    fn2.getKnotsInU(knotsU2)
    knotsV2 = OpenMaya.MDoubleArray()
    fn2.getKnotsInV(knotsV2)
    if compareArray( knotsU1, knotsU2 ) != 0:
        return False
    if compareArray( knotsV1, knotsV2 ) != 0:
        return False
    # trim curves
    if fn1.isTrimmedSurface() != fn2.isTrimmedSurface():
        return False
    # may need to add more trim checks
    return True

# return True if the two locators are idential
def compareLocator(nodeName1, nodeName2):
    # basic error checking
    obj1 = getObjFromName(nodeName1)
    if not obj1.hasFn(OpenMaya.MFn.kLocator):
        return False
    obj2 = getObjFromName(nodeName2)
    if not obj2.hasFn(OpenMaya.MFn.kLocator):
        return False
    # compare local position/scale to 4 decimal places (see floatDiff)
    if not floatDiff(MayaCmds.getAttr(nodeName1+'.localPositionX'), MayaCmds.getAttr(nodeName2+'.localPositionX'), 4):
        return False
    if not floatDiff(MayaCmds.getAttr(nodeName1+'.localPositionY'), MayaCmds.getAttr(nodeName2+'.localPositionY'), 4):
        return False
    if not floatDiff(MayaCmds.getAttr(nodeName1+'.localPositionZ'), MayaCmds.getAttr(nodeName2+'.localPositionZ'), 4):
        return False
    if not floatDiff(MayaCmds.getAttr(nodeName1+'.localScaleX'), MayaCmds.getAttr(nodeName2+'.localScaleX'), 4):
        return False
    if not floatDiff(MayaCmds.getAttr(nodeName1+'.localScaleY'), MayaCmds.getAttr(nodeName2+'.localScaleY'), 4):
        return False
    if not floatDiff(MayaCmds.getAttr(nodeName1+'.localScaleZ'), MayaCmds.getAttr(nodeName2+'.localScaleZ'), 4):
        return False
    return True

# return True if the two cameras are identical
def compareCamera( nodeName1, nodeName2 ):
    # basic error checking
    obj1 = getObjFromName(nodeName1)
    if not obj1.hasFn(OpenMaya.MFn.kCamera):
        return False
    obj2 = getObjFromName(nodeName2)
    if not obj2.hasFn(OpenMaya.MFn.kCamera):
        return False
    fn1 = OpenMaya.MFnCamera( obj1 )
    fn2 = OpenMaya.MFnCamera( obj2 )
    # attribute-by-attribute comparison; each mismatch is reported to stdout
    if fn1.filmFit() != fn2.filmFit():
        print "differ in filmFit"
        return False
    if not floatDiff(fn1.filmFitOffset(), fn2.filmFitOffset(), 4):
        print "differ in filmFitOffset"
        return False
    if fn1.isOrtho() != fn2.isOrtho():
        print "differ in isOrtho"
        return False
    if not floatDiff(fn1.orthoWidth(), fn2.orthoWidth(), 4):
        print "differ in orthoWidth"
        return False
    if not floatDiff(fn1.focalLength(), fn2.focalLength(), 4):
        print "differ in focalLength"
        return False
    if not floatDiff(fn1.lensSqueezeRatio(), fn2.lensSqueezeRatio(), 4):
        print "differ in lensSqueezeRatio"
        return False
    if not floatDiff(fn1.cameraScale(), fn2.cameraScale(), 4):
        print "differ in cameraScale"
        return False
    if not floatDiff(fn1.horizontalFilmAperture(), fn2.horizontalFilmAperture(), 4):
        print "differ in horizontalFilmAperture"
        return False
    if not floatDiff(fn1.verticalFilmAperture(), fn2.verticalFilmAperture(), 4):
        print "differ in verticalFilmAperture"
        return False
    if not floatDiff(fn1.horizontalFilmOffset(), fn2.horizontalFilmOffset(), 4):
        print "differ in horizontalFilmOffset"
        return False
    if not floatDiff(fn1.verticalFilmOffset(), fn2.verticalFilmOffset(), 4):
        print "differ in verticalFilmOffset"
        return False
    if not floatDiff(fn1.overscan(), fn2.overscan(), 4):
        print "differ in overscan"
        return False
    if not floatDiff(fn1.nearClippingPlane(), fn2.nearClippingPlane(), 4):
        print "differ in nearClippingPlane"
        return False
    if not floatDiff(fn1.farClippingPlane(), fn2.farClippingPlane(), 4):
        print "differ in farClippingPlane"
        return False
    if not floatDiff(fn1.preScale(), fn2.preScale(), 4):
        print "differ in preScale"
        return False
    if not floatDiff(fn1.postScale(), fn2.postScale(), 4):
        print "differ in postScale"
        return False
    if not floatDiff(fn1.filmTranslateH(), fn2.filmTranslateH(), 4):
        print "differ in filmTranslateH"
        return False
    if not floatDiff(fn1.filmTranslateV(), fn2.filmTranslateV(), 4):
        print "differ in filmTranslateV"
        return False
    if not floatDiff(fn1.horizontalRollPivot(), fn2.horizontalRollPivot(), 4):
        print "differ in horizontalRollPivot"
        return False
    if not floatDiff(fn1.verticalRollPivot(), fn2.verticalRollPivot(), 4):
        print "differ in verticalRollPivot"
        return False
    if fn1.filmRollOrder() != fn2.filmRollOrder():
        print "differ in filmRollOrder"
        return False
    if not floatDiff(fn1.filmRollValue(), fn2.filmRollValue(), 4):
        print "differ in filmRollValue"
        return False
    if not floatDiff(fn1.fStop(), fn2.fStop(), 4):
        print "differ in fStop"
        return False
    if not floatDiff(fn1.focusDistance(), fn2.focusDistance(), 4,):
        print "differ in focusDistance"
        return False
    if not floatDiff(fn1.shutterAngle(), fn2.shutterAngle(), 4):
        print "differ in shutterAngle"
        return False
    if fn1.usePivotAsLocalSpace() != fn2.usePivotAsLocalSpace():
        print "differ in usePivotAsLocalSpace"
        return False
    if fn1.tumblePivot() != fn2.tumblePivot():
        print "differ in tumblePivot"
        return False
    return True

# return True if the two Nurbs curves are identical
def compareNurbsCurve(nodeName1, nodeName2):
    # NOTE(review): this function continues past the end of this excerpt.
    # basic error checking
    obj1 = getObjFromName(nodeName1)
    if not obj1.hasFn(OpenMaya.MFn.kNurbsCurve):
        print nodeName1, "not a curve."
        return False
    obj2 = getObjFromName(nodeName2)
    if not obj2.hasFn(OpenMaya.MFn.kNurbsCurve):
        print nodeName2, "not a curve."
        return False
    fn1 = OpenMaya.MFnNurbsCurve(obj1)
    fn2 = OpenMaya.MFnNurbsCurve(obj2)
    if fn1.degree() != fn2.degree():
        print nodeName1, nodeName2, "degrees differ."
        return False
    if fn1.numCVs() != fn2.numCVs():
        print nodeName1, nodeName2, "numCVs differ."
        return False
    if fn1.numSpans() != fn2.numSpans():
        print nodeName1, nodeName2, "spans differ."
        return False
    if fn1.numKnots() != fn2.numKnots():
        print nodeName1, nodeName2, "numKnots differ."
        return False
    if fn1.form() != fn2.form():
        print nodeName1, nodeName2, "form differ."
<filename>src/pymor/algorithms/projection.py
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)

import numpy as np

from pymor.algorithms.rules import RuleTable, match_class, match_generic, match_always
from pymor.core.exceptions import RuleNotMatchingError, NoMatchingRuleError
from pymor.operators.block import BlockOperatorBase, BlockRowOperator, BlockColumnOperator
from pymor.operators.constructions import (LincombOperator, ConcatenationOperator, ConstantOperator,
                                           ProjectedOperator, ZeroOperator, AffineOperator, AdjointOperator,
                                           SelectionOperator, IdentityOperator, VectorArrayOperator)
from pymor.operators.ei import EmpiricalInterpolatedOperator, ProjectedEmpiciralInterpolatedOperator
from pymor.operators.numpy import NumpyMatrixOperator
from pymor.vectorarrays.numpy import NumpyVectorSpace


def project(op, range_basis, source_basis, product=None):
    """Petrov-Galerkin projection of a given |Operator|.

    Given an inner product `( ⋅, ⋅)`, source vectors `b_1, ..., b_N`
    and range vectors `c_1, ..., c_M`, the projection `op_proj` of `op`
    is defined by ::

        [ op_proj(e_j) ]_i = ( c_i, op(b_j) )

    for all i,j, where `e_j` denotes the j-th canonical basis vector of R^N.

    In particular, if the `c_i` are orthonormal w.r.t. the given product,
    then `op_proj` is the coordinate representation w.r.t. the `b_i/c_i` bases
    of the restriction of `op` to `span(b_i)` concatenated with the
    orthogonal projection onto `span(c_i)`.

    From another point of view, if `op` is viewed as a bilinear form
    (see :meth:`apply2`) and `( ⋅, ⋅ )` is the Euclidean inner
    product, then `op_proj` represents the matrix of the bilinear form restricted
    to `span(b_i) / span(c_i)` (w.r.t. the `b_i/c_i` bases).

    How the projection is realized will depend on the given |Operator|.
    While a projected |NumpyMatrixOperator| will
    again be a |NumpyMatrixOperator|, only a generic
    :class:`~pymor.operators.constructions.ProjectedOperator` can be returned
    in general. The exact algorithm is specified in :class:`ProjectRules`.

    Parameters
    ----------
    op
        The |Operator| to project.
    range_basis
        The vectors `c_1, ..., c_M` as a |VectorArray|. If `None`, no
        projection in the range space is performed.
    source_basis
        The vectors `b_1, ..., b_N` as a |VectorArray| or `None`. If `None`,
        no restriction of the source space is performed.
    product
        An |Operator| representing the inner product.  If `None`, the
        Euclidean inner product is chosen.

    Returns
    -------
    The projected |Operator| `op_proj`.
    """
    assert source_basis is None or source_basis in op.source
    assert range_basis is None or range_basis in op.range
    assert product is None or product.source == product.range == op.range

    # Fold the product into the range basis once, so the rules below only ever
    # need Euclidean inner products against `rb`.
    rb = product.apply(range_basis) if product is not None and range_basis is not None else range_basis

    try:
        return ProjectRules(rb, source_basis).apply(op)
    except NoMatchingRuleError:
        # Fall back to a generic wrapper that evaluates the projection lazily.
        op.logger.warning('Using inefficient generic projection operator')
        return ProjectedOperator(op, range_basis, source_basis, product)


class ProjectRules(RuleTable):
    """|RuleTable| for the :func:`project` algorithm.

    Each `action_*` method handles one |Operator| type; the first matching
    rule wins. Rules raise :class:`RuleNotMatchingError` to decline a case
    they cannot handle efficiently.
    """

    def __init__(self, range_basis, source_basis):
        super().__init__(use_caching=True)
        self.__auto_init(locals())

    @match_always
    def action_no_bases(self, op):
        # Nothing to project at all -> the operator is returned unchanged.
        if self.range_basis is None and self.source_basis is None:
            return op
        else:
            raise RuleNotMatchingError

    @match_class(ZeroOperator)
    def action_ZeroOperator(self, op):
        range_basis, source_basis = self.range_basis, self.source_basis
        if source_basis is not None and range_basis is not None:
            # Fully projected zero operator is just an all-zero matrix.
            from pymor.operators.numpy import NumpyMatrixOperator
            return NumpyMatrixOperator(np.zeros((len(range_basis), len(source_basis))),
                                       name=op.name)
        else:
            # Only one side projected: keep a ZeroOperator on the reduced space(s).
            new_source = NumpyVectorSpace(len(source_basis)) if source_basis is not None else op.source
            new_range = NumpyVectorSpace(len(range_basis)) if range_basis is not None else op.range
            return ZeroOperator(new_range, new_source, name=op.name)

    @match_class(ConstantOperator)
    def action_ConstantOperator(self, op):
        range_basis, source_basis = self.range_basis, self.source_basis
        if range_basis is not None:
            # Coordinates of the constant value w.r.t. the range basis.
            projected_value = NumpyVectorSpace.make_array(range_basis.inner(op.value).T)
        else:
            projected_value = op.value
        if source_basis is None:
            return ConstantOperator(projected_value, op.source, name=op.name)
        else:
            return ConstantOperator(projected_value, NumpyVectorSpace(len(source_basis)),
                                    name=op.name)

    @match_generic(lambda op: op.linear and not op.parametric, 'linear and not parametric')
    def action_apply_basis(self, op):
        # Generic rule for non-parametric linear operators: evaluate the
        # operator (or its adjoint) on the given basis directly.
        range_basis, source_basis = self.range_basis, self.source_basis
        if source_basis is None:
            try:
                V = op.apply_adjoint(range_basis)
            except NotImplementedError:
                raise RuleNotMatchingError('apply_adjoint not implemented')
            if isinstance(op.source, NumpyVectorSpace):
                from pymor.operators.numpy import NumpyMatrixOperator
                return NumpyMatrixOperator(V.to_numpy(), source_id=op.source.id, name=op.name)
            else:
                from pymor.operators.constructions import VectorArrayOperator
                return VectorArrayOperator(V, adjoint=True, name=op.name)
        else:
            if range_basis is None:
                V = op.apply(source_basis)
                if isinstance(op.range, NumpyVectorSpace):
                    from pymor.operators.numpy import NumpyMatrixOperator
                    return NumpyMatrixOperator(V.to_numpy().T, range_id=op.range.id, name=op.name)
                else:
                    from pymor.operators.constructions import VectorArrayOperator
                    return VectorArrayOperator(V, adjoint=False, name=op.name)
            else:
                # Both bases given: the dense Petrov-Galerkin matrix.
                from pymor.operators.numpy import NumpyMatrixOperator
                return NumpyMatrixOperator(op.apply2(range_basis, source_basis), name=op.name)

    @match_class(ConcatenationOperator)
    def action_ConcatenationOperator(self, op):
        if len(op.operators) == 1:
            return self.apply(op.operators[0])

        range_basis, source_basis = self.range_basis, self.source_basis
        # note: operators are applied right-to-left, so op.operators[0] acts last
        last, first = op.operators[0], op.operators[-1]
        if source_basis is not None and first.linear and not first.parametric:
            # Push the source basis through the innermost operator.
            V = first.apply(source_basis)
            return project(op.with_(operators=op.operators[:-1]), range_basis, V)
        elif range_basis is not None and last.linear and not last.parametric:
            # Pull the range basis through the outermost operator.
            V = last.apply_adjoint(range_basis)
            return project(op.with_(operators=op.operators[1:]), V, source_basis)

        # the concatenation is too complicated to directly project efficiently
        # try to simplify the operators in the concatenation by expanding
        from pymor.algorithms.simplify import expand
        expanded_op = expand(op)
        if not isinstance(expanded_op, ConcatenationOperator):  # expanding was successful
            return self.apply(expanded_op)

        # at least we can try to partially project the outer operators
        projected_first = project(first, None, source_basis)
        projected_last = project(last, range_basis, None)
        return ConcatenationOperator((projected_last,) + op.operators[1:-1] + (projected_first,),
                                     name=op.name)

    @match_class(AdjointOperator)
    def action_AdjointOperator(self, op):
        range_basis, source_basis = self.range_basis, self.source_basis
        # Project the wrapped operator with the bases swapped; product operators
        # are absorbed into the bases where possible.
        if range_basis is not None:
            if op.source_product:
                range_basis = op.source_product.apply_inverse(range_basis)
        if source_basis is not None and op.range_product:
            source_basis = op.range_product.apply(source_basis)

        operator = project(op.operator, source_basis, range_basis)
        range_product = op.range_product if source_basis is None else None
        source_product = op.source_product if range_basis is None else None
        return AdjointOperator(operator, source_product=source_product,
                               range_product=range_product, name=op.name)

    @match_class(EmpiricalInterpolatedOperator)
    def action_EmpiricalInterpolatedOperator(self, op):
        range_basis, source_basis = self.range_basis, self.source_basis
        if len(op.interpolation_dofs) == 0:
            # No interpolation DOFs -> the EI operator is effectively zero.
            return self.apply(ZeroOperator(op.range, op.source, op.name))
        elif not hasattr(op, 'restricted_operator') or source_basis is None:
            raise RuleNotMatchingError('Has no restricted operator or source_basis is None')
        if range_basis is not None:
            projected_collateral_basis = NumpyVectorSpace.make_array(op.collateral_basis.inner(range_basis))
        else:
            projected_collateral_basis = op.collateral_basis

        return ProjectedEmpiciralInterpolatedOperator(op.restricted_operator, op.interpolation_matrix,
                                                      NumpyVectorSpace.make_array(source_basis.dofs(op.source_dofs)),
                                                      projected_collateral_basis, op.triangular,
                                                      None, op.name)

    @match_class(AffineOperator)
    def action_AffineOperator(self, op):
        # An affine operator is shift + linear part; project their sum.
        return self.apply(op.affine_shift + op.linear_part)

    @match_class(LincombOperator)
    def action_LincombOperator(self, op):
        # Project each summand; solver options no longer apply after projection.
        return self.replace_children(op).with_(solver_options=None)

    @match_class(SelectionOperator)
    def action_SelectionOperator(self, op):
        return self.replace_children(op)

    @match_class(BlockOperatorBase)
    def action_BlockOperatorBase(self, op):
        # Project each block against the corresponding sub-basis of the
        # (block) range/source bases, then recombine.
        if op.blocked_range:
            if self.range_basis is not None:
                range_bases = self.range_basis._blocks
            else:
                range_bases = [None] * len(op.range.subspaces)
        else:
            range_bases = [self.range_basis]
        if op.blocked_source:
            if self.source_basis is not None:
                source_bases = self.source_basis._blocks
            else:
                source_bases = [None] * len(op.source.subspaces)
        else:
            source_bases = [self.source_basis]
        projected_ops = np.array([[project(op.blocks[i, j], rb, sb)
                                   for j, sb in enumerate(source_bases)]
                                  for i, rb in enumerate(range_bases)])
        # np.sum over the object array adds the operators via LincombOperator arithmetic.
        if self.range_basis is None and op.blocked_range:
            return BlockColumnOperator(np.sum(projected_ops, axis=1))
        elif self.source_basis is None and op.blocked_source:
            return BlockRowOperator(np.sum(projected_ops, axis=0))
        else:
            return np.sum(projected_ops)


def project_to_subbasis(op, dim_range=None, dim_source=None):
    """Project already projected |Operator| to a subbasis.

    The purpose of this method is to further project an operator that has been
    obtained through :meth:`project` to subbases of the original projection bases, i.e. ::

        project_to_subbasis(project(op, r_basis, s_basis, prod), dim_range, dim_source)

    should be the same as ::

        project(op, r_basis[:dim_range], s_basis[:dim_source], prod)

    For a |NumpyMatrixOperator| this amounts to extracting the upper-left
    (dim_range, dim_source) corner of its matrix.

    The subbasis projection algorithm is specified in :class:`ProjectToSubbasisRules`.

    Parameters
    ----------
    op
        The already projected |Operator|.
    dim_range
        Dimension of the range subbasis.
    dim_source
        Dimension of the source subbasis.

    Returns
    -------
    The projected |Operator|.
    """
    assert dim_source is None or (isinstance(op.source, NumpyVectorSpace) and dim_source <= op.source.dim)
    assert dim_range is None or (isinstance(op.range, NumpyVectorSpace) and dim_range <= op.range.dim)

    # Nothing to restrict -> return the operator unchanged.
    if dim_range is None and dim_source is None:
        return op

    return ProjectToSubbasisRules(dim_range, dim_source).apply(op)


class ProjectToSubbasisRules(RuleTable):
    """|RuleTable| for the :func:`project_to_subbasis` algorithm."""

    def __init__(self, dim_range, dim_source):
        super().__init__(use_caching=True)
        self.__auto_init(locals())

    @match_class(LincombOperator, SelectionOperator)
    def action_recurse(self, op):
        return self.replace_children(op)

    @match_class(NumpyMatrixOperator)
    def action_NumpyMatrixOperator(self, op):
        # copy instead of just slicing the matrix to ensure contiguous memory
        return NumpyMatrixOperator(op.matrix[:self.dim_range, :self.dim_source].copy(),
                                   solver_options=op.solver_options,
                                   name=op.name,
                                   source_id=op.source.id, range_id=op.range.id)

    @match_class(ConstantOperator)
    def action_ConstantOperator(self, op):
        dim_range, dim_source = self.dim_range, self.dim_source
        source = op.source if dim_source is None else NumpyVectorSpace(dim_source)
        # Restrict the stored value to the first dim_range coordinates.
        value = op.value if dim_range is None else NumpyVectorSpace.make_array(op.value.to_numpy()[:, :dim_range])
        return ConstantOperator(value, source, name=op.name)

    @match_class(IdentityOperator)
    def action_IdentityOperator(self, op):
        dim_range, dim_source = self.dim_range, self.dim_source
        # An identity can only stay an identity when both dimensions shrink equally.
        if dim_range != dim_source:
            raise RuleNotMatchingError('dim_range and dim_source must be equal.')
        space = op.source if dim_source is None else NumpyVectorSpace(dim_source)
        return IdentityOperator(space, name=op.name)

    @match_class(ZeroOperator)
    def action_ZeroOperator(self, op):
        dim_range, dim_source = self.dim_range, self.dim_source
        range_space = op.range if dim_range is None else NumpyVectorSpace(dim_range)
        source_space = op.source if dim_source is None else NumpyVectorSpace(dim_source)
        return ZeroOperator(range_space, source_space, name=op.name)

    @match_class(ProjectedEmpiciralInterpolatedOperator)
    def action_ProjectedEmpiciralInterpolatedOperator(self, op):
        if not isinstance(op.projected_collateral_basis.space, NumpyVectorSpace):
            raise NotImplementedError

        restricted_operator = op.restricted_operator

        # Truncate the collateral basis coordinates to the new range dimension ...
        old_pcb = op.projected_collateral_basis
        projected_collateral_basis = NumpyVectorSpace.make_array(old_pcb.to_numpy()[:, :self.dim_range])

        # ... and keep only the source-basis DOF vectors of the new source dimension.
        old_sbd = op.source_basis_dofs
        source_basis_dofs = NumpyVectorSpace.make_array(old_sbd.to_numpy()[:self.dim_source])

        return ProjectedEmpiciralInterpolatedOperator(restricted_operator, op.interpolation_matrix,
                                                      source_basis_dofs, projected_collateral_basis,
                                                      op.triangular,
                                                      solver_options=op.solver_options, name=op.name)

    @match_class(VectorArrayOperator)
    def action_VectorArrayOperator(self, op):
        dim_range, dim_source = self.dim_range, self.dim_source
        # A VectorArrayOperator can only be restricted along its array axis.
        if op.adjoint and dim_source is not None:
            raise RuleNotMatchingError
        if not op.adjoint and dim_range is not None:
            raise RuleNotMatchingError
        array = op.array[:(dim_range if op.adjoint else dim_source)]
        # use new_type to ensure this also works for child classes
        return op.with_(array=array, new_type=VectorArrayOperator)

    @match_class(BlockColumnOperator)
    def action_BlockColumnOperator(self, op):
        if self.dim_range is not None:
            raise RuleNotMatchingError
        blocks = [self.apply(b) for b in op.blocks.ravel()]
        return op.with_(blocks=blocks)

    @match_class(BlockRowOperator)
    def action_BlockRowOperator(self, op):
        if self.dim_source is not None:
            raise RuleNotMatchingError
        blocks = [self.apply(b) for b
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Head-pose and facial-expression tracker.

Detects a face with dlib, estimates head pose via OpenCV solvePnP and streams
pose (translation + quaternion) and expression parameters over a TCP socket.

Created on Fri Sep 27 16:42:52 2019

@author: <NAME>
"""
import cv2
import numpy as np
import dlib
import time
import math
import sys
from os import path

file_dir = path.dirname(path.realpath(__file__))
import socket

# dlib face detector and 68-point landmark predictor (model loaded from ./data)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
    path.join(file_dir, "data", "shape_predictor_68_face_landmarks.dat")
)

POINTS_NUM_LANDMARK = 68

clahe = cv2.createCLAHE(
    clipLimit=2.0, tileGridSize=(8, 8)
)  # CLAHE Object (for Adaptive histogram equalization)

# Corners of a 1000-unit cube centered at the model origin, used only for
# drawing the pose visualization box.
boxPoints3D = np.array(
    (
        [500.0, 500.0, 500.0],
        [-500.0, 500.0, 500.0],
        [-500.0, -500.0, 500.0],
        [500.0, -500.0, 500.0],
        [500.0, 500.0, -500.0],
        [-500.0, 500.0, -500.0],
        [-500.0, -500.0, -500.0],
        [500.0, -500.0, -500.0],
    )
)
boxPoints2D = np.zeros((1, 1, 8, 2))

# parameters for mean filter
windowlen_1 = 5
queue3D_points = np.zeros((windowlen_1, POINTS_NUM_LANDMARK, 2))

windowlen_2 = 5
queue1D = np.zeros(windowlen_2)

# parameters for kalman filter (scalar state XX with variance PP)
XX = 0
PP = 0.01


# Smooth filter
def mean_filter_for_landmarks(landmarks_orig):
    """Sliding-window mean over the last `windowlen_1` landmark frames.

    Mutates the module-level `queue3D_points` ring buffer (shift-left, append).
    """
    for i in range(windowlen_1 - 1):
        queue3D_points[i, :, :] = queue3D_points[i + 1, :, :]
    queue3D_points[windowlen_1 - 1, :, :] = landmarks_orig

    landmarks = queue3D_points.mean(axis=0)
    return landmarks


def mean_filter_simple(input):
    """Sliding-window mean of a scalar over the last `windowlen_2` samples."""
    for i in range(windowlen_2 - 1):
        queue1D[i] = queue1D[i + 1]
    queue1D[windowlen_2 - 1] = input

    output = queue1D.mean()
    return output


def kalman_filter_simple(input, Q, R):
    """One scalar Kalman update step on the global state (XX, PP).

    Q is the process noise, R the measurement noise.
    """
    global XX
    global PP
    K = PP / (PP + R)
    XX = XX + K * (input - XX)
    PP = PP - K * PP + Q
    return XX


class KalmanObject:
    """m-dimensional linear Kalman filter with identity dynamics (F=B=H=I)."""

    def __init__(self, m, Qval, Rval):
        self.K = np.zeros((m, m))   # Kalman gain
        self.xx = np.zeros(m)       # state estimate
        self.P = np.eye(m)          # state covariance
        self.F = np.eye(m)          # state-transition model
        self.B = np.eye(m)          # control model
        self.H = np.eye(m)          # observation model
        self.Q = Qval * np.eye(m)   # process-noise covariance
        self.R = Rval * np.eye(m)   # measurement-noise covariance

    def kalman_update(self, uu, zz):
        """Predict with control `uu`, then correct with measurement `zz`."""
        self.xx = self.F.dot(self.xx) + self.B.dot(uu)
        self.P = self.F.dot(self.P).dot(self.F.T) + self.Q
        self.K = self.P.dot(self.H.T).dot(
            np.linalg.inv(self.H.dot(self.P).dot(self.H.T) + self.R)
        )
        self.xx = self.xx + self.K.dot(zz - self.H.dot(self.xx))
        self.P = self.P - self.K.dot(self.H).dot(self.P)


# Format convert
def landmarks_to_np(landmarks, dtype="int"):
    """Convert a dlib full_object_detection to an (num, 2) integer ndarray."""
    # get number of landmarks
    num = landmarks.num_parts

    # initialize the list of (x, y)-coordinates
    coords = np.zeros((num, 2), dtype=dtype)

    # loop over the 68 facial landmarks and convert them
    # to a 2-tuple of (x, y)-coordinates
    for i in range(0, num):
        coords[i] = (landmarks.part(i).x, landmarks.part(i).y)
    # return the list of (x, y)-coordinates
    return coords


# Get feature_parameters of facial expressions
def get_feature_parameters(landmarks):
    """Derive eye-openness and mouth shape parameters from 68 landmarks.

    All distances are normalized by the mean of face length and width; the
    scale/offset constants are empirical tuning values.
    """
    d00 = np.linalg.norm(
        landmarks[27] - landmarks[8]
    )  # Length of face (eyebrow to chin)
    d11 = np.linalg.norm(landmarks[0] - landmarks[16])  # width of face
    d_reference = (d00 + d11) / 2
    # Left eye
    d1 = np.linalg.norm(landmarks[37] - landmarks[41])
    d2 = np.linalg.norm(landmarks[38] - landmarks[40])
    # Right eye
    d3 = np.linalg.norm(landmarks[43] - landmarks[47])
    d4 = np.linalg.norm(landmarks[44] - landmarks[46])
    # Mouth width
    d5 = np.linalg.norm(landmarks[51] - landmarks[57])
    # Mouth length
    d6 = np.linalg.norm(landmarks[60] - landmarks[64])

    leftEyeWid = ((d1 + d2) / (2 * d_reference) - 0.02) * 6
    rightEyewid = ((d3 + d4) / (2 * d_reference) - 0.02) * 6
    mouthWid = (d5 / d_reference - 0.13) * 1.27 + 0.02
    mouthLen = d6 / d_reference
    return leftEyeWid, rightEyewid, mouthWid, mouthLen


# Get largest face
def get_largest_face(dets):
    """Return the index of the largest detection rectangle in `dets`."""
    if len(dets) == 1:
        return 0

    face_areas = [
        (det.right() - det.left()) * (det.bottom() - det.top()) for det in dets
    ]

    largest_area = face_areas[0]
    largest_index = 0
    for index in range(1, len(dets)):
        if face_areas[index] > largest_area:
            largest_index = index
            largest_area = face_areas[index]

    print("largest_face index is {} in {} faces".format(largest_index, len(dets)))

    return largest_index


# Feature points extraction using dlib
def get_image_points(img):
    """Detect the largest face in `img` and return (status, landmark_shape).

    Returns (-1, None) when no face is found, (0, shape) otherwise.
    NOTE(review): detection runs on the CLAHE-equalized gray image, but the
    predictor is fed the original BGR frame — confirm this is intentional.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray_eq = clahe.apply(gray)  # Adaptive histogram equalization
    # cv2.imshow("gray",gray)
    # cv2.imshow("gray_eq",gray_eq)

    dets = detector(gray_eq, 0)
    if 0 == len(dets):
        print("ERROR: found no face")
        return -1, None
    largest_index = get_largest_face(dets)
    face_rectangle = dets[largest_index]

    landmark_shape = predictor(img, face_rectangle)
    return 0, landmark_shape


# Pose estimation: get rotation vector and translation vector
def get_pose_estimation(img_size, image_points):
    """Solve PnP for the head pose from exactly six 2D feature points.

    Returns (success, rotation_vector, translation_vector, camera_matrix,
    dist_coeffs). The reshape below hard-codes six correspondences, matching
    the six rows of `model_points`.
    """
    # 3D model points
    # model_points = np.array([
    #                             (0.0, 0.0, 0.0),             # Nose tip
    #                             (0.0, -330.0, -65.0),        # Chin
    #                             (-225.0, 170.0, -135.0),     # Left eye left corner
    #                             (225.0, 170.0, -135.0),      # Right eye right corner
    #                             (-150.0, -150.0, -125.0),    # Left Mouth corner
    #                             (150.0, -150.0, -125.0)      # Right mouth corner
    #
    #                         ])
    model_points = np.array(
        [
            (0.0, 0.0, 0.0),  # Nose tip
            (0.0, -330.0, -65.0),  # Chin
            (-225.0, 170.0, -135.0),  # Left eye left corner
            (225.0, 170.0, -135.0),  # Right eye right corner
            (-349.0, 85.0, -300.0),  # Left head corner
            (349.0, 85.0, -300.0),  # Right head corner
        ]
    )
    # Camera internals
    # focal length approximated by the image width; principal point at center
    focal_length = img_size[1]
    center = (img_size[1] / 2, img_size[0] / 2)
    camera_matrix = np.array(
        [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]],
        dtype="double",
    )

    print("Camera Matrix:\n {}".format(camera_matrix))

    dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
    imagePoints = np.ascontiguousarray(image_points[:, :2]).reshape((6, 1, 2))
    (success, rotation_vector, translation_vector) = cv2.solvePnP(
        model_points, imagePoints, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_DLS
    )

    ############################
    # rotation_vector[0] = kalman_filter_simple(rotation_vector[0], 0.1, 0.01)
    # rotation_vector[1] = kalman_filter_simple(rotation_vector[1], 0.1, 0.01)
    # rotation_vector[2] = kalman_filter_simple(rotation_vector[2], 0.1, 0.01)

    print("Rotation Vector:\n {}".format(rotation_vector))
    print("Translation Vector:\n {}".format(translation_vector))
    return success, rotation_vector, translation_vector, camera_matrix, dist_coeffs


# Convert rotation_vector to quaternion
def get_quaternion(rotation_vector):
    """Convert an axis-angle rotation vector to a (w, x, y, z) quaternion.

    NOTE(review): divides by theta — a zero rotation vector would divide by
    zero; confirm upstream guarantees a non-zero rotation.
    """
    # calculate rotation angles
    theta = cv2.norm(rotation_vector, cv2.NORM_L2)
    # theta = mean_filter_simple(theta)

    # transformed to quaterniond
    w = math.cos(theta / 2)
    x = math.sin(theta / 2) * rotation_vector[0][0] / theta
    y = math.sin(theta / 2) * rotation_vector[1][0] / theta
    z = math.sin(theta / 2) * rotation_vector[2][0] / theta
    return round(w, 4), round(x, 4), round(y, 4), round(z, 4)


if __name__ == "__main__":
    test_data = [0]
    test_time = [0]
    # Socket Connect
    try:
        client = socket.socket()
        client.connect(("127.0.0.1", 1755))
    except:
        print("\nERROR: No socket connection.\n")
        sys.exit(0)

    # initialize kalman object
    KalmanX = KalmanObject(
        POINTS_NUM_LANDMARK, 1, 10
    )  # Tune Q, R to change landmarks_x sensitivity
    KalmanY = KalmanObject(
        POINTS_NUM_LANDMARK, 1, 10
    )  # Tune Q, R to change landmarks_y sensitivity
    uu_ = np.zeros((POINTS_NUM_LANDMARK))
    # initialize PARAMETERS
    landmarks = np.zeros((POINTS_NUM_LANDMARK, 2))

    open_time = time.time()

    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        start_time = time.time()

        # Read Image
        ret, img = cap.read()
        if ret != True:
            print("read frame failed")
            # continue
            break
        size = img.shape

        # Downscale oversized frames by 3x to speed up detection.
        if size[0] > 700:
            h = size[0] / 3
            w = size[1] / 3
            img = cv2.resize(img, (int(w), int(h)), interpolation=cv2.INTER_CUBIC)
            size = img.shape
        # img = cv2.normalize(img,dst=None,alpha=350,beta=10,norm_type=cv2.NORM_MINMAX)

        ret, landmark_shape = get_image_points(img)
        if ret != 0:
            print("ERROR: get_image_points failed")
            continue

        # Compute feature parameters of facial expressions (eyes, mouth)
        landmarks_orig = landmarks_to_np(landmark_shape)  # convert format

        # Apply kalman filter to landmarks FOR POSE ESTIMATION
        KalmanX.kalman_update(uu_, landmarks_orig[:, 0])
        KalmanY.kalman_update(uu_, landmarks_orig[:, 1])
        landmarks[:, 0] = KalmanX.xx.astype(np.int32)
        landmarks[:, 1] = KalmanY.xx.astype(np.int32)

        landmarks = mean_filter_for_landmarks(
            landmarks
        )  # Apply smooth filter to landmarks FOR POSE ESTIMATION

        leftEyeWid, rightEyewid, mouthWid, mouthLen = get_feature_parameters(
            landmarks_orig
        )
        parameters_str = (
            "leftEyeWid:{}, rightEyewid:{}, mouthWid:{}, mouthLen:{}".format(
                leftEyeWid, rightEyewid, mouthWid, mouthLen
            )
        )
        print(parameters_str)

        # Five feature points for pose estimation
        # image_points = np.vstack((landmarks[30],landmarks[8],landmarks[36],landmarks[45],landmarks[48],landmarks[54]))
        image_points = np.vstack(
            (
                landmarks[30],
                landmarks[8],
                landmarks[36],
                landmarks[45],
                landmarks[1],
                landmarks[15],
            )
        )

        (
            ret,
            rotation_vector,
            translation_vector,
            camera_matrix,
            dist_coeffs,
        ) = get_pose_estimation(size, image_points)
        if ret != True:
            print("ERROR: get_pose_estimation failed")
            continue
        used_time = time.time() - start_time
        print("used_time:{} sec".format(round(used_time, 3)))

        # Convert rotation_vector to quaternion
        w, x, y, z = get_quaternion(rotation_vector)
        quaternion_str = "w:{}, x:{}, y:{}, z:{}".format(w, x, y, z)
        print(quaternion_str)

        # Packing data and transmit to server through Socket
        # (colon-separated: tx:ty:tz:w:x:y:z:leftEye:rightEye:mouthWid:mouthLen)
        data = (
            str(translation_vector[0, 0])
            + ":"
            + str(translation_vector[1, 0])
            + ":"
            + str(translation_vector[2, 0])
            + ":"
            + str(w)
            + ":"
            + str(x)
            + ":"
            + str(y)
            + ":"
            + str(z)
            + ":"
            + str(leftEyeWid)
            + ":"
            + str(rightEyewid)
            + ":"
            + str(mouthWid)
            + ":"
            + str(mouthLen)
        )
        try:
            client.send(data.encode("utf-8"))
        except:
            print("\nSocket connection closed.\n")
            break

        # ============================================================================
        # For visualization only (below)
        # ============================================================================
        # Project a 3D point set onto the image plane
        # We use this to draw a bounding box
        (nose_end_point2D, jacobian) = cv2.projectPoints(
            np.array([(0.0, 0.0, 1000)]),
            rotation_vector,
            translation_vector,
            camera_matrix,
            dist_coeffs,
        )

        for i in range(8):
            (boxPoints2D[:, :, i, :], jacobian) = cv2.projectPoints(
                np.array([boxPoints3D[i]]),
                rotation_vector,
                translation_vector,
                camera_matrix,
                dist_coeffs,
            )
        boxPoints2D = boxPoints2D.astype(int)

        for p in image_points:
            cv2.circle(img, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

        # Edges of the visualization cube: top face, bottom face, four pillars.
        boxset_1 = boxPoints2D[0, 0, 0:4, :]
        boxset_2 = boxPoints2D[0, 0, 4:8, :]
        boxset_3 = np.vstack((boxPoints2D[0, 0, 0, :], boxPoints2D[0, 0, 4, :]))
        boxset_4 = np.vstack((boxPoints2D[0, 0, 1, :], boxPoints2D[0, 0, 5, :]))
        boxset_5 = np.vstack((boxPoints2D[0, 0, 2, :], boxPoints2D[0, 0, 6, :]))
        boxset_6 = np.vstack((boxPoints2D[0, 0, 3, :], boxPoints2D[0, 0, 7,
imported_comp_names = set() for key, value in data['export_data'][entity_name].items(): if entity_name == GROUP_ENTITY_NAME: # Check if there is already a group with the same name, # and if so, recreate the name orig_label = value['label'] dupl_counter = 0 while QueryBuilder().append(entity, filters={'label': {'==': value['label']}}).count(): # Rename the new group value['label'] = orig_label + DUPL_SUFFIX.format(dupl_counter) dupl_counter += 1 if dupl_counter == 100: raise exceptions.ImportUniquenessError( 'A group of that label ( {} ) already exists and I could not create a new ' 'one'.format(orig_label) ) elif entity_name == COMPUTER_ENTITY_NAME: # The following is done for compatibility # reasons in case the export file was generated # with the Django export method. In Django the # metadata and the transport parameters are # stored as (unicode) strings of the serialized # JSON objects and not as simple serialized # JSON objects. if isinstance(value['metadata'], (str, bytes)): value['metadata'] = json.loads(value['metadata']) # Check if there is already a computer with the # same name in the database builder = QueryBuilder() builder.append( entity, filters={'name': { '==': value['name'] }}, project=['*'], tag='res' ) dupl = builder.count() or value['name'] in imported_comp_names dupl_counter = 0 orig_name = value['name'] while dupl: # Rename the new computer value['name'] = orig_name + DUPL_SUFFIX.format(dupl_counter) builder = QueryBuilder() builder.append( entity, filters={'name': { '==': value['name'] }}, project=['*'], tag='res' ) dupl = builder.count() or value['name'] in imported_comp_names dupl_counter += 1 if dupl_counter == 100: raise exceptions.ImportUniquenessError( 'A computer of that name ( {} ) already exists and I could not create a ' 'new one'.format(orig_name) ) imported_comp_names.add(value['name']) if value[unique_identifier] in relevant_db_entries: # Already in DB # again, switched to entity_name in v0.3 existing_entries[entity_name][key] = 
value else: # To be added new_entries[entity_name][key] = value else: new_entries[entity_name] = data['export_data'][entity_name] # Progress bar - reset for import progress_bar = get_progress_bar(total=number_of_entities, disable=silent) reset_progress_bar = {} # I import data from the given model for entity_name in entity_order: entity = entity_names_to_entities[entity_name] fields_info = metadata['all_fields_info'].get(entity_name, {}) unique_identifier = metadata['unique_identifiers'].get(entity_name, '') # Progress bar initialization - Model if reset_progress_bar: progress_bar = get_progress_bar(total=reset_progress_bar['total'], disable=silent) progress_bar.n = reset_progress_bar['n'] reset_progress_bar = {} pbar_base_str = '{}s - '.format(entity_name) progress_bar.set_description_str(pbar_base_str + 'Initializing', refresh=True) # EXISTING ENTRIES if existing_entries[entity_name]: # Progress bar update - Model progress_bar.set_description_str( pbar_base_str + '{} existing entries'.format(len(existing_entries[entity_name])), refresh=True ) for import_entry_pk, entry_data in existing_entries[entity_name].items(): unique_id = entry_data[unique_identifier] existing_entry_pk = foreign_ids_reverse_mappings[entity_name][unique_id] import_data = dict( deserialize_field( k, v, fields_info=fields_info, import_unique_ids_mappings=import_unique_ids_mappings, foreign_ids_reverse_mappings=foreign_ids_reverse_mappings ) for k, v in entry_data.items() ) # TODO COMPARE, AND COMPARE ATTRIBUTES if entity_name == COMMENT_ENTITY_NAME: new_entry_uuid = merge_comment(import_data, comment_mode) if new_entry_uuid is not None: entry_data[unique_identifier] = new_entry_uuid new_entries[entity_name][import_entry_pk] = entry_data if entity_name not in ret_dict: ret_dict[entity_name] = {'new': [], 'existing': []} ret_dict[entity_name]['existing'].append((import_entry_pk, existing_entry_pk)) IMPORT_LOGGER.debug( 'Existing %s: %s (%s->%s)', entity_name, unique_id, import_entry_pk, 
existing_entry_pk ) # Store all objects for this model in a list, and store them # all in once at the end. objects_to_create = list() # In the following list we add the objects to be updated objects_to_update = list() # This is needed later to associate the import entry with the new pk import_new_entry_pks = dict() # NEW ENTRIES if new_entries[entity_name]: # Progress bar update - Model progress_bar.set_description_str( pbar_base_str + '{} new entries'.format(len(new_entries[entity_name])), refresh=True ) for import_entry_pk, entry_data in new_entries[entity_name].items(): unique_id = entry_data[unique_identifier] import_data = dict( deserialize_field( k, v, fields_info=fields_info, import_unique_ids_mappings=import_unique_ids_mappings, foreign_ids_reverse_mappings=foreign_ids_reverse_mappings ) for k, v in entry_data.items() ) # We convert the Django fields to SQLA. Note that some of # the Django fields were converted to SQLA compatible # fields by the deserialize_field method. This was done # for optimization reasons in Django but makes them # compatible with the SQLA schema and they don't need any # further conversion. if entity_name in file_fields_to_model_fields: for file_fkey in file_fields_to_model_fields[entity_name]: # This is an exception because the DbLog model defines the `_metadata` column instead of the # `metadata` column used in the Django model. This is because the SqlAlchemy model base # class already has a metadata attribute that cannot be overridden. 
For consistency, the # `DbLog` class however expects the `metadata` keyword in its constructor, so we should # ignore the mapping here if entity_name == LOG_ENTITY_NAME and file_fkey == 'metadata': continue model_fkey = file_fields_to_model_fields[entity_name][file_fkey] if model_fkey in import_data: continue import_data[model_fkey] = import_data[file_fkey] import_data.pop(file_fkey, None) db_entity = get_object_from_string(entity_names_to_sqla_schema[entity_name]) objects_to_create.append(db_entity(**import_data)) import_new_entry_pks[unique_id] = import_entry_pk if entity_name == NODE_ENTITY_NAME: IMPORT_LOGGER.debug('STORING NEW NODE REPOSITORY FILES & ATTRIBUTES...') # NEW NODES for object_ in objects_to_create: import_entry_uuid = object_.uuid import_entry_pk = import_new_entry_pks[import_entry_uuid] # Progress bar initialization - Node progress_bar.update() pbar_node_base_str = pbar_base_str + 'UUID={} - '.format(import_entry_uuid.split('-')[0]) # Before storing entries in the DB, I store the files (if these are nodes). # Note: only for new entries! subfolder = folder.get_subfolder( os.path.join(NODES_EXPORT_SUBFOLDER, export_shard_uuid(import_entry_uuid)) ) if not subfolder.exists(): raise exceptions.CorruptArchive( 'Unable to find the repository folder for Node with UUID={} in the exported ' 'file'.format(import_entry_uuid) ) destdir = RepositoryFolder(section=Repository._section_name, uuid=import_entry_uuid) # Replace the folder, possibly destroying existing previous folders, and move the files # (faster if we are on the same filesystem, and in any case the source is a SandboxFolder) progress_bar.set_description_str(pbar_node_base_str + 'Repository', refresh=True) destdir.replace_with_folder(subfolder.abspath, move=True, overwrite=True) # For Nodes, we also have to store Attributes! 
IMPORT_LOGGER.debug('STORING NEW NODE ATTRIBUTES...') progress_bar.set_description_str(pbar_node_base_str + 'Attributes', refresh=True) # Get attributes from import file try: object_.attributes = data['node_attributes'][str(import_entry_pk)] except KeyError: raise exceptions.CorruptArchive( 'Unable to find attribute info for Node with UUID={}'.format(import_entry_uuid) ) # For DbNodes, we also have to store extras if extras_mode_new == 'import': IMPORT_LOGGER.debug('STORING NEW NODE EXTRAS...') progress_bar.set_description_str(pbar_node_base_str + 'Extras', refresh=True) # Get extras from import file try: extras = data['node_extras'][str(import_entry_pk)] except KeyError: raise exceptions.CorruptArchive( 'Unable to find extra info for Node with UUID={}'.format(import_entry_uuid) ) # TODO: remove when aiida extras will be moved somewhere else # from here extras = {key: value for key, value in extras.items() if not key.startswith('_aiida_')} if object_.node_type.endswith('code.Code.'): extras = {key: value for key, value in extras.items() if not key == 'hidden'} # till here object_.extras = extras elif extras_mode_new == 'none': IMPORT_LOGGER.debug('SKIPPING NEW NODE EXTRAS...') else: raise exceptions.ImportValidationError( "Unknown extras_mode_new value: {}, should be either 'import' or 'none'" ''.format(extras_mode_new) ) # EXISTING NODES (Extras) IMPORT_LOGGER.debug('UPDATING EXISTING NODE EXTRAS...') import_existing_entry_pks = { entry_data[unique_identifier]: import_entry_pk for import_entry_pk, entry_data in existing_entries[entity_name].items() } for node in session.query(DbNode).filter(DbNode.uuid.in_(import_existing_entry_pks)).all(): import_entry_uuid = str(node.uuid) import_entry_pk = import_existing_entry_pks[import_entry_uuid] # Progress bar initialization - Node pbar_node_base_str = pbar_base_str + 'UUID={} - '.format(import_entry_uuid.split('-')[0]) progress_bar.set_description_str(pbar_node_base_str + 'Extras', refresh=False) progress_bar.update() # 
Get extras from import file try: extras = data['node_extras'][str(import_entry_pk)] except KeyError: raise exceptions.CorruptArchive( 'Unable to find extra info for Node with UUID={}'.format(import_entry_uuid) ) old_extras = node.extras.copy() # TODO: remove when aiida extras will be moved somewhere else # from here extras = {key: value for key, value in extras.items() if not key.startswith('_aiida_')} if node.node_type.endswith('code.Code.'): extras = {key: value for key, value in extras.items() if not key == 'hidden'} # till here new_extras = merge_extras(node.extras, extras, extras_mode_existing) if new_extras != old_extras: node.extras = new_extras flag_modified(node, 'extras') objects_to_update.append(node) else: # Update progress bar with new non-Node entries progress_bar.update(n=len(existing_entries[entity_name]) + len(new_entries[entity_name])) progress_bar.set_description_str(pbar_base_str + 'Storing', refresh=True) # Store them all in once; However, the PK are not set in this way... 
if objects_to_create: session.add_all(objects_to_create) if objects_to_update: session.add_all(objects_to_update) session.flush() just_saved = {} if import_new_entry_pks.keys(): reset_progress_bar = {'total': progress_bar.total, 'n': progress_bar.n} progress_bar = get_progress_bar(total=len(import_new_entry_pks), disable=silent) builder = QueryBuilder() builder.append( entity, filters={unique_identifier: { 'in': list(import_new_entry_pks.keys()) }}, project=[unique_identifier, 'id'] ) for entry in builder.iterall(): progress_bar.update() just_saved.update({entry[0]: entry[1]}) progress_bar.set_description_str(pbar_base_str + 'Done!', refresh=True) # Now I have the PKs, print the info # Moreover, add newly created Nodes to foreign_ids_reverse_mappings for unique_id, new_pk in just_saved.items(): from uuid import UUID if isinstance(unique_id, UUID): unique_id = str(unique_id) import_entry_pk = import_new_entry_pks[unique_id] foreign_ids_reverse_mappings[entity_name][unique_id] = new_pk if entity_name not in ret_dict: ret_dict[entity_name] = {'new': [], 'existing': []} ret_dict[entity_name]['new'].append((import_entry_pk, new_pk)) IMPORT_LOGGER.debug('N %s: %s (%s->%s)', entity_name, unique_id, import_entry_pk, new_pk) IMPORT_LOGGER.debug('STORING NODE LINKS...') import_links = data['links_uuid'] if import_links: progress_bar = get_progress_bar(total=len(import_links), disable=silent) pbar_base_str = 'Links - ' for link in import_links: # Check for dangling Links within the, supposed, self-consistent archive progress_bar.set_description_str(pbar_base_str + 'label={}'.format(link['label']), refresh=False) progress_bar.update() try: in_id
Valiate that ROOT admin is able to deploy a VM for user in ROOT domain in a shared network with scope=all """ # Deploy VM as user in ROOT domain self.apiclient.connection.apiKey = self.default_apikey self.apiclient.connection.securityKey = self.default_secretkey self.vmdata["name"] = self.acldata["vmROOTA"]["name"] + "-shared-scope-all-root-admin" self.vmdata["displayname"] = self.acldata["vmROOTA"]["displayname"] + "-shared-scope-all-root-admin" vm = VirtualMachine.create( self.apiclient, self.vmdata, zoneid=self.zone.id, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=self.shared_network_all.id, accountid=self.account_roota.name, domainid=self.account_roota.domainid ) self.assertEqual(vm.state == "Running" and vm.account == self.account_roota.name and vm.domainid == self.account_roota.domainid, True, "ROOT admin is not able to deploy a VM for user in ROOT domain in a shared network with scope=all") ## Test cases relating to deploying Virtual Machine as ROOT admin for other users in shared network with scope=Domain and no subdomain access @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_deployVM_in_sharedNetwork_as_admin_scope_domain_nosubdomainaccess_domainuser(self): """ Valiate that ROOT admin is able to deploy a VM for domain user in a shared network with scope=domain with no subdomain access """ # Deploy VM as user in a domain that has shared network with no subdomain access self.apiclient.connection.apiKey = self.default_apikey self.apiclient.connection.securityKey = self.default_secretkey self.vmdata["name"] = self.acldata["vmD11A"]["name"] + "-shared-scope-domain-nosubdomainaccess-root-admin" self.vmdata["displayname"] = self.acldata["vmD11A"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-root-admin" vm = VirtualMachine.create( self.apiclient, self.vmdata, zoneid=self.zone.id, serviceofferingid=self.service_offering.id, templateid=self.template.id, 
networkids=self.shared_network_domain_d11.id, accountid=self.account_d11a.name, domainid=self.account_d11a.domainid ) self.assertEqual(vm.state == "Running" and vm.account == self.account_d11a.name and vm.domainid == self.account_d11a.domainid, True, "ROOT admin is not able to deploy a VM for domain user in a shared network with scope=domain with no subdomain access") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_deployVM_in_sharedNetwork_as_admin_scope_domain_nosubdomainaccess_domainadminuser(self): """ Valiate that ROOT admin is able to deploy a VM for domain admin user in a shared network with scope=domain with no subdomain access """ # Deploy VM as an admin user in a domain that has shared network with no subdomain access self.apiclient.connection.apiKey = self.default_apikey self.apiclient.connection.securityKey = self.default_secretkey self.vmdata["name"] = self.acldata["vmD11"]["name"] + "-shared-scope-domain-nosubdomainaccess-root-admin" self.vmdata["displayname"] = self.acldata["vmD11"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-root-admin" vm = VirtualMachine.create( self.apiclient, self.vmdata, zoneid=self.zone.id, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=self.shared_network_domain_d11.id, accountid=self.account_d11.name, domainid=self.account_d11.domainid ) self.assertEqual(vm.state == "Running" and vm.account == self.account_d11.name and vm.domainid == self.account_d11.domainid, True, "ROOT admin is not able to deploy a VM for domain admin user in a shared network with scope=domain with no subdomain access") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_deployVM_in_sharedNetwork_as_admin_scope_domain_nosubdomainaccess_subdomainuser(self): """ Valiate that ROOT admin is NOT able to deploy a VM for sub domain user in a shared network with scope=domain with no subdomain access """ # Deploy VM as user in a subdomain under a domain that 
has shared network with no subdomain access self.apiclient.connection.apiKey = self.default_apikey self.apiclient.connection.securityKey = self.default_secretkey self.vmdata["name"] = self.acldata["vmD111A"]["name"] + "-shared-scope-domain-nosubdomainaccess-root-admin" self.vmdata["displayname"] = self.acldata["vmD111A"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-root-admin" try: vm = VirtualMachine.create( self.apiclient, self.vmdata, zoneid=self.zone.id, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=self.shared_network_domain_d11.id, accountid=self.account_d111a.name, domainid=self.account_d111a.domainid ) self.fail("ROOT admin is able to deploy a VM for sub domain user in a shared network with scope=domain with no subdomain access") except Exception as e: self.debug("When a user from a subdomain deploys a VM in a shared network with scope=domain with no subdomain access %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN): self.fail( "Error message validation failed when ROOT admin tries to deploy a VM for sub domain user in a shared network with scope=domain with no subdomain access ") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_deployVM_in_sharedNetwork_as_admin_scope_domain_nosubdomainaccess_subdomainadminuser(self): """ Valiate that ROOT admin is NOT able to deploy a VM for sub domain admin user in a shared network with scope=domain with no subdomain access """ # Deploy VM as an admin user in a subdomain under a domain that has shared network with no subdomain access self.apiclient.connection.apiKey = self.default_apikey self.apiclient.connection.securityKey = self.default_secretkey self.vmdata["name"] = self.acldata["vmD111"]["name"] + "-shared-scope-domain-nosubdomainaccess-root-admin" self.vmdata["displayname"] = self.acldata["vmD111"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-root-admin" try: vm = 
VirtualMachine.create( self.apiclient, self.vmdata, zoneid=self.zone.id, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=self.shared_network_domain_d11.id, accountid=self.account_d111.name, domainid=self.account_d111.domainid ) self.fail("ROOT admin is able to deploy a VM for sub domain admin user in a shared network with scope=domain with no subdomain access") except Exception as e: self.debug("When a admin user from a subdomain deploys a VM in a shared network with scope=domain with no subdomain access %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN): self.fail( "Error message validation failed when ROOT admin tries to deploy a VM for sub domain admin user in a shared network with scope=domain with no subdomain access") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_deployVM_in_sharedNetwork_as_admin_scope_domain_nosubdomainaccess_parentdomainuser(self): """ Valiate that ROOT admin is NOT able to deploy a VM for parent domain user in a shared network with scope=domain with no subdomain access """ # Deploy VM as user in parentdomain of a domain that has shared network with no subdomain access self.apiclient.connection.apiKey = self.default_apikey self.apiclient.connection.securityKey = self.default_secretkey self.vmdata["name"] = self.acldata["vmD1A"]["name"] + "-shared-scope-domain-nosubdomainaccess-root-admin" self.vmdata["displayname"] = self.acldata["vmD1A"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-root-admin" try: vm = VirtualMachine.create( self.apiclient, self.vmdata, zoneid=self.zone.id, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=self.shared_network_domain_d11.id, accountid=self.account_d1a.name, domainid=self.account_d1a.domainid ) self.fail(" ROOT admin is able to deploy a VM for parent domain user in a shared network with scope=domain with no subdomain access") except 
Exception as e: self.debug("When a user from parent domain deploys a VM in a shared network with scope=domain with no subdomain access %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN): self.fail( "Error message validation failed when ROOT admin tries to deploy a VM for parent domain user in a shared network with scope=domain with no subdomain access") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_deployVM_in_sharedNetwork_as_admin_scope_domain_nosubdomainaccess_parentdomainadminuser(self): """ Valiate that ROOT admin is NOT able to deploy a VM for parent domain admin user in a shared network with scope=domain with no subdomain access """ # Deploy VM as an admin user in parentdomain of a domain that has shared network with no subdomain access self.apiclient.connection.apiKey = self.default_apikey self.apiclient.connection.securityKey = self.default_secretkey self.vmdata["name"] = self.acldata["vmD1"]["name"] + "-shared-scope-domain-nosubdomainaccess-root-admin" self.vmdata["displayname"] = self.acldata["vmD1"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-root-admin" try: vm = VirtualMachine.create( self.apiclient, self.vmdata, zoneid=self.zone.id, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=self.shared_network_domain_d11.id, accountid=self.account_d1.name, domainid=self.account_d1.domainid ) self.fail("ROOT admin is able to deploy a VM for parent domain admin user in a shared network with scope=domain with no subdomain access") except Exception as e: self.debug("When an admin user from parent domain deploys a VM in a shared network with scope=domain with no subdomain access %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN): self.fail( "Error message validation failed when ROOT admin tries to deploy a VM for parent domain admin user in a shared network with 
scope=domain with no subdomain access ") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_deployVM_in_sharedNetwork_as_admin_scope_domain_nosubdomainaccess_ROOTuser(self): """ Valiate that ROOT admin is NOT able to deploy a VM for parent domain admin user in a shared network with scope=domain with no subdomain access """ # Deploy VM as user in ROOT domain self.apiclient.connection.apiKey = self.default_apikey self.apiclient.connection.securityKey = self.default_secretkey self.vmdata["name"] = self.acldata["vmROOTA"]["name"] + "-shared-scope-domain-nosubdomainaccess-root-admin" self.vmdata["displayname"] = self.acldata["vmROOTA"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-root-admin" try: vm = VirtualMachine.create( self.apiclient, self.vmdata, zoneid=self.zone.id, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=self.shared_network_domain_d11.id, accountid=self.account_roota.name, domainid=self.account_roota.domainid ) self.fail("ROOT admin is able to deploy a VM for parent domain admin user in a shared network with scope=domain with no subdomain access") except Exception as e: self.debug("When a regular user from ROOT domain deploys a VM in a shared network with scope=domain with no subdomain access %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN): self.fail( "Error message validation failed when ROOT admin tries to deploy a VM for parent domain admin user in a shared network with scope=domain with no subdomain access") ## Test cases relating to deploying Virtual Machine as ROOT admin for other users in shared network with scope=Domain and with subdomain access @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_deployVM_in_sharedNetwork_as_admin_scope_domain_withsubdomainaccess_domainuser(self): """ Valiate that ROOT admin is able to deploy a VM for domain user in a shared network with scope=domain 
with subdomain access """ # Deploy VM as user in a domain that has shared network with subdomain access self.apiclient.connection.apiKey = self.default_apikey self.apiclient.connection.securityKey = self.default_secretkey self.vmdata["name"] = self.acldata["vmD11A"]["name"] + "-shared-scope-domain-withsubdomainaccess-root-admin" self.vmdata["displayname"] = self.acldata["vmD11A"]["displayname"] + "-shared-scope-domain-withsubdomainaccess-root-admin" vm = VirtualMachine.create( self.apiclient, self.vmdata, zoneid=self.zone.id, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=self.shared_network_domain_with_subdomain_d11.id, accountid=self.account_d11a.name, domainid=self.account_d11a.domainid ) self.assertEqual(vm.state == "Running" and vm.account == self.account_d11a.name and vm.domainid == self.account_d11a.domainid, True, "ROOT admin is NOT able to deploy a VM for domain user in a shared network with scope=domain with subdomain access") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_deployVM_in_sharedNetwork_as_admin_scope_domain_withsubdomainaccess_domainadminuser(self): """ Valiate that ROOT admin is able to deploy a VM for domain
Psat))) ### Liquid Mixtures def Amgat(xs, Vms): r'''Calculate mixture liquid density using the Amgat mixing rule. Highly inacurate, but easy to use. Assumes idea liquids with no excess volume. Average molecular weight should be used with it to obtain density. .. math:: V_{mix} = \sum_i x_i V_i or in terms of density: .. math:: \rho_{mix} = \sum\frac{x_i}{\rho_i} Parameters ---------- xs : array Mole fractions of each component, [] Vms : array Molar volumes of each fluids at conditions [m^3/mol] Returns ------- Vm : float Mixture liquid volume [m^3/mol] Notes ----- Units are that of the given volumes. It has been suggested to use this equation with weight fractions, but the results have been less accurate. Examples -------- >>> Amgat([0.5, 0.5], [4.057e-05, 5.861e-05]) 4.9590000000000005e-05 ''' if not none_and_length_check([xs, Vms]): raise Exception('Function inputs are incorrect format') return mixing_simple(xs, Vms) def Rackett_mixture(T, xs, MWs, Tcs, Pcs, Zrs): r'''Calculate mixture liquid density using the Rackett-derived mixing rule as shown in [2]_. .. math:: V_m = \sum_i\frac{x_i T_{ci}}{MW_i P_{ci}} Z_{R,m}^{(1 + (1 - T_r)^{2/7})} R \sum_i x_i MW_i Parameters ---------- T : float Temperature of liquid [K] xs: list Mole fractions of each component, [] MWs : list Molecular weights of each component [g/mol] Tcs : list Critical temperatures of each component [K] Pcs : list Critical pressures of each component [Pa] Zrs : list Rackett parameters of each component [] Returns ------- Vm : float Mixture liquid volume [m^3/mol] Notes ----- Model for pure compounds in [1]_ forms the basis for this model, shown in [2]_. Molecular weights are used as weighing by such has been found to provide higher accuracy in [2]_. The model can also be used without molecular weights, but results are somewhat different. As with the Rackett model, critical compressibilities may be used if Rackett parameters have not been regressed. 
Critical mixture temperature, and compressibility are all obtained with simple mixing rules. Examples -------- Calculation in [2]_ for methanol and water mixture. Result matches example. >>> Rackett_mixture(T=298., xs=[0.4576, 0.5424], MWs=[32.04, 18.01], Tcs=[512.58, 647.29], Pcs=[8.096E6, 2.209E7], Zrs=[0.2332, 0.2374]) 2.625288603174508e-05 References ---------- .. [1] Rackett, <NAME>. "Equation of State for Saturated Liquids." Journal of Chemical & Engineering Data 15, no. 4 (1970): 514-517. doi:10.1021/je60047a012 .. [2] Danner, <NAME>, and Design Institute for Physical Property Data. Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982. ''' if not none_and_length_check([xs, MWs, Tcs, Pcs, Zrs]): raise Exception('Function inputs are incorrect format') Tc = mixing_simple(xs, Tcs) Zr = mixing_simple(xs, Zrs) MW = mixing_simple(xs, MWs) Tr = T/Tc bigsum = sum(xs[i]*Tcs[i]/Pcs[i]/MWs[i] for i in range(len(xs))) return (R*bigsum*Zr**(1. + (1. - Tr)**(2/7.)))*MW def COSTALD_mixture(xs, T, Tcs, Vcs, omegas): r'''Calculate mixture liquid density using the COSTALD CSP method. A popular and accurate estimation method. If possible, fit parameters are used; alternatively critical properties work well. The mixing rules giving parameters for the pure component COSTALD equation are: .. math:: T_{cm} = \frac{\sum_i\sum_j x_i x_j (V_{ij}T_{cij})}{V_m} V_m = 0.25\left[ \sum x_i V_i + 3(\sum x_i V_i^{2/3})(\sum_i x_i V_i^{1/3})\right] V_{ij}T_{cij} = (V_iT_{ci}V_{j}T_{cj})^{0.5} \omega = \sum_i z_i \omega_i Parameters ---------- xs: list Mole fractions of each component T : float Temperature of fluid [K] Tcs : list Critical temperature of fluids [K] Vcs : list Critical volumes of fluids [m^3/mol]. This parameter is alternatively a fit parameter omegas : list (ideally SRK) Acentric factor of all fluids, [-] This parameter is alternatively a fit parameter. 
Returns ------- Vs : float Saturation liquid mixture volume Notes ----- Range: 0.25 < Tr < 0.95, often said to be to 1.0 No example has been found. Units are that of critical or fit constant volume. Examples -------- >>> COSTALD_mixture([0.4576, 0.5424], 298., [512.58, 647.29],[0.000117, 5.6e-05], [0.559,0.344] ) 2.706588773271354e-05 References ---------- .. [1] Hankinson, <NAME>., and <NAME>. "A New Correlation for Saturated Densities of Liquids and Their Mixtures." AIChE Journal 25, no. 4 (1979): 653-663. doi:10.1002/aic.690250412 ''' cmps = range(len(xs)) if not none_and_length_check([xs, Tcs, Vcs, omegas]): raise Exception('Function inputs are incorrect format') sum1 = sum([xi*Vci for xi, Vci in zip(xs, Vcs)]) sum2 = sum([xi*Vci**(2/3.) for xi, Vci in zip(xs, Vcs)]) sum3 = sum([xi*Vci**(1/3.) for xi, Vci in zip(xs, Vcs)]) Vm = 0.25*(sum1 + 3.*sum2*sum3) VijTcij = [[(Tcs[i]*Tcs[j]*Vcs[i]*Vcs[j])**0.5 for j in cmps] for i in cmps] omega = mixing_simple(xs, omegas) Tcm = sum([xs[i]*xs[j]*VijTcij[i][j]/Vm for j in cmps for i in cmps]) return COSTALD(T, Tcm, Vm, omega) NONE = 'None' LALIBERTE = 'Laliberte' COSTALD_MIXTURE = 'COSTALD mixture' COSTALD_MIXTURE_FIT = 'COSTALD mixture parameters' SIMPLE = 'SIMPLE' RACKETT = 'RACKETT' RACKETT_PARAMETERS = 'RACKETT Parameters' volume_liquid_mixture_methods = [LALIBERTE, SIMPLE, COSTALD_MIXTURE_FIT, RACKETT_PARAMETERS, COSTALD, RACKETT] class VolumeLiquidMixture(MixtureProperty): '''Class for dealing with the molar volume of a liquid mixture as a function of temperature, pressure, and composition. Consists of one electrolyte-specific method, four corresponding states methods which do not use pure-component volumes, and one mole-weighted averaging method. Prefered method is **SIMPLE**, or **Laliberte** if the mixture is aqueous and has electrolytes. 
Parameters ---------- MWs : list[float], optional Molecular weights of all species in the mixture, [g/mol] Tcs : list[float], optional Critical temperatures of all species in the mixture, [K] Pcs : list[float], optional Critical pressures of all species in the mixture, [Pa] Vcs : list[float], optional Critical molar volumes of all species in the mixture, [m^3/mol] Zcs : list[float], optional Critical compressibility factors of all species in the mixture, [Pa] omegas : list[float], optional Accentric factors of all species in the mixture, [-] CASs : list[str], optional The CAS numbers of all species in the mixture VolumeLiquids : list[VolumeLiquid], optional VolumeLiquid objects created for all species in the mixture, normally created by :obj:`thermo.chemical.Chemical`. Notes ----- To iterate over all methods, use the list stored in :obj:`volume_liquid_mixture_methods`. **Laliberte**: Aqueous electrolyte model equation with coefficients; see :obj:`thermo.electrochem.Laliberte_density` for more details. **COSTALD mixture**: CSP method described in :obj:`COSTALD_mixture`. **COSTALD mixture parameters**: CSP method described in :obj:`COSTALD_mixture`, with two mixture composition independent fit coefficients, `Vc` and `omega`. **RACKETT**: CSP method described in :obj:`Rackett_mixture`. **RACKETT Parameters**: CSP method described in :obj:`Rackett_mixture`, but with a mixture independent fit coefficient for compressibility factor for each species. **SIMPLE**: Linear mole fraction mixing rule described in :obj:`thermo.utils.mixing_simple`; also known as Amgat's law. See Also -------- References ---------- .. [1] <NAME>. The Properties of Gases and Liquids. 5th edition. New York: McGraw-Hill Professional, 2000. ''' name = 'Liquid volume' units = 'm^3/mol' property_min = 0 '''Mimimum valid value of liquid molar volume. It should normally occur at the triple point, and be well above this.''' property_max = 2e-3 '''Maximum valid value of liquid molar volume. 
Generous limit.''' ranked_methods = [LALIBERTE, SIMPLE, COSTALD_MIXTURE_FIT, RACKETT_PARAMETERS, COSTALD_MIXTURE, RACKETT] def __init__(self, MWs=[], Tcs=[], Pcs=[], Vcs=[], Zcs=[], omegas=[], CASs=[], VolumeLiquids=[]): self.MWs = MWs self.Tcs = Tcs self.Pcs = Pcs self.Vcs = Vcs self.Zcs = Zcs self.omegas = omegas self.CASs = CASs self.VolumeLiquids = VolumeLiquids self.Tmin = None '''Minimum temperature at which no method can calculate the liquid molar volume under.''' self.Tmax = None '''Maximum temperature at which no method can calculate the liquid molar volume above.''' self.sorted_valid_methods = [] '''sorted_valid_methods, list: Stored methods which were found valid at a specific temperature; set by `mixture_property`.''' self.user_methods = [] '''user_methods, list: Stored methods which were specified by the user in a ranked order of preference; set by `mixture_property`.''' self.all_methods = set() '''Set of all methods available for a given set of information; filled by :obj:`load_all_methods`.''' self.load_all_methods() def load_all_methods(self): r'''Method to initialize the object by precomputing any values which may be used repeatedly and by retrieving mixture-specific variables. All data are stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, and :obj:`all_methods` as a set of methods which should work to calculate the property. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to
<filename>core/domain/auth_jobs_one_off.py # coding: utf-8 # # Copyright 2020 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Auth-related one-off jobs.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules import ast from core import jobs from core.domain import auth_domain from core.platform import models from core.platform.auth import firebase_auth_services from core.platform.auth import gae_auth_services import feconf import python_utils import utils from firebase_admin import auth as firebase_auth auth_models, user_models = ( models.Registry.import_models([models.NAMES.auth, models.NAMES.user])) ID_HASHING_FUNCTION = hash class SyncFirebaseAccountsOneOffJob(jobs.BaseMapReduceOneOffJobManager): """One-off job to sync Firebase accounts with Oppia's user accounts. This job does not print aggregated success output, because the reduce phase is sharded by auth_id and could potentially generate tens of 1000s of privacy-sensitive outputs. Instead, success is interpreted as an output without any ERRORs. This job is idempotent. Pre-conditions: - The models on Oppia reflect the most up-to-date values. Specifically, a user is considered to "exist" IF AND ONLY IF a UserSettingsModel exists. - Exactly 1 FirebaseSeedModel exists. - SeedFirebaseOneOffJob has succeeded AT LEAST ONCE in the history of the database. 
This includes backups of the database created _after_ the job is run. - PopulateFirebaseAccountsOneOffJob has succeeded AT LEAST ONCE in the history of the database, WITHOUT creating any new accounts, AFTER BOTH: 1. SeedFirebaseOneOffJob has succeeded. 2. Firebase user authentication has been deployed to the server. Post-conditions: - There is a 1-to-1-to-1 mapping between UserAuthDetailsModel, UserIdByFirebaseAuthIdModel, and individual Firebase accounts; using firebase_auth_id as the key. The sole exception is: UserAuthDetailsModel(gae_id=feconf.SYSTEM_COMMITTER_ID) - UserAuthDetailsModel.deleted, UserIdByFirebaseAuthIdModel.deleted, and FirebaseAccount.disabled each have the same respective boolean value. """ SYSTEM_COMMITTER_ACK = 'INFO: SYSTEM_COMMITTER_ID skipped' EMPTY_AUTH_ID_KEY = '<EMPTY_AUTH_ID_KEY>' ASSOC_BY_AUTH_ID_KEY = 'assoc_info_by_auth_id' ASSOC_BY_USER_ID_KEY = 'assoc_info_by_user_id' FIREBASE_ACCOUNT_KEY = 'firebase_account_info' ERROR_ASSOC_INCONSISTENCIES = ( 'ERROR: Found inconsistency in models and/or Firebase account') @classmethod def entity_classes_to_map_over(cls): return [auth_models.UserIdByFirebaseAuthIdModel, auth_models.UserAuthDetailsModel, auth_models.FirebaseSeedModel] @staticmethod def map(item): # The map() function must be static, so we manually create a "cls" # variable instead of changing the function into a classmethod. cls = SyncFirebaseAccountsOneOffJob if isinstance(item, auth_models.UserIdByFirebaseAuthIdModel): auth_id, assoc_info = ( item.id, (cls.ASSOC_BY_AUTH_ID_KEY, (item.user_id, item.deleted))) yield (auth_id, assoc_info) elif isinstance(item, auth_models.UserAuthDetailsModel): if item.gae_id == feconf.SYSTEM_COMMITTER_ID: yield (cls.SYSTEM_COMMITTER_ACK, item.id) return auth_id, assoc_info = ( item.firebase_auth_id or cls.EMPTY_AUTH_ID_KEY, (cls.ASSOC_BY_USER_ID_KEY, (item.id, item.deleted))) yield (auth_id, assoc_info) # The item must be an instance of auth_models.FirebaseSeedModel. 
elif item.id == auth_models.ONLY_FIREBASE_SEED_MODEL_ID: page = firebase_auth.list_users(max_results=1000) user_batch = [] while page is not None: user_batch[:] = page.users # NOTE: Avoids allocating a new list. if not user_batch: break for user in user_batch: auth_id, assoc_info = ( user.uid, (cls.FIREBASE_ACCOUNT_KEY, (None, user.disabled))) yield (auth_id, assoc_info) page = page.get_next_page() @staticmethod def reduce(key, values): # The reduce() function must be static, so we manually create a "cls" # variable instead of changing the function into a classmethod. cls = SyncFirebaseAccountsOneOffJob if key == cls.SYSTEM_COMMITTER_ACK: yield (key, values) return assoc_info_pairs = [ast.literal_eval(v) for v in values] if key == cls.EMPTY_AUTH_ID_KEY: for report in cls.report_assocs_missing(assoc_info_pairs): yield (cls.ERROR_ASSOC_INCONSISTENCIES, report) return # Else: key is a non-empty auth_id. auth_id = key reports = list(cls.report_assoc_collisions(auth_id, assoc_info_pairs)) if reports: for report in reports: yield (cls.ERROR_ASSOC_INCONSISTENCIES, report) return # Else: 1 <= len(assoc_info_pairs) <= 3. assoc_info = dict(assoc_info_pairs) reports = list(cls.report_assoc_inconsistencies(auth_id, **assoc_info)) if reports: for report in reports: yield (cls.ERROR_ASSOC_INCONSISTENCIES, report) return # Else: assoc_info_by_auth_id and assoc_info_by_user_id are either: # 1. Both deleted. # 2. Both present and contain consistent values. # NOTE: This is an arbitrary choice, could use ASSOC_BY_USER_ID_KEY too. assoc_key = cls.ASSOC_BY_AUTH_ID_KEY user_is_permanently_deleted, firebase_account_exists = ( assoc_key not in assoc_info, cls.FIREBASE_ACCOUNT_KEY in assoc_info) if user_is_permanently_deleted: if firebase_account_exists: firebase_auth.update_user(auth_id, disabled=True) firebase_auth.delete_user(auth_id) return # Else: assoc_info_by_auth_id and assoc_info_by_user_id are both present # and have consistent values. 
user_id, marked_as_deleted = assoc_info[assoc_key] if not firebase_account_exists: user_settings = user_models.UserSettingsModel.get(user_id) firebase_auth.create_user( uid=auth_id, email=user_settings.email, # NOTE: Even though the user might be marked for deletion, it's # important to create a disabled Firebase account anyway so that # the same email cannot be claimed while the deletion request is # pending. disabled=marked_as_deleted) return _, firebase_account_is_disabled = assoc_info[cls.FIREBASE_ACCOUNT_KEY] if marked_as_deleted != firebase_account_is_disabled: firebase_auth.update_user(auth_id, disabled=marked_as_deleted) @classmethod def report_assocs_missing(cls, assoc_info_pairs): """Yields debug information for each of the given associations. NOTE: Since assoc_info_by_auth_id and Firebase accounts are keyed by auth_id, the only kinds of associations that could ever hit this function are UserAuthDetailsModel. Args: assoc_info_pairs: list(tuple(str, (str, bool))). The list of associations that do not correspond to any auth_id. Yields: str. A debug string for each association. """ for _, (user_id, _) in assoc_info_pairs: yield ( 'UserAuthDetailsModel(id="%s", firebase_auth_id=None) does not ' 'correspond to a firebase_auth_id' % user_id) @classmethod def report_assoc_collisions(cls, auth_id, assoc_info_pairs): """Yields debug information for associations mapped to the same auth_id. Args: auth_id: str. The auth_id to check. assoc_info_pairs: list(tuple(str, (str, bool))). The list of associations that do not correspond to any auth_id. Yields: str. A debug string for the associations with collisions. 
""" user_id_collisions = sorted( '"%s"' % user_id for assoc_key, (user_id, _) in assoc_info_pairs if assoc_key == cls.ASSOC_BY_USER_ID_KEY) if len(user_id_collisions) > 1: yield '%d UserAuthDetailsModels have auth_id="%s": %s' % ( len(user_id_collisions), auth_id, ', '.join(user_id_collisions)) @classmethod def report_assoc_inconsistencies( cls, auth_id, assoc_info_by_auth_id=None, assoc_info_by_user_id=None, firebase_account_info=None): """Reports inconsistencies between the given values. IMPORTANT: The names of the keyword arguments MUST match the values of their corresponding class keys: ASSOC_BY_AUTH_ID_KEY, ASSOC_BY_USER_ID_KEY, FIREBASE_ACCOUNT_KEY. Args: auth_id: str. The auth_id of the user. assoc_info_by_auth_id: tuple. The (user_id, deleted) properties from the UserIdByFirebaseAuthIdModel corresponding to auth_id. NOTE: UserIdByFirebaseAuthIdModel is keyed by auth_id, so this argument will never use the default value of None. assoc_info_by_user_id: tuple|None. The (user_id, deleted) properties from the UserAuthDetailsModel corresponding to auth_id, or None if one does not exist. firebase_account_info: tuple|None. The (user_id, disabled) properties from the Firebase account corresponding to auth_id, or None if an account does not exist. NOTE: The user_id is always None. Yields: str. Debug information about a discovered inconsistency. """ if assoc_info_by_auth_id is None and assoc_info_by_user_id is None: # User is deleted and will be managed by the reduce() logic. return # NOTE: assoc_info_by_auth_id is never None because such values are # caught by report_assocs_missing(), which is called before this # function. 
user_id_of_assoc_by_auth_id, deleted_bool_of_assoc_by_auth_id = ( assoc_info_by_auth_id) if assoc_info_by_user_id is None: yield ( 'UserIdByFirebaseAuthIdModel(id="%s") does not correspond to a ' 'unique UserAuthDetailsModel' % auth_id) user_id_of_assoc_by_user_id = deleted_bool_of_assoc_by_user_id = ( None) else: user_id_of_assoc_by_user_id, deleted_bool_of_assoc_by_user_id = ( assoc_info_by_user_id) if (user_id_of_assoc_by_user_id is not None and user_id_of_assoc_by_user_id != user_id_of_assoc_by_auth_id): yield ( 'auth_id="%s" has inconsistent `user_id` assignments: ' 'UserIdByFirebaseAuthIdModel(user_id="%s") does not match ' 'UserAuthDetailsModel(id="%s")' % ( auth_id, user_id_of_assoc_by_auth_id, user_id_of_assoc_by_user_id)) if (deleted_bool_of_assoc_by_user_id is not None and deleted_bool_of_assoc_by_user_id != deleted_bool_of_assoc_by_auth_id): yield ( 'auth_id="%s" has inconsistent `deleted` assignments: ' 'UserIdByFirebaseAuthIdModel(user_id="%s", deleted=%r) does ' 'not match UserAuthDetailsModel(id="%s", deleted=%r)' % ( auth_id, user_id_of_assoc_by_auth_id, deleted_bool_of_assoc_by_auth_id, user_id_of_assoc_by_user_id, deleted_bool_of_assoc_by_user_id)) return if firebase_account_info is not None: _, firebase_account_is_disabled = firebase_account_info if (firebase_account_is_disabled # NOTE: Important that deleted_bool_of_assoc_by_auth_id is # checked first because its value will never be None (hence, # it's always meaningful). and not deleted_bool_of_assoc_by_auth_id and not deleted_bool_of_assoc_by_user_id): yield ( 'Firebase account with auth_id="%s" is disabled, but the ' 'user is not marked for deletion on Oppia' % auth_id) # Else: Firebase account needs to be updated and will be resolved by # the reduce() logic. class SeedFirebaseOneOffJob(jobs.BaseMapReduceOneOffJobManager): """Brings Firebase accounts and association models to a deterministic state. 
The following pre-conditions must hold for no errors to occur (accomplished by calling the firebase_auth_services.seed_firebase() function): 1. Exactly one FirebaseSeedModel must exist. 2. feconf.ADMIN_EMAIL_ADDRESS must correspond to a UserAuthDetailsModel where: firebase_auth_id is not None AND gae_id != feconf.SYSTEM_COMMITTER_ID. 3. feconf.ADMIN_EMAIL_ADDRESS must correspond to exactly one UserIdByFirebaseAuthIdModel where: id equals the aforementioned firebase_auth_id AND user_id is the ID of the aforementioned UserAuthDetailsModel. 4. feconf.ADMIN_EMAIL_ADDRESS must correspond to exactly one Firebase account where:
+ 187371129 -AA-GGGGATGCTAAGCCAATGAGTTGTTGTCTCTCAATGTG s dae.chr0 0 30 + 100 AAA-GGGAATGTTAACCAAATGA-----------TTACGGTG ''', '''a score=23262.0 s hg18 27578828 38 + 158545518 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG s panTro1.chr6 28741140 38 + 161576975 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG i panTro1.chr6 I 255 I 9.35 s baboon.chr0 116834 38 + 4622798 AAA-GGGAATGTTAACCAAATGA---GTTGTCTCTTATGGTG s mm4.chr6 53215344 38 + 151104725 -AATGGGAATGTTAAGCAAACGA---ATTGTCTCTCAGTGTG s rn3.chr4 81344243 40 + 187371129 -AA-GGGGATGCTAAGCCAATGAGTTGTTGTCTCTCAATGTG s dae.chr0 0 30 + 100 AAA-GGGAATGTTAACCAAATGA-----------TTACGGTG ''', '''a score=23262.0 s hg18 27578828 38 + 158545518 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG s panTro1.chr6 28741140 38 + 161576975 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG i panTro1.chr6 I 255.000001 I 9 s baboon.chr0 116834 38 + 4622798 AAA-GGGAATGTTAACCAAATGA---GTTGTCTCTTATGGTG s mm4.chr6 53215344 38 + 151104725 -AATGGGAATGTTAAGCAAACGA---ATTGTCTCTCAGTGTG s rn3.chr4 81344243 40 + 187371129 -AA-GGGGATGCTAAGCCAATGAGTTGTTGTCTCTCAATGTG s dae.chr0 0 30 + 100 AAA-GGGAATGTTAACCAAATGA-----------TTACGGTG ''', '''a score=23262.0 s hg18 27578828 38 + 158545518 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG s panTro1.chr6 28741140 38 + 161576975 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG i panTro1.chr6 & 255 C 0 s baboon.chr0 116834 38 + 4622798 AAA-GGGAATGTTAACCAAATGA---GTTGTCTCTTATGGTG s mm4.chr6 53215344 38 + 151104725 -AATGGGAATGTTAAGCAAACGA---ATTGTCTCTCAGTGTG s rn3.chr4 81344243 40 + 187371129 -AA-GGGGATGCTAAGCCAATGAGTTGTTGTCTCTCAATGTG s dae.chr0 0 30 + 100 AAA-GGGAATGTTAACCAAATGA-----------TTACGGTG ''', '''a score=23262.0 s hg18 27578828 38 + 158545518 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG s panTro1.chr6 28741140 38 + 161576975 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG i panTro1.chr6 I 255 z 0 s baboon.chr0 116834 38 + 4622798 AAA-GGGAATGTTAACCAAATGA---GTTGTCTCTTATGGTG s mm4.chr6 53215344 38 + 151104725 -AATGGGAATGTTAAGCAAACGA---ATTGTCTCTCAGTGTG 
s rn3.chr4 81344243 40 + 187371129 -AA-GGGGATGCTAAGCCAATGAGTTGTTGTCTCTCAATGTG s dae.chr0 0 30 + 100 AAA-GGGAATGTTAACCAAATGA-----------TTACGGTG ''', '''a score=23262.0 s hg18 27578828 38 + 158545518 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG s panTro1.chr6 28741140 38 + 161576975 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG i panTro1.chr6 a 255 M 0 s baboon.chr0 116834 38 + 4622798 AAA-GGGAATGTTAACCAAATGA---GTTGTCTCTTATGGTG s mm4.chr6 53215344 38 + 151104725 -AATGGGAATGTTAAGCAAACGA---ATTGTCTCTCAGTGTG s rn3.chr4 81344243 40 + 187371129 -AA-GGGGATGCTAAGCCAATGAGTTGTTGTCTCTCAATGTG s dae.chr0 0 30 + 100 AAA-GGGAATGTTAACCAAATGA-----------TTACGGTG ''', '''a score=23262.0 s hg18 27578828 38 + 158545518 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG s panTro1.chr6 28741140 38 + 161576975 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG i panTro1.chr6 I 0 C 0 s baboon.chr0 116834 38 + 4622798 AAA-GGGAATGTTAACCAAATGA---GTTGTCTCTTATGGTG s mm4.chr6 53215344 38 + 151104725 -AATGGGAATGTTAAGCAAACGA---ATTGTCTCTCAGTGTG s rn3.chr4 81344243 40 + 187371129 -AA-GGGGATGCTAAGCCAATGAGTTGTTGTCTCTCAATGTG s dae.chr0 0 30 + 100 AAA-GGGAATGTTAACCAAATGA-----------TTACGGTG ''', '''a score=23262.0 s hg18 27578828 38 + 158545518 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG s panTro1.chr6 28741140 38 + 161576975 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG i banana I 1 C 0 s baboon.chr0 116834 38 + 4622798 AAA-GGGAATGTTAACCAAATGA---GTTGTCTCTTATGGTG s mm4.chr6 53215344 38 + 151104725 -AATGGGAATGTTAAGCAAACGA---ATTGTCTCTCAGTGTG s rn3.chr4 81344243 40 + 187371129 -AA-GGGGATGCTAAGCCAATGAGTTGTTGTCTCTCAATGTG s dae.chr0 0 30 + 100 AAA-GGGAATGTTAACCAAATGA-----------TTACGGTG ''', '''a score=23262.0 i banana I 1 C 0 s hg18 27578828 38 + 158545518 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG s panTro1.chr6 28741140 38 + 161576975 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG s baboon.chr0 116834 38 + 4622798 AAA-GGGAATGTTAACCAAATGA---GTTGTCTCTTATGGTG s mm4.chr6 53215344 38 + 151104725 -AATGGGAATGTTAAGCAAACGA---ATTGTCTCTCAGTGTG s 
rn3.chr4 81344243 40 + 187371129 -AA-GGGGATGCTAAGCCAATGAGTTGTTGTCTCTCAATGTG s dae.chr0 0 30 + 100 AAA-GGGAATGTTAACCAAATGA-----------TTACGGTG ''', ] def testILines(self): """mafValidator should fail when "i" lines are malformed """ tmpDir = mtt.makeTempDir('iLines') for b in self.badBlocks: mafFile, header = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), b, g_headers) self.assertRaises(mafval.ILineFormatError, mafval.validateMaf, mafFile, options) mtt.removeDir(tmpDir) class LinesStartingWithQChecks(unittest.TestCase): badBlocks = ['''a score=0 s hg18.chr1 32741 26 + 247249719 TTTTTGAAAAACAAACAACAAGTTGG s panTro2.chrUn 9697231 26 + 58616431 TTTTTGAAAAACAAACAACAAGTTGG q panTro2.chrUn 99999999999999999999999999 s dasNov1.scaffold_179265 1474 7 + 4584 TT----------AAGCA--------- q dasNov1.scaffold_179265 99----------32239--------- banana ''', '''a score=0 s hg18.chr1 32741 26 + 247249719 TTTTTGAAAAACAAACAACAAGTTGG s panTro2.chrUn 9697231 26 + 58616431 TTTTTGAAAAACAAACAACAAGTTGG q panTro2.chrUn 99999999999999999999999999 s dasNov1.scaffold_179265 1474 7 + 4584 TT----------AAGCA--------- q dasNov1.scaffold_179265 99----------32239-----v--- ''', '''a score=0 s hg18.chr1 32741 26 + 247249719 TTTTTGAAAAACAAACAACAAGTTGG s panTro2.chrUn 9697231 26 + 58616431 TTTTTGAAAAACAAACAACAAGTTGG q panTro2.chrUn 99999999999999999999999999 s dasNov1.scaffold_179265 1474 7 + 4584 TT----------AAGCA--------- q notTheSame 99----------32239--------- ''', ] def testQLines(self): """mafValidator should fail when "q" lines are malformed """ tmpDir = mtt.makeTempDir('qLines') for b in self.badBlocks: mafFile, header = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), b, g_headers) self.assertRaises(mafval.QLineFormatError, mafval.validateMaf, mafFile, options) mtt.removeDir(tmpDir) class LinesStartingWithEChecks(unittest.TestCase): badBlocks = ['''a score=23262.0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s 
baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=0 # wrong number of fields s hg16.chr7 7707221 13 + 158545518 gcagctgaaaaca e mm4.chr6 3310102 13 + 151104725 I I ''', '''a score=23262.0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=0 # bad length s hg16.chr7 7707221 13 + 158545518 gcagctgaaaaca e mm4.chr6 3310102 -13 + 151104725 I ''', '''a score=23262.0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=0 # bad start s hg16.chr7 7707221 13 + 158545518 gcagctgaaaaca e mm4.chr6 -3310102 13 + 151104725 I ''', '''a score=23262.0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=0 # bad start s hg16.chr7 7707221 13 + 158545518 gcagctgaaaaca e mm4.chr6 banana 13 + 151104725 I ''', '''a score=23262.0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=0 # bad source Length s hg16.chr7 7707221 13 + 158545518 gcagctgaaaaca e mm4.chr6 3310102 13 + -151104725 I ''', '''a score=23262.0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=0 # bad source Length s hg16.chr7 7707221 13 + 158545518 gcagctgaaaaca e mm4.chr6 3310102 13 + I I ''', '''a score=23262.0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 
gcagctgaaaaca s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=0 # bad status s hg16.chr7 7707221 13 + 158545518 gcagctgaaaaca e mm4.chr6 3310102 13 + 151104725 m ''', ] def testELines(self): """mafValidator should fail when "e" lines are malformed """ tmpDir = mtt.makeTempDir('eLines') for b in self.badBlocks: mafFile, header = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), b, g_headers) self.assertRaises(mafval.ELineFormatError, mafval.validateMaf, mafFile, options) mtt.removeDir(tmpDir) class DuplicateColumnChecks(unittest.TestCase): badMafs = ['''a score=23262.0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=23262.0 # hg16.chr7 in this block contains duplicate columns s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 1 13 + 161576975 gcagctgaaaaca s baboon.chr0 2 13 + 4622798 gcagctgaaaaca s mm4.chr6 2 13 + 151104725 ACAGCTGAAAATA ''', '''a score=23262.0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca i panTro1.chr6 N 0 C 0 s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca i baboon.chr0 I 234 n 19 a score=0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca e mm4.chr6 53310102 13 + 151104725 I a score=0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca ''' ] def testDuplicateColumns(self): """ mafValidator should fail when a column is duplicated """ tmpDir = mtt.makeTempDir('duplicateColumns') for g in self.badMafs: mafFile, header = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), g, 
g_headers) self.assertRaises(mafval.DuplicateColumnError, mafval.validateMaf, mafFile, options) mtt.removeDir(tmpDir) def testNotTestingDuplicateColumns(self): """ mafValidator should ignore when a column is duplicated if option is switched off """ customOpts = GenericObject() customOpts.lookForDuplicateColumns = False customOpts.testChromNames = True customOpts.validateSequence = True tmpDir = mtt.makeTempDir('notTestingDuplicateColumns') for g in self.badMafs: mafFile, header = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), g, g_headers) self.assertTrue(mafval.validateMaf(mafFile, customOpts)) mtt.removeDir(tmpDir) class InconsistentSequenceChecks(unittest.TestCase): badMafs = ['''a score=23262.0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=23262.0 # hg16.chr7 in this block contains inconsistent sequences # ----------*** s hg16.chr7 27707221 13 + 158545518 gcagctgaaaTTT s panTro1.chr6 1 13 + 161576975 gcagctgaaaaca s baboon.chr0 2 13 + 4622798 gcagctgaaaaca s mm4.chr6 2 13 + 151104725 ACAGCTGAAAATA ''', '''a score=23262.0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=0 # **----------- s hg16.chr7 27707221 13 + 158545518 AAagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca i panTro1.chr6 N 0 C 0 s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca i baboon.chr0 I 234 n 19 a score=0 s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca e mm4.chr6 53310102 13 + 151104725 I a score=0 s hg16.chr7 27707221 13 + 158545518 Tcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca ''', '''a score=23262.0 s hg16.chr7 0 13 + 20 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon.chr0 249182 13 + 
4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=23262.0 # hg16.chr7 in this block contains inconsistent sequences # ----*- s hg16.chr7 0 13 - 20 agtagtatgttAt s panTro1.chr6 1 13 + 161576975 gcagctgaaaaca s baboon.chr0 2 13 + 4622798 gcagctgaaaaca s mm4.chr6 2 13 + 151104725 ACAGCTGAAAATA ''', '''a score=23262.0 s hg16.chr7 0 13 + 20 gcagctgaaaaca s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca s baboon.chr0 249182 13 + 4622798 gcagctgaaaaca s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA a score=23262.0 # hg16.chr7 in
:] pos = text.find("(") # Remove orphan closing braces. text = text.replace(")", " ") return text def _expand_macros(self, text): macros = {} new = "" for line in text.splitlines(True): if "=" in line: head, tail = line.split("=") tail = tail.replace("\n", "") macros[head.strip()] = tail.strip() else: new = new + line # Scan the text two times in case a macro uses another macro. for i in range(2): for macro in sorted(macros, key=len, reverse=True): pos = new.find(macro) while pos != -1: if new[pos - 1] in MACRO_DELIMITERS and new[pos + len(macro)] in MACRO_DELIMITERS: new = new[:pos] + macros[macro] + new[pos + len(macro) :] pos = new.find(macro, pos + len(macro)) return new def _remove_extra_spaces(self, text): while " " in text: text = text.replace(" ", " ") return text def _add_space_around_loops(self, text): pos = text.find("|:") while pos != -1: if text[pos + 2] != " ": text = text[: pos + 2] + " " + text[pos + 2 :] if text[pos - 1] != " ": text = text[:pos] + " " + text[pos:] pos = text.find("|:", pos + 3) pos = text.find(":|") while pos != -1: if text[pos - 1] != " ": text = text[:pos] + " " + text[pos:] pos = text.find(":|", pos + 3) return text def _process_specific_group(self, text, inchar, outchar): pos = text.find(inchar) while pos != -1: nextpos = text.find(inchar, pos + 1) pos2 = text.find(outchar, pos + 1) if pos2 == -1: raise Exception("Missing %s symbol..." % outchar) elif nextpos != -1 and nextpos < pos2: raise Exception("Missing %s symbol..." 
% outchar) else: format = text[pos : pos2 + 1].replace(" ", ",") format = format.replace(" ", "") text = text[:pos] + format + text[pos2 + 1 :] pos = text.find(inchar, pos2 + 1) return text def _process_groups(self, text): text = self._process_specific_group(text, "[", "]") text = self._process_specific_group(text, "{", "}") return text def _expand_line_continuation(self, text): text = text.replace("\r", "") pos = text.find("\\") while pos != -1: pos2 = text.find("\n", pos + 1) text = text[:pos] + text[pos2 + 1 :] pos = text.find("\\", pos + 1) return text def _preproc(self, text): text = self._remove_comments(text) text = self._expand_line_continuation(text) text = self._remove_extra_spaces(text) text = self._expand_macros(text) text = self._expand_tuplets(text) text = self._add_space_around_loops(text) text = self._process_groups(text) return text def setText(self, text): self.text = text def getSequences(self): sequences = [None] * self.voices text = self._preproc(self.text) lines = [l.replace("\n", "").split() for l in text.splitlines() if l] for line in lines: if line[0].startswith("#"): voice = int(line[0][1:]) if voice < self.voices: sequences[voice] = line[1:] else: sequences[0] = line return sequences class MML(PyoObject): """ Generates music sequences based on a custom MML notation. Music Macro Language (MML) is a music description language used in sequencing music on computer and video game systems. :Parent: :py:class:`PyoObject` :Args: music: string The new music code to parse. If the string is a valid path to a text file, the file is opened and its content is taken as the music code. voices: int, optional The number of voices in the music code. This number is used to initialize the internal voices that will play the sequences. Defaults to 1. loop: bool, optional If True, the playback will start again when the music reaches its end, otherwise the object just stops to send triggers. Defaults to False. poly: int, optional Per voice polyphony. 
Denotes how many independent streams are generated per voice by the object, allowing overlapping processes. Available only at initialization. Defaults to 1. updateAtEnd: bool, optional If True, voices will update their internal sequence only when the current one reaches its end, no matter when the `music` argument is changed. If False, sequences are updated immediately. Defaults to False. .. note:: MML outputs many signals identified with a string between brackets: | obj['freq'] returns an audio stream of the current note frequency. | obj['amp'] returns an audio stream of the current note amplitude. | obj['dur'] returns an audio stream of the current note duration in seconds. | obj['end'] returns an audio stream with a trigger at the end of the sequence. | obj['x'] returns an audio stream with the current value of the `x` parameter. | obj['y'] returns an audio stream with the current value of the `y` parameter. | obj['z'] returns an audio stream with the current value of the `z` parameter. obj without brackets returns the generated trigger streams of the music. The out() method is bypassed. MML's signal can not be sent to audio outs. MML has no `mul` and `add` attributes. 
>>> s = Server().boot() >>> s.start() >>> a = ''' >>> ; Title: La perdriole >>> ; Author: traditionnel >>> A = r6 o4 v40 g3 v50 o5 c d e f g5 o+ c o- b3 a g f e d c7 >>> B = |: g3 g g4 f1 e3 d c5 :| g1 g g g g g g g b-3 o+ c d7 r7 >>> C = |: o5 c4 d1 e3 f g4 o+ c1 o- b3 a g f e d e d c5 r5 :| >>> #0 t92 x.1 |: A A B C :| >>> A1 = |: r7 o4 c7 d7 e5 f g c7 :| >>> B1 = |: g7 o- b5 o+ c :| d5 d f g7 r7 >>> C1 = |: c8 d7 g c5 r5 :| >>> #1 t92 x0.25 v50 |: A1 B1 C1 :| >>> ''' >>> t = CosTable([(0,0), (64,1), (1024,1), (4096, 0.5), (8191,0)]) >>> mml = MML(a, voices=2, loop=True, poly=4).play() >>> dur = Sig(mml.getVoice(0, "dur"), mul=2) >>> tr = TrigEnv(mml.getVoice(0), table=t, dur=dur, mul=mml.getVoice(0, "amp")) >>> a = SineLoop(freq=mml.getVoice(0, "freq"), feedback=mml.getVoice(0, "x"), mul=tr).mix() >>> dur2 = Sig(mml.getVoice(1, "dur"), mul=2) >>> tr2 = TrigEnv(mml.getVoice(1), table=t, dur=dur2, mul=mml.getVoice(1, "amp")) >>> a2 = LFO(freq=mml.getVoice(1, "freq"), sharp=mml.getVoice(1, "x"), type=2, mul=tr2).mix() >>> output = STRev([a, a2], inpos=[0.2, 0.8], bal=0.2, mul=1.5).out() """ def __init__(self, music, voices=1, loop=False, poly=1, updateAtEnd=False): pyoArgsAssert(self, "SIBIB", music, voices, loop, poly, updateAtEnd) PyoObject.__init__(self) self._editor = None self._pitches = pitches = { 0: "c", 1: "c+", 2: "d", 3: "e-", 4: "e", 5: "f", 6: "f+", 7: "g", 8: "a-", 9: "a", 10: "b-", 11: "b", } self._fre_dummy = [] self._amp_dummy = [] self._dur_dummy = [] self._end_dummy = [] self._x_dummy = [] self._y_dummy = [] self._z_dummy = [] self._music = music self._voices = voices self._loop = loop self._poly = poly self._updateAtEnd = updateAtEnd if os.path.isfile(music): with open(music, "r") as f: music = f.read() self.parser = MMLParser(music, voices) self._sequences = self.parser.getSequences() self._base_players = [MMLMain_base(loop, poly, updateAtEnd) for i in range(voices)] for i in range(voices): if self._sequences[i] is not None: 
self._base_players[i].setSequence(self._sequences[i]) self._base_objs = [MML_base(wrap(self._base_players, j), i) for j in range(voices) for i in range(poly)] self._fre_objs = [ MMLFreqStream_base(wrap(self._base_players, j), i) for j in range(voices) for i in range(poly) ] self._amp_objs = [MMLAmpStream_base(wrap(self._base_players, j), i) for j in range(voices) for i in range(poly)] self._dur_objs = [MMLDurStream_base(wrap(self._base_players, j), i) for j in range(voices) for i in range(poly)] self._end_objs = [MMLEndStream_base(wrap(self._base_players, j), i) for j in range(voices) for i in range(poly)] self._x_objs = [MMLXStream_base(wrap(self._base_players, j), i) for j in range(voices) for i in range(poly)] self._y_objs = [MMLYStream_base(wrap(self._base_players, j), i) for j in range(voices) for i in range(poly)] self._z_objs = [MMLZStream_base(wrap(self._base_players, j), i) for j in range(voices) for i in range(poly)] def __getitem__(self, i): if i == "freq": self._fre_dummy.append(Dummy([obj for obj in self._fre_objs])) return self._fre_dummy[-1] if i == "amp": self._amp_dummy.append(Dummy([obj for obj in self._amp_objs])) return self._amp_dummy[-1] if i == "dur": self._dur_dummy.append(Dummy([obj for obj in self._dur_objs])) return self._dur_dummy[-1] if i == "end": self._end_dummy.append(Dummy([obj for obj in self._end_objs])) return self._end_dummy[-1] if i == "x": self._x_dummy.append(Dummy([obj for obj in self._x_objs])) return self._x_dummy[-1] if i == "y": self._y_dummy.append(Dummy([obj for obj in self._y_objs])) return self._y_dummy[-1] if i == "z": self._z_dummy.append(Dummy([obj for obj in self._z_objs])) return self._z_dummy[-1] if type(i) == slice: return self._base_objs[i] if i < len(self._base_objs): return self._base_objs[i] else: print("'i' too large!") def
<reponame>sidneycadot/pydwf """The |pydwf.core.api.digital_out| module implements a single class: |DigitalOut|.""" from typing import Tuple, List from pydwf.core.dwf_device_subapi import AbstractDwfDeviceSubApi from pydwf.core.auxiliary.typespec_ctypes import typespec_ctypes from pydwf.core.auxiliary.enum_types import (DwfTriggerSource, DwfTriggerSlope, DwfDigitalOutOutput, DwfDigitalOutType, DwfState, DwfDigitalOutIdle) from pydwf.core.auxiliary.constants import RESULT_SUCCESS from pydwf.core.auxiliary.exceptions import PyDwfError class DigitalOut(AbstractDwfDeviceSubApi): """The |DigitalOut| class provides access to the digital output (pattern generator) instrument of a |DwfDevice:link|. Attention: Users of |pydwf| should not create instances of this class directly. It is instantiated during initialization of a |DwfDevice| and subsequently assigned to its public |digitalOut:link| attribute for access by the user. """ # pylint: disable=too-many-public-methods #################################################################################################################### # # # INSTRUMENT CONTROL # # # #################################################################################################################### def reset(self) -> None: """Reset the |DigitalOut| instrument. Raises: DwfLibraryError: An error occurred while executing the *reset* operation. """ result = self.lib.FDwfDigitalOutReset(self.hdwf) if result != RESULT_SUCCESS: raise self.dwf.exception() def configure(self, start: bool) -> None: """Start or stop the |DigitalOut| instrument. Parameters: start (int): Whether to start/stop the instrument. Raises: DwfLibraryError: An error occurred while executing the *configure* operation. """ result = self.lib.FDwfDigitalOutConfigure(self.hdwf, start) if result != RESULT_SUCCESS: raise self.dwf.exception() def status(self) -> DwfState: """Return the status of the |DigitalOut| instrument. 
This method performs a status request to the |DigitalOut| instrument and receives its response. Returns: DwfState: The status of the |DigitalOut| instrument. Raises: DwfLibraryError: An error occurred while executing the *status* operation. """ c_status = typespec_ctypes.DwfState() result = self.lib.FDwfDigitalOutStatus(self.hdwf, c_status) if result != RESULT_SUCCESS: raise self.dwf.exception() status_ = DwfState(c_status.value) return status_ #################################################################################################################### # # # CHANNEL COUNT # # # #################################################################################################################### def count(self) -> int: """Get the |DigitalOut| instrument channel (digital pin) count. Returns: int: The number of digital output channels. Raises: DwfLibraryError: An error occurred while executing the operation. """ c_channel_count = typespec_ctypes.c_int() result = self.lib.FDwfDigitalOutCount(self.hdwf, c_channel_count) if result != RESULT_SUCCESS: raise self.dwf.exception() channel_count = c_channel_count.value return channel_count #################################################################################################################### # # # STATE MACHINE SETTINGS # # # #################################################################################################################### def waitInfo(self) -> Tuple[float, float]: """Get the |DigitalOut| instrument range for the |Wait:link| state duration, in seconds. Returns: Tuple[float, float]: A tuple containing the minimal and maximal configurable |Wait:link| state duration, in seconds. Raises: DwfLibraryError: An error occurred while executing the operation. 
""" c_wait_duration_min = typespec_ctypes.c_double() c_wait_duration_max = typespec_ctypes.c_double() result = self.lib.FDwfDigitalOutWaitInfo(self.hdwf, c_wait_duration_min, c_wait_duration_max) if result != RESULT_SUCCESS: raise self.dwf.exception() wait_duration_min = c_wait_duration_min.value wait_duration_max = c_wait_duration_max.value return (wait_duration_min, wait_duration_max) def waitSet(self, wait_duration: float) -> None: """Set the |DigitalOut| instrument |Wait:link| state duration, in seconds. Parameters: wait_duration (float): Digital-out |Wait:link| state duration, in seconds. Raises: DwfLibraryError: An error occurred while executing the operation. """ result = self.lib.FDwfDigitalOutWaitSet(self.hdwf, wait_duration) if result != RESULT_SUCCESS: raise self.dwf.exception() def waitGet(self) -> float: """Get |DigitalOut| instrument |Wait:link| state duration, in seconds. Returns: float: The |Wait:link| state duration, in seconds. Raises: DwfLibraryError: An error occurred while executing the operation. """ c_wait_duration = typespec_ctypes.c_double() result = self.lib.FDwfDigitalOutWaitGet(self.hdwf, c_wait_duration) if result != RESULT_SUCCESS: raise self.dwf.exception() wait_duration = c_wait_duration.value return wait_duration def runInfo(self) -> Tuple[float, float]: """Get the |DigitalOut| instrument range for the |Running:link| state duration, in seconds. Returns: Tuple[float, float]: A tuple containing the minimal and maximal |Running:link| state duration, in seconds. Raises: DwfLibraryError: An error occurred while executing the operation. 
""" c_run_duration_min = typespec_ctypes.c_double() c_run_duration_max = typespec_ctypes.c_double() result = self.lib.FDwfDigitalOutRunInfo(self.hdwf, c_run_duration_min, c_run_duration_max) if result != RESULT_SUCCESS: raise self.dwf.exception() run_duration_min = c_run_duration_min.value run_duration_max = c_run_duration_max.value return (run_duration_min, run_duration_max) def runSet(self, run_duration: float) -> None: """Set the |DigitalOut| instrument |Running:link| state duration, in seconds. Parameters: run_duration: The |Running:link| state duration, in seconds. The value 0 is special; it means *forever*. Raises: DwfLibraryError: An error occurred while executing the operation. """ result = self.lib.FDwfDigitalOutRunSet(self.hdwf, run_duration) if result != RESULT_SUCCESS: raise self.dwf.exception() def runGet(self) -> float: """Get the |DigitalOut| instrument |Running:link| state duration, in seconds. Returns: float: The |Running:link| state duration, in seconds. Raises: DwfLibraryError: An error occurred while executing the operation. """ c_run_duration = typespec_ctypes.c_double() result = self.lib.FDwfDigitalOutRunGet(self.hdwf, c_run_duration) if result != RESULT_SUCCESS: raise self.dwf.exception() run_duration = c_run_duration.value return run_duration def runStatus(self) -> int: """Get the |DigitalOut| instrument |Running:link| state duration time left, in clock cycles. This value is internally expressed as an integer with 48-bit resolution, and is measured in integer clock cycles. The C API returns it as a double-precision floating point number, to avoid using 64-bit integers. Use the :py:meth:`internalClockInfo` method to retrieve the clock frequency. Returns: int: The number of clock cycles until the nest state transition of the |DigitalOut| instrument's state machine. Raises: DwfLibraryError: An error occurred while executing the operation. 
""" c_run_status = typespec_ctypes.c_double() result = self.lib.FDwfDigitalOutRunStatus(self.hdwf, c_run_status) if result != RESULT_SUCCESS: raise self.dwf.exception() if not c_run_status.value.is_integer(): raise PyDwfError("Bad c_run_status value.") run_status = int(c_run_status.value) return run_status def repeatTriggerSet(self, repeat_trigger_flag: bool) -> None: """Specify if each |DigitalOut| pulse sequence run should wait for its own trigger. Parameters: repeat_trigger_flag (bool): If True, not only the first, both also every successive run of the pulse output sequence will wait until it receives a trigger. Raises: DwfLibraryError: An error occurred while executing the operation. """ result = self.lib.FDwfDigitalOutRepeatTriggerSet(self.hdwf, repeat_trigger_flag) if result != RESULT_SUCCESS: raise self.dwf.exception() def repeatTriggerGet(self) -> bool: """Get if each |DigitalOut| pulse sequence run should wait for its own trigger. Returns: bool: If True, not only the first, both also every successive run of the pulse output sequence will wait until it receives a trigger. """ c_repeat_trigger = typespec_ctypes.c_int() result = self.lib.FDwfDigitalOutRepeatTriggerGet(self.hdwf, c_repeat_trigger) if result != RESULT_SUCCESS: raise self.dwf.exception() repeat_trigger_flag = bool(c_repeat_trigger.value) return repeat_trigger_flag def repeatInfo(self) -> Tuple[int, int]: """Get the |DigitalOut| minimal and maximal repeat count for pulse-sequence runs. Returns: Tuple[int, int]: A tuple containing the minimal and maximal repeat count for digital-out pulse-sequence runs. Raises: DwfLibraryError: An error occurred while executing the operation. 
""" c_repeat_min = typespec_ctypes.c_unsigned_int() c_repeat_max = typespec_ctypes.c_unsigned_int() result = self.lib.FDwfDigitalOutRepeatInfo(self.hdwf, c_repeat_min, c_repeat_max) if result != RESULT_SUCCESS: raise self.dwf.exception() repeat_min = c_repeat_min.value repeat_max = c_repeat_max.value return (repeat_min, repeat_max) def repeatSet(self, repeat: int) -> None: """Set the |DigitalOut| repeat count for pulse-sequence runs. Parameters: repeat (int): Repeat count. The value 0 is special; it means *forever*. Raises: DwfLibraryError: An error occurred while executing the operation. """ result = self.lib.FDwfDigitalOutRepeatSet(self.hdwf, repeat) if result != RESULT_SUCCESS: raise self.dwf.exception() def repeatGet(self) -> int: """Set the |DigitalOut| repeat count for pulse-sequence runs. Returns: int: Repeat count. The value 0 is special; it means *forever*. Raises: DwfLibraryError: An error occurred while executing the operation. """ c_repeat = typespec_ctypes.c_unsigned_int() result = self.lib.FDwfDigitalOutRepeatGet(self.hdwf, c_repeat) if result != RESULT_SUCCESS: raise self.dwf.exception() repeat = c_repeat.value return repeat def repeatStatus(self) -> int: """Get the |DigitalOut| count of repeats remaining for the currently active output sequence. This number counts down as a digital output sequence is active. Returns: int: The repeat count status. Raises: DwfLibraryError: An error occurred while executing the operation. 
""" c_repeat_status = typespec_ctypes.c_unsigned_int() result = self.lib.FDwfDigitalOutRepeatStatus(self.hdwf, c_repeat_status) if result != RESULT_SUCCESS: raise self.dwf.exception() repeat_status = c_repeat_status.value return repeat_status #################################################################################################################### # # # TRIGGER CONFIGURATION # # # #################################################################################################################### def triggerSourceInfo(self) -> List[DwfTriggerSource]: """Get the valid |DigitalOut| trigger sources. Warning: **This method is obsolete.** Use the generic DeviceControl.triggerInfo() method instead. Returns: List[DwfTriggerSource]: A list of valid the trigger sources. Raises: DwfLibraryError: An error occurred while executing the operation. """ c_trigger_source_bitset = typespec_ctypes.c_int() result = self.lib.FDwfDigitalOutTriggerSourceInfo(self.hdwf, c_trigger_source_bitset) if result != RESULT_SUCCESS: raise self.dwf.exception() trigger_source_bitset = c_trigger_source_bitset.value trigger_source_list = [trigger_source for trigger_source in DwfTriggerSource if trigger_source_bitset & (1 << trigger_source.value)] return trigger_source_list def triggerSourceSet(self, trigger_source: DwfTriggerSource) -> None: """Set the |DigitalOut| trigger source. Parameters: trigger_source (DwfTriggerSource): The trigger source to be configured. Raises: DwfLibraryError: An error occurred while executing the operation. """ result = self.lib.FDwfDigitalOutTriggerSourceSet(self.hdwf, trigger_source.value) if result != RESULT_SUCCESS: raise self.dwf.exception() def triggerSourceGet(self) -> DwfTriggerSource: """Get the currently selected instrument trigger source. Returns: DwfTriggerSource: The currently selected instrument trigger source. Raises: DwfLibraryError: An error occurred while executing the operation. 
""" c_trigger_source = typespec_ctypes.DwfTriggerSource() result = self.lib.FDwfDigitalOutTriggerSourceGet(self.hdwf, c_trigger_source) if result != RESULT_SUCCESS: raise self.dwf.exception() trigger_source = DwfTriggerSource(c_trigger_source.value) return trigger_source def triggerSlopeSet(self, trigger_slope: DwfTriggerSlope) -> None: """Select the |DigitalOut| instrument trigger slope. Parameters: trigger_slope (DwfTriggerSlope): The trigger slope to be selected.
<gh_stars>0 # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._inputs import * __all__ = ['AuthorityArgs', 'Authority'] @pulumi.input_type class AuthorityArgs: def __init__(__self__, *, certificate_authority_id: pulumi.Input[str], config: pulumi.Input['AuthorityConfigArgs'], key_spec: pulumi.Input['AuthorityKeySpecArgs'], location: pulumi.Input[str], pool: pulumi.Input[str], gcs_bucket: Optional[pulumi.Input[str]] = None, ignore_active_certificates_on_deletion: Optional[pulumi.Input[bool]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, lifetime: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a Authority resource. :param pulumi.Input[str] certificate_authority_id: The user provided Resource ID for this Certificate Authority. :param pulumi.Input['AuthorityConfigArgs'] config: The config used to create a self-signed X.509 certificate or CSR. Structure is documented below. :param pulumi.Input['AuthorityKeySpecArgs'] key_spec: Used when issuing certificates for this CertificateAuthority. If this CertificateAuthority is a self-signed CertificateAuthority, this key is also used to sign the self-signed CA certificate. Otherwise, it is used to sign a CSR. Structure is documented below. :param pulumi.Input[str] location: Location of the CertificateAuthority. A full list of valid locations can be found by running `gcloud privateca locations list`. :param pulumi.Input[str] pool: The name of the CaPool this Certificate Authority belongs to. 
:param pulumi.Input[str] gcs_bucket: The name of a Cloud Storage bucket where this CertificateAuthority will publish content, such as the CA certificate and CRLs. This must be a bucket name, without any prefixes (such as `gs://`) or suffixes (such as `.googleapis.com`). For example, to use a bucket named my-bucket, you would simply specify `my-bucket`. If not specified, a managed bucket will be created. :param pulumi.Input[bool] ignore_active_certificates_on_deletion: This field allows the CA to be deleted even if the CA has active certs. Active certs include both unrevoked and unexpired certs. Use with care. Defaults to `false`. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels with user-defined metadata. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. :param pulumi.Input[str] lifetime: The desired lifetime of the CA certificate. Used to create the "notBeforeTime" and "notAfterTime" fields inside an X.509 certificate. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] type: The Type of this CertificateAuthority. > **Note:** For `SUBORDINATE` Certificate Authorities, they need to be manually activated (via Cloud Console of `gcloud`) before they can issue certificates. Default value is `SELF_SIGNED`. Possible values are `SELF_SIGNED` and `SUBORDINATE`. 
""" pulumi.set(__self__, "certificate_authority_id", certificate_authority_id) pulumi.set(__self__, "config", config) pulumi.set(__self__, "key_spec", key_spec) pulumi.set(__self__, "location", location) pulumi.set(__self__, "pool", pool) if gcs_bucket is not None: pulumi.set(__self__, "gcs_bucket", gcs_bucket) if ignore_active_certificates_on_deletion is not None: pulumi.set(__self__, "ignore_active_certificates_on_deletion", ignore_active_certificates_on_deletion) if labels is not None: pulumi.set(__self__, "labels", labels) if lifetime is not None: pulumi.set(__self__, "lifetime", lifetime) if project is not None: pulumi.set(__self__, "project", project) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="certificateAuthorityId") def certificate_authority_id(self) -> pulumi.Input[str]: """ The user provided Resource ID for this Certificate Authority. """ return pulumi.get(self, "certificate_authority_id") @certificate_authority_id.setter def certificate_authority_id(self, value: pulumi.Input[str]): pulumi.set(self, "certificate_authority_id", value) @property @pulumi.getter def config(self) -> pulumi.Input['AuthorityConfigArgs']: """ The config used to create a self-signed X.509 certificate or CSR. Structure is documented below. """ return pulumi.get(self, "config") @config.setter def config(self, value: pulumi.Input['AuthorityConfigArgs']): pulumi.set(self, "config", value) @property @pulumi.getter(name="keySpec") def key_spec(self) -> pulumi.Input['AuthorityKeySpecArgs']: """ Used when issuing certificates for this CertificateAuthority. If this CertificateAuthority is a self-signed CertificateAuthority, this key is also used to sign the self-signed CA certificate. Otherwise, it is used to sign a CSR. Structure is documented below. 
""" return pulumi.get(self, "key_spec") @key_spec.setter def key_spec(self, value: pulumi.Input['AuthorityKeySpecArgs']): pulumi.set(self, "key_spec", value) @property @pulumi.getter def location(self) -> pulumi.Input[str]: """ Location of the CertificateAuthority. A full list of valid locations can be found by running `gcloud privateca locations list`. """ return pulumi.get(self, "location") @location.setter def location(self, value: pulumi.Input[str]): pulumi.set(self, "location", value) @property @pulumi.getter def pool(self) -> pulumi.Input[str]: """ The name of the CaPool this Certificate Authority belongs to. """ return pulumi.get(self, "pool") @pool.setter def pool(self, value: pulumi.Input[str]): pulumi.set(self, "pool", value) @property @pulumi.getter(name="gcsBucket") def gcs_bucket(self) -> Optional[pulumi.Input[str]]: """ The name of a Cloud Storage bucket where this CertificateAuthority will publish content, such as the CA certificate and CRLs. This must be a bucket name, without any prefixes (such as `gs://`) or suffixes (such as `.googleapis.com`). For example, to use a bucket named my-bucket, you would simply specify `my-bucket`. If not specified, a managed bucket will be created. """ return pulumi.get(self, "gcs_bucket") @gcs_bucket.setter def gcs_bucket(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "gcs_bucket", value) @property @pulumi.getter(name="ignoreActiveCertificatesOnDeletion") def ignore_active_certificates_on_deletion(self) -> Optional[pulumi.Input[bool]]: """ This field allows the CA to be deleted even if the CA has active certs. Active certs include both unrevoked and unexpired certs. Use with care. Defaults to `false`. 
""" return pulumi.get(self, "ignore_active_certificates_on_deletion") @ignore_active_certificates_on_deletion.setter def ignore_active_certificates_on_deletion(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "ignore_active_certificates_on_deletion", value) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Labels with user-defined metadata. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. """ return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter def lifetime(self) -> Optional[pulumi.Input[str]]: """ The desired lifetime of the CA certificate. Used to create the "notBeforeTime" and "notAfterTime" fields inside an X.509 certificate. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". """ return pulumi.get(self, "lifetime") @lifetime.setter def lifetime(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "lifetime", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: """ The Type of this CertificateAuthority. > **Note:** For `SUBORDINATE` Certificate Authorities, they need to be manually activated (via Cloud Console of `gcloud`) before they can issue certificates. Default value is `SELF_SIGNED`. Possible values are `SELF_SIGNED` and `SUBORDINATE`. 
""" return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @pulumi.input_type class _AuthorityState: def __init__(__self__, *, access_urls: Optional[pulumi.Input[Sequence[pulumi.Input['AuthorityAccessUrlArgs']]]] = None, certificate_authority_id: Optional[pulumi.Input[str]] = None, config: Optional[pulumi.Input['AuthorityConfigArgs']] = None, create_time: Optional[pulumi.Input[str]] = None, gcs_bucket: Optional[pulumi.Input[str]] = None, ignore_active_certificates_on_deletion: Optional[pulumi.Input[bool]] = None, key_spec: Optional[pulumi.Input['AuthorityKeySpecArgs']] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, lifetime: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, pem_ca_certificates: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, pool: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, state: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[str]] = None, update_time: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering Authority resources. :param pulumi.Input[Sequence[pulumi.Input['AuthorityAccessUrlArgs']]] access_urls: URLs for accessing content published by this CA, such as the CA certificate and CRLs. :param pulumi.Input[str] certificate_authority_id: The user provided Resource ID for this Certificate Authority. :param pulumi.Input['AuthorityConfigArgs'] config: The config used to create a self-signed X.509 certificate or CSR. Structure is documented below. :param pulumi.Input[str] create_time: The time at which this CertificateAuthority was created. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
:param pulumi.Input[str] gcs_bucket: The name of a Cloud Storage bucket where this CertificateAuthority will publish content, such as the CA certificate and CRLs. This must be a bucket name, without any prefixes (such as `gs://`) or suffixes (such as `.googleapis.com`). For example, to use a bucket named my-bucket, you would simply specify `my-bucket`. If not specified, a managed bucket will be created. :param pulumi.Input[bool] ignore_active_certificates_on_deletion: This field allows the CA to be deleted even if the CA has active certs. Active certs include both unrevoked and unexpired certs. Use with care. Defaults to `false`. :param pulumi.Input['AuthorityKeySpecArgs'] key_spec: Used when issuing certificates for this CertificateAuthority. If this CertificateAuthority is a self-signed CertificateAuthority, this key is also used to sign the self-signed CA certificate.
Laungminbha Buddhist Monastery', 'Temple of the Emerald Buddha - Wat Phra Si Rattana Satsadaram / Wat Phra Kaew', 'วัดพุทธปัญญา เสนา Buddhapanya Monastery Sena', 'Wat Sam Phran', 'Wat Phra Chetuphon (Wat Pho)', 'Wat Suthat Thepwararam Ratchaworahawihan', 'သန္တိသုခတောရ Santisukha Forest Monastery', 'Wat Mangkon Kamalawat (Wat Leng Noei Yi)', 'Wat Arun Ratchawararam Ratchaworamahawihan', 'Wat Bowonniwet', 'Loha Prasat (Wat Ratchanatdaram Worawihan)', 'Klyanaryarma Kadinwa Monastery ကလျာဏာရာမကဒင်ဝကျောင်း', 'ကြက်ယက်တွင်းအနောက်ကျောင်း Kyetyettwin western Monastery', 'Phra Ubosot (The Chapel)', 'Erawan Shrine', 'Wat Prayun Wongsawat Worawihan', 'Thabarwa Nature Center TNC Thailand MAHACHAI', 'Wat Rakhang Kositaram Woramahawihan', 'Wat Pathum Khongkha Ratchaworawihan (Wat Sampheng)', 'Wat Chai Chana Songkhram (Wat Tuek)', 'Wat Chana Songkhram Ratchaworamahawihan', 'Wat Molilok Kayaram Ratchaworawihan', 'Wat Sitaram (Wat Khok Mu)', 'Wat Paknam Phasi Charoen', 'Wat Mahathat Yuwararangsarit Royal Palace', 'วัดไตรมิตรวิทยาราม', 'วัดรางหมัน หลวงปู่แผ้ว ปวโร', 'Phra Pathom Chedi', 'Wat Amarinthraram Worawihan', 'Wat Suthamwadee', 'Wat Maha Pruettharam Worawihan', 'Wat Rat Warin Thon', 'San Buddhist Monastery', 'Wat Samian Nari', 'ศูนย์พุทธศรัทธา', 'Wat Bang Phra', 'Wat Intharawihan (Wat Rai Phrik)', 'วัดโคกม่วง', 'Alesu monastery အလယ်စုကျောင်း', 'Wat Tham Krabok', 'Wat Ratchapradit Sathitmahasimarama Ratchaworawihan (Wat Ratchapradit Sathit Dhammayuttikaram)', 'SamakkheeDham Monastery', 'Wattara Bhikkhuni Patimokkha Dhamma Tipitaka', 'Phu Hung Monastery', 'Prasat Silavann monastery', 'Wat Nong Pa Phong', 'Kham Charoen Forest Monastery', 'Wat Nantikaram Buddhism Broadcast Center', 'Wat Pa Mahasan', 'วัดป่าแก่นธรรม Kaentham Forest Monastery', 'Wat Mahathat', 'Big Buddha Image', 'Wat Pa Bueng Khao Luang', 'Wat Tai', 'Wat Nong Krai', 'Wat PA Ban Nong Wai', 'Jediabanphot Monastery', 'Ruam Jai Wiwakaram Dharma Park', 'Wat Amnat', 'Buddhist Monastery, NansanKhan', 'Nanmekhon Buddhist 
Monastery', 'Wat Samran Niwet', 'Wat Burapharam', 'Wat Khemaram', 'วัดโพนสูง (หลังสาธารณสุขอำเภอสุวรรณภูมิ)', 'Wat Pa Kham Bon', 'Wat Pa Mettatham', 'ธรรมสถานกัลยานุสรณ์', 'วัดสาวแห', 'Buddhist Monastic Complex', 'Mahabodhi Buddha Vihara', 'Dhyana Buddha Temple', 'The Buddhist Centre', "Nagarjuna's Monastery", 'Tatakonda Buddhist Site', 'Buddhist monastery ( बौद्ध विहार )', 'Drepung Loseling Monastery', 'Iskcon Temple', 'Iskcon', 'Sri Sakshi Bhavanarayana Swamy Temple', 'Chaturmukha Bramha Temple, A.P. State protected Monument', 'Ruined Amaravati Buddhist Stupas & Remains,Archaeological Survey of India', 'Sri Sri Radha Krishna Chandra Temple - ISKCON', 'VaikuntaPuram Venkateswara Swamy Temple', 'Sri Anjaneya Swamy Temple', 'Sai Baba Temple, Santhapeta, Ongole', 'Nipponzan Myohoji Buddhist Temple', 'Uppalapadu Birds Sanctuary', 'Sabkamgong Buddhist Monastery', 'Anandagiri Forest Monastery', 'วัดป่าซับคำกอง', 'Wat Noon FakThon Monastery วัดโนนฟักทอง อ.สนั่น กัลยาณธัมโม', 'สถานธรรมซับไพรวัลย์ ตำบลบ้านโภชน์ อำเภอหนองไผ จังหวัดเพชรบูรณ์ 67140', 'วัดห้วยงาช้าง(สวนศานติธรรม)', 'Buddhist Patriarchs Institute Priest Land Wat Payap Sap Yai', 'Wat Chan Nimit', 'Samaggasukha Sirisujanarama Monastery သမဂ္ဂသုခ သီရိသုဇနာရာမကျောင်း', 'พุทธพรหมปัญโญ(ดู่)สาขายางงาม', 'วัดวิมุตตาราม', 'Wat Nam Lao', 'Wat Thung Thong', 'Wat Huai Nga Chang', 'ၵွျင်းႁူဝ်ၵုၼ် Buddhist Monastery', 'Wat Wang Thong Charoen Tham', 'วัดป่ารัตนวัน', 'Nanchon Buddhist Monastery', 'วัดป่าอรัญญาวาส', 'สำนักสงฆ์ป่าเนินสวรรค์', 'Shan Buddha Monastery', 'สำนักสงฆ์สวนป่าพัชราภรณ์', 'Wang Nam Mok Buddhist Monastery', 'Wat Kao Nhot', 'Vat Tai Yai', 'Wat Si Saket', 'Wat Pa Phu Kon', 'Hophakaew Museum', 'ที่พักสงฆ์บ่อทองวณาราม Bo Thong Buddist Monastery', 'Pha That Luang Vientiane', 'Wat Si Muang', 'Wat Pa Ban Tat', 'Wat Pho Chai', 'Buddha Park', 'Wat Ong Teu', 'Wat Na Luang (Wat Aphinya Thesittatham)', 'Patuxay Monument', 'Ban Cho Buddhist Monastery', 'Wat Pa Daeng', 'Doi Pui Phu Phing Monastery, Luang Pu Phiroj', 
'Sanmuang Pracharam Bureau of Monks', 'Avalokiteshvara Buddhist Center', 'Wat Tham Doi Ton', 'Chitta Bhawanaram Forest Monastery', 'Wat Buppharam', 'အောင်မင်္ဂလာကျောင်းတိုက်', 'Wat MayPole ၵျွင်းမႆႉပူၵ်ႇ buddhist Monastery', 'Wat Umong', 'Wat Chiang Man', 'Wat Chedi Luang', 'Wat Phantao', 'Wat Suan Dok', 'Buddhist Monastery, U-Mindin', 'Mingalaryarma monastery', 'Wat Phra Singh Woramahawihan', 'Pong Lao Monastery ဝတ်ႉပူင်းလၢဝ်း', 'Wat Phrathat Haripunchai Woramahawihan', 'Wat Chetawan', 'Wat Sri Suphan', 'Nam Lot Monastery ဝတ်ႉၼမ်ႉလွတ်ႇ', 'Naung Mya Hlwe Monastery', 'Wat Ou Sai Kham', 'Wat Saen Fang', 'Old Wep Gyi Monastery', 'PitakaTarkara Monastery', 'International Vipassana Meditation Centre at Wat Phradat Sri Chomtong', 'Wat Chiang Khang', 'Wat Pha Lat', 'Wat Ram Poeng (Tapotaram)', 'Wat Pha Bong (Mank Kalaram)', 'Wat Nirotharam', 'Maha dhyan bhumi mahavihar, Indosan Sogenji, Zen Buddhist Monastery', 'Vishwashanti Buddhist Monastery', 'Lumbini Buddhist monastery ( लुम्बिनी बौद्ध विहार )', 'Bouddha Vihar', 'Nalanda Buddha Vihar', 'Dhammakuti Buddha Vihar', 'Jetvan Buddha Vihara & Tourist Place', 'Shraddha Buddha Vihar', 'DhammaKrithi Buddha Vihar', 'Triratna Buddha Vihar', 'Dhammadutt Yuva Vikas Samiti', 'Nagbodhi Prabuddha Buddha Vihar', 'Golden Buddha Statue', 'Buddha Vihara', 'Brahma Kumaris Pusad Vithabai Nagar', 'Brahma Kumaris Hadgaon', 'Jetwan Monestry', 'Brahma Kumaris Umarkhed', 'ၵျွင်းၼႃးမၢၵ်ႇၶေႃ🙏 Monastery Temple🙏', 'Loi Khaw Buddhist Monastery ၵျွင်းလွႆၶေႃ လွယ်ခေါ်ကျောင်းတိုက်', 'ၵျွင်းလွႆၶေႃ Loikaw buddhist monastery', 'Kungleng monastery ၵျွင်းၵုင်းလႅင်း', 'ဝိႁၢရ်ၵူဝ်းတွင်ႇ GoDaung Monastery', 'Pang Law Monastery', 'ဝိႁၢၼ်ႇၼႃးမူၼ်း-Narmoon Monastery', 'ဝိႁၢရ်မၢတ်တယႃႇ Marddayar Monastery', 'ဝိႁၢၼ်ႇၽူဝ်တေႃႇ-Pho Daw Monastery', 'ဝိႁၢရ်ႇၼွင်ေၵႃႈ NawngGaw Monastery', 'Naung Wo Monastery', 'ဝတ်လူင်ၵၢင်ႉေမႃႇ - Garng Maw Monastery', 'ဝိႁၢၼ်ႇၼွင်ဢၢင်ႇ-Nawng Arng Monastery', 'ဝိႁၢၼ်ႇလူင်ယူမ်း-Long Yom Monastery', 'ဝိႁၢၼ်ႇသူပ်းလင်း - Sopt Lang Monastery', 
'ဝိႁၢၼ်ႇတူၼ်ႈၵႅင်း မွၼ်း-Don Geng Monastery', 'ဝိႁၢၼ်ႇမိူင်းလင်း - Murng Lang Monastery', 'Yaytakun Tawya Kyaung Monastery', 'Aung Min Ga Lar Monastery (အောင်မင်္ဂလာကျောင်းတိုက်)', 'ဝိႁၢရ်ၵွျင်းၼႃးပၢင်ႇ Nar Barng Monastery', 'upper Pann Ma Buddhist monastery', 'Kyao Kham Monastery ချိုခမ်းကျောင်း ၵျွင်းၶဵဝ်ၶမ်း', 'Tong Lao Monastery', 'Kandaw buddhist monastery', 'Wan Mone Monastery - ၵျွင်းဝၢၼ်ႈမူင်ႈ', 'Shan Tai Buddhist Monastery', 'Myo Thit Monastery', 'ဝိႁၢရ်ႇၼမ်ႉပႅၼ်ႈ Nam Ben Monastery', 'ၵျွင်းၼွင်ဝၼ်း နောင်ဝမ်းကျောင်း Naungwan Monastery', 'ဝတ်ႉပၢင်ၸူင် Pang Sone Monastery', 'ဝိႁၢရ်ႇဝၢၼ်ႈႁၢႆး Warn Haai Monastery', 'ဝတ်ႉလူင်လွႆၸွင်ႈ Lowe Shount Monastery', 'ဝိႁၢရ်ႇပၢင်မႅင်း Barng Meng Monastery', 'Maha Nanda Kantha (Bamboo Buddha Monastery)', 'Keng Mong Monastery Kyaukme', 'Son Hin Monastery', 'ဝိႁၢရ်ႇၼွၵ်ႈၵၢတ်ႇ Nawk Gart Monastery', 'Pansari Monastery(ပန်စရီကျောင်းတိုက်)', 'ဝိႁၢၼ်ႇၼႃးၵၢင် NarGarng Monastery', 'ကံ့ကော်တစ်ထောင်ဘုရား', 'Man Phat Monastery', 'ဝိႁၢၼ်ႇသီႇထွၵ်ႈ Sithout Monastery', 'Ho Nam Village Monastery', 'Maha Nanda Kantha Monastery', 'ဝိႁၢရ်ႇပၢင်ၼိဝ်ႉ Barng Niue Monastery', 'ၵွျင်းလူင်မူိင်းၶူိဝ်း Mong Hko Monastery', 'ဝိႁၢၼ်ႇဝၢၼ်ႈၼွင်-Wan Nawng Monastery', 'Yae Haw Monastery', 'ဝိႁၢရ်ႇ ၵုၼ်ၼႃး Kunna Monastery', 'ဝိႁၢၼ်ႇဝၢၼ်ႈပိင်ႈ WAN PING MONASTERY', 'Ten Thousand Buddhas Monastery', 'Cham Shan Monastery', 'Po Lin Monastery', 'Su Bong Zen Monastery', 'Tsz Shan Monastery', 'Wat Buddha Vipsasana Dham Thai temple', 'Fa Hong Monastery 法航精舍', 'Chuk Lam Sim Monastery', 'Buddhist Dharmananada Lotus Monastery', 'Tung Chung Lo Hon Monastery', 'Grand Hall of Ten Thousand Buddhas, Po Lin Monastery', 'Cham Shan Monastery Niche', 'Ling To Tsz', 'Western Monastery', 'Sai Chuk Lam Monastery', 'Ting Wai Monastery', 'Ching Shan Monastery', 'Cheung Shan Monastery', 'Miu Kwok Monastery', 'Hok Shan Monastery', 'Miu Fat Buddhist Monastery Elderly Home', 'Yuan Ming Monastery', 'HONGKONG DHAMMARAM TEMPLE', 'Kwun Yam Shan Ling Wan Monastery', 'Kuen Yuen Tung 
Monastery', 'Po Kwong Monastery', 'Lo Hon Buddhist Monastery Ltd', 'Yuen Tung Monastery', 'Fat Yuen Monastery', 'Shek Mun Kap Lo Hon Monastery', '香港密宗寧瑪派白玉佛法中心 Penor Rinpoche Charity Foundation (白玉中心 Palyul Center)', 'Ling Yan Monastery', 'Tsu Hsing Monastery', 'Chi Lin Nunnery Main Hall', 'Gig Lok Monastery', 'Avalokitesvara (Guan Yin) Statue', 'Gak Su Zen Retreat Center', 'Guanyin Temple', '寶林寺', 'Maitreya Hall', 'Pou Tai Un Temple', 'Asian Institute of Applied Buddhism, Lotus Pond Temple', '<NAME> Lung Secondary School of Miu Fat Buddhist Monastery', 'Chee Hon Monastery', 'Chi Lin Nunnery Hall of Kwun Yam', 'Tripitaka Library', '青山寺', 'Universal Gate Hall', 'Kadampa Meditation Centre Hong Kong', 'Tung Lin Kok Yuen', 'Dudjom Buddhist Association (International)', 'Hongfa Temple', 'Tung PO To', 'Kai Ming Temple', 'Pavilion of Absolute Perfection, Nan Lian Garden', 'Old Tibetan Buddhist monastery', 'GYALTON KARYING DUBDE LING (GKDL) Monastery, Camp # 3, Kamleshwarpur', 'Mainpat Monastery (camp No. 
1)', 'Tamang Buddhist Monastery', 'Tibetan Temple', 'She<NAME> Dargyeling', 'Royal Bhutanese Monastery', 'Buddhist Monasteries', 'Great HolyLand Monastery', 'Burmese Monastery', 'Tergar Monastery', 'Royal Bhutan Monastery', 'Cambodian Monastery', 'Karma Temple Camp No 2', 'Anand Buddhist Temple & Monastery', 'Buddha Mandir', 'Buddha Monastery', 'Budhha Temple', 'All India Bhikkhu Sangha', 'Daijokyo Buddist Temple', 'Tibetan monastery', 'Parpatiya Sunset point', 'MAHAYANA BUDDHIST MONASTERY', 'Wat Thai Buddhagaya', 'Myanmar Buddhist Vihara', 'Do Sanh Vietnam Temple', 'Tara Temple, yulo koepa', 'Tshoka Buddhist Monastery', 'Buddist Temple', 'Zhong Hua Buddhist Monastery [Chinese Temple]', 'Thrangu Tashi Choling Monastery (GUMBA)', 'Shangya Chhyoling Buddhist Monastary', 'First Temple Of Camp.No 1', 'Burmese Temple Bodhgaya', 'Thonje Buddhist Monastery (Gumba)', 'Yiga Choeling Monastery', 'Mahabodhi Chinese Temple', 'Budha Vihar Temple', 'Chandragiri', 'Jangchub Choeling Monastery', 'Giant Buddha', 'Mahamevnawa Sunshine Meditation Center', 'Florida Buddhist Vihara', 'Tu Viện Quan Âm', 'Chua Phat Phap - Southwest Florida Buddhist Inc', 'Wat Mongkolratanaram', 'Mahamevnawa Meditation Monastery Florida', 'Drong Ngur Jangchubling Buddhist Center', 'Sarasota Forest Monastery (SFM)', 'Mongkolratanaram', 'Watlao Mixayaram', 'Soka Gakkai International USA', 'Rissho Kosei-kai of Tampa Bay', 'Wat Florida Dhammaram', 'Kadampa Meditation Center Tampa Bay', 'WATTAMPA', 'Tu Viện Hạnh Phúc - Happy Monastery', 'Wat Lao Dhammavanno Buddhist Temple', 'Chùa Báo Ân', 'White Sands Buddhist Center', 'Vien Giac Buddhist Monastery', 'Guang Ming Temple 佛光山光明寺', 'Clear Water Zen Center', 'Chua Tu Quang', 'Bodhi Tree Dhamma Center', 'Phap Vu Buddhist Cultural', 'Wat Navaram Buddhist Temple', 'Dhamma Wheel Meditation Society', 'Nitadsasanakhun Buddhist Temple Of Florida', 'Karma Thegsum Chöling', 'Tu Viện Hương Hải', 'Wat Buddharangsi Buddhist Temple', 'Chua Loc Uyen Vietnamese Buddhist Temple', 
'Open Awareness Buddhist Center', 'Ba La Mat Buddhist Center', 'Hindu Temple of Florida', 'Phuoc Hue Vietnamese Buddhist Temple', 'PHUOC VIEN BUDDHIST TEMPLE', 'Long Van Temple, USA', 'The Dalí (Salvador Dalí Museum)', 'Kadampa Meditation Center Fort Lauderdale', 'Wat Khmer Savy Rattanaram', 'Tam Bao Tu', 'Florida Community of Mindfulness', 'St. Petersburg Shambhala Center', 'Hokori Zen Center', 'Mindfulness Meditation Centers', 'Carolina Buddhist Vihara', 'Holy Name Monastery', 'Cambodian Buddhist Temple of Georgia', 'Shree Urgen Choling Buddhist Monastery', 'Pema Dechenling monastery', 'Mila Kagyu Ling Monastery', "Pema Ts'al Sakya Monastic Institute", 'Kunga Dorje Sanag Terling Monastery', 'Matepani Gumba', 'Taklung Kagyu Domsumling Monastery', 'Bauddha Monastry', 'Ram<NAME>ba', 'Mei-Lung PemChoilung Buddhist Monastery', 'Shree Onta Kriti Vihar', 'Non-Violence World Peace Buddhist Organization', 'Deleg Dodeng Thayaling Yiengma Monastery (Tamang Ghedung)', 'The Tibetan Encounter Day Tours P. 
Ltd', 'Nyingma Palyul Buddhist monastery', 'Thai Monastery', 'Tusita Hermitage Nepal branch', 'Tharlam Monastery', 'Dhimelvaham Buddhist Monastery', 'Kopan Monastery', 'Japanese Monastery Lumbini ( Japanese Temple)', 'Urgen Dorjee Choling Buddhist Centre Singapore Temple', 'Kyidong Tashi Samtenling Monastery བཀྲ་ཤིས་བསམ་གཏན་གླིང།', 'Ganden Yiga Chozin Buddhist Meditation Centre', 'Dechhen Tharbaling Bouddha Gumba', 'Hinang Monastery', 'Shechen Monastery', 'Wochen Thukje Choeling Monastery', 'Tegchen Legshey Ling', 'Drubgyud Choling Monastery', 'Druk Amitabha Monastery', 'Shanteng monastery', 'Samye Memorial Monastery', 'तेर्गार गुम्बा Tergar Osel Ling Monastery', 'Tsechen Shedup Ling Sakya Tharig Monastery', 'Namobuddha Monastery', 'Lumbini Dharmodaya Buddha Temple', 'Ribum Monastery', 'Chusang Tazang', 'Chhairo Gompa', 'Dongak Nyida Dzungdrel Sherab Raltri Ling Monastery', 'Indian Monastery', 'CHELE Monastery (GOMPA)', 'Neydo Tashi Chöling Monastery', 'karma Samteling monastery', 'World Peace Pagoda', 'Khutsab Terenga Ghumba', 'Shelkar Monastery', 'Wat Dhammaratanaram', 'Chua Huong Dam', 'Phuoc Hue Buddhist Temple', 'Sitagu Buddha Vihara - Buddhist Temple', 'Lien Hoa Buddhist Temple (Đạo Tràng Liên Hoa)', 'Texas Buddhist Association', 'Houston Buddhist Vihara (Temple)', 'Chung Tai Zen Center of Houston', 'Linh Son Buddhist Temple', 'Chung Tai International Retreat Center', 'Rissho Kosei-Kai Buddhist Center of San Antonio', 'Tu Vien Lien Hoa', 'Wat Buddhananachat of Austin', 'English Dharma Group@Jade Buddha Temple', 'Fo Guang Shan Xiang Yun Temple', 'Buddhist Temple of Dallas', 'Texas Guandi Temple', 'Teen How Taoist Temple', 'Buddhist Center of Dallas', 'Phap Luan Buddhist Culture Center', 'Tinh Luat Temple', 'Buddha Meditation Centre Winnipeg - Mahamevnawa Buddhist Monastery Winnipeg', 'Vajrapani Kadampa Buddhist Center', 'Tu Viện A Nan', 'Phước Viện Quán Âm', 'Kim Cang Monastery', 'An Lac Hanh Buddhist Temple', 'Northeast Fl Buddhist Association', 'Buddhist Tzu 
Chi Foundation', "Disney's Animal Kingdom Theme Park", 'Lakeland Insight Meditation Group', 'ISKCON Orlando', 'North Carolina Buddhist Temple', 'Fish Lake', 'Thathana Parla Buddhist Monastery', 'Wat Punyawanaram', 'Volusia Buddhist Fellowship', 'Magic Kingdom Park', 'Wat Kanteyaram', 'SGI-USA Atlanta Buddhist Center', 'Ni Vien Pho Hien', 'Hindu Mandir Of Daytona Beach', 'Wat Phouthapasaram Temple', 'Samye Choling Monastery', 'Dolpo Pu Monastery', 'Jumlang Chorten', 'Old Namgung Monastery', 'Dolpo
to forward-map the dataset containing the boxcar event samples. If None (default) a FlattenMapper is employed to convert multi-dimensional sample matrices into simple one-dimensional sample vectors. This option can be used to implement temporal compression, by e.g. averaging samples within an event boxcar using an FxMapper. Any mapper needs to keep the sample axis unchanged, i.e. number and order of samples remain the same. Returns ------- Dataset One sample per each event definition that has been passed to the function. Additional event attributes are included as sample attributes. Examples -------- The documentation also contains an :ref:`example script <example_eventrelated>` showing a spatio-temporal analysis of fMRI data that involves this function. >>> from mvpa2.datasets import Dataset >>> ds = Dataset(np.random.randn(10, 25)) >>> events = [{'onset': 2, 'duration': 4}, ... {'onset': 4, 'duration': 4}] >>> eds = eventrelated_dataset(ds, events) >>> len(eds) 2 >>> eds.nfeatures == ds.nfeatures * 4 True >>> 'mapper' in ds.a False >>> print eds.a.mapper <Chain: <Boxcar: bl=4>-<Flatten>> And now the same conversion, but with events specified as real time. This is on possible if the input dataset contains a sample attribute with the necessary information about the input samples. >>> ds.sa['record_time'] = np.linspace(0, 5, len(ds)) >>> rt_events = [{'onset': 1.05, 'duration': 2.2}, ... {'onset': 2.3, 'duration': 2.12}] >>> rt_eds = eventrelated_dataset(ds, rt_events, time_attr='record_time', ... match='closest') >>> np.all(eds.samples == rt_eds.samples) True >>> # returned dataset e.g. 
has info from original samples >>> rt_eds.sa.record_time array([[ 1.11111111, 1.66666667, 2.22222222, 2.77777778], [ 2.22222222, 2.77777778, 3.33333333, 3.88888889]]) """ # relabel argument conv_strategy = {'prev': 'floor', 'next': 'ceil', 'closest': 'round'}[match] if not (event_offset is None and event_duration is None): descr_events = [] for ev in events: # do not mess with the input data ev = copy.deepcopy(ev) if event_offset is not None: ev['onset'] += event_offset if event_duration is not None: ev['duration'] = event_duration descr_events.append(ev) events = descr_events if time_attr is not None: tvec = ds.sa[time_attr].value # we are asked to convert onset time into sample ids descr_events = [] for ev in events: # do not mess with the input data ev = copy.deepcopy(ev) # best matching sample idx = value2idx(ev['onset'], tvec, conv_strategy) # store offset of sample time and real onset ev['orig_offset'] = ev['onset'] - tvec[idx] # rescue the real onset into a new attribute ev['orig_onset'] = ev['onset'] ev['orig_duration'] = ev['duration'] # figure out how many samples we need ev['duration'] = \ len(tvec[idx:][tvec[idx:] < ev['onset'] + ev['duration']]) # new onset is sample index ev['onset'] = idx descr_events.append(ev) else: descr_events = events # convert the event specs into the format expected by BoxcarMapper # take the first event as an example of contained keys evvars = _events2dict(descr_events) # checks for p in ['onset', 'duration']: if not p in evvars: raise ValueError("'%s' is a required property for all events." 
% p) boxlength = max(evvars['duration']) if __debug__: if not max(evvars['duration']) == min(evvars['duration']): warning('Boxcar mapper will use maximum boxlength (%i) of all ' 'provided Events.'% boxlength) # finally create, train und use the boxcar mapper bcm = BoxcarMapper(evvars['onset'], boxlength, space=eprefix) bcm.train(ds) ds = ds.get_mapped(bcm) if event_mapper is None: # at last reflatten the dataset # could we add some meaningful attribute during this mapping, i.e. would # assigning 'inspace' do something good? ds = ds.get_mapped(FlattenMapper(shape=ds.samples.shape[1:])) else: ds = ds.get_mapped(event_mapper) # add samples attributes for the events, simply dump everything as a samples # attribute # special case onset and duration in case of conversion into descrete time if time_attr is not None: for attr in ('onset', 'duration'): evvars[attr] = [e[attr] for e in events] ds = _evvars2ds(ds, evvars, eprefix) return ds def fit_event_hrf_model( ds, events, time_attr, condition_attr='targets', design_kwargs=None, glmfit_kwargs=None, regr_attrs=None, return_model=False): """Fit a GLM with HRF regressor and yield a dataset with model parameters A univariate GLM is fitted for each feature and model parameters are returned as samples. Model parameters are returned for each regressor in the design matrix. Using functionality from NiPy, design matrices can be generated from event definitions with a variety of customizations (HRF model, confound regressors, ...). Events need to be specified as a list of dictionaries (see:class:`~mvpa2.misc.support.Event`) for a helper class. Each dictionary contains all relevant attributes to describe an event. HRF event model details ----------------------- The event specifications are used to generate a design matrix for all present conditions. 
In addition to the mandatory ``onset`` information each event definition needs to include a label in order to associate individual events to conditions (the design matrix will contain at least one regressor for each condition). The name of this label attribute must be specified too (see ``condition_attr`` argument). NiPy is used to generate the actual design matrix. It is required to specify a dataset sample attribute that contains time stamps for all input data samples (see ``time_attr``). NiPy operation could be customized (see ``design_kwargs`` argument). Additional regressors from sample attributes of the input dataset can be included in the design matrix (see ``regr_attrs``). The actual GLM fit is also performed by NiPy and can be fully customized (see ``glmfit_kwargs``). Parameters ---------- ds : Dataset The samples of this input dataset have to be in whatever ascending order. events : list Each event definition has to specify ``onset`` and ``duration``. All other attributes will be passed on to the sample attributes collection of the returned dataset. time_attr : str Attribute with dataset sample time stamps. Its values will be treated as in-the-same-unit and are used to determine corresponding samples from real-value onset and duration definitions. For HRF modeling this argument is mandatory. condition_attr : str Name of the event attribute with the condition labels. Can be a list of those (e.g. ['targets', 'chunks'] combination of which would constitute a condition. design_kwargs : dict Arbitrary keyword arguments for NiPy's make_dmtx() used for design matrix generation. Choose HRF model, confound regressors, etc. glmfit_kwargs : dict Arbitrary keyword arguments for NiPy's GeneralLinearModel.fit() used for estimating model parameter. Choose fitting algorithm: OLS or AR1. regr_attrs : list List of dataset sample attribute names that shall be extracted from the input dataset and used as additional regressors in the design matrix. 
return_model : bool Flag whether to included the fitted GLM model in the returned dataset. For large input data this can be problematic, as the model may contain the residuals (same size is input data), hence multiplies the memory demand. Off by default. Returns ------- Dataset One sample for each regressor/condition in the design matrix is returned. The condition names are included as a sample attribute with the name specified by the ``condition_attr`` argument. The actual design regressors are included as ``regressors`` sample attribute. If enabled, an instance with the fitted NiPy GLM results is included as a dataset attribute ``model``, and can be used for computing contrasts subsequently. Examples -------- The documentation also contains an :ref:`example script <example_eventrelated>` showing a spatio-temporal analysis of fMRI data that involves this function. >>> from mvpa2.datasets import Dataset >>> ds = Dataset(np.random.randn(10, 25)) >>> ds.sa['time_coords'] = np.linspace(0, 50, len(ds)) >>> events = [{'onset': 2, 'duration': 4, 'condition': 'one'}, ... {'onset': 4, 'duration': 4, 'condition': 'two'}] >>> hrf_estimates = fit_event_hrf_model( ... ds, events, ... time_attr='time_coords', ... condition_attr='condition', ... design_kwargs=dict(drift_model='blank'), ... glmfit_kwargs=dict(model='ols'), ... 
return_model=True) >>> print hrf_estimates.sa.condition ['one' 'two'] >>> print hrf_estimates.shape (2, 25) >>> len(hrf_estimates.a.model.get_mse()) 25 Additional regressors used in GLM modeling are also available in a dataset attribute: >>> print hrf_estimates.a.add_regs.sa.regressor_names ['constant'] """ if externals.exists('nipy', raise_=True): from nipy.modalities.fmri.design_matrix import make_dmtx from mvpa2.mappers.glm import NiPyGLMMapper # Decide/device condition attribute on which GLM will actually be done if isinstance(condition_attr, basestring): # must be a list/tuple/array for the logic below condition_attr = [condition_attr] glm_condition_attr = 'regressor_names' # actual regressors glm_condition_attr_map = dict([(con, dict()) for con in condition_attr]) # # to map back to original conditions events = copy.deepcopy(events) # since we are modifying in place for event in events: if glm_condition_attr in event: raise ValueError("Event
import json import yaml import re from myst_parser.main import default_parser def to_myst(model: str) -> str: myst_file = "" model_dict = json.loads(model) for key in model_dict: if key == "cells": myst_file = _handle_cells(model_dict[key]) elif key == "metadata": myst_file = _handle_metadata(model_dict[key]) elif key == "nbformat": myst_file = _handle_nbformat(model_dict[key]) elif key == "nbformat_minor": myst_file = _handle_nbformat_minor(model_dict[key]) else: raise ValueError("Attempting to parse unknown type.") return myst_file def _handle_cells(cell_nodes: list) -> str: """ Cells can be nested: cells (list) { "metadata" : { "kernel_info": { # if kernel_info is defined, its name field is required. "name" : "the name of the kernel" }, "language_info": { # if language_info is defined, its name field is required. "name" : "the programming language of the kernel", "version": "the version of the language", "codemirror_mode": "The name of the codemirror mode to use [optional]" } }, "nbformat": 4, "nbformat_minor": 0, "cells" : [ # list of cell dictionaries, see below ], } """ pass def _handle_metadata(metadata_nodes: dict) -> str: """ metadata (dict) "metadata" : { "kernel_info": { # if kernel_info is defined, its name field is required. "name" : "the name of the kernel" }, "language_info": { # if language_info is defined, its name field is required. 
"name" : "the programming language of the kernel", "version": "the version of the language", "codemirror_mode": "The name of the codemirror mode to use [optional]" } }, """ pass def _handle_nbformat(nbformat: int) -> str: """ "nbformat" (int) """ pass def _handle_nbformat_minor(nbformat_minor: int) -> str: """ "nbformat_minor" (int) """ pass def _parse_code_output(cell_section): out = {} out["output_type"] = cell_section.info.split(" ")[1] if out["output_type"] == "stream": out["text"] = cell_section.content.splitlines(True) out["name"] = "stdout" return out def _parse_cell(cell): cell_meta = yaml.safe_load(cell[0].content) if "metadata" not in cell_meta: cell_meta["metadata"] = {} for cell_section in cell[1:]: if cell_section.info == "{source}": cell_meta["source"] = cell_section.content.splitlines(True) cell_meta["source"][-1] = cell_meta["source"][-1].strip() if cell_section.info.startswith("{output}"): out = _parse_code_output(cell_section) if "outputs" not in cell_meta: cell_meta["outputs"] = [] cell_meta["outputs"].append(out) return cell_meta def _split_sections(tokens): append_to_cell = None sections = {} for token in tokens: if token.type == "fence" and token.info == "{metadata}": # Metadata yaml block sections = yaml.safe_load(token.content) sections["cells"] = [] if append_to_cell is not None: append_to_cell.append(token) if token.type == "myst_line_comment" and token.content == "cell": append_to_cell = [] if token.type == "myst_line_comment" and token.content == "endcell": sections["cells"].append(_parse_cell(append_to_cell)) append_to_cell = None return sections def to_model(myst): md = default_parser("docutils") tokens = md.parse(myst) sections = _split_sections(tokens) return sections """ Some fields, such as code input and text output, are characteristically multi-line strings. When these fields are written to disk, they may be written as a list of strings, which should be joined with '' when reading back into memory. 
In programmatic APIs for working with notebooks (Python, Javascript), these are always re-joined into the original multi-line string. If you intend to work with notebook files directly, you must allow multi-line string fields to be either a string or list of strings. Cell Types There are a few basic cell types for encapsulating code and text. All cells have the following basic structure: { "cell_type" : "type", "metadata" : {}, "source" : "single string or [list, of, strings]", } Note On disk, multi-line strings MAY be split into lists of strings. When read with the nbformat Python API, these multi-line strings will always be a single string. Markdown cells Markdown cells are used for body-text, and contain markdown, as defined in GitHub-flavored markdown, and implemented in marked. { "cell_type" : "markdown", "metadata" : {}, "source" : "[multi-line *markdown*]", } Changed in version nbformat: 4.0 Heading cells have been removed in favor of simple headings in markdown. Code cells Code cells are the primary content of Jupyter notebooks. They contain source code in the language of the document’s associated kernel, and a list of outputs associated with executing that code. They also have an execution_count, which must be an integer or null. { "cell_type" : "code", "execution_count": 1, # integer or null "metadata" : { "collapsed" : True, # whether the output of the cell is collapsed "scrolled": False, # any of true, false or "auto" }, "source" : "[some multi-line code]", "outputs": [{ # list of output dicts (described below) "output_type": "stream", ... }], } Changed in version nbformat: 4.0 input was renamed to source, for consistency among cell types. Changed in version nbformat: 4.0 prompt_number renamed to execution_count Code cell outputs A code cell can have a variety of outputs (stream data or rich mime-type output). These correspond to messages produced as a result of executing the cell. 
All outputs have an output_type field, which is a string defining what type of output it is. stream output { "output_type" : "stream", "name" : "stdout", # or stderr "text" : "[multiline stream text]", } Changed in version nbformat: 4.0 The stream key was changed to name to match the stream message. display_data Rich display outputs, as created by display_data messages, contain data keyed by mime-type. This is often called a mime-bundle, and shows up in various locations in the notebook format and message spec. The metadata of these messages may be keyed by mime-type as well. { "output_type" : "display_data", "data" : { "text/plain" : "[multiline text data]", "image/png": "[base64-encoded-multiline-png-data]", "application/json": { # JSON data is included as-is "key1": "data", "key2": ["some", "values"], "key3": {"more": "data"} }, "application/vnd.exampleorg.type+json": { # JSON data, included as-is, when the mime-type key ends in +json "key1": "data", "key2": ["some", "values"], "key3": {"more": "data"} } }, "metadata" : { "image/png": { "width": 640, "height": 480, }, }, } Changed in version nbformat: 4.0 application/json output is no longer double-serialized into a string. Changed in version nbformat: 4.0 mime-types are used for keys, instead of a combination of short names (text) and mime-types, and are stored in a data key, rather than the top-level. i.e. output.data['image/png'] instead of output.png. execute_result Results of executing a cell (as created by displayhook in Python) are stored in execute_result outputs. execute_result outputs are identical to display_data, adding only a execution_count field, which must be an integer. 
{ "output_type" : "execute_result", "execution_count": 42, "data" : { "text/plain" : "[multiline text data]", "image/png": "[base64-encoded-multiline-png-data]", "application/json": { # JSON data is included as-is "json": "data", }, }, "metadata" : { "image/png": { "width": 640, "height": 480, }, }, } Changed in version nbformat: 4.0 pyout renamed to execute_result Changed in version nbformat: 4.0 prompt_number renamed to execution_count error Failed execution may show a traceback { 'output_type': 'error', 'ename' : str, # Exception name, as a string 'evalue' : str, # Exception value, as a string # The traceback will contain a list of frames, # represented each as a string. 'traceback' : list, } Changed in version nbformat: 4.0 pyerr renamed to error Raw NBConvert cells A raw cell is defined as content that should be included unmodified in nbconvert output. For example, this cell could include raw LaTeX for nbconvert to pdf via latex, or restructured text for use in Sphinx documentation. The notebook authoring environment does not render raw cells. The only logic in a raw cell is the format metadata field. If defined, it specifies which nbconvert output format is the intended target for the raw cell. When outputting to any other format, the raw cell’s contents will be excluded. In the default case when this value is undefined, a raw cell’s contents will be included in any nbconvert output, regardless of format. { "cell_type" : "raw", "metadata" : { # the mime-type of the target nbconvert format. # nbconvert to formats other than this will exclude this cell. "format" : "mime/type" }, "source" : "[some nbformat output text]" } Cell attachments Markdown and raw cells can have a number of attachments, typically inline images that can be referenced in the markdown content of a cell. The attachments dictionary of a cell contains a set of mime-bundles (see display_data) keyed by filename that represents the files attached
<gh_stars>0 #!/usr/bin/env python # # Copyright 2017 University Of Southern California # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function import os import subprocess import sys import time from datetime import datetime from Pegasus.catalogs.replica_catalog import * from Pegasus.catalogs.sites_catalog import * from Pegasus.catalogs.transformation_catalog import * from Pegasus.DAX3 import * from Pegasus.init import * from future import standard_library standard_library.install_aliases() __author__ = '<NAME>' class Cleanup: """ """ LEAF = 'leaf' INPLACE = 'inplace' CONSTRAINT = 'constraint' class Instance: def __init__( self, dax=None, sites_catalog=None, replica_catalog=None, transformation_catalog=None, workflow_dir=None, input_dir=None ): """ Create an object of the Instance class to run a Pegasus workflow. 
:param dax: A Pegasus DAX3 object :param sites_catalog: A Pegasus sites catalog :param replica_catalog: A Pegasus replica catalog :param transformation_catalog: A Pegasus transformation catalog :param workflow_dir: A path to the workflow directory :param input_dir: A path to the inputs directory """ self.dax = dax self.base_dir = workflow_dir self.input_dir = input_dir self.submit_dir = None self.output_dir = None self.wf_image_abs = None self.wf_image_exe = None # private members self._is_tutorial = False self._submit = False self._sites_catalog = sites_catalog self._replica_catalog = replica_catalog self._transformation_catalog = transformation_catalog self._properties = { # basic pegasus properties 'pegasus.data.configuration': 'condorio' } def tutorial( self, env=TutorialEnv.LOCAL_MACHINE, example=TutorialExample.SPLIT, workflow_dir=None ): """ Generate a Pegasus tutorial workflow. :param env: Execution environment (e.g., TutorialEnv.LOCAL_MACHINE) :param example: Example tutorial worklfow (e.g., TutorialExample.SPLIT) :param workflow_dir: Name of the folder where the workflow will be generated """ if not env: raise Exception( 'An environment option should be provided (e.g., TutorialEnv.LOCAL_MACHINE).' ) if not example: raise Exception( 'A tutorial workflow should be provided (e.g., TutorialExample.SPLIT).' 
) shared_dir = None try: out = subprocess.getoutput('pegasus-config --python-dump') for line in out.split('\n'): if 'pegasus_share_dir' in line: pegasus_shared_dir = line.split('=')[1].strip()[1:-1] shared_dir = os.path.join(pegasus_shared_dir, 'init') break except subprocess.CalledProcessError as grepexc: print("error code", grepexc.returncode, grepexc.output) # generate workflow folder if not workflow_dir: d = datetime.now() workflow_dir = '-'.join( [example[1], env[1], d.replace(microsecond=0).isoformat()] ) workflow_dir = os.path.abspath(workflow_dir) self.base_dir = workflow_dir self.workflow = Workflow(workflow_dir, shared_dir) self.workflow.config = "tutorial" self.workflow.daxgen = "tutorial" self.workflow.tutorial_setup = env[1] self.workflow.tutorial = example[1] self.workflow.generate_tutorial = True # checks if example == TutorialExample.DIAMOND and env != TutorialEnv.OSG_FROM_ISI: raise Exception( 'The "diamond" workflow can only run on OSG sites.' ) if example == TutorialExample.MPI and env != TutorialEnv.BLUEWATERS_GLITE: raise Exception( 'The "MPI Hello World" workflow can only run on Bluewaters.' ) # setup tutorial and generate folder self.workflow.setup_tutorial() self.workflow.generate() # generate DAX out, err = subprocess.Popen( './generate_dax.sh %s.dax' % self.workflow.tutorial, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=self.base_dir ).communicate() if err: raise Exception(err) self.dax = ADAG(self.workflow.tutorial) self._is_tutorial = True def set_property(self, key, value): """ Add a property to the Pegasus properties file. :param key: Property key :param value: Property value """ if not key: raise Exception('A key should be provided.') if not value: raise Exception('A value should be provided.') self._properties[key] = value def run( self, submit=True, cleanup=Cleanup.INPLACE, site='local', force=False ): """ The main method, which is used to run a Pegasus workflow. 
:param submit: Plan and submit the executable workflow generated (default: True) :param cleanup: The cleanup strategy to use (default: Cleanup.INPLACE) :param site: The sitename of the workflow :param force: Skip reduction of the workflow, resulting in build style dag (default: False) """ if not self._is_tutorial and ( not self.dax or not isinstance(self.dax, ADAG) ): raise Exception('Invalid DAX object') if not self.base_dir: self.base_dir = os.path.abspath('./' + self.dax.name) if not os.path.exists(self.base_dir): os.makedirs(self.base_dir) if not self.input_dir: self.input_dir = self.base_dir + '/input' if not os.path.exists(self.input_dir): os.makedirs(self.input_dir) self._submit = submit properties_file = self.base_dir + '/pegasus.properties' submit_dir = self.base_dir + '/submit' self.output_dir = self.base_dir + '/output' dax_name = self.base_dir + '/' + self.dax.name + '.dax' if not self._is_tutorial: # write the sites catalog if not self._sites_catalog: self._sites_catalog = SitesCatalog(self.base_dir) self._sites_catalog.write(force=force) # write the replica catalog if not self._replica_catalog: self._replica_catalog = ReplicaCatalog(self.base_dir) self._replica_catalog.write(force=force) # write the transformation catalog if not self._transformation_catalog: self._transformation_catalog = TransformationCatalog( self.base_dir ) self._transformation_catalog.write(force=force) # write properties file self.set_property( 'pegasus.catalog.site.file', self._sites_catalog.filename ) self.set_property('pegasus.catalog.replica', 'File') self.set_property( 'pegasus.catalog.replica.file', self._replica_catalog.filename ) self.set_property('pegasus.catalog.transformation', 'Text') self.set_property( 'pegasus.catalog.transformation.file', self._transformation_catalog.filename ) self.set_property('pegasus.metrics.app', self.dax.name) with open(properties_file, 'w') as ppf: for key in self._properties: ppf.write('%s=%s\n' % (key, self._properties[key])) # write DAX 
file f = open(dax_name, 'w') self.dax.writeXML(f) f.close() # prepare for submission cmd = [ 'pegasus-plan', '--conf', properties_file, '--dax', dax_name, '--dir', submit_dir, '--input-dir', self.input_dir, '--output-dir', self.output_dir, '--sites', site ] if cleanup: cmd.append('--cleanup %s' % cleanup) else: cmd.append('--nocleanup') if force: cmd.append('--force') if submit: cmd.append('--submit') # plan the workflow out, err = subprocess.Popen( ' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=self.base_dir ).communicate() if err: raise Exception(err) for line in out.decode('utf8').split('\n'): if 'pegasus-run' in line: self.submit_dir = line.split('pegasus-run')[1].strip() print('The pegasus workflow has been successfully planned.\n' \ 'Please, use the ```submit()``` method to start the workflow execution.\n\n' '\x1b[1;34mPegasus submit dir: %s\x1b[0m' % self.submit_dir) break elif 'pegasus-status -l' in line: self.submit_dir = line.split('pegasus-status -l')[1 ].strip() print('The pegasus workflow has been successfully planned and started to run.\n' \ 'Please, use the status() method to follow the progress of the workflow execution.\n\n' '\x1b[1;34mPegasus submit dir: %s\x1b[0m' % self.submit_dir) break def submit(self): """ Run the workflow in case it has only been planned. """ if self._submit: raise Exception('The workfow execution has already been started.') out, err = subprocess.Popen( 'pegasus-run %s' % self.submit_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=self.base_dir ).communicate() if err: raise Exception(err) self._submit = True print( 'The pegasus workflow has started its execution.\n' 'Please, use the status() method to follow the progress of the workflow execution.' ) def status(self, loop=False, delay=10): """ Monitor the workflow status. 
:param loop: Whether to query the workflow status within a loop until it is completed or failed (default: False) :param delay: Delay in seconds to query the workflow status (default: 10 seconds) """ if not self._submit: raise Exception( 'The workfow has not started its execution yet.\n' 'Please, check if the workflow is planned and submitted for execution.' ) seq = False while True: out, err = subprocess.Popen( 'pegasus-status -l %s' % self.submit_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=self.base_dir ).communicate() if err: raise Exception(err) for line in out.decode('utf8').split('\n'): if 'UNRDY' in line: seq = True elif seq: seq = False v = line.split() state = v[8] if state == 'Success': state = '\x1b[1;32m' + state + '\x1b[0m' elif state == 'Failure': state = '\x1b[1;31m' + state + '\x1b[0m' progress = '\x1b[1;34m' + 'Progress: ' + v[ 7 ] + '%\x1b[0m (' + state + ')' completed = '\x1b[1;32mCompleted: ' + v[5] + '\x1b[0m' queued = '\x1b[1;33mQueued: ' + v[1] + '\x1b[0m' running = '\x1b[1;36mRunning: ' + v[3] + '\x1b[0m' fail = '\x1b[1;31mFailed: ' + v[6] + '\x1b[0m' st = progress + '\t(' + completed + ', ' + queued + ', ' + running + ', ' + fail + ')' print('%s\r' % st, end='') break if not loop or 'Success' in out.decode( 'utf8' ) or 'Failure' in out.decode('utf8'): break time.sleep(delay) def statistics( self, workflow=False, jobs=False, breakdown=False, time=False ): """ Print the workflow statistics. :param workflow: :param jobs: :param breakdown: :param time: """ if not self._submit: raise Exception( 'The workfow has not started its execution yet.\n' 'Please, check if the workflow is planned and submitted for execution.' 
) out, err = subprocess.Popen( 'pegasus-statistics -s all %s' % self.submit_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=self.base_dir ).communicate() if err: raise Exception(err) for line in out.decode('utf8').split('\n'): if line.startswith('Workflow wall time'): v = line.split(':') print( 'Workflow Wall Time: \x1b[1;34m' + v[1].strip() + '\x1b[0m' ) break def outputs(self): """ Print a list of output files. """ if not self.output_dir: raise Exception('No output directory is configured.') outputs = [ os.path.join(root, name) for root, dirs, files in os.walk(self.output_dir) for name in files ] for f in outputs: print(f.replace(self.output_dir + '/', '')) def inspect(self, path):
i in x_vals])) logging.debug("y vals: " + " ".join([str(i) for i in y_vals])) # if exp_env.is_prefilled: # local_colors = prefilled_colors_web # local_linestyles = prefilled_linestyles_web # else: # local_colors = colors # local_linestyles = linestyles if exp_env in label_translation: label = label_translation[exp_env] else: label = exp_env ax.plot(x_vals, y_vals, linestyles_paper[exp_env], # color=local_colors[cell_to_anon(exp_env.cell_name)], dashes=dashes_paper[exp_env], label=label, markersize=ms, # mec=local_colors[cell_to_anon(exp_env.cell_name)] ) if x_axis_type != "": vary_dim = x_axis_type setup_graph_details(ax, plot_title, filename_suffix, y_label, y_axis_type, all_x_vals_set, v_dim=vary_dim) except Exception as e: logging.warn(e) # In our per-workload or per-scheduler plots, all lines # associated with the same workload_desc # are the same color, but have different line-types per workload_name # or scheduler_name. In this way, we end up with a set of related lines # for each workload_desc. def plot_2d_data_set_dict(data_set_2d_dict, plot_title, filename_suffix, y_label, y_axis_type, error_bars_data_set_2d_dict=None): assert (y_axis_type == "0-to-1" or y_axis_type == "ms-to-day" or y_axis_type == "abs") logging.info("Plotting {}".format(plot_title)) try: plt.clf() ax = fig.add_subplot(111) # Track a union of all x_vals which can be used to figure out # the x-axis for the plot we are generating. all_x_vals_set = sets.Set() for exp_env, name_to_val_map in data_set_2d_dict.iteritems(): if paper_mode: cell_label = cell_to_anon(exp_env.cell_name) else: cell_label = exp_env.cell_name # if exp_env.cell_name != "B": # print("skipping %s" % exp_env.cell_name) # continue # else: # print("not skipping %s" % exp_env.cell_name) # If in paper mode, skip this plot if the cell name was not # passed in as argument envs_to_plot. 
if paper_mode and not re.search(cell_label, envs_to_plot): logging.debug( "skipping plot because cell_label %s was not passed in as envs_to_plot %s" % ( cell_label, envs_to_plot)) continue # if exp_env.is_prefilled: # # TEMPORARY: Skip prefilled to get smaller place-holder graph # # for paper draft. # continue for wl_or_sched_name, values in name_to_val_map.iteritems(): # Skip service schedulers. logging.debug("wl_or_sched_name is {}".format(wl_or_sched_name)) # if re.search('Service', wl_or_sched_name): # logging.debug("Skipping %s" % wl_or_sched_name) # continue wl_or_sched_name_root = wl_or_sched_name result = re.search('^[^-]+', wl_or_sched_name) if result is not None: wl_or_sched_name_root = result.group(0) wl_or_sched_num = wl_or_sched_name result = re.search('[0-9]+$', wl_or_sched_name) if result is not None: wl_or_sched_num = result.group(0) line_label = str(wl_or_sched_num) # Hacky: chop MonolithicBatch, MesosBatch, MonolithicService, etc. # down to "Batch" and "Service" if in paper mode. updated_wl_or_sched_name = wl_or_sched_name if paper_mode and re.search("Batch", wl_or_sched_name): updated_wl_or_sched_name = "Batch" if paper_mode and re.search("Service", wl_or_sched_name): updated_wl_or_sched_name = "Service" # Append scheduler or workload name unless in paper mode and # graphing monolithic. # if not (paper_mode and re.search("Monolithic", wl_or_sched_name)): # line_label += " " + updated_wl_or_sched_name # if exp_env.is_prefilled: # line_label += " prefilled" # Don't add an item to the legend for batch schedulers/workloads # in paper mode. We'll explain those in the caption. 
if paper_mode and updated_wl_or_sched_name == "Service": line_label = "_nolegend_" # if vary_dim == "lambda": # x_vals = [(1 / value.x) for value in values] # else: # x_vals = [value.x for value in values] x_vals = [value.x for value in values] logging.debug("x_vals size: {}, values size: {}".format(len(x_vals), len(values))) all_x_vals_set = all_x_vals_set.union(x_vals) logging.debug("all_x_vals_set updated, now = %s" % all_x_vals_set) # Rewrite zero's for the y_axis_types that will be log. y_vals = [0.00001 if (value.y == 0 and y_axis_type == "ms-to-day") else value.y for value in values] logging.debug("Plotting line for %s %s %s, line_label = %s." % (exp_env, wl_or_sched_name, plot_title, line_label)) logging.debug("x vals: " + " ".join([str(i) for i in x_vals])) logging.debug("y vals: " + " ".join([str(i) for i in y_vals])) if exp_env.is_prefilled: local_colors = prefilled_colors_web local_linestyles = prefilled_linestyles_web else: local_colors = colors local_linestyles = linestyles if error_bars_data_set_2d_dict is None: ax.plot(x_vals, y_vals, linestyles_paper[wl_or_sched_num], dashes=dashes_paper[wl_or_sched_num], color=local_colors[cell_to_anon(exp_env.cell_name)], label=line_label, markersize=ms, mec=local_colors[cell_to_anon(exp_env.cell_name)]) else: err_bar_vals = \ [i.y for i in error_bars_data_set_2d_dict[exp_env][wl_or_sched_name]] logging.debug("Plotting error bars: " + " ".join([str(i) for i in err_bar_vals])) ax.errorbar(x_vals, y_vals, # fmt=local_linestyles[wl_or_sched_name_root], dashes=dashes_paper[wl_or_sched_name_root], color=local_colors[exp_env.cell_name], # color=per_wl_colors[wl_or_sched_name_root], label=line_label, markersize=ms, capsize=1, yerr=err_bar_vals) logging.debug("all_x_vals_set size: {}".format(len(all_x_vals_set))) setup_graph_details(ax, plot_title, filename_suffix, y_label, y_axis_type, all_x_vals_set, v_dim=vary_dim) except Exception as e: logging.warn(e) def plot_distribution_2d(data_set_2d_dict, plot_title, 
filename_suffix, x_label, y_axis_type): assert (y_axis_type == "0-to-1" or y_axis_type == "ms-to-day" or y_axis_type == "abs") logging.info("Plotting {}".format(plot_title)) try: plt.clf() ax = fig.add_subplot(111) # Track a union of all x_vals which can be used to figure out # the x-axis for the plot we are generating. all_x_vals_set = sets.Set() color = None avg = {} for exp_env, name_to_job_map in data_set_2d_dict.iteritems(): # if paper_mode: # cell_label = cell_to_anon(exp_env.cell_name) # else: # cell_label = exp_env.cell_name # if exp_env.cell_name != "B": # print("skipping %s" % exp_env.cell_name) # continue # else: # print("not skipping %s" % exp_env.cell_name) # If in paper mode, skip this plot if the cell name was not # passed in as argument envs_to_plot. # if paper_mode and not re.search(cell_label, envs_to_plot): # logging.debug( # "skipping plot because cell_label %s was not passed in as envs_to_plot %s" % (cell_label, envs_to_plot)) # continue # if exp_env.is_prefilled: # # TEMPORARY: Skip prefilled to get smaller place-holder graph # # for paper draft. # continue for wl_or_sched_name, values in name_to_job_map.iteritems(): if wl_or_sched_name not in avg: avg[wl_or_sched_name] = {"x": [], "y": []} # Skip service schedulers. logging.debug("wl_or_sched_name is {}".format(wl_or_sched_name)) # if re.search('Service', wl_or_sched_name): # logging.debug("Skipping %s" % wl_or_sched_name) # continue wl_or_sched_num = wl_or_sched_name result = re.search('[0-9]+$', wl_or_sched_name) if result is not None: wl_or_sched_num = result.group(0) line_label = str(wl_or_sched_num) # Hacky: chop MonolithicBatch, MesosBatch, MonolithicService, etc. # down to "Batch" and "Service" if in paper mode. 
updated_wl_or_sched_name = wl_or_sched_name if paper_mode and re.search("Batch", wl_or_sched_name): updated_wl_or_sched_name = "Batch" if paper_mode and re.search("Service", wl_or_sched_name): updated_wl_or_sched_name = "Service" # Append scheduler or workload name unless in paper mode and # graphing monolithic. # if not (paper_mode and re.search("Monolithic", wl_or_sched_name)): # line_label += " " + updated_wl_or_sched_name # if exp_env.is_prefilled: # line_label += " prefilled" # Don't add an item to the legend for batch schedulers/workloads # in paper mode. We'll explain those in the caption. if paper_mode and updated_wl_or_sched_name == "Service": line_label = "_nolegend_" cdf = [] x_vals = [value for value in values] x_vals.sort() for i, value in enumerate(x_vals): cdf.append(i / float(len(x_vals))) x_vals = np.array(x_vals) avg[wl_or_sched_name]["x"].append(x_vals) all_x_vals_set = all_x_vals_set.union(x_vals) logging.debug("all_x_vals_set updated, now = %s" % all_x_vals_set) y_vals = np.array(cdf) avg[wl_or_sched_name]["y"].append(y_vals) logging.debug("Plotting line for %s %s %s, line_label = %s." 
% (exp_env, wl_or_sched_name, plot_title, line_label)) logging.debug("x vals: " + " ".join([str(i) for i in x_vals])) logging.debug("y vals: " + " ".join([str(i) for i in y_vals])) # if exp_env.is_prefilled: # local_colors = prefilled_colors_web # local_linestyles = prefilled_linestyles_web # else: # local_colors = colors # local_linestyles = linestyles # color = getNewColor(color) ax.plot(x_vals, y_vals, linestyles_paper[wl_or_sched_num], dashes=dashes_paper[exp_env], # color=color['color'], label=line_label + "-" + exp_env, markersize=ms, # mec=local_colors[cell_to_anon(exp_env.cell_name)] ) logging.debug("all_x_vals_set size: {}".format(len(all_x_vals_set))) setup_graph_details(ax, plot_title, filename_suffix, "", y_axis_type, all_x_vals_set, x_label=x_label) # plt.clf() # ax = fig.add_subplot(111) # color = None # for pl_name in avg: # longest_x_vals = [] # # for x_vals in avg[pl_name]["x"]: # if len(x_vals) > len(longest_x_vals): # longest_x_vals = x_vals # # y_vals = np.array(avg[pl_name]["y"]) # y_vals = y_vals.mean(axis=0) # # logging.debug("{} Length x:{} y:{}".format(pl_name, len(longest_x_vals), len(y_vals))) # # # color = getNewColor(color) # ax.plot(longest_x_vals, y_vals, linestyles_paper[pl_name], # dashes=dashes_paper[pl_name], # # color=color['color'], # label=pl_name, markersize=ms, # # mec=local_colors[cell_to_anon(exp_env.cell_name)] # ) # setup_graph_details(ax, plot_title, filename_suffix + "-mean", "", y_axis_type, all_x_vals_set, x_label=x_label) except Exception as e: logging.warn(e) def plot_distribution(data_set_1d_dict, plot_title, filename_suffix, x_label, y_axis_type): assert (y_axis_type == "0-to-1" or y_axis_type == "ms-to-day" or y_axis_type == "abs") logging.info("Plotting {}".format(plot_title)) try: plt.clf() ax = fig.add_subplot(111) # Track a union of all x_vals which can be used to figure out # the x-axis for the plot we are generating. 
all_x_vals_set = sets.Set() color = None for wl_or_sched_name, values in data_set_1d_dict.iteritems(): # Skip service schedulers. logging.debug("wl_or_sched_name is {}".format(wl_or_sched_name)) # if re.search('Service', wl_or_sched_name): # logging.debug("Skipping %s" % wl_or_sched_name) # continue wl_or_sched_num = wl_or_sched_name result = re.search('[0-9]+$', wl_or_sched_name) if result is not None: wl_or_sched_num = result.group(0) line_label = str(wl_or_sched_num) # Hacky: chop MonolithicBatch, MesosBatch, MonolithicService, etc. # down to "Batch" and "Service" if in paper mode. updated_wl_or_sched_name = wl_or_sched_name if paper_mode and re.search("Batch", wl_or_sched_name): updated_wl_or_sched_name = "Batch" if paper_mode and re.search("Service", wl_or_sched_name): updated_wl_or_sched_name = "Service" # Append scheduler or workload name unless in paper mode and # graphing monolithic. # if not (paper_mode and
{'name': 'TLS_DH_Anon_WITH_DES_CBC_SHA', 'protocol': 'TLS', 'kx': 'DH', 'au': 'Anon', 'enc': 'DES_CBC', 'bits': '56', 'mac': 'SHA', 'kxau_strength': 'MITM', 'enc_strength': 'LOW', 'overall_strength': 'MITM'}, '00001B': {'name': 'TLS_DH_Anon_WITH_3DES_EDE_CBC_SHA', 'protocol': 'TLS', 'kx': 'DH', 'au': 'Anon', 'enc': '3DES_EDE_CBC', 'bits': '168', 'mac': 'SHA', 'kxau_strength': 'MITM', 'enc_strength': 'HIGH', 'overall_strength': 'MITM'}, '00001C': {'name': 'SSL_FORTEZZA_KEA_WITH_NULL_SHA', 'protocol': 'SSL', 'kx': 'FORTEZZA', 'au': 'KEA', 'enc': 'NULL', 'bits': '0', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'NULL', 'overall_strength': 'NULL'}, '00001D': {'name': 'SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA', 'protocol': 'SSL', 'kx': 'FORTEZZA', 'au': 'KEA', 'enc': 'FORTEZZA_CBC', 'bits': '80', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '00001E': {'name': 'TLS_KRB5_WITH_DES_CBC_SHA', 'protocol': 'TLS', 'kx': 'KRB5', 'au': 'KRB5', 'enc': 'DES_CBC', 'bits': '56', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'LOW', 'overall_strength': 'LOW'}, '00001F': {'name': 'TLS_KRB5_WITH_3DES_EDE_CBC_SHA', 'protocol': 'TLS', 'kx': 'KRB5', 'au': 'KRB5', 'enc': '3DES_EDE_CBC', 'bits': '168', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000020': {'name': 'TLS_KRB5_WITH_RC4_128_SHA', 'protocol': 'TLS', 'kx': 'KRB5', 'au': 'KRB5', 'enc': 'RC4_128', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'MEDIUM', 'overall_strength': 'MEDIUM'}, '000021': {'name': 'TLS_KRB5_WITH_IDEA_CBC_SHA', 'protocol': 'TLS', 'kx': 'KRB5', 'au': 'KRB5', 'enc': 'IDEA_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000022': {'name': 'TLS_KRB5_WITH_DES_CBC_MD5', 'protocol': 'TLS', 'kx': 'KRB5', 'au': 'KRB5', 'enc': 'DES_CBC', 'bits': '56', 'mac': 'MD5', 'kxau_strength': 'HIGH', 'enc_strength': 'LOW', 
'overall_strength': 'LOW'}, '000023': {'name': 'TLS_KRB5_WITH_3DES_EDE_CBC_MD5', 'protocol': 'TLS', 'kx': 'KRB5', 'au': 'KRB5', 'enc': '3DES_EDE_CBC', 'bits': '168', 'mac': 'MD5', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000024': {'name': 'TLS_KRB5_WITH_RC4_128_MD5', 'protocol': 'TLS', 'kx': 'KRB5', 'au': 'KRB5', 'enc': 'RC4_128', 'bits': '128', 'mac': 'MD5', 'kxau_strength': 'HIGH', 'enc_strength': 'MEDIUM', 'overall_strength': 'MEDIUM'}, '000025': {'name': 'TLS_KRB5_WITH_IDEA_CBC_MD5', 'protocol': 'TLS', 'kx': 'KRB5', 'au': 'KRB5', 'enc': 'IDEA_CBC', 'bits': '128', 'mac': 'MD5', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000026': {'name': 'TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA', 'protocol': 'TLS', 'kx': 'KRB5_EXPORT', 'au': 'KRB5_EXPORT', 'enc': 'DES_CBC_40', 'bits': '40', 'mac': 'SHA', 'kxau_strength': 'EXPORT', 'enc_strength': 'EXPORT', 'overall_strength': 'EXPORT'}, '000027': {'name': 'TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA', 'protocol': 'TLS', 'kx': 'KRB5_EXPORT', 'au': 'KRB5_EXPORT', 'enc': 'RC2_CBC_40', 'bits': '40', 'mac': 'SHA', 'kxau_strength': 'EXPORT', 'enc_strength': 'EXPORT', 'overall_strength': 'EXPORT'}, '000028': {'name': 'TLS_KRB5_EXPORT_WITH_RC4_40_SHA', 'protocol': 'TLS', 'kx': 'KRB5_EXPORT', 'au': 'KRB5_EXPORT', 'enc': 'RC4_40', 'bits': '40', 'mac': 'SHA', 'kxau_strength': 'EXPORT', 'enc_strength': 'EXPORT', 'overall_strength': 'EXPORT'}, '000029': {'name': 'TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5', 'protocol': 'TLS', 'kx': 'KRB5_EXPORT', 'au': 'KRB5_EXPORT', 'enc': 'DES_CBC_40', 'bits': '40', 'mac': 'MD5', 'kxau_strength': 'EXPORT', 'enc_strength': 'EXPORT', 'overall_strength': 'EXPORT'}, '00002A': {'name': 'TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5', 'protocol': 'TLS', 'kx': 'KRB5_EXPORT', 'au': 'KRB5_EXPORT', 'enc': 'RC2_CBC_40', 'bits': '40', 'mac': 'MD5', 'kxau_strength': 'EXPORT', 'enc_strength': 'EXPORT', 'overall_strength': 'EXPORT'}, '00002B': {'name': 
'TLS_KRB5_EXPORT_WITH_RC4_40_MD5', 'protocol': 'TLS', 'kx': 'KRB5_EXPORT', 'au': 'KRB5_EXPORT', 'enc': 'RC4_40', 'bits': '40', 'mac': 'MD5', 'kxau_strength': 'EXPORT', 'enc_strength': 'EXPORT', 'overall_strength': 'EXPORT'}, '00002C': {'name': 'TLS_PSK_WITH_NULL_SHA', 'protocol': 'TLS', 'kx': 'PSK', 'au': 'PSK', 'enc': 'NULL', 'bits': '0', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'NULL', 'overall_strength': 'NULL'}, '00002D': {'name': 'TLS_DHE_PSK_WITH_NULL_SHA', 'protocol': 'TLS', 'kx': 'DHE', 'au': 'PSK', 'enc': 'NULL', 'bits': '0', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'NULL', 'overall_strength': 'NULL'}, '00002E': {'name': 'TLS_RSA_PSK_WITH_NULL_SHA', 'protocol': 'TLS', 'kx': 'RSA', 'au': 'PSK', 'enc': 'NULL', 'bits': '0', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'NULL', 'overall_strength': 'NULL'}, '00002F': {'name': 'TLS_RSA_WITH_AES_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'RSA', 'au': 'RSA', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000030': {'name': 'TLS_DH_DSS_WITH_AES_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'DH', 'au': 'DSS', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000031': {'name': 'TLS_DH_RSA_WITH_AES_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'DH', 'au': 'RSA', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000032': {'name': 'TLS_DHE_DSS_WITH_AES_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'DHE', 'au': 'DSS', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000033': {'name': 'TLS_DHE_RSA_WITH_AES_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'DHE', 'au': 'RSA', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, 
'000034': {'name': 'TLS_DH_Anon_WITH_AES_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'DH', 'au': 'Anon', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'MITM', 'enc_strength': 'HIGH', 'overall_strength': 'MITM'}, '000035': {'name': 'TLS_RSA_WITH_AES_256_CBC_SHA', 'protocol': 'TLS', 'kx': 'RSA', 'au': 'RSA', 'enc': 'AES_256_CBC', 'bits': '256', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000036': {'name': 'TLS_DH_DSS_WITH_AES_256_CBC_SHA', 'protocol': 'TLS', 'kx': 'DH', 'au': 'DSS', 'enc': 'AES_256_CBC', 'bits': '256', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000037': {'name': 'TLS_DH_RSA_WITH_AES_256_CBC_SHA', 'protocol': 'TLS', 'kx': 'DH', 'au': 'RSA', 'enc': 'AES_256_CBC', 'bits': '256', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000038': {'name': 'TLS_DHE_DSS_WITH_AES_256_CBC_SHA', 'protocol': 'TLS', 'kx': 'DHE', 'au': 'DSS', 'enc': 'AES_256_CBC', 'bits': '256', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000039': {'name': 'TLS_DHE_RSA_WITH_AES_256_CBC_SHA', 'protocol': 'TLS', 'kx': 'DHE', 'au': 'RSA', 'enc': 'AES_256_CBC', 'bits': '256', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '00003A': {'name': 'TLS_DH_Anon_WITH_AES_256_CBC_SHA', 'protocol': 'TLS', 'kx': 'DH', 'au': 'Anon', 'enc': 'AES_256_CBC', 'bits': '256', 'mac': 'SHA', 'kxau_strength': 'MITM', 'enc_strength': 'HIGH', 'overall_strength': 'MITM'}, '00003B': {'name': 'TLS_RSA_WITH_NULL_SHA256', 'protocol': 'TLS', 'kx': 'RSA', 'au': 'RSA', 'enc': 'NULL', 'bits': '0', 'mac': 'SHA256', 'kxau_strength': 'HIGH', 'enc_strength': 'NULL', 'overall_strength': 'NULL'}, '00003C': {'name': 'TLS_RSA_WITH_AES_128_CBC_SHA256', 'protocol': 'TLS', 'kx': 'RSA', 'au': 'RSA', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA256', 'kxau_strength': 'HIGH', 
'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '00003D': {'name': 'TLS_RSA_WITH_AES_256_CBC_SHA256', 'protocol': 'TLS', 'kx': 'RSA', 'au': 'RSA', 'enc': 'AES_256_CBC', 'bits': '256', 'mac': 'SHA256', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '00003E': {'name': 'TLS_DH_DSS_WITH_AES_128_CBC_SHA256', 'protocol': 'TLS', 'kx': 'DH', 'au': 'DSS', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA256', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '00003F': {'name': 'TLS_DH_RSA_WITH_AES_128_CBC_SHA256', 'protocol': 'TLS', 'kx': 'DH', 'au': 'RSA', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA256', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000040': {'name': 'TLS_DHE_DSS_WITH_AES_128_CBC_SHA256', 'protocol': 'TLS', 'kx': 'DHE', 'au': 'DSS', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA256', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000041': {'name': 'TLS_RSA_WITH_CAMELLIA_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'RSA', 'au': 'RSA', 'enc': 'CAMELLIA_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000042': {'name': 'TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'DH', 'au': 'DSS', 'enc': 'CAMELLIA_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000043': {'name': 'TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'DH', 'au': 'RSA', 'enc': 'CAMELLIA_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000044': {'name': 'TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'DHE', 'au': 'DSS', 'enc': 'CAMELLIA_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000045': {'name': 'TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA', 
'protocol': 'TLS', 'kx': 'DHE', 'au': 'RSA', 'enc': 'CAMELLIA_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000046': {'name': 'TLS_DH_Anon_WITH_CAMELLIA_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'DH', 'au': 'Anon', 'enc': 'CAMELLIA_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'MITM', 'enc_strength': 'HIGH', 'overall_strength': 'MITM'}, '000047': {'name': 'TLS_ECDH_ECDSA_WITH_NULL_SHA', 'protocol': 'TLS', 'kx': 'ECDH', 'au': 'ECDSA', 'enc': 'NULL', 'bits': '0', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'NULL', 'overall_strength': 'NULL'}, '000048': {'name': 'TLS_ECDH_ECDSA_WITH_RC4_128_SHA', 'protocol': 'TLS', 'kx': 'ECDH', 'au': 'ECDSA', 'enc': 'RC4_128', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'MEDIUM', 'overall_strength': 'MEDIUM'}, '000049': {'name': 'TLS_ECDH_ECDSA_WITH_DES_CBC_SHA', 'protocol': 'TLS', 'kx': 'ECDH', 'au': 'ECDSA', 'enc': 'DES_CBC', 'bits': '56', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'LOW', 'overall_strength': 'LOW'}, '00004A': {'name': 'TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA', 'protocol': 'TLS', 'kx': 'ECDH', 'au': 'ECDSA', 'enc': '3DES_EDE_CBC', 'bits': '168', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '00004B': {'name': 'TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA', 'protocol': 'TLS', 'kx': 'ECDH', 'au': 'ECDSA', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '00004C': {'name': 'TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA', 'protocol': 'TLS', 'kx': 'ECDH', 'au': 'ECDSA', 'enc': 'AES_256_CBC', 'bits': '256', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000060': {'name': 'TLS_RSA_EXPORT1024_WITH_RC4_56_MD5', 'protocol': 'TLS', 'kx': 'RSA_EXPORT1024', 'au': 'RSA_EXPORT1024', 'enc': 'RC4_56', 'bits': '56', 'mac': 'MD5', 'kxau_strength': 'EXPORT', 
'enc_strength': 'EXPORT', 'overall_strength': 'EXPORT'}, '000061': {'name': 'TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5', 'protocol': 'TLS', 'kx': 'RSA_EXPORT1024', 'au': 'RSA_EXPORT1024', 'enc': 'RC2_CBC_56', 'bits': '56', 'mac': 'MD5', 'kxau_strength': 'EXPORT', 'enc_strength': 'EXPORT', 'overall_strength': 'EXPORT'}, '000062': {'name': 'TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA', 'protocol': 'TLS', 'kx': 'RSA_EXPORT1024', 'au': 'RSA_EXPORT1024', 'enc': 'DES_CBC', 'bits': '56', 'mac': 'SHA', 'kxau_strength': 'EXPORT', 'enc_strength': 'LOW', 'overall_strength': 'EXPORT'}, '000063': {'name': 'TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA', 'protocol': 'TLS', 'kx': 'DHE', 'au': 'DSS', 'enc': 'DES_CBC', 'bits': '56', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'LOW', 'overall_strength': 'LOW'}, '000064': {'name': 'TLS_RSA_EXPORT1024_WITH_RC4_56_SHA', 'protocol': 'TLS', 'kx': 'RSA_EXPORT1024', 'au': 'RSA_EXPORT1024', 'enc': 'RC4_56', 'bits': '56', 'mac': 'SHA', 'kxau_strength': 'EXPORT', 'enc_strength': 'EXPORT', 'overall_strength': 'EXPORT'}, '000065': {'name': 'TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA', 'protocol': 'TLS', 'kx': 'DHE', 'au': 'DSS', 'enc': 'RC4_56', 'bits': '56', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'EXPORT', 'overall_strength': 'EXPORT'}, '000066': {'name': 'TLS_DHE_DSS_WITH_RC4_128_SHA', 'protocol': 'TLS', 'kx': 'DHE', 'au': 'DSS', 'enc': 'RC4_128', 'bits': '128', 'mac': 'SHA', 'kxau_strength': 'HIGH', 'enc_strength': 'MEDIUM', 'overall_strength': 'MEDIUM'}, '000067': {'name': 'TLS_DHE_RSA_WITH_AES_128_CBC_SHA256', 'protocol': 'TLS', 'kx': 'DHE', 'au': 'RSA', 'enc': 'AES_128_CBC', 'bits': '128', 'mac': 'SHA256', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000068': {'name': 'TLS_DH_DSS_WITH_AES_256_CBC_SHA256', 'protocol': 'TLS', 'kx': 'DH', 'au': 'DSS', 'enc': 'AES_256_CBC', 'bits': '256', 'mac': 'SHA256', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'}, '000069': {'name': 
'TLS_DH_RSA_WITH_AES_256_CBC_SHA256', 'protocol': 'TLS', 'kx': 'DH', 'au': 'RSA', 'enc': 'AES_256_CBC', 'bits': '256', 'mac': 'SHA256', 'kxau_strength': 'HIGH', 'enc_strength': 'HIGH', 'overall_strength': 'HIGH'},
""" Couple of functions needed for training the model. """ import pandas as pd import numpy as np from catboost import CatBoostRegressor, Pool from sklearn.model_selection import train_test_split, KFold from sklearn.preprocessing import StandardScaler from catboost import CatBoostClassifier from tqdm import tqdm_notebook def eval_metric(actual, predic, molecule_type): error = np.abs(actual - predic) df = pd.DataFrame(np.vstack([error, molecule_type]).T, columns=['error', 'type']) df['error'] = df['error'].astype(np.float32) return df.groupby('type')['error'].mean().apply(np.log).mean() def get_performance(model, X_train_df, y_train_df, X_test_df, y_test_df, raw_train_df): molecule_type_train = raw_train_df.loc[X_train_df.index, 'type'] molecule_type_test = raw_train_df.loc[X_test_df.index, 'type'] prediction_train = model.predict(X_train_df) prediction_test = model.predict(X_test_df) train_perf = eval_metric(y_train_df.values, prediction_train, molecule_type_train) test_perf = eval_metric(y_test_df.values, prediction_test, molecule_type_test) return (train_perf, test_perf) def aversarial_train(train_X, train_Y, test_X, test_Y, plot=False): """ Here, we try to differentiate between train and test """ train_pool = Pool(train_X, label=train_Y) test_pool = Pool(test_X, label=test_Y) model = CatBoostClassifier( iterations=500, random_seed=63, learning_rate=0.5, custom_loss=['AUC'], task_type='GPU', depth=10, ) model.fit(train_pool, eval_set=test_pool, plot=plot, logging_level='Silent') auc_train = model.best_score_['learn']['AUC'] auc_test = model.best_score_['validation']['AUC'] return (model, auc_train, auc_test) def train_lightGBM(lightGBM_config, train_X, train_Y, test_X, test_Y, data_size): import lightgbm as lgb lgb_train = lgb.Dataset(train_X, train_Y) valid_sets = [lgb_train] # When creating final model, test_X is not None but test_Y is None. 
if test_Y is None: lgb_eval = None test_Y = [] else: lgb_eval = lgb.Dataset(test_X, test_Y, reference=lgb_train) valid_sets.append(lgb_eval) print('{:.2f}% Train:{}K Test:{}K #Features:{}'.format( 100 * (len(train_Y) + len(test_Y)) / data_size, len(train_Y) // 1000, len(test_Y) // 1000, train_X.shape[1], )) model = lgb.train( lightGBM_config, lgb_train, valid_sets=valid_sets, verbose_eval=5000, early_stopping_rounds=40, ) feature_importance_df = pd.DataFrame( sorted(zip(model.feature_importance(), test_X.columns)), columns=['Importances', 'Feature Index']).set_index('Feature Index') output_dict = { 'model': model, 'feature_importance': feature_importance_df, 'best_iteration': model.best_iteration, 'train_prediction': model.predict(train_X), 'test_prediction': model.predict(test_X), } return output_dict def train_catboost(catboost_config, train_X, train_Y, test_X, test_Y, data_size, plot=False): train_pool = Pool(train_X, label=train_Y) # When creating final model, test_X is not None but test_Y is None. if test_Y is None: test_pool = None test_Y = [] else: test_pool = Pool(test_X, label=test_Y) print('{:.2f}% Train:{}K Test:{}K #Features:{}'.format( 100 * (len(train_Y) + len(test_Y)) / data_size, len(train_Y) // 1000, len(test_Y) // 1000, train_X.shape[1], )) model = CatBoostRegressor(**catboost_config) model.fit(train_pool, eval_set=test_pool, plot=plot, logging_level='Silent') feature_importance_df = model.get_feature_importance(prettified=True) feature_importance_df.rename({'Feature Id': 'Feature Index'}, inplace=True, axis=1) feature_importance_df.set_index('Feature Index', inplace=True) output_dict = { 'model': model, 'feature_importance': feature_importance_df, 'best_iteration': model.best_iteration_, 'train_prediction': model.predict(train_X), 'test_prediction': model.predict(test_X), } return output_dict def averserial_train_for_each_type_no_model(useless_cols_for_each_type, train_X_df, test_X_df, max_auc): """ It does not store (and return) model. 
It is very lean in terms of memory in that sense. We try to see how much AUC do we get when we try to differentiate train with test. """ anal_dict = {} for type_enc in train_X_df['type_enc'].unique(): anal_dict[type_enc] = {'train': None, 'test': None, 'feature_importance': None, 'useless_cols': []} X_t = train_X_df[train_X_df.type_enc == type_enc].copy() test_X_t = test_X_df[test_X_df.type_enc == type_enc].copy() if len(useless_cols_for_each_type[type_enc]) > 0: X_t = X_t.drop(useless_cols_for_each_type[type_enc], axis=1) test_X_t = test_X_t[X_t.columns] X = pd.concat([X_t, test_X_t], axis=0) Y = X[[]].copy() Y['target'] = 1 Y.loc[test_X_t.index, 'target'] = 0 train_X, test_X = train_test_split(X, test_size=0.15, random_state=0, stratify=Y['target']) test_Y = Y.loc[test_X.index] train_Y = Y.loc[train_X.index] (model, auc_train, auc_test) = aversarial_train(train_X, train_Y, test_X, test_Y) anal_dict[type_enc]['train'] = round(auc_train, 2) anal_dict[type_enc]['test'] = round(auc_test, 2) f_df = model.get_feature_importance(prettified=True).set_index('Feature Index')['Importances'] # anal_dict[type_enc]['feature_importance'] = f_df.to_dict() while auc_test > max_auc: useless_cols = f_df[f_df > 10].index.tolist() if len(useless_cols) == 0: break train_X.drop(useless_cols, axis=1, inplace=True) test_X.drop(useless_cols, axis=1, inplace=True) print('Typ:', type_enc, 'Train:', anal_dict[type_enc]['train'], 'Test:', anal_dict[type_enc]['test'], 'Removing:', useless_cols) anal_dict[type_enc]['useless_cols'] += useless_cols (model, auc_train, auc_test) = aversarial_train(train_X, train_Y, test_X, test_Y) anal_dict[type_enc]['train'] = round(auc_train, 2) anal_dict[type_enc]['test'] = round(auc_test, 2) f_df = model.get_feature_importance(prettified=True).set_index('Feature Index')['Importances'] # anal_dict[type_enc]['feature_importance'] = f_df.to_dict() return anal_dict def one_type_eval_metric(actual, predic): """ Metric for just one type """ return eval_metric(actual, 
predic, np.zeros(actual.shape)) def permutation_importance(model, X_val, y_val, metric, threshold=0.005, verbose=True): """ Permutes the features. If performance doesn't change a lot then it is useless. """ # Taken from here https://www.kaggle.com/speedwagon/permutation-importance results = {} y_pred = model.predict(X_val) results['base_score'] = metric(y_val, y_pred) if verbose: print(f'Base score {results["base_score"]:.5}') for col in tqdm_notebook(X_val.columns): freezed_col = X_val[col].copy() X_val.loc[:,col] = np.random.permutation(X_val[col]) preds = model.predict(X_val) results[col] = metric(y_val, preds) X_val.loc[:,col] = freezed_col if verbose: print(f'column: {col} - {results[col]:.5}') bad_features = [k for k in results if results[k] > results['base_score'] + threshold] return results, bad_features def permute_to_get_useless_features(catboost_config_for_each_type, useless_cols_for_each_type, train_X_df, Y_df): bad_features_dict = {} for type_enc in train_X_df['type_enc'].unique(): print(type_enc) bad_features_dict[type_enc] = {} X_t = train_X_df[train_X_df.type_enc == type_enc].copy() if len(useless_cols_for_each_type[type_enc]) > 0: X_t = X_t.drop(useless_cols_for_each_type[type_enc], axis=1) train_X, test_X = train_test_split(X_t, test_size=0.15, random_state=0) test_Y = Y_df.loc[test_X.index].copy() train_Y = Y_df.loc[train_X.index].copy() model, _ = train_catboost(catboost_config_for_each_type[type_enc], train_X, train_Y, test_X, test_Y, train_X_df.shape[0]) perm_results, bad_features = permutation_importance(model, test_X, test_Y, one_type_eval_metric, verbose=False) print([('base', one_type_eval_metric(test_Y, model.predict(test_X)))] + [(bf, perm_results[bf]) for bf in bad_features]) bad_features_dict[type_enc] = bad_features return bad_features def add_one_hot_encoding(X_df, test_X_df, skip_one_hot_columns=None): """ integer columns are converted to onehot columns. This is helpful in gradient based models LR,NN. 
    Returns a dict mapping old column to new column names.
    Ensure that df does not get any more columns than test_X_df.
    """
    # we might have removed some columns from training data as part of preprocessing.
    assert set(X_df.columns).issubset(set(test_X_df.columns))
    # Encode both frames in place; a category present only in train would
    # create a one-hot column missing from test, so drop such extras below.
    _ = _add_one_hot_encoding(X_df, skip_one_hot_columns=skip_one_hot_columns)
    _ = _add_one_hot_encoding(test_X_df, skip_one_hot_columns=skip_one_hot_columns)
    extra_cols = list(set(X_df.columns) - set(test_X_df.columns))
    if extra_cols:
        print('ONEHOT encoding extra columns getting removed:', extra_cols)
        X_df.drop(extra_cols, axis=1, inplace=True)


def _add_one_hot_encoding(df, skip_one_hot_columns=None, one_hot_columns=None):
    """
    integer columns are converted to onehot columns. This is helpful in gradient based models LR,NN.
    Returns a dict mapping old column to new column names.
    """
    # Mutates `df` in place: encoded source columns are dropped and replaced
    # by boolean ONEHOT_<col>_<value> columns.
    new_columns_dict = {}
    dtypes_df = df.dtypes
    if one_hot_columns is None:
        # are float but have int values
        one_hot_columns = [
            'SpinMultiplicity', 'nbr_0_SpinMultiplicity', 'nbr_1_SpinMultiplicity',
            'CC_hybridization', 'nbr_0_Type', 'nbr_1_Type'
        ]
        # Keep only those actually present, then add all small-int dtype columns.
        one_hot_columns = list(set(dtypes_df.index.tolist()).intersection(set(one_hot_columns)))
        one_hot_columns += dtypes_df[(dtypes_df == np.uint8) | (dtypes_df == np.uint16) |
                                     (dtypes_df == np.int16)].index.tolist()
    else:
        # Copy so we don't mutate the caller's list while removing skips below.
        one_hot_columns = one_hot_columns.copy()

    if skip_one_hot_columns is not None:
        for skip_col in skip_one_hot_columns:
            if skip_col in one_hot_columns:
                one_hot_columns.remove(skip_col)

    columns_added_count = 0
    columns_converted = []
    for col in tqdm_notebook(one_hot_columns):
        one_hot_df = pd.get_dummies(df[col], dtype=bool)
        one_hot_df.columns = [f'ONEHOT_{col}_{one_hot_col}' for one_hot_col in one_hot_df.columns]
        new_columns_dict[col] = one_hot_df.columns.tolist()
        # We ensure that only certain number of columns gets added. Otherwise, it results in issues.
        # NOTE(review): `break` stops at the first over-budget column, so
        # later (possibly narrower) columns are skipped too -- confirm intended.
        if columns_added_count + one_hot_df.shape[1] > 200:
            break
        columns_added_count += one_hot_df.shape[1]
        for one_hot_col in one_hot_df.columns:
            df[one_hot_col] = one_hot_df[one_hot_col]
        columns_converted.append(col)
        df.drop([col], axis=1, inplace=True)
    print(f'{columns_added_count} many columns added from {len(columns_converted)} columns')
    # df.drop(columns_converted, axis=1, inplace=True)
    return new_columns_dict


def train_for_one_type_no_model_normalized_onehot(model_config, X_df, Y_df, train_fn, test_X_df):
    """Train on one type with one-hot encoding + standard scaling.

    Returns a dict of pd.Series predictions keyed by 'train', 'val', 'test',
    each indexed by the original row index. `train_fn` must return a dict
    with 'train_prediction', 'val_prediction' and 'test_prediction'.
    """
    data = {}
    # one hot encoding
    test_X = test_X_df[X_df.columns]
    add_one_hot_encoding(X_df, test_X)
    test_X = test_X[X_df.columns]
    train_X, val_X = train_test_split(X_df, test_size=0.15, random_state=0)
    val_Y = Y_df.loc[val_X.index]
    train_Y = Y_df.loc[train_X.index]
    # Remember indices before scaling: StandardScaler returns bare arrays.
    train_idx = train_X.index
    val_idx = val_X.index
    test_idx = test_X.index
    scalar_X = StandardScaler()
    train_X = scalar_X.fit_transform(train_X)
    val_X = scalar_X.transform(val_X)
    test_X = scalar_X.transform(test_X)
    train_dict = train_fn(model_config, train_X, train_Y, val_X, val_Y, test_X)
    # saving important things.
    data['train'] = pd.Series(train_dict['train_prediction'], index=train_idx)
    data['val'] = pd.Series(train_dict['val_prediction'], index=val_idx)
    data['test'] = pd.Series(train_dict['test_prediction'], index=test_idx)
    print('Eval', one_type_eval_metric(val_Y[val_Y.columns[0]].values, data['val'].values))
    return data


def train_for_each_type_no_model_normalized_onehot(
        model_config_for_each_type,
        useless_cols_for_each_type,
        Y_df: pd.DataFrame,
        train_fn,
):
    """
    3 Data sets: Train-Test-Validation
    It does not store (and return) model.
    It is very lean in terms of memory in that sense.
    Y_df has to be a dataframe. this handles the case for multiple outputs.
    primary target has to be the first column.
    """
    assert isinstance(Y_df, pd.DataFrame)
    anal_dict = {}
    val_predictions = []
    # Reads per-type feature frames from train.hdf / test.hdf in the CWD.
    for type_enc in reversed(range(8)):
        print(type_enc)
        anal_dict[type_enc] = {}
        X_t = pd.read_hdf('train.hdf', f'type_enc{type_enc}')
        test_X = pd.read_hdf('test.hdf', f'type_enc{type_enc}')
        cols_to_remove = useless_cols_for_each_type[type_enc].copy()
        # get_constant_features is defined elsewhere in this project.
        cols_to_remove += get_constant_features(X_t)
        valid_cols = [c for c in X_t.columns.tolist() if c not in cols_to_remove]
        print(X_t.shape[1] - len(valid_cols), 'total columns removed')
        X_t = X_t[valid_cols]
        anal_dict[type_enc] = train_for_one_type_no_model_normalized_onehot(
            model_config_for_each_type[type_enc], X_t, Y_df.loc[X_t.index], train_fn, test_X)
        pred_t_df = anal_dict[type_enc]['val'].to_frame('prediction')
        pred_t_df['type_enc'] = type_enc
        val_predictions.append(pred_t_df)
    prediction_df = pd.concat(val_predictions)
    # Score validation predictions across all types with the primary target.
    actual = Y_df.loc[prediction_df.index, Y_df.columns[0]]
    print('Final metric', eval_metric(actual.values, prediction_df['prediction'].values,
                                      prediction_df['type_enc']))
    return anal_dict


def train_for_one_type_Kfold(
        model_config,
        train_X_df,
        Y_df: pd.DataFrame,
        train_fn,
        test_X_df=None,
        n_splits=5,
):
    """
    It does not store (and return) model.
    It is very lean in terms of memory in that sense.
    Y_df has to be a dataframe. this handles the case for multiple outputs.
    primary target has to be the first column.
    """
    # NOTE(review): definition continues past the visible chunk; the tail
    # below is carried through unchanged.
    assert isinstance(Y_df, pd.DataFrame)
    analysis_data = []
    type_enc = train_X_df['type_enc'].unique()
    assert len(type_enc) == 1
    type_enc = type_enc[0]
    print(type_enc)
    if test_X_df is not None:
        assert set(test_X_df['type_enc'].unique()) == set([type_enc])
        test_X_df = test_X_df[train_X_df.columns]
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=955)
    train_index = train_X_df.index
    for tr_idx, val_idx in kf.split(train_X_df[[]]):
        print('')
        print(f'{len(analysis_data)}th iteration')
        train_X = train_X_df.iloc[tr_idx]
        val_X = train_X_df.iloc[val_idx]
        val_Y = Y_df.loc[val_X.index]
        train_Y = Y_df.loc[train_X.index]
        train_dict = train_fn(model_config, train_X, train_Y, val_X, val_Y, train_X_df.shape[0])
        # saving important things.
        data = {}
        data['train'] = pd.Series(train_dict['train_prediction'], index=train_index[tr_idx])
        data['val'] = pd.Series(train_dict['test_prediction'], index=train_index[val_idx])
        data['best_iteration'] = train_dict['best_iteration']
        data['feature_importance'] = train_dict['feature_importance']['Importances'].to_dict()
        print('Best iter', train_dict['best_iteration'], model_config)
pass else: raise NoViableAltException(self) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class GateContext(ParserRuleContext): def __init__(self, parser, parent=None, invokingState=-1): super(QuilParser.GateContext, self).__init__(parent, invokingState) self.parser = parser def name(self): return self.getTypedRuleContext(QuilParser.NameContext,0) def LPAREN(self): return self.getToken(QuilParser.LPAREN, 0) def param(self, i=None): if i is None: return self.getTypedRuleContexts(QuilParser.ParamContext) else: return self.getTypedRuleContext(QuilParser.ParamContext,i) def RPAREN(self): return self.getToken(QuilParser.RPAREN, 0) def qubit(self, i=None): if i is None: return self.getTypedRuleContexts(QuilParser.QubitContext) else: return self.getTypedRuleContext(QuilParser.QubitContext,i) def COMMA(self, i=None): if i is None: return self.getTokens(QuilParser.COMMA) else: return self.getToken(QuilParser.COMMA, i) def getRuleIndex(self): return QuilParser.RULE_gate def enterRule(self, listener): if hasattr(listener, "enterGate"): listener.enterGate(self) def exitRule(self, listener): if hasattr(listener, "exitGate"): listener.exitGate(self) def gate(self): localctx = QuilParser.GateContext(self, self._ctx, self.state) self.enterRule(localctx, 6, self.RULE_gate) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 125 self.name() self.state = 137 self._errHandler.sync(self) _la = self._input.LA(1) if _la==QuilParser.LPAREN: self.state = 126 self.match(QuilParser.LPAREN) self.state = 127 self.param() self.state = 132 self._errHandler.sync(self) _la = self._input.LA(1) while _la==QuilParser.COMMA: self.state = 128 self.match(QuilParser.COMMA) self.state = 129 self.param() self.state = 134 self._errHandler.sync(self) _la = self._input.LA(1) self.state = 135 self.match(QuilParser.RPAREN) self.state = 140 
            # NOTE: ANTLR-machine-generated parser code — regenerate from the
            # Quil grammar instead of hand-editing.
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # (qubit)+ — do-while loop: at least one INT qubit must follow.
            while True:
                self.state = 139
                self.qubit()
                self.state = 142
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not (_la==QuilParser.INT):
                    break

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class NameContext(ParserRuleContext):
        # Parse-tree context for the `name` rule (a single IDENTIFIER token).

        def __init__(self, parser, parent=None, invokingState=-1):
            super(QuilParser.NameContext, self).__init__(parent, invokingState)
            self.parser = parser

        def IDENTIFIER(self):
            return self.getToken(QuilParser.IDENTIFIER, 0)

        def getRuleIndex(self):
            return QuilParser.RULE_name

        def enterRule(self, listener):
            if hasattr(listener, "enterName"):
                listener.enterName(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitName"):
                listener.exitName(self)

    def name(self):
        # Rule: name : IDENTIFIER
        localctx = QuilParser.NameContext(self, self._ctx, self.state)
        self.enterRule(localctx, 8, self.RULE_name)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 144
            self.match(QuilParser.IDENTIFIER)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class QubitContext(ParserRuleContext):
        # Parse-tree context for the `qubit` rule (a single INT token).

        def __init__(self, parser, parent=None, invokingState=-1):
            super(QuilParser.QubitContext, self).__init__(parent, invokingState)
            self.parser = parser

        def INT(self):
            return self.getToken(QuilParser.INT, 0)

        def getRuleIndex(self):
            return QuilParser.RULE_qubit

        def enterRule(self, listener):
            if hasattr(listener, "enterQubit"):
                listener.enterQubit(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitQubit"):
                listener.exitQubit(self)

    def qubit(self):
        # Rule: qubit : INT
        localctx = QuilParser.QubitContext(self, self._ctx, self.state)
        self.enterRule(localctx, 10, self.RULE_qubit)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 146
            self.match(QuilParser.INT)
        except RecognitionException as re:
            localctx.exception = re
self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class ParamContext(ParserRuleContext): def __init__(self, parser, parent=None, invokingState=-1): super(QuilParser.ParamContext, self).__init__(parent, invokingState) self.parser = parser def dynamicParam(self): return self.getTypedRuleContext(QuilParser.DynamicParamContext,0) def expression(self): return self.getTypedRuleContext(QuilParser.ExpressionContext,0) def getRuleIndex(self): return QuilParser.RULE_param def enterRule(self, listener): if hasattr(listener, "enterParam"): listener.enterParam(self) def exitRule(self, listener): if hasattr(listener, "exitParam"): listener.exitParam(self) def param(self): localctx = QuilParser.ParamContext(self, self._ctx, self.state) self.enterRule(localctx, 12, self.RULE_param) try: self.state = 150 self._errHandler.sync(self) token = self._input.LA(1) if token in [QuilParser.LBRACKET]: self.enterOuterAlt(localctx, 1) self.state = 148 self.dynamicParam() pass elif token in [QuilParser.PI, QuilParser.I, QuilParser.SIN, QuilParser.COS, QuilParser.SQRT, QuilParser.EXP, QuilParser.CIS, QuilParser.PLUS, QuilParser.MINUS, QuilParser.INT, QuilParser.FLOAT, QuilParser.LPAREN, QuilParser.PERCENTAGE]: self.enterOuterAlt(localctx, 2) self.state = 149 self.expression(0) pass else: raise NoViableAltException(self) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class DynamicParamContext(ParserRuleContext): def __init__(self, parser, parent=None, invokingState=-1): super(QuilParser.DynamicParamContext, self).__init__(parent, invokingState) self.parser = parser def LBRACKET(self): return self.getToken(QuilParser.LBRACKET, 0) def INT(self, i=None): if i is None: return self.getTokens(QuilParser.INT) else: return self.getToken(QuilParser.INT, i) def RBRACKET(self): return self.getToken(QuilParser.RBRACKET, 
0) def MINUS(self): return self.getToken(QuilParser.MINUS, 0) def getRuleIndex(self): return QuilParser.RULE_dynamicParam def enterRule(self, listener): if hasattr(listener, "enterDynamicParam"): listener.enterDynamicParam(self) def exitRule(self, listener): if hasattr(listener, "exitDynamicParam"): listener.exitDynamicParam(self) def dynamicParam(self): localctx = QuilParser.DynamicParamContext(self, self._ctx, self.state) self.enterRule(localctx, 14, self.RULE_dynamicParam) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 152 self.match(QuilParser.LBRACKET) self.state = 153 self.match(QuilParser.INT) self.state = 156 self._errHandler.sync(self) _la = self._input.LA(1) if _la==QuilParser.MINUS: self.state = 154 self.match(QuilParser.MINUS) self.state = 155 self.match(QuilParser.INT) self.state = 158 self.match(QuilParser.RBRACKET) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class DefGateContext(ParserRuleContext): def __init__(self, parser, parent=None, invokingState=-1): super(QuilParser.DefGateContext, self).__init__(parent, invokingState) self.parser = parser def DEFGATE(self): return self.getToken(QuilParser.DEFGATE, 0) def name(self): return self.getTypedRuleContext(QuilParser.NameContext,0) def COLON(self): return self.getToken(QuilParser.COLON, 0) def NEWLINE(self): return self.getToken(QuilParser.NEWLINE, 0) def matrix(self): return self.getTypedRuleContext(QuilParser.MatrixContext,0) def LPAREN(self): return self.getToken(QuilParser.LPAREN, 0) def variable(self, i=None): if i is None: return self.getTypedRuleContexts(QuilParser.VariableContext) else: return self.getTypedRuleContext(QuilParser.VariableContext,i) def RPAREN(self): return self.getToken(QuilParser.RPAREN, 0) def COMMA(self, i=None): if i is None: return self.getTokens(QuilParser.COMMA) else: return self.getToken(QuilParser.COMMA, i) def 
getRuleIndex(self): return QuilParser.RULE_defGate def enterRule(self, listener): if hasattr(listener, "enterDefGate"): listener.enterDefGate(self) def exitRule(self, listener): if hasattr(listener, "exitDefGate"): listener.exitDefGate(self) def defGate(self): localctx = QuilParser.DefGateContext(self, self._ctx, self.state) self.enterRule(localctx, 16, self.RULE_defGate) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 160 self.match(QuilParser.DEFGATE) self.state = 161 self.name() self.state = 173 self._errHandler.sync(self) _la = self._input.LA(1) if _la==QuilParser.LPAREN: self.state = 162 self.match(QuilParser.LPAREN) self.state = 163 self.variable() self.state = 168 self._errHandler.sync(self) _la = self._input.LA(1) while _la==QuilParser.COMMA: self.state = 164 self.match(QuilParser.COMMA) self.state = 165 self.variable() self.state = 170 self._errHandler.sync(self) _la = self._input.LA(1) self.state = 171 self.match(QuilParser.RPAREN) self.state = 175 self.match(QuilParser.COLON) self.state = 176 self.match(QuilParser.NEWLINE) self.state = 177 self.matrix() except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class VariableContext(ParserRuleContext): def __init__(self, parser, parent=None, invokingState=-1): super(QuilParser.VariableContext, self).__init__(parent, invokingState) self.parser = parser def PERCENTAGE(self): return self.getToken(QuilParser.PERCENTAGE, 0) def IDENTIFIER(self): return self.getToken(QuilParser.IDENTIFIER, 0) def getRuleIndex(self): return QuilParser.RULE_variable def enterRule(self, listener): if hasattr(listener, "enterVariable"): listener.enterVariable(self) def exitRule(self, listener): if hasattr(listener, "exitVariable"): listener.exitVariable(self) def variable(self): localctx = QuilParser.VariableContext(self, self._ctx, self.state) self.enterRule(localctx, 18, 
self.RULE_variable) try: self.enterOuterAlt(localctx, 1) self.state = 179 self.match(QuilParser.PERCENTAGE) self.state = 180 self.match(QuilParser.IDENTIFIER) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class MatrixContext(ParserRuleContext): def __init__(self, parser, parent=None, invokingState=-1): super(QuilParser.MatrixContext, self).__init__(parent, invokingState) self.parser = parser def matrixRow(self, i=None): if i is None: return self.getTypedRuleContexts(QuilParser.MatrixRowContext) else: return self.getTypedRuleContext(QuilParser.MatrixRowContext,i) def NEWLINE(self, i=None): if i is None: return self.getTokens(QuilParser.NEWLINE) else: return self.getToken(QuilParser.NEWLINE, i) def getRuleIndex(self): return QuilParser.RULE_matrix def enterRule(self, listener): if hasattr(listener, "enterMatrix"): listener.enterMatrix(self) def exitRule(self, listener): if hasattr(listener, "exitMatrix"): listener.exitMatrix(self) def matrix(self): localctx = QuilParser.MatrixContext(self, self._ctx, self.state) self.enterRule(localctx, 20, self.RULE_matrix) try: self.enterOuterAlt(localctx, 1) self.state = 187 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,13,self._ctx) while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: if _alt==1: self.state = 182 self.matrixRow() self.state = 183 self.match(QuilParser.NEWLINE) self.state = 189 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,13,self._ctx) self.state = 190 self.matrixRow() except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class MatrixRowContext(ParserRuleContext): def __init__(self, parser, parent=None, invokingState=-1): super(QuilParser.MatrixRowContext, self).__init__(parent, invokingState) self.parser = parser 
def TAB(self): return self.getToken(QuilParser.TAB, 0) def expression(self, i=None): if i is None: return self.getTypedRuleContexts(QuilParser.ExpressionContext) else: return self.getTypedRuleContext(QuilParser.ExpressionContext,i) def COMMA(self, i=None): if i is None: return self.getTokens(QuilParser.COMMA) else: return self.getToken(QuilParser.COMMA, i) def getRuleIndex(self): return QuilParser.RULE_matrixRow def enterRule(self, listener): if hasattr(listener, "enterMatrixRow"): listener.enterMatrixRow(self) def exitRule(self, listener): if hasattr(listener, "exitMatrixRow"): listener.exitMatrixRow(self) def matrixRow(self): localctx = QuilParser.MatrixRowContext(self, self._ctx, self.state) self.enterRule(localctx, 22, self.RULE_matrixRow) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 192 self.match(QuilParser.TAB) self.state = 193 self.expression(0) self.state = 198 self._errHandler.sync(self) _la = self._input.LA(1) while _la==QuilParser.COMMA: self.state = 194 self.match(QuilParser.COMMA) self.state = 195 self.expression(0) self.state = 200 self._errHandler.sync(self) _la = self._input.LA(1) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class DefCircuitContext(ParserRuleContext): def __init__(self, parser, parent=None, invokingState=-1): super(QuilParser.DefCircuitContext, self).__init__(parent, invokingState) self.parser = parser def DEFCIRCUIT(self): return self.getToken(QuilParser.DEFCIRCUIT, 0) def name(self): return self.getTypedRuleContext(QuilParser.NameContext,0) def COLON(self): return self.getToken(QuilParser.COLON, 0) def NEWLINE(self): return self.getToken(QuilParser.NEWLINE, 0) def circuit(self): return self.getTypedRuleContext(QuilParser.CircuitContext,0) def LPAREN(self): return self.getToken(QuilParser.LPAREN, 0) def variable(self, i=None): if i is None: return 
self.getTypedRuleContexts(QuilParser.VariableContext) else: return self.getTypedRuleContext(QuilParser.VariableContext,i) def RPAREN(self): return self.getToken(QuilParser.RPAREN, 0) def qubitVariable(self, i=None): if i is None: return self.getTypedRuleContexts(QuilParser.QubitVariableContext) else: return self.getTypedRuleContext(QuilParser.QubitVariableContext,i) def COMMA(self, i=None): if i is None: return self.getTokens(QuilParser.COMMA) else: return self.getToken(QuilParser.COMMA, i) def getRuleIndex(self): return QuilParser.RULE_defCircuit def enterRule(self, listener): if hasattr(listener, "enterDefCircuit"): listener.enterDefCircuit(self) def exitRule(self, listener): if hasattr(listener, "exitDefCircuit"): listener.exitDefCircuit(self) def defCircuit(self): localctx = QuilParser.DefCircuitContext(self, self._ctx, self.state) self.enterRule(localctx, 24, self.RULE_defCircuit) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 201 self.match(QuilParser.DEFCIRCUIT) self.state = 202 self.name() self.state = 214 self._errHandler.sync(self) _la = self._input.LA(1) if _la==QuilParser.LPAREN: self.state = 203 self.match(QuilParser.LPAREN) self.state = 204 self.variable() self.state = 209 self._errHandler.sync(self) _la = self._input.LA(1) while _la==QuilParser.COMMA: self.state = 205 self.match(QuilParser.COMMA) self.state = 206 self.variable() self.state = 211 self._errHandler.sync(self) _la = self._input.LA(1) self.state = 212 self.match(QuilParser.RPAREN) self.state = 219 self._errHandler.sync(self) _la = self._input.LA(1) while _la==QuilParser.IDENTIFIER: self.state = 216 self.qubitVariable() self.state = 221 self._errHandler.sync(self) _la = self._input.LA(1) self.state
<reponame>JiaHe-yogurt/GNN import networkx as nx from sklearn import preprocessing import numpy as np import os import collections import networkx as nx import matplotlib.pyplot as plt import random import numpy as np from itertools import permutations, combinations import tensorflow.compat.v1 as tf tf.disable_eager_execution() from numpy.linalg import matrix_power from scipy import sparse import pickle import copy tf.disable_eager_execution() NUM_LABELS = {'ENZYMES': 3, 'COLLAB': 0, 'IMDBBINARY': 0, 'IMDBMULTI': 0, 'MUTAG': 7, 'NCI1': 37, 'NCI109': 38, 'PROTEINS': 3, 'PTC': 22, 'DD': 89} BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) def normalize(graph): D_inv = np.diag(np.sum(graph, axis=0) ** -0.5) graph = np.matmul(np.matmul(D_inv, graph), D_inv) return graph def A_power(graph_adj): top = graph_adj.shape[0]-1 D_inv = np.diag(np.sum(graph_adj, axis=0) ** -0.5) graph_adj = np.matmul(np.matmul(D_inv, graph_adj), D_inv) adj_powers=[matrix_power(graph_adj,i+1) - matrix_power(graph_adj,i) for i in range(1, top+1)] adj_powers.insert(0,graph_adj) return np.array(adj_powers) #top = graph_adj.shape[0] #adj_powers, diffs = [],[] #adj_powers.append(graph_adj) #diffs.append(graph_adj) #for p in range(2,top+1): # power, diff = correct_A_power(p, graph_adj, adj_powers) # adj_powers.append(power), diffs.append(diff) return np.array(diffs) def correct_A_power(p,graph_adj,adj_powers): adj_power = matrix_power(graph_adj,p) + adj_powers[-1] np.fill_diagonal(adj_power, 0) adj_power[np.where(adj_power > 0)] = 1 diff = adj_power - adj_powers[-1] return adj_power, diff def load_dataset_ori(ds_name): """ construct graphs and labels from dataset text in data folder :param ds_name: name of data set you want to load :return: two numpy arrays of shape (num_of_graphs). 
the graphs array contains in each entry a ndarray represent adjacency matrix of a graph of shape (num_vertex, num_vertex, num_vertex_labels) the labels array in index i represent the class of graphs[i] """ directory = BASE_DIR + "/data/benchmark_graphs/{0}/{0}.txt".format(ds_name) graphs = [] labels = [] with open(directory, "r") as data: num_graphs = int(data.readline().rstrip().split(" ")[0]) for i in range(num_graphs): graph_meta = data.readline().rstrip().split(" ") num_vertex = int(graph_meta[0]) curr_graph = np.zeros(shape=(num_vertex, num_vertex, NUM_LABELS[ds_name]+1), dtype=np.float32) labels.append(int(graph_meta[1])) for j in range(num_vertex): vertex = data.readline().rstrip().split(" ") if NUM_LABELS[ds_name] != 0: curr_graph[j, j, int(vertex[0])+1] = 1. for k in range(2,len(vertex)): curr_graph[j, int(vertex[k]), 0] = 1. # curr_graph = noramlize_graph(curr_graph) graphs.append(curr_graph) graphs = np.array(graphs) for i in range(graphs.shape[0]): graphs[i] = np.transpose(graphs[i], [2,0,1]) return graphs, np.array(labels) def load_dataset(ds_name): directory = BASE_DIR + "/data/benchmark_graphs/{0}/{0}.txt".format(ds_name) graphs = [] labels = [] with open(directory, "r") as data: num_graphs = int(data.readline().rstrip().split(" ")[0]) for i in range(num_graphs): graph_meta = data.readline().rstrip().split(" ") num_vertex = int(graph_meta[0]) curr_graph = np.zeros(shape=(num_vertex, num_vertex, NUM_LABELS[ds_name] + 1), dtype=np.float32) labels.append(int(graph_meta[1])) # ori for j in range(num_vertex): vertex = data.readline().rstrip().split(" ") if NUM_LABELS[ds_name] != 0: curr_graph[j, j, int(vertex[0]) + 1] = int(vertex[0]) + 1 for k in range(2, len(vertex)): curr_graph[j, int(vertex[k]), 0] = 1. 
# curr_graph = noramlize_graph(curr_graph) graphs.append(curr_graph) graphs = np.array(graphs) labels = np.array(labels) # ori # dim = [graph.shape[0] for graph in graphs] # sort = (sorted([(x, i) for (i, x) in enumerate(dim)], reverse=True)[:110]) # graphs = np.delete(graphs, ([sort[i][1] for i in range(len(sort))]), axis=0) # labels = np.delete(labels, ([sort[i][1] for i in range(len(sort))]), axis=0) for i in range(graphs.shape[0]): graphs[i] = np.transpose(graphs[i], [2, 0, 1]) ## ori: use all features # edge_feature = Edge_Label(graphs[i]) # adj_powers = A_power(graphs[i][0]) # graphs[i] = np.concatenate((adj_powers, edge_feature), axis=0) adj_powers = A_power(graphs[i][0]) graphs[i] = np.concatenate((graphs[i], adj_powers[1:]), axis=0) # max_dim = max([graph.shape[0] for graph in graphs]) # for i in range(graphs.shape[0]): # padded = np.zeros((max_dim - graphs[i].shape[0], graphs[i].shape[1], graphs[i].shape[2])) # graphs[i] = np.concatenate((graphs[i], padded), axis=0) return graphs, labels def load_dataset2s(ds_name): graph_dict=dict(zip([5,6,9,12,15,16,25], [0.7,0.7,0.6,0.8,0.8,0.8,0.7])) num_rep=[100,100,100,200,200,200,200] # graph_dict=dict(zip([5,6,9,12,15,16], [0.7,0.7,0.6,0.8, 0.8,0.8])) # num_rep=[100,100,100,200,200,200] graphs = [] labels = [] for num, (k,v) in zip(num_rep, graph_dict.items()): G = nx.erdos_renyi_graph(k, v, seed=1, directed=False) #plt.subplot(121) #nx.draw(G,with_labels=True) label=nx.clique.graph_clique_number(G) A=nx.to_numpy_matrix(G,nodelist=list(range(len(G.nodes)))) graphs.append(A) labels.append(label) for graph in range(num): node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random()))) G_new = nx.relabel_nodes(G, node_mapping) A_new=nx.to_numpy_matrix(G_new, nodelist=list(range(len(G_new.nodes)))) graphs.append(A_new) labels.append(label) graphs = np.array(graphs) labels = np.array(labels) for i in range(graphs.shape[0]): # graphs[i] = A_power(graphs[i]) graphs[i] = np.expand_dims(graphs[i], 
axis=0) # use only A # max_dim = max([graph.shape[0] for graph in graphs]) # for i in range(graphs.shape[0]): # padded = np.zeros((max_dim-graphs[i].shape[0], graphs[i].shape[1], graphs[i].shape[1])) # graphs[i] =np.concatenate([graphs[i], padded], axis=0) le = preprocessing.LabelEncoder() # to find clique le.fit(labels) # to find clique labels = le.transform(labels) # to find clique return graphs, labels def load_dataset_2s_val(ds_name): """ construct graphs and labels from dataset text in data folder :param ds_name: name of data set you want to load :return: two numpy arrays of shape (num_of_graphs). the graphs array contains in each entry a ndarray represent adjacency matrix of a graph of shape (num_vertex, num_vertex, num_vertex_labels) the labels array in index i represent the class of graphs[i] """ graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7])) num_rep = [20, 20, 20, 30, 30, 30, 30] # graph_dict=dict(zip([5,6,9], [0.6,0.7,0.6])) # num_rep=[3,3,3] graphs = [] labels = [] for num, (k, v) in zip(num_rep, graph_dict.items()): G = nx.erdos_renyi_graph(k, v, seed=1, directed=False) # plt.subplot(121) # nx.draw(G,with_labels=True) label = nx.clique.graph_clique_number(G) A = nx.to_numpy_matrix(G, nodelist=list(range(len(G.nodes)))) for graph in range(num): node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random()))) G_new = nx.relabel_nodes(G, node_mapping) u, v = random.sample(range(G_new.number_of_nodes() + 1), 2) G_new.add_edge(u, v) if G_new.number_of_edges() == G.number_of_edges() + 1: if nx.clique.graph_clique_number(G_new) == label: A_new = nx.to_numpy_matrix(G_new, nodelist=list(range(len(G_new.nodes)))) graphs.append(A_new) labels.append(label) graphs = np.array(graphs) labels = np.array(labels) for i in range(graphs.shape[0]): # graphs[i] = np.transpose(graphs[i], [2,0,1]) ## ori: use all features graphs[i] = np.expand_dims(np.expand_dims(graphs[i], axis=0), axis=0) # use only A le = 
preprocessing.LabelEncoder() # to find clique le.fit(labels) # to find clique labels = le.transform(labels) # to find clique return graphs, labels def load_dataset2m(ds_name): graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7])) num_rep = [100, 100, 100, 200, 200, 200, 200] # graph_dict=dict(zip([5,6,9], [0.6,0.7,0.6])) # num_rep=[3,3,3] graphs = [] labels = [] for num, (k, v) in zip(num_rep, graph_dict.items()): G = nx.erdos_renyi_graph(k, v, seed=1, directed=False) # plt.subplot(121) # nx.draw(G,with_labels=True) label = nx.clique.graph_clique_number(G) A = nx.to_numpy_matrix(G, nodelist=list(range(len(G.nodes)))) graphs.append(A) labels.append(label) for graph in range(num): node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random()))) G_new = nx.relabel_nodes(G, node_mapping) u, v = random.sample(range(G_new.number_of_nodes() + 1), 2) G_new.add_edge(u, v) if G_new.number_of_edges() == G.number_of_edges() + 1: if nx.clique.graph_clique_number(G_new) == label: A_new = nx.to_numpy_matrix(G_new, nodelist=list(range(len(G_new.nodes)))) graphs.append(A_new) labels.append(label) graphs = np.array(graphs) labels = np.array(labels) for i in range(graphs.shape[0]): # graphs[i] = np.transpose(graphs[i], [2,0,1]) ## ori: use all features graphs[i] = np.expand_dims(graphs[i], axis=0) # use only A le = preprocessing.LabelEncoder() # to find clique le.fit(labels) # to find clique labels = le.transform(labels) # to find clique # idx = np.where(labels == 4)[0] # balance data # labels = np.delete(labels, idx[:700]) # labels = labels[:2000] # graphs = np.delete(graphs, idx[:700], axis=0) # graphs= graphs[:2000] return graphs, labels def get_train_val_indexes(num_val, ds_name): """ reads the indexes of a specific split to train and validation sets from data folder :param num_val: number of the split :param ds_name: name of data set :return: indexes of the train and test graphs """ directory = BASE_DIR + 
"/data/benchmark_graphs/{0}/10fold_idx".format(ds_name) train_file = "train_idx-{0}.txt".format(num_val) train_idx = [] with open(os.path.join(directory, train_file), 'r') as file: for line in file: train_idx.append(int(line.rstrip())) test_file = "test_idx-{0}.txt".format(num_val) test_idx = [] with open(os.path.join(directory, test_file), 'r') as file: for line in file: test_idx.append(int(line.rstrip())) return train_idx, test_idx def get_parameter_split(ds_name): """ reads the indexes of a specific split to train and validation sets from data folder :param ds_name: name of data set :return: indexes of the train and test graphs """ directory = BASE_DIR + "/data/benchmark_graphs/{0}/".format(ds_name) train_file = "tests_train_split.txt" train_idx = [] with open(os.path.join(directory, train_file), 'r') as file: for line in file: train_idx.append(int(line.rstrip())) test_file = "tests_val_split.txt" test_idx = [] with open(os.path.join(directory, test_file), 'r') as file: for line in file: test_idx.append(int(line.rstrip())) return train_idx, test_idx def group_same_size(graphs, labels, graphs3d): """ group graphs of same size to same array :param graphs: numpy array of shape (num_of_graphs) of numpy arrays of graphs adjacency matrix :param labels: numpy array of labels :return: two numpy arrays. graphs arrays in the shape (num of different size graphs) where each entry is a numpy array in the shape (number of graphs with this size, num vertex, num. 
vertex, num vertex labels) the second arrayy is labels with correspons shape """ sizes = list(map(lambda t: t.shape[1], graphs)) indexes = np.argsort(sizes) graphs = graphs[indexes] labels = labels[indexes] graphs3d = graphs3d[indexes] r_graphs = [] r_labels = [] r_graphs3d = [] one_size = [] one_size_node = [] start = 0 size = graphs[0].shape[1] for i in range(len(graphs)): if graphs[i].shape[1] == size: one_size.append(np.expand_dims(graphs[i], axis=0)) one_size_node.append(np.expand_dims(graphs3d[i], axis=0)) else: r_graphs.append(np.concatenate(one_size, axis=0)) r_graphs3d.append(np.concatenate(one_size_node, axis=0)) r_labels.append(np.array(labels[start:i])) start =
<filename>routing/active_paths_mixin.py<gh_stars>0
from operator import itemgetter
import logging
logger = logging.getLogger(__name__)

# own classes
from networking import Message


class ActivePathsMixin(object):
    """Mixin that tracks per-channel active publish/subscribe paths.

    ``__active_edges[channel][subscriber]`` points towards the subscriber
    (the next-hop peer for data), while
    ``__reverse_edges[channel][subscriber][publisher]`` points back towards
    the publisher; both carry a monotonically increasing edge ``version``.
    Host classes are expected to provide ``self.connections``,
    ``_route_covert_data`` and ``_send_covert_msg``.
    """

    def __init__(self, *args, **kwargs):
        # When True, a detected new path aggressively tears down the old one
        # (see __configure / __activate_edge).
        self.__aggressive_teardown = False
        self.__active_edges = {}  # needed to send out error messages when needed and to know if overlay is established
        self.__reverse_edges = {}

    def __configure(self, aggressive_teardown):
        # Toggle aggressive teardown of superseded paths.
        self.__aggressive_teardown = aggressive_teardown

    def __dump_state(self):
        # Snapshot of both edge tables (for debugging/introspection).
        return {
            "active_edges": self.__active_edges,
            "reverse_edges": self.__reverse_edges,
        }

    def __init_channel(self, channel):
        # Lazily create the per-channel dicts in both edge tables.
        if channel not in self.__active_edges:
            self.__active_edges[channel] = {}
        if channel not in self.__reverse_edges:
            self.__reverse_edges[channel] = {}

    def __cleanup(self, connection):
        """Remove all edges that ran over a now-broken connection.

        Order matters: errors and teardowns are routed out first, then the
        stale entries are deleted from both tables.
        """
        peer = connection.get_peer_id()
        # inform affected subscribers of broken path, so they can reestablish the overlay
        for channel in self.__reverse_edges:
            # list() copies: entries may be deleted while iterating below.
            for subscriber in list(self.__reverse_edges[channel].keys()):
                for publisher in list(self.__reverse_edges[channel][subscriber].keys()):
                    if peer == self.__reverse_edges[channel][subscriber][publisher]["peer"]:
                        self._route_covert_data(Message("%s_error" % self.__class__.__name__, {
                            "channel": channel,
                            "subscriber": subscriber,
                            "publisher": publisher
                        }), connection)
        # teardown broken path to publishers along the reverse active edges if the corresponding active edge is broken
        for channel in self.__active_edges:
            for subscriber in list(self.__active_edges[channel].keys()):
                if self.__active_edges[channel][subscriber]["peer"] == peer and subscriber in self.__reverse_edges[channel]:
                    for publisher in list(self.__reverse_edges[channel][subscriber].keys()):
                        self._route_covert_data(Message("%s_teardown" % self.__class__.__name__, {
                            "channel": channel,
                            "subscriber": subscriber,
                            "publisher": publisher,
                            "version": self.__reverse_edges[channel][subscriber][publisher]["version"]
                        }))
        # clean up active edges to
subscribers for channel in self.__active_edges: for subscriber in list(self.__active_edges[channel].keys()): if self.__active_edges[channel][subscriber]["peer"] == peer: del self.__active_edges[channel][subscriber] # clean up reverse active edges to publishers for channel in self.__reverse_edges: for subscriber in self.__reverse_edges[channel]: for publisher in list(self.__reverse_edges[channel][subscriber].keys()): if self.__reverse_edges[channel][subscriber][publisher]["peer"] == peer: del self.__reverse_edges[channel][subscriber][publisher] def __send_error_reply(self, subscription, publisher, incoming_connection): logger.info("Sending error reply to %s..." % str(incoming_connection)) self._send_covert_msg(Message("%s_error" % self.__class__.__name__, { "channel": subscription["channel"], "subscriber": subscription["subscriber"], "publisher": publisher }), incoming_connection) def __unpublish(self, channel, publisher): self._route_covert_data(Message("%s_unpublish" % self.__class__.__name__, { "channel": channel, "publisher": publisher })) def __get_next_hops(self, channel, incoming_peer=None): # calculate list of next nodes to route a (data) messages to according to the active edges (and don't return our incoming peer here) return set(con for node_id, con in self.connections.items() if node_id in set(itemgetter("peer")(entry) for entry in self.__active_edges[channel].values()) and node_id != incoming_peer) def __active_edges_present(self, channel, subscriber, publisher=None): return (subscriber in self.__reverse_edges[channel] and len( set(itemgetter("peer")(entry) for pub, entry in self.__reverse_edges[channel][subscriber].items() if pub == publisher or publisher == None))) def __get_known_publishers(self, channel): retval = set() for subscriber in self.__reverse_edges[channel]: retval.update(self.__reverse_edges[channel][subscriber].keys()) return retval def __activate_edge(self, channel, subscriber, publisher, version, incoming_connection, 
outgoing_connection): self.__init_channel(channel) incoming_peer = incoming_connection.get_peer_id() if incoming_connection else None outgoing_peer = outgoing_connection.get_peer_id() if outgoing_connection else None # fill in reverse edge if this activation was received from a real peer (in contrast to activations starting/originating here) if incoming_connection: if subscriber not in self.__reverse_edges[channel]: self.__reverse_edges[channel][subscriber] = {} if publisher not in self.__reverse_edges[channel][subscriber]: logger.info("Activation for channel '%s', version %s, edge pointing from publisher '%s' (connection: %s) to subscriber '%s' (connection: %s)..." % ( str(channel), str(version), str(publisher), str(incoming_connection), str(subscriber), str(outgoing_connection) )) # send out a teardown message if a new path was detected elif (incoming_peer != self.__reverse_edges[channel][subscriber][publisher]["peer"] and version > self.__reverse_edges[channel][subscriber][publisher]["version"]): logger.warning("New path activation for channel '%s' version %s, edge pointing from publisher '%s' (connection: %s) to subscriber '%s' (connection: %s)..." % ( str(channel), str(version), str(publisher), str(incoming_connection), str(subscriber), str(outgoing_connection) )) if self.__aggressive_teardown: self._route_covert_data(Message("%s_teardown" % self.__class__.__name__, { "channel": channel, "subscriber": subscriber, "publisher": publisher, # this has to be the version of the old edge, not the one of our new edge! 
"version": self.__reverse_edges[channel][subscriber][publisher]["version"] })) # always increment edge versions, never decrement if (publisher not in self.__reverse_edges[channel][subscriber] or version >= self.__reverse_edges[channel][subscriber][publisher]["version"]): logger.info("Edge version is now %d (from %s to %s)" % (version, str(incoming_connection), str(outgoing_connection))) self.__reverse_edges[channel][subscriber][publisher] = { "version": version, "peer": incoming_peer } if outgoing_peer: # only activate (new) edge if we don't have an edge to this subscriber yet, or if the new edge has a greater version # than the edge we have already if (subscriber not in self.__active_edges[channel] or self.__active_edges[channel][subscriber]["version"] < version): #NOTE: use this for fewer logging output: if outgoing_peer not in set(itemgetter("peer")(entry) for entry in self.__active_edges[channel].values()): logger.info("Activating edge from %s to %s for channel '%s' (new edge version: %d, old edge version: %d)..." % ( str(incoming_connection), str(outgoing_connection), str(channel), version, self.__active_edges[channel][subscriber]["version"] if subscriber in self.__active_edges[channel] else 0 )) self.__active_edges[channel][subscriber] = { "version": version, "peer": outgoing_peer } else: logger.info("Ignoring new edge version %d, old edge has version %d" % (version, self.__active_edges[channel][subscriber]["version"])) def __route_teardown(self, teardown, incoming_connection): logger.info("Routing teardown: %s coming from %s..." 
% (str(teardown), str(incoming_connection))) self.__init_channel(teardown["channel"]) incoming_peer = incoming_connection.get_peer_id() if incoming_connection else None # get peer id of reverse active edge to this publisher for this subscriber and delete edge afterwards # but only if the edge version is lower or equal to the teardown version node_id = None if teardown["subscriber"] in self.__reverse_edges[teardown["channel"]]: if teardown["publisher"] in self.__reverse_edges[teardown["channel"]][teardown["subscriber"]]: node_id = self.__reverse_edges[teardown["channel"]][teardown["subscriber"]][teardown["publisher"]]["peer"] edge_version = self.__reverse_edges[teardown["channel"]][teardown["subscriber"]][teardown["publisher"]]["version"] logger.info("Teardown versus edge: edge: %d, teardown: %d" % (edge_version, teardown["version"])) if edge_version > teardown["version"]: logger.warning("Teardown version (%d) lower than edge version (%d), not processing this outdated teardown!" % (teardown["version"], edge_version)) return # ignore outdated teardown messages not originating from here logger.info("Removing reverse active edge '%s' due to teardown..." 
% str(self.__reverse_edges[teardown["channel"]][teardown["subscriber"]][teardown["publisher"]])) del self.__reverse_edges[teardown["channel"]][teardown["subscriber"]][teardown["publisher"]] if not len(self.__reverse_edges[teardown["channel"]][teardown["subscriber"]]): del self.__reverse_edges[teardown["channel"]][teardown["subscriber"]] # only handle teardown if we have a matching active edge # otherwise don't touch our active edge and don't route the teardown further # (but only if the teardown does not originate here, because we definitely need to route it further if it originates here) if incoming_connection and (teardown["subscriber"] not in self.__active_edges[teardown["channel"]] or self.__active_edges[teardown["channel"]][teardown["subscriber"]]["peer"] != incoming_peer or self.__active_edges[teardown["channel"]][teardown["subscriber"]]["version"] > teardown["version"]): logger.info("Not routing teardown further because we have no matching active edge (or its version is higher than the teardown version)...") return # delete active edge to this subscriber if the teardown didn't originate here AND the reverse active edge # for this subscriber is the only one (or if there is no reverse active edge at all because the reverse path ends here). # if it is NOT the only one, we are a forwarder for another publisher and the active edge is still needed if incoming_connection and ((teardown["subscriber"] in self.__reverse_edges[teardown["channel"]] and len(self.__reverse_edges[teardown["channel"]][teardown["subscriber"]]) <= 1) or teardown["subscriber"] not in self.__reverse_edges[teardown["channel"]]): logger.info("Removing active edge via %s due to teardown..." 
% ( str(self.connections[self.__active_edges[teardown["channel"]][teardown["subscriber"]]["peer"]]) if self.__active_edges[teardown["channel"]][teardown["subscriber"]]["peer"] in self.connections else str(self.__active_edges[teardown["channel"]][teardown["subscriber"]]["peer"]) )) del self.__active_edges[teardown["channel"]][teardown["subscriber"]] # route teardown message further along the reverse active edge to this publisher for this subscriber if node_id in self.connections: logger.info("Routing teardown to %s..." % str(self.connections[node_id])) self._send_covert_msg(teardown, self.connections[node_id]) def __route_error(self, error, subscriber_id, incoming_connection, callback=None): logger.info("Routing error: %s coming from %s..." % (str(error), str(incoming_connection))) self.__init_channel(error["channel"]) incoming_peer = incoming_connection.get_peer_id() if incoming_connection else None # abort routing of error message if incoming_peer is not a reverse active edge for this subsciber # if a new active path was created, the reverse active edge pointing to the old next hop was replaced by the new one # the search fot the old edge (incoming_peer) will thus fail # edge versions are not relevant here if (error["subscriber"] not in self.__reverse_edges[error["channel"]] or not incoming_peer in set(itemgetter("peer")(entry) for entry in self.__reverse_edges[error["channel"]][error["subscriber"]].values())): logger.info("Not routing error further, incoming_peer is not a reverse active edge...") return # recreate broken overlay if needed (we subscribed the channel) if subscriber_id and error["subscriber"] == subscriber_id: logger.warning("Received error, tearing down broken path to publisher %s on channel '%s'..." 
% (str(error["publisher"]), str(error["channel"]))) self._route_covert_data(Message("%s_teardown" % self.__class__.__name__, { "channel": error["channel"], "subscriber": error["subscriber"], "publisher": error["publisher"], "version": self.__reverse_edges[error["channel"]][error["subscriber"]][error["publisher"]]["version"] })) # handle error further if a callback was given if callback: callback(error) # route error message further along the active edge for this subscriber if error["subscriber"] in self.__active_edges[error["channel"]]: node_id = self.__active_edges[error["channel"]][error["subscriber"]]["peer"] if node_id in self.connections: logger.info("Routing error to %s..." % str(self.connections[node_id])) self._send_covert_msg(error, self.connections[node_id]) def __route_unsubscribe(self, unsubscribe, incoming_connection): logger.info("Routing unsubscribe: %s coming from %s..." % (str(unsubscribe), str(incoming_connection))) self.__init_channel(unsubscribe["channel"]) incoming_peer = incoming_connection.get_peer_id() if incoming_connection else None # route unsubscribe message along all reverse active edges to all publishers (only if there are reverse edges, of course) # edge versions not relevant here if unsubscribe["subscriber"] in self.__reverse_edges[unsubscribe["channel"]]: for node_id in set(itemgetter("peer")(entry) for entry in self.__reverse_edges[unsubscribe["channel"]][unsubscribe["subscriber"]].values()): if node_id in self.connections:
    # --- NOTE(review): the lines below are the tail of a plotting function whose
    # definition starts before this chunk (it appears to build the
    # 'cr_rate_vs_time.png' figure on ax1/fig using `solar_cycle`). ---
    #     'maximum_cycle24':Time('2014-02-01', format='iso'),
    #     'minimum_cycle23':Time('1996-05-01', format='iso'),
    #     'minimum_cycle24': Time('2008-12-01', format='iso')}

    #ax2.plot(solar_df.index.values,
    #         solar_df['sunspot RI'],
    #         label='Monthly Mean',
    #         c='#1E88E5')
    #ax2.plot(solar_df.index.values,
    #         solar_df['sunspot RI smooth'],
    #         label='Smoothed',
    #         c='#D81B60')
    ax1.tick_params(labelbottom=True)
    # mark solar minima/maxima; only label the first pair so the legend gets one entry each
    # NOTE(review): branch structure reconstructed from flattened text -- the
    # original may have had an else: for the unlabelled axvline pair; confirm.
    for i, cycle in enumerate(solar_cycle.keys()):
        # Min
        if i == 0:
            ax1.axvline(solar_cycle[cycle][0], label='Solar Min', ls='--', color='k')
            # Max
            ax1.axvline(solar_cycle[cycle][1], label='Solar Max', ls='-', color='k')
        ax1.axvline(solar_cycle[cycle][0], ls='--', color='k')
        # Max
        ax1.axvline(solar_cycle[cycle][1], ls='-', color='k')
        #ax2.axvline(solar_cycle[cycle][1], ls='-', color='k')
    ax1_legend = ax1.legend(loc='upper left', bbox_to_anchor=(1.02, 1.), ncol=1,
                            labelspacing=0.2, fontsize=11, columnspacing=0.5)
    ax1_legend.get_frame().set_edgecolor('k')
    for i in range(len(ax1_legend.legendHandles)):
        ax1_legend.legendHandles[i]._sizes = [15]
    date_min = Time('1992-01-01', format='iso')
    date_max = Time('2020-01-01', format='iso')
    ax1.set_xlim((date_min.to_datetime(), date_max.to_datetime()))
    # ax2.set_xlim((date_min.to_datetime(), date_max.to_datetime()))
    # ax2.set_ylabel('$R_I$', fontsize=10, color='k')
    # ax2.set_xlabel('Date', fontsize=10, color='k')
    ax1.set_ylim(0.75, 2)
    ax1.set_ylabel('Median Normalized CR Flux', fontsize=11, color='k')
    ax1.set_xlabel('Observation Date', fontsize=11, color='k')
    # for ax in [ax1, ax2]:
    ax1.tick_params(axis='both', which='minor', width=1, length=2.5)
    ax1.tick_params(axis='both', which='major', width=1.5, length=5)
    fout = os.path.join(APJ_PLOT_DIR, 'cr_rate_vs_time.png')
    fig.savefig(fout, format='png', dpi=350, bbox_inches='tight', transparent=False)
    plt.show()


def ccd_substrate_model():
    """Draw a schematic of the ACS/WFC CCD substrate layers with incident and
    reflected light rays, and save it to APJ_PLOT_DIR/ccd_substrate_example.png."""
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4, 6))

    ax.set_facecolor('white')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax.text(-1, 17, 'ACS/WFC CCD Substrate Layers', fontsize='x-large')

    # top layer
    text_x = 8
    ax.text(6, 12, 'CCD Housing\n Environment', fontsize='large')
    arrow_w = 0.05

    ax.text(0, 15.2, 'Incident Light')
    ax.arrow(2, 15, 0.55, -4.75, color='k', shape='full', width=arrow_w)
    # reflected
    ax.arrow(2 + 0.7, 15 - 4.95, 0.55, 3, color='k', linestyle='dashed', width=arrow_w)
    ax.plot([2.65, 2.65], [10, 15], ls='--', color='k')
    ax.axhline(10, xmin=0.1)

    # Si
    ax.arrow(2.65, 10., 0.53, -3.75, color='k', shape='full', width=arrow_w)
    # reflected
    ax.arrow(2.65 + 0.6, 10 - 3.95, 0.45, 1.75, color='k', linestyle='dashed', width=arrow_w)
    ax.plot([3.25, 3.25], [6, 9], ls='--', color='k')
    ax.text(text_x, 7.7, '$Si$', fontsize='large')
    ax.axhline(6, xmin=0.1)

    # SiO2
    ax.arrow(3.25, 6., 0.35, -1.75, color='k', shape='full', width=arrow_w)
    # reflected
    ax.arrow(3.24 + 0.45, 6 - 1.95, 0.3, 1., color='k', linestyle='dashed', width=arrow_w)
    ax.plot([3.68, 3.68], [4, 5.5], ls='--', color='k')
    ax.text(text_x, 4.8, r'$SiO_2$', fontsize='large')
    ax.axhline(4, xmin=0.1)

    # Si3N4
    ax.text(text_x, 2.8, r'$Si_{3}N_4$', fontsize='large')
    ax.arrow(3.7, 4., 0.45, -1.8, color='k', shape='full', width=arrow_w)
    # reflected
    ax.arrow(3.75 + 0.5, 4 - 1.95, 0.25, 1., color='k', linestyle='dashed', width=arrow_w)
    ax.plot([4.22, 4.22], [2, 3.5], ls='--', color='k')
    ax.axhline(2, xmin=0.1)

    # Si
    ax.text(text_x, 0.85, r'$Si$', fontsize='large')
    ax.arrow(4.25, 2, 0.55, -1.8, color='k', shape='full', width=arrow_w)
    # reflected
    ax.arrow(4.25 + 0.65, 2 - 1.9, 0.3, 1., color='k', linestyle='dashed', width=arrow_w)
    ax.plot([4.87, 4.87], [0, 1.5], ls='--', color='k')

    # Substrate
    ax.axhline(0, xmin=0.1)
    ax.text(text_x, -1, r'$Substrate$', fontsize='large')
    ax.axhline(-1.75, xmin=0.1)

    ax.grid(False)
    ax.set_xlim(-1, 12)
    ax.set_ylim(-2, 15.5)
    fout = os.path.join(APJ_PLOT_DIR, 'ccd_substrate_example.png')
    fig.savefig(fout, format='png', dpi=300, bbox_inches='tight',
                transparent=True, frameon=False)


def thickness_histograms():
    """Load thickness maps and smoothed CR-strike maps for ACS/HRC, ACS/WFC and
    WFC3/UVIS and build display normalizations.
    NOTE(review): the function ends after creating the figure -- nothing is
    plotted or saved here; looks unfinished, confirm intent."""
    data_dict_th = {
        'acs_hrc': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/hrc_th_Si.fits',
            'data': None,
            'interval': [12.49, 16.03],
            'norm': None,
            'im': None,
            'cbar_ticks': [13, 14, 15, 16],
        },
        'acs_wfc': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/wfc_th1.fits',
            'data': None,
            'interval': [12.60, 17.10],
            'norm': None,
            'im': None,
            'cbar_ticks': [13, 14, 15, 16, 17],
        },
        'wfc3_uvis': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/wfc3_uvis_thickness.fits',
            'data': None,
            'interval': [13.50, 18.00],
            'cbar_ticks': [14, 15, 16, 17, 18],
            'norm': None,
            'im': None
        }
    }

    data_dict_cr = {
        'acs_hrc': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/smoothed_acs_hrc.fits',
            'data': None,
            'interval': [130, 205],
            'norm': None,
            'im': None,
            'cbar_ticks': [130, 145, 160, 175, 190, 205],
        },
        'acs_wfc': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/acs_wfc_smoothed.fits',
            'data': None,
            'interval': [60, 110],
            'norm': None,
            'im': None,
            'cbar_ticks': [60, 70, 80, 90, 100, 110],
        },
        'wfc3_uvis': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/wfc3_uvis_smoothed.fits',
            'data': None,
            'interval': [140, 240],
            'cbar_ticks': [140, 160, 180, 200, 220, 240],
            'norm': None,
            'im': None
        }
    }

    for key in data_dict_th.keys():
        data_dict_th[key]['data'] = fits.getdata(data_dict_th[key]['fname'])
        data_dict_th[key]['norm'] = ImageNormalize(
            data_dict_th[key]['data'],
            stretch=LinearStretch(),
            vmin=data_dict_th[key]['interval'][0],
            vmax=data_dict_th[key]['interval'][1]
        )
        # Get the CR data
        cr_data = fits.getdata(data_dict_cr[key]['fname'])
        smoothed = gaussian_filter(cr_data, sigma=2)
        data_dict_cr[key]['data'] = smoothed
        data_dict_cr[key]['norm'] = ImageNormalize(
            data_dict_cr[key]['data'],
            stretch=LinearStretch(),
            vmin=data_dict_cr[key]['interval'][0],
            vmax=data_dict_cr[key]['interval'][1]
        )
    v = visualize.Visualizer()
    fig, axes = v.mk_fig(nrows=1, ncols=3, figsize=(5, 3),
                         sharex=True, sharey=True)


def plot_stis_energy_area():
    """Histogram the cumulative energy deposited per area per time for the STIS
    CCD catalog, marking the 10th/50th/90th percentiles, and save the figure."""
    df = pd.read_csv('stis_ccd_catalog_full.txt', header=0)
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7, 4.5))
    df['cumulative_energy_per_area_per_time'].hist(
        range=(10, 1e4),
        bins=100,
        ax=ax
    )
    ax.xaxis.set_minor_locator(MultipleLocator(200))
    ax.xaxis.set_major_locator(MultipleLocator(1000))
    ax.yaxis.set_major_locator(MultipleLocator(200))
    ax.yaxis.set_minor_locator(MultipleLocator(50))
    ax.tick_params(which='minor', axis='both', width=1., length=2.)
    ax.tick_params(which='major', axis='both', width=1.5, length=4)
    ax.set_ylabel('Bin Count')
    ax.set_xlabel('Energy Deposited per Image [$e^-/s/cm^2$]')
    ax.set_xlim((0, 1e4))
    stats = df.cumulative_energy_per_area_per_time.describe()
    percentiles = np.percentile(df.cumulative_energy_per_area_per_time, q=[10, 50, 90])
    print('\n'.join(list([str(val) for val in percentiles])))
    stats['10%'] = percentiles[0]
    stats['50%'] = percentiles[1]
    stats['90%'] = percentiles[2]
    ax.axvline(stats['10%'], ls='--', c='k',
               label="$10^{th}$ percentile")  # ={stats['10%']:.2f}$e^-/s/cm^2$")
    ax.axvline(stats['50%'], ls='-', c='k',
               label=f"Median")  # ={stats['50%']:.2f}$e^-/s/cm^2$")
    ax.axvline(stats['90%'], ls=':', c='k',
               label="$90^{th}$ percentile")  # ={stats['90%']:.2f}$e^-/s/cm^2$")
    ax.legend(loc='best', edgecolor='k')
    fout = os.path.join(APJ_PLOT_DIR, 'total_energy_deposited_per_s_cm2_stis.png')
    fig.savefig(fout, dpi=250, format='png', bbox_inches='tight')
    plt.show()


def thickness_plot(fname=None, fname_comp=None, fout=None, instr=None):
    """Plot per-detector thickness maps (top row) next to smoothed CR-strike
    maps (bottom row) for ACS/HRC, ACS/WFC and WFC3/UVIS and save the figure.

    Parameters
    ----------
    fname
    fname_comp
    fout
    instr
        NOTE(review): all four parameters are currently unused -- the file
        paths below are hard coded; confirm whether they should be honoured.

    Returns
    -------
    None
    """
    data_dict_th = {
        'acs_hrc': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/hrc_th_Si.fits',
            'data': None,
            'interval': [12.49, 16.03],
            'norm': None,
            'im': None,
            'cbar_ticks': [13, 14, 15, 16],
        },
        'acs_wfc': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/wfc_th1.fits',
            'data': None,
            'interval': [12.60, 17.10],
            'norm': None,
            'im': None,
            'cbar_ticks': [13, 14, 15, 16, 17],
        },
        'wfc3_uvis': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/wfc3_uvis_thickness.fits',
            'data': None,
            'interval': [13.50, 18.00],
            'cbar_ticks': [14, 15, 16, 17, 18],
            'norm': None,
            'im': None
        }
    }

    data_dict_cr = {
        'acs_hrc': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/smoothed_acs_hrc.fits',
            'data': None,
            'interval': [130, 205],
            'norm': None,
            'im': None,
            'cbar_ticks': [130, 145, 160, 175, 190, 205],
        },
        'acs_wfc': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/acs_wfc_smoothed.fits',
            'data': None,
            'interval': [60, 110],
            'norm': None,
            'im': None,
            'cbar_ticks': [60, 70, 80, 90, 100, 110],
        },
        'wfc3_uvis': {
            'fname': '/Users/nmiles/hst_cosmic_rays/results/wfc3_uvis_smoothed.fits',
            'data': None,
            'interval': [140, 240],
            'cbar_ticks': [140, 160, 180, 200, 220, 240],
            'norm': None,
            'im': None
        }
    }
    # rc('text', usetex=True)
    data = []
    for key in data_dict_th.keys():
        data_dict_th[key]['data'] = fits.getdata(data_dict_th[key]['fname'])
        data_dict_th[key]['norm'] = ImageNormalize(
            data_dict_th[key]['data'],
            stretch=LinearStretch(),
            vmin=data_dict_th[key]['interval'][0],
            vmax=data_dict_th[key]['interval'][1]
        )
        # Get the CR data
        cr_data = fits.getdata(data_dict_cr[key]['fname'])
        smoothed = gaussian_filter(cr_data, sigma=2)
        data_dict_cr[key]['data'] = smoothed
        data_dict_cr[key]['norm'] = ImageNormalize(
            data_dict_cr[key]['data'],
            stretch=LinearStretch(),
            vmin=data_dict_cr[key]['interval'][0],
            vmax=data_dict_cr[key]['interval'][1]
        )
    # v = visualize.Visualizer()
    # fig, axes = v.mk_fig(nrows=2, ncols=3, figsize=(9, 6))
    fig = plt.figure(figsize=(9, 7))
    gs0 = gridspec.GridSpec(ncols=3, nrows=2, figure=fig, hspace=0.05, wspace=0.3)
    # fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(9,6))
    axes = [
        fig.add_subplot(gs0[i, j]) for i in range(2) for j in range(3)
    ]
    # Plot the thickness data
    for ax, key in zip(axes[:3], data_dict_th.keys()):
        ax.grid(False)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        if key == 'acs_wfc':
            data_dict_th[key]['im'] = ax.imshow(data_dict_th[key]['data'],
                                                norm=data_dict_th[key]['norm'],
                                                cmap='plasma')
        else:
            data_dict_th[key]['im'] = ax.imshow(data_dict_th[key]['data'],
                                                norm=data_dict_th[key]['norm'],
                                                cmap='plasma',
                                                origin='lower')
        ax.set_title('{}'.format(key.replace('_', '/').upper()))
        divider = make_axes_locatable(ax)
        cax = divider.append_axes('bottom', size='5%', pad=0.05)
        cbar = fig.colorbar(data_dict_th[key]['im'], cax=cax,
                            ticks=data_dict_th[key]['cbar_ticks'],
                            orientation='horizontal')
        # cax = divider.append_axes('right', size='5%', pad=0.05)
        # cbar = fig.colorbar(data_dict_th[key]['im'], cax=cax,
        #                     ticks=data_dict_th[key]['cbar_ticks'],
        #                     orientation='vertical')
        # cbar.ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%2.1f'))
        # cbar.ax.xaxis.set_major_locator(plt.MaxNLocator(5))
        cbar_labels = [str(x) for x in data_dict_th[key]['cbar_ticks']]
        cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(), rotation=45)
        # cbar.ax.set_yticklabels(cbar_labels, ha='left', rotation=0, fontsize=8)
        cbar.set_label(r'Thickness $[\mu m]$', fontsize=10)
    # fout = os.path.join(APJ_PLOT_DIR, 'thickness_all_instr.png')
    # fig.savefig(fout,
    #             transparent=True,
    #             format='png',
    #             dpi=350,
    #             bbox_inches='tight')
    # plt.show()
    # fig, axes = v.mk_fig(nrows=1, ncols=3, figsize=(10, 5))
    # plot the CR data
    for ax, key in zip(axes[3:], data_dict_th.keys()):
        ax.grid(False)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # ax.set_title('{}'.format(key.replace('_', '/').upper()))
        data_dict_cr[key]['im'] = ax.imshow(data_dict_cr[key]['data'],
                                            norm=data_dict_cr[key]['norm'],
                                            cmap='plasma',
                                            origin='lower')
        divider = make_axes_locatable(ax)
        cax = divider.append_axes('bottom', size='5%', pad=0.05)
        cbar = fig.colorbar(data_dict_cr[key]['im'], cax=cax,
                            ticks=data_dict_cr[key]['cbar_ticks'],
                            orientation='horizontal')
        # cax = divider.append_axes('right', size='5%', pad=0.05)
        # cbar = fig.colorbar(data_dict_cr[key]['im'], cax=cax,
        #                     ticks=data_dict_cr[key]['cbar_ticks'],
        #                     orientation='vertical')
        cbar_labels = [str(x) for x in data_dict_cr[key]['cbar_ticks']]
        cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(), rotation=45)
        cbar.set_label(r'Number of CR Strikes', fontsize=10)
    # Add a colorbar to show the image scaling
    # divider1 = make_axes_locatable(ax1)
    # cax1 = divider1.append_axes('bottom', size='5%', pad=0.1)
    # cbar1 = fig1.colorbar(im1, cax=cax1, orientation='horizontal')
    # cbar1.ax.set_xticklabels(cbar1.ax.get_xticklabels(), rotation=45)
    # cbar1.set_label('Cosmic Ray Strikes')
    # if not astrofits:
    #     norm1 = ImageNormalize(comp_data,
    #                            stretch=LinearStretch(),
    #                            interval=ZScaleInterval())
    #     im2 = ax2.imshow(comp_data, norm=norm1, cmap='plasma')
    # else:
    #     norm1 = ImageNormalize(comp_data,
    #                            stretch=LinearStretch(),
    #                            vmin=12.5, vmax=16)
    #     im2 = ax2.imshow(comp_data, cmap='plasma', norm=norm1)  # , origin='lower')
    # # Add a colorbar to show the image scaling
    # divider2 = make_axes_locatable(ax2)
    # cax2 = divider2.append_axes('bottom', size='5%', pad=0.1)
    # cbar2 = fig2.colorbar(im2, cax=cax2, orientation='horizontal')
    # cbar2.ax.set_xticklabels(cbar2.ax.get_xticklabels(), rotation=45)
    # cbar2.set_label(r'Thickness $[\mu m]$')
    # ax1.grid(False)
    # ax2.grid(False)
    # ax1.set_title('WFC Cosmic Ray Incidence Heat Map')
    # ax2.set_title('WFC Fringing Thickness Map')
    #
    # # fig.suptitle(instr,
    # #              x=0.5, y=0.9,
    # #              horizontalalignment='center',
    # #              size=16, weight='bold')
    # fig1.savefig('cr_heat_map_WFC.png',
    #              transparent=True, format='png', dpi=350, bbox_inches='tight')
    fout = os.path.join(APJ_PLOT_DIR, 'cr_th_all_instr.png')
    fig.savefig(fout, format='png', dpi=250, bbox_inches='tight')
    plt.show()


def plot_example_darks(hrc=None, wfc=None, wfpc2=None, stis=None, uvis=None):
    """Show example dark frames for five HST detectors side by side.
    NOTE(review): the keyword arguments are immediately overwritten by the
    hard-coded local paths below -- confirm whether they should be honoured.
    NOTE(review): truncated at the final savefig call at the chunk edge."""
    hrc = '/Users/nmiles/hst_cosmic_rays/data/ACS/HRC/mastDownload/HST/j8ba0hrpq/j8ba0hrpq_flt.fits'
    wfc = '/Users/nmiles/hst_cosmic_rays/data/ACS/WFC/j8jbrcgrq_flt.fits'
    stis = '/Users/nmiles/hst_cosmic_rays/data/STIS/STIS_grazing_CR/o3sl01pcq_flt.fits'
    wfpc2 = '/Users/nmiles/hst_cosmic_rays/data/WFPC2/mastDownload/HST/u21y2801t/u21y2801t_c0m.fits'
    uvis = '/Users/nmiles/hst_cosmic_rays/data/WFC3/UVIS/icfcafaaq_blv_tmp.fits'
    fig = plt.figure(figsize=(7, 5))
    gs0 = gridspec.GridSpec(ncols=5, nrows=1, figure=fig, hspace=0, wspace=0)
    # gs00 = gridspec.GridSpecFromSubplotSpec(nrows=1, ncols=6, hspace=0,
    #                                         wspace=0., subplot_spec=gs0[0])
    # gs10 = gridspec.GridSpecFromSubplotSpec(nrows=1, ncols=6, hspace=0,
    #                                         wspace=0., subplot_spec=gs0[1])
    # ax1 = fig.add_subplot(gs00[0, 1:3])
    # ax2 = fig.add_subplot(gs00[0, 3:5])
    # ax3 = fig.add_subplot(gs10[0, :2])
    # ax4 = fig.add_subplot(gs10[0, 2:4])
    # ax5 = fig.add_subplot(gs10[0, 4:6])
    ax1 = fig.add_subplot(gs0[0])
    ax2 = fig.add_subplot(gs0[1])
    ax3 = fig.add_subplot(gs0[2])
    ax4 = fig.add_subplot(gs0[3])
    ax5 = fig.add_subplot(gs0[4])
    axes = [ax1, ax2, ax3, ax4, ax5]
    labels = ['ACS/HRC', 'ACS/WFC', 'STIS/CCD', 'WFPC2', 'WFC3/UVIS']
    datasets = [hrc, wfc, stis, wfpc2, uvis]
    for dset, label, ax in zip(datasets, labels, axes):
        with fits.open(dset) as hdu:
            data = hdu[1].data
        norm = ImageNormalize(data,
                              stretch=LinearStretch(),
                              interval=ZScaleInterval())
        ax.imshow(data, norm=norm, origin='lower', cmap='gray')
        ax.yaxis.set_major_locator(plt.NullLocator())
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.grid(False)
        ax.set_xlim((100, 400))
        ax.set_ylim((100, 400))
        # ax.text(x=120, y=356, s=label, color='#F4CC70', fontsize=8,
        #         fontweight='medium', backgroundcolor='white')
        # ax.set_title(label)
    fig.savefig(
        os.path.join(APJ_PLOT_DIR, 'example_darks_transparent.png'),
        # NOTE(review): chunk truncated here -- remaining savefig kwargs not visible
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
sfftkplus.formats.roi

Copyright 2017 EMBL - European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied.

See the License for the specific language governing
permissions and limitations under the License.
"""
from __future__ import division, print_function

import os
import math
import sys

import psycopg2

import sfftkplus.schema.roi
from sfftkrw.core.print_tools import print_date
from sfftkrw.core.utils import rgba_to_hex

from ..readers import roireader
from ..schema import roi

__author__ = "<NAME>, PhD"
__email__ = "<EMAIL>, <EMAIL>"
__date__ = "2017-04-11"

# contour orientations supported by the ROI schema
ORIENTATIONS = ['x', 'y', 'z']


def get_image_id(cursor, image_name_root, view, ext='map', quick_pick=None):
    """Obtain the image IDs for top, front and right images by EMDB accession code

    :param cursor: cursor to postgres connection
    :type cursor: psycopg2.Cursor
    :param str image_name_root: accession code in lowercase e.g. 'emd_1080'
    :param str view: the view of the image; either 'top', 'front' or 'right'
    :param str ext: extension of image file e.g. 'map'
    :param int quick_pick: row index to pick when several IDs match (None = don't pick)
    :return int image_id: the image ID or 0 for fail (no or multiple image IDs found)
    """
    try:
        assert isinstance(cursor, psycopg2.extensions.cursor)
    except AssertionError:
        print_date("Not psycopg2.extensions.cursor object: {}".format(cursor))
        sys.exit(1)
    views = ['top', 'front', 'right']
    try:
        assert view in views
    except AssertionError:
        print_date("Invalid view: {}; should be one of: {}".format(view, ", ".join(views)))
        sys.exit(1)
    exts = ['map', 'mrc', 'rec']
    try:
        assert ext in exts  # supported file extensions
    except AssertionError:
        print_date("Invalid extension: {}; should be one of {}".format(ext, ", ".join(exts)))
        sys.exit(1)
    # NOTE(review): query built by string formatting -- image_name_root/view are
    # validated CLI values here, but consider a parameterized query.
    query_string = "select id from image where image.name like '{}-{}.%'".format(image_name_root, view)
    cursor.execute(query_string)
    rows = cursor.fetchall()
    if rows:
        if len(rows) == 1:
            return rows[0][0]
        else:
            print_date("Multiple image IDs for {}-{}: {}".format(image_name_root, view, rows))
            if quick_pick is not None:
                print_date("Quick picking an ID from index {}".format(quick_pick))
                return rows[quick_pick][0]
            else:
                return os.EX_OK
    else:
        print_date("No image IDs found for view '{}'".format(view))
        return os.EX_OK


def get_image_size(cursor, image_id):
    """Obtain image dimensions

    :param cursor: cursor to postgres connection
    :param image_id: a valid image id
    :return tuple image_ids: (sizex, sizey, sizez)
    """
    if image_id is not None:
        query_string = "select sizex, sizey, sizez from pixels where id={}".format(image_id)
        cursor.execute(query_string)
        rows = cursor.fetchall()
        if rows:
            return rows[0]
    return os.EX_OK


class ROIContours(object):
    # Container for the per-orientation (x/y/z) contour lists of a segment.
    def __init__(self, contour_sets):
        if isinstance(contour_sets, sfftkplus.schema.roi.segmentType):
            # a schema segment: take its oriented contour children directly
            self._x_contours = contour_sets.xContours.contour
            self._y_contours = contour_sets.yContours.contour
            self._z_contours = contour_sets.zContours.contour
        elif isinstance(contour_sets, list):
            # a list of contour-set objects: concatenate their contours
            x_contours = list()
            y_contours = list()
            z_contours = list()
            for contour_set in contour_sets:
                x_contours += contour_set.x_contours
                y_contours += contour_set.y_contours
                z_contours += contour_set.z_contours
            self._x_contours = x_contours
            self._y_contours = y_contours
            self._z_contours = z_contours

    @classmethod
    def from_vtk(cls, contour_sets):
        """Alternate constructor used for VTK-derived contour sets."""
        obj = cls(contour_sets)
        return obj

    @property
    def x_contours(self):
        return self._x_contours

    @x_contours.setter
    def x_contours(self, value):
        assert isinstance(value, list)
        self._x_contours = value

    @property
    def y_contours(self):
        return self._y_contours

    @y_contours.setter
    def y_contours(self, value):
        assert isinstance(value, list)
        self._y_contours = value

    @property
    def z_contours(self):
        return self._z_contours

    @z_contours.setter
    def z_contours(self, value):
        assert isinstance(value, list)
        self._z_contours = value

    def convert(self, args, configs):
        """Convert to schema orientedContourType objects; returns (x, y, z) triple."""
        # x contours
        xContours = roi.orientedContourType()
        # print(self.x_contours)
        for contour in self.x_contours:  # for each contour
            K = roi.contourType()
            for point_id, point in contour.items():  # for each point in the contour
                p = roi.pointType()
                x, y, z = point
                p.set_id(point_id)
                p.set_x(x)
                p.set_y(y)
                p.set_z(z)
                K.add_p(p)
            xContours.add_contour(K)
        # y contours
        yContours = roi.orientedContourType()
        for contour in self.y_contours:
            K = roi.contourType()
            for point_id, point in contour.items():
                p = roi.pointType()
                x, y, z = point
                p.set_id(point_id)
                p.set_x(x)
                p.set_y(y)
                p.set_z(z)
                K.add_p(p)
            yContours.add_contour(K)
        # z contours
        zContours = roi.orientedContourType()
        for contour in self.z_contours:
            K = roi.contourType()
            for point_id, point in contour.items():
                p = roi.pointType()
                x, y, z = point
                p.set_id(point_id)
                p.set_x(x)
                p.set_y(y)
                p.set_z(z)
                K.add_p(p)
            zContours.add_contour(K)
        return xContours, yContours, zContours


class ROISegment(object):
    # One ROI segment: id, RGBA colour and its oriented contours.
    def __init__(self, segment=None):
        if segment:
            self._segment = segment
            self._id = segment.id
            self._colour = segment.colour.red, segment.colour.green, segment.colour.blue, segment.colour.alpha
            self._contours = ROIContours(segment)
            self._oriented_contours = self._compute_oriented_contours()

    @classmethod
    def from_vtk(cls, vtk_seg):
        """Alternate constructor from a VTK-derived segment object."""
        obj = cls()
        obj.id = vtk_seg.id
        obj.colour = vtk_seg.colour
        obj.contours = ROIContours.from_vtk(vtk_seg.contours)
        return obj

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        assert value >= 0 and isinstance(value, int)
        self._id = value

    @property
    def colour(self):
        return self._colour

    @colour.setter
    def colour(self, value):
        # assertions?
        self._colour = value

    @property
    def contours(self):
        return self._contours

    @contours.setter
    def contours(self, value):
        assert isinstance(value, ROIContours)
        self._contours = value

    def _compute_oriented_contours(self, *args, **kwargs):
        """Group contours by the integer plane coordinate of their first point."""
        oriented_contours = dict()
        oriented_contours['x'] = dict()
        oriented_contours['y'] = dict()
        oriented_contours['z'] = dict()
        # xContours
        for xContour in self.contours.x_contours:
            x = int(xContour.p[0].get_x())
            if x not in oriented_contours['x']:
                oriented_contours['x'][x] = [xContour]
            else:
                oriented_contours['x'][x] += [xContour]
        # yContours
        for yContour in self.contours.y_contours:
            y = int(yContour.p[0].get_y())
            if y not in oriented_contours['y']:
                oriented_contours['y'][y] = [yContour]
            else:
                oriented_contours['y'][y] += [yContour]
        # zContours
        for zContour in self.contours.z_contours:
            z = int(zContour.p[0].get_z())
            if z not in oriented_contours['z']:
                oriented_contours['z'][z] = [zContour]
            else:
                oriented_contours['z'][z] += [zContour]
        return oriented_contours

    @property
    def oriented_contours(self):
        return self._oriented_contours

    def convert(self, args, configs):
        """Build and return a schema segmentType from this segment."""
        segment = roi.segmentType()
        segment.id = self.id
        segment.colour = roi.rgbaType()
        segment.colour.red, segment.colour.green, segment.colour.blue, segment.colour.alpha = self.colour
        xContours, yContours, zContours = self.contours.convert(args, configs)
        segment.set_xContours(xContours)
        segment.set_yContours(yContours)
        segment.set_zContours(zContours)
        return segment


class ROIAnnotation(object):
    # placeholder -- not yet implemented
    pass


class ROIHeader(object):
    # Holds the top/front/right image IDs of a segmentation.
    # NOTE(review): reset_ids() is truncated at the end of the chunk under review.
    def __init__(self, segmentation=None):
        if segmentation:
            self._segmentation = segmentation
            if segmentation.image_ids:
                self._top_id = segmentation.image_ids.top
                self._front_id = segmentation.image_ids.front
                self._right_id = segmentation.image_ids.right
            else:
                self._top_id = None
                self._front_id = None
                self._right_id = None

    def reset_ids(self, args, configs, *args_, **kwargs_):
        """Re-resolve top/front/right image IDs from the image DB or from CLI args."""
        try:
            if args.image_name_root is not None:
                cw = configs['CONNECT_WITH']  # either LOCAL or REMOTE
                # server settings
                conn_str = "dbname='{}' user='{}' password='{}' host='{}' port='{}'".format(
                    configs['IMAGE_DB_{}_NAME'.format(cw)],
                    configs['IMAGE_DB_{}_USER'.format(cw)],
                    configs['IMAGE_DB_{}_PASS'.format(cw)],
                    configs['IMAGE_DB_{}_HOST'.format(cw)],
                    configs['IMAGE_DB_{}_PORT'.format(cw)],
                )
                conn = psycopg2.connect(conn_str)
                cur = conn.cursor()
                self.top_id = get_image_id(cur, args.image_name_root, 'top', quick_pick=args.quick_pick)
                self.front_id = get_image_id(cur, args.image_name_root, 'front', quick_pick=args.quick_pick)
                self.right_id = get_image_id(cur, args.image_name_root, 'right', quick_pick=args.quick_pick)
                # sanity check
                assert self.top_id != self.front_id and self.right_id != self.front_id
            elif args.top_front_right is not None:
                self.top_id, self.front_id, self.right_id = args.top_front_right
            else:
                print_date(
                    "Neither -I/--image-name-root nor --top-front-right arguments not set. Image IDs will be excluded.")
                self.top_id = None
                self.front_id = None
                self.right_id = None
        except AssertionError:
            # NOTE(review): chunk truncated here mid-handler; original continues:
            # print_date("Invalid image IDs or image IDs not found. ...
Did you use -I/--image-name-root option?") self.top_id = None self.front_id = None self.right_id = None @classmethod def from_vtk(cls, vtk_args, configs, *args, **kwargs): obj = cls() obj.reset_ids(vtk_args, configs, *args, **kwargs) return obj @property def top_id(self): return self._top_id @top_id.setter def top_id(self, value): if value is not None: try: assert value > 0 and isinstance(value, int) except AssertionError: print_date("Invalid value: {}".format(value)) self._top_id = value @property def front_id(self): return self._front_id @front_id.setter def front_id(self, value): if value is not None: try: assert value > 0 and isinstance(value, int) except AssertionError: print_date("Invalid value: {}".format(value)) self._front_id = value @property def right_id(self): return self._right_id @right_id.setter def right_id(self, value): if value is not None: try: assert value > 0 and isinstance(value, int) except AssertionError: print_date("Invalid value: {}".format(value)) self._right_id = value def get_image_size(self, args, configs): cw = configs['CONNECT_WITH'] # either LOCAL or REMOTE # server settings conn_str = "dbname='{}' user='{}' password='{}' host='{}' port='{}'".format( configs['IMAGE_DB_{}_NAME'.format(cw)], configs['IMAGE_DB_{}_USER'.format(cw)], configs['IMAGE_DB_{}_PASS'.format(cw)], configs['IMAGE_DB_{}_HOST'.format(cw)], configs['IMAGE_DB_{}_PORT'.format(cw)], ) conn = psycopg2.connect(conn_str) cur = conn.cursor() return get_image_size(cur, self.top_id) def convert(self, *args, **kwargs): if all([self.top_id, self.front_id, self.right_id]): image_ids = roi.image_idsType() image_ids.set_top(self.top_id) image_ids.set_front(self.front_id) image_ids.set_right(self.right_id) return image_ids else: return None class ROISegmentation(object): def __init__(self, fn=None, *args, **kwargs): self._fn = fn if fn: self.roi_seg = roireader.get_data(fn, *args, **kwargs) self._header = ROIHeader(self.roi_seg) self._segments = list(map(ROISegment, 
self.roi_seg.segment)) self._oriented_segments, self._segment_colours = self._compute_oriented_segments() @classmethod def from_vtk(cls, vtk_seg, args, configs): obj = cls() obj.header = ROIHeader.from_vtk(args, configs) obj.segments = list(map(ROISegment.from_vtk, vtk_seg.segments)) obj.convert(args, configs) return obj @property def header(self): return self._header @header.setter def header(self, value): assert isinstance(value, ROIHeader) self._header = value @property def segments(self): return self._segments @segments.setter def segments(self, value): assert isinstance(value, list) self._segments = value def convert(self, args, configs): self.roi_seg = roi.ROI() self.roi_seg.set_image_ids(self.header.convert()) for segment in self.segments: self.roi_seg.add_segment(segment.convert(args, configs)) def _compute_oriented_segments(self, *args, **kwargs): oriented_segments = { 'x': dict(), 'y': dict(), 'z': dict(), } segment_colours = dict() for segment in self.segments: segment_colours[segment.id] = segment.colour for o in segment.oriented_contours: for ovalue in segment.oriented_contours[o]: if ovalue not in oriented_segments[o]: oriented_segments[o][ovalue] = dict() if segment.id not in oriented_segments[o][ovalue]: oriented_segments[o][ovalue][segment.id] = segment.oriented_contours[o][ovalue] else: oriented_segments[o][ovalue][segment.id] += segment.oriented_contours[o][ovalue] return oriented_segments, segment_colours @property def oriented_segments(self): return self._oriented_segments @property def segment_colours(self): return self._segment_colours def as_omero_rois(self, orientation, image, args): """Convert an ROISegmentation object to a set of OMERO ROIs""" from
<reponame>StanPlatinum/VMI-as-a-Service #!/usr/bin/env python # -*- coding: utf-8 -*- """ Libxc Migration v2 streams Record structures as per docs/specs/libxc-migration-stream.pandoc, and verification routines. """ import sys from struct import calcsize, unpack from xen.migration.verify import StreamError, RecordError, VerifyBase # Image Header IHDR_FORMAT = "!QIIHHI" IHDR_MARKER = 0xffffffffffffffff IHDR_IDENT = 0x58454E46 # "XENF" in ASCII IHDR_VERSION = 2 IHDR_OPT_BIT_ENDIAN = 0 IHDR_OPT_LE = (0 << IHDR_OPT_BIT_ENDIAN) IHDR_OPT_BE = (1 << IHDR_OPT_BIT_ENDIAN) IHDR_OPT_RESZ_MASK = 0xfffe # Domain Header DHDR_FORMAT = "IHHII" DHDR_TYPE_x86_pv = 0x00000001 DHDR_TYPE_x86_hvm = 0x00000002 DHDR_TYPE_x86_pvh = 0x00000003 DHDR_TYPE_arm = 0x00000004 dhdr_type_to_str = { DHDR_TYPE_x86_pv : "x86 PV", DHDR_TYPE_x86_hvm : "x86 HVM", DHDR_TYPE_x86_pvh : "x86 PVH", DHDR_TYPE_arm : "ARM", } # Records RH_FORMAT = "II" REC_TYPE_end = 0x00000000 REC_TYPE_page_data = 0x00000001 REC_TYPE_x86_pv_info = 0x00000002 REC_TYPE_x86_pv_p2m_frames = 0x00000003 REC_TYPE_x86_pv_vcpu_basic = 0x00000004 REC_TYPE_x86_pv_vcpu_extended = 0x00000005 REC_TYPE_x86_pv_vcpu_xsave = 0x00000006 REC_TYPE_shared_info = 0x00000007 REC_TYPE_tsc_info = 0x00000008 REC_TYPE_hvm_context = 0x00000009 REC_TYPE_hvm_params = 0x0000000a REC_TYPE_toolstack = 0x0000000b REC_TYPE_x86_pv_vcpu_msrs = 0x0000000c REC_TYPE_verify = 0x0000000d REC_TYPE_checkpoint = 0x0000000e rec_type_to_str = { REC_TYPE_end : "End", REC_TYPE_page_data : "Page data", REC_TYPE_x86_pv_info : "x86 PV info", REC_TYPE_x86_pv_p2m_frames : "x86 PV P2M frames", REC_TYPE_x86_pv_vcpu_basic : "x86 PV vcpu basic", REC_TYPE_x86_pv_vcpu_extended : "x86 PV vcpu extended", REC_TYPE_x86_pv_vcpu_xsave : "x86 PV vcpu xsave", REC_TYPE_shared_info : "Shared info", REC_TYPE_tsc_info : "TSC info", REC_TYPE_hvm_context : "HVM context", REC_TYPE_hvm_params : "HVM params", REC_TYPE_toolstack : "Toolstack", REC_TYPE_x86_pv_vcpu_msrs : "x86 PV vcpu msrs", REC_TYPE_verify 
: "Verify", REC_TYPE_checkpoint : "Checkpoint", } # page_data PAGE_DATA_FORMAT = "II" PAGE_DATA_PFN_MASK = (1L << 52) - 1 PAGE_DATA_PFN_RESZ_MASK = ((1L << 60) - 1) & ~((1L << 52) - 1) # flags from xen/public/domctl.h: XEN_DOMCTL_PFINFO_* shifted by 32 bits PAGE_DATA_TYPE_SHIFT = 60 PAGE_DATA_TYPE_LTABTYPE_MASK = (0x7L << PAGE_DATA_TYPE_SHIFT) PAGE_DATA_TYPE_LTAB_MASK = (0xfL << PAGE_DATA_TYPE_SHIFT) PAGE_DATA_TYPE_LPINTAB = (0x8L << PAGE_DATA_TYPE_SHIFT) # Pinned pagetable PAGE_DATA_TYPE_NOTAB = (0x0L << PAGE_DATA_TYPE_SHIFT) # Regular page PAGE_DATA_TYPE_L1TAB = (0x1L << PAGE_DATA_TYPE_SHIFT) # L1 pagetable PAGE_DATA_TYPE_L2TAB = (0x2L << PAGE_DATA_TYPE_SHIFT) # L2 pagetable PAGE_DATA_TYPE_L3TAB = (0x3L << PAGE_DATA_TYPE_SHIFT) # L3 pagetable PAGE_DATA_TYPE_L4TAB = (0x4L << PAGE_DATA_TYPE_SHIFT) # L4 pagetable PAGE_DATA_TYPE_BROKEN = (0xdL << PAGE_DATA_TYPE_SHIFT) # Broken PAGE_DATA_TYPE_XALLOC = (0xeL << PAGE_DATA_TYPE_SHIFT) # Allocate-only PAGE_DATA_TYPE_XTAB = (0xfL << PAGE_DATA_TYPE_SHIFT) # Invalid # x86_pv_info X86_PV_INFO_FORMAT = "BBHI" X86_PV_P2M_FRAMES_FORMAT = "II" # x86_pv_vcpu_{basic,extended,xsave,msrs} X86_PV_VCPU_HDR_FORMAT = "II" # tsc_info TSC_INFO_FORMAT = "IIQII" # hvm_params HVM_PARAMS_ENTRY_FORMAT = "QQ" HVM_PARAMS_FORMAT = "II" class VerifyLibxc(VerifyBase): """ Verify a Libxc v2 stream """ def __init__(self, info, read): VerifyBase.__init__(self, info, read) self.squashed_pagedata_records = 0 def verify(self): """ Verity a libxc stream """ self.verify_ihdr() self.verify_dhdr() while self.verify_record() != REC_TYPE_end: pass def verify_ihdr(self): """ Verify an Image Header """ marker, ident, version, options, res1, res2 = \ self.unpack_exact(IHDR_FORMAT) if marker != IHDR_MARKER: raise StreamError("Bad image marker: Expected 0x%x, got 0x%x" % (IHDR_MARKER, marker)) if ident != IHDR_IDENT: raise StreamError("Bad image id: Expected 0x%x, got 0x%x" % (IHDR_IDENT, ident)) if version != IHDR_VERSION: raise StreamError("Unknown image version: 
Expected %d, got %d" % (IHDR_VERSION, version)) if options & IHDR_OPT_RESZ_MASK: raise StreamError("Reserved bits set in image options field: 0x%x" % (options & IHDR_OPT_RESZ_MASK)) if res1 != 0 or res2 != 0: raise StreamError("Reserved bits set in image header: 0x%04x:0x%08x" % (res1, res2)) if ( (sys.byteorder == "little") and ((options & IHDR_OPT_BIT_ENDIAN) != IHDR_OPT_LE) ): raise StreamError( "Stream is not native endianess - unable to validate") endian = ["little", "big"][options & IHDR_OPT_LE] self.info("Libxc Image Header: %s endian" % (endian, )) def verify_dhdr(self): """ Verify a domain header """ gtype, page_shift, res1, major, minor = \ self.unpack_exact(DHDR_FORMAT) if gtype not in dhdr_type_to_str: raise StreamError("Unrecognised domain type 0x%x" % (gtype, )) if res1 != 0: raise StreamError("Reserved bits set in domain header 0x%04x" % (res1, )) if page_shift != 12: raise StreamError("Page shift expected to be 12. Got %d" % (page_shift, )) if major == 0: self.info("Domain Header: legacy converted %s" % (dhdr_type_to_str[gtype], )) else: self.info("Domain Header: %s from Xen %d.%d" % (dhdr_type_to_str[gtype], major, minor)) def verify_record(self): """ Verify an individual record """ rtype, length = self.unpack_exact(RH_FORMAT) if rtype not in rec_type_to_str: raise StreamError("Unrecognised record type 0x%x" % (rtype, )) contentsz = (length + 7) & ~7 content = self.rdexact(contentsz) if rtype != REC_TYPE_page_data: if self.squashed_pagedata_records > 0: self.info("Squashed %d Page Data records together" % (self.squashed_pagedata_records, )) self.squashed_pagedata_records = 0 self.info("Libxc Record: %s, length %d" % (rec_type_to_str[rtype], length)) else: self.squashed_pagedata_records += 1 padding = content[length:] if padding != "\x00" * len(padding): raise StreamError("Padding containing non0 bytes found") if rtype not in record_verifiers: raise RuntimeError("No verification function for libxc record '%s'" % rec_type_to_str[rtype]) else: 
record_verifiers[rtype](self, content[:length]) return rtype def verify_record_end(self, content): """ End record """ if len(content) != 0: raise RecordError("End record with non-zero length") def verify_record_page_data(self, content): """ Page Data record """ minsz = calcsize(PAGE_DATA_FORMAT) if len(content) <= minsz: raise RecordError("PAGE_DATA record must be at least %d bytes long" % (minsz, )) count, res1 = unpack(PAGE_DATA_FORMAT, content[:minsz]) if res1 != 0: raise StreamError("Reserved bits set in PAGE_DATA record 0x%04x" % (res1, )) pfnsz = count * 8 if (len(content) - minsz) < pfnsz: raise RecordError("PAGE_DATA record must contain a pfn record for " "each count") pfns = list(unpack("=%dQ" % (count,), content[minsz:minsz + pfnsz])) nr_pages = 0 for idx, pfn in enumerate(pfns): if pfn & PAGE_DATA_PFN_RESZ_MASK: raise RecordError("Reserved bits set in pfn[%d]: 0x%016x", idx, pfn & PAGE_DATA_PFN_RESZ_MASK) if pfn >> PAGE_DATA_TYPE_SHIFT in (5, 6, 7, 8): raise RecordError("Invalid type value in pfn[%d]: 0x%016x", idx, pfn & PAGE_DATA_TYPE_LTAB_MASK) # We expect page data for each normal page or pagetable if PAGE_DATA_TYPE_NOTAB <= (pfn & PAGE_DATA_TYPE_LTABTYPE_MASK) \ <= PAGE_DATA_TYPE_L4TAB: nr_pages += 1 pagesz = nr_pages * 4096 if len(content) != minsz + pfnsz + pagesz: raise RecordError("Expected %u + %u + %u, got %u" % (minsz, pfnsz, pagesz, len(content))) def verify_record_x86_pv_info(self, content): """ x86 PV Info record """ expectedsz = calcsize(X86_PV_INFO_FORMAT) if len(content) != expectedsz: raise RecordError("x86_pv_info: expected length of %d, got %d" % (expectedsz, len(content))) width, levels, res1, res2 = unpack(X86_PV_INFO_FORMAT, content) if width not in (4, 8): raise RecordError("Expected width of 4 or 8, got %d" % (width, )) if levels not in (3, 4): raise RecordError("Expected levels of 3 or 4, got %d" % (levels, )) if res1 != 0 or res2 != 0: raise StreamError("Reserved bits set in X86_PV_INFO: 0x%04x 0x%08x" % (res1, res2)) bitness 
= {4:32, 8:64}[width] self.info(" %sbit guest, %d levels of pagetables" % (bitness, levels)) def verify_record_x86_pv_p2m_frames(self, content): """ x86 PV p2m frames record """ if len(content) % 8 != 0: raise RecordError("Length expected to be a multiple of 8, not %d" % (len(content), )) start, end = unpack("=II", content[:8]) self.info(" Start pfn 0x%x, End 0x%x" % (start, end)) def verify_record_x86_pv_vcpu_generic(self, content, name): """ Generic for all REC_TYPE_x86_pv_vcpu_{basic,extended,xsave,msrs} """ minsz = calcsize(X86_PV_VCPU_HDR_FORMAT) if len(content) <= minsz: raise RecordError("X86_PV_VCPU_%s record length must be at least %d" " bytes long" % (name, minsz)) vcpuid, res1 = unpack(X86_PV_VCPU_HDR_FORMAT, content[:minsz]) if res1 != 0: raise StreamError( "Reserved bits set in x86_pv_vcpu_%s record 0x%04x" % (name, res1)) self.info(" vcpu%d %s context, %d bytes" % (vcpuid, name, len(content) - minsz)) def verify_record_shared_info(self, content): """ shared info record """ if len(content) != 4096: raise RecordError("Length expected to be 4906 bytes, not %d" % (len(content), )) def verify_record_tsc_info(self, content): """ tsc info record """ sz = calcsize(TSC_INFO_FORMAT) if len(content) != sz: raise RecordError("Length should be %u bytes" % (sz, )) mode, khz, nsec, incarn, res1 = unpack(TSC_INFO_FORMAT, content) if res1 != 0: raise StreamError("Reserved bits set in TSC_INFO: 0x%08x" % (res1, )) self.info(" Mode %u, %u kHz, %u ns, incarnation %d" % (mode, khz, nsec, incarn)) def verify_record_hvm_context(self, content): """ hvm context record """ if len(content) == 0: raise RecordError("Zero length HVM context") def verify_record_hvm_params(self, content): """ hvm params record """ sz = calcsize(HVM_PARAMS_FORMAT) if len(content) < sz: raise RecordError("Length should be at least %u bytes" % (sz, )) count, rsvd = unpack(HVM_PARAMS_FORMAT, content[:sz]) if rsvd != 0: raise RecordError("Reserved field not zero (0x%04x)" % (rsvd, )) sz += count * 
calcsize(HVM_PARAMS_ENTRY_FORMAT) if len(content) != sz: raise RecordError("Length should be %u bytes" % (sz, )) def verify_record_toolstack(self, _): """ toolstack record """ raise DeprecationWarning("Found Toolstack record in stream") def verify_record_verify(self, content): """ verify record """ if len(content) != 0: raise RecordError("Verify record with non-zero length") def verify_record_checkpoint(self, content): """ checkpoint record """ if len(content) != 0: raise RecordError("Checkpoint record with non-zero length") record_verifiers = { REC_TYPE_end: VerifyLibxc.verify_record_end, REC_TYPE_page_data: VerifyLibxc.verify_record_page_data, REC_TYPE_x86_pv_info: VerifyLibxc.verify_record_x86_pv_info, REC_TYPE_x86_pv_p2m_frames: VerifyLibxc.verify_record_x86_pv_p2m_frames, REC_TYPE_x86_pv_vcpu_basic: lambda s, x: VerifyLibxc.verify_record_x86_pv_vcpu_generic(s, x, "basic"), REC_TYPE_x86_pv_vcpu_extended: lambda
#!/usr/bin/env python3 # Author: <NAME> # Contact: <EMAIL> """Define data preprocessing operations for **PETGEM**.""" # --------------------------------------------------------------- # Load python modules # --------------------------------------------------------------- import numpy as np import h5py import meshio import sys from scipy.spatial import Delaunay from petsc4py import PETSc # --------------------------------------------------------------- # Load petgem modules (BSC) # --------------------------------------------------------------- from .common import Print, Timers, measure_all_class_methods from .parallel import MPIEnvironment, createSequentialDenseMatrixWithArray from .parallel import writeParallelDenseMatrix, createSequentialVectorWithArray from .parallel import writePetscVector from .parallel import communication from .mesh import computeEdges, computeBoundaryEdges, computeFacesEdges from .mesh import computeFaces, computeBoundaryFaces from .mesh import computeBoundaryElements, computeBoundaries, computeFacePlane from .hvfem import computeConnectivityDOFS # ############################################################### # ################ CLASSES DEFINITION ################## # ############################################################### @measure_all_class_methods class Preprocessing(): def __init__(self): # Variable to identify the communicator self.tComm = 123 return def run(self, inputSetup): # --------------------------------------------------------------- # Obtain the MPI environment # --------------------------------------------------------------- parEnv = MPIEnvironment() self.mesh=[] self.nElems=0 self.model={} self.data_model={} self.output={} self.mode="" self.basis_order=0 self.num_dof_in_element=0 self.out_dir="" # --------------------------------------------------------------- # Set parameters and read mesh file # --------------------------------------------------------------- if( parEnv.rank == 0 ): # Parameters shortcut (for code 
legibility) self.model = inputSetup.model self.run = inputSetup.run self.output = inputSetup.output self.out_dir = self.output.get('directory_scratch') # Compute number of dofs per element self.basis_order = self.run.get('nord') self.num_dof_in_element = np.int(self.basis_order*(self.basis_order+2)*(self.basis_order+3)/2) if (self.model.get('mode') == 'csem'): self.mode = 'csem' elif (model.get('mode') == 'mt'): mode = 'mt' # Get data model self.data_model = self.model.get(self.mode) # ------------------------------------------------------------------- # Import mesh file # ------------------------------------------------------------------- mesh_file = self.model.get('mesh') # Import mesh self.mesh = meshio.read(mesh_file) # Number of elements size = self.mesh.cells[0][1][:].shape self.nElems = size[0] return # Method to get nodal coordinates data and its parallel distribution def calculate_nodes(self): # --------------------------------------------------------------- # Obtain the MPI environment # --------------------------------------------------------------- parEnv = MPIEnvironment() parEnv.comm1(self.tComm) matDim=[] data=[] # ----------------------------------------------------------------------- # Preprocessing nodal coordinates # ----------------------------------------------------------------------- if( parEnv.rank == 0 ): Print.master(' Nodal coordinates') # Build coordinates in PETGEM format where each row # represent the xyz coordinates of the 4 tetrahedral element num_dimensions = 3 num_nodes_per_element = 4 data = self.mesh.points[self.mesh.cells[0][1][:], :] data = data.reshape(self.nElems, num_dimensions*num_nodes_per_element) # Get matrix dimensions matDim = data.shape # Scatter data self.local_nodes = communication(matDim, data) return self.local_nodes # Method to get mesh connectivity data and its parallel distribution def calculate_MeshConnectivity(self): # --------------------------------------------------------------- # Obtain the MPI 
environment # --------------------------------------------------------------- parEnv = MPIEnvironment() matDim=[] data=[] # --------------------------------------------------------------------- # Preprocessing mesh connectivity # --------------------------------------------------------------------- if( parEnv.rank == 0 ): Print.master(' Mesh connectivity') # Get data and matrix dimensions data = self.mesh.cells[0][1][:] matDim = data.shape # Scatter data self.local_elemsN = communication(matDim, data) return self.local_elemsN # Method to get edges connectivity data and its parallel distribution def calculate_EdgesConnectivity(self): # --------------------------------------------------------------- # Obtain the MPI environment # --------------------------------------------------------------- parEnv = MPIEnvironment() matDim=[] data=[] self.elemsE=[] self.edgesNodes=[] self.nEdges=0 # -------------------------------------------------------------------- # Preprocessing edges connectivity (Edges) # -------------------------------------------------------------------- if( parEnv.rank == 0 ): Print.master(' Edges connectivity') # Compute edges self.elemsE, self.edgesNodes = computeEdges(self.mesh.cells[0][1][:], self.nElems) self.nEdges = self.edgesNodes.shape[0] # Change data type and get matrix dimensions data = self.elemsE.astype('int32') matDim = data.shape # Scatter data self.local_elemsE = communication(matDim, data) # -------------------------------------------------------------------- # Preprocessing edges connectivity (EdgesNodes) # -------------------------------------------------------------------- if( parEnv.rank == 0 ): # Reshape edgesNodes and save num_nodes_per_edge = 2 num_edges_per_element = 6 data = np.array((self.edgesNodes[self.elemsE[:], :]), dtype=np.float) data = data.reshape(self.nElems, num_nodes_per_edge*num_edges_per_element) # Get matrix dimensions matDim = data.shape # Scatter data self.local_edgesNodes = communication(matDim, data) return 
self.local_elemsE, self.local_edgesNodes # Method to get faces and faces-edges connectivity data and its parallel distribution def calculate_FacesConnectivity(self): # --------------------------------------------------------------- # Obtain the MPI environment # --------------------------------------------------------------- parEnv = MPIEnvironment() matDim=[] data=[] self.elemsF=[] self.facesN=[] self.facesE=[] self.nFaces=0 # --------------------------------------------------------------- # Preprocessing faces connectivity # --------------------------------------------------------------- if( parEnv.rank == 0 ): Print.master(' Faces connectivity') # Compute faces self.elemsF, self.facesN = computeFaces(self.mesh.cells[0][1][:], self.nElems) self.nFaces = self.facesN.shape[0] # Change data type and get matrix dimensions data = self.elemsF.astype('int32') matDim = data.shape # Scatter data self.local_elemsF = communication(matDim, data) # --------------------------------------------------------------- # Preprocessing faces-edges connectivity # --------------------------------------------------------------- if( parEnv.rank == 0 ): Print.master(' Faces-edges connectivity') self.facesE = computeFacesEdges(self.elemsF, self.elemsE, self.nFaces, self.nElems) num_faces_per_element = 4 num_edges_per_face = 3 data = np.array((self.facesE[self.elemsF[:], :]), dtype=np.float) data = data.reshape(self.nElems, num_faces_per_element*num_edges_per_face) # Get matrix dimensions matDim = data.shape # Scatter data self.local_facesEdges = communication(matDim, data) return self.local_elemsF, self.local_facesEdges # Method to get dofs connectivity data and its parallel distribution def calculate_DofsConnectivity(self): # --------------------------------------------------------------- # Obtain the MPI environment # --------------------------------------------------------------- parEnv = MPIEnvironment() matDim=[] data=[] self.dofs=[] self.dof_edges=[] self.dof_faces=[] 
self.total_num_dofs=0 # --------------------------------------------------------------- # Preprocessing dofs connectivity # --------------------------------------------------------------- if( parEnv.rank == 0 ): Print.master(' DOFs connectivity') # Compute degrees of freedom connectivity self.basis_order = self.run.get('nord') self.dofs, self.dof_edges, self.dof_faces, _, self.total_num_dofs = computeConnectivityDOFS(self.elemsE,self.elemsF,self.basis_order) # Change data type and get matrix dimensions data = self.dofs.astype('int32') matDim = data.shape # Scatter data self.local_dofs = communication(matDim, data) return self.local_dofs # Method to get sigma model data and its parallel distribution def calculate_SigmaModel(self): # --------------------------------------------------------------- # Obtain the MPI environment # --------------------------------------------------------------- parEnv = MPIEnvironment() matDim=[] data=[] # --------------------------------------------------------------- # Preprocessing sigma model # --------------------------------------------------------------- if( parEnv.rank == 0 ): Print.master(' Conductivity model') i_model = self.data_model.get('sigma') if (self.run.get('conductivity_from_file')): # Open sigma file sigma_file = i_model.get('file') fileID = h5py.File(sigma_file, 'r') # Read sigma file conductivityModel = fileID.get('data')[()] else: # Get physical groups elemsS = self.mesh.cell_data['gmsh:physical'][0] elemsS -= np.int(1) # 0-based indexing # Get horizontal sigma horizontal_sigma = i_model.get('horizontal') vertical_sigma = i_model.get('vertical') # Allocate conductivity array conductivityModel = np.zeros((self.nElems, 2), dtype=np.float) for i in np.arange(self.nElems): # Set horizontal sigma conductivityModel[i, 0] = horizontal_sigma[np.int(elemsS[i])] # Set vertical sigma conductivityModel[i, 1] = vertical_sigma[np.int(elemsS[i])] # Get matrix dimensions size = conductivityModel.shape # Build PETSc structures 
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], conductivityModel) # Build path to save the file out_path = self.out_dir + '/conductivityModel.dat' # Write PETGEM edges in PETSc format writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF) del matrix # Get matrix dimensions data = conductivityModel matDim = data.shape # Scatter data self.local_sigmaModel = communication(matDim, data) return self.local_sigmaModel # Method to write boundaries data def calculate_Boundaries(self): # --------------------------------------------------------------- # Obtain the MPI environment # --------------------------------------------------------------- parEnv = MPIEnvironment() self.indx_boundary_dofs=[] # --------------------------------------------------------------- # Preprocessing boundaries # --------------------------------------------------------------- if( parEnv.rank == 0 ): Print.master(' Boundaries') # Compute boundary faces bFacesN, bFaces, nbFaces = computeBoundaryFaces(self.elemsF, self.facesN) # Build array with boundary dofs for csem mode (dirichlet BC) if (self.mode == 'csem'): # Compute boundary edges bEdges = computeBoundaryEdges(self.edgesNodes, bFacesN) # Compute dofs on boundaries _, self.indx_boundary_dofs = computeBoundaries(self.dofs, self.dof_edges, self.dof_faces, bEdges, bFaces, self.basis_order); # Build PETSc structures vector = createSequentialVectorWithArray(self.indx_boundary_dofs) # Build path to save the file out_path = self.out_dir + '/boundaries.dat' # Write PETGEM nodes in PETSc format writePetscVector(out_path, vector, communicator=PETSc.COMM_SELF) del vector elif (mode == 'mt'): # Compute to what plane the boundary face belongs planeFace = computeFacePlane(mesh.points, bFaces, bFacesN) # Compute boundary elements bElems, numbElems = computeBoundaryElements(elemsF, bFaces, nFaces) if (nbFaces != numbElems): Print.master(' Number of boundary faces is not consistent.') exit(-1) # Allocate data_boundaries = 
np.zeros((nbFaces, 53+self.num_dof_in_element), dtype=np.float) # Fill tmp matrix with data for boundary faces for i in np.arange(nbFaces): # Get index of tetrahedral element (boundary element) iEle = bElems[i] # Get dofs of element container dofsElement = dofs[iEle, :] # Get indexes of nodes for i-boundary element and insert nodesBoundaryElement = mesh.cells[0][1][iEle,:] data_boundaries[i, 0:4] = nodesBoundaryElement # Get nodes coordinates for i-boundary element and insert coordEle = mesh.points[nodesBoundaryElement, :] coordEle = coordEle.flatten() data_boundaries[i, 4:16] = coordEle # Get indexes of faces for i-boundary element and insert facesBoundaryElement = elemsF[iEle, :] data_boundaries[i, 16:20] = facesBoundaryElement # Get edges indexes for faces in i-boundary element and insert edgesBoundaryFace = facesE[facesBoundaryElement, :] edgesBoundaryFace = edgesBoundaryFace.flatten() data_boundaries[i, 20:32] = edgesBoundaryFace # Get indexes of edges for i-boundary and insert edgesBoundaryElement = elemsE[iEle, :] data_boundaries[i, 32:38] = edgesBoundaryElement # Get node indexes for edges in i-boundary and insert edgesNodesBoundaryElement = edgesNodes[edgesBoundaryElement, :] edgesNodesBoundaryElement = edgesNodesBoundaryElement.flatten() data_boundaries[i, 38:50] = edgesNodesBoundaryElement # Get plane face ifacetype = planeFace[i] data_boundaries[i, 50] = ifacetype # Get global face index localFaceIndex = bFaces[i] data_boundaries[i, 51] = localFaceIndex # Get sigma value sigmaEle = conductivityModel[iEle, 0] data_boundaries[i, 52] = sigmaEle # Get dofs for boundary element and insert dofsBoundaryElement = dofsElement data_boundaries[i, 53::] = dofsBoundaryElement # Get matrix dimensions size = data_boundaries.shape # Build PETSc structures matrix = createSequentialDenseMatrixWithArray(size[0], size[1],
<gh_stars>0 # MIT License # Copyright (c) 2021 <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ Activity Selection Board A fun way to select random activities This file contains main ActivityBoard class that can be instantiated to play the game https://github.com/davidsmakerworks/activity-board """ import random import time from enum import Enum, unique, auto from typing import Union, List import pygame # Wildcard import used here based on standard pygame code style from pygame.locals import * from button import Button from door import Door, DoorProperties from text_renderer import TextRenderer class ActivityBoard: """ Class representing the entire activity board. 
Properties: surface -- the pygame surface where the board will be drawn config -- dictionary representing the activity board configuration - almost all configuration is done through this object rather than by programmatically changing class properties start_hidden -- determines whether doors start hidden (i.e., the doors will appear one by one during startup animation) surface_is_display -- determines whether the surface object is to be treated as a pygame display (i.e., calling pygame.display.update() when needed) TODO: Clean up properties and methods related to door coordinates, door sizes, etc. """ @unique class State(Enum): """ Enumeration to define states for the finite state machine in the main loop. States: START -- Draw all doors with optional animated sequence SELECTING -- Choosing a door to open IN_PROGRESS -- Activity displayed on screen and in progress ALL_REVEALED -- All doors revealed at end of game GAME_OVER -- Exiting game """ START = auto() SELECTING = auto() IN_PROGRESS = auto() ALL_REVEALED = auto() GAME_OVER = auto() @unique class Action(Enum): """ Enumeration to represent player action. Actions: UP -- Move up DOWN -- Move down LEFT -- Move left RIGHT -- Move right OPEN -- Open door (i.e. 
joystick button A) RETURN -- Return to selection screen after opening door (i.e., joystick button B) REVEAL -- Reveal all (i.e., joystick button X + Y) RESTART -- Start new game (i.e., joystick START button) QUIT -- Exit game (i.e., joystick button LB + RB + BACK) """ UP = auto() DOWN = auto() LEFT = auto() RIGHT = auto() OPEN = auto() RETURN = auto() REVEAL = auto() RESTART = auto() QUIT = auto() @property def num_doors(self) -> int: """Returns total number of doors on the board.""" return self._doors_horiz * self._doors_vert @property def door_width(self) -> int: """Returns width (in pixels) of one door.""" return self._surface.get_width() // self._doors_horiz @property def door_height(self) -> int: """Returns height (in pixels) of one door.""" return self._surface.get_height() // self._doors_vert def __init__( self, surface: pygame.Surface, config: dict, start_hidden: bool = False, surface_is_display: bool = True) -> None: doors_horiz = config['board']['doors_horiz'] doors_vert = config['board']['doors_vert'] if surface.get_width() % doors_horiz != 0: raise RuntimeError('surface width must be an integer ' 'multiple of doors_horiz') if surface.get_height() % doors_vert != 0: raise RuntimeError('surface height must be an integer ' 'multiple of doors_vert') self._surface = surface self._config = config self._surface_is_display = surface_is_display self._bg_color = pygame.Color(config['board']['bg_color']) self._width = surface.get_width() self._height = surface.get_height() activity_font = pygame.font.Font( config['board']['font']['activity']['file'], config['board']['font']['activity']['size']) line_spacing = self._config['board']['line_spacing'] activity_color = pygame.Color( self._config['board']['color']['activity']) # One full-screen activity renderer for the whole class self.activity_renderer = TextRenderer( activity_font, line_spacing, activity_color) self._doors_horiz = doors_horiz self._doors_vert = doors_vert self._start_hidden = start_hidden 
self._activities = self._read_activities(config['activity_file']) self._doors = self._build_door_list( self._activities, doors_hidden=start_hidden) self._move_sounds = self._build_sound_list( config['board']['sound']['move']) self._open_sounds = self._build_sound_list( config['board']['sound']['open']) self._oops_sounds = self._build_sound_list( config['board']['sound']['oops']) self._start_sounds = self._build_sound_list( config['board']['sound']['start']) self._reveal_all_sounds = self._build_sound_list( config['board']['sound']['reveal_all']) self._intro_step_time = config['board']['intro_step_time'] # Initialize pygame if it hasn't been initialized already if not pygame.get_init(): # Use small buffer size to prevent delays when playing sounds pygame.mixer.init(buffer=512) pygame.init() # Joystick is optional - see documentation for controls if pygame.joystick.get_count(): self._joystick = pygame.joystick.Joystick(0) self._joystick.init() def _door_x_coord(self, index: int) -> int: """ Calculate and return the screen X coordinate (in pixels) of the door. """ return (index % self._doors_horiz) * self.door_width def _door_y_coord(self, index: int) -> int: """ Calculate and return the screen Y coordinate (in pixels) of the door. """ return (index // self._doors_horiz) * self.door_height def _clear_surface(self) -> None: """ Clear the underlying surface by filling with background color. """ self._surface.fill(self._bg_color) if self._surface_is_display: pygame.display.update() def _read_activities(self, file_name: str) -> List[str]: """Read activities from file (one per line).""" activities = [] with open(file_name, 'r') as activity_file: for line in activity_file: activities.append(line.strip()) return activities def _build_sound_list( self, sound_files: List[str]) -> List[pygame.mixer.Sound]: """ Builds a list of pygame Sound objects given a list of sound file names. 
""" sound_list = [] for f in sound_files: sound_list.append(pygame.mixer.Sound(f)) return sound_list def _build_door_list( self, activities: List[str], doors_hidden: bool = False) -> List[Door]: """ Build list of Door objects for use on the activity board. Arguments: activities -- list of activities that can be behind doors (newlines are represented by backticks: `) doors_hidden -- boolean that determines if the doors start off hidden (i.e., not displayed when calling Door.draw()) """ doors = [] door_colors = self._config['door']['color'] for i in range(self.num_doors): activity_font = pygame.font.Font( self._config['door']['font']['activity']['file'], self._config['door']['font']['activity']['size']) number_font = pygame.font.Font( self._config['door']['font']['number']['file'], self._config['door']['font']['number']['size']) # Individual props object for each door to allow for later # customization props = DoorProperties( bg_color=pygame.Color(self._config['board']['bg_color']), door_color=pygame.Color(door_colors['door']), ellipse_color=pygame.Color(door_colors['ellipse']), number_color=pygame.Color(door_colors['number']), cross_color=pygame.Color(door_colors['cross']), selection_color=pygame.Color(door_colors['selection']), activity_color=pygame.Color(door_colors['activity']), unused_color=pygame.Color(door_colors['unused']), activity_font=activity_font, line_spacing=self._config['door']['line_spacing'], number_font=number_font, border_size=self._config['door']['border_size'], ellipse_margin=self._config['door']['ellipse_margin'], cross_width=self._config['door']['cross_width'], cross_offset=self._config['door']['cross_offset'], open_step_time=self._config['door']['open_step_time']) # Choose a random activity for the door activity = random.choice(activities) # Remove the activity from the list to prevent duplicates activities.remove(activity) # Handle varied repetitions if '(' in activity and ')' in activity: # Keep the parentheses for ease of replacing later 
rep_string = activity[ activity.find('('):activity.find(')') + 1 ] # Strip off any parentheses in the chosen number of reps reps = random.choice(rep_string.split('|')).strip('()') # Replace the string of options with the chosen value activity = activity.replace(rep_string, reps) doors.append(Door( index=i, height=self.door_height, width=self.door_width, activity=activity, props=props, is_hidden=doors_hidden)) return doors def _play_random_sound(self, sound_list: List[pygame.mixer.Sound]) -> None: """ Plays one random sound from a list of pygame Sound objects. This should be used for all sound playback to allow for the possibility of adding multiple sounds. For effects that should always play the same sound, pass in a one-item list. """ sound = random.choice(sound_list) sound.play() def _get_new_selection(self, door: Door, action: Action) -> int: """ Return new door index based on originally selected door and direction of movement. Arguments: door -- the currently selected Door object action -- a value from the Action enum representing a movement direction NOTE: This method takes a Door object as input but return an integer door index as the result. TODO: Change the above to be more consistent. """ old_index = door.index old_index_h = old_index % self._doors_horiz old_index_v = old_index // self._doors_horiz new_index_h = old_index_h new_index_v = old_index_v if action is ActivityBoard.Action.UP: new_index_v = old_index_v - 1 elif action is ActivityBoard.Action.DOWN: new_index_v = old_index_v + 1 elif action is ActivityBoard.Action.LEFT: new_index_h = old_index_h - 1 elif action is ActivityBoard.Action.RIGHT: new_index_h = old_index_h + 1 if new_index_h < 0: new_index_h = old_index_h if new_index_h > self._doors_horiz - 1: new_index_h = old_index_h if new_index_v < 0: new_index_v = 0 if new_index_v > self._doors_vert - 1: new_index_v = old_index_v new_index = new_index_v * self._doors_horiz + new_index_h return
postCellId="../VA4/0/"/> </projection> <projection id="NC_AVER_VA5_FMRFamide" postsynapticPopulation="VA5" presynapticPopulation="AVER" synapse=""> <connection id="0" preCellId="../AVER/0/" postCellId="../VA5/0/"/> </projection> <projection id="NC_AVFL_AVBL_Glutamate" postsynapticPopulation="AVBL" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVBL/0/"/> </projection> <projection id="NC_AVFL_AVBR_Glutamate" postsynapticPopulation="AVBR" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVBR/0/"/> </projection> <projection id="NC_AVFL_AVFR_Glutamate" postsynapticPopulation="AVFR" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVFR/0/"/> </projection> <projection id="NC_AVFL_AVFR_Generic_GJ" postsynapticPopulation="AVFR" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVFR/0/"/> </projection> <projection id="NC_AVFL_AVG_Glutamate" postsynapticPopulation="AVG" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVG/0/"/> </projection> <projection id="NC_AVFL_AVHL_Glutamate" postsynapticPopulation="AVHL" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVHL/0/"/> </projection> <projection id="NC_AVFL_AVHL_Generic_GJ" postsynapticPopulation="AVHL" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVHL/0/"/> </projection> <projection id="NC_AVFL_AVHR_Glutamate" postsynapticPopulation="AVHR" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVHR/0/"/> </projection> <projection id="NC_AVFL_AVHR_Generic_GJ" postsynapticPopulation="AVHR" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVHR/0/"/> </projection> <projection 
id="NC_AVFL_AVJL_Glutamate" postsynapticPopulation="AVJL" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVJL/0/"/> </projection> <projection id="NC_AVFL_AVJR_Glutamate" postsynapticPopulation="AVJR" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVJR/0/"/> </projection> <projection id="NC_AVFL_AVL_Glutamate" postsynapticPopulation="AVL" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../AVL/0/"/> </projection> <projection id="NC_AVFL_HSNL_Glutamate" postsynapticPopulation="HSNL" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../HSNL/0/"/> </projection> <projection id="NC_AVFL_PDER_Glutamate" postsynapticPopulation="PDER" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../PDER/0/"/> </projection> <projection id="NC_AVFL_PVNL_Glutamate" postsynapticPopulation="PVNL" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../PVNL/0/"/> </projection> <projection id="NC_AVFL_PVQL_Glutamate" postsynapticPopulation="PVQL" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../PVQL/0/"/> </projection> <projection id="NC_AVFL_PVQR_Glutamate" postsynapticPopulation="PVQR" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../PVQR/0/"/> </projection> <projection id="NC_AVFL_PVQR_Generic_GJ" postsynapticPopulation="PVQR" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../PVQR/0/"/> </projection> <projection id="NC_AVFL_VB1_Glutamate" postsynapticPopulation="VB1" presynapticPopulation="AVFL" synapse=""> <connection id="0" preCellId="../AVFL/0/" postCellId="../VB1/0/"/> </projection> <projection id="NC_AVFR_ASJL_Glutamate" postsynapticPopulation="ASJL" 
presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../ASJL/0/"/> </projection> <projection id="NC_AVFR_ASKL_Glutamate" postsynapticPopulation="ASKL" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../ASKL/0/"/> </projection> <projection id="NC_AVFR_AVBL_Glutamate" postsynapticPopulation="AVBL" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../AVBL/0/"/> </projection> <projection id="NC_AVFR_AVBR_Glutamate" postsynapticPopulation="AVBR" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../AVBR/0/"/> </projection> <projection id="NC_AVFR_AVFL_Glutamate" postsynapticPopulation="AVFL" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../AVFL/0/"/> </projection> <projection id="NC_AVFR_AVFL_Generic_GJ" postsynapticPopulation="AVFL" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../AVFL/0/"/> </projection> <projection id="NC_AVFR_AVHL_Glutamate" postsynapticPopulation="AVHL" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../AVHL/0/"/> </projection> <projection id="NC_AVFR_AVHL_Generic_GJ" postsynapticPopulation="AVHL" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../AVHL/0/"/> </projection> <projection id="NC_AVFR_AVHR_Glutamate" postsynapticPopulation="AVHR" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../AVHR/0/"/> </projection> <projection id="NC_AVFR_AVHR_Generic_GJ" postsynapticPopulation="AVHR" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../AVHR/0/"/> </projection> <projection id="NC_AVFR_AVJL_Glutamate" postsynapticPopulation="AVJL" presynapticPopulation="AVFR" synapse=""> <connection id="0" 
preCellId="../AVFR/0/" postCellId="../AVJL/0/"/> </projection> <projection id="NC_AVFR_AVJR_Glutamate" postsynapticPopulation="AVJR" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../AVJR/0/"/> </projection> <projection id="NC_AVFR_HSNR_Glutamate" postsynapticPopulation="HSNR" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../HSNR/0/"/> </projection> <projection id="NC_AVFR_PVQL_Glutamate" postsynapticPopulation="PVQL" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../PVQL/0/"/> </projection> <projection id="NC_AVFR_VC4_Generic_GJ" postsynapticPopulation="VC4" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../VC4/0/"/> </projection> <projection id="NC_AVFR_VD11_Glutamate" postsynapticPopulation="VD11" presynapticPopulation="AVFR" synapse=""> <connection id="0" preCellId="../AVFR/0/" postCellId="../VD11/0/"/> </projection> <projection id="NC_AVG_AVAR_Glutamate" postsynapticPopulation="AVAR" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../AVAR/0/"/> </projection> <projection id="NC_AVG_AVBL_Glutamate" postsynapticPopulation="AVBL" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../AVBL/0/"/> </projection> <projection id="NC_AVG_AVBR_Glutamate" postsynapticPopulation="AVBR" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../AVBR/0/"/> </projection> <projection id="NC_AVG_AVDR_Glutamate" postsynapticPopulation="AVDR" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../AVDR/0/"/> </projection> <projection id="NC_AVG_AVEL_Glutamate" postsynapticPopulation="AVEL" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../AVEL/0/"/> </projection> <projection 
id="NC_AVG_AVER_Glutamate" postsynapticPopulation="AVER" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../AVER/0/"/> </projection> <projection id="NC_AVG_AVFL_Glutamate" postsynapticPopulation="AVFL" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../AVFL/0/"/> </projection> <projection id="NC_AVG_AVJL_Glutamate" postsynapticPopulation="AVJL" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../AVJL/0/"/> </projection> <projection id="NC_AVG_AVL_Glutamate" postsynapticPopulation="AVL" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../AVL/0/"/> </projection> <projection id="NC_AVG_DA8_Glutamate" postsynapticPopulation="DA8" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../DA8/0/"/> </projection> <projection id="NC_AVG_PHAL_Glutamate" postsynapticPopulation="PHAL" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../PHAL/0/"/> </projection> <projection id="NC_AVG_PVCL_Glutamate" postsynapticPopulation="PVCL" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../PVCL/0/"/> </projection> <projection id="NC_AVG_PVNR_Glutamate" postsynapticPopulation="PVNR" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../PVNR/0/"/> </projection> <projection id="NC_AVG_PVPR_Glutamate" postsynapticPopulation="PVPR" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../PVPR/0/"/> </projection> <projection id="NC_AVG_PVQR_Glutamate" postsynapticPopulation="PVQR" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../PVQR/0/"/> </projection> <projection id="NC_AVG_PVT_Glutamate" postsynapticPopulation="PVT" presynapticPopulation="AVG" synapse=""> 
<connection id="0" preCellId="../AVG/0/" postCellId="../PVT/0/"/> </projection> <projection id="NC_AVG_RIFL_Generic_GJ" postsynapticPopulation="RIFL" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../RIFL/0/"/> </projection> <projection id="NC_AVG_RIFR_Generic_GJ" postsynapticPopulation="RIFR" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../RIFR/0/"/> </projection> <projection id="NC_AVG_VA11_Glutamate" postsynapticPopulation="VA11" presynapticPopulation="AVG" synapse=""> <connection id="0" preCellId="../AVG/0/" postCellId="../VA11/0/"/> </projection> <projection id="NC_AVHL_ADFR_Glutamate" postsynapticPopulation="ADFR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../ADFR/0/"/> </projection> <projection id="NC_AVHL_AVBL_Glutamate" postsynapticPopulation="AVBL" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../AVBL/0/"/> </projection> <projection id="NC_AVHL_AVBR_Glutamate" postsynapticPopulation="AVBR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../AVBR/0/"/> </projection> <projection id="NC_AVHL_AVDL_Glutamate" postsynapticPopulation="AVDL" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../AVDL/0/"/> </projection> <projection id="NC_AVHL_AVFL_Glutamate" postsynapticPopulation="AVFL" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../AVFL/0/"/> </projection> <projection id="NC_AVHL_AVFL_Generic_GJ" postsynapticPopulation="AVFL" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../AVFL/0/"/> </projection> <projection id="NC_AVHL_AVFR_Generic_GJ" postsynapticPopulation="AVFR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../AVFR/0/"/> 
</projection> <projection id="NC_AVHL_AVFR_Glutamate" postsynapticPopulation="AVFR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../AVFR/0/"/> </projection> <projection id="NC_AVHL_AVHR_Glutamate" postsynapticPopulation="AVHR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../AVHR/0/"/> </projection> <projection id="NC_AVHL_AVHR_Generic_GJ" postsynapticPopulation="AVHR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../AVHR/0/"/> </projection> <projection id="NC_AVHL_AVJL_Glutamate" postsynapticPopulation="AVJL" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../AVJL/0/"/> </projection> <projection id="NC_AVHL_AWBR_Glutamate" postsynapticPopulation="AWBR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../AWBR/0/"/> </projection> <projection id="NC_AVHL_PHBR_Generic_GJ" postsynapticPopulation="PHBR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../PHBR/0/"/> </projection> <projection id="NC_AVHL_PVPR_Glutamate" postsynapticPopulation="PVPR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../PVPR/0/"/> </projection> <projection id="NC_AVHL_PVQL_Glutamate" postsynapticPopulation="PVQL" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../PVQL/0/"/> </projection> <projection id="NC_AVHL_PVQR_Glutamate" postsynapticPopulation="PVQR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../PVQR/0/"/> </projection> <projection id="NC_AVHL_RIMR_Glutamate" postsynapticPopulation="RIMR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../RIMR/0/"/> </projection> <projection id="NC_AVHL_RIR_Glutamate" 
postsynapticPopulation="RIR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../RIR/0/"/> </projection> <projection id="NC_AVHL_SMBDR_Glutamate" postsynapticPopulation="SMBDR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../SMBDR/0/"/> </projection> <projection id="NC_AVHL_SMBVR_Glutamate" postsynapticPopulation="SMBVR" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../SMBVR/0/"/> </projection> <projection id="NC_AVHL_VD1_Glutamate" postsynapticPopulation="VD1" presynapticPopulation="AVHL" synapse=""> <connection id="0" preCellId="../AVHL/0/" postCellId="../VD1/0/"/> </projection> <projection id="NC_AVHR_ADLL_Glutamate" postsynapticPopulation="ADLL" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../ADLL/0/"/> </projection> <projection id="NC_AVHR_ADLR_Glutamate" postsynapticPopulation="ADLR" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../ADLR/0/"/> </projection> <projection id="NC_AVHR_AQR_Glutamate" postsynapticPopulation="AQR" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../AQR/0/"/> </projection> <projection id="NC_AVHR_AVBL_Glutamate" postsynapticPopulation="AVBL" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../AVBL/0/"/> </projection> <projection id="NC_AVHR_AVBR_Glutamate" postsynapticPopulation="AVBR" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../AVBR/0/"/> </projection> <projection id="NC_AVHR_AVDR_Glutamate" postsynapticPopulation="AVDR" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../AVDR/0/"/> </projection> <projection id="NC_AVHR_AVFL_Glutamate" postsynapticPopulation="AVFL" presynapticPopulation="AVHR" synapse=""> 
<connection id="0" preCellId="../AVHR/0/" postCellId="../AVFL/0/"/> </projection> <projection id="NC_AVHR_AVFL_Generic_GJ" postsynapticPopulation="AVFL" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../AVFL/0/"/> </projection> <projection id="NC_AVHR_AVFR_Generic_GJ" postsynapticPopulation="AVFR" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../AVFR/0/"/> </projection> <projection id="NC_AVHR_AVFR_Glutamate" postsynapticPopulation="AVFR" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../AVFR/0/"/> </projection> <projection id="NC_AVHR_AVHL_Generic_GJ" postsynapticPopulation="AVHL" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../AVHL/0/"/> </projection> <projection id="NC_AVHR_AVHL_Glutamate" postsynapticPopulation="AVHL" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../AVHL/0/"/> </projection> <projection id="NC_AVHR_AVJR_Glutamate" postsynapticPopulation="AVJR" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../AVJR/0/"/> </projection> <projection id="NC_AVHR_PVNL_Glutamate" postsynapticPopulation="PVNL" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../PVNL/0/"/> </projection> <projection id="NC_AVHR_PVPL_Glutamate" postsynapticPopulation="PVPL" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../PVPL/0/"/> </projection> <projection id="NC_AVHR_RIGL_Glutamate" postsynapticPopulation="RIGL" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../RIGL/0/"/> </projection> <projection id="NC_AVHR_RIR_Glutamate" postsynapticPopulation="RIR" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../RIR/0/"/> 
</projection> <projection id="NC_AVHR_SMBDL_Glutamate" postsynapticPopulation="SMBDL" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../SMBDL/0/"/> </projection> <projection id="NC_AVHR_SMBVL_Glutamate" postsynapticPopulation="SMBVL" presynapticPopulation="AVHR" synapse=""> <connection id="0" preCellId="../AVHR/0/" postCellId="../SMBVL/0/"/> </projection> <projection id="NC_AVJL_AVAL_Generic_GJ" postsynapticPopulation="AVAL" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../AVAL/0/"/> </projection> <projection id="NC_AVJL_AVAR_Glutamate" postsynapticPopulation="AVAR" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../AVAR/0/"/> </projection> <projection id="NC_AVJL_AVBL_Glutamate" postsynapticPopulation="AVBL" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../AVBL/0/"/> </projection> <projection id="NC_AVJL_AVBR_Glutamate" postsynapticPopulation="AVBR" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../AVBR/0/"/> </projection> <projection id="NC_AVJL_AVDL_Glutamate" postsynapticPopulation="AVDL" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../AVDL/0/"/> </projection> <projection id="NC_AVJL_AVDR_Generic_GJ" postsynapticPopulation="AVDR" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../AVDR/0/"/> </projection> <projection id="NC_AVJL_AVEL_Glutamate" postsynapticPopulation="AVEL" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../AVEL/0/"/> </projection> <projection id="NC_AVJL_AVFR_Glutamate" postsynapticPopulation="AVFR" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../AVFR/0/"/> </projection> <projection id="NC_AVJL_AVHL_Glutamate" 
postsynapticPopulation="AVHL" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../AVHL/0/"/> </projection> <projection id="NC_AVJL_AVJR_Generic_GJ" postsynapticPopulation="AVJR" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../AVJR/0/"/> </projection> <projection id="NC_AVJL_HSNR_Glutamate" postsynapticPopulation="HSNR" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../HSNR/0/"/> </projection> <projection id="NC_AVJL_PLMR_Glutamate" postsynapticPopulation="PLMR" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../PLMR/0/"/> </projection> <projection id="NC_AVJL_PVCL_Glutamate" postsynapticPopulation="PVCL" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../PVCL/0/"/> </projection> <projection id="NC_AVJL_PVCL_Generic_GJ" postsynapticPopulation="PVCL" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../PVCL/0/"/> </projection> <projection id="NC_AVJL_PVCR_Glutamate" postsynapticPopulation="PVCR" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../PVCR/0/"/> </projection> <projection id="NC_AVJL_PVCR_Generic_GJ" postsynapticPopulation="PVCR" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../PVCR/0/"/> </projection> <projection id="NC_AVJL_PVNR_Glutamate" postsynapticPopulation="PVNR" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../PVNR/0/"/> </projection> <projection id="NC_AVJL_RIFR_Glutamate" postsynapticPopulation="RIFR" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../RIFR/0/"/> </projection> <projection id="NC_AVJL_RIS_Glutamate" postsynapticPopulation="RIS" presynapticPopulation="AVJL" 
synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../RIS/0/"/> </projection> <projection id="NC_AVJL_RIS_Generic_GJ" postsynapticPopulation="RIS" presynapticPopulation="AVJL" synapse=""> <connection id="0" preCellId="../AVJL/0/" postCellId="../RIS/0/"/> </projection> <projection id="NC_AVJR_AVAL_Glutamate" postsynapticPopulation="AVAL" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" postCellId="../AVAL/0/"/> </projection> <projection id="NC_AVJR_AVAR_Glutamate" postsynapticPopulation="AVAR" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" postCellId="../AVAR/0/"/> </projection> <projection id="NC_AVJR_AVBL_Glutamate" postsynapticPopulation="AVBL" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" postCellId="../AVBL/0/"/> </projection> <projection id="NC_AVJR_AVBR_Glutamate" postsynapticPopulation="AVBR" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" postCellId="../AVBR/0/"/> </projection> <projection id="NC_AVJR_AVDL_Glutamate" postsynapticPopulation="AVDL" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" postCellId="../AVDL/0/"/> </projection> <projection id="NC_AVJR_AVDR_Glutamate" postsynapticPopulation="AVDR" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" postCellId="../AVDR/0/"/> </projection> <projection id="NC_AVJR_AVER_Glutamate" postsynapticPopulation="AVER" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" postCellId="../AVER/0/"/> </projection> <projection id="NC_AVJR_AVJL_Glutamate" postsynapticPopulation="AVJL" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" postCellId="../AVJL/0/"/> </projection> <projection id="NC_AVJR_AVJL_Generic_GJ" postsynapticPopulation="AVJL" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" 
postCellId="../AVJL/0/"/> </projection> <projection id="NC_AVJR_PVCL_Generic_GJ" postsynapticPopulation="PVCL" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" postCellId="../PVCL/0/"/> </projection> <projection id="NC_AVJR_PVCL_Glutamate" postsynapticPopulation="PVCL" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" postCellId="../PVCL/0/"/> </projection> <projection id="NC_AVJR_PVCR_Glutamate" postsynapticPopulation="PVCR" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/" postCellId="../PVCR/0/"/> </projection> <projection id="NC_AVJR_PVQR_Glutamate" postsynapticPopulation="PVQR" presynapticPopulation="AVJR" synapse=""> <connection id="0" preCellId="../AVJR/0/"