index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
999,900 | 6f42cbc3ae5b2537efc59b29d82048ecffbcaccc | logo = """
____ ____ ____ ____ ____ _________ ____ ____ ____ _________ ____ ____ ____ ____ ____ ____
||G |||u |||e |||s |||s ||| |||t |||h |||e ||| |||n |||u |||m |||b |||e |||r ||
||__|||__|||__|||__|||__|||_______|||__|||__|||__|||_______|||__|||__|||__|||__|||__|||__||
|/__\|/__\|/__\|/__\|/__\|/_______\|/__\|/__\|/__\|/_______\|/__\|/__\|/__\|/__\|/__\|/__\|
""" |
999,901 | 04917c4e1e49ba5c20a9fe578e54811c53f42c7b | #!/usr/bin/env python
"""
This action will create a CHAP account on the cluster for each client and configure the client with the CHAP credentials
"""
from libsf.apputil import PythonApp
from libsf.argutil import SFArgumentParser, GetFirstLine, SFArgFormatter
from libsf.logutil import GetLogger, SetThreadLogPrefix, logargs
from libsf.sfclient import SFClient
from libsf.sfcluster import SFCluster
from libsf.sfaccount import SFAccount
from libsf.util import ValidateAndDefault, ItemList, IPv4AddressType, BoolType, StrType, OptionalValueType
from libsf import sfdefaults
from libsf import threadutil
from libsf import SolidFireError, SolidFireAPIError
@logargs
@ValidateAndDefault({
    # "arg_name" : (arg_type, arg_default)
    "account_name" : (OptionalValueType(StrType), None),
    "chap" : (BoolType, False),
    "strict" : (BoolType, False),
    "client_ips" : (ItemList(IPv4AddressType), sfdefaults.client_ips),
    "client_user" : (StrType, sfdefaults.client_user),
    "client_pass" : (StrType, sfdefaults.client_pass),
    "mvip" : (IPv4AddressType, sfdefaults.mvip),
    "username" : (StrType, sfdefaults.username),
    "password" : (StrType, sfdefaults.password),
})
def ClientCreateAccount(account_name,
                        chap,
                        strict,
                        client_ips,
                        client_user,
                        client_pass,
                        mvip,
                        username,
                        password):
    """
    Create an account for each client

    Args:
        account_name:   the name of the account, client hostname is used if this is not specified
        chap:           whether or not to configure CHAP on the clients
        strict:         fail if the account already exists
        client_ips:     the list of client IP addresses
        client_user:    the username for the clients
        client_pass:    the password for the clients
        mvip:           the management IP of the cluster
        username:       the admin user of the cluster
        password:       the admin password of the cluster

    Returns:
        True if accounts were created/verified for all clients, False otherwise
    """
    log = GetLogger()
    log.info("Searching for accounts")

    cluster = SFCluster(mvip, username, password)
    try:
        svip = cluster.GetClusterInfo()["svip"]
    except SolidFireError as e:
        log.error("Failed to get cluster info: {}".format(e))
        return False

    # Get a list of accounts from the cluster.
    # Reuse the cluster object built above instead of constructing a second,
    # identical SFCluster (the original created one per call).
    try:
        allaccounts = cluster.ListAccounts()
    except SolidFireError as e:
        log.error("Failed to list accounts: {}".format(e))
        return False

    # Run all of the client operations in parallel
    allgood = True
    results = []
    pool = threadutil.GlobalPool()
    for client_ip in client_ips:
        results.append(pool.Post(_ClientThread, mvip, username, password, client_ip, client_user, client_pass, account_name, svip, allaccounts, chap, strict))

    # Collect the per-client results; any raised SolidFireError marks failure
    for idx, client_ip in enumerate(client_ips):
        try:
            results[idx].Get()
        except SolidFireError as e:
            log.error("  {}: Error creating account: {}".format(client_ip, e))
            allgood = False
            continue

    if allgood:
        log.passed("Successfully created accounts for all clients")
        return True
    else:
        log.error("Could not create accounts for all clients")
        return False
@threadutil.threadwrapper
def _ClientThread(mvip, username, password, client_ip, client_user, client_pass, account_name, svip, accounts_list, chap, strict):
    """
    Ensure a CHAP account exists for one client and optionally configure the client.

    Runs in a worker thread, one per client; errors are raised as SolidFireError
    and collected by the caller via the thread pool result.

    Args:
        mvip:           the management IP of the cluster
        username:       the admin user of the cluster
        password:       the admin password of the cluster
        client_ip:      the IP address of this client
        client_user:    the username for the client
        client_pass:    the password for the client
        account_name:   the account name, or None to derive one from the client hostname
        svip:           the storage VIP of the cluster (used as the iSCSI portal address)
        accounts_list:  the accounts already present on the cluster
        chap:           whether or not to configure CHAP on the client
        strict:         raise if the account already exists
    """
    log = GetLogger()
    SetThreadLogPrefix(client_ip)

    log.info("Connecting to client")
    client = SFClient(client_ip, client_user, client_pass)

    # Default the account name from the client's hostname when not given
    if not account_name:
        account_name = client.HostnameToAccountName()
    log.debug("Using account name {}".format(account_name))

    # See if the account already exists
    init_secret = ""
    found = False
    for account in accounts_list:
        if account["username"].lower() == account_name.lower():
            init_secret = account["initiatorSecret"]
            found = True
            break
    if found:
        if strict:
            raise SolidFireError("Account {} already exists".format(account_name))
        else:
            log.passed("Account {} already exists".format(account_name))
    else:
        # Create the account with freshly generated CHAP secrets
        log.info("Creating account {}".format(account_name))
        try:
            account = SFCluster(mvip, username, password).CreateAccount(accountName=account_name,
                                                                        initiatorSecret=SFAccount.CreateCHAPSecret(),
                                                                        targetSecret=SFAccount.CreateCHAPSecret())
        except SolidFireAPIError as e:
            # Ignore xDuplicateUsername; we may have multiple threads trying to create the same account
            if e.name != "xDuplicateUsername":
                raise
        log.passed("Created account {}".format(account_name))

    # NOTE(review): when the account was just created above, init_secret is
    # still "" at this point, so SetupCHAP receives an empty chapSecret -
    # confirm whether SetupCHAP re-reads the secret itself or this is a bug.
    if chap:
        log.info("Setting CHAP credentials")
        client.SetupCHAP(portalAddress=svip,
                         chapUser=account_name.lower(),
                         chapSecret=init_secret)
if __name__ == '__main__':
    # Build the CLI: description comes from the module docstring, plus the
    # standard cluster (mvip/username/password) and client-list arg groups.
    parser = SFArgumentParser(description=GetFirstLine(__doc__), formatter_class=SFArgFormatter)
    parser.add_argument("--account-name", type=str, metavar="NAME", help="the name for the account (client hostname is used if this is not specified)")
    # CHAP configuration is on by default; --nochap turns it off
    parser.add_argument("--nochap", action="store_false", dest="chap", default=True, help="do not configure CHAP on the clients")
    parser.add_argument("--strict", action="store_true", default=False, help="fail if the account already exists")
    parser.add_cluster_mvip_args()
    parser.add_client_list_args()
    args = parser.parse_args_to_dict()
    app = PythonApp(ClientCreateAccount, args)
    app.Run(**args)
|
999,902 | c21500260fbd8697558c32ab4d02839cf7a01446 | import os, sys
from ROOT import *
from DataFormats.FWLite import Events,Handle
import array, math
#gROOT.Macro( os.path.expanduser( '~/rootlogon.C' ) )
# Global ROOT/plot style: plain white canvases, no stat or fit boxes,
# enlarged fonts and margins for publication-style figures.
gROOT.Reset()
gROOT.SetStyle("Plain")
gStyle.SetOptStat(0)
gStyle.SetOptFit(0)
gStyle.SetTitleOffset(1.2,"Y")
gStyle.SetPadLeftMargin(0.18)
gStyle.SetPadBottomMargin(0.15)
gStyle.SetPadTopMargin(0.03)
gStyle.SetPadRightMargin(0.05)
gStyle.SetMarkerSize(1.5)
gStyle.SetHistLineWidth(1)
gStyle.SetStatFontSize(0.020)
gStyle.SetTitleSize(0.06, "XYZ")
gStyle.SetLabelSize(0.05, "XYZ")
gStyle.SetNdivisions(510, "XYZ")
gStyle.SetLegendBorderSize(0)
def createPlots(sample,prefix,xsec,massbins):
    """
    Fill one chi (= exp|y1-y2|) histogram per dijet-mass bin for one sample.

    Args:
        sample:   a .txt file listing ROOT files, or a substring matched
                  against folder names on the dcap storage element
        prefix:   string prepended to every histogram name
        xsec:     cross section used to normalize the histograms per event
        massbins: list of (mlow, mhigh) dijet-mass windows in GeV

    Returns:
        list of TH1F, one per entry in massbins, scaled by xsec/event_count
    """
    # Resolve the list of input ROOT files
    files=[]
    print "list files"
    if sample.endswith(".txt"):
        filelist=open(sample)
        for line in filelist.readlines():
            if ".root" in line:
                files+=[line.strip()]
    else:
        # Match sample name against folders on the dcap storage element
        folders=os.listdir("/pnfs/psi.ch/cms/trivcat/store/user/hinzmann/dijet_angular/")
        for folder in folders:
            if sample in folder:
                files+=["dcap://t3se01.psi.ch:22125//pnfs/psi.ch/cms/trivcat/store/user/hinzmann/dijet_angular/"+folder+"/GEN.root"]
                #break
    # files=["dcap://t3se01.psi.ch:22125//pnfs/psi.ch/cms/trivcat/store/user/hinzmann/dijet_angular/jobtmp_"+sample+"-0/GEN.root"]
    print files
    # NOTE(review): this FWLite handle/label pair is set up but never used
    # below (jets are read directly off the TChain event) - confirm.
    prunedgenjets_handle=Handle("std::vector<reco::GenJet>")
    prunedgenjets_label="ak4GenJets"
    # One 15-bin chi histogram (1 <= chi < 16) per mass bin
    plots=[]
    for massbin in massbins:
        plots += [TH1F(prefix+'#chi'+str(massbin).strip("()").replace(',',"_").replace(' ',""),';#chi;N',15,1,16)]
        #plots += [TH1F(prefix+'y_{boost}'+str(massbin).strip("()").replace(',',"_").replace(' ',""),';y_{boost};N',20,0,2)]
    for plot in plots:
        plot.Sumw2()
    event_count=0
    print "open chain"
    events=TChain('Events')
    for f in files[:]:
        events.Add(f)
    nevents=events.GetEntries()
    print sample,nevents,xsec
    event_count=0
    for event in events:
        event_count+=1
        if event_count>10000000: break
        if event_count%10000==1: print "event",event_count
        # Build 4-vectors for the two leading GEN jets
        jet1=TLorentzVector()
        jet2=TLorentzVector()
        jets=event.recoGenJets_ak4GenJets__GEN.product()
        if len(jets)<2: continue
        j1=jets[0]
        j2=jets[1]
        jet1.SetPtEtaPhiM(j1.pt(),j1.eta(),j1.phi(),j1.mass())
        jet2.SetPtEtaPhiM(j2.pt(),j2.eta(),j2.phi(),j2.mass())
        mjj=(jet1+jet2).M()
        # Dijet angular variables: chi = exp|y1-y2|, yboost = |y1+y2|/2
        chi=math.exp(abs(jet1.Rapidity()-jet2.Rapidity()))
        yboost=abs(jet1.Rapidity()+jet2.Rapidity())/2.
        if mjj<1500 or chi>16. or yboost>1.11: continue
        # Fill every mass window the event falls into (windows may overlap)
        irec=0
        for massbin in massbins:
            if yboost<1.11 and mjj>=massbin[0] and mjj<massbin[1]:
                plots[irec].Fill(chi)
            irec+=1
    # Normalize to cross section per generated event
    for plot in plots:
        if event_count>0:
            plot.Scale(xsec/event_count)
    return plots
if __name__ == '__main__':
    # When True, open the final .eps in ghostview at the end of the run
    wait=False
    # Output name prefix; an "np-<index>" token selects a single sample group below
    prefix="datacard_shapelimit13TeV_GENnp-30-v5"
    # Chi bin edges used to rebin the raw 15-bin histograms, one tuple per mass bin
    chi_bins=[(1,2,3,4,5,6,7,8,9,10,12,14,16),
              (1,2,3,4,5,6,7,8,9,10,12,14,16),
              (1,2,3,4,5,6,7,8,9,10,12,14,16),
              (1,2,3,4,5,6,7,8,9,10,12,14,16),
              (1,2,3,4,5,6,7,8,9,10,12,14,16),
              (1,2,3,4,5,6,7,8,9,10,12,14,16),
              (1,2,3,4,5,6,7,8,9,10,12,14,16),
              (1,2,3,4,5,6,7,8,9,10,12,14,16),
              (1,2,3,4,5,6,7,8,9,10,12,14,16),
              (1,2,3,4,5,6,7,8,9,10,12,14,16),
              (1,2,3,4,5,6,7,8,9,10,12,14,16),
              (1,2,3,4,5,6,7,8,9,10,12,14,16),
              ]
    # Dijet-mass windows in GeV; the last four are open-ended bins up to 13000
    massbins=[(1900,2400),
              (2400,3000),
              (3000,3600),
              (3600,4200),
              (4200,4800),
              (4800,5400),
              (5400,6000),
              (6000,6600),
              (4800,13000),
              (5400,13000),
              (6000,13000),
              (6600,13000),
              ]
samples=[("QCD",[("pythia8_ci_m1000_1500_50000_1_0_0_13TeV_Nov14",3.769e-05),
("pythia8_ci_m1500_1900_50000_1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_50000_1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_50000_1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_50000_1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_50000_1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_50000_1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_50000_1_0_0_13TeV_Nov14",3.507e-09),
]),
]
samples2=[("QCDNonPert",[("pythia8_ciNonPert_m1000_1500_50000_1_0_0_13TeV_Nov14",3.769e-05),
("pythia8_ciNonPert_m1500_1900_50000_1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ciNonPert_m1900_2400_50000_1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ciNonPert_m2400_2800_50000_1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ciNonPert_m2800_3300_50000_1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ciNonPert_m3300_3800_50000_1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ciNonPert_m3800_4300_50000_1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ciNonPert_m4300_13000_50000_1_0_0_13TeV_Nov14",3.507e-09),
]),
]
samples2=[("QCDHerwig",[("herwigpp_qcd_m1000_1500___Nov28",3.769e-05),
("herwigpp_qcd_m1500_1900___Nov28",3.307e-06),
("herwigpp_qcd_m1900_2400___Nov28",8.836e-07),
("herwigpp_qcd_m2400_2800___Nov28",1.649e-07),
("herwigpp_qcd_m2800_3300___Nov28",6.446e-08),
("herwigpp_qcd_m3300_3800___Nov28",1.863e-08),
("herwigpp_qcd_m3800_4300___Nov28",5.867e-09),
("herwigpp_qcd_m4300_13000___Nov28",3.507e-09),
]),
]
samples2=[("QCDHerwigNonPert",[("herwigpp_qcdNonPert_m1000_1500___Nov28",3.769e-05),
("herwigpp_qcdNonPert_m1500_1900___Nov28",3.307e-06),
("herwigpp_qcdNonPert_m1900_2400___Nov28",8.836e-07),
("herwigpp_qcdNonPert_m2400_2800___Nov28",1.649e-07),
("herwigpp_qcdNonPert_m2800_3300___Nov28",6.446e-08),
("herwigpp_qcdNonPert_m3300_3800___Nov28",1.863e-08),
("herwigpp_qcdNonPert_m3800_4300___Nov28",5.867e-09),
("herwigpp_qcdNonPert_m4300_13000___Nov28",3.507e-09),
]),
]
samples=[("QCDCIplusLL8000",[("pythia8_ci_m1500_1900_8000_1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_8000_1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_8000_1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_8000_1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_8000_1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_8000_1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_8000_1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIplusLL9000",[("pythia8_ci_m1500_1900_9000_1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_9000_1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_9000_1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_9000_1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_9000_1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_9000_1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_9000_1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIplusLL10000",[("pythia8_ci_m1500_1900_10000_1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_10000_1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_10000_1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_10000_1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_10000_1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_10000_1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_10000_1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIplusLL11000",[("pythia8_ci_m1500_1900_11000_1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_11000_1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_11000_1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_11000_1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_11000_1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_11000_1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_11000_1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIplusLL12000",[("pythia8_ci_m1500_1900_12000_1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_12000_1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_12000_1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_12000_1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_12000_1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_12000_1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_12000_1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIplusLL13000",[("pythia8_ci_m1500_1900_13000_1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_13000_1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_13000_1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_13000_1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_13000_1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_13000_1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_13000_1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIplusLL14000",[("pythia8_ci_m1500_1900_14000_1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_14000_1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_14000_1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_14000_1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_14000_1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_14000_1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_14000_1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIplusLL16000",[("pythia8_ci_m1500_1900_16000_1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_16000_1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_16000_1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_16000_1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_16000_1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_16000_1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_16000_1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIplusLL18000",[("pythia8_ci_m1500_1900_18000_1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_18000_1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_18000_1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_18000_1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_18000_1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_18000_1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_18000_1_0_0_13TeV_Nov14",3.507e-09),
]),
]
samples+=[("QCDCIminusLL8000",[("pythia8_ci_m1500_1900_8000_-1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_8000_-1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_8000_-1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_8000_-1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_8000_-1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_8000_-1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_8000_-1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIminusLL9000",[("pythia8_ci_m1500_1900_9000_-1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_9000_-1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_9000_-1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_9000_-1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_9000_-1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_9000_-1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_9000_-1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIminusLL10000",[("pythia8_ci_m1500_1900_10000_-1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_10000_-1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_10000_-1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_10000_-1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_10000_-1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_10000_-1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_10000_-1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIminusLL11000",[("pythia8_ci_m1500_1900_11000_-1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_11000_-1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_11000_-1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_11000_-1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_11000_-1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_11000_-1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_11000_-1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIminusLL12000",[("pythia8_ci_m1500_1900_12000_-1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_12000_-1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_12000_-1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_12000_-1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_12000_-1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_12000_-1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_12000_-1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIminusLL13000",[("pythia8_ci_m1500_1900_13000_-1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_13000_-1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_13000_-1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_13000_-1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_13000_-1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_13000_-1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_13000_-1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIminusLL14000",[("pythia8_ci_m1500_1900_14000_-1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_14000_-1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_14000_-1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_14000_-1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_14000_-1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_14000_-1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_14000_-1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIminusLL16000",[("pythia8_ci_m1500_1900_16000_-1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_16000_-1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_16000_-1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_16000_-1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_16000_-1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_16000_-1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_16000_-1_0_0_13TeV_Nov14",3.507e-09),
]),
("QCDCIminusLL18000",[("pythia8_ci_m1500_1900_18000_-1_0_0_13TeV_Nov14",3.307e-06),
("pythia8_ci_m1900_2400_18000_-1_0_0_13TeV_Nov14",8.836e-07),
("pythia8_ci_m2400_2800_18000_-1_0_0_13TeV_Nov14",1.649e-07),
("pythia8_ci_m2800_3300_18000_-1_0_0_13TeV_Nov14",6.446e-08),
("pythia8_ci_m3300_3800_18000_-1_0_0_13TeV_Nov14",1.863e-08),
("pythia8_ci_m3800_4300_18000_-1_0_0_13TeV_Nov14",5.867e-09),
("pythia8_ci_m4300_13000_18000_-1_0_0_13TeV_Nov14",3.507e-09),
]),
]
samples+=[("QCDADD6000",[("pythia8_add_m1500_1900_6000_0_0_0_1_13TeV_Nov14",3.307e-06),
("pythia8_add_m1900_2400_6000_0_0_0_1_13TeV_Nov14",8.836e-07),
("pythia8_add_m2400_2800_6000_0_0_0_1_13TeV_Nov14",1.649e-07),
("pythia8_add_m2800_3300_6000_0_0_0_1_13TeV_Nov14",6.446e-08),
("pythia8_add_m3300_3800_6000_0_0_0_1_13TeV_Nov14",1.863e-08),
("pythia8_add_m3800_4300_6000_0_0_0_1_13TeV_Nov14",5.867e-09),
("pythia8_add_m4300_13000_6000_0_0_0_1_13TeV_Nov14",3.507e-09),
]),
("QCDADD7000",[("pythia8_add_m1500_1900_7000_0_0_0_1_13TeV_Nov14",3.307e-06),
("pythia8_add_m1900_2400_7000_0_0_0_1_13TeV_Nov14",8.836e-07),
("pythia8_add_m2400_2800_7000_0_0_0_1_13TeV_Nov14",1.649e-07),
("pythia8_add_m2800_3300_7000_0_0_0_1_13TeV_Nov14",6.446e-08),
("pythia8_add_m3300_3800_7000_0_0_0_1_13TeV_Nov14",1.863e-08),
("pythia8_add_m3800_4300_7000_0_0_0_1_13TeV_Nov14",5.867e-09),
("pythia8_add_m4300_13000_7000_0_0_0_1_13TeV_Nov14",3.507e-09),
]),
("QCDADD8000",[("pythia8_add_m1500_1900_8000_0_0_0_1_13TeV_Nov14",3.307e-06),
("pythia8_add_m1900_2400_8000_0_0_0_1_13TeV_Nov14",8.836e-07),
("pythia8_add_m2400_2800_8000_0_0_0_1_13TeV_Nov14",1.649e-07),
("pythia8_add_m2800_3300_8000_0_0_0_1_13TeV_Nov14",6.446e-08),
("pythia8_add_m3300_3800_8000_0_0_0_1_13TeV_Nov14",1.863e-08),
("pythia8_add_m3800_4300_8000_0_0_0_1_13TeV_Nov14",5.867e-09),
("pythia8_add_m4300_13000_8000_0_0_0_1_13TeV_Nov14",3.507e-09),
]),
("QCDADD9000",[("pythia8_add_m1500_1900_9000_0_0_0_1_13TeV_Nov14",3.307e-06),
("pythia8_add_m1900_2400_9000_0_0_0_1_13TeV_Nov14",8.836e-07),
("pythia8_add_m2400_2800_9000_0_0_0_1_13TeV_Nov14",1.649e-07),
("pythia8_add_m2800_3300_9000_0_0_0_1_13TeV_Nov14",6.446e-08),
("pythia8_add_m3300_3800_9000_0_0_0_1_13TeV_Nov14",1.863e-08),
("pythia8_add_m3800_4300_9000_0_0_0_1_13TeV_Nov14",5.867e-09),
("pythia8_add_m4300_13000_9000_0_0_0_1_13TeV_Nov14",3.507e-09),
]),
("QCDADD10000",[("pythia8_add_m1500_1900_10000_0_0_0_1_13TeV_Nov14",3.307e-06),
("pythia8_add_m1900_2400_10000_0_0_0_1_13TeV_Nov14",8.836e-07),
("pythia8_add_m2400_2800_10000_0_0_0_1_13TeV_Nov14",1.649e-07),
("pythia8_add_m2800_3300_10000_0_0_0_1_13TeV_Nov14",6.446e-08),
("pythia8_add_m3300_3800_10000_0_0_0_1_13TeV_Nov14",1.863e-08),
("pythia8_add_m3800_4300_10000_0_0_0_1_13TeV_Nov14",5.867e-09),
("pythia8_add_m4300_13000_10000_0_0_0_1_13TeV_Nov14",3.507e-09),
]),
("QCDADD11000",[("pythia8_add_m1500_1900_11000_0_0_0_1_13TeV_Nov14",3.307e-06),
("pythia8_add_m1900_2400_11000_0_0_0_1_13TeV_Nov14",8.836e-07),
("pythia8_add_m2400_2800_11000_0_0_0_1_13TeV_Nov14",1.649e-07),
("pythia8_add_m2800_3300_11000_0_0_0_1_13TeV_Nov14",6.446e-08),
("pythia8_add_m3300_3800_11000_0_0_0_1_13TeV_Nov14",1.863e-08),
("pythia8_add_m3800_4300_11000_0_0_0_1_13TeV_Nov14",5.867e-09),
("pythia8_add_m4300_13000_11000_0_0_0_1_13TeV_Nov14",3.507e-09),
]),
("QCDADD12000",[("pythia8_add_m1500_1900_12000_0_0_0_1_13TeV_Nov14",3.307e-06),
("pythia8_add_m1900_2400_12000_0_0_0_1_13TeV_Nov14",8.836e-07),
("pythia8_add_m2400_2800_12000_0_0_0_1_13TeV_Nov14",1.649e-07),
("pythia8_add_m2800_3300_12000_0_0_0_1_13TeV_Nov14",6.446e-08),
("pythia8_add_m3300_3800_12000_0_0_0_1_13TeV_Nov14",1.863e-08),
("pythia8_add_m3800_4300_12000_0_0_0_1_13TeV_Nov14",5.867e-09),
("pythia8_add_m4300_13000_12000_0_0_0_1_13TeV_Nov14",3.507e-09),
]),
("QCDADD13000",[("pythia8_add_m1500_1900_13000_0_0_0_1_13TeV_Nov14",3.307e-06),
("pythia8_add_m1900_2400_13000_0_0_0_1_13TeV_Nov14",8.836e-07),
("pythia8_add_m2400_2800_13000_0_0_0_1_13TeV_Nov14",1.649e-07),
("pythia8_add_m2800_3300_13000_0_0_0_1_13TeV_Nov14",6.446e-08),
("pythia8_add_m3300_3800_13000_0_0_0_1_13TeV_Nov14",1.863e-08),
("pythia8_add_m3800_4300_13000_0_0_0_1_13TeV_Nov14",5.867e-09),
("pythia8_add_m4300_13000_13000_0_0_0_1_13TeV_Nov14",3.507e-09),
]),
("QCDADD14000",[("pythia8_add_m1500_1900_14000_0_0_0_1_13TeV_Nov14",3.307e-06),
("pythia8_add_m1900_2400_14000_0_0_0_1_13TeV_Nov14",8.836e-07),
("pythia8_add_m2400_2800_14000_0_0_0_1_13TeV_Nov14",1.649e-07),
("pythia8_add_m2800_3300_14000_0_0_0_1_13TeV_Nov14",6.446e-08),
("pythia8_add_m3300_3800_14000_0_0_0_1_13TeV_Nov14",1.863e-08),
("pythia8_add_m3800_4300_14000_0_0_0_1_13TeV_Nov14",5.867e-09),
("pythia8_add_m4300_13000_14000_0_0_0_1_13TeV_Nov14",3.507e-09),
]),
("QCDADD15000",[("pythia8_add_m1500_1900_15000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m1900_2400_15000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2400_2800_15000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2800_3300_15000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3300_3800_15000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3800_4300_15000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m4300_5200_15000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m5200_13000_15000_0_0_0_1_13TeV_Nov14",1),
]),
("QCDADD16000",[("pythia8_add_m1500_1900_16000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m1900_2400_16000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2400_2800_16000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2800_3300_16000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3300_3800_16000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3800_4300_16000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m4300_5200_16000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m5200_13000_16000_0_0_0_1_13TeV_Nov14",1),
]),
("QCDADD17000",[("pythia8_add_m1500_1900_17000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m1900_2400_17000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2400_2800_17000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2800_3300_17000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3300_3800_17000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3800_4300_17000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m4300_5200_17000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m5200_13000_17000_0_0_0_1_13TeV_Nov14",1),
]),
("QCDADD18000",[("pythia8_add_m1500_1900_18000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m1900_2400_18000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2400_2800_18000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2800_3300_18000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3300_3800_18000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3800_4300_18000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m4300_5200_18000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m5200_13000_18000_0_0_0_1_13TeV_Nov14",1),
]),
("QCDADD19000",[("pythia8_add_m1500_1900_19000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m1900_2400_19000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2400_2800_19000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2800_3300_19000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3300_3800_19000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3800_4300_19000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m4300_5200_19000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m5200_13000_19000_0_0_0_1_13TeV_Nov14",1),
]),
("QCDADD20000",[("pythia8_add_m1500_1900_20000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m1900_2400_20000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2400_2800_20000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2800_3300_20000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3300_3800_20000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3800_4300_20000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m4300_5200_20000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m5200_13000_20000_0_0_0_1_13TeV_Nov14",1),
]),
("QCDADD21000",[("pythia8_add_m1500_1900_21000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m1900_2400_21000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2400_2800_21000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2800_3300_21000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3300_3800_21000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3800_4300_21000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m4300_5200_21000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m5200_13000_21000_0_0_0_1_13TeV_Nov14",1),
]),
("QCDADD22000",[("pythia8_add_m1500_1900_22000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m1900_2400_22000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2400_2800_22000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m2800_3300_22000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3300_3800_22000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m3800_4300_22000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m4300_5200_22000_0_0_0_1_13TeV_Nov14",1),
("pythia8_add_m5200_13000_22000_0_0_0_1_13TeV_Nov14",1),
]),
]
if "np" in prefix:
    # Keep only the one sample group selected by the index encoded in the prefix
    samples=[samples[int(prefix.split("-")[1])]]
# NOTE(review): eval() on the first line of a local text file - trusted input only
xsecs=eval(open("xsecs_13TeV.txt").readline())
print xsecs
# Convert the chi bin-edge tuples into ROOT-compatible double arrays
chi_binnings=[]
for mass_bin in chi_bins:
    chi_binnings+=[array.array('d')]
    for chi_bin in mass_bin:
        chi_binnings[-1].append(chi_bin)
if len(sys.argv)>1:
    # Filter the sample list to those named on the command line;
    # the "QCD" group is always kept
    newsamples=[]
    for sample in samples:
        found=False  # NOTE(review): set but never consulted afterwards
        for arg in sys.argv:
            if sample[0]==arg or sample[0]=="QCD":
                newsamples+=[sample]
                break
    samples=newsamples
    if samples[-1][0]=="QCD":
        prefix+="_"+samples[-1][0]
    else:
        prefix+="_"+samples[-1][0].replace("QCD","")
print prefix, samples
# Build one histogram list per sample group, summing over its mass-binned files
plots=[]
for name,files in samples:
    plots+=[[]]
    i=0
    for filename,xsec in files:
        i+=1
        ps=createPlots(filename,name,float(xsecs[filename]),massbins)
        if i==1:
            plots[-1]+=ps
        else:
            # NOTE(review): this inner loop rebinds i (the file counter above);
            # it happens to work because i stays >1 afterwards, but it is fragile
            for i in range(len(plots[-1])):
                plots[-1][i].Add(ps[i])
# Write datacard shapes: rebin to the chi binning, then write data_obs/ALT/QCD
# templates for the QCD group and signal templates for everything else
out=TFile(prefix + '_chi.root','RECREATE')
for j in range(len(massbins)):
    for i in range(len(samples)):
        #if plots[i][j].Integral()>0:
        #    plots[i][j].Scale(expectedevents[j]/plots[i][j].Integral())
        plots[i][j]=plots[i][j].Rebin(len(chi_binnings[j])-1,plots[i][j].GetName()+"_rebin1",chi_binnings[j])
        if samples[i][0]=="QCD":
            # data
            plots[i][j].Write(plots[i][j].GetName().replace("QCD","data_obs"))
            # ALT
            clone=plots[i][j].Clone(plots[i][j].GetName().replace("QCD",samples[-1][0]+"_ALT"))
            clone.Write()
            # QCD - scaled to ~zero; presumably a placeholder template, confirm
            plots[i][j].Scale(1e-10)
            plots[i][j].Write()
            # QCD backup
            clonebackup=plots[i][j].Clone(plots[i][j].GetName()+"_backup")
            clonebackup.Write()
        else:
            # signal
            clone=plots[i][j]
            clone.Write()
            # signal backup
            clonebackup=plots[i][j].Clone(plots[i][j].GetName()+"_backup")
            clonebackup.Write()
# Normalize each histogram to unit area and divide by bin width for plotting
for j in range(len(massbins)):
    for i in range(len(samples)):
        if plots[i][j].Integral()>0:
            plots[i][j].Scale(1./plots[i][j].Integral())
        for b in range(plots[i][j].GetXaxis().GetNbins()):
            plots[i][j].SetBinContent(b+1,plots[i][j].GetBinContent(b+1)/plots[i][j].GetBinWidth(b+1))
            plots[i][j].SetBinError(b+1,plots[i][j].GetBinError(b+1)/plots[i][j].GetBinWidth(b+1))
        plots[i][j].GetYaxis().SetRangeUser(0,0.2)
# Draw one pad per mass bin, overlaying all sample groups with a legend
canvas = TCanvas("","",0,0,400,200)
canvas.Divide(2,1)
if len(massbins)>2:
    # More than two mass bins: switch to a larger 3x2 canvas
    canvas = TCanvas("","",0,0,600,400)
    canvas.Divide(3,2)
legends=[]
for j in range(len(massbins)):
    canvas.cd(j+1)
    plots[0][j].Draw("he")
    print "number of events passed:",plots[0][j].GetEntries()
    legend1=TLegend(0.6,0.6,0.9,0.9,(str(massbins[j][0])+"<m_{jj}<"+str(massbins[j][1])+" GeV").replace("4200<m_{jj}<13000","m_{jj}>4200"))
    legends+=[legend1]
    legend1.AddEntry(plots[0][j],samples[0][0],"l")
    for i in range(1,len(samples)):
        plots[i][j].SetLineColor(i+2)
        plots[i][j].Draw("hesame")
        legend1.AddEntry(plots[i][j],samples[i][0],"l")
    legend1.SetTextSize(0.04)
    legend1.SetFillStyle(0)
    legend1.Draw("same")
canvas.SaveAs(prefix + '_chi.pdf')
canvas.SaveAs(prefix + '_chi.eps')
if wait:
    os.system("ghostview "+prefix + '_chi.eps')
|
999,903 | 71a0d6bac88b73222fd08d41dbe1b82a54681826 | # Creating an object
class Classroom:
    """A container of people that can ask everyone it holds to say hello."""

    def __init__(self):
        # Internal roster of person objects.
        self._people = list()

    def add_person(self, person):
        """Add one person to the roster."""
        self._people.append(person)

    def remove_person(self, person):
        """Remove the first matching person from the roster."""
        self._people.remove(person)

    def greet(self):
        """Have each person in the roster introduce themselves in order."""
        for member in self._people:
            member.say_hello()
class Person:
    """A named person who can introduce themselves."""

    def __init__(self, name):
        # Display name used by say_hello.
        self.name = name

    def say_hello(self):
        """Print a short greeting that includes this person's name."""
        print("Hello, ", self.name)
# Demo: build a classroom and enroll a few people.
room = Classroom()
room.add_person(Person("Connor"))
room.add_person(Person("Sara"))
room.add_person(Person("Lara"))
room.add_person(Person("Dione"))
room.greet() |
999,904 | 7a559d44b71b76fc84361a341ddea9cdb737fcff | '''
ChainMap in collection.
- chainMap就是封裝許多dictionaries 到一個簡單的 unit,並return 一個list的dictoraries。
Reference:
- https://www.geeksforgeeks.org/python-collections-module/
'''
from collections import ChainMap

d1 = {'a': 1, 'b': 2}
d2 = {'c': 3, 'd': 4}
d3 = {'e': 5, 'f': 6}

# A ChainMap presents the three dicts as a single view without copying them;
# lookups search d1 first, then d2, then d3.
c = ChainMap(d1, d2, d3)
print(c)

# Accessing Keys and Values from ChainMap
print(c['a'])
print(c.values())
print(c.keys())

# Iterating the view walks the keys of the underlying dicts
for i in c:
    print(i)  # e f c d a b

for i in c.values():
    print(i)  # 5 6 3 4 1 2

for i in c.keys():
    print(i)  # e f c d a b
|
999,905 | 9fc7f4c6ffde05a09619c9a4171ef623928af350 | import random
# Generate two lists of 100 random ints in [0, 100], print both, then print
# the set of values they have in common.  Set intersection does this in O(n)
# instead of the original O(n*m) `el in lst2` scan over a list per element;
# the resulting set (and thus its printed repr) is identical.
lst1 = [random.randint(0, 100) for _ in range(100)]
lst2 = [random.randint(0, 100) for _ in range(100)]
print(lst1)
print(lst2)
print(set(lst1) & set(lst2))
999,906 | 8b5017fcee5c86b1a5d037acab67b7fc14c97d15 | import os
import keras
import numpy as np
from keras import regularizers
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.datasets import cifar100
from keras.layers import Flatten, Dense, Dropout, Activation, Conv2D, MaxPool2D, BatchNormalization
from keras.models import Sequential
from keras_preprocessing.image import ImageDataGenerator
def main():
    """Train (or resume training of) a VGG-style CNN on CIFAR-100 using SGD
    with step learning-rate decay and real-time image augmentation."""
    # LOAD DATA
    (x_train, y_train), (x_test, y_test) = cifar100.load_data()
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    # normalize() is defined elsewhere in this file - presumably standardizes
    # both splits using training-set statistics; confirm before reuse.
    x_train, x_test = normalize(x_train, x_test)
    # Labels are kept as integer class ids (matches the sparse loss below)
    # y_train = keras.utils.to_categorical(y_train, num_of_classes)
    # y_test = keras.utils.to_categorical(y_test, num_of_classes)

    # BUILD MODEL
    num_of_classes = 100
    model_saving_path = 'models/cnn_cifar100.h5'
    learning_rate = 0.1
    # Resume from the saved checkpoint when one exists, otherwise build fresh
    if os.path.exists(model_saving_path):
        model = keras.models.load_model(model_saving_path)
    else:
        model = create_model(input_shape=(32, 32, 3), num_of_classes=num_of_classes)
    model.summary()
    # NOTE(review): lr/decay (and period/fit_generator below) are legacy Keras
    # APIs - renamed or removed in tf.keras 2.x; fine for standalone Keras.
    sgd = keras.optimizers.SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='sparse_categorical_crossentropy', metrics=['accuracy'])

    # START TRAINING
    batch_size = 256
    lr_drop = 20

    def lr_scheduler(epoch):
        # Step decay: halve the learning rate every lr_drop epochs
        return learning_rate * (0.5 ** (epoch // lr_drop))
    reduce_lr = LearningRateScheduler(lr_scheduler)
    # Save the full model after every epoch, not only on val_loss improvement
    checkpoint_callback = ModelCheckpoint(model_saving_path, monitor='val_loss', verbose=1, save_best_only=False, save_weights_only=False, mode='auto', period=1)
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images
    datagen.fit(x_train)
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                        epochs=100,
                        steps_per_epoch=x_train.shape[0] // batch_size,
                        validation_data=(x_test, y_test),
                        callbacks=[reduce_lr, checkpoint_callback],
                        verbose=2)
def create_model(input_shape, num_of_classes):
    """Build a VGG16-style CNN for CIFAR classification.

    Every conv layer is 3x3, same-padding, with L2 weight decay, followed by
    ReLU and batch normalization; dropout and 2x2 max-pooling are interleaved
    exactly as in the original hand-unrolled version.

    Args:
        input_shape: shape of one input image, e.g. (32, 32, 3).
        num_of_classes: size of the softmax output layer.

    Returns:
        An uncompiled keras Sequential model.
    """
    weight_decay = 0.0005
    model = Sequential()

    def conv_block(filters, dropout=None, pool=False, **kwargs):
        # One Conv-ReLU-BatchNorm unit, optionally followed by Dropout
        # and/or a 2x2 MaxPool.  Factors out the 16 identical stanzas the
        # original repeated inline.
        model.add(Conv2D(filters, (3, 3), padding='same',
                         kernel_regularizer=regularizers.l2(weight_decay), **kwargs))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        if dropout is not None:
            model.add(Dropout(dropout))
        if pool:
            model.add(MaxPool2D(pool_size=(2, 2)))

    conv_block(64, dropout=0.3, input_shape=input_shape)
    conv_block(64, pool=True)
    conv_block(128, dropout=0.4)
    conv_block(128, pool=True)
    conv_block(256, dropout=0.4)
    conv_block(256, dropout=0.4)
    conv_block(256, pool=True)
    conv_block(512, dropout=0.4)
    conv_block(512, dropout=0.4)
    conv_block(512, pool=True)
    conv_block(512, dropout=0.4)
    conv_block(512, dropout=0.4)
    conv_block(512, pool=True)

    # Classifier head.
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(512, kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(num_of_classes))
    model.add(Activation('softmax'))
    return model
def normalize(x_train, x_test):
    """Standardize both splits using the training set's global mean and std.

    The small epsilon guards against division by zero for a constant input.
    Returns the pair (normalized x_train, normalized x_test).
    """
    mu, sigma = np.mean(x_train), np.std(x_train)
    scale = sigma + 1e-7
    return (x_train - mu) / scale, (x_test - mu) / scale
# Allow importing create_model/normalize without triggering a training run.
if __name__ == '__main__':
    main()
|
999,907 | af839b47e0ca3bbdd42b3eecfe1c6c77447df354 | from tkinter import *
import sqlite3
import os.path
import datetime
import sys
# Make the project root importable so HospitalManagementDBMS resolves when
# this script is launched from its own directory.
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
from HospitalManagementDBMS import settings
settings.init()
# One module-level SQLite connection/cursor, shared by the whole GUI; the
# database file lives one directory above this script.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(BASE_DIR, "../database.db")
conn = sqlite3.connect(db_path)
c = conn.cursor()
class Application:
    """Billing window: look up a patient's appointment and display the bill."""

    def __init__(self, master):
        self.master = master
        # Header: hospital name plus the current date/time in the corner.
        self.heading = Label(master, text="Sagar Hospitals", fg='steelblue',bg='lightgreen', font=('arial 40 bold'))
        self.heading.place(x=150, y=0)
        now = datetime.datetime.now()
        date = now.strftime("%d-%m-%Y %H:%M")
        self.heading = Label(master, text=date, bg='lightgreen')
        self.heading.place(x=750, y=10)
        # Patient-name search box and its button.
        self.name = Label(master, text="Enter Patient's Name",bg='lightgreen', font=('arial 18 bold'))
        self.name.place(x=0, y=60)
        self.namenet = Entry(master, width=30)
        self.namenet.place(x=280, y=62)
        self.search = Button(master, text="Search", width=12, height=1, bg='steelblue', command=self.search_db)
        self.search.place(x=350, y=102)

    def search_db(self):
        """Query appointments by the entered name and render billing details."""
        self.input = self.namenet.get()
        sql = "SELECT * FROM appointments WHERE name LIKE ?"
        self.res = c.execute(sql, (self.input,))
        # NOTE(review): the loop body re-executes a query on the same module
        # cursor `c`, which invalidates this outer iteration -- in practice
        # only the first matching appointment is rendered reliably.
        for self.row in self.res:
            # Appointment row layout (assumed: id, name, age, gender,
            # location, phone, time, reason -- verify against the schema).
            self.name1 = self.row[1]
            self.age = self.row[2]
            self.gender = self.row[3]
            self.location = self.row[4]
            self.time = self.row[6]
            self.phone = self.row[5]
            self.reason = self.row[7]
            # Static labels for the patient summary.
            self.uname = Label(self.master, text="Patient's Name",bg='lightgreen', font=('arial 18 bold'))
            self.uname.place(x=0, y=140)
            self.ulocation = Label(self.master, text="Location",bg='lightgreen', font=('arial 18 bold'))
            self.ulocation.place(x=0, y=180)
            self.uphone = Label(self.master, text="Phone Number",bg='lightgreen', font=('arial 18 bold'))
            self.uphone.place(x=0, y=220)
            # Values pulled from the appointment row.
            self.ent1 = Label(self.master, width=30, text=str(self.name1), bg='lightgreen', anchor=W, font=('arial 14'))
            self.ent1.place(x=300, y=140)
            self.ent4 = Label(self.master, width=30, text=str(self.location), bg='lightgreen', anchor=W, font=('arial 14'))
            self.ent4.place(x=300, y=180)
            self.ent5 = Label(self.master, width=30, text=str(self.phone), bg='lightgreen', anchor=W, font=('arial 14'))
            self.ent5.place(x=300, y=220)
            # Billing panel: reason, base cost, GST, medication cost, total.
            frameReason = Frame(self.master,bg='lightgreen',relief = 'sunken',borderwidth=2,width=420,height=200)
            frameReason.place(x=0, y=260)
            self.ureason = Label(frameReason, text="Reason", font=('arial 18 bold'), fg='black', bg='lightgreen')
            self.ureason.place(x=0, y=0)
            self.cost = Label(frameReason, text="Cost", font=('arial 18 bold'), fg='black', bg='lightgreen')
            self.cost.place(x=0, y=40)
            self.gst = Label(frameReason, text="GST", font=('arial 18 bold'), fg='black', bg='lightgreen')
            self.gst.place(x=0, y=80)
            self.meds = Label(frameReason, text="Cost of Medication", font=('arial 18 bold'), fg='black', bg='lightgreen')
            self.meds.place(x=0, y=120)
            self.total = Label(frameReason, text="Total", font=('arial 18 bold'), fg='black', bg='lightgreen')
            self.total.place(x=0, y=160)
            # Look up base cost and medication cost for this reason/disease.
            condition = (settings.diseases[settings.diseases.index(self.reason)])
            comand1 = "SELECT * FROM reason where name LIKE ?"
            self.res = c.execute(comand1,(settings.diseases[settings.diseases.index(self.reason)],))
            for self.row in self.res :
                varCost = self.row[1]
                varMedCost = self.row[2]
            self.ent7 = Label(frameReason, text=condition, anchor=W,bg='lightgreen',font=('arial 14 bold'))
            self.ent7.place(x=300,y=0)
            self.ent8 = Label(frameReason, text=varCost, anchor=W,bg='lightgreen',font=('arial 14 bold'))
            self.ent8.place(x=300,y=40)
            # GST is computed at a flat 14% of the base cost.
            gst = int(varCost*0.14)
            self.ent9 = Label(frameReason, text=gst, anchor=W,bg='lightgreen',font=('arial 14 bold'))
            self.ent9.place(x=300,y=80)
            self.ent10 = Label(frameReason, text=varMedCost, anchor=W,bg='lightgreen',font=('arial 14 bold'))
            self.ent10.place(x=300,y=120)
            self.ent11 = Label(frameReason, text=varCost+gst+varMedCost, anchor=W,bg='lightgreen',font=('arial 14 bold'))
            self.ent11.place(x=300,y=160)
            # Action buttons (no command handlers are wired up yet).
            self.update = Button(self.master, text="Print", width=20, height=2, bg='lightblue')
            self.update.place(x=300, y=500)
            self.delete = Button(self.master, text="Save", width=20, height=2, bg='lightblue')
            self.delete.place(x=0, y=500)
# Build the fixed-size main window and hand control to Tk's event loop.
root = Tk()
b = Application(root)
root.geometry("1200x720+0+0")
root.title("Billing")
root.resizable(False, False)
root.config(bg='lightgreen')
root.mainloop()
|
999,908 | 391643082c08c81c39d0a4c21cf1c4f390193f79 | # This script starts a number of forked processes to generate TSV files.
#
# Usage
#
# python run_h5_to_tsv.py h5file 0
#
# Where 0 is the slice that will be converted into a CSV.
import os, sys
# Launch the helper scripts that live next to this file; the trailing shell
# "&" backgrounds each one so they all run concurrently.
# NOTE(review): sys.argv[1] is interpolated directly into a shell command
# line -- only safe for trusted arguments.
os.system("python " + os.path.dirname(os.path.realpath(__file__)) + "/make_feature_tsv_from_h5.py {}&".format(sys.argv[1]))
os.system("cat columns.tsv | python " + os.path.dirname(os.path.realpath(__file__)) + "/create_feature_table.py {}&".format(sys.argv[1]))
# One converter process per slice (slices 0..30).
for k in range(0, 31):
    os.system("python " + os.path.dirname(os.path.realpath(__file__)) + "/h5_to_tsv.py {} {}&".format(sys.argv[1], k))
|
999,909 | 98d6926e750579086e5304985863b1ac87f33ac9 | # Довженко Віталій
# Лабораторна робота №2
# Розрахувати значення x, визначивши і використавши відповідну функцію
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMessageBox
from PyQt5.uic import loadUi
from math import sqrt
import sys
class mywindow(QtWidgets.QMainWindow):
    """Main window for lab 2: computes x via my_formula when showing is allowed."""

    def __init__(self):
        super(__class__, self).__init__()
        # Widget layout comes from the Qt Designer file next to this script.
        loadUi("Lab2_view.ui", self)
        # The checkbox state decides whether the result may be displayed.
        self.pushButton.clicked.connect(
            lambda: self.btnClicked(self.checkBox.isChecked())
        )

    @staticmethod
    def my_formula(a):
        # x(a) = (sqrt(a) + a) / 2
        return (sqrt(a) + a) / 2

    def btnClicked(self, chk):
        """Show the computed sum in a message box, or an error when disallowed."""
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        if chk:
            # Sum the formula evaluated at the three fixed inputs 7, 14, 24.
            a = mywindow.my_formula(7)
            b = mywindow.my_formula(14)
            c = mywindow.my_formula(24)
            ans = a + b + c
            msg.setText("Результат = " + str(round(ans, 2)))
            msg.exec()
            self.ans_label.setText(str(round(ans, 2)))
        else:
            # Showing the result is forbidden while the checkbox is unchecked.
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Заборонений показ результату")
            msg.exec()
# Create the Qt application, show the main window, and block in the event
# loop; propagate Qt's exit code to the shell.
app = QtWidgets.QApplication([])
application = mywindow()
application.show()
sys.exit(app.exec())
|
999,910 | bcae0b431d9bb8f1f7dbfede7a9696060dda5c11 | from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Import polling stations/addresses for Malvern Hills (MAV), May 2021."""
    council_id = "MAV"
    addresses_name = "2021-03-03T13:39:58.634524/Democracy_Club__06May2021.tsv"
    stations_name = "2021-03-03T13:39:58.634524/Democracy_Club__06May2021.tsv"
    elections = ["2021-05-06"]
    csv_delimiter = "\t"

    def station_record_to_dict(self, record):
        # Hand-corrected grid reference for one mis-located polling place.
        if record.polling_place_id == "11488":
            record = record._replace(polling_place_easting="379264")
            record = record._replace(polling_place_northing="246303")
        rec = super().station_record_to_dict(record)
        return rec

    def address_record_to_dict(self, record):
        uprn = record.property_urn.strip().lstrip("0")
        # Returning None excludes the record from the import.
        if uprn in [
            "100121268209",  # CHERRY TREE COTTAGE MOSELEY ROAD, HALLOW
        ]:
            return None
        # Whole postcodes excluded from the import.
        if record.addressline6 in [
            "WR5 3PA",
            "WR6 6YY",
            "WR15 8JF",
            "WR15 8DP",
            "WR2 6RB",
            "WR14 4JY",
        ]:
            return None
        rec = super().address_record_to_dict(record)
        return rec
|
999,911 | 125dbacd9c7d3b92c3d2a316ff9c717581f2f9c6 | import argparse
import os
import subprocess
import sys
# map location in repo to deploy path (relative to home directory)
path_mappings = {
".bashrc": ".bashrc",
".bash_aliases": ".bash_aliases",
".config/nvim/init.vim": ".config/nvim/init.vim",
".config/nvim/go.vim": ".config/nvim/go.vim",
".config/nvim/UltiSnips/python.snippets": ".config/nvim/UltiSnips/python.snippets",
".config/nvim/UltiSnips/text.snippets": ".config/nvim/UltiSnips/text.snippets",
".inputrc": ".inputrc",
".ssh_config": ".ssh/config",
"tmux_base.yaml": ".tmuxp/base.yaml",
".tmux.conf": ".tmux.conf",
".vimrc": ".vimrc",
}
def parse_args():
    """Build the CLI parser and return the parsed argument namespace."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "command",
        type=str,
        choices=["list", "compare", "backup", "deploy"],
        help="command to run",
    )
    arg_parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        help="flag to override limits on running commands with uncomitted changes",
    )
    return arg_parser.parse_args()
def get_gh_fullpath(filename):
    """Absolute path of a tracked dotfile inside the ~/.dotfiles checkout."""
    home = os.environ["HOME"]
    return os.path.join(home, ".dotfiles", filename)


def get_system_fullpath(filepath):
    """Absolute location of a deployed dotfile under $HOME."""
    return os.path.join(os.environ["HOME"], filepath)
def warn_uncommitted_changes(force):
    """
    Inspect `git status` and warn when the repo has modified or untracked files.

    When uncommitted changes exist and *force* is False, print a hint and exit
    with status 1; with *force* the warning is printed and execution continues.
    """
    status = subprocess.run(["git", "status"], capture_output=True, text=True)
    dirty = "modified" in status.stdout or "Untracked" in status.stdout
    if not dirty:
        return
    print("Warning: repository has uncommitted changes:\n")
    print("-----------------------------------------------------------------------")
    print(f"{status.stdout}")
    print("-----------------------------------------------------------------------")
    if not force:
        print("\nRun with -f to override")
        sys.exit(1)
def compare(args):
    """Diff every tracked dotfile in the repo against its deployed copy."""
    warn_uncommitted_changes(args.force)
    for repo_file, deployed_path in path_mappings.items():
        cmd = ["diff", get_gh_fullpath(repo_file), get_system_fullpath(deployed_path)]
        print(" ".join(cmd))
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.stderr:
            print(f"Error running command: {result.stderr}")
        elif result.stdout:
            print(result.stdout)
def deploy(args):
    """Copy each repo dotfile into place, backing up the current file first.

    The existing deployed file is saved as <path>.bak before being replaced;
    if the backup copy fails, that mapping is skipped entirely.
    """
    warn_uncommitted_changes(args.force)

    def run_cp(src, dst):
        # Run `cp src dst`, echoing the command, and return the result.
        cmd = ["cp", src, dst]
        print(" ".join(cmd))
        return subprocess.run(cmd, capture_output=True, text=True)

    for repo_file, deployed_path in path_mappings.items():
        target = get_system_fullpath(deployed_path)
        backup_result = run_cp(target, f"{target}.bak")
        if backup_result.stderr:
            print(f"Error running command: {backup_result.stderr}")
            continue
        copy_result = run_cp(get_gh_fullpath(repo_file), target)
        if copy_result.stderr:
            print(f"Error running command: {copy_result.stderr}")
        elif copy_result.stdout:
            print(copy_result.stdout)
def backup(args):
    """Copy each deployed dotfile back into the repository checkout."""
    warn_uncommitted_changes(args.force)
    for repo_file, deployed_path in path_mappings.items():
        cmd = ["cp", get_system_fullpath(deployed_path), get_gh_fullpath(repo_file)]
        print(" ".join(cmd))
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.stderr:
            print(f"Error running command: {result.stderr}")
        elif result.stdout:
            print(result.stdout)
def list_mappings(args):
    """Print every repo-file -> deployed-file mapping, one per line."""
    for repo_file, deployed_path in path_mappings.items():
        print(f"{get_gh_fullpath(repo_file)} -> {get_system_fullpath(deployed_path)}")
def main():
    """Entry point: dispatch the parsed subcommand to its handler."""
    args = parse_args()
    handlers = {
        "compare": compare,
        "deploy": deploy,
        "backup": backup,
        "list": list_mappings,
    }
    # argparse `choices` guarantees the command is one of the keys above.
    handler = handlers.get(args.command)
    if handler is not None:
        handler(args)
# Run the CLI only when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
999,912 | 86d2f95e8af1f0ddcffdbd74ba9059f64025210f | import os
import cgi, cgitb
import random
# NOTE(review): `form` looks like residue from an earlier CGI version of this
# script; it is never referenced by the Flask app below -- confirm before removing.
form = cgi.FieldStorage()
from flask import Flask, request, redirect, render_template, session, url_for, g, jsonify
app = Flask(__name__)
app.config.from_mapping(SECRET_KEY='devIAm') # Needed for session tracking
# Note flask does CLIENT session data storage! Watch data sizes!
@app.route('/', methods=['GET','POST'])
def randomizer():
    """Shuffle the lines submitted in the 'text_box' query argument.

    Renders index.html with the shuffled text and a per-session visit
    counter; when a 'json' query argument is also present, the shuffled
    lines are returned as a JSON array instead.
    """
    string1 = ""
    if 'text_box' in request.args:
        # splitlines() also strips '\r' from CRLF submissions.  The original
        # test (`word not in " "`) was a substring check, so whitespace-only
        # lines such as "  " slipped through; strip() drops all blank lines.
        lines = [line for line in request.args['text_box'].splitlines()
                 if line.strip()]
        random.shuffle(lines)
        string1 = '\n'.join(lines)
        if 'json' in request.args:
            return jsonify(lines)
    # Count page views per client session (Flask stores session data client-side).
    session['times'] = session.get('times', 0) + 1
    return render_template('index.html', string1 = string1, times = session['times']) # Send t to the template
@app.route('/logout')
def logout():
    """Drop all session data (including the visit counter) and go home."""
    session.clear()
    return redirect(url_for('randomizer')) # 'randomizer' is the view function defined above
if __name__ == '__main__':
    # Bind to all interfaces so the app is reachable through Docker port
    # mapping.  NOTE(review): debug=True must be disabled outside development.
    app.run(debug=True, host='0.0.0.0') # Enable on all devices so Docker works!
999,913 | 4ade6f502323803542f640a268cf1edabf1a9bd8 | '''
Module containing the methods for the detection of the arrows.
This is where the neural network is applied and the images are passed through.
Methods here are:
    load_default_model(): loads the default model, which works well for most reaction images
    get_direction(): gets the direction of the arrow; recursively uses pipeline() to ensure that text
    has not been merged into the arrow
    pipeline(): the final pipeline; give it an image to analyse and it returns the detected arrows
'''
import os
import cv2
import math
import imutils
import json
import copy
import time
import logging
import scipy
import numpy as np
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
import pandas as pd
import pytesseract
from pytesseract import Output
import sklearn
from sklearn.cluster import KMeans
import skimage
from skimage import io
from skimage.util import pad
from skimage.color import rgb2gray
from skimage.measure import regionprops
from skimage.measure import find_contours
from skimage.util import crop as crop_skimage
from skimage.util import random_noise
from skimage.morphology import binary_closing, disk
from skimage.morphology import skeletonize as skeletonize_skimage
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.regularizers import l1, l2
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import sklearn
from sklearn.cluster import KMeans
from sklearn.model_selection import KFold
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from arrow import *
from scikit_arrow_utils import arrow_average, arrow_centroid, line_mag, get_contour_height, get_contour_length, get_orientation
from image_utils import binary_close, binarize, binary_floodfill, skeletonize, pixel_ratio, skeletonize_area_ratio
from scikit_scripts import pad_image, segment_image, show_contours, get_image_contours
def load_default_model():
    '''
    Load the default arrow-detection network from models/notOverfittedModel2,
    resolved relative to the current working directory.
    @RETURNS:
        - model: the default neural network
    '''
    model_dir = os.path.join(os.getcwd(), 'models', 'notOverfittedModel2')
    return keras.models.load_model(model_dir)
def get_direction(arrow_contours, doc_name, count, model, image = None):
    '''
    Determine the pointing direction of each arrow contour.

    For horizontal arrows the x-extremes are compared against the centroid
    (Right/Left); for vertical ones the y-extremes are used (Up/Down).  If a
    "horizontal" arrow is taller than 15 px, text was probably merged into it
    by binary closing, so the whole pipeline is re-run without segmentation.

    @PARAM:
        - arrow_contours: the contours of the arrows extracted upstream
        - doc_name: name of the document (used for logging in pipeline())
        - count: page count
        - model: model used for detection
        - image: full page image; needed for the re-run described above.
          May be left None when only one set of contours is being checked.
    @RETURN:
        - dict mapping 'Arrow N' -> 'Right' | 'Left' | 'Up' | 'Down'
    '''
    directions = {}
    # averages = arrow_average(arrow_contours)
    centroids = arrow_centroid(arrow_contours)
    orientations = get_orientation(arrow_contours)
    for arrow in range(len(orientations)):
        name = 'Arrow ' + str(arrow + 1)
        if orientations[arrow] == "Horizontal":
            height, extreme_idx = get_contour_height(arrow_contours[arrow])
            # Suspiciously tall "arrows" likely have text merged in; redo the
            # extraction without binary closing to separate them.
            # BUG FIX: this was `image != None`, which for a numpy array is an
            # elementwise comparison and makes the `if` raise ValueError;
            # identity comparison is required here.
            if height > 15 and image is not None:
                info, arrow_contours, averages, directions = pipeline(image, doc_name, count, model = model, segment = False)
            x_min = arrow_contours[arrow][extreme_idx[0]][1]
            x_max = arrow_contours[arrow][extreme_idx[1]][1]
            if (x_min + x_max) * 0.5 >= centroids[arrow][0]:
                directions[name] = 'Right'
            else:
                directions[name] = 'Left'
        else:
            length, extreme_idx = get_contour_length(arrow_contours[arrow])
            y_min = arrow_contours[arrow][extreme_idx[0]][0]
            y_max = arrow_contours[arrow][extreme_idx[1]][0]
            if (y_min + y_max) * 0.5 >= centroids[arrow][1]:
                directions[name] = 'Up'
            else:
                directions[name] = 'Down'
    return directions
def prepare_padded_images(image, segment = True):
    '''
    Given a reaction image, create the set of padded images for all contours.

    @PARAM:
        - image: image we want to get the padded images from
        - segment: True for the full treatment (binarize + binary close),
          False for binarization only.  Default is True.
    @RETURN:
        - cnts1: the contours found in the processed image
        - padded_images: numpy array of shape (num images, 500, 500, 1)
    '''
    # Segment the image when requested, otherwise just binarize.
    if segment:
        image = segment_image(image)
    else:
        image = binarize(image)
    cnts1 = find_contours(image, 0)
    # Bail out on pathologically busy images.
    # BUG FIX: the early return used to yield FOUR values while the normal
    # path (and every caller) unpacks two, which raised ValueError.
    if len(cnts1) > 400:
        return [], []
    padded_images = []
    for i, cnt in enumerate(cnts1):
        padded = pad_image(cnts1, i)
        padded = segment_image(padded)
        padded_images.append(padded)
    padded_images = np.array(padded_images)
    padded_images = padded_images.reshape(padded_images.shape[0], 500, 500, 1)
    return cnts1, padded_images
def pipeline(image, doc_name, count, model = None, segment = True, verbose=1):
    '''
    Full extraction pipeline: reaction image -> arrow contours and directions.

    Steps:
        (1) Segment (or just binarize) the image and find all contours
        (2) Pad every contour onto a 500x500 canvas and score it with the model
        (3) Keep contours scored >= 0.6 whose height is at most 12.5% of the
            page height (taller blobs are assumed not to be arrows)
        (4) Build Arrow objects with centroid, average and direction
        (5) If nothing was found with segmentation on, retry once without it

    @PARAM:
        - image: the page image to analyse
        - doc_name: name of the document (for logging)
        - count: page count (for logging)
        - model: detection model; the default model is loaded when None
        - segment: whether to binary-close during preprocessing
        - verbose: 0 for silent, 1 to print progress
    @RETURN:
        - info: list of Arrow dictionaries
        - final_contours: contours of the detected arrows
        - centroids: centroids of the detected arrows
        - directions: directions of the detected arrows
    '''
    # BUG FIX: `model == None` replaced with an identity check; also removed a
    # stray trailing "|" after the final return that made the module unparsable.
    if model is None:
        model = load_default_model()
    times = []
    times.append(time.time())
    cnts1, padded_images = prepare_padded_images(image, segment = segment)
    results = model.predict(padded_images)
    final_contours = []
    final_index = []  # kept (with conf) for debugging even though unused below
    conf = []
    for contour in range(len(results)):
        if results[contour] >= 0.6:
            height,_ = get_contour_height(cnts1[contour])
            length, _ = get_contour_length(cnts1[contour])
            # Arrows are thin: reject anything taller than 12.5% of the page.
            if (height / image.shape[0]) <= 0.125:
                final_contours.append(cnts1[contour])
                final_index.append(contour)
                conf.append(results[contour])
    centroids = arrow_centroid(final_contours)
    averages = arrow_average(final_contours)
    directions = get_direction(final_contours, doc_name, count, model, image)
    info = []
    for i, arrow in enumerate(final_contours):
        arrow = Arrow(final_contours[i], centroids[i], averages[i], directions['Arrow ' + str(i + 1)])
        info.append(arrow.to_dictionary())
    # Nothing found: retry once without binary closing (text may have merged
    # contours together).  NOTE(review): the retry is gated on verbose == 1,
    # which looks accidental -- confirm whether silent runs should retry too.
    if len(final_contours) == 0 and verbose == 1:
        if segment:
            info, arrow_contours, averages, directions = pipeline(image, doc_name, count, model = model, segment = False)
            if len(info) == 0:
                print('Label ' + str(count) + ' ' + doc_name + ': No Arrows were Identified')
                return [], cnts1, [], []
            else:
                return info, arrow_contours, averages, directions
    times.append(time.time())
    if verbose == 1:
        print('Label ' + str(count) + ' ' + doc_name + ' ' + str(len(final_contours)) +
              " Arrows Extracted! Time Elapsed: %.2fs"%(times[-1] - times[-2]))
    return info, final_contours, centroids, directions
999,914 | 8fead57ad7427a5ea0f554acc4562555a387aa91 | from __future__ import print_function
import sys
from lxml import etree, objectify
# Parse the XML file named on the command line and pretty-print it to stdout
# with an XML declaration.  A context manager closes the input file
# deterministically (the original leaked the handle returned by open()).
with open(sys.argv[1], "rb") as xml_file:
    xml = objectify.parse(xml_file)
print(etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding="UTF-8").decode())
|
999,915 | aecea8735463b15be9cb1487d29539eb5eb48324 | import vanilla
from django.db.models import Q
from django.contrib import messages
from django.http import Http404
# For LoginRequiredMixin
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
class LoginRequiredMixin(object):
    u"""Ensures that user must be authenticated in order to access view."""
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Wrapping dispatch covers every HTTP method: unauthenticated users
        # are redirected to the login page before any handler runs.
        return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class ExtraContextMixin(object):
    """Merge every `extra_context` dict found along the MRO into the context.

    A string value of the form "get_..." names a method on the view; the
    method is called and its return value used instead of the string.
    NOTE(review): `extra_context` is a shared class-level dict -- subclasses
    should assign their own dict rather than mutate this one.
    """
    extra_context = {}
    def collect_bases(self, classType):
        # Depth-first list of classType plus all of its ancestors.
        # NOTE(review): duplicates are NOT removed here despite the comment in
        # get_context_data; diamond-inherited bases are visited more than once.
        bases = [classType]
        for baseClassType in classType.__bases__:
            bases += self.collect_bases(baseClassType)
        return bases
    def get_context_data(self, **kwargs):
        # Get the bases and remove duplicates
        bases = self.collect_bases(self.__class__)
        # for b in bases: print b
        # Reverse so the most-derived class's entries win on key clashes.
        bases.reverse()
        # print "======= Printing Bases ========"
        for base in bases:
            if hasattr(base, 'extra_context'):
                for key, value in base.extra_context.items():
                    # print "key: %s value: %s" % (key, value)
                    # First check to see if it's the name of a function
                    if isinstance(value, basestring) and value[:4] == "get_": kwargs[key] = getattr(self,value)()
                    # Otherwise, just add it to the context
                    else: kwargs[key] = value
            # print "Base: %s Template: %s" % (base, kwargs.get('row_template_name',""))
            # for k,v in kwargs.items(): print "%s: %s" % (k,v)
        # print "==============================="
        # for key, value in kwargs.items(): print key+":"+str(value)
        return super(ExtraContextMixin, self).get_context_data(**kwargs)
class MessageMixin(object):
    """Flash a success/error message when the form validates or fails.

    Subclasses set `success_message` / `error_message`, or override the
    get_*_message hooks for per-form text; a None/empty message is skipped.
    """
    # Sends messages when form is valid or invalid
    success_message = None
    error_message = None
    def get_error_message(self, form):return self.error_message
    def get_success_message(self, form):return self.success_message
    def form_valid(self, form):
        msg=self.get_success_message(form)
        if msg: messages.success(self.request, msg)
        return super(MessageMixin, self).form_valid(form)
    def form_invalid(self, form):
        error_msg=self.get_error_message(form)
        if error_msg: messages.error(self.request, error_msg)
        return super(MessageMixin, self).form_invalid(form)
class AjaxPostMixin(object):
    """Render the single-row template after a successful save (AJAX flow)."""
    template_name = 'design/ajax_row.html'
    # Renders a response when form_valid
    def form_valid(self, form):
        # Instead of redirecting (the vanilla default), save and return the
        # rendered row so client-side JS can swap it into the page.
        self.object = form.save()
        context = self.get_context_data(form=form)
        return self.render_to_response(context)
class FormWithUserMixin(object):
    """Pass the requesting user into the form's constructor as `user`."""
    def get_form(self, data=None, files=None, **kwargs):
        cls = self.get_form_class()
        # The form class is expected to accept a `user` keyword argument.
        kwargs['user'] = self.request.user
        return cls(data=data, files=files, **kwargs)
class NonModelFormMixin(object):
    """Use a plain (non-model) form inside a model view."""
    def get_form(self, data=None, files=None, **kwargs):
        # Plain forms do not accept the `instance` kwarg that model views pass.
        del kwargs['instance']
        return super(NonModelFormMixin, self).get_form(data=data, files=files, **kwargs)
class CheckOwnerMixin(object):
    """Restrict queryset/object access to rows owned by the requesting user.

    Set `owner_field_name` to the model field holding the owner; when it is
    None (the default) the mixin is a no-op.
    """
    owner_field_name = None
    def get_queryset(self):
        qs= super(CheckOwnerMixin, self).get_queryset()
        # BUG FIX: the field name was referenced as a bare local
        # (`owner_field_name`), which raised NameError whenever filtering ran.
        if self.owner_field_name: qs = qs.filter(**{self.owner_field_name:self.request.user})
        return qs
    def get_object(self):
        obj = super(CheckOwnerMixin, self).get_object()
        # BUG FIX: Http404 must be *raised*, not returned, for Django to
        # produce a 404 response (the original returned the exception class).
        if self.owner_field_name and not getattr(obj,self.owner_field_name)==self.request.user: raise Http404
        return obj
# Concrete views: each combines a vanilla base view with the shared mixins
# (extra context, flash messages, and ownership checks where relevant).
class CreateView(ExtraContextMixin, MessageMixin, vanilla.CreateView): pass
class DetailView(CheckOwnerMixin, ExtraContextMixin, MessageMixin, vanilla.DetailView): pass
class UpdateView(CheckOwnerMixin, ExtraContextMixin, MessageMixin, vanilla.UpdateView): pass
class DeleteView(CheckOwnerMixin, ExtraContextMixin, vanilla.DeleteView): pass
class FormView(ExtraContextMixin, MessageMixin, vanilla.FormView): pass
class GenericModelView(CheckOwnerMixin, ExtraContextMixin, MessageMixin, vanilla.GenericModelView):pass
class TemplateView(ExtraContextMixin, MessageMixin, vanilla.TemplateView):pass
class GenericAjaxModelView(AjaxPostMixin, GenericModelView): pass
# def post(self, request, *args, **kwargs):
#     self.object = self.get_object()
#     self.do_task()
#     context = self.get_context_data()
#     return self.render_to_response(context)
class AjaxCreateView(AjaxPostMixin, CreateView): pass
class AjaxUpdateView(AjaxPostMixin, UpdateView): pass
class AjaxDeleteView(DeleteView):
    """Delete the object over AJAX and return the rendered row template."""
    success_message = ""
    template_name = 'design/ajax_row.html'
    def post(self, request, *args, **kwargs):
        # We save the pk so the js will know which row to replace
        self.object = self.get_object()
        old_pk=self.object.pk
        self.object.delete()
        if self.success_message: messages.success(self.request, self.success_message)
        context = self.get_context_data()
        # Restore the pk (delete() clears it) so the template can label the row.
        context['object'].pk=old_pk
        return self.render_to_response(context)
class ListView(ExtraContextMixin, vanilla.ListView):
    """List view with declarative filtering (`filter_on`) and search (`search_on`).

    Search matches the GET parameter named by `search_key` against every field
    in `search_on` (icontains by default); filtering applies an exact match
    for each GET parameter listed in `filter_on`.  Both support per-field
    overrides via `search_<field>` / `filter_<field>` attributes.
    """
    search_key = 'q'
    search_on = []
    filter_on = []
    def return_pipe(self, x,y=None):
        # OR-combine two Q objects, tolerating None on either side
        # (returns None when both are None).
        if x and y: return x | y
        elif x: return x
        elif y: return y
    def filter(self, queryset):
        # Takes a queryset and returns the queryset filtered
        # If you want to do a custom filter for a certain field,
        # Declare a function: filter_{{fieldname}} that takes a queryset and the value,
        # does the filter and returns the queryset
        for f in self.filter_on:
            if hasattr(self,'filter_'+f): queryset = getattr(self,'filter_'+f)(queryset, self.request.GET.getlist(f,[]))
            elif f in self.request.GET: queryset = queryset.filter(**{f:self.request.GET[f]})
        return queryset
    def search(self, queryset):
        # Takes a queryset and returns the queryset filtered based on search
        # The default search is "field = icontains"
        # If you want to do a custom search on a certain field, there are two ways:
        # 1) Declare a function: search_{{fieldname}} that takes a value and returns a Q()
        #    Example: def search_field(self, value): return Q(field = value)
        # 2) Set search_{{fieldname}} to a string to use as the filter name.
        #    Example: search_field = "field__icontains"
        if self.search_key in self.request.GET:
            value = self.request.GET[self.search_key]
            query=None
            for field in self.search_on:
                if hasattr(self,'search_'+field):
                    search=getattr(self,'search_'+field)
                    # Callable override returns a Q(); string override is a
                    # lookup name (Python 2: `type(search) == str`).
                    if hasattr(search, '__call__'): query = self.return_pipe(query, search(value))
                    elif type(search) == str: query = self.return_pipe(query, Q(**{search:value}))
                else: query = self.return_pipe(query, Q(**{field+"__icontains":value}))
            return queryset.filter(query)
        else: return queryset
    def get_queryset(self):
        # Apply declarative filters first, then free-text search (each only
        # when the corresponding declaration list is non-empty).
        queryset = super(ListView, self).get_queryset()
        if self.filter_on: queryset = self.filter(queryset)
        if self.search_on: queryset = self.search(queryset)
        return queryset
    def get_context_data(self, **kwargs):
        # Expose the raw GET mapping under the search/filter keys so templates
        # can re-populate their inputs.
        context = super(ListView, self).get_context_data(**kwargs)
        # print "context = %s" % str(context)
        if self.search_key in self.request.GET: context[self.search_key] = self.request.GET
        # print "context = %s" % str(context)
        for filter_key in self.filter_on:
            if filter_key in self.request.GET: context[filter_key] = self.request.GET
        # print "context = %s" % str(context)
        return context
|
999,916 | a4ee059317ccbe7baae8ee7dac619a0d11ce29d7 | import json
import jieba
import pandas as pd
from core.utils import *
from core.vggnet import Vgg19, load_and_resize_image
from collections import Counter
import os
import sys
from scipy import ndimage
import hickle
import tensorflow as tf
from PIL import Image
from config import *
import hashlib
def fenci(sentence):
    """Segment a Chinese sentence into space-separated tokens using jieba."""
    tokens = jieba.cut(sentence)
    return ' '.join(tokens)
def _process_caption_data(caption_file, image_dir, max_length):
    # Flatten the raw JSON annotation file into one row per (image, caption)
    # pair, adding a jieba-segmented copy of each caption; images missing on
    # disk are skipped.
    # NOTE(review): ``max_length`` is accepted but never used here — the
    # truncation to max_length happens later in _build_caption_vector; confirm
    # whether over-length captions were also meant to be dropped here.
    with open(caption_file) as f:
        raw_data = json.load(f)
    print "there are %d samples in %s" % (len(raw_data), caption_file)
    caption_data = []
    miss_count = 0
    for sample in raw_data:
        image_file_name = os.path.join(image_dir, sample['image_id'])
        if not os.path.exists(image_file_name):
            # Count and periodically report annotations whose image file is absent.
            miss_count += 1
            if miss_count % 100 == 0:
                print "miss count %d " % miss_count
            continue
        # Each image carries multiple captions; emit one row per caption.
        for caption in sample['caption']:
            sample_dict = {'caption': caption,
                           'fenci_caption': fenci(caption),
                           'image_file_name': image_file_name,
                           'image_id': sample['image_id']}
            caption_data.append(sample_dict)
    # convert to pandas dataframe (for later visualization or debugging)
    caption_data = pd.DataFrame.from_dict(caption_data)
    caption_data.sort_values(by='image_id', inplace=True)
    caption_data = caption_data.reset_index(drop=True)
    return caption_data
def _build_vocab(annotations, threshold=1):
counter = Counter()
max_len = 0
for i, caption in enumerate(annotations['fenci_caption']):
words = caption.split(' ') # caption contrains only lower-case words
for w in words:
counter[w] +=1
if len(caption.split(" ")) > max_len:
max_len = len(caption.split(" "))
vocab = [word for word in counter if counter[word] >= threshold]
print ('Filtered %d words to %d words with word count threshold %d.' % (len(counter), len(vocab), threshold))
word_to_idx = {u'<NULL>': 0, u'<START>': 1, u'<END>': 2}
idx = 3
for word in vocab:
word_to_idx[word] = idx
idx += 1
print "Max length of caption: ", max_len
return word_to_idx
def _build_caption_vector(annotations, word_to_idx, max_length=15):
    # Encode every segmented caption as a fixed-size int32 vector of length
    # max_length + 2: <START> + up to max_length word indices + <END>,
    # right-padded with <NULL>. Out-of-vocabulary words are silently skipped.
    n_examples = len(annotations)
    captions = np.ndarray((n_examples,max_length+2)).astype(np.int32)
    for i, caption in enumerate(annotations['fenci_caption']):
        words = caption.split(" ") # caption contains only lower-case words
        cap_vec = []
        cap_vec.append(word_to_idx['<START>'])
        for word in words:
            if word in word_to_idx:
                cap_vec.append(word_to_idx[word])
            # cap_vec holds <START> plus words, so this stops after exactly
            # max_length words, leaving room for <END> in the fixed buffer.
            if len(cap_vec) > max_length:
                break
        cap_vec.append(word_to_idx['<END>'])
        # pad short caption with the special null token '<NULL>' to make it fixed-size vector
        if len(cap_vec) < (max_length + 2):
            for j in range(max_length + 2 - len(cap_vec)):
                cap_vec.append(word_to_idx['<NULL>'])
        captions[i, :] = np.asarray(cap_vec)
    print "Finished building caption vectors"
    return captions
def _build_file_names(annotations):
image_file_names = []
id_to_idx = {}
idx = 0
image_ids = annotations['image_id']
file_names = annotations['image_file_name']
for image_id, file_name in zip(image_ids, file_names):
if not image_id in id_to_idx:
id_to_idx[image_id] = idx
image_file_names.append(file_name)
idx += 1
file_names = np.asarray(image_file_names)
return file_names, id_to_idx
def _build_image_idxs(annotations, id_to_idx):
image_idxs = np.ndarray(len(annotations), dtype=np.int32)
image_ids = annotations['image_id']
for i, image_id in enumerate(image_ids):
image_idxs[i] = id_to_idx[image_id]
return image_idxs
def generate_contest_reference(annotations, split):
    # prepare reference captions to compute bleu scores later
    # Builds a COCO-style references dict: one 'images' entry per unique
    # image_id (id = stable sha256-derived hash) and one 'annotations' entry
    # per caption pointing at that image hash.
    # NOTE(review): Python 2 only — ``sys.maxint`` and hashing a str with
    # hashlib both fail on Python 3; ``id`` also shadows the builtin.
    ref_images = []
    ref_annotations = []
    image_ids = set()
    i = -1
    id = 1
    for fenci_caption, image_id in zip(annotations['fenci_caption'], annotations['image_id']):
        # Deterministic per-image integer id derived from the image_id string.
        image_hash = int(int(hashlib.sha256(image_id).hexdigest(), 16) % sys.maxint)
        if not image_id in image_ids:
            image_ids.add(image_id)
            i += 1
            ref_images.append({'file_name': image_id, 'id': image_hash})
        ref_annotations.append({'caption': fenci_caption, 'id': id, 'image_id': image_hash})
        id += 1
    result = {'annotations': ref_annotations, 'images': ref_images, "type": "captions", 'info': {}, 'licenses': {}}
    print "Finished building %s caption dataset" %split
    return result
def main():
    # End-to-end preprocessing: load caption annotations, build the vocab and
    # caption vectors, split the training set into 21 shards, and extract
    # VGG-19 conv5_3 features for each split with TensorFlow (0.x API).
    # batch size for extracting feature vectors from vggnet.
    batch_size = 100
    # maximum length of caption(number of word). if caption is longer than max_length, deleted.
    max_length = 15
    # if word occurs less than word_count_threshold in training dataset, the word index is special unknown token.
    word_count_threshold = 1
    train_caption_file = TRAIN_DATA_PATH + '/caption_train_annotations_20170902.json'
    image_dir = TRAIN_DATA_PATH + '/caption_train_images_20170902/'
    val_caption_file = VAL_DATA_PATH + '/caption_validation_annotations_20170910.json'
    val_image_dir = VAL_DATA_PATH + '/caption_validation_images_20170910/'
    # test_image_dir = TEST_DATA_PATH + '/caption_validation_images_20170910/'
    train_dataset = _process_caption_data(train_caption_file, image_dir, max_length)
    val_dataset = _process_caption_data(val_caption_file, val_image_dir, max_length)
    # test_dataset = _process_caption_data(test_caption_file, test_image_dir, max_length)
    # init make dirs
    sub_train_split = ['train' + str(i) for i in range(21)]
    # split_parts = ['train', 'val', 'test'] + sub_train_split
    split_parts = ['train', 'val'] + sub_train_split
    for split in split_parts:
        path = 'data/' + split
        if not os.path.exists(path):
            os.makedirs(path)
    save_pickle(train_dataset, 'data/train/train.annotations.pkl')
    save_pickle(val_dataset, 'data/val/val.annotations.pkl')
    # save_pickle(test_dataset, 'data/test/test.annotations.pkl')
    # since the dataset might larger than system memory, cut the train dataset into several parsts
    block_size = len(train_dataset)/21
    for i in range(21):
        save_pickle(train_dataset[i*block_size: (i+1)*block_size].reset_index(drop=True), 'data/train%d/train%d.annotations.pkl' % (i, i))
    for split in split_parts:
        annotations = load_pickle('./data/%s/%s.annotations.pkl' % (split, split))
        # The vocabulary is built once from the full training split and then
        # reused (via closure over word_to_idx) for every subsequent split.
        if split == 'train':
            word_to_idx = _build_vocab(annotations=annotations, threshold=word_count_threshold)
            save_pickle(word_to_idx, './data/%s/word_to_idx.pkl' % split)
        captions = _build_caption_vector(annotations=annotations, word_to_idx=word_to_idx, max_length=max_length)
        save_pickle(captions, './data/%s/%s.captions.pkl' % (split, split))
        file_names, id_to_idx = _build_file_names(annotations)
        save_pickle(file_names, './data/%s/%s.file.names.pkl' % (split, split))
        image_idxs = _build_image_idxs(annotations, id_to_idx)
        save_pickle(image_idxs, './data/%s/%s.image.idxs.pkl' % (split, split))
        reference_json = generate_contest_reference(annotations, split)
        json.dump(reference_json, open('./data/%s/%s.references.json' % (split, split), 'w'))
    # extract conv5_3 feature vectors
    # NOTE(review): legacy TF 0.x API (initialize_all_variables); the first
    # session below is created/initialized and then the graph is reset —
    # presumably leftover code; verify it is still needed.
    init_op = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init_op)
    tf.reset_default_graph()
    vggnet = Vgg19(VGG_MODEL_PATH)
    vggnet.build()
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        # split_parts[1:] skips the full 'train' split, processing 'val' and
        # the 21 training shards only.
        for split in split_parts[1:]:
            anno_path = './data/%s/%s.annotations.pkl' % (split, split)
            save_path = './data/%s/%s.features.hkl' % (split, split)
            annotations = load_pickle(anno_path)
            image_path = list(annotations['image_file_name'].unique())
            n_examples = len(image_path)
            # One 196x512 conv5_3 feature map per unique image.
            all_feats = np.ndarray([n_examples, 196, 512], dtype=np.float32)
            for start, end in zip(range(0, n_examples, batch_size),
                                  range(batch_size, n_examples + batch_size, batch_size)):
                image_batch_file = image_path[start:end]
                image_batch = np.array(map(lambda x: load_and_resize_image(x), image_batch_file)).astype(
                    np.float32)
                feats = sess.run(vggnet.features, feed_dict={vggnet.images: image_batch})
                all_feats[start:end, :] = feats
                print ("Processed %d %s features.." % (end, split))
            # use hickle to save huge feature vectors
            hickle.dump(all_feats, save_path)
            print ("Saved %s.." % (save_path))
if __name__ == '__main__':
    main()
|
999,917 | 5da2fb43ea53c791d80071ffb4a72d4c70239e26 | ```
A city with 6 districts has 6 robberies in a particular week. Assume the robberies are
located randomly, with all possibilities for which robbery occurred where equally likely.
What is the probability that some district had more than 1 robbery?
Solving by-hand yields 1 - 6!/6^6 = .98456.
```
import numpy as np
# Monte Carlo estimate of P(some district had >1 robbery) for 6 robberies
# placed uniformly at random over 6 districts (analytic answer 1 - 6!/6^6).
n_sims = 10**7
DISTRICTS = np.arange(6)
# Each row is one simulated week: 6 robbery locations drawn with replacement.
robberies = np.random.choice(DISTRICTS, size=(n_sims,6), replace=True)
sorted_robberies = np.sort(robberies)
# After sorting the districts that were robbed, you can take the difference between sequential district numbers with np.diff.
# If all differences are greater than 0 then no district was robbed more than once.
1 - np.mean(np.all(np.diff(sorted_robberies), axis=1)) # 0.98455182
999,918 | 76c494e90fd69abad57bf3bb421cad709e3dd11c | import json
import aiohttp
import asyncio
import requests
import pandas as pd
import xlsxwriter
def get_proxy():
    """Fetch one proxy address (plain text) from the local proxy-pool service."""
    response = requests.get("http://127.0.0.1:5010/get/")
    return response.text
def delete_proxy(proxy):
    """Ask the local proxy-pool service to drop a dead proxy."""
    url = "http://127.0.0.1:5010/delete/?proxy={}".format(proxy)
    requests.get(url)
async def fetch(session, url):
    """GET ``url`` through a pooled proxy, retrying forever with a fresh proxy
    on any failure; returns the raw response body as bytes.

    A proxy that fails is removed from the pool before the next attempt.
    """
    proxy = None
    while True:
        try:
            proxy = 'http://' + get_proxy()
            print(proxy)
            async with session.get(url, proxy=proxy) as response:
                return await response.read()
        except Exception:
            # Bug fix: previously ``proxy`` was referenced unconditionally
            # here, raising NameError if get_proxy() itself failed on the
            # first attempt. Only delete a proxy we actually obtained.
            if proxy is not None:
                delete_proxy(proxy)
                proxy = None
async def get_html(url,semaphore):
    # Fetch one listing page under a concurrency semaphore, parse it, then
    # sleep briefly to rate-limit requests against the API.
    async with semaphore:
        headers = {
            'Referer': 'https://www.bilibili.com/v/anime/finish/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
        }
        # NOTE(review): a new ClientSession is created per URL; aiohttp
        # recommends sharing one session — consider hoisting it to main().
        async with aiohttp.ClientSession(headers=headers) as session:
            html = await fetch(session, url)
            await parse_html(html)
            await asyncio.sleep(1)
async def parse_html(r):
    # Strip the JSONP wrapper and append the page's video records ('archives')
    # to the module-level ``data`` list.
    # NOTE(review): assumes the JSONP callback prefix is exactly 38 bytes and
    # the payload ends with a single trailing ')' — verify against the API
    # callback name used in main().
    global data
    info = json.loads(r[38:-1])['data']['archives']
    '''df = pd.DataFrame(info)//暂时全部放在一起,部分参数扔保留了字典格式'''
    # Print the first video's aid as a progress indicator.
    print(info[0]['aid'])
    data =data + info
def data_save(t):
    """Dump the collected video records to bilibili.xlsx via xlsxwriter."""
    frame = pd.DataFrame(t,)
    frame.to_excel('bilibili.xlsx', engine='xlsxwriter')
def main():
    # Build the full list of paginated API URLs (pages 1..802), fan them out
    # through get_html with at most 20 concurrent fetches, then write the
    # accumulated records to Excel.
    urls = ['https://api.bilibili.com/x/web-interface/newlist?callback=jqueryCallback_bili_09058144271712298&rid=32&type=0&pn='+str(pg)+'&ps=20&jsonp=jsonp&_=1564477907551' for pg in range(1,803)]
    semaphore = asyncio.Semaphore(20)
    loop = asyncio.get_event_loop()
    tasks = [asyncio.ensure_future(get_html(url,semaphore)) for url in urls]
    tasks = asyncio.gather(*tasks)
    loop.run_until_complete(tasks)
    # ``data`` is the module-level accumulator initialised in the guard below.
    data_save(data)
if __name__ == '__main__':
    data = []
    main()
999,919 | f0108efdd0172c4f40f89f198dcf89a687ef2b70 | import os
from getWebForecasts import extractDarkSky, extractAccuWeather, extractWUnderground, extractNWS, getDarkSkyHistory, getCities, get_API_keys
def getAllDataFromDirectory(prediction_directory, actual_directory, write_directory, cities_file, utc_offset = False):
    """
    Create a spreadsheet for each city.
    An observation (row) will have max, min etc. for all of the weather services at different times of prediction. It will indicate the date predicted.
    The response variables will be the actual temperatures, etc.
    2 obs X 4 services X 4 forecasts X 4 variables = 128 feature columns in a row + 4 response variables
    """
    # NOTE(review): this function appears unfinished — ``actualGetter`` is
    # constructed but never used, and the per-day/per-city loop described in
    # the comment below is not implemented yet.
    city_dictionary = getCities(cities_file)
    actualGetter = getActualWeather(actual_directory, city_dictionary, get_API_keys())
    #For each day and for each city, get all the data and put it into a spreadsheet.
class getActualWeather:
    """Thin wrapper around getDarkSkyHistory, bound to a city table, a
    DarkSky API key, and an output directory for cached observations."""

    def __init__(self, actual_directory, city_dictionary, api_keys):
        self.actual_directory = actual_directory
        # Reserved cache for fetched observations (not yet populated here).
        self.actual_dictionary = {}
        self.key = api_keys['DARKSKY_KEY']
        self.city_dictionary = city_dictionary

    def getActualWeather(self, cityState, date):
        """Fetch the observed weather for ``cityState`` on ``date``."""
        city = self.city_dictionary[cityState]
        return getDarkSkyHistory(self.key, city['lat'], city['lon'], date,
                                 outdirectory = self.actual_directory)
if __name__ == "__main__":
    prediction_directory = ""
    actual_directory = ""
    utc_offset = False
    write_directory = ""
    cities_file = '/home/jsporter/workspace/WeatherBay/hello/static/LatLongCities.csv'
    # Bug fix: ``cities_file`` is a required positional parameter of
    # getAllDataFromDirectory but was previously omitted from this call,
    # which raised TypeError at startup.
    getAllDataFromDirectory(prediction_directory, actual_directory, write_directory,
                            cities_file, utc_offset = utc_offset)
999,920 | 3b6e6ae70843fdef493d60803f0759f927d7f8d9 | import io
import json
from typing import List, Union, Type
import altair as alt
import pandas as pd
import pytest
from altair_saver import (
save,
render,
BasicSaver,
HTMLSaver,
NodeSaver,
Saver,
SeleniumSaver,
)
from altair_saver._utils import JSONDict, mimetype_to_fmt
FORMATS = ["html", "pdf", "png", "svg", "vega", "vega-lite"]


def check_output(out: Union[str, bytes], fmt: str) -> None:
    """Do basic checks on output to confirm correct type, and non-empty."""
    if fmt in ("png", "pdf"):
        # Binary formats come back as raw bytes.
        assert isinstance(out, bytes)
        return
    assert isinstance(out, str)
    if fmt in ("vega", "vega-lite"):
        # Spec formats must be valid JSON with at least one top-level key.
        assert len(json.loads(out)) > 0
    else:
        assert len(out) > 0
@pytest.fixture
def chart() -> alt.Chart:
    # Minimal line chart over a 10-point diagonal, used by the save tests.
    data = pd.DataFrame({"x": range(10), "y": range(10)})
    return alt.Chart(data).mark_line().encode(x="x", y="y")
@pytest.fixture
def spec(chart: alt.Chart) -> JSONDict:
    # The same chart as a plain Vega-Lite JSON dict.
    return chart.to_dict()
@pytest.mark.parametrize("fmt", FORMATS)
def test_save_chart(chart: alt.TopLevelMixin, fmt: str) -> None:
    # Saving a chart object to every supported format produces non-empty,
    # correctly-typed output (bytes buffer for png/pdf, text otherwise).
    fp: Union[io.BytesIO, io.StringIO]
    if fmt in ["png", "pdf"]:
        fp = io.BytesIO()
    else:
        fp = io.StringIO()
    save(chart, fp, fmt=fmt)
    check_output(fp.getvalue(), fmt)
@pytest.mark.parametrize("fmt", FORMATS)
def test_save_spec(spec: JSONDict, fmt: str) -> None:
    # Same as test_save_chart, but starting from a raw spec dict instead of
    # an Altair chart object.
    fp: Union[io.BytesIO, io.StringIO]
    if fmt in ["png", "pdf"]:
        fp = io.BytesIO()
    else:
        fp = io.StringIO()
    save(spec, fp, fmt=fmt)
    check_output(fp.getvalue(), fmt)
@pytest.mark.parametrize("method", ["node", "selenium", BasicSaver, HTMLSaver])
@pytest.mark.parametrize("fmt", FORMATS)
def test_save_chart_method(
    spec: JSONDict, fmt: str, method: Union[str, Type[Saver]]
) -> None:
    # Each saver method must succeed for the formats it declares valid and
    # raise ValueError for the rest.
    fp: Union[io.BytesIO, io.StringIO]
    if fmt in ["png", "pdf"]:
        fp = io.BytesIO()
    else:
        fp = io.StringIO()
    # Resolve the declared format list for string aliases or Saver classes.
    valid_formats: List[str] = []
    if method == "node":
        valid_formats = NodeSaver.valid_formats
    elif method == "selenium":
        valid_formats = SeleniumSaver.valid_formats
    elif isinstance(method, type):
        valid_formats = method.valid_formats
    else:
        raise ValueError(f"unrecognized method: {method}")
    if fmt not in valid_formats:
        with pytest.raises(ValueError):
            save(spec, fp, fmt=fmt, method=method)
    else:
        save(spec, fp, fmt=fmt, method=method)
        check_output(fp.getvalue(), fmt)
@pytest.mark.parametrize("inline", [True, False])
def test_html_inline(spec: JSONDict, inline: bool) -> None:
    # inline=True must embed the JS libraries instead of referencing the CDN.
    fp = io.StringIO()
    save(spec, fp, fmt="html", inline=inline)
    html = fp.getvalue()
    cdn_url = "https://cdn.jsdelivr.net"
    if inline:
        assert cdn_url not in html
    else:
        assert cdn_url in html
def test_render_spec(spec: JSONDict) -> None:
    # render() must return one mimetype entry per requested format, each
    # passing the same sanity checks as save().
    bundle = render(spec, fmts=FORMATS)
    assert len(bundle) == len(FORMATS)
    for mimetype, content in bundle.items():
        fmt = mimetype_to_fmt(mimetype)
        if isinstance(content, dict):
            check_output(json.dumps(content), fmt)
        else:
            check_output(content, fmt)
def test_infer_mode(spec: JSONDict) -> None:
    # Rendering SVG from the Vega-Lite spec and from its compiled Vega spec
    # must agree, proving the vega-lite/vega mode is inferred correctly.
    vg_spec = render(spec, "vega").popitem()[1]
    vl_svg = render(spec, "svg").popitem()[1]
    vg_svg = render(vg_spec, "svg").popitem()[1]
    assert vl_svg == vg_svg
|
999,921 | fdb64aecbbfd3a10e5b60563f870b8ac208d7979 | import pandas as pd
from sklearn import preprocessing
from model_F17 import *
def sim_zsrc_Neff(Nsamp, dth, zmin = 0, zmax = 10, dz = 0.001, Neff_scale = 1, model = 'Be13'):
    '''
    Simulate the source redshift in a light cone with Neff(z)
    Neff is derived from Fonseca 17 Schechter LF model.
    Inputs:
    =======
    Nsamp: number of lightcone to generate
    dth: lightcone size dth^2[arcmin]
    zmin,zmax: range of z to be sampled
    dz: z resolution in the sim
    Neff_scale: scale the overall Neff
    Model: SFR model -- 'F17' or 'Be13'
    Output:
    =======
    zsrc_all = Nsamp element list, each element is a list of z of source in a lightcone
    '''
    if model == 'F17':
        df = pd.read_csv('data_internal/F17NeffSFRs.txt')
    elif model == 'Be13':
        df = pd.read_csv('data_internal/Be13NeffSFRs.txt')
    else:
        raise ValueError("Model name incorrect.")
    z_dat = df['z'].values
    Neff_dat = df['Neff'].values
    # Convert the tabulated density to an expected count per z bin:
    # Neff(z) * dz * (dth^2 solid angle in arcmin^2).
    Neff_dat *= dz * dth**2
    z_vec = np.arange(zmin,zmax,dz)
    # Drop z=zmin itself (first grid point).
    z_vec = z_vec[1:]
    Neff_vec = np.interp(z_vec,z_dat,Neff_dat)
    Neff_vec *= Neff_scale
    zsrc_all = []
    for i in range(Nsamp):
        # Poisson-draw a source count in every z bin for this light cone.
        samps = np.random.poisson([Neff_vec])[0]
        maxN = np.max(samps)
        zsamps = []
        # A bin with count k contributes its z value k times (once per pass).
        for j in range(1,maxN+1,1):
            zsamps.extend(z_vec[samps>=j])
        # Snap to the dz grid to avoid float round-off in later matching.
        zsamps = np.round(np.asarray(zsamps)/dz)*dz
        zsrc_all.append(zsamps)
    return zsrc_all
def sim_SFRlc_F17(Nsamp, dth, zbinedges, SFRbinedges, SFRf_arr, Neff_scale = 1, model = 'Be13'):
    '''
    Simulate the light cone with F17 Sim fit luminosity function.
    Inputs:
    =======
    Nsamp: number of lightcone to generate
    dth: lightcone size dth^2[arcmin]
    zbinedges: the redshift bin edges
    SFRbinedges: the luminosity bin edges to sample the LF with Poisson distribution
                [unit: dimensionless, SFR* of Schechter func]
    SFRf_arr: pre-computed SFR func at the given zbinedges, Lbinedges [h^3 / Mpc^3]
    Output:
    =======
    L_arr [Lsun]: Nsamp x Nz, L_arr(i,j) is the intrinsic luminoisty [Lsun] in lightcone j redshift j
    zbins: redshift bins for L_arr
    '''
    # NOTE(review): N_arr actually holds the summed dimensionless SFR (in
    # units of SFR*) per z bin, not an integer source count — confirm the
    # intended semantics against callers (gen_lightcone passes it as Nsrc).
    zbins = (zbinedges[1:] + zbinedges[:-1]) / 2
    Nz = len(zbins)
    # Comoving volume of each z slice: radial depth x transverse area.
    CDedges = cosmo_dist(zbinedges).comoving_distance
    dCD = CDedges[1:] - CDedges[:-1]
    dth = dth * u.arcmin
    dtrans = cosmo_dist(zbins).kpc_comoving_per_arcmin * dth
    dV_zvec = dCD.to(u.Mpc / u.h) * dtrans.to(u.Mpc / u.h)**2
    # Geometric-mean bin centers and widths for the SFR (luminosity) bins.
    SFRbins = np.sqrt(SFRbinedges[1:] * SFRbinedges[:-1])
    NSFR = len(SFRbins)
    dSFR_lvec = (SFRbinedges[1:] - SFRbinedges[:-1])
    if model == 'F17':
        df = pd.read_csv('data_internal/F17NeffSFRs.txt')
    elif model == 'Be13':
        df = pd.read_csv('data_internal/Be13NeffSFRs.txt')
    else:
        raise ValueError("Model name incorrect.")
    z_dat = df['z'].values
    sfrs_dat = df['SFRs'].values
    SFRs_vec = np.interp(zbins, z_dat, sfrs_dat)
    SFR_arr = np.zeros([Nsamp, Nz])
    N_arr = np.zeros([Nsamp, Nz])
    for iz, z in enumerate(zbins):
        # Expected counts per SFR bin: Phi * dV * dSFR, then Poisson sample
        # for all light cones at once.
        Phi_lvec = SFRf_arr[iz,:] * Neff_scale
        Navg_lvec = Phi_lvec * dV_zvec[iz].value * dSFR_lvec
        N_vec = np.random.poisson(Navg_lvec, size = (Nsamp, NSFR))
        # Total SFR (in SFR* units) contributed by this z slice per cone.
        SFRtot_svec = np.matmul(N_vec, SFRbins.reshape([NSFR,-1])).flatten()
        N_arr[:,iz] = SFRtot_svec
        SFR_arr[:,iz] = SFRtot_svec * SFRs_vec[iz]
    return N_arr, zbins, SFR_arr
def add_line_bin(df, nu_binedges, line_use = ['Lya', 'Ha', 'Hb', 'OII', 'OIII']):
    '''
    Given a df with a column of 'redshift', and the survey nu_binedges, add the columns
    'binLINENAME' specified in which nu bin we can observed the line.
    Inputs:
    =======
    df: df with each source a row. Contain column 'redshift'
    nu_binedges: frequency bin edges [GHz]
    line_use: lines to compute
    Output:
    =======
    df: df with columns 'binLINENAME'
    '''
    # NOTE(review): the out-of-range test below treats nu_binedges[0] as the
    # HIGH-frequency edge and nu_binedges[-1] as the LOW one, i.e. it assumes
    # nu_binedges is monotonically decreasing — confirm with callers.
    binlabel_arr = np.zeros([len(df), len(line_use)], dtype=int)
    for jidx, name in enumerate(line_use):
        # Observed frequency of each line: rest frequency / (1 + z).
        if name == 'Lya':
            nu_arr = np.asarray(spec_lines.Lya.to(u.GHz, equivalencies=u.spectral()).value \
                                / (1 + df['redshift']))
        elif name == 'Ha':
            nu_arr = np.asarray(spec_lines.Ha.to(u.GHz, equivalencies=u.spectral()).value \
                                / (1 + df['redshift']))
        elif name == 'Hb':
            nu_arr = np.asarray(spec_lines.Hb.to(u.GHz, equivalencies=u.spectral()).value \
                                / (1 + df['redshift']))
        elif name == 'OII':
            nu_arr = np.asarray(spec_lines.OII.to(u.GHz, equivalencies=u.spectral()).value \
                                / (1 + df['redshift']))
        elif name == 'OIII':
            nu_arr = np.asarray(spec_lines.OIII.to(u.GHz, equivalencies=u.spectral()).value \
                                / (1 + df['redshift']))
        else:
            raise ValueError('Line name %s is invalid.'%name)
        # binlabels: 0 ~ len(nu_binedges)-2
        binlabel = np.digitize(nu_arr,nu_binedges) - 1
        # Sources sitting exactly on the outer edges are kept in the end bins;
        # anything outside the survey band gets label -1.
        binlabel[nu_arr == nu_binedges[0]] = 0
        binlabel[nu_arr == nu_binedges[-1]] = len(nu_binedges) - 2
        binlabel[(nu_arr < nu_binedges[-1]) | (nu_arr > nu_binedges[0])] = -1
        df['bin' + name] = binlabel
        binlabel_arr[:,jidx] = binlabel
    return df, binlabel_arr
def add_line_flux(df, line_use = ['Lya', 'Ha', 'Hb', 'OII', 'OIII'], muL = [], sigL = [], model = 'Be13'):
    '''
    Given a df with a column of 'redshift', calculate line flux and add the
    columns 'F<LINENAME>' (e.g. 'FLya'). The line intrinsic luminosity is the
    L* in Fonseca 17. muL, sigL are the bias and scatter in intrinsic
    luminosity. OII and OIII share one correlated scatter draw (OII must
    appear before OIII in line_use for the shared draw to be reused).
    Inputs:
    =======
    df: df with each source a row. Contain column 'redshift'
    line_use: lines to compute
    muL: intrinsic luminosity bias for all sources. Same dimension as line_use. unit: L*
    sigL: intrinsic luminosity Gaussian scatter for all sources. Same dimension as line_use. unit: L*
    Output:
    =======
    df: df with columns 'F<LINENAME>' [Jy GHz]
    '''
    # Resolve/validate the per-line bias and scatter vectors.
    if len(sigL) == 0:
        sigL = np.zeros(len(line_use))
    elif len(sigL) != len(line_use):
        raise ValueError('sigL and line_use does not have the same dimension!!!')
    if len(muL) == 0:
        muL = np.zeros(len(line_use))
    elif len(muL) != len(line_use):
        # Bug fix: this branch previously reported 'sigL' instead of 'muL'.
        raise ValueError('muL and line_use does not have the same dimension!!!')
    if model == 'F17':
        dfdat = pd.read_csv('data_internal/F17NeffSFRs.txt')
    elif model == 'Be13':
        dfdat = pd.read_csv('data_internal/Be13NeffSFRs.txt')
    else:
        raise ValueError("Model name incorrect.")
    z_dat = dfdat['z'].values
    z_vec = df['redshift']
    DL_vec = cosmo.luminosity_distance(z_vec)
    F_arr = np.zeros([len(df), len(line_use)])
    # Shared OII/OIII correlated scatter factor, drawn when OII is processed.
    rand_OII_OIII = None
    for jidx, line_name in enumerate(line_use):
        Ls_dat = dfdat[line_name + '_Ls'].values
        # Interpolate log10 L*(z) onto the source redshifts.
        L_vec = 10**np.interp(z_vec, z_dat, np.log10(Ls_dat))
        # Apply the per-line bias/scatter once. Bug fix: the original applied
        # this factor twice because of a duplicated line.
        L_vec = L_vec * (1 + np.random.normal(muL[jidx], sigL[jidx], len(L_vec)))
        # Bug fix: the original compared ``line_use`` (a list) to 'OII'/'OIII'
        # instead of ``line_name``, so these branches never executed (and
        # referenced undefined names had they run).
        if line_name == 'OII':
            rand_OII_OIII = 1 + np.random.normal(muL[jidx], sigL[jidx], len(L_vec))
            L_vec = L_vec * rand_OII_OIII
        elif line_name == 'OIII' and rand_OII_OIII is not None:
            L_vec = L_vec * rand_OII_OIII
        # Convert intrinsic luminosity to observed flux [Jy GHz].
        F_vec = L_vec * u.Lsun / 4 / np.pi / DL_vec**2
        F_vec = F_vec.to(u.Jy * u.GHz).value
        df['F' + line_name] = F_vec
        F_arr[:, jidx] = F_vec
    return df, F_arr
def Ivox_from_zsrc(zsrc_all, dth, nu_binedges, line_use, line_targ, \
                   muL = [], sigL = [], Lratio = [], model = 'Be13', verbose = 0):
    '''
    Given list of source redshift and nu_binedges, calculate the inensity of the light cone.
    Inputs:
    =======
    zsrc_all[list]: Nsamp element list, each element is a list of z of source in a lightcone
    dth: pixel size dth^2 [arcmin]
    nu_binedges: [GHz]
    line_use[str list]: list of lines in the light cone
    line_targ[str or str list]: targeting line
    muL: pass to add_line_flux
    sigL: pass to add_line_flux
    Lratio[list]: the intrinsic line luminosity of the individual source is Lratio time Lsrc.
                  Lratio has to be same dimension of zsrc_all. Default: all 1's.
    Outputs:
    ========
    I_vec_all: intensity from all the lines, Nsamp x Nnu array [Jy/sr]
    I_vec_targ: intensity from target lines, Nsamp x Nnu array [Jy/sr]
    '''
    # Validate that the target line(s) are a subset of the lines in use.
    if type(line_targ) is str:
        if line_targ not in line_use:
            raise ValueError("line_targ must be in line_use.")
    else:
        if not set(line_targ).issubset(line_use):
            raise ValueError("line_targ must be in line_use.")
    if len(Lratio) != len(zsrc_all) and len(Lratio) !=0:
        raise ValueError('zsrc_all and Lratio does not have the same dimension!!!')
    Nnu = len(nu_binedges) - 1
    Nset = len(zsrc_all)
    # Pixel solid angle [sr] and per-channel bandwidths [GHz].
    dsr = ((dth * u.arcmin).to(u.rad))**2
    dnus = abs(nu_binedges[1:] - nu_binedges[:-1])
    usename = []
    for name in line_use:
        usename.extend(['bin' + name])
    I_vec_all = np.zeros([Nset,Nnu])
    # For a single target line the output is 2-D; for a list it is 3-D with
    # one leading slice per target line.
    if type(line_targ) is str:
        idxtarg = line_use.index(line_targ)
        I_vec_targ = np.zeros([Nset,Nnu])
    else:
        idxtarg_vec = [line_use.index(jj) for jj in line_targ]
        I_vec_targ = np.zeros([len(line_targ),Nset,Nnu])
    for i in range(Nset):
        if len(zsrc_all[i])==0:
            # Empty light cone: zero intensity everywhere.
            I_vec_all[i,:] = 0.
            if type(line_targ) is str:
                I_vec_targ[i,:] = 0.
            else:
                I_vec_targ[:,i,:] = 0.
        else:
            # Resolve the per-source luminosity ratios for this cone.
            if len(Lratio) == 0:
                Lri = np.ones(len(zsrc_all[i]))
            elif len(Lratio[i]) != len(zsrc_all[i]):
                raise ValueError('light cone %d zsrc_all and Lratio size not match!!!'%i)
            else:
                Lri = Lratio[i]
            # Broadcast the ratios across all lines (sources x lines).
            Lri_arr = np.tile(np.asarray(Lri),(len(line_use),1)).T
            df = pd.DataFrame(zsrc_all[i],columns=['redshift'])
            # Per-source fluxes and the frequency channel each line lands in.
            _, F_arr = add_line_flux(df, line_use = line_use, muL = muL, sigL = sigL, model = model)
            _, bin_arr = add_line_bin(df, nu_binedges, line_use = line_use)
            dnu_arr = np.zeros_like(bin_arr,dtype=float)
            dnu_arr[bin_arr!=-1] = dnus[bin_arr[bin_arr!=-1]]
            # Intensity per entry: flux * ratio / (channel width * pixel solid
            # angle); out-of-band entries (bin == -1) stay zero.
            I_arr = np.zeros_like(bin_arr,dtype=float)
            I_arr[bin_arr!=-1] = F_arr[bin_arr!=-1] * Lri_arr[bin_arr!=-1]/dnu_arr[bin_arr!=-1] / dsr
            # Histogram intensities into frequency channels (bin -1 falls
            # below the first histogram edge and is discarded).
            I_vec = np.histogram(bin_arr,bins = np.arange(-0.5,Nnu,1), weights=I_arr)[0]
            I_vec_all[i,:] = I_vec
            if type(line_targ) is str:
                I_vec = np.histogram(bin_arr[:,idxtarg],bins = np.arange(-0.5,Nnu,1), \
                                     weights=I_arr[:,idxtarg])[0]
                I_vec_targ[i,:] = I_vec
            else:
                for jj,idxtarg in enumerate(idxtarg_vec):
                    I_vec = np.histogram(bin_arr[:,idxtarg],bins = np.arange(-0.5,Nnu,1), \
                                         weights=I_arr[:,idxtarg])[0]
                    I_vec_targ[jj,i,:] = I_vec
        if verbose:
            if (i+1)%100==0:
                print('produce light cone %d/%d (%d %%)'%(i+1,Nset,(i+1)*100./Nset))
    return I_vec_all,I_vec_targ
def gen_Ipred(z_coords, N_arr, dth, nu_binedges, line_use, line_targ, model = 'Be13',
              muL = [], sigL = [], verbose = 0):
    '''
    Generate I_arr with the N_arr from sparse approx.
    Inputs:
    =======
    z_coords[arr]: Nsamp element list, each element is a list of z of source in a lightcone
    N_arr[arr, Nset x len(z_coords)]:
    dth: pixel size dth^2 [arcmin]
    nu_binedges: [GHz]
    line_use[str list]: list of lines in the light cone
    line_targ[str or str list]: targeting line
    '''
    # NOTE(review): the ``verbose`` parameter is accepted but a literal 0 is
    # forwarded to Ivox_from_zsrc below — confirm whether it should pass
    # ``verbose`` through.
    Nsamp, Nz = N_arr.shape
    if Nz != len(z_coords):
        raise ValueError('N_arr 2nd dimension does not match len(z_coords).')
    # Every sample shares the same z grid; the per-sample coefficients in
    # N_arr act as luminosity ratios (Lratio) at those grid points.
    zsrc_all = []
    Lratio = []
    for i in range(Nsamp):
        zsrc_all.append(z_coords)
        Lratio.append(N_arr[i,:])
    Ipred_all, Ipred_targ = Ivox_from_zsrc(zsrc_all, dth, nu_binedges, \
                                           line_use, line_targ, Lratio = Lratio, model = model,
                                           muL=muL, sigL=sigL, verbose = 0)
    return Ipred_all, Ipred_targ
def zlist_to_N(zsrc, z_coords_all, I_coords_all, z_idx, Nsrc = []):
    # Project each light cone's source redshifts onto the sparse dictionary
    # grid: every source is snapped to the nearest fine z grid point, sources
    # whose spectral signature lights up fewer than 2 channels are dropped,
    # and counts (or the optional per-source weights in Nsrc) are accumulated
    # at the matching dictionary entry in z_idx.
    # Binary (on/off) channel signature of every fine z grid point.
    I_bl = np.copy(I_coords_all)
    I_bl[I_bl > 0] = 1
    Nall = np.zeros([len(zsrc),len(z_idx)])
    for iset in range(len(zsrc)):
        N = np.zeros_like(z_idx, dtype = 'float')
        # Nearest fine-grid index of each source redshift.
        idx_vec = np.array([(np.abs(zcii - z_coords_all)).argmin() for zcii in zsrc[iset]])
        # Keep only sources visible in at least 2 frequency channels.
        idx_vec = idx_vec[np.sum(I_bl[:,idx_vec], axis = 0) >= 2]
        for isrc,idx in enumerate(idx_vec):
            if idx <= min(z_idx):
                # Below the first dictionary entry: accumulate there.
                if len(Nsrc) == 0:
                    N[0] += 1
                else:
                    N[0] += Nsrc[iset][isrc]
            elif idx >= max(z_idx):
                # Above the last dictionary entry: accumulate there.
                if len(Nsrc) == 0:
                    N[-1] += 1
                else:
                    N[-1] += Nsrc[iset][isrc]
            else:
                # pick the two nearest dictionary entries
                idx1 = idx - z_idx
                idx1 = idx1[idx1 > 0]
                idx1 = idx - min(idx1)
                idx2 = idx - z_idx
                idx2 = idx2[idx2 < 0]
                idx2 = idx - max(idx2)
                # get binary dict to see which one match
                # Assign to whichever neighbor shares the source's channel
                # signature (defaulting to the upper neighbor otherwise).
                if np.array_equal(I_bl[:,idx1],I_bl[:,idx]):
                    if len(Nsrc) == 0:
                        N[np.where(idx1 == z_idx)] += 1
                    else:
                        N[np.where(idx1 == z_idx)] += Nsrc[iset][isrc]
                else:
                    if len(Nsrc) == 0:
                        N[np.where(idx2 == z_idx)] += 1
                    else:
                        N[np.where(idx2 == z_idx)] += Nsrc[iset][isrc]
        Nall[iset,:] = N
    return Nall
def gen_lightcone_toy(Nlc, dth, nu_binedges, z_coords_all,
                      I_coords_all, z_idx, line_use, line_targ, Neff_scale = 1, model = 'Be13'):
    """Generate toy light cones: Poisson-draw source redshifts from Neff(z),
    project them onto the sparse dictionary grid, and compute voxel
    intensities for all lines and for the target line(s)."""
    z_lists = sim_zsrc_Neff(Nlc, dth, Neff_scale=Neff_scale, model=model)
    N_true = zlist_to_N(z_lists, z_coords_all, I_coords_all, z_idx)
    I_all, I_targ = Ivox_from_zsrc(z_lists, dth, nu_binedges, line_use,
                                   line_targ, model=model, verbose=0)
    return N_true, I_all, I_targ
def gen_lightcone(Nlc, dth, nu_binedges, z_coords_all,
                  I_coords_all, z_idx, line_use, line_targ,
                  muL = [], sigL = [], Neff_scale = 1, model = 'Be13'):
    # Generate realistic light cones from the pre-computed SFR function:
    # sample per-bin SFR totals, project them onto the sparse dictionary
    # grid, then synthesize the voxel intensities from the projected weights.
    # Fixed sampling grids used to tabulate the SFR function files.
    zbinedges = np.arange(0,10 + 0.01,0.01)
    SFRbinedges = np.logspace(-3,1,100)
    if model == 'F17':
        SFRf_arr = np.load('data_internal/F17SFRfunc.npy')
    elif model == 'Be13':
        SFRf_arr = np.load('data_internal/Be13SFRfunc.npy')
    else:
        raise ValueError("Model name incorrect.")
    N_arr, zbins, _ = sim_SFRlc_F17(Nlc, dth, zbinedges, SFRbinedges, SFRf_arr, Neff_scale, model = model)
    # Every cone shares the same z grid; N_arr supplies per-bin weights.
    zsrc = [zbins.tolist()]*Nlc
    Nsrc = N_arr.tolist()
    Ntrue = zlist_to_N(zsrc, z_coords_all, I_coords_all, z_idx, Nsrc = Nsrc)
    Itrue_all, Itrue_targ = gen_Ipred(z_coords_all[z_idx], Ntrue, dth, nu_binedges,
                                      line_use, line_targ, model = model,
                                      muL=muL, sigL=sigL, verbose = 0)
    return Ntrue, Itrue_all, Itrue_targ
def sparse_dict(dth, nu_binedges, line_use, dz = 0.0005, model = 'Be13'):
    # Build the sparse dictionary: compute the multi-line intensity signature
    # of a single source at every fine z grid point, then merge consecutive
    # grid points whose binary (on/off) channel pattern is identical into one
    # dictionary column (represented by the median z of the run).
    z_coords_all = np.arange(dz,10,dz)
    z_coords_all_list = z_coords_all.reshape(len(z_coords_all),-1).tolist()
    # One "light cone" per grid point, each containing a single source.
    I_coords_all,_ = Ivox_from_zsrc(z_coords_all_list, dth, nu_binedges, \
                                    line_use, "Lya", model = model, verbose=0)
    I_coords_all = I_coords_all.T
    z_coords_type_all = np.count_nonzero(I_coords_all, axis=0)
    # Binary channel signature per grid point.
    I_bl = np.copy(I_coords_all)
    I_bl[I_bl > 0] = 1
    # dict for multiple bins
    z_coords = []
    z_idx = []
    ztemp = []
    for i,z in enumerate(z_coords_all):
        if len(ztemp) != 0 and np.array_equal(I_bl[:,i],I_bl[:,i-1]):
            # Same signature as the previous grid point: extend the run.
            ztemp.append(z)
        elif len(ztemp) != 0 and not np.array_equal(I_bl[:,i],I_bl[:,i-1]) and np.sum(I_bl[:,i]) >= 1:
            # Signature changed and the new point is visible: close the run
            # at its median z and start a new one.
            z_median = np.percentile(ztemp,50, interpolation= 'nearest')
            z_coords.append(z_median)
            z_idx.append(np.where(z_coords_all == z_median)[0][0])
            ztemp = [z]
        elif len(ztemp) != 0 and not np.array_equal(I_bl[:,i],I_bl[:,i-1]) and not np.sum(I_bl[:,i]) >= 1:
            # Signature changed and the new point is dark: close the run only.
            z_median = np.percentile(ztemp,50, interpolation= 'nearest')
            z_coords.append(z_median)
            z_idx.append(np.where(z_coords_all == z_median)[0][0])
            ztemp = []
        elif len(ztemp) == 0 and np.sum(I_bl[:,i]) >= 1:
            # First visible point after a dark stretch: open a new run.
            ztemp = [z]
    z_idx = np.array(z_idx)
    z_coords = np.array(z_coords)
    # Dictionary matrix: intensity signatures at the representative z's,
    # column-normalized (A_raw = A @ diag(I_norm)).
    A_raw = I_coords_all[:, z_idx]
    A, I_norm = preprocessing.normalize(A_raw,axis=0, copy=True, return_norm=True)
    # A_raw = A @ np.diag(I_norm)
    N_nu, N_z = A.shape
    return A, I_norm, z_coords, N_nu, N_z, z_coords_all, z_idx, I_coords_all
|
999,922 | b6848b07f6c0d56b2566701e68c6842694463c5b | from django.apps import AppConfig
class ExportCalculosConfig(AppConfig):
    """Django application configuration for the export_calculos app."""
    name = 'export_calculos'
|
999,923 | 6e1278ee0f67e717329a6773c3dc7c8be09718ca | from typing import Optional, Dict
from logging import Logger, getLogger
from optmlstat.basic_modules.class_base import OptMLStatClassBase
from optmlstat.opt.opt_prob import OptimizationProblem
from optmlstat.opt.opt_alg.optimization_algorithm_base import OptimizationAlgorithmBase
from optmlstat.opt.iteration import Iteration
from optmlstat.opt.opt_iterate import OptimizationIterate
from optmlstat.opt.opt_prob_eval import OptimizationProblemEvaluation
logger: Logger = getLogger()


class OptimizationResult(OptMLStatClassBase):
    """
    Collects the optimization iterate produced at each iteration and exposes
    the problem, the algorithm, the full history, and the final iterate.
    """

    def __init__(self, opt_prob: OptimizationProblem, opt_alg: OptimizationAlgorithmBase) -> None:
        self._opt_prob: OptimizationProblem = opt_prob
        self._opt_alg: OptimizationAlgorithmBase = opt_alg
        self._iter_iterate_dict: Dict[Iteration, OptimizationIterate] = {}

    def register_solution(
        self,
        iteration: Iteration,
        primal_prob_evaluation: OptimizationProblemEvaluation,
        dual_prob_evaluation: Optional[OptimizationProblemEvaluation] = None,
    ) -> None:
        """Record the iterate for ``iteration``; each iteration is unique."""
        assert iteration not in self._iter_iterate_dict
        iterate = OptimizationIterate(primal_prob_evaluation, dual_prob_evaluation)
        self._iter_iterate_dict[iteration] = iterate

    @property
    def opt_prob(self) -> OptimizationProblem:
        return self._opt_prob

    @property
    def opt_alg(self) -> OptimizationAlgorithmBase:
        return self._opt_alg

    @property
    def iter_iterate_dict(self) -> Dict[Iteration, OptimizationIterate]:
        return self._iter_iterate_dict

    @property
    def final_iterate(self) -> OptimizationIterate:
        """Iterate registered under the largest iteration key."""
        last_iteration = sorted(self._iter_iterate_dict)[-1]
        return self._iter_iterate_dict[last_iteration]
|
999,924 | 10c4e72a2108a85869c8eced22d27b959499e443 | # 5 4 2
# +-----> B -----> D ------+
# | ^ \ | v
# A 8| \2 |6 F
# | | \> v ^
# +-----> C -----> E ------+
# 2 7 1
# Weighted directed graph as an adjacency dict; __start__/__finish__ flags
# mark the source and sink nodes for the Dijkstra run below.
# NOTE(review): the ASCII diagram above labels the D->F edge with weight 2,
# but this graph uses 3 — confirm which weight is intended.
graph = {
    'A': {
        'neighbors': {
            'B': 5,
            'C': 2,
        },
        '__start__': True,
    },
    'B': {
        'neighbors': {
            'D': 4,
            'E': 2,
        },
    },
    'C': {
        'neighbors': {
            'B': 8,
            'E': 7,
        },
    },
    'D': {
        'neighbors': {
            'E': 6,
            'F': 3,
        },
    },
    'E': {
        'neighbors': {
            'F': 1,
        },
    },
    'F': {
        'neighbors': {},
        '__finish__': True,
    },
}
# Locate the flagged start/finish nodes, then initialise the Dijkstra state:
# tentative costs (0 at the start, infinity elsewhere), predecessor links,
# and the set of finalised nodes.
start = ''
finish = ''
for key, data in graph.items():
    if data.get('__start__'):
        start = key
    if data.get('__finish__'):
        finish = key
costs = {node: float('inf') for node in graph.keys()}
costs[start] = 0
parents = {node: None for node in graph.keys()}
processed = set()
def get_pivot_node(graph, costs, processed):
unprocessed = set(graph) - processed
if len(unprocessed) == 0:
return None
return min(unprocessed, key=lambda node: costs[node])
node = get_pivot_node(graph, costs, processed)
while node is not None:
cost = costs[node]
for nbr, weight in graph[node]['neighbors'].items():
nbr_cost = cost + weight
if nbr_cost < costs[nbr]:
costs[nbr] = nbr_cost
parents[nbr] = node
processed.add(node)
node = get_pivot_node(graph, costs, processed)
print(costs)
print(parents)
|
999,925 | 501f689b8cc92e72fc316d15daf3d5f81d725f4d | import os
import sys
# Activate a virtualenv and expose a Django WSGI handler (Python 2-era code).
path_to_virtualenv_file = '/path/activate_this.py'
# execfile is Python 2 only; Python 3 would need exec(open(path).read(), ...).
execfile(path_to_virtualenv_file, dict(__file__=path_to_virtualenv_file))
# Point Django at the project's settings module before building the handler.
os.environ['DJANGO_SETTINGS_MODULE'] = 'path.to.settings'
import django.core.handlers.wsgi
# NOTE(review): WSGIHandler() is the legacy pre-1.4 API; modern Django uses
# django.core.wsgi.get_wsgi_application() -- confirm the Django version in use.
application = django.core.handlers.wsgi.WSGIHandler()
999,926 | ced072875f5dca4eaffc92bb804a1c61fd744209 | import requests
import re
import os
from bs4 import BeautifulSoup
# Scrape lazily-loaded image URLs from the 58pic listing page and download them.
headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
all_url = "http://www.58pic.com/piccate/10-0-0.html"
start_html = requests.get(all_url, headers=headers).text
# Images are lazy-loaded, so the real URL sits in the data-original attribute.
all_img = re.findall(r'data-original="(.*?)"', start_html, re.S)
save_dir = "F:/asd/"
# Robustness fix: make sure the target directory exists before writing to it.
os.makedirs(save_dir, exist_ok=True)
i = 0
for each in all_img:
    print(each)
    i += 1
    try:
        img = requests.get(each, timeout=10)
    except Exception as e:
        # Bug fix: was `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit; a failed download skips this image.
        print(e)
        print('当前图片下载错误')
        continue
    out_path = save_dir + str(i) + ".jpg"
    # Bug fix: dropped the redundant f.close() that sat inside the `with`
    # block (the context manager already closes the file).
    with open(out_path, 'wb') as f:
        f.write(img.content)
    print("下载完成")
print('总共有%s' %(i)+"张图片")
999,927 | 1458696b28a812959838f7947c047ebae5a41877 | from style.style import style
import sqlite3
import os
class sqlite(style):
    """Thin SQLite wrapper that locates a bundled config.db and runs queries.

    Inherits from `style`, presumably for colored-output helpers such as
    self.red -- TODO confirm against style/style.py.
    """
    # Filled in by __init__ once a config.db file is found; they stay None
    # when no matching file exists under this module's directory tree.
    _connect = None
    _cursor = None
    def __init__(self):
        # Walk this module's directory looking for any file whose name
        # contains "config.db"; the last match found wins if several exist.
        for root, _, files in os.walk(os.path.split(os.path.realpath(__file__))[0]):
            for name in files:
                if name.find("config.db") != -1:
                    self._connect = sqlite3.connect(os.path.join(root, name))
                    self._cursor = self._connect.cursor()
    def runQuery(self, query):
        """Execute *query*; return rows for SELECTs, None for other
        statements, or False when execution fails.

        NOTE(review): the query string is executed verbatim (no parameter
        binding), so callers must never interpolate untrusted input into it.
        """
        try:
            self._cursor.execute(query)
            data = None
            if query.upper().startswith('SELECT'):
                data = self._cursor.fetchall()
            else:
                # Non-SELECT statements mutate the database, so persist them.
                self._connect.commit()
            return data
        except Exception as e:
            print(self.red(e))
            return False
999,928 | ca3abce0ff23ec1eb8aac389b37a6150879f179b | '''
inp : OQQSNPOQZBCETV NPSU SUNPRTJLHJ XZZB HJ KMDFMOHJ
out : PROPADU OT TOSKI YA I LENI
'''
import string

# Decode the ciphertext read from stdin: shift each letter forward by one
# (Z wraps to A), print it, then skip the following character; spaces are
# printed without skipping.
# Bug fix: the import previously used `import string as str`, shadowing the
# builtin str for the rest of the module.
inp = list(input())
eng_alphabet = list(string.ascii_uppercase)
# Successor map for A..Y, plus the Z->A wrap and space passthrough.
d = {eng_alphabet[a]: eng_alphabet[a + 1] for a in range(25)}
d.update({'Z': 'A', " ": " "})
a = 0
while a < len(inp):
    if d[inp[a]] != ' ':
        print(d[inp[a]], end='')
        a += 2
    else:
        print(d[inp[a]], end='')
        a += 1
999,929 | 605a2dc0c21bbaa836d04563a4f1fa8dd98c42e8 | class Solution(object):
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if (len(nums) == 0):
return 0
elif (len(nums) == 1):
return nums[0]
else:
profit = [0] * (len(nums) + 2)
for i in range(len(nums)):
current_profit = 0
current_profit = nums[i]
for j in range(i + 2, len(nums), 2):
current_profit += nums[j]
profit.append(current_profit)
return max(profit)
if __name__ == '__main__':
    # Manual smoke run against the sample from the problem statement.
    Solution().rob([2,7,9,3,1])
999,930 | 6d95fb65fff3548c0fe63ed21a7601993564e143 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a `sequenceName` char field to the Sequence model.

    Auto-generated Django migration; the b'' byte-string literals indicate
    it was generated under Python 2.
    """
    dependencies = [
        ('sequencelistings', '0014_auto_20161217_1629'),
    ]
    operations = [
        migrations.AddField(
            model_name='sequence',
            name='sequenceName',
            # Existing rows receive the 'seq_' default value.
            field=models.CharField(default=b'seq_', max_length=100, verbose_name=b'Sequence name'),
            preserve_default=True,
        ),
    ]
999,931 | 3da504f118da15f7a2fa4e94efb6d361c04ef425 | #!/usr/bin/env python
import logging
from typing import (
Dict,
Optional
)
from hummingbot.logger import HummingbotLogger
from hummingbot.core.event.events import TradeType
from hummingbot.core.data_type.order_book cimport OrderBook
from hummingbot.core.data_type.order_book_message import (
OrderBookMessage,
OrderBookMessageType
)
# Module-level cache for the class logger (lazily initialized).
_krob_logger = None
cdef class KrakenOrderBook(OrderBook):
    """Order book for Kraken: converts raw exchange payloads into
    OrderBookMessage snapshots, diffs and trades."""
    @classmethod
    def logger(cls) -> HummingbotLogger:
        # Lazily create and cache a single logger for this module.
        global _krob_logger
        if _krob_logger is None:
            _krob_logger = logging.getLogger(__name__)
        return _krob_logger
    @classmethod
    def snapshot_message_from_exchange(cls,
                                       msg: Dict[str, any],
                                       timestamp: float,
                                       metadata: Optional[Dict] = None) -> OrderBookMessage:
        """Build a SNAPSHOT message from a snapshot payload.

        The timestamp is multiplied by 1e-3 below, so callers are expected
        to pass milliseconds.
        """
        if metadata:
            msg.update(metadata)
        return OrderBookMessage(OrderBookMessageType.SNAPSHOT, {
            # Kraken pairs look like "XBT/USD"; strip the slash.
            "trading_pair": msg["trading_pair"].replace("/", ""),
            # NOTE(review): this factory reads "latest_update" while the other
            # factories read "update_id" -- confirm against the payload shape.
            "update_id": msg["latest_update"],
            "bids": msg["bids"],
            "asks": msg["asks"]
        }, timestamp=timestamp * 1e-3)
    @classmethod
    def diff_message_from_exchange(cls,
                                   msg: Dict[str, any],
                                   timestamp: Optional[float] = None,
                                   metadata: Optional[Dict] = None) -> OrderBookMessage:
        """Build a DIFF message from a websocket delta payload.

        NOTE(review): timestamp defaults to None yet is multiplied by 1e-3
        below, which raises TypeError when omitted -- confirm callers
        always supply it.
        """
        if metadata:
            msg.update(metadata)
        return OrderBookMessage(OrderBookMessageType.DIFF, {
            "trading_pair": msg["trading_pair"].replace("/", ""),
            "update_id": msg["update_id"],
            "bids": msg["bids"],
            "asks": msg["asks"]
        }, timestamp=timestamp * 1e-3)
    @classmethod
    def snapshot_ws_message_from_exchange(cls,
                                          msg: Dict[str, any],
                                          timestamp: Optional[float] = None,
                                          metadata: Optional[Dict] = None) -> OrderBookMessage:
        """Build a SNAPSHOT message from a websocket snapshot payload.

        NOTE(review): same None-timestamp caveat as diff_message_from_exchange.
        """
        if metadata:
            msg.update(metadata)
        return OrderBookMessage(OrderBookMessageType.SNAPSHOT, {
            "trading_pair": msg["trading_pair"].replace("/", ""),
            "update_id": msg["update_id"],
            "bids": msg["bids"],
            "asks": msg["asks"]
        }, timestamp=timestamp * 1e-3)
    @classmethod
    def trade_message_from_exchange(cls, msg: Dict[str, any], metadata: Optional[Dict] = None):
        """Build a TRADE message from a websocket trade payload.

        The trade tuple is indexed as [price, volume, time, side, ...];
        side "s" maps to SELL, anything else to BUY. The trade time doubles
        as both trade_id and update_id.
        """
        if metadata:
            msg.update(metadata)
        ts = float(msg["trade"][2])
        return OrderBookMessage(OrderBookMessageType.TRADE, {
            "trading_pair": msg["pair"].replace("/", ""),
            "trade_type": float(TradeType.SELL.value) if msg["trade"][3] == "s" else float(TradeType.BUY.value),
            "trade_id": ts,
            "update_id": ts,
            "price": msg["trade"][0],
            "amount": msg["trade"][1]
        }, timestamp=ts * 1e-3)
    @classmethod
    def from_snapshot(cls, msg: OrderBookMessage) -> "OrderBook":
        """Create a fresh KrakenOrderBook seeded from *msg* as its snapshot."""
        retval = KrakenOrderBook()
        retval.apply_snapshot(msg.bids, msg.asks, msg.update_id)
        return retval
999,932 | 99c9833b137ffb593c5443df6b7216b1374afcee | #!/bin/python3
#-*-coding:utf-8-*-
import os, json, re, scrapy, requests, time
from flask import Flask, render_template, request, jsonify, abort
from flask_cors import CORS, cross_origin
from pymongo import MongoClient
from billiard.process import Process
from whoosh.fields import Schema, TEXT, ID, KEYWORD, STORED
from whoosh.index import create_in, open_dir
from whoosh.qparser import MultifieldParser
from whoosh.highlight import highlight, ContextFragmenter, Highlighter
from whoosh.searching import Hit
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from scrapy.crawler import Crawler, CrawlerRunner, CrawlerProcess
from scrapy import signals
from scrapy.spiders import CrawlSpider
from scrapy.utils.log import configure_logging
from twisted.internet import reactor
from jieba.analyse import ChineseAnalyzer
# from nwsuaf.spiders.nwsuaf import NwsuafSpider
# 现在不用导入爬虫了
# Flask app serving the search UI/API; CORS enabled for the separate frontend.
app = Flask(__name__)
CORS(app)
# app.config['JSON_AS_ASCII'] = False
# MongoDB holds the crawled pages ("search") and the crawl start URL ("url").
client = MongoClient('localhost:27017')
db = client.search
co = db.search
url_co = db.url
# Jieba-based analyzer so Whoosh can tokenize Chinese text.
analyzer = ChineseAnalyzer()
# PAGE_SIZE = 5
class WhooshSarch(object):
    """
    Object utilising Whoosh to create a search index of all
    crawled rss feeds, parse queries and search the index for related mentions.

    The index lives on disk under ``indexdir`` and is (re)built from the
    MongoDB collection passed to the constructor.
    """
    def __init__(self, collection):
        # MongoDB collection holding the crawled docs (title/url/content).
        self.collection = collection
        self.indexdir = "indexdir"
        self.indexname = "indexname"
        self.schema = self.get_schema()
        # Create an empty index on first run; otherwise reuse the existing one.
        if not os.path.exists(self.indexdir):
            os.mkdir(self.indexdir)
            create_in(self.indexdir, self.schema, indexname=self.indexname)
        self.ix = open_dir(self.indexdir, indexname=self.indexname)
    def get_schema(self):
        """Whoosh schema: title/content use the jieba Chinese analyzer;
        url is the unique document key."""
        return Schema(title=TEXT(stored=True, analyzer=analyzer),
                      url=ID(unique=True, stored=True),
                      content=TEXT(stored=True, analyzer=analyzer))
    def rebuild_index(self):
        """Recreate the index from scratch out of every MongoDB document."""
        ix = create_in(self.indexdir, self.schema, indexname=self.indexname)
        writer = ix.writer()
        for coll in self.collection.find():
            writer.update_document(title=coll["title"], url=coll["url"],
                                   content=coll["content"])
        writer.commit()
    def commit(self, writer):
        """ commit data to index """
        writer.commit()
        return True
    def parse_query(self, query):
        """Parse *query* against the url/title/content fields."""
        parser = MultifieldParser(["url", "title", "content"], self.ix.schema)
        return parser.parse(query)
    # def search(self, query, page):
    #     """ Decide whether pagination happens on the backend or frontend """
    #     results = []
    #     with self.ix.searcher() as searcher:
    #         result_page = searcher.search_page(
    #             self.parse_query(query), page, pagelen=PAGE_SIZE)
    #         for result in result_page:
    #         # for result in searcher.search(self.parse_query(query)):
    #             results.append(dict(result))
    #     return {'results': results, 'total': result_page.total}
    def search(self, query):
        """Search the index; return result dicts, each augmented with an
        HTML highlight snippet under the 'fragment' key."""
        results = []
        fragments = []
        with self.ix.searcher() as searcher:
            result_origin = searcher.search(self.parse_query(query), limit=None)
            # searcher.search returns 10 hits by default; limit=20 or None
            # yields 20 / all results.
            # Each entry of result_origin is a Hit, so highlights() can be
            # called on it directly.
            # TODO: replace the raw 'content' field in the results with the
            # HTML-formatted snippet.
            my_cf = ContextFragmenter(maxchars=100, surround=30)
            hi = Highlighter(fragmenter=my_cf)
            # NOTE(review): `hi` is built but never used below -- highlights()
            # runs with the default fragmenter, so the 100-char setting may
            # not apply. Confirm intent.
            for hit in result_origin:
                # hit['fragment'] = highlight(hit['content'], query, analyzer,
                #                             )
                # print(hit['fragment'])
                print(hit.highlights("content"))
                fragment={}
                fragment['fragment'] = hit.highlights("content")
                fragments.append(fragment)
            for result in result_origin:
                # my_cf = highlight.ContextFragmenter(maxchars=100, surround=30)
                # Fragment size: maxchars defaults to 200, surround to 20.
                # dict(result)
                # re.sub("[\t\r\n ]+", " ", result['content'])
                # results.append(result)
                # result['fragment'] = hit.highlights("content")
                # Hit objects cannot be modified in place, hence dict(result).
                results.append(dict(result))
            # Merge each highlight fragment into its matching result dict
            # (both lists were built in result_origin order).
            for i in range(len(results)):
                results[i].update(fragments[i])
            # results = zip(*[(result.update(fragment)) for result, fragment
            #                 in zip(results, fragments)])
            # for fragment in fragments for result in results:
            #     result.update(fragment)
            return results
            # result_len = len(result_origin)
            # for i in range(result_len):
            #     results.append(result_origin[i].fields)
            # keywords = [keyword for keyword, score in
            #             result_origin.key_terms("content", docs=10, numterms=5)]
            # print(keywords)
    def close(self):
        """
        Closes the searcher obj. Must be done manually.
        """
        self.ix.close()
# 这是配合process使用的方法,然而有问题。
# class BilliardCrawlProcess(Process):
# # def __init__(self, group=None, target=None, args=(), kwargs={}, daemon, **_kw):
# def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, daemon=None, **_kw):
# super(BilliardCrawlProcess, self).__init__(group, target, name, args, kwargs, daemon, **_kw)
# def run(self):
# settings = get_project_settings()
# process = CrawlerProcess(settings)
# process.crawl('NwsuafSpider')
# process.start()
# def auto_crawl():
# TODO运行爬虫前需要将数据库中的search集合清空,url集合不可以清空
# configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s'})
# runner = CrawlerRunner(get_project_settings())
# d = runner.crawl(NwsuafSpider)
# reactor如果在此处stop后,后面的爬虫将不能运行
# 上面这条注释是错的,下面的语句其实没毛病,原因是twisted调度框架不可以在进程中重启
# d.addBoth(lambda _: reactor.stop())
# reactor.run() # the script will block here until the crawling is finished
# 执行下面语句前需要清空数据库中的search集合
# crawl_process = BilliardCrawlProcess()
# crawl_process.start()
# crawl_process.join() # blocks here until scrapy finished
# 上面这种方法会报参数不对的错误,出错在crawl_process.start()方法,包为process
# 此处可能需要加connection.close()之类的断开数据库连接的语句
# from scrapy import project # project过时啦,要用from_crawler
# to avoid ReactorNotRestartable issue
# 下面的方法也不能用0.0,真是悲剧。
# class UrlCrawlerScript(Process):
# def __init__(self, spider):
# Process.__init__(self)
# settings = get_project_settings()
# self.crawler = Crawler(settings)
# self.crawler.configure()
# self.crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
# self.spider = spider
# def run(self):
# self.crawler.crawl(self.spider)
# self.crawler.start()
# reactor.run()
# def auto_crawl():
# spider = NwsuafSpider()
# crawler = UrlCrawlerScript(spider)
# crawler.start()
# crawler.join()
# nwsuaf.rebuild_index()
# Seed the start-URL collection with a default crawl root.
# NOTE(review): Collection.insert() is deprecated/removed in newer PyMongo;
# insert_one() is the modern call -- confirm the installed PyMongo version.
url_co.insert({"url": "www.nwsuaf.edu.cn"})
# TODO: run the crawler automatically first, then execute the code below.
# TODO: clear the "search" collection before executing the statement above.
# crawl_finished = False
def auto_crawl():
    """Schedule a crawl job on the local scrapyd daemon.

    Posts the project/spider pair to scrapyd's schedule endpoint and
    returns the 'status' field of its JSON reply.
    """
    schedule_endpoint = 'http://localhost:6800/schedule.json'
    # scrapyd expects form-encoded fields, so pass a dict rather than a
    # raw "project=...&&spider=..." string (the old string form failed).
    response = requests.post(schedule_endpoint, {'project':'nwsuaf','spider':'nwsuaf'})
    print(response.content)
    # .content is bytes and .text is str; neither has a .status attribute,
    # so decode the JSON body and index into it.
    return response.json()['status']
def check_crawl():
    """Return scrapyd's daemon status dict (pending/running/finished counts)."""
    status_endpoint = 'http://localhost:6800/daemonstatus.json'
    response = requests.get(status_endpoint)
    print(response.content)
    # Return the whole status dict; callers pick out the field they need.
    return response.json()
# query_one = "关于举办新西兰林肯大学土壤学专家系列学术报告的通知"
# pageshow = nwsuaf.search(query_one)
# print(pageshow)
# nwsuaf.close()
# Build the search wrapper, schedule a crawl, then poll scrapyd until no
# jobs remain pending before (re)building the Whoosh index from MongoDB.
nwsuaf = WhooshSarch(co)
auto_crawl()
crawl_finished = check_crawl()['pending']
print(crawl_finished)
# Simple polling delay loop.
while(crawl_finished > 0):
    time.sleep(60) # unit: seconds
    crawl_finished = check_crawl()['pending']
nwsuaf.rebuild_index()
@app.route('/')
def index_page():
    """Serve the search front page."""
    home_template = 'index.html'
    return render_template(home_template)
@app.route('/url-get', methods=['GET'])
def url_get():
    """Return the currently configured crawl start URL as JSON."""
    # find_one() grabs the first (and only) stored URL document.
    url_doc = url_co.find_one()
    # The raw Mongo document is not JSON-serializable as-is; wrap the
    # field we need with jsonify.
    payload = jsonify({'posturl': url_doc['url']})
    return payload, 200
@app.route('/url-post', methods=['GET', 'POST'])
def url_post():
    """Update the crawl start URL, kick off a crawl, and block until done.

    Expects JSON {"posturl": "..."}; returns the stored URL as plain text.
    """
    if not request.json or not 'posturl' in request.json:
        abort(400)
    # Mongo updates need the $set operator; upsert=False means the single
    # URL document must already exist (seeded at import time).
    # url_co.find_one_and_update({},{'posturl': request.json['posturl']})
    url_co.find_one_and_update({},{'$set': {'url': request.json['posturl']}}, upsert=False)
    urlstr = url_co.find_one()
    auto_crawl()
    # A return value here could let the frontend stop its crawling/indexing
    # animation earlier.
    crawl_finished = check_crawl()['pending']
    print(crawl_finished)
    # Poll scrapyd until no jobs are pending; this blocks the HTTP request
    # for the whole crawl.
    while(crawl_finished > 0):
        time.sleep(60) # unit: seconds
        crawl_finished = check_crawl()['pending']
    return urlstr['url']
@app.route('/check-crawl', methods=['GET'])
def check_crawl_finished():
    """Expose scrapyd's daemon status so the frontend can poll progress."""
    # A bare int or dict is not a valid Flask response body here, so
    # serialize the status dict to a JSON string explicitly.
    status = check_crawl()
    return json.dumps(status)
# @app.route('/messages', methods=['POST'])
# def test():
# if request.headers['Content-Type'] == 'text/plain':
# return "Text Message: " + request.data
# elif request.headers['Content-Type'] == 'application/json':
# return "JSON Message: " + json.dumps(request.json)
# else:
# return "415 Unsupported Media Type ;)"
@app.route('/results', methods=['GET', 'POST'])
def get_results():
    """Run a search for the posted keywords and return matching pages."""
    body = request.json
    if not body or 'keywords' not in body:
        abort(400)
    query_keywords = body['keywords']
    # The search hits the prebuilt Whoosh index, not MongoDB directly.
    pageshow = nwsuaf.search(query_keywords)
    print(query_keywords)
    print(pageshow)
    return jsonify({'results': pageshow, 'total_items': len(pageshow)}), 201
# @app.route('/keywords', method=['POST'])
# def post_keywords():
# query_keywords = resquest.json['keywords']
# pageshow = nwsuaf.search(query_keywords)
# print(pageshow)
# # TODO 此处添加完成搜索后的返回消息
# return 200
if __name__ == "__main__":
app.run()
# app.run(debug=True)
# 先不开启debug
# host和port 参数
# app.run('0.0.0.0')
|
999,933 | 62e1ecfebf33e86573118b537d6ff41afd3d22d6 | import sqlite3
import calendar
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import pandas as pd
import paceutils
from .settings import color_palette, db_filepath, agg_filepath
# Shared query helpers, each bound to the operational database file.
helpers = paceutils.Helpers(db_filepath)
enrollment = paceutils.Enrollment(db_filepath)
demographics = paceutils.Demographics(db_filepath)
utilization = paceutils.Utilization(db_filepath)
incidents = paceutils.Incidents(db_filepath)
center_enrollment = paceutils.CenterEnrollment(db_filepath)
center_demographics = paceutils.CenterDemographics(db_filepath)
team = paceutils.Team(db_filepath)
quality = paceutils.Quality(db_filepath)
# Pre-aggregated sparkline data lives in a separate database file.
agg = paceutils.Agg(agg_filepath)
# dictionary for time range buttons: maps each radio-button value to a pair
# of (current-range function, previous-comparable-range function).
time_range_dict = {
    "last_month": (helpers.last_month, helpers.prev_month_dates),
    "month_td": (helpers.month_to_date, helpers.prev_month_dates),
    "last_quarter": (helpers.last_quarter, helpers.prev_quarter_dates),
    "quarter_td": (helpers.quarter_to_date, helpers.prev_quarter_dates),
}
def arrow_direction(prev_val, current_val):
    """
    Returns the FA icon name for the direction the indicator moved.

    Args:
        prev_val(float/int): previous value of indicator
        current_val(float/int): current value of indicator

    Returns:
        str: "up-big" if the current value is larger,
        "down-big" if it is smaller,
        "minus" if they are equal
        (all correspond to FA icon names)
    """
    if current_val == prev_val:
        return "minus"
    return "up-big" if current_val > prev_val else "down-big"
def below_threshold(threshold, current_value):
    """
    Color an indicator that should stay BELOW its threshold.

    Args:
        threshold(int/float): threshold value for indicator
        current_value(int/float): value of the indicator

    Returns:
        str: green hex when below the threshold, red hex when above,
        near-black when equal
    """
    if current_value == threshold:
        return "#030027"
    return "#00B760" if current_value < threshold else "#E3170A"
def above_threshold(threshold, current_value):
    """
    Color an indicator that should stay ABOVE its threshold.

    Args:
        threshold(int/float): threshold value for indicator
        current_value(int/float): value of the indicator

    Returns:
        str: green hex when above the threshold, red hex when below,
        near-black when equal
    """
    if current_value == threshold:
        return "#030027"
    return "#00B760" if current_value > threshold else "#E3170A"
def indicator_color(threshold=None, current_value=0, polarity="above"):
    """
    Pick the display color for an indicator relative to its threshold.

    Args:
        threshold(int/float/None): threshold for the indicator; None means
            no threshold, which always maps to the neutral color
        current_value(int/float): value of the indicator
        polarity(str): "above" means the indicator should exceed the
            threshold; anything else means it should stay below

    Returns:
        str: green hex when the indicator is in good standing, red when
        not, near-black when neutral/equal or no threshold is given
    """
    if threshold is None:
        return "#030027"
    # Bug fix: the original ended with an unreachable `return "#030027"`
    # after an if/else in which both branches already returned; removed.
    if polarity == "above":
        return above_threshold(threshold, current_value)
    return below_threshold(threshold, current_value)
def card_value(time_range, value_function, agg_table, agg_col, card_layout, additional_args=None, threshold_value=None, polarity="above"):
    """
    Returns a bootstrap card with values defined in the design of the dashboard.

    Args:
        time_range(str): time range value from time radio button
        value_function(func): function to use to calculate the indicator - most come from
            the paceutils module
        agg_table: table in the agg database to use to build the dataframe for the sparkline
        agg_col: column in the table to use to build the dataframe for the sparkline
        card_layout(func): function that creates the row, col and card bootstrap elements
        additional_args(list): list of additional arguments for the value_function
        threshold_value(int/float): threshold value for indicator
        polarity(str): above means the indicator should be above the threshold,
            below means the indicator should be below

    Returns:
        list: list of bootstrap components that create a card
    """
    # Resolve the selected time range and its comparable previous range.
    params = time_range_dict[time_range][0]()
    prev_params = time_range_dict[time_range][1](params)
    if additional_args is None:
        prev_value = value_function(prev_params)
        current_value = value_function(params)
    else:
        prev_value = value_function(prev_params, *additional_args)
        current_value = value_function(params, *additional_args)
    # The trend arrow compares this period against the previous one.
    arrow = arrow_direction(prev_value, current_value)
    # The sparkline always shows the trailing year, regardless of time_range.
    plot_df = agg.get_plot_df(agg_table, agg_col, params=helpers.last_year())
    figure = sparkline(plot_df)
    # Only color the card when a threshold was supplied.
    if threshold_value is not None:
        color = indicator_color(threshold_value, current_value, polarity)
        return card_layout(
            current_value, arrow, f"{agg_table}-{agg_col}", figure, f"sparkline-{agg_col}", color
        )
    return card_layout(
        current_value, arrow, f"{agg_table}-{agg_col}", figure, f"sparkline-{agg_col}"
    )
def create_daterange(start_date, end_date, freq, update=True):
    """
    Build a pandas date range, optionally snapping the endpoints first.

    Args:
        start_date: First date to include in resulting range
        end_date: Last date to include in resulting range
        freq: Determines if range should be by month or quarter
        update: when True, align start/end to period boundaries via
            update_dates before building the range

    Returns: pandas.DatetimeIndex over [start_date, end_date] at freq
    """
    if not update:
        return pd.date_range(start_date, end_date, freq=freq)
    snapped_start, snapped_end = update_dates(start_date, end_date, freq)
    return pd.date_range(snapped_start, snapped_end, freq=freq)
def update_dates(start_date, end_date, freq):
    """
    Moves start date and end date to beginning of month or quarter and
    end of month for month frequency.

    Args:
        start_date: First date to include in resulting dataframe
        end_date: Last date to include in resulting dataframe
        freq: Determines if range should be by month or quarter

    Returns: start_date, end_date
    """
    if (freq == "MS") or (freq == "M"):
        try:
            # Dates may arrive as "m/d/Y" strings...
            start_date = start_date.split("/")
            end_date = end_date.split("/")
        except AttributeError:
            # ...or as datetime-like objects; normalize both to [m, d, y].
            start_date = [start_date.month, start_date.day, start_date.year]
            end_date = [end_date.month, end_date.day, end_date.year]
        # If fewer than 22 days of the final month have elapsed, treat that
        # month as incomplete and roll the end back one month (January
        # wraps to December of the prior year).
        if int(end_date[1]) < 22:
            if int(end_date[0]) == 1:
                end_month = 12
                end_year = int(end_date[2]) - 1
            else:
                end_month = int(end_date[0]) - 1
                end_year = end_date[2]
            end_date[0] = end_month
            end_date[2] = end_year
        # Snap start to the 1st and end to the last day of its month.
        # NOTE(review): list entries can be str or int depending on which
        # input path ran above; the f-strings/int() casts tolerate both.
        start_date = pd.to_datetime(f"{start_date[0]}/01/{start_date[2]}")
        end_date = pd.to_datetime(
            f"{end_date[0]}/{calendar.monthrange(int(end_date[2]),int(end_date[0]))[1]}/{end_date[2]}"
        )
    if (freq == "QS") or (freq == "Q"):
        # Shift one day forward so a date already on a quarter boundary
        # stays in its own quarter, then snap to quarter begin/end.
        start_date = (pd.to_datetime(start_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterBegin(
            startingMonth=1
        )
        end_date = (pd.to_datetime(end_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterEnd()
    return (start_date, end_date)
def calc_min_y(scale_max, min_dp):
    """
    Compute the y-axis minimum so roughly 33% whitespace sits under the
    plotted line.

    Args:
        scale_max: Calculated maximum y-axis value (not maximum dataset data point)
        min_dp: Minimum data point

    Returns:
        float: minimum value of the y-axis (clamped to 0 for non-negative data)
    """
    candidate = (3 * min_dp - scale_max) / 2
    # Never dip below zero when the data itself is non-negative.
    if candidate < 0 and min_dp >= 0:
        return 0
    return candidate
def sql_return_df(query, params, date_cols):
    """
    Returns a pandas dataframe of the provided SQL query.

    Args:
        query: Valid SQL query
        params: parameter for parameterized SQL query
        date_cols: columns to parse the dates in

    Returns:
        df: dataframe
    """
    conn = sqlite3.connect(db_filepath)
    try:
        # Bug fix: close the connection even when read_sql raises, instead
        # of leaking it on error.
        return pd.read_sql(query, conn, params=params, parse_dates=date_cols)
    finally:
        conn.close()
def create_center_sql(center, params):
    """
    Build the SQL fragment (and parameters) needed to filter a query by
    PACE center.

    Args:
        center: Name of PACE center, or "all" for no filtering
        params: current parameters for the related parameterized query
            (appended to in place when a center filter applies)

    Returns:
        tuple: (filter SQL string - empty when center == "all", params list)
    """
    if center == "all":
        return "", params
    # NOTE: params is extended in place, matching the original contract.
    params.append(f"{center}")
    return "AND centers.center = ?", params
def create_join_sql(center, table):
    """
    Return the JOIN clause linking *table* to the centers table so queries
    can be filtered by PACE center.

    Args:
        center: Name of PACE center, or "all" for no filtering
        table: table to be joined to the centers table

    Returns:
        str: JOIN clause, or an empty string when no center filter is
        selected (center == "all")
    """
    if center == "all":
        return ""
    return f"JOIN centers ON {table}.member_id = centers.member_id"
def build_bar_layout(
    title,
    bar_mode="group",
    x_title=" ",
    y_title=" ",
    legend=None,
    x_ticks=None,
    small_margins=False,
):
    """
    Creates plotly layout object for bar graphs

    Args:
        title: title of plot
        bar_mode: default is group, set mode of plotly bars
        x_title: x-axis title
        y_title: y-axis title
        legend: plotly legend object if needed
        x_ticks: series of dates used as explicit tick labels; default lets
            plotly choose the ticks
        small_margins: sets the margins to a narrow setting

    Returns plotly layout object
    """
    # Bug fix: the previous defaults (legend={}, x_ticks=pd.Series([])) were
    # mutable objects created once at import time; use None sentinels
    # resolved per call instead.
    if legend is None:
        legend = {}
    if x_ticks is None:
        x_ticks = pd.Series([], dtype="object")
    show_legend = bool(legend)
    margin = {"pad": 10, "l": 55, "r": 55, "t": 35, "b": 65}
    title_y = 1.05
    if small_margins:
        margin = {"pad": 5, "l": 35, "r": 35, "t": 35, "b": 20}
        title_y = 1.15
    if not x_ticks.empty:
        x_axis = {
            "title": x_title,
            "showgrid": False,
            "showline": False,
            "tickmode": "array",
            "tickvals": list(range(0, len(x_ticks))),
            # Render each ISO-formatted date value as "Mon YYYY".
            "ticktext": [
                f"{calendar.month_abbr[int(str(date)[5:7])]} {str(date)[:4]}"
                for date in x_ticks.values
            ],
            "type": "category",
        }
    else:
        x_axis = {"title": x_title, "showgrid": False, "showline": False}
    return go.Layout(
        margin=margin,
        barmode=bar_mode,
        xaxis=x_axis,
        yaxis={"title": y_title, "showgrid": False},
        showlegend=show_legend,
        legend=legend,
        hoverdistance=5,
        annotations=[
            dict(
                yref="paper",
                xref="paper",
                y=title_y,
                x=0,
                text=f"<b>{title}</b>",
                showarrow=False,
                font=dict(color="#323031"),
            )
        ],
    )
def build_scatter_layout(
    title,
    df_min,
    df_max,
    x_title=" ",
    y_title=" ",
    legend=None,
    x_ticks=None,
    small_margins=False,
):
    """
    Creates plotly layout object for scatter plots

    Args:
        title: title of plot
        df_min: minimum value in the dataset
        df_max: maximum value in the dataset
        x_title: x-axis title
        y_title: y-axis title
        legend: plotly legend object if needed
        x_ticks: sets x_ticks to specific values, default allows plotly to choose
        small_margins: sets the margins to a narrow setting

    Returns plotly layout object
    """
    # Bug fix: mutable defaults (legend={}, x_ticks=pd.Series([])) replaced
    # with None sentinels resolved per call.
    if legend is None:
        legend = {}
    if x_ticks is None:
        x_ticks = pd.Series([], dtype="object")
    show_legend = bool(legend)
    margin = {"pad": 10, "l": 55, "r": 55, "t": 45, "b": 65}
    title_y = 1.05
    if small_margins:
        margin = {"pad": 5, "l": 35, "r": 35, "t": 45, "b": 20}
        title_y = 1.15
    # Pad the top by 10% and leave ~33% whitespace below the data.
    scale_max = df_max * 1.10
    scale_min = calc_min_y(scale_max, df_min)
    # Enforce a minimum visible span of 4.5 units.
    if scale_max - scale_min < 4.5:
        scale_min = scale_max - 4.5
    if not x_ticks.empty:
        x_axis = {
            "title": x_title,
            "showgrid": False,
            "showline": False,
            "tickmode": "array",
            "tickvals": list(range(0, len(x_ticks))),
            # Render each ISO-formatted date value as "Mon YYYY".
            "ticktext": [
                f"{calendar.month_abbr[int(str(date)[5:7])]} {str(date)[:4]}"
                for date in x_ticks.values
            ],
            "type": "category",
        }
    else:
        x_axis = {"title": x_title, "showgrid": False, "showline": False}
    return go.Layout(
        margin=margin,
        xaxis=x_axis,
        yaxis={
            "title": y_title,
            "showgrid": True,
            "zeroline": False,
            "range": [scale_min, scale_max],
        },
        autosize=True,
        hoverdistance=10,
        annotations=[
            dict(
                yref="paper",
                xref="paper",
                y=title_y,
                x=0,
                text=f"<b>{title}</b>",
                showarrow=False,
                font=dict(color="#323031"),
            )
        ],
        showlegend=show_legend,
        legend=legend,
    )
def create_line_graph(plot_df, title="", x_title="", y_title=""):
    """
    Function for creating a simple plotly line graph. Useful in keeping the
    default graph page of the dashboard simple

    Args:
        plot_df(DataFrame): dataframe to plot, two column, first column for x-axis
            second for the y.
        title(str): title of the plot
        x_title: label of x-axis
        y_title: label of y-axis

    Returns:
        dict: dictionary to pass to ploty graph figure
    """
    fig_data = [
        go.Scatter(
            x=plot_df.iloc[:, 0].astype(str),
            y=plot_df.iloc[:, 1],
            text=plot_df.iloc[:, 1],
            mode="lines",
            line={"width": 7, "color": color_palette[0]},
            hoverinfo="x+y",
        )
    ]
    # NOTE(review): min/max are pre-scaled by 1.05 here, and
    # build_scatter_layout scales the max by a further 1.10 -- confirm the
    # double padding is intended.
    fig_layout = build_scatter_layout(
        title,
        plot_df.iloc[:, 1].min() * 1.05,
        plot_df.iloc[:, 1].max() * 1.05,
        x_title,
        y_title,
    )
    return dict(data=fig_data, layout=fig_layout)
def create_bar_graph(plot_df, title="", x_title="", y_title=""):
    """
    Create a simple plotly bar graph figure dict, bucketing the first
    column's dates by quarter. Useful in keeping the default graph page of
    the dashboard simple.

    Args:
        plot_df(DataFrame): dataframe to plot, two column, first column for
            x-axis second for the y.
        title(str): title of the plot
        x_title: label of x-axis
        y_title: label of y-axis

    Returns:
        dict: dictionary to pass to ploty graph figure
    """
    plot_df["quarter"] = pd.PeriodIndex(pd.to_datetime(plot_df.iloc[:, 0]), freq="Q")
    values = plot_df.iloc[:, 1]
    bars = go.Bar(
        x=plot_df["quarter"].astype(str),
        y=values,
        text=values,
        marker={"color": color_palette[0]},
        hoverinfo="x+y",
        showlegend=False,
    )
    layout = build_bar_layout(title, x_title=x_title, y_title=y_title)
    return dict(data=[bars], layout=layout)
def create_team_line_graph(plot_df, plot_type="scatter", title_suffix=""):
    """
    Function for creating a plotly subplot of bar or line graphs, one row
    per team (Central, South, East, North).

    Args:
        plot_df(DataFrame): dataframe to plot, with a "month" column and
            a column for each team.
        plot_type(str): scatter for line graph, bar for a bar chart
        title_suffix(str): text appended to each subplot title

    Returns:
        plotly Figure with one subplot per team
    """
    # Addresses the old "TO DO: generalize for any number of teams": the
    # four duplicated per-team trace/axis blocks collapse into loops over
    # team_order. Row order matches the original (Central, South, East,
    # North, top to bottom).
    team_order = ["Central", "South", "East", "North"]
    team_colors = {
        "Central": "#FE6B39",
        "East": "#FFD166",
        "North": "#439A86",
        "South": "#118AB2",
    }
    fig = make_subplots(
        rows=len(team_order),
        cols=1,
        shared_xaxes=True,
        vertical_spacing=0.1,
        subplot_titles=tuple(f"{team} {title_suffix}" for team in team_order),
    )
    traces = {}
    if plot_type == "bar":
        # Bars are bucketed by quarter rather than plotted per month.
        plot_df["quarter"] = pd.PeriodIndex(pd.to_datetime(plot_df["month"]), freq="Q")
        x_vals = plot_df["quarter"].astype(str)
        for team in team_order:
            traces[team] = go.Bar(
                x=x_vals,
                y=plot_df[team],
                text=plot_df[team],
                marker={"color": team_colors[team]},
                hoverinfo="x+y",
            )
    if plot_type == "scatter":
        for team in team_order:
            traces[team] = go.Scatter(
                x=plot_df["month"],
                y=plot_df[team],
                mode="lines",
                line={"width": 7, "color": team_colors[team]},
                hoverinfo="x+y",
            )
    for row, team in enumerate(team_order, start=1):
        fig.add_trace(traces[team], row, 1)
        # Pad each team's y-range by 5% on both sides.
        fig.update_yaxes(
            range=[plot_df[team].min() * 0.95, plot_df[team].max() * 1.05],
            row=row,
            col=1,
            showline=True,
            linewidth=1,
            linecolor="black",
            nticks=4,
        )
        fig.update_xaxes(showline=True, linewidth=1, linecolor="black", row=row, col=1)
    fig.update_layout(
        margin={"pad": 10, "l": 55, "r": 55, "t": 35, "b": 65},
        showlegend=False,
        plot_bgcolor="rgba(0,0,0,0)",
        title="Participants",
    )
    return fig
def sparkline(plot_df):
    """
    Create a minimal, axis-free plotly figure (a sparkline).

    Args:
        plot_df(DataFrame): two-column dataframe; first column drives the
            x-axis, second the y.

    Returns:
        dict: dictionary to pass to ploty graph figure
    """
    trace = go.Scatter(
        x=plot_df.iloc[:, 0],
        y=plot_df.iloc[:, 1],
        mode="lines",
        text=plot_df.iloc[:, 1],
        line={"width": 3, "color": "#262626"},
        hoverinfo="none",
    )
    # Hide every axis decoration so only the line itself shows.
    hidden_x = {
        "showgrid": False,
        "showline": False,
        "zeroline": False,
        "showticklabels": False,
    }
    hidden_y = {"showgrid": False, "zeroline": False, "showticklabels": False}
    layout = go.Layout(
        margin={"pad": 0, "l": 10, "r": 10, "t": 10, "b": 10},
        xaxis=hidden_x,
        yaxis=hidden_y,
        showlegend=False,
        autosize=True,
        paper_bgcolor="rgba(0,0,0,0)",
        plot_bgcolor="rgba(0,0,0,0)",
    )
    return {"data": [trace], "layout": layout}
999,934 | de157ad3270733d041d055a34796caafd09dd610 | import numpy as np
import matplotlib.pyplot as plt
import quantities as pq
import neo
import pandas as pd
import string
import glob
import sys
def csv_to_raster(fileName, title, output):
    """Render a 60-channel MEA raster plot from a spike-time CSV and save it as JPG.

    fileName: CSV whose columns are electrode names ('12'..'87') holding spike times (s).
    title: label used in the plot title and the output file name.
    output: directory (Windows path, no trailing backslash) receiving "<title>_Raster.jpg".
    """
    MEA_data = pd.read_csv(fileName , sep=',', encoding='latin1')
    data = {}
    # Build the full 8x8 electrode-name grid ('11'..'88').
    for i in range(1, 9): #column
        for j in range(1, 9): #row
            name = str(i) + str(j)
            data[name] = []
    #remove corner electrodes that don't exist
    data.pop('11')
    data.pop('18')
    data.pop('81')
    data.pop('88')
    MEA_data_full = pd.DataFrame.from_dict(data)
    MEA_data_full = MEA_data_full.reindex(sorted(MEA_data_full.columns), axis=1)
    spikeTrainArray = []
    # Only plot the 240-300 s window of the recording.
    t_start = 240.0
    t_stop = 300.0
    for col in MEA_data_full.columns:
        try:
            values = MEA_data[col].values
            values = values[values > t_start]
            values = values[values < t_stop]
        except:
            # Channel missing from this CSV: plot it as an empty train.
            values = []
        spikeTrainArray.append(neo.core.SpikeTrain(values * pq.s, t_stop = t_stop * pq.s, t_start = t_start * pq.s))
    # One horizontal row of tick marks per channel.
    for i, spiketrain in enumerate(spikeTrainArray):
        plt.plot(spiketrain, i * np.ones_like(spiketrain), 'k|', markersize=2)
    plt.axis('tight')
    plt.title("Raster Plot - "+title)
    plt.xlim(t_start, t_stop)
    plt.ylim(-1, 60)
    plt.xlabel('Time (s)', fontsize=16)
    plt.ylabel('Channels', fontsize=16)
    plt.gca().tick_params(axis='both', which='major', labelsize=14)
    #plt.show()
    name = output+"\\"+title+"_Raster"+".jpg"
    plt.savefig(name, dpi=600)
    # Free the per-file data and reset the figure for the next call.
    del MEA_data_full
    del spikeTrainArray
    plt.clf()
def main(directory, output):
    """Render a raster plot for every .csv in *directory*, saving JPGs into *output*.

    Both arguments are Windows-style paths without a trailing backslash.
    """
    directoryPath = directory + "\\*.csv"
    print(directoryPath)
    dirFiles = glob.glob(directoryPath)
    # NOTE(review): slicing by len(directoryPath) assumes a fixed relationship
    # between the glob-pattern length and each matched path length — presumably
    # this extracts a 5-char file-name stem; confirm with real file names.
    titles = list(map(lambda x: x[len(directoryPath)-5 : len(directoryPath)], dirFiles))
    print(titles)
    for index, fileName in enumerate(dirFiles):
        # Same fixed-offset assumption: 5 chars taken as the MEA identifier.
        MEA_num = dirFiles[index][len(directoryPath)+1 : len(directoryPath)+6]
        print(index, fileName)
        title = MEA_num+"_"+titles[index]
        try:
            csv_to_raster(fileName, title, output)
        except:
            # Broad catch keeps the batch going when one file fails to parse/plot.
            print("an error occurred. stopped on: " + fileName)
if __name__ == '__main__':
    #accepts two cmd line arguments, input directory and output directory (no \ at the end of paths)
    print(f"Arguments count: {len(sys.argv)}")
    for i, arg in enumerate(sys.argv):
        print(f"Argument {i:>6}: {arg}")
    try:
        if(len(sys.argv) < 2):
            # No paths supplied: fall back to the hard-coded placeholders.
            print("running with default location")
            main("<INSERT DEFAULT DIRECTORY PATH HERE TO .CSV SPIKING DATA FILES>", "<INSERT DEFAULT OUTPUT DIRECTORY PATH HERE")
        elif(len(sys.argv) == 2):
            # One path supplied: write the plots next to the input files.
            main(sys.argv[1], sys.argv[1])
        else:
            main(sys.argv[1], sys.argv[2])
    except IndexError:
        print("no files in directory")
    except:
        # Last-resort catch so a batch run never tracebacks at top level.
        print("something went wrong")
    #main()
|
999,935 | de217f7e297f6d9e628ff27736b822a254a8a30d | #!/usr/bin/env python3
import argparse
from pathlib import Path
from json import load
from junitparser import JUnitXml
import csv
from vunit.color_printer import COLOR_PRINTER
def get_project_info(project_info_path):
    """Extract the test-to-requirement mapping from a VUnit project-info JSON file.

    A test covers a requirement when it carries an attribute named ".req...";
    the leading dot is stripped to obtain the requirement name.

    Returns:
        (mapping, tested) where mapping is {test name: [requirement, ...]} and
        tested is the set of all requirements referenced by any test.
    """
    test_to_requirement_mapping = {}
    tested_requirements = set()
    with open(project_info_path) as json_file:
        project_info = load(json_file)
    for test in project_info["tests"]:
        requirements = [
            attribute[1:]
            for attribute in test["attributes"]
            if attribute.startswith(".req")
        ]
        tested_requirements.update(requirements)
        test_to_requirement_mapping[test["name"]] = requirements
    return test_to_requirement_mapping, tested_requirements
def get_failed_test_cases(test_result_path):
    """Yield "classname.name" for every non-passing test case in a JUnit XML file."""
    report = JUnitXml.fromfile(test_result_path)
    return (
        f"{test_case.classname}.{test_case.name}"
        for test_case in report
        if test_case.result  # Absence of result indicates passed test case
    )
def get_requirements(requirements_path):
    """Read the requirement names (first CSV column) into a set."""
    with open(requirements_path, newline="") as csv_file:
        reader = csv.reader(csv_file)
        return {row[0] for row in reader}
def analyze(project_info_path, test_result_path, requirements_path):
    """Cross-check the requirement list against test attributes and test results.

    Args:
        project_info_path: VUnit project-info JSON (per-test ".req*" attributes).
        test_result_path: JUnit XML with the test outcomes.
        requirements_path: CSV whose first column lists the requirement names.

    Returns:
        True when every requirement is covered by at least one test and no
        requirement has a failing test; False otherwise. Findings are printed
        via COLOR_PRINTER either way.
    """
    test_to_requirement_mapping, tested_requirements = get_project_info(
        project_info_path
    )
    requirements = get_requirements(requirements_path)
    # Requirements never referenced by any test attribute.
    not_tested_requirements = requirements - tested_requirements
    requirements_failing_test = set()
    for test in get_failed_test_cases(test_result_path):
        requirements_failing_test.update(test_to_requirement_mapping[test])
    # Only report failures for requirements actually listed in the CSV.
    requirements_failing_test &= requirements
    ok = not not_tested_requirements and not requirements_failing_test
    if ok:
        COLOR_PRINTER.write("\nRequirements coverage check passed!", fg="gi")
    else:
        if not_tested_requirements:
            COLOR_PRINTER.write("\nThe following requirements have not been tested:\n")
            for req in not_tested_requirements:
                COLOR_PRINTER.write(f" - {req}\n")
        if requirements_failing_test:
            COLOR_PRINTER.write("\nThe following requirements have failing tests:\n")
            for req in requirements_failing_test:
                COLOR_PRINTER.write(f" - {req}\n")
        COLOR_PRINTER.write("\nRequirements coverage check failed!", fg="ri")
    return ok
def main():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description="Analyze requirement coverage")
    # All three positionals are filesystem paths; declare them uniformly.
    for arg_name, arg_help in (
        ("project_info_path", "JSON file containing project information"),
        ("test_result_path", "XML file containing test result"),
        ("requirements_path", "CSV file containing requirements"),
    ):
        parser.add_argument(arg_name, help=arg_help, type=Path)
    args = parser.parse_args()
    ok = analyze(args.project_info_path, args.test_result_path, args.requirements_path)
    # Exit status mirrors the coverage verdict for CI consumption.
    exit(0 if ok else 1)
if __name__ == "__main__":
main()
|
999,936 | 6db075067d78e16d9af66f9b788df71ea549c24e | from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.utils import formats
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView
from django.views.generic.list import ListView
from .forms import *
from .models import *
def index(request):
    """Render the site landing page."""
    template_name = 'index.html'
    return render(request, template_name)
def register(request):
    """Handle sign-up: create a Django User plus its linked Inv_User profile."""
    # A boolean value for telling the template whether the registration was successful.
    # Set to False initially. Code changes value to True when registration succeeds.
    registered = False
    # If it's a HTTP POST, we're interested in processing form data.
    if request.method == 'POST':
        # Attempt to grab information from the raw form information.
        # Note that we make use of both UserForm and Inv_User.
        user_form = UserForm(data=request.POST)
        inv_user_form = Inv_UserForm(data=request.POST);
        # If the two forms are valid...
        if user_form.is_valid() and inv_user_form.is_valid():
            # Save the user's form data to the database.
            user = user_form.save()
            # Now we hash the password with the set_password method.
            # Once hashed, we can update the user object.
            user.set_password(user.password)
            # Now sort out the Inv_User instance.
            # Since we need to set the user attribute ourselves, we set commit=False.
            # This delays saving the model until we're ready to avoid integrity problems.
            inv_user = inv_user_form.save(commit=False)
            inv_user.user = user
            # Mirror the profile's name fields onto the auth User record.
            user.first_name = inv_user.first_name
            user.last_name = inv_user.last_name
            # Now we save the User and Inv_User model instance.
            user.save()
            inv_user.save()
            # Update our variable to tell the template registration was successful.
            registered = True
        # Invalid form or forms - mistakes or something else?
        # Print problems to the terminal.
        # They'll also be shown to the user.
        else:
            print(user_form.errors, inv_user_form.errors)
    # Not a HTTP POST, so we render our form using two ModelForm instances.
    # These forms will be blank, ready for user input.
    else:
        user_form = UserForm()
        inv_user_form = Inv_UserForm()
    # Render the template depending on the context.
    return render(request,
            'register.html',
            {'user_form': user_form, 'inv_user_form': inv_user_form, 'registered': registered} )
def user_login(request):
    """Authenticate a username/password POST and start a session on success."""
    # If the request is a HTTP POST, try to pull out the relevant information.
    if request.method == 'POST':
        # Gather the username and password provided by the user.
        # This information is obtained from the login form.
        # We use request.POST.get('<variable>') as opposed to request.POST['<variable>'],
        # because the request.POST.get('<variable>') returns None, if the value does not exist,
        # while the request.POST['<variable>'] will raise key error exception
        username = request.POST.get('username')
        password = request.POST.get('password')
        # Use Django's machinery to attempt to see if the username/password
        # combination is valid - a User object is returned if it is.
        user = authenticate(username=username, password=password)
        # If we have a User object, the details are correct.
        # If None (Python's way of representing the absence of a value), no user
        # with matching credentials was found.
        if user:
            # Is the account active?
            if user.is_active:
                # If the account is valid and active, we can log the user in.
                # We'll send the user to the products page.
                login(request, user)
                #return render(request,"products.html")
                return HttpResponseRedirect(reverse('collections'))
            else:
                # An inactive account was used - no logging in!
                return render(request, 'redirect.html', {
                    'title': 'Account Disabled',
                    'heading': 'Banned',
                    'content': 'Your account has been disabled. Contact an administrator.',
                    'url_arg': 'index',
                    'url_text': 'Back to homepage'
                })
        else:
            # Bad login details were provided. So we can't log the user in.
            return render(request, 'redirect.html', {
                'title': 'Invalid Login',
                'heading': 'Incorrect Login',
                'content': 'Invalid login details for: {0}'.format(username),
                'url_arg': 'login',
                'url_text': 'Back to login'
            })
    # The request is not a HTTP POST, so display the login form.
    # This scenario would most likely be a HTTP GET.
    else:
        return render(request, 'login.html', {})
@login_required
def profile(request, inv_user_id):
    """Render the profile page of the requested Inv_User, or 404 when absent."""
    requested_user = get_object_or_404(Inv_User, pk=inv_user_id)
    context = {'inv_user': requested_user}
    return render(request, 'profile.html', context)
@login_required
def user_logout(request):
    """End the current session and send the visitor back to the collections page."""
    logout(request)
    destination = reverse('collections')
    return HttpResponseRedirect(destination)
class CollectionListView(ListView):
    """Paginated list of galleries flagged as collections."""
    # queryset = Gallery.objects.on_site().is_public()
    paginate_by = 20
    queryset = Gallery.objects.filter(collection=True)
class CategoryListView(ListView):
    """Paginated list of galleries flagged as categories."""
    # queryset = Gallery.objects.on_site().is_public()
    paginate_by = 20
    queryset = Gallery.objects.filter(category=True)
class GalleryDetailView(DetailView):
    """Detail page for a single gallery."""
    queryset = Gallery.objects.all()
class PhotoDetailView(DetailView):
    """Detail page for a single photo."""
    queryset = Photo.objects.all()
def concepts(request):
    """Render the static concepts page."""
    return render(request, "concepts.html")
def about(request):
    """Render the static about page."""
    return render(request, "about.html")
def contact(request):
    """Render the static contact page."""
    return render(request, "contact.html")
def press(request):
    """Render the static press page."""
    return render(request, "press.html")
|
999,937 | f33123c429ec6dd41b65322f60e76b0f098a4b6e | from __future__ import division, print_function, unicode_literals
import pyglet
from pyglet.gl import *
from pyglet import *
from pyglet.window import *
from pyglet import clock
import ctypes
import primitives
import random
import user_input
# Frames a key must be held up before it can retrigger (simple debounce window).
constant = 10;
win = window.Window(fullscreen = True)
# White background; enable alpha blending for the primitives.
glClearColor(1, 1, 1, 1.0)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# Windows-only: query the physical screen size via user32.
user32 = ctypes.windll.user32
width = user32.GetSystemMetrics(0)
height = user32.GetSystemMetrics(1)
keys = key.KeyStateHandler()
win.push_handlers(keys)
batch = pyglet.graphics.Batch()
# Text-entry widget stretched across most of the screen width.
widget = user_input.TextWidget('', 200, 100, width - 210, batch)
a = primitives.Arc(150,150,radius=100,color=(1.,0.,0.,1.),sweep=90,style=GLU_FILL)
text_cursor = win.get_system_mouse_cursor('text')
text = "hi"
widget.caret.on_text(text)
pyglet.clock.set_fps_limit(60)
num = constant;
write = False
# Main loop: poll events, debounce the A key, redraw the rotating arc.
while not win.has_exit:
    clock.tick()
    if(num < constant):
        num = num+1
    win.dispatch_events()
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    if(keys[key.A] and num == constant):
        text = "A"
        num = 0
        write = True
    # NOTE(review): the text is echoed one frame before the debounce window
    # closes — presumably intentional; confirm against widget behaviour.
    if(num == (constant-1) and write):
        widget.caret.on_text(text)
    a.render()
    a.rotation+=1
    batch.draw()
    win.flip()
999,938 | a9bb5ecd764f11ff135896892dc0ae64d04ab31b | from django.db import models
from .. import Team
class TeamRole(models.Model):
    """A named role within a team, carrying per-role permission flags."""
    team = models.ForeignKey(Team, on_delete=models.CASCADE)
    name = models.CharField(max_length=30)
    # Whether holders of this role may modify team membership.
    can_modify_members = models.BooleanField(default=False)
    # NOTE(review): target of "remove" (members vs. the team itself) is not
    # visible here — confirm against the views that check this flag.
    can_remove = models.BooleanField(default=False)
    def __str__(self):
        return self.name
    class Meta:
        # Role names are unique within a team, not globally.
        unique_together = ("team", "name")
|
999,939 | b2058b44dd358d3a16ae1aa769b3cf3bdf43b8a2 | import os
import requests
from decimal import Decimal
from models.image import Image
from models.donation import Donation
from flask_login import current_user
from instagram_web.util.helpers import gateway
from flask import Blueprint, render_template, request, redirect, url_for, flash
donations_blueprint = Blueprint('donations',
__name__,
template_folder='templates')
@donations_blueprint.route("/<image_id>/donations/new")
def new(image_id):
    """Render the donation form for *image_id* with a fresh Braintree client token."""
    token = gateway.client_token.generate()
    return render_template("donations/new.html", token=token, image_id=image_id)
@donations_blueprint.route("/donations/<image_id>", methods=["POST"])
def create(image_id):
    """Charge a donation via Braintree, record it, and notify by email.

    Expects form fields "nonce" (payment-method nonce from the client) and
    "amount". Redirects back to the current user's profile either way.
    """
    nonce = request.form["nonce"]
    amount = request.form["amount"]
    # Submit the sale for settlement immediately.
    result = gateway.transaction.sale({
        "amount": amount,
        "payment_method_nonce": nonce,
        "options": {
            "submit_for_settlement": True
        }
    })
    if result.is_success:
        # Persist the donation against the image.
        image = Image.get_by_id(image_id)
        donation = Donation(image=image, amount=Decimal(amount))
        donation.save()
        # Fire-and-forget notification email via Mailgun.
        requests.post(
            f"https://api.mailgun.net/v3/{os.environ['MAILGUN_DOMAIN']}/messages",
            auth=("api", os.environ["MAILGUN_API"]),
            data={
                "from": f"Me <mailgun@{os.environ['MAILGUN_DOMAIN']}>",
                "to": ["nicholasowh@hotmail.com"],
                "subject": "Donated",
                "text": f"You have donated ${amount}!"
            }
        )
        flash("Payment successful")
    else:
        flash("Payment not successful")
    return redirect(url_for("users.show", username=current_user.username))
999,940 | 97c44d4c3e2eafce376dbbe603ba3d8bed5e7835 |
import matplotlib.pyplot as plt
# testing examples from http://matplotlib.org/users/pyplot_tutorial.html
def example_line():
    """Plot a simple line through four y-values and show it."""
    values = [1, 2, 3, 4]
    plt.plot(values)
    plt.ylabel('Numbers :) ')
    plt.show()
# example_line()
def example_red_dots():
    """Scatter four (x, y) points as red circles inside a fixed viewport."""
    xs = [1, 2, 3, 4]
    ys = [1, 4, 9, 20]
    # Un comment line below for BLUE line
    # plt.plot(xs, ys, 'b-')
    # 'ro' == red circles
    plt.plot(xs, ys, 'ro')
    # axis() takes a list of [xmin, xmax, ymin, ymax] and
    # specifies the viewport of the axes.
    plt.axis([-10, 6, 0, 57])
    plt.ylabel('Numbers :) ')
    plt.show()
# Uncomment me to Run
# example_red_dots()
import numpy as np
def numpy_sample():
    """Plot t, 2t and t**3 on one axes with distinct marker styles."""
    # Sample time at 150 ms intervals over [0, 5).
    t = np.arange(0.0, 5.0, 0.15)
    # red dashes, blue squares, green triangles
    plt.plot(t, t, 'r--', t, t * 2, 'bs', t, t ** 3, 'g^')
    plt.show()
# numpy_sample()
def thick_line():
    """Demonstrate the linewidth keyword with an exaggeratedly thick line."""
    data = [1, 2, 3, 4]
    plt.plot(data, linewidth=60.0)
    plt.ylabel('Numbers :) ')
    plt.show()
# thick_line()
def super_tofu():
    """Damped-sine demo on two stacked subplots.

    Top panel: the damped oscillation sampled coarsely (markers) and finely
    (solid line). Bottom panel: the undamped sine on the fine grid.
    """
    def tofu_function(tofu):
        # Exponentially damped sine wave.
        return np.exp(-tofu) * np.sin(2 * np.pi * tofu)
    cooked_tofu = np.arange(0.0, 5.2, 0.1)
    frozen_fofu = np.arange(0.0, 5.0, 0.02)
    plt.figure(1)
    plt.subplot(2, 1, 1)
    plt.plot(cooked_tofu, tofu_function(cooked_tofu),
             'bo', frozen_fofu, tofu_function(frozen_fofu), 'k')
    # BUG FIX: this previously selected subplot (2, 1, 1) again, overdrawing
    # the first panel and leaving the second panel empty.
    plt.subplot(2, 1, 2)
    plt.plot(frozen_fofu, np.sin(2 * np.pi * frozen_fofu), 'r--')
    plt.show()
super_tofu()
|
999,941 | fa196bad482d4b2c5b754045f3bab554b250e1b2 | # coding=utf8
# https://leetcode.com/problems/word-pattern/description/
# Easy
class Solution(object):
    def wordPattern(self, pattern, str):
        """
        :type pattern: str
        :type str: str
        :rtype: bool
        """
        words = str.split(" ")
        # A bijection requires equal lengths first.
        if len(words) != len(pattern):
            return False
        # Pattern and words map one-to-one iff the number of distinct
        # (letter, word) pairs equals both distinct-letter and distinct-word counts.
        pairs = set(zip(pattern, words))
        return len(pairs) == len(set(pattern)) == len(set(words))
|
999,942 | 7b1ae0fcf04e38538145cb65d119d48c21dd7168 | #!/usr/bin/env python3
# day165.py
# By Sebastian Raaphorst, 2019
from typing import List
def right_cones(array: List[int]) -> List[int]:
    """Replace each element with the count of smaller elements to its right.

    We have an array, say 3 4 9 6 1.
    3 is replaced by the number of smaller elements to its right (1).
    4 is replaced by the number of smaller elements to its right (1).
    9 is replaced by the number of smaller elements to its right (6 1 -> 2).
    6 is replaced by the number of smaller elements to its right (1).
    1 is replaced by the number of smaller elements to its right (0).
    Result: 1 1 2 1 0
    :param array: The initial array.
    :return: the array of counts of smaller elements to the right
    """
    # Straightforward O(n^2) scan; sufficient for the puzzle's input sizes.
    return [sum(1 for later in array[i + 1:] if later < current)
            for i, current in enumerate(array)]
999,943 | 30632aa9235364a2498156d36858efb24f11a8aa | import matplotlib.pylab as plt
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
# Gluon PDF xg(x, Q^2) at Q = 10 GeV from three PDF sets; each .dat file
# holds (x, xg) columns.
data_0,data_1 = np.loadtxt("xgVSx_CTEQ6L_Q10.dat", usecols=(0,1), unpack=True)
data_2,data_3 = np.loadtxt("xgVSx_CT14_Q10.dat", usecols=(0,1), unpack=True)
data_4,data_5 = np.loadtxt("xgVSx_MMHT_Q10.dat", usecols=(0,1), unpack=True)
x1=data_0
# All curves are scaled by 0.0025 to fit the fixed y-range (see y-label).
y1=0.0025*data_1
x2=data_2
y2=0.0025*data_3
x3=data_4
y3=0.0025*data_5
font = {'family': 'serif','color': 'black', 'weight': 'normal', 'size': 16,}
plt.subplots_adjust(left=0.13, bottom=0.11, right=0.98, top=0.95, wspace=0, hspace=0)
# Logarithmic x axis: the PDFs span several decades in x.
plt.semilogx()
#plt.grid(True)
plt.xlim(0.00001,1)
plt.ylim(0,1.0)
plt.plot(x1,y1, 'k-', linewidth=2.8, label='CTEQ6L')
plt.plot(x2,y2, 'b--', linewidth=2.8, label='CT14')
plt.plot(x3,y3, 'r:', linewidth=2.8, label='MMHT')
plt.title('$Q=10$ GeV', fontdict=font)
plt.xlabel(r"$x$", fontsize=20)
plt.ylabel(r"$xg(x,Q^{2}) [\times 0.0025]$", fontsize=20)
leg = plt.legend(loc=1, ncol=1, shadow=True, fancybox=True, numpoints=1, frameon=True)
leg.get_frame().set_alpha(0.5)
# Save vector output for publication, then display interactively.
plt.savefig("xgVSx_Q10v2.eps")
plt.show()
|
999,944 | 48251a6b3c1fa6b628fd09d66c6ad512a637e0b0 | # -*- coding:utf-8 _*-
import pandas as pd
from sklearn.preprocessing import StandardScaler, LabelEncoder
import lightgbm as lgb
import time
from gen_feas import load_data
import matplotlib.pyplot as plt
# Assemble train/test matrices from the engineered feature set.
train, test, no_features, features = load_data()
X = train[features].values
y = train['target'].astype('int32')
test_data = test[features].values
print(X.shape)
# Standardise features; the scaler is fit on train only and reused for test.
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
test_data = scaler.transform(test_data)
# Conservative GBDT settings for a small binary-classification problem.
params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'max_depth': 4,
    'objective': 'binary',
    'metric': {'auc'},
    'num_leaves': 16,
    'learning_rate': 0.01,
    'feature_fraction': 0.8,
    'bagging_fraction': 0.7,
    'random_state': 1024,
    'n_jobs': -1,
}
trn_data = lgb.Dataset(X, y)
# NOTE(review): the "validation" set is the training set itself, so early
# stopping monitors training AUC — confirm this is intentional.
val_data = lgb.Dataset(X, y)
num_round = 550
model = lgb.train(params,
                  trn_data,
                  num_round,
                  valid_sets=[trn_data, val_data],
                  verbose_eval=10,
                  early_stopping_rounds=100,
                  feature_name=features)
# Predicted class-1 probability for the test rows.
r = model.predict(test_data, num_iteration=model.best_iteration)
lgb.plot_importance(model, max_num_features=20)
plt.show()
test['target'] = r
test[['id', 'target']].to_csv('result/lgb_all.csv', index=None)
999,945 | 68f9e4d53178d9d79647148d435c351c2e55764a | import pickle
import numpy as np
from mlgame.communication import ml as comm
import os.path as path
# cd C:\Users\user\Desktop\課程\大二下\基於遊戲的機器學習\MLGame-master
# python MLGame.py -i Data.py pingpong HARD 10
# 'frame': 10, 'status': 'GAME_ALIVE', 'ball': (35, 143), 'ball_speed': (-7, 7), 'platform_1P': (35,420), 'platform_2P': (35, 50),
# 'blocker': (110, 240), 'command_1P': 'MOVE_LEFT', 'command_2P': 'MOVE_LEFT'}
# Data = [Commands, Balls, Ball_speed, PlatformPos, Blocker, vectors, direction]
#...............Start the game...............#
def ml_loop(side: str):
    """Pingpong ML client: drive the 1P platform with a pickled random forest.

    side: which player this process controls (supplied by the MLGame framework).
    """
    # === Here is the execution order of the loop === #
    # 1. Put the initialization code here.
    ball_served = False
    filename = path.join(path.dirname(__file__),
                         "random_forest.pickle")
    with open(filename, 'rb') as file:
        clf = pickle.load(file)
    # 2. Inform the game process that ml process is ready before start the loop.
    comm.ml_ready()
    # Previous platform position; used to derive the per-frame movement vector.
    s = [93, 93]
    def get_direction(ball_x, ball_y, ball_pre_x, ball_pre_y):
        # Quantise the movement vector into 5 classes:
        # 0-3 are the four diagonal quadrants, 4 is axis-aligned or no movement.
        VectorX = ball_x - ball_pre_x
        VectorY = ball_y - ball_pre_y
        if(VectorX > 0 and VectorY > 0):
            return 0
        elif(VectorX > 0 and VectorY < 0):
            return 1
        elif(VectorX < 0 and VectorY > 0):
            return 2
        elif(VectorX < 0 and VectorY < 0):
            return 3
        else:
            return 4
    # 3. Start an endless loop.
    while True:
        # 3.1. Receive the scene information sent from the game process.
        scene_info = comm.recv_from_game()
        #Data = [Commands, Balls, Ball_speed, PlatformPos, Blocker, vectors, direction]
        #Feature = [Balls, Ball_speed, PlatformPos, Blocker, direction]
        # 7-element feature vector: platform x/y interleaved with blocker x/y,
        # then the movement delta and the quantised direction class.
        feature = []
        for i in range(0, 2):
            # feature.append(scene_info["ball"][i])
            # feature.append(scene_info["ball_speed"][i])
            feature.append(scene_info["platform_1P"][i])
            feature.append(scene_info["blocker"][i])
        feature.append(feature[0] - s[0])
        feature.append(feature[1] - s[1])
        feature.append(get_direction(feature[0], feature[1], s[0], s[1]))
        s = [feature[0], feature[1]]
        # print(feature)
        feature = np.array(feature)
        # NOTE(review): numpy treats any negative entry as the inferred
        # dimension, so this yields shape (1, 7); prefer -1 for clarity.
        feature = feature.reshape((-2, 7))
        # 3.2. If the game is over or passed, the game process will reset
        #      the scene and wait for ml process doing resetting job.
        if scene_info["status"] != "GAME_ALIVE":
            # Do some stuff if needed
            ball_served = False
            # 3.2.1. Inform the game process that ml process is ready
            comm.ml_ready()
            continue
        # 3.3. Put the code here to handle the scene information
        # 3.4. Send the instruction for this frame to the game process
        if not ball_served:
            comm.send_to_game(
                {"frame": scene_info["frame"], "command": "SERVE_TO_LEFT"})
            ball_served = True
        else:
            # Classifier output: 0 = stay, 1 = move right, 2 = move left.
            y = clf.predict(feature)
            if y == 0:
                comm.send_to_game(
                    {"frame": scene_info["frame"], "command": "NONE"})
                print('NONE')
            elif y == 1:
                comm.send_to_game(
                    {"frame": scene_info["frame"], "command": "MOVE_RIGHT"})
                # NOTE(review): log text contradicts the command sent
                # (MOVE_RIGHT vs 'LEFT') — confirm which is correct.
                print('LEFT')
            elif y == 2:
                comm.send_to_game(
                    {"frame": scene_info["frame"], "command": "MOVE_LEFT"})
                print('RIGHT')
999,946 | ec3559250a4c123bfeee3090da0e133655a97399 | import requests
# Base URL of the locally running video REST API.
URL = 'http://127.0.0.1:5000/'
# To get a single request
# Seed three videos (PUT creates/overwrites video/<i>), then exercise
# delete and get on id 1.
data = [{'name': 'Neural Network programming with Pytorch', 'views': 152000, 'likes': 14500},
        {'name': 'Natural Language Processing', 'views': 120300, 'likes': 12662},
        {'name': 'Python OOPs concepts', 'views': 1111020, 'likes': 10220}]
for i in range(len(data)):
    response = requests.put(URL + 'video/' + str(i), data[i])
    print(response.json())
response = requests.delete(URL + 'video/1')
print(response.json())
# Expect an error-style payload here: video 1 was just deleted.
response = requests.get(URL + 'video/1')
print(response.json())
999,947 | cbf50e5dcd5c62ce3b882ef9ff358c41908fbbe7 | #!/usr/bin/env python
# encoding: utf-8
"""
__author__ = 'FireJohnny'
@license: Apache Licence
@file: data_clean.py
@time: 2018/6/3 17:07
"""
import csv
import codecs
import jieba as jb
import random
import sys
# sys.setdefaultencoding("utf-8")
import pandas as pd
import numpy as np
def read_data(file_dir):
    """Load the ATEC sentence-pair TSV file and tokenise both sentences.

    Each line is: index \\t sentence1 \\t sentence2 \\t label.

    Returns:
        (_index, text_1, text_2, max_len, labels) where text_1/text_2 hold
        space-joined jieba tokens and max_len is the longest token sequence.
    """
    # Keep the product names as single tokens when segmenting.
    jb.add_word("花呗")
    jb.add_word("借呗")
    _index = []
    text_1 = []
    text_2 = []
    # label = []
    labels = []
    with codecs.open(file_dir,encoding="utf-8") as f:
        file_text = f.readlines()
        for i in file_text:
            # Strip the BOM some exports carry before splitting on tabs.
            _in,t_1,t_2, _l = i.replace("\ufeff","").strip().split("\t")
            _index.append(_in)
            text_1.append(cut_word(t_1))
            text_2.append(cut_word(t_2))
            labels.append( _l)
            # if int(_l) ==0:
            #     label.append([0,1]) #[0,1] : label = 0
            # else :
            #     label.append([1,0]) #[1, 0 ] : label = 1
    # Longest tokenised sentence across both columns.
    max_len = max([len(t) for t in (text_1+text_2)])
    text_1 = [" ".join(t) for t in text_1 ]
    text_2 = [" ".join(t) for t in text_2 ]
    return _index,text_1,text_2,max_len,labels
    pass
def balance_data(t1, t2, label_1, n_neg=25000):
    """Balance a binary sentence-pair dataset.

    Keeps every positive pair (label 1) and randomly samples *n_neg* negative
    pairs (label 0). The default of 25000 preserves the original hard-coded
    behaviour; callers may now pass a different sample size.

    Args:
        t1, t2: parallel sequences of sentences.
        label_1: per-pair labels; int(label) == 1 marks a positive pair.
        n_neg: number of negative examples to sample (must not exceed the
            number of available negatives, per random.sample semantics).

    Returns:
        (text1, text2, labels): positives first, then the sampled negatives;
        labels are one-hot pairs, [0, 1] for positive and [1, 0] for negative.
    """
    positive_idx = []
    negative_idx = []
    text1 = []
    text2 = []
    for i in range(len(label_1)):
        if int(label_1[i]) == 0:
            negative_idx.append(i)
        else:
            positive_idx.append(i)
    # Down-sample the (much larger) negative class without replacement.
    sampled_negatives = random.sample(negative_idx, n_neg)
    for i in positive_idx:
        text1.append(t1[i])
        text2.append(t2[i])
    for i in sampled_negatives:
        text1.append(t1[i])
        text2.append(t2[i])
    label1 = [[0, 1] for _ in range(len(positive_idx))]
    label0 = [[1, 0] for _ in range(len(sampled_negatives))]
    return text1, text2, label1 + label0
def new_data_process(t1,t2,labels):
    # TODO: unimplemented placeholder — no processing is performed yet.
    pass
def read_out_file(file_dir):
    """Load an unlabeled ATEC TSV file (index \\t sentence1 \\t sentence2).

    Same parsing as read_data but without the label column; returns
    (_index, text_1, text_2) with space-joined jieba tokens.
    """
    # Keep the product names as single tokens when segmenting.
    jb.add_word("花呗")
    jb.add_word("借呗")
    _index = []
    text_1 = []
    text_2 = []
    with codecs.open(file_dir,encoding="utf-8") as f:
        file_text = f.readlines()
        for i in file_text:
            # Strip the BOM some exports carry before splitting on tabs.
            _in,t_1,t_2 = i.replace("\ufeff","").strip().split("\t")
            _index.append(_in)
            text_1.append(cut_word(t_1))
            text_2.append(cut_word(t_2))
    text_1 = [" ".join(t) for t in text_1 ]
    text_2 = [" ".join(t) for t in text_2 ]
    return _index,text_1,text_2,
def cut_word(_text):
    """Segment *_text* with jieba and return the tokens as a list."""
    return list(jb.cut(_text))
def creat_batch(text1, text2, labels, batch_size=64, random_data=True):
    """Yield mini-batches of (text1, text2, labels).

    Args:
        text1, text2: parallel sequences of encoded sentences.
        labels: array-like labels aligned with the texts (numpy fancy-indexable
            when random_data is True).
        batch_size: examples per batch; the final batch may be smaller.
        random_data: shuffle all three sequences in unison before batching.

    Yields:
        (batch_text1, batch_text2, batch_labels) tuples.
    """
    data_len = len(text1)
    num_batch_per_epoch = int((data_len - 1) / batch_size) + 1
    if random_data:
        shuffle_indices = np.random.permutation(np.arange(data_len))
        batch_text1 = np.array(text1)[shuffle_indices]
        batch_text2 = np.array(text2)[shuffle_indices]
        batch_labels = labels[shuffle_indices]
    else:
        # BUG FIX: the non-shuffled path previously referenced the undefined
        # shuffle_* names and raised NameError; serve the data in order instead.
        batch_text1 = np.array(text1)
        batch_text2 = np.array(text2)
        batch_labels = labels
    for batch in range(num_batch_per_epoch):
        start_index = batch * batch_size
        end_index = min((batch + 1) * batch_size, data_len)
        yield (batch_text1[start_index:end_index],
               batch_text2[start_index:end_index],
               batch_labels[start_index:end_index])
if __name__ == '__main__':
    # Smoke-test the loader against the local ATEC training file.
    f_dir = "F:/game/atec/atec_nlp_sim_train.csv"
    f_dir_2 = "F:/game/atec/atec_nlp_sim_train_add.csv"
    read_data(f_dir)
    pass
|
999,948 | 2fd9fe8864a1e616c5b29dafa0931c68bb60d2e0 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Port to userbot by @MoveAngel
import datetime
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from userbot import bot, CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern="^.sg(?: |$)(.*)")
async def lastname(steal):
    """.sg — forward the replied-to message to @SangMataInfo_bot and echo its
    name-history report back into the chat."""
    if steal.fwd_from:
        return
    if not steal.reply_to_msg_id:
        await steal.edit("```Reply to any user message.```")
        return
    reply_message = await steal.get_reply_message()
    if not reply_message.text:
        await steal.edit("```reply to text message```")
        return
    chat = "@SangMataInfo_bot"
    sender = reply_message.sender
    if reply_message.sender.bot:
        await steal.edit("```Reply to actual users message.```")
        return
    await steal.edit("```Sit tight while I steal some data from NASA```")
    async with bot.conversation(chat) as conv:
        try:
            # 461843263 — presumably @SangMataInfo_bot's user id; the event is
            # registered before forwarding so the reply is not missed.
            response = conv.wait_event(events.NewMessage(incoming=True,from_users=461843263))
            await bot.forward_messages(chat, reply_message)
            response = await response
        except YouBlockedUserError:
            await steal.reply("```Please unblock @sangmatainfo_bot and try again```")
            return
        # A reply starting with "Forward" means the target's forward-privacy
        # settings hid the original sender from the bot.
        if response.text.startswith("Forward"):
            await steal.edit("```can you kindly disable your forward privacy settings for good?```")
        else:
            await steal.edit(f"{response.message.message}")
@register(outgoing=True, pattern="^.fakemail(?: |$)(.*)")
async def pembohong(fake):
    """.fakemail — forward the replied-to message to @fakemailbot and echo its
    response back into the chat."""
    if fake.fwd_from:
        return
    if not fake.reply_to_msg_id:
        await fake.edit("```Reply to any user message.```")
        return
    reply_message = await fake.get_reply_message()
    if not reply_message.text:
        await fake.edit("```reply to text message```")
        return
    chat = "@fakemailbot"
    sender = reply_message.sender
    if reply_message.sender.bot:
        await fake.edit("```Reply to actual users message.```")
        return
    await fake.edit("```Sit tight while I sending some data from Microsoft```")
    async with bot.conversation(chat) as conv:
        try:
            # 177914997 — presumably @fakemailbot's user id; register the
            # listener before forwarding so the reply is not missed.
            response = conv.wait_event(events.NewMessage(incoming=True,from_users=177914997))
            await bot.forward_messages(chat, reply_message)
            response = await response
        except YouBlockedUserError:
            await fake.reply("```Please unblock @fakemailbot and try again```")
            return
        # A reply starting with "send" indicates forward-privacy blocked the bot.
        if response.text.startswith("send"):
            await fake.edit("```can you kindly disable your forward privacy settings for good?```")
        else:
            await fake.edit(f"{response.message.message}")
CMD_HELP.update({
"mata mata":
".sg \
\nUsage: Steal ur or friend name.\
\n\n.fakemail\
\nUsage: Fake an email to ur friends or someone."
}) |
999,949 | 7a416f6ec553be0dcfec10080c35eadd70453708 | str1 = "I love python"
chars = []
for i in str1:
#chars.append(i)
chars += i
print(chars)
# Currently there is a string called str1.
# Write code to create a list called chars
# which should contain the characters from str1.
# Each character in str1 should be its own element
# in the list chars. |
999,950 | 9ec7c6c4d6b2e396cec15f64c43188d468ae0865 | class Solution:
def backspace_compare(self, str1, str2):
str1, str2 = self._helper(str1), self._helper(str2)
return str1 == str2
def _helper(self, s):
# while "#" in s:
# i = s.index("#")
# s = s[:i-1] + s[i+1:] if i >0 else s[i+1:]
# return s
stack = []
for ele in s:
# if ele != "#":
# stack.append(ele)
# elif stack:
# stack.pop()
if stack:
stack.append(ele) if ele != "#" else stack.pop
return stack
# Quick manual check: both strings reduce to "c", so this prints True.
S = "a##c"
T = "#a#c"
sol = Solution()
print(sol.backspace_compare(S , T))
999,951 | ffdd0c55d61a34d2fdad392c1bb9aa523ba529d5 | import requests
import json
# Query the NREL alternative-fuel-stations API for the station nearest Denver, CO.
# NOTE(review): the API key is hard-coded in the URL — move it to an env var
# before sharing or committing this script.
resp = requests.get('https://developer.nrel.gov/api/alt-fuel-stations/v1/nearest.json?api_key=VWJLTUJMHcLn25INdExtx46gCX9lowhtFRXdvCRm&location=Denver+CO')
print(resp.text)
|
999,952 | 415e9374c2748aadfc681fe8eb279d60865143d9 | """ThreeUpShow is a variant of ShowBase that defines three cameras covering
different parts of the window."""
__all__ = ['ThreeUpShow']
from .ShowBase import ShowBase
class ThreeUpShow(ShowBase):
    """ShowBase variant that splits the window into three display regions."""
    def __init__(self):
        ShowBase.__init__(self)
    def makeCamera(self, win, sort = 0, scene = None,
                   displayRegion = (0, 1, 0, 1), stereo = None,
                   aspectRatio = None, clearDepth = 0, clearColor = None,
                   lens = None, camName = 'cam', mask = None,
                   useCamera = None):
        # Overrides ShowBase.makeCamera but ignores most arguments: it always
        # builds the fixed layout — right half (camRS, narrowed aspect),
        # lower-left quadrant (camLL), upper-left quadrant (camUR).
        self.camRS = ShowBase.makeCamera(
            self, win, displayRegion = (.5, 1, 0, 1), aspectRatio=.67, camName='camRS')
        self.camLL = ShowBase.makeCamera(
            self, win, displayRegion = (0, .5, 0, .5), camName='camLL')
        self.camUR = ShowBase.makeCamera(
            self, win, displayRegion = (0, .5, .5, 1), camName='camUR')
        # NOTE(review): the upper-right... rather upper-left camera is the one
        # handed back to the caller — presumably it becomes the default camera;
        # confirm in ShowBase.
        return self.camUR
|
999,953 | cdef6f47adf2fd3cdfb1e2f59974df16d5b57323 | import pymel.core as pm
import lcGeneric as gen
import pickle
def hookOnCurve(tangent = False):
    """Pin every selected object (all but the last selection) to the last-selected curve.

    Selection order: objects..., curve. Each object's translate is driven by a
    pointOnCurveInfo node locked at the parameter nearest its current position.
    tangent -- additionally aim each object along the curve with a tangentConstraint.
    """
    sel = pm.ls (sl=True)
    crv=sel[-1]
    # Temporary sampler: a group's world position is fed into a
    # nearestPointOnCurve node to read back the closest curve parameter.
    sampleNPoC = pm.createNode ('nearestPointOnCurve')
    sampleGrpA = pm.group (empty=True)
    crv.worldSpace[0] >> sampleNPoC.inputCurve
    sampleGrpA.translate >> sampleNPoC.inPosition
    for obj in sel[:-1]:
        # Move the sampler to the object's world position to query the parameter.
        wp= pm.xform (obj, t=True, ws=True, q=True)
        pm.xform (sampleGrpA, t=wp, ws=True)
        hookPoci = pm.createNode ('pointOnCurveInfo')
        crv.worldSpace[0] >> hookPoci.inputCurve
        hookPoci.position >> obj.translate
        hookPar = sampleNPoC.parameter.get()
        hookPoci.parameter.set(hookPar)
        if tangent:
            pm.tangentConstraint (crv, obj, aimVector=(-1, 0, 0),upVector=(0,1, 0),worldUpType="vector",worldUpVector =(0, 0, 1))
    # Remove the temporary sampler nodes.
    pm.delete (sampleNPoC, sampleGrpA)
def attachObj (obj, mesh, u, v, mode=1):
    """Attach *obj* to *mesh* (poly or NURBS) at UV (u, v) via a follicle.

    mode -- 1: parent obj under the follicle,
            2: parentConstraint (maintain offset),
            3: pointConstraint (maintain offset),
            4: parentConstraint (snap, no offset).
    Returns the follicle transform.
    """
    foll = pm.createNode ('follicle')
    follDag = foll.firstParent()
    mesh.worldMatrix[0] >> foll.inputWorldMatrix
    # Poly meshes feed outMesh; NURBS surfaces feed their local geometry.
    if pm.objectType (mesh) == 'mesh':
        mesh.outMesh >> foll.inputMesh
    else:
        mesh.local >> foll.inputSurface
    foll.outTranslate >> follDag.translate
    foll.outRotate >> follDag.rotate
    # Lock so only the follicle drives the transform.
    follDag.translate.lock()
    follDag.rotate.lock()
    follDag.parameterU.set (u)
    follDag.parameterV.set (v)
    if mode==1:
        pm.parent (obj, follDag)
    elif mode==2:
        pm.parentConstraint (follDag, obj, mo=True)
    elif mode==3:
        pm.pointConstraint (follDag, obj, mo=True)
    elif mode==4:
        pm.parentConstraint (follDag, obj, mo=False)
    return follDag
def hookOnMesh(mode = 3):
    """Attach every selected object (all but the last selection) to the
    last-selected mesh/surface at its closest UV, using attachObj.

    mode -- forwarded to attachObj (default 3: pointConstraint, maintain offset).
    """
    sel = pm.ls (sl=True)
    mesh = sel[-1]
    meshShape = pm.listRelatives (mesh, s=True)[0]
    # Poly meshes and NURBS surfaces need different closest-point nodes.
    if pm.objectType (mesh) == 'mesh':
        cpom =pm.createNode ('closestPointOnMesh')
        sampleGrpA = pm.group (empty=True)
        meshShape.worldMesh >> cpom.inMesh
    else:
        cpom =pm.createNode ('closestPointOnSurface')
        sampleGrpA = pm.group (empty=True)
        meshShape.worldSpace[0] >> cpom.inputSurface
    sampleGrpA.translate >> cpom.inPosition
    pm.parent (sampleGrpA, mesh)
    for obj in sel[:-1]:
        print obj
        #objShape = obj.getShape()[0]
        # Move the sampler to the object's world position, read the closest UV.
        pos = pm.xform (obj, q=True, ws=True, t=True)
        pm.xform (sampleGrpA, ws=True, t=pos)
        closestU =cpom.parameterU.get()
        closestV = cpom.parameterV.get()
        print closestU
        print closestV
        attachObj (obj, mesh, closestU, closestV, mode)
    # Remove the temporary sampler nodes.
    pm.delete (cpom, sampleGrpA)
#olho
def eyeLidJnts(eyeCenter,eyeUp, verts):
    """Create an aim-able eyelid joint chain per vertex.

    For every vertex in *verts*: a base joint at the eye centre plus a tip
    joint on the vertex, oriented down the chain, and an aim locator that the
    base joint is aim-constrained to (world-up read from *eyeUp*).
    """
    # select the eyelid vertices (translated from Portuguese)
    center = pm.PyNode (eyeCenter)
    # NOTE(review): uses the local translate of eyeCenter as the chain origin —
    # assumes eyeCenter is parented to world/root space; confirm.
    centerPos = center.translate.get()
    for vert in verts:
        pos = vert.getPosition(space='world')
        pm.select (cl=True)  # clear selection so pm.joint starts a new chain
        jntBase = pm.joint (p=centerPos)
        jnt = pm.joint (p=pos)
        pm.joint( jntBase, e=True, zso=True, oj='xyz', sao='yup')
        loc = pm.spaceLocator (p=[0,0,0], n=jnt.name()+'Aim_loc')
        loc.translate.set(pos)
        pm.aimConstraint ( loc, jntBase,aim=(1,0,0), u=(0,1,0),wut='objectrotation', wu=(0,1,0), wuo=eyeUp)
#eyesdir
def eyeDirRig():
    """Build an FK + aim eye-rotation rig from a head joint and two eye spheres.

    Selection order: head joint first, then the two eye geometry spheres.
    For each eye: stacked groups (result / FK / aim / deform), an FK control
    and an aim target, then all aim targets are gathered under one master
    control.
    """
    sel = pm.ls (sl=True)
    if not len(sel)==3:
        print 'selecione joint da cabeca e as esferas do olho'
    else:
        print sel
        for obj in sel[1:3]:
            # use a throwaway cluster to find the eye geometry's pivot point
            cls = pm.cluster (obj)
            loc = pm.group(empty=True, n='eyeRot_grp')
            pos = pm.xform (cls, q=True, ws=True, rp=True)
            pm.delete (cls)
            loc.translate.set(pos)
            loc1= loc.duplicate (n='eyeRotFk_grp')[0]
            loc2= loc.duplicate (n='eyeRotAim_grp')[0]
            loc3= loc.duplicate (n='eyeRotDeform_grp')[0]
            loc.rotate >> loc3.rotate
            # NOTE(review): duplicate of the line above — reconnecting the same
            # plugs is a no-op; one of these was perhaps meant for another
            # group. Confirm before removing.
            loc.rotate >> loc3.rotate
            pm.orientConstraint (loc1,loc2,loc)
            # build controls (translated: "faz controles")
            cntrlFk = gen.createCntrl (loc1.name(),loc1.name(),.5, 'ponteiroReto', 1)
            pm.orientConstraint (cntrlFk,loc1, mo=True)
            cntrlAim = gen.createCntrl (loc2.name(),loc2.name(),.5, 'circuloZ', 1)
            aim = pm.PyNode(cntrlAim)
            aim.translate.set([0,0,2])
            aim.rename ('eyeAimTgt')
            pm.aimConstraint ( aim, loc2,aim=(0,0,1), u=(0,1,0),wut='objectrotation', wu=(1,0,0), wuo=sel[0])
        # gather every aim target under one shared master control
        aimList = pm.ls ('eyeAimTgt*', type='transform')
        print aimList
        aimGrp = pm.group(empty=True, n='eyeAim_grp')
        tmp = pm.pointConstraint (aimList,aimGrp)
        pm.delete (tmp)
        cntrlAimGrp = gen.createCntrl (aimGrp.name(),aimGrp.name(),1, 'circuloX', 1)
        pm.parent (aimList, cntrlAimGrp)
#dummy cntrsls
def copyDrvCntrls():
    """Duplicate selected driver controls into cntrls_grp with an inverse offset.

    Each duplicate drives the original's translate/rotate/scale directly; an
    extra Offset parent negates the duplicate's translate (multiplyDivide by
    -1) so the control visually stays in place while driving.
    """
    sel = pm.ls (sl=True)
    cntrlGrp = pm.group (empty=True, n='cntrls_grp')
    for obj in sel:
        grp = obj.listRelatives (p=True)[0]
        print grp
        grp2 = grp.duplicate()[0]
        obj2 = grp2.listRelatives (c=True)[0]
        print obj2
        nm=obj2.name()
        # offset node: a shape-less, child-less copy of the control above it
        off = obj2.duplicate (n=nm+'Offset')[0]
        shp= off.getShape()
        pm.delete (shp)
        child=off.listRelatives (c=True)
        pm.delete (child)
        pm.parent (obj2, off)
        pm.parent (grp2, cntrlGrp)
        # negate the control's translate so moving it cancels out visually
        mlt = pm.createNode ('multiplyDivide')
        print mlt
        mlt.input2.set([-1,-1,-1])
        obj2.translate >> mlt.input1
        mlt.output >> off.translate
        obj2.translate >> obj.translate
        obj2.rotate >> obj.rotate
        obj2.scale >> obj.scale
#dummy2
def copyDrvCntrlsNoOffset():
    """Like copyDrvCntrls, but without the inverse-translate offset node.

    Duplicates each selected control (with its parent group) under cntrls_grp
    and wires the duplicate's translate/rotate straight onto the original.
    """
    sel = pm.ls (sl=True)
    cntrlGrp = pm.group (empty=True, n='cntrls_grp')
    for obj in sel:
        grp = obj.listRelatives (p=True)[0]
        print grp
        grp2 = grp.duplicate()[0]
        obj2 = grp2.listRelatives (c=True)[0]
        print obj2
        nm=obj2.name()
        #off = obj2.duplicate (n=nm+'Offset')[0]
        #shp= off.getShape()
        #pm.delete (shp)
        #child=off.listRelatives (c=True)
        #pm.delete (child)
        #pm.parent (obj2, off)
        pm.parent (grp2, cntrlGrp)
        #mlt = pm.createNode ('multiplyDivide')
        #print mlt
        #mlt.input2.set([-1,-1,-1])
        #obj2.translate >> mlt.input1
        #mlt.output >> off.translate
        obj2.translate >> obj.translate
        obj2.rotate >> obj.rotate
def saveCntrlsShape(filename= 'd:/cntrls.shp'):
    """Pickle the local CV positions of selected '_cntrl' nurbs curves to *filename*.

    Only selected transforms whose name contains '_cntrl' and whose shape is
    a nurbsCurve are stored. Restore with loadCntrlShape().
    """
    userSel = pm.ls (sl=True)
    sel=[x for x in userSel if '_cntrl' in x.name()]
    cntrlShapeDict={}
    for obj in sel:
        print obj
        if pm.nodeType (obj.getShape())=='nurbsCurve':
            pointList=[]
            # store each CV's local-space position, in index order
            for i in range (len (obj.cv)):
                pointList.append (pm.pointPosition (obj.cv[i], l=True))
            cntrlShapeDict[obj]=pointList
    with open(filename, 'wb') as f:
        pickle.dump(cntrlShapeDict, f)
def loadCntrlShape(filename= 'd:/cntrls.shp'):
    """Restore control CV positions previously saved by saveCntrlsShape.

    NOTE(review): unpickling re-instantiates PyNode keys, so the current scene
    must contain the same controls that were saved — confirm before use.
    """
    cntrlShapeDict={}
    print cntrlShapeDict
    with open(filename, 'rb') as f:
        cntrlShapeDict = pickle.load(f)
    print cntrlShapeDict
    for obj in cntrlShapeDict:
        print obj
        # write each saved local position back onto the matching CV
        for i in range (len (obj.cv)):
            pm.xform (obj.cv[i], t=cntrlShapeDict[obj][i])
def selectSkinJoints():
    """Select the joints driving the skinCluster of the first selected object."""
    sel = pm.ls (sl=True)
    if sel:
        objShp = sel[0].getShape()
        print objShp
        # the skinCluster is reached through the shape's objectSet connections
        setList = objShp.inputs(t='objectSet')
        for st in setList:
            x= st.inputs (t='skinCluster')
            if not x==[]:
                skinCls=x
        # NOTE(review): skinCls is never initialised — if no objectSet links to
        # a skinCluster this raises NameError instead of printing the error
        # message below; confirm intended.
        print skinCls
        if skinCls:
            jnts = skinCls[0].inputs (t='joint')
            pm.select (jnts)
        else:
            print 'ERRO: objeto nao tem skin'
    else:
        print 'ERRO:nenhum objeto selecionado'
def mirrorSetDriven():
    """Mirror set-driven-key connections from a selected L_ control to its R_ twin.

    Duplicates each anim curve driven by the selected L_ control, rewires the
    copy between the matching R_ driver plug and R_ blendShape target, and
    mirrors direct control->blendShape connections as well. Assumes matching
    R_ nodes/plugs already exist in the scene.
    """
    # Mirror contrl connections
    LCntrl = pm.ls (sl=True)[0]
    RCntrl = LCntrl.replace('L_','R_')  # NOTE(review): computed but never used
    crvList = LCntrl.outputs (t = 'animCurve')
    print crvList
    direct = LCntrl.outputs (t ='blendShape', p=True, c=True)
    print direct
    for crv in crvList:
        print crv
        plugIN = crv.outputs(t = 'transform', p=True)[0]
        print plugIN
        plugOUT = crv.inputs(t = 'blendShape', p=True)[0]
        print plugOUT
        newCrv = pm.duplicate (crv, n=crv.replace('L_', 'R_'))[0]
        print newCrv
        # swap the L_ prefix for R_ on both ends of the duplicated curve
        pm.connectAttr (plugIN.replace('L_','R_'), newCrv+'.input')
        pm.connectAttr (newCrv+'.output',plugOUT.replace('L_','R_'), f=True)
    if direct:
        # with c=True the list is flattened (source, destination) pairs
        for i in xrange(0,len(direct),2):
            OUT = direct[i].replace('L_','R_')
            IN = direct[i+1].replace('L_','R_')
            OUT >> IN
def cpSetDriven(ns):
    """Copy set-driven-key networks from referenced namespace *ns* onto selection.

    Usage: reference the file containing the setDriven curves under namespace
    *ns*, select the controls that should receive them, then call this.
    Both plain anim-curve connections and blendWeighted-combined ones are
    duplicated and rewired to the local (namespace-stripped) drivers.
    """
    # reference the file with the setDrivens to copy (translated)
    # select the controls that will receive the setDriven (translated)
    sel = pm.ls (sl=True)
    for tgt in sel:
        source = pm.PyNode (ns+':'+tgt)
        curveList = source.inputs(t='animCurve', c=True, p=True)
        blendList = source.inputs(t='blendWeighted', c=True, p=True, scn=True)
        # direct anim-curve -> control connections
        for crvPlug, crv in curveList:
            print crvPlug
            print crv
            newCurve = pm.duplicate (crv)[0]
            print newCurve
            newCurve.attr(crv.longName()) >> tgt.attr (crvPlug.longName())
            drivers = crv.node().inputs(scn=True, p=True)
            print drivers
            for drv in drivers:
                # strip the namespace to find the local driver plug
                newDriver = pm.PyNode (drv.split(':')[-1])
                newDriver >> newCurve.input
        # curves combined through a blendWeighted node
        for bldPlug, bld in blendList:
            print bldPlug, bld
            newBlend = pm.duplicate (bld)[0]
            newBlend.attr(bld.longName()) >> tgt.attr(bldPlug.longName())
            curveList = bld.node().inputs(t='animCurve', c=True, p=True, scn=True)
            print curveList
            for crvPlug, crv in curveList:
                print crvPlug
                print crv
                newCurve = pm.duplicate (crv)[0]
                print newCurve
                newCurve.attr(crv.longName()) >> newBlend.attr(crvPlug.longName())
                drivers = crv.node().inputs(scn=True, p=True)
                print drivers
                for drv in drivers:
                    newDriver = pm.PyNode (drv.split(':')[-1])
                    newDriver >> newCurve.input
#conecta e disconecta
def connect (blendNode):
    """Disconnect and immediately reconnect every weight input of *blendNode*.

    Appears intended to force Maya to re-evaluate the node's weight
    connections. NOTE(review): shadows no builtin but the generic name
    'connect' is easy to clash with — confirm callers.
    """
    obj = pm.PyNode (blendNode)
    conn = obj.inputs (c=True, p=True)
    print conn
    # first pass: break every incoming 'weight' connection
    for p, c in conn:
        print p, c
        if 'weight' in p.name() :
            print c, p
            c // p
    # second pass: restore them
    for p, c in conn:
        if 'weight' in p.name() :
            print c, p
            c >> p
def slideOnMesh ():
    """Make the first selected object's rig point slide across the last-selected mesh.

    A closestPointOnMesh node drives a follicle's UV from a sampler group, so
    the follicle tracks wherever the sampler is moved; a control is built at
    the object's position to move the sampler.
    """
    sel = pm.ls (sl=True)
    obj= sel[0]
    mesh = sel[-1]
    meshShape = pm.listRelatives (mesh, s=True)[0]
    cpom =pm.createNode ('closestPointOnMesh')
    sampleGrpA = pm.group (empty=True)
    sampleGrpA.translate >> cpom.inPosition
    meshShape.worldMesh >> cpom.inMesh
    # transform constrained to the mesh so the sampler follows mesh motion
    transf = pm.group (empty=True)
    pm.parentConstraint (mesh,transf , mo=False)
    pm.scaleConstraint (mesh,transf , mo=False)
    pm.parent (sampleGrpA, transf)
    foll = pm.createNode ('follicle')
    follDag = foll.firstParent()
    mesh.worldMatrix[0] >> foll.inputWorldMatrix
    mesh.outMesh >> foll.inputMesh
    foll.outTranslate >> follDag.translate
    foll.outRotate >> follDag.rotate
    follDag.translate.lock()
    follDag.rotate.lock()
    # the closest-point UV drives the follicle, so it tracks the sampler
    cpom.parameterU >> follDag.parameterU
    cpom.parameterV >> follDag.parameterV
    pos = pm.xform (obj, q=True, ws=True, t=True)
    pm.xform (sampleGrpA, ws=True, t=pos)
    cntrl = gen.doCntrl (sampleGrpA.name(),sampleGrpA.name(), 1, 'bola', 1)
pm.parentConstraint (cntrl ,sampleGrpA, mo=True) |
999,954 | cf17b61802dd25ca2ef98ce38add63dcf8fe0a24 | # Generated by Django 2.2.13 on 2021-08-13 15:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the games.Parameter model (per-user status and selected plan)."""

    dependencies = [
        ('games', '0003_auto_20210812_1530'),
    ]
    operations = [
        migrations.CreateModel(
            name='Parameter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): 'user' is a plain CharField, not a FK to the
                # auth user model — confirm this is intentional.
                ('user', models.CharField(blank=True, max_length=50, null=True)),
                ('status', models.IntegerField(default=0)),
                ('selected_plan', models.IntegerField(default=0)),
            ],
        ),
    ]
|
999,955 | f87022e804b3c09ecb99772298aaceafd0fa0034 | default_app_config = 'debug_permissions.apps.DebugPermissionsConfig'
|
999,956 | bb98b08759d0d4a819458292e536da198c96ddbc | import os
from algorithms.pgirl import solve_ra_PGIRL, solve_PGIRL, make_loss_function
from utils import compute_gradient, load_policy, filter_grads, estimate_distribution_params
import numpy as np
import re
import argparse
from trajectories_reader import read_trajectories
# SCRIPT TO RUN SINGLE IRL EXPERIMENTS USING ANY VERSION OF SIGMA-GIRL
NUM_ACTIONS = 4  # size of the discrete action space (NOTE(review): unused in this file)
GAMMA = 0.99  # discount factor passed to compute_gradient
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # silence TensorFlow info/warning logs
def check_weights(w):
    """Return True iff *w* contains no NaNs and no entry within 1% of 1."""
    has_nan = bool(np.isnan(w).any())
    near_one = bool(np.isclose(w, 1, rtol=1e-2).any())
    return not (has_nan or near_one)
def set_global_seeds(seed):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(seed)
np.random.seed(seed)
# Reward feature names for the gridworld task, plus derived column labels:
# normalized features, post-transition ("primed") features, and the
# corresponding weight-column names.
features = ['fast_area', 'slow_area', 'goal']
features_norm = [x + '_norm' for x in features]
features_norm_after = [x + "'" for x in features_norm]
weights_features = [x + '_w' for x in features]
weights_features_normalized = [x + '_normalized' for x in weights_features]
weights_features_normalized_again = [x + '_' for x in weights_features_normalized]
def natural_sort(l):
    """Sort strings so embedded numbers compare numerically ('a2' < 'a10')."""
    def _token(text):
        return int(text) if text.isdigit() else text.lower()

    def _key(s):
        return [_token(part) for part in re.split('([0-9]+)', s)]

    return sorted(l, key=_key)
def PGIRL(demonstrations=None, model=None, grad_path=None, features_idx=None, normalize_f=False, save_grad=True,
          opt_iters=10, compute_jacobian=False, estimate_weights=None, num_episodes=-1, pickled=False,
          continuous=False, num_hidden=8, num_layers=0, agent_name=None):
    """Run one (Sigma-)GIRL inverse-RL experiment and return (logger, weights).

    Loads (or estimates via GPOMDP) per-episode policy gradients for the
    selected reward features, then recovers the reward weights either with
    plain PGIRL or with the rank-approximation (Sigma-GIRL) solver, depending
    on command-line flags.

    NOTE(review): relies on the module-level `args` namespace and the
    `gradient_path` / `EPISODE_LENGTH` globals assigned in __main__ — it
    cannot be called as a library function without them.
    """
    if features_idx is None:
        features_idx = [0, 1, 2]
    logger = {}
    # Read or Calculate Gradient
    if args.read_grads:
        # gradients were precomputed: load, then slice features/episodes
        if grad_path != '':
            print("Reading gradients from:", grad_path)
            estimated_gradients = np.load(grad_path, allow_pickle=True)
        else:
            estimated_gradients = np.load(gradient_path + "estimated_gradients.npy", allow_pickle=True)
        estimated_gradients = estimated_gradients[:, :, features_idx]
        if num_episodes > 0:
            estimated_gradients = estimated_gradients[:num_episodes, :, :]
        if args.filter_gradients:
            estimated_gradients = filter_grads(estimated_gradients, verbose=args.verbose)
    else:
        if pickled:
            # demonstrations stored as per-agent pickled dicts
            states_data = np.load(demonstrations + 'real_states.pkl', allow_pickle=True)
            actions_data = np.load(demonstrations + 'actions.pkl', allow_pickle=True)
            reward_data = np.load(demonstrations + 'rewards.pkl', allow_pickle=True)
            X_dataset = states_data[agent_name]
            y_dataset = actions_data[agent_name]
            r_dataset = reward_data[agent_name]
            print(np.sum(np.array(y_dataset)==1))
            # NOTE(review): leftover debugging pause — blocks until Enter is pressed
            input()
            dones_dataset = None
        else:
            # read trajectories
            X_dataset, y_dataset, _, _, r_dataset, dones_dataset = \
                read_trajectories(demonstrations, all_columns=True,
                                  fill_size=EPISODE_LENGTH,
                                  fix_goal=True,
                                  cont_actions=args.continuous or args.lqg)
            if num_episodes > 0:
                # truncate every dataset to the requested number of episodes
                X_dataset = X_dataset[:EPISODE_LENGTH * num_episodes]
                y_dataset = y_dataset[:EPISODE_LENGTH * num_episodes]
                r_dataset = r_dataset[:EPISODE_LENGTH * num_episodes]
                if dones_dataset is not None:
                    dones_dataset = dones_dataset[:EPISODE_LENGTH * num_episodes]
        X_dim = len(X_dataset[0])
        if continuous:
            y_dim = len(y_dataset[0])
        else:
            y_dim = 2
        # Create Policy
        linear = 'gpomdp' in model
        policy_train = load_policy(X_dim=X_dim, model=model, continuous=continuous, num_actions=y_dim, n_bases=X_dim,
                                   trainable_variance=args.trainable_variance, init_logstd=args.init_logstd,
                                   linear=linear, num_hidden=num_hidden, num_layers=num_layers)
        print('Loading dataset... done')
        # compute gradient estimation
        estimated_gradients, _ = compute_gradient(policy_train, X_dataset, y_dataset, r_dataset, dones_dataset,
                                                  EPISODE_LENGTH, GAMMA, features_idx,
                                                  verbose=args.verbose,
                                                  use_baseline=args.baseline,
                                                  use_mask=args.mask,
                                                  scale_features=args.scale_features,
                                                  filter_gradients=args.filter_gradients,
                                                  normalize_f=normalize_f)
    # ==================================================================================================================
    if save_grad:
        print("Saving gradients in ", gradient_path)
        np.save(gradient_path + 'estimated_gradients.npy', estimated_gradients)
    # solve PGIRL or Rank Approx PGIRL
    if args.girl:
        weights_girl, loss_girl = solve_PGIRL(estimated_gradients, verbose=args.verbose)
        estimate_weights = weights_girl
    if args.rank_approx:
        weights, loss, jacobian = solve_ra_PGIRL(estimated_gradients, verbose=args.verbose,
                                                 cov_estimation=args.cov_estimation, diag=args.diag,
                                                 identity=args.identity, num_iters=opt_iters,
                                                 compute_jacobian=compute_jacobian,
                                                 other_options=[False, False, args.masked_cov]
                                                 )
        if estimate_weights is not None or args.girl:
            # evaluate the Sigma-GIRL loss at externally supplied (or GIRL) weights
            mu, sigma = estimate_distribution_params(estimated_gradients=estimated_gradients,
                                                     diag=args.diag, identity=args.identity,
                                                     cov_estimation=args.cov_estimation,
                                                     girl=False, other_options=[False, False, args.masked_cov])
            id_matrix = np.identity(estimated_gradients.shape[1])
            lf = make_loss_function(mu, sigma, id_matrix)
            estimated_loss = lf(estimate_weights)
        if compute_jacobian:
            print("Jacobian Rank:")
            print(np.linalg.matrix_rank(jacobian))
            print("Jacobian s:")
            _, s, _ = np.linalg.svd(jacobian)
            print(s)
    else:
        weights, loss = solve_PGIRL(estimated_gradients, verbose=args.verbose)
    print("Weights:", weights)
    print("Loss:", loss)
    if args.girl:
        print("Weights Girl:", weights_girl)
        print("Loss Girl:", loss_girl)
    if estimate_weights is not None or args.girl:
        # NOTE(review): estimated_loss is only computed inside the rank_approx
        # branch; with --girl (or estimate_weights) but without --rank_approx
        # this line raises NameError — confirm.
        print("Loss in weights given:", estimated_loss)
    return logger, weights
if __name__ == '__main__':
    # Command-line front end: parse flags, seed RNGs, run one experiment
    # and persist the recovered reward weights next to the policy model.
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_layers', type=int, default=0, help='number of hidden layers of mlp')
    parser.add_argument('--num_hidden', type=int, default=8, help='number of hidden units per layer')
    parser.add_argument('--agent_name', type=str, default='', help='name of the agent')
    parser.add_argument('--out_dir', type=str, default='default', help='output_dir')
    parser.add_argument('--demonstrations', type=str, default='logs/', help='where to read demonstrations')
    parser.add_argument('--model_name', type=str, default='models/trpo/cont/gridworld/checkpoint_240',
                        help='path to policy to load')
    parser.add_argument('--features_idx', default='', type=str, help='commma separated indexes of the reward features'
                                                                     ' to consider, default: consider all')
    parser.add_argument('--estimate_weights', default='', type=str, help='estimate GIRL loss at the found weights')
    parser.add_argument('--debug', action='store_true', help='display debug info of the policy model')
    parser.add_argument('--verbose', action='store_true', help='log information in terminal')
    parser.add_argument('--ep_len', type=int, default=20, help='episode lengths')
    parser.add_argument('--num_episodes', type=int, default=-1)
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--beta', type=float, default=1.0, help='inverse of temperature of Boltzman policy')
    parser.add_argument('--opt_iters', type=int, default=25, help='number of optimization iterations')
    parser.add_argument('--save_grad', action='store_true', help='save the computed gradients')
    parser.add_argument('--pos_weights', action='store_true', help='constrain to positive weights')
    parser.add_argument('--compute_jacobian', action='store_true', help='compute the jacobian at the sigma=girl optimum')
    parser.add_argument('--mask', action='store_true', help='mask trajectories for baseline computation')
    parser.add_argument('--baseline', action='store_true', help='use a baseline for GPOMDP gradient computation')
    parser.add_argument('--scale_features', type=int, default=1)
    parser.add_argument('--filter_gradients', action='store_true', help='remove 0 rows of the jacobian')
    parser.add_argument('--continuous', action='store_true', help='the action space is continuous')
    parser.add_argument('--lqg', action='store_true', help='the demonstrations are from lqg env')
    parser.add_argument('--trainable_variance', action='store_true', help='fit the noise of the policy')
    parser.add_argument("--init_logstd", type=float, default=-1, help='initial noise of the model')
    parser.add_argument('--rank_approx', action='store_true', help='use sigma girl')
    parser.add_argument('--cov_estimation', action='store_true', help='Regularize the sample covariance matrix')
    parser.add_argument('--masked_cov', action='store_true', help='use block covariance model')
    parser.add_argument('--diag', action='store_true', help='use diagonal covariance model')
    parser.add_argument('--girl', action='store_true', help='use plain girl covariance model')
    parser.add_argument('--identity', action='store_true', help='use identity covariance model')
    parser.add_argument('--read_grads', action='store_true', help='read the precomputed gradients, avoiding gradient'
                                                                  ' computation')
    parser.add_argument('--pickled', action='store_true', help='wether the demonstration data are pickled or csv format')
    parser.add_argument('--grad_path', default='', type=str, help='path of gradients to read')
    args = parser.parse_args()
    EPISODE_LENGTH = args.ep_len
    if args.estimate_weights == '':
        estimate_weights = None
    else:
        estimate_weights = [float(x) for x in args.estimate_weights.split(',')]
    if args.features_idx == '':
        features_idx = None
    else:
        # BUG FIX: these values index numpy arrays
        # (estimated_gradients[:, :, features_idx]) and must be ints —
        # float indices raise IndexError.
        features_idx = [int(x) for x in args.features_idx.split(',')]
    # gradients are stored alongside the policy checkpoint
    out_dir = "/".join(args.model_name.split('/')[:-1])
    gradient_path = out_dir + "/gradients/"
    if args.save_grad:
        if not os.path.exists(gradient_path):
            os.makedirs(gradient_path)
    set_global_seeds(args.seed)
    log, weights = PGIRL(demonstrations=args.demonstrations, model=args.model_name, grad_path=args.grad_path,
                         features_idx=features_idx, save_grad=args.save_grad, opt_iters=args.opt_iters,
                         compute_jacobian=args.compute_jacobian, estimate_weights=estimate_weights,
                         num_episodes=args.num_episodes, pickled=args.pickled, continuous=args.continuous,
                         num_hidden=args.num_hidden, num_layers=args.num_layers,agent_name=args.agent_name)
    print("Weights:", weights)
    np.save(out_dir + '/weights.npy', weights)
|
999,957 | a4d6655893330e675cb0768f0dc85f680c97d1d5 | import copy
import logging
import re
import traitlets as traits
from nbconvert.preprocessors import Preprocessor
from nbformat.notebooknode import NotebookNode
class FinalCells(object):
    """Accumulates processed notebook cells and maintains slide-break metadata.

    As cells are appended, the previously appended *visible* cell's
    ``metadata.ipyslides`` tag is retro-adjusted so horizontal breaks know
    whether the preceding cell ends a column.
    """
    def __init__(self, header_slide):
        # header_slide: when True, a header gets its own slide and content
        # continues vertically below it (the "_plusvertical" tag variant)
        self.cells = []
        if header_slide:
            self.horizontalbreak_after = "horizontalbreak_after_plusvertical"
        else:
            self.horizontalbreak_after = "horizontalbreak_after"
    def mkdcell(self, source, metadata, slidetype):
        """Append a new markdown cell built from *source* lines, tagged *slidetype*."""
        # deep-copy so later mutation of the original cell's metadata cannot
        # leak into this synthesized cell
        meta = copy.deepcopy(metadata)
        meta.ipyslides = slidetype
        self.append(
            NotebookNode(
                {"cell_type": "markdown", "source": "\n".join(source), "metadata": meta}
            )
        )
    def append(self, cell):
        """Append *cell*, retro-tagging the previous visible cell when needed."""
        last = self.last()
        if not last:
            pass
        elif cell.metadata.ipyslides == "verticalbreak_after":
            pass  # last.metadata.ipyslides = 'verticalbreak_above'
        elif cell.metadata.ipyslides == self.horizontalbreak_after:
            # if last.metadata.ipyslides == 'before_header':
            #     last.metadata.ipyslides == 'between_headers'
            # a horizontal break re-tags its predecessor: either as the cell
            # before the break, or (for back-to-back headers) as a break with
            # no vertical continuation
            if not last.metadata.ipyslides == self.horizontalbreak_after:
                last.metadata.ipyslides = "horizontalbreak_before"
            else:
                last.metadata.ipyslides = "horizontalbreak_after_novertical"
        self.cells.append(cell)
    def first(self):
        """Return the first non-skip/notes cell, or False if there is none."""
        for cell in self.cells:
            if cell.metadata.ipyslides not in ["skip", "notes"]:
                return cell
        return False
    def last(self):
        """Return the last non-skip/notes cell, or False if there is none."""
        for cell in reversed(self.cells):
            if cell.metadata.ipyslides not in ["skip", "notes"]:
                return cell
        return False
    def finalize(self):
        """Tag the first/last visible cells; return False when nothing is visible."""
        if not self.first():
            return False
        if self.first().metadata.ipyslides == "normal":
            self.first().metadata.ipyslides = "first_cell"
        if self.last().metadata.ipyslides == "normal":
            self.last().metadata.ipyslides = "last_cell"
        return True
def is_header(line, max_level):
    """Return True when *line* is a markdown header of level <= *max_level*.

    A *max_level* of 0 accepts headers of any level.

    Examples
    --------
    >>> is_header("abc", 0)
    False
    >>> is_header("#", 0)
    False
    >>> is_header("# title", 0)
    True
    >>> is_header("### title", 3)
    True
    >>> is_header("### title", 2)
    False
    """
    if max_level:
        pattern = "^#{{1,{0}}} .+".format(max_level)
    else:
        pattern = "^#+ .+"
    return re.search(pattern, line) is not None
def header_level(line):
    """Return the number of leading '#' characters of *line*.

    Examples
    --------
    >>> header_level('# title')
    1
    >>> header_level('### title')
    3
    """
    count = 0
    for ch in line:
        if ch != "#":
            break
        count += 1
    return count
def number_title(line, current_levels):
    """Prefix a markdown header with a hierarchical section number.

    Returns the renumbered line and the updated level counters.

    Examples
    --------
    >>> number_title("# title", [])
    ('# 1. title', [1])
    >>> number_title("## title", [])
    ('## 1.1. title', [1, 1])
    >>> number_title("# title", [1, 1])
    ('# 2. title', [2])
    >>> number_title("## title", [2, 1])
    ('## 2.2. title', [2, 2])
    >>> number_title("### title a#bc", [2])
    ('### 2.1.1. title a#bc', [2, 1, 1])
    >>> number_title("### title a#bc", [2, 1, 2, 3])
    ('### 2.1.3. title a#bc', [2, 1, 3])
    """
    # header depth = count of leading '#'
    level = len(line) - len(line.lstrip("#"))
    if not level > 0:
        raise ValueError("level must be > 0: {}".format(level))
    if len(current_levels) >= level:
        # same or shallower header: truncate and bump the last counter
        current_levels = current_levels[:level]
        current_levels[-1] += 1
    else:
        # deeper header: open new sub-levels starting at 1
        while len(current_levels) < level:
            current_levels.append(1)
    hashes, title = line.split(" ", 1)
    numbers = ".".join(str(n) for n in current_levels) + "."
    return " ".join([hashes, numbers, title]), current_levels
class MarkdownSlides(Preprocessor):
    """ a preprocessor to setup the notebook as an ipyslideshow,
    according to a set of rules

    - markdown cells containaing # headers are broken into individual cells
    - any cells where ipub.ignore=True is set to 'skip'
    - any code cells with no other ipub tags are set to 'skip'
    - any header level >= column_level starts a new column
    - else, any header level >= row_level starts a new row
    - if max_cells is not 0, then breaks to a new row after <max_cells> cells
    """

    column_level = traits.Integer(
        1, min=0, help="maximum header level for new columns (0 indicates no maximum)"
    ).tag(config=True)
    row_level = traits.Integer(
        0, min=0, help="maximum header level for new rows (0 indicates no maximum)"
    ).tag(config=True)
    header_slide = traits.Bool(
        False,
        help=("if True, make the first header in a " "column appear on its own slide"),
    ).tag(config=True)
    max_cells = traits.Integer(
        0, min=0, help="maximum number of nb cells per slide (0 indicates no maximum)"
    ).tag(config=True)
    autonumbering = traits.Bool(
        False, help="append section numbering to titles, e.g. 1.1.1 Title"
    ).tag(config=True)

    def preprocess(self, nb, resources):
        """Rewrite nb.cells into slide-tagged cells; returns (nb, resources)."""
        logging.info("creating slides based on markdown and existing slide tags")
        # code cells carrying any of these ipub tags are kept; others skipped
        latexdoc_tags = ["code", "error", "table", "equation", "figure", "text"]
        # break up titles
        cells_in_slide = 0
        final_cells = FinalCells(self.header_slide)
        header_levels = []
        # optional starting section number from the notebook's toc metadata
        try:
            base_numbering = nb.metadata.toc.base_numbering
            header_levels = list(map(lambda x: int(x), base_numbering.split(".")))
            # decrement: number_title bumps the counter before first use
            header_levels[0] -= 1
            logging.debug("base_numbering = " + base_numbering)
            logging.debug("header_levels = " + str(header_levels))
        except ValueError:
            logging.warning("Invalid toc.base_numbering in notebook metadata")
        except AttributeError:
            logging.debug("No toc.base_numbering in notebook metadata; starting at 1")
        for i, cell in enumerate(nb.cells):
            # Make sure every cell has an ipub meta tag
            cell.metadata.ipub = cell.metadata.get("ipub", NotebookNode())
            if cell.metadata.ipub.get("ignore", False):
                cell.metadata.ipyslides = "skip"
                final_cells.append(cell)
                continue
            if cell.metadata.ipub.get("slide", False) == "notes":
                cell.metadata.ipyslides = "notes"
                final_cells.append(cell)
                continue
            if not cell.cell_type == "markdown":
                # TODO this doesn't test if the data is actually available
                # to be output
                if not any(
                    [cell.metadata.ipub.get(typ, False) for typ in latexdoc_tags]
                ):
                    cell.metadata.ipyslides = "skip"
                    final_cells.append(cell)
                    continue
                # non-markdown cell kept: decide whether it starts a new row
                if cells_in_slide > self.max_cells and self.max_cells:
                    cell.metadata.ipyslides = "verticalbreak_after"
                    cells_in_slide = 1
                elif cell.metadata.ipub.get("slide", False) == "new":
                    cell.metadata.ipyslides = "verticalbreak_after"
                    cells_in_slide = 1
                else:
                    cell.metadata.ipyslides = "normal"
                    cells_in_slide += 1
                final_cells.append(cell)
                continue
            # markdown cell: split at headers into multiple synthesized cells
            nonheader_lines = []
            for line in cell.source.split("\n"):
                if is_header(line, 0) and self.autonumbering:
                    line, header_levels = number_title(line, header_levels[:])
                if is_header(line, self.column_level):
                    # flush accumulated non-header lines before the column break
                    # NOTE(review): nonheader_lines is never cleared after being
                    # flushed (see the commented-out current_lines resets), and
                    # the flush only happens when the cell carries an ipub.slide
                    # tag — confirm whether duplicated/dropped content is intended.
                    if nonheader_lines and cell.metadata.ipub.get("slide", False):
                        if (
                            cells_in_slide > self.max_cells and self.max_cells
                        ) or cell.metadata.ipub.slide == "new":
                            final_cells.mkdcell(
                                nonheader_lines, cell.metadata, "verticalbreak_after"
                            )
                            cells_in_slide = 1
                        else:
                            cells_in_slide += 1
                            final_cells.mkdcell(
                                nonheader_lines, cell.metadata, "normal"
                            )
                    # current_lines = []
                    if self.header_slide:
                        final_cells.mkdcell(
                            [line], cell.metadata, "horizontalbreak_after_plusvertical"
                        )
                    else:
                        final_cells.mkdcell(
                            [line], cell.metadata, "horizontalbreak_after"
                        )
                    cells_in_slide = 1
                elif is_header(line, self.row_level):
                    # flush accumulated non-header lines before the row break
                    if nonheader_lines and cell.metadata.ipub.get("slide", False):
                        if (
                            cells_in_slide > self.max_cells and self.max_cells
                        ) or cell.metadata.ipub.slide == "new":
                            final_cells.mkdcell(
                                nonheader_lines, cell.metadata, "verticalbreak_after"
                            )
                            cells_in_slide = 1
                        else:
                            cells_in_slide += 1
                            final_cells.mkdcell(
                                nonheader_lines, cell.metadata, "normal"
                            )
                    # current_lines = []
                    final_cells.mkdcell([line], cell.metadata, "verticalbreak_after")
                    cells_in_slide = 1
                else:
                    nonheader_lines.append(line)
            # flush whatever non-header content remains after the last header
            if nonheader_lines and cell.metadata.ipub.get("slide", False):
                if (
                    cells_in_slide > self.max_cells and self.max_cells
                ) or cell.metadata.ipub.slide == "new":
                    final_cells.mkdcell(
                        nonheader_lines, cell.metadata, "verticalbreak_after"
                    )
                    cells_in_slide = 1
                else:
                    cells_in_slide += 1
                    final_cells.mkdcell(nonheader_lines, cell.metadata, "normal")
        if not final_cells.finalize():
            logging.warning("no cells available for slideshow")
        nb.cells = final_cells.cells
        return nb, resources
|
999,958 | 7d1818cdce02f30e89188ceb0986f10184c4fa8d | import cv2
import numpy as np
# Live SIFT feature matching: find the query image in the webcam feed and
# draw its projected outline (homography) on each frame. Exit with Esc.
MIN_MATCH_COUNT = 10  # minimum good matches required before computing a homography

# Query image
img = cv2.imread("images/kariusbaktus_v2.png", cv2.IMREAD_GRAYSCALE)
cap = cv2.VideoCapture(0)

# Features SIFT or ORB (Orb doesn't work with flann?)
sift = cv2.xfeatures2d.SIFT_create()
keypoints, descriptors = sift.detectAndCompute(img, None)
#orb = cv2.ORB_create()
#keypoints, descriptors = orb.detectAndCompute(img, None)
#img = cv2.drawKeypoints(img, keypoints, img)

# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict()

# Feature matching
flann = cv2.FlannBasedMatcher(index_params, search_params)

while True:
    # Train image
    _, frame = cap.read()
    grayframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    kp_grayframe, desc_grayframe = sift.detectAndCompute(grayframe, None)
    #grayframe = cv2.drawKeypoints(grayframe, kp_grayframe, grayframe)
    matches = flann.knnMatch(descriptors, desc_grayframe, k=2)
    '''
    # Alternative method
    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]
    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7 * n.distance:
            matchesMask[i] = [1, 0]
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=0)
    img3 = cv2.drawMatchesKnn(img, keypoints, grayframe, kp_grayframe, matches, None, **draw_params)
    '''
    # Lowe's ratio test: keep only confident matches
    good = []
    for m, n in matches:
        if m.distance < 0.6*n.distance:
            good.append(m)
    # Homography
    if len(good) > MIN_MATCH_COUNT:
        query_pts = np.float32([keypoints[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        train_pts = np.float32([kp_grayframe[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        matrix, mask = cv2.findHomography(query_pts, train_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        # Perspective transform: project the query image's corners into the frame
        h, w = img.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, matrix)
        homography = cv2.polylines(frame, [np.int32(dst)], True, (255, 0, 0), 3, cv2.LINE_AA)
        cv2.imshow("Homography", homography)
    else:
        cv2.imshow("Homography", grayframe)
    #img3 = cv2.drawMatches(img, keypoints, grayframe, kp_grayframe, good, grayframe)
    #cv2.imshow("Matches", img3)
    #cv2.imshow("Image", img)
    #cv2.imshow("Frame", grayframe)
    key = cv2.waitKey(1)
    # Supposed to be "s" key
    if key == 27:
        break

# BUG FIX: release() is a VideoCapture method, not a module-level function —
# cv2.release() raises AttributeError and leaves the camera open.
cap.release()
cv2.destroyAllWindows()
999,959 | 7c024a6ca00a829a806dd529a1b817cf9dc8b04a | # importing module
import random,pygame,sys
from pygame.locals import *
FPS = 30 # frames per second
WIDTH = 650 # width of the window's
HEIGHT = 500 # height of the window's
SLIDING_SPEED = 3 # speed of each boxes on reveals and covers
BOX_SIZE = 35 # size of box height & weight in pixels
GAP_SIZE = 10 # size of gap between boxes in pixels
BOARD_WIDTH = 10 # number of columns of icons
BOARD_HEIGHT = 6 # number of rows of icons
# sanity check: the board must hold an even number of boxes to form pairs
assert (BOARD_WIDTH * BOARD_HEIGHT) % 2 == 0, 'Board needs to have an even number of boxed for pairs of matches'
# margins that centre the board inside the window
X_margin = int((WIDTH - (BOARD_WIDTH * (BOX_SIZE + GAP_SIZE))) / 2)
Y_margin = int((HEIGHT - (BOARD_HEIGHT * (BOX_SIZE + GAP_SIZE))) / 2)
# RGB COLORS
GRAY = (100,100,100)
NAVYBLUE = (60,60,100)
WHITE = (255,255,255)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
YELLOW = (255,255,0)
ORANGE = (255,128,0)
PURPLE = (255,0,255)
CYAN = (0,255,255)
BACKGROUND_COLOR = GREEN
LIGHT_BACKGROUND_COLOR = GRAY
BOX_COLOR = WHITE
HIGH_LIGHT_COLOR = BLUE
# Using constant variables instead of raw strings for the icon shapes
DONUT = 'donut'
SQUARE = 'square'
DIAMOND = 'diamond'
LINES = 'lines'
OVAL = 'oval'
# creating a (tuple) set of colors
ALL_COLORS = (RED,GREEN,BLUE,YELLOW,ORANGE,PURPLE,CYAN)
# creating a set of shapes
ALL_SHAPES = (DONUT,SQUARE,DIAMOND,LINES,OVAL)
# check if there are enough shape/color combinations for the size of the board
assert (len(ALL_COLORS) * len(ALL_SHAPES)*2) >= (BOARD_WIDTH*BOARD_HEIGHT),"Board is too big for the number of shapes/colors defined."
def memory_puzzle():
    """Main entry point: run the memory-matching game loop.

    Builds a randomized board, reveals boxes on mouse clicks, scores matched
    pairs, and resets the board with an animation once every pair is found.
    Relies on the helper functions defined in this module (draw_Board,
    get_Box_At_Pixel, hasWon, ...).
    """
    # Game starts from here
    global FPS_CLOCK , DISPLAYSURF
    pygame.init()
    FPS_CLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WIDTH,HEIGHT))
    DISPLAYSURF.fill(BACKGROUND_COLOR)
    # for score showing on screen
    myfont = pygame.font.SysFont("monospace",16)
    # used to store x,y coordinate of mouse event
    mouse_x = 0
    mouse_y = 0
    score = 0 # keep tracking the score when two shape or color gets matched
    pygame.display.set_caption("Memory Puzzle")
    main_board = getRandomizedBoard() # returns a data structure to represent the state of the board
    # return a structure about which boxes are covered or revealed
    revealedBoxes = generateRevealedBoxesData(False)
    first_selection = None  # (x, y) of the first box clicked in the current pair
    Start_Game_Animation(main_board) # to give us a simple hint at first
    while True: # main game loop
        mouse_Clicked = False # checking mouse click through each iteration
        DISPLAYSURF.fill(BACKGROUND_COLOR)
        text = myfont.render("Score : "+str(score),1,BLUE) # create object
        score_rect = text.get_rect()
        score_rect.topleft = (5,10)
        DISPLAYSURF.blit(text,score_rect)
        draw_Board(main_board,revealedBoxes) # updating the screen function
        for event in pygame.event.get(): # event handling loop
            if event.type == QUIT or (event.type == KEYUP and event.key ==K_ESCAPE):
                pygame.quit()
                sys.exit()
            elif event.type == MOUSEMOTION:
                mouse_x,mouse_y = event.pos
            elif event.type == MOUSEBUTTONUP:
                mouse_x,mouse_y = event.pos
                mouse_Clicked = True
        # checking which box (if any) the mouse cursor is over
        boxx , boxy = get_Box_At_Pixel(mouse_x,mouse_y)
        if boxx !=None and boxy !=None:
            # that means mouse is over a box
            if not revealedBoxes[boxx][boxy]:
                draw_Highlight_Box(boxx,boxy) # draw a highlight over covered box
            if not revealedBoxes[boxx][boxy] and mouse_Clicked:
                revealedBoxesAnimation(main_board,[(boxx,boxy)])
                revealedBoxes[boxx][boxy] = True # to uncover the clicked box in updated frame
                # handling the first clicked box
                if first_selection == None: # current box is the first box that clicked
                    first_selection = (boxx,boxy)
                else:
                    # the current box is the second box clicked:
                    # check whether the two icons match
                    icon1shape , icon1color = getShapeAndColor(main_board,first_selection[0],first_selection[1])
                    icon2shape,icon2color = getShapeAndColor(main_board,boxx,boxy)
                    if icon1shape !=icon2shape or icon1color != icon2color:
                        # icons or colors didn't match: cover both boxes again
                        pygame.time.wait(1000) # 1 second pause
                        cover_Boxes_Animation(main_board,[(first_selection[0],first_selection[1]),(boxx,boxy)])
                        revealedBoxes[first_selection[0]][first_selection[1]] = False
                        revealedBoxes[boxx][boxy] = False
                    else:
                        score +=1 # increasing score
                    # BUG FIX: the win check used to be an unreachable `elif`
                    # chained after the match/mismatch branches; it must run
                    # after every completed pair.
                    if hasWon(revealedBoxes):
                        # all pairs found
                        game_Won_Animation(main_board)
                        pygame.time.wait(2000)
                        # reset the board (BUG FIX: corrected the misspelled
                        # getRandmomizedBoard / generateReavealedBoxesData /
                        # drawBoard / pygame.display.updste calls, which all
                        # raised NameError/AttributeError at runtime)
                        main_board = getRandomizedBoard()
                        revealedBoxes = generateRevealedBoxesData(False)
                        # show the unrevealed board for a second
                        draw_Board(main_board,revealedBoxes)
                        pygame.display.update()
                        pygame.time.wait(1000)
                        # replay the start game animation
                        Start_Game_Animation(main_board)
                        score = 0 # reset the score board
                    first_selection = None # reset first selection
        # redraw the screen and wait a clock tick
        pygame.display.update()
        FPS_CLOCK.tick(FPS)
#creating the board data structure
def getRandomizedBoard():
    """Build a BOARD_WIDTH x BOARD_HEIGHT grid of shuffled (shape, color) pairs."""
    # Every shape/color combination, then shuffled into a random order.
    icons = [(shape, color) for color in ALL_COLORS for shape in ALL_SHAPES]
    random.shuffle(icons)
    # Keep just enough icons to fill half the board, then duplicate so each
    # icon appears exactly twice, and shuffle the pairs together.
    pairs_needed = (BOARD_WIDTH * BOARD_HEIGHT) // 2
    icons = icons[:pairs_needed] * 2
    random.shuffle(icons)
    # Lay the icons out column by column.
    board = []
    for col in range(BOARD_WIDTH):
        column = [icons.pop(0) for _ in range(BOARD_HEIGHT)]
        board.append(column)
    return board
#spliting list into a list of lists
def split_into_groups_of(groupSize, theList):
    """Return theList chopped into consecutive sublists of at most groupSize items."""
    return [theList[start:start + groupSize]
            for start in range(0, len(theList), groupSize)]
# getting board Icon's shape and color
def getShapeAndColor(board, boxx, boxy):
    """Return the (shape, color) pair stored for the box at column boxx, row boxy."""
    shape, color = board[boxx][boxy]
    return shape, color
#"Revealed Boxes" data structure
def generateRevealedBoxesData(value):
revealedBoxes = []
for i in range(BOARD_WIDTH):
revealedBoxes.append([value] * BOARD_HEIGHT)
return revealedBoxes
def left_To_Top(x, y):
    """Convert board cell coordinates (x, y) to the pixel (left, top) of its box."""
    # Each cell occupies a box plus one gap; the margins offset the whole grid.
    step = BOX_SIZE + GAP_SIZE
    return (x * step + X_margin, y * step + Y_margin)
#converting pixel to box coordinates
def get_Box_At_Pixel(x, y):
    """Return (boxx, boxy) of the box under pixel (x, y), or (None, None) if none."""
    for col in range(BOARD_WIDTH):
        for row in range(BOARD_HEIGHT):
            left, top = left_To_Top(col, row)
            if pygame.Rect(left, top, BOX_SIZE, BOX_SIZE).collidepoint(x, y):
                return (col, row)
    return (None, None)
#Drawing the entire board or updated board
def draw_Board(board, revealed):
    """Draw every box: revealed boxes as their icon, covered boxes as a solid rect."""
    for col in range(BOARD_WIDTH):
        for row in range(BOARD_HEIGHT):
            left, top = left_To_Top(col, row)
            if revealed[col][row]:
                shape, color = getShapeAndColor(board, col, row)
                drawIcon(shape, color, col, row)
            else:
                pygame.draw.rect(DISPLAYSURF, BOX_COLOR, (left, top, BOX_SIZE, BOX_SIZE))
#Telling if player has won
def hasWon(revealedBoxes):
    """Return True when no box in any column is still covered (False)."""
    return all(False not in column for column in revealedBoxes)
# "Game Won" animation
def game_Won_Animation(board):
coveredBox = generateRevealedBoxesData(True)
color1 = LIGHT_BACKGROUND_COLOR
color2 = BACKGROUND_COLOR
for i in range(10):
color1,color2 = color2,color1
DISPLAYSURF.fill(color1)
drawBoard(board,coveredBox)
pygame.display.update()
pygame.time.wait(300)
# drawing box cover
def draw_box_cover(board,boxes,coverage):
    """Draw one animation frame for the given boxes.

    Each box is redrawn as background + its icon, then a cover strip
    `coverage` pixels wide is drawn over it (0 = fully revealed,
    BOX_SIZE = fully covered). Updates the display and ticks the game
    clock once per call, so each call is one animation frame.
    """
    #draw boxes being covered/revealed
    # "boxes" is a list of two-item lists which have x & y spot of the box
    for box in boxes:
        #draw the background color
        #draw the icon
        #then draw however much of the white box over the icon that is needed.
        left , top = left_To_Top(box[0],box[1])
        pygame.draw.rect(DISPLAYSURF,BACKGROUND_COLOR,(left,top,BOX_SIZE,BOX_SIZE))
        shape,color = getShapeAndColor(board,box[0],box[1])
        drawIcon(shape,color,box[0],box[1])
        if coverage > 0:
            pygame.draw.rect(DISPLAYSURF, BOX_COLOR, (left, top, coverage, BOX_SIZE))
    pygame.display.update()
    FPS_CLOCK.tick(FPS)
# revealing and covering animation
def revealedBoxesAnimation(board, boxesToReveal):
    """Slide the cover off the given boxes, drawing one frame per coverage value."""
    for amount in range(BOX_SIZE, (-SLIDING_SPEED) - 1, SLIDING_SPEED):
        draw_box_cover(board, boxesToReveal, amount)
def cover_Boxes_Animation(board, boxesToCover):
    """Slide the cover back over the given boxes, drawing one frame per step."""
    for amount in range(0, BOX_SIZE + SLIDING_SPEED, SLIDING_SPEED):
        draw_box_cover(board, boxesToCover, amount)
#Highlighting the box
def draw_Highlight_Box(boxx,boxy):
    """Draw a 4px-wide highlight outline around the box at (boxx, boxy)."""
    left , top = left_To_Top(boxx,boxy)
    # Offset by 5px and widen by 10px so the outline sits just outside the box.
    pygame.draw.rect(DISPLAYSURF , HIGH_LIGHT_COLOR,(left-5,top-5,BOX_SIZE+10,BOX_SIZE+10),4)
# drawing icons
def drawIcon(shape,color,boxx,boxy):
    """Draw the icon `shape` in `color` inside the box at board cell (boxx, boxy)."""
    quarter = int(BOX_SIZE * 0.25)# many of the shape drawing func calls use the
    half = int(BOX_SIZE * 0.5) # mid-point or quarter point of the box as well
    left,top = left_To_Top(boxx,boxy) # get pixel coords from board coords
    if shape == DONUT:
        # Ring: a filled circle with a background-colored hole punched in it.
        pygame.draw.circle(DISPLAYSURF, color, (left + half, top + half), half - 5)
        pygame.draw.circle(DISPLAYSURF, BACKGROUND_COLOR, (left + half, top + half), quarter - 5)
    elif shape == SQUARE:
        pygame.draw.rect(DISPLAYSURF, color, (left + quarter, top + quarter, BOX_SIZE - half, BOX_SIZE - half))
    elif shape == DIAMOND:
        # Polygon through the mid-points of the four box edges.
        pygame.draw.polygon(DISPLAYSURF, color, ((left + half, top), (left + BOX_SIZE - 1, top + half), (left + half, top + BOX_SIZE - 1), (left, top + half)))
    elif shape == LINES:
        # Two fans of diagonal lines spaced 4px apart.
        for i in range(0,BOX_SIZE,4):
            pygame.draw.line(DISPLAYSURF, color, (left, top + i), (left + i, top))
            pygame.draw.line(DISPLAYSURF, color, (left + i, top + BOX_SIZE - 1), (left + BOX_SIZE - 1, top + i))
    elif shape == OVAL:
        pygame.draw.ellipse(DISPLAYSURF, color, (left, top + quarter, BOX_SIZE, half))
# "start Game" animation
def Start_Game_Animation(board):
#randomly reveal the 8 boxes at a time
cover = generateRevealedBoxesData(False)
boxes = []
for x in range(BOARD_WIDTH):
for y in range(BOARD_HEIGHT):
boxes.append((x,y))
random.shuffle(boxes)
box_groups = split_into_groups_of(8,boxes)
draw_Board(board,cover)
for box in box_groups:
revealedBoxesAnimation(board,box) # reveal the boxes
cover_Boxes_Animation(board,box) # cover the boxes
# Entry point: run the game only when this file is executed directly, not on import.
if __name__ == '__main__':
    memory_puzzle()
|
999,960 | 073d3ef6e83e14b6e5af78216812c920fd54e4b2 | s=""
for x in input().split():s+=x[0]
print(s.upper()) |
999,961 | 210cf2edf91a39df1126081ee51100ca7303e13f | from django.shortcuts import render, HttpResponse, HttpResponseRedirect
import random
import hashlib
from .models import Diary, Log, AdminDiary, student
from collections import defaultdict
def logging(func):
    """View decorator that records the caller's IP, username and URL in a Log row.

    Logging is best-effort: if saving the real values fails for any reason, a
    placeholder 'wrong' row is written instead, and the wrapped view always runs.
    """
    def wraper(request):
        # Prefer the proxy-forwarded address when the app sits behind a proxy.
        if 'HTTP_X_FORWARDED_FOR' in request.META:
            ip = request.META['HTTP_X_FORWARDED_FOR']
        else:
            ip = request.META['REMOTE_ADDR']
        log = Log()
        try:
            username = request.META.get('USERNAME')
            path = request.path
            log.ip = ip
            log.name = username
            log.url = path
            log.save()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. The deliberate best-effort fallback row is kept.
            log.ip = 'wrong'
            log.name = 'wrong'
            log.url = 'wrong'
            log.save()
        return func(request)
    return wraper
def jurisdiction(func):
    """Permission filter for shared diaries: only let through diaries flagged as
    shared, or sessions that already hold the share password."""
    def wraper(request, dia):
        try:
            a = AdminDiary.objects.get(identification_id=dia)
            session = request.session['share_password'] if 'share_password' in request.session else ''
            print('session==', session)
            # NOTE(review): the share password is hard-coded here and in
            # add_power(); consider moving it into settings.
            if a.share or session == '1996Chan':
                return func(request, dia)
            else:
                context = {'title': 'No Power', 'content': 'You Have No Power To have This Text', 'next': dia}
                return render(request, 'jurisdiction_show.html', context)
        except Exception:
            # BUG FIX: was a bare `except:`, which also caught SystemExit and
            # KeyboardInterrupt; unknown ids fall through to the error page.
            context = {'title': 'accident', 'content': 'an accident was happend OR you have no power hahahaha', 'next': dia}
            return render(request, 'jurisdiction_show.html', context)
    return wraper
def add_power(request):
    """Grant the session share permission when the correct password is POSTed.

    On success, stores the password in the session (expiring when the browser
    closes) and redirects to the diary given by the `next` query parameter; on
    a wrong password — or a non-POST request — renders an error page.
    """
    next = request.GET.get('next')
    print('next=', next)
    if request.method == 'POST':
        password = request.POST['share_password']
        print('password', password)
        if password == '1996Chan':
            request.session['share_password'] = password
            request.session.set_expiry(0)  # session ends when the browser closes
            # NOTE(review): relative redirect; breaks if `next` is missing (None).
            return HttpResponseRedirect('detail/'+next)
        else:
            context = {'title': 'accident', 'content': 'an accident was happend!'}
            return render(request, 'jurisdiction_show.html', context)
    # BUG FIX: the original returned None for non-POST requests, which makes
    # Django raise "view didn't return an HttpResponse"; render the error page.
    context = {'title': 'accident', 'content': 'an accident was happend!'}
    return render(request, 'jurisdiction_show.html', context)
@logging
def index(request):
    """Home page: show each diary once (its first text-bearing row), newest first."""
    entries = Diary.objects.all().values().order_by('-date', '-date_time')
    grouped = defaultdict(list)
    for entry in entries:
        # Keep only the first text row per diary identification id.
        if entry['text'] is not None and entry['identification_id'] not in grouped:
            grouped[entry['identification_id']].append(entry)
    return render(request, 'index.html', {'content': dict(grouped)})
def write(request):
    """Create a diary from a POSTed form.

    Generates a random SHA-1 identification id, saves the first field whose
    name contains 'text' as the AdminDiary summary row, then stores every
    'text*' field and every 'pic*' upload as ordered Diary rows sharing
    that id. GET requests just render the empty form.
    """
    if request.method == 'POST':
        # Random id so all rows of one diary can be grouped together later.
        rand = random.random()
        sha = hashlib.sha1()
        sha.update(str(rand).encode('utf-8'))
        sign = sha.hexdigest()
        n = 1  # running order number shared by text and picture parts
        f = True  # the first text part also becomes the AdminDiary row
        for i in request.POST:
            if 'text' in i:
                if f:
                    a = AdminDiary(header=request.POST['title'], text=request.POST[i], share=0, identification_id=sign)
                    a.save()
                    f = False
                d = Diary()
                d.identification_id = sign
                d.header = request.POST['title']
                d.text = request.POST[i]
                # d.date = time.time()
                d.order_num = n
                # NOTE(review): user id is hard-coded — confirm this is intended.
                d.user_id = '1123'
                d.save()
                n += 1
        for i in request.FILES:
            if 'pic' in i:
                d = Diary()
                d.identification_id = sign
                d.header = request.POST['title']
                d.images = request.FILES[i]
                d.order_num = n
                d.user_id = '1123'
                d.save()
                n += 1
        return render(request, 'write.html', {})
    else:
        return render(request, 'write.html', {})
@jurisdiction
def detail(request, dia):
    """Show one diary: its rows rendered in ascending order_num order."""
    rows = Diary.objects.filter(identification_id=dia).values()
    by_order = {}
    title = ""
    date = ""
    for row in rows:
        print(row['images'])
        title = row['header']
        date = row['date']
        by_order[row['order_num']] = row
    # Template iterates the rows sorted by their order number.
    ordered = map(by_order.get, sorted(by_order.keys()))
    return render(request, 'detail.html', {'title': title, 'date': date, 'content': ordered})
def search(request):
    """Render the search page template."""
    return render(request, 'infor.html', {})
def information(request):
    """Look up a student by the POSTed id and render the result page."""
    id = request.POST.get('id')
    print(id)
    try:
        stu = student.objects.get(number=id)
    except Exception:
        stu = None  # no match (or bad id): render the page without a student
    return render(request, 'infor2.html', {'stu': stu})
def save_information(request):
    """AJAX endpoint: report whether the POSTed student ID exists.

    Returns '2' when the student number is unknown, 'succ' when it exists,
    and an explicit 400 response for non-AJAX requests.
    """
    if request.is_ajax():
        id = request.POST.get('ID')
        eng = request.POST.get('eng')
        # print(id, eng)
        a = student.objects.filter(number=id).values('number')
        print(a)
        if not a:
            return HttpResponse('2')
        else:
            return HttpResponse('succ')
    # BUG FIX: the original fell off the end (returned None) for non-AJAX
    # requests, which makes Django raise; return an explicit error instead.
    return HttpResponse('bad request', status=400)
|
999,962 | 4c3de1efda5dc4e3e46c615032cea05484d15311 | #Enter the number to be reversed
string1 = input("enter the number to be reversed:")
# Convert the entered string into an integer (the original comment claimed the
# opposite direction).
num1 = int(string1)
reverse = 0
# BUG FIX: the original while-loop never ran for negative inputs, so every
# negative number printed 0. Preserve the sign and reverse the magnitude.
sign = -1 if num1 < 0 else 1
num1 = abs(num1)
# Peel off the last digit each pass and append it to the running reversal.
while num1 > 0:
    remainder = num1 % 10
    reverse = reverse * 10 + remainder
    num1 = num1 // 10
#printing the output
print(sign * reverse)
|
999,963 | 4de7c49aa4c1cfac0b9b6ce0313fad42299ba40f | # Generated by Django 2.1.8 on 2020-08-26 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine Photo.updated as a plain DateTimeField."""
    dependencies = [
        ('photo', '0002_auto_20200825_2237'),
    ]
    operations = [
        migrations.AlterField(
            model_name='photo',
            name='updated',
            # NOTE(review): the previous field options are not visible here;
            # this file was generated by makemigrations and is normally left unedited.
            field=models.DateTimeField(),
        ),
    ]
|
999,964 | 665c8ab2941a9dd1284c8f2e3ad46ca3d611ff5f | import requests
# Demo client for a local sentiment/aspect service; 'start'/'end' are the
# character offsets of the aspect term ("cord") inside 'sentence'.
r =requests.get('http://127.0.0.1:5000/', params={
    'sentence': "I charge it at night and skip taking the cord with me because of the good battery life.",
    'aspect': "cord",
    'start': 41,
    'end': 45
})
# Print the raw response body from the service.
print(r.text)
999,965 | c04d1300d676d9a4f9552a2111db81dc9eb89ed4 | from abc import ABCMeta, abstractmethod
from logging import Logger
from typing import Dict, Optional

from rsmtpd.core.config_loader import ConfigLoader
from rsmtpd.handlers.shared_state import SharedState
from rsmtpd.response.base_response import BaseResponse
class Command(metaclass=ABCMeta):
    """Base for command handlers: wires up a logger and loads the handler's config.

    The config is loaded through the supplied ConfigLoader, optionally under a
    suffixed name, falling back to ``default_config`` when nothing is found.
    """
    def __init__(self, logger: Logger, config_loader: "ConfigLoader", config_suffix: str = "",
                 default_config: Optional[Dict] = None):
        # BUG FIX: the signature used a mutable default (`default_config: Dict = {}`),
        # a single dict shared by every call; use None as the sentinel and
        # substitute a fresh dict per call instead.
        if default_config is None:
            default_config = {}
        self._logger = logger
        self._config = config_loader.load(self, suffix=config_suffix, default=default_config)
class BaseCommand(Command):
    """
    Base SMTP command handler. All handlers must extend this class and implement the handle() method.
    """
    @abstractmethod
    def handle(self, command: str, argument: str, shared_state: SharedState) -> BaseResponse:
        """Handle one SMTP command.

        :param command: the command verb received from the client
        :param argument: the remainder of the command line
        :param shared_state: per-connection state shared between handlers
        :return: the response to send back to the client
        """
        raise NotImplementedError("Abstract method handle() must be implemented in child class")
|
999,966 | a80a134da3c08ad736c2b794a15c5508abf19622 | from pytest_bdd import (
given,
scenario,
then,
when,
parsers,
)
import os
from features.services.WebApp.web_app_api import WebApp
# Single shared client used by every step in this module.
web_app = WebApp()

# The one feature file exercised by every scenario below. os.path.join builds
# the platform path instead of repeating manual os.sep concatenation.
FEATURE_FILE = os.path.join('tests', 'webapp', 'web_app_api.feature')


@scenario(FEATURE_FILE, 'Verify List of devices in my network')
def test_verify_list_of_devices_in_my_network():
    """Verify List of devices in my network."""


@scenario(FEATURE_FILE, 'Change brightness back to 10')
def test_change_brightness_back_to_10():
    """Change brightness back to 10."""


@scenario(FEATURE_FILE, 'Change brightness of the device- false')
def test_change_brightness_of_the_device_false():
    """Change brightness of the device- false."""


@scenario(FEATURE_FILE, 'Change brightness of the device-true')
def test_change_brightness_of_the_devicetrue():
    """Change brightness of the device-true."""


@scenario(FEATURE_FILE, 'Change color of the device - false')
def test_change_color_of_the_device__false():
    """Change color of the device - false."""


@scenario(FEATURE_FILE, 'Change color of the device - true')
def test_change_color_of_the_device__true():
    """Change color of the device - true."""


@scenario(FEATURE_FILE, 'Change name of the device-false')
def test_change_name_of_the_devicefalse():
    """Change name of the device-false."""


@scenario(FEATURE_FILE, 'Change name of the device-true')
def test_change_name_of_the_devicetrue():
    """Change name of the device-true."""


@scenario(FEATURE_FILE, 'Store property after re-connecting to the device')
def test_store_property_after_reconnecting_to_the_device():
    """Store property after re-connecting to the device."""


@scenario(FEATURE_FILE, 'Verify Connection to a device')
def test_verify_connection_to_a_device():
    """Verify Connection to a device."""


@scenario(FEATURE_FILE, 'Verify State of a device')
def test_verify_state_of_a_device():
    """Verify State of a device."""


@scenario(FEATURE_FILE, 'Verify State of a device when disconnected from the device')
def test_verify_state_of_a_device_when_disconnected_from_the_device():
    """Verify State of a device when disconnected from the device."""
# When/Given step implementations. Each delegates to the shared WebApp client;
# docstrings mirror the Gherkin step text as pytest-bdd convention.
# NOTE(review): `validate_reponse` below matches the (misspelled) WebApp API
# name — confirm against features/services/WebApp/web_app_api.py.
@when('user sends get call to the devices')
def user_sends_get_call_to_the_devices():
    """user sends get call to the devices."""
    web_app.list_devices()
@then('User should get an OK response')
def user_should_get_an_ok_response():
    """User should get an OK response."""
    assert web_app.validate_reponse()
@then('the user should be returned with the list of devices with ip address')
def the_user_should_be_returned_with_the_list_of_devices_with_ip_address():
    """the user should be returned with the list of devices with ip address."""
    assert web_app.validate_list_devices()
@given('user is disconnected from all devices')
def user_is_disconnected_from_all_devices():
    """user is disconnected from all devices."""
    assert web_app.disconnect_from_device()
@when('the current device is disconnected')
def the_current_device_is_disconnected():
    """the current device is disconnected."""
    assert web_app.disconnect_from_device()
@when('the user changes the <brightness> of the device')
def the_user_changes_the_brightness_of_the_device(brightness):
    """the user changes the <brightness> of the device."""
    web_app.change_property_softassert("brightness",brightness)
@when('the user changes the <color> of the device')
def the_user_changes_the_color_of_the_device(color):
    """the user changes the <color> of the device."""
    web_app.change_property_softassert("color",color)
@when('the user changes the <name> of the device')
def the_user_changes_the_name_of_the_device(name):
    """the user changes the <name> of the device."""
    web_app.change_property_softassert("name",name)
# cfparse extracts the quoted value from the step text as parameter `n`.
@when(parsers.cfparse('the user changes the brightness to "{n}"'))
def the_user_changes_the_brightness_to(n):
    """the user changes the brightness to "5"."""
    print("Changing brightness to "+ str(n) +"...")
    web_app.change_property_softassert("brightness",n)
@when('the user disconnect the device and re-connect again to the same device')
def the_user_disconnect_the_device_and_reconnect_again_to_the_same_device():
    """the user disconnect the device and re-connect again to the same device."""
    assert web_app.disconnect_from_device()
    assert web_app.connect_to_device1()
@then('the <name> should not reflect in the state of the device')
def the_name_should_not_reflect_in_the_state_of_the_device(name):
    """the <name> should not reflect in the state of the device."""
    # BUG FIX: `assert (call, False)` asserted a two-item tuple, which is
    # always truthy, so the step could never fail; assert the negated call.
    assert not web_app.check_value_in_state("name",name)
@then('the <name> should reflect in the state of the device')
def the_name_should_reflect_in_the_state_of_the_device(name):
    """the <name> should reflect in the state of the device."""
    assert web_app.check_value_in_state("name",name)
@then('the <brightness> should not reflect in the state of the device')
def the_brightness_should_not_reflect_in_the_state_of_the_device(brightness):
    """the <brightness> should not reflect in the state of the device."""
    # BUG FIX (tuple assert) + renamed: the copy-pasted `the_name_...` function
    # name shadowed the definitions above at module level.
    assert not web_app.check_value_in_state("brightness",brightness)
@then('the <brightness> should reflect in the state of the device')
def the_brightness_should_reflect_in_the_state_of_the_device(brightness):
    """the <brightness> should reflect in the state of the device."""
    assert web_app.check_value_in_state("brightness",brightness)
@then('the <color> should not reflect in the state of the device')
def the_color_should_not_reflect_in_the_state_of_the_device(color):
    """the <color> should not reflect in the state of the device."""
    # BUG FIX (tuple assert) + renamed, as above.
    assert not web_app.check_value_in_state("color",color)
@then('the <color> should reflect in the state of the device')
def the_color_should_reflect_in_the_state_of_the_device(color):
    """the <color> should reflect in the state of the device."""
    assert web_app.check_value_in_state("color",color)
@then('the changed brightness5 should be reflected in the state')
def the_changed_brightness_should_be_reflected_in_the_state_5():
    """the changed brightness should be reflected in the state."""
    assert web_app.get_state()
    assert web_app.check_value_in_state("brightness","5")
@then('the changed brightness10 should be reflected in the state')
def the_changed_brightness_should_be_reflected_in_the_state_10():
    """the changed brightness should be reflected in the state."""
    assert web_app.get_state()
    assert web_app.check_value_in_state("brightness","10")
@then('the response should be <result>')
def the_response_should_be_result(result):
    """the response should be <result>."""
    assert web_app.check_response(result)
@then('the user should be able to connect to another device')
def the_user_should_be_able_to_connect_to_another_device():
    """the user should be able to connect to another device."""
    assert web_app.connect_to_device2()
@then('the user should be able to connect to one of the devices')
def the_user_should_be_able_to_connect_to_one_of_the_devices():
    """the user should be able to connect to one of the devices."""
    assert web_app.connect_to_device1()
@then('the user should be able to get the state of the connected device')
def the_user_should_be_able_to_get_the_state_of_the_connected_device():
    """the user should be able to get the state of the connected device."""
    assert web_app.get_state()
@then('the user should be able to see the changes in the connected device')
def the_user_should_be_able_to_see_the_changes_in_the_connected_device():
    """the user should be able to see the changes in the connected device."""
    assert web_app.check_value_after_reconnect()
@then('the user should not be able to change the <property> of the device with the <value>')
def the_user_should_not_be_able_to_change_the_property_of_the_device(property,value):
    """the user should not be able to change the <property> of the device."""
    print("Trying to change property with device disconnected")
    # BUG FIX: `assert (x, False)` asserted a non-empty tuple (always true).
    assert not web_app.change_property_hardassert(property,value)
@then('the user should not be able to connect to another device')
def the_user_should_not_be_able_to_connect_to_another_device():
    """the user should not be able to connect to another device."""
    print("Trying to connect 2 devices at once")
    # BUG FIX: `assert(x, False)` asserted a non-empty tuple (always true).
    assert not web_app.connect_to_device2()
@then('the user should not be able to get the state of the device')
def the_user_should_not_be_able_to_get_the_state_of_the_device():
    """the user should not be able to get the state of the device."""
    print("Trying to get status with device disconnected")
    # BUG FIX: `assert(x, False)` asserted a non-empty tuple (always true).
    assert not web_app.get_state()
999,967 | f67a1b64b7eff6deab1c43b13ff0bbfbfcefafe1 | import numpy as np
def lu_decomposition(matrix):
    """PA = LU factorisation with partial pivoting (Doolittle, in place).

    Args:
        matrix: square array-like of numbers.

    Returns:
        (P, L, U): P a permutation matrix, L unit lower triangular (holding the
        elimination multipliers), U upper triangular, with P @ matrix == L @ U.
    """
    # BUG FIX: work in floating point. With an integer input array the
    # multipliers p = U[k,j]/U[i,j] were truncated when stored back into the
    # int array, corrupting the factorisation.
    U = np.array(matrix, dtype=float)
    n_rows = len(U)
    # BUG FIX: P must be n_rows x n_rows; the original sized it by len(np.diag(U)),
    # which is the smaller dimension for non-square inputs.
    P = np.eye(n_rows)
    i = 0
    j = 0
    while i < n_rows and j < len(U[0]):
        # Pick the largest-magnitude entry in column j at/below row i as pivot.
        pivot = np.argmax(np.abs(U[i:, j])) + i
        if U[pivot][j] == 0:
            j += 1  # whole sub-column is zero: skip to the next column
            continue
        if pivot != i:
            # Swap rows in U (multipliers travel with their row) and in P.
            U[[i, pivot], :] = U[[pivot, i], :]
            P[[i, pivot], :] = P[[pivot, i], :]
        # Eliminate below the pivot, storing multipliers in the zeroed slot.
        for k in range(i + 1, n_rows):
            p = U[k, j] / U[i, j]
            U[k, j] = p
            for l in range(j + 1, len(U[k])):
                U[k, l] = U[k, l] - U[i, l] * p
        i += 1
        j += 1
    L = np.tril(U)
    np.fill_diagonal(L, 1)
    U = np.triu(U)
    return P, L, U
999,968 | 3e26ca4aef652d1e277381a6b2c88048699c48d2 | """
Non-overlapping Intervals
@ Greedy: The key point is try to select the interval with smaller end.
So we sort by interval.end, and try to select the selectable interval with smaller end first.
@ O(NlogN) time for sorting.
"""
class Solution(object):
    def eraseOverlapIntervals(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: int

        Greedy: sort by interval end (in place) and always keep the compatible
        interval with the smallest end; count everything that must be dropped.
        """
        if not intervals:
            return 0
        intervals.sort(key=lambda interval: interval.end)
        removed = 0
        last_end = intervals[0].start
        for interval in intervals:
            if interval.start >= last_end:
                last_end = interval.end  # keep it; it ends earliest
            else:
                removed += 1  # overlaps a kept interval: drop it
        return removed
999,969 | d7723e68ee70b44aaa52856cd3666a9d7a68d4f7 | #Factory libraries
import re
import sys
import os
import inspect
import openpyxl
import datetime
import json
import string
import copy
import importlib
import datetime
import time
from datetime import timedelta
# Directory containing this file; config and personal dirs are resolved from it.
path = os.path.dirname(os.path.abspath(__file__))
#print "Path: ", path
#Changes to config path must be reflected in config.py
with open(path+'/data/localConfig.txt','r') as f:
    loaded = json.load(f)
# "dirs" and "modules" are whitespace-separated lists inside the JSON config.
pdirs = loaded["dirs"].split()
pmods = loaded["modules"].split()
#Adding personal directories to path
for pdir in pdirs:
    #print "Setting pdir: ", pdir
    temp = str(path+"/"+pdir+"/")
    # Prepend so personal modules win over same-named installed packages.
    sys.path.insert(0,temp)
    #os.path.expanduser(temp)
#print "Dirs: ", sys.path
#Personal libraries
import ptext
import pmath
import pexcel
import pconfig
import plogger
import panalyze
import pdata
import test
import schema
import psms
import pcalendar
import ptime
'''
It is important that we import the module at the highest level
Functions from imports shall be called as MODULE.FUNCTION()
This will avoid flooding the namespace
'''
# Import every configured personal module, collect its function names into
# `functions` ("module.function" strings), and persist that list back to the
# local config so drift between config and code can be detected.
functions = []
for lib in pmods:
    globals()[lib] = __import__(lib)
    com = inspect.getmembers(globals()[lib], predicate=inspect.isfunction)
    """
    globals().update(importlib.import_module(lib).__dict__)
    module = importlib.import_module(lib)
    globals().update(
    {n: getattr(module, n) for n in module.__all__} if hasattr(module, '__all__')
    else
    {k: v for (k, v) in module.__dict__.items() if not k.startswith('_')
    })
    com = inspect.getmembers(lib, predicate=inspect.isfunction)
    print "Mod: ", lib
    print "Com: ", com
    """
    #Adding functions to config and setting functions values in globals
    for c in com:
        #print "Appending to functions: ", str(mod+"."+c[0])
        functions.append(str(lib+"."+c[0]))
        #fcalls.append({c[0]:mod.c[0]})
        #print c[0]
        # NOTE(review): this binds the function *name string* into globals,
        # not the function object (c[1]) — confirm that is intended.
        globals()[c[0]] = c[0]
#diff = [x for x in loaded["functions"] if x not in functions]
# Symmetric difference = functions added or removed since the last run.
diff = set(loaded["functions"]).symmetric_difference(set(functions))
#print ("Editing functions: ", diff) if len(diff) > 0 else ""
loaded["functions"] = functions
pdata.updateLocal("Config",loaded)
|
999,970 | e9fe1ecd8fec7862766920426b68fe3efe210adb | #!/usr/bin/env python3
#conding=utf-8
#python version=3.6
#通过ssh 保存交换机配置脚本
import paramiko
device_list = ['192.168.99.2']  # switches to back up
device_username = 'admin'
# NOTE(review): empty password checked in — confirm, or load from env/config.
device_password = ''
command = 'dis cu'  # "display current-configuration"

for each_device in device_list:
    client = paramiko.SSHClient()
    # Warn (rather than fail) on unknown host keys.
    client.set_missing_host_key_policy(paramiko.WarningPolicy)
    try:
        client.connect(each_device,
                       port=22,
                       username=device_username,
                       password=device_password,
                       allow_agent=False,
                       look_for_keys=False)
        stdin, stdout, stderr = client.exec_command(command)
        # BUG FIX: the output file and the SSH connection were never reliably
        # closed; use a context manager for the file and close the client in
        # `finally` so one failing device does not leak connections.
        with open(each_device, 'w') as my_file:
            for each_line in stdout.readlines():
                my_file.write(each_line)
    finally:
        client.close()
|
999,971 | 6273f30484a894ff0acde9a3b30577c7e030da36 | # Generated by Django 2.1.5 on 2019-04-06 23:54
from django.db import migrations, models
# Auto-generated by makemigrations; normally left unedited.
class Migration(migrations.Migration):
    """Schema change: drop ItemSet.is_myth and add ItemSet.is_lv94 (default True)."""
    dependencies = [
        ('my_stash', '0004_auto_20190407_0840'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='itemset',
            name='is_myth',
        ),
        migrations.AddField(
            model_name='itemset',
            name='is_lv94',
            field=models.BooleanField(default=True, verbose_name='要求レベル94'),
        ),
    ]
|
999,972 | 97e105a74a003a78dc0a6ec2650a1c0dd41526b9 | #!/usr/bin/python3
from bs4 import BeautifulSoup
import json
import os
import random
import requests
import sys
# Get the config
config_path = input('Config file path: ')
with open(config_path, encoding='utf-8') as config_file:
    config = json.load(config_file)
# Get teams to exclude
to_exclude_str = input('Team IDs to exclude, separated by commas: ').strip()
if len(to_exclude_str) > 0:
    # IDs are parsed as ints; a non-numeric entry aborts with ValueError.
    to_exclude = set(map(lambda s: int(s.strip()), to_exclude_str.split(',')))
else:
    to_exclude = set()
# Validate data: every team needs name/members/id, and every excluded ID must
# actually match a team. `to_exclude_clone` tracks the not-yet-seen exclusions.
to_exclude_clone = set(to_exclude)
if 'teams' not in config:
    sys.exit('Config must contain teams')
teams = config['teams']
for team in teams:
    if 'name' not in team or 'members' not in team or 'id' not in team:
        sys.exit('Teams must have name, members, id')
    if team['id'] in to_exclude:
        print('excluding ' + str(team['id']))
        to_exclude_clone.remove(team['id'])
if len(to_exclude_clone) > 0:
    # BUG FIX: the leftover IDs are ints (parsed with int() above), and
    # str.join raises TypeError on non-strings; convert before joining.
    sys.exit('Excluded IDs do not exist: ' + ', '.join(map(str, to_exclude_clone)))
# Get the DOMJudge host & session
dj_url = input('DOMJudge base URL (no trailing slash): ')
dj_session = input('DOMJudge session cookie: ')
# Create cookies — the API call is authenticated via the session cookie only.
dj_cookies = { 'domjudge_session': dj_session }
# Get scoreboard; raise_for_status aborts on any HTTP error.
score_req = requests.get(dj_url + '/api/scoreboard', cookies=dj_cookies)
score_req.raise_for_status()
score = json.loads(score_req.text)
print('Downloaded scoreboard')
# Load the number of problems solved per team, matching scoreboard entries to
# config teams by id; teams absent from the scoreboard keep no 'solved' key.
for score_team in score:
    team = next((t for t in teams if t['id'] == score_team['team']), None)
    if team is not None:
        team['solved'] = score_team['score']['num_solved']
# Check whether any teams weren't in the scoreboard (warning only; downstream
# code reads the 'solved' key).
for team in teams:
    if 'solved' not in team:
        print('WARNING: Team without score: ' + team['name'])
# Generate the array of winners, with weighted probability.
# To do so, clone people as many times as they have solved problems,
# then shuffle that array, and finally remove duplicates.
# Not very efficient, but len(teams) is <100...
winners = []
for team in teams:
    if team['id'] not in to_exclude:
        # BUG FIX: teams absent from the scoreboard have no 'solved' key and
        # crashed here despite the earlier warning; treat them as 0 solved
        # (max(1, ...) still guarantees everyone at least one ticket).
        solved = team.get('solved', 0)
        for member in team['members']:
            winners += [(member, team['name']) for i in range(max(1, solved))]
random.shuffle(winners)
# Deduplicate while preserving the shuffled draw order.
result = []
for winner in winners:
    if winner not in result:
        result.append(winner)
# Create the out directory now (there are 2 outputs)
os.makedirs('out', exist_ok=True)
# Print the winners to a file, for logging purposes:
# one "name | team" line per unique winner, in draw order.
with open('out/raffle.log', 'w') as result_log:
    print('\n'.join([p[0] + ' | ' + p[1] for p in result]), file=result_log)
# Generate the HTML
# result_array becomes a JS array literal: one [name, team] pair per line,
# with both strings JSON-escaped.
result_array = '[\n' + ',\n'.join(['[' + json.dumps(p[0]) + ', ' + json.dumps(p[1]) + ']' for p in result]) + '\n]'
# Self-contained raffle page: "next" reveals winners one at a time with a
# ". . ." suspense animation; "fullscreen" tries the vendor-prefixed APIs.
html = """<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8" />
    <title>Raffle</title>
    <style>
        #container {
            background-color: #fff;
        }
        h1 {
            font-size: 10em;
            font-weight: 700;
            text-align: center;
        }
        h2 {
            font-size: 5em;
            font-weight: 400;
            text-align: center;
        }
        #next {
            font-size: 2em;
            color: #B0B0B0;
            position: absolute;
            left: 2em;
            bottom: 2em;
        }
        #fullscreen {
            font-size: 2em;
            color: #F0F0F0;
            position: absolute;
            right: 2em;
            bottom: 2em;
        }
    </style>
    <script>
        RESULTS = """ + result_array + """;
        var index = 0;
        var isLoading = false;
        function showNext() {
            if(isLoading) {
                return;
            }
            isLoading = true;
            var title = document.getElementById('title');
            var subtitle = document.getElementById('subtitle');
            title.textContent = '';
            subtitle.textContent = '';
            if(index < RESULTS.length) {
                title.textContent = '.';
                setTimeout(function() {
                    title.textContent = '. .';
                    setTimeout(function() {
                        title.textContent = '. . .';
                        setTimeout(function() {
                            title.textContent = RESULTS[index][0];
                            subtitle.textContent = RESULTS[index][1];
                            index++;
                            isLoading = false;
                        }, 1000);
                    }, 1000);
                }, 1000);
            } else {
                subtitle.textContent = "there's nobody left!";
            }
        }
        function showFullscreen() {
            var container = document.getElementById('container');
            if('requestFullscreen' in container) {
                container.requestFullscreen();
            } else if ('webkitRequestFullscreen' in container) {
                container.webkitRequestFullscreen();
            } else if ('mozRequestFullScreen' in container) {
                container.mozRequestFullScreen();
            } else if ('msRequestFullscreen' in container) {
                container.msRequestFullscreen();
            }
        }
    </script>
</head>
<body>
    <div id="container">
        <h1 id="title">Raffle</h1>
        <h2 id="subtitle"></h2>
        <a id="next" href="#" onclick="showNext(); return false;">next</a>
        <a id="fullscreen" href="#" onclick="showFullscreen(); return false;">fullscreen</a>
    </div>
</body>
</html>
"""
# Print the HTML to a file
with open('out/raffle.html', 'w') as html_file:
    print(html, file=html_file)
print('Done!')
|
999,973 | 901e9154f89a77e51e8eda0b779bc2f14111d369 | SWITCH_PROFILE = """mutation switchProfile($input: SwitchProfileInput!) { switchProfile(switchProfile: $input) { __typename account { __typename ...accountGraphFragment } activeSession { __typename ...sessionGraphFragment } } } fragment accountGraphFragment on Account { __typename id activeProfile { __typename id } profiles { __typename ...profileGraphFragment } parentalControls { __typename isProfileCreationProtected } flows { __typename star { __typename isOnboarded } } attributes { __typename email emailVerified userVerified locations { __typename manual { __typename country } purchase { __typename country } registration { __typename geoIp { __typename country } } } } } fragment profileGraphFragment on Profile { __typename id name maturityRating { __typename ratingSystem ratingSystemValues contentMaturityRating maxRatingSystemValue isMaxContentMaturityRating } isAge21Verified flows { __typename star { __typename eligibleForOnboarding isOnboarded } } attributes { __typename isDefault kidsModeEnabled groupWatch { __typename enabled } languagePreferences { __typename appLanguage playbackLanguage preferAudioDescription preferSDH subtitleLanguage subtitlesEnabled } parentalControls { __typename isPinProtected kidProofExitEnabled liveAndUnratedContent { __typename enabled } } playbackSettings { __typename autoplay backgroundVideo prefer133 } avatar { __typename id userSelected } } } fragment sessionGraphFragment on Session { __typename sessionId device { __typename id } entitlements experiments { __typename featureId variantId version } homeLocation { __typename countryCode } inSupportedLocation isSubscriber location { __typename countryCode } portabilityLocation { __typename countryCode } preferredMaturityRating { __typename impliedMaturityRating ratingSystem } }"""
ENTITLEMENTS = """query EntitledGraphMeQuery { me { __typename account { __typename ...accountGraphFragment } activeSession { __typename ...sessionGraphFragment } } } fragment accountGraphFragment on Account { __typename id activeProfile { __typename id } profiles { __typename ...profileGraphFragment } parentalControls { __typename isProfileCreationProtected } flows { __typename star { __typename isOnboarded } } attributes { __typename email emailVerified userVerified locations { __typename manual { __typename country } purchase { __typename country } registration { __typename geoIp { __typename country } } } } } fragment profileGraphFragment on Profile { __typename id name maturityRating { __typename ratingSystem ratingSystemValues contentMaturityRating maxRatingSystemValue isMaxContentMaturityRating } isAge21Verified flows { __typename star { __typename eligibleForOnboarding isOnboarded } } attributes { __typename isDefault kidsModeEnabled groupWatch { __typename enabled } languagePreferences { __typename appLanguage playbackLanguage preferAudioDescription preferSDH subtitleLanguage subtitlesEnabled } parentalControls { __typename isPinProtected kidProofExitEnabled liveAndUnratedContent { __typename enabled } } playbackSettings { __typename autoplay backgroundVideo prefer133 } avatar { __typename id userSelected } } } fragment sessionGraphFragment on Session { __typename sessionId device { __typename id } entitlements experiments { __typename featureId variantId version } homeLocation { __typename countryCode } inSupportedLocation isSubscriber location { __typename countryCode } portabilityLocation { __typename countryCode } preferredMaturityRating { __typename impliedMaturityRating ratingSystem } }"""
REGISTER_DEVICE = """mutation ($registerDevice: RegisterDeviceInput!) {registerDevice(registerDevice: $registerDevice) {__typename}}"""
LOGIN = """mutation loginTv($input: LoginInput!) { login(login: $input) { __typename account { __typename ...accountGraphFragment } actionGrant activeSession { __typename ...sessionGraphFragment } }}\nfragment accountGraphFragment on Account { __typename id activeProfile { __typename id } profiles { __typename ...profileGraphFragment } parentalControls { __typename isProfileCreationProtected } flows { __typename star { __typename isOnboarded } } attributes { __typename email emailVerified userVerified locations { __typename manual { __typename country } purchase { __typename country } registration { __typename geoIp { __typename country } } } }}\nfragment profileGraphFragment on Profile { __typename id name maturityRating { __typename ratingSystem ratingSystemValues contentMaturityRating maxRatingSystemValue isMaxContentMaturityRating } isAge21Verified flows { __typename star { __typename eligibleForOnboarding isOnboarded } } attributes { __typename isDefault kidsModeEnabled groupWatch { __typename enabled } languagePreferences { __typename appLanguage playbackLanguage preferAudioDescription preferSDH subtitleLanguage subtitlesEnabled } parentalControls { __typename isPinProtected kidProofExitEnabled liveAndUnratedContent { __typename enabled } } playbackSettings { __typename autoplay backgroundVideo prefer133 } avatar { __typename id userSelected } }}\nfragment sessionGraphFragment on Session { __typename sessionId device { __typename id } entitlements experiments { __typename featureId variantId version } homeLocation { __typename countryCode } inSupportedLocation isSubscriber location { __typename countryCode } portabilityLocation { __typename countryCode } preferredMaturityRating { __typename impliedMaturityRating ratingSystem }}"""
|
999,974 | 41df132e2c753d7c624cedb483ce8d0f12020852 | # -*- coding: utf-8 -*-
import random
from itertools import combinations, chain
from collections import Counter
def gen_scores(num_campany=4, num_students=10):
    """Generate a per-company score map over students.

    Each company receives a random permutation of 0..num_students-1, i.e. a
    distinct score for every student.

    TODO: consider loading this from an external file instead.
    """
    def shuffled():
        # Fresh random permutation of the student indices.
        perm = list(range(num_students))
        random.shuffle(perm)
        return perm

    return [shuffled() for _ in range(num_campany)]
def gen_candidates(num_campany=4, num_students=10, students_per_campany=2):
    """Yield every assignment of students to companies for one term.

    Each yielded value is a list of ``num_campany`` sets, every set holding
    ``students_per_campany`` distinct student indices; students left over
    once all companies are filled remain unassigned.
    """
    def step(people, depth):
        if depth == 0:
            yield []
            # FIX: without this return the original kept recursing with a
            # negative depth — never yielding anything, only wasting work.
            return
        for chosen, rest in canididates(people, students_per_campany):
            yield from (partial + [chosen] for partial in step(rest, depth - 1))
    yield from step(set(range(num_students)), num_campany)

def canididates(people, sel):
    """Yield each way to split *people* into (chosen, remainder).

    *chosen* has exactly *sel* members; *remainder* is everyone else.
    (Misspelled name kept — gen_candidates references it.)
    """
    # Renamed locals: the original shadowed the builtin `all` and clobbered
    # the `sel` parameter inside the loop.
    pool = set(people)
    for combo in combinations(pool, sel):
        chosen = set(combo)
        yield chosen, pool.difference(chosen)
def score(score_map, pattern):
    """Total score of an assignment *pattern* under *score_map*.

    score_map[i][s] is company i's score for student s; pattern[i] is the
    collection of students assigned to company i.
    """
    return sum(
        score_map[company][student]
        for company, assigned in enumerate(pattern)
        for student in assigned
    )
def is_vaild_patterns(num_students, min_count, max_count, patterns):
    """Check whether the given *patterns* are usable in practice.

    Flattens the doubly-nested *patterns*, counts how often each student
    appears, and returns True iff every student's count lies within
    [min_count, max_count]. Students that never appear count as 0, so a
    positive min_count rejects patterns that leave anyone out.

    (Misspelled name kept — the __main__ block references it.)
    """
    def valid(count):
        return min_count <= count <= max_count

    # Pre-seed every student with 0 so absentees are still validated.
    # FIX: removed leftover debug print(patterns)/print(counter) calls.
    counter = Counter({i: 0 for i in range(num_students)})
    flatten = chain.from_iterable
    for student in flatten(flatten(patterns)):
        counter[student] += 1
    return all(valid(c) for c in counter.values())
if __name__ == '__main__':
    campanies = 3
    students = 8
    students_per_campany = 2
    # FIX: pass the declared dimensions. The original called gen_scores()
    # with its defaults (4 companies, 10 students) and only worked because
    # those defaults happened to cover (3, 8).
    score_map = gen_scores(campanies, students)
    candidates = gen_candidates(campanies, students, students_per_campany)
    calc = lambda pattern: score(score_map, pattern)
    result = [(pattern, calc(pattern)) for pattern in candidates]
    sorted_result = sorted(result, key=lambda pair: pair[1], reverse=True)
    # Validate the three best-scoring patterns: every student must appear
    # between 1 and `campanies` times across them.
    result = is_vaild_patterns(students, 1, campanies,
                               [pair[0] for pair in sorted_result[:3]])
    print(result)
|
999,975 | 3d7b687328a6e06c63132bb66d82666240856925 | import os
import shutil
from git import Repo
from scripts.git_utilities import GitUtilities
from scripts.project_details import ProjectDetails
from scripts.release_details import ReleaseDetails
from scripts.utilities import assert_step, replace_text_in_file, use_directory, run, check_url_exists, \
ensure_directory_exists
class PrepareStarterProjectRelease:
    """Pre-release steps that update the Starter Project working copy."""
    @staticmethod
    def check_pre_conditions_for_starter_project_repo(details: ReleaseDetails) -> None:
        """Assert the starter-project checkout is a non-bare repo on 'master'."""
        repo = Repo(details.locations.starter_project_dir)
        assert_step(not repo.bare)
        GitUtilities.check_branch_name(repo, 'master')
    @staticmethod
    def update_starter_project(details: ReleaseDetails) -> None:
        """Swap the old single header for the new one and bump version text.

        Destructive: resets/cleans the starter-project working directory
        before copying the freshly released header in.
        """
        STARTER_PATH_OLD_SINGLE_HEADER = F"{details.locations.starter_project_dir}/lib/{details.old_single_header}"
        STARTER_PATH_NEW_SINGLE_HEADER = F"{details.locations.starter_project_dir}/lib/{details.new_single_header}"
        # Make sure starter project folder is clean
        project_dir = details.locations.starter_project_dir
        GitUtilities.reset_and_clean_working_directory(project_dir)
        shutil.copyfile(details.release_new_single_header, STARTER_PATH_NEW_SINGLE_HEADER)
        # Delete the last release:
        if os.path.exists(STARTER_PATH_OLD_SINGLE_HEADER):
            os.remove(STARTER_PATH_OLD_SINGLE_HEADER)
        else:
            # A missing old header means the checkout is not in the expected
            # pre-release state; fail loudly with diagnostic hints.
            raise RuntimeError(F"""
            ----------------------------------------------------------------
            ERROR: Old header file does not exist:
            {STARTER_PATH_OLD_SINGLE_HEADER}
            Starting state of Starter Project does not match '{details.old_version.get_version_text()}'
            Check whether:
            1. There were uncommitted changes to version.ini in main project,
               from a previous release preparation step.
            2. The Starter Project repo needs pulling.
            3. This is a CI build of a release tag - in which case the
               updated Starter Project has not yet been pushed, and this
               failure can be ignored.
            ----------------------------------------------------------------
            """)
        # Update the version in the "redirect" header:
        replace_text_in_file(
            F"{details.locations.starter_project_dir}/lib/{details.project_details.simulated_single_header_file}",
            details.old_version.get_version_text(),
            details.new_version.get_version_text())
        # Update the version number in the Visual Studio projects:
        PrepareStarterProjectRelease.update_solution_file(details,
            F"{details.locations.starter_project_dir}/visual-studio-2017/StarterProject.vcxproj")
        PrepareStarterProjectRelease.update_solution_file(details,
            F"{details.locations.starter_project_dir}/visual-studio-2019/StarterProject2019.vcxproj")
    @staticmethod
    def update_solution_file(details: ReleaseDetails, visual_studio_sln: str) -> None:
        """Point one VS project file at the new single header, if it exists."""
        if os.path.isfile(visual_studio_sln):
            replace_text_in_file(visual_studio_sln,
                                 details.old_single_header,
                                 details.new_single_header)
        else:
            print(f"Info: No Visual Studio solution file: {visual_studio_sln}")
    @staticmethod
    def check_starter_project_builds(details: ReleaseDetails) -> None:
        """Smoke-test: configure and build the starter project with CMake."""
        build_dir = F"{details.locations.starter_project_dir}/cmake-build-validate-release"
        ensure_directory_exists(build_dir)
        with use_directory(build_dir):
            run(["cmake", ".."])
            run(["cmake", "--build", "."])
class DeployStarterProjectRelease:
    """Commit, push and verify the updated Starter Project."""
    @staticmethod
    def get_url_for_starter_project_single_header_for_version(project_details: ProjectDetails,
                                                              version_without_v: str) -> str:
        """Raw-GitHub URL of the versioned single header on master."""
        repo = F'{project_details.github_project_name}.StarterProject'
        header = F'{project_details.library_folder_name}.v.{version_without_v}.hpp'
        return F'https://raw.githubusercontent.com/approvals/{repo}/master/lib/{header}'
    @staticmethod
    def commit_starter_project(details: ReleaseDetails) -> None:
        """Commit every pending change with the standard release message."""
        GitUtilities.commit_everything(
            details.locations.starter_project_dir,
            F"Update to {details.project_details.github_project_name} {details.new_version_as_text()}")
    @staticmethod
    def push_starter_project(details: ReleaseDetails) -> None:
        """Push master from inside the starter-project directory."""
        with use_directory(details.locations.starter_project_dir):
            run(["git", "push", "origin", "master"])
    @staticmethod
    def publish_starter_project(details: ReleaseDetails) -> None:
        """Commit, push, then assert the release is visible on GitHub."""
        steps = DeployStarterProjectRelease
        steps.commit_starter_project(details)
        steps.push_starter_project(details)
        assert_step(steps.check_starter_project_published(details),
                    "the starter project is published")
    @staticmethod
    def check_starter_project_published(details: ReleaseDetails) -> bool:
        """True when the new single header is reachable on raw.githubusercontent."""
        return check_url_exists(
            DeployStarterProjectRelease.get_url_for_starter_project_single_header_for_version(
                details.project_details,
                details.new_version.get_version_text_without_v()))
|
999,976 | 4829aa424b8f6bba0d19e3b1bd1006c0ba94829d | s = '''
{ 1, 0, 0, 0},
{ sqrt(3)/2, 0, 0, 0.5},
{ sqrt(3)/2, 0, 0, -0.5},
{ 0.5, 0, 0, sqrt(3)/2},
{ 0.5, -0, -0, -sqrt(3)/2},
{ 0, 1, 0, 0},
{ 0, sqrt(3)/2, 0.5, 0},
{ 0, sqrt(3)/2, -0.5, 0},
{ 0, 0.5, sqrt(3)/2, 0},
{ -0, 0.5, -sqrt(3)/2, -0},
{ 0, 0, 1, 0},
{ 0, 0, 0, 1},
'''
def go():
    """Parse the matrix table in `s` and re-print it with aligned columns."""
    data = []
    lines = s.split('\n')
    lines = [e for e in lines if len(e) > 1]  # drop blank/trivial lines
    for line in lines:
        line = line.replace('{', '')
        line = line.replace('}', '')
        line = line.replace(' ', '')
        line = line.replace('\t', '')
        # The trailing comma on every row leaves an empty last field; drop it.
        line = line.split(',')[:-1]
        data += [line]
    # Widest cell anywhere in the table sets the column width.
    nmax = max([max([len(e) for e in row]) for row in data])
    print nmax
    output = []  # NOTE(review): never used — looks like leftover scaffolding
    for row in data:
        print "{ " + ", ".join([e.rjust(nmax) for e in row]) + " },"
go()
|
999,977 | 3efde453c59554bde3b389675cfa5e8e01305fe6 | from flask import make_response, jsonify, request
# Module-level in-memory store; ids are sequential starting at 1.
parties = {}
class Party(object):
    """Political-party records backed by the module-level `parties` dict."""
    def __init__(self):
        # Scratch attributes; the shared `parties` dict is the real store.
        self.party_id = 0
        self.party_name = ''
        self.logo = ''
        self.members = ''
        self.parties = parties
    @staticmethod
    def add_party(party_name, logo, members):
        """Store a new party under the next sequential id."""
        next_id = len(parties) + 1
        parties[next_id] = {
            "party_id": next_id,
            "party_name": party_name,
            "logo": logo,
            "members": members
        }
    def get_parties(self):
        """Return every stored political party."""
        return self.parties
    def edit_party(self, party_id, party_name, logo, members):
        """Update one party; 201 JSON response on success, 404 when missing."""
        if party_id and party_id in self.parties:
            entry = parties[party_id]
            entry['party_name'] = party_name
            entry['logo'] = logo
            entry['members'] = members
            return make_response(jsonify({
                "status": 201,
                "message": "Party Updated successfully",
                "new details": self.parties
            }), 201)
        return make_response(jsonify({
            "status": 404,
            "Message": "party not found"
        }), 404)
|
999,978 | 35b85b50e4b9550195f597408844880b1357cf93 | units = {"tank", "plane", "ship", "dog"}
print(units)
# add() inserts a single value into the set
units.add("zeppelin")
print(units)
# update() adds every element of the given iterable to the set
units.update(["helicopter", "juri"])
print(units)
# discard() removes an element from the set without raising when it is absent
units.discard("dog")
units.discard("dog")
print(units)
# remove() removes an element; raises KeyError when the value is absent
units.remove("tank")
#units.remove("tank")
print(units)
|
999,979 | d000eb7141067fe8b0468323cb03286470c5448e | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-01 06:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Movie.trailer_link, defaulting to a 'movie not found' placeholder."""
    dependencies = [
        ('movie', '0025_notfoundmovie_release_date'),
    ]
    operations = [
        migrations.AddField(
            model_name='movie',
            name='trailer_link',
            # Placeholder URL used until a real trailer link is populated.
            field=models.CharField(default='http://www.movieclick.xyz/movie_not_found', max_length=400, null=True),
        ),
    ]
|
999,980 | 0dd3d8384856a002f6b07581b678b8102d166922 | import psycopg2
# ElephantSQL (hosted PostgreSQL) connection settings.
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables / a secrets manager and rotate this password.
DB_NAME = "iuwubbvc"
DB_USER = "iuwubbvc"
DB_PASS = "wJIkY9ANC0Pe2VWsocTrDCxoft1VSTtm"
DB_HOST = "isilo.db.elephantsql.com"
DB_PORT = "5432"
conn = psycopg2.connect(database=DB_NAME,user=DB_USER,password=DB_PASS,host=DB_HOST,port=DB_PORT)
print("database connected successfully")
cur=conn.cursor()
# Fixed-value INSERT; re-running will fail if ID is a primary/unique key —
# presumably a one-off smoke test. TODO confirm the employee schema.
cur.execute("INSERT INTO employee (ID,NAME,EMAIL) VALUES (1,'VIKRAM','abc@gmail.com')")
conn.commit()
print("DATA INSERTED SUCCESSFULLY")
conn.close()
999,981 | feffcef2d34870f2187aa4f4e47d38a0459162bd | def s():
[n,m] = list(map(int,input().split()))
a = [input() for _ in range(n)]
b = {input() for _ in range(m)}
res = [[]]
aa = len(a)*[False]
def r(x=0):
if x == n:
p = [a[i] for i in range(n) if aa[i]]
if len(p) <= len(res[0]):
return
for i in p:
for j in p:
if i+' '+j in b or j+' '+i in b:
return
res[0] = p
return x
else:
aa[x] = True
r(x+1)
aa[x] = False
r(x+1)
r()
res = res[0]
res.sort()
print(len(res))
print(*res,sep='\n')
s() |
999,982 | 4ab78fe433d41686014d94286b048e1d6cf7dc41 | # ------------------------------------------------------------------------------
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from yacs.config import CfgNode as CN
# High-resolution net (HRNet) parameters for segmentation.
HIGH_RESOLUTION_NET = CN()
HIGH_RESOLUTION_NET.PRETRAINED_LAYERS = ['*']  # '*': take every pretrained layer
HIGH_RESOLUTION_NET.STEM_INPLANES = 64
HIGH_RESOLUTION_NET.FINAL_CONV_KERNEL = 1
HIGH_RESOLUTION_NET.WITH_HEAD = True
# Stage 2: 1 module, 2 branches; per-branch block counts and channel widths.
HIGH_RESOLUTION_NET.STAGE2 = CN()
HIGH_RESOLUTION_NET.STAGE2.NUM_MODULES = 1
HIGH_RESOLUTION_NET.STAGE2.NUM_BRANCHES = 2
HIGH_RESOLUTION_NET.STAGE2.NUM_BLOCKS = [4, 4]
HIGH_RESOLUTION_NET.STAGE2.NUM_CHANNELS = [32, 64]
HIGH_RESOLUTION_NET.STAGE2.BLOCK = 'BASIC'
HIGH_RESOLUTION_NET.STAGE2.FUSE_METHOD = 'SUM'
# Stage 3: 3 branches, channels doubling per added branch.
HIGH_RESOLUTION_NET.STAGE3 = CN()
HIGH_RESOLUTION_NET.STAGE3.NUM_MODULES = 1
HIGH_RESOLUTION_NET.STAGE3.NUM_BRANCHES = 3
HIGH_RESOLUTION_NET.STAGE3.NUM_BLOCKS = [4, 4, 4]
HIGH_RESOLUTION_NET.STAGE3.NUM_CHANNELS = [32, 64, 128]
HIGH_RESOLUTION_NET.STAGE3.BLOCK = 'BASIC'
HIGH_RESOLUTION_NET.STAGE3.FUSE_METHOD = 'SUM'
# Stage 4: 4 branches.
HIGH_RESOLUTION_NET.STAGE4 = CN()
HIGH_RESOLUTION_NET.STAGE4.NUM_MODULES = 1
HIGH_RESOLUTION_NET.STAGE4.NUM_BRANCHES = 4
HIGH_RESOLUTION_NET.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
HIGH_RESOLUTION_NET.STAGE4.NUM_CHANNELS = [32, 64, 128, 256]
HIGH_RESOLUTION_NET.STAGE4.BLOCK = 'BASIC'
HIGH_RESOLUTION_NET.STAGE4.FUSE_METHOD = 'SUM'
# Registry mapping model name -> its extra (architecture) config.
MODEL_EXTRAS = {
    'seg_hrnet': HIGH_RESOLUTION_NET,
}
|
999,983 | 215ebe5f150e15d9704074404022911dc9efa6ce | import os
import subprocess
import gevent
import pytest
@pytest.mark.skipif(
    # FIX: tuple comparison. The original `major >= 1 and minor >= 3` wrongly
    # skipped e.g. gevent 21.1 (minor 1 < 3 despite major 21).
    gevent.version_info[:2] < (1, 3), reason="gevent 1.3 or later is required"
)
def test_gevent_warning(monkeypatch):
    """A program loading gevent after ddtrace must emit a RuntimeWarning.

    Runs wrong_program_gevent.py in a subprocess and checks it exits 0,
    prints nothing on stdout, and warns on stderr.
    """
    subp = subprocess.Popen(
        ("python", os.path.join(os.path.dirname(__file__), "wrong_program_gevent.py")),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True,
    )
    # FIX: communicate() drains both pipes while waiting. wait() with PIPEs
    # can deadlock once the child fills a pipe buffer (see subprocess docs).
    stdout, stderr = subp.communicate()
    assert subp.returncode == 0
    assert stdout == b""
    assert b"RuntimeWarning: Loading ddtrace before using gevent monkey patching" in stderr
|
999,984 | f124719b46b04b346a3a20f1b98a16cd8d5a853a | import paho.mqtt.client as mqtt
import MySQLdb
import json
#import datetime
import sys
#import time
from datetime import datetime
import requests
####################################################
def get_data(mosq,obj,msg):
db = MySQLdb.connect("localhost","root","root","myPrototype")
cursor = db.cursor()
output_str = "get_data - Unable to Authenticate/get_data... "
date1 = datetime.today()
date = datetime.strftime(date1, '%Y-%m-%d %H:%M:%S.%f')
try:
data1 = json.loads(msg.payload)
#print data1
except ValueError:
return mqttc.publish('/test/Mob/data','{"error_code":"2","error_desc":"Response=invalid input, no proper JSON request"}')
if(not data1):
output_str += ", details are mandatory"
output = '{"function":"get_historys","error_code":"2", "error_desc": "Response=%s"}' %(output_str )
return mqttc.publish('/test/Mob/data',output)
if(data1.get('function') is None):
output_str += ",function is mandatory"
output = '{"function":"get_historys","session_id":"%s","error_code":"2", "error_desc": "Response=%s"}' %(sid,output_str)
return mqttc.publish('/test/Mob/data',output)
function = data1['function']
try:
sqlq2 = "SELECT avg(AccTop)-3*STDDEV(AccTop),avg(AccTop)+3*STDDEV(AccTop),avg(AccBottom)-3*STDDEV(AccBottom),avg(AccBottom)+3*STDDEV(AccBottom) FROM SeatProto WHERE SeatProto.TimeStamp >= ( CURDATE() - INTERVAL 3 DAY )"
#print sqlq2
cursor.execute(sqlq2)
get_rec = cursor.fetchall()
#print get_rec
if(get_rec > 0):
topminus=get_rec[0][0]
topplus=get_rec[0][1]
botminus=get_rec[0][2]
botplus=get_rec[0][3]
#between 23 and 25
#sqlq3="select sum(count(AccTop)) from SeatProto WHERE AccTop NOT between %s and %s and SeatProto.TimeStamp >= ( CURDATE() - INTERVAL 3 DAY) GROUP BY MINUTE(TimeStamp) " %(topminus,topplus)
sqlq3 = "SELECT count(AccTop) FROM SeatProto where AccTop NOT between %s and %s and SeatProto.TimeStamp >= ( CURDATE() - INTERVAL 3 DAY ) " %(topminus,topplus)
print sqlq3
cursor.execute(sqlq3)
get_rec1 = cursor.fetchall()
#sqlq4="select sum(count(AccBottom)) from SeatProto WHERE AccBottom NOT between %s and %s and SeatProto.TimeStamp >= ( CURDATE() - INTERVAL 3 DAY) GROUP BY MINUTE(TimeStamp) " %(botminus,botplus)
sqlq4 = "SELECT count(AccBottom) FROM SeatProto where AccBottom NOT between %s and %s and SeatProto.TimeStamp >= ( CURDATE() - INTERVAL 3 DAY )" %(botminus,botplus)
print sqlq4
cursor.execute(sqlq4)
get_rec2 = cursor.fetchall()
if(len(get_rec1) > 0 and len(get_rec2) > 0):
top=get_rec1[0][0]
bottom=get_rec2[0][0]
output='{"function":"get_data","Top":"%s","Bottom":"%s"}' %(top,bottom)
return mqttc.publish('/test/Mob/data',output)
#data = json.loads(web.data())
#value = data["name"]
#return output
else:
output = '{"function":"get_data,"error_code":"3", "error_desc": "Response=Failed to get the data records, NO_DATA_FOUND"}'
return mqttc.publish('/test/Mob/data',output)
except Exception, e:
cursor.close()
db.close()
output = '{"function":"get_data","error_code":"3", "error_desc": "Response=Failed to get the data"}'
return mqttc.publish('/test/Mob/data',output)
################################ get_historys #########################
def get_history(mosq,obj,msg):
    """MQTT callback: publish per-minute outlier history on /test/Mob/data.

    Builds avg +/- 3*stddev bands for AccTop/AccBottom over the last 3 days,
    then publishes per-minute outlier counts for both sensors as JSON
    assembled by hand via string concatenation. Python 2 code.
    """
    db = MySQLdb.connect("localhost","root","root","myPrototype")
    cursor = db.cursor()
    output_str = "get_history - Unable to Authenticate/get_history... "
    date1 = datetime.today()
    date = datetime.strftime(date1, '%Y-%m-%d %H:%M:%S.%f')
    try:
        data1 = json.loads(msg.payload)
        #print data1
    except ValueError:
        return mqttc.publish('/test/Mob/data','{"error_code":"2","error_desc":"Response=invalid input, no proper JSON request"}')
    if(not data1):
        output_str += ", details are mandatory"
        output = '{"function":"get_historys","error_code":"2", "error_desc": "Response=%s"}' %(output_str )
        return mqttc.publish('/test/Mob/data',output)
    if(data1.get('function') is None):
        output_str += ",function is mandatory"
        # NOTE(review): `sid` is undefined in this scope — reaching this
        # branch raises NameError.
        output = '{"function":"get_historys","session_id":"%s","error_code":"2", "error_desc": "Response=%s"}' %(sid,output_str)
        return mqttc.publish('/test/Mob/data',output)
    function = data1['function']
    try:
        sqlq1 = "SELECT avg(AccTop)-3*STDDEV(AccTop),avg(AccTop)+3*STDDEV(AccTop),avg(AccBottom)-3*STDDEV(AccBottom),avg(AccBottom)+3*STDDEV(AccBottom) FROM SeatProto WHERE SeatProto.TimeStamp >= ( CURDATE() - INTERVAL 3 DAY )"
        #print sqlq1
        cursor.execute(sqlq1)
        get_rec = cursor.fetchall()
        #print get_rec
        # NOTE(review): queries interpolate band values with % formatting;
        # values come from our own DB but parameterized queries are safer.
        sql12="select MacID,count(AccTop),TimeStamp from SeatProto WHERE AccTop NOT between %s and %s GROUP BY MINUTE(TimeStamp) ORDER BY ID" %(get_rec[0][0],get_rec[0][1])
        #print sql12
        cursor.execute(sql12)
        get_rec1 = cursor.fetchall()
        #print get_rec1
        sql13="select MacID,count(AccBottom),TimeStamp from SeatProto WHERE AccBottom NOT between %s and %s GROUP BY MINUTE(TimeStamp) ORDER BY ID" %(get_rec[0][2],get_rec[0][3])
        #print sql13
        cursor.execute(sql13)
        get_rec2 = cursor.fetchall()
        #print get_rec2
        if(len(get_rec1) > 0 or len(get_rec2) > 0):
            # Assemble the JSON reply by hand: Top_History then Bottom_History.
            output = '{"function":"get_history","error_code":"0", \n "Top_History":'
            output += '['
            counter = 0
            for rec in get_rec1:
                counter += 1
                # First element has no leading comma separator.
                if(counter == 1):
                    output += '{"macID":"%s","CTop":"%s","Time":"%s"}' %(rec[0] ,rec[1],rec[2])
                else:
                    output += ',\n {"macID":"%s","CTop":"%s","Time":"%s"}' %(rec[0] ,rec[1],rec[2])
            output += '],'
            output +='\n "Bottom_History":'
            output += '['
            counter = 0
            for rec in get_rec2:
                counter += 1
                if(counter == 1):
                    output += '{"macID":"%s","CBottom":"%s","Time":"%s"}' %(rec[0] ,rec[1],rec[2])
                else:
                    output += ',\n {"macID":"%s","CBottom":"%s","Time":"%s"}' %(rec[0] ,rec[1],rec[2])
            output += ']\n'
            output += '}'
            print output
            cursor.close()
            db.close()
            return mqttc.publish('/test/Mob/data',output)
        else:
            output = '{"function":"get_history","error_code":"3", "error_desc": "Response=Failed to get the history records, NO_DATA_FOUND"}'
            return mqttc.publish('/test/Mob/data',output)
    except Exception, e:
        cursor.close()
        db.close()
        output = '{"function":"get_history","error_code":"3", "error_desc": "Response=Failed to get the history"}'
        return mqttc.publish('/test/Mob/data',output)
#####################add data #################################################
def add_data(mosq,obj,msg):
print "add_data......."
db = MySQLdb.connect("localhost","root","root","myPrototype")
cursor = db.cursor()
output_str = "add_data - Unable to Authenticate/add_data... "
#print "this is register string : ",str(msg.payload)
#data1 = json.loads(str(msg.payload))
date1 = datetime.today()
date = datetime.strftime(date1, '%Y-%m-%d %H:%M:%S.%f')
#print "date ", date
try:
data1 = json.loads(msg.payload)
#print data1
except ValueError:
return mqttc.publish('/test/error/','{"function":"add_data","error_code":"2","error_desc":"Response=invalid input, no proper JSON request"}')
try:
if((data1.get('ipmacid') is None) or ((data1.get('ipmacid') is not None) and (len(data1['ipmacid']) <= 0))):
output_str += ", ipmacid is mandatory"
output = '{"function":"add_data","session_id":"%s","error_code":"2", "error_desc": "Response=%s"}' %(sid,output_str)
return mqttc.publish('/test/error/',output)
else:
ipmacid = data1['ipmacid']
if((data1.get('topAcc') is None) or ((data1.get('topAcc') is not None) and (len(data1['topAcc']) <= 0))):
output_str += ", topAcc is mandatory"
output = '{"function":"add_data","session_id":"%s","error_code":"2", "error_desc": "Response=%s"}' %(sid,output_str)
return mqttc.publish('/test/error/',output)
else:
topAcc = data1['topAcc']
if((data1.get('bottomAcc') is None) or ((data1.get('bottomAcc') is not None) and (len(data1['bottomAcc']) <= 0))):
output_str += ", bottomAcc is mandatory"
output = '{"function":"add_data","session_id":"%s","error_code":"2", "error_desc": "Response=%s"}' %(sid,output_str)
return mqttc.publish('/test/error/',output)
else:
bottomAcc = data1['bottomAcc']
if((data1.get('Theft') is None) or ((data1.get('Theft') is not None) and (len(data1['Theft']) <= 0))):
output_str += ", Theft is mandatory"
output = '{"function":"add_data","error_code":"2", "error_desc": "Response=%s"}' %(output_str)
return mqttc.publish('/test/error/',output)
else:
Theft = data1['Theft']
add_rec1=cursor.execute("""INSERT INTO SeatProto(MacID,AccTop,AccBottom,Theft,TimeStamp) VALUES (%s,%s,%s,%s,%s)""",(ipmacid,topAcc,bottomAcc,Theft,date))
db.commit()
if add_rec1 > 0:
#print 'data inserted'
return mqttc.publish('/test/error/','{"error_code":"0","Response":"Successfully added"}')
else:
#print 'unable to insert'
return mqttc.publish('/test/error/','{"error_code":"2","Response":"unable to add data"}')
except MySQLdb.Error, e:
try:
#print "MySQL Error [%d]: %s" % (e.args[0], e.args[1])
return mqttc.publish('/test/error/',str(e.args[0])+str(e.args[1]))
except IndexError:
#print "MySQL Error: %s" % str(e)
return mqttc.publish('/test/error/',str(e))
except Exception, e:
cursor.close()
db.close()
output = '{"function":"add_data","error_code":"3", "error_desc": "Response=Failed to add the data"}' %(sid)
return mqttc.publish('/test/error/',output)
################## publish response #################################################
def on_publish(client, userdata, result):
    """MQTT publish acknowledgement — intentionally a no-op."""
    pass
    #print "data published \n"
def on_connect(client, userdata, flags, rc):
    """On (re)connect, subscribe to the server-side command topics."""
    #print("Connected with result code "+str(rc))
    mqttc.subscribe("jts/Ser/#")
def on_disconnect(client, userdata, rc):
    """Log unexpected disconnects; the client loop reconnects on its own."""
    if rc != 0:
        print "Unexpected MQTT disconnection. Will auto-reconnect"
######################### mqtt methods ####################################
# Client wiring: per-topic callbacks, then a catch-all subscription.
# NOTE(review): broker host and credentials are hard-coded — move to config
# and rotate the password.
mqttc = mqtt.Client()
mqttc.message_callback_add('/test/Ser/Data',get_history)
mqttc.message_callback_add('/test/Ser/get_data',get_data)
mqttc.message_callback_add('/test/e2s/data',add_data)
mqttc.on_publish = on_publish
mqttc.on_disconnect = on_disconnect
mqttc.on_connect = on_connect
mqttc.username_pw_set('esp', 'ptlesp01')
mqttc.connect("cld003.jts-prod.in", 1883, 60)
# Subscribed here once; on_connect additionally subscribes "jts/Ser/#"
# and is the one re-applied after reconnects.
mqttc.subscribe("/test/#")
mqttc.loop_forever()  # blocks: network loop + reconnection handling
|
999,985 | 37db8e7ef464a97dc420717372a747d428ea5e82 | import pickle
from getDictionary import get_dictionary
# Load the train/test split metadata. FIX: use `with` so the file handle is
# closed — the original leaked it via pickle.load(open(...)).
with open('../data/traintest.pkl', 'rb') as pkl_file:
    meta = pickle.load(pkl_file)
train_imagenames = meta['train_imagenames']
# -----fill in your implementation here --------
# Adjust these values for potentially more accurate results
a = 200  # points per image — presumably; TODO confirm against get_dictionary
k = 500  # number of visual words (clusters)
imgPaths = ["../data/" + path for path in train_imagenames]
print("Creating dictionary of words for random points...")
random_words_dictionary = get_dictionary(imgPaths, a, k, "Random")
print("Creating dictionary of words for top Harris points...")
harris_words_dictionary = get_dictionary(imgPaths, a, k, "Harris")
with open("dictionaryRandom.pkl", 'wb') as handle:
    pickle.dump(random_words_dictionary, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open("dictionaryHarris.pkl", 'wb') as handle:
    pickle.dump(harris_words_dictionary, handle, protocol=pickle.HIGHEST_PROTOCOL)
# ----------------------------------------------
999,986 | e861de441b15f3d0762500ea00018d355bac5f44 | #!/usr/bin/env python
# git-cl -- a git-command for integrating reviews on Rietveld
# Copyright (C) 2008 Evan Martin <martine@danga.com>
import getpass
import optparse
import os
import re
import subprocess
import sys
import tempfile
import textwrap
import upload
import urllib2
import projecthosting_upload
import cl_settings
# mimetype exceptions: if you can't upload to rietveld, add the
# relevant extension to this list. The only important part is the
# "text/x-script." bit; the stuff after the dot doesn't matter
import mimetypes
mimetypes.add_type("text/x-script.scheme", ".scm")
mimetypes.add_type("application/xml", ".xml")
mimetypes.add_type("text/x-script.postscript", ".ps")
mimetypes.add_type("text/x-script.perl", ".pl")
mimetypes.add_type("text/x-script.tex", ".latex")
mimetypes.add_type("text/x-script.texinfo", ".texi")
mimetypes.add_type("text/x-script.shell", ".sh")
# readline (when available) adds line editing/history to raw_input prompts.
try:
  import readline
except ImportError:
  pass
DEFAULT_SERVER = 'codereview.appspot.com'     # fallback Rietveld instance
PREDCOMMIT_HOOK = '.git/hooks/pre-cl-dcommit' # optional user hook, pre-dcommit
PREUPLOAD_HOOK = '.git/hooks/pre-cl-upload'   # optional user hook, pre-upload
settings = cl_settings.Settings()             # project settings (cl_settings)
def TrackerURL(issue):
  """Return the Tracker URL for a particular issue."""
  # Server/project are fixed here; could be made customizable.
  base = 'http://code.google.com/p/lilypond/issues/detail?id=%s'
  return base % issue
def GetCodereviewSettingsInteractively():
  """Prompt the user for settings.

  Interactively collects the Rietveld server, Allura tracker server and
  Allura bearer token, writing each answer into `git config` when it
  changes. Tracker server and token are mandatory (loops until given).
  """
  server = settings.GetServer(error_ok=True)
  prompt = 'Rietveld server (host[:port])'
  prompt += ' [%s]' % (server or DEFAULT_SERVER)
  newserver = raw_input(prompt + ': ')
  if not server and not newserver:
    newserver = DEFAULT_SERVER
  if newserver and newserver != server:
    cl_settings.RunGit(['config', 'rietveld.server', newserver])
  tracker_server = settings.GetTrackerServer(error_ok=True)
  prompt = 'Allura server'
  prompt += ' [%s]' % tracker_server
  newtracker = raw_input(prompt + ': ')
  while not tracker_server and not newtracker:
    prompt = 'You must provide the address of the Allura tracker server: '
    newtracker = raw_input(prompt)
  if newtracker and newtracker != tracker_server:
    cl_settings.RunGit(['config', 'allura.tracker', newtracker])
  token = settings.GetToken(error_ok=True)
  prompt = 'Allura bearer token (see https://sourceforge.net/auth/oauth/)'
  prompt += ' [%s]' % token
  newtoken = raw_input(prompt + ': ')
  while not token and not newtoken:
    prompt = 'You must provide a bearer token to authenticate: '
    newtoken = raw_input(prompt)
  if newtoken and newtoken != token:
    cl_settings.RunGit(['config', 'allura.token', newtoken])
  # Helper to set one rietveld.* git-config key; 'x' clears the value.
  def SetProperty(initial, caption, name):
    prompt = caption
    if initial:
      prompt += ' ("x" to clear) [%s]' % initial
    new_val = raw_input(prompt + ': ')
    if new_val == 'x':
      cl_settings.RunGit(['config', '--unset-all', 'rietveld.' + name], error_ok=True)
    elif new_val and new_val != initial:
      cl_settings.RunGit(['config', 'rietveld.' + name, new_val])
  SetProperty(settings.GetCCList(), 'CC list', 'cc')
def LoadCodereviewSettingsFromFile(file):
  """Parse a codereview.settings file.

  Reads "key: value" lines (ignoring blanks and '#' comments), applies
  recognized keys to `git config`, and returns a dict of hook download
  URLs keyed 'preupload'/'predcommit' when present.

  NOTE(review): the parameter shadows the `file` builtin and the local
  `settings` shadows the module-level settings object — kept as-is.
  """
  settings = {}
  for line in file.read().splitlines():
    if not line or line.startswith("#"):
      continue
    k, v = line.split(": ", 1)
    settings[k] = v
  def GetProperty(name):
    return settings.get(name)
  def SetProperty(name, setting, unset_error_ok=False):
    fullname = 'rietveld.' + name
    if setting in settings:
      cl_settings.RunGit(['config', fullname, settings[setting]])
    else:
      cl_settings.RunGit(['config', '--unset-all', fullname], error_ok=unset_error_ok)
  SetProperty('server', 'CODE_REVIEW_SERVER')
  # Only server setting is required. Other settings can be absent.
  # In that case, we ignore errors raised during option deletion attempt.
  SetProperty('cc', 'CC_LIST', unset_error_ok=True)
  SetProperty('tree-status-url', 'STATUS', unset_error_ok=True)
  SetProperty('viewvc-url', 'VIEW_VC', unset_error_ok=True)
  hooks = {}
  if GetProperty('GITCL_PREUPLOAD'):
    hooks['preupload'] = GetProperty('GITCL_PREUPLOAD')
  if GetProperty('GITCL_PREDCOMMIT'):
    hooks['predcommit'] = GetProperty('GITCL_PREDCOMMIT')
  return hooks
def CmdConfig(args):
  """Configure review settings interactively, or from a settings URL."""
  def DownloadToFile(url, filename):
    # Fetch *url* into repo-root/<filename>; returns False (skips) when the
    # target already exists so existing hooks are never overwritten.
    filename = os.path.join(settings.GetRoot(), filename)
    if os.path.exists(filename):
      print '%s exists, skipping' % filename
      return False
    contents = urllib2.urlopen(url).read()
    file = open(filename, 'w')
    file.write(contents)
    file.close()
    os.chmod(filename, 0755)  # hooks must be executable
    return True
  parser = optparse.OptionParser(
      usage='git cl config [repo root containing codereview.settings]')
  (options, args) = parser.parse_args(args)
  if len(args) == 0:
    GetCodereviewSettingsInteractively()
    return
  url = args[0]
  if not url.endswith('codereview.settings'):
    url = os.path.join(url, 'codereview.settings')
  # Load Codereview settings and download hooks (if available).
  hooks = LoadCodereviewSettingsFromFile(urllib2.urlopen(url))
  for key, filename in (('predcommit', PREDCOMMIT_HOOK),
                        ('preupload', PREUPLOAD_HOOK)):
    if key in hooks:
      DownloadToFile(hooks[key], filename)
def CmdStatus(args):
  """Show review status for all branches, or one field of the current one."""
  parser = optparse.OptionParser(usage='git cl status [options]')
  parser.add_option('--field', help='print only specific field (desc|id|url)')
  (options, args) = parser.parse_args(args)
  # TODO: maybe make show_branches a flag if necessary.
  show_branches = not options.field
  if show_branches:
    branches = cl_settings.RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
    if branches:
      print 'Branches associated with reviews:'
      for branch in sorted(branches.splitlines()):
        cl = cl_settings.Changelist(branchref=branch)
        print "  %20s: %s" % (cl.GetBranch(), cl.GetRietveldIssue())
  cl = cl_settings.Changelist()
  if options.field:
    # Machine-friendly output: exactly one field, no decoration.
    if options.field.startswith('desc'):
      print cl.GetDescription()
    elif options.field == 'id':
      print cl.GetRietveldIssue()
    elif options.field == 'url':
      print cl.GetRietveldURL()
  else:
    print
    print 'Current branch:',
    if not cl.GetRietveldIssue():
      print 'no issue assigned.'
      return 0
    print cl.GetBranch()
    if cl.GetTrackerIssue():
      print 'Tracker issue:', cl.GetTrackerIssue(), '(%s)' % cl.GetTrackerURL()
    else:
      print 'Tracker issue: None'
    print 'Rietveld issue:', cl.GetRietveldIssue(), '(%s)' % cl.GetRietveldURL()
    print 'Issue description:'
    print cl.GetDescription(pretty=True)
def CmdIssue(args):
  """Set (when an argument is given) or display the current Rietveld issue."""
  parser = optparse.OptionParser(usage='git cl issue [issue_number]')
  parser.description = ('Set or display the current Rietveld issue. ' +
                        'Pass issue number 0 to clear the current issue.')
  (options, args) = parser.parse_args(args)
  cl = cl_settings.Changelist()
  if len(args) > 0:
    cl.SetRietveldIssue(int(args[0]))
  print 'Rietveld issue:', cl.GetRietveldIssue(), '(%s)' % cl.GetRietveldURL()
def UserEditedLog(starting_text):
  """Given some starting text, let the user edit it and return the result.

  Opens $EDITOR (default 'vi') on a temp file seeded with *starting_text*.
  Returns the edited text with '#' comment lines stripped, or None when
  the editor exits non-zero.
  """
  editor = os.getenv('EDITOR', 'vi')
  (file_handle, filename) = tempfile.mkstemp()
  file = os.fdopen(file_handle, 'w')
  file.write(starting_text)
  file.close()
  # shell=True so the user's EDITOR value may include arguments.
  ret = subprocess.call(editor + ' ' + filename, shell=True)
  if ret != 0:
    os.remove(filename)
    return
  file = open(filename)
  text = file.read()
  file.close()
  os.remove(filename)
  stripcomment_re = re.compile(r'^#.*$', re.MULTILINE)
  return stripcomment_re.sub('', text).strip()
def RunHook(hook, upstream_branch='origin', error_ok=False):
  """Run a given hook if it exists. By default, we fail on errors.

  *hook* is a path relative to the repo root; the hook's output, if any,
  is echoed to stdout. Missing hooks are silently skipped.
  """
  hook = '%s/%s' % (settings.GetRoot(), hook)
  if not os.path.exists(hook):
    return
  output = cl_settings.RunCommand([hook, upstream_branch], error_ok).strip()
  if output != '':
    print output
def CmdPresubmit(args):
    """Reports what presubmit checks on the change would report."""
    parser = optparse.OptionParser(
        usage='git cl presubmit [options]')
    (options, args) = parser.parse_args(args)
    # A non-empty diff-index means uncommitted changes; hooks would then
    # inspect state that is not part of the changelist.
    if cl_settings.RunGit(['diff-index', 'HEAD']):
        print 'Cannot presubmit with a dirty tree. You must commit locally first.'
        return 1
    # error_ok=True: this is a dry run, so hook failures are reported rather
    # than aborting the command.
    print '*** Presubmit checks for UPLOAD would report: ***'
    RunHook(PREUPLOAD_HOOK, error_ok=True)
    print '*** Presubmit checks for DCOMMIT would report: ***'
    RunHook(PREDCOMMIT_HOOK, error_ok=True)
def CmdUpload(args):
    """Upload the current changelist for review to the Rietveld server.

    Extra positional args are passed through to "git diff" to pick the base
    branch.  Associates the resulting issue/patchset with the current branch
    and, unless -n is given, mirrors the review to the issue tracker.
    """
    parser = optparse.OptionParser(
        usage='git cl upload [options] [args to "git diff"]')
    parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
                      help='bypass upload presubmit hook')
    parser.add_option('-m', dest='message', help='message for patch')
    parser.add_option('-r', '--reviewers',
                      help='reviewer email addresses')
    parser.add_option('--send-mail', action='store_true',
                      help='send email to reviewer immediately')
    parser.add_option("-n", "--no-code-issue",
                      help="do not upload to issue tracker",
                      action="store_true", dest="no_code_issue")
    (options, args) = parser.parse_args(args)
    if cl_settings.RunGit(['diff-index', 'HEAD']):
        print 'Cannot upload with a dirty tree. You must commit locally first.'
        return 1
    cl = cl_settings.Changelist()
    if args:
        base_branch = args[0]
    else:
        # Default to diffing against the "upstream" branch.
        base_branch = cl.GetUpstreamBranch()
        args = [base_branch + "..."]
    if not options.bypass_hooks:
        RunHook(PREUPLOAD_HOOK, upstream_branch=base_branch, error_ok=False)
    # --no-ext-diff is broken in some versions of Git, so try to work around
    # this by overriding the environment (but there is still a problem if the
    # git config key "diff.external" is used).
    env = os.environ.copy()
    if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
    subprocess.call(['git', 'diff', '--no-ext-diff', '--stat', '-M'] + args,
                    env=env)
    upload_args = ['--assume_yes'] # Don't ask about untracked files.
    upload_args.extend(['--server', settings.GetServer()])
    if options.reviewers:
        upload_args.extend(['--reviewers', options.reviewers])
    upload_args.extend(['--cc', settings.GetCCList()])
    if options.message:
        upload_args.extend(['--message', options.message])
    if options.send_mail:
        if not options.reviewers:
            settings.DieWithError("Must specify reviewers to send email.")
        upload_args.append('--send_mail')
    if cl.GetRietveldIssue():
        # Branch already has an issue: add a new patch set to it.
        upload_args.extend(['--issue', cl.GetRietveldIssue()])
        print ("This branch is associated with Rietveld issue %s. "
               "Adding patch to that issue." % cl.GetRietveldIssue())
        prompt = "Message describing this patch set: "
        desc = options.message or raw_input(prompt).strip()
    else:
        # Construct a description for this change from the log.
        # We need to convert diff options to log options.
        log_args = []
        if len(args) == 1 and not args[0].endswith('.'):
            log_args = [args[0] + '..']
        elif len(args) == 2:
            log_args = [args[0] + '..' + args[1]]
        else:
            log_args = args[:] # Hope for the best!
        desc = cl_settings.RunGit(['log', '--pretty=format:%s\n\n%b'] + log_args)
        initial_text = """# Enter a description of the change.
# This will be displayed on the codereview site.
# The first line will also be used as the subject of the review."""
        desc = UserEditedLog(initial_text + '\n' + desc)
        if not desc:
            print "Description empty; aborting."
            return 1
    # TODO: dies when desc is empty
    subject = desc.splitlines()[0]
    upload_args.extend(['--title', subject])
    upload_args.extend(['--message', desc])
    upload_args.extend(['--oauth2'])
    issue, patchset = upload.RealMain(['upload'] + upload_args + args)
    if not cl.GetRietveldIssue():
        cl.SetRietveldIssue(issue)
    cl.SetPatchset(patchset)
    if not options.no_code_issue:
        # Mirror the new patchset to the project-hosting issue tracker and
        # remember the tracker issue id on the branch.
        issueId = cl.GetTrackerIssue()
        issueId = projecthosting_upload.upload(issue, patchset, subject, desc, issueId)
        cl.SetTrackerIssue(issueId)
def CmdDCommit(args):
parser = optparse.OptionParser(
usage='git cl dcommit [options] [git-svn branch to apply against]')
parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
help='bypass upload presubmit hook')
parser.add_option('-m', dest='message',
help="override review description")
parser.add_option('-f', action='store_true', dest='force',
help="force yes to questions (don't prompt)")
parser.add_option('-c', dest='contributor',
help="external contributor for patch (appended to " +
"description)")
parser.add_option('--tbr', action='store_true', dest='tbr',
help="short for 'to be reviewed', commit branch " +
"even without uploading for review")
(options, args) = parser.parse_args(args)
cl = cl_settings.Changelist()
if not args:
# Default to merging against our best guess of the upstream branch.
args = [cl.GetUpstreamBranch()]
base_branch = args[0]
if cl_settings.RunGit(['diff-index', 'HEAD']):
print 'Cannot dcommit with a dirty tree. You must commit locally first.'
return 1
# This rev-list syntax means "show all commits not in my branch that
# are in base_branch".
upstream_commits = cl_settings.RunGit(['rev-list', '^' + cl.GetBranchRef(),
base_branch]).splitlines()
if upstream_commits:
print ('Base branch "%s" has %d commits '
'not in this branch.' % (base_branch, len(upstream_commits)))
print 'Run "git merge %s" before attempting to dcommit.' % base_branch
return 1
if not options.force and not options.bypass_hooks:
RunHook(PREDCOMMIT_HOOK, upstream_branch=base_branch, error_ok=False)
# Check the tree status if the tree status URL is set.
status = GetTreeStatus()
if 'closed' == status:
print ('The tree is closed. Please wait for it to reopen. Use '
'"git cl dcommit -f" to commit on a closed tree.')
return 1
elif 'unknown' == status:
print ('Unable to determine tree status. Please verify manually and '
'use "git cl dcommit -f" to commit on a closed tree.')
description = options.message
if not options.tbr:
# It is important to have these checks early. Not only for user
# convenience, but also because the cl object then caches the correct values
# of these fields even as we're juggling branches for setting up the commit.
if not cl.GetRietveldIssue():
print 'Current issue unknown -- has this branch been uploaded?'
print 'Use --tbr to commit without review.'
return 1
if not description:
description = cl.GetDescription()
if not description:
print 'No description set.'
print 'Visit %s/edit to set it.' % (cl.GetRietveldURL())
return 1
description += "\n\nReview URL: %s" % cl.GetRietveldURL()
else:
# Submitting TBR. Get a description now.
if not description:
description = UserEditedLog('TBR: ')
if not description:
print "Description empty; aborting."
return 1
if options.contributor:
description += "\nPatch from %s." % options.contributor
print 'Description:', repr(description)
branches = [base_branch, cl.GetBranchRef()]
if not options.force:
subprocess.call(['git', 'diff', '--stat'] + branches)
raw_input("About to commit; enter to confirm.")
# We want to squash all this branch's commits into one commit with the
# proper description.
# We do this by doing a "merge --squash" into a new commit branch, then
# dcommitting that.
MERGE_BRANCH = 'git-cl-commit'
# Delete the merge branch if it already exists.
if cl_settings.RunGit(['show-ref', '--quiet', '--verify', 'refs/heads/' + MERGE_BRANCH],
exit_code=True) == 0:
cl_settings.RunGit(['branch', '-D', MERGE_BRANCH])
# We might be in a directory that's present in this branch but not in the
# trunk. Move up to the top of the tree so that git commands that expect a
# valid CWD won't fail after we check out the merge branch.
rel_base_path = cl_settings.RunGit(['rev-parse', '--show-cdup']).strip()
if rel_base_path:
os.chdir(rel_base_path)
# Stuff our change into the merge branch.
# We wrap in a try...finally block so if anything goes wrong,
# we clean up the branches.
try:
cl_settings.RunGit(['checkout', '-q', '-b', MERGE_BRANCH, base_branch])
cl_settings.RunGit(['merge', '--squash', cl.GetBranchRef()])
cl_settings.RunGit(['commit', '-m', description])
# dcommit the merge branch.
output = cl_settings.RunGit(['svn', 'dcommit', '--no-rebase'])
finally:
# And then swap back to the original branch and clean up.
cl_settings.RunGit(['checkout', '-q', cl.GetBranch()])
cl_settings.RunGit(['branch', '-D', MERGE_BRANCH])
if cl.has_RietveldIssue and output.find("Committed r") != -1:
print "Closing issue (you may be prompted for your codereview password)..."
viewvc_url = settings.GetViewVCUrl()
if viewvc_url:
revision = re.compile(".*?\nCommitted r(\d+)",
re.DOTALL).match(output).group(1)
cl.description = (cl.description +
"\n\nCommitted: " + viewvc_url + revision)
cl.CloseRietveldIssue()
cl.SetRietveldIssue(0)
def CmdPatch(args):
    """Apply a Rietveld patch set (by issue id or raw-patch URL) to the tree.

    Optionally creates a new branch first (-b/-f) and, unless -n is given,
    commits the applied patch and associates the issue with the branch.
    """
    parser = optparse.OptionParser(usage=('git cl patch [options] '
                                          '<patch url or Rietveld issue ID>'))
    parser.add_option('-b', dest='newbranch',
                      help='create a new branch off trunk for the patch')
    parser.add_option('-f', action='store_true', dest='force',
                      help='with -b, clobber any existing branch')
    parser.add_option('--reject', action='store_true', dest='reject',
                      help='allow failed patches and spew .rej files')
    parser.add_option('-n', '--no-commit', action='store_true', dest='nocommit',
                      help="don't commit after patch applies")
    (options, args) = parser.parse_args(args)
    if len(args) != 1:
        return parser.print_help()
    # NOTE(review): 'input' shadows the builtin of the same name.
    input = args[0]
    if re.match(r'\d+', input):
        # Input is an issue id. Figure out the URL.
        # The patch download path is scraped out of the issue's HTML page.
        issue = input
        fetch = "curl --silent https://%s/%s" % (settings.GetServer(), issue)
        grep = "grep -E -o '/download/issue[0-9]+_[0-9]+.diff'"
        pipe = subprocess.Popen("%s | %s" % (fetch, grep), shell=True,
                                stdout=subprocess.PIPE)
        path = pipe.stdout.read().strip()
        url = 'https://%s%s' % (settings.GetServer(), path)
        if len(path) == 0:
            # There is no patch to download (patch may be too large, see
            # http://code.google.com/p/rietveld/issues/detail?id=196).
            # Try to download individual patches for each file instead,
            # and concatenate them to obtain the complete patch.
            grep = "grep -E -o '/download/issue[0-9]+_[0-9]+_[0-9]+.diff'"
            pipe = subprocess.Popen("%s | %s" % (fetch, grep), shell=True,
                                    stdout=subprocess.PIPE)
            paths = pipe.stdout.read().strip().split("\n")
            # curl brace-expansion fetches and concatenates all per-file diffs.
            url = 'https://%s{%s}' % (settings.GetServer(), ",".join(paths))
    else:
        # Assume it's a URL to the patch.
        match = re.match(r'https?://.*?/issue(\d+)_\d+.diff', input)
        if match:
            issue = match.group(1)
            url = input.replace("http:", "https:")
        else:
            print "Must pass an Rietveld issue ID or full URL for 'Download raw patch set'"
            return 1
    if options.newbranch:
        if options.force:
            cl_settings.RunGit(['branch', '-D', options.newbranch], error_ok=True)
        cl_settings.RunGit(['checkout', '-b', options.newbranch])
    # Switch up to the top-level directory, if necessary, in preparation for
    # applying the patch.
    top = cl_settings.RunGit(['rev-parse', '--show-cdup']).strip()
    if top:
        os.chdir(top)
    # Construct a pipeline to feed the patch into "git apply".
    # We use "git apply" to apply the patch instead of "patch" so that we can
    # pick up file adds.
    # 1) Fetch the patch.
    fetch = "curl --silent %s" % url
    # 2) Munge the patch.
    # Git patches have a/ at the beginning of source paths. We strip that out
    # with a sed script rather than the -p flag to patch so we can feed either
    # Git or svn-style patches into the same apply command.
    gitsed = "sed -e 's|^--- a/|--- |; s|^+++ b/|+++ |'"
    # 3) Apply the patch.
    # The --index flag means: also insert into the index (so we catch adds).
    # NOTE(review): 'apply' shadows the (deprecated) builtin of the same name.
    apply = "git apply --index -p0"
    if options.reject:
        apply += " --reject"
    subprocess.check_call(' | '.join([fetch, gitsed, apply]), shell=True)
    # If we had an issue, commit the current state and register the issue.
    if not options.nocommit:
        cl_settings.RunGit(['commit', '-m', 'patch from issue %s' % issue])
        cl = cl_settings.Changelist()
        cl.SetRietveldIssue(issue)
        print "Committed patch."
    else:
        print "Patch applied to index."
def CmdRebase(args):
    """Rebase the current branch on top of the svn repo.

    Provided as a wrapper for 'git svn rebase' to help avoid an accidental
    'git svn dcommit'.
    """
    cl_settings.RunGit(['svn', 'rebase'], redirect_stdout=False)
def GetTreeStatus():
    """Fetch the tree status.

    Returns 'open', 'closed', or 'unknown' from the configured tree-status
    URL, or 'unset' when no URL is configured.
    """
    url = settings.GetTreeStatusUrl(error_ok=True)
    if not url:
        return 'unset'
    body = urllib2.urlopen(url).read().lower()
    if 'closed' in body or body == '0':
        return 'closed'
    if 'open' in body or body == '1':
        return 'open'
    return 'unknown'
def CmdTreeStatus(args):
    """Print the tree status.

    Exit codes: 2 when no tree-status URL is configured, 1 when the tree is
    not open, 0 when it is open.
    """
    status = GetTreeStatus()
    if 'unset' == status:
        print 'You must configure your tree status URL by running "git cl config".'
        return 2
    print "The tree is %s" % status
    if status != 'open':
        return 1
    return 0
def CmdUpstream(args):
    """Print the name of the current branch's upstream branch."""
    cl = cl_settings.Changelist()
    print cl.GetUpstreamBranch()
# (name, help text, handler) triples consumed by Usage() and main().
COMMANDS = [
    ('config', 'edit configuration for this tree', CmdConfig),
    ('dcommit', 'commit the current changelist via git-svn', CmdDCommit),
    ('issue', 'show/set current branch\'s Rietveld issue', CmdIssue),
    ('patch', 'patch in a code review', CmdPatch),
    ('presubmit', 'run presubmit tests on the current changelist', CmdPresubmit),
    ('rebase', 'rebase current branch on top of svn repo', CmdRebase),
    ('status', 'show status of changelists', CmdStatus),
    ('tree', 'show the status of the tree', CmdTreeStatus),
    ('upload', 'upload the current changelist to codereview', CmdUpload),
    ('upstream', 'print the name of the upstream branch, if any', CmdUpstream),
]
def Usage(name):
    """Print the command list and exit the process with status 1."""
    print 'usage: %s <command>' % name
    print 'commands are:'
    for name, desc, _ in COMMANDS:
        print ' %-10s %s' % (name, desc)
    sys.exit(1)
def main(argv):
if len(argv) < 2:
Usage(argv[0])
command = argv[1]
for name, _, func in COMMANDS:
if name == command:
return func(argv[2:])
print 'unknown command: %s' % command
Usage(argv[0])
# Script entry point; exit status is the sub-command's return value.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
999,987 | 46fd993a0ff8bf5d39dbbe47280fd587c2cd2059 | class Solution:
def isMonotonic(self, A):
"""
:type A: List[int]
:rtype: bool
"""
if not A:
return False
if len(A) == 1:
return True
big = lambda x, y: x >= y
small = lambda x, y: x <= y
if A[-1] - A[0] >= 0:
func = small
else:
func = big
for idx in range(len(A) - 1):
if func(A[idx], A[idx + 1]):
continue
else:
return False
return True
|
999,988 | 4b75f4c935919c349e8f6e487ed686b4b3e36dfd | """ Napisz funkcję która zwraca listę liczb pierwszych aż do zadanej wlącznie.
Funkcja ma nazywać się primes i przyjmować jeden argument: to_number. """
def primes(number):
    """Return the list of prime numbers p with 2 <= p <= number.

    Args:
        number: inclusive upper bound; values below 2 yield an empty list.

    Returns:
        The primes in ascending order.
    """
    result = []
    for candidate in range(2, number + 1):
        # Trial division by already-found primes up to sqrt(candidate) is
        # sufficient, replacing the original O(n^2) scan of all smaller ints.
        is_prime = True
        for p in result:
            if p * p > candidate:
                break
            if candidate % p == 0:
                is_prime = False
                break
        if is_prime:
            result.append(candidate)
    return result
assert primes(5) == [2, 3, 5]
|
999,989 | e80dc850d31b14188a55c38a3f487de0e71ee6bf | from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import pygame
# Drawing colors: tracked-joint dots, an (unused here) highlight color, and
# the background the frame surface is cleared to each redraw.
TRACKING_COLOR = pygame.color.Color("green")
HIGHLIGHT_COLOR = pygame.color.Color("red")
BG_COLOR = pygame.color.Color("white")
class BodyGameRuntime(object):
    """Fullscreen pygame app that draws tracked Kinect body joints as dots.

    Runs at up to 30 FPS until the window is closed or space is pressed.
    """

    def __init__(self):
        pygame.init()
        self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN, 32)
        pygame.display.set_caption("Kinect Body Game Test")
        self.finished = False
        self.clock = pygame.time.Clock()
        # Request both color and body frame streams from the sensor.
        self.kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body)
        # Off-screen surface sized to the color frame; blitted to the screen
        # every tick.
        self.frame_surface = pygame.Surface((self.kinect.color_frame_desc.Width, self.kinect.color_frame_desc.Height), 0, 32)
        self.bodies = None
        self.frame_surface.fill((255, 255, 255))

    def draw_ind_intro_point(self, joints, jointPoints, color, joint0):
        """Draw a filled circle at one joint; skip untracked/inferred joints."""
        joint0State = joints[joint0].TrackingState;
        if (joint0State == PyKinectV2.TrackingState_NotTracked or
            joint0State == PyKinectV2.TrackingState_Inferred):
            return
        center = (int(jointPoints[joint0].x), int(jointPoints[joint0].y))
        try:
            pygame.draw.circle(self.frame_surface, color, center, 10, 0)
        except:  # NOTE(review): bare except presumably guards off-surface coords; consider narrowing.
            pass

    def update_intro_screen(self, joints, jointPoints, color):
        """Redraw the intro view: clear and plot head/hand/wrist/elbow joints."""
        self.frame_surface.fill(BG_COLOR)# blank screen before drawing points
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_Head)
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_HandRight)
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_HandTipRight)
##        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_ThumbRight)
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_HandLeft)
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_HandTipLeft)
##        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_ThumbLeft)
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_WristLeft)
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_WristRight)
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_ElbowLeft)
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_ElbowRight)

    def run(self):
        """Main loop: pump events, pull body frames, draw tracked bodies."""
        while not self.finished:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.finished = True
                if event.type == pygame.KEYUP and event.key == pygame.K_SPACE:
                    # start round here (remove pass and add function ref)
                    self.finished = True
            if self.kinect.has_new_body_frame():
                self.bodies = self.kinect.get_last_body_frame()
            if self.bodies is not None:
                for i in range(0, self.kinect.max_body_count):
                    body = self.bodies.bodies[i]
                    if not body.is_tracked:
                        continue
                    joints = body.joints
                    # Project skeleton joints into color-frame pixel space.
                    joint_points = self.kinect.body_joints_to_color_space(joints)
                    self.update_intro_screen(joints, joint_points, TRACKING_COLOR)
            self.screen.blit(self.frame_surface, (0,0))
            pygame.display.update()
            # Cap the loop at 30 frames per second.
            self.clock.tick(30)
        self.kinect.close()
        pygame.quit()
# Entry point: run the game loop until quit or space is pressed.
if __name__ == "__main__":
    game = BodyGameRuntime()
    game.run()
|
999,990 | 75d169a6fd8a39d0cd8ee8aa37d543fc8cc5a163 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
# Public names re-exported by this generated module.
__all__ = [
    'KnownDataCollectionEndpointResourceKind',
    'KnownDataCollectionRuleResourceKind',
    'KnownDataFlowStreams',
    'KnownExtensionDataSourceStreams',
    'KnownPerfCounterDataSourceStreams',
    'KnownPublicNetworkAccessOptions',
    'KnownSyslogDataSourceFacilityNames',
    'KnownSyslogDataSourceLogLevels',
    'KnownSyslogDataSourceStreams',
    'KnownWindowsEventLogDataSourceStreams',
]
class KnownDataCollectionEndpointResourceKind(str, Enum):
    """
    The kind of the resource (operating system of the data collection endpoint).
    """
    LINUX = "Linux"
    WINDOWS = "Windows"
class KnownDataCollectionRuleResourceKind(str, Enum):
    """
    The kind of the resource (operating system the data collection rule targets).
    """
    LINUX = "Linux"
    WINDOWS = "Windows"
class KnownDataFlowStreams(str, Enum):
    """Known stream names a data flow can reference."""
    MICROSOFT_EVENT = "Microsoft-Event"
    MICROSOFT_INSIGHTS_METRICS = "Microsoft-InsightsMetrics"
    MICROSOFT_PERF = "Microsoft-Perf"
    MICROSOFT_SYSLOG = "Microsoft-Syslog"
    MICROSOFT_WINDOWS_EVENT = "Microsoft-WindowsEvent"
class KnownExtensionDataSourceStreams(str, Enum):
    """Known stream names an extension data source can emit."""
    MICROSOFT_EVENT = "Microsoft-Event"
    MICROSOFT_INSIGHTS_METRICS = "Microsoft-InsightsMetrics"
    MICROSOFT_PERF = "Microsoft-Perf"
    MICROSOFT_SYSLOG = "Microsoft-Syslog"
    MICROSOFT_WINDOWS_EVENT = "Microsoft-WindowsEvent"
class KnownPerfCounterDataSourceStreams(str, Enum):
    """Known stream names a performance-counter data source can emit."""
    MICROSOFT_PERF = "Microsoft-Perf"
    MICROSOFT_INSIGHTS_METRICS = "Microsoft-InsightsMetrics"
class KnownPublicNetworkAccessOptions(str, Enum):
    """
    The configuration to set whether network access from public internet to the endpoints are allowed.
    """
    ENABLED = "Enabled"
    DISABLED = "Disabled"
class KnownSyslogDataSourceFacilityNames(str, Enum):
    """Known syslog facility names; ASTERISK ("*") matches all facilities."""
    AUTH = "auth"
    AUTHPRIV = "authpriv"
    CRON = "cron"
    DAEMON = "daemon"
    KERN = "kern"
    LPR = "lpr"
    MAIL = "mail"
    MARK = "mark"
    NEWS = "news"
    SYSLOG = "syslog"
    USER = "user"
    UUCP = "uucp"
    LOCAL0 = "local0"
    LOCAL1 = "local1"
    LOCAL2 = "local2"
    LOCAL3 = "local3"
    LOCAL4 = "local4"
    LOCAL5 = "local5"
    LOCAL6 = "local6"
    LOCAL7 = "local7"
    ASTERISK = "*"
class KnownSyslogDataSourceLogLevels(str, Enum):
    """Known syslog severity levels; ASTERISK ("*") matches all levels."""
    DEBUG = "Debug"
    INFO = "Info"
    NOTICE = "Notice"
    WARNING = "Warning"
    ERROR = "Error"
    CRITICAL = "Critical"
    ALERT = "Alert"
    EMERGENCY = "Emergency"
    ASTERISK = "*"
class KnownSyslogDataSourceStreams(str, Enum):
    """Known stream names a syslog data source can emit."""
    MICROSOFT_SYSLOG = "Microsoft-Syslog"
class KnownWindowsEventLogDataSourceStreams(str, Enum):
    """Known stream names a Windows event log data source can emit."""
    MICROSOFT_WINDOWS_EVENT = "Microsoft-WindowsEvent"
    MICROSOFT_EVENT = "Microsoft-Event"
|
999,991 | ed3a61e5d28b6518a622f8386b01ddc4c4c77a51 | import csv
from collections import defaultdict
import numpy as np
from sklearn.cross_validation import train_test_split
from pre_survey import *
from sets import Set
# Global lookup tables populated from the course data dumps loaded below.
responseUserID = {}  # survey response id -> anon screen name
userScore = {}  # anon screen name -> post-survey Q1.1 score
userVideoTime = {}  # user -> {video name -> time value (unit unverified)}
userVideoMatrix = {}  # user -> {video name -> per-video count from the matrix CSV}
videoNames = []
videoCounts = defaultdict(int)
videoCountsClassification = defaultdict(int)  # binary engagement label per user
userAvgExtraTime = defaultdict(int)
userTtlExtraTime = defaultdict(int)
screen_name_to_gender = defaultdict(int)
screen_name_to_year_of_birth = defaultdict(int)
screen_name_to_education = defaultdict(int)
screen_name_to_post_count = defaultdict(int)
screen_name_to_comment_count = defaultdict(int)
screen_name_activity_score = {}
activity_name_set = []
screen_name_to_continent = {}
# Number of leading video columns treated as training features, and the view
# count threshold used to derive the binary label.
# NOTE(review): 'countThreshhold' is a typo kept for consistency with uses below.
train_num = 20
countThreshhold = 2
# get activity grade
# Populates screen_name_activity_score[user][module] = grade and collects the
# ordered list of module ids in activity_name_set.
with open("../../data/EarthSciences_ResGeo202_Spring2015_ActivityGrade.csv", "r") as csvfile :
    lines = csv.reader(csvfile, delimiter = ',', quotechar = '"')
    for line in lines :
        # Skip the header row (quoted column name).
        if line[12] == "'anon_screen_name'" :
            continue
        # line[12] -> anon_screen_name, line[14] -> module_id, line[3] -> grade
        if line[12] not in screen_name_activity_score :
            screen_name_activity_score[line[12]] = {}
            screen_name_activity_score[line[12]][line[14]] = line[3]
        else :
            screen_name_activity_score[line[12]][line[14]] = line[3]
        if line[14] not in activity_name_set:
            activity_name_set.append(line[14])
# print screen_name_activity_score
## Get ResponseID - UserID matching
# First occurrence wins when a response id repeats.
with open('../../data/survey_post_EarthSciences_ResGeo202_Spring2015_respondent_metadata.csv', 'r') as csvfile :
    lines = csv.reader(csvfile, delimiter = ',', quotechar = '"')
    for line in lines :
        if line[1] not in responseUserID:
            responseUserID[line[1]] = line[2]
# print responseUserID
# Get Survey Data, only get the ID that has a matching
# Keeps only Q1.1 answers whose response id maps to a known user.
with open('../../data/survey_post_EarthSciences_ResGeo202_Spring2015_response.csv', 'r') as csvfile :
    lines = csv.reader(csvfile, delimiter = ',', quotechar = '"')
    for line in lines :
        if (line[2] == "Q1.1" and line[4] != ''):
            if line[1] in responseUserID:
                userScore[responseUserID[line[1]]] = int(line[4])
# build nationality to continent mapping
# The input file lists countries under '$'-prefixed continent headers; each
# header advances current_continent, matching the order of `continents`.
# NOTE(review): hard-coded absolute path; 'Austrilia' is a kept typo.
Asia = Set()
Europe = Set()
North_America = Set()
South_America = Set()
Africa = Set()
Austrilia = Set()
current_continent = -1
continents = [Asia, North_America, South_America, Africa, Europe, Austrilia]
with open('/Users/guorm/stanford/2016-spring/CS341/codes/countries_of_continent.txt', 'r') as csvfile :
    lines = csv.reader(csvfile)
    for line in lines :
        # Skip blanks, '#' comments, and single-character lines.
        if len(line) == 0 or line[0].startswith('#') or len(line[0]) == 1 :
            continue
        if line[0].startswith('$') :
            current_continent += 1
        else :
            continents[current_continent].add(line[0])
# extracting demographic feature
# Fills gender/year-of-birth/education/continent tables keyed by screen name.
first = True
with open('../../data/EarthSciences_ResGeo202_Spring2015_demographics.csv', 'r') as csvfile :
    lines = csv.reader(csvfile, delimiter = ',', quotechar = '"')
    for line in lines :
        if first:
            first = False
            continue
        # if line[0] in userScore :
        if True:
            # gender feature: blank -> 0, m -> -1, f -> +1
            if line[1] == "m" :
                screen_name_to_gender[line[0]] = -1
            if line[1] == "f" :
                screen_name_to_gender[line[0]] = 1
            # NOTE(review): tests line[1] (the gender value) against the dict
            # keys; line[0] (the screen name) was probably intended.
            if line[1] not in screen_name_to_gender and line[0] != "\\N" :
                screen_name_to_gender[line[0]] = 0
            # year_of_birth
            if line[2] != "\\N" :
                screen_name_to_year_of_birth[line[0]] = int(line[2])
            # ?????????????????????????????????????????????????
            ## should set it to 0 if birth year not provided???
            if line[2] == "\\N" and line[0] != "\\N":
                screen_name_to_year_of_birth[line[0]] = 0
            # level_of_education:
            # Doctorate -> 7
            # Masters or professional degree -> 6
            # Bachelors -> 5
            # Associates -> 4
            # Secondary/High School -> 3
            # Junior secondary/junior high/middle School -> 2
            # Elementary/Primary School -> 1
            # None, Other, User withheld, Signup before level collected -> 0
            if line[3] == "Doctorate" :
                screen_name_to_education[line[0]] = 7
            if line[3] == "Masters or professional degree" :
                screen_name_to_education[line[0]] = 6
            if line[3] == "Bachelors" :
                screen_name_to_education[line[0]] = 5
            if line[3] == "Associates" :
                screen_name_to_education[line[0]] = 4
            if line[3] == "Secondary/High School" :
                screen_name_to_education[line[0]] = 3
            if line[3] == "Junior secondary/junior high/middle School" :
                screen_name_to_education[line[0]] = 2
            if line[3] == "Elementary/Primary School" :
                screen_name_to_education[line[0]] = 1
            if line[0] not in screen_name_to_education and line[0] != "\\N":
                screen_name_to_education[line[0]] = 0
            # nationality
            # One-hot over 6 continents plus a catch-all 7th slot.
            screen_name_to_continent[line[0]] = [0] * 7
            if ";" in line[5] :
                # Multi-valued nationality: special-case Taiwan, else "other".
                if "Taiwan" in line[5] :
                    screen_name_to_continent[line[0]][0] = 1
                else :
                    screen_name_to_continent[line[0]][6] = 1
            else :
                for i in range(6) :
                    if line[5] in continents[i] :
                        screen_name_to_continent[line[0]][i] = 1
                        break
                if screen_name_to_continent[line[0]] == [0] * 7 :
                    screen_name_to_continent[line[0]][6] = 1
# print len(screen_name_to_gender)
# print len(screen_name_to_year_of_birth)
# print len(screen_name_to_education)
# Extracting forum feature
# Counts new threads ("CommentThread") and replies ("Comment") per user.
first = True
with open('../../data/EarthSciences_ResGeo202_Spring2015_Forum.csv', 'r') as csvfile :
    lines = csv.reader(csvfile, delimiter = ',', quotechar = '"')
    for line in lines :
        if first:
            first = False
            continue
        # if len(line) > 2 and line[1] in userScore:
        if len(line) > 2:
            # print line[1], line[2]
            if line[2] == "CommentThread":
                screen_name_to_post_count[line[1]] += 1
            if line[2] == "Comment":
                screen_name_to_comment_count[line[1]] += 1
# Ordered list of video names; index order must match the matrix columns.
with open('../countVideos/EarthSciences_ResGeo202_Spring2015_VideoNames.csv', 'r') as csvfile :
    lines = csv.reader(csvfile, delimiter = ',', quotechar = '"')
    for line in lines :
        video = line[0]
        videoNames.append(video)
# print len(videoNames)
# Extracting video counts
# Sums per-user views over the columns AFTER the first train_num videos to
# derive the binary label, and stores the full per-video row in
# userVideoMatrix.
first = True
with open('../countVideos/EarthSciences_ResGeo202_Spring2015_UserVideo_Matrix.csv', 'r') as csvfile :
    lines = csv.reader(csvfile, delimiter = ',', quotechar = '"')
    for line in lines :
        if first:
            first = False
            continue
        key = line[0]
        count = 0
        for i in xrange(train_num + 1, len(line)):
            count += int(line[i])
        videoCounts[key] = count
        countClass = 0
        if count >= countThreshhold:
            countClass = 1
        videoCountsClassification[key] = countClass
        # print line
        # print len(line)
        for i in xrange(1, len(line)):
            video = videoNames[i-1]
            if key not in userVideoMatrix:
                userVideoMatrix[key] = {}
            userVideoMatrix[key][video] = int(line[i])
# Per-user average extra time (unit unverified; presumably seconds).
with open('../countVideos/EarthSciences_ResGeo202_Spring2015_UserAvgExtraTime.csv', 'r') as csvfile :
    lines = csv.reader(csvfile, delimiter = ',', quotechar = '"')
    for line in lines :
        user = line[0]
        avgTime = int(line[1])
        userAvgExtraTime[user] = avgTime
# print userAvgExtraTime
# Per-user TOTAL extra time.  BUGFIX: the original stored these values into
# userAvgExtraTime, silently clobbering the averages loaded just above and
# leaving userTtlExtraTime (declared earlier) forever empty.
with open('../countVideos/EarthSciences_ResGeo202_Spring2015_UserTtlExtraTime.csv', 'r') as csvfile :
    lines = csv.reader(csvfile, delimiter = ',', quotechar = '"')
    for line in lines :
        user = line[0]
        ttlTime = int(line[1])
        userTtlExtraTime[user] = ttlTime
# user video time
# Per (user, video) time value (unit unverified).
with open('../countVideos/EarthSciences_ResGeo202_Spring2015_UserVideoTime.csv', 'r') as csvfile :
    lines = csv.reader(csvfile, delimiter = ',', quotechar = '"')
    for line in lines :
        user = line[0]
        video = line[1]
        time = int(line[2])
        if user not in userVideoTime:
            userVideoTime[user] = {}
        userVideoTime[user][video] = time
# print userVideoTime
# print videoNames
# Assemble the feature matrix X (one row per labeled user, one column per
# training video's watch time) and label vector Y, then split 80/20.
# NOTE(review): activityFeatureLen is computed and immediately overwritten
# with 0, disabling the activity-score features below.
activityFeatureLen = len(activity_name_set)
activityFeatureLen = 0
videoFeatureLen = len(videoNames)
videoFeatureTrainLen = train_num
# number of other features
widthOther = 0
width = widthOther + videoFeatureTrainLen + activityFeatureLen
# number of data
height = len(videoCountsClassification)
# height = len(userScore)
X = np.zeros((height, width))
# Y = np.array([0])
Y = np.zeros(height)
i = 0
# NOTE(review): 'id' shadows the builtin of the same name.
for id in videoCountsClassification:
# for id in userScore:
    # print id
    # X[i][0] = screen_name_to_gender[id]
    # X[i][1] = screen_name_to_year_of_birth[id]
    # X[i][2] = screen_name_to_education[id]
    # X[i][3] = screen_name_to_post_count[id]
    # X[i][4] = screen_name_to_comment_count[id]
    # X[i][5] = userAvgExtraTime[id]
    # X[i][6] = userTtlExtraTime[id]
    # X[i][7] = screen_name_courses_started[id]
    # X[i][8] = screen_name_courses_finished[id]
    # X[i][9] = screen_name_hours_spent[id]
    for j in range(widthOther, width - activityFeatureLen):
        video = videoNames[j - widthOther]
        X[i][j] = 0
        if id in userVideoTime and video in userVideoTime[id]:
            X[i][j] = userVideoTime[id][video]
        # X[i][j] = userVideoMatrix[id][video]
    # for j in range(activityFeatureLen):
    #     video = activity_name_set[j]
    #     if id in screen_name_activity_score and video in screen_name_activity_score[id]:
    #         X[i][j] = screen_name_activity_score[id][video]
    # X[i][0] = videoCountsClassification[id]
    # Y[i] = userScore[id]
    Y[i] = videoCountsClassification[id]
    i += 1
# print X
# print Y
x_train, x_test, y_train, y_test = train_test_split(X, Y,
                                                    test_size=0.2,
                                                    random_state=0)
print len(videoCountsClassification)
print len(userScore)
|
999,992 | 0722aec69a901e906b5b1f0fbc1a844ddb13e623 | from hashlib import sha256
import json
import time
import pymongo
from pymongo import MongoClient
from flask import Flask, request,flash
import requests
import copy
import quality_control
class Block:
    """One chain block: index, transaction list, timestamp, previous block's
    hash, and a proof-of-work nonce."""

    def __init__(self, index, transactions, timestamp, previous_hash, nonce=0):
        self.index = index
        self.transactions = transactions
        self.timestamp = timestamp
        self.previous_hash = previous_hash
        self.nonce = nonce

    def compute_hash(self):
        """Return the SHA-256 hex digest of the block's JSON-serialized
        instance attributes (keys sorted for determinism)."""
        serialized = json.dumps(self.__dict__, sort_keys=True)
        return sha256(serialized.encode()).hexdigest()
class Blockchain:
# difficulty of our PoW algorithm
difficulty = 1
    def __init__(self):
        # Hashes of every transaction ever submitted, for de-duplication.
        self.tx_ids = set()
        # qc_id -> quality_control.QualityControl for in-flight checks.
        self.qc_checker={}
        # Validated transactions awaiting mining, keyed by tx hash.
        self.unconfirmed_transactions = {}
        self.chain = []
        # Received transactions not yet validated, keyed by tx hash.
        self.unvalidated_transactions = {}
        # Whether this node currently holds the right to mine.
        self.mining_right=False
    def create_genesis_block(self):
        """
        A function to generate genesis block and appends it to
        the chain. The block has index 0, previous_hash as 0, and
        a valid hash.
        """
        genesis_block = Block(0, [], 0, "0")
        # The genesis hash is stored directly; no proof-of-work is required.
        genesis_block.hash = genesis_block.compute_hash()
        self.chain.append(genesis_block)
    @property
    def last_block(self):
        # Most recently appended block; assumes the chain is non-empty.
        return self.chain[-1]
    def add_block(self, block, proof):
        """
        A function that adds the block to the chain after verification.
        Verification includes:
        * Checking if the proof is valid.
        * The previous_hash referred in the block and the hash of latest block
          in the chain match.

        Returns True on success, False if either check fails.
        """
        previous_hash = self.last_block.hash
        if previous_hash != block.previous_hash:
            return False
        if not Blockchain.is_valid_proof(block, proof):
            return False
        # Store the accepted proof as the block's hash before appending.
        block.hash = proof
        self.chain.append(block)
        return True
    @staticmethod
    def proof_of_work(block):
        """
        Function that tries different values of nonce to get a hash
        that satisfies our difficulty criteria.

        Mutates block.nonce in place (resetting it to 0 first) and returns
        the first hash with `difficulty` leading '0' characters.
        """
        block.nonce = 0
        computed_hash = block.compute_hash()
        while not computed_hash.startswith('0' * Blockchain.difficulty):
            block.nonce += 1
            computed_hash = block.compute_hash()
        return computed_hash
def add_new_transaction(self, transaction):
tx_hash = sha256(json.dumps(transaction).encode()).hexdigest()
if tx_hash not in self.tx_ids:
self.unvalidated_transactions[tx_hash] = transaction
self.tx_ids.add(tx_hash)
return transaction
    def tx_validation(self,transaction):
        """Validate a pending transaction, optionally via quality control.

        With `checking` hard-coded False, the transaction is moved straight
        from unvalidated to unconfirmed and (True, None) is returned.  The
        quality-control branch batches SC transactions per (CI, term) group
        and, when the batch validates, promotes the whole batch and returns
        (True, tx_ids, qc_id).
        NOTE(review): the two paths return tuples of different arity, and the
        qc path implicitly returns None while a batch is incomplete or fails
        validation.  `record_in_db` is not defined in this file's visible
        scope — presumably imported elsewhere; verify.
        """
        # Quality-control gate is currently disabled.
        checking = False
        tx_id = sha256(json.dumps(transaction).encode()).hexdigest()
        # SC: batch size required before a quality-control group is evaluated.
        SC =10
        if checking:
            qc_id = '%s_%s'%(transaction['CI'],transaction['term'])
            if qc_id not in self.qc_checker:
                record_in_db(transaction=tx_id,activity='Opening quality control',qc_id=qc_id,mode='validation')
                self.qc_checker[qc_id] = quality_control.QualityControl(CI=transaction['CI'],term=transaction['term'])
                self.qc_checker[qc_id].tx_ids =[]
            # Each transaction carries a single {variable: value} payload.
            qc_variable = list(transaction['data'].keys())[0]
            qc_value = transaction['data'][qc_variable]
            record_in_db(transaction=tx_id,activity='Filling quality control',qc_id=qc_id,mode='validation')
            self.qc_checker[qc_id].add_variable(qc_variable,qc_value)
            self.qc_checker[qc_id].tx_ids.append(tx_id)
            #Caution!!!
            #Quality checking part
            if len(self.qc_checker[qc_id].data)==SC:
                self.qc_checker[qc_id].update_validation()
                if self.qc_checker[qc_id].validation ==True:
                    record_in_db(transaction =self.qc_checker[qc_id].tx_ids,activity='Closing quality control',qc_id=qc_id,mode='validation')
                    # Promote the whole validated batch to the mining queue.
                    for txs in self.qc_checker[qc_id].tx_ids:
                        self.unconfirmed_transactions[txs] = self.unvalidated_transactions[txs]
                        del self.unvalidated_transactions[txs]
                    return True, self.qc_checker[qc_id].tx_ids, qc_id
        else:
            qc_id = '%s_%s'%(transaction['CI'],transaction['term'])
            record_in_db(transaction=tx_id,activity='Transaction validated (without qc)',qc_id=qc_id,mode='validation')
            self.unconfirmed_transactions[tx_id] = self.unvalidated_transactions[tx_id]
            del self.unvalidated_transactions[tx_id]
            return True, None
def add_validated_transaction(self, transaction):
    """Store a transaction already validated by another node.

    Deduplicates by the transaction's SHA-256 digest before adding it to
    the mineable (unconfirmed) pool.
    """
    tx_hash = sha256(json.dumps(transaction).encode()).hexdigest()
    # Direct dict membership instead of building a throwaway key list.
    if tx_hash not in self.unconfirmed_transactions:
        self.unconfirmed_transactions[tx_hash] = transaction
@classmethod
def is_valid_proof(cls, block, block_hash):
    """
    Check if block_hash is valid hash of block and satisfies
    the difficulty criteria.
    """
    # Blockchain.difficulty (class attribute defined with the class) is the
    # required number of leading zeros in the hash.
    return (block_hash.startswith('0' * Blockchain.difficulty) and
            block_hash == block.compute_hash())
@classmethod
def check_chain_validity(cls, chain):
    """Verify every block links to its predecessor with a valid hash.

    Temporarily removes each block's `hash` attribute so compute_hash()
    reflects only the block's contents, then restores it on success.

    NOTE(review): this expects Block objects with hash/previous_hash
    attributes — confirm every call site passes Blocks, not the raw JSON
    dicts returned by the /chain endpoint.
    """
    result = True
    previous_hash = "0"  # the genesis block links to the literal "0"
    for block in chain:
        block_hash = block.hash
        # remove the hash field to recompute the hash again
        # using `compute_hash` method.
        delattr(block, "hash")
        if not cls.is_valid_proof(block, block_hash) or \
                previous_hash != block.previous_hash:
            result = False
            break
        block.hash, previous_hash = block_hash, block_hash
    return result
def mine(self):
    """Mine one block from the pending (validated) transaction pool.

    Only a node holding the mining right may mine.

    Returns:
        True when a block was mined; False when mining is not permitted or
        there is nothing to mine.
    """
    # Fix: previously this case fell through and returned None implicitly;
    # callers test truthiness, so an explicit False is equivalent but clear.
    if not self.mining_right:
        return False
    if not self.unconfirmed_transactions:
        return False
    last_block = self.last_block
    new_block = Block(index=last_block.index + 1,
                      transactions=self.unconfirmed_transactions,
                      timestamp=time.time(),
                      previous_hash=last_block.hash)
    proof = self.proof_of_work(new_block)
    self.add_block(new_block, proof)
    # The mined block keeps a reference to the old dict, so the pool must be
    # rebound (not mutated) to empty it.
    self.unconfirmed_transactions = {}
    return True
# Flask application exposing this node's HTTP API.
app = Flask(__name__)
# the node's copy of blockchain
blockchain = Blockchain()
blockchain.create_genesis_block()
# the address to other participating members of the network
# (stored as base URLs; mutated by /register_node and /register_with)
peers = set()
# endpoint to submit a new transaction. This will be used by
# our application to add new data (posts) to the blockchain
@app.route('/new_transaction', methods=['POST'])
def new_transaction():
    """Accept a transaction, record/validate it, broadcast it, and trigger mining."""
    tx_data = request.get_json()
    required_fields = ["CI", 'term']
    for field in required_fields:
        # NOTE(review): present-but-falsy values (0, "") are also rejected
        # by this truthiness check — confirm that is intended.
        if not tx_data.get(field):
            # Fix: a malformed request body is a client error (400), not a
            # missing resource (404). Debug print removed.
            return "Invalid transaction data", 400
    tx_data["timestamp"] = time.time()
    record_in_db(tx_data, 'Transaction initiated')
    add_tx_result = blockchain.add_new_transaction(tx_data)
    record_in_db(add_tx_result, 'Transaction in validated')
    announce_new_transaction(add_tx_result)
    blockchain.tx_validation(add_tx_result)
    mine_unconfirmed_transactions()
    return "Success", 201
# endpoint to return the node's copy of the chain.
# Our application will be using this endpoint to query
# all the posts to display.
@app.route('/chain', methods=['GET'])
def get_chain():
    """Return this node's full chain plus known peers as a JSON document."""
    blocks = [block.__dict__ for block in blockchain.chain]
    return json.dumps({"length": len(blocks),
                       "chain": blocks,
                       "peers": list(peers)})
@app.route('/peers', methods=['GET'])
def get_peers():
    # Known peer addresses as a JSON list (order unspecified).
    return json.dumps(list(peers))
# endpoint to request the node to mine the unconfirmed
# transactions (if any). We'll be using it to initiate
# a command to mine from our application itself.
@app.route('/mine', methods=['GET'])
def mine_unconfirmed_transactions():
    """Mine once at least 10 transactions are pending, then broadcast the block."""
    if len(blockchain.unconfirmed_transactions) >= 10:
        result = blockchain.mine()
        if not result:
            return "No transactions to mine"
        else:
            # Making sure we have the longest chain before announcing to the network
            chain_length = len(blockchain.chain)
            consensus()
            if chain_length == len(blockchain.chain):
                tx_in_last_block = blockchain.last_block.transactions
                record_in_db(tx_in_last_block, 'Mining new block', mode='block')
                # announce the recently mined block to the network
                announce_new_block(blockchain.last_block)
            return "Block #{} is mined.".format(blockchain.last_block.index)
    # Fix: the below-threshold case previously fell through and returned
    # None, which is an error for a Flask view; report it explicitly.
    return "Not enough transactions to mine"
# endpoint to add new peers to the network.
@app.route('/register_node', methods=['POST'])
def register_new_peers():
    # The registering node sends its own base URL.
    node_address = request.get_json()["node_address"]
    if not node_address:
        return "Invalid data", 400
    # Add the node to the peer list
    peers.add(node_address)
    # Return the consensus blockchain to the newly registered node
    # so that he can sync
    return get_chain()
@app.route('/mining_right', methods=['GET'])
def permit_minig():
    """Grant this node the mining right (idempotent) and report the flag."""
    if not blockchain.mining_right:
        blockchain.mining_right = True
    return str(blockchain.mining_right)
@app.route('/register_with', methods=['POST'])
def register_with_existing_node():
    """
    Internally calls the `register_node` endpoint to
    register current node with the node specified in the
    request, and sync the blockchain as well as peer data.
    """
    node_address = request.get_json()["node_address"]
    if not node_address:
        return "Invalid data", 400
    data = {"node_address": request.host_url}
    headers = {'Content-Type': "application/json"}
    # Make a request to register with remote node and obtain information
    response = requests.post(node_address + "/register_node",
                             data=json.dumps(data), headers=headers)
    if response.status_code == 200:
        global blockchain
        global peers
        # update chain and the peers
        chain_dump = response.json()['chain']
        # Rebuild the local chain from the remote dump (verifies each block).
        blockchain = create_chain_from_dump(chain_dump)
        # peers.update(response.json()['peers'])
        peers.add(node_address+'/') #Add other node address to peers
        return "Registration successful", 200
    else:
        # if something goes wrong, pass it on to the API response
        return response.content, response.status_code
def create_chain_from_dump(chain_dump):
    """Rebuild a Blockchain from a JSON chain dump, verifying every block.

    The dump's first entry is assumed to be a genesis block and skipped;
    a fresh genesis block is created locally instead.

    Raises:
        Exception: when any dumped block fails verification against the
        chain built so far (i.e. the dump is tampered).
    """
    generated_blockchain = Blockchain()
    generated_blockchain.create_genesis_block()
    for idx, block_data in enumerate(chain_dump):
        if idx == 0:
            continue  # skip genesis block
        block = Block(block_data["index"],
                      block_data["transactions"],
                      block_data["timestamp"],
                      block_data["previous_hash"],
                      block_data["nonce"])
        proof = block_data['hash']
        added = generated_blockchain.add_block(block, proof)
        if not added:
            raise Exception("The chain dump is tampered!!")
    return generated_blockchain
# endpoint to add a block mined by someone else to
# the node's chain. The block is first verified by the node
# and then added to the chain.
@app.route('/add_block', methods=['POST'])
def verify_and_add_block():
    """Verify a block received from a peer, append it, and re-broadcast it."""
    block_data = request.get_json()
    block = Block(block_data["index"],
                  block_data["transactions"],
                  block_data["timestamp"],
                  block_data["previous_hash"],
                  block_data["nonce"])
    proof = block_data['hash']
    added = blockchain.add_block(block, proof)
    # Fix: the original announced the block *before* checking `added`, so
    # even rejected blocks were re-broadcast to every peer (gossip loop).
    if not added:
        return "The block was discarded by the node", 400
    announce_new_block(block)
    # Drop any pending transactions that are now included in this block.
    # (pop with default replaces the original bare try/except: pass.)
    for tx_hash in list(block.transactions.keys()):
        blockchain.unconfirmed_transactions.pop(tx_hash, None)
    return "Block added to the chain", 201
# endpoint to add new transaction by someone else to own mining pool
@app.route('/add_received_transaction', methods=['POST'])
def add_transaction():
    """Accept a transaction forwarded by a peer and feed it into validation."""
    tx_data = request.get_json()
    # Fix: the original round-tripped via eval(json.dumps(tx_data)), which
    # crashes on JSON literals (true/false/null) and evaluates arbitrary
    # expressions. request.get_json() already yields a plain dict.
    tx_hash = sha256(json.dumps(tx_data).encode()).hexdigest()
    if tx_hash in blockchain.tx_ids:
        added = False
    else:
        add_tx_result = blockchain.add_new_transaction(tx_data)
        record_in_db(add_tx_result, 'Transaction received')
        announce_new_transaction(add_tx_result)
        blockchain.tx_validation(add_tx_result)
        mine_unconfirmed_transactions()
        added = True
    if not added:
        return "Something wrong transaction is not added", 400
    return "Block added to the chain", 201
# endpoint to query unconfirmed transactions
@app.route('/pending_tx', methods=['GET'])
def get_pending_tx():
    # Validated transactions waiting to be mined into a block.
    return json.dumps(blockchain.unconfirmed_transactions)
@app.route('/unvalidated_tx', methods=['GET'])
def get_unvalidated_tx():
    # Transactions received but not yet promoted by tx_validation().
    return json.dumps(blockchain.unvalidated_transactions)
def consensus():
    """
    Our naive consensus algorithm. If a longer valid chain is
    found, our chain is replaced with it.

    Returns True when the local chain was replaced, False otherwise.
    """
    global blockchain
    longest_chain = None
    current_len = len(blockchain.chain)
    for node in peers:
        try:
            response = requests.get('{}chain'.format(node))
            length = response.json()['length']
            chain_dump = response.json()['chain']
        except (requests.RequestException, ValueError, KeyError):
            # An unreachable or malformed peer must not abort consensus.
            continue
        if length > current_len:
            # Fix: the original passed the raw JSON dicts to
            # check_chain_validity (which needs Block objects) and then
            # assigned the plain list to `blockchain`. Rebuilding through
            # create_chain_from_dump both validates and converts.
            try:
                candidate = create_chain_from_dump(chain_dump)
            except Exception:
                continue  # tampered/invalid dump: ignore this peer
            current_len = length
            longest_chain = candidate
    if longest_chain:
        blockchain = longest_chain
        return True
    return False
def announce_new_transaction(tx_data):
    """
    A function to announce to the network once a transaction has been transferred.
    Other nodes store the received transaction in the unmined memory
    """
    for peer in peers:
        # NOTE(review): peers registered via /register_with carry a trailing
        # '/', so this URL contains '//add_received_transaction' — appears
        # tolerated in practice, but confirm.
        url = "{}/add_received_transaction".format(peer)
        headers = {'Content-Type': "application/json"}
        requests.post(url,
                      data=json.dumps(tx_data),
                      headers=headers)
def announce_new_block(block):
    """
    A function to announce to the network once a block has been mined.
    Other blocks can simply verify the proof of work and add it to their
    respective chains.
    """
    for peer in peers:
        url = "{}/add_block".format(peer)
        headers = {'Content-Type': "application/json"}
        # sort_keys=True keeps the serialized block deterministic across nodes.
        requests.post(url,
                      data=json.dumps(block.__dict__, sort_keys=True),
                      headers=headers)
def record_in_db(transaction, activity, mode='transaction', qc_id=None):
    '''
    Record invoked activity and transaction in database.

    mode : default = 'transaction'
        'transaction' — `transaction` is the raw tx dict; it is copied and
                        keyed by its SHA-256 digest.
        'block'       — `transaction` is the mined block's tx dict; only the
                        tx ids are stored.
        'validation'  — `transaction` is a tx id (or list of ids) plus the
                        quality-control group id.
    Any other mode records nothing (matching the original behavior).
    Returns ("Success", 201) in all cases.
    '''
    # Fix: removed a stray leading `pass` left over from stubbing.
    # NOTE(review): a new MongoClient per call and no close; reuse of a
    # module-level client would be preferable — confirm deployment intent.
    conn = MongoClient('172.28.2.1:27017')
    # conn = MongoClient('127.0.0.1:27017')
    db = conn.blockchaindb
    collect = db.transactions
    if mode == 'transaction':
        dbtx = copy.deepcopy(transaction)
        dbtx['Transaction ID'] = sha256(json.dumps(transaction).encode()).hexdigest()
    elif mode == 'block':
        dbtx = {'Transaction ID': list(transaction.keys())}
    elif mode == 'validation':
        dbtx = {'Transaction ID': transaction, 'Quality Control ID': qc_id}
    else:
        return "Success", 201  # unknown mode: nothing recorded
    # Common bookkeeping fields (deduplicated from the three branches).
    dbtx['Time in DB'] = time.time()
    dbtx['Node'] = 'Node ' + str(port - 8000)  # `port` set in __main__
    dbtx['activity'] = activity
    collect.insert(dbtx)
    return "Success", 201
# Uncomment this line if you want to specify the port number in the code
#app.run(debug=True, port=8000)
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
    args = parser.parse_args()
    # `port` is read as a module global by record_in_db to label this node.
    port = args.port
    app.run(host='0.0.0.0', port=port, debug=True)
999,993 | 71e984e071f95075e16eeb209d5d5f221858aa6d | # DROP TABLES
# Drop statements. IF EXISTS makes the reset script safe to run against a
# fresh database (the original failed when a table was missing); CASCADE
# also removes dependent objects such as songplays' foreign keys.
songplay_table_drop = "DROP TABLE IF EXISTS songplays CASCADE"
user_table_drop = "DROP TABLE IF EXISTS users CASCADE"
song_table_drop = "DROP TABLE IF EXISTS songs CASCADE"
artist_table_drop = "DROP TABLE IF EXISTS artists CASCADE"
time_table_drop = "DROP TABLE IF EXISTS time CASCADE"
# CREATE TABLES
songplay_table_create = """CREATE TABLE IF NOT EXISTS songplays (
songplay_id BIGSERIAL PRIMARY KEY,
start_time TIMESTAMP WITHOUT TIME ZONE NOT NULL REFERENCES time(start_time),
user_id VARCHAR NOT NULL REFERENCES users(user_id),
level VARCHAR NOT NULL,
song_id VARCHAR REFERENCES songs(song_id),
artist_id VARCHAR REFERENCES artists(artist_id),
session_id INT NOT NULL,
location TEXT,
user_agent TEXT NOT NULL,
UNIQUE(songplay_id,user_id,song_id,artist_id))
"""
#
# songplay_id, start_time, user_id, level, song_id, artist_id, session_id, location, user_agent
user_table_create = """CREATE TABLE IF NOT EXISTS users (
user_id VARCHAR,
first_name VARCHAR NOT NULL,
last_name VARCHAR NOT NULL,
gender CHAR(1) NOT NULL CHECK(gender = 'F' OR gender = 'M'),
level VARCHAR NOT NULL CHECK(level = 'free' OR level = 'paid'),
PRIMARY KEY (user_id));
"""
song_table_create = """CREATE TABLE IF NOT EXISTS songs (
song_id VARCHAR PRIMARY KEY,
song_title VARCHAR NOT NULL,
artist_id VARCHAR NOT NULL,
year int,
song_duration DECIMAL,
UNIQUE (song_id,artist_id));
"""
artist_table_create = """CREATE TABLE IF NOT EXISTS artists (
artist_id VARCHAR PRIMARY KEY,
artist_name VARCHAR NOT NULL,
location TEXT,
latitude NUMERIC,
longitude NUMERIC);
"""
time_table_create = """CREATE TABLE IF NOT EXISTS time (
start_time TIMESTAMP WITHOUT TIME ZONE PRIMARY KEY,
hour SMALLINT NOT NULL CHECK (hour <=24),
day SMALLINT NOT NULL CHECK (day <=31),
week SMALLINT NOT NULL ,
month SMALLINT NOT NULL CHECK(month <= 12),
year INT NOT NULL,
weekday SMALLINT NOT NULL);
"""
# INSERT RECORDS
songplay_table_insert = """
INSERT INTO songplays(start_time, user_id, level, song_id, artist_id, session_id, location, user_agent) \
VALUES(%s,%s,%s,%s,%s,%s,%s,%s) ON CONFLICT DO NOTHING
"""
user_table_insert = """INSERT INTO users(
user_id, first_name, last_name, gender, level) VALUES (%s,%s,%s,%s,%s) ON CONFLICT (user_id)
DO UPDATE
SET level = EXCLUDED.level
"""
song_table_insert = """INSERT INTO songs(
song_id, song_title, artist_id, year, song_duration) VALUES (%s,%s,%s,%s,%s) ON CONFLICT DO NOTHING
"""
artist_table_insert = """INSERT INTO artists(
artist_id, artist_name, location, latitude, longitude) VALUES (%s,%s,%s,%s,%s) ON CONFLICT DO NOTHING
"""
time_table_insert = """INSERT INTO time(
start_time, hour, day, week, month, year, weekday) VALUES (%s,%s,%s,%s,%s,%s,%s) ON CONFLICT DO NOTHING
"""
# FIND SONGS
song_select = """
SELECT song_id,artist_id FROM songs NATURAL JOIN artists \
WHERE song_title = %s AND artist_name = %s AND song_duration = %s;
"""
# QUERY LISTS

# Dimension tables first: songplays references all of them, so it must be
# created last (and dropped first, below).
create_table_queries = [
    user_table_create,
    song_table_create,
    artist_table_create,
    time_table_create,
    songplay_table_create,
]
drop_table_queries = [
    songplay_table_drop,
    user_table_drop,
    song_table_drop,
    artist_table_drop,
    time_table_drop,
]
|
999,994 | 20c277ff663c86e7a46e3bb593e17cc0b0183c70 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import pymysql
def insert_new_user(stime, y, y_sum):
    # Build an INSERT for the per-hour new-user counts of one day.
    # stime: day key; y: sequence of 24 hourly counts; y_sum: daily total.
    # NOTE(review): values are interpolated with str.format, not
    # parameterized — safe only if inputs are trusted numerics/dates.
    sql = "INSERT INTO new_user (day_t,hour0, hour1, hour2,hour3,hour4,hour5,hour6,hour7,hour8,hour9,hour10,hour11,hour12,hour13,hour14,hour15,hour16,hour17,hour18,hour19,hour20,hour21,hour22,hour23,sums)VALUES('{1}',{0[0]},{0[1]},{0[2]},{0[3]},{0[4]},{0[5]},{0[6]},{0[7]},{0[8]},{0[9]},{0[10]},{0[11]},{0[12]},{0[13]},{0[14]},{0[15]},{0[16]},{0[17]},{0[18]},{0[19]},{0[20]},{0[21]},{0[22]},{0[23]},{2})".format(
        y, stime, y_sum)
    # print(sql)
    return sql
def insert_new_fans(stime, y, y_sum):
    # Same shape as insert_new_user, targeting the aa_new_fans table.
    # NOTE(review): str.format interpolation, not parameterized SQL —
    # safe only for trusted inputs.
    sql = "INSERT INTO aa_new_fans (day_t,hour0, hour1, hour2,hour3,hour4,hour5,hour6,hour7,hour8,hour9,hour10,hour11,hour12,hour13,hour14,hour15,hour16,hour17,hour18,hour19,hour20,hour21,hour22,hour23,sums)VALUES('{1}',{0[0]},{0[1]},{0[2]},{0[3]},{0[4]},{0[5]},{0[6]},{0[7]},{0[8]},{0[9]},{0[10]},{0[11]},{0[12]},{0[13]},{0[14]},{0[15]},{0[16]},{0[17]},{0[18]},{0[19]},{0[20]},{0[21]},{0[22]},{0[23]},{2})".format(
        y, stime, y_sum)
    # print(sql)
    return sql
def insert_uid_json(stime, user_json):
    # Serialize user_json and build an INSERT storing it for day `stime`.
    # NOTE(review): pymysql.escape_string was removed in PyMySQL 1.x —
    # confirm the pinned pymysql version, or switch to parameterized
    # execution at the call site.
    tsql = "insert into user(day_t,info) values('{day_t}','{json}')"
    s = json.dumps(user_json)
    sql1 = tsql.format(day_t=stime, json=pymysql.escape_string(s))
    # print(sql1)
    return sql1
def update_json(stime, field_name, user_json, table_name):
    # Build an UPDATE setting one field to the JSON-serialized value for a day.
    # NOTE(review): {field_value} is not quoted here, unlike the quoted
    # '{f_v1_json}' in update_twof_json — the generated SQL is invalid for
    # string/JSON values; confirm whether callers only pass numerics.
    tsql = "UPDATE {table_name} SET {field_name}={field_value} WHERE day_t='{day_t}'"
    field_value = json.dumps(user_json)
    sql1 = tsql.format(day_t=stime, field_name=field_name, field_value=field_value, table_name=table_name)
    # print(sql1)
    return sql1
def update_twof_json(stime, f_n1, f_n2, f_v1, f_v2, table_name):
    # Build an UPDATE setting two fields for one day: f_v1 is embedded
    # quoted (string/JSON), f_v2 unquoted (numeric).
    # NOTE(review): f_v1 is not escaped — unsafe for untrusted input.
    tsql = "UPDATE {table_name} SET `{f_n1}`='{f_v1_json}',`{f_n2}`={f_v2} WHERE day_t='{day_t}'"
    sql1 = tsql.format(day_t=stime, f_n1=f_n1, f_n2=f_n2, f_v1_json=f_v1, f_v2=f_v2, table_name=table_name)
    # print(sql1)
    return sql1
def update_threef_json(stime, f_n1, f_n2, f_n3, f_v1, f_v2, f_v3, table_name):
    # Build an UPDATE setting three fields for one day: f_v1 and f_v3 are
    # embedded quoted, f_v2 unquoted (numeric).
    # NOTE(review): values are not escaped — unsafe for untrusted input.
    tsql = "UPDATE {table_name} SET `{f_n1}`='{f_v1_json}',`{f_n3}`='{f_v3}',`{f_n2}`={f_v2} WHERE day_t='{day_t}'"
    sql1 = tsql.format(day_t=stime, f_n1=f_n1, f_n2=f_n2, f_v1_json=f_v1, f_v2=f_v2, f_n3=f_n3, f_v3=f_v3,
                       table_name=table_name)
    # print(sql1)
    return sql1
|
999,995 | 53217abbbe75ca8b9da8b0948fc526b04ea28e50 | import pandas as pd
import csv
import argparse
import numpy as np
if __name__ == '__main__':
    # Average per-fold textcnn probabilities and write an argmax submission.
    parser = argparse.ArgumentParser(description='choose test result path')
    parser.add_argument("-output", default=None, type=str, required=True)
    parser.add_argument("-k", default=None, type=int, required=True)
    args = parser.parse_args()
    df = pd.read_csv('../data/test_clean.csv')
    # Accumulators for the four class probabilities.
    df['1'] = 0
    df['2'] = 0
    df['3'] = 0
    df['4'] = 0
    for i in range(args.k):
        temp = pd.read_csv('../output/model_textcnn/fold_{}/test_result_{}.csv'.format(args.k, i))
        df['1'] += temp['label_1'] / args.k
        df['2'] += temp['label_2'] / args.k
        df['3'] += temp['label_3'] / args.k
        # NOTE(review): 'label4' breaks the 'label_N' naming used above —
        # confirm it matches the fold output schema and is not a typo.
        df['4'] += temp['label4'] / args.k
    print(df['1'].mean())
    # argmax gives 0-based indices; shift to the 1-based submission labels.
    df['label'] = np.argmax(df[['1', '2', '3', '4']].values, -1)
    for _ in df.index:
        df.loc[_, 'label'] += 1
    df[['id', 'label']].to_csv(args.output, index=False)
    # Print the final per-label counts as a sanity check.
    n1 = n2 = n3 = n4 = 0
    for _ in df.index:
        if df.loc[_, 'label'] == 1:
            n1 += 1
        elif df.loc[_, 'label'] == 2:
            n2 += 1
        elif df.loc[_, 'label'] == 3:
            n3 += 1
        else:
            n4 += 1
    print([n1, n2, n3, n4])
|
999,996 | b0c286b3e51ad59bbcde27114d3f049e62782d5d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# for large table should be replaced
# to non-recursive version of algorithm
def findCycles(graph, begin, current, path=None, paths=None):
    """Depth-first search collecting every simple path from `begin` back to itself.

    `path` is the DFS stack and `paths` accumulates found cycles; both
    default to fresh lists (the None sentinel avoids the mutable-default
    pitfall). Returns the accumulated list of cycles.
    """
    path = [] if path is None else path
    paths = [] if paths is None else paths
    path.append(current)
    for neighbour in graph.get(current, []):
        if neighbour == begin and len(path) >= 1:
            # Closed a cycle back to the start node: snapshot the stack.
            paths.append(list(path))
        if neighbour not in path:
            paths = findCycles(graph, begin, neighbour, path, paths)
    path.pop()
    return paths
# unpack 2 dimensional list to 1 dimensional
def flatten(l):
    """Concatenate a sequence of lists into a single flat list."""
    out = []
    for sub in l:
        out.extend(sub)
    return out
# try find cycles for all nodes of graph
def graph_find_cycles(graph):
    """Return (in arbitrary order) every node that lies on at least one cycle."""
    members = set()
    for start in graph:
        members.update(flatten(findCycles(graph, start, start)))
    return list(members)
# test code
def main():
    """Exercise graph_find_cycles on a small graph containing two cycles."""
    a = {
        '1': ['2', '4'],
        '2': ['3', '5'],
        '3': ['1'],
        '4': ['1'],
        '5': ['6'],
        '6': ['4']
    }
    # Fix: use the print() function form so the script runs on both
    # Python 2 and Python 3 (the original print statement is a
    # SyntaxError under Python 3).
    print(graph_find_cycles(a))

if __name__ == '__main__':
    main()
999,997 | 6bbdc30b872115a9afc2fca1ccb194ca66ea3ebf | # -*- coding: utf-8 -*-
from halo import Halo
import subprocess
import requests
import random
import sys
import json
# Grading run outputs: machine-readable context and human feedback.
output_file = open('.work/context', 'w')
feedback_file = open('.work/feedback', 'w')
setup = {}
# Terminal progress indicator used throughout the grading steps.
spinner = Halo(text='Loading', spinner='dots')
# Expected cluster layout and the service target ports used to identify them.
GKE_NUM_DEPLOYMENTS = 3
GKE_FRONTEND = '3000'
GKE_DB = '9200'
GKE_BACKEND = '5000'
COUNT_GKE = 3
TOTAL_SCORE = 20
TEST_CASES_URL = "https://s3.amazonaws.com/hkonda-code/data/test_cases/test_cases.json" #TODO: change this course public link
class TestCases:
    """Sample published test cases and score a deployed recommendation service.

    `score` ends up in [0, 1]: the fraction of sampled cases whose returned
    recommendations exactly match the expected answer.
    """

    def __init__(self, node_url):
        self.node_url = node_url
        data = requests.get(TEST_CASES_URL).text.split("\n")
        # Fix: the original called map(random.choice(data), range(10)),
        # passing a *value* where map expects a callable (TypeError).
        # Sample 10 cases (with replacement) instead.
        self.test_cases = [random.choice(data) for _ in range(10)]
        self.score = self.validate()

    @staticmethod
    def get_movies(mv):
        # Materialize the titles so lists compare by value (map() is a lazy
        # iterator on Python 3 and would never compare equal).
        return [movie['title'] for movie in mv]

    def validate(self):
        """Return the fraction of sampled cases the service answers correctly."""
        score = 0.0
        for raw_test in self.test_cases:
            test_case = json.loads(raw_test)
            r = requests.post(self.node_url + "/es/users/recommend", data=test_case['q'])
            s_movies_list = TestCases.get_movies(r.json())
            a_movies_list = TestCases.get_movies(test_case['a'])
            score += (1.0 if s_movies_list == a_movies_list else 0.0)
        return score / len(self.test_cases)
def run_shell_command(cmd):
    """Run a whitespace-split command (no shell) and return its stdout as text."""
    return str(subprocess.check_output(cmd.split()).decode('UTF-8'))
def get_user_input(prompt):
    # Show `prompt` and return one line typed by the user (blocking).
    print(prompt)
    return input()
def writeFeedBack(msg, Exit=False):
    # Record `msg` in the feedback file and show it as a failed spinner step.
    # With Exit=True, also record a zero score and abort the whole run.
    feedback_file.write(msg + "\n")
    spinner.fail(msg)
    if Exit:
        output_file.write("Marks:0\n")
        sys.exit(1)
def get_contexts():
    """Return the name of the GKE kubectl context.

    Aborts the grading run (zero marks) when no context name starting with
    "gke" is configured.
    """
    contexts = run_shell_command('kubectl config get-contexts -o name').strip().split('\n')
    gke_context = None
    try:
        # Fix: filter() returns a lazy iterator on Python 3 and cannot be
        # indexed; a list comprehension keeps the same IndexError-on-empty
        # behavior the except clause relies on.
        gke_context = [c for c in contexts if c.startswith("gke")][0]
        spinner.info('Using {} as the GKE context'.format(gke_context))
    except Exception as e:
        feedback = 'Exception: {} looking up contexts'.format(e)
        writeFeedBack(feedback, Exit=True)
    return gke_context
def switch_context(target):
    # Point kubectl at the given context for all subsequent commands.
    run_shell_command('kubectl config use-context {}'.format(target))
def list_ips():
    """Return [name, external_ip, target_port] triples for the cluster's services.

    Rows without an external IP yet (pending LoadBalancer) produce fewer
    than three fields and are filtered out.
    """
    cmd = "kubectl get services -o json | jq -r '.items[] | [.metadata.name,.status.loadBalancer.ingress[]?.ip,.spec.ports[0].targetPort]| @csv'"
    # Fix: decode the subprocess bytes before splitting (Python 3), and
    # parse each CSV line individually — the original passed the *list* of
    # lines straight to json.loads (TypeError) and relied on Python-2
    # map/filter returning lists.
    lines = subprocess.check_output(cmd, shell=True).decode('UTF-8').strip().split("\n")
    rows = [line.replace('"', "").split(",") for line in lines]
    return [fields for fields in rows if len(fields) == 3]
'''
Fetches the ips
checks the number of services
'''
def get_ips(gke_context):
    """Look up the frontend/backend/elasticsearch external IPs in the GKE context.

    Fails the run (zero marks) when the expected number of services is not
    deployed or any expected target port is missing.
    """
    switch_context(gke_context)
    ips = list_ips()
    gke_front, gke_back, gke_db = [[]] * 3
    if len(ips) != GKE_NUM_DEPLOYMENTS:
        msg = "Found {} ip(s) , please deploy {} deployments".format(len(ips), GKE_NUM_DEPLOYMENTS)
        writeFeedBack(msg, Exit=True)
    try:
        # Fix: filter() is not subscriptable on Python 3; comprehensions
        # preserve the IndexError-on-missing behavior the handler expects.
        gke_front = [x for x in ips if x[2] == GKE_FRONTEND][0][1]
        spinner.info('Using {} as the GKE frontend IP address'.format(gke_front))
        gke_back = [x for x in ips if x[2] == GKE_BACKEND][0][1]
        spinner.info('Using {} as the GKE backend IP address'.format(gke_back))
        gke_db = [x for x in ips if x[2] == GKE_DB][0][1]
        spinner.info('Using {} as the GKE elastic-search IP address'.format(gke_db))
    except IndexError:
        feedback = 'Exception:looking up IP address in GKE context'
        writeFeedBack(feedback, Exit=True)
    return {'gke_front': gke_front, 'gke_back': gke_back, 'gke_db': gke_db}
def count_deployment(context, name, expected):
    """Count deployments in `context`, logging feedback when the count differs
    from `expected`. Returns the actual count either way."""
    switch_context(context)
    deployments = run_shell_command('kubectl get deployments -o name').strip().split('\n')
    actual = len(deployments)
    if actual != expected:
        feedback = 'Expected {} deployment(s) in {}, got {}'.format(expected, name, actual)
        feedback_file.write('{}\n'.format(feedback))
        print(feedback)
    return actual
def check_deployments(gke_context):
    """Return 1.0 when the GKE context holds the expected deployment count, else 0.0."""
    count = 0
    # Fix: the original passed the undefined name `expected_gke`
    # (NameError at runtime); the expected count is the module constant.
    count += count_deployment(gke_context, 'GKE', GKE_NUM_DEPLOYMENTS)
    return 1.0 if COUNT_GKE == count else 0.0
def compute_score():
    """Run all grading steps and return the total score (out of TOTAL_SCORE).

    Weights: 10% deployment count, 10% service count, 80% correctness.
    """
    spinner.start("checking for gke context")
    gke_context = get_contexts()
    spinner.succeed("found gke context!")
    score = 0.0
    spinner.start("counting the number of deployments")
    score += (0.1 * TOTAL_SCORE * check_deployments(gke_context))
    spinner.succeed("done counting deplyments!")
    spinner.start("counting the number of services")
    ips = get_ips(gke_context)
    score += (1.0 if COUNT_GKE == len(ips.keys()) else 0.0) * 0.1 * TOTAL_SCORE
    spinner.succeed("done counting services")
    spinner.start("testing for correctness")
    score += TestCases(ips['gke_front']).score * 0.8 * TOTAL_SCORE
    spinner.succeed("done grading")
    # Fix: the computed score was previously discarded when the function
    # returned; hand it back so the caller can record it.
    return score
if __name__ == "__main__":
print("--------------------------------------------------------------------------------")
print("--------------------------------- Task 2! --------------------------------------")
print("--------------------------------------------------------------------------------")
compute_score()
output_file.write(json.dumps(setup)) |
999,998 | a34b7b8017b43b499fa8bb3df7fb9270ada7f0ac | # an array of state dictionaries
import random
# All 50 U.S. states with their capitals; per-state quiz tallies
# ("correct"/"wrong") are attached below before play begins.
states = [
    {"name": "Alabama", "capital": "Montgomery"},
    {"name": "Alaska", "capital": "Juneau"},
    {"name": "Arizona", "capital": "Phoenix"},
    {"name": "Arkansas", "capital": "Little Rock"},
    {"name": "California", "capital": "Sacramento"},
    {"name": "Colorado", "capital": "Denver"},
    {"name": "Connecticut", "capital": "Hartford"},
    {"name": "Delaware", "capital": "Dover"},
    {"name": "Florida", "capital": "Tallahassee"},
    {"name": "Georgia", "capital": "Atlanta"},
    {"name": "Hawaii", "capital": "Honolulu"},
    {"name": "Idaho", "capital": "Boise"},
    {"name": "Illinois", "capital": "Springfield"},
    {"name": "Indiana", "capital": "Indianapolis"},
    {"name": "Iowa", "capital": "Des Moines"},
    {"name": "Kansas", "capital": "Topeka"},
    {"name": "Kentucky", "capital": "Frankfort"},
    {"name": "Louisiana", "capital": "Baton Rouge"},
    {"name": "Maine", "capital": "Augusta"},
    {"name": "Maryland", "capital": "Annapolis"},
    {"name": "Massachusetts", "capital": "Boston"},
    {"name": "Michigan", "capital": "Lansing"},
    {"name": "Minnesota", "capital": "St. Paul"},
    {"name": "Mississippi", "capital": "Jackson"},
    {"name": "Missouri", "capital": "Jefferson City"},
    {"name": "Montana", "capital": "Helena"},
    {"name": "Nebraska", "capital": "Lincoln"},
    {"name": "Nevada", "capital": "Carson City"},
    {"name": "New Hampshire", "capital": "Concord"},
    {"name": "New Jersey", "capital": "Trenton"},
    {"name": "New Mexico", "capital": "Santa Fe"},
    {"name": "New York", "capital": "Albany"},
    {"name": "North Carolina", "capital": "Raleigh"},
    {"name": "North Dakota", "capital": "Bismarck"},
    {"name": "Ohio", "capital": "Columbus"},
    {"name": "Oklahoma", "capital": "Oklahoma City"},
    {"name": "Oregon", "capital": "Salem"},
    {"name": "Pennsylvania", "capital": "Harrisburg"},
    {"name": "Rhode Island", "capital": "Providence"},
    {"name": "South Carolina", "capital": "Columbia"},
    {"name": "South Dakota", "capital": "Pierre"},
    {"name": "Tennessee", "capital": "Nashville"},
    {"name": "Texas", "capital": "Austin"},
    {"name": "Utah", "capital": "Salt Lake City"},
    {"name": "Vermont", "capital": "Montpelier"},
    {"name": "Virginia", "capital": "Richmond"},
    {"name": "Washington", "capital": "Olympia"},
    {"name": "West Virginia", "capital": "Charleston"},
    {"name": "Wisconsin", "capital": "Madison"},
    {"name": "Wyoming", "capital": "Cheyenne"},
]
# Initialize cumulative per-state score counters.
for dictionary in states:
    dictionary["correct"] = 0
    dictionary["wrong"] = 0
def play_game():
    """Quiz the user on every state's capital, repeating rounds until they decline.

    Per-state cumulative tallies are kept in the module-level `states`
    dicts ("correct"/"wrong") and reported after each question.
    """
    print("Welcome to the 50 states game!")
    is_game_over = False
    while not is_game_over:
        random.shuffle(states)
        for dictionary in states:
            print("What is the capital of " + dictionary.get("name") + "?")
            guess = input("Enter guess: ")
            # Case-insensitive comparison of the guess.
            if guess.upper() == dictionary.get("capital").upper():
                print("Yay! Your answer is correct")
                dictionary["correct"] += 1
            else:
                print("Incorrect response")
                dictionary["wrong"] += 1
            print(
                "You answered "
                + str(dictionary.get("correct"))
                + " out of "
                + str(dictionary.get("correct") + dictionary.get("wrong"))
                + " for "
                + dictionary.get("name")
            )
        response = input("Do you want to play again? ")
        if response.upper() != "YES":
            is_game_over = True


if __name__ == "__main__":
    # Fix: guard the auto-start so importing this module (e.g. for the
    # `states` data) no longer launches the interactive quiz.
    play_game()
|
999,999 | 3ed1d9b84ee80055ace62d6981b8575cee869d72 | from __future__ import annotations
import logging
from abc import ABC, abstractmethod
from typing import Any, Generic, Iterable, List, Mapping, TypeVar
from snuba.utils.codecs import Encoder, TDecoded, TEncoded
logger = logging.getLogger("snuba.writer")
WriterTableRow = Mapping[str, Any]
T = TypeVar("T")
class BatchWriter(ABC, Generic[T]):
    """Abstract sink that writes a whole batch of values of type T in one call."""

    @abstractmethod
    def write(self, values: Iterable[T]) -> None:
        raise NotImplementedError
class BatchWriterEncoderWrapper(BatchWriter[TDecoded]):
    """Adapter exposing a decoded-value interface over an encoded-value writer.

    Each value is passed through the encoder lazily on its way to the
    wrapped writer.
    """

    def __init__(
        self, writer: BatchWriter[TEncoded], encoder: Encoder[TEncoded, TDecoded]
    ) -> None:
        self.__inner = writer
        self.__codec = encoder

    def write(self, values: Iterable[TDecoded]) -> None:
        encoded = map(self.__codec.encode, values)
        return self.__inner.write(encoded)
class BufferedWriterWrapper(Generic[TEncoded, TDecoded]):
    """
    Buffers encoded rows in memory and hands them to the wrapped
    BatchWriter in batches of `buffer_size`.

    Intended for non-Kafka ingestion paths; Kafka consumers batch in the
    batch processor instead. Not thread safe — do not attempt parallel
    flushes and rely on the GIL.

    Usable as a context manager: leaving the context flushes whatever is
    still buffered.
    """

    def __init__(
        self,
        writer: BatchWriter[TEncoded],
        buffer_size: int,
        encoder: Encoder[TEncoded, TDecoded],
    ):
        self.__sink = writer
        self.__capacity = buffer_size
        self.__pending: List[TEncoded] = []
        self.__codec = encoder

    def __flush(self) -> None:
        # Hand the whole pending batch to the sink, then start a new one.
        logger.debug("Flushing buffer with %d elements", len(self.__pending))
        self.__sink.write(self.__pending)
        self.__pending = []

    def __enter__(self) -> BufferedWriterWrapper[TEncoded, TDecoded]:
        return self

    def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
        if self.__pending:
            self.__flush()

    def write(self, row: TDecoded) -> None:
        """Encode `row` into the buffer, flushing once the batch is full."""
        self.__pending.append(self.__codec.encode(row))
        if len(self.__pending) >= self.__capacity:
            self.__flush()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.