index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
989,700 | 0f64f9e31f84aef54505cf07853d70a24620c07a | """
Base class for all IxExplorer package tests.
@author yoram@ignissoft.com
"""
from os import path
from trafficgenerator.tgn_utils import ApiType
from trafficgenerator.test.test_tgn import TgnTest
from ixexplorer.ixe_app import init_ixe
class IxeTestBase(TgnTest):
TgnTest.config_file = path.join(path.dirname(__file__), 'IxExplorer.ini')
def setUp(self):
super(IxeTestBase, self).setUp()
self.ixia = init_ixe(ApiType[self.config.get('IXE', 'api')], self.logger,
host=self.config.get('IXE', 'server'), port=self.config.getint('IXE', 'tcp_port'),
rsa_id=self.config.get('IXE', 'rsa_id'))
self.ixia.connect(self.config.get('IXE', 'user'))
self.ixia.add(self.config.get('IXE', 'chassis'))
self.port1 = self.config.get('IXE', 'port1')
self.port2 = self.config.get('IXE', 'port2')
def tearDown(self):
self.ixia.disconnect()
super(IxeTestBase, self).tearDown()
def testHelloWorld(self):
pass
def _reserve_ports(self):
self.ports = self.ixia.session.reserve_ports([self.port1, self.port2], force=True)
def _load_config(self, cfg1, cfg2):
self._reserve_ports()
self.ports[self.port1].load_config(cfg1)
self.ports[self.port2].load_config(cfg2)
|
989,701 | 89422d8dd39328cfa315482100e29519b3ee4269 | # -*- coding: utf-8 -*-
"""
Example showing how to run the bilinear algorithm
@author: alex
"""
import numpy as np
from wsdp_utility import *
from wsdp_export import *
from wsdp_alg_base import *
from wsdp_alg_bilinear import *
np.random.seed(19)
# Create SDE structure from weak SDP by defining sizes of each consecutive
# index block P_i (including the left over region).
P = CreateConsecPartition([2,1,3,2,1,3])
# Create a random SDE structure for the certificate of the above weak SDP
Q = CreateCertificateStructure(5, P)
# Check if the certificate and weak SDP SDE structures are compatible.
isValid = ValidateCertStructure(P, Q)
print("Valid P,Q: " + str(isValid))
# Execute the bilinear algorithm to find a weak SDP and certificate with the
# structures specified above.
[A, b, X] = CreateWeakSysCertSDE(P, Q, [-2,2], [1,1])
# Check if the generated system and certificate are valid. This should always
# be true, use this function for debugging.
isValid = ValidateWeakCert(A, b, X, 1, True)
print("Valid: " + str(isValid))
# Create an image of the system and certificate and rescale it uniformly.
img = CreateSDEPairImage(P, Q, A, X, Aprefix='A', Xprefix='X')
img = ResizeImageUniform(img, width = 1024)
# Extend the SDE of the above weak SDP with additional entries.
[A, b] = ExtendWeakSDE(A, b, X, 4)
# Check that the extended sequence is valid.
isValid = ValidateWeakCert(A, b, X, 1, True)
print("Valid: " + str(isValid))
# Display the maximum elements of the system A and certificate X
print("A max: " + str(np.amax(A)))
print("X max: " + str(np.amax(X)))
# Check the condition number of the operator A
print("A condition: " + str(Condition(A)))
# Create an image of the system and certificate and rescale it uniformly.
img_extended = CreateSDEPairImage(P, Q, A, X, Aprefix='A', Xprefix='X')
img_extended = ResizeImageUniform(img_extended, width = 1024)
# Rotate elements of A and X arbitrarily
[T,Ti] = RotateBases(A, X, 100)
# Check that the rotated sequence is correct.
isValid = ValidateWeakCert(A, b, X, 1, True)
print("Valid: " + str(isValid))
# Rotate the entire sequence A using row operations
[A, b, F] = RotateSequence(A, b)
# Check that the rotated sequence is correct.
isValid = ValidateWeakCert(A, b, X, 1, True)
print("Valid: " + str(isValid))
# Check the condition number of the operator A
print("A condition: " + str(Condition(A)))
# Create an image of the system and certificate and rescale it uniformly.
img_messy = CreateSDEPairImage(P, Q, A, X, Aprefix='A', Xprefix='X', useColor=False, F=F, T=T)
img_messy = ResizeImageUniform(img_messy, width = 1024) |
989,702 | 48fa12d443e366521e69862f5a55755b57839464 | #NOTE: intersect a list of subjects with specific data fields in a table
import argparse
def parse_args():
parser=argparse.ArgumentParser(description="intersect a list of subjects with specific data fields in a table")
parser.add_argument("--subjects_file")
parser.add_argument("--table")
parser.add_argument("--fields_to_ignore",nargs="*")
parser.add_argument("--fields_to_use_for_gwas",nargs="*")
parser.add_argument("--outf")
return parser.parse_args()
#handles the case when the header is 1 field shorter than the body of the table
def get_health_code_index(data_table,table_name):
tokens=data_table[0].split('\t')
try:
if len(data_table)==1:
return [tokens.index("healthCode"),0]
else:
first_entry=data_table[1].split('\t')
if len(tokens)==len(first_entry):
return [tokens.index("healthCode"),0]
else:
return [tokens.index("healthCode")+1,1]
except:
print("Failed to get healthCode index for table:"+table_name)
exit()
def main():
args=parse_args()
print(args.table)
subjects=open(args.subjects_file,'r').read().strip().split('\n')
subject_dict=dict()
for subject in subjects:
subject_dict[subject]=1
data_table=open(args.table,'r').read().strip().split('\n')
[health_code_col,offset]=get_health_code_index(data_table,args.table)
header=data_table[0].split('\t')
field_index=dict()
field_tally=dict()
field_subject_values=dict()
for i in range(len(header)):
cur_field=header[i]
if cur_field not in args.fields_to_ignore:
field_index[cur_field]=i+offset
field_tally[cur_field]=set([])
field_subject_values[cur_field]=dict()
for row in data_table[1::]:
tokens=row.split('\t')
subject=tokens[health_code_col]
if subject in subject_dict:
#augment our field tallies!
for field in field_index:
cur_index=field_index[field]
cur_value=tokens[cur_index]
if (cur_value!="NA") and (cur_value!=""):
field_tally[field].add(subject)
field_subject_values[field][subject]=cur_value
#print the table name & field counts
for field in field_tally:
print(args.table+":"+field+":"+str(len(field_tally[field])))
for field in args.fields_to_use_for_gwas:
if field in field_subject_values:
outf=open(args.outf+"."+field,'w')
outf.write('Subject\t'+field+'\n')
for subject in subjects:
if subject in field_subject_values[field]:
outf.write(subject+'\t'+str(field_subject_values[field][subject])+'\n')
else:
outf.write(subject+'\t'+"NA"+"\n")
if __name__=="__main__":
main()
|
989,703 | 4d0d4510fc44fc38220110366be066282369474a | #! /usr/bin/env python
# =====================================================
# INPUTS
# =====================================================
#ST SF for TT and WJets, HT for WJets
#path2016 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv5/2016/11May2020_noToppt_applySTtoTTWJets/";
#path2017 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv5/2017/11May2020_noToppt_applySTtoTTWJets/";
#path2018 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv6/2018/11May2020_noToppt_applySTtoTTWJets/";
#WJets, no ST scaling,
#path2016 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv5/2016/30Apr2020_applyWt/";
#path2017 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv5/2017/30Apr2020_applyWt/";
#path2018 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv6/2018/30Apr2020_applyWt/";
#path2016 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv5/2016/17June2020_puidTig_noalignCR_noScale_fixHEMFlat_Skim/";
#path2017 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv5/2017/17June2020_puidTig_noalignCR_noScale_fixHEMFlat_Skim/";
#path2018 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv6/2018/17June2020_puidTig_noalignCR_noScale_fixHEMFlat_Skim/";
#path2016 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv5/2016/16June2020_puidTig_noalignCR_noScale_debugHEM/";
#path2017 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv5/2017/16June2020_puidTig_noalignCR_noScale_debugHEM/";
#path2018 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv6/2018/16June2020_puidTig_noalignCR_noScale_debugHEM/";
#path2016 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv5/2016/3Jul2020_nopuDR_noalignCR_Scale_fixHEM_Sys_Skim/";
#path2017 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv5/2017/3Jul2020_nopuDR_noalignCR_Scale_fixHEM_Sys_Skim/";
#path2018 = "/Users/amodak/VLQ/NanoPostProc/files/NanoAODv6/2018/3Jul2020_nopuDR_noalignCR_Scale_fixHEM_Sys_Skim/";
path2016 = "out_wjet/2016_hist/";
path2017 = "out_wjet/2017_hist/";
path2018 = "out_wjet/2018_hist/";
applyHTScale = True
CH = "Mu"
if (CH == "Mu"):
print ("Using Muon Ch Data")
f_Data_ReMiniAOD_2016 = TFile(path2016+'SingleMuon_2016'+'.root')
f_Data_ReMiniAOD_2017 = TFile(path2017+'SingleMuon_2017'+'.root')
f_Data_ReMiniAOD_2018 = TFile(path2018+'SingleMuon_2018'+'.root')
elif (CH == "Ele"):
print ("Using Electron Ch Data")
f_Data_ReMiniAOD_2016 = TFile(path2016+'SingleElectron_2016'+'.root')
f_Data_ReMiniAOD_2017 = TFile(path2017+'SingleElectron_2017'+'.root')
f_Data_ReMiniAOD_2018 = TFile(path2018+'SingleElectron_2018'+'.root')
f_2016DY100to200 = TFile(path2016+'DYJetsToLL_M-50_HT-100to200_2016'+'.root')
f_2016DY200to400 = TFile(path2016+'DYJetsToLL_M-50_HT-200to400_2016'+'.root')
f_2016DY400to600 = TFile(path2016+'DYJetsToLL_M-50_HT-400to600_2016'+'.root')
f_2016DY600to800 = TFile(path2016+'DYJetsToLL_M-50_HT-600to800_2016'+'.root')
f_2016DY800to1200 = TFile(path2016+'DYJetsToLL_M-50_HT-800to1200_2016'+'.root')
f_2016DY1200to2500 = TFile(path2016+'DYJetsToLL_M-50_HT-1200to2500_2016'+'.root')
f_2016DY2500toInf = TFile(path2016+'DYJetsToLL_M-50_HT-2500toInf_2016'+'.root')
f_2016WJ100to200 = TFile(path2016+'WJetsToLNu_HT-100To200_2016'+'.root')
f_2016WJ200to400 = TFile(path2016+'WJetsToLNu_HT-200To400_2016'+'.root')
f_2016WJ400to600 = TFile(path2016+'WJetsToLNu_HT-400To600_2016'+'.root')
f_2016WJ600to800 = TFile(path2016+'WJetsToLNu_HT-600To800_2016'+'.root')
f_2016WJ800to1200 = TFile(path2016+'WJetsToLNu_HT-800To1200_2016'+'.root')
f_2016WJ1200to2500 = TFile(path2016+'WJetsToLNu_HT-1200To2500_2016'+'.root')
f_2016WJ2500toInf = TFile(path2016+'WJetsToLNu_HT-2500ToInf_2016'+'.root')
f_2016ST_tW_top = TFile(path2016+'ST_tW_top_2016'+'.root')
f_2016ST_tW_antitop = TFile(path2016+'ST_tW_antitop_2016'+'.root')
f_2016ST_t_top = TFile(path2016+'ST_t-channel_top_2016'+'.root')
f_2016ST_t_antitop = TFile(path2016+'ST_t-channel_antitop_2016'+'.root')
f_2016ttbar_pow = TFile(path2016+'TT_TuneCUETP8M2T4_powheg-pythia8_2016'+'.root')
f_2016ttbar = TFile(path2016+'TTJets_TuneCUETP8M1_13TeV-madgraphMLM_2016'+'.root')
f_2016QCD170to300 = TFile(path2016+'QCD_Pt_170to300_2016'+'.root')
f_2016QCD300to470 = TFile(path2016+'QCD_Pt_300to470_2016'+'.root')
f_2016QCD470to600 = TFile(path2016+'QCD_Pt_470to600_2016'+'.root')
f_2016QCD600to800 = TFile(path2016+'QCD_Pt_600to800_2016'+'.root')
f_2016QCD800to1000 = TFile(path2016+'QCD_Pt_800to1000_2016'+'.root')
f_2016QCD1000to1400 = TFile(path2016+'QCD_Pt_1000to1400_2016'+'.root')
f_2016QCD1400to1800 = TFile(path2016+'QCD_Pt_1400to1800_2016'+'.root')
f_2016QCD1800to2400 = TFile(path2016+'QCD_Pt_1800to2400_2016'+'.root')
f_2016QCD2400to3200 = TFile(path2016+'QCD_Pt_2400to3200_2016'+'.root')
f_2016QCD3200toInf = TFile(path2016+'QCD_Pt_3200toInf_2016'+'.root')
f_2016SIG1200 = TFile(path2016+'TprimeBToBW_M-1200_2016'+'.root')
#===== cross sections (pb)==========
Top_xs_2016 = 831.76 *gSF
#2016 Muon Scaling
#DeepCSV
#Top_xs_2016 = 831.76 *gSF * 0.794
#DeepFLV
#Top_xs_2016 = 831.76 *gSF * 0.783
#2016 Ele Scaling
#DeepCSV
#Top_xs_2016 = 831.76 *gSF * 0.931
#DeepFLV
#Top_xs_2016 = 831.76 *gSF * 0.925
DY100to200_xs_2016 = 147.4 *gSF
DY200to400_xs_2016 = 41.04 *gSF
DY400to600_xs_2016 = 5.674 *gSF
DY600to800_xs_2016 = 1.358 *gSF
DY800to1200_xs_2016 = 0.6229 *gSF
DY1200to2500_xs_2016 = 0.1512 *gSF
DY2500toInf_xs_2016 = 0.003659 *gSF
##With LHE correction
#For Ele Ch
corrSF = 1.0
#corrSF = 1.171
if (applyHTScale):
WJ100to200_xs_2016 = 1345.0 *gSF *1.21 *1.0 *corrSF
WJ200to400_xs_2016 = 359.7 *gSF *1.21 *1.0 *corrSF
WJ400to600_xs_2016 = 48.9 *gSF *1.21 *0.88842 *corrSF
WJ600to800_xs_2016 = 12.05 *gSF *1.21 *0.83367 *corrSF
WJ800to1200_xs_2016 = 5.501 *gSF *1.21 *0.76412 *corrSF
WJ1200to2500_xs_2016 = 1.329 *gSF *1.21 *0.67636 *corrSF
WJ2500toInf_xs_2016 = 0.03216 *gSF *1.21 *0.58820 *corrSF
else:
WJ100to200_xs_2016 = 1345.0 *gSF *1.21 *corrSF
WJ200to400_xs_2016 = 359.7 *gSF *1.21 *corrSF
WJ400to600_xs_2016 = 48.9 *gSF *1.21 *corrSF
WJ600to800_xs_2016 = 12.05 *gSF *1.21 *corrSF
WJ800to1200_xs_2016 = 5.501 *gSF *1.21 *corrSF
WJ1200to2500_xs_2016 = 1.329 *gSF *1.21 *corrSF
WJ2500toInf_xs_2016 = 0.03216 *gSF *1.21 *corrSF
'''
#From Julie
WJ100to200_xs_2016 = 1345.0 *gSF *1.21 *0.998056 *corrSF
WJ200to400_xs_2016 = 359.7 *gSF *1.21 *0.978569 *corrSF
WJ400to600_xs_2016 = 48.9 *gSF *1.21 *0.928054 *corrSF
WJ600to800_xs_2016 = 12.05 *gSF *1.21 *0.856705 *corrSF
WJ800to1200_xs_2016 = 5.501 *gSF *1.21 *0.757463 *corrSF
WJ1200to2500_xs_2016 = 1.329 *gSF *1.21 *0.608292 *corrSF
WJ2500toInf_xs_2016 = 0.03216 *gSF *1.21 *0.454246 *corrSF
'''
#Without LHE correction
#WJ100to200_xs_2016 = 1345.0 *gSF *1.21
#WJ200to400_xs_2016 = 359.7 *gSF *1.21
#WJ400to600_xs_2016 = 48.9 *gSF *1.21
#WJ600to800_xs_2016 = 12.05 *gSF *1.21
#WJ800to1200_xs_2016 = 5.501 *gSF *1.21
#WJ1200to2500_xs_2016 = 1.329 *gSF *1.21
#WJ2500toInf_xs_2016 = 0.03216 *gSF *1.21
ST_tW_top_xs_2016 = 35.6 *gSF
ST_tW_antitop_xs_2016 = 35.6 *gSF
ST_t_top_xs_2016 = 44.33 *gSF
ST_t_antitop_xs_2016 = 26.38 *gSF
#QCD NLO XS
fact = 1
QCD170to300_xs_2016 = 117276 * fact
QCD300to470_xs_2016 = 7823 *fact
QCD470to600_xs_2016 = 648 *fact
QCD600to800_xs_2016 = 187 *fact
QCD800to1000_xs_2016 = 32 *fact
QCD1000to1400_xs_2016 = 9.4 *fact
QCD1400to1800_xs_2016 = 0.84 *fact
QCD1800to2400_xs_2016 = 0.12 *fact
QCD2400to3200_xs_2016 = 0.007 *fact
QCD3200toInf_xs_2016 = 0.0002 *fact
WW1L2Q_xs_2016 = 45.85 #powheg nnlo 50.0
WZ1L2Q_xs_2016 = 10.71
SIG1200_xs_2016 = 1.0
Top_pow_xs_2016 = 831.76 *gSF
#===== Number of generated events ======
nEvt_2016 = [1.01991e+07,1.10171e+07,9.60914e+06,9.72566e+06,8.29296e+06,2.67307e+06,596079,399492,7.8043e+07,3.89268e+07,7.7597e+06,1.86684e+07,7.83054e+06,6.87244e+06,2.63782e+06,6.95283e+06,6.93309e+06,6.71059e+07,3.8811e+07,1.47968e+07,2.24704e+07,3.96e+06,1.35247e+07,1.96971e+07,9.84662e+06,2.87343e+06,996130,996130,391735, 93600, 7.67481e+07]
Top_num_2016 = nEvt_2016[0]
DY100to200_num_2016 = nEvt_2016[1]
DY200to400_num_2016 = nEvt_2016[2]
DY400to600_num_2016 = nEvt_2016[3]
DY600to800_num_2016 = nEvt_2016[4]
DY800to1200_num_2016 = nEvt_2016[5]
DY1200to2500_num_2016 = nEvt_2016[6]
DY2500toInf_num_2016 = nEvt_2016[7]
#Without Ht
WJ100to200_num_2016 = nEvt_2016[8]
WJ200to400_num_2016 = nEvt_2016[9]
WJ400to600_num_2016 = nEvt_2016[10]
WJ600to800_num_2016 = nEvt_2016[11]
WJ800to1200_num_2016 = nEvt_2016[12]
WJ1200to2500_num_2016 = nEvt_2016[13]
WJ2500toInf_num_2016 = nEvt_2016[14]
ST_tW_top_num_2016 = nEvt_2016[15]
ST_tW_antitop_num_2016= nEvt_2016[16]
ST_t_top_num_2016 = nEvt_2016[17]
ST_t_antitop_num_2016 = nEvt_2016[18]
QCD170to300_num_2016 = nEvt_2016[19]
QCD300to470_num_2016 = nEvt_2016[20]
QCD470to600_num_2016 = nEvt_2016[21]
QCD600to800_num_2016 = nEvt_2016[22]
QCD800to1000_num_2016 = nEvt_2016[23]
QCD1000to1400_num_2016 = nEvt_2016[24]
QCD1400to1800_num_2016 = nEvt_2016[25]
QCD1800to2400_num_2016 = nEvt_2016[26]
QCD2400to3200_num_2016 = nEvt_2016[27]
QCD3200toInf_num_2016 = nEvt_2016[28]
#WW1L2Q_num_2016 = nEvt_2016[26]
#WZ1L2Q_num_2016 = nEvt_2016[27]
SIG1200_num_2016 = nEvt_2016[29]
Top_pow_num_2016 = nEvt_2016[30]
#2017
f_2017DY100to200 = TFile(path2017+'DYJetsToLL_M-50_HT-100to200_2017'+'.root')
f_2017DY200to400 = TFile(path2017+'DYJetsToLL_M-50_HT-200to400_2017'+'.root')
f_2017DY400to600 = TFile(path2017+'DYJetsToLL_M-50_HT-400to600_2017'+'.root')
f_2017DY600to800 = TFile(path2017+'DYJetsToLL_M-50_HT-600to800_2017'+'.root')
f_2017DY800to1200 = TFile(path2017+'DYJetsToLL_M-50_HT-800to1200_2017'+'.root')
f_2017DY1200to2500 = TFile(path2017+'DYJetsToLL_M-50_HT-1200to2500_2017'+'.root')
f_2017DY2500toInf = TFile(path2017+'DYJetsToLL_M-50_HT-2500toInf_2017'+'.root')
f_2017WJ100to200 = TFile(path2017+'WJetsToLNu_HT-100To200_2017'+'.root')
f_2017WJ200to400 = TFile(path2017+'WJetsToLNu_HT-200To400_2017'+'.root')
f_2017WJ400to600 = TFile(path2017+'WJetsToLNu_HT-400To600_2017'+'.root')
f_2017WJ600to800 = TFile(path2017+'WJetsToLNu_HT-600To800_2017'+'.root')
f_2017WJ800to1200 = TFile(path2017+'WJetsToLNu_HT-800To1200_2017'+'.root')
f_2017WJ1200to2500 = TFile(path2017+'WJetsToLNu_HT-1200To2500_2017'+'.root')
f_2017WJ2500toInf = TFile(path2017+'WJetsToLNu_HT-2500ToInf_2017'+'.root')
f_2017ST_tW_top = TFile(path2017+'ST_tW_top_2017'+'.root')
f_2017ST_tW_antitop = TFile(path2017+'ST_tW_antitop_2017'+'.root')
f_2017ST_t_top = TFile(path2017+'ST_t-channel_top_2017'+'.root')
f_2017ST_t_antitop = TFile(path2017+'ST_t-channel_antitop_2017'+'.root')
f_2017ttbar = TFile(path2017+'TTJets_TuneCP5_13TeV-madgraphMLM_2017'+'.root')
f_2017ttbar_2L2Nu = TFile(path2017+'TTTo2L2Nu_TuneCP5_powheg-pythia8_2017'+'.root')
f_2017ttbar_SemiLep = TFile(path2017+'TTToSemiLeptonic_TuneCP5_powheg-pythia8_2017'+'.root')
f_2017ttbar_2L2NuPS = TFile(path2017+'TTTo2L2Nu_TuneCP5_PSweights_powheg-pythia8_2017'+'.root')
f_2017ttbar_SemiLepPS = TFile(path2017+'TTToSemiLeptonic_TuneCP5_PSweights_powheg-pythia8_2017'+'.root')
f_2017ttbar_HadPS = TFile(path2017+'TTToHadronic_TuneCP5_PSweights_powheg-pythia8_2017'+'.root')
f_2017TTWJetsToLNu = TFile(path2017+'TTWJetsToLNu_TuneCP5_PSweights_amcatnloFXFX-madspin-pythia8_2017'+'.root')
f_2017TTWJetsToQQ = TFile(path2017+'TTWJetsToQQ_TuneCP5_amcatnloFXFX-madspin-pythia8_2017'+'.root')
f_2017TTZToLLNuNu = TFile(path2017+'TTZToLLNuNu_M-10_TuneCP5_amcatnlo-pythia8_2017'+'.root')
f_2017TTZToQQ = TFile(path2017+'TTZToQQ_TuneCP5_amcatnlo-pythia8_2017'+'.root')
#f_2017QCD100to200 = TFile(path2017+'QCD_HT100to200_2017'+'.root')
#f_2017QCD200to300 = TFile(path2017+'QCD_HT200to300_2017'+'.root')
f_2017QCD300to500 = TFile(path2017+'QCD_HT300to500_2017'+'.root')
f_2017QCD500to700 = TFile(path2017+'QCD_HT500to700_2017'+'.root')
f_2017QCD700to1000 = TFile(path2017+'QCD_HT700to1000_2017'+'.root')
f_2017QCD1000to1500 = TFile(path2017+'QCD_HT1000to1500_2017'+'.root')
f_2017QCD1500to2000 = TFile(path2017+'QCD_HT1500to2000_2017'+'.root')
f_2017QCD2000toInf = TFile(path2017+'QCD_HT2000toInf_2017'+'.root')
f_2017SIG1200 = TFile(path2017+'TprimeBToBW_M-1200_2017'+'.root')
#===== cross sections (pb)==========
Top_xs_2017 = 831.76 *gSF
#2017 Ele Ch SF
#DeepCSV
#Top_xs_2017 = 831.76 *gSF *0.864
#DeepFLV
#Top_xs_2017 = 831.76 *gSF *0.914
DY100to200_xs_2017 = 147.4 *gSF
DY200to400_xs_2017 = 41.04 *gSF
DY400to600_xs_2017 = 5.674 *gSF
DY600to800_xs_2017 = 1.358 *gSF
DY800to1200_xs_2017 = 0.6229 *gSF
DY1200to2500_xs_2017 = 0.1512 *gSF
DY2500toInf_xs_2017 = 0.003659 *gSF
if (applyHTScale):
WJ100to200_xs_2017 = 1345.0 *gSF *1.21 *1.0 *corrSF
WJ200to400_xs_2017 = 359.7 *gSF *1.21 *1.0 *corrSF
WJ400to600_xs_2017 = 48.9 *gSF *1.21 *0.88842 *corrSF
WJ600to800_xs_2017 = 12.05 *gSF *1.21 *0.83367 *corrSF
WJ800to1200_xs_2017 = 5.501 *gSF *1.21 *0.76412 *corrSF
WJ1200to2500_xs_2017 = 1.329 *gSF *1.21 *0.67636 *corrSF
WJ2500toInf_xs_2017 = 0.03216 *gSF *1.21 *0.58820 *corrSF
else:
WJ100to200_xs_2017 = 1345.0 *gSF *1.21 *corrSF
WJ200to400_xs_2017 = 359.7 *gSF *1.21 *corrSF
WJ400to600_xs_2017 = 48.9 *gSF *1.21 *corrSF
WJ600to800_xs_2017 = 12.05 *gSF *1.21 *corrSF
WJ800to1200_xs_2017 = 5.501 *gSF *1.21 *corrSF
WJ1200to2500_xs_2017 = 1.329 *gSF *1.21 *corrSF
WJ2500toInf_xs_2017 = 0.03216 *gSF *1.21 *corrSF
'''
##From Julie
WJ100to200_xs_2017 = 1345.0 *gSF *1.21 *0.998056
WJ200to400_xs_2017 = 359.7 *gSF *1.21 *0.978569
WJ400to600_xs_2017 = 48.9 *gSF *1.21 *0.928054
WJ600to800_xs_2017 = 12.05 *gSF *1.21 *0.856705
WJ800to1200_xs_2017 = 5.501 *gSF *1.21 *0.757463
WJ1200to2500_xs_2017 = 1.329 *gSF *1.21 *0.608292
WJ2500toInf_xs_2017 = 0.03216 *gSF *1.21 *0.454246
'''
#Without LHE correction
#WJ100to200_xs_2017 = 1345.0 *gSF *1.21
#WJ200to400_xs_2017 = 359.7 *gSF *1.21
#WJ400to600_xs_2017 = 48.9 *gSF *1.21
#WJ600to800_xs_2017 = 12.05 *gSF *1.21
#WJ800to1200_xs_2017 = 5.501 *gSF *1.21
#WJ1200to2500_xs_2017 = 1.329 *gSF *1.21
#WJ2500toInf_xs_2017 = 0.03216 *gSF *1.21
ST_tW_top_xs_2017 = 35.6 *gSF
ST_tW_antitop_xs_2017 = 35.6 *gSF
ST_t_top_xs_2017 = 44.33 *gSF
ST_t_antitop_xs_2017 = 26.38 *gSF
#QCD NLO XS
fact = 1
QCD100to200_xs_2017 = 27990000 * fact
QCD200to300_xs_2017 = 1710000 *fact
QCD300to500_xs_2017 = 347500 *fact
QCD500to700_xs_2017 = 32060 *fact
QCD700to1000_xs_2017 = 6829 *fact
QCD1000to1500_xs_2017 = 1207 *fact
QCD1500to2000_xs_2017 = 120 *fact
QCD2000toInf_xs_2017 = 25.25 *fact
SIG1200_xs_2017 = 1.0
Top_xs_2017_2L2Nu = 88.29 *gSF
Top_xs_2017_SemiLep = 365.34 *gSF
Top_xs_2017_2L2NuPS = 88.29 *gSF
Top_xs_2017_SemiLepPS = 365.34 *gSF
Top_xs_2017_HadPS = 377.96 *gSF
TTWJetsToLNu_xs_2017 = 0.2001 * gSF
TTWJetsToQQ_xs_2017 = 0.405 * gSF
TTZToLLNuNu_xs_2017 = 0.2529 * gSF
TTZToQQ_xs_2017 = 0.5297 * gSF
#===== Number of generated events ======
#More samples added
nEvt_2017 = [8.01696e+06,1.11801e+07,1.18968e+07,1.00037e+07,8.69161e+06,3.08971e+06,616923,401334,3.58046e+07,2.11922e+07,1.316e+07,2.15823e+07,2.0273e+07,1.99919e+07,2.06296e+07,2.72081e+08,2.79005e+08,5.98206e+06,3.67591e+06,9.3202e+07,5.91333e+07,6.02057e+07,5.6041e+07,4.74604e+07,1.64853e+07,1.15086e+07,5.82557e+06,100000, 6.4873e+08,1.31544e+10,4.7051e+09,2.92622e+10,3.99283e+10,1.69012e+06,560315,1.8384e+06,4.56491e+06]
Top_num_2017 = nEvt_2017[0]
DY100to200_num_2017 = nEvt_2017[1]
DY200to400_num_2017 = nEvt_2017[2]
DY400to600_num_2017 = nEvt_2017[3]
DY600to800_num_2017 = nEvt_2017[4]
DY800to1200_num_2017 = nEvt_2017[5]
DY1200to2500_num_2017 = nEvt_2017[6]
DY2500toInf_num_2017 = nEvt_2017[7]
#Without Ht
WJ100to200_num_2017 = nEvt_2017[8]
WJ200to400_num_2017 = nEvt_2017[9]
WJ400to600_num_2017 = nEvt_2017[10]
WJ600to800_num_2017 = nEvt_2017[11]
WJ800to1200_num_2017 = nEvt_2017[12]
WJ1200to2500_num_2017 = nEvt_2017[13]
WJ2500toInf_num_2017 = nEvt_2017[14]
ST_tW_top_num_2017 = nEvt_2017[15]
ST_tW_antitop_num_2017= nEvt_2017[16]
ST_t_top_num_2017 = nEvt_2017[17]
ST_t_antitop_num_2017 = nEvt_2017[18]
QCD100to200_num_2017 = nEvt_2017[19]
QCD200to300_num_2017 = nEvt_2017[20]
QCD300to500_num_2017 = nEvt_2017[21]
QCD500to700_num_2017 = nEvt_2017[22]
QCD700to1000_num_2017 = nEvt_2017[23]
QCD1000to1500_num_2017 = nEvt_2017[24]
QCD1500to2000_num_2017 = nEvt_2017[25]
QCD2000toInf_num_2017 = nEvt_2017[26]
SIG1200_num_2017 = nEvt_2017[27]
Top_num_2017_2L2Nu = nEvt_2017[28]
Top_num_2017_SemiLep = nEvt_2017[29]
Top_num_2017_2L2NuPS = nEvt_2017[30]
Top_num_2017_SemiLepPS = nEvt_2017[31]
Top_num_2017_HadPS = nEvt_2017[32]
TTWJetsToLNu_num_2017 = nEvt_2017[33]
TTWJetsToQQ_num_2017 = nEvt_2017[34]
TTZToLLNuNu_num_2017 = nEvt_2017[35]
TTZToQQ_num_2017 = nEvt_2017[36]
#2018
f_2018DY100to200 = TFile(path2018+'DYJetsToLL_M-50_HT-100to200_2018'+'.root')
f_2018DY200to400 = TFile(path2018+'DYJetsToLL_M-50_HT-200to400_2018'+'.root')
f_2018DY400to600 = TFile(path2018+'DYJetsToLL_M-50_HT-400to600_2018'+'.root')
f_2018DY600to800 = TFile(path2018+'DYJetsToLL_M-50_HT-400to600_2018'+'.root')
f_2018DY600to800 = TFile(path2018+'DYJetsToLL_M-50_HT-600to800_2018'+'.root')
f_2018DY800to1200 = TFile(path2018+'DYJetsToLL_M-50_HT-800to1200_2018'+'.root')
f_2018DY1200to2500 = TFile(path2018+'DYJetsToLL_M-50_HT-1200to2500_2018'+'.root')
f_2018DY2500toInf = TFile(path2018+'DYJetsToLL_M-50_HT-2500toInf_2018'+'.root')
f_2018WJ100to200 = TFile(path2018+'WJetsToLNu_HT-100To200_2018'+'.root')
f_2018WJ200to400 = TFile(path2018+'WJetsToLNu_HT-200To400_2018'+'.root')
f_2018WJ400to600 = TFile(path2018+'WJetsToLNu_HT-400To600_2018'+'.root')
f_2018WJ600to800 = TFile(path2018+'WJetsToLNu_HT-600To800_2018'+'.root')
f_2018WJ800to1200 = TFile(path2018+'WJetsToLNu_HT-800To1200_2018'+'.root')
f_2018WJ1200to2500 = TFile(path2018+'WJetsToLNu_HT-1200To2500_2018'+'.root')
f_2018WJ2500toInf = TFile(path2018+'WJetsToLNu_HT-2500ToInf_2018'+'.root')
f_2018ST_tW_top = TFile(path2018+'ST_tW_top_2018'+'.root')
f_2018ST_tW_antitop = TFile(path2018+'ST_tW_antitop_2018'+'.root')
f_2018ST_t_top = TFile(path2018+'ST_t-channel_top_2018'+'.root')
f_2018ST_t_antitop = TFile(path2018+'ST_t-channel_antitop_2018'+'.root')
#f_2018ttbar = TFile(path2018+'TTJets_TuneCP5_13TeV-madgraphMLM_2018'+'.root')
f_2018ttbar_2L2Nu = TFile(path2018+'TTTo2L2Nu_TuneCP5_powheg-pythia8_2018'+'.root')
f_2018ttbar_SemiLep = TFile(path2018+'TTToSemiLeptonic_TuneCP5_powheg-pythia8_2018'+'.root')
f_2018ttbar_Had = TFile(path2018+'TTToHadronic_TuneCP5_powheg-pythia8_2018'+'.root')
#f_2018QCD100to200 = TFile(path2018+'QCD_HT100to200_2018'+'.root')
#f_2018QCD200to300 = TFile(path2018+'QCD_HT200to300_2018'+'.root')
f_2018QCD300to500 = TFile(path2018+'QCD_HT300to500_2018'+'.root')
f_2018QCD500to700 = TFile(path2018+'QCD_HT500to700_2018'+'.root')
f_2018QCD700to1000 = TFile(path2018+'QCD_HT700to1000_2018'+'.root')
f_2018QCD1000to1500 = TFile(path2018+'QCD_HT1000to1500_2018'+'.root')
f_2018QCD1500to2000 = TFile(path2018+'QCD_HT1500to2000_2018'+'.root')
f_2018QCD2000toInf = TFile(path2018+'QCD_HT2000toInf_2018'+'.root')
f_2018SIG1200 = TFile(path2018+'TprimeBToBW_M-1200_2018'+'.root')
#===== cross sections (pb)==========
Top_xs_2018 = 831.76 *gSF
#2018 Muon Ch SF
#DeepCSV
#Top_xs_2018 = 831.76 *gSF *0.895
#DeepFLV
#Top_xs_2018 = 831.76 *gSF *0.877
#2018 Ele Ch SF
#DeepCSV
#Top_xs_2018 = 831.76 *gSF *0.852
#DeepFLV
#Top_xs_2018 = 831.76 *gSF *0.843
DY100to200_xs_2018 = 147.4 *gSF
DY200to400_xs_2018 = 41.04 *gSF
DY400to600_xs_2018 = 5.674 *gSF
DY600to800_xs_2018 = 1.358 *gSF
DY800to1200_xs_2018 = 0.6229 *gSF
DY1200to2500_xs_2018 = 0.1512 *gSF
DY2500toInf_xs_2018 = 0.003659 *gSF
if (applyHTScale):
WJ100to200_xs_2018 = 1345.0 *gSF *1.21 *1.0 *corrSF
WJ200to400_xs_2018 = 359.7 *gSF *1.21 *1.0 *corrSF
WJ400to600_xs_2018 = 48.9 *gSF *1.21 *0.88842 *corrSF
WJ600to800_xs_2018 = 12.05 *gSF *1.21 *0.83367 *corrSF
WJ800to1200_xs_2018 = 5.501 *gSF *1.21 *0.76412 *corrSF
WJ1200to2500_xs_2018 = 1.329 *gSF *1.21 *0.67636 *corrSF
WJ2500toInf_xs_2018 = 0.03216 *gSF *1.21 *0.58820 *corrSF
else:
WJ100to200_xs_2018 = 1345.0 *gSF *1.21 *corrSF
WJ200to400_xs_2018 = 359.7 *gSF *1.21 *corrSF
WJ400to600_xs_2018 = 48.9 *gSF *1.21 *corrSF
WJ600to800_xs_2018 = 12.05 *gSF *1.21 *corrSF
WJ800to1200_xs_2018 = 5.501 *gSF *1.21 *corrSF
WJ1200to2500_xs_2018 = 1.329 *gSF *1.21 *corrSF
WJ2500toInf_xs_2018 = 0.03216 *gSF *1.21 *corrSF
'''
##From Julie
WJ100to200_xs_2018 = 1345.0 *gSF *1.21 *0.998056
WJ200to400_xs_2018 = 359.7 *gSF *1.21 *0.978569
WJ400to600_xs_2018 = 48.9 *gSF *1.21 *0.928054
WJ600to800_xs_2018 = 12.05 *gSF *1.21 *0.856705
WJ800to1200_xs_2018 = 5.501 *gSF *1.21 *0.757463
WJ1200to2500_xs_2018 = 1.329 *gSF *1.21 *0.608292
WJ2500toInf_xs_2018 = 0.03216 *gSF *1.21 *0.454246
'''
#Without LHE correction
#WJ100to200_xs_2018 = 1345.0 *gSF *1.21
#WJ200to400_xs_2018 = 359.7 *gSF *1.21
#WJ400to600_xs_2018 = 48.9 *gSF *1.21
#WJ600to800_xs_2018 = 12.05 *gSF *1.21
#WJ800to1200_xs_2018 = 5.501 *gSF *1.21
#WJ1200to2500_xs_2018 = 1.329 *gSF *1.21
#WJ2500toInf_xs_2018 = 0.03216 *gSF *1.21
ST_tW_top_xs_2018 = 35.6 *gSF
ST_tW_antitop_xs_2018 = 35.6 *gSF
ST_t_top_xs_2018 = 44.33 *gSF
ST_t_antitop_xs_2018 = 26.38 *gSF
#QCD NLO XS
fact = 1
QCD100to200_xs_2018 = 27990000 * fact
QCD200to300_xs_2018 = 1710000 *fact
QCD300to500_xs_2018 = 347500 *fact
QCD500to700_xs_2018 = 32060 *fact
QCD700to1000_xs_2018 = 6829 *fact
QCD1000to1500_xs_2018 = 1207 *fact
QCD1500to2000_xs_2018 = 120 *fact
QCD2000toInf_xs_2018 = 25.25 *fact
SIG1200_xs_2018 = 1.0
Top_xs_20182L2Nu = 88.29 *gSF
Top_xs_2018SemiLep = 365.34 *gSF
Top_xs_2018Had = 377.96 *gSF
#===== Number of generated events ======
nEvt_2018 = [1.02304e+07, 1.15167e+07,1.12046e+07,3.84234e+07,8.82624e+06,3.12098e+06,531567,415517,2.83323e+07,2.54151e+07,5.9136e+06,1.96908e+07,8.35792e+06,7.56707e+06,3.1894e+06,3.34875e+08,2.6647e+08,1.66035e+10,5.126e+09,9.39482e+07,5.4247e+07,5.45941e+07,5.50468e+07,4.80282e+07,1.54035e+07,1.08839e+07,5.41226e+06, 100000, 4.62208e+09,6.00504e+10,6.26094e+10]
Top_num_2018 = nEvt_2018[0]
DY100to200_num_2018 = nEvt_2018[1]
DY200to400_num_2018 = nEvt_2018[2]
DY400to600_num_2018 = nEvt_2018[3]
DY600to800_num_2018 = nEvt_2018[4]
DY800to1200_num_2018 = nEvt_2018[5]
DY1200to2500_num_2018 = nEvt_2018[6]
DY2500toInf_num_2018 = nEvt_2018[7]
#Without Ht
WJ100to200_num_2018 = nEvt_2018[8]
WJ200to400_num_2018 = nEvt_2018[9]
WJ400to600_num_2018 = nEvt_2018[10]
WJ600to800_num_2018 = nEvt_2018[11]
WJ800to1200_num_2018 = nEvt_2018[12]
WJ1200to2500_num_2018 = nEvt_2018[13]
WJ2500toInf_num_2018 = nEvt_2018[14]
ST_tW_top_num_2018 = nEvt_2018[15]
ST_tW_antitop_num_2018= nEvt_2018[16]
ST_t_top_num_2018 = nEvt_2018[17]
ST_t_antitop_num_2018 = nEvt_2018[18]
QCD100to200_num_2018 = nEvt_2018[19]
QCD200to300_num_2018 = nEvt_2018[20]
QCD300to500_num_2018 = nEvt_2018[21]
QCD500to700_num_2018 = nEvt_2018[22]
QCD700to1000_num_2018 = nEvt_2018[23]
QCD1000to1500_num_2018 = nEvt_2018[24]
QCD1500to2000_num_2018 = nEvt_2018[25]
QCD2000toInf_num_2018 = nEvt_2018[26]
SIG1200_num_2018 = nEvt_2018[27]
Top_num_20182L2Nu = nEvt_2018[28]
Top_num_2018SemiLep = nEvt_2018[29]
Top_num_2018Had = nEvt_2018[30]
# Legend
#AI:
#leg = TLegend(0.76,0.88,0.94,0.50)
leg = TLegend(0.73,0.45,0.88,0.86)
leg.SetBorderSize(0)
leg.SetFillColor(10)
leg.SetLineColor(10)
leg.SetLineWidth(0)
# =====================================================
# FUNCTIONS
# =====================================================
def setTitle(hs,xTitle):
y = hs.GetYaxis()
x = hs.GetXaxis()
#AI:
y.SetTitle("Events / bin")
x.SetTitle(xTitle)
y.SetLabelSize(0.05)
#AI:
y.SetTitleSize(0.06)
y.SetTitleOffset(0.8)
y.SetTitleFont(42)
x.SetTitleSize(0.05)
x.SetTitleFont(42)
def prepareRatio(h_ratio, h_ratiobkg, scale, xTitle):
h_ratio.SetTitle("")
h_ratio.GetYaxis().SetTitle("Data / Bkg")
h_ratio.GetXaxis().SetTitle(xTitle)
h_ratio.SetMarkerStyle(8)
h_ratio.SetMaximum(1.5)
h_ratio.SetMinimum(0.5)
#h_ratio.SetMaximum(3.0)
#h_ratio.SetMinimum(-1.0)
h_ratio.GetYaxis().SetLabelSize(0.06*scale)
#AI:
h_ratio.GetYaxis().SetTitleOffset(1.20/scale*0.5)
#AI:
h_ratio.GetYaxis().SetTitleSize(0.07*scale)
h_ratio.GetYaxis().SetTitleFont(42)
h_ratio.GetXaxis().SetLabelSize(0.06*scale)
#AI:
h_ratio.GetXaxis().SetTitleOffset(0.5*scale)
#AI:
h_ratio.GetXaxis().SetTitleSize(0.08*scale)
h_ratio.GetYaxis().SetNdivisions(505)
h_ratio.GetXaxis().SetNdivisions(510)
h_ratio.SetTickLength(0.06,"X")
h_ratio.SetTickLength(0.05,"Y")
## The uncertainty band
h_ratio_bkg.SetMarkerSize(0)
h_ratio_bkg.SetFillColor(kGray+1)
h_ratio_bkg.GetYaxis().SetLabelSize(0.6*scale)
h_ratio_bkg.GetYaxis().SetTitleOffset(1.00/scale*0.6)
h_ratio_bkg.GetYaxis().SetTitleSize(0.08*scale)
h_ratio_bkg.GetYaxis().SetTitleFont(42)
h_ratio_bkg.GetXaxis().SetLabelSize(0.08*scale)
h_ratio_bkg.GetXaxis().SetTitleOffset(0.45*scale)
h_ratio_bkg.GetXaxis().SetTitleSize(0.09*scale)
h_ratio_bkg.GetYaxis().SetNdivisions(505)
h_ratio_bkg.GetXaxis().SetNdivisions(510)
h_ratio_bkg.SetTickLength(0.05,"X")
h_ratio_bkg.SetTickLength(0.05,"y")
h_ratio_bkg.SetTitle("")
h_ratio_bkg.SetMaximum(1.6)
h_ratio_bkg.SetMinimum(0.4)
def overUnderFlow(hist):
xbins = hist.GetNbinsX()
hist.SetBinContent(xbins, hist.GetBinContent(xbins)+hist.GetBinContent(xbins+1))
hist.SetBinContent(1, hist.GetBinContent(0)+hist.GetBinContent(1))
hist.SetBinError(xbins, TMath.Sqrt(TMath.Power(hist.GetBinError(xbins),2)+TMath.Power(hist.GetBinError(xbins+1),2)))
hist.SetBinError(1, TMath.Sqrt(TMath.Power(hist.GetBinError(0),2)+TMath.Power(hist.GetBinError(1),2)))
hist.SetBinContent(xbins+1, 0.)
hist.SetBinContent(0, 0.)
hist.SetBinError(xbins+1, 0.)
hist.SetBinError(0, 0.)
def setCosmetics(hist, legname, hname, color, doCosmetics):
#hist.Rebin(rebinS)
if (doCosmetics):
#AI:
# hist.SetLineColor(color)
hist.SetLineColor(1)
hist.SetName(hname)
if 'Data' in hname:
leg.AddEntry(hist, legname, 'pl')
hist.SetMarkerStyle(8)
elif 'tZ' in hname:
hist.SetLineWidth(2)
leg.AddEntry(hist, legname, 'l')
#AI:
elif 'Signal' in hname:
leg.AddEntry(hist,legname,'pl')
hist.SetLineWidth(2)
hist.SetLineStyle(7)
else:
hist.SetFillColor(color)
leg.AddEntry(hist, legname, 'f')
def getHisto( label, leg, dir, var, Samples, color, verbose, doCosmetics) :
histos = []
for iSample in Samples :
ifile = iSample[0]
xs = iSample[1]
nevt = iSample[2]
lumi = iSample[3]
readname = var
#readname = dir+'/'+var
hist = ifile.Get(readname).Clone()
hist.SetDirectory(0);
if verbose:
print ('file: {0:<20}, histo:{1:<10}, integral before weight:{2:<3.3f}, nEntries:{3:<3.0f}, weight:{4:<2.3f}'.format(
ifile.GetName(),
hist.GetName(),
hist.Integral(), hist.GetEntries(), xs * lumi /nevt
))
#hist.Sumw2()
hist.Scale( xs * lumi /nevt)
hist.Rebin(rebinS)
histos.append( hist )
histo = histos[0]
setCosmetics(histo, leg, label+var, color, doCosmetics)
for ihisto in range(1, len(histos) ):
#print 'ihisto =', ihisto, 'integral', histos[ihisto].Integral(), ', entries', histos[ihisto].GetEntries(), ", x low ", histos[ihisto].GetBinLowEdge(1), ", x high ", histos[ihisto].GetBinLowEdge(histos[ihisto].GetXaxis().GetNbins()+1), ", nBins ", histos[ihisto].GetXaxis().GetNbins(), ", bin width ", histos[ihisto].GetXaxis().GetBinWidth(1)
histo.Add( histos[ihisto], 1 )
#print 'after addition', histo.Integral()
if verbose:
print ('newName: {0:<5}, Entries:{1:5.2f}, newIntegral: {2:5.2f}'.format(label+var, histo.GetEntries(), histo.Integral() ) )
return histo
|
989,704 | 8bc2c72cde7074b4bbdbf492c23bd70ecdd3c81a | #!/usr/bin/python3
"""
Takes a CTM (time aligned) file and produces an Elan file.
If the CTM has confidence values, write them as a ref tier.
Copyright: University of Queensland, 2021
Contributors:
Ben Foley - (University of Queensland, 2021)
Nicholas Lambourne - (University of Queensland, 2018)
"""
from argparse import ArgumentParser
from csv import reader
from loguru import logger
from pathlib import Path
from typing import Dict, Tuple
import codecs
from pympi.Elan import Eaf
# The magic number 20 here is to help pympi find the parent annotation.
# There may be a better way to do it but i noticed that if I used the exact start time,
# sometimes pympi would locate the child annotation with the parent annotation that is adjacent to the intended one.
# Also happened for +1 but seems to be finding the parent better with this "buffer" of 20. Weird.
PYMPI_CHILD_ANNOTATION_OFFSET = 20
def ctm_to_dictionary(
ctm_file_path: str, segments_dictionary: Dict[str, str], confidence: bool
) -> dict:
with codecs.open(ctm_file_path, encoding="utf8") as file:
ctm_entries = list(reader(file, delimiter=" "))
ctm_dictionary = dict()
for entry in ctm_entries:
utterance_id, segment_start_time = segments_dictionary[entry[0]]
if utterance_id not in ctm_dictionary:
ctm_dictionary[utterance_id] = []
relative_start_time = float(entry[2])
absolute_start_time = segment_start_time + relative_start_time
absolute_end_time = absolute_start_time + float(entry[3])
inferred_text = entry[4]
confidence = entry[5] if confidence else None
utterance_segment = (
str(absolute_start_time),
str(absolute_end_time),
inferred_text,
confidence,
)
ctm_dictionary[utterance_id].append(utterance_segment)
return ctm_dictionary
def get_segment_dictionary(segment_file_name: str) -> Dict[str, Tuple[str, float]]:
with open(segment_file_name, "r") as file:
segment_entries = list(reader(file, delimiter=" "))
segment_dictionary = dict()
for entry in segment_entries:
segment_id = entry[0]
utterance_id = entry[1]
start_time = float(entry[2])
segment_dictionary[segment_id] = (utterance_id, start_time)
return segment_dictionary
def wav_scp_to_dictionary(scp_file_name: str) -> dict:
wav_dictionary = dict()
with open(scp_file_name) as file:
wav_entries = file.read().splitlines()
for line in wav_entries:
entry = line.split(" ", 1) # use 1 here in case wav filenames include spaces
utterance_id = entry[0]
wav_file_path = entry[1]
wav_dictionary[utterance_id] = wav_file_path
return wav_dictionary
def create_eaf_and_textgrid(
wav_dictionary: dict, ctm_dictionary: dict, confidence: bool, output_directory: str
):
for index, [utterance_id, audio_filename] in enumerate(wav_dictionary.items()):
eaf = Eaf()
eaf.add_linked_file(audio_filename)
eaf.add_linguistic_type("conf_lt", "Symbolic_Association")
eaf.add_tier("default")
if confidence:
eaf.add_tier("confidence", parent="default", ling="conf_lt")
for annotation in ctm_dictionary[utterance_id]:
# Annotation looks like ('0.32', '0.52', 'word', '0.81')
# Convert times to ms integers
start, end, value, *conf = annotation
start_ms = int(float(start) * 1000)
end_ms = int(float(end) * 1000)
# Add the transcription annotation
eaf.add_annotation("default", start_ms, end_ms, value)
# Add the confidence value as a reference annotation
if conf:
# Add a time value to the start time so the ref falls within a parent slot
eaf.add_ref_annotation(
"confidence", "default", start_ms + PYMPI_CHILD_ANNOTATION_OFFSET, conf[0]
)
# Save as Elan eaf file
output_eaf = str(Path(output_directory, f"utterance-{index}.eaf"))
eaf.to_file(output_eaf)
# Make a Textgrid format version
output_textgrid = str(Path(output_directory, f"utterance-{index}.Textgrid"))
textgrid = eaf.to_textgrid()
textgrid.to_file(output_textgrid)
def main() -> None:
parser: ArgumentParser = ArgumentParser(
description="Converts Kaldi CTM format to Elan .eaf format."
)
parser.add_argument("-c", "--ctm", type=str, help="The input CTM format file", required=True)
parser.add_argument("-w", "--wav", type=str, help="The input wav.scp file", required=True)
parser.add_argument(
"-s", "--seg", type=str, help="The segment to utterance mapping", default="./segments"
)
parser.add_argument(
"-o", "--outdir", type=str, help="The directory path for the Elan output", default="."
)
parser.add_argument("--confidence", dest="confidence", action="store_true")
parser.add_argument("--no-confidence", dest="confidence", action="store_false")
parser.set_defaults(confidence=True)
arguments = parser.parse_args()
segments_dictionary = get_segment_dictionary(arguments.seg)
ctm_dictionary = ctm_to_dictionary(arguments.ctm, segments_dictionary, arguments.confidence)
wav_dictionary = wav_scp_to_dictionary(arguments.wav)
output_directory = Path(arguments.outdir)
logger.info("==== CTM to Elan args ====")
logger.info(f"{segments_dictionary=}")
logger.info(f"{ctm_dictionary=}")
logger.info(f"{wav_dictionary=}")
logger.info(f"{output_directory=}")
if not output_directory.parent:
Path.mkdir(output_directory.parent, parents=True)
create_eaf_and_textgrid(wav_dictionary, ctm_dictionary, arguments.confidence, output_directory)
if __name__ == "__main__":
main()
|
989,705 | 65815d19d9dfa6e6cde6ee047b5caaa63436d53b |
print ((lambda a,b,c,d: a**b+c**d )((int(input())),(int(input())),(int(input())),(int(input()))))
|
989,706 | 67c7a194f8feabeea88293c199daf35d7680957a | #coding=utf-8
from time import sleep
from public.common import mytest
from public.pages import XeroLoginPage
from public.pages import XeroDashboardPage
from public.pages import XeroBankAccountsPage
from public.pages import XeroChartOfAccountsPage
from public.common import datainfo
from public.common import pyselenium
class SanityTest_Xero(mytest.MyTest):
"""UAT"""
def test_add_bank_account_info(self):
""" Login page """
#STEP1: go to login page and check the page title
login_page = XeroLoginPage.XeroLogin(self.dr)
login_page.into_login_page()
title_login = login_page.return_title()
#CheckPoint1: to check title of the Login page
self.assertIn('Login | Xero Accounting Software',title_login,"CheckPoint1: Failed to load Login page")
""" Dashboard page """
#STEP2: login, check title of Dashboard after switching to another Organ
login_page.do_login()
login_page.switch_to_random_ORG()
title = login_page.return_title()
#CheckPoint2: to check the title of Dashboard page
self.assertIn("Xero | Dashboard",title,"CheckPoint2: Failed to load Dashboard after switching to another Organsiation")
""" Bank Accounts page """
#STEP3: go to Bank Account page and check the title
dashboard = XeroDashboardPage.XeroDashboard(self.dr)
dashboard.switch_to_bankaccount_page()
title_bankaccount = login_page.return_title()
#CheckPoint3: to check the title of Bank Accounts page
self.assertIn("Xero | Bank accounts",title_bankaccount,"CheckPoint3: Failed to load Bank accounts page")
""" ANZ (NZ) bank availability """
#STEP4: Add bank account and check whether ANZ (NZ) bank is available
bankAccount = XeroBankAccountsPage.XeroBankaccounts(self.dr)
result_ANZavailability = bankAccount.check_bank_option()
#CheckPoint4: to check whether the ANZ (NZ) bank is available
self.assertIn('Available',result_ANZavailability,"CheckPoint4: Failed to add ANZ (NZ) bank as the option is not available")
""" New Bank Account """
#STEP5: Add bank account and check whether bank account has been added successfully
bankAccount = XeroBankAccountsPage.XeroBankaccounts(self.dr)
result_bankStatus = bankAccount.add_bank_details()
#CheckPoint5: to check whether bank account has been added successfully
self.assertIn('Failed', result_bankStatus,"CheckPoint5: Failed to add Bank account")
""" TearDown """
#STEP6: Clear newly added bank account info from this account
chartAccounts = XeroChartOfAccountsPage.XeroChartAccount(self.dr)
result_bankRemoval = chartAccounts.delete_bankAccount()
#CheckPoint6: to check whether bank account has been deleted successfully
self.assertIn('Bank Account has been deleted successfully', result_bankRemoval,"CheckPoint6: Failed to Delete Bank account")
|
989,707 | e9435932f56ac1b2a787a1355271c257a7013ebe | from django.shortcuts import render
from django.contrib.auth.models import User
from django.db.models import Sum
from django_filters import FilterSet
from django_filters import rest_framework as filters
from rest_framework import viewsets
from rest_framework.filters import OrderingFilter
from rest_framework.exceptions import APIException
from url_filter.integrations.drf import DjangoFilterBackend
from adjustapi.serializers import UserSerializer, DatasetSerializer
from dataset.models import Dataset
# ViewSets define the view behavior.
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
# ViewSets define the view behavior.
class DatasetViewSet(viewsets.ModelViewSet):
queryset = Dataset.objects.all()
serializer_class = DatasetSerializer
filter_backends = [DjangoFilterBackend, OrderingFilter]
filter_fields = ['date', 'channel', 'country', 'os']
ordering_fields = ['date', 'channel', 'country', 'os', 'impressions',
'clicks', 'installs', 'spend', 'revenue']
def get_queryset(self):
group_params = self.request.query_params.get('group_by')
queryset = Dataset.objects.all()
if group_params:
group_fields = [x.strip() for x in group_params.split(',')]
if len(list(set(group_fields) - set(self.filter_fields))) > 0 and len(group_fields) > 0:
raise APIException('Wrong Params to group with!')
queryset = queryset.values(*group_fields).annotate(impressions=Sum(
'impressions'), clicks=Sum('clicks'), installs=Sum('installs'),
spend=Sum('spend'), revenue=Sum('revenue'))
return queryset
|
989,708 | e03d0698ad44bf9fb0fc82c16fe95299a2d10470 | #!/usr/bin/python
import email, sqlite3, glob, os
from email.header import Header, decode_header, make_header
from sendtelegram import SendTelegram
from loadblacklist import LoadBlacklist
from loadconfig import LoadConfig
os.system("offlineimap")
conn = sqlite3.connect('mail.db')
c = conn.cursor()
c.execute('CREATE TABLE if not exists mails(ID INTEGER PRIMARY KEY AUTOINCREMENT, MAILID TEXT, MAILFROM TEXT, MAILTO TEXT, MAILSUBJECT TEXT, MAILBODY TEXT, SENT INTEGER)')
lbl = LoadBlacklist()
lbl.loadlist()
blacklist = lbl.getlist()
lc = LoadConfig()
lc.loadconfig()
mailpath = lc.getconfig('mailpath')
telegram = SendTelegram()
telegram.setToken(lc.getconfig('bottoken'))
for data in glob.glob(mailpath):
f = open(data,"r")
mail = f.read()
f.close()
msg = email.message_from_string(mail)
mailid = msg['Message-ID']
mailid = mailid.encode('base64','strict');
mailfrom = msg['from']
mailto = msg['to']
mailsubject = msg['subject']
subjecttext = ""
for text, encoding in email.Header.decode_header(mailsubject):
subjecttext += text+" "
subjecttext = str(subjecttext)
mailbody = ""
for part in msg.walk():
if part.get_content_type() == 'text/plain':
mailbody += part.get_payload()
mailbody = str(mailbody)
for entry in blacklist:
if not entry in mailfrom:
result = ""
for row in c.execute("SELECT MAILID FROM mails WHERE MAILID='"+mailid+"' LIMIT 1"):
result = row[0]
if not result == mailid:
telegramtext = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mailfrom, mailto, subjecttext, mailbody)
telegramtext = telegramtext[:4096]
telegram.sendText(telegramtext)
mailfrom = mailfrom.encode('base64','strict');
mailto = mailto.encode('base64','strict');
subjecttext = subjecttext.encode('base64','strict');
mailbody = mailbody.encode('base64','strict');
c.execute("INSERT INTO mails VALUES (null, '"+mailid+"', '"+mailfrom+"', '"+mailto+"', '"+subjecttext+"', '"+mailbody+"', 0)")
conn.commit()
conn.close()
|
989,709 | b4b8b5d1ec64e73a09d917f3c6ccf93ef025202b | import os
def calculateMaskIntersection(mask_list, fsl_path, workdir, output_name, output_dir):
output_mask = os.path.join(workdir, 'combined_mask.nii.gz')
base_string = '%s -t %s' % (os.path.join(fsl_path, 'fslmerge'), output_mask)
for i in mask_list:
base_string = base_string + ' ' + i
os.system(base_string)
intersection_mask = os.path.join(output_dir, '%s.nii.gz' % output_name)
os.system('%s %s -Tmean -thr 1 %s' % (os.path.join(fsl_path, 'fslmaths'), output_mask, intersection_mask))
print('Intersection mask built successfuly')
|
989,710 | 3a13d9553648a7ad91dc34e7eb5037423a0e725f | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""TFDS databuilder for fmow_v1.0."""
import datetime as dt
import os
import numpy as onp
import pandas as pd
import pytz
import tensorflow as tf
import tensorflow_datasets as tfds
def filter_date(date, start, end):
return date >= start and date < end
class Fmow(tfds.core.BeamBasedBuilder):
"""TFDS builder for fmow_v1.
The Functional Map of the World land use / building classification dataset.
This is a processed version of the Functional Map of the World dataset
originally sourced from https://github.com/fMoW/dataset.
This dataset is part of the WILDS benchmark.
"""
MANUAL_DOWNLOAD_INSTRUCTIONS = """\
You must manually download and extract fmow_v1.0 data from
(https://worksheets.codalab.org/rest/bundles/0xc59ea8261dfe4d2baa3820866e33d781/contents/blob/)
and place them in `manual_dir`.
"""
VERSION = tfds.core.Version('1.0.0')
_SPLITS = ['train', 'val_id', 'val_ood', 'test_id', 'test_ood']
_CLASSES = [
'airport', 'airport_hangar', 'airport_terminal', 'amusement_park',
'aquaculture', 'archaeological_site', 'barn', 'border_checkpoint',
'burial_site', 'car_dealership', 'construction_site', 'crop_field', 'dam',
'debris_or_rubble', 'educational_institution', 'electric_substation',
'factory_or_powerplant', 'fire_station', 'flooded_road', 'fountain',
'gas_station', 'golf_course', 'ground_transportation_station', 'helipad',
'hospital', 'impoverished_settlement', 'interchange', 'lake_or_pond',
'lighthouse', 'military_facility', 'multi-unit_residential',
'nuclear_powerplant', 'office_building', 'oil_or_gas_facility', 'park',
'parking_lot_or_garage', 'place_of_worship', 'police_station', 'port',
'prison', 'race_track', 'railway_bridge', 'recreational_facility',
'road_bridge', 'runway', 'shipyard', 'shopping_mall',
'single-unit_residential', 'smokestack', 'solar_farm', 'space_facility',
'stadium', 'storage_tank', 'surface_mine', 'swimming_pool', 'toll_booth',
'tower', 'tunnel_opening', 'waste_disposal', 'water_treatment_facility',
'wind_farm', 'zoo'
]
# pylint: disable=g-tzinfo-datetime
# pylint: disable=g-long-lambda
_DOMAIN_FILTERS = {
'train':
lambda date: filter(date, dt.datetime(2002, 1, 1, tzinfo=pytz.UTC),
dt.datetime(2013, 1, 1, tzinfo=pytz.UTC)),
'val_id':
lambda date: filter(date, dt.datetime(2002, 1, 1, tzinfo=pytz.UTC),
dt.datetime(2013, 1, 1, tzinfo=pytz.UTC)),
'val_ood':
lambda date: filter(date, dt.datetime(2013, 1, 1, tzinfo=pytz.UTC),
dt.datetime(2016, 1, 1, tzinfo=pytz.UTC)),
'test_id':
lambda date: filter(date, dt.datetime(2002, 1, 1, tzinfo=pytz.UTC),
dt.datetime(2013, 1, 1, tzinfo=pytz.UTC)),
'test_ood':
lambda date: filter(date, dt.datetime(2016, 1, 1, tzinfo=pytz.UTC),
dt.datetime(2018, 1, 1, tzinfo=pytz.UTC)),
}
# pylint: enable=g-long-lambda
# pylint: enable=g-tzinfo-datetime
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=('fmow:'),
features=tfds.features.FeaturesDict({
'image':
tfds.features.Image(
shape=(None, None, 3), encoding_format='jpeg'),
'label':
tfds.features.ClassLabel(names=self._CLASSES)
}),
supervised_keys=('image', 'label'),
homepage='https://github.com/fMoW/dataset',
citation=r"""@inproceedings{fmow2018,
title={Functional Map of the World},
author={Christie, Gordon and Fendley, Neil and Wilson, James and Mukherjee, Ryan},
booktitle={CVPR},
year={2018}
}""",
)
def _split_generators(self, dl_manager):
"""Download data and define the splits."""
image_dirs = os.path.join(dl_manager.manual_dir)
meta_dirs = os.path.join(dl_manager.manual_dir, 'rgb_metadata.csv')
splits = []
for split in self._SPLITS:
gen_kwargs = {
'data_dir': image_dirs,
'meta_dir': meta_dirs,
'split': split,
}
splits.append(
tfds.core.SplitGenerator(name=f'{split}', gen_kwargs=gen_kwargs))
return splits
def _build_pcollection(self, pipeline, data_dir, meta_dir, split):
"""Generate examples as dicts."""
beam = tfds.core.lazy_imports.apache_beam
with tf.io.gfile.GFile(meta_dir) as meta_file:
meta_lines = meta_file.readlines()
header = meta_lines[0].split(',')
examples_descriptions = meta_lines[1:]
total_examples = len(examples_descriptions)
examples_descriptions = enumerate(examples_descriptions)
split_index = header.index('split')
date_index = header.index('timestamp')
def _process_example(example_description):
(idx, features) = example_description
(unused_split, unused_img_filename, unused_img_path,
unused_spatial_reference, unused_epsg, category, unused_visible,
unused_img_width, unused_img_height, unused_country_code,
unused_cloud_cover, unused_timestamp, unused_lat,
unused_lon) = features.split(',')
chunk_size = total_examples // 100
batch_indx = int(idx) // chunk_size
img_indx = int(idx) % chunk_size
image = onp.load(
os.path.join(data_dir, f'rgb_all_imgs_{batch_indx}.npy'),
mmap_mode='r')[img_indx]
return idx, {'image': image, 'label': category}
def _filter_example(example_description):
time_condition = self._DOMAIN_FILTERS[split](
pd.to_datetime(example_description[1].split(',')[date_index]))
split_condition = (
example_description[1].split(',')[split_index] == split.split('_')[0])
return time_condition and split_condition
return pipeline | beam.Create(
(examples_descriptions
)) | beam.Filter(_filter_example) | beam.Map(_process_example)
|
989,711 | 9fe59b3bafcd89da467c942e3cc03765c984dc0d | from flask import Flask, render_template
from flask import jsonify, request
import flask
from safety import check_safety_dflow
from safety import imgTogif
from safety import darkflow_check
from darkflow.net.build import TFNet
import base64
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
import csv
import urllib
class Path:
def __init__(self, name):
self.name = name
self.headings = []
self.objects = []
def add_heading(self, heading):
self.headings.append(heading)
def add_object(self, object):
self.objects.append(object)
count=-1
origin_images=[]
adv_images=[]
difference_heading=[]
difference_object=[]
new_heading = 0
ori_path = Path('Original')
adv_path = Path('Adv')
options = {"model": "cfg/yolo.cfg", "load": "bin/yolo.weights", "threshold": 0.4}
tfnet = TFNet(options)
app=Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.debug=True
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/_add_numbers')
def add_numbers():
a = request.args.get('a', 0, type=int)
b = request.args.get('b', 0, type=int)
print (a, b)
return jsonify(result=a + b)
@app.route('/_check_image')
def check_image():
ori_l=[]
adv_l=[]
u = request.args.get('u', 0, type=str)
https = request.args.get('https', 0, type=str)
l_pano = request.args.get('l_pano', 0, type=str)
fov = request.args.get('fov', 0, type=str)
heading = request.args.get('heading', 0, type=str)
pitch = request.args.get('pitch', 0, type=str)
key = request.args.get('key', 0, type=str)
ori_path.add_heading(heading)
global count
count+=1
step_name='step{0}.png'.format(count)
if count<10:
step_name='step0{0}.png'.format(count)
adv_found=check_safety_dflow(count, https, l_pano, float(fov), float(heading), float(pitch), key, tfnet)
#results = darkflow_check(count, https, l_pano, float(fov), float(heading), float(pitch), key, tfnet)
#adv_heading = results[2]
#count+=1
#for result in results[0]:
# ori_l.append(result['label'])
#for result in results[1]:
# adv_l.append(result['label'])
#ori_path.add_object(ori_l)
#adv_path.add_object(adv_l)
#adv_path.add_heading(adv_heading)
#zipped = zip(ori_path.headings, adv_path.headings)
#with open('headings.csv','w') as f:
# writer = csv.writer(f, delimiter='\t')
# writer.writerows(zipped)
#difference_heading.append(adv_heading - float(heading))
#with open('heading_difference.txt','w') as f:
# for heading in difference_heading:
# f.write("%f\n" % heading)
#zip_obj = zip(ori_path.objects, adv_path.objects)
#with open('objects.csv','w') as f:
# writer = csv.writer(f, delimiter='\t')
# writer.writerows(zip_obj)
#difference_object.append(set(ori_l).symmetric_difference(set(adv_l)))
#with open('object_difference.csv','w') as f:
# writer = csv.writer(f)
# writer.writerows(difference_object)
#origin_images.append('./images/'+step_name)
#if adv_found:
# adv_images.append('./images/adv_'+step_name)
#else:
# adv_images.append('./images/'+step_name)
#imgTogif(origin_images, adv_images)
#if adv_found:
# return jsonify(image_ret=step_name, adv_image_ret='adv_'+step_name, img_gif_ret='./images/img_out.gif',adv_gif_ret='./images/adv_out.gif',new_h = new_heading)
#else:
# return jsonify(image_ret=step_name, adv_image_ret=step_name, img_gif_ret='./images/img_out.gif',adv_gif_ret='./images/adv_out.gif',new_h = new_heading)
origin_images.append('./images/'+step_name)
adv_images.append('./images/adv_'+step_name)
imgTogif(origin_images, adv_images)
return jsonify(image_ret=step_name, adv_image_ret='adv_'+step_name, img_gif_ret='./images/img_out.gif',adv_gif_ret='./images/adv_out.gif',new_h = new_heading)
@app.route("/images/<path:path>")
def images(path):
fullpath = "./images/"+path
with open(fullpath, 'rb') as f:
resp = flask.make_response(f.read())
resp.content_type = "image/gif"
return resp
if __name__=='__main__':
app.run()
|
989,712 | 536599aaf20130666ae7ade51eff4e5d358ba59d | #=========================================================================
# regincr-adhoc-test <input-values>
#=========================================================================
# Note that you can turn on line tracing, text waveforms, and VCD
# waveforms by adding these options to the DefaultPassGroup.
#
# model.apply( DefaultPassGroup(linetrace=True,textwave=True,vcdwave="regincr-adhoc-test.vcd") )
#
# You will also need to add this to the very end of the script:
#
# model.print_textwave()
#
from pymtl3 import *
from pymtl3.passes.backends.verilog import *
from sys import argv
from RegIncr import RegIncr
# Get list of input values from command line
input_values = [ int(x,0) for x in argv[1:] ]
# Add three zero values to end of list of input values
input_values.extend( [0]*3 )
# ''' TUTORIAL TASK ''''''''''''''''''''''''''''''''''''''''''''''''''''''
# This simulator script is incomplete. As part of the tutorial you will
# insert code here for constructing and elaborating a RegIncr model.
# ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\/
model = RegIncr()
model.elaborate()
# ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''/\
# Apply the Verilog import passes and the default pass group
model.apply( VerilogPlaceholderPass() )
model = VerilogTranslationImportPass()( model )
model.apply( DefaultPassGroup() )
# Reset simulator
model.sim_reset()
# Apply input values and display output values
for input_value in input_values:
# Write input value to input port
model.in_ @= input_value
model.sim_eval_combinational()
# Print input and output ports
print( f" cycle = {model.sim_cycle_count()}: in = {model.in_}, out = {model.out}" )
# Tick simulator one cycle
model.sim_tick()
|
989,713 | a2a5ade696b6ac426421ce803995c5893231b4e1 | from abc import ABCMeta, abstractmethod
class Transition(object):
__metaclass__= ABCMeta
@abstractmethod
def get_trigger(self):
pass
class DefaultTransition(Transition):
def __init__(self, lt_type, label, role):
self.role = role
self.label = label
self.lt_type = lt_type
def get_trigger(self):
return "%s_%s_%s" %(self.lt_type, self.label, str.lower(str(self.role)))
@classmethod
def create_from_string(cls, from_string):
[type, label, role] = from_string.split('_')
return cls(type, label, role)
class AssertionTransition(Transition):
__metaclass__= ABCMeta
@abstractmethod
def get_payload_variable(self):
pass
@abstractmethod
def get_assertion(self):
pass
class PayloadTransition(Transition):
@abstractmethod
def get_peyload(self):
pass
class DefaultAssertionTransition(AssertionTransition):
def __init__(self, lt_type, label, role, payload, assertion):
self.role = role
self.label = label
self.lt_type = lt_type
self.payload = payload
self.assertion = assertion
def get_trigger(self):
return "%s_%s_%s" %(self.lt_type, self.label, self.str.lower(str(self.role)))
def get_payload_variable(self):
return self.payload
def get_assertion(self):
return self.asserti
class TransitionFactory:
@classmethod
def create(cls, lt_type, label, role, settings = None):
if (settings == None):
return "%s_%s_%s" %(lt_type, label, str.lower(str(role)))
|
989,714 | e6b417ee81674a7c3a1d882cf8a26a8387a8ea18 | # ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# dkratzert@gmx.de> wrote this file. As long as you retain
# this notice you can do whatever you want with this stuff. If we meet some day,
# and you think this stuff is worth it, you can buy me a beer in return.
# Dr. Daniel Kratzert
# ----------------------------------------------------------------------------
import re
from contextlib import suppress
from pathlib import Path
from gemmi import cif as gcif
from finalcif.cif.cif_file_io import CifContainer
from finalcif.datafiles.bruker_frame import BrukerFrameHeader
from finalcif.datafiles.data import WorkDataMixin
from finalcif.datafiles.p4p_reader import P4PFile
from finalcif.datafiles.sadabs import Sadabs
from finalcif.datafiles.saint import SaintListFile
from finalcif.datafiles.shelx_lst import SolutionProgram
from finalcif.gui.dialogs import show_general_warning
class MissingCifData():
def __init__(self):
self.data = {}
def __setitem__(self, key, value):
self.data[key] = value
class BrukerData(WorkDataMixin):
def __init__(self, app, cif: CifContainer):
super(BrukerData, self).__init__()
self.cif = cif
self.app = app
self.saint_data = SaintListFile(name_patt='*_0*m._ls', directory=self.cif.fileobj.parent.resolve())
# Using the saint list files name as base reference for all other data containing files:
basename = self.saint_data.filename.stem.split('_0m')[0]
self.basename = re.sub(r'^(cu|mo|ag)_', '', basename)
# This is only in this list file, not in the global:
saint_first_ls = SaintListFile(name_patt='*_01._ls', directory=self.cif.fileobj.parent.resolve())
sol = SolutionProgram(cif)
solution_program = None
if 'shelx' in self.cif.block.find_value('_audit_creation_method').lower():
shelx = 'Sheldrick, G.M. (2015). Acta Cryst. A71, 3-8.\nSheldrick, G.M. (2015). Acta Cryst. C71, 3-8.\n'
else:
shelx = ''
if cif.res_file_data and cif.dsr_used:
dsr = 'The program DSR was used for model building:\n' \
'D. Kratzert, I. Krossing, J. Appl. Cryst. 2018, 51, 928-934. doi: 10.1107/S1600576718004508'
shelx += dsr
abstype = '?'
t_min = '?'
t_max = '?'
# Going back from last dataset:
for n in range(1, len(self.sadabs.datasets) + 1):
try:
abstype = 'numerical' if self.sadabs.dataset(-n).numerical else 'multi-scan'
t_min = self.sadabs.dataset(-n).transmission.tmin
t_max = self.sadabs.dataset(-n).transmission.tmax
if all([abstype, t_min, t_max]):
break
except (KeyError, AttributeError, TypeError):
pass
# print('No .abs file found.')
# no abs file found
# the lower temp is more likely:
try:
temp1 = self.frame_header.temperature
except (AttributeError, KeyError, FileNotFoundError):
temp1 = 293
try:
kilovolt = self.frame_header.kilovolts
except (AttributeError, KeyError, FileNotFoundError):
kilovolt = ''
try:
milliamps = self.frame_header.milliamps
except (AttributeError, KeyError, FileNotFoundError):
milliamps = ''
try:
frame_name = self.frame_header.filename.name
except FileNotFoundError:
frame_name = ''
if not self.cif['_computing_structure_solution'] and self.cif.solution_program_details:
solution_program = (self.cif.solution_program_details, self.cif.fileobj.name)
if self.cif['_computing_structure_solution']:
solution_program = (gcif.as_string(self.cif['_computing_structure_solution']), self.cif.fileobj.name)
if not solution_program:
solution_program = (sol.program.version, Path(sol.program.filename).name)
if self.cif.absorpt_process_details:
absdetails = (self.cif.absorpt_process_details, self.cif.fileobj.name)
else:
absdetails = (self.sadabs.version, self.sadabs.filename.name)
if self.cif.absorpt_correction_type:
abscorrtype = (self.cif.absorpt_correction_type, self.cif.fileobj.name)
else:
abscorrtype = (abstype, self.sadabs.filename.name)
if self.cif.absorpt_correction_t_max:
abs_tmax = (self.cif.absorpt_correction_t_max, self.cif.fileobj.name)
else:
abs_tmax = (str(t_max), self.sadabs.filename.name)
if self.cif.absorpt_correction_t_min:
abs_tmin = (self.cif.absorpt_correction_t_min, self.cif.fileobj.name)
else:
abs_tmin = (str(t_min), self.sadabs.filename.name)
if self.sadabs.Rint:
rint = (self.sadabs.Rint, self.sadabs.filename.name)
self.sources['_diffrn_reflns_av_R_equivalents'] = rint
temp2 = self.p4p.temperature
temperature = round(min([temp1, temp2]), 1)
if temperature < 0.01:
temperature = ''
if (self.cif['_diffrn_ambient_temperature'].split('(')[0] or
self.cif['_cell_measurement_temperature']).split('(')[0] == '0':
show_general_warning('<b>Warning of impossible temperature specification</b>:<br>'
'You probably entered −273.15 °C instead '
'of −173.15 °C into the SHELX instruction file.<br>'
'A temperature of 0 K is likely to be wrong.')
try:
if abs(int(self.cif['_diffrn_ambient_temperature'].split('(')[0]) - int(temperature)) >= 2 and \
not self.app.temperature_warning_displayed:
self.app.temperature_warning_displayed = True
show_general_warning('<b>Warning</b>: The temperature from the measurement and '
'from SHELX differ. Please double-check for correctness.<br><br>'
'SHELX says: {} K<br>'
'The P4P file says: {} K<br>'
'Frame header says: {} K<br><br>'
'You may add a '
'<a href="http://shelx.uni-goettingen.de/shelxl_html.php#TEMP">TEMP</a> '
'instruction to your SHELX file (in °C).'
.format(self.cif['_diffrn_ambient_temperature'].split('(')[0],
round(temp2, 1),
round(temp1, 1)))
except ValueError:
# most probably one value is '?'
pass
if not self.cif['_space_group_name_H-M_alt']:
try:
self.sources['_space_group_name_H-M_alt'] = (
self.cif.space_group, 'Calculated by gemmi: https://gemmi.readthedocs.io')
except AttributeError:
pass
if not self.cif['_space_group_name_Hall']:
with suppress(AttributeError):
self.sources['_space_group_name_Hall'] = (
self.cif.hall_symbol, 'Calculated by gemmi: https://gemmi.readthedocs.io')
if not self.cif['_space_group_IT_number']:
with suppress(AttributeError):
self.sources['_space_group_IT_number'] = (
self.cif.spgr_number_from_symmops, 'Calculated by gemmi: https://gemmi.readthedocs.io')
if not self.cif['_space_group_crystal_system']:
with suppress(AttributeError):
csystem = self.cif.crystal_system
self.sources['_space_group_crystal_system'] = (
csystem, 'calculated by gemmi: https://gemmi.readthedocs.io')
if not self.cif.symmops and self.cif.symmops_from_spgr:
loop = self.cif.block.init_loop('_space_group_symop_operation_', ['xyz'])
for symmop in reversed(self.cif.symmops_from_spgr):
loop.add_row([gcif.quote(symmop)])
# All sources that are not filled with data will be yellow in the main table
# data tooltip
self.sources['_cell_measurement_reflns_used'] = (
self.saint_data.cell_reflections, self.saint_data.filename.name)
self.sources['_cell_measurement_theta_min'] = (
self.saint_data.cell_res_min_theta or '', self.saint_data.filename.name)
self.sources['_cell_measurement_theta_max'] = (
self.saint_data.cell_res_max_theta or '', self.saint_data.filename.name)
self.sources['_computing_data_collection'] = (saint_first_ls.aquire_software, saint_first_ls.filename.name)
self.sources['_computing_cell_refinement'] = (self.saint_data.version, self.saint_data.filename.name)
self.sources['_computing_data_reduction'] = (self.saint_data.version, self.saint_data.filename.name)
self.sources['_exptl_absorpt_correction_type'] = abscorrtype
self.sources['_exptl_absorpt_correction_T_min'] = abs_tmin
self.sources['_exptl_absorpt_correction_T_max'] = abs_tmax
self.sources['_exptl_absorpt_process_details'] = absdetails
self.sources['_cell_measurement_temperature'] = (temperature, self.p4p.filename.name)
self.sources['_diffrn_ambient_temperature'] = (temperature, self.p4p.filename.name)
self.sources['_exptl_crystal_colour'] = (self.p4p.crystal_color, self.p4p.filename.name)
self.sources['_exptl_crystal_description'] = (self.p4p.morphology, self.p4p.filename.name)
self.sources['_exptl_crystal_size_min'] = (self.p4p.crystal_size[0] or '', self.p4p.filename.name)
self.sources['_exptl_crystal_size_mid'] = (self.p4p.crystal_size[1] or '', self.p4p.filename.name)
self.sources['_exptl_crystal_size_max'] = (self.p4p.crystal_size[2] or '', self.p4p.filename.name)
self.sources['_computing_structure_solution'] = solution_program
self.sources['_atom_sites_solution_primary'] = (sol.method, 'Inherited from solution program.')
self.sources['_diffrn_source_voltage'] = (kilovolt or '', frame_name)
self.sources['_diffrn_source_current'] = (milliamps or '', frame_name)
self.sources['_chemical_formula_moiety'] = ('', '')
self.sources['_publ_section_references'] = (shelx, '')
self.sources['_refine_special_details'] = ('', '')
self.sources['_exptl_crystal_recrystallization_method'] = ('', '')
if not self.cif.is_centrosymm:
self.sources['_chemical_absolute_configuration'] = ('', '')
if self.saint_data.is_twin and self.saint_data.components_firstsample == 2:
with suppress(Exception):
law = self.saint_data.twinlaw[list(self.saint_data.twinlaw.keys())[0]]
self.sources['_twin_individual_twin_matrix_11'] = (str(law[0][1]), self.saint_data.filename.name)
self.sources['_twin_individual_twin_matrix_12'] = (str(law[0][2]), self.saint_data.filename.name)
self.sources['_twin_individual_twin_matrix_13'] = (str(law[0][0]), self.saint_data.filename.name)
self.sources['_twin_individual_twin_matrix_21'] = (str(law[1][1]), self.saint_data.filename.name)
self.sources['_twin_individual_twin_matrix_22'] = (str(law[1][2]), self.saint_data.filename.name)
self.sources['_twin_individual_twin_matrix_23'] = (str(law[1][0]), self.saint_data.filename.name)
self.sources['_twin_individual_twin_matrix_31'] = (str(law[2][1]), self.saint_data.filename.name)
self.sources['_twin_individual_twin_matrix_32'] = (str(law[2][2]), self.saint_data.filename.name)
self.sources['_twin_individual_twin_matrix_33'] = (str(law[2][0]), self.saint_data.filename.name)
self.sources['_twin_individual_id'] = (
str(self.saint_data.components_firstsample), self.saint_data.filename.name)
self.sources['_twin_special_details'] = (
'The data was integrated as a 2-component twin.', self.saint_data.filename.name)
@property
def sadabs(self):
sad = Sadabs(basename='*.abs', searchpath=self.cif.fileobj.parent)
return sad
@property
def frame_header(self):
return BrukerFrameHeader(self.basename, self.cif.fileobj.parent)
@property
def p4p(self):
return P4PFile(self.basename, self.cif.fileobj.parent)
|
989,715 | 39818e93ce65cd4095c2fffa562be58f5f266c8c | import logging
from time import sleep
import click as click
import yaml
from bb8.process.cmd import get_return_code
from bb8.script.config import default_config_file
class CheckResult:
def __init__(self):
self.last_failed = 0
self.last_output = None
def add(self, return_code, output):
self.last_output = output
if return_code:
self.last_failed += 1
else:
self.last_failed = 0
@property
def ok(self):
return self.last_failed == 0
def __repr__(self, *args, **kwargs):
return self.__str__(*args, **kwargs)
def __str__(self, *args, **kwargs):
if self.ok:
return "OK"
else:
return "FAILED {0} times. Output: {1}".format(self.last_failed, self.last_output)
class FailedMon(object):
def __init__(self, item):
self.item = item
self.check_result = CheckResult()
self.failed_result = CheckResult()
def check(self):
check_return_code, check_out = get_return_code(self.item['check'])
self.check_result.add(check_return_code, check_out)
def execute_on_failed(self):
print("Run {0}".format(self.item['failed']))
failed_rc, failed_out = get_return_code(self.item['failed'])
self.failed_result.add(failed_rc, failed_out)
# if failed_rc:
# print("Run failed command FAILED. {0}".format(failed_out))
class FailedMonManager:
def __init__(self, items, check_only):
assert isinstance(items, list)
self.check_only = check_only
self.mons = {}
for item in items:
check_name = item['name']
self.mons[check_name] = FailedMon(item)
self.logger = logging.getLogger(self.__class__.__name__)
def execute(self):
for check_name in self.mons:
try:
mon = self.mons[check_name]
mon.check()
print("Check {0}: {1}".format(check_name, mon.check_result))
if self.check_only:
continue
if not mon.check_result.ok:
mon.execute_on_failed()
print(mon.failed_result)
except Exception as e:
self.logger.error("Check {0} failed".format(check_name))
self.logger.exception(e)
@click.command('failed-mon', help='Run check, if failed, run ')
@click.option('--check', 'check_only', is_flag=True, help='Check then exit')
@click.option('--sleep', '-s', 'sleep_time', default=5 * 60, help='Sleep time')
@click.option('--config', '-c', 'config_file', default=default_config_file, help='Path to config file')
def failed_monitor(config_file, check_only, sleep_time):
data = yaml.load(open(config_file))
items = data['failed-monitor']
failed_mon_manager = FailedMonManager(items, check_only=check_only)
while True:
failed_mon_manager.execute()
# for item in items:
# try:
# check_name = item['name']
# check_return_code, check_out = get_return_code(item['check'])
#
# if check_return_code:
# print("Check {0}: FAILED ({1}). Out: {2}".format(check_name, check_return_code, check_out))
#
# if check_only:
# continue
#
# print("Run {0}".format(item['failed']))
# failed_rc, failed_out = get_return_code(item['failed'])
# if failed_rc:
# print("Run failed command FAILED. {0}".format(failed_out))
#
# print(failed_out)
# else:
# print("Check {0}: PASSED".format(check_name))
# except Exception as e:
# logging.error("Error on %s" % item)
# logging.error(e)
print("Sleep %s before check again" % sleep_time)
sleep(sleep_time)
# print(data)
|
989,716 | f50ef2f1c6509e2b163ebdadfe6ab2380eb62335 | import numpy as np
import pandas as pd
import pypianoroll as pyp
datadir = '/Users/sorensabet/Desktop/MSC/CSC2506_Project/data/Generated MIDI/'
mt = pyp.read(datadir + 'major_36_16th_MEL_TWINKLE.mid')
print(mt)
# mt.resolution: Temporal resolution in timesteps per quarter note
# mt.tempo: Tempo of the song at each timestep. Don't need to worry about this because it is standardized.
#mt.plot()
num_beats_trim = 4
mt2 = mt.copy()
mt2.set_resolution(12)
mt2.trim(0, num_beats_trim*mt2.resolution) # Trim
mt2.binarize(1)
mt2 = mt2.pad_to_multiple(4)
mt2.plot()
track = mt2.tracks[0].pianoroll
# Okay. The NPY array seems to be:
# Timesteps based on beat resolution * 128
# Transposed version of pianoroll.
# I can manually assemble the MIDI data into that
# Hopefully there is a function that extracts it from MIDI messages
# So that I don't need to write one myself.
# pypianoroll.Track.standardize():
# returns standardized pypianoroll track (Standard Track)
# Clips Pianoroll to [0, 127 and casts to np.uint8]
# Pypianoroll can parse pretty MIDI
# Slow way: Import all generated MIDI files with pretty midi and generate npy files
# Fast way: Find a way to convert in memory and generate npy files progrmatically
# Read PyPianoRoll Source Code to find best way to split
# Read CycleGAN paper to see what the npy files should contain.
|
989,717 | 0cf9228d265ab38c4470bdc41fbd142a294f1ff6 | """
Module containing all player methods and data
"""
import random
INTRO = """
You've entered a world where everyone is dead. You're the only one left
and you're also almost dead. You're also borderline insane and have an
infatuation with kittens that is rivaled by none. Armed with your purse, and
your lack of wits, you decide to venture off into the darkness.
The question is, are you being brave, or are you insane?
"""
BRAVE_OR_CRAZY = "('brave' or 'insane')\n:"
CRAZY_WORDS = ["nuts", "crazy", "insane", "bonkers",]
BRAVE_WORDS = ["brave", "courageous", "tough",]
LEVEL_UP_TEXT = """
Coming out of that, it's hard to tell if you're more %s or %s than before. What
do you think?
"""
class Player(object):
def __init__(self):
self.difficulty = None
self._insanity = 10
self._courage = 10
self.health = self._courage * 2
self.kennel = []
self.special_kennel = []
self.attacking_kittens = 0
self.defending_kittens = 0
self.inventory = []
self.weapon = None
self.level = 1
self.xp = [0, 1]
self.boss_fights = [i*5 + 5 for i in range(10)]
def __len__(self):
return len(self.kennel)
def kittenCount(self):
return len(self.kennel)
def updateInsanity(self, mod=0):
self._insanity += mod
return self._insanity
def updateCourage(self, mod=0):
self._courage += mod
return self._courage
def setMaxHealth(self, mod=0):
self.health = self._courage * 2 + self.level + mod
def updateHealth(self, mod=0):
if self.health + mod >= (self._courage * 2) + self.level:
self.setMaxHealth()
else:
self.health += mod
return self.health
def equip(self, item):
self.weapon = item
def getBonusDamageFromInsanity(self):
return int(round((self._insanity ** 2)/50)) -1
def getCatBonus(self, count, state):
allocated = self.__dict__["%s_kittens" % state]
stats = {"attacking": int(round(self._insanity/6))-1,
"defending": int(round(self._courage/6))-1}
number_of_cats = random.randint(0, count if count >= 0 else 0)
if allocated:
if stats[state] + number_of_cats > allocated:
number_of_cats = allocated
else:
number_of_cats += stats[state]
cat_sample = random.sample(self.kennel, number_of_cats)
cat_bonus = sum([i.level for i in cat_sample])
return cat_bonus, number_of_cats
else:
return 0, 0
def getDamage(self):
"""Returns total damage and number of attacking kittens"""
weapon_dmg = self.weapon.getDamage()
cat_bonus, att_cats = self.getCatBonus(self.attacking_kittens,
"attacking")
true_dmg = weapon_dmg + cat_bonus + self.getBonusDamageFromInsanity()
return true_dmg, att_cats
def adoptKitten(self, kitten, special=False):
if special:
self.special_kennel.append(kitten)
else:
self.kennel.append(kitten)
def checkInventory(self):
items = {}
for item in self.inventory:
if item.name in items:
items[item.name] += 1
else:
items[item.name] = 1
return items
def insanityChanceBonus(self):
return self._insanity * 0.01
def getKittenCourageBonus(self):
return self._courage * 0.0075
def experienceBar(self):
bar = "#"* int(((float(self.xp[0]) / (self.xp[1])) * 100)/5)
space = "-"* (20 - int(((float(self.xp[0]) / (self.xp[1]) * 100)/5)))
return "XP: %s [%s%s] %s" % (self.xp[0], bar, space, self.xp[1])
def healthBar(self):
bar = "#"* int(((float(self.health) / (self._courage*2 + self.level)) * 100)/5)
space = "-"* (20 - int(((float(self.health) / (self._courage*2 + self.level) * 100)/5)))
name = "You:"
full_bar = "%s [%s%s] %s" % (self.health, bar, space, self._courage * 2 + self.level)
return name + full_bar.rjust(60-len(name))
def newStats(self):
good = False
while not good:
answer = input(BRAVE_OR_CRAZY)
if answer == "brave":
self.updateCourage(2)
print("You go, Grandma!\n")
good = True
elif answer == "insane":
self.updateInsanity(2)
print("Ya, thought so...\n")
good = True
else:
print("Sorry, what now?")
self.setMaxHealth(1)
def startLevelUp(self, rewards=None):
if self.xp[0] >= self.xp[1]:
if rewards:
for reward in rewards:
reward()
print(LEVEL_UP_TEXT % (random.choice(CRAZY_WORDS),
random.choice(BRAVE_WORDS)))
self.newStats()
self.level += 1
self.xp = [0, self.level]
self.checkKittenLevels()
def checkKittenLevels(self):
for cat in self.kennel:
cat.levelUp()
for special_cat in self.special_kennel:
special_cat.levelUp()
def intro(self):
print(INTRO)
while (self._courage == 10 and self._insanity == 10):
self.newStats()
return self
|
989,718 | 8dcf1583f38994abd8ef817e94b6baf78edf1f30 | # -*- coding: utf-8 -*-
"""
CSC373 Data Mining
Assignment 2: Classification
Author: Tianqi Hong, Han Bao, Michael Si
Date: 09/24/2020
Description:
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# this is to ask the user to enter their address for the file
err = 0
direc = input("Please enter the directory of your file: ")
while (True):
try:
data = pd.read_csv(direc)
break
except FileNotFoundError:
print("No such file! Please re-run the program :D")
err = 1
break
# C:/Users/89709/Desktop/Data Mining/Assignment 1/train_activities.csv
# Dataset is now stored in a Pandas Dataframe
if (err == 0):
# this part is for us to understand the data before we graph them
# this function is my favorite function to see if we have any null in our dataset (see if we need to clean our dataset)
data.isnull().sum()
data.info
data.shape
# this is to print the first five rows of our dataset
data.head()
# this is to print the last five rows of our dataset
data.tail()
data.columns
data.describe()
# this is to see the uniqueness of our dataset. (To see if we have a lot of repeated data like what Prof. Khuri mentioned during class
data.nunique()
# dropping filename and timestamp because they don't seems like our independent variables
data = data.drop(['filename', 'timestamp'], axis=1)
data.head() |
989,719 | 74b43479a147a45a1fad1dd8c2a41b880b0aef73 | import datetime
import os
import tempfile
import fiona
from fiona.crs import from_epsg
from shapely.geometry import mapping
VALID_LEVELS = ["LAT", "MSL"]
def tide_values_from_dfs0(mikepath, meta, dfsfilepath, level):
"""Read and extract values from dfs0 file using DHI.Generic.MikeZero.DFS.
Parameters
----------
mikepath : str
Path to MIKE installation directory.
meta : dictionary
Metadata dictionary created by read_meta().
dfsfilepath : str
Path to the dfs file created by make_dfs0().
level : str
Click option LAT or MSL.
Returns
-------
tide_values : list
List of tide values for image acquisiton date and time.
Raises
------
ValueError
If an invalid level type was provided.
ValueError
If DHI.Generic could not be imported or is not found in the sdkpath folder.
ValueError
If no tide values could be generated.
"""
if level not in VALID_LEVELS:
raise ValueError(f"Level should be one of {VALID_LEVELS}, not {level}.")
import clr
clr.AddReference("System")
import System
generic_mike_zero_path = list(
mikepath.glob("**/Mike SDK/**/*DHI.Generic.MikeZero.DFS.dll")
)[0]
try:
clr.AddReference(str(generic_mike_zero_path))
import DHI.Generic.MikeZero.DFS
except (ImportError, System.IO.FileNotFoundException) as exception:
msg = f'DHI.Generic not found. Is the path to the mike installation directory correct: "{mikepath}"?'
raise ValueError(msg) from exception
dfs_img_datetime = datetime.datetime.strptime(
meta["sensing_time"], "%Y-%m-%dT%H:%M:%S"
)
dfsfile = DHI.Generic.MikeZero.DFS.DfsFileFactory.DfsGenericOpen(dfsfilepath)
tide_values = []
# read timestep in seconds, convert to minutes
timestep = int(dfsfile.FileInfo.TimeAxis.TimeStep / 60)
sdt = dfsfile.FileInfo.TimeAxis.StartDateTime
dfs_start_datetime = datetime.datetime(
*(getattr(sdt, n) for n in ["Year", "Month", "Day", "Hour", "Minute", "Second"])
)
diff = dfs_img_datetime - dfs_start_datetime
img_timestep = int(((diff.days * 24 * 60) + (diff.seconds / 60)) / timestep)
for i in range(len(dfsfile.ItemInfo)):
min_value = float(dfsfile.ItemInfo[i].MinValue)
acq_value = dfsfile.ReadItemTimeStep(i + 1, img_timestep).Data[
0
] # Value c.f. MSL
if level == "LAT":
lat_value = acq_value - min_value # Value above LAT
tide_values.append(lat_value)
elif level == "MSL":
tide_values.append(acq_value)
else:
raise ValueError("Invalid level.")
dfsfile.Dispose()
if not tide_values:
raise ValueError("No tide values generated, recheck AOI")
return tide_values
def write_tide_values(tide_values, plist, level):
"""Write generated points and tide values to a new shapefile.
Parameters
----------
tide_values : list
List of tide values generated by tide_values_from_dfs0().
plist : list
List of shapely points generated by create_pts().
level : str
Click option LAT or MSL.
"""
pts_schema = {
"geometry": "Point",
"properties": {"p_ID": "int", str(level): "float"},
}
mem_file = fiona.MemoryFile()
ms = mem_file.open(crs=from_epsg(4326), driver="ESRI Shapefile", schema=pts_schema,)
for pid, (p, tv) in enumerate(zip(plist, tide_values)):
prop = {"p_ID": int(pid + 1), str(level): float(tv)}
ms.write({"geometry": mapping(p), "properties": prop})
return ms
def main(infile, date, mikepath, outfile, **kwargs):
dirpath, filepath = os.path.split(infile)
with tempfile.TemporaryDirectory(dir=dirpath) as tempdir:
write_tide_values(infile, date, mikepath, outfile, tempdir, **kwargs) |
989,720 | 203ca074de957e3ccdd6fcda9e1786e0d3f6069d | # -*- coding: utf-8 -*-
import math
import unittest
from gatilegrid import getTileGrid, GeoadminTileGridLV03, \
GeoadminTileGridLV95, GlobalMercatorTileGrid, GlobalGeodeticTileGrid
class TestGeoadminTileGrid(unittest.TestCase):
def testgetTileGrid(self):
tileGrid = getTileGrid(21781)
self.assertIs(tileGrid, GeoadminTileGridLV03)
self.assertIsInstance(tileGrid(), GeoadminTileGridLV03)
tileGrid = getTileGrid(2056)
self.assertIs(tileGrid, GeoadminTileGridLV95)
self.assertIsInstance(tileGrid(), GeoadminTileGridLV95)
tileGrid = getTileGrid(3857)
self.assertIs(tileGrid, GlobalMercatorTileGrid)
self.assertIsInstance(tileGrid(), GlobalMercatorTileGrid)
tileGrid = getTileGrid(4326)
self.assertIs(tileGrid, GlobalGeodeticTileGrid)
self.assertIsInstance(tileGrid(), GlobalGeodeticTileGrid)
def testUnsupportedTileGrid(self):
with self.assertRaises(AssertionError):
getTileGrid(7008)
def testTileGridWrongExtent(self):
with self.assertRaises(AssertionError):
GeoadminTileGridLV03(extent=[10.0, 10.0, 20.0, 20.0])
with self.assertRaises(AssertionError):
GeoadminTileGridLV03(
extent=[430000.0, 40000.0, 420000.0, 340000.0])
def testTileGridWrongOrigin(self):
with self.assertRaises(AssertionError):
GlobalGeodeticTileGrid(originCorner='top-right')
def testTileSize(self):
gagrid = GeoadminTileGridLV03()
ts = gagrid.tileSize(20)
self.assertEqual(ts, 2560.0)
self.assertEqual(gagrid.tileAddressTemplate,
'{zoom}/{tileRow}/{tileCol}')
with self.assertRaises(AssertionError):
gagrid.tileSize(40)
def testGetResolution(self):
gagrid = GeoadminTileGridLV95()
res = gagrid.getResolution(0)
self.assertEqual(res, 4000.0)
res = gagrid.getResolution(28)
self.assertEqual(res, 0.1)
with self.assertRaises(AssertionError):
gagrid.getResolution(-1)
with self.assertRaises(AssertionError):
gagrid.getResolution(29)
def testGetZoom(self):
gagrid = GeoadminTileGridLV95()
zoom = gagrid.getZoom(4000.0)
self.assertEqual(zoom, 0)
zoom = gagrid.getZoom(0.1)
self.assertEqual(zoom, 28)
with self.assertRaises(AssertionError):
gagrid.getZoom(4000.000001)
with self.assertRaises(AssertionError):
gagrid.getZoom(3999.999999)
with self.assertRaises(AssertionError):
gagrid.getZoom(0.1000001)
with self.assertRaises(AssertionError):
gagrid.getZoom(0.00000001)
def testGetClosestZoom(self):
gagrid = GeoadminTileGridLV95()
zoom = gagrid.getClosestZoom(100000.5)
self.assertEqual(zoom, 0)
self.assertIsInstance(zoom, int)
zoom = gagrid.getClosestZoom(2555.5)
self.assertEqual(zoom, 6)
self.assertIsInstance(zoom, int)
zoom = gagrid.getClosestZoom(2500)
self.assertEqual(zoom, 6)
self.assertIsInstance(zoom, int)
zoom = gagrid.getClosestZoom(0.09)
self.assertEqual(zoom, 28)
self.assertIsInstance(zoom, int)
# Test WGS84 degrees conversion
gagrid = GlobalGeodeticTileGrid()
# Input meters
zoom = gagrid.getClosestZoom(600)
self.assertEqual(zoom, 7)
zoom = gagrid.getClosestZoom(0.29)
self.assertEqual(zoom, 18)
# Input degrees
zoom = gagrid.getClosestZoom(0.021, unit='degrees')
self.assertEqual(zoom, 5)
def testTileBoundsAndAddress(self):
gagrid = GeoadminTileGridLV03()
tbe = [548000.0, 196400.0, 573600.0, 222000.0]
tb = gagrid.tileBounds(17, 5, 5)
self.assertEqual(tb[0], tbe[0])
self.assertEqual(tb[1], tbe[1])
self.assertEqual(tb[2], tbe[2])
self.assertEqual(tb[3], tbe[3])
with self.assertRaises(AssertionError):
gagrid.tileBounds(77, 5, 5)
ta = gagrid.tileAddress(0, [gagrid.MINX, gagrid.MAXY])
self.assertEqual(ta[0], 0)
self.assertEqual(ta[1], 0)
ta = gagrid.tileAddress(17, [tb[0], tb[3]])
self.assertEqual(ta[0], 5)
self.assertEqual(ta[1], 5)
def testIterGrid(self):
gagrid = GeoadminTileGridLV03()
gen = gagrid.iterGrid(0, 0)
self.assertTrue(hasattr(gen, '__iter__'))
tileSpec = [t for t in gen]
self.assertEqual(len(tileSpec), 1)
self.assertEqual(len(tileSpec[0]), 4)
self.assertEqual(tileSpec[0][1], 0)
self.assertEqual(tileSpec[0][2], 0)
self.assertEqual(tileSpec[0][3], 0)
self.assertEqual(str(tileSpec[0][0]), str(gagrid.tileBounds(0, 0, 0)))
gen = gagrid.iterGrid(13, 14)
tilesSpec = [i for i in gen]
self.assertEqual(len(tilesSpec), 12)
self.assertEqual(tilesSpec[0][1], 13)
self.assertEqual(tilesSpec[6][1], 14)
bounds = tilesSpec[2][0]
z = tilesSpec[2][1]
col = tilesSpec[2][2]
row = tilesSpec[2][3]
self.assertEqual(bounds, gagrid.tileBounds(z, col, row))
with self.assertRaises(AssertionError):
next(gagrid.iterGrid(13, 33))
with self.assertRaises(AssertionError):
next(gagrid.iterGrid(-1, 11))
with self.assertRaises(AssertionError):
next(gagrid.iterGrid(13, 11))
def testGetScale(self):
gagrid = GeoadminTileGridLV03()
s14 = gagrid.getScale(14)
s28 = gagrid.getScale(28)
self.assertGreater(s14, s28)
self.assertEqual(round(s14), 2456688.0)
self.assertEqual(round(s28), 378.0)
def testGetScaleLV95(self):
gagrid = GeoadminTileGridLV95()
s14 = gagrid.getScale(14)
s28 = gagrid.getScale(28)
self.assertGreater(s14, s28)
self.assertEqual(round(s14), 2456688.0)
self.assertEqual(round(s28), 378.0)
def testIterGridWithExtent(self):
offset = 20000.0
gagridDefault = GeoadminTileGridLV03()
extent = [gagridDefault.MINX + offset, gagridDefault.MINY + offset,
gagridDefault.MAXX - offset, gagridDefault.MAXY - offset]
gagridExtent = GeoadminTileGridLV03(extent=extent)
self.assertGreater(gagridDefault.xSpan, gagridExtent.xSpan)
self.assertGreater(gagridDefault.ySpan, gagridExtent.ySpan)
tilesSpecDefault = [t for t in gagridDefault.iterGrid(20, 21)]
tilesSpecExtent = [t for t in gagridExtent.iterGrid(20, 21)]
self.assertGreater(len(tilesSpecDefault), len(tilesSpecExtent))
self.assertEqual(tilesSpecExtent[0][1], 20)
self.assertEqual(tilesSpecExtent[len(tilesSpecExtent) - 1][1], 21)
nbTiles = gagridExtent.numberOfTilesAtZoom(20) + \
gagridExtent.numberOfTilesAtZoom(21)
self.assertEqual(len(tilesSpecExtent), nbTiles)
def testNumberOfTilesLV03(self):
zoom = 20
gagrid = GeoadminTileGridLV03()
[minRow, minCol, maxRow, maxCol] = gagrid.getExtentAddress(zoom)
nb = gagrid.numberOfTilesAtZoom(zoom)
nbx = gagrid.numberOfXTilesAtZoom(zoom)
nby = gagrid.numberOfYTilesAtZoom(zoom)
self.assertGreater(maxCol, maxRow)
self.assertEqual(len([t for t in gagrid.iterGrid(zoom, zoom)]), nb)
self.assertEqual(nb, 23500)
self.assertEqual(nb, nbx * nby)
self.assertGreater(nbx, nby)
zoom = 22
[minRow, minCol, maxRow, maxCol] = gagrid.getExtentAddress(zoom)
nb = gagrid.numberOfTilesAtZoom(zoom)
nbx = gagrid.numberOfXTilesAtZoom(zoom)
nby = gagrid.numberOfYTilesAtZoom(zoom)
self.assertGreater(maxCol, maxRow)
self.assertEqual(len([t for t in gagrid.iterGrid(zoom, zoom)]), nb)
self.assertEqual(nb, 375000)
self.assertEqual(nb, nbx * nby)
self.assertGreater(nbx, nby)
def testNumberOfTilesLV95(self):
zoom = 20
gagrid = GeoadminTileGridLV95()
[minRow, minCol, maxRow, maxCol] = gagrid.getExtentAddress(zoom)
nb = gagrid.numberOfTilesAtZoom(zoom)
nbx = gagrid.numberOfXTilesAtZoom(zoom)
nby = gagrid.numberOfYTilesAtZoom(zoom)
self.assertGreater(maxCol, maxRow)
self.assertEqual(len([t for t in gagrid.iterGrid(zoom, zoom)]), nb)
self.assertEqual(nb, 23500)
self.assertEqual(nb, nbx * nby)
self.assertGreater(nbx, nby)
zoom = 22
[minRow, minCol, maxRow, maxCol] = gagrid.getExtentAddress(zoom)
nb = gagrid.numberOfTilesAtZoom(zoom)
nbx = gagrid.numberOfXTilesAtZoom(zoom)
nby = gagrid.numberOfYTilesAtZoom(zoom)
self.assertGreater(maxCol, maxRow)
self.assertEqual(len([t for t in gagrid.iterGrid(zoom, zoom)]), nb)
self.assertEqual(nb, 375000)
self.assertEqual(nb, nbx * nby)
self.assertGreater(nbx, nby)
def testNumberOfTilesMercator(self):
grid = GlobalMercatorTileGrid()
zoom = 0
nb = grid.numberOfTilesAtZoom(zoom)
nbx = grid.numberOfXTilesAtZoom(zoom)
nby = grid.numberOfYTilesAtZoom(zoom)
self.assertEqual(nb, nbx * nby)
self.assertEqual(nb, 1)
zoom = 2
[minRow, minCol, maxRow, maxCol] = grid.getExtentAddress(zoom)
nb = grid.numberOfTilesAtZoom(zoom)
nbx = grid.numberOfXTilesAtZoom(zoom)
nby = grid.numberOfYTilesAtZoom(zoom)
self.assertGreater(maxCol, minCol)
self.assertGreater(maxRow, minRow)
self.assertEqual(len([t for t in grid.iterGrid(zoom, zoom)]), nb)
self.assertEqual(nb, nbx * nby)
self.assertEqual(nb, 16)
def testNumberOfTilesGeodetic(self):
grid = GlobalGeodeticTileGrid(originCorner='bottom-left',
tmsCompatible=False)
zoom = 0
nb = grid.numberOfTilesAtZoom(zoom)
nbx = grid.numberOfXTilesAtZoom(zoom)
nby = grid.numberOfYTilesAtZoom(zoom)
self.assertEqual(nb, nbx * nby)
self.assertEqual(nb, 1)
zoom = 2
[minRow, minCol, maxRow, maxCol] = grid.getExtentAddress(zoom)
nb = grid.numberOfTilesAtZoom(zoom)
nbx = grid.numberOfXTilesAtZoom(zoom)
nby = grid.numberOfYTilesAtZoom(zoom)
self.assertGreater(maxCol, minCol)
self.assertGreater(maxRow, minRow)
self.assertEqual(len([t for t in grid.iterGrid(zoom, zoom)]), nb)
self.assertEqual(nb, nbx * nby)
self.assertEqual(nb, 8)
grid = GlobalGeodeticTileGrid(originCorner='bottom-left',
tmsCompatible=True)
zoom = 0
nb = grid.numberOfTilesAtZoom(zoom)
nbx = grid.numberOfXTilesAtZoom(zoom)
nby = grid.numberOfYTilesAtZoom(zoom)
self.assertEqual(nb, nbx * nby)
self.assertEqual(nb, 2)
def testMercatorGridBoundsAndAddress(self):
grid = GlobalMercatorTileGrid()
[z, x, y] = [8, 135, 91]
[xmin, ymin, xmax, ymax] = grid.tileBounds(z, x, y)
self.assertAlmostEqual(xmin, 1095801.2374962866)
self.assertAlmostEqual(ymin, 5635549.221409475)
self.assertAlmostEqual(xmax, 1252344.271424327)
self.assertAlmostEqual(ymax, 5792092.255337516)
center = [xmin + (xmax - xmin) / 2, ymin + (ymax - ymin) / 2]
[xa, ya] = grid.tileAddress(z, center)
self.assertEqual(xa, x)
self.assertEqual(ya, y)
def testGeodeticGridBoundsAndAddress(self):
grid = GlobalGeodeticTileGrid(originCorner='top-left',
tmsCompatible=True)
[z, x, y] = [8, 268, 60]
[xmin, ymin, xmax, ymax] = grid.tileBounds(z, x, y)
self.assertAlmostEqual(xmin, 8.4375)
self.assertAlmostEqual(ymin, 47.109375)
self.assertAlmostEqual(xmax, 9.140625)
self.assertAlmostEqual(ymax, 47.8125)
center = [xmin + (xmax - xmin) / 2, ymin + (ymax - ymin) / 2]
[xa, ya] = grid.tileAddress(z, center)
self.assertEqual(xa, x)
self.assertEqual(ya, y)
[z, x, y] = [8, 266, 193]
grid = GlobalGeodeticTileGrid(originCorner='bottom-left',
tmsCompatible=True)
[xmin, ymin, xmax, ymax] = grid.tileBounds(z, x, y)
self.assertAlmostEqual(xmin, 7.03125)
self.assertAlmostEqual(ymin, 45.703125)
self.assertAlmostEqual(xmax, 7.734375)
self.assertAlmostEqual(ymax, 46.40625)
center = [xmin + (xmax - xmin) / 2, ymin + (ymax - ymin) / 2]
[xa, ya] = grid.tileAddress(z, center)
self.assertEqual(xa, x)
self.assertEqual(ya, y)
|
989,721 | 2d2622a99010a2c05a2b4fee90194875b8166e9e | import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from strats.threshold_momentum import (
threshold_momentum_returns,
threshold_momentum_limit_returns,
threshold_momentum_holdout_returns)
def single_col_df(series):
return pd.DataFrame({'AAPL': series}, dtype=float)
class TestSellHighThresholdMomentum(unittest.TestCase):
def test_sell(self):
# Correct returns for single jump
close_prices = single_col_df([1, 2, 2])
hi_prices = single_col_df([1, 2, 4])
expected = single_col_df([np.nan, np.nan, 1.0])
returns = threshold_momentum_returns(close_prices, hi_prices, 0.05)
assert_frame_equal(returns, expected)
def test_no_sell(self):
# Returns nan series for no jumps
close_prices = single_col_df([1, 1, 1])
hi_prices = single_col_df([1, 10, 20])
expected = single_col_df([np.nan, np.nan, np.nan])
returns = threshold_momentum_returns(close_prices, hi_prices, 0.05)
assert_frame_equal(returns, expected)
def test_buy_sell(self):
# Buys and sells on the same day
close_prices = single_col_df([1, 2, 3, 3])
hi_prices = single_col_df([1, 2, 4, 6])
expected = single_col_df([np.nan, np.nan, 1.0, 1.0])
returns = threshold_momentum_returns(close_prices, hi_prices, 0.05)
assert_frame_equal(returns, expected)
def test_holding_at_end(self):
# Don't calculate any returns for days still holding at end
close_prices = single_col_df([1, 1, 2])
hi_prices = single_col_df([1, 2, 2])
expected = single_col_df([np.nan, np.nan, np.nan])
returns = threshold_momentum_returns(close_prices, hi_prices, 0.05)
assert_frame_equal(returns, expected)
class TestCloseoutThresholdMomentum(unittest.TestCase):
limit = 0.05
def test_hits(self):
# Returns only limit when limit exceeded
close_prices = single_col_df([1, 2, 2])
hi_prices = single_col_df([1, 2, 4])
expected = single_col_df([np.nan, np.nan, self.limit])
returns = threshold_momentum_limit_returns(
close_prices, hi_prices, 0.05, self.limit)
assert_frame_equal(returns, expected)
def test_closes_out(self):
# Sells at close when limit not hit
close_prices = single_col_df([1, 2, 1])
eps = 0.01
hi_below_limit = close_prices.iloc[1] * (1 + self.limit) - eps
hi_prices = single_col_df([1, 2, hi_below_limit])
expected = single_col_df([np.nan, np.nan, -.5])
returns = threshold_momentum_limit_returns(
close_prices, hi_prices, 0.05, self.limit)
assert_frame_equal(returns, expected)
def test_no_sell(self):
close_prices = single_col_df([1, 1, 1])
hi_prices = single_col_df([1, 2, 2])
expected = single_col_df([np.nan, np.nan, np.nan])
returns = threshold_momentum_limit_returns(
close_prices, hi_prices, 0.05, self.limit)
assert_frame_equal(returns, expected)
def test_buy_sell(self):
# Buys and sells on the same day
close_prices = single_col_df([1, 2, 3, 3])
hi_prices = single_col_df([1, 2, 4, 6])
expected = single_col_df([np.nan, np.nan, self.limit, self.limit])
returns = threshold_momentum_limit_returns(
close_prices, hi_prices, 0.05, self.limit)
assert_frame_equal(returns, expected)
def test_holding_at_end(self):
# Don't calculate any returns for days still holding at end
close_prices = single_col_df([1, 1, 2])
hi_prices = single_col_df([1, 2, 2])
expected = single_col_df([np.nan, np.nan, np.nan])
returns = threshold_momentum_limit_returns(
close_prices, hi_prices, 0.05, self.limit)
assert_frame_equal(returns, expected)
class TestHoldoutThresholdMomentum(unittest.TestCase):
limit = 0.05
def test_waits(self):
# Recovers 2 days after buying
close_prices = single_col_df([1, 2, 1, 2])
hi_prices = single_col_df([1, 2, 1, 2])
expected_returns = single_col_df([np.nan, np.nan, np.nan, 0])
expected_drawdowns = single_col_df([np.nan, np.nan, -0.5, np.nan])
returns, drawdowns = threshold_momentum_holdout_returns(
close_prices, hi_prices, 0.05, self.limit)
assert_frame_equal(returns, expected_returns)
assert_frame_equal(drawdowns, expected_drawdowns)
def test_hits(self):
# Hits day after buying
close_prices = single_col_df([1, 2, 1, 2])
hi_prices = single_col_df([1, 2, 3, 2])
expected_returns = single_col_df([np.nan, np.nan, self.limit, np.nan])
expected_drawdowns = single_col_df([np.nan, np.nan, np.nan, np.nan])
returns, drawdowns = threshold_momentum_holdout_returns(
close_prices, hi_prices, 0.05, self.limit)
assert_frame_equal(returns, expected_returns)
assert_frame_equal(drawdowns, expected_drawdowns)
def test_no_sell(self):
close_prices = single_col_df([1, 1, 1])
hi_prices = single_col_df([1, 2, 2])
expected = single_col_df([np.nan, np.nan, np.nan])
expected_drawdowns = single_col_df([np.nan, np.nan, np.nan])
returns, drawdowns = threshold_momentum_holdout_returns(
close_prices, hi_prices, 0.05, self.limit)
assert_frame_equal(returns, expected)
assert_frame_equal(drawdowns, expected_drawdowns)
def test_buy_sell(self):
close_prices = single_col_df([1, 2, 3, 3])
hi_prices = single_col_df([1, 2, 4, 6])
expected = single_col_df([np.nan, np.nan, self.limit, self.limit])
expected_drawdowns = single_col_df([np.nan, np.nan, np.nan, np.nan])
returns, drawdowns = threshold_momentum_holdout_returns(
close_prices, hi_prices, 0.05, self.limit)
assert_frame_equal(returns, expected)
assert_frame_equal(drawdowns, expected_drawdowns)
def test_holding_at_end(self):
# Don't calculate any returns for days still holding at end
close_prices = single_col_df([1, 1, 2])
hi_prices = single_col_df([1, 2, 2])
expected = single_col_df([np.nan, np.nan, np.nan])
expected_drawdowns = single_col_df([np.nan, np.nan, np.nan])
returns, drawdowns = threshold_momentum_holdout_returns(
close_prices, hi_prices, 0.05, self.limit)
assert_frame_equal(returns, expected)
assert_frame_equal(drawdowns, expected_drawdowns)
def test_sells_if_breakeven_during_day(self):
# Test case where doesn't sell next day, and day after closes
# at a loss but breaks even during the day (high >= buy_price)
close_prices = single_col_df([1, 2, 1, 1])
hi_prices = single_col_df([1, 2, 1, 2])
expected_returns = single_col_df([np.nan, np.nan, np.nan, 0])
expected_drawdowns = single_col_df([np.nan, np.nan, -0.5, np.nan])
returns, drawdowns = threshold_momentum_holdout_returns(
close_prices, hi_prices, 0.05, self.limit)
assert_frame_equal(returns, expected_returns)
assert_frame_equal(drawdowns, expected_drawdowns)
if __name__ == '__main__':
unittest.main()
|
989,722 | 14ad9b85b170dee3258003a9c23c9b0d12668d62 | import json
import os
from os import environ, path
from anonymizeip import anonymize_ip
from flask import Flask, jsonify, redirect, request
app = Flask(__name__)
current_dir = path.dirname(path.realpath(__file__))
path_list = current_dir + "/icon-sets.json"
path_cache = current_dir + "/cache"
path_views = path_cache + "/views.json"
path_static = current_dir + "/public"
def load_list_file():
"""Load list of icon sets to memory"""
with open(path_list, "r") as list_file:
return json.load(list_file)
def load_views_file():
"""Load or create views file and load IP addresses into memory. Create cache for total number of
unique views per icon set (view_counts)"""
if not path.exists(path_cache):
os.makedirs(path_cache)
if not path.exists(path_views):
with open(path_views, "w+") as view_file:
addresses = {}
counts = {}
json.dump({}, view_file)
else:
with open(path_views, "r") as view_file:
addresses = json.load(view_file)
counts = {}
for icon_set_id, ip_addresses in addresses.items():
counts[icon_set_id] = len(ip_addresses)
return addresses, counts
@app.route("/iconsets", methods=["GET"])
def get_icon_sets():
"""Get list of icon sets with basic information and number of views"""
# Match icon sets with their number of unique views
response = icon_sets
for icon_set in response:
if icon_set["id"] in view_counts:
icon_set["views"] = view_counts[icon_set["id"]]
else:
icon_set["views"] = 0
return jsonify(response)
@app.route("/views", methods=["PATCH"])
def register_view():
"""Add IP address of client to icon set entry in views.json unless it already exists"""
icon_set_id = request.args.get("iconSetId")
ip_address = request.remote_addr
ip_address_anonymized = anonymize_ip(ip_address)
# Add IP address to corresponding icon set
if icon_set_id not in view_addresses:
view_addresses[icon_set_id] = [ip_address_anonymized]
view_counts[icon_set_id] = 1
elif ip_address_anonymized not in view_addresses[icon_set_id]:
view_addresses[icon_set_id].append(ip_address_anonymized)
view_counts[icon_set_id] += 1
else:
return ""
with open(path_views, "w+") as view_file:
# Write updated object to file
json.dump(view_addresses, view_file)
return ""
@app.route("/<path:invalid_path>")
def catch_all(invalid_path):
"""Catch-all route: Redirect to root path"""
return redirect("/", code=302)
icon_sets = load_list_file()
view_addresses, view_counts = load_views_file()
if __name__ == "__main__":
if "FLASK_ENV" in environ and environ["FLASK_ENV"] == "development":
app.run(host="0.0.0.0")
else:
app.run()
|
989,723 | 2c382528d7b80327eb4d8ce21dfe24845c2eab5f | a=input('input n1=')
a=int(a)
b=input('input n2=')
b=int(b)
c = a + b
print(c) |
989,724 | 49ff54b5e03d653dc0d0b405f47ff69eac76a99d | from datetime import timedelta
import datetime
from django.contrib.auth.models import User
from django.core.serializers.json import DjangoJSONEncoder
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render
from django.template.context import RequestContext
from django.utils import simplejson
import time
from erp.apps.timesheet.forms.internal import InternalForm
from erp.apps.timesheet.forms.timesheet import TimeSheetForm
from erp.apps.timesheet.utils import get_timesheet_list
from erp.apps.timesheet.models import TimeSheet, InternalTimeSheet, Workshop
from erp.libs.workflows import utils
from erp.libs import workflows
from erp.libs.workflows.models import Workflow, Transition, State
def timesheet(request):
"""
Default view that renders calendar
"""
return render(
request,
'timesheet/timesheet.html'
)
def timesheet_all(request):
"""
Default view that renders calendar for all employees; Houston
we need a better solution here
"""
return render(
request,
'timesheet/timesheet_all.html'
)
def timesheet_form(request):
"""
View that serves forms for creating a new instance of TimeSheet or Internal object
"""
timsheetForm = TimeSheetForm()
internalForm = InternalForm()
return render(
request,
'timesheet/forms/add.html',
{
'timsheetForm':timsheetForm,
'internalForm':internalForm
}
)
def timesheet_edit_form(request, type, id):
"""
View that serves forms for creating a new instance of TimeSheet or Internal object
"""
if type == 'timesheet':
timesheet = TimeSheet.objects.get(pk=int(id))
editForm = TimeSheetForm(
initial = {
'dueDate':timesheet.DueDate,
'hours':timesheet.Hours,
'partner':timesheet.Partner,
'project':timesheet.Project,
'phase':timesheet.Phase,
'activity':timesheet.Activity
})
else:
timesheet = InternalTimeSheet.objects.get(pk=int(id))
editForm = InternalForm(
initial = {
'dueDate':timesheet.InternalDueDate,
'hours':timesheet.Hours,
'internal':timesheet.Internal,
'activity':timesheet.Activity
})
return render(
request,
'timesheet/forms/edit.html',
{
'editForm':editForm,
'type':type,
'timesheet':timesheet
})
def form_save(request, type=None, id=None):
"""
View to save the timesheet form
"""
if type == 'timesheet':
currentForm = TimeSheetForm(request.POST)
else:
currentForm = InternalForm(request.POST)
if currentForm.is_valid():
currentForm.save(request.user, id)
# Generally, the ajax call after submitting the form will
# refetch events so no big deal if we return something
# meaningless here
return HttpResponse('1')
# View that serves list of all timesheets for the current user
def get_timesheet(request, argument=None):
if argument is None:
user = User.objects.filter(pk=request.user.id)
else:
user = User.objects.all().exclude(groups__name='Ex-employee')
epoch_month = time.gmtime(float(request.REQUEST.get('start')))
if epoch_month.tm_mon == 12:
month = 1
year = epoch_month.tm_year+1
else:
month = epoch_month.tm_mon+1
year = epoch_month.tm_year
response = get_timesheet_list(month,year,user)
return HttpResponse(simplejson.dumps(list(response), cls=DjangoJSONEncoder))
# View that is responsible for cloning the timesheet
def clone_timesheet(request):
user = request.user
event_id = request.POST.get('id')
delta = int(request.POST.get('delta'))
type = request.POST.get('type[]')
if type == 'timesheet':
event = TimeSheet.objects.get(pk=int(event_id))
if type == 'internal':
event = InternalTimeSheet.objects.get(pk=int(event_id))
workflow = Workflow.objects.get(name='Timesheet')
new_delta = 1
while delta != 0:
if type == 'timesheet':
item = TimeSheet(Activity=event.Activity, Hours=event.Hours, Phase=event.Phase, Project=event.Project, User=event.User, DueDate=event.DueDate+timedelta(days=new_delta), Partner=event.Partner)
item.save()
if type == 'internal':
item = InternalTimeSheet(Activity=event.Activity, Hours=event.Hours, Internal=event.Internal, User=event.User, InternalDueDate=event.InternalDueDate+timedelta(days=new_delta))
item.save()
#assign WF and set status
utils.set_workflow(item, workflow)
state = utils.get_state(item)
item.Status = state
item.save()
delta = delta - 1
new_delta += 1
json = 'Success'
return HttpResponse(simplejson.dumps(list(json), cls=DjangoJSONEncoder))
# View for approving the timesheet
def timesheet_approval(request):
if request.method != 'POST':
timesheet_list = InternalTimeSheet.objects.filter(Status__name='New', Internal__Name='Holiday').values('User__id').distinct()
user_list = User.objects.filter(id__in=timesheet_list).exclude(groups__name='Ex-employee').order_by('first_name')
for user in user_list:
count = InternalTimeSheet.objects.filter(Status__name='New', Internal__Name='Holiday', User=user).count()
user.__dict__['count'] = count
return render(
request,
'timesheet/approval.html',
{
'user_list':user_list
}
)
else:
internal_list = request.POST.getlist('internal')
transition = Transition.objects.get(name=request.POST.get('action_type'))
for item in internal_list:
internal = InternalTimeSheet.objects.get(pk=int(item))
workflows.utils.do_transition(internal, transition, request.user)
internal.Status = transition.destination
internal.save()
return HttpResponseRedirect(reverse('approveTimesheet'))
# View that will server all unapproved timesheets
def timesheet_approval_fetch(request, id=None, page=1):
timesheet_list = InternalTimeSheet.objects.filter(Status__name='New', Internal__Name='Holiday', User__id=int(id)).order_by('InternalDueDate')[((int(page)-1)*10):(int(page)*10)];
list_size = InternalTimeSheet.objects.filter(Status__name='New', Internal__Name='Holiday', User__id=int(id)).count();
# actual page, max pages
timesheet_list.actual_page = int(page)
timesheet_list.max_pages = int(list_size)/10+1
return render(
request,
'timesheet/approval_single.html',
{
'timesheet_list':timesheet_list
}
)
def add_workshop(request):
if request.method == 'POST':
item = WorkshopForm(request.POST).save()
workflow = Workflow.objects.get(name='Workshop')
utils.set_workflow(item, workflow)
state = utils.get_state(item)
item.Status = state
item.save()
title = str(item.Ws_Partner)+' - '+str(item.Hours)
json_list = {'title':title, 'year':item.DueDate.year, 'month':item.DueDate.month-1, 'day':item.DueDate.day, 'id':item.id, 'color':'#21aa38','className':'workshop'}
return HttpResponse(simplejson.dumps(json_list))
def edit(request, type, id):
if type == 'internal':
internal = InternalTimeSheet.objects.get(pk=int(id))
user = internal.User
status = internal.Status
if request.method != 'POST':
form = InternalForm(instance=internal)
return render(
request,
'timesheet/int_edit.html',
{
'form':form,
'internal':internal
})
else:
item = InternalForm(request.POST, instance=internal).save(commit=False)
item.User = user
item.Status = status
item.save()
json_list = {'title':item.Internal.Name, 'year':item.InternalDueDate.year, 'month':item.InternalDueDate.month-1, 'day':item.InternalDueDate.day, 'id':item.id}
return HttpResponse(simplejson.dumps(json_list))
elif type == 'timesheet':
timesheet = TimeSheet.objects.get(pk=int(id))
user = timesheet.User
status = timesheet.Status
if request.method != 'POST':
form = TimeSheetForm(instance=timesheet)
return render_to_response('timesheet/ts_edit.html', {'form':form,
'timesheet':timesheet
}, context_instance=RequestContext(request))
else:
item = TimeSheetForm(request.POST, instance=timesheet).save(commit=False)
item.User = user
item.Status = status
item.save()
json_list = {'title':item.Project.Name, 'year':item.DueDate.year, 'month':item.DueDate.month-1, 'day':item.DueDate.day, 'id':item.id}
return HttpResponse(simplejson.dumps(json_list))
def delete(request, type, id):
if type == 'timesheet':
timesheet = TimeSheet.objects.get(pk=int(id))
transition = Transition.objects.get(name="DeleteTimesheet")
workflows.utils.do_transition(timesheet, transition, request.user)
timesheet.Status = transition.destination
timesheet.save()
json_list = {'result':'success'}
return HttpResponse(simplejson.dumps(json_list))
elif type == 'internal':
internal = InternalTimeSheet.objects.get(pk=int(id))
transition = Transition.objects.get(name="DeleteInternal")
workflows.utils.do_transition(internal, transition, request.user)
internal.Status = transition.destination
internal.save()
json_list = {'result':'success'}
return HttpResponse(simplejson.dumps(json_list)) |
989,725 | 97b323284d40c3bbac416158a06053defb53aff4 | """Definition of shape inference for primitives."""
import operator
import numpy
from dataclasses import is_dataclass
from functools import partial, reduce
from ..dshape import NOSHAPE, TupleShape, ListShape, ClassShape, \
find_matching_shape, shape_cloner
from ..dtype import Array, Tuple, List, Class, TypeType, ismyiatype, \
pytype_to_myiatype
from ..infer import ANYTHING, GraphInferrer, register_inferrer, \
PartialInferrer, Track, MyiaShapeError, Inferrer, MetaGraphInferrer, \
InferenceError, MyiaTypeError, TransformedReference, MultiInferrer, \
DummyInferrer, Context
from ..infer.jinf import JInferrer
from ..ir import Graph, MetaGraph
from . import ops as P
from .inferrer_utils import static_getter, getelement
from .ops import Primitive
def prod(iterable):
"""Return the product of the elements of the iterator."""
return reduce(operator.mul, iterable, 1)
shape_inferrer_constructors = {}
@shape_cloner.variant
def _stag_shape(self, shp: Inferrer):
return NOSHAPE
class ShapeTrack(Track):
"""Infer the shape of a constant."""
def __init__(self, engine, name, *,
constructors=shape_inferrer_constructors):
"""Initialize a ShapeTrack."""
super().__init__(engine, name)
self.constructors = constructors
def default(self, values):
"""Default value for ShapeTrack."""
if ismyiatype(values['type'], Array):
raise Exception(
'There is no default value for Arrays on the shape track.'
) # pragma: no cover
if ismyiatype(values['type'], Tuple):
tup = values['type']
return TupleShape(self.default({'type': e}) for e in tup.elements)
elif ismyiatype(values['type'], List):
lst = values['type']
return ListShape(self.default({'type': lst.element_type}))
elif ismyiatype(values['type'], Class):
cls = values['type']
return ClassShape(dict((attr, self.default({'type': tp}))
for attr, tp in cls.attributes.items()))
return NOSHAPE
def from_value(self, v, context):
"""Infer the shape of a constant."""
if isinstance(v, Primitive):
return self.constructors[v](self)
elif isinstance(v, Graph):
return GraphInferrer(self, v, context)
elif isinstance(v, MetaGraph):
return MetaGraphInferrer(self, v)
elif isinstance(v, tuple):
return TupleShape(self.from_value(e, context) for e in v)
elif isinstance(v, list):
shps = [self.from_value(e, context) for e in v]
if len(shps) == 0: # pragma: no cover
# from_value of the type track will fail before this
raise InferenceError('Cannot infer the shape of []')
return ListShape(find_matching_shape(shps))
elif is_dataclass(v):
if isinstance(v, type):
rec = self.constructors[P.make_record](self)
typ = pytype_to_myiatype(v)
vref = self.engine.vref({'value': typ, 'type': TypeType})
return PartialInferrer(self, rec, [vref])
else:
return ClassShape(
dict((n, self.from_value(getattr(v, n), context))
for n in v.__dataclass_fields__.keys()))
elif isinstance(v, numpy.ndarray):
return v.shape
else:
return NOSHAPE
def jtag(self, shp):
"""Return type for J(x) given shape(x)."""
if isinstance(shp, Inferrer):
return JInferrer(shp, TupleShape)
else:
return shp
def stag(self, t):
"""Return type for sensitivity of x given shape(x)."""
return _stag_shape(t)
shape_inferrer = partial(register_inferrer,
constructors=shape_inferrer_constructors)
@shape_inferrer(P.scalar_add, P.scalar_sub, P.scalar_mul, P.scalar_div,
P.scalar_mod, P.scalar_pow, P.scalar_trunc, P.scalar_floor,
P.scalar_uadd, P.scalar_usub, P.scalar_exp, P.scalar_log,
P.scalar_sin, P.scalar_cos, P.scalar_tan,
P.scalar_eq, P.scalar_lt, P.scalar_gt, P.scalar_ne,
P.scalar_le, P.scalar_ge,
P.bool_not, P.bool_and, P.bool_or, P.bool_eq,
P.typeof, P.hastype,
P.tuple_len, P.list_len, P.array_len,
P.scalar_cast,
nargs=None)
async def infer_shape_scalar(track, *args):
"""Infer the shape of all scalar primitives."""
return NOSHAPE
@shape_inferrer(P.shape, nargs=1)
async def infer_shape_shape(track, ary):
"""Infer the shape for shape."""
shp = await ary['shape']
return TupleShape((NOSHAPE,) * len(shp))
@shape_inferrer(P.make_tuple, nargs=None)
async def infer_shape_make_tuple(track, *args):
"""Infer the shape for make_tuple."""
sh = [await x['shape'] for x in args]
return TupleShape(sh)
@shape_inferrer(P.tuple_getitem, nargs=2)
async def infer_shape_tuple_getitem(track, seq, idx):
"""Infer the shape of tuple_getitem."""
seq_sh = await seq['shape']
idx_v = await idx['value']
return seq_sh.shape[idx_v]
@shape_inferrer(P.tuple_setitem, nargs=3)
async def infer_shape_tuple_setitem(track, seq, idx, value):
"""Infer the shape of tuple_setitem."""
seq_sh = await seq['shape']
idx_v = await idx['value']
value_sh = await value['shape']
new_sh = list(seq_sh.shape)
new_sh[idx_v] = value_sh
return TupleShape(new_sh)
@shape_inferrer(P.list_getitem, nargs=2)
async def infer_shape_list_getitem(track, seq, idx):
"""Infer the shape of list_getitem."""
seq_sh = await seq['shape']
return seq_sh.shape
@shape_inferrer(getelement, nargs=1)
async def infer_shape_getelement(track, seq):
"""Infer the shape of an arbitrary element."""
shp = await seq['shape']
if isinstance(shp, ListShape):
return shp.shape
elif isinstance(shp, tuple):
# Array
return NOSHAPE
else:
raise AssertionError()
@shape_inferrer(P.make_record, nargs=None)
async def infer_type_make_record(track, cls, *elems):
"""Infer the shape of make_record."""
elem_shapes = [await x['shape'] for x in elems]
cls_v = await cls['value']
return ClassShape(dict(zip(cls_v.attributes.keys(), elem_shapes)))
@shape_inferrer(P.return_, nargs=1)
async def infer_shape_return(track, v):
"""Infer the shape of return."""
return await v['shape']
@shape_inferrer(P.switch, nargs=3)
async def infer_shape_switch(track, cond, tb, fb):
"""Infer the shape of switch."""
v = await cond['value']
if v is True:
# We only visit the first branch if the condition is provably true
return await tb['shape']
elif v is False:
# We only visit the second branch if the condition is provably false
return await fb['shape']
elif v is ANYTHING:
# The first branch to finish will return immediately. When the other
# branch finishes, its result will be checked against the other.
res = await track.assert_same(tb, fb, refs=[tb, fb])
if isinstance(res, Inferrer):
tinf = await tb['shape']
finf = await fb['shape']
return MultiInferrer((tinf, finf), [tb, fb])
return res
else:
raise AssertionError("Invalid condition value for switch.")
@shape_inferrer(P.partial, nargs=None)
async def infer_shape_partial(engine, fn, *args):
"""Infer the return type of partial."""
fn_t = await fn['shape']
return PartialInferrer(engine, fn_t, args)
@shape_inferrer(P.array_map, nargs=None)
async def infer_shape_array_map(track, fn, *arrays):
"""Infer the shape of array_map."""
fn_t = await fn['shape']
vrefs = [TransformedReference(track.engine, getelement, a)
for a in arrays]
elem_shp = await fn_t(*vrefs)
assert elem_shp is NOSHAPE
shapes = [await a['shape'] for a in arrays]
shape0, *rest = shapes
if any(len(s) != len(shape0) for s in rest):
raise MyiaShapeError("Expect same shapes for array_map")
rshape = []
for entries in zip(*shapes):
entries = set(entries)
entries.add(ANYTHING)
if len(entries) == 1:
rshape.append(ANYTHING)
elif len(entries) == 2:
entries.remove(ANYTHING)
entry, = entries
rshape.append(entry)
else:
raise MyiaShapeError("Expect same shapes for array_map")
return tuple(rshape)
@shape_inferrer(P.list_append, nargs=2)
async def infer_shape_list_append(track, seq, value):
"""Infer the shape for list_append."""
lshp = await seq['shape']
vshp = await value['shape']
return ListShape(find_matching_shape((lshp.shape, vshp)))
@shape_inferrer(P.list_map, nargs=None)
async def infer_shape_list_map(track, fn, *lsts):
"""Infer the shape of list_map."""
argrefs = [TransformedReference(track.engine, getelement, xs)
for xs in lsts]
return ListShape(await (await fn['shape'])(*argrefs)) # noqa: W606
@shape_inferrer(P.array_scan, nargs=4)
async def infer_shape_array_scan(track, fn, init, ary, ax):
"""Infer the shape of array_scan."""
return await ary['shape']
@shape_inferrer(P.array_reduce, nargs=3)
async def infer_shape_array_reduce(track, fn, ary, shp):
"""Infer the shape of array_reduce."""
shp_i = await ary['shape']
shp_v = await shp['value']
if shp_v == ANYTHING:
raise AssertionError(
'We currently require knowing the shape for reduce.'
)
# return (ANYTHING,) * (len(shp_i) - 1)
else:
delta = len(shp_i) - len(shp_v)
if delta < 0 \
or any(1 != s1 != ANYTHING and 1 != s2 != ANYTHING and s1 != s2
for s1, s2 in zip(shp_i[delta:], shp_v)):
raise MyiaShapeError(
f'Incompatible dims for reduce: {shp_i}, {shp_v}'
)
return shp_v
@shape_inferrer(P.distribute, nargs=2)
async def infer_shape_distribute(track, v, shape):
"""Infer the shape of distribute."""
shp = await shape['value']
if shp == ANYTHING:
shp_t = await shape['type']
shp = (ANYTHING,) * len(shp_t.elements)
v_t = await v.get_shallow('type')
if ismyiatype(v_t, Array):
v_shp = await v['shape']
delta = len(shp) - len(v_shp)
if delta < 0:
raise MyiaShapeError("Cannot distribute to smaller shape")
elif delta > 0:
v_shp = (1,) * delta + v_shp
for vs, s in zip(v_shp, shp):
if vs != s and vs not in (1, ANYTHING) and s not in (1, ANYTHING):
raise MyiaShapeError("Cannot change shape when distributing")
return shp
@shape_inferrer(P.reshape, nargs=2)
async def infer_shape_reshape(track, v, shape):
"""Infer the shape of reshape."""
shp = await shape['value']
if shp == ANYTHING:
shp_t = await shape['type']
shp = (ANYTHING,) * len(shp_t.elements)
v_shp = await v['shape']
if (all(s is not ANYTHING for s in shp) and
all(s is not ANYTHING for s in v_shp) and
prod(shp) != prod(v_shp)):
raise MyiaShapeError("Cannot change the total number of elements "
"in reshape")
return shp
@shape_inferrer(P.transpose, nargs=2)
async def infer_shape_transpose(track, v, permutation):
"""Infer the shape of transpose."""
perm = await permutation['value']
if perm == ANYTHING:
perm_t = await permutation['type']
return (ANYTHING,) * len(perm_t.elements)
v_shp = await v['shape']
if list(sorted(perm)) != list(range(len(v_shp))):
raise MyiaShapeError(
'The second argument of transpose must be a permutation of'
' all of the array\'s axes.',
refs=[permutation]
)
shp = tuple(v_shp[i] for i in perm)
return shp
@shape_inferrer(P.invert_permutation, nargs=1)
async def infer_shape_invert_permutation(track, permutation):
"""Infer the shape for invert_permutation."""
t = await permutation['type']
return TupleShape([NOSHAPE for _ in t.elements])
@shape_inferrer(P.dot, nargs=2)
async def infer_shape_dot(track, a, b):
"""Infer the shape of dot."""
a_shp = await a['shape']
b_shp = await b['shape']
if len(a_shp) != 2 or len(b_shp) != 2:
raise MyiaShapeError("dot needs matrix inputs")
if (a_shp[1] != b_shp[0] and
a_shp[1] is not ANYTHING and b_shp[0] is not ANYTHING):
raise MyiaShapeError(
f"Incompatible shapes in dot: {a_shp} and {b_shp}"
)
return (a_shp[0], b_shp[1])
@shape_inferrer(P.resolve, nargs=2)
async def infer_shape_resolve(track, data, item):
"""Infer the shape of resolve."""
async def on_dcattr(data, data_t, item_v): # pragma: no cover
raise MyiaTypeError('Cannot resolve on Class.')
return await static_getter(
track, data, item,
fetch=operator.getitem,
on_dcattr=on_dcattr
)
@shape_inferrer(P.getattr, nargs=2)
async def infer_shape_getattr(track, data, item):
"""Infer the shape of getattr."""
async def on_dcattr(data, data_t, item_v):
data_sh = await data['shape']
return data_sh.shape[item_v]
return await static_getter(
track, data, item,
fetch=getattr,
on_dcattr=on_dcattr
)
@shape_inferrer(P.identity, nargs=1)
async def infer_shape_identity(track, x):
"""Infer the shape of identity."""
return await x['shape']
@shape_inferrer(P.scalar_to_array, nargs=1)
async def infer_shape_scalar_to_array(track, x):
"""Infer the shape of scalar_to_array."""
return ()
@shape_inferrer(P.array_to_scalar, nargs=1)
async def infer_shape_array_to_scalar(track, ary):
"""Infer the shape of array_to_scalar."""
shp = await ary['shape']
if shp == ():
return NOSHAPE
else:
raise MyiaTypeError(
'array_to_scalar only works on 0d arrays',
refs=[ary]
)
@shape_inferrer(P.broadcast_shape, nargs=2)
async def infer_shape_broadcast_shape(track, shpx, shpy):
"""Infer the shape of broadcast_shape."""
tx = await shpx['type']
ty = await shpy['type']
n = max(len(tx.elements), len(ty.elements))
return TupleShape([NOSHAPE] * n)
@shape_inferrer(P.make_list, nargs=None)
async def infer_shape_make_list(track, *elems):
"""Infer the return shape of make_list."""
shps = [await e['shape'] for e in elems]
if len(shps) == 0:
raise InferenceError('Cannot infer the shape of []')
return ListShape(find_matching_shape(shps))
@shape_inferrer(P.list_reduce, nargs=3)
async def infer_shape_list_reduce(track, fn, lst, dflt):
"""Infer the return shape of list_reduce."""
elem = TransformedReference(track.engine, getelement, lst)
fn_inf = await fn['shape']
shp1 = await fn_inf(dflt, elem)
shp2 = await fn_inf(elem, elem)
return find_matching_shape([shp1, shp2])
@shape_inferrer(P.J, nargs=1)
async def infer_shape_J(track, x):
"""Infer the return shape of J."""
return track.jtag(await x.get_shallow('shape'))
@shape_inferrer(P.Jinv, nargs=1)
async def infer_shape_Jinv(track, x):
"""Infer the return shape of Jinv."""
shp = await x.get_shallow('shape')
if isinstance(shp, JInferrer):
return shp.fn
elif isinstance(shp, GraphInferrer):
g = shp._graph
primal = g and g.transforms.get('primal', None)
if primal:
primal = track.engine.pipeline.resources.convert(primal)
if isinstance(primal, Graph) and primal.parent:
return DummyInferrer(track)
else:
return track.from_value(primal, Context.empty())
else: # pragma: no cover
# This error is also caught by the type inferrer
raise MyiaTypeError('Bad input type for Jinv', refs=[x])
else:
return shp
@shape_inferrer(P.embed, nargs=1)
async def infer_shape_embed(track, x):
"""Infer the return shape of embed."""
return NOSHAPE
@shape_inferrer(P.env_setitem, nargs=3)
async def infer_shape_env_setitem(track, env, key, x):
"""Infer the return shape of env_setitem."""
return NOSHAPE
@shape_inferrer(P.env_getitem, nargs=3)
async def infer_shape_env_getitem(track, env, key, default):
"""Infer the return shape of env_getitem."""
key_v = await key['value']
assert key_v is not ANYTHING
shp = track.stag(key_v.inferred['shape'])
return await track.assert_same(shp, default)
@shape_inferrer(P.env_add, nargs=2)
async def infer_shape_env_add(track, env1, env2):
"""Infer the return shape of env_add."""
return NOSHAPE
|
989,726 | 182fade649f97f22620de621f4898bb2c0011586 | from __future__ import division
import numpy as np
from numpy.linalg import svd, qr, cholesky
# Solves Ax = b with svd.
def solve_svd(A, b):
assert len(A[0]) == len(b)
U, S, V = svd(A)
UTb = np.dot(np.transpose(U), b)
w = np.divide(UTb, S)
return np.dot(np.transpose(V), w)
def solve_QR(A, b):
n = len(b)
Q, R = qr(A)
QTb = np.dot(np.transpose(Q), b)
x = np.zeros(n)
for i in xrange(n-1, -1, -1):
x[i] = (QTb[i] - np.dot(R[i,i+1:n], x[i+1:n]))/R[i,i]
return x
def solve_normal(A, b):
n = len(b)
L = cholesky(np.dot(np.transpose(A),A))
LT = np.transpose(L)
ATb = np.dot(np.transpose(A), b)
w = np.zeros(n)
for i in xrange(0, n):
w[i] = (ATb[i] - np.dot(L[i,0:i], w[0:i]))/L[i,i]
x = np.zeros(n)
for i in xrange(n-1, -1, -1):
x[i] = (w[i] - np.dot(LT[i,i+1:n], x[i+1:n]))/LT[i,i]
return x
|
989,727 | fe6d72d7bdca46f7be40ba20f5b0b4ad585a6d22 | """Tests for the Tag model."""
from conduit.openapi import json_renderer
from conduit.tag.models import Tag
from pyramid.testing import DummyRequest
import json
def test_json_renderer(dummy_request: DummyRequest) -> None:
"""Test that Tag is correctly rendered for an OpenAPI JSON response."""
tag = Tag(name="foö")
renderer = json_renderer()
output = renderer(None)(tag, {})
assert json.loads(output) == "foö"
|
989,728 | 37cf7a8990744c39e635a72729d11c7910048bc6 | import sys
from collections import deque
from astar import RoutingProblem
from ways import graph, info
from ways.tools import compute_distance
r = graph.load_map_from_csv()
problem = RoutingProblem(0, 0, r)
def idastar_search(s, t):
problem.goal = t
problem.s_start = s
max_speed = max(info.SPEED_RANGES[0])
new_limit = compute_distance(r[s].lat, r[s].lon, r[t].lat, r[t].lon) / max_speed
def dfs_l(f_limit):
start = problem.G[problem.s_start]
end = problem.G[problem.goal]
max_speed = max(info.SPEED_RANGES[0])
new_limit = sys.maxsize
frontier = deque()
frontier.append(Node(problem.s_start, air_dis=problem.hx(problem.s_start)))
while frontier:
node = frontier.pop()
new_f = node.path_cost + node.air_dis
if new_f > f_limit:
new_limit = min(new_limit, new_f)
else:
frontier.extend(child for child in node.expand(problem))
if problem.is_goal(node.state):
return node.solution(), node.path_cost, compute_distance(start.lat, start.lon, end.lat,
end.lon) / max_speed
return None, new_limit, compute_distance(start.lat, start.lon, end.lat,
end.lon) / max_speed
while True:
sol, new_limit, dis = dfs_l(new_limit)
if sol:
return sol, new_limit, dis
class Node:
def __init__(self, state, parent=None, action=None, path_cost=0, air_dis=0):
self.state = state
self.parent = parent
self.action = action
self.path_cost = path_cost
self.depth = 0
self.air_dis = air_dis
if parent:
self.depth = parent.depth + 1
def expand(self, problem):
return ordered_set([self.child_node(problem, action) for action in problem.actions(self.state)])
def child_node(self, problem, action):
next_state = problem.succ(self.state, action)
next_node = Node(next_state, self, action,
self.path_cost + problem.step_cost(self.state, action),
problem.hx(action))
return next_node
def solution(self):
return [node.state for node in self.path()[1:]]
def path(self):
node, path_back = self, []
while node:
path_back.append(node)
if node.parent is None:
path_back.append(node)
node = node.parent
return list(reversed(path_back))
def __repr__(self):
return f"<{self.state}>"
def __lt__(self, node):
return self.state < node.state
def __eq__(self, other):
return isinstance(other, Node) and self.state == other.state
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.state)
def ordered_set(coll):
return dict.fromkeys(coll).keys()
|
989,729 | d64dad1e8b804abf7f4638710f56a238df9d605b | import zope.interface
class ILicenseEditForm(zope.interface.Interface):
"""Marker interface for LicenseEditForm."""
|
989,730 | 1a5459a26f728c67e0cc7114946c96f6cbe64abb | # -*- coding: UTF-8 -*-
import jieba
import jieba.posseg as jbps
import jieba.analyse as jban
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def add_freq_word():
jieba.suggest_freq('原告', True)
jieba.suggest_freq('诉称', True)
def divide_sentence(str):
result_set = jbps.cut(str, HMM=False)
return result_set
if __name__ == '__main__':
str0 = "我们中出了一个叛徒"
str1 = "原告诉称,原、被告于2011年9月5日在岳池县双鄢乡人民政府协议离婚,离婚协议对夫妻财产约定:“1、武装部旁边一套面积200平方米的住房属于男方所有(因房屋有贷款三年后所有权归男方),其它所有财产属女方所有;2、凡由夫妻双方签字认可的所有欠债由女方承担,所有房屋贷款由女方承担,其于借款谁借谁还"
str2 = "……"
# jieba.suggest_freq('原告', True)
print('='*40)
words = divide_sentence(str2)
result = []
for word in words:
result.append(word.word + '-' + word.flag)
print word.word, word.flag
# print word
[r.encode('utf8') for r in result]
print result
# for r in result:
# print r
print('='*40)
key_word = []
key_words = jban.textrank(str1, withWeight=True)
for x, w in key_words:
key_word.append(x + '-' + str(w))
print('%s %s' % (x, w))
print key_word
for word in key_word:
print word
|
989,731 | 134ba21adf0a559cca8ddd63bde0ddd59e837ac3 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-17 06:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nettest', '0008_monitable'),
]
operations = [
migrations.CreateModel(
name='mdetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mip', models.CharField(blank=True, max_length=50, null=True)),
('mtime', models.IntegerField()),
('mtotal_pkt', models.IntegerField()),
('sumup', models.IntegerField(blank=True, null=True)),
('sumdown', models.IntegerField(blank=True, null=True)),
('meanup', models.FloatField(blank=True, null=True)),
('meandown', models.FloatField(blank=True, null=True)),
('avg_u_s', models.FloatField(blank=True, null=True)),
('avg_d_s', models.FloatField(blank=True, null=True)),
('bandup', models.CharField(blank=True, max_length=50, null=True)),
('banddown', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'db_table': 'mdetails',
},
),
]
|
989,732 | 402460ac392751bb58dc3a682271ca2c93cd641e | def result(type, id, **kwargs):
kwargs['type'] = type
kwargs['id'] = id
return kwargs
def article(id, title, input_message_content, **kwargs):
return result('article', id, title=title, input_message_content=input_message_content, **kwargs)
def photo(id, photo_url, thumb_url, **kwargs):
return result('photo', id, photo_url=photo_url, thumb_url=thumb_url, **kwargs)
def gif(id, gif_url, **kwargs):
return result('gif', id, gif_url=gif_url, **kwargs)
def mpeg4gif(id, mpeg4_url, **kwargs):
return result('mpeg4_gif', id, mpeg4_url=mpeg4_url, **kwargs)
def video(id, video_url, mime_type, thumb_url, title, **kwargs):
return result('video', id, video_url=video_url, mime_type=mime_type, thumb_url=thumb_url, title=title, **kwargs)
def audio(id, audio_url, title, **kwargs):
return result('audio', id, audio_url=audio_url, title=title, **kwargs)
def voice(id, voice_url, title, **kwargs):
return result('voice', id, voice_url=voice_url, title=title, **kwargs)
def document(id, document_url, title, **kwargs):
return result('document', id, document_url=document_url, title=title, **kwargs) |
989,733 | eab5d1c522bcd7923f29d85613840bc87cce39eb | from collections import defaultdict
def digits(n):
count = 0
while n > 0 or count < 3:
yield n % 10
n //= 10
count += 1
def parse_inst(memory, inst_ptr, rel_base):
opcode = memory[inst_ptr] % 100
modes = memory[inst_ptr] // 100
if opcode in (1, 2, 7, 8):
inst = list(memory[i] for i in range(inst_ptr + 1, inst_ptr + 4))
args = get_args(memory, opcode, inst, inst_ptr, modes, rel_base)
# Last argument is an lvalue
mode = modes // 100
args[-1] = inst[-1] if mode == 0 else rel_base + inst[-1] if mode == 2 else None
elif opcode == 3:
inst = list(memory[i] for i in range(inst_ptr + 1, inst_ptr + 2))
mode = modes % 10
args = [inst[0] if mode == 0 else rel_base + inst[0] if mode == 2 else None]
elif opcode == 4:
inst = list(memory[i] for i in range(inst_ptr + 1, inst_ptr + 2))
args = get_args(memory, opcode, inst, inst_ptr, modes, rel_base)
elif opcode in (5, 6):
inst = list(memory[i] for i in range(inst_ptr + 1, inst_ptr + 3))
args = get_args(memory, opcode, inst, inst_ptr, modes, rel_base)
elif opcode == 9:
inst = list(memory[i] for i in range(inst_ptr + 1, inst_ptr + 2))
args = get_args(memory, opcode, inst, inst_ptr, modes, rel_base)
else:
args = []
return opcode, args
def get_args(memory, opcode, inst, inst_ptr, modes, rel_base):
return [
get_arg(memory, opcode, arg, inst_ptr, rel_base, mode)
for arg, mode in zip(inst, digits(modes))
]
def get_arg(memory, opcode, arg, inst_ptr, rel_base, mode):
if mode == 0:
return memory[arg]
elif mode == 1:
return arg
elif mode == 2:
return memory[rel_base + arg]
else:
raise Exception(
f'Invalid mode {mode} in instruction {opcode} at address {inst_ptr}')
def run_program(program):
memory = defaultdict(lambda: 0, enumerate(program))
inst_ptr = 0
rel_base = 0
opcode, args = parse_inst(memory, inst_ptr, rel_base)
while opcode != 99:
if opcode == 1:
memory[args[2]] = args[0] + args[1]
jump = 4
elif opcode == 2:
memory[args[2]] = args[0] * args[1]
jump = 4
elif opcode == 3:
memory[args[0]] = yield
jump = 2
elif opcode == 4:
yield args[0]
jump = 2
elif opcode == 5:
if args[0] != 0:
inst_ptr = args[1]
jump = 0
else:
jump = 3
elif opcode == 6:
if args[0] == 0:
inst_ptr = args[1]
jump = 0
else:
jump = 3
elif opcode == 7:
memory[args[2]] = int(args[0] < args[1])
jump = 4
elif opcode == 8:
memory[args[2]] = int(args[0] == args[1])
jump = 4
elif opcode == 9:
rel_base += args[0]
jump = 2
else:
raise Exception(f'Invalid opcode {opcode} at address {inst_ptr}')
inst_ptr += jump
opcode, args = parse_inst(memory, inst_ptr, rel_base)
|
989,734 | 863b9c67d15ab4fd1ca4e8928fa32eac0269aed8 | from Start_up import*
from Bullet import Bullet
class Enemy(pygame.sprite.Sprite):
# class to hold an enemy taking the random variables given in the game class
def __init__(self, id):
self.surface = pygame.Surface((20, 20))
self.rect = self.surface.get_rect()
self.pos = (width, height/2)
self.move_function = random.randint(0, 2)
self.bounce_range = random.randint(2, 7)
self.cool = True
self.cool_time = random.randint(30, 70)
self.cool_counter = 0
self.bullet_power = 1
# random.randint(1, 2))
# create a temporary variable for the y coordinate
y_temp = self.pos[1]
# if statements to determine the movement of the enemy cos = 0, sin = 1, none = 2
# the y position will then be randomised such that its function will not take it
# off the screen
if self.move_function == 2: # Move in a straight line
y_temp = random.randint(30, height - 30)
elif self.move_function == 1: # Move using a sin function
if self.bounce_range <= 3:
y_temp = random.randint(150, height - 170)
if self.bounce_range > 3:
y_temp = height/2
if self.bounce_range > 3:
self.bounce_range = 3
elif self.move_function == 0 and self.bounce_range <= 4: # Move using a sin function
y_temp = random.randint(100, height - 120)
# set the positions to the rect
self.rect.x = self.pos[0]
self.rect.y = y_temp
# used to start either moving upwards or downwards
self.function_up = random
# set the counter accordingly
if self.function_up:
self.counter = 0
else:
if self.move_function == 0:
self.counter = 180
else:
self.counter = 360
self.dx = -random.randint(1, 3)
self.dy = 0
# give the enemy an id so that it can be removed from lists
# easily in the kill enemy function in the Game class
self.id = id
# set the health of the enemy and give it a shade based on that
self.health = random.randint(1, settings.loaded_enemy_health_max)
self.health_colours = [(50, 50, 50), # low health
(100, 100, 100),
(200, 200, 200),
(255, 255, 255)] # high health
self.surface.fill(self.health_colours[self.health - 1])
self.dead = False
# how much money will the player get for killing the enemy
self.money = random.randint(5, 10)
# the enemy will steal twice the amount if it reaches the left side
self.will_steal = self.money * 2
def check_collide(self, bullet_list):
# check the enemy against all off the bullets in the bullet list
for x in range(0, len(bullet_list)):
# check that the bullet was shot from a
# player and should damage the enemy
if bullet_list[len(bullet_list) - x - 1].shot_from == "Player":
if pygame.sprite.collide_rect(self, bullet_list[len(bullet_list) - x - 1]):
# if a bullet has collided remove the correct amount of health based
# off of the power of the players bullets
self.health -= bullet_list[len(bullet_list) - x - 1].power
# if the enemy will still be alive set its new colour based off of
# its new health value
if self.health > 0:
self.surface.fill(self.health_colours[self.health - 1])
# remove the bullet
del bullet_list[len(bullet_list) - x - 1]
def check_health(self):
# function to check if the enemy is dead
if self.health < 1:
self.dead = True
def shoot(self, bullet_list):
# check the gun can shoot
if self.cool:
# create a new bullet and add it to the games bullet list
new_bullet = Bullet(self.rect.center, self.bullet_power, "Enemy")
bullet_list.append(new_bullet)
# set the gun to not able to shoot
self.cool = False
return bullet_list
def check_cool_down(self):
# allow the gun to run or add to the cool counter
if not self.cool:
self.cool_counter += 1
if self.cool_counter > self.cool_time:
self.cool = True
self.cool_counter = 0
# create a new cool down time, this makes the
# enemies shooting a little more unpredictable
self.cool_time = random.randint(40, 300)
def move(self):
# if the player should be moving up add 2 to its counter
# else subtract 2 from the counter
if self.function_up:
self.counter += 2
else:
self.counter -= 2
# if the counter has reached the top, set the player to
# move downwards and if the counter is too low, set the
# player to move upwards
if self.counter >= 360:
self.function_up = False
if self.counter <= 0:
self.function_up = True
# check the function of the enemy and calculate its new y speed
# the enemy will not move in an actual sin wave but the behaviour is
# much more interesting i think
if self.move_function == 0:
self.dy = int(math.cos(deg_to_rad(self.counter)) * self.bounce_range)
elif self.move_function == 1:
self.dy = int(math.sin(deg_to_rad(self.counter)) * self.bounce_range)
elif self.move_function == 2:
self.dy = 0
def update(self, bullet_list):
# call all of the functions needed to update the enemy
# passing the bullet list to the collide function
self.check_collide(bullet_list)
self.check_health()
self.move()
# check if the gun can shoot
# then try to shoot the gun
self.check_cool_down()
self.shoot(bullet_list)
# set the new position
self.rect.x += self.dx
self.rect.y += self.dy
def display(self):
# display the surface at the rect's coordinates
main_s.blit(self.surface, (self.rect.x, self.rect.y))
|
989,735 | 26f32dcb7c857c0c1a4e8cecc00d0b75fe8e1681 | # Required Python Packages
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, log_loss
import subprocess
from sklearn.tree import export_graphviz
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import matplotlib.pyplot as plt
from sklearn.calibration import CalibratedClassifierCV
# File Paths
# INPUT_PATH = "data/breast-cancer-wisconsin.data"
OUTPUT_PATH = "data/First_stab_data_values.csv"
# Headers
# HEADERS = ["CodeNumber", "ClumpThickness", "UniformityCellSize", "UniformityCellShape", "MarginalAdhesion",
# "SingleEpithelialCellSize", "BareNuclei", "BlandChromatin", "NormalNucleoli", "Mitoses", "CancerType"]
def read_data(path):
"""
Read the data into pandas dataframe
:param path:
:return:
"""
data = pd.read_csv(path)
return data
def get_headers(dataset):
"""
dataset headers
:param dataset:
:return:
"""
return dataset.columns.values
def add_headers(dataset, headers):
"""
Add the headers to the dataset
:param dataset:
:param headers:
:return:
"""
dataset.columns = headers
return dataset
def data_file_to_csv():
"""
:return:
"""
# Headers
headers = ["CodeNumber", "ClumpThickness", "UniformityCellSize", "UniformityCellShape", "MarginalAdhesion",
"SingleEpithelialCellSize", "BareNuclei", "BlandChromatin", "NormalNucleoli", "Mitoses",
"CancerType"]
# Load the dataset into Pandas data frame
dataset = read_data(INPUT_PATH)
# Add the headers to the loaded dataset
dataset = add_headers(dataset, headers)
# Save the loaded dataset into csv format
dataset.to_csv(OUTPUT_PATH, index=False)
print("File saved ...!")
def split_dataset(dataset, train_percentage, valid_percentage):
"""
Split the dataset with train_percentage and valid_percentage
:param dataset:
:param train_percentage:
:param valid_percentage:
:param feature_headers:
:param target_header:
:return: train_x, valid_x, test_x, train_y, valid_y, test_y
"""
# Split dataset into train and test dataset
train_x, test_x, train_y, test_y = train_test_split(dataset[:, :-1], dataset[:, -1],
train_size=train_percentage + valid_percentage,
test_size=1-(train_percentage + valid_percentage))
valid_x = train_x[int(np.ceil(train_percentage * len(dataset))):]
valid_y = train_y[int(np.ceil(train_percentage * len(dataset))):]
return train_x, valid_x, test_x, train_y, valid_y, test_y
def handle_missing_values(dataset, missing_values_header, missing_label):
"""
Filter missing values from the dataset
:param dataset:
:param missing_values_header:
:param missing_label:
:return:
"""
return dataset[dataset[missing_values_header] != missing_label]
def random_forest_classifier(train_x, train_y, valid_x, valid_y):
"""
To train the random forest classifier with features and target data
:param train_x:
:param train_y:
:param valid_x:
:param valid_y:
:return: trained random forest classifier
"""
clf = RandomForestClassifier(n_estimators=25)
clf.fit(train_x, train_y)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(valid_x, valid_y)
return clf, sig_clf
def dataset_statistics(dataset):
"""
Basic statistics of the dataset
:param dataset: Pandas dataframe
:return: None, print the basic statistics of the dataset
"""
print(dataset.describe())
def visualize_tree(tree, feature_names, filename):
"""Create tree png using graphviz.
Args
----
tree -- scikit-learn DecsisionTree.
feature_names -- list of feature names.
"""
with open("dt.dot", 'w') as f:
export_graphviz(tree, out_file=f,
feature_names=feature_names)
command = ["dot", "-Tpng", "dt.dot", "-o", "plots-decision/%s.png" % filename]
try:
subprocess.check_call(command)
except:
exit("Could not run dot, ie graphviz, to "
"produce visualization")
def plot_importances(importances, features):
indices = np.argsort(importances)
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='b', align='center')
plt.yticks(range(len(indices)), np.array(features)[indices])
plt.xlabel('Relative Importance')
plt.show()
def main():
"""
Main function
:return:
"""
# Load the csv file into pandas dataframe
dataset = pd.read_csv(OUTPUT_PATH)
# Get basic statistics of the loaded dataset
HEADERS = get_headers(dataset)
dataset_statistics(dataset)
df = dataset
enc = OneHotEncoder(categorical_features=np.array([0, 2, 4, 5, 6, 7, 8, 9]))
enc.fit(df)
print(enc.n_values_)
encoded = enc.transform(df).toarray()
# Filter missing values
# dataset = handle_missing_values(dataset, HEADERS[6], '?')
train_x, valid_x, test_x, train_y, valid_y, test_y = split_dataset(encoded, 0.6, 0.2)
# Train and Test dataset size details
print("Train_x Shape :: ", train_x.shape)
print("Train_y Shape :: ", train_y.shape)
print("Test_x Shape :: ", test_x.shape)
print("Test_y Shape :: ", test_y.shape)
# Create random forest classifier instance
original_model, calibrated_model = random_forest_classifier(train_x, train_y, valid_x, valid_y)
print("Trained model :: ", calibrated_model)
predictions = calibrated_model.predict(test_x)
print("Train Accuracy :: ", accuracy_score(train_y, calibrated_model.predict(train_x)))
print("Test Accuracy :: ", accuracy_score(test_y, predictions))
print("Confusion matrix \n", confusion_matrix(test_y, predictions))
clf_probs = original_model.predict_proba(test_x)
score = log_loss(test_y, clf_probs)
sig_clf_probs = calibrated_model.predict_proba(test_x)
sig_score = log_loss(test_y, sig_clf_probs)
print()
print("Log-loss of")
print(" * uncalibrated classifier trained on 60%% datapoints: %.3f "
% score)
print(" * classifier trained on 60%% datapoints and calibrated on "
"20%% datapoint: %.3f" % sig_score)
print()
for i in range(0, 5):
print("Actual outcome :: {} and Predicted outcome :: {} and Predicted probability :: {}".
format(list(test_y)[i], predictions[i], sig_clf_probs[i][0]))
plot_importances(original_model.feature_importances_, range(np.shape(encoded)[1]-1))
if __name__ == "__main__":
main()
|
989,736 | bb2f3f1064da409c8a7168f148f00bb7999df2fb | from .base import GameServerPacket
class ServerSocketClose(GameServerPacket):
type = Int8(175)
constant = Int32(0)
arg_order = ["type", "constant"]
|
989,737 | 8a7ee6cb932576d44753cc8997ca6463769dea3d | count = 0
w = input()
while True:
tmp = list(input().split())
if 'END_OF_TEXT' in tmp:
break
for i in tmp:
x = str.lower(i)
if x == w:
count = count + 1
print(count)
|
989,738 | 081a25d0e17a88ef7b85b46dbeab8e4d87fcb75f | import sys
fileName="Practice\\MthElement.txt"
with open(fileName,'r') as fileLines:
for line in fileLines:
lineList=list(line.strip().split(" "))
index=(int(lineList[-1]))
if index>1 and index<len(lineList):
print(lineList[(len(lineList)-1)-index]) |
989,739 | 8a3ed8cd257d083990a32dca3ff3b4ce9ad05d8a | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import warnings
from concurrent.futures import Future
from unittest.mock import MagicMock
from lte.protos.mconfig.mconfigs_pb2 import PipelineD
from lte.protos.mobilityd_pb2 import IPAddress
from magma.pipelined.app.classifier import Classifier
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.tests.app.flow_query import RyuDirectFlowQuery as FlowQuery
from magma.pipelined.tests.app.packet_injector import ScapyPacketInjector
from magma.pipelined.tests.app.start_pipelined import (
PipelinedController,
TestSetup,
)
from magma.pipelined.tests.pipelined_test_util import (
FlowTest,
FlowVerifier,
SnapshotVerifier,
create_service_manager,
start_ryu_app_thread,
stop_ryu_app_thread,
wait_after_send,
)
from scapy.all import IP, TCP, UDP, Ether
from scapy.contrib.gtp import GTP_U_Header
class PagingTest(unittest.TestCase):
BRIDGE = 'testing_br'
IFACE = 'testing_br'
MAC_1 = '5e:cc:cc:b1:49:4b'
MAC_2 = '0a:00:27:00:00:02'
BRIDGE_IP = '192.168.128.1'
EnodeB_IP = '192.168.60.141'
Dst_nat = '192.168.129.42'
CLASSIFIER_CONTROLLER_ID = 5
@classmethod
@unittest.mock.patch(
'netifaces.ifaddresses',
return_value=[[{'addr': '00:aa:bb:cc:dd:ee'}]],
)
@unittest.mock.patch('netifaces.AF_LINK', 0)
def setUpClass(cls, *_):
"""
Starts the thread which launches ryu apps
Create a testing bridge, add a port, setup the port interfaces. Then
launch the ryu apps for testing pipelined. Gets the references
to apps launched by using futures.
"""
super(PagingTest, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([], ['classifier'])
cls._tbl_num = cls.service_manager.get_table_num(Classifier.APP_NAME)
testing_controller_reference = Future()
classifier_reference = Future()
test_setup = TestSetup(
apps=[
PipelinedController.Classifier,
PipelinedController.Testing,
PipelinedController.StartupFlows,
],
references={
PipelinedController.Classifier:
classifier_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP,
'internal_ip_subnet': '192.168.0.0/16',
'ovs_gtp_port_number': 32768,
'ovs_mtr_port_number': 15577,
'ovs_internal_sampling_port_number': 15578,
'ovs_internal_sampling_fwd_tbl_number': 201,
'ovs_internal_conntrack_port_number': 15579,
'ovs_internal_conntrack_fwd_tbl_number': 202,
'clean_restart': True,
'paging_timeout': 30,
'classifier_controller_id': 5,
'enable_nat': True,
'ovs_uplink_port_name': "patch-up",
},
mconfig=PipelineD(),
loop=None,
service_manager=cls.service_manager,
integ_test=False,
rpc_stubs={'sessiond_setinterface': MagicMock()},
)
BridgeTools.create_bridge(cls.BRIDGE, cls.IFACE)
cls.thread = start_ryu_app_thread(test_setup)
cls.classifier_controller = classifier_reference.result()
cls.testing_controller = testing_controller_reference.result()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
def test_install_paging_flow(self):
"""
Add paging flow in table 0
"""
# Need to delete all default flows in table 0 before
# install the specific flows test case.
self.classifier_controller._delete_all_flows()
ue_ip_addr = "192.168.128.30"
self.classifier_controller.install_paging_flow(
200,
IPAddress(version=IPAddress.IPV4, address=ue_ip_addr.encode('utf-8')),
True,
)
snapshot_verifier = SnapshotVerifier(
self, self.BRIDGE,
self.service_manager,
)
with snapshot_verifier:
pass
def test_remove_paging_flow(self):
"""
Delete the paging flow from table 0
"""
ue_ip_addr = "192.168.128.30"
self.classifier_controller.remove_paging_flow(IPAddress(version=IPAddress.IPV4, address=ue_ip_addr.encode('utf-8')))
snapshot_verifier = SnapshotVerifier(
self, self.BRIDGE,
self.service_manager,
)
with snapshot_verifier:
pass
def test_traffic_paging_flow(self):
"""
Add paging flow in table 0
"""
# Need to delete all default flows in table 0 before
# install the specific flows test case.
self.classifier_controller._delete_all_flows()
ue_ip_addr = "192.168.128.30"
self.classifier_controller.install_paging_flow(
200,
IPAddress(version=IPAddress.IPV4, address=ue_ip_addr.encode('utf-8')),
True,
)
# Create a set of packets
pkt_sender = ScapyPacketInjector(self.BRIDGE)
eth = Ether(dst=self.MAC_1, src=self.MAC_2)
ip = IP(src=self.Dst_nat, dst='192.168.128.30')
o_udp = UDP(sport=2152, dport=2152)
i_udp = UDP(sport=1111, dport=2222)
i_tcp = TCP(seq=1, sport=1111, dport=2222)
i_ip = IP(src='192.168.60.142', dst=self.EnodeB_IP)
gtp_packet_udp = eth / ip / o_udp / GTP_U_Header(teid=0x1, length=28, gtp_type=255) / i_ip / i_udp
gtp_packet_tcp = eth / ip / o_udp / GTP_U_Header(teid=0x1, length=68, gtp_type=255) / i_ip / i_tcp
# Check if these flows were added (queries should return flows)
flow_queries = [
FlowQuery(
self._tbl_num, self.testing_controller,
match=MagmaMatch(tunnel_id=1, in_port=32768),
),
FlowQuery(
self._tbl_num, self.testing_controller,
match=MagmaMatch(ipv4_dst='192.168.128.30'),
),
]
# =========================== Verification ===========================
# Verify 2 flows installed for classifier table (2 pkts matched)
flow_verifier = FlowVerifier(
[
FlowTest(
FlowQuery(
self._tbl_num,
self.testing_controller,
), 2, 2,
),
], lambda: wait_after_send(self.testing_controller),
)
snapshot_verifier = SnapshotVerifier(
self, self.BRIDGE,
self.service_manager,
)
with flow_verifier, snapshot_verifier:
pkt_sender.send(gtp_packet_udp)
pkt_sender.send(gtp_packet_tcp)
flow_verifier.verify()
if __name__ == "__main__":
unittest.main()
|
989,740 | 3ff5ed0705527cacfd02d590619d0abbde1a2d7b | # Write a function, `rec_intersection(rect1, rect2)` and returns the
# intersection of the two.
#
# Rectangles are represented as a pair of coordinate-pairs: the
# bottom-left and top-right coordinates (given in `[x, y]` notation).
#
# Hint: You can calculate the left-most x coordinate of the
# intersection by taking the maximum of the left-most x coordinate of
# each rectangle. Likewise, you can calculate the top-most y
# coordinate of the intersection by taking the minimum of the top most
# y coordinate of each rectangle.
def rectangle_intersection(rec1, rec2):
bot_left1, top_right1 = rec1[0], rec1[1]
bot_left2, top_right2 = rec2[0], rec2[1]
left_most = max(bot_left1[0], bot_left2[0])
right_most = min(top_right1[0], top_right2[0])
top_most = min(top_right1[1], top_right2[1])
bot_most = max(bot_left1[1], bot_left2[1])
if left_most not in range(bot_left2[0], top_right2[0]):
if top_most not in range(bot_left2[1], top_right2[1]):
return 'nil'
return [[left_most,bot_most], [ right_most, top_most]]
|
989,741 | 9da430ec4ca97779bcd954a207819c4aa8afaab7 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.http import HtmlResponse
from JP.items import JpItem
from bs4 import BeautifulSoup as bs
class SjruSpider(scrapy.Spider):
name = 'sjru'
allowed_domains = ['superjob.ru']
start_urls = ['https://russia.superjob.ru/vacancy/search/?keywords=%D1%82%D0%B0%D0%BA%D1%81%D0%B8']
def parse(self, response: HtmlResponse):
next_page = response.css('div._3QBXO div._3R0rZ:nth-child(1) div._1X8YL div._1XEGw div.iJCa5._1JhPh._2gFpt._1znz6._1LlO2._2nteL div._3Qutk div._1Ttd8._2CsQi:nth-child(1) div.L1p51:nth-child(7) > a.icMQ_._1_Cht._3ze9n.f-test-button-dalshe.f-test-link-Dalshe:nth-child(9)::attr(href)').extract_first()
yield response.follow(next_page, callback=self.parse)
vacancy = response.css('div.f-test-vacancy-item a::attr(href)').extract()
for vac in vacancy:
yield response.follow(vac, callback=self.vac_parse)
def vac_parse(self, response: HtmlResponse):
res = bs(response.text, 'lxml')
source = self.allowed_domains[0]
vac_link = response.url
vac_name = res.find('h1',{'class':'_3mfro'}).getText()
salary = res.find('span',{'class':'ZON4b'}).getText()
yield JpItem(source=source, vac_link=vac_link, vac_name=vac_name, salary=salary)
|
989,742 | 6162b1cefd07f0e75a1c9ca88a9245712b8c1bb2 |
#app.run('127.0.0.1',port=5000,debug=True)
from flask import Flask , render_template , jsonify ,request, session, redirect, url_for # render_template 안에서 html 사용
import requests
from bs4 import BeautifulSoup
#엑셀 파일 생성 시 필요
from openpyxl import load_workbook, Workbook
import jwt
import datetime #로그인시 토큰시간 지정
import hashlib #비밀번호 해쉬 암호화 db 저장
app = Flask(__name__)
from pymongo import MongoClient # pymongo를 임포트 하기(패키지 인스톨 먼저 해야겠죠?)
client = MongoClient('localhost', 27017) # mongoDB는 27017 포트로 돌아갑니다.
db = client.dbsparta # 'dbsparta'라는 이름의 db를 만듭니다.
## HTML을 주는 부분
SECRET_KEY = 'apple' # 토근시 필요한 보안 - 아무거나 입력 원하는거
@app.route('/register') # 회원이 아니라 면을 클릭하게 되면
def register():
return render_template('register.html')
@app.route('/api/register', methods=['POST']) #회원가입시 db.counting.insert_one(imformation)
def api_register():
id_receive = request.form['id_give']
pw_receive = request.form['pw_give']
pw_hash = hashlib.sha256(pw_receive.encode('utf-8')).hexdigest()
db.temp.insert_one({'id':id_receive,'pw':pw_hash})
return jsonify({'result': 'success'})
@app.route('/api/login', methods=['POST'])
#로그인 완료시 id와 pwd 비교 성공하면 html 에서 index.html로 이동
def api_login():
id_receive = request.form['id_give']
pw_receive = request.form['pw_give']
# 회원가입 때와 같은 방법으로 pw를 암호화합니다.
pw_hash = hashlib.sha256(pw_receive.encode('utf-8')).hexdigest()
# id, 암호화된pw을 가지고 해당 유저를 찾습니다.
result = db.temp.find_one({'id':id_receive,'pw':pw_hash})
# 찾으면 JWT 토큰을 만들어 발급합니다.
if result is not None:
# JWT 토큰에는, payload와 시크릿키가 필요합니다.
# 시크릿키가 있어야 토큰을 디코딩(=풀기) 해서 payload 값을 볼 수 있습니다.
# 아래에선 id와 exp를 담았습니다. 즉, JWT 토큰을 풀면 유저ID 값을 알 수 있습니다.
# exp에는 만료시간을 넣어줍니다. 만료시간이 지나면, 시크릿키로 토큰을 풀 때 만료되었다고 에러가 납니다.
payload = {
'id': id_receive,
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=3600)
}
token = jwt.encode(payload, SECRET_KEY, algorithm='HS256').decode('utf-8')
# token을 줍니다.
return jsonify({'result': 'success','token':token})
# 찾지 못하면
else:
return jsonify({'result': 'fail', 'msg':'아이디/비밀번호가 일치하지 않습니다.'})
@app.route('/api/id', methods=['GET'])
def api_valid():
# 토큰을 주고 받을 때는, 주로 header에 저장해서 넘겨주는 경우가 많습니다.
# header로 넘겨주는 경우, 아래와 같이 받을 수 있습니다.
token_receive = request.headers['token_give']
# try / catch 문?
# try 아래를 실행했다가, 에러가 있으면 except 구분으로 가란 얘기입니다.
try:
# token을 시크릿키로 디코딩합니다.
# 보실 수 있도록 payload를 print 해두었습니다. 우리가 로그인 시 넣은 그 payload와 같은 것이 나옵니다.
payload = jwt.decode(token_receive, SECRET_KEY, algorithms=['HS256'])
# print(payload)
# payload 안에 id가 들어있습니다. 이 id로 유저정보를 찾습니다.
# 여기에선 그 예로 닉네임을 보내주겠습니다.
userinfo = db.temp.find_one({'id':payload['id']},{'_id':0})
return jsonify({'result': 'success','id':userinfo['id']})
except jwt.ExpiredSignatureError:
# 위를 실행했는데 만료시간이 지났으면 에러가 납니다.
return jsonify({'result': 'fail', 'msg':'로그인 시간이 만료되었습니다.'})
@app.route('/')
def home():
return render_template('index2.html')
@app.route('/index2.html')
def index2():
return render_template('index2.html')
@app.route('/index.html')
def index():
return render_template('index.html')
@app.route('/input.html')
def input():
return render_template('input.html')
@app.route('/excel.html')
def excel():
return render_template('excel.html')
@app.route('/entire.html')
def entire():
return render_template('entire.html')
@app.route('/month.html')
def month():
return render_template('month.html')
## API 역할을 하는 부분
@app.route('/money', methods=['POST'])
def saving():
id =request.form['id_give']
date = request.form['someDate_give']
pay = request.form['money_give']
cont = request.form['content_give']
tag = request.form['tag_give']
# 클라이언트로부터 데이터를 받는 부분
# mongoDB에 넣는 부분
imformation = {
'id': id,
'somedate': date,
'money':pay,
'content':cont,
'tag':tag,
}
db.counting.insert_one(imformation)
return jsonify({'result': 'success'})
@app.route('/money', methods=['GET'])
def getting():
# 모든 document 찾기 & _id 값은 출력에서 제외하기
# result =list(db.counting.find({},{'_id':0}))
result =list(db.counting.find({},{'_id':0}))
# shops라는 키 값으로 내려주기
return jsonify({'result': 'success', 'shops': result})
#엑셀 생성시
@app.route('/excel',methods=['GET'])
def exeting():
result = list(db.counting.find({}, {'_id': 0}))
row = 4
token_receive = request.headers['token_give']
#payload를 찍어보면 id 값이 존재한다
payload = jwt.decode(token_receive, SECRET_KEY, algorithms=['HS256'])
# 해당 아이디 값을 받아서 엑셀 저장 이름에 넣는다.
id = payload['id'] + '.xlsx'
work_book = Workbook()
print(id)
work_sheet = work_book.active
work_sheet.title="par"
sum=0
cafe=0
food=0
medical=0
other=0
work_sheet.cell(row=3, column=1, value="날짜") # 날짜 엑셀에 적용
work_sheet.cell(row=3, column=2, value="내용") # 날짜 엑셀에 적용
work_sheet.cell(row=3, column=3, value="종류") # 날짜 엑셀에 적용
work_sheet.cell(row=3, column=4, value="금액") # 날짜 엑셀에 적용
work_sheet.cell(row=3, column=6, value="총금액") # 날짜 엑셀에 적용
work_sheet.cell(row=6, column=6, value="카페") # 날짜 엑셀에 적용
work_sheet.cell(row=6, column=7, value="의료") # 날짜 엑셀에 적용
work_sheet.cell(row=6, column=8, value="음식") # 날짜 엑셀에 적용
work_sheet.cell(row=6, column=9, value="기타") # 날짜 엑셀에 적용
for s in result:
# print(s['somedate'])
if(payload['id']==s['id']):
work_sheet.cell(row=row, column=1, value=s['somedate']) #날짜 엑셀에 적용
work_sheet.cell(row=row, column=2, value=s['content']) # 내용 엑셀에 적용
work_sheet.cell(row=row, column=3, value=s['tag']) # 태그 엑셀에 적용
work_sheet.cell(row=row, column=4, value=s['money']) #돈 엑셀에 적용
sum+=int(s['money'])
if(s['tag']=='카페'):
cafe+=int(s['money'])
if (s['tag'] == '카페'):
cafe += int(s['money'])
if (s['tag'] == '음식'):
food += int(s['money'])
if (s['tag'] == '의료비 및 보험비'):
medical += int(s['money'])
if (s['tag'] == '기타'):
other += int(s['money'])
row=row+1
work_sheet.cell(row=4, column=6, value=sum) # 총 금액 엑셀에 적용
#태그별 금액
work_sheet.cell(row=7, column=6, value=cafe)
work_sheet.cell(row=7, column=7, value=food)
work_sheet.cell(row=7, column=8, value=medical)
work_sheet.cell(row=7, column=9, value=other)
work_book.save(id)
return jsonify({'result': 'success', 'shops': result})
if __name__ == '__main__':
app.run('127.0.0.1',port=5000,debug=True)
#aws 0.0.0.0
#127.0.0.1 |
989,743 | ec5802339bbee323d533b1b4d2c91c2c2fd39800 | print('-' * 40)
print(f'{"LOJA SUPER BARATÃO":^40}')
print('-' * 40)
mais_de_1000 = total_compra = preco_produto_mais_barato = 0
nome_produto_mais_barato = ''
while True:
produto_atual = str(input('Nome do Produto: ')).title().strip()
preco_atual = float(input('Preço: R$'))
while preco_atual <= 0:
preco_atual = float(input('Preço: R$'))
if preco_produto_mais_barato == 0 or preco_atual < preco_produto_mais_barato:
preco_produto_mais_barato = preco_atual
nome_produto_mais_barato = produto_atual
if preco_atual > 1000:
mais_de_1000 += 1
total_compra += preco_atual
opcao = str(input('Quer continuar? [S/N] ')).upper().strip()
while opcao != 'S' and opcao != 'N':
opcao = str(input('Quer continuar? [S/N] ')).upper().strip()
if opcao == 'N':
break
print(f'{" FIM DO PROGRAMA ":-^40}')
print(f'''Total da compra --> R${total_compra:.2f}
Total de produtos custando mais de R$1000.00 --> {mais_de_1000}
O produto mais barato foi {nome_produto_mais_barato} que custa R${preco_produto_mais_barato:.2f}''')
print('-' * 40)
|
989,744 | 9183ef475c6a23eaddb00b615cee5631739269f6 | """
This script makes used of face_recognition library to calculate the 128D descriptor to be used for face recognition.
"""
# Import required packages:
import face_recognition
import cv2
# Load image:
image = cv2.imread("jared_1.jpg")
# Convert image from BGR (OpenCV format) to RGB (face_recognition format):
image = image[:, :, ::-1]
# Calculate the encodings for every face of the image:
encodings = face_recognition.face_encodings(image)
# Show the first encoding:
print(encodings[0])
|
989,745 | 5c7aadb708dea18d1e3b1d838269b966b54c66ef | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-07-15 15:34
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logrounds', '0040_auto_20160713_1210'),
]
operations = [
migrations.RemoveField(
model_name='logset',
name='logdef',
),
migrations.AlterField(
model_name='logentry',
name='log_time',
field=models.DateTimeField(default=datetime.datetime(2016, 7, 15, 10, 34, 56, 359916)),
),
]
|
989,746 | 8f563fc80bc3144f0876d495e77aca4c5cb0b5fb | # KAMUS
# tanggal : string
# db_consumable : variabel global dari load data / matriks consumable.csv
# db_consumable_history : variabel global dari load data / matriks consumable_history.csv
# db_user : variabel global dari load data / matriks user.csv
# riwayatambil() -> F13
def matriks(csv):
# I.S. mengimport file csv
# F.S. file csv dijadikan matriks
with open(csv, 'r') as file:
line = [clean_line.replace('\n', '') for clean_line in file.readlines()]
array = []
for i in range(len(line)):
tempArr = []
counter = 0
string = line[i]
length = len(line[i])
for j in range(length):
if j == length - 1:
tempArr.append(string[counter:(j + 1)])
elif string[j] == ';':
tempArr.append(string[counter:j])
counter = j + 1
array.append(tempArr)
return array
def convert(matriks, Str=False, Int=False):
# I.S. tipe data elemen pada matriks tidak sesuai keinginan
# F.S. tipe data elemen pada matriks berubah sesuai keinginan
# Pilihan tipe: Str : string / Int : integer
if Str:
for i in matriks:
for j in range(len(i)):
try:
i[j] = str(i[j])
except:
continue
if Int:
for i in matriks:
for j in range(len(i)):
try:
i[j] = int(i[j])
except:
continue
return matriks
def cek_tanggal(tanggal):
# I.S. memasukkan data tanggal dengan bentuk "--/--/----"
# F.S. mengembalikan list angka dari tanggal tersebut
v = ""
hasil_tanggal=[]
raw_tanggal = tanggal + "/"
for w in raw_tanggal:
if (w == "/"):
hasil_tanggal.append(int(v))
v = ""
else:
v += w
return(hasil_tanggal)
def sort_tanggal(array):
# I.S. menerima matriks away yang diubah oleh fungsi matriks untuk
# file gadget borrow history dan gadget return history
# F.S. mengeluarkan matriks yang sudah diurutkan berdasarkan tanggal
# menggunakan selection sort
array_sort = array
for i in range(1, len(array) - 1):
Imax = i
for j in range(i, len(array)):
tanggal_baris_start = cek_tanggal(array_sort[i][3])
tanggal_baris_lain = cek_tanggal(array_sort[j][3])
if (tanggal_baris_start[2] < tanggal_baris_lain[2]):
Imax = j
array_simpan = array_sort[Imax]
array[Imax] = array_sort[i]
array_sort[i] = array_simpan
elif (tanggal_baris_start[2] == tanggal_baris_lain[2]):
if (tanggal_baris_start[1] < tanggal_baris_lain[1]):
Imax = j
array_simpan = array_sort[Imax]
array[Imax] = array_sort[i]
array_sort[i] = array_simpan
elif (tanggal_baris_start[1] == tanggal_baris_lain[1]):
if (tanggal_baris_start[0] < tanggal_baris_lain[0]):
Imax = j
array_simpan = array_sort[Imax]
array[Imax] = array_sort[i]
array_sort[i] = array_simpan
return (array_sort)
def sort_tanggal_2(array):
# I.S. menerima matriks away yang diubah oleh fungsi matriks untuk
# file gadget borrow history dan gadget return history
# F.S. mengeluarkan matriks yang sudah diurutkan berdasarkan tanggal
# menggunakan selection sort
array_sort = array
for i in range(1, len(array) - 1):
Imax = i
for j in range(i, len(array)):
tanggal_baris_start = cek_tanggal(array_sort[i][2])
tanggal_baris_lain = cek_tanggal(array_sort[j][2])
if (tanggal_baris_start[2] < tanggal_baris_lain[2]):
Imax = j
array_simpan = array_sort[Imax]
array[Imax] = array_sort[i]
array_sort[i] = array_simpan
elif (tanggal_baris_start[2] == tanggal_baris_lain[2]):
if (tanggal_baris_start[1] < tanggal_baris_lain[1]):
Imax = j
array_simpan = array_sort[Imax]
array[Imax] = array_sort[i]
array_sort[i] = array_simpan
elif (tanggal_baris_start[1] == tanggal_baris_lain[1]):
if (tanggal_baris_start[0] < tanggal_baris_lain[0]):
Imax = j
array_simpan = array_sort[Imax]
array[Imax] = array_sort[i]
array_sort[i] = array_simpan
return (array_sort)
def riwayatambil(db_consumable_history, db_user, db_consumable, user):
# I.S. mengecek jika akun pengguna adalah admin
# F.S. menampilkan sejarah gadget yang dipinjam semua pengguna, mulai dari yang paling baru
# (menampilkan 5 data pertama, jika diminta dapat menampilkan lebih banyak)
if user[5]=="Admin":
take_hist_sort=sort_tanggal(db_consumable_history)
# mencari nama orang dari id peminjam
for i in range(1, len(db_consumable_history)):
for j in range(1, len(db_user)):
if (take_hist_sort[i][1] == db_user[j][0]):
take_hist_sort[i][1] = db_user[j][2]
# mencari nama consumbale dari id consumable
for i in range(1, len(db_consumable_history)):
for j in range(1, len(db_consumable)):
if (take_hist_sort[i][2] == db_consumable[j][0]):
take_hist_sort[i][2] = db_consumable[j][1]
barishistory = len(db_consumable_history) - 1
barisawal = 1
v = 1
w = 1
# mencetak hasil permintaan riwayatambil
while (v < 2):
if (barishistory > 5):
while (w <= 5 and barisawal <= barishistory):
print("ID Peminjaman: " + str(take_hist_sort[barisawal][0]))
print("Nama Pengambil: " + str(take_hist_sort[barisawal][1]))
print("Nama Consumable: " + str(take_hist_sort[barisawal][2]))
print("Tanggal Peminjaman: " + str(take_hist_sort[barisawal][3]))
print("Jumlah: " + str(take_hist_sort[barisawal][4]))
print("")
barisawal += 1
w += 1
if (barishistory >= barisawal):
mengulang = str(input("Apakah anda ingin melihat data selanjutnya (Y/N): "))
if (mengulang == "Y") or (mengulang == "y"):
w = 1
else:
v = 2
else:
v = 2
elif (0 < barishistory and barishistory <= 5):
for i in range(1, barishistory+1):
print("ID Peminjaman: " + str(take_hist_sort[i][0]))
print("Nama Pengambil: " + str(take_hist_sort[i][1]))
print("Nama Consumable: " + str(take_hist_sort[i][2]))
print("Tanggal Peminjaman: " + str(take_hist_sort[i][3]))
print("Jumlah: " + str(take_hist_sort[i][4]))
print("")
v = 2
elif (barishistory == 0):
print("Belum ada consumable yang diambil")
v = 2
else:
print("Anda tidak dapat mengakses riwayat pengambil") |
989,747 | 470a56a788737d6a526344b22921e8c209b43105 | #A Boolean expression is an expression that evaluates to produce a result which is a Boolean value
a = 3
b = 4
ketqua = a == b
print (ketqua)
9==9
print (9==9)
"hi"+"ha"=="hiha"
print ("hi"+"ha"=="hiha")
|
989,748 | b374db30ce1cff1eebf5ec435d7b7ad5afd03631 | import json
from board_tile import BoardTile
from location import Location
class BoardTileParser:
@staticmethod
def get_tiles(data, mapWidth, mapHeight):
tiles = []
for col in range(0, mapWidth):
for row in range(0, mapHeight):
location = Location(col, row)
tileType = data['Board'][col][row]
tile = BoardTile(location, tileType)
# tile.print_debug()
tiles.append(tile)
return tiles
|
989,749 | b27221e3dc1a6c66d1fa33af664bcc0808d8dab4 | #__coding__:'utf-8'
#auther:ly
import requests
class HttpRequest:
'''该类主要是完成http的get和post请求,并返回一个消息实体,可通过text,json()查看具体内容,cookies = cookies'''
def http_request(self,method,url,params,header):
if method.upper() == 'GET':
try:
resp = requests.get(url,params=params,headers = header)
except Exception as e:
resp = 'get请求出错了:{}'.format(e)
elif method.upper() == 'POST':
try:
resp = requests.post(url,data=params.encode(),headers = header)
except Exception as e:
resp = 'post请求出错了:{}'.format(e)
elif method.upper() =='PUT':
try:
resp = requests.put(url, data=params, headers=header)
except Exception as e:
resp = 'put请求出错了:{}'.format(e)
elif method.upper() == 'DELETE':
try:
resp = requests.delete(url, params = params, headers=header)
except Exception as e:
resp = 'delete请求出错了:{}'.format(e)
else:
print('不支持此种类型请求')
resp = None
return resp
if __name__ == '__main__':
import json
h = HttpRequest()
params = {
"TenancyName": "default",
"StoreCode": "CD",
"PosCode": "1",
"CommunicationPassword": "123456",
"MachineMac": "1",
"MachineName": "1",
"Platform": 1
}
params = json.dumps(params)
header = {
'Content-Type': 'application/json;',
}
resp = h.http_request('post','http://192.168.1.41:11001/api/services/app/Auth/Bind',params,)
print(resp.text)
|
989,750 | d0a5bbe2867d282b12b1ac4e73e3b2b63d49fc4e | from jnpr.junos import Device
from jnpr.junos.op.ethport import EthPortTable
from getpass import getpass
from pprint import pprint
a_device = Device(host="srx2.lasthop.io", user="pyclass", password="")
a_device.open()
ports = EthPortTable(a_device)
ports.get()
pprint(ports)
pprint(ports.keys())
pprint(ports.values())
for k,v in ports.items():
print(k)
print( v)
|
989,751 | 131ad8ab2c18ade4ca2211a33e31e0c02a9d7a47 | from flask import Flask, render_template, request, url_for, redirect, abort, make_response
app = Flask(__name__)
@app.route('/str')
def return_str():
return '返回了str'
@app.route('/page')
def return_page():
# return '返回了一个页面'
return render_template('index.html', msg='登录用户名或密码错误')
# post方式请求
@app.route('/login', methods=['GET', 'POST'])
def login():
print(request)
uname = request.form['uname']
password = request.form['password']
return '正在登录' + '您的用户名是:' + uname + '密码是:' + password
@app.route('/index')
def return_url():
# return '返回了一个url'
return redirect(url_for('return_page'))
@app.route('/code')
def return_error():
# return '返回一个状态码'
abort(405)
@app.route("/set")
def return_set():
# return '返回了一个自定义响应'
response = make_response("返回一个自定义的响应")
response.headers["cookie"] = 'abc'
return response
if __name__ == '__main__':
app.run(debug=1)
|
989,752 | 7386127c3690af137ca4dc8e744b6ab2f5c6650c | #!/usr/bin/env python3
import argparse
import logging
import sys
import config_system
import config_system.log_handlers
import config_system.config_json
root_logger = logging.getLogger()
root_logger.setLevel(logging.WARNING)
# Add counting Handler
counter = config_system.log_handlers.ErrorCounterHandler()
root_logger.addHandler(counter)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="Path to the configuration file (*.config)")
parser.add_argument(
"-d",
"--database",
default="Mconfig",
help="Path to the configuration database (Mconfig)",
)
parser.add_argument(
"--ignore-missing",
action="store_true",
default=False,
help="Ignore missing database files included with 'source'",
)
parser.add_argument(
"-j",
"--json",
metavar="OUT",
required=True,
help="Write JSON configuration file",
)
return parser.parse_args()
def main():
args = parse_args()
config_system.read_config(args.database, args.config, args.ignore_missing)
config_system.config_json.write_config(args.json)
return counter.errors() + counter.criticals()
if __name__ == "__main__":
sys.exit(main())
|
989,753 | 9b32e8ab1d5eb95d5d665a36cbffaf92fc88b2fc | """
仅用于datax从MySQL和Oracle往hive中抽数使用
生成和源表结构一致的hive建表语句文件并在hive中建表
生成datax配置json文件
"""
import pymysql
import cx_Oracle as oracle
import pandas as pd
import os
import json
import sys
class datax_db_2_hive(object):
"""docstring for datax_db_2_hive
connect_id为配置文件的id,根据id取数据库连接
table = 为mysql、Oracle表名,hive表名需加前缀
schema = 为数据库中的库名,MySQL中schema和库名一致,此参数用于Oracle
"""
conf_file = pd.read_table(r'F:\code\ods\etl\conf_file',index_col='id')
def __init__(self, connect_id, schema, table):
super(datax_db_2_hive, self).__init__()
self.connect_id = connect_id
self.hostname = datax_db_2_hive.conf_file.host[connect_id]
self.username = datax_db_2_hive.conf_file.username[connect_id]
self.password = datax_db_2_hive.conf_file.password[connect_id]
self.db = datax_db_2_hive.conf_file.db[connect_id]
self.port = str(datax_db_2_hive.conf_file.port[connect_id])
self.dbtype = datax_db_2_hive.conf_file.db_type[connect_id]
self.prefix = datax_db_2_hive.conf_file.prefix[connect_id]
self.path = datax_db_2_hive.conf_file.path[connect_id]
self.schema = schema.lower()
self.table = table.lower()
def get_mysql_info(self, mysqlcharset='utf8'):
"""
从MySQL元数据获取抽数配置信息
"""
connection=pymysql.connect(host = self.hostname,
user = self.username,
password = self.password,
db = self.db,
port = int(self.port),
charset = mysqlcharset
)
cols = []
create_body=''
query_str='select '
try:
#获取一个游标
with connection.cursor(cursor=pymysql.cursors.DictCursor) as cursor:
# 打印表数据量
cnt_sql = 'select count(1) from {0}.{1}'.format(self.db, self.table)
cursor.execute(cnt_sql)
tablecnt = cursor.fetchone()[0]
print('{0}数据量为:{1}'.format(self.table, tablecnt))
# 取表字段信息
sql='SHOW FULL FIELDS FROM {0}'.format(self.table)
cnt=cursor.execute(sql) #返回记录条数
try:
for row in cursor:#cursor.fetchall()
#print(row)
if row['Type'].split('(')[0] in ('int', 'tinyint', 'smallint', 'mediumint', 'integer'):
row['Type'] = "int"
row['writeType'] = "string"
elif 'bigint' in row['Type']:
row['Type'] = "bigint"
row['writeType'] = "string"
elif row['Type'].split('(')[0] in ('double','float'):
row['Type'] = "double"
row['writeType'] = "double"
elif 'decimal' in row['Type']:
row['Type'] = row['Type']
row['writeType'] = "double"
else:
row['Type'] = "string"
row['writeType'] = "string"
create_body += row['Field'] + ' '+ row['Type'] +' comment \'' + row['Comment'] + '\' ,\n'
query_str += row['Field'] + ','
coljson = eval('{"name":"' + row['Field'] + '","type":"' + row['writeType'] + '"}')
# print(coljson)
cols.append(coljson)
# print(cols)
except Exception as e:
print('程序异常!')
raise e
# 取表注释
comment_sql = "SELECT t2.TABLE_COMMENT FROM information_schema.TABLES t2 WHERE t2.table_schema = lower('{0}') and t2.table_name = lower('{1}')".format(self.db,self.table)
cursor.execute(comment_sql)
tablecomment = cursor.fetchone()['TABLE_COMMENT']
finally:
connection.close()
# create_body += 'etl_time string comment \'etl时间\') \ncomment \'%s\''%tablecomment
# query_str += 'etl_time from {0}.{1}'.format(self.db,self.table)
# cols.append(eval('{"name":"etl_time","type":"string"}'))
return create_body, query_str, cols, tablecomment
def get_oracle_info(self):
"""
从oracle元数据获取抽数配置信息
"""
# connect oracle database
connect_str = self.username + '/' + self.password + '@' + self.hostname + ':' + self.port + '/' + self.db
connection = oracle.connect(connect_str, encoding = "UTF-8", nencoding = "UTF-8")
# create cursor
# cursor = connection.cursor()
cols = []
create_body=''
query_str='select '
try:
#获取一个游标
with connection.cursor() as cursor:
# 打印表数据量
cnt_sql = 'select count(1) from {0}.{1}'.format(self.schema, self.table)
cursor.execute(cnt_sql)
tablecnt = cursor.fetchone()[0]
print('{0}数据量为:{1}'.format(self.table, tablecnt))
# 取表字段信息
sql="select T.COLUMN_NAME,T.COMMENTS,C.DATA_TYPE,C.DATA_PRECISION,C.DATA_SCALE from all_COL_COMMENTS t, all_TAB_COLUMNS c where c.column_name = t.column_name and c.owner = t.owner and c.TABLE_NAME = t.TABLE_NAME and c.owner = upper('{0}') and c.TABLE_NAME = upper('{1}') order by c.COLUMN_ID".format(self.schema, self.table)
cursor.execute(sql)
try:
for tp_row in cursor:#cursor.fetchall()
#print(row)
row = list(tp_row)
if row[2] == 'INTEGER':
row[2] = "bigint"
row.append("string")
elif row[2] == 'NUMBER':
if row[4] != 0:
row[2] = "decimal(" + str(row[3]) + ',' + str(row[4]) + ')'
row.append("double")
elif row[3] <= 11:
row[2] = 'int'
row.append('string')
else:
row[2] = 'bigint'
row.append('string')
elif row[2] in ('BINARY_FLOAT', 'BINARY_DOUBLE', 'FLOAT'):
row[2] = "double"
row.append("double")
else:
row[2] = "string"
row.append("string")
create_body += row[0] + ' '+ row[2] +' comment \'' + str(row[1]) + '\' ,\n'
query_str += row[0] + ','
coljson = eval('{"name":"' + row[0] + '","type":"' + row[5] + '"}')
# print(coljson)
cols.append(coljson)
# print(cols)
except Exception as e:
print('程序异常!')
raise e
# 取表注释
comment_sql = "select t.comments from all_tab_comments t where owner = upper('{0}') and table_name = upper('{1}')".format(self.schema,self.table)
cursor.execute(comment_sql)
tablecomment = cursor.fetchone()[0]
finally:
connection.close()
# create_body += 'etl_time string comment \'etl时间\') \ncomment \'%s\''%tablecomment
# query_str += 'etl_time from {0}.{1}'.format(self.db,self.table)
# cols.append(eval('{"name":"etl_time","type":"string"}'))
return create_body, query_str, cols, tablecomment
def dumpjson(self, query_sql, cols):
"""
生成配置json文件
"""
f = open(r'F:\code\ods\etl\datax_db_2_hive.json', encoding='utf-8')
setting = json.load(f, strict=False)
#json文件配置项
setting['job']['content'][0]['reader']["name"] = self.dbtype + 'reader'
setting['job']['content'][0]['reader']['parameter']['password'] = self.password
setting['job']['content'][0]['reader']['parameter']['username'] = self.username
setting['job']['content'][0]['reader']['parameter']['connection'][0]['querySql'][0] = query_sql
if self.dbtype == 'mysql':
jdbc = 'jdbc:mysql://' + self.hostname + ':' + self.port + '/' + self.db + '?useUnicode=true&characterEncoding=UTF8&tinyInt1isBit=false'
elif self.dbtype == 'oracle':
jdbc = 'jdbc:oracle:thin:@' + self.hostname + ':' + self.port + '/' + self.db
pass
setting['job']['content'][0]['reader']['parameter']['connection'][0]['jdbcUrl'][0] = jdbc
setting['job']['content'][0]['writer']['parameter']['column'] = cols
setting['job']['content'][0]['writer']['parameter']['path'] = '/user/hive/warehouse/bigdata_ods.db/ods_' + self.prefix + '_' + self.table + '/'
setting['job']['content'][0]['writer']['parameter']['fileName'] = 'ods_' + self.prefix + '_' + self.table
jsObj = json.dumps(setting)
write_json_path = 'F:\\code\\ods\\' + self.path + '\\' + 'ods_' + self.prefix + '_' + self.table
if not os.path.exists(write_json_path):
os.makedirs(write_json_path)
write_path_json = write_json_path + '\\' + 'ods_' + self.prefix + '_' + self.table + '.json'
with open(write_path_json, "w") as f:
f.write(jsObj)
f.close()
return print('已生成json文件:', write_path_json)
def create_hive_table(self, ispartition = False):
'''
ispartition : 是否分区默认为分区
'''
create_head = '''
create table if not exists bigdata_ods.ods_{0}_{1}('''.format(self.prefix,self.table)
if ispartition:
create_tail = r'''
partitioned by (ds string comment '分区日期')
row format delimited fields terminated by '\001';'''
else:
create_tail = r'''
row format delimited fields terminated by '\001';'''
if self.dbtype == 'mysql':
create_body, query_str, cols, tablecomment = datax_db_2_hive.get_mysql_info(self)
query_str += 'current_timestamp as etl_time from {0}.{1}'.format(self.db,self.table)
elif self.dbtype == 'oracle':
create_body, query_str, cols, tablecomment = datax_db_2_hive.get_oracle_info(self)
query_str += 'sysdate as etl_time from {0}.{1}'.format(self.schema,self.table)
create_body += 'etl_time string comment \'etl时间\') \ncomment \'%s\''%tablecomment
cols.append(eval('{"name":"etl_time","type":"string"}'))
datax_db_2_hive.dumpjson(self, query_str, cols)
create_str = create_head + '\n' + create_body + create_tail
write_create_path = 'F:\\code\\ods\\' + self.path + '\\' + 'ods_' + self.prefix + '_' + self.table
if not os.path.exists(write_create_path):
os.makedirs(write_create_path)
write_create_sql = write_create_path + '\\create_ods_' + self.prefix + '_' + self.table + '.sql'
with open(write_create_sql, "w") as f:
f.write(create_str)
f.close()
os.popen("hive -f %s" %write_create_sql)
# print("hive -f %s" %write_create_sql)
return print('已生成建表语句文件并开始执行:', write_create_sql)
def main():
if len(sys.argv) == 4:
try:
argv1 = int(sys.argv[1])
#datax_db_2_hive(52, 'rpt', 'dim_big_cate').create_hive_table()
#datax_db_2_hive(51, 'basis', 'big_cate_day').create_hive_table()
datax_db_2_hive(argv1, sys.argv[2], sys.argv[3]).create_hive_table()
except Exception as e:
raise e
elif len(sys.argv) == 5:
try:
argv1 = int(sys.argv[1])
#datax_db_2_hive(52, 'rpt', 'dim_big_cate').create_hive_table()
#datax_db_2_hive(51, 'basis', 'big_cate_day').create_hive_table()
datax_db_2_hive(argv1, sys.argv[2], sys.argv[3]).create_hive_table(sys.argv[4])
except Exception as e:
raise e
elif len(sys.argv) == 2 and sys.argv[1].upper() in ('HELP', 'H'):
print('传入参数为:conf_file中的id schema table [是否分区 = False]')
else:
print('请传入正确的参数')
if __name__ == '__main__':
main()
|
989,754 | 4768b6917bc8bf9484dbbb4970fdb591d44aadb2 | from . import transforms
from .amaxa import *
from .constants import *
|
989,755 | 5f79bd0130efca92197561dd48c048557c31489b | import numpy as np
from scipy.optimize import minimize
def action(path, vf_func, D=1, dt=1):
# centers
x = (path[:-1] + path[1:]) * 0.5
v = np.diff(path, axis=0) / dt
s = (v - vf_func(x)).flatten()
s = 0.5 * s.dot(s) * dt / D
return s
def action_aux(path_flatten, vf_func, dim, start=None, end=None, **kwargs):
path = reshape_path(path_flatten, dim, start=start, end=end)
return action(path, vf_func, **kwargs)
def action_grad(path, vf_func, jac_func, D=1, dt=1):
x = (path[:-1] + path[1:]) * 0.5
v = np.diff(path, axis=0) / dt
dv = v - vf_func(x)
J = jac_func(x)
z = np.zeros(dv.shape)
for s in range(dv.shape[0]):
z[s] = dv[s] @ J[:, :, s]
grad = (dv[:-1] - dv[1:]) / D - dt / (2 * D) * (z[:-1] + z[1:])
return grad
def action_grad_aux(path_flatten, vf_func, jac_func, dim, start=None, end=None, **kwargs):
path = reshape_path(path_flatten, dim, start=start, end=end)
return action_grad(path, vf_func, jac_func, **kwargs).flatten()
def reshape_path(path_flatten, dim, start=None, end=None):
path = path_flatten.reshape(int(len(path_flatten) / dim), dim)
if start is not None:
path = np.vstack((start, path))
if end is not None:
path = np.vstack((path, end))
return path
def least_action_path(start, end, vf_func, jac_func, n_points=20, init_path=None, D=1):
dim = len(start)
if init_path is None:
path_0 = (
np.tile(start, (n_points + 1, 1))
+ (np.linspace(0, 1, n_points + 1, endpoint=True) * np.tile(end - start, (n_points + 1, 1)).T).T
)
else:
path_0 = init_path
fun = lambda x: action_aux(x, vf_func, dim, start=path_0[0], end=path_0[-1], D=D)
jac = lambda x: action_grad_aux(x, vf_func, jac_func, dim, start=path_0[0], end=path_0[-1], D=D)
sol_dict = minimize(fun, path_0[1:-1], jac=jac)
path_sol = reshape_path(sol_dict["x"], dim, start=path_0[0], end=path_0[-1])
return path_sol, sol_dict
|
989,756 | 49c8e51d3b3bbe5d19fd913b6a1ed71507160c2e | #Excercise 1
def sleep_in(weekday, vacation):
if not weekday or vacation:
return True
else:
return False
# Given 2 int values, return True if one is negative and one is positive.
# Except if the parameter "negative" is True,
# then return True only if both are negative.
def pos_neg(a, b, negative):
if negative:
if a < 0 and b < 0:
return True
elif a > 0 or b > 0:
return False
if not negative:
if a < 0 and b < 0:
return False
if a < 0 or b < 0:
return True
else:
return False
def not_string(str):
if str.startswith('not'):
return str
else:
return 'not ' + str
#Given a non-empty string and an int n, return a new string where the char at index n has been removed.
# The value of n will be a valid index of a char in the original string
# (i.e. n will be in the range 0..len(str)-1 inclusive).
def missing_char(str, n):
return str[:n] + str[n + 1:]
# str[:n] the column indicates that we take everythin unil the index we specify. n doesn't count
# so if n = 1, in "Kitten" we would only take the k
# str [n+1] we take the rest of the word from the index we specified.
# like this we form the word "Ktten"
# Given an "out" string length 4, such as "<<>>",
# and a word, return a new string where the word
# is in the middle of the out string, e.g. "<<word>>".
def make_out_word(out, word):
return out[:2] + word +out[2:]
# Given a string, return a new string made of 3 copies
# of the last 2 chars of the original string.
# The string length will be at least 2.
# Ex: "Hello" => "lololo"
def extra_end(str):
end = str[len(str) -2:]
return end * 3
# The result getting the table is encoded as an int value with 0=no, 1=maybe, 2=yes.
# If either of you is very stylish, 8 or more, then the result is 2 (yes).
# With the exception that if either of you has style of 2 or less, then the result is 0 (no).
# Otherwise the result is 1 (maybe).
def date_fashion(you, date):
if (you > 7 and not date < 3) or (date > 7 and not you < 3):
return 2
if you < 3 or date < 3:
return 0
return 1
|
989,757 | 09a2bd840a001961d4420714a8149fe746361ea9 | from pets_world import __version__
import pytest
from pets_world.pets_classes import Pet, Cat, Dog
def test_version():
assert __version__ == '0.1.0'
def test_pets_counter(data):
assert Pet.get_pets_count() == 4
def test_cat_has_hair(data):
assert data[0].has_hair == True
assert data[1].has_hair == False
def test_dog_age(data):
assert data[2].age == 12
assert data[3].age == 17
@pytest.fixture
def data():
cat1 = Cat('Cat1', 5, True)
cat2 = Cat('Cat2', 6, False)
dog1 = Dog('Dog1', 12)
dog2 = Dog('Dog2', 17)
return [cat1, cat2, dog1, dog2] |
989,758 | a48818d762b0f9d4b753440b59b2f1e14b7d2058 | #!/usr/bin env python
scaleUnc = {1000:
{"up": [1.0, 0.932801544666, 0.686887860298, 0.683348536491],
"do": [1.0, 1.07134735584, 1.65871071815, 1.68442952633]
},
2000 :
{"up": [1.0, 0.864271104336, 0.625839948654, 0.624607920647],
"do": [1.0, 0.917300820351, 1.47475910187, 1.48835909367]
},
200 :
{"up": [1.0, 0.922315955162, 0.691816151142, 0.661118149757],
"do": [1.0, 1.03203868866, 1.53590130806, 1.61896574497]
},
3000 :
{"up": [1.0, 0.882398068905, 0.628052890301, 0.626575350761],
"do": [1.0, 0.985951781273, 1.65007591248, 1.6384768486]
},
300 :
{"up": [1.0, 0.905074596405, 0.669205188751, 0.649496674538],
"do": [1.0, 1.07495331764, 1.56859827042, 1.62269902229]
},
500 :
{"up": [1.0, 0.915703773499, 0.66854339838, 0.661232233047],
"do": [1.0, 1.10617852211, 1.63316297531, 1.6953394413],
},
700:
{"up": [1.0, 0.925199747086, 0.681025445461, 0.677552878857],
"do": [1.0, 1.07631421089, 1.62018072605, 1.66252076626]
}
}
from scipy.interpolate import interp1d
import numpy as np
import ROOT
functionsUp=[]
functionsDo=[]
for i in range(4):
x=[]
yUp=[]
yDo=[]
for mass in scaleUnc.keys():
x.append(float(mass))
yUp.append(scaleUnc[mass]["up"][i])
yDo.append(scaleUnc[mass]["do"][i])
functionsUp.append(interp1d(x, yUp, kind='linear'))
functionsDo.append(interp1d(x, yDo, kind='linear'))
xnew=np.linspace(200, 3000, 101)
ynewUp=(functionsUp[3])(xnew)
ynewDo=(functionsDo[3])(xnew)
gUp = ROOT.TGraph(len(xnew), xnew,ynewUp)
gDo = ROOT.TGraph(len(xnew), xnew,ynewDo)
gUp.Draw("AL")
gUp.GetYaxis().SetRangeUser(0., 2)
gDo.Draw("L")
a=raw_input("ciao")
|
989,759 | fbbee029b7cd97e0c891b8f99279ef4d63849eef | class Solution:
def isHappy(self, n: int) -> bool:
is_repeat = set()
total = n
is_repeat.add(total)
while total != 1:
total1 = 0
for s in str(total):
s = int(s)
total1 += s * s
total = total1
if total in is_repeat:
return False
is_repeat.add(total1)
total = total1
print(total)
return True
s = Solution()
print(s.isHappy(19))
|
989,760 | a2cb81e30e0376c078ef15ebe23de794a26607cd | version https://git-lfs.github.com/spec/v1
oid sha256:631794cafc6cb55905b1c096936fd0395953c96cf34b5ff513b7671a80c708dc
size 2426
|
989,761 | c51474c79a6564acde5bdde23aab790c18d3c4ce | #!/usr/bin/env python
"""
Usage:
nmap_scan -H TARGETHOST -p PORT [-o]
nmap_scan -H TARGETHOST [-o]
Options:
-h --help Show this usage.
-v --version Show the version.
-H TARGETHOST The target host.
-p PORT The port to scan.
-o Show only open ports. [Default: False]
"""
from docopt import docopt
import nmap
def nmapScan(tgtHost, tgtPort):
"""
The actual scanning of the host occurs here.
"""
nmScan = nmap.PortScanner()
nmScan.scan(tgtHost, tgtPort)
state = nmScan[tgtHost]['tcp'][int(tgtPort)]['state']
if state == 'open' or not arguments["-o"]:
print ' [*] %s tcp/ %s %s' % (tgtHost, tgtPort, state)
def main(args):
"""
This is a python port scanner that uses the nmap
library to do the majority of the work.
"""
if '-' in args['-p']:
tmp = args['-p'].split('-')
tgtPorts = [str(i) for i in xrange(int(tmp[0]), int(tmp[1])+1)]
else:
tgtPorts = [args['-p']]
tgtHost = args['-H']
for tgtPort in tgtPorts:
nmapScan(tgtHost, tgtPort)
if __name__ == "__main__":
arguments = docopt(__doc__, version="Nmap scan v1.0.0")
main(arguments)
|
989,762 | 08570319d692f61c7d5aa79a091c576316d01e5f | from .base_page import BasePage
from .locators import ProductPageLocators
class ProductPage(BasePage):
def should_book_into_box(self):
self.should_be_adding_to_basket_button()
self.add_book_to_basket()
self.solve_quiz_and_get_code() #for stepic task check
self.should_be_confirm_messages()
self.should_be_book_name_in_confirm_message()
self.should_be_confirmation_message_for_book()
self.should_be_benefit_offer()
self.should_be_product_amount_message()
self.should_be_product_amount_mini_basket()
self.should_be_confirmation_message_with_amount()
def should_be_adding_to_basket_button(self):
assert self.is_element_present(*ProductPageLocators.ADD_TO_BASKET_BUTTON), \
"\"Add to basket\" button is Not exist"
def add_book_to_basket(self):
basket_button = self.browser.find_element(*ProductPageLocators.ADD_TO_BASKET_BUTTON)
basket_button.click()
def should_be_confirm_messages(self, timeout=10):
self.browser.implicitly_wait(timeout)
assert self.is_element_present(
*ProductPageLocators.MESSAGE_PRODUCT), "One of confirm messages are Not displayed"
assert self.is_element_present(
*ProductPageLocators.MESSAGE_BENEFIT), "One of confirm messages are Not displayed"
assert self.is_element_present(
*ProductPageLocators.MESSAGE_PAYMENT), "Payment confirm messages is Not displayed"
def should_be_book_name_in_confirm_message(self, timeout=10):
self.browser.implicitly_wait(timeout)
book_name = self.browser.find_element(*ProductPageLocators.PRODUCT_NAME)
book_name = book_name.text
book_name_confirm = self.browser.find_element(*ProductPageLocators.PRODUCT_NAME_CONFIRM)
book_name_confirm = book_name_confirm.text
assert book_name == book_name_confirm, \
f"Wrong product name, got '{book_name_confirm}' instead of '{book_name}'"
def should_be_confirmation_message_for_book(self):
book_name = self.browser.find_element(*ProductPageLocators.PRODUCT_NAME)
book_name = book_name.text
message = self.browser.find_element(*ProductPageLocators.MESSAGE_PRODUCT)
message = message.text
assert message == f"{book_name} has been added to your basket.", \
f"Wrong confirm book payment message, got {message}"
def should_be_benefit_offer(self):
message = self.browser.find_element(*ProductPageLocators.MESSAGE_BENEFIT)
message = message.text
assert message == "Your basket now qualifies for the Deferred benefit offer offer.", \
f"Wrong benefit message, got {message} instead of " \
f"\"Your basket now qualifies for the Deferred benefit offer offer.\""
def should_be_product_amount_message(self):
amount = self.browser.find_element(*ProductPageLocators.PRODUCT_PAYMENT)
amount = amount.text
amount_confirm = self.browser.find_element(*ProductPageLocators.PRODUCT_PAYMENT_CONFIRM)
amount_confirm = amount_confirm.text
assert amount_confirm == amount, \
f"Wrong product amount in message, got '{amount_confirm}' instead of '{amount}'"
def should_be_product_amount_mini_basket(self):
amount_basket = self.browser.find_element(*ProductPageLocators.PRODUCT_PAYMENT_BASKET_MINI)
amount_basket = amount_basket.text
amount = self.browser.find_element(*ProductPageLocators.PRODUCT_PAYMENT)
amount = amount.text
assert amount in amount_basket, \
f"Wrong product amount in basket, expected {amount} instead of {amount_basket}"
def should_be_confirmation_message_with_amount(self):
message = self.browser.find_element(*ProductPageLocators.MESSAGE_PAYMENT)
message = message.text
amount = self.browser.find_element(*ProductPageLocators.PRODUCT_PAYMENT)
amount = amount.text
assert f"Your basket total is now {amount}" in message, \
f"Wrong confirm amount message, got '{message}'"
|
989,763 | 09bb6df1552f232aa65b8fc06ebc4c490a86861f | from Tensor import Tensor
from Shape import Shape
from typing import List
from enum import Enum
import numpy as np
class Layer:
"""
Layer interface.
"""
# def get_input_shape(self) -> Shape:
# """
# Returns the shape of the input to this layer.
# """
# pass
#
# def set_input_shape(self, input_shape: Shape):
# pass
#
# def get_output_shape(self) -> Shape:
# """
# Returns the shape of the output of this layer.
# """
# pass
def forward(self, in_tensors: List[Tensor], out_tensors: List[Tensor]):
"""
Use elements of in_tensors to calculate elements in out_tensors.
:param in_tensors: List of input tensors.
:param out_tensors: The tensors after going through this layer.
"""
pass
def backward(self, out_tensors: List[Tensor], in_tensors: List[Tensor]):
"""
Use deltas of out_tensors to calculate deltas of in_tensors.
:param in_tensors: List of incoming tensors.
:param out_tensors: List of outgoing tensors.
"""
pass
def calculate_delta_weights(self, out_tensors: List[Tensor], in_tensors: List[Tensor]) -> List[Tensor]:
"""
Use elements of in_tensors and deltas of out_tensors to calculate delta_weights
:param out_tensors: a list of incoming tensors
:param in_tensors: a list of outgoing tensors
"""
return None
def update_parameter(self, parameter: List[Tensor]):
pass
class InputLayer:
"""
InputLayer Interface.
"""
# def get_output_shape(self) -> Shape:
# pass
def forward(self, raw_data):
pass
class FullyConnectedLayer(Layer):
"""
A fully connected layer.
"""
def __init__(self, nb_neurons: int):
self.nb_neurons = nb_neurons
self.biases = Tensor(shape=Shape([1, self.nb_neurons]))
self.weights = None
self.out_shape = None
# def set_input_shape(self, input_shape: Shape):
# self.in_shape = input_shape
# self.weights = Tensor(Shape([self.nb_neurons, input_shape.size()]))
#
# def get_input_shape(self) -> Shape:
# return self.in_shape
#
# def get_output_shape(self) -> Shape:
# return self.out_shape
def forward(self, in_tensors: List[Tensor], out_tensors: List[Tensor]):
if self.out_shape is None:
self.out_shape = Shape([in_tensors[0].get_shape().axis[0], self.nb_neurons])
if self.weights is None:
self.weights = Tensor(shape=Shape([in_tensors[0].get_shape().axis[1], self.nb_neurons]))
for i in range(len(in_tensors)):
if i >= len(out_tensors):
out_tensors.append(Tensor(self.out_shape))
out_tensors[i].elements = in_tensors[i].elements.dot(self.weights.elements) + self.biases.elements
def backward(self, out_tensors: List[Tensor], in_tensors: List[Tensor]):
for i in range(len(in_tensors)):
in_tensors[i].deltas = np.dot(out_tensors[i].deltas, self.weights.elements.transpose())
def calculate_delta_weights(self, out_tensors: List[Tensor], in_tensors: List[Tensor]) -> List[Tensor]:
batch_size = in_tensors[0].elements.shape[0]
dw = np.dot(in_tensors[0].elements.transpose(), out_tensors[0].deltas) / float(batch_size)
db = np.dot(np.ones((1, batch_size)), out_tensors[0].deltas) / float(batch_size)
return [Tensor(elements=dw), Tensor(elements=db)]
def update_parameter(self, parameter: List[Tensor]):
self.weights -= parameter[0]
self.biases -= parameter[1]
class Padding(Enum):
"""
Padding used in Convolutional Layer.
"""
NONE = 0
HALF = 1
FULL = 2
class Conv2DLayer(Layer):
"""
A convolutional layer.
"""
def __init__(self, kernel_tensor: Tensor, padding: Padding):
"""
To-Do: Extend Signature according to slides.
:param kernel_tensor: 4-dim tensor that forms the weights of this layer
"""
self._kernel_tensor = kernel_tensor
self._padding = padding
def forward(self, in_tensors: List[Tensor], out_tensors: List[Tensor]):
"""
Y = InputTensor * KernelTensor + Bias, where '*' is the convolution operator
:param in_tensors:
:param out_tensors:
:return:
"""
pass
def backward(self, out_tensors: List[Tensor], in_tensors: List[Tensor]):
# todo
pass
def calculate_delta_weights(self, out_tensors: List[Tensor], in_tensors: List[Tensor]):
# todo
pass
|
989,764 | 9fa9c1d918a0c634baf5be4104197bc477961c08 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2021 Recurve Analytics, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from matplotlib.ticker import AutoMinorLocator
import matplotlib.pyplot as plt
import re
import seaborn as sns
import numpy as np
from .settings import ACC_COMPONENTS_ELECTRICITY
__all__ = ("plot_results",)
def plot_results(outputs_table_totals, elec_benefits, gas_benefits):
"""Generate a series of plots based on the results of the FlexValueRun
Parameters
----------
outputs_table_totals: pd.DataFrame
A table with summarized outputs including TRC and PAC, total costs,
and GHG impacts summed across all measure/project/portfolio entries.
The TRC and PAC values are then recalculated based on the summed benefits
and costs.
elec_benefits: pd.DataFrame
Returns a year-month average daily load shape for each
measure/project/portoflio, concatanated into a single dataframe
gas_benefits: float
The sum of all gas benefits across all measure/project/portfolio entries.
"""
summer_months = [6, 7, 8, 9]
shoulder_months = [3, 4, 5, 10]
winter_months = [11, 12, 1, 2]
peak_hours = [16, 17, 18, 19, 20]
pct_hours_in_summer = 2928 / 8760
pct_hours_in_shoulder = 2952 / 8760
pct_hours_in_winter = 2880 / 8760
trc_costs_record = outputs_table_totals["TRC Costs ($)"]
pac_costs_record = outputs_table_totals["PAC Costs ($)"]
trc_record = outputs_table_totals["TRC"]
pac_record = outputs_table_totals["PAC"]
lifecycle_net_mwh = outputs_table_totals["Electricity Lifecycle Net Savings (MWh)"]
lifecycle_net_therms = outputs_table_totals["Gas Lifecycle Net Savings (Therms)"]
lifecycle_net_ghg = outputs_table_totals["Total Lifecycle GHG Savings (Tons)"]
# Getting variables for plots
elec_benefits_cols = (
["hourly_savings"] + ACC_COMPONENTS_ELECTRICITY + ["av_csts_levelized"]
)
elec_benefits_hour_month_year = (
elec_benefits.groupby(["hour_of_day", "year", "month"])
.agg(
{
**{component: "sum" for component in ACC_COMPONENTS_ELECTRICITY},
**{
"hourly_savings": "sum",
"marginal_ghg": "sum",
"av_csts_levelized": "mean",
},
}
)
.reset_index()
)
total_benefits = list(
elec_benefits_hour_month_year.groupby(["hour_of_day"])["total"].sum()
)
summer_benefits = list(
elec_benefits_hour_month_year[
(elec_benefits_hour_month_year["month"].isin(summer_months))
]
.groupby(["hour_of_day"])["total"]
.sum()
)
summer_peak_benefits = elec_benefits_hour_month_year["total"][
(elec_benefits_hour_month_year["month"].isin(summer_months))
& (elec_benefits_hour_month_year["hour_of_day"].isin(peak_hours))
].sum()
shoulder_benefits = list(
elec_benefits_hour_month_year[
(elec_benefits_hour_month_year["month"].isin(shoulder_months))
]
.groupby(["hour_of_day"])["total"]
.sum()
)
winter_benefits = list(
elec_benefits_hour_month_year[
(elec_benefits_hour_month_year["month"].isin(winter_months))
]
.groupby(["hour_of_day"])["total"]
.sum()
)
total_savings = list(
elec_benefits_hour_month_year.groupby(["hour_of_day"])["hourly_savings"].sum()
)
summer_savings = list(
elec_benefits_hour_month_year[
(elec_benefits_hour_month_year["month"].isin(summer_months))
]
.groupby(["hour_of_day"])["hourly_savings"]
.sum()
)
shoulder_savings = list(
elec_benefits_hour_month_year[
((elec_benefits_hour_month_year["month"].isin(shoulder_months)))
]
.groupby(["hour_of_day"])["hourly_savings"]
.sum()
)
summer_peak_savings = elec_benefits_hour_month_year["hourly_savings"][
(elec_benefits_hour_month_year["month"].isin(summer_months))
& (elec_benefits_hour_month_year["hour_of_day"].isin(peak_hours))
].sum()
winter_savings = list(
elec_benefits_hour_month_year[
(elec_benefits_hour_month_year["month"].isin(winter_months))
]
.groupby(["hour_of_day"])["hourly_savings"]
.sum()
)
total_av_csts_avg = list(
elec_benefits_hour_month_year.groupby(["hour_of_day"])[
"av_csts_levelized"
].mean()
)
summer_av_csts_avg = list(
pct_hours_in_summer
* elec_benefits_hour_month_year[
(elec_benefits_hour_month_year["month"].isin(summer_months))
]
.groupby(["hour_of_day"])["av_csts_levelized"]
.mean()
)
summer_peak_av_csts_avg = elec_benefits_hour_month_year["av_csts_levelized"][
(elec_benefits_hour_month_year["month"].isin(summer_months))
& (elec_benefits_hour_month_year["hour_of_day"].isin(peak_hours))
].mean()
shoulder_av_csts_avg = list(
pct_hours_in_shoulder
* elec_benefits_hour_month_year[
((elec_benefits_hour_month_year["month"].isin(shoulder_months)))
]
.groupby(["hour_of_day"])["av_csts_levelized"]
.mean()
)
winter_av_csts_avg = list(
pct_hours_in_winter
* elec_benefits_hour_month_year[
(elec_benefits_hour_month_year["month"].isin(winter_months))
]
.groupby(["hour_of_day"])["av_csts_levelized"]
.mean()
)
elec_benefits_sum_by_hod = (
elec_benefits[elec_benefits_cols].groupby(elec_benefits["hour_of_day"]).sum()
)
elec_benefits_hoy = (
elec_benefits[elec_benefits_cols]
.groupby(elec_benefits["hour_of_year"])
.sum()
.cumsum()
.reset_index()
)
sav_avcsts_288 = (
elec_benefits.groupby(["hour_of_day", "month"])
.agg(
{
**{component: "sum" for component in ACC_COMPONENTS_ELECTRICITY},
**{
"hourly_savings": "sum",
"marginal_ghg": "sum",
"av_csts_levelized": "mean",
},
}
)
.reset_index()
)
sav_avcsts_288 = sav_avcsts_288[
["hour_of_day", "month", "hourly_savings", "total", "marginal_ghg"]
]
ghgsav = sav_avcsts_288.pivot("hour_of_day", "month", "marginal_ghg")
sav = sav_avcsts_288.pivot("hour_of_day", "month", "hourly_savings")
avcsts = sav_avcsts_288.pivot("hour_of_day", "month", "total")
# savings load shape plot
fig0, (ax1, ax2, ax3) = plt.subplots(
1, 3, figsize=(18, 5), sharex=True, sharey=True
)
plt.subplots_adjust(wspace=0, hspace=0)
axs = [ax1, ax2, ax3]
hod = elec_benefits_sum_by_hod.index
legend_labels1 = ["Summer"]
legend_labels2 = ["Shoulder"]
legend_labels3 = ["Winter"]
ax1.plot(
hod,
summer_savings,
c="firebrick",
linewidth=5,
marker="$\u25EF$",
markersize=13,
linestyle="-",
)
ax2.plot(
hod,
shoulder_savings,
c="royalblue",
linewidth=5,
marker="$\u2206$",
markersize=13,
linestyle="-",
)
ax3.plot(
hod,
winter_savings,
c="green",
linewidth=5,
marker="$\u25A1$",
markersize=13,
linestyle="-",
)
ax1.axhline(y=0, color="gray", linewidth=1, linestyle="--")
ax2.axhline(y=0, color="gray", linewidth=1, linestyle="--")
ax3.axhline(y=0, color="gray", linewidth=1, linestyle="--")
# Shade peak region
ax1.axvspan(16, 21, alpha=0.2, color="grey")
leg1 = ax1.legend(legend_labels1, fontsize=14, loc="upper left", frameon=False)
for line, text in zip(leg1.get_lines(), leg1.get_texts()):
text.set_color(line.get_color())
leg2 = ax2.legend(legend_labels2, fontsize=14, loc="upper left", frameon=False)
for line, text in zip(leg2.get_lines(), leg2.get_texts()):
text.set_color(line.get_color())
leg3 = ax3.legend(legend_labels3, fontsize=14, loc="upper left", frameon=False)
for line, text in zip(leg3.get_lines(), leg3.get_texts()):
text.set_color(line.get_color())
ax1.set_ylabel("Savings (MWh/hr)", size=16)
ax2.set_xlabel("Hour of Day", size=16)
if max(summer_savings + shoulder_savings + winter_savings) < 0:
ymax = 0
else:
ymax = max(summer_savings + shoulder_savings + winter_savings)
if min(summer_savings + shoulder_savings + winter_savings) > 0:
ymin = 0
else:
ymin = min(summer_savings + shoulder_savings + winter_savings)
# Tick and lebel parameters
ax1.set_ylim(ymin * 1.08, ymax * 1.08)
ax1.set_yticks(
np.arange(
ymin * 1.08,
ymax * 1.08,
step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),
)
)
ax2.set_yticks(
np.arange(
ymin * 1.08,
ymax * 1.08,
step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),
)
)
ax3.set_yticks(
np.arange(
ymin * 1.08,
ymax * 1.08,
step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),
)
)
ax1.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=14
)
ax2.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=14
)
ax3.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=14
)
ax1.yaxis.set_minor_locator(AutoMinorLocator())
ax1.set_xticks(np.arange(0, 24, step=4))
ax1.tick_params(
which="major", axis="x", direction="out", length=7, width=2, labelsize=14
)
ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)
ax1.xaxis.set_minor_locator(AutoMinorLocator())
ax2.tick_params(
which="major", axis="x", direction="out", length=7, width=2, labelsize=14
)
ax2.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)
ax2.xaxis.set_minor_locator(AutoMinorLocator())
ax3.tick_params(
which="major", axis="x", direction="out", length=7, width=2, labelsize=14
)
ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)
ax3.xaxis.set_minor_locator(AutoMinorLocator())
# Set plot title, size, and position
ax1.set_title("Seasonal Savings Load Shapes", size=18, loc="left").set_position(
[0, 1.03]
)
# benefits_seasonal_shape_plot
fig1, (ax1, ax2, ax3) = plt.subplots(
1, 3, figsize=(18, 5), sharex=True, sharey=True
)
plt.subplots_adjust(wspace=0, hspace=0)
axs = [ax1, ax2, ax3]
hod = elec_benefits_sum_by_hod.index
legend_labels1 = ["Summer"]
legend_labels2 = ["Shoulder"]
legend_labels3 = ["Winter"]
ax1.plot(
hod,
summer_benefits,
c="firebrick",
linewidth=5,
marker="$\u2B24$",
markersize=13,
linestyle=":",
)
ax2.plot(
hod,
shoulder_benefits,
c="royalblue",
linewidth=5,
marker="$\u25B2$",
markersize=13,
linestyle=":",
)
ax3.plot(
hod,
winter_benefits,
c="green",
linewidth=5,
marker="$\u25A0$",
markersize=13,
linestyle=":",
)
ax1.axhline(y=0, color="gray", linewidth=1, linestyle="--")
ax2.axhline(y=0, color="gray", linewidth=1, linestyle="--")
ax3.axhline(y=0, color="gray", linewidth=1, linestyle="--")
# Shade peak region
ax1.axvspan(16, 21, alpha=0.2, color="grey")
leg1 = ax1.legend(legend_labels1, fontsize=15, loc="upper left", frameon=False)
for line, text in zip(leg1.get_lines(), leg1.get_texts()):
text.set_color(line.get_color())
leg2 = ax2.legend(legend_labels2, fontsize=15, loc="upper left", frameon=False)
for line, text in zip(leg2.get_lines(), leg2.get_texts()):
text.set_color(line.get_color())
leg3 = ax3.legend(legend_labels3, fontsize=15, loc="upper left", frameon=False)
for line, text in zip(leg3.get_lines(), leg3.get_texts()):
text.set_color(line.get_color())
ax1.set_ylabel("TRC Benefits ($/hr)", size=16)
ax2.set_xlabel("Hour of Day", size=16)
if max(summer_benefits + shoulder_benefits + winter_benefits) < 0:
ymax = 0
else:
ymax = max(summer_benefits + shoulder_benefits + winter_benefits)
if min(summer_benefits + shoulder_benefits + winter_benefits) > 0:
ymin = 0
else:
ymin = min(summer_benefits + shoulder_benefits + winter_benefits)
# Tick and label parameters
ax1.set_ylim(ymin * 1.08, ymax * 1.08)
ax1.set_yticks(
np.arange(
ymin * 1.08,
ymax * 1.08,
step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),
)
)
ax2.set_yticks(
np.arange(
ymin * 1.08,
ymax * 1.08,
step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),
)
)
ax3.set_yticks(
np.arange(
ymin * 1.08,
ymax * 1.08,
step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),
)
)
ax1.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=14
)
ax2.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=14
)
ax3.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=14
)
ax1.yaxis.set_minor_locator(AutoMinorLocator())
ax1.set_xticks(np.arange(0, 24, step=4))
ax1.tick_params(
which="major", axis="x", direction="out", length=7, width=2, labelsize=14
)
ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)
ax1.xaxis.set_minor_locator(AutoMinorLocator())
ax2.tick_params(
which="major", axis="x", direction="out", length=7, width=2, labelsize=14
)
ax2.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)
ax2.xaxis.set_minor_locator(AutoMinorLocator())
ax3.tick_params(
which="major", axis="x", direction="out", length=7, width=2, labelsize=14
)
ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)
ax3.xaxis.set_minor_locator(AutoMinorLocator())
# Set plot title, size, and position
ax1.set_title(
"Seasonal TRC Benefits by Hour ($)", size=18, loc="left"
).set_position([0, 1.03])
# sum_hourly_plot
fig2 = plt.figure(figsize=(12, 7), dpi=250)
ax = fig2.gca()
colors = [
"royalblue",
"black",
"pink",
"firebrick",
"gray",
"darkviolet",
"darkorange",
"green",
"saddlebrown",
]
legend_labels = []
x = 1
while x <= len(ACC_COMPONENTS_ELECTRICITY[1:]):
if x == 1:
ax.bar(
hod,
elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],
color=colors[x - 1],
)
legend_labels.append(
re.findall(
".*Name: (.*),",
str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),
)[0]
)
x += 1
else:
ax.bar(
hod,
elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],
bottom=elec_benefits_sum_by_hod.iloc[:, 2 : x + 1].sum(axis=1),
color=colors[x - 1],
)
legend_labels.append(
re.findall(
".*Name: (.*),",
str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),
)[0]
)
x += 1
# Set x and y limits based on min and max values
ymax = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).max()
if elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min() > 0:
ymin = 0
else:
ymin = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min()
ax.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)
ax.set_ylim(ymin * 1.1, ymax * 1.08)
# Set x and y axis labels
ax.set_xlabel("Hour of Day", size=17, labelpad=5)
ax.set_ylabel("$ Avoided Costs", size=17)
# Set plot title, size, and position
ax.set_title(
"Sum of Electric Avoided Costs by Component and Hour of Day",
size=17,
loc="left",
)
# Tick and lebel parameters
ax.tick_params(bottom=True, top=False, left=True, right=False)
ax.set_xticks(np.arange(0, 24, step=4))
ax.set_yticks(
np.arange(
int(round(ymin * 1.1, 0)),
ymax * 1.08,
step=max(round(ymax - ymin, 2) / 5, int((round(ymax - ymin, 0)) / 4)),
)
)
ax.tick_params(
which="major", axis="x", direction="out", length=6, width=2, labelsize=14
)
ax.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=14
)
# Minor ticks
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
# Legend
plt.legend(
legend_labels,
bbox_to_anchor=(1, 1),
fontsize=12,
loc="upper left",
frameon=False,
)
# avoided_cost_summary_plot
fig3, (ax1, ax2, ax3) = plt.subplots(
3, 1, figsize=(6, 10), sharex=True, sharey=False
)
axs = [ax1, ax2, ax3]
hod = elec_benefits_sum_by_hod.index
legend_labels = ["Total", "Summer", "Shoulder", "Winter"]
ax1.plot(
hod,
total_benefits,
c="royalblue",
marker="$\u25EF$",
markersize=10,
linewidth=3,
linestyle="-",
)
ax1.plot(hod, summer_benefits, c="darkorchid", linewidth=1, linestyle="--")
ax1.plot(hod, shoulder_benefits, c="olivedrab", linewidth=1, linestyle=":")
ax1.plot(hod, winter_benefits, c="teal", linewidth=1, linestyle="-")
ax2.plot(
hod,
total_savings,
c="firebrick",
marker="$\u2206$",
markersize=10,
linewidth=3,
linestyle="-",
)
ax2.plot(hod, summer_savings, c="darkorchid", linewidth=1, linestyle="--")
ax2.plot(hod, shoulder_savings, c="olivedrab", linewidth=1, linestyle=":")
ax2.plot(hod, winter_savings, c="teal", linewidth=1, linestyle="-")
ax3.plot(
hod,
total_av_csts_avg,
c="green",
marker="$\u25A0$",
markersize=10,
linewidth=3,
linestyle="-",
)
ax3.plot(hod, summer_av_csts_avg, c="darkorchid", linewidth=1, linestyle="--")
ax3.plot(hod, shoulder_av_csts_avg, c="olivedrab", linewidth=1, linestyle=":")
ax3.plot(hod, winter_av_csts_avg, c="teal", linewidth=1, linestyle="-")
leg1 = ax1.legend(legend_labels, fontsize=11, loc="upper left", frameon=False)
for line, text in zip(leg1.get_lines(), leg1.get_texts()):
text.set_color(line.get_color())
leg2 = ax2.legend(legend_labels, fontsize=11, loc="upper left", frameon=False)
for line, text in zip(leg2.get_lines(), leg2.get_texts()):
text.set_color(line.get_color())
leg3 = ax3.legend(legend_labels, fontsize=11, loc="upper left", frameon=False)
for line, text in zip(leg3.get_lines(), leg3.get_texts()):
text.set_color(line.get_color())
ax3.set_xticks(np.arange(0, 24, step=4))
ax3.set_xlabel("Hour of Day", size=14, labelpad=5)
ax3.tick_params(
which="major", axis="x", direction="out", length=6, width=2, labelsize=12
)
ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)
ax3.xaxis.set_minor_locator(AutoMinorLocator())
ax1.set_ylabel("TRC Benefits ($)", size=14)
ax2.set_ylabel("Savings (MWh)", size=14)
ax3.set_ylabel("Av. Cost ($/MWh)", size=14)
if max(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) < 0:
ymax1 = 0
else:
ymax1 = max(
total_benefits + summer_benefits + shoulder_benefits + winter_benefits
)
if min(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) > 0:
ymin1 = 0
else:
ymin1 = min(
total_benefits + summer_benefits + shoulder_benefits + winter_benefits
)
if max(total_savings + summer_savings + shoulder_savings + winter_savings) < 0:
ymax2 = 0
else:
ymax2 = max(total_savings + summer_savings + shoulder_savings + winter_savings)
if min(total_savings + summer_savings + shoulder_savings + winter_savings) > 0:
ymin2 = 0
else:
ymin2 = min(total_savings + summer_savings + shoulder_savings + winter_savings)
if (
max(
total_av_csts_avg
+ summer_av_csts_avg
+ shoulder_av_csts_avg
+ winter_av_csts_avg
)
< 0
):
ymax3 = 0
else:
ymax3 = max(
total_av_csts_avg
+ summer_av_csts_avg
+ shoulder_av_csts_avg
+ winter_av_csts_avg
)
if (
min(
total_av_csts_avg
+ summer_av_csts_avg
+ shoulder_av_csts_avg
+ winter_av_csts_avg
)
> 0
):
ymin3 = 0
else:
ymin3 = min(
total_av_csts_avg
+ summer_av_csts_avg
+ shoulder_av_csts_avg
+ winter_av_csts_avg
)
# Tick and lebel parameters
ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)
ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)
ax3.set_ylim(ymin3 * 1.08, ymax3 * 1.08)
ax1.set_yticks(
np.arange(
ymin1 * 1.08,
ymax1 * 1.08,
step=max(round(ymax1 - ymin1, 3) / 5, int((round(ymax1 - ymin1, 0)) / 4)),
)
)
ax2.set_yticks(
np.arange(
ymin2 * 1.08,
ymax2 * 1.08,
step=max(round(ymax2 - ymin2, 3) / 5, int((round(ymax2 - ymin2, 0)) / 4)),
)
)
ax3.set_yticks(
np.arange(
ymin3 * 1.08,
ymax3 * 1.08,
step=max(round(ymax3 - ymin3, 3) / 5, int((round(ymax3 - ymin3, 0)) / 4)),
)
)
ax1.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=12
)
ax2.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=12
)
ax3.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=12
)
# Shade peak region
ax1.axvspan(16, 21, alpha=0.2, color="grey")
ax2.axvspan(16, 21, alpha=0.2, color="grey")
ax3.axvspan(16, 21, alpha=0.2, color="grey")
# Print key information
plt.annotate(
"Electric Benefits = $" + str(round(elec_benefits["total"].sum(), 2)),
xy=(350, 530),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
"Gas Benefits = $" + str(round(gas_benefits, 2)),
xy=(350, 505),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
"Total Benefits = $"
+ str(round(elec_benefits["total"].sum() + gas_benefits, 2)),
xy=(350, 480),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
"TRC Costs = $" + str(trc_costs_record),
xy=(350, 455),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
"PAC Costs = $" + str(pac_costs_record),
xy=(350, 430),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
"TRC = " + str(trc_record),
xy=(350, 405),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
"PAC = " + str(pac_record),
xy=(350, 380),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
"Net Lifecycle Electric Savings = " + str(lifecycle_net_mwh) + " MWh",
xy=(350, 335),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
"Net Lifecycle Gas Savings = " + str(lifecycle_net_therms) + " Therms",
xy=(350, 310),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
"Net Lifecycle GHG Savings = " + str(lifecycle_net_ghg) + " Tons",
xy=(350, 285),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
str(round(100 * ((summer_peak_savings) / sum(total_savings)), 1))
+ "% MWh savings during summer peak period",
xy=(350, 260),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
str(round(100 * ((summer_peak_benefits) / sum(total_benefits)), 1))
+ "% Electric TRC benefits from summer peak period",
xy=(350, 235),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
"Electric Benefits per MWh = $"
+ str(round(elec_benefits["total"].sum() / lifecycle_net_mwh, 2)),
xy=(350, 210),
xycoords="axes points",
fontsize=18,
)
plt.annotate(
"Typical Avoided Cost per MWh = $"
+ str(round(elec_benefits["av_csts_levelized"].mean(), 2)),
xy=(350, 145),
xycoords="axes points",
fontsize=18,
)
# Set plot title, size, and position
ax1.set_title(
"Savings and Avoided Cost Profiles", size=16, loc="left"
).set_position([0, 1.03])
# marginal_ghg_savings_plot
cmp = sns.diverging_palette(16, 260, l=35, n=25, as_cmap=True)
fig4 = plt.figure(figsize=(8, 6), dpi=100)
ax1 = fig4.gca()
y_ticks = [
0,
"",
2,
"",
4,
"",
6,
"",
8,
"",
10,
"",
12,
"",
14,
"",
16,
"",
18,
"",
20,
"",
22,
]
hmp = sns.heatmap(ghgsav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)
ax1.set_xlabel("Month", size=15)
ax1.set_ylabel("Hour of Day", size=15)
ax1.tick_params(
which="major", axis="x", direction="out", length=6, width=2, labelsize=13
)
ax1.tick_params(
which="major",
axis="y",
direction="out",
length=6,
width=2,
labelsize=13,
rotation=0,
)
ax1.set_title("Electric GHG Savings by Month and Hour", size=15, loc="left", pad=8)
cbar1 = hmp.collections[0].colorbar
cbar1.ax.tick_params(labelsize=14)
plt.annotate("Sum GHG", xy=(370, 352), xycoords="axes points", fontsize=12)
plt.annotate("Savings (Tons)", xy=(370, 336), xycoords="axes points", fontsize=12)
# month_hour_savings_benefits_plot
fig5, (ax1, ax2) = plt.subplots(1, 2, figsize=(21, 10), dpi=200)
y_ticks = [
0,
"",
2,
"",
4,
"",
6,
"",
8,
"",
10,
"",
12,
"",
14,
"",
16,
"",
18,
"",
20,
"",
22,
]
fleft = sns.heatmap(sav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)
fright = sns.heatmap(avcsts, cmap=cmp, ax=ax2, yticklabels=y_ticks, center=0.00)
ax1.set_xlabel("Month", size=22)
ax1.set_ylabel("Hour of Day", size=22)
ax2.set_xlabel("Month", size=22)
ax2.set_ylabel("Hour of Day", size=22)
ax1.tick_params(
which="major", axis="x", direction="out", length=6, width=2, labelsize=18
)
ax1.tick_params(
which="major",
axis="y",
direction="out",
length=6,
width=2,
labelsize=18,
rotation=0,
)
ax2.tick_params(
which="major", axis="x", direction="out", length=6, width=2, labelsize=18
)
ax2.tick_params(
which="major",
axis="y",
direction="out",
length=6,
width=2,
labelsize=18,
rotation=0,
)
ax1.set_title(
"MWh Savings by Month and Hour", size=24, loc="left", pad=15
).set_position([0, 1.1])
ax2.set_title("$ Benefits by Month and Hour", size=24, loc="left", pad=15)
fig4.tight_layout(pad=2.0)
cbar1 = fleft.collections[0].colorbar
cbar1.ax.tick_params(labelsize=18)
cbar2 = fright.collections[0].colorbar
cbar2.ax.tick_params(labelsize=18)
plt.annotate("Sum MWh", xy=(-200, 585), xycoords="axes points", fontsize=20)
plt.annotate("Savings", xy=(-193, 560), xycoords="axes points", fontsize=20)
plt.annotate("Sum TRC", xy=(435, 585), xycoords="axes points", fontsize=20)
plt.annotate("Benefits", xy=(442, 560), xycoords="axes points", fontsize=20)
# savings_benefits_cumulative_sum_plot
fig6 = plt.figure(figsize=(12, 7), dpi=250)
ax1 = fig6.gca()
ax1.plot(
elec_benefits_hoy["hour_of_year"],
elec_benefits_hoy["hourly_savings"],
color="royalblue",
linewidth=3,
)
ax2 = ax1.twinx()
ax2.plot(
elec_benefits_hoy["hour_of_year"],
elec_benefits_hoy["total"],
color="firebrick",
linewidth=3,
linestyle="--",
)
ax2.axhline(y=0, color="gray", linewidth=0.7, linestyle="--")
# Set x and y limits based on min and max values
if (
elec_benefits_hoy["hourly_savings"].max() >= 0
and elec_benefits_hoy["total"].max() >= 0
):
ymax1 = elec_benefits_hoy["hourly_savings"].max()
ymax2 = elec_benefits_hoy["total"].max()
elif (
elec_benefits_hoy["hourly_savings"].max() < 0
and elec_benefits_hoy["total"].max() < 0
):
ymax1 = 0
ymax2 = 0
elif (
elec_benefits_hoy["hourly_savings"].max() < 0
and elec_benefits_hoy["total"].max() > 0
):
ymax1 = (
-1
* elec_benefits_hoy["hourly_savings"].min()
* (
elec_benefits_hoy["total"].max()
/ (elec_benefits_hoy["total"].max() - elec_benefits_hoy["total"].min())
)
/ (
1
- elec_benefits_hoy["total"].max()
/ (elec_benefits_hoy["total"].max() - elec_benefits_hoy["total"].min())
)
)
ymax2 = elec_benefits_hoy["total"].max()
else:
ymax1 = 0
ymax2 = (
-1
* elec_benefits_hoy["total"].min()
* (
elec_benefits_hoy["hourly_savings"].max()
/ (
elec_benefits_hoy["hourly_savings"].max()
- elec_benefits_hoy["hourly_savings"].min()
)
)
)
if (
elec_benefits_hoy["hourly_savings"].min() <= 0
and elec_benefits_hoy["total"].min() <= 0
):
ymin1 = elec_benefits_hoy["hourly_savings"].min()
ymin2 = elec_benefits_hoy["total"].min()
elif (
elec_benefits_hoy["hourly_savings"].min() > 0
and elec_benefits_hoy["total"].min() > 0
):
ymin1 = 0
ymin2 = 0
elif (
elec_benefits_hoy["hourly_savings"].min() > 0
and elec_benefits_hoy["total"].min() < 0
):
ymin1 = (
-1
* elec_benefits_hoy["hourly_savings"].max()
* (
elec_benefits_hoy["total"].min()
/ (elec_benefits_hoy["total"].min() - elec_benefits_hoy["total"].max())
)
/ (
1
- elec_benefits_hoy["total"].min()
/ (elec_benefits_hoy["total"].min() - elec_benefits_hoy["total"].max())
)
)
ymin2 = elec_benefits_hoy["total"].min()
else:
ymin1 = 0
ymin2 = (
-1
* elec_benefits_hoy["total"].min()
* (
elec_benefits_hoy["hourly_savings"].min()
/ (
elec_benefits_hoy["hourly_savings"].min()
- elec_benefits_hoy["hourly_savings"].min()
)
)
)
# Set x and y axis limits
ax1.set_xlim(-340, 9000)
ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)
ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)
# Set x and y axis labels
ax1.set_xlabel("Hour of Year", size=17, labelpad=5)
ax1.set_ylabel("Net Lifecycle Savings (MWh)", size=17)
ax2.set_ylabel("$ TRC Benefits", size=17, rotation=-90, labelpad=20)
# Set plot title, size, and position
ax1.set_title(
"Cumulative Savings and TRC Benefits by Hour of Year",
size=17,
loc="left",
pad=8,
)
# Tick and lebel parameters
ax1.set_xticks(np.arange(0, 8760, step=1000))
ax1.set_yticks(
np.arange(
int(round(ymin1 * 1.1, 0)),
ymax1 * 1.08,
step=max(round(ymax1 - ymin1, 2) / 5, int((round(ymax1 - ymin1, 0)) / 4)),
)
)
ax1.tick_params(
which="major", axis="x", direction="out", length=6, width=2, labelsize=14
)
ax1.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=14
)
ax2.set_xticks(np.arange(0, 8760, step=1000))
ax2.set_yticks(
np.arange(
int(round(ymin2 * 1.1, 0)),
ymax2 * 1.08,
step=max(round(ymax2 - ymin2, 2) / 5, int((round(ymax2 - ymin2, 0)) / 4)),
)
)
ax2.tick_params(
which="major", axis="x", direction="out", length=6, width=2, labelsize=14
)
ax2.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=14
)
# Minor ticks
ax1.xaxis.set_minor_locator(AutoMinorLocator())
ax1.yaxis.set_minor_locator(AutoMinorLocator())
ax2.yaxis.set_minor_locator(AutoMinorLocator())
# Legend
ax1.legend(
["Savings"],
fontsize=12,
bbox_to_anchor=(0.02, 1),
loc="upper left",
frameon=False,
)
ax2.legend(
["TRC Beneftis"],
fontsize=12,
bbox_to_anchor=(0.02, 0.95),
loc="upper left",
frameon=False,
)
fig7 = plt.figure(figsize=(12, 7), dpi=250)
ax = fig7.gca()
colors1 = [
"black",
"royalblue",
"black",
"pink",
"firebrick",
"gray",
"darkviolet",
"darkorange",
"green",
"saddlebrown",
]
legend_labels2 = []
ax.plot(
elec_benefits_hoy["hour_of_year"],
elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[0]],
color=colors1[0],
linewidth=3,
)
legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[0])
x = 1
while x <= len(ACC_COMPONENTS_ELECTRICITY) - 2:
ax.plot(
elec_benefits_hoy["hour_of_year"],
elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[x]],
color=colors1[x],
)
legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[x])
x += 1
# Set x and y limits based on min and max values
if max(elec_benefits_hoy.iloc[:, 2:x].max()) < 0:
ymax = 0
else:
ymax = max(elec_benefits_hoy.iloc[:, 2:x].max())
if min(elec_benefits_hoy.iloc[:, 2:x].min()) > 0:
ymin = 0
else:
ymin = min(elec_benefits_hoy.iloc[:, 2:x].min())
ax.set_xlim(-340, 9000)
ax.set_ylim(ymin * 1.1, ymax * 1.08)
# Set x and y axis labels
ax.set_xlabel("Hour of Year", size=17, labelpad=5)
ax.set_ylabel("$ TRC Benefits", size=17)
# Set plot title, size, and position
ax.set_title(
"Sum of Avoided Costs by Component and Hour of Day", size=17, loc="left"
)
# Tick and lebel parameters
ax.set_xticks(np.arange(0, 8760, step=1000))
ax.set_yticks(
np.arange(
int(round(ymin * 1.1, 0)),
ymax * 1.08,
step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),
)
)
ax.tick_params(
which="major", axis="x", direction="out", length=6, width=2, labelsize=14
)
ax.tick_params(
which="major", axis="y", direction="out", length=6, width=2, labelsize=14
)
# Minor ticks
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
# Legend
plt.legend(
legend_labels2,
bbox_to_anchor=(1, 1),
fontsize=12,
loc="upper left",
frameon=False,
)
|
989,765 | a1b3181da147dcfad0661f4441c4e379b3c861fe | import numpy as np
import pandas as pd
import nrrd
import nibabel as nib
import os
import pickle
import random
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from cv2 import resize
from sklearn import metrics
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import BCELoss
import torchvision.transforms as transforms
import torch.optim as optim
from torch.optim import SGD, Adam
from scipy.stats import skew, kurtosis
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
if __name__ == "__main__":
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataset = "test"
info_file = "../../data/" + dataset + ".txt"
path = "../../data/processed/" + dataset + "-whole"
info_data_frame = pd.read_csv(info_file, header=None)
# Feature extractor
cutoff = 4
embedding_size = 512
histogram_size = 14
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
pretrained_model = torchvision.models.resnet34(pretrained=True)
for param in pretrained_model.parameters():
param.requires_grad = False
pretrained_model.fc = nn.Identity()
pretrained_model = pretrained_model.to(device)
pretrained_model.eval()
files = info_data_frame.iloc[:, 0]
labels = info_data_frame.iloc[:, 1]
X = []
y = []
for file_name, label in zip(files, labels):
CT_name = "/"+ file_name.strip('.nii.gz') + '.npy'
file_path = path + CT_name
img = np.load(file_path)
color_data = img[img > 0].flatten()
color_distribution = np.histogram(color_data, bins=10, range=(0, 1))[0] / color_data.size
distribution_skew = skew(color_data)
distribution_kurtosis = kurtosis(color_data)
distribution_mean = np.mean(color_data)
distribution_stdev = np.sqrt(np.mean((color_data - distribution_mean)**2))
color_distribution = np.append(color_distribution, [distribution_kurtosis,
distribution_skew,
distribution_mean,
distribution_stdev])
img = torch.tensor(img)
img = img.repeat(1, 3, 1, 1)
num_slices = img.shape[0]
for slice_idx in range(cutoff, num_slices - cutoff):
img[slice_idx, :, :, :] = normalize(img[slice_idx, :, :, :])
img = img.to(device)
extracted_features = pretrained_model(img).mean(dim=0)
# Free GPU
del img
torch.cuda.empty_cache()
features = np.append(extracted_features.cpu().detach().numpy(), color_distribution)
X.append(features)
y.append(label)
X = np.vstack(X)
y = np.array(y)
data_frame = pd.DataFrame({"file":files,
"label":y
})
print(data_frame.head())
embedded_images = pd.DataFrame(X)
embedded_images.columns = (["v" + str(i) for i in range(embedding_size)] +
["h" + str(i) for i in range(histogram_size)])
data_frame = pd.concat([data_frame, embedded_images], axis=1)
data_frame.to_csv("../../data/embedded-datasets/" + dataset + "_embedded_dataset.csv", index=False) |
989,766 | 00e20c4c46cf245850b535a15fe59c26a74fd1aa | import json
from bs4 import BeautifulSoup
def convertHtmlfile(name):
f = open("./html_files/{}.html".format(name))
soup = BeautifulSoup(f, features="html.parser")
table = soup.find(id='grd_itemlist')
rows = table.findChildren('tr', recursive=False)
head_row = rows[0]
headers = []
for header in head_row.stripped_strings:
title = header.replace('.', ' ')
headers.append(title)
# Body rows
def create_dict_row(row_number):
row = rows[row_number]
row_list = []
for text in row.stripped_strings:
row_list.append(text)
data = dict(zip(headers, row_list))
return data
steel_mines_data = []
for i in range(len(rows)):
j = i + 1
try:
dict_data = create_dict_row(j)
steel_mines_data.append(dict_data)
except IndexError:
break
with open('./json_files/{}.json'.format(name), 'w') as fp:
json.dump(steel_mines_data, fp)
|
989,767 | 721cee04f5a2af725df9f4da1df7adb2356cc089 | from __future__ import print_function
'''
( 'ptr_ext_lib' , ctypes.c_void_p ), # @ b6b3ef68 /usr/lib/libQtCore.so.4.7.2
local python
b6c63000-b6efa000 r-xp 00000000 08:04 3426931 /usr/lib/i386-linux-gnu/libQtCore.so.4.7.4
b6efa000-b6f01000 r--p 00296000 08:04 3426931 /usr/lib/i386-linux-gnu/libQtCore.so.4.7.4
b6f01000-b6f04000 rw-p 0029d000 08:04 3426931 /usr/lib/i386-linux-gnu/libQtCore.so.4.7.4
'''
import struct
import sys
import ctypes
import os
offset = 0xb6b3ef68 - 0xb68b1000
from haystack.mappings.process import make_process_memory_handler
from haystack.reverse import context
class Dummy():
pass
class Dl_info(ctypes.Structure):
_fields_ = [
# Pathname of shared object that contains address
('dli_fname', ctypes.c_char_p),
# Address at which shared object is loaded
('dli_fbase', ctypes.c_void_p),
# Name of nearest symbol with address lower than addr
('dli_sname', ctypes.c_char_p),
# Exact address of symbol named in dli_sname
('dli_saddr', ctypes.c_void_p)
]
def getMappings():
me = Dummy()
me.pid = os.getpid()
return make_process_memory_handler(me)
def test1():
info = Dl_info()
#handle = libdl.dlopen('/usr/lib/libQtCore.so.4.7.2')
libname = '/usr/lib/libQtCore.so.4.7.2'
libname2 = libname[
libname.rindex(
os.path.sep) +
1:libname.index('.so') +
3]
print(libname2)
libqt = ctypes.CDLL(libname2)
localmappings = getMappings()
qtmaps = [
m for m in localmappings if m.pathname is not None and libname2 in m.pathname]
myvaddr = qtmaps[0].start + offset
ret = libdl.dladdr(myvaddr, ctypes.byref(info))
print('filling dlinfo with', libname, info)
signed_addr = libdl.dlsym(0, 'dladdr', 'xxx')
vaddr_dladdr = struct.unpack('L', struct.pack('l', signed_addr))[0]
ret = libdl.dladdr(vaddr_dladdr, ctypes.byref(info))
print('dlsym test', info.dli_sname.string, info.dli_sname.string == 'dladdr')
def test2():
# now for the real deal.
# we need to emulate ELF dl-addr.c
print('')
#
# define DL_LOOKUP_ADDRESS(addr) _dl_lookup_address (addr)
libssl = ctypes.CDLL('/usr/lib/libssl.so.0.9.8')
localmappings = getMappings()
print('libssl.ssl3_read by id() is @%x' % (id(libssl.ssl3_read)))
print(localmappings.get_mapping_for_address(id(libssl.ssl3_read)))
print('')
signed_addr = libssl.dlsym(libssl._handle, 'ssl3_read', 'xxx')
fnaddr = struct.unpack('L', struct.pack('l', signed_addr))[0]
print('libssl.ssl3_read by dlsym is @%x' % (fnaddr))
print(localmappings.get_mapping_for_address(fnaddr))
info = Dl_info()
ret = libdl.dladdr(fnaddr, ctypes.byref(info))
print('dladdr test', info.dli_sname.string, info.dli_sname.string == 'ssl3_read')
'''
libssl.ssl3_read by id() is @9528ecc
0x0924a000 0x095d1000 rw-p 0x00000000 00:00 0000000 [heap]
libssl.ssl3_read by dlsym is @b6ddd9b0
0xb6dc2000 0xb6e0c000 r-xp 0x00000000 08:04 7739090 /lib/libssl.so.0.9.8
dladdr test ssl3_read True
'''
print('')
# testing low level
# low level call
#(const void *address, Dl_info *info,
# struct link_map **mapp, const ElfW(Sym) **symbolp)
print(libdl._dl_addr(fnaddr, ctypes.byref(info), 0, 0))
# iterate the struct link_map
# for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
# for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l; l = l->l_next)
# if (addr >= l->l_map_start && addr < l->l_map_end
# && (l->l_contiguous || _dl_addr_inside_object (l, addr)))
return
def getname(fnaddr):
info = Dl_info()
ret = libdl.dladdr(fnaddr, ctypes.byref(info))
# print 'dladdr test', info.dli_sname.string, info.dli_sname.string ==
# 'ssl3_read'
return info.dli_sname.string, info.dli_saddr
def test3():
''' reverse fn pointer names by trying to rebase the ptr value to a local ld_open '''
# load local memdump
# map all librairies
# go through all pointers in librairies
# try to dl_addr the pointers by rebasing.
#from haystack import dump_loader
#dump = memory_loader.load('/home/jal/outputs/dumps/ssh/ssh.1')
IGNORES = ['None', '[heap]', '[stack]', '[vdso]']
dumpname = '/home/jal/outputs/dumps/ssh/ssh.1' # 23418'
#dumpname = '/home/jal/outputs/dumps/skype/skype.1/skype.1.a'
print('[+] load context', dumpname)
ctx = context.get_context(dumpname)
mappings = ctx.mappings
ldso = dict()
for m in mappings:
if m.pathname not in IGNORES and m.pathname not in ldso:
try:
ldso[m.pathname] = ctypes.CDLL(m.pathname)
except OSError as e:
IGNORES.append(m.pathname)
print('[+] context loaded')
# mmap_libdl = [ m for m in _memory_handler if 'ld-2.13' in m.pathname ] #and 'x' in m.permissions]
#hptrs = ctx._pointers_values_heap
# print '[+] %d pointers in heap to heap '%( len(hptrs) )
# looking in [heap] pointing to elsewhere
all_ptrs = ctx.listPointerValueInHeap()
print('[+] %d pointers in heap to elsewhere ' % (len(all_ptrs)))
localmappings = getMappings()
#crypto = _memory_handler.get_mapping('/lib/i386-linux-gnu/libcrypto.so.1.0.0')
# for lm in crypto:
# print lm
# print '---'
#crypto = localmappings.get_mapping('/lib/i386-linux-gnu/libcrypto.so.1.0.0')
# for lm in crypto:
# print lm
# return
for ptr in set(all_ptrs):
# get dump mmap
m = mappings.get_mapping_for_address(ptr)
if m.pathname not in IGNORES:
# find the right localmmap
localmaps = localmappings._get_mapping(m.pathname)
found = False
for localm in localmaps:
if localm.offset == m.offset and localm.permissions == m.permissions:
# found it
found = True
caddr = ptr - m.start + localm.start # rebase
dl_name, fnaddr = getname(caddr)
if dl_name is not None:
#sym = libdl.dlsym( ldso[m.pathname]._handle, dl_name, 'xxx')
#fnaddr = struct.unpack('L',struct.pack('l', sym) )[0]
if fnaddr == caddr: # reverse check
print('[+] REBASE 0x%x -> 0x%x p:%s|%s|=%s off:%x|%x|=%s %s fn: %s @%x' % (
ptr, caddr, m.permissions, localm.permissions, localm.permissions == m.permissions,
m.offset, localm.offset, m.offset == localm.offset, m.pathname, dl_name, fnaddr))
# yield (ptr, m, dl_name)
else:
# continue
print('[-] MIDDLE 0x%x -> 0x%x p:%s|%s|=%s off:%x|%x|=%s %s fn: %s @%x' % (
ptr, caddr, m.permissions, localm.permissions, localm.permissions == m.permissions,
m.offset, localm.offset, m.offset == localm.offset, m.pathname, dl_name, fnaddr))
else:
continue
print('FAIL REBASE (not public ?) 0x%x -> 0x%x p:%s|%s|=%s off:%x|%x|=%s %s fn: %s ' % (
ptr, caddr, m.permissions, localm.permissions, localm.permissions == m.permissions,
m.offset, localm.offset, m.offset == localm.offset, m.pathname, dl_name))
pass
break
if not found:
continue
print('[+] not a fn pointer %x\n' % (ptr), m, '\n ---dump Vs local ---- \n', '\n'.join(map(str, localmaps)))
# pass
for name, lib in ldso.items():
ret = libdl.dlclose(lib._handle)
return
def test4():
dumpname = '/home/jal/outputs/dumps/ssh/ssh.1' # 23418'
#dumpname = '/home/jal/outputs/dumps/skype/skype.1/skype.1.a'
print('[+] load context', dumpname)
ctx = context.get_context(dumpname)
mappings = ctx.mappings
for ptr, name in ctx._function_names.items():
print('@%x -> %s::%s' % (ptr, mappings.get_mapping_for_address(ptr).pathname, name))
libdl = ctypes.CDLL('libdl.so')
def main(argv):
# test1()
# test2()
test3()
# test4()
if __name__ == '__main__':
main(sys.argv[1:])
|
989,768 | f7e2fbe2fc35d5abc1f6691cecc5750c656c601f | cont = 10
lista = []
lista2 = []
while(cont):
x = input()
y = input()
r = (x + y)/2
r2 = x - r
lista.append(r)
lista2.append(r2)
cont = cont -1
cont = 0
while(cont < 10):
print(lista[cont])
print(lista2[cont])
cont = cont + 1 |
989,769 | 21792277622aacba7ee05729ece090a7bea46184 | """
Django settings for barbex_tech project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from configparser import RawConfigParser
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
config = RawConfigParser()
config.read(os.path.join(BASE_DIR, 'conf/config.ini'))
DATABASE_USER = config.get('database', 'DATABASE_USER')
DATABASE_PASSWORD = config.get('database', 'DATABASE_PASSWORD')
DATABASE_HOST = config.get('database', 'DATABASE_HOST')
DATABASE_PORT = config.get('database', 'DATABASE_PORT')
DATABASE_ENGINE = config.get('database', 'DATABASE_ENGINE')
DATABASE_NAME = config.get('database', 'DATABASE_NAME')
TEST_DATABASE_NAME = config.get('database', 'TESTSUITE_DATABASE_NAME')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(#g!ojj#sk8dvdqq#lk566khl64%mpp*bp!9e=j=$3&waottd-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["localhost"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'intranet',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'barbex_tech.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# 'django.contrib.auth.context_processors.auth',
# 'django.template.context_processors.debug',
# 'django.template.context_processors.i18n',
# 'django.template.context_processors.media',
# 'django.template.context_processors.static',
# 'django.contrib.messages.context_processors.messages',
# 'django.template.context_processors.request',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler'
)
WSGI_APPLICATION = 'hr_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': DATABASE_ENGINE,
'NAME': DATABASE_NAME,
'USER': DATABASE_USER,
'PASSWORD': DATABASE_PASSWORD,
'HOST': DATABASE_HOST,
# 'PORT': DATABASE_PORT, # '3306'
'STORAGE_ENGINE': 'MyISAM / INNODB / ETC',
'OPTIONS': {
"init_command": "SET foreign_key_checks = 0;",
}
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_URL = '/panel/login/'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Accra'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static_directory/'),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
TEMPLATE_DIRS = [
os.path.join(BASE_DIR, 'templates/'),
]
# Email Sending Configurations
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'fleischer89@gmail.com'
EMAIL_HOST_PASSWORD = 'hand7god'
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
# SMS Sending Configuration
SENDSMS_BACKEND = 'sendsms.backends.console.SmsBackend'
BASE_URL = "http://intranet.smartempiregh.com"
SMS_SENDER_ID = "UHG"
SMS_RECIPIENTS = ["0249372566"]
ORDER_SMS_MESSAGE = "Blessed Day. Please we have received an order on the website for @@product@@. " \
"Qty: @@quantity@@. Delivery Date: @@delivery_date@@"
BULK_ORDER_SMS_MESSAGE = "Blessed Day. Please we have received an order for multiple products. Visit the Admin panel " \
"for details. Delivery Date: @@delivery_date@@"
DISTRIBUTOR_SMS_MESSAGE = "Blessed Day. Please we have received a distributor application from @@name@@. " \
"Phone: @@phone@@. Email: @@email@@"
CONTACT_SMS_MESSAGE = "Blessed Day. Please we have received an enquiry on the website from @@name@@. " \
"Phone: @@phone@@. Email: @@email@@"
|
989,770 | e048af6cb527e92bdb2d05708b675052f032a7b2 | from room import Room
from player import Player
from world import World
import random
from ast import literal_eval
# Load world
world = World()
# You may uncomment the smaller graphs for development and testing purposes.
# map_file = "maps/test_line.txt"
# map_file = "maps/test_cross.txt"
# map_file = "maps/test_loop.txt"
# map_file = "maps/test_loop_fork.txt"
map_file = "maps/main_maze.txt"
# Loads the map into a dictionary
room_graph = literal_eval(open(map_file, "r").read())
world.load_graph(room_graph)
# Print an ASCII map
world.print_rooms()
player = Player(world.starting_room)
# Fill this out with directions to walk
# traversal_path = ['n', 'n']
traversal_path = []
def generate_traversal_path():
graph = {}
visted = set()
t_path = []
def dft(room):
cur_room = room
previous = None
while len(graph.keys()) < len(room_graph):
exits = cur_room.get_exits()
if cur_room.id not in visted:
for exit in exits:
if cur_room.id in graph:
graph[cur_room.id][exit] = None
else:
graph[cur_room.id] = {exit: None}
visted.add(cur_room.id)
if previous:
if previous[0] == 'n':
graph[cur_room.id]['s'] = previous[1]
if previous[0] == 's':
graph[cur_room.id]['n'] = previous[1]
if previous[0] == 'e':
graph[cur_room.id]['w'] = previous[1]
if previous[0] == 'w':
graph[cur_room.id]['e'] = previous[1]
untried = [
exit for exit in exits if graph[cur_room.id][exit] is None]
if len(untried) == 0:
return cur_room
direction = random.choice(untried)
t_path.append(direction)
new_room = cur_room.get_room_in_direction(direction)
graph[cur_room.id][direction] = new_room.id
previous = [direction, cur_room.id]
cur_room = new_room
def bfs(starting_room):
paths_dict = {}
todo = []
completed = set()
todo.append([starting_room])
while len(todo) > 0 and len(graph.keys()) < len(room_graph):
# print(completed)
rooms = todo.pop(0)
cur_room = rooms[-1]
if cur_room.id not in completed:
if None in graph[cur_room.id].values():
t_path.extend(paths_dict[cur_room.id])
return cur_room
completed.add(cur_room.id)
exits = cur_room.get_exits()
for exit in exits:
next_room = cur_room.get_room_in_direction(exit)
if cur_room.id in paths_dict:
next_room_path = list(paths_dict[cur_room.id])
next_room_path.append(exit)
paths_dict[next_room.id] = next_room_path
else:
paths_dict[next_room.id] = [exit]
new_rooms = list(rooms)
new_rooms.append(next_room)
todo.append(new_rooms)
current_room = player.current_room
while len(graph.keys()) < len(room_graph):
dft_last_room = dft(current_room)
bfs_last_room = bfs(dft_last_room)
current_room = bfs_last_room
return t_path
while True:
path = generate_traversal_path()
if len(path) < 975:
traversal_path = path
break
# TRAVERSAL TEST - DO NOT MODIFY
visited_rooms = set()
player.current_room = world.starting_room
visited_rooms.add(player.current_room)
for move in traversal_path:
player.travel(move)
visited_rooms.add(player.current_room)
if len(visited_rooms) == len(room_graph):
print(
f"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited")
else:
print("TESTS FAILED: INCOMPLETE TRAVERSAL")
print(f"{len(room_graph) - len(visited_rooms)} unvisited rooms")
#######
# UNCOMMENT TO WALK AROUND
#######
# player.current_room.print_room_description(player)
# while True:
# cmds = input("-> ").lower().split(" ")
# if cmds[0] in ["n", "s", "e", "w"]:
# player.travel(cmds[0], True)
# elif cmds[0] == "q":
# break
# else:
# print("I did not understand that command.")
|
989,771 | 33c0ad63e702526cd0d5060ec74cc335eae3234e | import numpy as np
values = [1,2,3,4,5];
x=np.multiply(values,5);
y=values*5;
print(x==y);#false
print(x);
print(y);#repeats matrix sequence 5 times
|
989,772 | 04f24f557d700684e60f0ad84b5f8e18421b4c37 | import dpkt.pcap
# print("Enter the path of the pcap file to be parsed:")
# path = input()
# f = open(path, 'rb')
f = open('assignment3_my_arp.pcap', 'rb')
pcap = dpkt.pcap.Reader(f)
count = 0
# helper function for changing bytes to MAC address
def toMAC(address_bytes):
res = ""
for i in range(len(address_bytes)):
res += str(address_bytes[i:i+1].hex())
if(i<len(address_bytes)-1):
res += ":"
return res
# helper function for changing bytes to IP address
def toIP(address_bytes):
res = ""
for i in range(len(address_bytes)):
res += str(address_bytes[i])
if(i<len(address_bytes)-1):
res += "."
return res
for packet in pcap: # packet[0]: float, packet[1]: bytes
bytes = packet[1]
dest = bytes[0:6]
src = bytes[6:12]
type = bytes[12:14]
if type == b'\x08\x06':
count += 1
hardware_type = int.from_bytes(bytes[14:16], "big")
protocol_type = bytes[16:18].hex()
hardware_size = int.from_bytes(bytes[18:19], "big")
protocol_size = int.from_bytes(bytes[19:20], "big")
optcode = int.from_bytes(bytes[20:22], "big")
encoding = "utf_8"
sender_mac = bytes[22:28]
sender_ip = bytes[28:32]
target_mac = bytes[32:38]
target_ip = bytes[38:42]
print("----------ARP message #"+str(count)+"----------")
print("Hardware type: "+str(hardware_type))
print("Protocol type: 0x" + str(protocol_type))
print("Hardware size: " + str(hardware_size))
print("Protocol size: " + str(protocol_size))
print("Opcode: "+ str(optcode))
print("Sender MAC address: " + toMAC(sender_mac))
print("Sender IP address: " + toIP(sender_ip))
print("Target MAC address: " + toMAC(target_mac))
print("Target IP address: " + toIP(target_ip))
print()
print("The number of ARP messages is: " + str(count))
|
989,773 | eaa7093e28ce3b3e40d9f2b33e726972d6f28062 | def biggest(r, l, u, d):
big = r
if big<l:
big = l
if big<u:
big = u
if big<d:
big = d
if big==r:
return "right"
if big==l:
return "left"
if big==u:
return "up"
return "down"
print (biggest(1, 2, 2, 0))
print (biggest(18, 2, 2, 0))
i = 1
j = 1
print(i==j) |
989,774 | 099ef489a3dc9e794139aa5f695546ee006ee4ea | lista = []
for c in range(0,5):
a = int(input('Escreva um número: '))
if c == 0 or a > lista[-1]:
lista.append(a)
else:
pos = 0
while pos < len(lista):
if a <= lista[pos]:
lista.insert(pos, a)
break
pos += 1
print('-=' * 30)
print(f'Os valores escritos em ordem foram {lista}')
|
989,775 | 3894dce5f185f859aeb021a25550fd024bc3c5ac | import bpy
from bpy.types import NodeTree, Node, NodeSocket
# import itertools
from bpy.app.translations import pgettext_iface as iface_
# import time
class NodeOperators(bpy.types.Operator):
"""Tooltip"""
bl_idname = "node.noter_operator"
bl_label = ""
action: bpy.props.StringProperty()
@classmethod
def description(cls, context, properties):
is_node = bool( properties.action.count("*") )
if is_node == True:
action = properties.action
action = action.split("*")
action = action[0]
if action == 'node':
return "Assign text to the current node"
elif action == 'node_get':
return "Get text from the current node"
elif action == 'node_delete':
return "Delete text in the current node"
else:
if properties.action == 'node':
return "Assign text to the active node"
elif properties.action == 'node_get':
return "Get text from the active node"
elif properties.action == 'node_delete':
return "Delete text in the active node"
elif properties.action == 'colour':
return "Paint the nodes in the color of the active node"
elif properties.action == 'colour_all':
return "Paint selected node (nodes)"
elif properties.action == 'label':
return "Write label text from the label text of the active node or active frame"
elif properties.action == 'label_all':
return "Write label text in the selected node (nodes) or selected frame (frames)"
@classmethod
def poll(cls, context):
# space = False
# for area in bpy.context.screen.areas:
# if area.type == ('NODE_EDITOR'):
# space = True
# break
# return space
space = context.space_data
return space.type == 'NODE_EDITOR'
def execute(self, context):
# space = None
# for area in bpy.context.screen.areas:
# if area.type == ('NODE_EDITOR'):
# space = area
# print (space)
# break
# space = context.space_data
# return space.type == 'NODE_EDITOR'
action = self.action
space = context.space_data
node_tree = space.node_tree
node_active = context.active_node
text_node = node_active.text
node_selected = context.selected_nodes
file_name = bpy.context.scene.file_name
if len(bpy.data.texts.values()) == 0:
bpy.ops.text.new()
text = "A new text file was created"
war = "INFO"
self.report({war}, text)
# return {'FINISHED'}
try:
main_text = bpy.data.texts[file_name].as_string()
except KeyError:
text = "File was not found"
war = "ERROR"
self.report({war}, text)
return {'FINISHED'}
# print(node_active.text, 1111111111111)
# print(len(node_active.internal_links))
# print(node_active.inputs[0].is_linked)
from_node = False
if action.count("*"):
action, from_node_name = action.split("*")[0], action.split("*")[1]
from_node = True
if action == 'node':
if from_node == True:
bpy.data.node_groups[node_tree.name].nodes[from_node_name].text = main_text
else:
node_active.text = main_text
elif action == 'node_get':
bpy.data.texts[file_name].clear()
if from_node == True:
text_node = bpy.data.node_groups[node_tree.name].nodes[from_node_name].text
bpy.data.texts[file_name].write(text_node)
else:
bpy.data.texts[file_name].write(text_node)
elif action == 'node_delete':
if from_node == True:
bpy.data.node_groups[node_tree.name].nodes[from_node_name].text = ''
else:
node_active.text = ""
elif action == 'colour':
if len(node_selected) == 0:
text = "No selected nodes was found"
war = "WARNING"
self.report({war}, text)
return {'FINISHED'}
for i in node_selected:
# node_selected.use_custom_color = bpy.data.node_groups[node_tree.name].nodes[from_node_name].use_custom_color
i.use_custom_color = node_active.use_custom_color
i.color = node_active.color
elif action == 'colour_all':
if len(node_selected) == 0:
text = "No selected nodes was found"
war = "WARNING"
self.report({war}, text)
return {'FINISHED'}
for i in node_selected:
i.use_custom_color = True
i.color = bpy.context.scene.colorProperty
elif action == "label":
if len(node_selected) == 0:
text = "No selected nodes was found"
war = "WARNING"
self.report({war}, text)
return {'FINISHED'}
for i in node_selected:
# i.use_custom_color = node_active.use_custom_color
i.label = node_active.label
elif action == "label_all":
if len(node_selected) == 0:
text = "No selected nodes was found"
war = "WARNING"
self.report({war}, text)
return {'FINISHED'}
for i in node_selected:
# i.use_custom_color = node_active.use_custom_color
i.label = bpy.context.scene.label_node_text
# now we have the context, perform a simple operation
# if node_active in node_selected:
# node_selected.remove(node_active)
# if len(node_selected) != 1:
# operator.report({'ERROR'}, "2 nodes must be selected")
# return
# node_other, = node_selected
# now we have 2 nodes to operate on
#
# operaif not node_active.inputs:tor.report({'ERROR'}, "Active node has no inputs")
# return
# if not node_other.outputs:
# operator.report({'ERROR'}, "Selected node has no outputs")
# return
# socket_in = node_active.inputs[0]
# socket_out = node_other.outputs[0]
# add a link between the two nodes
# node_link = node_tree.links.new(socket_in, socket_out)
return {'FINISHED'}
class Note_Node_Bool_Operator(bpy.types.Operator):
"""Tooltip"""
bl_idname = "node.noter_bool_operator"
bl_label = ""
bl_description = "Mute or unmute current node"
# my_bool: bpy.props.FloatProperty()
# my_bool: bpy.props.CollectionProperty(type = MyCustomNode)
# name: bpy.props.PointerProperty(type = MyCustomTreeNode)
# my_bool: bpy.props.StringProperty()
name: bpy.props.StringProperty()
@classmethod
def poll(cls, context):
space = context.space_data
return space.type == 'NODE_EDITOR'
def execute(self, context):
space = context.space_data
node_tree = space.node_tree
mute = bpy.data.node_groups[node_tree.name].nodes[self.name].mute
if mute == True:
bpy.data.node_groups[node_tree.name].nodes[self.name].mute = False
else:
bpy.data.node_groups[node_tree.name].nodes[self.name].mute = True
return {'FINISHED'}
class Choose_or_Add_Nodes_Tree(bpy.types.Operator):
"""Tooltip"""
bl_idname = "node.noter_add_nodes_tree"
bl_label = ""
bl_description = ""
name: bpy.props.StringProperty()
new: bpy.props.BoolProperty()
@classmethod
def description(cls, context, properties):
if properties.new == True:
return "Create New Node Tree"
else:
return "Choose Node Tree"
@classmethod
def poll(cls, context):
space = context.space_data
return space.type == 'NODE_EDITOR'
def execute(self, context):
if self.new == True:
context.space_data.node_tree = bpy.data.node_groups.new("", 'Noter_CustomTreeType')
else:
context.space_data.node_tree = bpy.data.node_groups[ self.name ]
return {'FINISHED'}
class Noter_Image_Action(bpy.types.Operator):
"""Tooltip"""
bl_idname = "node.noter_image"
bl_label = "Noter Image"
bl_description = 'Display the image in "Image Editor"\
\n\nIn "Image Editor" choose an image named "Noter Node Image" and after click "View Image" button'
# bl_property = "my_image"
# my_bool: bpy.props.FloatProperty()
# my_bool: bpy.props.CollectionProperty(type = MyCustomNode)
# name: bpy.props.PointerProperty(type = MyCustomTreeNode)
# my_bool: bpy.props.StringProperty()
# name: bpy.props.StringProperty()
# my_image: bpy.props.PointerProperty(type= bpy.types.Image)
my_image_name: bpy.props.StringProperty()
# @classmethod
# def poll(cls, context):
# space = context.space_data
# return space.type == 'NODE_EDITOR'
def execute(self, context):
custom_image_name = "Noter Node Image"
# if self.my_image_name == "Render Result":
# self.my_image_name = f"\\{self.my_image_name}"
# self.my_image_name = f"{bpy.context.scene.render.filepath}{self.my_image_name}"
# if self.my_image_name == "Render Result":
# # filepath = s = os.path.dirname(bpy.data.images['Render Result'].filepath)
# filepath = os.path.join( bpy.context.blend_data.filepath, self.my_image_name )
# else:
# filepath bpy.data.images[self.my_image_name].filepath
filepath = bpy.data.images[self.my_image_name].filepath
if filepath == "" :
text = "You need to choose an image not from Blender"
war = "WARNING"
self.report({war}, text)
return {'FINISHED'}
# bpy.data.images[self.my_image_name].use_fake_user = True
# print(filepath)
if bpy.data.images.find(custom_image_name) == -1:
image = bpy.data.images.load( filepath )
image.name = custom_image_name
else:
bpy.data.images[custom_image_name].filepath = filepath
# bpy.data.images.remove( bpy.data.images[custom_image_name] )
# image = bpy.data.images.load( bpy.data.images[self.my_image_name].filepath )
# image.name = custom_image_name
# bpy.context.space_data.image = image
# bpy.data.images[custom_image_name] = bpy.data.images[self.my_image_name]
# bpy.data.textures.new( custom_image_name, "IMAGE")
return {'FINISHED'}
# class Noter_NodeSearch(bpy.types.Operator):
# # def iterSingleNodeItems():
# # for node in iterAnimationNodeClasses():
# # if not node.onlySearchTags:
# # yield SingleNodeInsertionItem(node.bl_idname, node.bl_label)
# # for customSearch in node.getSearchTags():
# # if isinstance(customSearch, tuple):
# # yield SingleNodeInsertionItem(node.bl_idname, customSearch[0], customSearch[1])
# # else:
# # yield SingleNodeInsertionItem(node.bl_idname, customSearch)
# # for network in getSubprogramNetworks():
# # yield SingleNodeInsertionItem("an_InvokeSubprogramNode", network.name,
# # {"subprogramIdentifier" : repr(network.identifier)})
# # itemsByIdentifier = {}
# bl_idname = "node.noter_node_search"
# bl_label = "Node Search"
# bl_options = {"REGISTER"}
# bl_property = "my_search"
# # bl_property = "item"
# # def getSearchItems(self, context):
# # itemsByIdentifier.clear()
# # items = []
# # for item in itertools.chain(iterSingleNodeItems()):
# # itemsByIdentifier[item.identifier] = item
# # items.append((item.identifier, item.searchTag, ""))
# # return items
# # item: bpy.props.EnumProperty(items = getSearchItems)
# # # @classmethod
# # # def poll(cls, context):
# # # try: return context.space_data.node_tree.bl_idname == "an_AnimationNodeTree"
# # # except: return False
# # def invoke(self, context, event):
# # context.window_manager.invoke_search_popup(self)
# # return {"CANCELLED"}
# my_search: bpy.props.EnumProperty(
# name="My Search",
# items=(
# ('FOO', "Foo", ""),
# ('BAR', "Bar", ""),
# ('BAZ', "Baz", ""),
# ),
# )
# @classmethod
# def poll(cls, context):
# try: return context.space_data.node_tree.bl_idname == "Noter_CustomTreeType"
# except: return False
# def execute(self, context):
# self.my_searchA
# # self.report({'INFO'}, "Selected:" + self.my_search)
# return {"FINISHED"}
# def invoke(self, context, event):
# context.window_manager.invoke_search_popup(self)
# # return {"CANCELLED"}
# return {'RUNNING_MODAL'}
# # return context.window_manager.invoke_search_popup(self)
# Derived from the NodeTree base type, similar to Menu, Operator, Panel, etc.
class MyCustomTree(NodeTree):
# Description string
bl_description = 'Notes Nodes'
# Optional identifier string. If not explicitly defined, the python class name is used.
bl_idname = 'Noter_CustomTreeType'
# Label for nice name display
bl_label = "Notes Tree"
# Icon identifier
bl_icon = 'FILE'
# type = 'COMPOSITING'
# Custom socket type
class MyCustomSocket(NodeSocket):
# Description string
'''Custom node socket type'''
# Optional identifier string. If not explicitly defined, the python class name is used.
bl_idname = 'Noter_CustomSocketType'
# Label for nice name display
bl_label = "Custom Node Socket"
# Enum items list
my_items = (
('DOWN', "Down", "Where your feet are"),
('UP', "Up", "Where your head should be"),
('LEFT', "Left", "Not right"),
('RIGHT', "Right", "Not left"),
)
my_enum_prop: bpy.props.EnumProperty(
name="Direction",
description="Just an example",
items=my_items,
default='UP',
)
# Optional function for drawing the socket input value
def draw(self, context, layout, node, text):
# if self.is_output or self.is_linked:
# layout.label(text=text)
# else:
# layout.prop(self, "my_enum_prop", text=text)
# layout.label(text="Text")
# if len(node.inputs)
# for i in range(0, len(node.inputs) ):
# if i == 0:
# self.inputs.new('Noter_CustomSocketType', "")
# text = node.text
# if text.count("\n") == 0:
# layout.prop(node, "text", text = '')
# else:
# text_parts_list = text.split('\n')
# box = layout.box()
# box = box.box()
# col = box.column(align = 1)
# for i in text_parts_list:
# row = col.row(align = 1)
# row.label(text = i)
# row.scale_y = 0
# # break
pass
# Socket color
def draw_color(self, context, node):
# return (1.0, 0.4, 0.216, 1)
# return (1, 1, 0.035, .9)
return (0.8, 0.8, 0.03, 1.000000)
class MyCustomSocket_2(NodeSocket):
# Description string
'''Custom node socket type'''
# Optional identifier string. If not explicitly defined, the python class name is used.
bl_idname = 'CustomSocketType_2'
# Label for nice name display
bl_label = "Custom Node Socket"
my_bool: bpy.props.BoolProperty()
# Optional function for drawing the socket input value
def draw(self, context, layout, node, text):
# if self.is_output or self.is_linked:
layout.prop(self, 'my_bool', text = '')
# else:
# layout.prop(self, "my_enum_prop", text=text)
pass
# Socket color
def draw_color(self, context, node):
# return (1.0, 0.4, 0.216, 1)
# return (1, 1, 0.035, .9)
return (0.8, 0.8, 0.03, 1.000000)
class MyCustomSocket_3(NodeSocket):
# Description string
'''Custom node socket type'''
# Optional identifier string. If not explicitly defined, the python class name is used.
bl_idname = 'Noter_CustomSocketType_3'
# Label for nice name display
bl_label = "Custom Node Socket"
image: bpy.props.PointerProperty(type= bpy.types.Image)
# Enum items list
# Optional function for drawing the socket input value
def draw(self, context, layout, node, text):
layout.label(text = '12312123')
pass
# Socket color
def draw_color(self, context, node):
return (0.8, 0.8, 0.03, 1.000000)
# Mix-in class for all custom nodes in this tree type.
# Defines a poll function to enable instantiation.
class MyCustomTreeNode:
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'Noter_CustomTreeType'
# return True
class MyCustomNode(Node, MyCustomTreeNode):
# === Basics ===
# Description string
'''A custom node'''
# Optional identifier string. If not explicitly defined, the python class name is used.
bl_idname = 'Noter_CustomNodeType'
# Label for nice name display
bl_label = "Custom Node"
# Icon identifier
# bl_icon = 'SOUND'
bl_width_default = 200
# === Custom Properties ===
# These work just like custom properties in ID data blocks
# Extensive information can be found under
# http://wiki.blender.org/index.php/Doc:2.6/Manual/Extensions/Python/Properties
text: bpy.props.StringProperty()
my_bool: bpy.props.BoolProperty()
draw_extra: bpy.props.StringProperty(default = "+++")
image_bool: bpy.props.BoolProperty()
image: bpy.props.PointerProperty(type= bpy.types.Image)
# === Optional Functions ===
# Initialization function, called when a new node is created.
# This is the most common place to create the sockets for a node, as shown below.
# NOTE: this is not the same as the standard __init__ function in Python, which is
# a purely internal Python method and unknown to the node system!
def draw_label(self):
# def draw_color(self, context, node):
# return (1.0, 0.4, 0.216, 1)
# return (1, 1, 0.035, .9)
# return (0.8, 0.8, 0.03, 1.000000)
return " "
# return "Press F2"
# return self.my_bool
def init(self, context):
self.inputs.new('Noter_CustomSocketType', "")
# self.inputs.new('CustomSocketType_2', "")
# self.inputs[0].display_shape = 'DIAMOND'
# self.inputs.new('NodeSocketFloat', "World")
# self.inputs.new('NodeSocketVector', "!")
# self.inputs.new('NodeSocketColor', "")
# self.outputs.new('NodeSocketColor', "")
self.outputs.new('Noter_CustomSocketType', "")
# self.outputs.new('CustomSocketType_2', "")
# self.outputs.new('NodeSocketColor', "are")
# self.outputs.new('NodeSocketFloat', "you")
# Copy function to initialize a copied node from an existing one.
def copy(self, node):
pass
# print("Copying from node ", node)
# Free function to clean up on removal.
def free(self):
# print("Removing node ", self, ", Goodbye!")
pass
# Additional buttons displayed on the node.
# def draw_buttons_ext(self, context, layout):
def draw_buttons(self, context, layout):
text = self.text
draw_extra_count = self.draw_extra.count("+")
if self.image_bool == True:
box = layout.box()
box = box.box()
col = box.column( align = 1)
row = col.row(align = 1)
row.template_ID_preview(self, "image", new="image.new", open="image.open", hide_buttons = False)
# row.template_ID(self, "image", new="image.new", open="image.open")
row.scale_y = 1.4
try:
self.image.name
# layout.separator()
row = col.row(align = 1)
# row.label( icon = "IMAGE_DATA" )
row.operator("node.noter_image", icon = "FILE_REFRESH", text = 'View Image').my_image_name = self.image.name
row.scale_y = 1.5
except AttributeError:
pass
layout.separator(factor = 6)
if draw_extra_count >= 1:
if text.count("\n") == 0:
# layout.separator(factor = 1)
box = layout.box()
box.prop(self, "text", text = '')
else:
text_parts_list = text.split('\n')
layout.separator(factor = .5)
box = layout.box()
box = box.box()
col = box.column(align = 1)
for i in text_parts_list:
row = col.row(align = 1)
row.label(text = i)
row.scale_y = 0
if draw_extra_count >= 2:
layout.separator(factor = 2)
row_header = layout.row()
ic = 'CHECKMARK' if self.mute else 'BLANK1'
row = row_header.row()
row.operator("node.noter_bool_operator", icon = ic, text = '', depress = self.mute).name = self.name
row.alignment = 'LEFT'
if self.mute == True:
row.scale_y = 2.5
row.scale_x = 2.5
else:
row.scale_y = 1
row.scale_x = 1
if draw_extra_count >= 3:
row = row_header.row()
row.operator("node.noter_operator", icon = 'IMPORT', text = '').action = f"node*{self.name}"
row.operator("node.noter_operator", icon = 'EXPORT', text = '').action = f"node_get*{self.name}"
row.operator("node.noter_operator", icon = 'TRASH', text = '').action = f"node_delete*{self.name}"
row.alignment = 'RIGHT'
row.scale_y = 1.6
row.scale_x = 1.6
def update(self):
count = 0
for i in self.inputs:
if i.is_linked == True:
count += 1
free_inputs = len(self.inputs) - count
if free_inputs == 0:
self.inputs.new('Noter_CustomSocketType', "")
# self.inputs.new('CustomSocketType_2', "")
elif free_inputs > 1:
for i in self.inputs:
if i.is_linked == False and free_inputs > 1:
self.inputs.remove(i)
free_inputs -= 1
elif i.is_linked == True:
pass
else:
break
# def insert_link(self, link):
# count = 0
# for i in self.inputs:
# if i.is_linked == True:
# count += 1
# free_inputs = len(self.inputs) - count
# if free_inputs == 0:
# self.inputs.new('Noter_CustomSocketType', "")
# # self.inputs.new('CustomSocketType_2', "")
# elif free_inputs > 1:
# for i in self.inputs:
# if i.is_linked == False and free_inputs > 1:
# self.inputs.remove(i)
# free_inputs -= 1
# elif i.is_linked == True:
# pass
# else:
# break
# Detail buttons in the sidebar.
# If this function is not defined, the draw_buttons function is used instead
# def draw_buttons_ext(self, context, layout):
# layout.prop(self, "my_float_prop")
# # my_string_prop button will only be visible in the sidebar
# layout.prop(self, "my_string_prop")
# Optional: custom label
# Explicit user label overrides this, but here we can define a label dynamically
class MyCustomNode_2(Node, MyCustomTreeNode):
# === Basics ===
# Description string
# '''A custom node'''
# Optional identifier string. If not explicitly defined, the python class name is used.
bl_idname = 'Noter_CustomNodeType_2'
# Label for nice name display
bl_label = "Custom Node"
# Icon identifier
# bl_icon = 'SOUND'
bl_width_default = 200
# bl_static_type = "UNDEFINED"
# === Custom Properties ===
# These work just like custom properties in ID data blocks
# Extensive information can be found under
# http://wiki.blender.org/index.php/Doc:2.6/Manual/Extensions/Python/Properties
text: bpy.props.StringProperty()
my_bool: bpy.props.BoolProperty()
draw_extra: bpy.props.StringProperty(default = "++")
# image: bpy.data.images['Camera.001'].image
# image: bpy.props.CollectionProperty(type= bpy.types.Image)
image: bpy.props.PointerProperty(type= bpy.types.Image)
# enum_image: bpy.props.EnumProperty(
# )
# === Optional Functions ===
# Initialization function, called when a new node is created.
# This is the most common place to create the sockets for a node, as shown below.
# NOTE: this is not the same as the standard __init__ function in Python, which is
# a purely internal Python method and unknown to the node system!
def draw_label(self):
# def draw_color(self, context, node):
# return (1.0, 0.4, 0.216, 1)
# return (1, 1, 0.035, .9)
# return (0.8, 0.8, 0.03, 1.000000)
return " "
# return "Press F2"
# return self.my_bool
def init(self, context):
# self.show_preview = True
# self.show_options = True
# self.image = bpy.data.textures['Texture'].preview
# self.show_texture = True
# self.image = bpy.data.images['Camera.001']
# self.image = bpy.data.images['Camera.png']
# self.image = bpy.data.textures['Texture'].image
# self.image = bpy.data.images['Camera.001.png']
# self.image = bpy.data.images['Camera.002.png'].pixels
# self.image = bpy.data.images['Untitled']
# self.image = bpy.data.textures['Texture'].image
# self.image = bpy.data.textures['Texture'].preview
# self.image = bpy.data.scenes['Scene'].node_tree.nodes['Image'].image
# print(123123)
# print(self.image)
# print()
self.inputs.new('Noter_CustomSocketType', "")
# self.inputs.new('CustomSocketType_2', "")
# self.inputs.new('NodeSocketInterface', "")
# self.inputs.new('NodeSocketInterfaceColor', "")
# self.inputs.new('NodeSocketColor', "Image")
# self.inputs.new('Noter_CustomSocketType_3', "Image")
# self.inputs[1] = bpy.data.images['Camera.png']
# self.inputs[0].display_shape = 'DIAMOND'
# self.inputs.new('NodeSocketFloat', "World")
# self.inputs.new('NodeSocketVector', "!")
# self.inputs.new('NodeSocketColor', "")
# self.outputs.new('NodeSocketColor', "")
self.outputs.new('Noter_CustomSocketType', "")
# self.outputs.new('CustomSocketType_2', "")
# self.outputs.new('NodeSocketColor', "are")
# self.outputs.new('NodeSocketFloat', "you")
# Copy function to initialize a copied node from an existing one.
def copy(self, node):
pass
# print("Copying from node ", node)
# Free function to clean up on removal.
def free(self):
# print("Removing node ", self, ", Goodbye!")
pass
# Additional buttons displayed on the node.
# def draw_buttons_ext(self, context, layout):
def draw_buttons(self, context, layout):
# img = bpy.context.scene.Noter_images
row = layout.row()
row.template_ID_preview(self, "image", new="image.new", open="image.open", hide_buttons = False)
# row.template_ID(self, "image", new="image.new", open="image.open")
row.scale_y = 1.4
try:
layout.separator()
self.image.name
row = layout.row()
row.label(icon = "IMAGE_DATA")
row.operator("node.noter_image", icon = "EXPORT", text = 'View Image').my_image_name = self.image.name
row.scale_y = 1.7
except AttributeError:
pass
# layout = self.layout
# pcoll = preview_collections["main"]
# row = layout.row()
# my_icon = pcoll["my_icon"]
# row.operator("render.render", icon_value = my_icon.icon_id)
# layout.template_icon(icon_value = my_icon.icon_id, scale=15.0)
# self.show_preview = True
# self.show_texture = True
# self.image = bpy.data.images['Camera.001']
# self.image = bpy.data.images['Camera.001.png']
# self.image = bpy.data.textures['Texture']
# row = layout.row()
# row.operator( "node.noter_image_action", text = "Image" )
# layout.operator("node.noter_image"
# layout.operator("node.noter_bool_operator", icon = "DOT", text = 'Image')
# print(self.image.name)
# try:
# image = bpy.types.Image(file_format='PNG')
# image.file_format = 'PNG'
# image.filepath = 'C:\\Users\\Standardbenutzer\\Desktop\\bla.png'
# sima = context.space_data
# tex = bpy.data.textures['.hidden']
# tex = bpy.data.textures['Texture']
# tex = bpy.data.images['Camera.001']
# col = layout.box().column()
# tex = self
# tex = context.texture
# layout.template_icon_view(tex, "image", show_labels=True, scale=6.0, scale_popup=5.0)
# layout.template_ID(self, 'image', new="", open="", unlink="", filter='ALL', live_icon=False, text="", text_ctxt="", translate=True)
# layout.template_any_ID(tex, 'image', "Image")
# layout.template_path_builder(tex, 'image', "Image")
# layout.template_preview(self, show_buttons=False)
# layout.template_preview(self, show_buttons=True)
# layout.template_ID(tex, "image", new="image.new", open="image.open")
# layout.template_ID(self, "image", new="image.new", open="image.open")
# layout.template_image_layers(tex.image, tex.image_user)
# layout.template_layers(tex, "image")
# layout.template_vectorscope(tex, "image")
# layout.template_image(tex, "image", tex.image_user, compact=False, multiview=True)
# layout.template_image(self, "image", self.image.users)
# layout.template_ID_preview(self, "image", new="image.new", open="image.open", hide_buttons = False)
# layout.template_ID_tabs(tex, "image", new="", menu="", filter='ALL')
# layout.template_icon( 37*12 , scale=4)
# layout.template_layers(tex, 'image', used_layers_data, used_layers_property, active_layer)
# layout.template_image_layers(tex.image, tex.image_user)
# layout.template_icon(icon_value=custom_icons[z[:-4]].icon_id,scale=10)
# except KeyError:
# pass
# except TypeError:
# pass
# text = self.text
# if text.count("\n") == 0:
# layout.separator(factor = 1)
# box = layout.box()
# box.prop(self, "text", text = '')
# else:
# text_parts_list = text.split('\n')
# layout.separator(factor = .5)
# box = layout.box()
# box = box.box()
# col = box.column(align = 1)
# for i in text_parts_list:
# row = col.row(align = 1)
# row.label(text = i)
# row.scale_y = 0
# draw_extra_count = self.draw_extra.count("+")
# if draw_extra_count >= 1:
# layout.separator(factor = 2)
# row_header = layout.row()
# ic = 'CHECKMARK' if self.mute else 'BLANK1'
# row = row_header.row()
# row.operator("node.noter_bool_operator", icon = ic, text = '', depress = self.mute).name = self.name
# row.alignment = 'LEFT'
# if self.mute == True:
# row.scale_y = 2.5
# row.scale_x = 2.5
# else:
# row.scale_y = 1
# row.scale_x = 1
# if draw_extra_count >= 2:
# row = row_header.row()
# row.operator("node.noter_operator", icon = 'IMPORT', text = '').action = f"node*{self.name}"
# row.operator("node.noter_operator", icon = 'EXPORT', text = '').action = f"node_get*{self.name}"
# row.operator("node.noter_operator", icon = 'TRASH', text = '').action = f"node_delete*{self.name}"
# row.alignment = 'RIGHT'
# row.scale_y = 1.6
# row.scale_x = 1.6
# def update(self):
# # self.show_preview = True
# # self.show_texture = True
# # # self.image = bpy.data.images['Camera.001']
# # # self.image = bpy.data.images['Camera.001.png']
# # self.image = bpy.data.textures['Texture']
# # print(self.image)
# # print(123123)
# count = 0
# for i in self.inputs:
# if i.is_linked == True:
# count += 1
# free_inputs = len(self.inputs) - count
# if free_inputs == 0:
# self.inputs.new('Noter_CustomSocketType', "")
# # self.inputs.new('CustomSocketType_2', "")
# elif free_inputs > 1:
# for i in self.inputs:
# if i.is_linked == False and free_inputs > 1:
# self.inputs.remove(i)
# free_inputs -= 1
# elif i.is_linked == True:
# pass
# else:
# break
### Node Categories ###
# Node categories are a python system for automatically
# extending the Add menu, toolbar panels and search operator.
# For more examples see release/scripts/startup/nodeitems_builtins.py
import nodeitems_utils
from nodeitems_utils import NodeCategory, NodeItem
# our own base class with an appropriate poll function,
# so the categories only show up in our own tree type
class MyNodeCategory(NodeCategory):
@classmethod
def poll(cls, context):
return context.space_data.tree_type == 'Noter_CustomTreeType'
class NODE_PT_active_node_generic(bpy.types.Panel):
bl_space_type = 'NODE_EDITOR'
bl_region_type = 'UI'
bl_category = "Noter"
bl_label = "Noter"
@classmethod
def poll(cls, context):
return context.space_data.tree_type == 'Noter_CustomTreeType'
def draw(self, context):
layout = self.layout
row = layout.row()
row.prop(context.scene, "file_name", text = '')
row.scale_y = 1.3
box = layout.box()
column = box.column(align = 1)
column.scale_y = 1.3
column.operator("node.noter_operator", text = '', icon = "IMPORT").action = 'node'
column.operator("node.noter_operator", text = '', icon = "EXPORT").action = 'node_get'
column.operator("node.noter_operator", text = '', icon = "TRASH").action = 'node_delete'
column.separator(factor = 2)
# column.template_columnor_picker(self, "columnorProperty", value_slider = True)
# column.prop(self, "columnorProperty")
column.operator("node.noter_operator", text = 'Copy-Paste', icon = "BRUSH_DATA").action = 'colour'
column.operator("node.noter_operator", text = 'Copy-Paste', icon = "TOPBAR").action = 'label'
column.separator(factor = 2)
row = column.row(align = 1)
row_row = row.row(align = 1)
row_row.operator("node.noter_operator", text = 'Paint', icon = "BRUSH_DATA").action = 'colour_all'
row_row = row.row(align = 1)
row_row.scale_x = .6
row_row.prop(bpy.context.scene, "colorProperty", text = "")
column.separator(factor = 2)
column.operator("node.noter_operator", text = 'Write Label', icon = "TOPBAR").action = 'label_all'
column.prop(bpy.context.scene, "label_node_text", text = "")
column.separator(factor = 1)
# row_row = row.row(align = 1)
# row_row.scale_x = 2
class NODE_PT_active_node_color_2 (bpy.types.Panel):
bl_space_type = 'NODE_EDITOR'
bl_region_type = 'UI'
bl_category = "Noter"
bl_label = "Node Color"
# bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = 'NODE_PT_active_node_generic'
@classmethod
def poll(cls, context):
return context.active_node is not None
def draw_header(self, context):
node = context.active_node
self.layout.prop(node, "use_custom_color", text="")
def draw_header_preset(self, _context):
bpy.types.NODE_PT_node_color_presets.draw_panel_header(self.layout)
def draw(self, context):
layout = self.layout
node = context.active_node
layout.enabled = node.use_custom_color
row = layout.row()
row.prop(node, "color", text="")
row.menu("NODE_MT_node_color_context_menu", text="", icon='DOWNARROW_HLT')
class NODE_SPACE_PT_AnnotationDataPanel_2(bpy.types.Panel):
bl_label = "Annotations"
bl_region_type = 'UI'
bl_space_type = 'NODE_EDITOR'
bl_category = "Noter"
# bl_parent_id = 'NODE_PT_active_node_generic'
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
# Show this panel as long as someone that might own this exists
# AND the owner isn't an object (e.g. GP Object)
if context.space_data.tree_type == 'Noter_CustomTreeType':
if context.annotation_data_owner is None:
return False
elif type(context.annotation_data_owner) is bpy.types.Object:
return False
else:
return True
def draw_header(self, context):
if context.space_data.type not in {'VIEW_3D', 'TOPBAR'}:
self.layout.prop(context.space_data, "show_annotation", text="")
def draw(self, context):
layout = self.layout
layout.use_property_decorate = False
# Grease Pencil owner.
gpd_owner = context.annotation_data_owner
gpd = context.annotation_data
# Owner selector.
if context.space_data.type == 'CLIP_EDITOR':
layout.row().prop(context.space_data, "annotation_source", expand=True)
layout.template_ID(gpd_owner, "grease_pencil", new="gpencil.annotation_add", unlink="gpencil.data_unlink")
# List of layers/notes.
if gpd and gpd.layers:
self.draw_layers(context, layout, gpd)
def draw_layers(self, context, layout, gpd):
row = layout.row()
col = row.column()
if len(gpd.layers) >= 2:
layer_rows = 5
else:
layer_rows = 3
col.template_list("GPENCIL_UL_annotation_layer", "", gpd, "layers", gpd.layers, "active_index",
rows=layer_rows, sort_reverse=True, sort_lock=True)
col = row.column()
sub = col.column(align=True)
sub.operator("gpencil.layer_annotation_add", icon='ADD', text="")
sub.operator("gpencil.layer_annotation_remove", icon='REMOVE', text="")
gpl = context.active_annotation_layer
if gpl:
if len(gpd.layers) > 1:
col.separator()
sub = col.column(align=True)
sub.operator("gpencil.layer_annotation_move", icon='TRIA_UP', text="").type = 'UP'
sub.operator("gpencil.layer_annotation_move", icon='TRIA_DOWN', text="").type = 'DOWN'
tool_settings = context.tool_settings
if gpd and gpl:
layout.prop(gpl, "thickness")
else:
layout.prop(tool_settings, "annotation_thickness", text="Thickness")
if gpl:
# Full-Row - Frame Locking (and Delete Frame)
row = layout.row(align=True)
row.active = not gpl.lock
if gpl.active_frame:
lock_status = iface_("Locked") if gpl.lock_frame else iface_("Unlocked")
lock_label = iface_("Frame: %d (%s)") % (gpl.active_frame.frame_number, lock_status)
else:
lock_label = iface_("Lock Frame")
row.prop(gpl, "lock_frame", text=lock_label, icon='UNLOCKED')
row.operator("gpencil.annotation_active_frame_delete", text="", icon='X')
def insertNode(layout, type, text, settings = {}, icon = "NONE"):
operator = layout.operator("node.add_node", text = text, icon = icon)
operator.type = type
operator.use_transform = True
for name, value in settings.items():
item = operator.settings.add()
item.name = name
item.value = value
return operator
separator_factor_for_menus = .2
class NODE_MT_add_menu_notes(bpy.types.Menu):
bl_label = "Note"
def draw(self, context):
layout = self.layout
props = layout.operator("node.add_node", text = "Note Node", icon = 'FILE')
props.use_transform = True
props.type = "Noter_CustomNodeType"
layout.separator(factor = separator_factor_for_menus)
insertNode(layout, "Noter_CustomNodeType", "Note Node ( w/o some buttons )", {"draw_extra" : repr("++")}, 'OUTLINER_DATA_POINTCLOUD')
layout.separator(factor = separator_factor_for_menus)
insertNode(layout, "Noter_CustomNodeType", "Note Node ( w/o All buttons )", {"draw_extra" : repr("+")}, 'LAYER_USED')
# props = layout.operator("node.add_node", text = "Image Node", icon = 'IMAGE_DATA')
# props.use_transform = True
# props.type = "Noter_CustomNodeType"
# props = layout.operator("node.add_node", text = "cni", icon = 'NONE')
# props.use_transform = True
# props.type = "CompositorNodeImage"
class NODE_MT_add_menu_image_notes(bpy.types.Menu):
bl_label = "Layout"
def draw(self, context):
layout = self.layout
# layout.operator_context = 'INVOKE_AREA'
insertNode(layout, "Noter_CustomNodeType", "Image Note Node", { "draw_extra" : repr("+++"), "image_bool" : repr( True ) }, 'IMAGE_DATA')
layout.separator(factor = separator_factor_for_menus)
insertNode(layout, "Noter_CustomNodeType", "Image Note Node ( w/o some buttons )", { "draw_extra" : repr("++"), "image_bool" : repr( True ) }, 'OUTLINER_DATA_POINTCLOUD')
layout.separator(factor = separator_factor_for_menus)
insertNode(layout, "Noter_CustomNodeType", "Image Note Node ( w/o All buttons )", { "draw_extra" : repr("+"), "image_bool" : repr( True ) }, 'LAYER_USED')
# layout.separator(factor = separator_factor_for_menus)
# insertNode(layout, "Noter_CustomNodeType", "Without extra buttons + +", { "draw_extra" : repr(""), "image_bool" : repr( True ) }, 'LAYER_USED')
class NODE_MT_add_menu_othernotes(bpy.types.Menu):
bl_label = "Other Notes"
def draw(self, context):
layout = self.layout
insertNode(layout, "Noter_CustomNodeType", "Without extra buttons", {"draw_extra" : repr("+")}, 'OUTLINER_DATA_POINTCLOUD')
layout.separator(factor = separator_factor_for_menus)
insertNode(layout, "Noter_CustomNodeType", "Without extra buttons +", {"draw_extra" : repr("")}, 'LAYER_USED')
class NODE_MT_add_menu_layout(bpy.types.Menu):
bl_label = "Layout"
def draw(self, context):
layout = self.layout
# layout.operator_context = 'INVOKE_AREA'
props = layout.operator("node.add_node", text = "Reroute", icon = 'REC')
props.use_transform = True
props.type = "NodeReroute"
layout.separator(factor = separator_factor_for_menus)
props = layout.operator("node.add_node", text = "Frame", icon = 'MATPLANE')
props.use_transform = True
props.type = "NodeFrame"
def add__NODE_MT_add(self, context):
if context.space_data.tree_type == 'Noter_CustomTreeType':
layout = self.layout
if bool(context.space_data.edit_tree) == True:
# layout.operator("node.noter_node_search", text = "Search", icon = 'VIEWZOOM')
# row = layout.row()
# layout.operator('node.add_search', text = "Search...", icon = 'VIEWZOOM')
# row.operator_context = 'INVOKE_DEFAULT'
factor = .5
layout.separator(factor = 1)
# layout.operator('node.add_search', text = "Note", icon = 'FILE')
layout.menu("NODE_MT_add_menu_notes", text = "Notes", icon = "FILE")
layout.separator(factor = factor)
layout.menu("NODE_MT_add_menu_image_notes", text = "Image Notes", icon = 'IMAGE_DATA')
# layout.menu("NODE_MT_add_menu_othernotes", text = "Other Notes", icon = 'DOCUMENTS')
layout.separator(factor = factor)
layout.menu("NODE_MT_add_menu_layout", text = "Layout", icon = 'SEQ_STRIP_META')
layout.separator(factor = 1)
else:
row = layout.row()
row.scale_y = 1.7
row.operator('node.noter_add_nodes_tree', text = "Create New Node Tree", icon = 'ADD').new = True
node_groups = bpy.data.node_groups.values()
for node_group in node_groups:
layout.separator()
row = layout.row()
row.scale_y = 1
row.operator('node.noter_add_nodes_tree', text = node_group.name, icon = 'NODETREE').name = node_group.name
layout.separator(factor = 1)
# all categories in a list
node_categories = [
MyNodeCategory('OTHERNODES', "All Nodes", items=[
NodeItem("Noter_CustomNodeType", label="Note Nodes"
),
NodeItem("Noter_CustomNodeType", label="Note Node ( w/o some buttons )", settings={
"draw_extra": repr("++"),
}),
NodeItem("Noter_CustomNodeType", label="Note Node ( w/o All buttons )", settings={
"draw_extra": repr("+"),
}),
NodeItem("Noter_CustomNodeType", label="Image Note Node", settings={
"image_bool": repr(True)
}),
NodeItem("Noter_CustomNodeType", label="Image Note Node ( w/o some buttons )", settings={
"draw_extra": repr("++"), "image_bool": repr(True)
}),
NodeItem("Noter_CustomNodeType", label="Image Note Node ( w/o All buttons )", settings={
"draw_extra": repr("+"), "image_bool": repr(True)
}),
NodeItem("NodeReroute", label="Reroute"
),
NodeItem("NodeFrame", label="Frame"
),
]),
# identifier, label, items list
# # MyNodeCategory('SOMENODES', "Some Nodes", NodeItem("Noter_CustomNodeType") ),
# # NodeItem("Noter_CustomNodeType"),
# MyNodeCategory('SOMENODES', "", items=[
# # our basic node
# NodeItem("Noter_CustomNodeType", label = 'Note Node'),
# ]),
# # MyNodeCategory("Noter_CustomNodeType"),
# MyNodeCategory('OTHERNODES', "Other Notes", items=[
# # the node item can have additional settings,
# # which are applied to new nodes
# # NB: settings values are stored as string expressions,
# # for this reason they should be converted to strings using repr()
# NodeItem("Noter_CustomNodeType", label="Without extra buttons", settings={
# "draw_extra": repr("+"),
# }),
# NodeItem("Noter_CustomNodeType", label="Without extra buttons +", settings={
# "draw_extra": repr(""),
# }),
# ]),
]
Nodes_blender_classes = (
# MyNodeCategory,
MyCustomTree,
MyCustomSocket,
MyCustomSocket_2,
MyCustomSocket_3,
MyCustomNode,
MyCustomNode_2,
# Noter_Image,
NodeOperators,
NODE_PT_active_node_generic,
NODE_PT_active_node_color_2,
NODE_SPACE_PT_AnnotationDataPanel_2,
Note_Node_Bool_Operator,
Choose_or_Add_Nodes_Tree,
Noter_Image_Action,
NODE_MT_add_menu_layout,
NODE_MT_add_menu_othernotes,
NODE_MT_add_menu_notes,
NODE_MT_add_menu_image_notes,
# Noter_NodeSearch,
) |
989,776 | 396992979563d42de62b7953f1d16f02d361dae4 | #!/usr/bin/env python
from raco import RACompiler
from raco.language import MyriaAlgebra
from raco.myrialang import compile_to_json
import json
def json_pretty_print(dictionary):
"""a function to pretty-print a JSON dictionary.
From http://docs.python.org/2/library/json.html"""
return json.dumps(dictionary, sort_keys=True,
indent=2, separators=(',', ': '))
# A simple join
join = """
A(x,z) :- Twitter(x,y), Twitter(y,z)
"""
# A multi-join version
multi_join = """
A(x,w) :- R3(x,y,z), S3(y,z,w)
"""
# Triangles
triangles = """
A(x,y,z) :- R(x,y),S(y,z),T(z,x)
"""
# Three hops
three_hops = """
ThreeHops(x,w) :- TwitterK(x,y),TwitterK(y,z),TwitterK(z,w)
"""
# Cross product
cross_product = """
Cross(x,y) :- R1(x),S1(y).
"""
# Union
union = """
B(x) :- A(x)
A(x) :- R(x,3)
A(x) :- S(x,y)
"""
# Chained
chained = """
JustXBill(x) :- TwitterK(x,y)
JustXBill2(x) :- JustXBill(x)
JustXBillSquared(x) :- JustXBill(x), JustXBill2(x)
"""
# Chained 2 -- this one triggers Bug #29
chained2 = """
A(x,z) :- R(x,y,z);
B(w) :- A(3,w)
"""
chained_victim = """
InDegreeNCCDC(dst, count(time)) :- nccdc(src, dst, proto, time, x, y, z)
Victim(dst) :- InDegreeNCCDC(dst, cnt), cnt > 10000
"""
# Recursion
recursion = """
A(x) :- R(x,3)
A(x) :- R(x,y), A(x)
"""
# Filters
filtered = """
filtered(src, dst, time) :- nccdc(src, dst, proto, time, a, b, c), time > 1366475761, time < 1366475821
"""
# Aggregate
aggregate = """
InDegree(dst, count(src)) :- R3(src,dst,val)
"""
# Multi-column aggregate
multi_aggregate = """
TwoHopCount(x, z, count(y)) :- R3(x,y,z)
"""
# No-column aggregate
no_group_aggregate = """
Status(min(x), count(y)) :- Twitter(x,y)
"""
# Which one do we use?
query = filtered
def comment(s):
print "/*\n%s\n*/" % str(s)
# Create a compiler object
dlog = RACompiler()
# parse the query
dlog.fromDatalog(query)
print "************ LOGICAL PLAN *************"
cached_logicalplan = str(dlog.logicalplan)
print dlog.logicalplan
print
# Optimize the query, includes producing a physical plan
print "************ PHYSICAL PLAN *************"
dlog.optimize(target=MyriaAlgebra, eliminate_common_subexpressions=False)
print dlog.physicalplan
print
# generate code in the target language
print "************ CODE *************"
myria_json = compile_to_json(query, cached_logicalplan, dlog.physicalplan)
print json_pretty_print(myria_json)
print
# dump the JSON to output.json
print "************ DUMPING CODE TO output.json *************"
with open('output.json', 'w') as outfile:
json.dump(myria_json, outfile)
|
989,777 | 00eeab94d18d7a58188df6329aeb90c1e8f24a0d | import re
nameRegex = re.compile(r'First Name: (.*) Last Name: (.*)')
mo1 = nameRegex.search('First Name: Aleksey Last Name: Kaunnikov')
print(mo1.group(2)) |
989,778 | f4bf4534aeb2029b165dd4b3b7bd58b16839f84b | n=int(input())
arr=list(map(int,input().split()))
avg=sum(arr)/float(n)
print('%.6f'% avg) |
989,779 | 00cb62e2766658857f849e86bab168a1401cd795 | class WrongParametersError(Exception):
def __init__(self, field_name):
self.message = f'Field {field_name} has an incorrect value.'
def __str__(self):
return self.message
|
989,780 | c7a25bff63df9939af0c3f308ae4c34e19d5d90a | #
# @lc app=leetcode id=402 lang=python3
#
# [402] Remove K Digits
#
# @lc code=start
# TAGS: Greedy
# REVIEWME:
class Solution:
# 32 ms, 90.71%.
def removeKdigits(self, num: str, k: int) -> str:
rv = []
for n in num:
while rv and rv[-1] > n and k:
rv.pop()
k -= 1
rv.append(n)
if k: rv = rv[:-k]
return str(int("".join(rv))) if rv else "0"
# @lc code=end
|
989,781 | 10045274fe412a71277da2bab5e92fb7abf7e6b0 | __author__ = 'PCW-MacBookProRet'
from PyQt5 import QtCore
from PyQt5.QtGui import QIcon, QKeySequence, QFont
from PyQt5.QtWidgets import (QAction, QApplication, QFileDialog, QMainWindow,
QMessageBox, QTextEdit, QDialog, QMenuBar, QMenu)
from PyQt5.QtPrintSupport import QPrintDialog, QPrinter
class ui_TextEditor(object):
def setupUi(self):
self.curFile = ''
self.textEdit = QTextEdit()
self.setCentralWidget(self.textEdit)
self.createActions()
self.createMenus()
self.createToolBars()
self.createStatusBar()
self.readSettings()
self.textEdit.document().contentsChanged.connect(self.documentWasModified)
self.setCurrentFile('')
def closeEvent(self, event):
if self.maybeSave():
self.writeSettings()
event.accept()
else:
event.ignore()
def newFile(self):
if self.maybeSave():
self.textEdit.clear()
self.setCurrentFile('')
def open(self):
if self.maybeSave():
fileName, _ = QFileDialog.getOpenFileName(self)
if fileName:
self.loadFile(fileName)
def print_(self):
document = self.textEdit.document()
printer = QPrinter()
dlg = QPrintDialog(printer, self)
if dlg.exec_() != QDialog.Accepted:
return
document.print_(printer)
self.statusBar().showMessage("Ready", 2000)
def save(self):
if self.curFile:
return self.saveFile(self.curFile)
return self.saveAs()
def saveAs(self):
fileName, _ = QFileDialog.getSaveFileName(self)
if fileName:
return self.saveFile(fileName)
return False
def about(self):
QMessageBox.about(self, "About VGenes Text Editor",
"The <b>VGenes Text Editor</b> allows "
"you to edit, save, and print documents "
"generated by VGenes.")
def IncreaseFont(self):
FontIs = self.textEdit.currentFont()
font = QFont(FontIs)
FontSize = int(font.pointSize())
FontFam = font.family()
if FontSize < 36:
FontSize += 1
font.setPointSize(FontSize)
font.setFamily(FontFam)
self.textEdit.setFont(font)
def DecreaseFont(self):
FontIs = self.textEdit.currentFont()
font = QFont(FontIs)
FontSize = int(font.pointSize())
FontFam = font.family()
if FontSize > 6:
FontSize -= 1
font.setPointSize(FontSize)
font.setFamily(FontFam)
self.textEdit.setFont(font)
def documentWasModified(self):
self.setWindowModified(self.textEdit.document().isModified())
def createActions(self):
self.newAct = QAction(QIcon(':/PNG-Icons/page.png'), "&New", self,
shortcut=QKeySequence.New, statusTip="Create a new file",
triggered=self.newFile)
self.openAct = QAction(QIcon(':/PNG-Icons/folder.png'), "&Open...", self,
shortcut=QKeySequence.Open, statusTip="Open an existing file",
triggered=self.open)
# self.closeAct = QAction("Close", self, shortcut=QKeySequence.Close,
# statusTip="Close window", triggered=self.close)
self.closeAct = QAction("&Close", self,
shortcut=QKeySequence.Close,
statusTip="Close window", triggered=self.close)
self.saveAct = QAction(QIcon(':/PNG-Icons/SaveIcon.png'), "&Save", self,
shortcut=QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.saveAsAct = QAction("Save &As...", self,
shortcut=QKeySequence.SaveAs,
statusTip="Save the document under a new name",
triggered=self.saveAs)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
statusTip="Exit VGenes Text Editor", triggered=self.close)
self.cutAct = QAction(QIcon(':/PNG-Icons/scissor.png'), "Cu&t", self,
shortcut=QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.textEdit.cut)
self.IncreaseAct = QAction(QIcon(':/PNG-Icons/plus.png'), "&Increase", self,
statusTip="Increase font size",
triggered=self.IncreaseFont)
self.DecreaseAct = QAction(QIcon(':/PNG-Icons/minus.png'), "&Decrease", self,
statusTip="Decrease font size",
triggered=self.DecreaseFont)
self.printAct = QAction(QIcon(':/PNG-Icons/print.png'), "&Print...", self,
shortcut=QKeySequence.Print,
statusTip="Print the current form letter",
triggered=self.print_)
self.copyAct = QAction(QIcon(':/PNG-Icons/pages.png'), "&Copy", self,
shortcut=QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.textEdit.copy)
self.pasteAct = QAction(QIcon(':/PNG-Icons/Paste.png'), "&Paste", self,
shortcut=QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.textEdit.paste)
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
# self.aboutQtAct = QAction("About &Qt", self,
# statusTip="Show the Qt library's About box",
# triggered=QApplication.instance().aboutQt)
self.cutAct.setEnabled(False)
self.copyAct.setEnabled(False)
self.textEdit.copyAvailable.connect(self.cutAct.setEnabled)
self.textEdit.copyAvailable.connect(self.copyAct.setEnabled)
def createMenus(self):
self.menubar = QMenuBar(self)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1029, 22))
self.menubar.setDefaultUp(False)
self.menubar.setNativeMenuBar(False)
self.menubar.setObjectName("menubar")
self.menuFile = QMenu(self.menubar)
self.setMenuBar(self.menubar)
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.closeAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.saveAsAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator();
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu("&Edit")
self.editMenu.addAction(self.cutAct)
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.pasteAct)
self.menuBar().addSeparator()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
# self.helpMenu.addAction(self.aboutQtAct)
def createToolBars(self):
self.fileToolBar = self.addToolBar("File")
self.fileToolBar.addAction(self.newAct)
self.fileToolBar.addAction(self.openAct)
# self.fileToolBar.addAction(self.closeACT)
self.fileToolBar.addAction(self.saveAct)
self.fileToolBar.addAction(self.printAct)
self.editToolBar = self.addToolBar("Edit")
self.editToolBar.addAction(self.cutAct)
self.editToolBar.addAction(self.copyAct)
self.editToolBar.addAction(self.pasteAct)
self.FontSizeToolBar = self.addToolBar("FontSize")
self.FontSizeToolBar.addAction(self.IncreaseAct)
self.FontSizeToolBar.addAction(self.DecreaseAct)
def createStatusBar(self):
self.statusBar().showMessage("Ready")
def readSettings(self):
settings = QtCore.QSettings("Trolltech", "VGenes Text Editor")
pos = settings.value("pos", QtCore.QPoint(200, 200))
size = settings.value("size", QtCore.QSize(400, 400))
self.resize(size)
self.move(pos)
def writeSettings(self):
settings = QtCore.QSettings("Trolltech", "VGenes Text Editor")
settings.setValue("pos", self.pos())
settings.setValue("size", self.size())
def maybeSave(self):
if self.textEdit.document().isModified():
ret = QMessageBox.warning(self, "VGenes Text Editor",
"The document has been modified.\nDo you want to save "
"your changes?",
QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)
if ret == QMessageBox.Save:
return self.save()
if ret == QMessageBox.Cancel:
return False
return True
def loadFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QMessageBox.warning(self, "VGenes Text Editor",
"Cannot read file %s:\n%s." % (fileName, file.errorString()))
return
inf = QtCore.QTextStream(file)
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
self.textEdit.setPlainText(inf.readAll())
QApplication.restoreOverrideCursor()
self.setCurrentFile(fileName)
self.statusBar().showMessage("File loaded", 2000)
def saveFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QMessageBox.warning(self, "VGenes Text Editor",
"Cannot write file %s:\n%s." % (fileName, file.errorString()))
return False
outf = QtCore.QTextStream(file)
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
outf << self.textEdit.toPlainText()
QApplication.restoreOverrideCursor()
self.setCurrentFile(fileName);
self.statusBar().showMessage("File saved", 2000)
return True
def setCurrentFile(self, fileName):
self.curFile = fileName
self.textEdit.document().setModified(False)
self.setWindowModified(False)
if self.curFile:
shownName = self.strippedName(self.curFile)
else:
shownName = 'untitled.txt'
self.setWindowTitle("%s[*] - VGenes Text Editor" % shownName)
def strippedName(self, fullFileName):
return QtCore.QFileInfo(fullFileName).fileName() |
989,782 | 1205b98439daa78cf3954b58f60b1a1a51624440 | # always use UTC time
# import time
# # UTC in a named tuple, time = 0 starts on jan 1 1970
# print(time.gmtime(0))
#
# # local time
# print(time.localtime())
#
# # epoch, number seconds since jan 1 1970
# print(time.time())
# print()
#
# # extract parts
# time_here = time.localtime()
# print(time_here)
# print("Year: ", time_here[0], time_here.tm_year) # two ways to print info, from tuple or through key
# print("Month: ", time_here[1], time_here.tm_mon)
# print("Day: ", time_here[2], time_here.tm_mday)
import time
# from time import time as my_timer # problem is if daylight savings resets between
from time import perf_counter as my_timer # gives time elapsed without using actual time
# from time import monotonic as my_timer # time that cannot go backwards
# from time import process_time as my_timer # time spent by CPU
import random
input("Press enter to start.") # since input() used enter will signal the action
wait_time = random.randint(1, 6)
time.sleep(wait_time)
start_time = my_timer()
input("Press enter to stop.")
end_time = my_timer()
print("Started at: " + time.strftime("%X", time.localtime(start_time)))
print("Ended at: " + time.strftime("%X", time.localtime(end_time)))
print("Your reaction time was {} seconds.".format(round(end_time - start_time, 2)))
|
989,783 | e213f886db0a00fc92e57806b97a1763d4a0614c | import unittest
import os
import sys
test_dir = os.path.dirname(os.path.abspath(__file__))
corpustools_path = os.path.split(os.path.split(os.path.split(test_dir)[0])[0])[0]
sys.path.insert(0,corpustools_path)
from corpustools.corpus.io import (download_binary, save_binary, load_binary,
load_corpus_csv,load_spelling_corpus, load_transcription_corpus,
export_corpus_csv, export_feature_matrix_csv,
load_feature_matrix_csv,
load_corpus_ilg)
from corpustools.exceptions import DelimiterError, ILGError
from corpustools.corpus.classes import (Word, Corpus, FeatureMatrix)
from corpustools.corpus.tests.lexicon_test import create_unspecified_test_corpus
TEST_DIR = r'C:\Users\michael\Dropbox\Measuring_Phonological_Relations\Computational\CorpusTools_test_files\Corpus_loading'
class ILGTest(unittest.TestCase):
def setUp(self):
self.basic_path = os.path.join(TEST_DIR,'ilg','test_basic.txt')
self.mismatched_path = os.path.join(TEST_DIR,'ilg','test_mismatched.txt')
def test_ilg_basic(self):
corpus = load_corpus_ilg('test', self.basic_path,delimiter=None,ignore_list=[], trans_delimiter = '.')
#print(corpus.words)
self.assertEqual(corpus.lexicon.find('a').frequency,2)
def test_ilg_mismatched(self):
self.assertRaises(ILGError,load_corpus_ilg, 'test', self.mismatched_path, delimiter=None,ignore_list=[], trans_delimiter = '.')
class CustomCorpusTest(unittest.TestCase):
def setUp(self):
self.example_path = os.path.join(TEST_DIR,'example.txt')
self.hayes_path = os.path.join(TEST_DIR,'hayes.txt')
self.spe_path = os.path.join(TEST_DIR,'spe.txt')
def test_corpus_csv(self):
if not os.path.exists(TEST_DIR):
return
self.assertRaises(DelimiterError,load_corpus_csv,'example',self.example_path,delimiter='\t')
self.assertRaises(DelimiterError,load_corpus_csv,'example',self.example_path,delimiter=',',trans_delimiter='/')
#c = load_corpus_csv('example',self.example_path,delimiter=',')
c = load_corpus_csv('example',self.example_path,delimiter=',')
example_c = create_unspecified_test_corpus()
self.assertIsInstance(c,Corpus)
self.assertEqual(c,example_c)
class CustomCorpusTextTest(unittest.TestCase):
def setUp(self):
self.spelling_path = os.path.join(TEST_DIR,'test_text_spelling.txt')
self.transcription_path = os.path.join(TEST_DIR,'test_text_transcription.txt')
self.transcription_morphemes_path = os.path.join(TEST_DIR,'test_text_transcription_morpheme_boundaries.txt')
self.full_feature_matrix_path = os.path.join(TEST_DIR,'basic.feature')
self.missing_feature_matrix_path = os.path.join(TEST_DIR, 'missing_segments.feature')
def test_load_spelling_no_ignore(self):
if not os.path.exists(TEST_DIR):
return
self.assertRaises(DelimiterError, load_spelling_corpus, 'test', self.spelling_path,"?",[])
c = load_spelling_corpus('test',self.spelling_path,' ',[])
self.assertEqual(c.lexicon['ab'].frequency, 2)
def test_load_spelling_ignore(self):
if not os.path.exists(TEST_DIR):
return
c = load_spelling_corpus('test',self.spelling_path,' ',["'",'.'])
self.assertEqual(c.lexicon['ab'].frequency, 3)
self.assertEqual(c.lexicon['cabd'].frequency, 1)
def test_load_transcription(self):
if not os.path.exists(TEST_DIR):
return
self.assertRaises(DelimiterError,load_transcription_corpus,'test',
self.transcription_path," ",[],
trans_delimiter = ',')
c = load_transcription_corpus('test',self.transcription_path,' ',[],trans_delimiter='.')
self.assertEqual(sorted(c.lexicon.inventory), sorted(['#','a','b','c','d']))
def test_load_transcription_morpheme(self):
if not os.path.exists(TEST_DIR):
return
c = load_transcription_corpus('test',self.transcription_morphemes_path,' ',['-','=','.'],trans_delimiter='.')
self.assertEqual(c.lexicon['cab'].frequency, 2)
def test_load_with_fm(self):
if not os.path.exists(TEST_DIR):
return
c = load_transcription_corpus('test',self.transcription_path,' ',
['-','=','.'],trans_delimiter='.',
feature_system_path = self.full_feature_matrix_path)
self.assertEqual(c.lexicon.specifier,load_binary(self.full_feature_matrix_path))
self.assertEqual(c.lexicon['cab'].frequency, 1)
self.assertEqual(c.lexicon.check_coverage(),[])
c = load_transcription_corpus('test',self.transcription_path,' ',
['-','=','.'],trans_delimiter='.',
feature_system_path = self.missing_feature_matrix_path)
self.assertEqual(c.lexicon.specifier,load_binary(self.missing_feature_matrix_path))
self.assertEqual(sorted(c.lexicon.check_coverage()),sorted(['b','c','d']))
class BinaryCorpusLoadTest(unittest.TestCase):
def setUp(self):
self.example_path = os.path.join(TEST_DIR,'example.corpus')
def test_load(self):
if not os.path.exists(TEST_DIR):
return
c = load_binary(self.example_path)
example_c = create_unspecified_test_corpus()
self.assertEqual(c,example_c)
class BinaryCorpusSaveTest(unittest.TestCase):
def setUp(self):
if not os.path.exists(TEST_DIR):
return
self.corpus = create_unspecified_test_corpus()
self.path = os.path.join(TEST_DIR,'testsave.corpus')
def test_save(self):
if not os.path.exists(TEST_DIR):
return
save_binary(self.corpus,self.path)
c = load_binary(self.path)
self.assertEqual(self.corpus,c)
class BinaryCorpusDownloadTest(unittest.TestCase):
def setUp(self):
self.name = 'example'
self.path = os.path.join(TEST_DIR,'testdownload.corpus')
self.example_path = os.path.join(TEST_DIR,'example.corpus')
def test_download(self):
if not os.path.exists(TEST_DIR):
return
download_binary(self.name,self.path)
c = load_binary(self.path)
example_c = load_binary(self.example_path)
self.assertEqual(c,example_c)
class FeatureMatrixCsvTest(unittest.TestCase):
def setUp(self):
self.basic_path = os.path.join(TEST_DIR,'test_feature_matrix.txt')
self.missing_value_path = os.path.join(TEST_DIR,'test_feature_matrix_missing_value.txt')
self.extra_feature_path = os.path.join(TEST_DIR,'test_feature_matrix_extra_feature.txt')
def test_basic(self):
if not os.path.exists(TEST_DIR):
return
self.assertRaises(DelimiterError,load_feature_matrix_csv,'test',self.basic_path,' ')
fm = load_feature_matrix_csv('test',self.basic_path,',')
self.assertEqual(fm.name,'test')
self.assertEqual(fm['a','feature1'], '+')
def test_missing_value(self):
if not os.path.exists(TEST_DIR):
return
fm = load_feature_matrix_csv('test',self.missing_value_path,',')
self.assertEqual(fm['d','feature2'],'n')
def test_extra_feature(self):
if not os.path.exists(TEST_DIR):
return
fm = load_feature_matrix_csv('test',self.extra_feature_path,',')
self.assertRaises(KeyError,fm.__getitem__,('a','feature3'))
class BinaryFeatureMatrixSaveTest(unittest.TestCase):
def setUp(self):
self.basic_path = os.path.join(TEST_DIR,'test_feature_matrix.txt')
self.basic_save_path = os.path.join(TEST_DIR,'basic.feature')
self.missing_segment_path = os.path.join(TEST_DIR,'test_feature_matrix_missing_segment.txt')
self.missing_save_path = os.path.join(TEST_DIR,'missing_segments.feature')
def test_save(self):
if not os.path.exists(TEST_DIR):
return
fm = load_feature_matrix_csv('test',self.basic_path,',')
save_binary(fm,self.basic_save_path)
saved_fm = load_binary(self.basic_save_path)
self.assertEqual(fm,saved_fm)
fm = load_feature_matrix_csv('test',self.missing_segment_path,',')
save_binary(fm,self.missing_save_path)
saved_fm = load_binary(self.missing_save_path)
self.assertEqual(fm,saved_fm)
if __name__ == '__main__':
if os.path.exists(TEST_DIR):
unittest.main()
|
989,784 | e1cb1a9cba1b4be4a1f7cb2a19d5bb4c4a2c59ed | from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Sequence
from aiohttp.web import HTTPCreated, HTTPNoContent
from yarl import URL
from .core import ClientError, _Core
from .utils import NoPublicConstructor
class Action(str, Enum):
READ = "read"
WRITE = "write"
MANAGE = "manage"
@dataclass(frozen=True)
class Permission:
uri: URL
action: Action
@dataclass(frozen=True)
class Share:
user: str
permission: Permission
class Users(metaclass=NoPublicConstructor):
def __init__(self, core: _Core) -> None:
self._core = core
async def get_acl(
self, user: str, scheme: Optional[str] = None
) -> Sequence[Permission]:
url = URL(f"users/{user}/permissions")
params = {"scheme": scheme} if scheme else {}
async with self._core.request("GET", url, params=params) as resp:
payload = await resp.json()
ret = []
for item in payload:
uri = URL(item["uri"])
action = Action(item["action"])
ret.append(Permission(uri, action))
return ret
async def get_shares(
self, user: str, scheme: Optional[str] = None
) -> Sequence[Share]:
url = URL(f"users/{user}/permissions/shared")
params = {"scheme": scheme} if scheme else {}
async with self._core.request("GET", url, params=params) as resp:
payload = await resp.json()
ret = []
for item in payload:
uri = URL(item["uri"])
action = Action(item["action"])
ret.append(Share(item["user"], Permission(uri, action)))
return ret
async def share(self, user: str, permission: Permission) -> None:
url = URL(f"users/{user}/permissions")
payload = [_permission_to_api(permission)]
async with self._core.request("POST", url, json=payload) as resp:
# TODO: server part contain TODO record for returning more then
# HTTPCreated, this part must me refactored then
if resp.status != HTTPCreated.status_code:
raise ClientError("Server return unexpected result.") # NOQA
return None
async def revoke(self, user: str, uri: URL) -> None:
url = URL(f"users/{user}/permissions")
async with self._core.request("DELETE", url, params={"uri": str(uri)}) as resp:
# TODO: server part contain TODO record for returning more then
# HTTPNoContent, this part must me refactored then
if resp.status != HTTPNoContent.status_code:
raise ClientError(
f"Server return unexpected result: {resp.status}."
) # NOQA
return None
def _permission_to_api(perm: Permission) -> Dict[str, Any]:
primitive: Dict[str, Any] = {"uri": str(perm.uri), "action": perm.action.value}
return primitive
|
989,785 | d7e546b4c65395923932773951ecc2537de52af5 | cantantes = [" 2pac", "Drake", "Bad Bunny", "Julio iglesias"]
numeros = [1, 2, 5, 8, 3, 4,]
#ordenar listas
numeros.sort()
print(numeros)
#añadir elementos
cantantes.append("Natos y Waor")
cantantes.insert(1, "David Bisbal")
print(cantantes)
#eliminar elemetos
cantantes.pop(1)
cantantes.remove("Bad Bunny")
print(cantantes)
#Dar la vuelta
print(numeros)
numeros.reverse()
print(numeros)
# Buscar dentro de una lista
print("Drake" in cantantes)
#contar elementos
print(len(cantantes))
# cuantas veces aparece un elemento
numeros.append(8)
print(numeros.count(8))
# Conseguir indice
print(cantantes.index("Drake"))
#Unir listas
cantantes.extend(numeros)
print(cantantes) |
989,786 | 21da99adfeecec28a7e90db9d4dadae586024841 | #MenuTitle: Bind all anchors in a font with their node
# -*- coding: utf-8 -*-
__doc__="""
Bind anchors with a node in a font.
"""
import GlyphsApp
import vanilla
Font = Glyphs.font
Glyphs.clearLog()
Glyphs.showMacroWindow()
selectedLayer = Font.selectedLayers[0]
# glyph=selectedLayer.parent
for glyph in Font.glyphs:
for layer in glyph.layers:
for path in layer.paths:
for node in path.nodes:
x=node.position.x
y=node.position.y
nodeAnchors=list()
for anchor in layer.anchors:
if(anchor.position.x==x and anchor.position.y==y):
nodeAnchors.append(anchor.name)
if len(nodeAnchors)>0:
node.userData["anchors"]=nodeAnchors
else:
del(node.userData["anchors"])
|
989,787 | a3ff65b34a1a646ac948f87aa1b257921397379a | from django.views import generic
from django.shortcuts import render, redirect, get_object_or_404, reverse
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .models import Event, Info, User
from .forms import EventForm,SignUpForm
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import login, authenticate,logout
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.forms import UserCreationForm
from django.utils import timezone
import datetime
import sys
class IndexView(generic.ListView):
"""Show index view which is a list of all events and render index page."""
template_name = 'kvent/index.html'
context_object_name = 'all_event'
def get_queryset(self):
query = self.request.GET.get('query')
if query:
return Event.objects.filter(event_name__contains=query)
else:
return Event.objects.all().order_by('-date_time')
@login_required(login_url='login/')
def profile(request):
"""Function for render user's profile page."""
user = Info.objects.all()
return render(request, 'kvent/profile.html',{user:'user'})
def detail(request, event_id):
"""Function for render event detail page."""
event = get_object_or_404(Event, pk=event_id)
user = request.user
return render(request, 'kvent/event-detail.html', {'event': event, 'user': user})
@login_required(login_url='login')
def event_history(request, username):
user = request.user
event_host = Event.objects.filter(user=user)
event_participant = Event.objects.filter(participants=user)
return render(request, 'kvent/event-history.html', {
'user': user,
'event_host': event_host,
'event_participant': event_participant
})
@login_required(login_url='login')
def create_event(request):
"""
Function for create event with form and only logged in user can create the event
and render create event page.
"""
form = EventForm(request.POST, request.FILES)
number_people = form.data.get('number_people')
arrange_time = form.data.get('arrange_time')
if request.method == 'POST':
if form.is_valid():
if int(number_people) >= 10:
try:
if datetime.datetime.strptime(arrange_time,'%Y-%m-%d %H:%M').date() > timezone.now().date():
photo = form.cleaned_data.get('photo')
event_name = form.data.get('event_name')
location = form.data.get('location')
short_description = form.data.get('short_description')
long_description = form.data.get('long_description')
event = Event(event_name = event_name, location=location, short_description = short_description, long_description = long_description, arrange_time = arrange_time, number_people = number_people,full=False, photo=photo, user=request.user)
event.save()
messages.success(request, f"You've created the {event_name} event!")
return HttpResponseRedirect(reverse('index'))
else:
messages.warning(request, "Arrangement date must be in the future!")
except:
messages.warning(request, f"You should input the date and time as format!")
return render(request, 'Kvent/create-event-page.html', {'form': form})
else :
messages.warning(request, "Number of paricipants must more than 10 or equal")
else:
messages.warning(request, f"You should input the date and time as format!")
return render(request, 'Kvent/create-event-page.html', {'form': form})
def signup(request):
"""Function for let user who doesn't have an account to create an account and render signup page."""
if request.method == 'POST':
form = SignUpForm(data=request.POST)
if form.is_valid():
email = form.data.get('email')
username = form.data.get('username')
if User.objects.filter(username=username).exists():
messages.error(request, "Your username is already taken!")
form = SignUpForm()
else:
raw_password = form.data.get('raw_password')
user = authenticate(email=email,username=username, password=raw_password)
form.save()
return redirect(reverse('login'))
else:
form = SignUpForm()
return render(request,'registration/createaccount.html', {'form': form})
@login_required(login_url='/login/')
def delete_event(request, event_id):
"""Function for delete event and only logged in user can delete event."""
DANGER = 50
event = Event.objects.get( pk=event_id)
if str(request.user) == event.user:
messages.add_message(request, DANGER, f"You've deleted the {event.event_name} event.", extra_tags='danger')
event.delete()
else:
messages.warning(request, "You can only delete your event.")
return redirect('index')
return redirect('index')
@login_required(login_url='/login/')
def join_event(request, event_id):
user = request.user.id
try:
event = get_object_or_404(Event, pk=event_id)
except (KeyError, Event.DoesNotExist):
return redirect('index')
else:
if str(request.user) == event.user:
messages.warning(request, f"You can't join your own event!")
return redirect('index')
else:
messages.success(request, f"You've joined the {event.event_name} event!")
event.participants.add(user)
return redirect('index')
@login_required(login_url='/login/')
def leave_event(request, event_id):
DANGER = 50
user = request.user.id
try:
event = get_object_or_404(Event, pk=event_id)
except (KeyError, Event.DoesNotExist):
return redirect('index')
else:
messages.add_message(request, DANGER, f"You've left the {event.event_name} event.", extra_tags='danger')
event.participants.remove(user)
return redirect('index')
@login_required(login_url='login')
def logout(request):
logout(request)
return redirect('index')
def view404(request, exception):
res = render(request, 'Kvent/404.html')
res.status_code = 404
return res |
989,788 | fbde2b4d60f3e30130cdbc462fe6d8f1a12dc25b | import os
import sys
import time
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
import operator
import math
img = nd.imread('images/digits.png')
nrow, ncol = img.shape[0:2]
xs = 10.
ys = xs*float(nrow)/float(ncol)
# plt.close(0)
# fig11, ax11 = plt.subplots(num=0,figsize=[xs,ys])
# fig11.subplots_adjust(0,0,1,1)
# ax11.axis('off')
# im11 = ax11.imshow(img)
# fig11.canvas.draw()
nums = img.reshape(50,20,100,20).transpose(0,2,1,3).reshape(5000,20,20)
fig12, ax12 = plt.subplots(num=1,figsize=[xs/1.5,xs/1.5])
fig12.subplots_adjust(0,0,1,1)
ax12.axis('off')
im12 = ax12.imshow(nums[0])
fig12.canvas.draw()
fig12.show()
nums_avg = np.array([nums[i*500:(i+1)*500].mean(0) for i in range(10)])
failureIndex = {}
fail = []
for ii in range(10):
incorrect = 0
errorDict = {}
l = []
for i in xrange(500):
index = ii * 500 + i
samp = nums[index]
PT = nums_avg.reshape(10,400)
P = PT.transpose()
PTPinv = np.linalg.inv(np.dot(PT,P))
PTyy = np.dot(PT,samp.flatten())
avec = np.dot(PTPinv,PTyy)
l.append(avec)
if np.argmax(avec) != ii:
failureIndex[index] = np.argmax(avec)
fail.append(index)
incorrect += 1
if np.argmax(avec) in errorDict:
errorDict[np.argmax(avec)] += 1
else:
errorDict[np.argmax(avec)] = 1
else:
pass
xs = 6
ys = 8
fig0, ax0 = plt.subplots(10,1,figsize=[xs,ys], sharex=True)
sorted_x = sorted(errorDict.items(), key=operator.itemgetter(1), reverse = True)
v = np.vstack(l)
add = v.T
for j in range(0,10):
ax0[j].hist(add[j], bins = 100, color='cyan')
[i.set_yticklabels('') for i in ax0]
ax0[j].set_title("Known %s's Against %s's"% (ii,str(j)), fontsize=10)
fig0.subplots_adjust(hspace=2)
fig0.subplots_adjust(.1,.1,.95,.95)
fig0.canvas.draw()
fig0.show()
print "%s%% of %s's were incorrectly identified, the most common guess for those failures was %s's" % \
((incorrect/500.0) * 100, ii, sorted_x[0][0])
t0 = time.time()
dt = 0.0
while dt<30.:
i = int(math.floor(len(fail)*np.random.rand()))
ii = fail[i]
if dt == 0.0:
im12.set_data(nums[ii])
lab = ax12.text(0,0, 'Guess: ', va = 'top', fontsize = 20, color = 'w')
lab.set_text('Guess: {0}'.format(failureIndex[ii]))
else:
lab.remove()
im12.set_data(nums[ii])
lab = ax12.text(0,0, 'Guess: ', va = 'top', fontsize = 20, color = 'w')
lab.set_text('Guess: {0}'.format(failureIndex[ii]))
fig12.canvas.draw()
fig12.show()
time.sleep(1.0)
dt = time.time()-t0
plt.clf('all')
print "\n"
print "Removing zero point offset:\n"
failureIndex = {}
fail = []
for ii in range(10):
incorrect = 0
errorDict = {}
l = []
for i in xrange(500):
index = ii * 500 + i
samp = nums[index]
PT1 = nums_avg.reshape(10,400)
PT = np.vstack((PT1, np.ones(400)))
P = PT.transpose()
PTPinv = np.linalg.inv(np.dot(PT,P))
PTyy = np.dot(PT,samp.flatten())
#Take only first 10 elements
avec = np.dot(PTPinv,PTyy)
avec = avec[0:10]
l.append(avec[0:10])
if np.argmax(avec) != ii:
failureIndex[index] = np.argmax(avec)
fail.append(index)
#fail[index] = np.argmax(avec)
incorrect += 1
if np.argmax(avec) in errorDict:
errorDict[np.argmax(avec)] += 1
else:
errorDict[np.argmax(avec)] = 1
else:
pass
xs = 6
ys = 8
fig1, ax1 = plt.subplots(10,1,figsize=[xs,ys], sharex=True)
sorted_x = sorted(errorDict.items(), key=operator.itemgetter(1), reverse = True)
v = np.vstack(l)
add = v.T
for j in range(0,10):
ax1[j].hist(add[j], bins = 100, color='cyan')
[i.set_yticklabels('') for i in ax1]
ax1[j].set_title("Known %s's Against %s's"% (ii,str(j)), fontsize=10)
fig1.subplots_adjust(hspace=2)
fig1.subplots_adjust(.1,.1,.95,.95)
fig1.canvas.draw()
print "%s%% of %s's were incorrectly identified, the most common guess for those failures was %s's" % \
((incorrect/500.0) * 100, ii, sorted_x[0][0])
plt.show() |
989,789 | 772db08eba390beb54fec73cdd22d3e6cf8fe9e9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import logging
from flask import Flask
import altcoinvw.util
from fullnode.node import *
from fullnode.webindex import *
from fullnode.webaltcoin import *
__all__ = []
BasePath = os.path.abspath(os.path.join(sys.argv[0], '../'))
Logger = logging.getLogger('')
WebApp = Flask(__name__)
class WebServerEnv:
def __init__(self):
self.Node = None
def Start():
env = WebServerEnv()
env.Node = Node(BasePath)
env.Node.StartInNewThread()
WebAppAddUrlRule('/Index', lambda : CtrIndex(env))
WebAppAddUrlRule('/Altcoin/Kill', lambda : CtrAltcoinKill(env))
WebAppAddUrlRule('/Altcoin/Run', lambda : CtrAltcoinRun(env))
WebAppAddUrlRule('/Altcoin/Call', lambda : CtrAltcoinCall(env))
WebApp.run(debug=False, host='0.0.0.0', port=80)
def WebAppAddUrlRule(name, func):
WebApp.add_url_rule(name, name, view_func=func, methods=['GET', 'POST'])
def CtrRefresh():
try:
if altcoinvw.util.WebCheckModuleIfNeedReloadAndPrepareReload('webindex') == True:
imp.reload(webindex)
except Exception as e:
Logger.fatal('{}'.format(e))
return 'success'
|
989,790 | beedb14fe138a4f38042a8bd5b11918a2c14d94a | from model import ActorNetwork, CriticNetwork
from OUNoise import OUNoise
from utilities import hard_update
import torch
from torch.optim import Adam
import numpy as np
# import pdb
class DDPGAgent:
def __init__(self, state_size, action_size, num_agents,
hidden_in_actor=512, hidden_out_actor=256, lr_actor=1e-4,
hidden_in_critic=512, hidden_out_critic=256, lr_critic=3e-4, weight_decay_critic=0,
seed=1, device='cpu'):
super(DDPGAgent, self).__init__()
self.device = device
# Actor
self.actor = ActorNetwork(state_size, hidden_in_actor, hidden_out_actor, action_size, seed).to(device)
self.target_actor = ActorNetwork(state_size, hidden_in_actor, hidden_out_actor, action_size, seed).to(device)
self.actor_optimizer = Adam(self.actor.parameters(), lr=lr_actor)
# Target
self.critic = CriticNetwork(state_size, action_size, num_agents, hidden_in_critic, hidden_out_critic, seed).to(device)
self.target_critic = CriticNetwork(state_size, action_size, num_agents, hidden_in_critic, hidden_out_critic, seed).to(device)
self.critic_optimizer = Adam(self.critic.parameters(), lr=lr_critic, weight_decay=weight_decay_critic)
# Noise
self.noise = OUNoise(action_size, seed, scale=1.0)
# initialize targets same as original networks
hard_update(self.target_actor, self.actor)
hard_update(self.target_critic, self.critic)
def reset(self):
self.noise.reset()
def act(self, obs, noise_factor=0.0):
if torch.is_tensor(obs):
states = obs
else:
states = torch.from_numpy(obs).float().to(self.device)
self.actor.eval()
with torch.no_grad():
actions = self.actor(states).cpu().data.numpy()
self.actor.train()
actions += noise_factor*self.noise.sample()
return np.clip(actions, -1, 1)
def target_act(self, obs):
if torch.is_tensor(obs):
states = obs
else:
states = torch.from_numpy(obs).float().to(self.device)
self.target_actor.eval()
with torch.no_grad():
actions = self.target_actor(states).cpu().data.numpy()
self.target_actor.train()
return np.clip(actions, -1, 1)
|
989,791 | 099ed354a799204b3c2e75c026dbaaabe1fc8264 | def prime(n):
for i in range(2,n+1):
s=0
for j in range(2,i//2+1):
if i%j==0:
s=s+i
if(s<=0):
print(i)
prime(50)
|
989,792 | 68642dfe6aea428e9eb5443d17a73fceb20d9659 |
import requests
from auth import auth_data, save_auth_data
def do_refresh_token():
auth_code = auth_data['auth_code']
params = {
'grant_type': 'refresh_token',
# 'code': auth_data['auth_code'],
'refresh_token': auth_data['refresh_token'],
'client_id': auth_data['client_id'],
'client_secret': auth_data['client_secret'],
}
url = 'https://api.box.com/oauth2/token'
res = requests.post(url, data=params)
if res.status_code != 200:
print(res.status_code)
print(res.content)
assert res.status_code == 200
auth_data['access_token'] = res.json()['access_token']
print(auth_data['access_token'])
save_auth_data()
def get(url, params=None, *, refresh_token=True):
headers = {
'Authorization': f'Bearer {auth_data["access_token"]}'
}
res = requests.get(url, params=params, headers=headers)
if res.status_code == 401 and refresh_token:
do_refresh_token()
res = get(url, params, refresh_token=False)
assert res.status_code == 200, f"Call failed {res.status_code}"
return res
|
989,793 | 8b73dc33c4f95c31fcd3a92e5e23e9480d811768 | import hashlib
from datetime import datetime, date
from typing import Tuple, Union, Callable, Optional, List
import dateparser
from bs4 import BeautifulSoup
from definitions import CRYPTONIA_WORLD_COUNTRY, CRYPTONIA_MARKET_EXTERNAL_MARKET_STRINGS, \
FEEDBACK_TEXT_HASH_COLUMN_LENGTH, MD5_HASH_STRING_ENCODING
from src.base.base_functions import BaseFunctions, get_external_rating_tuple
from src.db_utils import shorten_and_sanitize_for_text_column
from src.utils import parse_time_delta_from_string
ASSUMED_MINIMUM_NUMBER_OF_PRODUCT_DATA_DIVS = 7
def _parse_percent_positive_rating(label_div: BeautifulSoup) -> float:
spans = [span for span in label_div.findAll('span')]
assert len(spans) == 1
span = spans[0]
return float(span.text[:-1])
def _parse_disputes(label_div: BeautifulSoup) -> Tuple[int, int]:
good_spans = [good_span for good_span in label_div.findAll('span', attrs={'class': 'good'})]
assert len(good_spans) == 1
good_span = good_spans[0]
inner_spans = [span for span in good_span.findAll('span')]
assert len(inner_spans) == 1
inner_span = inner_spans[0]
disputes_won = int(inner_span.text)
bad_spans = [bad_span for bad_span in label_div.findAll('span', attrs={'class': 'good'})]
assert len(bad_spans) == 1
bad_span = bad_spans[0]
disputes_lost = int(bad_span.text)
return disputes_won, disputes_lost
def _parse_external_market_verifications(label_div: BeautifulSoup) -> Tuple[
Tuple[str, int, float, float, int, int, int, str]]:
external_market_verifications: List[Tuple[str, int, float, float, int, int, int, str]] = []
spans = [span for span in label_div.findAll('span')]
if len(spans) > 0:
verified_spans = [span for span in label_div.findAll('span', attrs={'class': 'verified'})]
remaining_external_market_ratings = list(CRYPTONIA_MARKET_EXTERNAL_MARKET_STRINGS)
for verified_span in verified_spans:
for market_id, market_string in remaining_external_market_ratings:
if verified_span.text.find(market_string) != -1:
parts = verified_span.text.split(market_string)
external_rating_tuple = get_external_rating_tuple(market_id, "".join(parts).strip())
external_market_verifications.append(external_rating_tuple)
remaining_external_market_ratings.remove((market_id, market_string))
break
if len(external_market_verifications) != len(verified_spans):
raise AssertionError(f"Unknown external market {verified_spans}")
return tuple(external_market_verifications)
def _parse_amount_on_escrow(label_div: BeautifulSoup) -> Tuple[str, float, str, float]:
spans = [span for span in label_div.findAll('span')]
assert len(spans) == 1
span = spans[0]
crypto_amount_str, crypto_currency_str, fiat_amount_str, fiat_currency_str = span.text.split()
return crypto_currency_str, float(crypto_amount_str), fiat_currency_str[:-1], float(fiat_amount_str[1:])
def _parse_ships_from(label_div: BeautifulSoup) -> str:
spans = [span for span in label_div.findAll('span')]
assert len(spans) == 1
span = spans[0]
return span.text.strip()
def _parse_ships_to(label_div: BeautifulSoup) -> Tuple[str]:
s: str
spans = [span for span in label_div.findAll('span')]
assert len(spans) == 1
span = spans[0]
return tuple([s.strip() for s in span.text.split(",")])
def _parse_jabber_id(label_div: BeautifulSoup) -> str:
spans = [span for span in label_div.findAll('span')]
assert len(spans) == 1
span = spans[0]
return span.text.strip()
def _parse_fe_enabled(label_div: BeautifulSoup) -> bool:
spans = [span for span in label_div.findAll('span')]
assert len(spans) == 1
span_text = spans[0].text
if span_text == "Yes":
return True
elif span_text == "No":
return False
else:
raise AssertionError("Unknown value for 'FE Enabled' field in user profile")
def _parse_member_since(label_div: BeautifulSoup) -> datetime:
spans = [span for span in label_div.findAll('span')]
assert len(spans) == 1
span_text = spans[0].text
return dateparser.parse(span_text)
def _find_last_online_text_delimiter(span_text: str) -> str:
candidate_delimiters = ("Within the last", "Whithin the last")
for delimiter in candidate_delimiters:
if span_text.find(delimiter) != -1:
return delimiter
raise AssertionError(f"Unknown delimiter in '{span_text}'")
def _parse_last_online(label_div: BeautifulSoup) -> date:
spans = [span for span in label_div.findAll('span')]
assert len(spans) == 1
span_text = spans[0].text
delimiter = _find_last_online_text_delimiter(span_text)
time_ago_string = span_text.split(delimiter)[1].strip()
time_delta_val = parse_time_delta_from_string(time_ago_string)
last_online = datetime.utcnow() - time_delta_val
return date(year=last_online.year, month=last_online.month, day=last_online.day)
def _get_current_page_and_total_pages(td_gridftr: BeautifulSoup) -> Tuple[int, int]:
spans = [span for span in td_gridftr.findAll('span')]
assert len(spans) >= 1
for span in spans:
try:
current_page, total_pages = span.text.split(" of ")
return int(current_page), int(total_pages)
except ValueError as e:
pass
# noinspection PyUnboundLocalVariable
raise e
class CryptoniaScrapingFunctions(BaseFunctions):
@staticmethod
def get_meta_refresh_interval(soup_html: BeautifulSoup) -> Tuple[int, str]:
metas = [meta for meta in soup_html.findAll('meta') if
"http-equiv" in meta.attrs.keys() and meta["http-equiv"] == "refresh"]
assert len(metas) == 1
meta_content = metas[0]["content"]
wait_interval, redirect_url = meta_content.split(";")
return int(wait_interval), redirect_url.strip()
@staticmethod
def get_captcha_image_url_from_market_page(soup_html: BeautifulSoup) -> str:
imgs = [img for img in soup_html.select('.login_captcha')]
assert len(imgs) == 1
return imgs[0]["src"]
@staticmethod
def accepts_currencies(soup_html: BeautifulSoup) -> Tuple[bool, bool, bool]:
product_details_divs = [div for div in soup_html.findAll('div', attrs={'class': 'product_details'})]
assert len(product_details_divs) == 1
product_details_div = product_details_divs[0]
imgs = [img for img in product_details_div.findAll('img', attrs={'style': 'height: 18px; max-width: 200px'})]
accepts_btc = False
accepts_multisig_btc = False
accepts_xmr = False
for img in imgs:
if img["src"] == "/image/btc_nm.png": accepts_btc = True
if img["src"] == "/image/btc_ms_nm.png": accepts_multisig_btc = True
if img["src"] == "/image/xmr_nm.png": accepts_xmr = True
return accepts_btc, accepts_multisig_btc, accepts_xmr
@staticmethod
def get_title(soup_html) -> str:
raise NotImplementedError('')
@staticmethod
def get_description(soup_html: BeautifulSoup) -> Optional[str]:
tab_view_1_divs = [div for div in soup_html.findAll('div', attrs={'id': 'tabview1'})]
assert len(tab_view_1_divs) == 1
tab_view_1_div = tab_view_1_divs[0]
content_divs = [div for div in tab_view_1_div.findAll('div', attrs={'class': 'content_div'})]
if len(content_divs) >= 1:
content_div = content_divs[0]
return shorten_and_sanitize_for_text_column(content_div.text)
else:
return None
@staticmethod
def get_product_page_urls(soup_html) -> Tuple[str]:
product_page_urls: List[str] = []
tables = [table for table in soup_html.findAll('table', attrs={'style': 'width: 100%'})]
if len(tables) == 0:
res: Tuple[str] = ()
return res
elif len(tables) == 1:
pass
else:
raise AssertionError("Unknown format in search result page.")
table = tables[0]
trs = [tr for tr in table.findAll('tr')]
assert len(trs) <= 27
for tr in trs[1:-1]:
thumb_td, spacer_td, product_td, price_td, vendor_td = [td for td in tr.findAll('td')]
hrefs = [href for href in product_td.findAll('a', href=True)]
assert len(hrefs) == 1
href = hrefs[0]
product_page_urls.append(href["href"])
assert len(product_page_urls) == len(trs) - 2
return tuple(product_page_urls)
@staticmethod
def get_nr_sold_since_date(soup_html) -> int:
raise NotImplementedError('')
@staticmethod
def get_fiat_currency_and_price(soup_html) -> Tuple[str, int]:
raise NotImplementedError('')
@staticmethod
def get_origin_country_and_destinations(soup_html: BeautifulSoup) -> Tuple[str, Tuple[str]]:
origin: str
dest: List[str]
a_dest: str
product_data_divs = [div for div in soup_html.findAll('div', attrs={'class': 'product_data'})]
assert len(product_data_divs) >= ASSUMED_MINIMUM_NUMBER_OF_PRODUCT_DATA_DIVS
product_data_divs = [div for div in product_data_divs if div.find('label').text == "Ships from:"]
assert len(product_data_divs) <= 1
if len(product_data_divs) == 0:
return CRYPTONIA_WORLD_COUNTRY, tuple([CRYPTONIA_WORLD_COUNTRY])
else:
product_data_div = product_data_divs[0]
lbllist_divs = [div for div in product_data_div.findAll('div', attrs={'class': 'lbllist'})]
assert len(lbllist_divs) == 1
lbllist_div = lbllist_divs[0]
origin_string, dests_string = lbllist_div.text.split("→")
origin_country: str = origin_string.strip()
destination_countries: List[str] = [dests_string[a.regs[0][0]:a.regs[0][1]].strip() for a in
BaseFunctions.COMMA_SEPARATED_COUNTRY_REGEX.finditer(dests_string)]
return origin_country, tuple(destination_countries)
@staticmethod
def get_cryptocurrency_rates(soup_html: BeautifulSoup) -> Tuple[float, float]:
rate_divs = [div for div in soup_html.findAll('div', attrs={'class': 'rate_div'})]
assert len(rate_divs) == 1
rate_div = rate_divs[0]
smtexts = [span for span in rate_div.findAll('span', attrs={'class': 'smtext'})]
assert len(smtexts) == 1
smtext = smtexts[0]
rates_string = smtext.text
rates = rates_string.split("=")
btc_usd_rate = float(rates[2].split(" ")[1])
btc_xmr_rate = float(rates[4].split(" ")[1])
xmr_usd_rate = btc_usd_rate / btc_xmr_rate
return btc_usd_rate, xmr_usd_rate
def _format_logger_message(self, message: str) -> str:
return message
@staticmethod
def get_category_pairs_and_urls(soup_html: BeautifulSoup) -> Tuple[
Tuple[Tuple[Tuple[str, int, str, int]]], Tuple[str]]:
sidebar_inners = [div for div in soup_html.findAll('div', attrs={'class': 'sidebar_inner'})]
assert len(sidebar_inners) == 2
sidebar_inner = sidebar_inners[1]
chksubcats_divs = [div for div in sidebar_inner.findAll('div', attrs={'class': 'chksubcats'})]
category_name_spans = [span for span in sidebar_inner.findAll('span', attrs={'class', 'lgtext'})]
assert len(chksubcats_divs) == len(category_name_spans) == 10
category_lists: List[Tuple[Tuple[str, int, str, int]]] = []
urls: List[str] = []
for chksubcats_div, category_name_span in zip(chksubcats_divs, category_name_spans):
main_category_name = category_name_span.text.strip()
subcategory_hrefs = [href for href in chksubcats_div.findAll('a', href=True)]
for subcategory_href in subcategory_hrefs:
subcategory_href_inner_text_parts = subcategory_href.text.split(" ")
assert len(subcategory_href_inner_text_parts) == 2
subcategory_name = subcategory_href_inner_text_parts[0].strip()
categories = ((main_category_name, None, None, 0), (subcategory_name, None, main_category_name, 1))
subcategory_base_url = subcategory_href["href"]
category_lists.append(categories)
urls.append(subcategory_base_url)
assert len(category_lists) == len(urls)
return tuple(category_lists), tuple(urls)
@staticmethod
def get_nr_of_result_pages_in_category(soup_html: BeautifulSoup) -> int:
tds = [td for td in soup_html.findAll('td', attrs={'class', 'gridftr'})]
no_products_p: BeautifulSoup = soup_html.select_one("#body > div.mainarea > div > div > div.mainbox > p")
if no_products_p and no_products_p.text == 'No products found in this category.':
return 0
assert len(tds) == 1
td: BeautifulSoup = tds[0]
spans = [span for span in td.findAll('span')]
assert (len(spans) == 2 or len(spans) == 3)
span: BeautifulSoup = spans[1]
parts_of_span = span.text.split(" ")
assert len(parts_of_span) == 3
return int(parts_of_span[2])
@staticmethod
def get_titles_sellers_and_seller_urls(soup_html: BeautifulSoup) -> Tuple[Tuple[str], Tuple[str], Tuple[str]]:
titles: List[str] = []
sellers: List[str] = []
seller_urls: List[str] = []
tables = [table for table in soup_html.findAll('table', attrs={'style': 'width: 100%'})]
if len(tables) == 0:
res: Tuple[str] = ()
return res, res, res
elif len(tables) == 1:
pass
else:
raise AssertionError("Unknown format in search result page.")
table = tables[0]
trs = [tr for tr in table.findAll('tr')]
assert len(trs) <= 27
for tr in trs[1:-1]:
thumb_td, spacer_td, product_td, price_td, vendor_td = [td for td in tr.findAll('td')]
hrefs = [href for href in vendor_td.findAll('a', href=True)]
assert len(hrefs) == 1
href = hrefs[0]
seller_urls.append(href['href'])
sellers.append(href.text)
divs = [div for div in
product_td.findAll('div', attrs={'style': 'margin-bottom: 5px; width: 270px; overflow: hidden'})]
assert len(divs) == 1
name_div = divs[0]
titles.append(name_div.text)
return tuple(titles), tuple(sellers), tuple(seller_urls)
@staticmethod
def get_fiat_currency_and_price_and_unit_type(soup_html: BeautifulSoup) -> Tuple[str, float, str]:
product_data_divs = [div for div in soup_html.findAll('div', attrs={'class': 'product_data'})]
assert len(product_data_divs) >= ASSUMED_MINIMUM_NUMBER_OF_PRODUCT_DATA_DIVS
product_data_divs = [div for div in product_data_divs if div.find('label').text == "Price:"]
assert len(product_data_divs) == 1
product_data_div = product_data_divs[0]
lg_spans = [span for span in product_data_div.findAll('span', attrs={'class': 'lgtext', 'style': ''})]
assert len(lg_spans) == 1
lg_span = lg_spans[0]
price, currency_slash_unit = lg_span.text.split(" ")
currency, unit = currency_slash_unit.split("/", maxsplit=1) # name of unit can contain slash, e.g. "1/4 pound"
return currency, float(price), unit
@staticmethod
def supports_escrow(soup_html: BeautifulSoup) -> bool:
product_data_divs = [div for div in soup_html.findAll('div', attrs={'class': 'product_data'})]
assert len(product_data_divs) >= ASSUMED_MINIMUM_NUMBER_OF_PRODUCT_DATA_DIVS
product_data_divs = [div for div in product_data_divs if div.find('label').text == "FE or Escrow:"]
assert len(product_data_divs) == 1
product_data_div = product_data_divs[0]
spans = [span for span in product_data_div.findAll(lambda tag: tag.name == 'span' and
tag.get('class') == ['verified'])]
spans_length = len(spans)
assert spans_length <= 1
if spans_length == 1:
assert spans[0].text == "ESCROW"
return spans_length == 1
@staticmethod
def get_quantity_in_stock_unit_type_and_minimum_order_unit_amount(soup_html: BeautifulSoup) -> Tuple[int, str, int]:
product_data_divs = [div for div in soup_html.findAll('div', attrs={'class': 'product_data'})]
assert len(product_data_divs) >= ASSUMED_MINIMUM_NUMBER_OF_PRODUCT_DATA_DIVS
product_data_divs = [div for div in product_data_divs if div.find('label').text == "In stock:"]
assert len(product_data_divs) == 1
product_data_div = product_data_divs[0]
quantity = None
spans = [span for span in product_data_div.findAll('span')]
if len(spans) == 1:
minimum_order_unit_amount = 1
elif len(spans) == 2:
minimum_order_unit_amount = int(spans[1].text.split()[2])
elif len(spans) == 3 and "class" in spans[2].attrs.keys() and "error" in spans[2].attrs["class"]:
minimum_order_unit_amount = int(spans[1].text.split()[2])
quantity = 0
else:
raise AssertionError("Unknown format for 'In stock' field.")
span = spans[0]
quantity_and_unit_type = span.text.split(" ")
assert len(quantity_and_unit_type) == 2
quantity = quantity_and_unit_type[0] if quantity is None else quantity
unit_type = quantity_and_unit_type[1]
return int(quantity), unit_type, minimum_order_unit_amount
@staticmethod
def get_listing_type(soup_html: BeautifulSoup) -> Optional[str]:
product_data_divs = [div for div in soup_html.findAll('div', attrs={'class': 'product_data'})]
if len(product_data_divs) >= ASSUMED_MINIMUM_NUMBER_OF_PRODUCT_DATA_DIVS:
product_data_divs = [div for div in product_data_divs if div.find('label').text == "Listing Type:"]
assert len(product_data_divs) == 1
product_data_div = product_data_divs[0]
spans = [span for span in product_data_div.findAll('span')]
assert len(spans) == 1
span = spans[0]
return span.text
else:
return None
@staticmethod
def get_shipping_methods(soup_html) -> Tuple[
Tuple[str, Optional[float], str, float, Optional[str], Optional[bool]]]:
shipselects = [select for select in
soup_html.findAll('select', attrs={'class': 'shipselect', 'name': 'shipping_method'})]
assert len(shipselects) == 1
shipselect = shipselects[0]
options = [option for option in shipselect.findAll('option')]
assert len(options) >= 1
shipping_methods: List[Tuple[str, Optional[float], str, float, Optional[str], Optional[bool]]] = []
for option in options[1:]:
description = "(".join(option.text.split("(")[:-1])[:-1]
price_and_currency = option.text.split("(")[-1].split(" ")
price, currency = float(price_and_currency[0]), price_and_currency[1][:-1]
days, unit_name, price_is_per_unit = None, None, False
shipping_methods.append((description, days, currency, price, unit_name, price_is_per_unit))
return tuple(shipping_methods)
@staticmethod
def get_bulk_prices(soup_html: BeautifulSoup) -> Tuple[Tuple[int, Optional[int], float, float, Optional[float]]]:
all_product_data_divs = [div for div in soup_html.findAll('div', attrs={'class': 'product_data'})]
product_data_divs = [div for div in soup_html.findAll('div', attrs={'class': 'product_data',
'style': 'margin-top: 0; padding-top: 0'})]
assert len(product_data_divs) <= len(all_product_data_divs) - ASSUMED_MINIMUM_NUMBER_OF_PRODUCT_DATA_DIVS
lower_bounds: List[int] = []
upper_bounds: List[Optional[int]] = []
fiat_prices: List[float] = []
btc_prices: List[float] = []
discount_percents: List[float] = []
lower_bounds, upper_bounds, fiat_prices, btc_prices, discount_percents = [], [], [], \
[], []
for product_data_div in product_data_divs:
labels = [label for label in product_data_div.findAll('label')]
assert len(labels) == 1
label = labels[0]
bulk_lower_bound = int(label.text.split(" ")[0])
spans = [span for span in product_data_div.findAll('span')]
assert len(spans) == 3
assert spans[1].attrs == {}
assert spans[2].attrs["class"] == ["pricetag"]
lg_text = spans[0]
bulk_fiat_price = float(lg_text.text.split("/")[0].split(" ")[0])
no_class_span = spans[1]
bulk_btc_price = float(no_class_span.text.split("/")[0].split(" ")[0][1:])
pricetag_span = spans[2]
discount_percent = pricetag_span.text.split("%")[0]
lower_bounds.append(bulk_lower_bound)
fiat_prices.append(bulk_fiat_price)
btc_prices.append(bulk_btc_price)
discount_percents.append(discount_percent)
for i in range(len(lower_bounds) - 1):
upper_bounds.append(lower_bounds[i + 1] - 1)
assert max(len(lower_bounds), 1) - 1 == len(upper_bounds)
for i in range(len(lower_bounds) - len(upper_bounds)):
upper_bounds.append(None)
bulk_prices: List[Tuple[int, Optional[int], float, float, Optional[float]]] = []
for lower_bound, upper_bound, fiat_price, btc_price, discount_percent in zip(lower_bounds, upper_bounds,
fiat_prices, btc_prices,
discount_percents):
bulk_prices.append((lower_bound, upper_bound, fiat_price, btc_price, discount_percent))
return tuple(bulk_prices)
@staticmethod
def get_seller_about_description(soup_html: BeautifulSoup) -> str:
target_content_divs = [div for div in
soup_html.findAll('div', attrs={'id': 'general_div', 'class': 'target_content'})]
assert len(target_content_divs) == 1
target_content_div = target_content_divs[0]
return shorten_and_sanitize_for_text_column(target_content_div.text)
@staticmethod
def get_seller_info(soup_html: BeautifulSoup) -> Tuple[
float, Tuple[int, int], Tuple[Tuple[str, int, float, float, int, int, int, str]], Tuple[
str, float, str, float], str, Tuple[str], any, bool, datetime, date]:
res = []
expected_labels_and_parsing_funcs = [[_parse_percent_positive_rating, "Positive:"],
[_parse_disputes, "Disputes (won/lost):"],
[_parse_external_market_verifications, "Verifications:"],
[_parse_amount_on_escrow, "Amount on Escrow:"],
[_parse_ships_from, "Ships From:"],
[_parse_ships_to, "Ships To:"],
[_parse_jabber_id, "XMPP/Jabber ID:"],
[_parse_fe_enabled, "FE Enabled:"],
[_parse_member_since, "Member since:"],
[_parse_last_online, "Last online:"]]
inline_divs = [inline_div for inline_div in soup_html.findAll('div', attrs={'class': 'inline_div'})]
assert len(inline_divs) <= 2
seller_info_div = inline_divs[-1]
label_divs = [label_div for label_div in seller_info_div.findAll('div') if
'class' not in label_div.attrs.keys()]
assert len(label_divs) <= len(expected_labels_and_parsing_funcs)
for i, label_div in zip(range(len(label_divs)), label_divs):
k = 0
expected_label = expected_labels_and_parsing_funcs[k][1]
labels = [label for label in label_div.findAll('label')]
if 'lblTuple' in [item for subTuple in label_div.attrs.values() for item in subTuple]:
assert len(labels) == 0
else:
assert len(labels) == 1
label = labels[0]
while label.text != expected_label:
res.append([])
k += 1
expected_label = expected_labels_and_parsing_funcs[k][1]
func: Callable[[str], any] = expected_labels_and_parsing_funcs[k][0]
res.append(func(label_div))
expected_labels_and_parsing_funcs = expected_labels_and_parsing_funcs[k + 1:]
k += 1
percent_positive_rating: float = res[0]
disputes: Tuple[int, int] = res[1]
external_market_verifications: Tuple[Tuple[str, int, float, float, int, int, int, str]] = res[2]
amount_on_escrow: Tuple[str, float, str, float] = res[3]
ships_from: str = res[4]
ships_to: Tuple[str] = res[5]
jabber_id: str = res[6]
fe_enabled: bool = res[7]
member_since: datetime = res[8]
last_online: date = res[9]
if not jabber_id:
jabber_id = None
if not ships_from:
ships_from = None
return percent_positive_rating, disputes, tuple(external_market_verifications), amount_on_escrow, ships_from, \
ships_to, \
jabber_id, fe_enabled, member_since, last_online
@staticmethod
def get_parenthesis_number_and_vendor_level(soup_html: BeautifulSoup) -> Tuple[int, int]:
h2s = [h2 for h2 in soup_html.findAll('h2')]
assert len(h2s) >= 1
h2 = h2s[0]
parenthesis_string, level_string = [s.strip() for s in h2.text.split("\n")]
parenthesis_number = int(parenthesis_string.split("\xa0")[1][1:-1])
level_number = int(level_string.split()[1])
return parenthesis_number, level_number
@staticmethod
def get_feedbacks(soup_html: BeautifulSoup) -> Tuple[
Tuple[date], Tuple[str], Tuple[str], Tuple[str], Tuple[str], Tuple[str], Tuple[str], Tuple[float]]:
target_content_divs = [div for div in
soup_html.findAll('div', attrs={'id': 'feedback_div', 'class': 'target_content'})]
assert len(target_content_divs) == 1
target_content_div = target_content_divs[0]
is_last_page = CryptoniaScrapingFunctions.get_next_feedback_url(soup_html) is None
table_rows = [tr for tr in target_content_div.findAll('tr')]
if is_last_page:
assert len(table_rows) <= 27
else:
assert len(table_rows) == 27
publication_dates: List[date] = []
feedback_categories: List[str] = []
titles: List[str] = []
feedback_message_texts: List[str] = []
text_hashes: List[str] = []
buyers: List[str] = []
crypto_currencies: List[str] = []
prices: List[float] = []
for row in table_rows[1:-1]:
spans = [span for span in row.findAll('span')]
assert len(spans) == 5
paragraphs = [p for p in row.findAll('p')]
assert len(paragraphs) == 1
feedback_category_span = spans[0]
tag_attributes = [item for subTuple in feedback_category_span.attrs.values() for item in subTuple]
if 'icono-checkCircle' in tag_attributes and len(tag_attributes) == 13:
feedback_category = "Positive Feedback"
elif 'icono-crossCircle' in tag_attributes and len(tag_attributes) == 11:
feedback_category = "Negative Feedback"
else:
raise AssertionError(f"Unknown feedback type {tag_attributes}")
product_title_span = spans[1]
title = product_title_span.text
feedback_message_paragraph = paragraphs[0]
feedback_text = feedback_message_paragraph.text
date_span = spans[2]
year, month, day = [int(s) for s in date_span.text.split("-")]
publication_date = date(year=year, month=month, day=day)
price_span = spans[3]
price_and_cryptocurrency = price_span.text.split()
assert len(price_and_cryptocurrency) == 2
price = float(price_and_cryptocurrency[0])
crypto_currency = price_and_cryptocurrency[1]
assert len(crypto_currency) == 3
buyer_username_span = spans[4]
buyer = buyer_username_span.text
publication_dates.append(publication_date)
feedback_categories.append(feedback_category)
titles.append(title)
feedback_message_texts.append(feedback_text)
text_hashes.append(hashlib.md5(
feedback_text.encode(MD5_HASH_STRING_ENCODING)
).hexdigest()[:FEEDBACK_TEXT_HASH_COLUMN_LENGTH])
buyers.append(buyer)
crypto_currencies.append(crypto_currency)
prices.append(price)
return tuple(publication_dates), tuple(feedback_categories), tuple(titles), tuple(
feedback_message_texts), tuple(text_hashes), tuple(buyers), tuple(crypto_currencies), tuple(prices)
@staticmethod
def get_next_feedback_url(soup_html: BeautifulSoup) -> Union[str, None]:
td_gridftrs = [td for td in soup_html.findAll('td', attrs={'class': 'gridftr', 'colspan': '5'})]
if len(td_gridftrs) == 0:
return None
elif len(td_gridftrs) == 1:
td_gridftr = td_gridftrs[0]
current_page, total_pages = _get_current_page_and_total_pages(td_gridftr)
a_tags = [a_tag for a_tag in td_gridftr.findAll('a', href=True)]
if current_page == total_pages and len(a_tags) == 0:
return None
elif current_page == total_pages and len(a_tags) == 1:
return None
elif current_page == total_pages and len(a_tags) == 2:
raise AssertionError
elif current_page != total_pages and len(a_tags) == 1:
return a_tags[0]["href"]
# return the first
elif current_page != total_pages and len(a_tags) == 2:
return a_tags[1]["href"]
# return last of the two
else:
raise AssertionError("Unrecognized feedback tab pagination.")
@staticmethod
def get_pgp_key(soup_html: BeautifulSoup) -> Union[str, None]:
target_content_divs = [div for div in
soup_html.findAll('div', attrs={'id': 'pgp_div', 'class': 'target_content'})]
assert len(target_content_divs) == 1
target_content_div = target_content_divs[0]
text_areas = [text_area for text_area in
target_content_div.findAll('textarea', attrs={'class': 'ascii_armour_textarea'})]
if len(text_areas) == 0:
return None
elif len(text_areas) == 1:
text_area = text_areas[0]
return shorten_and_sanitize_for_text_column(text_area.text)
else:
raise AssertionError("Unknown page formatting when scraping PGP key")
@staticmethod
def get_terms_and_conditions(soup_html: BeautifulSoup) -> Union[str, None]:
target_content_divs = [div for div in
soup_html.findAll('div', attrs={'id': 'terms_div', 'class': 'target_content'})]
assert len(target_content_divs) == 1
target_content_div = target_content_divs[0]
content_divs = [content_div for content_div in
target_content_div.findAll('div', attrs={'class': 'content_div'})]
if len(content_divs) == 0:
return None
elif len(content_divs) == 1:
content_div = content_divs[0]
return shorten_and_sanitize_for_text_column(content_div.text)
else:
raise AssertionError("Unknown page formatting when scraping terms and conditions")
@staticmethod
def get_login_payload(soup_html: BeautifulSoup, username: str, password: str, captcha_solution: str) -> dict:
payload = {}
inputs = [input for input in soup_html.findAll('input')]
assert len(inputs) == 5
username_input = inputs[0]
assert username_input["type"] == "input"
password_input = inputs[1]
assert password_input["type"] == "password"
captcha_input = inputs[2]
assert captcha_input["type"] == "text"
hidden_input = inputs[3]
assert hidden_input["type"] == "hidden"
submit_input = inputs[4]
assert submit_input["type"] == "submit"
sess_code = hidden_input["value"]
submit_value = submit_input["value"]
payload[username_input["name"]] = username
payload[password_input["name"]] = password
payload[captcha_input["name"]] = captcha_solution
payload[hidden_input["name"]] = sess_code
payload[submit_input["name"]] = submit_value
return payload
@staticmethod
def is_internal_connection_error(soup_html: BeautifulSoup):
error_message_p: BeautifulSoup = soup_html.select_one("#body > div > div > p.error")
error_message: str = error_message_p.text if error_message_p else None
return error_message == "Internal connection error. Please contact support."
|
989,794 | e5e84257f68ba0627609a806038ba638ffcd346d | from collections import OrderedDict
from datetime import datetime
from typing import Dict, List, Optional
import click
from git import Commit
from github.Issue import Issue
from cherrytree.github_utils import (
commit_pr_number,
deduplicate_prs,
get_access_token,
get_issue,
get_issues_from_labels,
git_get_current_head,
get_git_repo,
os_system,
truncate_str,
)
from cherrytree.classes import (
Cherry,
CherryTreeExecutionException,
CommitSummary,
)
SHORT_SHA_LEN = 12
TMP_BRANCH = "__tmp_branch"
class CherryTreeBranch:
"""Represents a release branch"""
repo: str
release_branch: str
main_branch: str
labels: List[str]
blocking_labels: List[str]
branch_commits: Dict[str, Dict[int, Commit]]
missing_pull_requests: List[Issue]
pull_requests: List[int]
cherries: List[Cherry]
blocking_pr_ids: List[int]
def __init__(
self,
repo: str,
release_branch: str,
main_branch: str,
labels: List[str],
blocking_labels: List[str],
pull_requests: List[int],
access_token: Optional[str],
):
self.repo = repo
self.labels = labels
self.blocking_labels = blocking_labels
self.pull_requests = pull_requests
self.missing_pull_requests = []
self.release_branch = release_branch
self.main_branch = main_branch
self.git_repo = get_git_repo()
self.base_ref = self.get_base()
self.blocking_pr_ids = []
try:
self.access_token = get_access_token(access_token)
except NotImplementedError:
click.secho(
f"No access token provided. Either provide one via the --access-token "
f"parameter, or set the GITHUB_TOKEN env variable", fg="red")
exit(1)
click.secho(f"Base ref is {self.base_ref}", fg="cyan")
self.branches = {}
self.branch_commits = {}
skipped_commits = 0
for branch in (self.main_branch, self.release_branch):
commits = OrderedDict()
self.branch_commits[branch] = commits
for commit in self.git_repo.iter_commits(branch):
pr_number = commit_pr_number(commit)
if pr_number is None:
skipped_commits += 1
else:
commits[pr_number] = commit
if skipped_commits:
click.secho(
f"{skipped_commits} commits skipped due to missing PRs", fg="yellow"
)
# add all PRs that should be cherries
prs: List[Issue] = []
for label in self.labels:
click.secho(f'Fetching labeled PRs: "{label}"', fg="cyan", nl=False)
new_prs = get_issues_from_labels(
repo=self.repo,
access_token=self.access_token,
label=label,
prs_only=True,
)
click.secho(f' ({len(new_prs)} labels found)', fg="cyan")
prs += new_prs
for pull_request in pull_requests:
prs.append(get_issue(self.repo, self.access_token, pull_request))
prs = deduplicate_prs(prs)
# add all PRs that are flagged as blocking
for label in self.blocking_labels:
click.secho(
f'Fetching labeled PRs marked as blocking: "{label}"',
fg="cyan",
nl=False,
)
blocking_prs = get_issues_from_labels(
repo=self.repo,
access_token=self.access_token,
label=label,
prs_only=True,
)
click.secho(f' ({len(blocking_prs)} blocking labels found)', fg="cyan")
self.blocking_pr_ids += [pr.number for pr in blocking_prs]
prs = deduplicate_prs(prs)
now = datetime.now()
prs.sort(
key=lambda x: x.closed_at if x.closed_at is not None else now,
)
click.secho(f"{len(prs)} PRs found", fg="cyan")
self.cherries = []
for pr in prs:
main_commit = self.branch_commits[self.main_branch].get(pr.number)
applied_commit = self.branch_commits[self.release_branch].get(pr.number)
if main_commit is None and pr.closed_at is not None:
# skip closed PRs that haven't been merged
continue
cherry = Cherry(
commit=main_commit,
pr=pr,
is_applied=True if applied_commit is not None else False,
)
self.cherries.append(cherry)
def get_base(self) -> str:
base_commits = self.git_repo.merge_base(self.main_branch, self.release_branch)
if len(base_commits) < 1:
raise Exception("No common ancestor found!")
elif len(base_commits) > 1:
raise Exception("Multiple common ancestors found!?")
return base_commits[0].hexsha
def apply_cherries(
self,
target_branch: Optional[str],
dryrun: bool,
error_mode: str,
force_rebuild_target: bool,
):
error = False
current_head = git_get_current_head()
click.secho("Fetching all branches", fg="cyan")
os_system("git fetch --all")
click.secho(f"Checking out base branch: {self.release_branch}", fg="cyan")
os_system(f"git checkout {self.release_branch}")
if target_branch is None and dryrun:
target_branch = TMP_BRANCH
click.secho(
f"Recreating and checking out temporary branch: {target_branch}",
fg="cyan",
)
os_system(f"git branch -D {target_branch}", raise_on_error=False)
os_system(f"git checkout -b {target_branch}")
elif (target_branch is None or target_branch == self.release_branch) and not dryrun:
# base and target are the same - no need to recheckout
target_branch = self.release_branch
else:
os_system(f"git branch {target_branch}", raise_on_error=False)
if force_rebuild_target:
click.secho(f"Recreating target branch: {target_branch}", fg="cyan")
os_system(f"git branch -D {target_branch}", raise_on_error=False)
os_system(f"git branch {target_branch}")
click.secho(f"Checking out target branch: {target_branch}", fg="cyan")
os_system(f"git checkout {target_branch}")
applied_cherries: List[Cherry] = []
applied_dryrun_cherries: List[Cherry] = []
blocking_cherries: List[Cherry] = []
conflicted_cherries: List[CommitSummary] = []
open_cherries: List[Cherry] = []
base_sha = self.git_repo.head.commit.hexsha
for cherry in self.cherries:
pr = cherry.pr
commit = cherry.commit
if commit is None:
click.secho(
truncate_str(f"error-open #{pr.number}: {pr.title}"), fg="red"
)
open_cherries.append(cherry)
error = True
continue
sha = cherry.commit.hexsha
if cherry.is_applied:
click.secho(
truncate_str(f"skip-applied #{pr.number}: {pr.title}"), fg="yellow"
)
continue
if cherry.pr.number in self.blocking_pr_ids:
click.secho(
truncate_str(f"error-blocking #{pr.number}: {pr.title}"), fg="red"
)
blocking_cherries.append(cherry)
error = True
if error_mode == "dryrun":
dryrun = True
elif error_mode == "break":
break
try:
os_system(f"git cherry-pick -x {sha}")
if dryrun:
applied_dryrun_cherries.append(cherry)
else:
applied_cherries.append(cherry)
click.secho(
truncate_str(f"apply-ok #{pr.number}: {pr.title}"),
fg="green",
nl=False,
)
if dryrun:
# os_system(f"git reset --hard HEAD~1")
click.secho(" [DRY-RUN]", fg="cyan")
else:
base_sha = cherry.commit.hexsha
click.echo()
except CherryTreeExecutionException:
os_system("git cherry-pick --abort")
try:
# try to ff to see if cherry was already applied
os_system(f"git cherry-pick --ff {sha}")
click.secho(f"skip-empty #{pr.number}: {pr.title}", fg="yellow")
except CherryTreeExecutionException:
click.secho(
truncate_str(f"error-conflict #{pr.number}: {pr.title}"),
fg="red",
)
# These need to be put into a wrapper to avoid re-hitting the
# GH API later
conflicted_cherries.append(CommitSummary(
pr_number=pr.number,
pr_title=pr.title,
sha=commit.hexsha,
author=pr.user.login,
merged_by=pr.closed_by.login,
))
os_system("git cherry-pick --abort")
error = True
if error_mode == "dryrun":
dryrun = True
elif error_mode == "break":
break
if dryrun:
os_system(f"git reset --hard {base_sha}")
os_system(f"git checkout {current_head}")
if target_branch == TMP_BRANCH:
os_system(f"git branch -D {target_branch}")
if blocking_cherries:
click.echo()
click.secho(
f"{len(blocking_cherries)} open PRs that need to be cleared first:",
fg="red",
)
for cherry in blocking_cherries:
pr = cherry.pr
click.echo(f"#{pr.number} (author: {pr.user.login}): {pr.title}")
if open_cherries:
click.echo()
click.secho(
f"{len(open_cherries)} open PRs that need to be merged:",
fg="red",
)
for cherry in open_cherries:
pr = cherry.pr
click.echo(f"#{pr.number} (author: {pr.user.login}): {pr.title}")
if conflicted_cherries:
click.echo()
click.secho(
f"{len(conflicted_cherries)} "
"PRs that need to be manually cherried due to conflicts:",
fg="red",
)
for commit in conflicted_cherries:
click.echo(
f"#{commit.pr_number} (sha: {commit.sha[:12]}, "
f"author: {commit.author}, "
f"merged by: {commit.merged_by}): "
f"{truncate_str(commit.pr_title, 30)}"
)
click.echo()
click.secho(f"Summary:", fg="cyan")
click.secho(
f"{len(applied_cherries)} successful cherries", fg="cyan",
)
if applied_dryrun_cherries:
click.secho(
f"{len(applied_dryrun_cherries)} dry-run cherries", fg="cyan",
)
if blocking_cherries:
click.secho(
f"{len(blocking_cherries)} blocking cherries", fg="cyan",
)
if conflicted_cherries:
click.secho(
f"{len(conflicted_cherries)} conflicts", fg="cyan",
)
if open_cherries:
click.secho(
f"{len(open_cherries)} open PRs", fg="cyan",
)
if error:
exit(1)
|
989,795 | 2821c192270097bb78b6a5702dc7bb07758232e9 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/automl_v1beta1/proto/text_extraction.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.cloud.automl_v1beta1.proto import (
text_segment_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2,
)
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/automl_v1beta1/proto/text_extraction.proto",
package="google.cloud.automl.v1beta1",
syntax="proto3",
serialized_options=_b(
"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1"
),
serialized_pb=_b(
'\n7google/cloud/automl_v1beta1/proto/text_extraction.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x1cgoogle/api/annotations.proto"y\n\x18TextExtractionAnnotation\x12@\n\x0ctext_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegmentH\x00\x12\r\n\x05score\x18\x01 \x01(\x02\x42\x0c\n\nannotation"\x97\x02\n\x1fTextExtractionEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12w\n\x1a\x63onfidence_metrics_entries\x18\x02 \x03(\x0b\x32S.google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x03 \x01(\x02\x12\x11\n\tprecision\x18\x04 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x05 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
),
dependencies=[
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_TEXTEXTRACTIONANNOTATION = _descriptor.Descriptor(
name="TextExtractionAnnotation",
full_name="google.cloud.automl.v1beta1.TextExtractionAnnotation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="text_segment",
full_name="google.cloud.automl.v1beta1.TextExtractionAnnotation.text_segment",
index=0,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="score",
full_name="google.cloud.automl.v1beta1.TextExtractionAnnotation.score",
index=1,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="annotation",
full_name="google.cloud.automl.v1beta1.TextExtractionAnnotation.annotation",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=172,
serialized_end=293,
)
_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY = _descriptor.Descriptor(
name="ConfidenceMetricsEntry",
full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="confidence_threshold",
full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry.confidence_threshold",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="recall",
full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry.recall",
index=1,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="precision",
full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry.precision",
index=2,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="f1_score",
full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry.f1_score",
index=3,
number=5,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=468,
serialized_end=575,
)
_TEXTEXTRACTIONEVALUATIONMETRICS = _descriptor.Descriptor(
name="TextExtractionEvaluationMetrics",
full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="au_prc",
full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.au_prc",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="confidence_metrics_entries",
full_name="google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.confidence_metrics_entries",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=296,
serialized_end=575,
)
_TEXTEXTRACTIONANNOTATION.fields_by_name[
"text_segment"
].message_type = (
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2._TEXTSEGMENT
)
_TEXTEXTRACTIONANNOTATION.oneofs_by_name["annotation"].fields.append(
_TEXTEXTRACTIONANNOTATION.fields_by_name["text_segment"]
)
_TEXTEXTRACTIONANNOTATION.fields_by_name[
"text_segment"
].containing_oneof = _TEXTEXTRACTIONANNOTATION.oneofs_by_name["annotation"]
_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY.containing_type = (
_TEXTEXTRACTIONEVALUATIONMETRICS
)
_TEXTEXTRACTIONEVALUATIONMETRICS.fields_by_name[
"confidence_metrics_entries"
].message_type = _TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY
DESCRIPTOR.message_types_by_name["TextExtractionAnnotation"] = _TEXTEXTRACTIONANNOTATION
DESCRIPTOR.message_types_by_name[
"TextExtractionEvaluationMetrics"
] = _TEXTEXTRACTIONEVALUATIONMETRICS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TextExtractionAnnotation = _reflection.GeneratedProtocolMessageType(
"TextExtractionAnnotation",
(_message.Message,),
dict(
DESCRIPTOR=_TEXTEXTRACTIONANNOTATION,
__module__="google.cloud.automl_v1beta1.proto.text_extraction_pb2",
__doc__="""Annotation for identifying spans of text.
Attributes:
annotation:
Required. Text extraction annotations can either be a text
segment or a text relation.
text_segment:
An entity annotation will set this, which is the part of the
original text to which the annotation pertains.
score:
Output only. A confidence estimate between 0.0 and 1.0. A
higher value means greater confidence in correctness of the
annotation.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionAnnotation)
),
)
_sym_db.RegisterMessage(TextExtractionAnnotation)
TextExtractionEvaluationMetrics = _reflection.GeneratedProtocolMessageType(
"TextExtractionEvaluationMetrics",
(_message.Message,),
dict(
ConfidenceMetricsEntry=_reflection.GeneratedProtocolMessageType(
"ConfidenceMetricsEntry",
(_message.Message,),
dict(
DESCRIPTOR=_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY,
__module__="google.cloud.automl_v1beta1.proto.text_extraction_pb2",
__doc__="""Metrics for a single confidence threshold.
Attributes:
confidence_threshold:
Output only. The confidence threshold value used to compute
the metrics. Only annotations with score of at least this
threshold are considered to be ones the model would return.
recall:
Output only. Recall under the given confidence threshold.
precision:
Output only. Precision under the given confidence threshold.
f1_score:
Output only. The harmonic mean of recall and precision.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry)
),
),
DESCRIPTOR=_TEXTEXTRACTIONEVALUATIONMETRICS,
__module__="google.cloud.automl_v1beta1.proto.text_extraction_pb2",
__doc__="""Model evaluation metrics for text extraction problems.
Attributes:
au_prc:
Output only. The Area under precision recall curve metric.
confidence_metrics_entries:
Output only. Metrics that have confidence thresholds.
Precision-recall curve can be derived from it.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics)
),
)
_sym_db.RegisterMessage(TextExtractionEvaluationMetrics)
_sym_db.RegisterMessage(TextExtractionEvaluationMetrics.ConfidenceMetricsEntry)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
989,796 | d3e874a69384a768a99c869e072a1c9f8c339e72 | from scipy.io import savemat
from .JSON import clobber
def writeMatFile(data, fileName):
data = clobber(data)
savemat(fileName, data, appendmat = True)
return None
|
989,797 | 18fbba933ab69c84ef59f0a003d1072720f33b71 | import torch
from torch import nn
from torch import optim
from torch.utils.data.dataloader import DataLoader
import os
import pickle
from src.model.fasttext import FastText
from src.data_process.dataset import ClassifierDataset
def train(args):
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
base_path = './data/'
processed_base_path = os.path.join(base_path, 'processed')
processed_data_path = os.path.join(processed_base_path, 'data.npz')
# word2index_path = os.path.join(processed_base_path, 'word2index.pkl')
index2word_path = os.path.join(processed_base_path, 'index2word.pkl')
glove_path = os.path.join(processed_base_path, 'glove.npy')
save_path = os.path.join(processed_base_path, 'model.pkl')
with open(index2word_path, 'rb') as handle:
index2word = pickle.load(handle)
model = FastText(vocab_size=len(index2word), embed_size=300)
model.load_pretrained_embeddings(glove_path, fix=False)
model = model.cuda()
dataset = ClassifierDataset(processed_data_path)
data_loader = DataLoader(
dataset=dataset,
batch_size=args.batch_size,
shuffle=True,
pin_memory=False
)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
max_accuracy = 0
for epoch in range(args.epoches):
total_samples = 0
total_loss = 0
correct_samples = 0
for i, data in enumerate(data_loader):
optimizer.zero_grad()
sentence, label = data
sentence, label = sentence.cuda(), label.cuda()
logit = model(sentence)
loss = criterion(logit, label)
loss.backward()
optimizer.step()
batch_size = label.size(0)
total_samples += batch_size
total_loss += batch_size * loss.item()
pred = logit.argmax(dim=-1)
correct_samples += (pred == label).long().sum().item()
if i % 100 == 0:
train_loss = total_loss / total_samples
train_accuracy = correct_samples / total_samples
print('[epoch %d] [step %d]\ttrain_loss: %.4f\ttrain_accuracy: %.4f' % (epoch, i, train_loss, train_accuracy))
total_samples = 0
total_loss = 0
correct_samples = 0
if train_accuracy > max_accuracy:
max_accuracy = train_accuracy
torch.save(model, save_path) |
989,798 | c4b93f010bda87100a3b4893854f3549a3a9fa96 | from dog import Dog
d1 = Dog("d1",10)
d2 = Dog("d2",12)
d1.setName("Aktos")
d2.setName("Tuzik")
name = d1.getName()
name2 = d2.getName()
d1.setAge(10)
d2.setAge(12)
age = d1.getAge()
age2 = d2.getAge()
print(name,age)
print(name2,age2)
print(d1)
print(d2)
from person2 import Person
p1 = Person("p1",21,d1)
dog_name = p1.getDogName()
dog_age = p1.getDogAge()
p2 = Person("p2",24,d2)
dog_name2 = p2.getDogName()
dog_age2 = p2.getDogAge()
print(dog_name, dog_age)
print(dog_name2, dog_age2) |
989,799 | 9a6ce3e012ee43bfd97bfce431ef3e48e0d952b6 | num_list=list(map(int,input().split(",")))
k=int(input())
if (max(num_list)-min(num_list))<=2*k:
print(0)
else:
print(str(max(num_list)-min(num_list)-2*k)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.