index int64 (0-1,000k) | blob_id string (length 40) | code string (7-10.4M chars) |
|---|---|---|
23,200 | b08288b8d0a1b0625957556876bf012445c0eee6 | max_confirm=5
me=-1
import pandas as pd
import os
import random
rej=0
ttl=0
flag=2
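# confirm_updater: bump the 'confirm' count for request-row r in request.txt (rotating request.txt/request1.txt as a working copy);
# once the count reaches the confirmation threshold, append the amount as a deposit entry to the receiver's ledger file and return 1.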
def confirm_updater(r,el):
#print("\n\n*************************************\ninside confirm updater :")
#print("\t the row is :"+str(r))
df=pd.read_csv("request.txt")
v=int(df.iloc[[r]]['confirm'])
#print("initial votes :"+str(v))
vr=v+1
df.at[r,'confirm']=vr
k=df.iloc[0:,1:]
k.to_csv("request.txt",sep=',')
f=open("request.txt","r")
f1=open("request1.txt","a")
for k in f:
f1.write(k)
f1.close()
f.close()
#f=open("request.txt","a")
f1=open("request.txt","w+")
f=open("request.txt","w")
f.truncate(0)
for k in f1:
f.write(k)
f1.truncate(0)
f1.write("row")
f.close()
f1.close()
os.rename("request.txt","req.txt")
os.rename("request1.txt","request.txt")
os.rename("req.txt","request1.txt")
if vr==max_confirm:
print("confirmed :",str(r))
print("by :",str(me))
cs=pd.read_csv(str(el[2])+'.txt')
dt=cs[['amt','ggt']]
lr=len(dt)
st=str(el[3])+',0,'+str(el[4])+','+str(el[1])+','+str(lr)+'\n'
obj_r=open(str(el[2])+'.txt',"a+")
obj_r.write(st)
return 1
else:
return 0
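# verifying: replay the sender's ledger file (el[1] + '.txt') up to the referenced row, adding deposits (ggt == 0) and
# subtracting other entries; confirm the request only if the resulting balance covers the requested amount.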
def verifying(el):
cost=0
fn=el[1]+'.txt'
row=el[5]
fil=pd.read_csv(fn) #0<-deposit; 1=waiting ;2->transmitted
data=fil[['amt','ggt']]
i=0
print("\tinside verifying :")
print(el)
print("i :",i,"<> r :",row) # logic of cost calculator
while i<row:
print("i :",str(i),"<> r :",str(row))
print("value[i] "+str(data['ggt'][i]))
if data['ggt'][i]==0:
cost+=float(data['amt'][i])
else:
cost=cost-float(data['amt'][i])
i+=1
if cost>=el[3]:
cc=confirm_updater(el[0],el)
return cc
return 0 # not confirming
#print("Yes! \t @",el,"\t by :<> ",str(me))
#confirm_updater(2)
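# submit: pick a transaction from request.txt (randomly when cf == 1, otherwise by user-entered id), return -1 if it
# already has enough confirmations, and otherwise pass its fields to verifying().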
def submit():
csv=pd.read_csv("request.txt")
data=csv[['row','sender','reciever','amount','date','unq','confirm']]
el=[]
n=10
cf=1
if cf==1:
n=random.randint(0,len(data)-1)
else:
n=int(input("enter the transaction id :"))
if int(data['confirm'][n])>=max_confirm:
'''
if rej==len(data)**3:
print("Total coins mined :"+str(ttl)+" by <"+me+">")
flag=-2
return 0
'''
return -1
el.append(data['row'][n])
el.append(data['sender'][n])
el.append(data['reciever'][n])
el.append(data['amount'][n])
el.append(data['date'][n])
el.append(data['unq'][n])
el.append(data['confirm'][n])
print(el)
print("verifying @el :")
return verifying(el)
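# mine: main loop; keep submitting and verifying transactions until the process is interrupted.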
def mine():
c=1
while c==1:
s=submit()
if s==0:
mine()
c=1
#print("Mine :"+str(c))
#c=int(input("continue :1/0"))
'''
if submit() ==-1:
print("already confirmed")
'''
me=int(input("Enter me :"))
mine()
|
23,201 | 908cbe456aee84e21b1268a41b7cc4168d7c9c90 | from setuptools import setup
setup(name='Empresas',
version='0.1',
description='Aplicacion para valorar practicas de empresas',
long_description='Aplicacion que permite registrar empresas y valoraciones de las practicas en ellas',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python :: 2.7',
'Topic :: Text Processing :: Linguistic',
],
keywords='aplicacion basica de valoracion',
url='https://github.com/hugobarzano/Aplicaciones/tree/master/ENV1/mysite',
author='Hugo Barzano Cruz',
author_email='Hugo Barzano Cruz',
license='GNU',
#packages=['Empresas'],
install_requires=[
# sqlite3 ships with the Python standard library, so no external dependency is needed here
],
include_package_data=True,
zip_safe=False)
|
23,202 | e9f0694aff5ab01e4bcec9d9ba68d45058abe328 | import thread
import threading
import time
import bluetooth
import datetime
import os
import sys
import sensor
from bluepy.btle import Scanner, DefaultDelegate
import subprocess
import random
# for a node, this is an id assigned by the server
# for the server, this is the id to assign to the next incoming node, after which PIID += 1
PIID = 0
rank=-1
# manually setup on the file
macAddr = ""
# the interval, in seconds, for scanning the smart nodes
iterval_for_scan = 10
iterval_for_scan_random = 50
# the interval, in seconds, for broadcasting the data
interval_for_sending = 1
interval_for_sending_random = 5
## delay for response
interval_for_response = 0
## fixed delay for assigning a new id
interval_for_assign_new_id = 60
# locker
mutex = threading.Lock()
mutex_scan = threading.Lock()
## mac -> ID table
macIDTable = {}
###############################################################
# broadcast simple example
# broadcast my rank
def Broadcast_Server_03_01():
#time.sleep(interval_for_response)
global rank
irank = "00 "
if (rank != -1):
irank = "00" + (str)(rank)
irank = irank[-2:] + " "
data = "04 c1 03 01 " + irank
print "broadcasting data: ", data
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"Broadcast_Server_03_01|"+ data + '\n')
input_file.close()
subprocess.call("sudo hciconfig hci0 up",shell=True);
subprocess.call("sudo hciconfig leadv 3",shell=True);
subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x000A 01",shell=True)
# server rank
subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x0008 1E 02 01 88 "+ data,shell=True)
interval = random.uniform(interval_for_sending,interval_for_sending_random)
time.sleep(interval)
###############################################################
# broadcast simple example
# request a new PIID
def Broadcast_Client_01_01():
global rank,PIID,macAddr
if (PIID > 0):
return
print "request a PIID."
print "PIID: ", PIID
data = ""
# modify the data
idd = "0000" + (str)(PIID)
idd = idd[-4:]
idd = idd[0] + idd[1] + " " + idd[2] + idd[3] + " "
irank = "00 "
if (rank != -1):
irank = "00" + (str)(rank)
irank = irank[-2:] + " "
str_macAddr = macAddr.replace(":"," ") + " "
data = "C1 " + "01 " + "01 " + idd + idd + irank + str_macAddr
tdata = data.replace(" ","")
# len of the data
data = (str)(len(tdata)/2) + " " + data
# broadcast
BroadCast(data, 0)
###############################################################
# broadcast simple example
# upload the local data to server
def Broadcast_Client_01_02():
global rank,PIID
print "upload data."
print "PIID: ", PIID
print "rank: ", rank
data = ""
ctim = time.time()
temp,hum = sensor.Tempture()
print "temp,hum: ",temp, ",",hum
power = sensor.Power_consumption()
print "power consumption: ", power
# modify the data
temp = "00" + (str)(temp)
temp = temp[-2:] + " "
hum = "00" + (str)(hum)
hum = hum[-2:] + " "
power = "00" + (str)(power)
power = power[-2:] + " "
idd = "0000" + (str)(PIID)
idd = idd[-4:]
idd = idd[0] + idd[1] + " " + idd[2] + idd[3] + " "
irank = "00 "
if (rank != -1):
irank = "00" + (str)(rank)
irank = irank[-2:] + " "
# make a data package
data = "C1 " + "01 " + "02 " + idd + idd + irank + temp + hum + "00 " + power
tdata = data.replace(" ","")
# len of the data
data = (str)(len(tdata)/2) + " " + data
# broadcast
BroadCast(data, 0)
###############################################################
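# BroadCast: bring hci0 up, enable LE advertising and set the advertising payload to the given data via hcitool,
# then sleep for a random interval (plus the given fixed delay) before returning.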
def BroadCast(data,fixedtime):
#time.sleep(interval_for_response)
print "broadcasting data: ", data
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"BroadCast|"+ data + '\n')
input_file.close()
subprocess.call("sudo hciconfig hci0 up",shell=True);
subprocess.call("sudo hciconfig leadv 3",shell=True);
subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x000A 01",shell=True)
subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x0008 1E 02 01 88 " + data,shell=True)
interval = random.uniform(interval_for_sending,interval_for_sending_random) + fixedtime
time.sleep(interval)
###############################################################
## client scan network
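# ScanNetwork: scan BLE advertisements for a random interval; for every device whose flags field is '88', parse the
# 0xc1 payload. A first pass handles cmd 03 (rank broadcasts, used to re-rank non-server nodes); a second pass handles
# cmd 01 (upload toward the server) and cmd 02 (download toward the nodes).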
def ScanNetwork():
global rank, iterval_for_scan, interval_for_sending, macAddr
subprocess.call("sudo hciconfig hci0 piscan",shell=True);
interval = random.uniform(iterval_for_scan,iterval_for_scan_random)
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"start scan" + '\n')
input_file.close()
scanner = Scanner()#.withDelegate(ScanDelegate())
devices = scanner.scan(interval) # 0 = loop
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"end scan" + '\n')
input_file.close()
###############################################################
## rerank first
if (rank != 0 ): # if not the server, rerank
print "reranking..."
rank = -1
for dev in devices:
dict = {}
for (adtype, desc, value) in dev.getScanData():
dict[adtype] = value
# handle the flag == 88
if (dict.has_key(1) and dict[1] == '88'):
###############################################################
# handle type = 0xc1
if (dict.has_key(0xc1)):
data = dict[0xc1]
print "recv data [0xC1]: ", data
# parse cmd
cmd = data[0:2]
data = data[2:]
print "cmd: ", cmd, ":",data
###############################################################
if (cmd == '03'):
print "Device %s (%s), RSSI=%d dB" % (dev.addr, dev.addrType, dev.rssi)
for (adtype, desc, value) in dev.getScanData():
print " %s = %s" % (desc, value)
for key in dict:
print " msg %d = %s" % (key, dict[key])
handle_cmd_03(data)
###############################################################
## other task:
for dev in devices:
dict = {}
for (adtype, desc, value) in dev.getScanData():
dict[adtype] = value
# handle the flag == 88
if (dict.has_key(1) and dict[1] == '88'):
###############################################################
# handle type = 0xc1
if (dict.has_key(0xc1)):
data = dict[0xc1]
print "recv data [0xC1]: ", data
# parse cmd
cmd = data[0:2]
data = data[2:]
print "cmd: ", cmd, ":",data
###############################################################
# cmd 01: upload to server
if (cmd == '01'):
print "Device %s (%s), RSSI=%d dB" % (dev.addr, dev.addrType, dev.rssi)
for (adtype, desc, value) in dev.getScanData():
print " %s = %s" % (desc, value)
for key in dict:
print " msg %d = %s" % (key, dict[key])
handle_cmd_01(data)
# cmd 02: download to client
if (cmd == '02'):
print "Device %s (%s), RSSI=%d dB" % (dev.addr, dev.addrType, dev.rssi)
for (adtype, desc, value) in dev.getScanData():
print " %s = %s" % (desc, value)
for key in dict:
print " msg %d = %s" % (key, dict[key])
handle_cmd_02(data)
###############################################################
# cmd 01:
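# handle_cmd_01: on the server (rank 0), type 01 assigns a fresh PIID and broadcasts it back as cmd 02/01, while type 02
# parses an uploaded sensor reading; on a client, the message is re-broadcast toward the server only when this node's
# rank is lower than the sender's.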
def handle_cmd_01(data):
global rank, PIID
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"handle_cmd_01|"+ data + '\n')
input_file.close()
###############################################################
# server
if (rank == 0):
# handle the msg
# type
ty = data[0:2]
data = data[2:]
###############################################################
# type 01: request a PIID
if (ty == '01'):
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"request a PIID"+'\n')
input_file.close()
print "request a PIID."
fromID = data[0:4]
print "Source ID: ", fromID
theLastForwardedID = data[4:8]
print "The last forward ID: ", theLastForwardedID
theLastRank = data[8:10]
print "the last rank: ", theLastRank
nodeAddr = data[10:22]
print "node address: ", nodeAddr
## need to handle the mac ID table here, TODO::
## for now, just a simple way to do this
# assign a PIID
PIID = PIID + 1
if (PIID > 9999):
PIID = 0
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"assign a PIID|" +(str)(PIID) +'\n')
input_file.close()
# ready to send it back
data = ""
# new id for client
idd = "0000" + (str)(PIID)
idd = idd[-4:]
idd = idd[0] + idd[1] + " " + idd[2] + idd[3] + " "
irank = "00 "
if (rank != -1):
irank = "00" + (str)(rank)
irank = irank[-2:] + " "
nodeAddr = nodeAddr[0:2] + " " + nodeAddr[2:4] + " " + nodeAddr[4:6] + " " + nodeAddr[6:8] + " " + nodeAddr[8:10] + " " + nodeAddr[10:12] + " "
# make a data package
# send to node
# cmd = 02 01
data = "C1 " + "02 " + "01 " + idd + idd + irank + nodeAddr
tdata = data.replace(" ","")
# len of the data
data = (str)(len(tdata)/2) + " " + data
# broadcast
BroadCast(data, interval_for_assign_new_id)
###############################################################
# type 02: msg upload to server
if (ty == '02'):
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"msg upload to server" +'\n')
input_file.close()
fromID = data[0:4]
print "Source ID: ", fromID
theLastForwardedID = data[4:8]
print "The last forward ID: ", theLastForwardedID
theLastRank = data[8:10]
print "The last rank: ", theLastRank
temp = data[10:12]
print "Temp: ", temp
hum = data[12:14]
print "Hum: ", hum
smoke = data[14:16]
print "Smoke: ", smoke
power = data[16:18]
print "Power consumption: ", power
###############################################################
# client
if (rank > 0):
# forward this msg
ty = data[0:2]
data = data[2:]
forwardData = "C1 01 " + ty + " " # up load to server
# source ID
forwardData = forwardData + data[0:2] +" " + data[2:4] + " "
# my ID
# modify the data
idd = "0000" + (str)(PIID)
idd = idd[-4:]
idd = idd[0] + idd[1] + " " + idd[2] + idd[3] + " "
forwardData = forwardData + idd
# its rank
itsRank = (int)(data[8:10])
if (rank >= itsRank):
print "NOT forword this msg to server."
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"NOT forword this msg to server" +'\n')
input_file.close()
return
#my rank
irank = "00 "
if (rank != -1):
irank = "00" + (str)(rank)
irank = irank[-2:] + " "
forwardData = forwardData + irank
for i in range(10, len(data), 2):
forwardData = forwardData + data[i] + data[i+1] + " "
tdata = forwardData.replace(" ","")
# len of the data
forwardData = (str)(len(tdata)/2) + " " + forwardData
print "forword this msg to server."
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"forword this msg to server" +'\n')
input_file.close()
# broadcast
BroadCast(forwardData, 0)
pass
###############################################################
# cmd 02:
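# handle_cmd_02: on a client, type 01 carries a PIID assignment; adopt it when the embedded MAC address matches ours,
# otherwise forward it downward only when this node's rank is higher than the sender's.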
def handle_cmd_02(data):
global rank, macAddr, PIID
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"handle_cmd_02|"+ data + '\n')
input_file.close()
###############################################################
# server
if (rank == 0):
print "I am server."
pass
###############################################################
# client
if (rank != 0):
# handle the msg
# type
ty = data[0:2]
data = data[2:]
# if this msg is for me
###############################################################
# assign a PIID
if (ty == '01'):
newID = data[0:4]
print "New ID: ", newID
theLastForwardedID = data[4:8]
print "The last forward ID: ", theLastForwardedID
theLastRank = data[8:10]
print "The last rank: ", theLastRank
nodeAddr = data[10:22]
print "Rev Addr: ", nodeAddr.upper().strip()
print "My Addr: ", macAddr.replace(":","").upper().strip()
# get a new PIID
if (nodeAddr.upper().strip() == macAddr.replace(":","").upper().strip()):
PIID = (int)(newID)
print "Assigned a new PIID: ", PIID
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"newID|" +(str)(PIID) +'\n')
input_file.close()
else:
# forward this msg downward
downwardData = "C1 02 01 "
#new ID
downwardData = downwardData + data[0:2] + " " + data[2:4] + " "
# my ID
# modify the data
idd = "0000" + (str)(PIID)
idd = idd[-4:]
idd = idd[0] + idd[1] + " " + idd[2] + idd[3] + " "
downwardData = downwardData + idd
# its rank
itsRank = (int)(data[8:10])
if (rank <= itsRank):
print "NOT downwardData this msg."
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"NOT downwardData this msg" +'\n')
input_file.close()
return
#my rank
irank = "00 "
if (rank != -1):
irank = "00" + (str)(rank)
irank = irank[-2:] + " "
downwardData = downwardData + irank
for i in range(10, len(data), 2):
downwardData = downwardData + data[i] + data[i+1] + " "
tdata = downwardData.replace(" ","")
# len of the data
downwardData = (str)(len(tdata)/2) + " " + downwardData
print "Downward this msg to node."
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"downwardData this msg"+'\n')
input_file.close()
# broadcast
BroadCast(downwardData, 0)
pass
###############################################################
# cmd 03: only broadcast msg
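# handle_cmd_03: type 01 is a rank broadcast; a client adopts (incoming rank + 1) when it has no rank yet or when that
# would be lower than its current rank.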
def handle_cmd_03(data):
global rank
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"handle_cmd_03|"+ data + '\n')
input_file.close()
if (rank == 0):
print "I am server."
return
# type
ty = data[0:2]
data = data[2:4]
# server broadcast it's rank
if (ty == '01'):
print "incoming rank:", data
ser_rank = (int)(data)
if ((rank == -1 or ser_rank <rank -1) and ser_rank >=0):
rank = ser_rank + 1
print "my rank: ", rank
with open("clog.txt","a") as input_file:
input_file.write(str(datetime.datetime.now())+ "|"+"my rank|" + (str)(rank)+'\n')
input_file.close()
###############################################################
if __name__ == '__main__':
# read the rank from file
with open("rank.txt","r") as input_file:
rank = (int)(input_file.readline())
input_file.close()
print "rank: ",rank
# read the mac address from file
with open("macAddr.txt","r") as input_file:
macAddr = input_file.readline()
input_file.close()
print "Mac Address: ",macAddr
# start log
with open("clog.txt","a") as input_file:
input_file.write("##############################################\n")
input_file.write("Start Time|"+str(datetime.datetime.now())+'\n')
input_file.close()
## use try catch {} to handle the while True.
while True:
try:
# client
if (rank != 0):
# scan network
ScanNetwork()
# broadcast for a while
# upload data to server
if (PIID == 0):
# request a PIID
Broadcast_Client_01_01()
if (rank > 0 and PIID > 0):
# broadcast for a while
Broadcast_Server_03_01()
# upload data to server
Broadcast_Client_01_02()
# server
if (rank == 0):
# scan network
ScanNetwork()
# broadcast for a while
Broadcast_Server_03_01()
except:
print 'error and rerun again.'
pass
|
23,203 | 8d601720f0562ce0ed90ad891ceb62d0b58d1379 | import numpy as np
from lstm_vae import create_lstm_vae
from lstm_vae.helper import get_data_v2, padding_len, vocab, manual_one_hot, id_vocab
from numpy import array
from keras.preprocessing.sequence import pad_sequences
np.set_printoptions(threshold=np.nan)
if __name__ == "__main__":
original = pad_sequences(get_data_v2("test_source.txt"), padding="post", value=0.0, maxlen=30)
paraphrase = pad_sequences(get_data_v2("test_target.txt"), padding="post", value=0.0, maxlen=30)
target_paraphrase = manual_one_hot(paraphrase)
input_dim = len(vocab)
# time_steps = original.shape[1]
time_steps = padding_len + 1
batch_size = 1
print("time_steps:" + str(time_steps))
print("input_dim:" + str(input_dim))
vae, enc, gen = create_lstm_vae(input_dim,
timesteps=time_steps,
batch_size=batch_size,
intermediate_dim=800,
latent_dim=800,
epsilon_std=1.,
)
# plot_model(enc, to_file="encoder.png", show_shapes=True)
# plot_model(vae, to_file="vae.png", show_shapes=True)
# plot_model(gen, to_file="generator.png", show_shapes=True)
# print("start loading weights")
# vae.load_weights("vae_model.h5")
# enc.load_weights("enc.h5")
# gen.load_weights("gen.h5")
# print("loading weights finished")
for i in range(200):
vae.fit([original, paraphrase], target_paraphrase, epochs=1, batch_size=batch_size, shuffle=True,
validation_split=0.2
# validation_data=([test_original, test_paraphrase], test_paraphrase),
# callbacks=[TensorBoard(log_dir="./logs")]
)
# vae.save_weights("vae_model.h5")
# enc.save_weights("enc.h5")
# gen.save_weights("gen.h5")
for t in range(10):
# print("encoding start")
sent_encoded = enc.predict([original[t:t + 1], paraphrase[t:t + 1]], batch_size=batch_size)
print(sent_encoded.shape)
# print("encoding finished")
# print("decoding start")
sent_decoded = gen.predict([sent_encoded, original[t:t + 1]], batch_size=batch_size)
for n in sent_decoded[0]:
print(id_vocab[np.argmax(n)], end=" ")
# print("decoding finished")
print()
|
23,204 | 15ed161085509ded22c2dab213ce474910f3ccfd | from utils import knot_hash
from utils.knot_hash import single_round
def solve_part_1(sparse_hash, lengths):
sparse_hash, _, _ = single_round(sparse_hash, lengths)
return sparse_hash[0] * sparse_hash[1]
def solve_part_2(puzzle_input):
return knot_hash(puzzle_input)
if __name__ == '__main__':
line = open('input.txt').readline()
lengths = [int(x) for x in line.split(',')]
print solve_part_1(range(256), lengths)
print solve_part_2(line)
|
23,205 | 1b9c95cae58d7316751087a0630283e4f57b535f | #!/usr/bin/python
# -*- coding: utf-8 -*-
import request_handler
import util
from models import Video
from app import App
from gandalf import gandalf
class AboutRequestHandler(request_handler.RequestHandler):
def render_jinja2_template(self, template_name, template_values):
template_values["selected_nav_link"] = "about"
request_handler.RequestHandler.render_jinja2_template(self, template_name, template_values)
class ViewAbout(AboutRequestHandler):
def get(self):
self.render_jinja2_template('about/about_the_site.html', {
"selected_id": "the-site",
"approx_vid_count": Video.approx_count(),
"gandalf_production_test": gandalf("production_test"),
})
class ViewAboutTheTeam(AboutRequestHandler):
def get(self):
self.render_jinja2_template('about/about_the_team.html', {
"selected_id": "the-team",
"team": [["" + item for item in items] for items in TEAM],
"contributors": ["" + item for item in sorted(CONTRIBUTORS)],
})
class ViewGettingStarted(AboutRequestHandler):
def get(self):
self.render_jinja2_template('about/getting_started.html', {
"selected_id": "getting-started",
"approx_vid_count": Video.approx_count(),
"App": App
})
class ViewDiscoveryLab(request_handler.RequestHandler):
def get(self):
self.render_jinja2_template('about/discovery_lab.html', {
"selected_id": "discovery-lab"})
class ViewFAQ(AboutRequestHandler):
def get(self):
self.render_jinja2_template('about/faq.html', {
"selected_id": "faq",
"approx_vid_count": Video.approx_count()
})
class ViewDownloads(AboutRequestHandler):
def get(self):
self.render_jinja2_template('about/downloads.html', {})
TEAM = [
(u"emanuel", u"עמנואל",
u"""
פרופ' עמנואל גרינגרד עבד שנים רבות בחברת י.ב.מ ישראל, השתתף בפיתוח מערכות ההפעלה במעבדות הפיתוח בארה"ב וכן שימש כמנהל היישום של השפות השמיות במרכזי הפיתוח של י.ב.מ בארה"ב.
בין תפקידיו בארץ ניתן למנות: מנהל הסיוע הטכני, מנהל הקבוצה להסבת תוכנות לעברית, מנהל ארצי של מהנדסי מערכות ומנהל המרכז המדעי של י.ב.מ. ישראל.
משך עבודתו לימד באוניברסיטאות ולאחר פרישתו מי.ב.מ. יסד את המחלקה להנדסת תוכנה בשנקר – בי"ס גבוה להנדסה ולעיצוב ועמד בראשה שנים רבות.
עיסוקו הנוכחי הוא במחשבים אישיים, אינטרנט ולימוד מקוון .משמש כמנהל הפרויקט של התאמת תוכנת Moodle לעברית ומרכז התרגום של אקדמיית קהאן לעברית.
"""),
(u"yarden", u"ירדן",
u"""
ירדן אסא גילתה את קהאן אקדמי בעקבות חיפוש ברשת של סרטון שיסביר לה מה זה "בינארי".
מאז היא מחפשת אנשים שרוצים להירתם ליצירת שינוי בעולם, שיצטרפו אליה.
היא חגגה השנה 25 והיא לקראת סיום תואר בפסיכולוגיה ומנהל עסקים.
היא מאד אוהבת את האקדמיה, אבל מרגישה שהגיע הזמן ליצור אקדמיה של העולם החדש.
היא מתגעגעת לניו-יורק שהייתה לה לבית, אבל אוהבת את ישראל יותר מהכל.
"""),
(u"maya", u"מאיה",
u"""
ב- 2011, הקימה מאיה את המיזם "אני-10" שהוא אוסף שיעורים ותרגולים בוידאו, בחינם, לשיפור הבנת החומר הנלמד במתמטיקה מכיתה א' - יב'.
את רב השיעורים הקליטה היא בעצמה ובחלקם נעזרה במספר מורים מוכשרים נוספים. לאורך שנים, לצד עבודתה כאדריכלית, עסקה גם כמורה פרטית בהתנדבות בשכונות מצוקה,
ובעקבות הצלחתה לגרום לתלמידיה לשפר מאוד את השגיהם בלימודים, החליטה שהיא חיבת למצוא פתרון חינמי ללימוד מתמטיקה במטרה להגיע למספר רב ככל הניתן של תלמידים משכבה סוציואקונומית נמוכה.
באמצע 2013 היא החלה לשתף פעולה עם אקדמיית קהאן.
"""),
(u"eli", u"אלי",
u"""
אלי נצר, מורה למתמטיקה בתיכון חדש תל-אביב. בוגר אוניברסיטת תל-אביב בלימודי הנדסת אלקטרוניקה ובעל תואר שני במנהל עסקים.
עד לפני מספר שנים עסק בניהול פרוייקטים בתעשיית ההיי-טק. לפני מספר שנים הגשים חלום אישי וביצע הסבה מהיי-טק להוראת מתמטיקה בתיכון.
"""),
(u"eran", u"ערן",
u"""
ערן חג'ג', 33, תל אביב, יזם ומתכנת מילדות.
בעברו מילא תפקידי פיתוח וניהול שונים בניהם כ-CTO של ביטוח ישיר.
כיום מנכ"ל של חברת אלגו-טריידינג, יזם ויועץ לחברות היטק שונות בניהן בינק ו-JFROG,
אבל יותר מכל אוהב ללמד.
ערן נדבק בחיידק בעקבות ההרצאה של סאל ב-TED ומאז פועל לקידום הפרוייקט בארץ.
מאמין גדול ביכולת שינוי לטובה של מערכת החינוך והעולם בכלל ומשתדל לחיות בהווה.
"""),
(u"ofer", u"עופר",
u"""
עופר קורן נתקל לראשונה בקהאן לפני כ-5 שנים, אז עבד באולפני האנימציה של דיסני בקליפורניה, והיה צריך להשלים חסך במטריצות וטרנספורמציות בתלת-מימד.
מאז הוא כבר חזר לישראל ועובד בחברת IBM כמהנדס תוכנה, ובמקביל מרכז את הפיתוח והתחזוקה של אתר אקדמית קהאן העברית.
"""),
(u"roi", u"רועי",
u"""
רועי חרמוני נדבק בחיידק ההוראה בזמן הצבא ומאז משתדל לעמוד מול כיתה כמה שיותר.
מאז ששמע על אקדמיית קהאן הבין שעתיד החינוך בעולם נמצא מול מחשב ולא רק מול המורה.
מתגורר במושב נורדיה, חובב מוזיקה מושבע ומאמין שאפשר להסביר הכל דרך ביולוגיה.
"""),
(u"or", u"אור",
u"""
אור כהן, 31, רחובות. מועמד לדוקטורט בפיזיקה במכון ויצמן. בתור אחד שלמד חלק גדול מהתואר הראשון שלו בטכניון דרך הרצאות וידאו, הוא מאמין גדול ביכולת של טכנלוגיה להעניק חינוך ברמה גבוהה שמותאם לכל תלמיד,
מכל שכבה חברתית ובכל מקום בארץ. מאז ששמע על אקדמית קהאן לפני שנתיים ומאז הוא מתנדב בעמותה בריכוז מערך התרגום והתכנות של התרגילים.
"""),
# (u"asaf", u"אסף",
# u"""
# אסף גלבוע הוא יזם בנשמה שמתמחה בהפיכת רעיונות טובים לארגונים מצליחים.
# בחמשת השנים האחרונות אסף הקים וניהל את תמיס יישומי כטב"ם, חברה פורצת דרך שהעבירה את עולם המטוסים הבלתי מאויישים מהספרה הצבאית לספרה האזרחית.
# בנוסף לפעילותו העסקית לאסף יש תשוקה בוערת לחינוך. בשנתיים האחרונות אסף עובד עם נערים ונערות בערי הפריפריה במסגרת עמותת יוניסטרים,
# אשר פועלת להעניק למנהיגי העתיד של ארצנו את הביטחון העצמי והאמונה ביכולת שלהם להיות יזמים וליצור עולם טוב יותר.
# כשהוא לא מנסה לשנות את העולם אסף משחית את זמנו בטיולים ברחבי הארץ והעולם עם אשתו ושון הכלב (גולדן רטריור ויזם בתחום העצמות וכדורי הטניס) וברכיבה על אופני שטח ברחבי ארץ ישראל המדהימה.
# """),
# (u"shahar", u"שחר",
# u"""
# שחר קידר, בן 30, גר במסילת ציון ונשוי למילה. בימים הוא מתכנת בכיר בחברת סטארטאפ בשם BigPanda.
# בלילות הוא ישן. פעם חלם להיות מורה, אבל במקום זה, החליט לנצל את התשוקה שלו לטכנולוגיה ולחינוך על מנת לייסד את "אקדמיית קהאן בישראל".
# """),
]
CONTRIBUTORS = [
u"אלון חורב",
u"שמוליק לוטטי",
u"עמרי ולפר",
u"שי ארליכמן",
u"אסף גלבוע",
u"שחר קידר",
u"אמיר ליס",
u"גלי פלס",
u"אלדד ארנון",
u"שלומית אורגד",
u"עידו גרינגרד",
u"רויטל חליף",
u"מיכאל נירגד-גיא",
u"מילה קידר",
u"אורי כהן",
u"זוהר שינדלמן",
u"שרון בר-נטע",
u"חני מנור",
u"ויקטור פליקשטיין",
u"יגאל ספיר",
u"מיקי אליאל",
] |
23,206 | 6dda70b20f9f0aca23d45d3f319696b9b0f0c383 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""Support for Honeywell's RAMSES-II RF protocol, as used by CH/DHW (heat).
Provides support for climate entities.
"""
from __future__ import annotations
import json
import logging
from datetime import datetime as dt
from typing import Any
from homeassistant.components.climate import (
PRECISION_TENTHS,
ClimateEntity,
ClimateEntityFeature,
HVACAction,
HVACMode,
)
from homeassistant.components.climate.const import (
PRESET_AWAY,
PRESET_ECO,
PRESET_HOME,
PRESET_NONE,
)
from homeassistant.core import callback
from . import EvohomeZoneBase
from .const import ATTR_SETPOINT, DATA, SERVICE, UNIQUE_ID, SystemMode, ZoneMode
from .schemas import (
CONF_MODE,
CONF_SYSTEM_MODE,
SVC_RESET_SYSTEM_MODE,
SVC_SET_SYSTEM_MODE,
)
_LOGGER = logging.getLogger(__name__)
MODE_TCS_TO_HA = {
SystemMode.AUTO: HVACMode.HEAT, # NOTE: don't use _AUTO
SystemMode.HEAT_OFF: HVACMode.OFF,
}
MODE_TCS_TO_HA[SystemMode.RESET] = MODE_TCS_TO_HA[SystemMode.AUTO]
MODE_TO_TCS = {
HVACMode.HEAT: SystemMode.AUTO,
HVACMode.OFF: SystemMode.HEAT_OFF,
HVACMode.AUTO: SystemMode.RESET, # not all systems support this
}
PRESET_CUSTOM = "custom" # NOTE: not an offical PRESET
PRESET_TCS_TO_HA = {
SystemMode.AUTO: PRESET_NONE,
SystemMode.AWAY: PRESET_AWAY,
SystemMode.CUSTOM: PRESET_CUSTOM,
SystemMode.DAY_OFF: PRESET_HOME,
SystemMode.ECO_BOOST: PRESET_ECO, # or: PRESET_BOOST
SystemMode.HEAT_OFF: PRESET_NONE,
}
PRESET_TCS_TO_HA[SystemMode.DAY_OFF_ECO] = PRESET_TCS_TO_HA[SystemMode.DAY_OFF]
PRESET_TCS_TO_HA[SystemMode.RESET] = PRESET_TCS_TO_HA[SystemMode.AUTO]
PRESET_TO_TCS = (
SystemMode.AUTO,
SystemMode.AWAY,
SystemMode.CUSTOM,
SystemMode.DAY_OFF,
SystemMode.ECO_BOOST,
)
PRESET_TO_TCS = {v: k for k, v in PRESET_TCS_TO_HA.items() if k in PRESET_TO_TCS}
#
MODE_ZONE_TO_HA = {
ZoneMode.ADVANCED: HVACMode.HEAT,
ZoneMode.SCHEDULE: HVACMode.AUTO,
}
MODE_ZONE_TO_HA[ZoneMode.PERMANENT] = MODE_ZONE_TO_HA[ZoneMode.ADVANCED]
MODE_ZONE_TO_HA[ZoneMode.TEMPORARY] = MODE_ZONE_TO_HA[ZoneMode.ADVANCED]
MODE_TO_ZONE = (ZoneMode.SCHEDULE, ZoneMode.PERMANENT)
MODE_TO_ZONE = {v: k for k, v in MODE_ZONE_TO_HA.items() if k in MODE_TO_ZONE}
PRESET_ZONE_TO_HA = {
ZoneMode.SCHEDULE: PRESET_NONE,
ZoneMode.TEMPORARY: "temporary",
ZoneMode.PERMANENT: "permanent",
}
PRESET_TO_ZONE = {v: k for k, v in PRESET_ZONE_TO_HA.items()}
class EvohomeController(EvohomeZoneBase, ClimateEntity):
"""Base for a Honeywell Controller/Location."""
_attr_icon: str = "mdi:thermostat"
_attr_hvac_modes: list[str] = list(MODE_TO_TCS)
_attr_preset_modes: list[str] = list(PRESET_TO_TCS)
_attr_supported_features: int = ClimateEntityFeature.PRESET_MODE
_attr_max_temp: float | None = None
_attr_min_temp: float | None = None
def __init__(self, broker, device) -> None:
"""Initialize a TCS Controller."""
_LOGGER.info("Found a Controller: %r", device)
super().__init__(broker, device)
@property
def current_temperature(self) -> float | None:
"""Return the average current temperature of the heating Zones.
Controllers do not have a current temp, but one is expected by HA.
"""
temps = [z.temperature for z in self._device.zones if z.temperature is not None]
temps = [t for t in temps if t is not None] # above is buggy, why?
try:
return round(sum(temps) / len(temps), 1) if temps else None
except TypeError:
_LOGGER.error(f"temp ({temps}) contains None")
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the integration-specific state attributes."""
return {
"heat_demand": self._device.heat_demand,
"heat_demands": self._device.heat_demands,
"relay_demands": self._device.relay_demands,
"system_mode": self._device.system_mode,
"tpi_params": self._device.tpi_params,
# "faults": self._device.faultlog,
}
@property
def hvac_action(self) -> str | None:
"""Return the Controller's current running hvac operation."""
if self._device.system_mode is None:
return # unable to determine
if self._device.system_mode[CONF_SYSTEM_MODE] == SystemMode.HEAT_OFF:
return HVACAction.OFF
if self._device.heat_demand:
return HVACAction.HEATING
if self._device.heat_demand is not None:
return HVACAction.IDLE
@property
def hvac_mode(self) -> str | None:
"""Return the Controller's current operating mode of a Controller."""
if self._device.system_mode is None:
return # unable to determine
if self._device.system_mode[CONF_SYSTEM_MODE] == SystemMode.HEAT_OFF:
return HVACMode.OFF
if self._device.system_mode[CONF_SYSTEM_MODE] == SystemMode.AWAY:
return HVACMode.AUTO # users can't adjust setpoints in away mode
return HVACMode.HEAT
@property
def name(self) -> str:
"""Return the name of the Controller."""
return "Controller"
@property
def preset_mode(self) -> str | None:
"""Return the Controller's current preset mode, e.g., home, away, temp."""
if self._device.system_mode is None:
return # unable to determine
return PRESET_TCS_TO_HA[self._device.system_mode[CONF_SYSTEM_MODE]]
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
zones = [z for z in self._device.zones if z.setpoint is not None]
temps = [z.setpoint for z in zones if z.heat_demand is not None]
return max(z.setpoint for z in zones) if temps else None
# temps = [z.setpoint for z in self._device.zones]
# return round(sum(temps) / len(temps), 1) if temps else None
@callback
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set an operating mode for a Controller."""
self.svc_set_system_mode(MODE_TO_TCS.get(hvac_mode))
@callback
def set_preset_mode(self, preset_mode: str | None) -> None:
"""Set the preset mode; if None, then revert to 'Auto' mode."""
self.svc_set_system_mode(PRESET_TO_TCS.get(preset_mode, SystemMode.AUTO))
@callback
def async_handle_dispatch(self, *args) -> None:
"""Process a service request (system mode) for a controller."""
if not args:
self.update_ha_state()
return
payload = args[0]
if payload.get(UNIQUE_ID) != self.unique_id:
return
elif payload[SERVICE] == SVC_RESET_SYSTEM_MODE:
self._call_client_api(self._device.reset_mode)
elif payload[SERVICE] == SVC_SET_SYSTEM_MODE:
kwargs = dict(payload[DATA])
kwargs["system_mode"] = kwargs.pop("mode", None)
until = kwargs.pop("duration", None) or kwargs.pop("period", None)
kwargs["until"] = (dt.now() + until) if until else None
self._call_client_api(self._device.set_mode, **kwargs)
@callback
def svc_reset_system_mode(self) -> None:
"""Reset the (native) operating mode of the Controller."""
self._call_client_api(self._device.reset_mode)
@callback
def svc_set_system_mode(self, mode, period=None, days=None) -> None:
"""Set the (native) operating mode of the Controller."""
if period is not None:
until = dt.now() + period
elif days is not None:
until = dt.now() + days # TODO: round down
else:
until = None
self._call_client_api(self._device.set_mode, system_mode=mode, until=until)
class EvohomeZone(EvohomeZoneBase, ClimateEntity):
"""Base for a Honeywell TCS Zone."""
_attr_icon: str = "mdi:radiator"
_attr_hvac_modes: list[str] = list(MODE_TO_ZONE)
_attr_preset_modes: list[str] = list(PRESET_TO_ZONE)
_attr_supported_features: int = (
ClimateEntityFeature.PRESET_MODE | ClimateEntityFeature.TARGET_TEMPERATURE
)
_attr_target_temperature_step: float = PRECISION_TENTHS
def __init__(self, broker, device) -> None:
"""Initialize a TCS Zone."""
_LOGGER.info("Found a Zone: %r", device)
super().__init__(broker, device)
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the integration-specific state attributes."""
return {
"zone_idx": self._device.idx,
"heating_type": self._device.heating_type,
"mode": self._device.mode,
"config": self._device.config,
**super().extra_state_attributes,
"schedule": self._device.schedule,
"schedule_version": self._device.schedule_version,
}
@property
def hvac_action(self) -> str | None:
"""Return the Zone's current running hvac operation."""
if self._device.tcs.system_mode is None:
return # unable to determine
if self._device.tcs.system_mode[CONF_SYSTEM_MODE] == SystemMode.HEAT_OFF:
return HVACAction.OFF
if self._device.heat_demand:
return HVACAction.HEATING
if self._device.heat_demand is not None:
return HVACAction.IDLE
@property
def hvac_mode(self) -> str | None:
"""Return the Zone's hvac operation ie. heat, cool mode."""
if self._device.tcs.system_mode is None:
return # unable to determine
if self._device.tcs.system_mode[CONF_SYSTEM_MODE] == SystemMode.AWAY:
return HVACMode.AUTO
if self._device.tcs.system_mode[CONF_SYSTEM_MODE] == SystemMode.HEAT_OFF:
return HVACMode.OFF
if self._device.mode is None or self._device.mode[ATTR_SETPOINT] is None:
return # unable to determine
if (
self._device.config
and self._device.mode[ATTR_SETPOINT] <= self._device.config["min_temp"]
):
return HVACMode.OFF
return HVACMode.HEAT
@property
def max_temp(self) -> float | None:
"""Return the maximum target temperature of a Zone."""
try:
return self._device.config["max_temp"]
except TypeError: # 'NoneType' object is not subscriptable
return
@property
def min_temp(self) -> float | None:
"""Return the minimum target temperature of a Zone."""
try:
return self._device.config["min_temp"]
except TypeError: # 'NoneType' object is not subscriptable
return
@property
def preset_mode(self) -> str | None:
"""Return the Zone's current preset mode, e.g., home, away, temp."""
if self._device.tcs.system_mode is None:
return # unable to determine
# if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in MODE_TCS_TO_HA:
if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in (
SystemMode.AWAY,
SystemMode.HEAT_OFF,
):
return PRESET_TCS_TO_HA[self._device.tcs.system_mode[CONF_SYSTEM_MODE]]
if self._device.mode is None:
return # unable to determine
if self._device.mode[CONF_MODE] == ZoneMode.SCHEDULE:
return PRESET_TCS_TO_HA[self._device.tcs.system_mode[CONF_SYSTEM_MODE]]
return PRESET_ZONE_TO_HA.get(self._device.mode[CONF_MODE])
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
return self._device.setpoint
@callback
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set a Zone to one of its native operating modes."""
if hvac_mode == HVACMode.AUTO: # FollowSchedule
self.svc_reset_zone_mode()
elif hvac_mode == HVACMode.HEAT: # TemporaryOverride
self.svc_set_zone_mode(mode=ZoneMode.PERMANENT, setpoint=25) # TODO:
else: # HVACMode.OFF, PermanentOverride, temp = min
self.svc_set_zone_mode(self._device.set_frost_mode) # TODO:
@callback
def set_preset_mode(self, preset_mode: str | None) -> None:
"""Set the preset mode; if None, then revert to following the schedule."""
self.svc_set_zone_mode(
mode=PRESET_TO_ZONE.get(preset_mode),
setpoint=self.target_temperature if preset_mode == "permanent" else None,
)
@callback
def set_temperature(self, temperature: float = None, **kwargs) -> None:
"""Set a new target temperature."""
self.svc_set_zone_mode(setpoint=temperature)
@callback
def svc_put_zone_temp(
self, temperature: float, **kwargs
) -> None: # set_current_temp
"""Fake the measured temperature of the Zone sensor.
This is not the setpoint (see: set_temperature), but the measured temperature.
"""
self._device.sensor._make_fake()
self._device.sensor.temperature = temperature
self._device._get_temp()
self.update_ha_state()
@callback
def svc_reset_zone_config(self) -> None:
"""Reset the configuration of the Zone."""
self._call_client_api(self._device.reset_config)
@callback
def svc_reset_zone_mode(self) -> None:
"""Reset the (native) operating mode of the Zone."""
self._call_client_api(self._device.reset_mode)
@callback
def svc_set_zone_config(self, **kwargs) -> None:
"""Set the configuration of the Zone (min/max temp, etc.)."""
self._call_client_api(self._device.set_config, **kwargs)
@callback
def svc_set_zone_mode(
self, mode=None, setpoint=None, duration=None, until=None
) -> None:
"""Set the (native) operating mode of the Zone."""
if until is None and duration is not None:
until = dt.now() + duration
self._call_client_api(
self._device.set_mode, mode=mode, setpoint=setpoint, until=until
)
async def svc_get_zone_schedule(self, **kwargs) -> None:
"""Get the latest weekly schedule of the Zone."""
# {{ state_attr('climate.ramses_cc_01_145038_04', 'schedule') }}
await self._device.get_schedule()
self.update_ha_state()
async def svc_set_zone_schedule(self, schedule: str, **kwargs) -> None:
"""Set the weekly schedule of the Zone."""
await self._device.set_schedule(json.loads(schedule))
|
23,207 | d292e8ca4d40d39f3dec975086fae4c281e37c09 | import decimal
from datetime import date
from django.urls import reverse
from django.db import models
from django.utils.formats import date_format
from django.utils.translation import gettext_lazy as _
class TimeStampMixin(models.Model):
""" Абстрактный класс для использования во всех моделях где нужны created_at и updated_at
Отсюда:
https://stackoverflow.com/questions/3429878/automatic-creation-date-for-django-model-form-objects
"""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class LedgerAccount(TimeStampMixin):
class AccountTypes(models.IntegerChoices):
""" Три типа бухгалтерских cчетов
Отсюда:
https://docs.djangoproject.com/en/3.0/ref/models/fields/#enumeration-types
"""
ACTIVE = +1, _('Активный')
VARIABLE = 0, _('Изменчивый')
SOURCE = -1, _('Пассивный')
number = models.CharField(max_length=2,unique=True)
name = models.CharField(max_length=32)
full_name = models.CharField(max_length=64)
type = models.IntegerField(
choices=AccountTypes.choices,
default=AccountTypes.ACTIVE,
)
def __str__(self):
# TODO: rewrite as "%s ..." % (self.number, ...)
return f'{self.number} {self.name}'
def get_description(self):
# TODO: rewrite as "%s ..." % (self.number, ...)
return f'Счет: {self.number}, ' \
f'имя: {self.name}, ' \
f'тип: {self.get_type()}'
def get_absolute_url(self):
return reverse('ledger:account_view', args=[self.number])
# def get_type(self):
# return self.AccountTypes(self.type).label
def get_type_sign(self):
if self.type == self.AccountTypes.ACTIVE:
return 'a'
elif self.type == self.AccountTypes.VARIABLE:
return '~'
elif self.type == self.AccountTypes.SOURCE:
return 'п'
def get_refs(self) -> int:
""" Подсчет общего количества упоминаний этого счета.Для тестов и разработки.
Returns:
int: общее количество упоминаний счета
"""
return LedgerEntry.objects.filter(
status=LedgerEntry.Statuses.ENABLE
).filter(
models.Q(account_two=self.id) | models.Q(account_one=self.id)
).count()
def get_arrival(self, date_from: date, date_to: date) -> decimal.Decimal:
""" Подсчет прихода за период (включительно) по журналу проводок для текущего счета.
Args:
date_from (date): Начало периода
date_to (date): Конец периода
Returns:
decimal.Decimal: приход за период
"""
query_one = LedgerEntry.objects.filter(
models.Q(account_one=self.id) | models.Q(account_two=self.id),
status=LedgerEntry.Statuses.ENABLE,
type=LedgerEntry.EntryTypes.RISE.value,
date__gte=date_from,
date__lte=date_to
).aggregate(models.Sum('amount'))
query_two = LedgerEntry.objects.filter(
account_two=self.id,
status=LedgerEntry.Statuses.ENABLE,
type=LedgerEntry.EntryTypes.MOVE.value,
date__gte=date_from,
date__lte=date_to
).aggregate(models.Sum('amount'))
amount = decimal.Decimal('0.00')
if query_one['amount__sum'] is not None:
amount = query_one['amount__sum']
if query_two['amount__sum'] is not None:
amount += query_two['amount__sum']
return amount.quantize(decimal.Decimal("1.00"))
def get_expense(self, date_from: date, date_to: date) -> decimal.Decimal:
""" Подсчет расхода за период (включительно) по журналу проводок для текущего счета.
Args:
date_from (date): Начало периода
date_to (date): Конец периода
Returns:
decimal.Decimal: приход за период
"""
query_one = LedgerEntry.objects.filter(
models.Q(account_one=self.id) | models.Q(account_two=self.id),
status=LedgerEntry.Statuses.ENABLE,
type=LedgerEntry.EntryTypes.FALL.value,
date__gte=date_from, date__lte=date_to
).aggregate(models.Sum('amount'))
query_two = LedgerEntry.objects.filter(
account_one=self.id,
status=LedgerEntry.Statuses.ENABLE,
type=LedgerEntry.EntryTypes.MOVE.value,
date__gte=date_from,
date__lte=date_to
).aggregate(models.Sum('amount'))
amount = decimal.Decimal('0.00')
if query_one['amount__sum'] is not None:
amount = query_one['amount__sum']
if query_two['amount__sum'] is not None:
amount += query_two['amount__sum']
return amount.quantize(decimal.Decimal("1.00"))
def get_remains(self, date_from: date, date_to: date) -> decimal.Decimal:
"""Подсчет остатка на счете на определенную дату. НЕ ДОДЕЛАНА!
Для корректного расчета остатка на счету на дату надо, чтобы считалось не от начальной даты,
а с начала движений по счету.
Может быть сделать отдельную функцию для этого.
Args:
date_from (date): Дата, с которой начинается расчет приходов/расходов
date_to (date): Дата, на которую нужно посчитать остаток
Returns:
decimal.Decimal: Остаток для отображения
"""
arrival = self.get_arrival(date_from, date_to)
expense = self.get_expense(date_from, date_to)
balance = {
'arrival': arrival,
'expense': expense,
'balance': (arrival - expense),
}
return balance
def get_account_balance(self, date_to: date) -> decimal.Decimal:
"""Подсчет остатка на счете на определенную дату.
Args:
date_to (date): Дата, на которую нужно посчитать остаток
Returns:
decimal.Decimal: Остаток для отображения
"""
date_earliest = LedgerEntry.objects.earliest('date').date
arrival = self.get_arrival(date_earliest, date_to)
expense = self.get_expense(date_earliest, date_to)
balance = arrival - expense
return balance
class Meta:
verbose_name = "Бухгалтерский счет"
verbose_name_plural = "План счетов"
class LedgerEntry(TimeStampMixin):
class EntryTypes(models.IntegerChoices):
""" Три вида бухгалтерских проводок
Отсюда:
https://docs.djangoproject.com/en/3.0/ref/models/fields/#enumeration-types
"""
RISE = 1, _('Увеличение')
MOVE = 0, _('Перетекание')
FALL = -1, _('Уменьшение')
class Statuses(models.IntegerChoices):
""" Два состояния активности бухгалтерских проводок
Отсюда:
https://docs.djangoproject.com/en/3.0/ref/models/fields/#enumeration-types
"""
ENABLE = 1, _('Включена')
DISABLE = 0, _('Выключена')
date = models.DateField(default=date.today)
account_one = models.ForeignKey(
LedgerAccount, on_delete=models.SET_NULL, blank=True, null=True)
account_two = models.ForeignKey(
LedgerAccount, on_delete=models.SET_NULL, blank=True, null=True, related_name='+')
type = models.IntegerField(
choices=EntryTypes.choices,
default=EntryTypes.RISE,
)
amount = models.DecimalField(max_digits=10, decimal_places=2)
comment = models.CharField(max_length=256)
status = models.IntegerField(
choices=Statuses.choices,
default=Statuses.ENABLE,
)
def __str__(self):
# TODO: rewrite as "%s ..." % (self.number, ...)
return f'id:{self.pk}, ' \
f"{date_format(self.date, 'SHORT_DATE_FORMAT')}, " \
f'{self.comment}, ' \
f'{self.get_account_one_type_symbol()}{self.account_one.number} ' \
f'{self.get_account_two_type_symbol()}{self.account_two.number}, ' \
f'{self.amount}, ' \
f'{self.get_status()}'
def get_description(self):
# TODO: rewrite as "%s ..." % (self.number, ...)
return f'Проводка: {self.comment}, ' \
f'id: {self.pk}, ' \
f"дата: {date_format(self.date, 'SHORT_DATE_FORMAT')}, " \
f'счета: {self.get_account_one_type_symbol()}{self.account_one.number} ' \
f'{self.get_account_two_type_symbol()}{self.account_two.number}, ' \
f'сумма: {self.amount}, ' \
f'статус: {self.get_status()}'
def get_absolute_url(self):
return reverse('ledger:entry_view', args=[self.pk])
# def get_type(self):
# return self.EntryTypes(self.type).label
def get_account_one_type_symbol(self):
if self.type == self.EntryTypes.RISE:
return '+'
elif self.type == self.EntryTypes.MOVE or \
self.type == self.EntryTypes.FALL:
return '-'
def get_account_two_type_symbol(self):
if self.type == self.EntryTypes.RISE or \
self.type == self.EntryTypes.MOVE:
return '+'
elif self.type == self.EntryTypes.FALL:
return '-'
def get_status(self):
return self.Statuses(self.status).label
class Meta:
verbose_name = "Бухгалтерская проводка"
verbose_name_plural = "Бухгалтерские проводки"
|
23,208 | 553ade0350d4982def04d8d827fc08ed6b085e50 | from django import forms
from django.contrib.auth.models import User
from page_ap.models import Profile, Teams, PLAYER_POSITIONS, EMPLOYE_POSITIONS, YearBook, Post, BaseBuildings, Trainings
from page_ap.models import Images
from crispy_forms.helper import FormHelper
from django.utils.timezone import timezone
from django.conf import settings
from captcha.fields import ReCaptchaField
from tinymce.widgets import TinyMCE
from datetime import datetime
import os
import calendar
from string import Template
from django.utils.safestring import mark_safe
# DAYS = list(calendar.day_name)
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
class PlayerRegistrationForm(forms.ModelForm):
helper = FormHelper()
# helper.form_show_labels = False
team = forms.ModelChoiceField(label="Wybierz drużynę",
queryset=Teams.objects.filter(archive=False),
required=True,
help_text='*')
position = forms.ChoiceField(choices=PLAYER_POSITIONS,
label="Pozycja zawodnika",
widget=forms.Select(),
required=True,
help_text='*')
date_of_birth = forms.DateField(
label="Data urodzenia",
widget=forms.SelectDateWidget(years=range(1950, (datetime.now().year) +
1)))
password = forms.CharField(label='Hasło',
widget=forms.PasswordInput,
max_length=12)
password2 = forms.CharField(label='Powtórz hasło',
widget=forms.PasswordInput,
max_length=12)
phone_number = forms.CharField(label='Telefon kontaktowy',
required=True,
help_text='*',
max_length=12)
rodo = forms.BooleanField(
label='Zgoda na wyświetlanie danych osobowych',
required=False,
)
captcha = ReCaptchaField(required=True)
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email')
def clean_password2(self):
cd = self.cleaned_data
if cd['password'] != cd['password2']:
raise forms.ValidationError('Hasła nie są identyczne.')
return cd['password2']
class RegistrationEmployeForm(forms.ModelForm):
password = forms.CharField(label='Hasło',
widget=forms.PasswordInput,
max_length=12)
employe_position = forms.ChoiceField(choices=EMPLOYE_POSITIONS,
label="Status pracownika akademii",
widget=forms.Select(),
required=True,
help_text='*')
date_of_birth = forms.DateField(
label="Data urodzenia",
widget=forms.SelectDateWidget(years=range(1950, (datetime.now().year) +
1)))
password2 = forms.CharField(label='Powtórz hasło',
widget=forms.PasswordInput,
max_length=12)
phone_number = forms.CharField(label='Telefon kontaktowy',
required=True,
help_text='*',
max_length=12)
hidden_password = forms.CharField(label='Wpisz tajne hasło',
widget=forms.PasswordInput())
captcha = ReCaptchaField(required=True)
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email')
def clean_password2(self):
cd = self.cleaned_data
if cd['password'] != cd['password2']:
raise forms.ValidationError('Hasła nie są identyczne.')
return cd['password2']
def clean_hidden_password(self):
cd = self.cleaned_data
if cd['hidden_password'] != os.environ.get('HIDDEN_PASSWORD'):
raise forms.ValidationError('Hasło nie jest poprawne.')
return cd['hidden_password']
class ProfileEditForm(forms.Form):
is_player = forms.BooleanField(label="Czy jesteś czynnym zawodnikiem?")
team_name = forms.CharField(label="Aktualnie zawodnikiem w ...",
max_length=32,
required=False)
date_of_birth = forms.DateField(label="Data urodzenia",
widget=forms.SelectDateWidget)
certyficate = forms.CharField(label='Certyfikaty zawodowe- opis',
required=True,
help_text='*',
max_length=128)
phone_number = forms.CharField(label='Telefon kontaktowy',
required=True,
help_text='*',
min_length=9,
max_length=12)
# text = forms.CharField(label="Krótki opis kariery lub motto",
# widget=TinyMCE(attrs={
# 'cols': 80,
# 'rows': 30
# }))
text = forms.CharField(label="Krótki opis kariery lub motto",
widget=forms.Textarea(attrs={
"rows": 15,
"cols": 160
}),
max_length=256)
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email')
widgets = {
'date_of_birth': forms.SelectDateWidget(),
'team_name': forms.Textarea(attrs={
'cols': 80,
'rows': 20
})
}
class ProfileEmployeUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = (
'photo',
'employe_position',
'is_player',
'team_name',
'date_of_birth',
'phone_number',
'certyficate',
'text',
)
widgets = {
'date_of_birth':
forms.SelectDateWidget(years=range(1950, (datetime.now().year) +
1))
}
class ProfilePlayerUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = (
'photo',
'team',
'date_of_start',
'position',
'is_pupil',
'from_team',
'date_of_birth',
'phone_number',
'text',
)
widgets = {
'date_of_birth':
forms.SelectDateWidget(years=range(1950, (datetime.now().year) +
1)),
'date_of_start':
forms.SelectDateWidget(years=range(1950, (datetime.now().year) +
1))
}
|
23,209 | 36b13909c0961363fee391e9ffd6df5e64575919 | import numpy as np
import scipy.io
import math
from keras.utils import np_utils
def shuffle(a, b):
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
def convert_matlab_file(samples, number_of_samples_per_class,signal_length, number_of_sensors, matlab_file, save_path):
x = scipy.io.loadmat(matlab_file)
sensors = np.transpose(x['InputData'], (3, 0, 1, 2)) # reorder the file
print(sensors.shape)
sensors = sensors.reshape(samples, signal_length, number_of_sensors)
X_train = sensors
Y_train = np.array([math.ceil(i / number_of_samples_per_class) -1 for i in range(1, 241)])
Y_train = np_utils.to_categorical(Y_train)
X_train, Y_train = shuffle(X_train, Y_train)
np.savez(save_path, X_train, Y_train)
return X_train, Y_train
def load_pickle(path):
t = np.load(path + ".npz")
return t['arr_0'], t['arr_1']
|
23,210 | 33181674c1a07b5622da36c4d14746590baf37dd | import sys
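# Evaluate a prefix (Polish notation) expression: scan the tokens right to left, pushing numbers onto a stack and
# replacing its top two values with the result whenever an operator is seen.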
def prefix_eval(prefix_list):
#start from the right
prefix_list = prefix_list[::-1]
stack = []
for val in prefix_list:
if val in ('+', '-', '/', '*'):
num1 = stack.pop()
num2 = stack.pop()
if val == '+':
stack.append(num1 + num2)
elif val == '-':
stack.append(num1 - num2)
elif val == '/':
stack.append(num1 / num2)
elif val == '*':
stack.append(num1 * num2)
else:
stack.append(int(val))
return stack[0]
if __name__ == '__main__':
for line in open(sys.argv[1]):
if len(line.strip()) > 0:
print prefix_eval(line.strip().split(' '))
sys.exit(0)
|
23,211 | aa6b7439f7da459fddc9f47532b2ac1b8b6cbba2 | import logging
import time
from enum import Enum
from termcolor import colored
from detection.object_detector_base import BaseTFObjectDetector
from detection.pattern_detector_task_skipper import PatternBasedSkipAheadOptimizer
from detection.state_managers.state_manager import StateManager
from detection.states import StateHistoryStep
log = logging.getLogger(__name__)
class ObjectStates(Enum):
OBJECT_DETECTED = 1
class ObjectStateManager(StateManager):
def __init__(self, object_detector: BaseTFObjectDetector, pattern_detector, broker_q):
super().__init__(pattern_detector, broker_q)
self.object_detector = object_detector
self.object_detector.task_skipper = PatternBasedSkipAheadOptimizer(pattern_detector, ObjectStates.OBJECT_DETECTED)
def add_state(self, state, ts = None):
(label, accuracy, image_path) = state
history_step = StateHistoryStep(ObjectStates.OBJECT_DETECTED, state_attrs=(label, accuracy, image_path), ts=ts)
added = self.pattern_detector.add_to_state_history(
history_step, avoid_duplicates = True)
if added:
log.info(colored("object state changed: %s" % history_step, 'blue', attrs=['bold']))
def get_latest_committed_offset(self):
return self.object_detector.latest_committed_offset
def get_current_lag(self):
return self.object_detector.input_frame_q.size() |
23,212 | 3eecf715307d3eeb396c68c3705395b356eb1a73 | import math
import os
def main():
# write your code below this line
os.system('clear')
lado = float(input("Ingrese el valor del lado del cuadrado: "))
perimetro = 4 * lado
area1 = lado * lado
area2 = lado ** 2
area3 = math.pow(lado,2)
print(f"El perimetro del cuadrado es: {perimetro}")
print(f"El area del cuadrado con forma 1 es: {area1}")
print(f"El area del cuadrado con forma 2 es: {area2}")
print(f"El area del cuadrado con forma 3 es: {area3}")
if __name__=='__main__':
main()
|
23,213 | 15bc76ce1bed5b71c8d79cc9e8a919c0276f9a53 | ba2692.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000011111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000001111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111110000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111110000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000011111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111101110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000101111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000001111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000011111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000011111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000011111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000011111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000011111111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000001111111111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000001111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000001111111111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000001111111111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000001111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000001111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000011111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000011111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000011111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000001111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000',
]
|
23,214 | ff56d3a04d76a6efdcbd1242cdd66ef5126e4e24 | #!/usr/bin/env python3
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
__all__ = ['Mines']
class _CoordsFormatter():
"""
Formats coordinates in the interactive plot mode
"""
def __init__(self, width, height):
self.width = width
self.height = height
def __call__(self, x, y):
string = ''
try:
i = int(round(y))
j = int(round(x))
if i >= 0 and i < self.height and j >= 0 and j < self.width:
string = ' i = {}, j = {}'.format(i, j)
except Exception:
pass
return string
class Mines:
"""
Minesweeper
Parameters
----------
width : int
Width of minefield
height : int
Height of minefield
n_mines : int
Number of mines
show : bool (optional)
If True, displays game when initialized
"""
# Colormap object used for showing wrong cells
cmap_reds_alpha = LinearSegmentedColormap.from_list(name='Reds_alpha',
colors=[[0, 0, 0, 0], [.9, 0, 0, 1]])
# Figure dimensions (min width and height in inches and scale factor)
figsize = {'minw': 4, 'minh': 3, 'scale': .7}
# Color dictionary for coloring the revealed cells according with number
# of mines in the neighboring cells
color_dict = {1: [0, 0, 1], 2: [0, 1, 0], 3: [1, 0, 0], 4: [0, 0, .5],
5: [.5, 0, 0], 6: [0, 0, .66], 7: [0, 0, .33], 8: [0, 0, 0]}
# Pre-defined levels (level: [width, height, mines])
levels = {0: [8, 8, 10], 1: [16, 16, 40], 2: [30, 16, 99]}
# Aliases for the levels
level_aliases = {**dict.fromkeys(['beginner', 'b', '0', 0], 0),
**dict.fromkeys(['intermediate', 'i', '1', 1], 1),
**dict.fromkeys(['expert', 'e', '2', 2], 2)}
def __init__(self, width, height, n_mines, show=True):
self.width = width
self.height = height
self.n = self.width*self.height
self.n_mines = n_mines
if self.n_mines >= self.n:
raise Exception('n_mines must be < width*height')
self.n_not_mines = self.n - self.n_mines
self.ii, self.jj = np.mgrid[:self.height, :self.width]
self.i, self.j = self.ii.ravel(), self.jj.ravel()
self.mines = np.full((self.height, self.width), False, dtype=bool) # boolean, mine or not
# number of mines in the neighboring cells
self.mines_count = np.full((self.height, self.width), 0, dtype=int)
self.flags = np.full((self.height, self.width), False, dtype=bool) # mine flags
self.revealed = np.full((self.height, self.width), False, dtype=bool) # revealed cells
self.wrong = np.full((self.height, self.width), False, dtype=bool) # wrong guesses
self.mines_pts = None # once initialized, Lines2D object
self.flags_pts = None # Line2D objects
self.mines_count_txt = np.full((self.height, self.width), None,
dtype=object) # 2D array of Text objects
self.revealed_img = None # AxesImage object
self.wrong_img = None # AxesImage object
self.title_txt = None # Text object
self.is_initialized = False # if game is initialized
self.is_game_over = False
# Connection ids of mouse click and key press events
self.cid_mouse = None
self.cid_key = None
self.fig, self.ax = plt.subplots(figsize=(max(self.width*self.figsize['scale'],
self.figsize['minw']),
max(self.height*self.figsize['scale'],
self.figsize['minh'])))
self.fig.canvas.manager.set_window_title(
u'pymines {} × {} ({} mines)'.format(self.width, self.height, self.n_mines))
self.draw_minefield()
if show:
plt.show()
def refresh_canvas(self):
"""
Updates minefield
"""
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def draw_minefield(self):
"""
Draws initial empty minefield board
"""
# Resets member variables to initial values
self.is_initialized = False
self.is_game_over = False
self.mines[:, :] = False
self.mines_count[:, :] = 0
self.flags[:, :] = False
self.revealed[:, :] = False
# Clears plot, sets limits
self.ax.clear()
self.ax.set_aspect('equal')
self.ax.axis('off')
self.ax.set_xlim(-.6, self.width - .4)
self.ax.set_ylim(-.6, self.height - .4)
# Draws grid lines
for j in np.arange(-.5, self.width):
self.ax.plot([j, j], [-.5, self.height-.5], lw=1, color='k')
for i in np.arange(-.5, self.height):
self.ax.plot([-.5, self.width-.5], [i, i], lw=1, color='k')
# Connects mouse click and key press event handlers and coordinates formatter
if self.cid_mouse is None:
self.cid_mouse = self.fig.canvas.mpl_connect('button_press_event', self.on_mouse_click)
self.cid_key = self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
self.ax.format_coord = _CoordsFormatter(self.width, self.height)
# Title text: number of flags/total mines
self.title_txt = self.ax.set_title(
'{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))
self.refresh_canvas()
def initialize(self, i, j):
"""
Initializes new game. This function is called after first click
in order to prevent the first click being straight over a mine
"""
population = set(range(self.n))
population.remove(i*self.width + j) # removes initial click
        idx = random.sample(sorted(population), self.n_mines)  # choose mines (random.sample needs a sequence, not a set, in Python 3.11+)
# Sets mines
self.mines[self.i[idx], self.j[idx]] = True
# Sets neighbor mines counter
for i, j in zip(self.i, self.j):
self.mines_count[i, j] = self.count_neighbor_mines(i, j)
# Sets wrong guesses
self.wrong = ~self.mines & self.flags
# Initializes plot objects
self.flags_pts, = self.ax.plot([], [], 'k>', ms=8)
self.revealed_img = self.ax.imshow(self.revealed, vmin=0, vmax=4, cmap='gray_r')
self.wrong_img = self.ax.imshow(self.wrong, vmin=0, vmax=1, cmap=self.cmap_reds_alpha)
# Initializes text objects of neighbor mines counter. They're
# initially set as non visible. As the cells are revealed, their
# status is changed to visible
p_count = self.mines_count > 0
for i, j, count in zip(self.ii[p_count], self.jj[p_count], self.mines_count[p_count]):
self.mines_count_txt[i, j] = self.ax.text(j, i, str(count), fontweight='bold',
color=self.color_dict[count], ha='center',
va='center', visible=False)
self.is_initialized = True
self.refresh_canvas()
def get_ij_neighbors(self, i, j):
"""
Gets the i, j coordinates (i is row, y coordinate, j is column,
x coordinate) of the neighboring cells
"""
ii, jj = np.mgrid[i-1:i+2, j-1:j+2]
ii, jj = ii.ravel(), jj.ravel()
filtr = (ii >= 0) & (ii < self.height) & (jj >= 0) & (jj < self.width)
ij_neighbors = set(zip(ii[filtr], jj[filtr]))
ij_neighbors.remove((i, j))
return ij_neighbors
def count_neighbor_mines(self, i, j):
"""
Counts the number of mines in the neighboring cells
"""
n_neighbor_mines = -1
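        # -1 acts as a sentinel meaning "this cell is itself a mine";
        # update_revealed relies on mines_count < 0 to detect a lost game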
if not self.mines[i, j]:
n_neighbor_mines = np.count_nonzero(
self.mines[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])
return n_neighbor_mines
def count_neighbor_flags(self, i, j):
"""
Counts the number of flags in the neighboring cells
"""
return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])
def update_revealed(self, i, j):
"""
Updates revealed cells by checking i, j cell and, recursevely,
the contiguous cells without mines
"""
if not self.revealed[i, j]:
# If not revealed cell
if self.mines_count[i, j] < 0:
# If wrong guess, games is over
self.wrong = ~self.mines & self.flags
self.wrong[i, j] = True
self.game_over()
else:
# If guess is correct
self.revealed[i, j] = True
if self.mines_count[i, j] == 0:
# Recursively looks for contiguous cells without mines
for _i, _j in self.get_ij_neighbors(i, j):
if self.mines_count[_i, _j] >= 0 and not self.revealed[_i, _j]:
self.flags[_i, _j] = False
self.update_revealed(_i, _j)
elif self.mines_count[i, j] > 0:
# The line below only makes sense when it's in the middle of the
# recursion. For instance, a cell is flagged, but it is part of a
# big blob that's going to be revealed. The game doesn't punish
# the player in this scenario. This behavior has been copied
# from gnome-mines
self.flags[i, j] = False
# Reveals mine count
self.mines_count_txt[i, j].set_visible(True)
elif self.mines_count[i, j] == self.count_neighbor_flags(i, j):
# If cell that's already revealed is clicked and the number of
# neighboring flags is the same as the number of neighboring
# mines, then the hidden neighbor cells are recursevely
# revealed. Evidently, if any flag guess is wrong, the game is
# over.
for _i, _j in self.get_ij_neighbors(i, j):
if not self.flags[_i, _j] and not self.revealed[_i, _j]:
self.update_revealed(_i, _j)
def reveal(self, i, j):
"""
Reveals clicked cell and contiguous cells without mines
"""
if not self.is_game_over:
if not self.flags[i, j]:
# Game is initialized after first click in order to prevent
# the first click being straight over a mine
if not self.is_initialized:
self.initialize(i, j)
self.update_revealed(i, j)
self.revealed_img.set_data(self.revealed)
self.flags_pts.set_data(*np.where(self.flags)[::-1])
self.refresh_canvas()
if np.count_nonzero(self.revealed) == self.n_not_mines:
self.game_over(True)
def flag(self, i, j):
"""
Flags i, j cell
"""
# Does not allow starting a game with a flag
if not self.is_game_over and self.is_initialized:
if not self.revealed[i, j]:
self.flags[i, j] = not self.flags[i, j]
self.flags_pts.set_data(*np.where(self.flags)[::-1])
self.title_txt.set_text('{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))
self.refresh_canvas()
def game_over(self, win=False):
"""
Callback when game is over
"""
self.is_game_over = True
if win:
self.flags_pts.set_data(*np.where(self.mines)[::-1]) # shows mines marked with flags
self.title_txt.set_text('You win! Press F2 to start a new game')
else:
self.wrong_img.set_data(self.wrong) # wrong guesses
self.mines_pts = self.ax.plot(self.jj[self.mines & ~self.flags],
self.ii[self.mines & ~self.flags],
'kX', ms=10) # shows mines
self.title_txt.set_text('You lose! Press F2 to start a new game')
self.refresh_canvas()
def on_mouse_click(self, event):
"""
Callback when mouse is clicked
"""
if not self.is_game_over:
try:
# i, j coordinates of the click event
i = int(round(event.ydata))
j = int(round(event.xdata))
                # Left or middle button reveals the cell
if event.button == 1 or event.button == 2:
self.reveal(i, j)
# Right button
elif event.button == 3:
self.flag(i, j)
except (TypeError, IndexError):
pass
def on_key_press(self, event):
"""
Callback when key is pressed
"""
# F2 for starting new game
if event.key == 'f2':
self.draw_minefield()
@staticmethod
def new_game(*args, level='beginner', show=True):
"""
Static method for initializing the game with custom settings or in pre-defined levels
(beginner, intermediate, expert)
"""
if len(args) == 3:
minefield = args
else:
minefield = Mines.levels[Mines.level_aliases[level]]
return Mines(*minefield, show)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-l', metavar='level (b, i, e)', default='beginner', help='level, i.e., '
'beginner (8 x 8, 10 mines), intermediate (16 x 16, 40 mines), expert (30 '
'x 16, 99 mines)')
parser.add_argument('-c', metavar=('width', 'height', 'mines'), default=[], type=int, nargs=3,
help='custom game, provided width, height, and number of mines')
args = parser.parse_args()
game = Mines.new_game(*args.c, level=args.l)
|
23,215 | 7b57f8a6736475e064fe74f97ad3945b2f54e283 | '''
Created on 3 Sep 2013
@author: hicksj
'''
from pages.page import Page
from xml.etree import ElementTree
import re
import string
from pages.pageLink import HtmlLink, PageLink
from pages.mailto import Mailto
from pages import mailto
from pages.settings import Settings
class PersonData:
    def __init__(self, name=None, address=None, phoneNumbers=None, roles=None):
        self.name = name
        self.address = address
        self.phoneNumbers = phoneNumbers if phoneNumbers is not None else []
        self.roles = roles if roles is not None else []
class RoleData:
def __init__(self, name=None, email=None, club=None):
self.name = name
self.email = email
self.club = club
class Contacts(Page):
sourceFile = "{0}/contacts.xml".format(Settings.dataDirectory)
def __init__(self, pageId, full, params={}, role=None):
Page.__init__(self, pageId, params, role)
self.full = full
def getTitle(self):
return "SEHICL Contacts"
def getContactData(self, rootElement=None):
answer = []
if rootElement is None:
rootElement = ElementTree.parse(self.sourceFile)
for personElement in rootElement.findall("person"):
answer.append(self.getPersonData(personElement))
return answer
def getPersonData(self, personElement):
answer = PersonData()
self.getName(personElement, answer)
self.getRoles(personElement, answer)
if self.full:
self.getPhoneNumbers(personElement, answer)
self.getAddress(personElement, answer)
return answer
def getName(self, personElement, dataObject):
nameElement = personElement.find("name")
dataObject.name = None if nameElement is None else nameElement.text.strip()
def getAddress(self, personElement, dataObject):
addressElement = personElement.find("address")
dataObject.address = None if addressElement is None else addressElement.text.strip()
def getPhoneNumbers(self, personElement, dataObject):
phoneNumbers = []
for phoneElement in personElement.findall("phone"):
phoneNo = self.formatPhoneNumber(phoneElement.text.strip())
phoneNumbers.append(phoneNo)
dataObject.phoneNumbers = phoneNumbers
def formatPhoneNumber(self, phoneNumber):
digits = re.sub("[^0-9]", "", phoneNumber)
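        # Decide how to group the digits for display; the branches below appear
        # to follow UK dialling conventions: 02x numbers -> 3-4-4, 011x/01x1
        # numbers -> 4-3-4, other 01 numbers -> 5 + remainder, others -> 5-6.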
groups = None
if digits[0] == '0':
length = len(digits)
if digits[1] == "2":
if length == 11:
groups = [3, 4, 4]
elif digits[1] == "1":
if digits[2] == "1" or digits[3] == "1":
if length == 11:
groups = [4, 3, 4]
else:
if length in range(9, 12):
groups = [5, length - 5]
else:
if length == 11:
groups = [5, 6]
if groups is None:
answer = "???{0}".format(digits)
else:
groupedDigits = []
start = 0
for g in groups:
end = start + g
groupedDigits.append(digits[start:end])
start = end
answer = string.join(groupedDigits, " ")
return answer
def getRoles(self, personElement, dataObject):
roles = []
for roleElement in personElement.findall("role"):
role = self.getRole(roleElement)
roles.append(role)
dataObject.roles = roles
def getRole(self, roleElement):
answer = RoleData()
self.getRoleName(roleElement, answer)
self.getClub(roleElement, answer)
self.getEmail(roleElement, answer)
return answer
def getRoleName(self, roleElement, dataObject):
name = roleElement.get("name", None)
dataObject.name = None if name is None else name.strip()
def getClub(self, roleElement, dataObject):
club = roleElement.get("club", None)
dataObject.club = None if club is None else club.strip()
def getEmail(self, roleElement, dataObject):
emailElement = roleElement.find("email")
address = None if emailElement is None else emailElement.get("id", None)
dataObject.email = None if address is None else address.strip()
def getContent(self, rootElement=None):
html = """
<div id="contacts">
<h1>Contacts</h1>
<p>
<a href="#committee">Committee</a> | <a href="#club">Club secretaries</a>
</p>
{headermessages}
<h2><a id="committee">Committee</a></h2>
{committee}
{committeemessages}
<h2><a id="club">Club secretaries</a></h2>
{clubs}
</div>
"""
personList = self.getContactData(rootElement)
headerMessages = self.getHeaderMessages()
committee = self.getCommitteeContacts(personList)
        committeeMessages = self.getCommitteeMessages()
clubs = self.getClubContacts(personList)
answer = html.format(headermessages=headerMessages, committee=committee, committeemessages=committeeMessages, clubs=clubs)
return answer
def getHeaderMessages(self):
html = """
<p>
{gsm.atag}Game Set & Match</a>, the
League's sponsors, are located at Unit 1, Beaver Trade Park, Quarry Lane, Chichester PO19 8NY
(tel: 01243 538800).
</p>
<p>
The main means of communication on league matters is by e-mail. <b>For
urgent contacts, however, it may be preferable to telephone; telephone
numbers for committee members, club secretaries and team captains and
managers are given {phonelocation}.</b>
</p>
<p>
All e-mail contacts should be made using the e-mail addresses
assigned to the relevant roles in the League's domain (that is,
addresses ending with <i>sehicl.org.uk</i>). This ensures that when the
person performing a role changes, the e-mail is still routed to the correct
person. The easiest way to do this is to click on the person's name in
the lists below.
</p>
<p>
Please let {webmaster.html} know if any of the information below is incomplete or incorrect.
</p>
"""
if self.full:
phoneLocation = "below"
else:
fullContacts = PageLink("fullContacts", self)
phoneLocation = "on the <a href=\"{fullcontacts.url}\">Full Contacts</a> page".format(fullcontacts=fullContacts)
gsm = HtmlLink("http://www.gsam.co.uk/")
webmaster = Mailto("website", "the Webmaster", description="SEHICL Webmaster")
answer = html.format(gsm=gsm, webmaster=webmaster, phonelocation=phoneLocation)
return answer
def getCommitteeMessages(self):
html = """
<p>
General contacts should be addressed to {contacts.html}.
</p>
<p>
Non-member clubs who wish to enter one or more teams in the League
are asked to contact {secretary.html} in the first instance.
</p>
<p>
Result sheets may be sent electronically to {fixturesec.html},
using one of the template documents available on the <a href="{resources.url}">Resources</a> page.
</p>
"""
contacts = Mailto("contacts", "")
secretary = Mailto("secretary", "the Secretary", description="SEHICL Secretary")
fixturesec = Mailto("fixturesec", "the Fixture Secretary", description="SEHICL Fixture Secretary")
resources = PageLink("resources", self)
answer = html.format(contacts=contacts, secretary=secretary, fixturesec=fixturesec, resources=resources)
return answer
def getCommitteeContacts(self, personList):
html = """
<table>
<tbody>
{contacts}
</tbody>
</table>
"""
contacts = []
contactsByRole = {}
for p in personList:
for r in p.roles:
if r.club is None:
contactsByRole[r.name.lower()] = (p, r)
roles = ("President", "Chairman", "Vice-Chairman", "Secretary", "Treasurer", "Umpires' Co-ordinator", "Fixture Secretary", "Webmaster")
for rn in roles:
person, role = contactsByRole.get(rn.lower(), (None, None))
contacts.append(self.getCommitteeContactHtml(rn, person, role, "Currently vacant"))
answer = html.format(contacts=string.join(contacts, "\n"))
return answer
def getClubContacts(self, personList):
contactsByClubAndRole = {}
for p in personList:
for r in p.roles:
if r.club is not None:
club = r.club
contactsByRole = contactsByClubAndRole.get(club, {})
if contactsByRole == {}:
contactsByClubAndRole[club] = contactsByRole
roleName = r.name
contactsForRole = contactsByRole.get(roleName, [])
if contactsForRole == []:
contactsByRole[roleName] = contactsForRole
contactsForRole.append((p, r))
answer = self.getAllClubsContactsHtml(contactsByClubAndRole)
return answer
def getCommitteeContactHtml(self, roleName, person, role, defaultText):
html = """
<tr>
<td class="role">{role}</td>
<td>{person}</td>
</tr>
"""
email = None if role is None else role.email
answer = html.format(role=roleName, person=self.getPersonHtml(person, email, defaultText, True))
return answer
def getPersonHtml(self, person, email, defaultText, includeAddress):
if person is None:
answer = defaultText
else:
htmlItems = []
if email is None:
htmlItems.append(person.name)
else:
mailto = Mailto(email, person.name)
htmlItems.append(mailto.html)
addressPresent = False
if self.full:
addressPresent = includeAddress and person.address is not None
if addressPresent:
htmlItems.append(person.address)
if len(person.phoneNumbers) > 0:
htmlItems.append(string.join(person.phoneNumbers, " / "))
answer = string.join(htmlItems, "<br>\n" if addressPresent else ",\n")
return answer
def getAllClubsContactsHtml(self, contactsByClubAndRole):
html = """
<table>
<tbody>
{contacts}
</tbody>
</table>
"""
sortableClubNames = {}
for name in contactsByClubAndRole.keys():
k = re.sub("[^a-z]", "", name.lower())
sortableClubNames[k] = name
contacts = []
for k in sorted(sortableClubNames.keys()):
name = sortableClubNames[k]
contacts.append(self.getClubContactsHtml(name, contactsByClubAndRole[name]))
answer = html.format(contacts=string.join(contacts, "\n"))
return answer
def getClubContactsHtml(self, clubName, contactsByRole):
html = """
<tr>
<td class="role">{club}</td>
<td>
{contacts}
</td>
</tr>
"""
answer = html.format(club=clubName, contacts=self.getClubContactListHtml(contactsByRole))
return answer
class PartialContacts(Contacts):
def __init__(self, pageId, params={}, role=None):
Contacts.__init__(self, pageId, False, params, role)
def getClubContactListHtml(self, contactsByRole):
person, role = contactsByRole.get("Secretary", [(None, None)])[0]
email = None if role is None else role.email
answer = self.getPersonHtml(person, email, "TBC", False)
return answer
class FullContacts(Contacts):
def __init__(self, pageId, params={}, role=None):
Contacts.__init__(self, pageId, True, params, role)
def getClubContactListHtml(self, contactsByRole):
roleOrder = ["secretary", "captain", "senior", "a team", "b team", "c team", "under-16", "under-13"]
rolesByOrder = {}
for k in contactsByRole.keys():
order = roleOrder.index(k.lower())
rolesByOrder[order] = k
contacts = []
for k in sorted(rolesByOrder.keys()):
roleName = rolesByOrder[k]
contactsForRole = contactsByRole[roleName]
peopleData = [self.getPersonHtml(person, role.email, "TBC", roleName == "Secretary") for person, role in contactsForRole]
contacts.append("{0}: {1}".format(roleName, string.join(peopleData, "; ")))
answer = string.join(contacts, "<br>\n")
return answer
|
23,216 | 495e07a58cac8a2c7c6316aa79062248d74c932a | import pandas as pd
import numpy as np
import scipy.stats
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
def load_file(filename):
return pd.read_csv(filename)
def split_data(data, ratio):
    data = shuffle(data)  # shuffle returns a new frame; reassign so the shuffle takes effect
index = int(data.shape[0]*ratio)
print(index)
return data.iloc[:index], data.iloc[index:]
def shuffle(data):
return data.sample(frac=1).reset_index(drop=True)
def calc_attribute_probabilities_cont(class_data):
return {column: [class_data[column].mean(), class_data[column].std()] for column in class_data}
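# Gaussian Naive Bayes prediction (assuming uniform class priors): for each
# class, sum the log of the per-feature normal pdf (mean/std fitted per class
# above) and pick the class with the largest total log-likelihood.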
def predict(parameters, data):
class_probabilities = np.zeros((data.shape[0], len(parameters)))
for key in parameters:
probability_matrix = np.zeros((data.shape[0], len(data.columns)))
i = 0
for column in data.columns:
probability_matrix[:, i] = [scipy.stats.norm(parameters[key][column][0],
parameters[key][column][1]).pdf(elem) for elem in data[column]]
i += 1
probability_values = np.sum(np.log(probability_matrix), axis=1)
class_probabilities[:, key] = probability_values
print(class_probabilities)
return class_probabilities.argmax(1)
def print_metrics(actual, predicted):
print('Accuracy: ' + str(accuracy_score(actual, predicted)))
print('Precision: ' + str(precision_score(actual, predicted, average="macro")))
print('Recall: ' + str(recall_score(actual, predicted, average="macro")))
print('F-score: ' + str(f1_score(actual, predicted, average="macro")))
label_col = 'Type'
data = load_file('diabets.data')
data = shuffle(data)
train, test = train_test_split(data, test_size=0.2)
model_parameters = {category: calc_attribute_probabilities_cont(data.loc[data[label_col] == category].drop(label_col, axis=1)) for category in data[label_col].unique()}
print(model_parameters)
test_y = test[label_col]
test = test.drop(label_col, axis=1)
prediction = predict(model_parameters, test)
# print(sum(prediction == test_y.as_matrix())/prediction.shape[0])
print("MANUAL")
print_metrics(prediction, test_y.as_matrix())
gnb = GaussianNB()
y_pred = gnb.fit(train.drop(label_col, axis=1), train[label_col]).predict(test)
# print(sum(y_pred == test_y.as_matrix())/prediction.shape[0])
print("AUTOMATIC")
print_metrics(y_pred, test_y.as_matrix())
|
23,217 | 473ef6190da52f5b34b98b68bb78f12c87dd0775 | import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore")
data = pd.read_csv('creditcard.csv')
data.Time = (data.Time-data.Time.min())/data.Time.std()
data.Amount = (data.Amount-data.Amount.mean())/data.Amount.std()
plt.figure()
data.groupby('Class').V1.count().plot(kind='bar')
plt.title('0-1 Class distribution')
plt.figure()
ax = data.Amount.hist(grid=False, bins=50)
ax.set_yscale("log", nonposy='clip')
plt.title('Amount')
plt.figure()
data.Time.hist(grid=False, bins=50)
plt.title('Time')
plt.figure()
correlations = data.corr()['Class'].drop('Class')
correlations.sort_values().plot(kind='bar')
plt.title('Correlations to Class')
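# Undersample the majority (non-fraud) class down to the number of fraud rows
# so the class-distribution, amount and correlation plots can be compared on a
# balanced dataset.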
frauds = data[data.Class == 1]
non_frauds = data[data.Class == 0]
frauds_no = len(frauds)
balanced_data = pd.concat([frauds, non_frauds.sample(frauds_no)])
plt.figure()
balanced_data.groupby('Class').V1.count().plot(kind='bar')
plt.title('0-1 Class distribution (subsampled)')
plt.figure()
ax = balanced_data.Amount.hist(grid=False, bins=50)
ax.set_yscale("log", nonposy='clip')
plt.title('Amount (subsampled)')
plt.figure()
correlations = balanced_data.corr()['Class'].drop('Class')
correlations.sort_values().plot(kind='bar')
plt.title('Correlations to Class (subsampled)')
|
23,218 | 08743ba4d294faa0a3d1948a7e15644910df90e1 | from fractions import Fraction
from expression.Node import Node, ConstantNode, FunctionNode, VariableNode
from expression.parser.Parser import parse
def is_variable_part(n: Node):
if isinstance(n, FunctionNode):
for a in n.args:
if is_variable_part(a):
return True
elif isinstance(n, VariableNode):
return True
return False
def is_numeric_part(n: Node):
if isinstance(n, ConstantNode):
return True
return False
# Must be a single term to work.
# Term must be simplified to work (e.g. simplify 2*2*4*x^2 to 16*x^2)
# Coefficient must be first item in the list (e.g. 16*x^2 vs x^2*16)
def pull_coefficient(term_n: Node):
if isinstance(term_n, VariableNode):
return Fraction(1)
elif isinstance(term_n, ConstantNode):
return term_n
elif isinstance(term_n, FunctionNode):
if term_n.op == '*':
            if is_variable_part(term_n.args[0]) and is_numeric_part(term_n.args[1]):
return term_n.args[1]
elif is_numeric_part(term_n.args[0]) and is_variable_part(term_n.args[1]):
return term_n.args[0]
else:
return ConstantNode(1)
else:
return ConstantNode(1)
else:
        raise ValueError(f'Cannot pull a coefficient from node of type {type(term_n).__name__}')
if __name__ == '__main__':
tree = parse('2*(2*4*x^2)')
coefficient = pull_coefficient(tree)
print(f'{coefficient}')
|
23,219 | d34b90470733599f44cbaf94be53fe95506b563e | #-*-coding:UTF-8-*-
#!/usr/bin/env python
# Author: yourname@wandoujia.com
# Created Time: 07/26/13 14:26:01
# about:
import redis
import time
import sys
import requests
def send_message(subject, text, to):
return requests.post(
"https://api.mailgun.net/v2/mail-internal.wandoujia.com/messages",
auth=("api", "key-5br6rwrim18qcnavw7vfxrud2d9sg5r2"),
data={"from": "喂豌豆 <feedme@samples.mailgun.org>",
"to": to,
"subject": subject,
"text": text})
if __name__ == "__main__":
c = redis.Redis(host='10.0.25.74', port=6379, db=8)
people = c.keys("dinner:cname:*")
str_time = time.strftime("%Y%m%d", time.localtime())
holiday = [
"20130501",
"20130610",
"20130611",
"20130612",
"20130927",
"20130928",
"20130929",
"20130930",
"20131001",
"20131002",
"20131003",
"20131004",
"20131005",
"20131006",
"20131007",
"20131008",
"20131009"]
if str_time in holiday:
print "holiday"
sys.exit(0)
text = "hi,\n加快订餐速度吧,五点截止,现在就订餐吧。\n 订餐地址: http://fan.wandoulabs.com"
subject = "晚饭是人生大事"
to = list()
for p in people:
mail = p.split(":")[-1]
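        # skip anyone who already has a dinner order recorded for today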
flag = c.exists("dinner:%s:%s" % (str_time, mail))
if flag:
continue
else:
if mail.split("@")[1] == "wandoujia.com":
print mail
to.append(mail)
if to:
send_message(subject, text, to)
|
23,220 | 97536b9cdab6052c170a8b7e2cefc46a2738b57f |
from unittest import TestCase
from ypod.conf import Config
from ypod.db.engine import create_session
from ypod.db.schema import Track, Artist, Album
class SchemaTest(TestCase):
def test_schema(self):
config = Config(db='sqlite://')
Session = create_session(config)
session = Session()
assert Track.current_disk_generation(session) == 0, Track.current_disk_generation(session)
def test_next_disk_generation(self):
config = Config(db='sqlite://')
Session = create_session(config)
session = Session()
artist = Artist(name='bob')
session.add(artist)
album = Album(name='bob sings')
session.add(album)
track = Track(artist=artist, album=album, name='a sad song',
disk_generation=3, path='/music/bob/songs')
session.add(track)
session.commit()
assert Track.current_disk_generation(session) == 3
track = session.query(Track).filter(Track.name == 'a sad song').one()
assert track.name == 'a sad song'
assert track.artist.name == 'bob'
artist = session.query(Artist).filter(Artist.name == 'bob').one()
assert artist.name == 'bob'
assert track.artist == artist
|
23,221 | 5712cb7e2be226ac188dad2feb10e8817d50c108 | import tensorflow as tf
import word_reader
import config
import sys
import numpy as np
import random
from tensorflow.python.ops import rnn, rnn_cell
class OutputModel:
def init_all(self, wr):
self.word_reader = wr
# initializer = tf.constant_initializer(value=wr.word_vectors, dtype=tf.float32)
# self.word_dict = tf.get_variable('word_dict', shape=[len(wr.word_vectors), config.embedding_size],
# initializer=initializer, trainable=config.trainable)
self.word_dict = tf.get_variable('haha', shape=None, dtype=tf.float32,
initializer=tf.constant(wr.word_vectors),
trainable=False)
# paraphrase sentences
self.x1_index = tf.placeholder(tf.int32, [None, config.max_sentence_len])
self.x1 = tf.nn.embedding_lookup(self.word_dict, self.x1_index)
# Permuting batch_size and n_steps
self.x1 = tf.transpose(self.x1, [1, 0, 2])
# Reshape to (n_steps*batch_size, n_input)
self.x1 = tf.reshape(self.x1, [-1, config.embedding_size])
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
self.x1 = tf.split(0, config.max_sentence_len, self.x1)
self.x2_index = tf.placeholder(tf.int32, [None, config.max_sentence_len])
self.x2 = tf.nn.embedding_lookup(self.word_dict, self.x2_index)
# Permuting batch_size and n_steps
self.x2 = tf.transpose(self.x2, [1, 0, 2])
# Reshape to (n_steps*batch_size, n_input)
self.x2 = tf.reshape(self.x2, [-1, config.embedding_size])
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
self.x2 = tf.split(0, config.max_sentence_len, self.x2)
self.y = tf.placeholder("int64", [None])
# Forward direction cell
lstm_fw_cell_x1 = rnn_cell.LSTMCell(config.hidden_size, forget_bias=1.0, state_is_tuple=True)
# Backward direction cell
lstm_bw_cell_x1 = rnn_cell.LSTMCell(config.hidden_size, forget_bias=1.0, state_is_tuple=True)
# Forward direction cell
lstm_fw_cell_x2 = rnn_cell.LSTMCell(config.hidden_size, forget_bias=1.0, state_is_tuple=True)
# Backward direction cell
lstm_bw_cell_x2 = rnn_cell.LSTMCell(config.hidden_size, forget_bias=1.0, state_is_tuple=True)
# Get lstm cell output
outputs_x1, _, _ = rnn.bidirectional_rnn(lstm_fw_cell_x1, lstm_bw_cell_x1,
self.x1, dtype=tf.float32, scope="RNN1")
outputs_x2, _, _ = rnn.bidirectional_rnn(lstm_fw_cell_x2, lstm_bw_cell_x2,
self.x2, dtype=tf.float32, scope="RNN2")
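        # outputs_x1[-1] / outputs_x2[-1] are the final time-step outputs of the
        # two bidirectional RNNs (forward+backward concatenated, 2*hidden_size
        # each); concatenating them gives a 4*hidden_size sentence-pair feature,
        # matching the [hidden_size * 4, classes] weight matrix below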
outputs = tf.concat(1, [outputs_x1[-1], outputs_x2[-1]])
self.weights = tf.Variable(tf.random_uniform([config.hidden_size * 4, config.classes],
dtype=tf.float32))
# self.weights = tf.Variable(tf.random_uniform([config.embedding_size * 2, config.classes],
# dtype=tf.float32))
self.b = tf.Variable(tf.random_uniform([config.classes]), dtype=tf.float32)
mid = tf.matmul(outputs, self.weights) + self.b
self.results = tf.nn.softmax(mid)
        # the *_with_logits op expects raw logits, so pass `mid` rather than the
        # softmax-ed results (otherwise softmax would be applied twice)
        self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(mid, self.y)
self.cost = tf.reduce_sum(self.loss)
init = tf.initialize_all_variables()
vars = tf.trainable_variables()
opt=tf.train.GradientDescentOptimizer(learning_rate=config.learning_rate)
self.optimizer = opt.minimize(self.cost, var_list=vars)
self.sess = tf.Session()
self.sess.run(init)
return
def train(self, q, c, label):
self.sess.run(self.optimizer,
feed_dict={self.x1_index: q, self.x2_index: c, self.y : label})
cost = self.sess.run(self.cost,
feed_dict={self.x1_index: q, self.x2_index: c, self.y : label})
return cost
def cal_cost(self, q, c, label):
        cost, _ = self.sess.run((self.cost, self.optimizer),
                        feed_dict={self.x1_index: q, self.x2_index: c, self.y : label})
        return cost
def predict(self, q, c):
pred = self.sess.run(self.results,
feed_dict={self.x1_index: q, self.x2_index: c})
return np.argmax(np.array(pred), axis=1)
def get_weight(self):
return self.sess.run(self.weights)
def train_and_test(self, fold_cut, batch_size):
train = self.word_reader.corpus_set[:fold_cut]
test = self.word_reader.corpus_set[fold_cut:]
x1 = []
x2 = []
y = []
batch_count = 0
cost = 0
for i in range(0, fold_cut):
batch_count += 1
x1.append(train[i].q_list)
x2.append(train[i].c_list)
y.append(train[i].label)
if batch_count == batch_size or i == fold_cut-1:
see_weights = self.get_weight()
cost_tmp = self.train(x1, x2, y)
see_weights = self.get_weight()
cost += cost_tmp
x1 = []
x2 = []
y = []
batch_count = 0
continue
right_count = 0
for i in range(0, len(test)):
x1 = []
x1.append(test[i].q_list)
x2 = []
x2.append(test[i].c_list)
ans = self.predict(x1, x2)
if abs(ans[0]-test[i].label) < 1e-6:
right_count += 1
return right_count * 1.0 / len(test), cost
if __name__ == '__main__':
wr = word_reader.WordReader()
# read paraphrase file
wr.read_file(sys.argv[1])
# read pretraining file
if len(sys.argv) >= 3 and config.pre_train == True:
wr.read_file(sys.argv[2])
random.shuffle(wr.corpus_set)
model = OutputModel()
model.init_all(wr)
for i in range(0, config.epoch):
acc, cost = model.train_and_test(config.fold_cut, config.batch_size)
print "round: " + str(i)
print "accuracy: " + str(acc)
print "cost: " + str(cost) |
23,222 | 603c7aaccf157d97e08b0afb8f8277e183bafb49 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Export checkpoint file into MINDIR format"""
from pathlib import Path
import mindspore.common.dtype as mstype
import mindspore.numpy as mnp
from mindspore import context
from mindspore import export
from mindspore import load_checkpoint
from model_utils.config import get_config
from src.generator.generator import Generator
from src.utils import check_args
def export_ctsdg(cfg):
"""
Export CTSDG generator for inference
Args:
cfg: Model configuration
Returns:
None
"""
generator = Generator(
        image_in_channels=cfg.image_in_channels,
        edge_in_channels=cfg.edge_in_channels,
        out_channels=cfg.out_channels
)
generator.set_train(False)
load_checkpoint(cfg.checkpoint_path, generator)
ckpt_path = Path(cfg.checkpoint_path)
output_file_name = (ckpt_path.parent / ckpt_path.stem).as_posix()
    file_format = cfg.file_format
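    # The zero-filled dummy tensors below only fix the input shapes/dtypes of
    # the exported graph; their values do not matter.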
    img_dummy = mnp.zeros([1, cfg.image_in_channels, *cfg.image_load_size],
dtype=mstype.float32)
edge_dummy = mnp.zeros([1, 2, *cfg.image_load_size], dtype=mstype.float32)
mask_dummy = mnp.zeros([1, 1, *cfg.image_load_size], dtype=mstype.float32)
export(generator, img_dummy, edge_dummy, mask_dummy,
file_name=output_file_name, file_format=file_format)
print(f'{output_file_name}.mindir exported successfully!', flush=True)
if __name__ == '__main__':
config = get_config()
check_args(config)
context.set_context(
mode=context.GRAPH_MODE,
device_target=config.device_target,
device_id=config.device_id,
)
export_ctsdg(config)
|
23,223 | 6ed80b188b6643888e17bcca33686ad4b01259a1 | #coding:utf-8
'''
Created on 2016-08-19
@author: Shark
'''
import urllib
import urllib2
import cookielib
filename = 'cookie.txt'
cookie = cookielib.MozillaCookieJar(filename)
handler = urllib2.HTTPCookieProcessor(cookie)
opener = urllib2.build_opener(handler)
pastdata = urllib.urlencode({'StuName':'1467004077','PassWord':'love1314'})
logiUrl = 'http://jwc.jxnu.edu.cn/Default_Login.aspx?preurl='
result = opener.open(logiUrl,pastdata)
cookie.save(ignore_discard=True, ignore_expires=True)
gradeUrl = 'http://jwc.jxnu.edu.cn/MyControl/All_Display.aspx?UserControl=xfz_cj.ascx&Action=Personal'
result = opener.open(gradeUrl)
print result.read() |
23,224 | c6c70946b687f8910a863cd5103dc59b6b5b9884 | ## REAL CLEAN CODE##
from PIL import Image
import random
import math
def 보로노이_다이어그램_생성(폭, 높이, 셀_숫자):
image = Image.new("RGB", (폭, 높이))
putpixel = image.putpixel
imgx,imgy = image.size
nx = [135 , 260, 260, 260, 260, 390, 390, 390, 390, 450, 450, 570, 570, 630, 630, 630, 630, 730, 730, 730, 730, 850 ]
ny = [250, 420, 300, 190, 80 , 420, 300, 190, 80 , 300, 190, 300, 190, 420, 300, 190, 80 , 420, 300, 190, 80 , 250]
nr = [102, 153, 204, 255, 153, 102, 255, 51, 153, 255, 153, 51, 000, 0, 0, 0, 0,0, 0, 0, 51, 0 ]
ng = [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 51 , 102, 0, 51, 000, 0, 0, 0, 0,0, 0, 0, 51, 0 ]
nb = [0 , 0 , 0 , 0 , 0 , 51 , 0 , 0 , 51 , 102, 0, 255, 255, 102, 153, 102,0,0,102,255,255, 153 ]
#num_cells = len(nx)
'''
for i in range(셀_숫자):
nx.append(random.randrange(imgx))
ny.append(random.randrange(imgy))
nr.append(random.randrange(256))
ng.append(random.randrange(256))
nb.append(random.randrange(256))'''
print(nx,"\n",ny)
print(imgx, imgy)
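    # Brute-force Voronoi rasterization: for every pixel, find the seed point
    # with the smallest Euclidean distance and paint the pixel with that cell's
    # color; the seed points themselves are drawn in white.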
for y in range(imgy):
for x in range(imgx):
dmin = math.hypot(imgx, imgy)
j = -1
for i in range(셀_숫자):
d = math.hypot(nx[i]-x, ny[i]-y)
if d < dmin:
dmin = d
j = i
if (x, y) == (nx[j], ny[j]):
putpixel((x, y), (255, 255, 255))
else:
putpixel((x, y), (nr[j], ng[j], nb[j] ))
image.save("VoronoiDiagram.png", "PNG")
image.show()
if __name__== "__main__":
보로노이_다이어그램_생성(1000, 500,22)
|
23,225 | 20ac3cf321491ff00a447ffb06b6b8b9ba3a9261 | import numpy as np
import matplotlib.pyplot as plt
from open_controller import Open_Controller
from quad1d_eom import ydot
##################################################################################
##################################################################################
# Here we are going to apply a continuous and constant control effort with a value
# of 1.7!
control_effort = 1.7
##################################################################################
##################################################################################
# Simulation Parameters
N = 500 # Number of simulation points (increments?)
t0 = 0 # starting time (sec)
tf = 30 # end time (sec)
time = np.linspace(t0, tf, N)
dt = time[1] - time[0] # change in u(tf) and u(t0) where u is linspace function
if __name__ == '__main__':
##################################################################################
# Core simulation code
# Inital conditions (i.e., initial state vector)
y = [0, 0]
#y[0] = initial altitude, (m)
#y[1] = initial speed, (m/s)
# Initialize array to store values
soln = np.zeros((len(time), len(y)))
# Create an instance of the Open_Controller class
controller = Open_Controller()
# Set constant control effort
controller.setControlEffort(control_effort)
# Set altitude
    r = 10  # meters (altitude set point) TODO: what is this?
controller.setTarget(r)
# Simulate quadrotor motion
j = 0 # dummy counter
for t in time:
# Evaluate the state at the next time point
y = ydot(y, t, controller)
# Store results
soln[j, :] = y
j += 1
##################################################################################
# Plot results
# Plot 1: This is the altitude of our quad copter as a function of time!
SP = np.ones_like(time) * r # altitude set point
fig = plt.figure()
    ax1 = fig.add_subplot(221)
ax1.plot(time, soln[:, 0], time, SP, '--')
ax1.set_xlabel('Time (sec)')
ax1.set_ylabel('Altitude (m)')
# Plot 2: This is the speed of our quad copter as a function of time!
ax2 = fig.add_subplot(212)
ax2.plot(time, soln[:,1])
ax2.set_xlabel('Time, (sec)')
ax2.set_ylabel('Speed, (m/s)')
plt.tight_layout()
# plt.show()
# Plot 3: This is the control effort applied to our quad copter as a function of time!
fig2 = plt.figure()
ax3 = fig2.add_subplot(111)
ax3.plot(time, controller.effort_applied, label='effort', linewidth=3, color = 'red')
ax3.set_xlabel('Time, (sec)')
ax3.set_ylabel('Control Effort')
h, l = ax3.get_legend_handles_labels()
ax3.legend(h, l)
plt.tight_layout()
plt.show()
##################
y0 = soln[:,0] #altitude
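    # np.argmax on the boolean array returns the index of the first sample
    # where the altitude exceeds the set point, i.e. the rise-time sample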
rise_time_index = np.argmax(y0>r)
RT = time[rise_time_index]
print("The rise time is {0:.3f} seconds".format(RT))
OS = (np.max(y0) - r)/r*100
if OS < 0:
OS = 0
print("The percent overshoot is {0:.1f}%".format(OS))
print ("The offset from the target at 30 seconds is {0:.3f} meters".format(abs(soln[-1,0]-r)))
|
23,226 | 2ce4527d5dba87fffe84ddb069874cae75a50754 | from flask.json import JSONEncoder
class DTOResponse:
def __init__(self, error=False, error_msg=""):
self.error = error
self.error_msg = error_msg
def serialize(self):
return {
'error': self.error,
'error_msg': self.error_msg
}
class DTOUserInfo:
def __init__(self, user_data, error=False, error_msg=""):
self.accountId = user_data['Data']['Account'][0]['AccountId']
self.nickname = user_data['Data']['Account'][0]['Nickname']
self.error = error
self.error_msg = error_msg
def serialize(self):
return {
'accountId': self.accountId,
'nickname': self.nickname,
'error': self.error,
'error_msg': self.error_msg
}
class DTOProductInfo:
    def __init__(self, productId, productName, storeName, price, discount, error=False, error_msg=""):
self.productId = productId
self.productName = productName
self.storeName = storeName
self.price = price
self.discount = discount
self.error = error
self.error_msg = error_msg
def serialize(self):
return {
'productId': self.productId,
'productName': self.productName,
'storeName': self.storeName,
'price': self.price,
'discount': self.discount,
'error': self.error,
'error_msg': self.error_msg
}
class DTOStore:
def __init__(self, store1=None, error=False, error_msg=""):
self.store = store1
def serialize(self):
return {
'storeName': self.store.name,
'storeCategory': self.store.store_type,
}
class DTOProductList:
def __init__(self, productList=None, error=False, error_msg=""):
self.productList = productList
def serialize(self):
return {
'productList': self.productList
}
class JSONDTOEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, (DTOUserInfo,DTOProductInfo,DTOResponse)):
return obj.serialize()
elif isinstance(obj, DTOStore):
return obj.serialize()
elif isinstance(obj, DTOProductList):
return obj.serialize()
return super(JSONDTOEncoder, self).default(obj)
|
23,227 | 30bebb528aac34a380b94cde4a5264c28d0c5f20 | import sys
import click
import logging
import time
from grafannotate.annotation import Annotation
CURRENT_TIMESTAMP = int(time.time())
@click.command()
@click.option('-u', '--uri', 'annotate_uri',
default='http://localhost:3000/api/annotations',
help='URI to send annotation to. Default: "http://localhost:3000/api/annotations".')
@click.option('-k', '--api-key', 'api_key', default=None,
help='Grafana API key to pass in Authorisation header')
@click.option('-T', '--title', 'title', default='event', help='Event title. Default: "event".')
@click.option('-t', '--tag', 'tags', multiple=True, help='Event tags (can be used multiple times).')
@click.option('-d', '--description', 'description', help='Event description body. Optional.')
@click.option('-s', '--start', 'start_time', default=CURRENT_TIMESTAMP,
help='Start timestamp (unix secs). Default: current timestamp.')
@click.option('-e', '--end', 'end_time', default=CURRENT_TIMESTAMP,
help='End timestamp (unix secs). Optional.')
@click.option('--debug/--no-debug', default=False,
help='Set debug logging on')
def main(annotate_uri, api_key, title, tags, description, start_time, end_time, debug):
"""
Send Grafana annotations to various endpoints
"""
log_level = logging.INFO
if debug:
log_level = logging.DEBUG
logging.basicConfig(format=' [%(levelname)s] %(message)s', level=log_level)
try:
if description is None:
if not sys.stdin.isatty():
description = "".join([line for line in iter(sys.stdin.readline, '')])
else:
description = ""
this_annotation = Annotation(title, tags, description, start_time, end_time)
result = this_annotation.send(annotate_uri, api_key)
if result['event_data']:
logging.debug(result['event_data'])
if result['message']:
logging.info(result['message'])
except Exception as e:
logging.exception(e)
"""
We could exit 1 here but we really don't want to cause a job to
fail just because we couldn't send an event.
"""
sys.exit(0)
|
23,228 | b492e5c32b3c8fef62fa60ff0a65d525eb15ba1c | print()
print("Manipulating List")
# Slicing List
t = [9,41,12,3,74,15]
print(t[1:3])
#>> [41, 12]
#upto but not including
# -----------------------------
print()
print("Building a list from scratch")
print()
stuff = list()
stuff.append("book")
stuff.append(99)
print(stuff)
#>>['book', 99]
#list are mutable
# ---------------------
num = [3,41,12,9,74,15]
print(len(num))
#>> 6
print(max(num))
# >>74
print(min(num))
#>>3
print(sum(num))
#>>154
print(sum(num) / len(num))
#>> 25.666666666666668
# ---------------------------
print("===============")
print()
total =0
count =0
while True:
inp = input('Enter a number: ')
if inp == 'done' :break
value = float(inp)
total = total + value
count = count + 1
average = total / count
print('Average: ', average)
# -------------------------
print()
print("another loop, using \"List Data Structure\" to produce the same result")
print()
numlist = list()
while True :
inp = input("Enter a number: ")
if inp == "done": break
value = float(inp)
numlist.append(value)
average1 = sum(numlist)/len(numlist)
print("Average:", average1)
# ========================
print()
print("====================")
print()
|
23,229 | 93e81c64660b71b8df37b1ed5522d84763b12a2d | import mock
import pytest
from mock.mock import call
from twindb_backup.clone import _get_mysql_service_name
from twindb_backup.destination.ssh import Ssh
@pytest.mark.parametrize(
"side_effect, expected_name",
[
([("0", ""), ("1\n", "")], "mysqld"),
([("0\n", ""), ("1\n", "")], "mysqld"),
([("\n0\n", ""), ("1\n", "")], "mysqld"),
([("1\n", ""), ("0\n", "")], "mysql"),
([("0", ""), ("0", ""), ("1", "")], "mariadb"),
],
)
def test_get_mysql_service_name(side_effect, expected_name):
with mock.patch.object(Ssh, "execute_command", side_effect=side_effect) as mock_execute:
assert _get_mysql_service_name(Ssh("foo")) == expected_name
mock_execute.assert_has_calls(
[call(f"systemctl list-units --full -all | grep -F '{expected_name}.service' | wc -l")]
)
|
23,230 | 1136d8f7c029973fb300305dde2b54fac64edf96 | import sys
from program import *
from program.board import *
with open(sys.argv[1], "r") as f:
script = f.read()
f.close()
b = Board(script) |
23,231 | 061f090f3b6bbc655c66d5fcabe73eea0cd42a5e |
# I. Bound methods: what makes them special is that the caller itself is automatically passed in as the first argument
# 1. Methods bound to an object: the caller is an object, and that object is passed in automatically
# 2. Methods bound to the class: the caller is the class, and the class is passed in automatically
# (use case: provide an alternative way of constructing objects)
# class Mysql:
# def __init__(self, ip, port):
# self.ip = ip
# self.port = port
#
# def func(self):
# print('%s:%s' % (self.ip, self.port))
#
# @classmethod # decorates the function below as a method bound to the class
# def from_conf(cls):
# print(cls)
# return cls('127.0.0.1', 3306) # could be changed to read from a separate config file
#
# # obj1 = Mysql('127.0.0.1', 3306)
#
# obj2 = Mysql.from_conf()
# II. Unbound methods -> static methods
# Not bound to anything: the caller can be either the class or an object, and nothing is passed in automatically
class Mysql:
def __init__(self, ip, port):
self.nid = self.create_id()
self.ip = ip
self.port = port
    @staticmethod # decorates the function below as a static method
def create_id():
import uuid
return uuid.uuid4()
@classmethod
def f1(cls):
pass
def f2(self):
pass
obj1 = Mysql('127.0.0.1', 3306)
print(Mysql.create_id) # <function Mysql.create_id at 0x039723D8>
print(obj1.create_id) # <function Mysql.create_id at 0x039723D8>
print(Mysql.f1) # <bound method Mysql.f1 of <class '__main__.Mysql'>>
print(obj1.f2) # <bound method Mysql.f2 of <__main__.Mysql object at 0x0336C030>>
Mysql.create_id()
obj1.create_id() |
23,232 | 87c5a2c77cf8710b4d03bae0d373cc05aefaa5e4 | """Flask configuration class."""
import os
class Config:
def __init__(self):
"""Base configuration variables."""
self.LOG_LEVEL = os.getenv('LOG_LEVEL')
self.LOGGLY_TOKEN = os.getenv('LOGGLY_TOKEN')
self.LOGIN_DISABLED = os.getenv('LOGIN_DISABLED')
self.SECRET_KEY = os.getenv('SECRET_KEY')
self.GH_CLIENT_ID = os.getenv('GH_CLIENT_ID')
self.GH_SECRET = os.getenv('GH_SECRET')
self.MONGO_URL = os.getenv('MONGO_URL')
self.MONGO_DB = os.getenv('MONGO_DB')
self.STATUSES = ["Not Started", "In Progress", "Completed"]
|
23,233 | 8a754b4a7fdaa076f3016a55c871a865260712fa | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^brewpubs/$', views.ListCreateBrewpub.as_view(), name='brewpub_list'),
url(r'^brewpubs/(?P<pk>\d+)/$', views.RetrieveUpdateDestroyBrewpub.as_view(),
name='brewpub_detail'),
url(r'^brewpubs/(?P<brewpub_pk>\d+)/reviews/$', views.ListCreateBrewpubReview.as_view(),
name='brewpub_review_list'),
url(r'^beer/$', views.ListCreateBeer.as_view(), name='beer_list'),
url(r'^beer/(?P<pk>\d+)/$', views.RetrieveUpdateDestroyBeer.as_view(),
name='beer_detail'),
url(r'^beer/(?P<beer_pk>\d+)/reviews/$', views.ListCreateBeerReview.as_view(),
name='beer_review_list'),
url(r'^reviews/$', views.ListUsersReviews.as_view(),
name='user_reviews'),
url(r'^reviews/(?P<pk>\d+)/$', views.RetrieveUpdateDestroyReview.as_view(),
name='user_detail_review'),
]
|
23,234 | 6ea54cbec59ce5ae99604bd755e06ab8818bba6e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 20:37:20 2021
@author: jguterl
"""
import libconf
import os,io
import shutil
from SimManager import SimulationManager, MakeSimFolder, MakeParameterArray, MakeParamInfo, UpdateInputFile
class Run(SimulationManager):
def __init__(self):
self.GITRExecPath = os.path.expanduser('~/GITR/build/GITR')
self.ParamScan = {}
self.ParamModif = {}
self.ReferenceDirectory = None
self.SimRootPath = None
self.Verbose = False
self.ListDirectory = []
self.Init()
def ShowSummary(self, ShowAll=False):
print('GITRRun instance attributes:')
for k in list(self.__dict__.keys()):
if k == 'ListScan' and not ShowAll:
continue
else:
print(' - {} : {}'.format(k,getattr(self,k)))
def SetGITRExec(self, ExecPath:str):
ExecPath = os.path.expanduser(ExecPath)
assert os.path.exists(ExecPath) == True, 'Cannot find GITR executable: {}'.format(ExecPath)
self.GITRExecPath = ExecPath
def SetParamScan(self, ConfigFile,Params, Values=None):
self.ParamScan = {}
if type(Params) == dict:
self.ParamScan=dict((k,{'ConfigFile':ConfigFile,'Values':v}) for k,v in Params.items())
else:
if type(Values) != list:
                raise KeyError('Values of parameters must be given as a list')
self.ParamScan[Params] = {'ConfigFile':ConfigFile,'Values':Values}
def AddParamScan(self, ConfigFile, Params, Values=None):
if type(Params) == dict:
self.ParamScan.update(dict((k,{'ConfigFile':ConfigFile,'Values':v}) for k,v in Params.items()))
else:
if type(Values) != list:
                raise KeyError('Values of parameters must be given as a list')
self.ParamScan[Params] = {'ConfigFile':ConfigFile,'Values':Values}
def SetModifParam(self, ConfigFile, Params, Value=None, AddParam=False):
self.ParamModif = {}
if type(Params) == dict:
self.ParamModif= dict((k,{'Value':Value,'ConfigFile':ConfigFile,'AddParam':AddParam}) for k,v in Params.items())
else:
assert Value is not None
self.ParamModif[Params]={'Value':Value,'ConfigFile':ConfigFile,'AddParam':AddParam}
def ModifParam(self, ConfigFile, Params, Value=None):
if type(Params) == dict:
self.ParamModif.update(dict((k,{'Value':Value,'ConfigFile':ConfigFile}) for k,v in Params.items()))
else:
assert Value is not None
self.ParamModif[Params]={'Value':Value,'ConfigFile':ConfigFile}
def ApplyModifyParam(self, AddParam=False):
Dic={}
Dic['ParameterInfo'] = MakeParamInfo(self.ParamModif)
for D in self.ListDirectory:
Dic.update({'Directory': D})
UpdateInputFile(Dic, self.LoadMethod, self.DumpMethod, AddParam, self.Verbose)
def SetReferenceDirectory(self, ReferenceDirectory:str)->None:
ReferenceDirectory = os.path.expanduser(ReferenceDirectory)
assert os.path.exists(ReferenceDirectory) == True, 'Cannot find ReferenceDirectory: {}'.format(ReferenceDirectory)
self.ReferenceDirectory = ReferenceDirectory
def SetSimRootPath(self, SimRootPath:str)->None:
SimRootPath = os.path.expanduser(SimRootPath)
self.SimRootPath = SimRootPath
@staticmethod
def DumpMethod(FilePath:str, Config:dict)->None:
FilePath = os.path.expanduser(FilePath)
assert os.path.exists(FilePath), "Cannot find the configfile {}".format(FilePath)
f = io.open(FilePath,'w')
libconf.dump(Config,f)
f.close()
@staticmethod
def LoadMethod(FilePath:str)->dict:
FilePath = os.path.expanduser(FilePath)
assert os.path.exists(FilePath), "Cannot find the configfile {}".format(FilePath)
f = io.open(FilePath,'r')
Config = libconf.load(f)
f.close()
return Config
def SetupScan(self, OverWrite=False, Format='value', AddParam=False):
assert type(self.ReferenceDirectory)==str and os.path.exists(self.ReferenceDirectory), "Cannot find base folder path {}".format(self.ReferenceDirectory)
assert type(self.SimRootPath)==str
assert os.path.exists(self.GITRExecPath)
print('>>>> Setting up scanning of parameters {} from reference folder {} with SimRootPath: {}'.format(list(self.ParamScan.keys()),self.ReferenceDirectory,self.SimRootPath))
print('Executable: {}'.format(self.GITRExecPath))
self.ParameterArray = MakeParameterArray(self.ParamScan, Format, self.Verbose)
for SimInfo in self.ParameterArray.flat:
Directory = MakeSimFolder(SimInfo['Suffix'], self.ReferenceDirectory, self.SimRootPath, OverWrite, self.Verbose)
self.SetSimulation(SimInfo,Directory, self.GITRExecPath, self.LoadMethod, self.DumpMethod, AddParam, self.Verbose)
self.ListDirectory.append(Directory)
print('>>>> Applying modification of fixed parameters')
self.ApplyModifyParam()
print('>>>> Modification of fixed parameters applied.')
self.DumpInfo(Folder=self.SimRootPath)
def Clean(self,OutputDirectory='output'):
for L in self.ListDirectory:
if os.path.exists(os.path.join(L,OutputDirectory)):
print('Cleaning {}'.format(os.path.join(L,OutputDirectory)))
shutil.rmtree(os.path.join(L,OutputDirectory))
os.mkdir(os.path.join(L,OutputDirectory))
# ListNewFolder =["{}_{}_{}".format(self.BaseFolder,ParamName, Value)
# for k,v in self.ParamScan.items():
# ParamName = k.split('.')[-1]
# for a in itertools.product(v1,v2,v3):
# for V in v:
# NewFolder = "{}_{}_{}".format(self.BaseFolder,ParamName, Value)
# NewFolder = "{}_{}_{}".format(BaseFolder,'_'.join(ListSuffix))
# print("Copying {} to {}".format(BaseFolder,NewFolder))
# CopyFolder(BaseFolder,NewFolder)
|
23,235 | abc3840e47d29b956b2bfb91d919f94dc5189958 | import os
import argparse
import indexer
import search
def option_index(args):
"""Handles command line option 'index'."""
print("= MAKE INDEX =")
print()
print("Database folder:\t{}".format(args.folder))
if not os.path.isdir(args.folder):
raise OSError("No such directory!")
print("Index file:\t\t{}".format(args.indexfile))
indexer.create_index_from_folder(args.folder, args.indexfile)
def option_search(args):
"""Handles command line option 'search'."""
print("= SEARCH =")
print()
print("Index file:\t\t{}".format(args.indexfile))
print("QE enabled:\t\t{}".format(args.queryexpansion))
if not os.path.exists(args.indexfile):
raise OSError("No such file!")
print("Query:\t\t\t'{}'".format(args.query))
print("\n")
search.search_index(args.indexfile, args.query,
top=args.top,
default_field=args.defaultfield,
display_fields=args.resultfields,
qe=args.queryexpansion)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="A small command line utility to search Linan Qiu's reddit-dataset.\n"
"This utility consists of two parts: \n"
"\t'index', which creates an index from a dataset, and\n"
"\t'search', which searches a previously created index using a query.\n"
"Be sure to read the included help functions (--help) for all available functionality.",
epilog="the searchable fields are:\n\t'text', 'id', 'subreddit', 'meta',' time' and 'author'\n\n"
"the displayable fields (for results) are:\n"
"\t'text', 'id', 'subreddit', 'meta',' time', 'author', 'ups', 'downs', 'authorlinkkarma', 'authorkarma', 'authorisgold'",
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.set_defaults(func=lambda x: parser.print_help())
subparsers = parser.add_subparsers(help="Use --help on the following sub-commands for more details:")
indexparser = subparsers.add_parser("index", help="Create a new index")
indexparser.add_argument("folder", action="store", help="Folder containing all database files")
indexparser.add_argument("indexfile", action="store", help="Path of index file to be created")
indexparser.set_defaults(func=option_index)
searchparser = subparsers.add_parser("search", help="Run a search query")
searchparser.add_argument("indexfile", action="store", help="Location of Lucene-generated index file")
searchparser.add_argument("query", action="store", help="Your search query")
searchparser.add_argument("-t", "--top", action="store", type=int, default=10,
help="(optional) Maximum amount of results to display")
searchparser.add_argument("-df", "--defaultfield", action="store", default="text",
help="(optional) Default field for query, others can still be searched using one or multiple <field>:\"query\"")
searchparser.add_argument("-rf", "--resultfields", nargs="+", action="store", default=["subreddit", "author", "text"],
help="(optional) List of fields to display in search results")
searchparser.add_argument("-qe", "--queryexpansion", action='store_true', help="(optional) Enable query expansion")
searchparser.set_defaults(func=option_search)
args = parser.parse_args()
args.func(args)
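    # Illustrative invocations (the script filename and the paths below are
    # assumptions, not taken from this file):
    #   python reddit_search.py index ./reddit-dataset ./reddit.index
    #   python reddit_search.py search ./reddit.index "cats" --top 5 -rf subreddit author text
    #   python reddit_search.py search ./reddit.index 'subreddit:"aww" kittens' --queryexpansion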
|
23,236 | d829bce036bbb28a5411eb1b2f60d2c7cd5c0fc5 |
import unittest
import sphinxtogithub
class MockFileObject(object):
before = """
<title>Breathe's documentation — BreatheExample v0.0.1 documentation</title>
<link rel="stylesheet" href="_static/default.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
"""
after = """
<title>Breathe's documentation — BreatheExample v0.0.1 documentation</title>
<link rel="stylesheet" href="static/default.css" type="text/css" />
<link rel="stylesheet" href="static/pygments.css" type="text/css" />
"""
def read(self):
return self.before
def write(self, text):
self.written = text
class MockOpener(object):
def __init__(self):
self.file_object = MockFileObject()
def __call__(self, name, readmode="r"):
self.name = name
return self.file_object
class TestFileHandler(unittest.TestCase):
def testProcess(self):
filepath = "filepath"
opener = MockOpener()
file_handler = sphinxtogithub.FileHandler(filepath, [], opener)
file_handler.process()
self.assertEqual(opener.file_object.written, MockFileObject.before)
self.assertEqual(opener.name, filepath)
def testProcessWithReplacers(self):
filepath = "filepath"
replacers = []
replacers.append(sphinxtogithub.Replacer("_static/default.css", "static/default.css"))
replacers.append(sphinxtogithub.Replacer("_static/pygments.css", "static/pygments.css"))
opener = MockOpener()
file_handler = sphinxtogithub.FileHandler(filepath, replacers, opener)
file_handler.process()
self.assertEqual(opener.file_object.written, MockFileObject.after)
def testSuite():
suite = unittest.TestSuite()
suite.addTest(TestFileHandler("testProcess"))
suite.addTest(TestFileHandler("testProcessWithReplacers"))
return suite
|
23,237 | 31c7476f79b490b00ec821a3d14ea09d21f4d9cc | from __future__ import print_function
import boto3
import json
print('Loading function')
#set region
REGION = 'ap-south-1'
#set the SNS topic ARN you want to alert on
SNS_TOPIC_ARN = 'arn:aws:sns:ap-south-1:773591337265:Security-Notification'
def lambda_handler(event, context):
#print(event)
event = event['Records'][0]['Sns']['Message']
s = "{'muffin' : 'lolz', 'foo' : 'kitty'}"
json_acceptable_event = event.replace("\'", "\"")
# print(json_acceptable_event)
event = json.loads(json_acceptable_event)
# print(event)
# print(event['detail'])
# print(event['detail']['eventName'])
# print(event['detail']['sourceIPAddress'])
# print(event['detail']['userAgent'])
# print(event['detail']['requestParameters'])
# print(event['detail']['responseElements'])
# print(event['time'])
sns_body = 'EventName: {}\nSourceIP: {}\nUserAgent: {}\nRequest: {}\nResponse: {}\nTime: {}'.format(event['detail']['eventName'], event['detail']['sourceIPAddress'], event['detail']['userAgent'], event['detail']['requestParameters'], event['detail']['responseElements'], event['time'])
client = boto3.client('sns', region_name=REGION)
# print(sns_body)
response = client.publish(
TopicArn=SNS_TOPIC_ARN,
Subject='Security group state change notification',
Message=sns_body
) |
23,238 | 9f87136fdbc8380e7bc8e02ba6d9b0f21b32f9dd | import sys
import glob
import os
merged_folder_name = sys.argv[1] + "-merged"
allfiles = []
for arg in sys.argv[1:]:
allfiles += glob.glob(arg+"/*")
if not os.path.exists(merged_folder_name):
os.makedirs(merged_folder_name)
for afile in allfiles:
os.rename(afile, merged_folder_name + afile[afile.rfind("\\"):]) |
23,239 | f9f96c2515d79bcd2d00d27547202a2bf3d4717f |
def sum_seq(sequence):
suma = 0
for element in sequence:
if isinstance(element, (list, tuple)):
suma += sum_seq(element)
else:
suma += element
return suma
seq = [1, 2, 3, [2, 4], (1, 4, 5, [1, 1, 1])]
print(sum_seq(seq)) |
23,240 | c4a8940dcc31391cfaaec1abb5b02348b65dffe1 | import torch
import numpy as np
def run_sis(T, G, S, I, newI, nu, mu, d, beta, N):
'''
Runs the linearized SIS model, returning the total number of infected agents
summed over all time steps.
'''
#duplicate these variables along an additional axis to match the batch size
nu = torch.diag(1 - nu).expand_as(beta)
d = torch.diag(1 - d).expand_as(beta)
G = G.expand_as(beta)
#run the main loop for the linearized disease dynamics
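    # Each step applies the linear recurrence
    #   I_t = G @ (S_{t-1} @ mu_{t-1} @ beta @ N_{t-1} + diag(1-nu) @ diag(1-d)) @ I_{t-1} + newI_t
    # i.e. newly generated infections plus infections that persist (neither cleared
    # nor removed), mixed through G, with exogenous cases newI added each step.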
total_infected = I.mean(dim=0).sum()
for t in range(1, T):
new_infections = S[t-1] @ mu[t-1] @ beta @ N[t-1]
old_infections = nu @ d
A = G @ (new_infections + old_infections)
I = A @ I
I = I + newI[t]
total_infected += I.mean(dim=0).sum()
return total_infected
def run_seis(T, G, S, I, newI, nu, mu, d, beta, N, alpha_fast, alpha_slow, E):
'''
    Runs the linearized SEIS model, returning an array with the infected fraction
    of the population at each time step for every sample.
'''
#duplicate these variables along an additional axis to match the batch size
nu = torch.diag(1 - nu).expand_as(beta)
d = torch.diag(1 - d).expand_as(beta)
G = G.expand_as(beta)
E = E.expand_as(beta)
alpha_fast = torch.diag(alpha_fast).expand_as(beta)
alpha_slow = alpha_slow.expand_as(beta)
#run the main loop for the linearized disease dynamics
total_infected = I.mean(dim=0).sum()
import numpy as np
infections_time = np.zeros((T, 100))
for t in range(1, T):
new_infections = S[t-1] @ mu[t-1] @ beta @ N[t-1] @ I
# print(new_infections.shape)
# print(alpha_fast.shape)
new_infections_active = alpha_fast @ new_infections
new_infections_latent = new_infections - new_infections_active
E = mu[t-1] @ E
activations = alpha_slow*E
E = (1 - alpha_slow)*E
# print(E[:, 0].shape)
# print(new_infections_latent.squeeze().shape)
E[:, 0] += new_infections_latent.squeeze()
E = G @ E @ G
old_infections = nu @ d @ I
# print(new_infections_active.shape)
# print(old_infections.shape)
# print(newI[t].shape)
# print(activations.sum(dim=2).shape)
# print(I.shape)
I = new_infections_active + old_infections + newI[t] + activations.sum(dim=2).view_as(I)
I = G @ I
# infections_time.append(I.mean(dim=0).sum().item())
for j in range(100):
total_pop = (1./torch.diag(N[t, j])).sum()
infections_time[t, j] = I[j].sum()/total_pop
if t == 1 and j == 0:
print(total_pop.item(), I[j].sum().item())
# total_infected += I.mean(dim=0).sum()
return infections_time
def vector_to_diag(not_informed_fraction, beta):
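    # Convert a batch of column vectors (one per sample) into a batch of diagonal
    # matrices shaped like beta, so they can be used in the batched matrix
    # products of the information-spread update.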
not_informed_fraction_diag = torch.zeros_like(beta)
for s in range(beta.shape[0]):
not_informed_fraction_diag[s] = torch.diag(not_informed_fraction[s].squeeze())
# print('diag output')
# print(not_informed_fraction_diag)
return not_informed_fraction_diag
def run_seis_information(T, G, S, I, newI, nu, mu, d, beta, N, alpha_fast, alpha_slow, E, beta_information, nu_max):
'''
    Runs the linearized SEIS model with information spread, returning the infected,
    latent, and informed populations (averaged over samples) at each time step.
'''
#duplicate these variables along an additional axis to match the batch size
informed = nu.view(len(nu), 1)
informed = informed.expand(beta.shape[0], *informed.shape)
nu = torch.diag(1 - nu).expand_as(beta)
d = torch.diag(1 - d).expand_as(beta)
G = G.expand_as(beta)
E = E.expand_as(beta)
alpha_fast = torch.diag(alpha_fast).expand_as(beta)
alpha_slow = alpha_slow.expand_as(beta)
#keep track of infected, latent, and informed at each time step
all_infections = torch.zeros(T, beta.shape[1], 1)
all_E = torch.zeros(T, E.shape[1], E.shape[2])
all_F = torch.zeros_like(all_infections)
#run the main loop for the linearized disease dynamics
for t in range(1, T):
#update nu with new information spread
not_informed_fraction = 1 - informed
not_informed_fraction_diag = vector_to_diag(not_informed_fraction, beta)
#constant scaling the beta for information spread
informed = 0.1*not_informed_fraction_diag@beta_information@informed + informed
nu = nu_max*informed
nu = vector_to_diag(1 - nu, beta)
#infections
new_infections = S[t-1] @ mu[t-1] @ beta @ N[t-1] @ I
new_infections_active = alpha_fast @ new_infections
new_infections_latent = new_infections - new_infections_active
E = mu[t-1] @ E
activations = alpha_slow*E
E = (1 - alpha_slow)*E
E[:, 0] += new_infections_latent.squeeze()
E = G @ E @ G
old_infections = nu @ d @ I
I = new_infections_active + old_infections + newI[t] + activations.sum(dim=2).view_as(I)
I = G @ I
#return E, I, F by time and age group
#mean across samples
all_infections[t] = I.mean(dim=0)
all_E[t] = E.mean(dim=0)
all_F[t] = informed.mean(dim = 0)
return all_infections, all_E, all_F
def run_seis_information_new(T, G, S, I, migration_I, migration_E, nu, mu, d, beta, N, alpha_fast, alpha_slow, E, beta_information, nu_max):
'''
    Runs the linearized SEIS model with information spread, returning the infected,
    latent, and informed populations for every time step and sample.
'''
#read in for first period of F, informed
#nu_sq = np.loadtxt('ann2018_clearanceProb.csv.csv', delimiter=',', skiprows=1)
#nu_sq[np.isnan(nu_sq)] = 0
#nu_sq = nu_sq.mean(axis = 0)
#nu_sq = torch.from_numpy(nu_sq)
#duplicate these variables along an additional axis to match the batch size
beta = beta.expand_as(G)
informed = nu.view(len(nu), 1)
informed = informed.expand(beta.shape[0], *informed.shape)
nu = torch.diag(1 - nu).expand_as(beta)
num_samples = G.shape[0]
#keep track of infected, latent, and informed at each time step
all_I = torch.zeros(T, num_samples, beta.shape[1], 1).double()
all_E = torch.zeros(T, num_samples, E.shape[1], E.shape[2]).double()
all_F = torch.zeros_like(all_I).double()
all_I[0] = I[0]
all_E[0] = E[0]
#all_I[0] = I[30]
#all_E[0] = E[30]
all_F[0] = informed
#run the main loop for the linearized disease dynamics
for t in range(1, T):
#update nu with new information spread
not_informed_fraction = 1 - informed
not_informed_fraction_diag = vector_to_diag(not_informed_fraction, beta)
#constant scaling the beta for information spread
informed = not_informed_fraction_diag@beta_information@informed + informed
#print('here is info beta mat')
#print(beta_information)
#print('here is informed')
#print(informed)
#debug sze
nu = nu_max*informed
nu = vector_to_diag(1 - nu, beta)
#infections
new_infections = S[t-1] @ mu @ beta @ N[t-1] @ I
new_infections_active = alpha_fast @ new_infections
new_infections_latent = new_infections - new_infections_active
E = mu @ E
activations = alpha_slow@E
E = E - activations
E += new_infections_latent
E = G @ E + migration_E[t] #CHANGING TO USING THE LAST MIGRATION PERIOD
#E = G @ E + migration_E[30]
old_infections = nu @ d @ I
I = new_infections_active + old_infections + activations
I = G @ I + migration_I[t] #CHANGING TO USING THE LAST MIGRATION PERIOD
#I = G @ I + migration_I[30]
#return E, I, F by time and age group
#mean across samples
all_I[t] = I
all_E[t] = E
all_F[t] = informed
#print(all_I)
return all_I, all_E, all_F
class SISInstance():
"""
Represents an instantiation of the SIS model with a particular (distribution
over) parameters. Foward pass computes total infections as a function of nu,
backward computes gradient wrt nu.
"""
def __init__(self, T, G, S, I, newI, mu, d, beta, N):
self.T = T
self.G = G
self.S = S
self.I = I
self.newI = newI
self.mu = mu
self.d = d
self.beta = beta
self.N = N
def __call__(self, nu):
return run_sis(self.T, self.G, self.S, self.I, self.newI, nu, self.mu, self.d, self.beta, self.N)
def greedy(grad, U, L, K):
'''
    Greedily allocate the budget K across elements in order of most negative
    gradient (largest predicted decrease in the objective), respecting the
    per-element bounds given by L and U
'''
sorted_groups = torch.sort(grad)[1]
nu = L.clone()
curr = 0
while (nu - L).sum() < K and curr < len(grad):
amount_add = min([U[sorted_groups[curr]] - L[sorted_groups[curr]], K - (nu - L).sum()])
nu[[sorted_groups[curr]]] += amount_add
curr += 1
return nu
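# Minimal illustration of greedy() (the numbers are made up, not from the original
# code): with grad = tensor([-3., -1., -2.]), L = zeros(3), U = ones(3), K = 2, the
# ascending sort of grad gives indices [0, 2, 1], so one unit of budget is assigned
# to group 0 and one to group 2, and greedy() returns tensor([1., 0., 1.]).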
def sfw_torch(L, U, K, T, G, S, I, newI, mu, d, beta, N, num_iters = 100):
sis = SISInstance(T, G, S, I, newI, mu, d, beta, N)
nu = torch.rand_like(L, requires_grad=True)
nu.data.zero_()
nu.grad = torch.zeros_like(nu)
for i in range(num_iters):
val = sis(nu + L)
nu.grad.zero_()
val.backward()
nu.data += 1./num_iters * greedy(nu.grad, U - L, torch.zeros_like(nu), K)
nu.data += L
return nu
def sfw_seis_torch(L, U, K, T, G, S, I, migration_I, migration_E, mu, d, beta, N, alpha_fast, alpha_slow, E, beta_information, nu_max, num_iters = 100):
nu = torch.rand_like(L, requires_grad=True)
nu.data.zero_()
nu.grad = torch.zeros_like(nu)
for i in range(num_iters):
print('optimizing: {}/{}'.format(i, num_iters))
all_I, all_E, all_F = run_seis_information_new(T, G, S, I, migration_I, migration_E, nu + L, mu, d, beta, N, alpha_fast, alpha_slow, E, beta_information, nu_max)
val = all_I.sum()
nu.grad.zero_()
val.backward()
nu.data += 1./num_iters * greedy(nu.grad, U - L, torch.zeros_like(nu), K)
nu.data += L
#print(nu)
return nu
|
23,241 | d872ae0a30e77734fad2524c6d6e7e18f97e27f4 | # This Program converts Arabic numeral into Roman Numerals.
n = int(input())
if n>0:
roman = ""
while n>0:
if n>=1000:
roman +="M"
n-=1000
elif n>=900:
roman +="CM"
n-=900
elif n>=500:
roman +="D"
n-=500
elif n>=400:
roman +="CD"
n-=400
elif n>=100:
roman +="C"
n-=100
elif n>=90:
roman +="XC"
n-=90
elif n>=50:
roman +="L"
n-=50
elif n>=40:
roman +="XL"
n-=40
elif n>=10:
roman +="X"
n-=10
elif n>=9:
roman +="IX"
n-=9
elif n>=5:
roman +="V"
n-=5
elif n>=4:
roman +="IV"
n-=4
elif n>=1:
roman +="I"
n-=1
else:
break
print(roman)
else:
print("Please enter a Postive Numeral only. Try Again.")
|
23,242 | 4eaba66a6bf0ddcb6150b110876bd209762de241 | from django.shortcuts import render
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required, login_required
from django.http import HttpResponse, JsonResponse, QueryDict
from django.contrib.auth import authenticate, login, logout
from django.template import loader, RequestContext
from django.views.decorators.csrf import csrf_exempt, csrf_protect
class IndexView(View):
    # method_decorator turns a function decorator into a method decorator so that it can be used on instance methods
@method_decorator(login_required( redirect_field_name='next', login_url='/login/'))
def get(self,request):
return render(request, 'dashboard/public/index.html')
class LogIn(View):
def get(self, request):
        return render(request, 'dashboard/login.html', {'title': 'rebbot Ops'})
def post(self, request):
username = request.POST.get('username', None)
password = request.POST.get('password', None)
print(username)
ret = {'status': 0}
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
ret['nexturl'] = '/dashboard/'
else:
ret['status'] = 2
ret['errmsg'] = 'err'
ret['username'] = username
print(ret)
return JsonResponse(ret, safe=True)
class DefaultView(View):
def get(self, request):
return HttpResponse('default')
class LogOut(View):
@method_decorator(login_required(redirect_field_name='next', login_url='/login/'))
def get(self, request):
logout(request)
        return HttpResponse('Account logged out successfully')
def permit(request):
return render(request, 'dashboard/public/permit.html')
# Create your views here.
|
23,243 | fb51ebe5b24f39eb4f74aef232e9a702ac171d24 | #
#
#
import logging
from collections import defaultdict
from os import listdir, makedirs
from os.path import isdir, isfile, join
from ..record import Record
from ..yaml import safe_dump, safe_load
from . import ProviderException
from .base import BaseProvider
class YamlProvider(BaseProvider):
'''
Core provider for records configured in yaml files on disk.
config:
class: octodns.provider.yaml.YamlProvider
# The location of yaml config files. By default records are defined in a
# file named for the zone in this directory, the zone file, e.g.
# something.com.yaml.
# (required)
directory: ./config
# The ttl to use for records when not specified in the data
# (optional, default 3600)
default_ttl: 3600
# Whether or not to enforce sorting order when loading yaml
# (optional, default True)
enforce_order: true
# Whether duplicate records should replace rather than error
# (optional, default False)
populate_should_replace: false
# The file extension used when loading split style zones, Null means
# disabled. When enabled the provider will search for zone records split
# across multiple YAML files in the directory with split_extension
# appended to the zone name, See "Split Details" below.
# split_extension should include the "."
# (optional, default null, "." is the recommended best practice when
# enabling)
split_extension: null
# When writing YAML records out to disk with split_extension enabled
# each record is written out into its own file with .yaml appended to
# the name of the record. The two exceptions are for the root and
# wildcard nodes. These records are written into a file named
# `$[zone.name].yaml`. If you would prefer this catchall file not be
# used `split_catchall` can be set to False to instead write those
# records out to `.yaml` and `*.yaml` respectively. Note that some
# operating systems may not allow files with those names.
# (optional, default True)
split_catchall: true
# Disable loading of the zone .yaml files.
# (optional, default False)
disable_zonefile: false
Split Details
-------------
All files are stored in a subdirectory matching the name of the zone
(including the trailing .) of the directory config. It is a recommended
best practice that the files be named RECORD.yaml, but all files are
sourced and processed ignoring the filenames so it is up to you how to
organize them.
With `split_extension: .` the directory structure for the zone github.com.
managed under directory "zones/" would look like:
zones/
github.com./
$github.com.yaml
www.yaml
...
Overriding Values
-----------------
Overriding values can be accomplished using multiple yaml providers in the
`sources` list where subsequent providers have `populate_should_replace`
set to `true`. An example use of this would be a zone that you want to push
to external DNS providers and internally, but you want to modify some of
the records in the internal version.
config/octodns.com.yaml
---
other:
type: A
values:
- 192.30.252.115
- 192.30.252.116
www:
type: A
values:
- 192.30.252.113
- 192.30.252.114
internal/octodns.com.yaml
---
'www':
type: A
values:
- 10.0.0.12
- 10.0.0.13
external.yaml
---
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: ./config
zones:
octodns.com.:
sources:
- config
targets:
- route53
internal.yaml
---
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: ./config
internal:
class: octodns.provider.yaml.YamlProvider
directory: ./internal
populate_should_replace: true
zones:
octodns.com.:
sources:
- config
- internal
targets:
- pdns
    You can then sync your records externally with `--config-file=external.yaml`
and internally (with the custom overrides) with
`--config-file=internal.yaml`
'''
SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True
SUPPORTS_POOL_VALUE_STATUS = True
SUPPORTS_DYNAMIC_SUBNETS = True
SUPPORTS_MULTIVALUE_PTR = True
# Any record name added to this set will be included in the catch-all file,
# instead of a file matching the record name.
CATCHALL_RECORD_NAMES = ('*', '')
def __init__(
self,
id,
directory,
default_ttl=3600,
enforce_order=True,
populate_should_replace=False,
supports_root_ns=True,
split_extension=False,
split_catchall=True,
disable_zonefile=False,
*args,
**kwargs,
):
klass = self.__class__.__name__
self.log = logging.getLogger(f'{klass}[{id}]')
self.log.debug(
'__init__: id=%s, directory=%s, default_ttl=%d, enforce_order=%d, populate_should_replace=%s, supports_root_ns=%s, split_extension=%s, split_catchall=%s, disable_zonefile=%s',
id,
directory,
default_ttl,
enforce_order,
populate_should_replace,
supports_root_ns,
split_extension,
split_catchall,
disable_zonefile,
)
super().__init__(id, *args, **kwargs)
self.directory = directory
self.default_ttl = default_ttl
self.enforce_order = enforce_order
self.populate_should_replace = populate_should_replace
self.supports_root_ns = supports_root_ns
self.split_extension = split_extension
self.split_catchall = split_catchall
self.disable_zonefile = disable_zonefile
def copy(self):
kwargs = dict(self.__dict__)
kwargs['id'] = f'{kwargs["id"]}-copy'
del kwargs['log']
return YamlProvider(**kwargs)
@property
def SUPPORTS(self):
# The yaml provider supports all record types even those defined by 3rd
# party modules that we know nothing about, thus we dynamically return
        # the types list that is registered in Record, everything that's known as
# of the point in time we're asked
return set(Record.registered_types().keys())
def supports(self, record):
# We're overriding this as a performance tweak, namely to avoid calling
# the implementation of the SUPPORTS property to create a set from a
# dict_keys every single time something checked whether we support a
# record, the answer is always yes so that's overkill and we can just
# return True here and be done with it
return True
@property
def SUPPORTS_ROOT_NS(self):
return self.supports_root_ns
def list_zones(self):
self.log.debug('list_zones:')
zones = set()
extension = self.split_extension
if extension:
# we want to leave the .
trim = len(extension) - 1
self.log.debug(
'list_zones: looking for split zones, trim=%d', trim
)
for dirname in listdir(self.directory):
not_ends_with = not dirname.endswith(extension)
not_dir = not isdir(join(self.directory, dirname))
if not_dir or not_ends_with:
continue
if trim:
dirname = dirname[:-trim]
zones.add(dirname)
if not self.disable_zonefile:
self.log.debug('list_zones: looking for zone files')
for filename in listdir(self.directory):
not_ends_with = not filename.endswith('.yaml')
too_few_dots = filename.count('.') < 2
not_file = not isfile(join(self.directory, filename))
if not_file or not_ends_with or too_few_dots:
continue
# trim off the yaml, leave the .
zones.add(filename[:-4])
return sorted(zones)
def _split_sources(self, zone):
ext = self.split_extension
utf8 = join(self.directory, f'{zone.decoded_name[:-1]}{ext}')
idna = join(self.directory, f'{zone.name[:-1]}{ext}')
directory = None
if isdir(utf8):
if utf8 != idna and isdir(idna):
raise ProviderException(
f'Both UTF-8 "{utf8}" and IDNA "{idna}" exist for {zone.decoded_name}'
)
directory = utf8
else:
directory = idna
for filename in listdir(directory):
if filename.endswith('.yaml'):
yield join(directory, filename)
def _zone_sources(self, zone):
utf8 = join(self.directory, f'{zone.decoded_name}yaml')
idna = join(self.directory, f'{zone.name}yaml')
if isfile(utf8):
if utf8 != idna and isfile(idna):
raise ProviderException(
f'Both UTF-8 "{utf8}" and IDNA "{idna}" exist for {zone.decoded_name}'
)
return utf8
return idna
def _populate_from_file(self, filename, zone, lenient):
with open(filename, 'r') as fh:
yaml_data = safe_load(fh, enforce_order=self.enforce_order)
if yaml_data:
for name, data in yaml_data.items():
if not isinstance(data, list):
data = [data]
for d in data:
if 'ttl' not in d:
d['ttl'] = self.default_ttl
record = Record.new(
zone, name, d, source=self, lenient=lenient
)
zone.add_record(
record,
lenient=lenient,
replace=self.populate_should_replace,
)
self.log.debug(
'_populate_from_file: successfully loaded "%s"', filename
)
def populate(self, zone, target=False, lenient=False):
self.log.debug(
'populate: name=%s, target=%s, lenient=%s',
zone.decoded_name,
target,
lenient,
)
if target:
# When acting as a target we ignore any existing records so that we
# create a completely new copy
return False
before = len(zone.records)
sources = []
split_extension = self.split_extension
if split_extension:
sources.extend(self._split_sources(zone))
if not self.disable_zonefile:
sources.append(self._zone_sources(zone))
        # deterministically order our sources
sources.sort()
for source in sources:
self._populate_from_file(source, zone, lenient)
self.log.info(
'populate: found %s records, exists=False',
len(zone.records) - before,
)
return False
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
self.log.debug(
'_apply: zone=%s, len(changes)=%d',
desired.decoded_name,
len(changes),
)
# Since we don't have existing we'll only see creates
records = [c.new for c in changes]
# Order things alphabetically (records sort that way
records.sort()
data = defaultdict(list)
for record in records:
d = record.data
d['type'] = record._type
if record.ttl == self.default_ttl:
# ttl is the default, we don't need to store it
del d['ttl']
if record._octodns:
d['octodns'] = record._octodns
# we want to output the utf-8 version of the name
data[record.decoded_name].append(d)
# Flatten single element lists
for k in data.keys():
if len(data[k]) == 1:
data[k] = data[k][0]
if not isdir(self.directory):
self.log.debug('_apply: creating directory=%s', self.directory)
makedirs(self.directory)
if self.split_extension:
# we're going to do split files
decoded_name = desired.decoded_name[:-1]
directory = join(
self.directory, f'{decoded_name}{self.split_extension}'
)
if not isdir(directory):
self.log.debug('_apply: creating split directory=%s', directory)
makedirs(directory)
catchall = {}
for record, config in data.items():
if self.split_catchall and record in self.CATCHALL_RECORD_NAMES:
catchall[record] = config
continue
filename = join(directory, f'{record}.yaml')
self.log.debug('_apply: writing filename=%s', filename)
with open(filename, 'w') as fh:
record_data = {record: config}
safe_dump(record_data, fh)
if catchall:
# Scrub the trailing . to make filenames more sane.
filename = join(directory, f'${decoded_name}.yaml')
self.log.debug(
'_apply: writing catchall filename=%s', filename
)
with open(filename, 'w') as fh:
safe_dump(catchall, fh)
else:
# single large file
filename = join(self.directory, f'{desired.decoded_name}yaml')
self.log.debug('_apply: writing filename=%s', filename)
with open(filename, 'w') as fh:
safe_dump(dict(data), fh, allow_unicode=True)
class SplitYamlProvider(YamlProvider):
'''
DEPRECATED: Use YamlProvider with the split_extension parameter instead.
When migrating the following configuration options would result in the same
behavior as SplitYamlProvider
config:
class: octodns.provider.yaml.YamlProvider
# extension is configured as split_extension
split_extension: .
split_catchall: true
disable_zonefile: true
TO BE REMOVED: 2.0
'''
def __init__(self, id, directory, *args, extension='.', **kwargs):
kwargs.update(
{
'split_extension': extension,
'split_catchall': True,
'disable_zonefile': True,
}
)
super().__init__(id, directory, *args, **kwargs)
self.log.warning(
'__init__: DEPRECATED use YamlProvider with split_extension, split_catchall, and disable_zonefile instead, will go away in v2.0'
)
|
23,244 | f205583fe9ce86f76eebc7b2ab0157631f409992 | def wl(func): #闭包
def inner(user,passwd):
if user == 123 and passwd == 123:
print ('登录成功')
func()
else:
print ('输入错误')
return inner
@wl
def f1():
print ('欢迎使用f1...')
@wl
def f2():
print ('欢迎使用f2...')
@wl
def f3():
print ('欢迎使用f3...')
@wl
def f4():
print ('欢迎使用f4...')
user = int(input('请输入账户:'))
passwd = int(input('请输入密码:'))
f1 (user,passwd)
|
23,245 | 1ca10e35a9c942b57ca34eef6255fb59c948d27a | import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import roslib
from keras import models
from keras import backend
model = models.load_model("/home/fizzer/Comp_CNN/please.h5")
#model = models.load_model("/home/fizzer/353comp/src/drive/new_trained_model_driver_cnn_bad?.h5")
model._make_predict_function()
ANGULAR_SPEED = 0.05
LINEAR_SPEED = 0.1
class CNN_driver():
def __init__(self):
self.move = Twist()
self.move_pub = rospy.Publisher("/cmd_vel",Twist,queue_size=1)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/rrbot/camera1/image_raw",Image,self.callback)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
img_normed = cv_image / 255
processed_img = np.expand_dims(img_normed,axis=0)
y_predicts = model.predict(processed_img)
print(y_predicts)
prediction = y_predicts.argmax(axis=1)[0]
prediction = int(prediction)
if prediction == 0:
print("Going forward...")
self.move.linear.x = LINEAR_SPEED
self.move.angular.z = 0
elif prediction == 1:
print("Going backward...")
self.move.linear.x = -LINEAR_SPEED
self.move.angular.z = 0
elif prediction == 2:
print("Rotating left...")
self.move.linear.x = 0
self.move.angular.z = ANGULAR_SPEED
elif prediction == 3:
print("Rotating right...")
self.move.linear.x = 0
self.move.angular.z = -ANGULAR_SPEED
self.move_pub.publish(self.move)
def main(args):
driver = CNN_driver()
rospy.init_node('cnn_node', anonymous=True)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv) |
23,246 | 4bec8bb994ba76b2b9431604dc5f32b756a69eec | #!/usr/bin/env python3
import re
import typing
from functools import reduce
from difflib import SequenceMatcher
# from sqlmap
def htmlunescape(value):
"""
From Sqlmap
Returns (basic conversion) HTML unescaped value
    >>> htmlunescape('a&lt;b')
'a<b'
"""
retVal = value
if value and isinstance(value, str):
codes = (("<", '<'), (">", '>'), (""", '"'),
(" ", ' '), ("&", '&'), ("'", "'"))
retVal = reduce(lambda x, y: x.replace(y[0], y[1]), codes, retVal)
try:
retVal = re.sub(
r"&#x([^ ;]+);", lambda match: chr(int(match.group(1), 16)), retVal)
except ValueError:
pass
return retVal
def get_filtered_page_content(page, onlyText=True, split=" "):
"""
From Sqlmap
Returns filtered page content without script, style and/or comments
or all HTML tags
    >>> get_filtered_page_content(u'<html><title>foobar</title><body>test</body></html>')
u'foobar test'
"""
retVal = page
# only if the page's charset has been successfully identified
if isinstance(page, str):
retVal = re.sub(r"(?si)<script.+?</script>|<!--.+?-->|<style.+?</style>%s" %
(r"|<[^>]+>|\t|\n|\r" if onlyText else ""), split, page)
retVal = re.sub(r"%s{2,}" % split, split, retVal)
retVal = htmlunescape(retVal.strip().strip(split))
return retVal
def calc_ratio(seq1: str, seq2: str,
autojunk: bool = True, isjunk=None) -> float:
_seqmt = SequenceMatcher(isjunk, seq1, seq2, autojunk)
ret = _seqmt.ratio()
del _seqmt
return ret
def extract_dynamic_content_marking(
seq1: str, seq2: str,
autojunk: bool = True, isjunk=None, border_length=20
) -> typing.List[typing.Tuple[str, str]]:
seqm = SequenceMatcher(isjunk, seq1, seq2, autojunk)
blocks = list(seqm.get_matching_blocks())
mached_markings = []
while blocks:
current_block = blocks.pop(0)
if current_block.size < border_length:
continue
if not blocks:
break
for next_block in blocks:
#next_block = blocks[0]
if next_block.size < border_length:
continue
prefix = seq1[
current_block.a:current_block.a + current_block.size
][-border_length:]
suffix = seq1[
next_block.a:next_block.a + next_block.size
][:border_length]
mached_markings.append((prefix, suffix))
break
return mached_markings
def regex_remove_dynamic_content_by_markings(
text: str,
markings: typing.List[typing.Tuple],
repl: str = " "
) -> str:
for (prefix, suffix) in markings:
rgx = re.compile("(?<={}).*(?={})".format(
re.escape(prefix), re.escape(suffix)
))
text = rgx.sub(repl, text)
return text
def _extractNremove_by_prefix_and_suffix(text, prefix, suffix, repl=" "):
if not prefix or not suffix or not text:
return []
if (prefix not in text) or (suffix not in text):
return []
textlist = list(text)
frstc_p = prefix[0]
len_p = len(prefix)
frstc_s = suffix[0]
len_s = len(suffix)
state = "CLOSE" # NO OPEN CLOSE
results = []
freelist = []
buff = ""
# print(frstc_p, frstc_s)
while textlist:
_chr = textlist[0]
# print("".join(freelist), "buff:", buff, "_chr:", _chr)
# import time
# time.sleep(0.2)
if _chr == frstc_p:
if "".join(textlist[:len_p]) == prefix:
state = "OPEN"
freelist.append(buff)
# print("add BUff", buff)
buff = ""
[freelist.append(textlist.pop(0)) for _ in range(len_p)]
continue
else:
freelist.append(textlist.pop(0))
elif _chr == frstc_s:
if state == "OPEN":
if "".join(textlist[:len_s]) == suffix:
state = "CLOSE"
results.append(buff)
freelist.append(repl)
[freelist.append(textlist.pop(0)) for _ in range(len_s)]
continue
else:
buff += _chr
textlist.pop(0)
# freelist.append(textlist.pop(0))
else:
freelist.append(textlist.pop(0))
else:
if state == "OPEN":
buff += _chr
textlist.pop(0)
else:
freelist.append(textlist.pop(0))
return "".join(freelist), results
def extract_by_prefix_and_suffix(text, prefix, suffix):
_, results = _extractNremove_by_prefix_and_suffix(text, prefix, suffix)
return results
def remove_dynamic_content_by_markings(text, markings, repl=" "):
for (prefix, suffix) in markings:
text, _ = _extractNremove_by_prefix_and_suffix(
text, prefix, suffix, repl,
)
return text
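# A small usage sketch (the example strings are invented, not part of the original
# module): two page loads that differ only in a dynamic fragment are diffed to
# learn stable prefix/suffix markings, which are then used to blank out the
# dynamic part of any later page.
if __name__ == "__main__":
    page_a = "<html>counter: [[17]] footer</html>"
    page_b = "<html>counter: [[42]] footer</html>"
    markings = extract_dynamic_content_marking(page_a, page_b, border_length=5)
    print(remove_dynamic_content_by_markings(page_a, markings))
    # -> <html>counter: [[ ]] footer</html>
    print(extract_by_prefix_and_suffix("a[[X]]b", "[[", "]]"))
    # -> ['X']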
|
23,247 | 8621d25f645ac2b9babc3eb5edbeb25d579da92e | size=int(input())
list1=[]
for i in range(size):
lis0=input().split(',')
lis0=[int(x) for x in lis0]
list1.append(lis0)
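# Bottom-up DP over the grid: dep[i][j] is the minimum value needed when entering
# cell (i, j) so that, adding each cell's value along a right/down path to the
# bottom-right corner, the running total never drops below 1.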
dep=[[0 for i in range(size)] for j in range(size)]
dep[size-1][size-1]=max(1,1-list1[size-1][size-1])
for i in range(size-1):
dep[size-1][size-i-2]=max(1,dep[size-1][size-i-1]-list1[size-1][size-i-2])
dep[size-i-2][size-1]=max(1,dep[size-i-1][size-1]-list1[size-i-2][size-1])
for i in range(size-2,-1,-1):
for j in range(size-2,-1,-1):
depmin=min(dep[i+1][j],dep[i][j+1])
dep[i][j]=max(1,depmin-list1[i][j])
print(dep[0][0]) |
23,248 | 9797a8dd3191c28381add1087d59bc6400ad1e1d | import datetime
from pathlib import Path
OUTPATH = Path(r"/home/mrchou/code/AxisDraw/files/unfinished")
CONTENT = \
"""Date:{date}
HEADER:{header}
{content}
"""
dates = [
"2019-06-04", "2019-06-06", "2019-06-11", "2019-06-13", "2019-06-18", "2019-06-20", "2019-06-25", "2019-06-27",
"2019-07-02", "2019-07-04", "2019-07-09", "2019-07-11", "2019-07-16", "2019-07-18", "2019-07-23", "2019-07-25",
"2019-08-06", "2019-08-08", "2019-08-13", "2019-08-15", "2019-08-20", "2019-08-22", "2019-08-27", "2019-08-29",
"2019-09-03", "2019-09-05", "2019-09-10", "2019-09-12", "2019-09-17", "2019-09-19", "2019-09-24", "2019-09-26",
"2019-10-03", "2019-10-08", "2019-10-10", "2019-10-15", "2019-10-17", "2019-10-22", "2019-10-24", "2019-10-29",
"2019-11-05", "2019-11-07", "2019-11-12", "2019-11-14", "2019-11-19", "2019-11-21", "2019-11-26", "2019-11-28",
"2019-12-03", "2019-12-05", "2019-12-10", "2019-12-12", "2019-12-17", "2019-12-19", "2019-12-24", "2019-12-26",
"2020-01-02", "2020-01-07", "2020-01-09", "2020-01-14", "2020-01-21", "2020-01-30",
"2020-02-04", "2020-02-06", "2020-02-11", "2020-02-13", "2020-02-18", "2020-02-20", "2020-02-25", "2020-02-27",
"2020-03-03", "2020-03-05", "2020-03-10", "2020-03-12", "2020-03-17", "2020-03-19", "2020-03-24", "2020-03-26",
"2020-04-07", "2020-04-09", "2020-04-14", "2020-04-16", "2020-04-21", "2020-04-23", "2020-04-28", "2020-04-30",
"2020-05-05", "2020-05-07", "2020-05-12", "2020-05-14", "2020-05-19", "2020-05-21", "2020-05-26", "2020-05-28",
"2020-06-02", "2020-06-04", "2020-06-09", "2020-06-11", "2020-06-16", "2020-06-18", "2020-06-23", "2020-06-30",
"2020-07-07", "2020-07-09", "2020-07-14", "2020-07-16", "2020-07-21", "2020-07-23", "2020-07-28", "2020-07-30",
"2020-08-04", "2020-08-06", "2020-08-11", "2020-08-13", "2020-08-18", "2020-08-20", "2020-08-25", "2020-08-27",
"2020-09-03", "2020-09-08", "2020-09-10", "2020-09-15", "2020-09-17", "2020-09-22", "2020-09-24", "2020-09-29",
"2020-10-06", "2020-10-08", "2020-10-13", "2020-10-15", "2020-10-20", "2020-10-22", "2020-10-27", "2020-10-29",
"2020-11-03", "2020-11-05", "2020-11-10", "2020-11-12", "2020-11-17", "2020-11-19", "2020-11-24", "2020-11-26",
"2020-12-03", "2020-12-08", "2020-12-10", "2020-12-15", "2020-12-17", "2020-12-22", "2020-12-24", "2020-12-29",
"2021-01-05", "2021-01-07", "2021-01-12", "2021-01-14", "2021-01-19", "2021-01-21", "2021-01-26", "2021-01-28",
"2021-02-02", "2021-02-04", "2021-02-09", "2021-02-18", "2021-02-23", "2021-02-25",
"2021-03-04", "2021-03-09", "2021-03-11", "2021-03-16", "2021-03-18", "2021-03-23", "2021-03-25", "2021-03-30",
"2021-04-06", "2021-04-08", "2021-04-13", "2021-04-15", "2021-04-20", "2021-04-22", "2021-04-27", "2021-04-29",
"2021-05-04", "2021-05-06", "2021-05-11", "2021-05-13", "2021-05-18", "2021-05-20", "2021-05-25", "2021-05-27"
]
if __name__ == "__main__":
for date in dates:
date = datetime.datetime.strptime(date, "%Y-%m-%d")
PARENT = f"{date.year}{date.month:02d}"
PARENT = OUTPATH.joinpath(PARENT)
if not PARENT.is_dir():
PARENT.mkdir()
fname = f"{date.year}-{date.month:02d}-{date.day:02d}"
outf = PARENT.joinpath(f"{fname}.txt")
with open(str(outf), "w") as f:
f.write(CONTENT.format(date=fname, header="header", content="content"))
|
23,249 | 5a8d2e3e2cd3302429e76191e92cdb521d5fd161 | import asyncio
import datetime
import pytest
from aiocache import caches
from django.utils import timezone
from jose import jwt
from model_mommy import mommy
from simple_settings import settings
from challenge import app as _app
from challenge.application.helpers import create_jwt_key
@pytest.fixture(scope='session')
def loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture
def app(loop):
return _app
@pytest.fixture(autouse=True)
def client(aiohttp_client, app, loop):
return loop.run_until_complete(aiohttp_client(app))
@pytest.fixture()
def client_authenticated(aiohttp_client, app, loop, token):
return loop.run_until_complete(
aiohttp_client(
app,
headers={'Authorization': f'Bearer {token}'}
)
)
@pytest.fixture(autouse=True)
def clear_cache(request, loop):
marker = request.keywords.get('clear_cache', None)
if marker:
for key in settings.AIO_CACHES:
cache = caches.get(key)
loop.run_until_complete(cache.clear())
@pytest.fixture
def application():
return mommy.make('application.Application')
@pytest.fixture
def token(application):
return application.create_token().token
@pytest.fixture
def expired_token(application):
jwt_key = create_jwt_key(application)
return jwt.encode(
claims={
'id': application.id,
'name': application.name,
'exp': timezone.now() - datetime.timedelta(days=1)
},
key=jwt_key,
algorithm='HS256'
)
@pytest.fixture
def product_id():
return '6c49be9c-f87f-9791-73fc-ce5b7c5d44dd'
@pytest.fixture
def catalog_url(product_id):
settings.CATALOG_CONFIG['url'] = 'http://catalog.com'
catalog_url = settings.CATALOG_CONFIG['url']
return f'{catalog_url}/api/product/{product_id}'
@pytest.fixture
def catalog_response(product_id):
return {
'price': 55.9,
'image': 'http://challenge-api.luizalabs.com/b.jpg',
'brand': 'tramontina',
'id': product_id,
'title': 'Jogo de Inox para Torta Copacabana 7 Peças'
}
|
23,250 | f99fdbe13074cba8ee7637fade91f94c6b7dd20a | # https://codeforces.com/problemset/problem/1360/A
import sys
import os
import heapq
try:
path = "./file/input.txt"
if os.path.exists(path):
sys.stdin = open(path, 'r')
# sys.stdout = open(r"./file/output.txt", 'w')
except:
pass
t = int(input())
def printd(value):
# print(value)
pass
for _ in range(t):
arr = list(map(int, input().split(" ")))
a, b = arr[0], arr[1]
if a > b:
b *= 2
else:
a *= 2
if a > b:
result = a * a
else:
result = b * b
print(result)
|
23,251 | 8b3e1237a5bf8b3782747ec11ba96a903167b83b | import cv2
import dlib
import numpy as np
from scipy.spatial import distance as dist
PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(PREDICTOR_PATH)
detector = dlib.get_frontal_face_detector()
"""
class TooManyFaces(Exception):
pass
class NoFaces(Exception):
pass
"""
def get_landmarks(im):
rects = detector(im,1)
#print(len(rects))
if len(rects)>1:
print("More than 1 faces")
return "error"
if len(rects)==0:
print("No faces")
return "error"
return np.matrix([[p.x,p.y] for p in predictor(im,rects[0]).parts()])
def annotate_landmarks(im,landmarks):
im = im.copy()
for idx,point in enumerate(landmarks):
pos = (point[0,0],point[0,1])
cv2.putText(im,str(idx),pos,fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,fontScale=0.4,color=(0,0,255))
cv2.circle(im,pos,3,color=(0,255,255))
#print(landmarks[30]," ",landmarks[8]," ",landmarks[45]," ",landmarks[36]," ",landmarks[64]," ",landmarks[48])
return im
# This function will take landmarks as input and compute EAR (Eye Aspect Ratio)
def left_eye(landmarks):
# 42 to 47
features = []
features.append(landmarks[42])
features.append(landmarks[43])
features.append(landmarks[44])
features.append(landmarks[45])
features.append(landmarks[46])
features.append(landmarks[47])
features = np.squeeze(np.asarray(features))
l_A = dist.euclidean(features[1],features[5])
l_B = dist.euclidean(features[2],features[4])
l_C = dist.euclidean(features[0],features[3])
l_ear = (l_A+l_B)/(2.0*l_C)
return l_ear
def right_eye(landmarks):
# 36 to 41
right_features = []
right_features.append(landmarks[36])
right_features.append(landmarks[37])
right_features.append(landmarks[38])
right_features.append(landmarks[39])
right_features.append(landmarks[40])
right_features.append(landmarks[41])
right_features = np.squeeze(np.asarray(right_features))
r_A = dist.euclidean(right_features[1],right_features[5])
r_B = dist.euclidean(right_features[2],right_features[4])
r_C = dist.euclidean(right_features[0],right_features[3])
r_ear = (r_A+r_B)/(2.0*r_C)
return r_ear
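# Both helpers above compute the standard Eye Aspect Ratio,
#   EAR = (||p2 - p6|| + ||p3 - p5||) / (2 * ||p1 - p4||),
# where p1..p6 are the six landmarks around one eye; EAR drops sharply when the
# eye closes. A single shared helper could replace the duplicated code (a
# hypothetical refactor, not part of the original script):
def eye_aspect_ratio(points):
    # points: sequence of six (x, y) landmark coordinates for one eye
    pts = np.squeeze(np.asarray(points))
    a = dist.euclidean(pts[1], pts[5])  # vertical distance p2-p6
    b = dist.euclidean(pts[2], pts[4])  # vertical distance p3-p5
    c = dist.euclidean(pts[0], pts[3])  # horizontal distance p1-p4
    return (a + b) / (2.0 * c)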
def eye_open(image):
landmarks = get_landmarks(image)
    if isinstance(landmarks, str):  # get_landmarks() returned "error"
return image,0,0
image_with_landmarks = annotate_landmarks(image,landmarks)
left_ear = left_eye(landmarks)
#print(top_lip_center)
right_ear = right_eye(landmarks)
#print(left_ear)
#print(right_ear)
return image_with_landmarks, left_ear, right_ear
# Open the webcam
cap = cv2.VideoCapture(0)
blinks = 0
blink_status = False
left_blinks = 0
right_blinks = 0
left_blink_status = False
right_blink_status = False
while True:
ret, frame = cap.read()
image_landmarks, left_ear, right_ear = eye_open(frame)
prev_blink_status = blink_status
prev_left_blink_status = left_blink_status
prev_right_blink_status = right_blink_status
ear = (left_ear+right_ear)/2.0
# print("left_ear",left_ear)
# print("right_ear",right_ear)
# print("ear",ear)
# Blink ( both eyes are closed )
if ear < 0.25 and ear!=0 and right_ear < 0.25 and left_ear < 0.25 and left_ear!=0 and right_ear!=0:
blink_status = True
cv2.putText(frame,"Double eye blink",(50,450),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255),2)
print("Double eye blink")
cv2.putText(frame,"Doctor",(50,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,127),2)
else:
blink_status = False
# Only left eye is closed
if left_ear < 0.25 and left_ear!=0:
left_blink_status = True
cv2.putText(frame,"Left eye blink",(150,650),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255),2)
cv2.putText(frame,"Help!!!",(150,150),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,127),2)
print("Left blink")
# Only right eye is closed
elif right_ear < 0.25 and right_ear!=0:
right_blink_status = True
cv2.putText(frame,"Right eye blink",(100,350),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255),2)
cv2.putText(frame,"Food!!!",(250,250),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,127),2)
print("Right blink")
if prev_blink_status == True and blink_status == False:
blinks=blinks+1
cv2.imshow("Live Landmarks",image_landmarks)
cv2.imshow("Blink Detection",frame)
# 13 is the Enter key
if cv2.waitKey(1) == 13:
break
cap.release()
cv2.destroyAllWindows()
|
23,252 | cf30c54e7c298e96fcd574478776a2bcbcaa8c41 | """sessions table
Revision ID: faed6ba536c5
Revises: c5ea7d44234e
Create Date: 2021-05-26 20:57:16.562426
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'faed6ba536c5'
down_revision = 'c5ea7d44234e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('session', sa.Column('nextSessionDate', sa.DateTime(), nullable=True))
op.create_index(op.f('ix_session_nextSessionDate'), 'session', ['nextSessionDate'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_session_nextSessionDate'), table_name='session')
op.drop_column('session', 'nextSessionDate')
# ### end Alembic commands ###
|
23,253 | 17a787f59041488b44edaef594719324dd5c0134 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .forms import *
from django.db.models import Q
from . import models
from django.core.mail import send_mail
from itw2proj import settings
vidext = ['mp4', 'MP4', 'MKV', 'avi', 'mkv', 'flv', 'webm', 'wmv']
songext = ['mp3', 'aac3']
imgext = ['jpg', 'png','jpeg', 'gif']
def home(request):
return render(request, 'webapp/home.html', context=None)
def pre_login(request):
return render(request, 'webapp/prelogin.html')
def register(request):
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
if user_form.is_valid():
user = user_form.save(commit=False)
user.set_password(user.password)
user.save()
send_mail(
'Registration complete',
'Welcome to Mython,'+user_form.cleaned_data['first_name']+' ! We’re glad you’re a part of our community.\n\n Find all you need, share with others, and connect with peers!\n\n\nThanks for joining!\nThe Mython team ',
settings.EMAIL_HOST_USER,
[user_form.cleaned_data['email'], 'mython.itw.gmail.com'],
fail_silently=False,
)
registered = True
else:
print(user_form.errors)
else:
user_form = UserForm()
return render(request, 'webapp/register.html', {'user_form': user_form,
'registered': registered})
def user_login(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/webapp/user_home')
else:
return HttpResponse("<h1>Your Home account is disabled</h1>")
else:
print("Invalid Login Detail: {0} {1}".format(username, password))
return HttpResponse("Invalid Login detail provided")
else:
return render(request, 'webapp/login.html', {})
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect('/webapp/')
def index(request):
if request.user.is_authenticated():
vidqr = models.Video.objects.all().order_by("-uploaded_at")[:6]
msgqr = models.Message.objects.all().order_by("-pk")[:6]
imgqr = models.Image.objects.all().order_by("-pk")[:6]
fileqr = models.File.objects.all().order_by("-pk")[:6]
context = {'vidqr': vidqr, 'msgqr': msgqr, 'imgqr':imgqr, 'fileqr':fileqr}
return render(request, 'webapp/index.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def addvid(request):
if request.user.is_authenticated():
if request.method == 'POST':
form = VideosForm(request.POST, request.FILES)
if form.is_valid():
form.instance.extension = form.cleaned_data['video'].name.split(sep='.')[-1]
form.instance.size = form.cleaned_data['video'].size/1024
if form.instance.extension in vidext:
form.save()
return render(request, 'webapp/uploadsuccess.html', context=None)
else:
if form.instance.extension in songext:
temp = 'songs'
ref = 'webapp:addsong'
                    elif form.instance.extension in imgext:
temp = 'images'
ref = 'webapp:img_add'
elif form.instance.extension in vidext:
temp = 'videos'
ref = 'webapp:vid_add'
else:
temp = 'files'
ref = 'webapp:file_add'
return render(request, 'webapp/unsupportedextension.html', context={
'ext': form.instance.extension,
'temp': temp,
'ref':ref
})
else:
form = VideosForm()
return render(request, 'webapp/Forms/FileForm.html', {
'form': form
})
else:
return render(request, 'webapp/home.html', context=None)
def vid_all(request):
if request.user.is_authenticated():
vidall = models.Video.objects.all().order_by("-uploaded_at")
context = {'vidall': vidall}
return render(request, 'webapp/vid_all.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def vid(request, vid_id):
if request.user.is_authenticated():
video = get_object_or_404(models.Video, pk=vid_id)
context = {'video': video}
return render(request, 'webapp/vid.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def messages_all(request):
if request.user.is_authenticated():
msg = models.Message.objects.all().order_by("-pk")
context = {'all_messages': msg}
return render(request, 'webapp/messages_all.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def message(request, mid):
if request.user.is_authenticated():
msg = get_object_or_404(models.Message, pk=mid)
context = {'msg': msg}
return render(request, 'webapp/message.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def add_message(request):
if request.user.is_authenticated():
if request.method == 'POST':
form = MessagesForm(request.POST)
if form.is_valid():
form.instance.sender = request.user.username
form.save()
return render(request, 'webapp/uploadsuccess.html', context=None)
else:
form = MessagesForm()
return render(request, 'webapp/Forms/MessageForm.html', {
'form': form
})
else:
return render(request, 'webapp/home.html', context=None)
def notification_all(request):
if request.user.is_authenticated():
ntfc = models.Notification.objects.filter(send_to=request.user).order_by("-pk")
sent = models.Notification.objects.filter(sender__exact=request.user.username).order_by("-pk")
context = {'all_notification': ntfc, 'sent': sent}
return render(request, 'webapp/notifications_all.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def notification(request, nid):
if request.user.is_authenticated():
ntfc = get_object_or_404(models.Notification, pk=nid)
context = {'ntfc': ntfc}
return render(request, 'webapp/notification.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def add_notification(request):
if request.user.is_authenticated():
if request.method == 'POST':
form = NotificationForm(request.POST)
if form.is_valid():
form.instance.sender = request.user.username
form.save()
return render(request, 'webapp/uploadsuccess.html', context=None)
else:
form = NotificationForm()
return render(request, 'webapp/Forms/MessageForm.html', {
'form': form
})
else:
return render(request, 'webapp/home.html', context=None)
def uploadmenu(request):
if request.user.is_authenticated():
return render(request, 'webapp/uploadmenu.html')
else:
return render(request, 'webapp/home.html', context=None)
def contact(request):
if request.user.is_authenticated():
return render(request, 'webapp/contact.html')
else:
return render(request, 'webapp/home.html', context=None)
def delete_vid(request, vid_id):
if request.user.is_authenticated():
video_reqd = models.Video.objects.get(pk=vid_id)
video_reqd.delete()
vidall = models.Video.objects.all()
context = {'vidall': vidall}
return render(request, 'webapp/vid_all.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def search(request):
if request.user.is_authenticated():
query = request.GET.get("q")
vid_query = models.Video.objects.filter(
Q(name__icontains=query) |
Q(description__icontains=query) |
Q(category__icontains=query)
)
file_query = models.File.objects.filter(
Q(name__icontains=query) |
Q(description__icontains=query)
)
msg_query = models.Message.objects.filter(
Q(sender__icontains=query) |
Q(message__icontains=query)
)
ntfc_query = models.Notification.objects.filter(
Q(sender__icontains=query) |
Q(body__icontains=query) |
Q(send_to__email__icontains=query) |
Q(send_to__username__icontains=query) |
Q(send_to__first_name__icontains=query) |
Q(send_to__last_name__icontains=query)
)
img_query = models.Image.objects.filter(
Q(name__icontains=query)
)
if len(vid_query) == 0 and len(msg_query) == 0 and len(ntfc_query) == 0 and len(img_query)==0 and len(file_query)==0:
obj = 0
return render(request, 'webapp/search.html', context={'obj': obj})
else:
obj = 1
return render(request, 'webapp/search.html', context={
'vid_query': vid_query,
'msg_query': msg_query,
'ntfc_query': ntfc_query,
'img_query': img_query,
'file_query':file_query,
'obj': obj}
)
else:
return render(request, 'webapp/home.html', context=None)
def hello(request):
return render(request, 'webapp/hello.html', None)
def delete_img(request, img_id):
if request.user.is_authenticated():
image_reqd = models.Image.objects.get(pk=img_id)
image_reqd.delete()
imgall = models.Image.objects.all()
context = {'imgall': imgall}
return render(request, 'webapp/img_all.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def img_all(request):
if request.user.is_authenticated():
imgall = models.Image.objects.all().order_by("-uploaded_at")
context = {'imgall': imgall }
return render(request, 'webapp/img_all.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def img(request, img_id):
if request.user.is_authenticated():
image = get_object_or_404(models.Image, pk=img_id)
context = {'image': image}
return render(request, 'webapp/img.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def addimg(request):
if request.user.is_authenticated():
if request.method == 'POST':
form = ImagesForm(request.POST, request.FILES)
if form.is_valid():
form.instance.extension = form.cleaned_data['img'].name.split(sep='.')[-1]
form.instance.size = form.cleaned_data['img'].size/1024
if form.instance.extension in imgext:
form.save()
return render(request, 'webapp/uploadsuccess.html', context=None)
else:
if form.instance.extension in songext:
temp = 'songs'
ref = 'webapp:addsong'
k = 1
elif form.instance.extension in vidext:
temp = 'videos'
ref = 'webapp:vid_add'
k = 1
else:
temp = 'files'
ref = 'webapp:addfile'  # hypothetical URL name for the file upload view; without a ref the render below raises NameError
return render(request, 'webapp/unsupportedextension.html', context={
'ext': form.instance.extension,
'temp': temp,
'ref':ref
})
else:
form = ImagesForm()
return render(request, 'webapp/Forms/FileForm.html', {
'form': form
})
else:
return render(request, 'webapp/home.html', context=None)
def addalbum(request):
if request.method == 'POST':
form = AlbumForm(request.POST , request.FILES )
if form.is_valid():
form.save()
return render(request, 'webapp/uploadsuccess.html',context=None)
else:
form = AlbumForm()
return render(request, 'webapp/Forms/FileForm.html', {
'form': form
})
def addsong(request):
if request.method == 'POST':
form = SongsForm(request.POST, request.FILES)
if form.is_valid():
form.instance.extension = form.cleaned_data['song'].name.split(sep='.')[-1]
form.instance.size = form.cleaned_data['song'].size/1024
form.save()
return render(request, 'webapp/uploadsuccess.html',context=None)
else:
form = SongsForm()
return render(request, 'webapp/Forms/FileForm.html', {
'form': form
})
def album_all(request):
albumall = models.Album.objects.all()
context = {'albumall': albumall}
return render(request,'webapp/albumall.html',context)
def album(request, s_id):
album_reqd = get_object_or_404(models.Album, pk=s_id)
song_query = models.Song.objects.filter(album_i__title__exact=album_reqd.title)
# if len(song_query) == 0:
# obj = 0
# return render(request, 'webapp/album.html', context={'obj': obj})
# else:
# obj = 1
return render(request, 'webapp/album.html', context={
'album': album_reqd,
'songs': song_query,
}
)
def delete_file(request, file_id):
if request.user.is_authenticated():
file_reqd = models.File.objects.get(pk=file_id)
file_reqd.delete()
fileall = models.File.objects.all()
context = {'fileall': fileall}
return render(request, 'webapp/file_all.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def addfile(request):
if request.user.is_authenticated():
if request.method == 'POST':
form = FileForm(request.POST, request.FILES)
if form.is_valid():
k=0
form.instance.extension = form.cleaned_data['src'].name.split(sep='.')[-1]
form.instance.size = form.cleaned_data['src'].size / 1024
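# Extensions that belong to songs, images or videos are steered toward the matching
# upload view (via the unsupportedextension page) instead of being saved as a generic File.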
if form.instance.extension in songext:
temp = 'songs'
ref = 'webapp:addsong'
k=1
if form.instance.extension in imgext:
temp = 'images'
ref = 'webapp:img_add'
k=1
elif form.instance.extension in vidext:
temp = 'videos'
ref = 'webapp:vid_add'
k=1
if k==1:
return render(request, 'webapp/unsupportedextension.html', context={
'ext': form.instance.extension,
'temp': temp,
'ref':ref,
})
else:
form.save()
return render(request, 'webapp/uploadsuccess.html', context=None)
else:
form = FileForm()
return render(request, 'webapp/Forms/FileForm.html', {
'form': form
})
else:
return render(request, 'webapp/home.html', context=None)
def file_all(request):
if request.user.is_authenticated():
fileall = models.File.objects.all().order_by("-uploaded_at")
context = {'fileall': fileall}
return render(request, 'webapp/file_all.html', context)
else:
return render(request, 'webapp/home.html', context=None)
def file(request, file_id):
if request.user.is_authenticated():
file = get_object_or_404(models.File, pk=file_id)
context = {'file': file}
return render(request, 'webapp/file.html', context)
else:
return render(request, 'webapp/home.html', context=None)
|
23,254 | 8facc2142980ad596a0fe7414c9a04923621daf3 | #! /usr/bin/python
import sys
def main ():
res = times (1, 2)
print (res)
res = times (3.14, 4)
print (res)
res = times ("Hello!", 5)
print (res)
sys.exit (0)
def times (n1, n2):
return n1*n2
main ()
|
23,255 | 89508abad32b8f9623c814575d2e094a7a597d04 | import faust
class Company(faust.Record):
name: str
domain: str
class Domain_information(faust.Record, serializer='json'):
clearbitData: str
name: str |
23,256 | 6399d852e2f802504b41794858b8f6cf2d3bc917 | from Domain.AddOperation import AddOperation
import random
from Domain.DeleteOperation import DeleteOperation
from Domain.Car import Car
from Domain.CarValidator import CarValidator
from Repository.FileRepository import FileRepository
from Service.UndoService import UndoService
class CarService:
def __init__(self, cars_repository: FileRepository, car_validator: CarValidator,
transaction_repository: FileRepository,
undo_service: UndoService):
self.__cars_repository = cars_repository
self.__car_validator = car_validator
self.__transaction_repository = transaction_repository
self.__undo_service = undo_service
def create(self, id_car, model, purchase_year, km_travelled, in_warranty):
"""
It adds a car in the storage dict.
:param id_car: int, an_id
:param model: string, a model
:param purchase_year: int, the purchase_year
:param km_travelled: int, km_travelled
:param in_warranty: boolean, in_warranty
"""
car = Car(id_car, model, purchase_year, km_travelled, in_warranty)
self.__car_validator.validate(car)
if in_warranty == 'yes':
car.in_warranty = True
else:
car.in_warranty = False
self.__cars_repository.create(car)
self.__undo_service.add_to_undo(AddOperation(self.__cars_repository, car))
def delete(self, id_car):
car = self.__cars_repository.find_by_id(id_car)
self.__cars_repository.delete(id_car)
self.__undo_service.add_to_undo(DeleteOperation(self.__cars_repository, car))
def update(self, id_car, model, purchase_year, km_travelled, in_warranty):
car = self.__cars_repository.find_by_id(id_car)
if car is None:
raise KeyError(f"The car with id: {id_car} doesn't exist.")
if model != '':
car.model = model
if purchase_year != '':
car.purchase_year = purchase_year
if km_travelled != '':
car.km_travelled = km_travelled
if in_warranty != '':
car.in_warranty = in_warranty
self.__car_validator.validate(car)
if in_warranty == 'yes' or in_warranty == '':
car.in_warranty = True
else:
car.in_warranty = False
self.__cars_repository.update(car)
def get_all(self):
return self.__cars_repository.get_all()
def merge_sort(self, arr, key=lambda x: x[1]):
if len(arr) < 2:
return arr
# Finding the mid of the array
mid = len(arr) // 2
# Dividing the array elements
left = arr[:mid]
# into 2 halves
right = arr[mid:]
# Sorting the first half (pass the key through so recursive calls use the same ordering)
self.merge_sort(left, key)
# Sorting the second half
self.merge_sort(right, key)
i = j = k = 0
# Copy data to temp arrays L[] and R[]
while i < len(left) and j < len(right):
if key(left[i]) > key(right[j]):
arr[k] = left[i]
i += 1
else:
arr[k] = right[j]
j += 1
k += 1
# Checking if any element was left
while i < len(left):
arr[k] = left[i]
i += 1
k += 1
while j < len(right):
arr[k] = right[j]
j += 1
k += 1
return arr
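# Note: because the merge above compares with '>', the result is in descending order of
# the key; DObyWorkmanship appears to rely on this to rank cars from highest to lowest
# workmanship cost.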
def DObyWorkmanship(self):
result = []
for car in self.__cars_repository.get_all():
sum_workmanship = 0
for transaction in self.__transaction_repository.get_all():
if car.id_entity == transaction.id_car:
sum_workmanship += transaction.sum_workmanship
result.append((car.model, sum_workmanship))
output = self.merge_sort(result, key=lambda tuple: tuple[1])
return output
def updateWarranty(self, current_year):
output = []
for car in self.__cars_repository.get_all():
if current_year - car.purchase_year > 3 or car.km_travelled > 60000:
output.append(car)
return output
def populate_cars(self, number_of_cars):
in_warrantys = ['yes', 'no']
models = ('toyota', 'audi', 'bmw', 'mercedes', 'ford', 'skoda', 'ferrari')
for index in range(number_of_cars):
id_car = random.randint(3, 1000000000000)
model = random.choice(models)
purchase_year = random.randint(1999, 2020)
km_travelled = random.randint(200000, 500000)
in_warranty = random.choice(in_warrantys)
a_car = Car(id_car, model, purchase_year, km_travelled, in_warranty)
self.__cars_repository.create(a_car)
|
23,257 | 2d3e70b0a2558f3197ce31c6a9e9fb174b1d962d | #lest we hope god grants the end to this pygame hell swiftly
#can we just do java already
#like, i don't even know java and i want to do it at this point
#gofd can this end
#I legit had a pygame nightmare
#Dennis Dayan
#spaceincaders.py #honestly i dont even care that i misspelled that
import pygame
import sys
import time
pygame.init()
pygame.font.init()
myfont = pygame.font.SysFont('Comic Sans MS', 30)
screen = pygame.display.set_mode((800, 600))
x = 25
y = 540
width = 40
height = 60
vel = 5
# enemyx = 40
# enemyy = 40
score = 0
# def countdown(n):
# run = True
# while run:
# time.sleep(n)
# if n== 0:
# print('TIME IS UP!')
#Enter music here
pygame.mixer.init()
pygame.mixer.music.load('ps1.mp3')#any mp3 file
pygame.mixer.music.play()
name = str(input('Enter Your Name: '))
boom = 10
while boom > 0:
time.sleep(1)
print(boom)
boom -= 1
print("START GAME!")
def draw():
screen.blit(ship.image, (x, y))
for i in projlist:
i.draw(screen)
for i in enemyList:
i.draw(screen)
Score()
pygame.display.update()
winscreen = pygame.image.load('win.jpg')
def redrawCollision():
for p in projlist:
p.draw(screen)
p.rect = pygame.Rect(p.x, p.y, p.size, p.size)
for i in enemyList:
if(p.collision(i)):
del enemyList[enemyList.index(i)]
global score
score += 1
pygame.mixer.pre_init(44100, 16, 2, 4096)
hitsound = pygame.mixer.Sound('mgs.wav')
hitsound.play()
# if score >= 13:
# for i in range(80, 680, 100):
# enemyList.append(Enemy(i, 50, 50)) #this is promising, fix with watsons help
# for i in range(30, 730, 100):
# enemyList.append(Enemy(i, 150, 50))
#enter collide music here
def Score():
scoremessage = 'Score : {} '.format(score)
textsurface = myfont.render(scoremessage, False, (0, 0, 0))
screen.blit(textsurface,(0,0))
def redrawCreatures():
pass
clock = pygame.time.Clock()
pygame.mouse.set_visible(0)
running = True
#Initializes characters
ship = pygame.sprite.Sprite()
ship.rect = pygame.Rect(40, 40, 40, 40) #(X, Y, H, W) #Change these
shipfile = pygame.image.load('ship.png')
ship.image = shipfile
# enemy = pygame.sprite.Sprite()
# enemy.rect = pygame.Rect(40, 40, 40, 40)
# enemyfile = pygame.image.load('ramsticks.jpg')
# enemy.image = enemyfile
class Enemy():
def __init__(self, x, y, size):
self.x = x
self.y = y
self.size = size
self.self = pygame.sprite.Sprite()
self.rect = pygame.Rect(x, y, size, size)
image = pygame.image.load('ramsticks.jpg')
image = pygame.transform.scale(image, (size, size))
self.image = image
def draw(self, screen):
screen.blit(self.image, (self.x, self.y))
enemyList = []
for i in range(80, 680, 100):
enemyList.append(Enemy(i, 50, 50))
for i in range(30, 730, 100):
enemyList.append(Enemy(i, 150, 50))
for i in range(5, 700, 100):
enemyList.append(Enemy(i, 250, 50))
#End of Character Initilization
projlist = []
class Projectile(object):
def __init__(self, x,y,width):
self.x = x
self.y = y
self.size = width
self.vel = 10
self.self = pygame.sprite.Sprite()
self.rect = pygame.Rect(x, y, width, width)
image = pygame.image.load('projectile.png')
image = pygame.transform.scale(image, (5, 5))
self.image = image
def draw(self, screen):
screen.blit(self.image, (self.x-1, self.y-1))
def collision(self, obj):
if pygame.sprite.collide_rect(self,obj):
return True
return False
theClock = pygame.time.Clock()
background = pygame.image.load('macosx.jpg')
while running:
clock.tick(60)
screen.blit(background, (0,0))
if score >= 20:
screen.blit(winscreen, (0,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
for projectile in projlist[:]:  # iterate over a copy so removal is safe
if projectile.y > 0:
projectile.y -= vel
else:
projlist.remove(projectile)  # remove the projectile that left the screen, not the newest one
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
sys.exit(0)
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT] and x > vel:
x -= vel
if keys[pygame.K_RIGHT] and x < 700 - vel - width:
x += vel
if keys[pygame.K_SPACE]:
projlist.append(Projectile(x, y, 10))
# put collision method here
# for projectile in projlist:
# for enemy in enemyList:
# if Projectile.collision(projectile, Enemy): #NEED TO FIX,
# print('Collision')
screen.blit(ship.image, (x, y))
ship.rect = pygame.Rect(x, y, 40, 40) #(X, Y, H, W) #Change these
draw()
redrawCollision()
redrawCreatures()
pygame.display.update()
scoreRun = True
while scoreRun:
if score >= 20:
file = open("high.txt", "w")
file.write("{} Scored 20 Points and WON!".format(name))
file.close()
scoreRun = False  # record the win once and leave the loop instead of rewriting the file forever
#wrap program in function and make level 2
|
23,258 | 611913a36cc5b791e9e25b7dfe48087d8a72f3e7 | import cv2
import numpy as np
path = r''
img = cv2.imread('a.jpg')
image_to_show = np.copy(img)
videoCapture = cv2.VideoCapture(path)
fps = videoCapture.get(cv2.CAP_PROP_FPS)
size = (int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
videoWriter = cv2.VideoWriter('Output.avi', cv2.VideoWriter_fourcc('I', '4', '2', '0'), fps, size)
success, frame = videoCapture.read()
cv2.namedWindow('image')
i = 0
while True:
cv2.imshow('image', image_to_show)
k = cv2.waitKey(1)
if k == ord('s'): # press 's' to save the image
cv2.imwrite('Anh'+str(i)+'.png', image_to_show)
i = i + 1
elif k == 27 :
break
videoCapture.release()
cv2.destroyAllWindows() |
23,259 | cbeea3db77a83eb77c465597a7da5712936d839e | import requests
import http.client
import json
from bs4 import BeautifulSoup
import re
import random
user_agent_list = [
#Chrome
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
#Firefox
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1)',
'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',
'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 6.2; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)'
]
def header_rotate():
user_agent = random.choice(user_agent_list)
headers = {'User-Agent': user_agent}
return headers
class Crawler:
def trade_spider(self, home_team, away_team):
scores = []
full_time = []
url = 'http://wildstat.com/p/2301/ch/all/club1/'+home_team+'/club2/'+away_team
#url = 'http://wildstat.com/p/2301/ch/all/club1/ENG_Liverpool_FC/club2/ENG_Arsenal_London'
source_code = requests.get(url, headers=header_rotate())
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
for stats in soup.findAll('span', id=re.compile("score_")):
for full_score in stats.findAll('b'):
full_time.append(full_score.text)
return full_time
def get_price(url):
source_code = requests.get(url, headers=header_rotate())
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
for item in soup.findAll('div', {'class': 'price'}):
return item.string
def get_countries(url):
countries = {}
source_code = requests.get(url, headers=header_rotate())
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
for item in soup.find_all('div', {'class': 'dmn-left-g'}):
for country in item.find_all('a', href=True):
countries[re.sub('[^A-Za-z0-9]+', '', item.text)] = re.sub('[^A-Za-z0-9]+', '', country['href'])[1:]
return countries
def get_leagues(url):
leagues = {}
class_list = ['smn-left-g', 'smn-left-w', 'smn-left-gn']
source_code = requests.get(url, headers=header_rotate())
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
for item in soup.find_all('div', class_=class_list):
for league in item.find_all('a', href=True):
leagues[re.sub('[^A-Za-z0-9]+', '', item.text)] = re.sub('[^A-Za-z0-9]+', '', league['href'])[1:]
return leagues
def get_years(url):
years = {}
source_code = requests.get(url, headers=header_rotate())
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
for item in soup.find_all('a', {'class': 'year'}):
years[re.sub('[^A-Za-z0-9]+', '', item.text)] = item['href']
return years
def get_club(url):
clubs = {}
source_code = requests.get(url, headers=header_rotate())
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
for item in soup.find_all('table', {'class': 'championship'}):
for tr in item.find_all('tr'):
for td in tr.find_all('td', {'align': 'left'}):
for a in td.find_all('a', href=True):
if not str.isdigit(a.text) and not re.match("^[0-9.]*$", a.text):
clubs[a.text] = a['href']
return clubs
def get_week(url):
weeks = {}
source_code = requests.get(url, headers=header_rotate())
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
for div in soup.find_all('div', {'class': 'tour'}):
for item in div('select', {'class': 'toursel'}):
for option in item.find_all('option'):
weeks[option.text] = option['value']
return weeks
def get_week_club(url):
clubs = {}
source_code = requests.get(url, headers=header_rotate())
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
for item in soup.find_all('table', {'class': 'championship'}):
for tr in item.find_all('tr'):
for td in tr.find_all('td', {'align': 'left'}):
for a in td.find_all('a', href=True):
if not str.isdigit(a.text) and not re.match("^[0-9.]*$", a.text):
clubs[a.text] = a['href']
return clubs
def get_table(url):
clubs = []
source = requests.get(url, headers=header_rotate())
plain_text = source.text
soup = BeautifulSoup(plain_text, 'html.parser')
for item in soup.find_all('table', {'class': 'championship'}):
for tr in item.find_all('tr'):
data = []
for td in tr.find_all('td'):
if td.text:
data.append(td.text)
if data:
data.pop(0)
if len(data) == 18:
data.pop(7)
clubs.append(data)
break
return clubs[2:]
def f(f_a):
list=f_a.split('-')
return int(list[0])
def a(f_a):
list=f_a.split('-')
return int(list[1])
class FootballAPI:
def get_data(self):
connection = http.client.HTTPConnection('api.football-data.org')
headers = {'X-Auth-Token': 'b62b5ab1be29464ba47452d9fff08380'}
connection.request('GET', '/v2/competitions/2021/matches?season=2018', None, headers)
response = json.loads(connection.getresponse().read().decode())
return response
|
23,260 | 53613b865e03f70db8f9272f0002f33e219fd674 | import unittest
import credit
class TestCheckType(unittest.TestCase):
def test_amex_cards(self):
self.assertEqual(credit.check_type(378282246310005), 1)
self.assertEqual(credit.check_type(371449635398431), 1)
self.assertEqual(credit.check_type(378734493671000), 1)
def test_visa_cards(self):
self.assertEqual(credit.check_type(4111111111111111), 3)
self.assertEqual(credit.check_type(4012888888881881), 3)
self.assertEqual(credit.check_type(4222222222222), 3)
def test_mastercard_cards(self):
# self.assertEqual(credit.check_type(2223000048400011), 2)
self.assertEqual(credit.check_type(5105105105105100), 2)
self.assertEqual(credit.check_type(5555555555554444), 2)
class TestIsValid(unittest.TestCase):
def test_valid_cards(self):
self.assertEqual(credit.is_valid(378282246310005), True)
self.assertEqual(credit.is_valid(378282246310005), True)
self.assertEqual(credit.is_valid(371449635398431), True)
self.assertEqual(credit.is_valid(378734493671000), True)
self.assertEqual(credit.is_valid(4111111111111111), True)
self.assertEqual(credit.is_valid(4012888888881881), True)
self.assertEqual(credit.is_valid(4222222222222), True)
self.assertEqual(credit.is_valid(2223000048400011), True)
self.assertEqual(credit.is_valid(2223016768739313), True)
self.assertEqual(credit.is_valid(5555555555554444), True)
class TestSumDigits(unittest.TestCase):
def test(self):
self.assertEqual(credit.sum_digits(10), 1)
self.assertEqual(credit.sum_digits(5555555555554444), 76)
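# A minimal sketch (not the real credit module) of a Luhn check consistent with the
# is_valid tests above; credit.is_valid may be implemented differently:
# def luhn_valid(number):
#     digits = [int(d) for d in str(number)][::-1]   # rightmost digit first
#     total = sum(digits[0::2])                       # digits that are not doubled
#     for d in digits[1::2]:                          # every second digit from the right
#         total += sum(divmod(d * 2, 10))             # add the digits of the doubled value
#     return total % 10 == 0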
if __name__ == "__main__":
unittest.main()
|
23,261 | 4b0521e5a139e60a8e29dc077d3ae3f2d49eb36a | # galera = [['João', 19], ['Joaquim', 13], ['Ana', 33], ['Maria', 45]]
# for pessoa in galera:
# print(pessoa[0], 'is', pessoa[1], 'years old')
galera = []
dado = []
for c in range(0, 2):
dado.append(str(input('Name: ')))
dado.append(int(input('Age: ')))
galera.append(dado[:])
dado.clear()
print(galera)
|
23,262 | 378e5310dfe51d13ccce7d02735c5814bd743da7 | #!/usr/bin/python
from scapy.all import *
from random import *
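# This handler fakes the remote end of a TCP conversation for sniffed traffic (except
# packets whose source port is 2222, per the whitelist below): it answers SYN with
# SYN-ACK, acknowledges PSH-ACK payloads, and answers FIN-ACK with FIN-ACK, so a
# scanner sees every port as open.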
def watch_for_it(pkt):
# Port whitelist (SSH)
if TCP in pkt and pkt[TCP].sport == 2222:
return
# SYN pkt
elif TCP in pkt and pkt[TCP].flags == 2:
seqnum = randint(1, 4294967295)
p=IP(dst=pkt[IP].src)/TCP(dport=pkt[TCP].sport, sport=pkt[TCP].dport, ack=pkt[TCP].seq+1, seq=seqnum, flags="SA")
return send(p)
# PSHACK
elif TCP in pkt and pkt[TCP].flags == 24:
p=IP(dst=pkt[IP].src)/TCP(dport=pkt[TCP].sport, sport=pkt[TCP].dport, ack=pkt[TCP].seq+len(pkt[Raw].load), seq=pkt[TCP].ack, flags="A")
return send(p)
# FINACK
elif TCP in pkt and pkt[TCP].flags == 17:
p=IP(dst=pkt[IP].src)/TCP(dport=pkt[TCP].sport, sport=pkt[TCP].dport, ack=pkt[TCP].seq+1, seq=pkt[TCP].ack, flags="FA")
return send(p)
else:
return
sniff(iface="eth0", count=0, prn=watch_for_it)
|
23,263 | c6dfe17e68ac087ba16d4f3d4f619f73698b759d | import datetime
import pymongo
from bson import ObjectId
from django.contrib.auth.hashers import make_password, check_password
from .link_scaper import link_scraping
from database_connection.helpers.collections import user_entity, test_scores, question_bank
def upload_student_details(request):
try:
request.data['password'] = create_sha256_password(request.data['password'])
user_entity.insert(request.data)
return {'msg': 'entity data uploaded', 'status': True}
except:
return {'msg': 'unable to upload details, please try again', 'status': False}
def student_login(request):
entity_object = user_entity.find_one({'login_id': request.GET.get('login_id')})
if entity_object:
bool_value = check_password(request.GET.get('password'), entity_object['password'])
if bool_value is True:
return {'msg': 'login successful', 'status': True}
else:
return {'msg': 'login id or password is wrong', 'status': False}
else:
return {'msg': 'entity does not exist', 'status': False}
def upload_student_answers(request):
try:
entity_object = user_entity.find_one({'login_id': request.data['student_id']})
for question_object in request.data['question_level_marks_list']:
question_bank_object = question_bank.find_one({'_id': ObjectId(question_object['question_id'])})
question_object['total'] = int(question_bank_object['marks'])
question_object['topic_id'] = question_bank_object['topic_id']
test_scores.insert({'student_id': entity_object['_id'],
'total_marks_obtained': request.data['total_marks_obtained'],
'question_level_marks_list': request.data['question_level_marks_list'],
'date_of_test': datetime.datetime.now()})
return {'msg': 'test details uploaded successfully', 'status': True}
except:
return {'msg': 'could not upload test details', 'status': False}
def previous_scores(request):
previous_test_scores_list = []
try:
user_entity_object = user_entity.find_one({'login_id': request.GET.get('student_id')})
previous_scores_object = test_scores.find({'student_id': user_entity_object['_id']},
{'question_level_marks_list.topic_id': 0}).sort([
('date_of_test', pymongo.DESCENDING)])
for previous_test_scores in previous_scores_object:
previous_test_scores['student_id'] = str(previous_test_scores['student_id'])
previous_test_scores['_id'] = str(previous_test_scores['_id'])
previous_test_scores['date_of_test'] = str(previous_test_scores['date_of_test'].day) + '/' + \
str(previous_test_scores['date_of_test'].month) + '/' + \
str(previous_test_scores['date_of_test'].year)
for answers in previous_test_scores['question_level_marks_list']:
question_bank_object = question_bank.find_one({'_id': ObjectId(answers['question_id'])})
answers['solution'] = question_bank_object['solution']
answers['keywords'] = "Keywords to include "+str(question_bank_object['keywords'])
answers['question'] = question_bank_object['question_description']
link_list = link_scraping(answers['question'])
answers['list_of_links'] = link_list
previous_test_scores_list.append(previous_test_scores)
return {'msg': 'previous test scores', 'status': True, 'list': previous_test_scores_list}
except:
return {'msg': 'unable to get test scores', 'status': False}
def create_sha256_password(password):
password = make_password(password)
return password
|
23,264 | 949ce12df48cb6a14cb2204e8d47c3d3fa93087c | # _*_ encoding: utf-8 _*_
__author__ = 'Vincent'
__date__ = '2019/3/13 15:22'
import tensorflow as tf
data1 = tf.constant(2.5)
data3 = tf.constant(2,dtype=tf.int32)
data2 = tf.Variable(10, name='var')
print(data1) # Tensor("Const:0", shape=(), dtype=float32)
print(data2) # <tf.Variable 'var:0' shape=() dtype=int32_ref>
sess = tf.Session()
print(sess.run(data1))
# In TensorFlow, variable initialization must also be run inside the session
init = tf.global_variables_initializer()
sess.run(init)
print(sess.run(data2))
print(sess.run(data3))
sess.close()
# Essentially, tf = tensor + computation graph
# tensor: the data
# operation: the four arithmetic operations (+ - * /), assignment, and other ops
# computation graph (graphs): the process of operating on the data
# Session: the execution environment
# The session must be closed after use, or managed with the 'with' keyword
'''
# How to use the 'with' keyword:
init = tf.global_variables_initializer()
sess.run(init)
with sess:
print(sess.run(data1))
'''
|
23,265 | 9239b039f6cccad164f42eac5c09618060af11af | from conans import python_requires
common = python_requires('conan_common_recipes/0.0.8@Manu343726/testing')
class UnittestConan(common.CMakePackage):
name = 'unittest'
version = '0.0.0'
license = 'MIT'
requires = ('tinyrefl/0.4.1@Manu343726/testing',
'ctti/0.0.2@Manu343726/testing',
'fmt/5.3.0@bincrafters/stable',
'CTRE/v2.4@ctre/stable',
'backward/1.3.1@Manu343726/stable',
'elfspy/master@Manu343726/testing')
build_requires = 'tinyrefl-tool/0.4.1@Manu343726/testing'
generators = 'cmake'
|
23,266 | 0df25bccc7160cb2dc771409b9677ab4669d3a89 | # weather_stn_data.py - Used by extract_numbers.py, get_info_from_mshr.py, get_past_observations_cdo.py, get_daily_normals_cdo.py
# The location of each of the following numbers in the image corresponds to a city (or weather station) in the continental U.S.
# Each (row, col) is the lower-left pixel of the first character of the number, assuming the number is 2 characters and positive, e.g. 12 or 63 (not 1, 101, -1, -23).
# The number in the image is the "weather number", e.g. the max temperature prediction.
# 0-based row=y, column=x (origin=lower-left); each character is a 13-row x 9-column pixel array.
# Data Science class project - General Assembly - Seattle - SEA-DAT1 (10/27/15 - 1/21/16)
# Developed by Bruce Aker 1/11/16 - 1/21/16
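# A minimal sketch (not part of the original tooling) of how a consumer such as
# extract_numbers.py might use one entry below, assuming the screenshot is loaded into a
# pixel array `img` indexed with this file's lower-left-origin convention:
#   entry = stations[0]                          # `stations` is a hypothetical name for the list below
#   r, c = entry['row'], entry['col']            # lower-left pixel of the first digit
#   first_char = img[r:r + 13, c:c + 9]          # one 13-row x 9-column character cell
#   second_char = img[r:r + 13, c + 9:c + 18]    # next character of a 2-digit number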
[{'icao_code':'KABE','row':494,'col':739,'stn_id_cdo':'GHCND:USW00014737','state':'PA','weather_station':'ALLENTOWN INTL AP'},
{'icao_code':'KABQ','row':275,'col':299,'stn_id_cdo':'GHCND:USW00023050','state':'NM','weather_station':'ALBUQUERQUE INTL AP'},
{'icao_code':'KACV','row':437,'col': 54,'stn_id_cdo':'GHCND:USW00024283','state':'CA','weather_station':'ARCATA EUREKA AP'},
{'icao_code':'KACY','row':480,'col':762,'stn_id_cdo':'GHCND:USW00093730','state':'NJ','weather_station':'ATLANTIC CITY INTL AP'},
{'icao_code':'KALB','row':540,'col':739,'stn_id_cdo':'GHCND:USW00014735','state':'NY','weather_station':'ALBANY AP'},
{'icao_code':'KANJ','row':541,'col':582,'stn_id_cdo':'GHCND:USW00014847','state':'MI','weather_station':'SAULT STE MARIE SANDERSON FLD'},
{'icao_code':'KATL','row':308,'col':669,'stn_id_cdo':'GHCND:USW00013874','state':'GA','weather_station':'ATLANTA HARTSFIELD INTL AP'},
{'icao_code':'KBCE','row':335,'col':215,'stn_id_cdo':'GHCND:USW00023159','state':'UT','weather_station':'BRYCE CANYON AP'},
{'icao_code':'KBGR','row':610,'col':774,'stn_id_cdo':'GHCND:USW00014606','state':'ME','weather_station':'BANGOR INTL AP'},
{'icao_code':'KBHM','row':294,'col':632,'stn_id_cdo':'GHCND:USW00013876','state':'AL','weather_station':'BIRMINGHAM AP'},
{'icao_code':'KBIL','row':484,'col':281,'stn_id_cdo':'GHCND:USW00024033','state':'MT','weather_station':'BILLINGS INTL AP'}, #wfo=byz
{'icao_code':'KBIS','row':502,'col':380,'stn_id_cdo':'GHCND:USW00024011','state':'ND','weather_station':'BISMARCK MUNI AP'},
{'icao_code':'KBNA','row':343,'col':617,'stn_id_cdo':'GHCND:USW00013897','state':'TN','weather_station':'NASHVILLE INTL AP'},
{'icao_code':'KBNO','row':464,'col':138,'stn_id_cdo':'GHCND:USW00094185','state':'OR','weather_station':'BURNS MUNI AP'},
{'icao_code':'KBOI','row':456,'col':174,'stn_id_cdo':'GHCND:USW00024131','state':'ID','weather_station':'BOISE AIR TERMINAL'}, #wfo=boi
{'icao_code':'KBOS','row':555,'col':775,'stn_id_cdo':'GHCND:USW00014739','state':'MA','weather_station':'BOSTON LOGAN INTL AP'},
{'icao_code':'KBPI','row':425,'col':255,'stn_id_cdo':'GHCND:USW00024164','state':'WY','weather_station':'BIG PINEY MARBLETON AP'},
{'icao_code':'KBTV','row':573,'col':729,'stn_id_cdo':'GHCND:USW00014742','state':'VT','weather_station':'BURLINGTON INTL AP'},
{'icao_code':'KBUF','row':510,'col':677,'stn_id_cdo':'GHCND:USW00014733','state':'NY','weather_station':'BUFFALO NIAGARA INTL AP'},
{'icao_code':'KCAE','row':335,'col':719,'stn_id_cdo':'GHCND:USW00013883','state':'SC','weather_station':'COLUMBIA METRO AP'},
{'icao_code':'KCLE','row':466,'col':649,'stn_id_cdo':'GHCND:USW00014820','state':'OH','weather_station':'CLEVELAND HOPKINS INTL AP'},
{'icao_code':'KCMH','row':435,'col':646,'stn_id_cdo':'GHCND:USW00014821','state':'OH','weather_station':'COLUMBUS PORT COLUMBUS INTL AP'},
{'icao_code':'KCOS','row':350,'col':330,'stn_id_cdo':'GHCND:USW00093037','state':'CO','weather_station':'COLORADO SPRINGS MUNI AP'}, #wfo=pub
{'icao_code':'KCOU','row':356,'col':517,'stn_id_cdo':'GHCND:USW00003945','state':'MO','weather_station':'COLUMBIA RGNL AP'},
{'icao_code':'KCPR','row':429,'col':306,'stn_id_cdo':'GHCND:USW00024089','state':'WY','weather_station':'Casper, Natrona County International Airport'},
{'icao_code':'KCRE','row':350,'col':756,'stn_id_cdo':'GHCND:USW00093718','state':'SC','weather_station':'N MYRTLE BCH AP'},
{'icao_code':'KCRP','row':132,'col':470,'stn_id_cdo':'GHCND:USW00012924','state':'TX','weather_station':'CORPUS CHRISTI INTL AP'},
{'icao_code':'KCRW','row':413,'col':676,'stn_id_cdo':'GHCND:USW00013866','state':'WV','weather_station':'CHARLESTON YEAGER AP'},
{'icao_code':'KDCA','row':452,'col':735,'stn_id_cdo':'GHCND:USW00013743','state':'VA','weather_station':'WASHINGTON REAGAN AP'},
{'icao_code':'KDFW','row':240,'col':463,'stn_id_cdo':'GHCND:USW00003927','state':'TX','weather_station':'Dallas-Fort Worth WSCMO AP'},
{'icao_code':'KDSM','row':418,'col':487,'stn_id_cdo':'GHCND:USW00014933','state':'IA','weather_station':'DES MOINES INTL AP'},
{'icao_code':'KEYW','row':146,'col':788,'stn_id_cdo':'GHCND:USW00012836','state':'FL','weather_station':'KEY WEST INTL AP'},
{'icao_code':'KFAR','row':510,'col':429,'stn_id_cdo':'GHCND:USW00014914','state':'ND','weather_station':'Fargo, Hector International Airport'}, #WFO=FGF, Grand Forks, ND
{'icao_code':'KFAT','row':339,'col': 94,'stn_id_cdo':'GHCND:USW00093193','state':'CA','weather_station':'Fresno Air Terminal'}, #wfo=hnx
{'icao_code':'KFLG','row':283,'col':216,'stn_id_cdo':'GHCND:USW00003103','state':'AZ','weather_station':'FLAGSTAFF PULLIAM AP'},
{'icao_code':'KFMY','row':187,'col':768,'stn_id_cdo':'GHCND:USW00012835','state':'FL','weather_station':'FT MYERS PAGE FLD AP'},
{'icao_code':'KFSD','row':449,'col':438,'stn_id_cdo':'GHCND:USW00014944','state':'SD','weather_station':'SIOUX FALLS FOSS FLD'},
{'icao_code':'KFST','row':190,'col':364,'stn_id_cdo':'GHCND:USW00023091','state':'TX','weather_station':'FT STOCKTON PECOS AP'},
{'icao_code':'KGEG','row':533,'col':173,'stn_id_cdo':'GHCND:USW00024157','state':'WA','weather_station':'SPOKANE INTL AP'}, #wfo=otx
{'icao_code':'KGGW','row':527,'col':306,'stn_id_cdo':'GHCND:USW00094008','state':'MT','weather_station':'GLASGOW INTL AP'},
{'icao_code':'KGRB','row':491,'col':548,'stn_id_cdo':'GHCND:USW00014898','state':'WI','weather_station':'GREEN BAY A S INTL AP'},
{'icao_code':'KGRR','row':473,'col':591,'stn_id_cdo':'GHCND:USW00094860','state':'MI','weather_station':'GRAND RAPIDS INTL AP'},
{'icao_code':'KGSO','row':383,'col':718,'stn_id_cdo':'GHCND:USW00013723','state':'NC','weather_station':'PIEDMONT TRIAD INTL AP'},
{'icao_code':'KHLN','row':502,'col':238,'stn_id_cdo':'GHCND:USW00024144','state':'MT','weather_station':'HELENA RGNL AP'}, #wfo=tfx
{'icao_code':'KIAH','row':185,'col':502,'stn_id_cdo':'GHCND:USW00012960','state':'TX','weather_station':'HOUSTON INTERCONT AP'},
{'icao_code':'KICT','row':334,'col':444,'stn_id_cdo':'GHCND:USW00003928','state':'KS','weather_station':'WICHITA DWIGHT D EISENHOWER NA'},
{'icao_code':'KINL','row':547,'col':465,'stn_id_cdo':'GHCND:USW00014918','state':'MN','weather_station':'International Falls Airport'},
{'icao_code':'KJAN','row':253,'col':584,'stn_id_cdo':'GHCND:USW00003940','state':'MS','weather_station':'JACKSON INTL AP'},
{'icao_code':'KJAX','row':266,'col':738,'stn_id_cdo':'GHCND:USW00013889','state':'FL','weather_station':'JACKSONVILLE INTL AP'},
{'icao_code':'KLAS','row':310,'col':163,'stn_id_cdo':'GHCND:USW00023169','state':'NV','weather_station':'LAS VEGAS MCCARRAN AP'},
{'icao_code':'KLAX','row':278,'col':101,'stn_id_cdo':'GHCND:USW00023174','state':'CA','weather_station':'Los Angeles International Airport'}, #wfo=lox
{'icao_code':'KLBB','row':248,'col':380,'stn_id_cdo':'GHCND:USW00023042','state':'TX','weather_station':'LUBBOCK INTL AP'},
{'icao_code':'KLBF','row':397,'col':388,'stn_id_cdo':'GHCND:USW00024023','state':'NE','weather_station':'N PLATTE RGNL AP'},
{'icao_code':'KLEX','row':390,'col':636,'stn_id_cdo':'GHCND:USW00093820','state':'KY','weather_station':'LEXINGTON BLUEGRASS AP'},
{'icao_code':'KLIT','row':292,'col':537,'stn_id_cdo':'GHCND:USW00013963','state':'AR','weather_station':'LITTLE ROCK AP ADAMS FLD'},
{'icao_code':'KMCI','row':373,'col':480,'stn_id_cdo':'GHCND:USW00003947','state':'MO','weather_station':'KANSAS CITY INTL AP'},
{'icao_code':'KMCO','row':228,'col':762,'stn_id_cdo':'GHCND:USW00012815','state':'FL','weather_station':'ORLANDO INTL AP'},
{'icao_code':'KMEM','row':307,'col':571,'stn_id_cdo':'GHCND:USW00013893','state':'TN','weather_station':'MEMPHIS INTL AP'},
{'icao_code':'KMIA','row':184,'col':804,'stn_id_cdo':'GHCND:USW00012839','state':'FL','weather_station':'MIAMI INTL AP'},
{'icao_code':'KMIE','row':426,'col':610,'stn_id_cdo':'GHCND:USW00094895','state':'IN','weather_station':'MUNCIE DELAWARE CO AP'},
{'icao_code':'KMLI','row':427,'col':531,'stn_id_cdo':'GHCND:USW00014923','state':'IL','weather_station':'MOLINE QUAD CITY INTL AP'},
{'icao_code':'KMOB','row':229,'col':625,'stn_id_cdo':'GHCND:USW00013894','state':'AL','weather_station':'MOBILE RGNL AP'},
{'icao_code':'KMSY','row':205,'col':594,'stn_id_cdo':'GHCND:USW00012916','state':'LA','weather_station':'NEW ORLEANS INTL AP'},
{'icao_code':'KOKC','row':290,'col':447,'stn_id_cdo':'GHCND:USW00013967','state':'OK','weather_station':'OKLAHOMA CITY WILL ROGERS AP'},
{'icao_code':'KORF','row':424,'col':765,'stn_id_cdo':'GHCND:USW00013737','state':'VA','weather_station':'NORFOLK INTL AP'},
{'icao_code':'KP60','row':462,'col':254,'stn_id_cdo':'GHCND:USW00094173','state':'WY','weather_station':'Yellowstone Lake'},
{'icao_code':'KPDX','row':513,'col':101,'stn_id_cdo':'GHCND:USW00024229','state':'OR','weather_station':'Portland International Airport'},
{'icao_code':'KPHX','row':249,'col':206,'stn_id_cdo':'GHCND:USW00023183','state':'AZ','weather_station':'Phoenix Sky Harbor International Airport'}, #wfo=psr
{'icao_code':'KPOU','row':521,'col':749,'stn_id_cdo':'GHCND:USW00014757','state':'NY','weather_station':'POUGHKEEPSIE DUTCHESS CO AP'},
{'icao_code':'KPWM','row':581,'col':770,'stn_id_cdo':'GHCND:USW00014764','state':'ME','weather_station':'PORTLAND INTL JETPORT'},
{'icao_code':'KRAP','row':450,'col':352,'stn_id_cdo':'GHCND:USW00024090','state':'SD','weather_station':'RAPID CITY REGIONAL AP'},
{'icao_code':'KRDD','row':420,'col': 76,'stn_id_cdo':'GHCND:USW00024257','state':'CA','weather_station':'REDDING MUNI AP'}, #wfo=sto
{'icao_code':'KRNO','row':391,'col':107,'stn_id_cdo':'GHCND:USW00023185','state':'NV','weather_station':'RENO TAHOE INTL AP'}, #wfo=rev
{'icao_code':'KROA','row':405,'col':708,'stn_id_cdo':'GHCND:USW00013741','state':'VA','weather_station':'ROANOKE RGNL AP'},
{'icao_code':'KRST','row':466,'col':494,'stn_id_cdo':'GHCND:USW00014925','state':'MN','weather_station':'ROCHESTER INTL AP'},
{'icao_code':'KSAN','row':250,'col':116,'stn_id_cdo':'GHCND:USW00023188','state':'CA','weather_station':'SAN DIEGO LINDBERGH FLD'}, #wfo=sgx
{'icao_code':'KSAT','row':167,'col':447,'stn_id_cdo':'GHCND:USW00012921','state':'TX','weather_station':'SAN ANTONIO INTL AP'},
{'icao_code':'KSAV','row':301,'col':732,'stn_id_cdo':'GHCND:USW00003822','state':'GA','weather_station':'SAVANNAH INTL AP'},
{'icao_code':'KSEA','row':545,'col':115,'stn_id_cdo':'GHCND:USW00024233','state':'WA','weather_station':'Seattle Tacoma Airport'}, #WFO=SEW, Sandpoint, Seattle, WA
{'icao_code':'KSHV','row':241,'col':520,'stn_id_cdo':'GHCND:USW00013957','state':'LA','weather_station':'SHREVEPORT RGNL AP'},
{'icao_code':'KSLC','row':394,'col':225,'stn_id_cdo':'GHCND:USW00024127','state':'UT','weather_station':'SALT LAKE CITY INTL AP'}, #wfo=slc
{'icao_code':'KSPI','row':400,'col':551,'stn_id_cdo':'GHCND:USW00093822','state':'IL','weather_station':'SPRINGFIELD CAPITAL AP'},
{'icao_code':'KTAD','row':319,'col':337,'stn_id_cdo':'GHCND:USW00023070','state':'CO','weather_station':'TRINIDAD PERRY STOKES AP'},
{'icao_code':'KTPA','row':210,'col':745,'stn_id_cdo':'GHCND:USW00012842','state':'FL','weather_station':'TAMPA INTL AP'},
{'icao_code':'KTUS','row':220,'col':222,'stn_id_cdo':'GHCND:USW00023160','state':'AZ','weather_station':'TUCSON INTL AP'},
{'icao_code':'KUIL','row':562,'col': 92,'stn_id_cdo':'GHCND:USW00094240','state':'WA','weather_station':'Quillayute State Airport'}, #WFO=SEW
{'icao_code':'KWMC','row':411,'col':142,'stn_id_cdo':'GHCND:USW00024128','state':'NV','weather_station':'WINNEMUCCA MUNI AP'}, #wfo=lkn
{'icao_code':'KYKM','row':523,'col':132,'stn_id_cdo':'GHCND:USW00024243','state':'WA','weather_station':'Yakima Air Terminal'}] #WFO=PDT, Pendleton, OR
# {'icao_code':'K','row':000,'col':000,'stn_id_cdo':'GHCND:USW000','state':'','weather_station':''},
|
23,267 | e382fb2d6205558e18199e283bd271b895bedd6e | import json
import os
from collections import defaultdict
wd = os.path.dirname(os.getcwd())
class_path = os.path.join(wd, 'model_data', 'coco_classes.txt') # change to the classes path you want to detect
is_train = 1 # whether train dataset or valid dataset
if is_train:
image_dir = '/media/data1/datasets/coco/train2017' # your train image dir
annotation_file = '/media/data1/datasets/coco/annotations/instances_train2017.json' # your train image annotation dir
gen_files = 'train.txt'
else:
image_dir = '/media/data1/datasets/coco/val2017' # your val image dir
annotation_file = '/media/data1/datasets/coco/annotations/instances_val2017.json' # your val image annotation dir
gen_files = 'valid.txt'
name_box_id = defaultdict(list)
id_name = dict()
with open(class_path) as f:
class_names = f.readlines()
classes = [c.strip() for c in class_names]
list_file = open(os.path.join(wd, 'model_data', gen_files), 'w')
with open(annotation_file) as f:
data = json.load(f)
annotations = data['annotations']
for ant in annotations:
image_id = ant['image_id']
image_path = os.path.join(image_dir, '%012d.jpg' % image_id)
cat = ant['category_id']
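# The if/elif chain below remaps COCO's sparse category ids (1-90, with unused gaps)
# onto a contiguous 0-79 index range matching the order of the class list.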
if 1 <= cat <= 11:
cat -= 1
elif 13 <= cat <= 25:
cat -= 2
elif 27 <= cat <= 28:
cat -= 3
elif 31 <= cat <= 44:
cat -= 5
elif 46 <= cat <= 65:
cat -= 6
elif cat == 67:
cat -= 7
elif cat == 70:
cat -= 9
elif 72 <= cat <= 82:
cat -= 10
elif 84 <= cat <= 90:
cat -= 11
name_box_id[image_path].append([ant['bbox'], cat])
for key, box_infos in name_box_id.items():
list_file.write(key)
for info in box_infos:
x_min = int(info[0][0])
y_min = int(info[0][1])
x_max = x_min + int(info[0][2])
y_max = y_min + int(info[0][3])
box_info = " %d,%d,%d,%d,%d" % (x_min, y_min, x_max, y_max, int(info[1]))
list_file.write(box_info)
list_file.write('\n')
list_file.close()
# list_file_val.close()
# clean dataset: drop any image lines that ended up with no bounding boxes
with open(os.path.join(wd, 'model_data', gen_files), 'r') as f1:
old_line = f1.readlines()
with open(os.path.join(wd, 'model_data', gen_files), 'w') as f2:
for line in old_line:
line_ = line.split(' ')
if len(line_) > 1:
f2.write(line)
|
23,268 | 441fbb30e0c00829084378ba76c9893fcd6b086f | #coding: utf-8
"""
Copyright (C) 2008-2011 EdenWall Technologies
Written by Michael Scherer <m.scherer AT inl.fr>
Feth Arezki <farezki AT edenwall.com>
Pierre-Louis Bonicoli <plbonicoli AT edenwall.com>
François Toussenel <ftoussenel AT edenwall.com>
Victor Stinner <vstinner AT edenwall.com>
This file is part of NuFirewall.
NuFirewall is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
NuFirewall is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with NuFirewall. If not, see <http://www.gnu.org/licenses/>
"""
from __future__ import with_statement
from errno import EACCES
from glob import glob
from nnd.client import Client as NNDClient
from os import chmod, close, mkdir, unlink, write
from os.path import exists, join
from shutil import copy, move
from subprocess import PIPE
from tempfile import mkstemp, NamedTemporaryFile
from time import sleep
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.threads import deferToThread
from twisted.python.failure import Failure
import re
from ufwi_rpcd.backend.exceptions import ConfigError
from ufwi_rpcd.backend import tr
from ufwi_rpcd.backend.process import runCommand, is_enabled_in_runit
from ufwi_rpcd.common.download import decodeFileContent
from ufwi_rpcd.common.error import exceptionAsUnicode, reraise
from ufwi_rpcd.common import EDENWALL
from ufwi_rpcd.common.radius_client import RadiusServer
from ufwi_rpcd.core.config.manager import modify_ext_conf
from ufwi_rpcd.core.context import Context
from ufwi_conf.backend.dnsutils import boolean_dig
from ufwi_conf.backend.nnd_instance import NND_SOCKET
from ufwi_conf.backend.net_ads import (
ad_info,
net_ads_keytab_command,
net_ads_testjoin,
)
from ufwi_conf.backend.unix_service import (
ConfigServiceComponent,
fix_strict_perms,
runCommandAndCheck,
RunCommandError,
)
from ufwi_conf.common.ha_statuses import ENOHA, SECONDARY, PRIMARY
from ufwi_conf.common.netcfg import deserializeNetCfg
from ufwi_conf.common.user_dir import AD, KERBEROS, KERBEROS_AD, \
LDAP, NND, NuauthCfg, RADIUS, SAME, NOT_CONFIGURED
from ufwi_conf.common.user_dir.protocols import getNetBiosName
from .ad_join import check_join, configureLocalSid, joinAd, saveLocalSid, \
sambaRuntimeFiles, sambaRuntimeFilesModified
from .error import NO_CACHE_DIR, NuauthException, \
NUAUTH_INVALID_CONF, UNABLE_TO_FETCH_KEYTAB, NNDError
from .kerberos import parseKeytab
from .ldapsearch_return_codes import LDAPSEARCH_RETURN_CODES
from .tests import canconnectnnd, test_user, test_group, test_auth
MASTER_KEY = 'nuauth' # For config_manager.
_RADIUS_CONF = '/etc/pam_radius_auth.conf'
_PAM_NUAUTH_CONF = '/etc/pam.d/nuauth'
_SASL_NUAUTH_CONF = '/etc/sasl/nuauth.conf'
_NUAUTH_KRB5 = '/etc/nufw/nuauth.d/nuauth_krb5.conf'
_NUAUTH_USER_DIR_CONF = '/etc/nufw/nuauth.d/nuauth_user_dir.conf'
#Automatic fetching of keytab produces this file
_AD_KEYTAB = '/etc/krb5.keytab'
_NSSWITCH_CONF = '/etc/nsswitch.conf'
_SMB_CONF='/etc/samba/smb.conf'
_PAM_LDAP_CONF = '/etc/pam_ldap.conf'
_NSS_LDAP = '/etc/libnss-ldap.conf'
KRB5_KEYTAB_FILENAME = 'nuauth.keytab'
CACHE_DIR = '/var/cache/ufwi_rpcd'
ETC_DIR = '/etc/nufw'
CACHE_KRB5_KEYTAB_FILENAME = join(CACHE_DIR, KRB5_KEYTAB_FILENAME)
KRB5_KEYTAB_FILENAME = join(ETC_DIR, KRB5_KEYTAB_FILENAME)
_NND_EXE = "/usr/bin/nufw-nss-daemon"
_NND_CONF = "/etc/nnd/nnd.conf"
_NND_CA_DIR = "/etc/nnd/certs"
_CANAMEFORMAT = "CA_cert_%s.pem"
_NND_CONFCHECK_CMD = (_NND_EXE, "--testconf")
_NND_UP_TIMEOUT = 15 #seconds per server
LDAP_CERT_FILE = '/var/lib/ufwi_rpcd/ldap_server_cert'
TEST_LDAPCONF = '/var/lib/ufwi_rpcd/test_ldap/ldap.conf'
_LDAP_CONF = '/etc/ldap/ldap.conf'
LDAPSEARCH_BIN = '/usr/bin/ldapsearch'
_PAMTYPE = {
AD: 'winbind',
NND: 'ldap',
LDAP: 'ldap',
RADIUS: 'radius_auth',
KERBEROS: 'krb5',
KERBEROS_AD: 'winbind',
NOT_CONFIGURED : ''
}
_LOG_NSS = '[NSS] '
_LOG_PAM = '[PAM] '
tech2user = {
AD: "Active Directory",
NND: "Generic LDAP",
LDAP: "Posix LDAP",
RADIUS: "Radius",
KERBEROS: "Kerberos",
KERBEROS_AD: "Active Directory with Kerberos support",
NOT_CONFIGURED : "No Directory"
}
_INITIALIZING_GROUP_MAPPING_MSG = tr(
"Initializing group mapping with a request on '%(USERNAME)s' info"
)
def _nsswitch_compat(_type):
if _type == AD:
return 'winbind'
if _type == LDAP:
return 'ldap'
return ''
def _cleandir(directory, globexpr):
if exists(directory):
for filename in glob(join(directory, globexpr)):
unlink(filename)
else:
mkdir(directory)
def _gennames(prefix, base, number):
"""
_gennames("domain", 1, 4)
-> 'domain1'
-> 'domain2'
-> 'domain3'
-> 'domain4'
"""
for index in xrange(number):
yield "%s%d" % (prefix, base + index)
def _nnddomainlabels(domains):
return ",".join(
_gennames("domain", 1, len(domains))
)
def _nndserverlabels(domain_index, domain):
return",".join(
_gennames(
"server%s." % domain_index,
1,
len(domain.servers)
)
)
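# e.g. for the second domain with two servers, _nndserverlabels(2, domain) -> "server2.1,server2.2"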
def _nndsocketexists():
if not exists(NND_SOCKET):
raise NNDError("Socket does not exist: %s" % NND_SOCKET)
return "ok"
class NuauthComponent(ConfigServiceComponent):
"""
- services of ha & hostname:
are called when determining netbios name during apply
- service of resolv :
nameserver1 is used during apply when trying to resolv
kerberos names
- nuauth calls ha.getHAMode() but it works if the component
is not loaded
"""
NAME = MASTER_KEY
VERSION = "1.0"
REQUIRES = ('hostname', 'resolv')
CONFIG = {}
CONFIG_DEPENDS = ('nuauth_bind',)
ACLS = {
'bind': set(('addForwarderDomain',)),
'ha': set(('getHAMode', 'syncTime')),
'hostname': set(('getPrimaryHostname',)),
'network': set(('getNetconfig',)),
'nuauth': set(('status','start')),
'resolv': set(('addDomainNameServer', 'booleanQuery')),
'nupki': set(('getPrivateCertPath', 'getCertPath', 'getCACertPath')),
'system': set(('setUseNND',)),
'ufwi_ruleset': set(('setConfig',)),
# dummy acl just to be able to check that auth_cert component is loaded
# (see _ufwi_rpcdStarted() method)
'auth_cert': set(('status',)),
}
ROLES = {
'conf_read': frozenset((
'getNuauthConfig',
'availableModules',
'test_user',
'test_group',
'test_auth',
'ad_info',
)),
'conf_write': frozenset((
'setNuauthConfig',
'upload_krb_keytab',
'upload_ldap_server_cert',
'delete_ldap_server_cert',
'testLDAP',
'testRADIUS'
)),
'multisite_read': set((
'status',
)),
}
check_ldap_ip = ConfigServiceComponent.check_ip
check_ad_ip = ConfigServiceComponent.check_ip
check_ad_domain = ConfigServiceComponent.check_domain
check_ad_dns_domain = ConfigServiceComponent.check_domain
def __init__(self):
ConfigServiceComponent.__init__(self)
self.__during_ha_import = False
self.__config_before_ha_import = None
self.config = None
self.adstatus = False
self.last_ad_info = {}
self.has_auth_cert = False
self.ad_status_update_time = "Not updated yet"
def init(self, core):
ConfigServiceComponent.init(self, core)
try:
if not exists('/etc/sasl'):
mkdir('/etc/sasl')
chmod('/etc/sasl', 0755)
except OSError, err:
if err.errno == EACCES:
self.critical("SASL trouble ahead: %s" % err)
else:
raise
# auth
self.addConfFile(_PAM_NUAUTH_CONF, 'root:root', '0644')
self.addConfFile(_PAM_LDAP_CONF, 'root:nuauth', '0644')
self.addConfFile('/etc/krb5.conf', 'root:root', '0644')
self.addConfFile(_SMB_CONF, 'root:root', '0644')
## radius
self.addConfFile(_RADIUS_CONF, 'nuauth:nuauth', '0600')
## kerberos
self.addConfFile(_SASL_NUAUTH_CONF, 'root:root', '0644')
self.addConfFile(_NUAUTH_KRB5, 'root:root', '0644')
# group
## generic
self.addConfFile(_NSSWITCH_CONF, 'root:root', '0644')
## ldap
self.addConfFile(_NSS_LDAP, 'root:root', '0644')
self.addConfFile(_LDAP_CONF, 'root:root', '0644')
## nnd
self.addConfFile(_NND_CONF, 'root:root', '0644')
#ldap tests
self.addConfFile(TEST_LDAPCONF, 'root:root', '0644')
# auth & group
## ldap, ad, nnd
self.addConfFile(_NUAUTH_USER_DIR_CONF, 'root:root', '0644')
self.core.notify.connect('ha', 'ImportStart', self.__haImportStart)
self.core.notify.connect('ha', 'ImportEnd', self.__haImportEnd)
self.core.notify.connect('ufwi_rpcd', 'started', self._ufwi_rpcdStarted)
def _ufwi_rpcdStarted(self, notify_context):
context = Context.fromComponent(self)
self.has_auth_cert = self.core.hasComponent(context, 'auth_cert')
def getHostname(self):
context = Context.fromComponent(self)
return self.core.callService(context, "hostname",
"getPrimaryHostname")
def __haImportStart(self, context):
self.__during_ha_import = True
#save the config before we overwrite it!
#This config will only be used to calculate if we should or not
#attempt an AD join
self.__config_before_ha_import = self.config.serialize()
def __haImportEnd(self, context):
self.__during_ha_import = False
self.__config_before_ha_import = None
def __previous_config(self):
"""
Finds the running config or returns None
"""
if self.__during_ha_import:
return self.__config_before_ha_import
try:
return self.core.config_manager.get(
self.NAME,
which_configuration='applied'
)
except (KeyError, ConfigError):
return None
def _parameters_unchanged(self):
previous_config = self.__previous_config()
if previous_config is None:
return False
return self.config.serialize() == previous_config
def apply_config(self, responsible, arg, modified_paths):
"""
return a deferred
"""
self.important("Nuauth module applying its config")
#a deferred
return self._apply_conf(responsible)
def checkApply(self, data):
if isinstance(data, Failure):
self.critical("Problem configuring nuauth")
self.writeError(data)
# a rollback will be triggered
return data
def service_restartWinbind(self, context):
#winbind will not stop when asked
#We use runit for winbind, so...
self.killwinbind()
def killwinbind(self):
command = '/usr/bin/killall -9 winbindd'
process, status = runCommand(self, command.split(), env={})
if status:
self.info("winbind: no such process?")
@inlineCallbacks
def timedcheck(
self,
tries,
function,
func_args=(),
func_kw=None,
message="",
err_message="",
suc_message=""
):
"""
tries: int > 1
command: as a list
"""
if func_kw is None:
func_kw = {}
last = tries - 1
for time in xrange(tries):
yield deferToThread(sleep, 1)
try:
if message:
self.debug(message)
yield deferToThread(function, *func_args, **func_kw)
except Exception:
if err_message:
self.debug(err_message)
if time == last:
raise
else:
if suc_message:
self.debug(suc_message)
break
@inlineCallbacks
def _apply_conf(self, responsible):
###Ensure winbind is always startable (it was not beforehand)
###See commented calls in startServices/stopServices
yield deferToThread(self.stopServices, responsible)
ha_type = ENOHA
if EDENWALL:
context = Context.fromComponent(self)
try:
ha_type = yield self.core.callService(context, 'ha', 'getHAMode')
except Exception, err:
self.debug("can not read high availability status")
self.writeError(err)
yield self._config_nss(ha_type, responsible)
yield self._config_pam(ha_type)
yield deferToThread(self.startServices, responsible, ha_type)
def stopService(self, name):
self.info("Stop service %s" % name)
try:
self.etc_initd(name, "stop")
except Exception, err:
self.error('Stopping service %s failed: %s'
% (name, exceptionAsUnicode(err)))
def stopRunit(self, name):
self.info("Stop runit of service %s" % name)
try:
self.etc_initd(name, "exit")
except Exception, err:
self.error('Stopping runit of %s failed: %s'
% (name, exceptionAsUnicode(err)))
def stopServices(self, responsible):
responsible.feedback(tr("Stopping services"))
self.etc_initd("nuauth", "stop")
if is_enabled_in_runit("nufw-nss-daemon"):
self.stopService("nufw-nss-daemon")
# If we don't stop the "runit nufw-nss-daemon" process, quickly re-enabling
# nufw-nss-daemon in runit (recreating the symlink) may fail: sometimes
# runit starts nufw-nss-daemon and then quickly stops it again.
# Stopping runit avoids this weird behaviour.
self.stopRunit("nufw-nss-daemon")
self.set_enabled_in_runit(False, "nufw-nss-daemon")
if is_enabled_in_runit('winbind'):
self.stopService('winbind')
self.set_enabled_in_runit(False, 'winbind')
self.killwinbind()
def startServices(self, responsible, ha_type):
responsible.feedback(tr("Starting services"))
if not self.has_auth_cert:
self.etc_initd("nuauth", "start")
else:
self.debug("Don't start nuauth: auth_cert will restart the service")
def _enable_winbind(self, ha_type):
# if winbind is an ha ressource: when starting heartbeat will start winbind
self.set_enabled_in_runit(True, 'winbind')
self.killwinbind()
try: # In case it was already AD:
self.etc_initd("winbind", "start")
except RunCommandError:
# The link has just been created (causes "runsv not running"),
# in case we switch to AD.
pass
def etc_initd(self, daemon_name, action):
"""
Sensible actions: start, stop, restart
"""
cmd = "/etc/init.d/" + daemon_name, action
try:
runCommandAndCheck(self, cmd)
except RunCommandError, err:
self.error(
"Action '%s' on daemon %s failed" %
(action, daemon_name)
)
reraise(err)
return True
@inlineCallbacks
def _config_pam(self, ha_type):
auth_type = self.config.getAuthType()
self.info(_LOG_PAM + "Configuring for protocol %s" % auth_type)
self.generate_configfile({}, (_SASL_NUAUTH_CONF,))
if not auth_type in (NOT_CONFIGURED, LDAP, NND, RADIUS, AD, KERBEROS,
KERBEROS_AD):
raise NotImplementedError("Unhandled auth protocol: %s" %
self.config.org.protocol)
yield self.configure_auth(auth_type, ha_type)
pam_type = _PAMTYPE[auth_type]
self.info("Nuauth pam type: %s" % pam_type)
self.generate_configfile({'pam_type': pam_type}, (_PAM_NUAUTH_CONF,),
prefix=_LOG_PAM)
self.info(_LOG_PAM + "Done configuring for protocol %s" % auth_type)
@inlineCallbacks
def configure_auth(self, auth_type, ha_type):
"""
Always generate all files (whatever the configuration), so unused
files are cleaned.
"""
yield self.configure_kerberos(auth_type, ha_type)
self.configure_radius(auth_type)
@inlineCallbacks
def configure_kerberos(self, auth_type, ha_type):
templates_variables = {}
if auth_type == KERBEROS_AD:
if self.config.org.dns_domain is None:
raise NuauthException(NUAUTH_INVALID_CONF, "Unspecified domain")
templates_variables.update({
# AD
'use_kerberos': True,
'realm': self.config.org.domain.upper(),
'domain': self.config.org.dns_domain,
'kdc': self.config.org.controller_ip,
})
elif auth_type == KERBEROS:
# Kerberos, main realm
# they are all strings ! kerberos_domain, kdc
templates_variables.update({
'use_kerberos': True,
'kdc': unicode(self.config.auth.kdc),
'realm': unicode(self.config.auth.kerberos_domain).upper(),
'domain': unicode(self.config.auth.kerberos_domain).lower(),
})
if templates_variables.get('use_kerberos', False):
templates_variables['hostname'] = yield self.getHostname()
self.generate_configfile(templates_variables, (_NUAUTH_KRB5, '/etc/krb5.conf',))
if auth_type == KERBEROS:
if exists(CACHE_KRB5_KEYTAB_FILENAME):
#We are safe here:
#from shutil.copy doc:
#"Copy the file src to the file or directory dst." [...]
#a file with the same basename as src is created
#(or overwritten) in the directory specified. Permission bits
#are copied. src and dst are path names given as strings.
copy(CACHE_KRB5_KEYTAB_FILENAME, KRB5_KEYTAB_FILENAME)
fix_strict_perms('root', 'nuauth', KRB5_KEYTAB_FILENAME)
elif auth_type == KERBEROS_AD:
if ha_type != SECONDARY:
for keytab_command in ('create', 'add nuauth'):
try:
net_ads_keytab_command(self,
self.config.org.user,
self.config.org.password,
keytab_command)
except RunCommandError:
raise NuauthException(UNABLE_TO_FETCH_KEYTAB,
"[Kerberos+AD] Could not fetch AD keytab")
move(_AD_KEYTAB, KRB5_KEYTAB_FILENAME)
fix_strict_perms('root', 'nuauth', KRB5_KEYTAB_FILENAME)
def configure_radius(self, auth_type):
templates_variables = {
'use_radius': auth_type == RADIUS,
'radius_conf': self.config.auth
}
self.generate_configfile(templates_variables, (_RADIUS_CONF,),
prefix=_LOG_PAM)
def _pam_ldap(self):
pass
#everything done nss side
def _pam_nnd(self):
pass
#everything done nss side
@inlineCallbacks
def _config_nss(self, ha_type, responsible):
protocol = self.config.org.protocol
self.info(_LOG_NSS + "Configuring for protocol %s" % protocol)
self.critical(
_LOG_NSS
+ "Now configuring nuauth directory source for protocol %s"
% protocol
)
responsible.feedback(
tr(
"Now configuring nuauth group directory source for "
"%(AUTH_PROTOCOL)s protocol."
),
AUTH_PROTOCOL=tech2user[protocol]
)
if protocol == AD:
passwd = group = 'files winbind'
shadow = 'files'
elif protocol == LDAP:
passwd = group = shadow = 'files ldap'
else:
passwd = group = shadow = 'compat'
self.generate_configfile(
{
'passwd': passwd,
'group': group,
'shadow': shadow,
},
(_NSSWITCH_CONF,),
prefix=_LOG_NSS
)
use_nnd = (protocol == NND)
ctx = Context.fromComponent(self)
yield self.core.callService(ctx, 'system', 'setUseNND', use_nnd)
config = {'nufw': {'require_group_name': use_nnd}}
yield self.core.callService(ctx, 'ufwi_ruleset', 'setConfig', config)
templates_variables = {}
if protocol == NND:
templates_variables['auth_module'] = 'nnd'
templates_variables['groups_module'] = 'nnd'
templates_variables['use_groups_name'] = 1
templates_variables['log_users_without_realm'] = 0
else:
templates_variables['auth_module'] = 'system'
templates_variables['groups_module'] = 'system'
templates_variables['use_groups_name'] = 0
templates_variables['log_users_without_realm'] = 1
self.generate_configfile(templates_variables, (_NUAUTH_USER_DIR_CONF,), prefix=_LOG_NSS)
if protocol == LDAP:
self._nss_ldap(responsible)
elif protocol == NND:
yield self._nss_nnd(responsible)
elif protocol == AD:
yield self._nss_ad(ha_type, responsible)
elif protocol == NOT_CONFIGURED:
#/etc/nsswitch will contain 'compat' value
pass
else:
responsible.feedback(tr("This protocol is not handled, sorry"))
raise NotImplementedError(
"Unhandled org protocol: %s" % protocol
)
self.info(_LOG_NSS + "Done configuring for protocol %s" % protocol)
def _nss_ldap(self, responsible):
if self.config.org.protocol != LDAP:
return
self.generate_configfile(
{
'user' : self.config.org.user,
'password' : self.config.org.password,
'dn_users' : self.config.org.dn_users,
'dn_groups' : self.config.org.dn_groups,
'uri': self.config.org.uri,
'server_cert_set': self.config.getLdapCertPresent(),
'reqcert': self.config.getReqcertPolicy(),
},
(
_NSS_LDAP,
_LDAP_CONF,
_PAM_LDAP_CONF
),
prefix=_LOG_NSS
)
@inlineCallbacks
def _nss_nnd(self, responsible):
# Sort the domain list by key (i.e. by name), so that nnd.conf
# reflects the domain order displayed in the frontend (to make
# nnd.conf easier to check).
domains = list(self.config.org.domains.values())
domains.sort()
# The CA certificates will be (re)created below.
_cleandir(_NND_CA_DIR, _CANAMEFORMAT % "*")
serversnb = 0
for domain_index, domain in enumerate(domains):
onebased_dindex = domain_index + 1
domain_strindex = "domain%d" % onebased_dindex
if domain.label == self.config.org.default_domain:
default_domain = domain_strindex
domain.server_list = _nndserverlabels(onebased_dindex, domain)
# CA certificates.
for server_index, server in enumerate(domain.servers):
serversnb += 1
if server.ca_cert:
canamevalues = "%d.%d" % (onebased_dindex, server_index + 2)
filepath = join(_NND_CA_DIR, _CANAMEFORMAT % canamevalues)
ca_cert = decodeFileContent(server.ca_cert)
with open(filepath, "w") as fd:
fd.write(ca_cert)
server.ca_path = filepath
nnd_config = {
"default_domain": default_domain,
"domain_list": _nnddomainlabels(domains),
"domains": domains,
"socket": NND_SOCKET,
"log_level": "DEBUG",
"ldap_log_level": 0,
}
self.generate_configfile(nnd_config, (_NND_CONF,), prefix=_LOG_NSS)
yield self._checknndconffile()
created = self.set_enabled_in_runit(True, "nufw-nss-daemon")
if not created:
self.etc_initd("nufw-nss-daemon", "restart")
self.debug("Waiting for nufw-nss-daemon")
yield self._checknndsocket(serversnb)
yield self._checknndstatus(serversnb)
@inlineCallbacks
def _checknndsocket(self, serversnb):
yield self.timedcheck(
_NND_UP_TIMEOUT*serversnb,
_nndsocketexists,
message="Checking socket existence (%s)..." % NND_SOCKET,
err_message="Socket does not exist.",
suc_message="Socket found."
)
@inlineCallbacks
def _checknndstatus(self, serversnb):
client = NNDClient(logger=self)
yield self.timedcheck(
_NND_UP_TIMEOUT*serversnb,
canconnectnnd,
func_args=(client, NND_SOCKET),
message="Trying to connect...",
err_message="Connection failed.",
suc_message="Connection sucessful."
)
@inlineCallbacks
def _checknndconffile(self):
try:
yield deferToThread(runCommandAndCheck, self, _NND_CONFCHECK_CMD)
except Exception:
raise NNDError(tr("Error in generated nufw-nss-daemon config file"))
def __should_join(self, responsible):
if not self.__during_ha_import:
responsible.feedback(
tr("Not during an HA import, forcing AD join")
)
return True
elif not self._parameters_unchanged():
responsible.feedback(tr("Parameters changed, forcing AD join"))
return True
try:
check_join(self, responsible)
except NuauthException:
responsible.feedback(tr("Forcing AD join"))
return True
responsible.feedback(
tr("It seems that the domain has been joined already.")
)
return False
@inlineCallbacks
def _nss_ad(self, ha_type, responsible):
"""If secondary configuration files will be generated but don't
do join"""
hostname = yield self.getHostname()
should_join = ha_type == SECONDARY or self.__should_join(responsible)
if should_join:
netbios_name = getNetBiosName(hostname)
yield self._config_ad(self.config.org, netbios_name, ha_type,
responsible)
if ha_type == PRIMARY:
responsible.feedback(tr(
"Setting time on the passive member of the HA cluster."
))
yield self.ha_time_sync()
@inlineCallbacks
def _config_ad(self, ad_config, hostname, ha_type, responsible):
self.info(_LOG_NSS + "Setting up connection to the Active Directory domain")
domain = ad_config.dns_domain
if domain is None:
raise NuauthException(NUAUTH_INVALID_CONF, "Unspecified domain")
domain = domain.upper()
if hostname is None:
raise NuauthException(NUAUTH_INVALID_CONF, "Unspecified hostname")
hostname = hostname.upper()
user = ad_config.user
password = ad_config.password
# ha enabled: use only service ips
# ha disabled: use all ips (which are service ips)
serialized_net_cfg = yield self.core.callService(
Context.fromComponent(self),
'network', 'getNetconfig'
)
netcfg = deserializeNetCfg(serialized_net_cfg)
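# Collect every service IP across all interfaces; the set is handed to the
# smb.conf template as ips_edw.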
ips_edw = set()
for iface in netcfg.iterInterfaces():
for net in iface.iterNets():
if len(net.service_ip_addrs) != 0:
ips_edw |= net.service_ip_addrs
templates_variables = {
#AD
'use_ad': True,
'domain': domain,
'controller_ip': ad_config.controller_ip,
'workgroup': ad_config.workgroup,
'wins_ip': ad_config.getWinsIP(),
'password': password,
'hostname': hostname,
'ips_edw': ips_edw,
}
auth_type = self.config.getAuthType()
if auth_type == KERBEROS:
#Kerberos, main realm
#they are all strings ! kerberos_domain, kdc
templates_variables['use_kerberos'] = True
templates_variables.update(self.config.auth.iter_attr_and_values())
self.generate_configfile(
templates_variables, (_SMB_CONF,), prefix=_LOG_NSS
)
if ha_type == SECONDARY:
sambaRuntimeFilesModified(self)
self.debug(
"Secondary node: not joining the Active Directory domain %s"
% domain
)
return
responsible.feedback(
tr("Now joining the Active Directory domain %(AD_DOMAIN)s"),
AD_DOMAIN=domain
)
#This will raise Exceptions if appropriate:
yield deferToThread(
self.joinAd, user, domain, password, hostname, responsible
)
if auth_type == KERBEROS_AD:
saveLocalSid(self)
responsible.feedback(
_INITIALIZING_GROUP_MAPPING_MSG, USERNAME=ad_config.user
)
yield deferToThread(self._enable_winbind, ha_type)
yield self._checkadstatus(ad_config.user)
check_join(self, responsible)
@inlineCallbacks
def _checkadstatus(self, aduser):
yield self.timedcheck(
20,
runCommandAndCheck,
func_args=(
self, ["/usr/bin/wbinfo", "-i", aduser]
)
)
def joinAd(self, user, domain, password, hostname, responsible):
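# Register our hostname in the AD DNS zone only when the AD DNS domain
# matches this server's own domain; otherwise block_tcp_53 is set to "yes"
# and no DNS record is registered.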
try:
# FIXME: read the domain from the resolv component using a service
# call or a component method
our_dns_domain = self.core.config_manager.get('resolv', 'domain')
if our_dns_domain is not None:
our_dns_domain = our_dns_domain.lower()
ad_dns = self.config.org.dns_domain
if ad_dns is not None:
ad_dns = ad_dns.lower()
if ad_dns == our_dns_domain:
block_tcp_53 = "no"
responsible.feedback(
tr(
"Joining and registering our hostname '%(HOSTNAME)s' "
"in '%(DOMAIN)s' DNS record."
),
HOSTNAME=hostname,
DOMAIN=our_dns_domain
)
else:
block_tcp_53 = "yes"
responsible.feedback(
tr(
"Joining but NOT registering DNS record in domain "
"'%(AD_DOMAIN)s' because the domain of this server is "
"'%(DNS_DOMAIN)s'."
),
DNS_DOMAIN=our_dns_domain,
AD_DOMAIN=ad_dns,
)
except ConfigError:
block_tcp_53 = "no"
responsible.feedback(
tr(
"Joining and trying to register the DNS record because the server "
"domain is not set. This may explain a delay in joining the DNS record."
)
)
#following line raises many exceptions, see the method joinAd
self.important("May take time (several minutes)")
return joinAd(self, user, domain, password, block_tcp_53)
def read_config(self, *args, **kwargs):
try:
self.config = self._read_config()
except (ConfigError, KeyError):
self.debug("Nuauth: user directory config not loaded, nuauth not configured")
server_cert_set = exists(LDAP_CERT_FILE)
self.config = NuauthCfg()
self.config.setLdapCertPresent(server_cert_set)
ok, msg = self.config.isValidWithMsg()
if not ok:
self.error("nuauth read an invalid config: %s" % msg)
def _read_config(self):
serialized = self.core.config_manager.get(MASTER_KEY)
return NuauthCfg.deserialize(serialized)
def save_config(self, message, context=None):
serialized = self.config.serialize()
with self.core.config_manager.begin(self, context) as cm:
try:
cm.delete(MASTER_KEY)
except ConfigError:
pass
try:
cm.delete('nuauth_bind')
except ConfigError:
pass
cm.set(MASTER_KEY, serialized)
# Make nuauth:apply depend on bind:apply:
cm.set('nuauth_bind', 'nuauth_bind_dependency', '1')
cm.commit(message)
def service_availableModules(self, context):
return {
'auth': (RADIUS, KERBEROS, KERBEROS_AD, SAME, NOT_CONFIGURED),
'group': (LDAP, AD, NND, NOT_CONFIGURED)
}
def service_getNuauthConfig(self, context):
return self.config.serialize()
def service_setNuauthConfig(self, context, serialized, message):
self.config = NuauthCfg.deserialize(serialized)
ok, msg = self.config.isValidWithMsg(use_state=True)
if ok:
self.save_config(message, context)
else:
msg = "Got an invalid configuration: %s (%s/%s)" % (msg,
self.config.auth.protocol, self.config.org.protocol)
raise NuauthException(NUAUTH_INVALID_CONF, msg)
def get_ports(self):
return [ {'proto':'tcp', 'port': 4129}, ]
# maybe use slapdn ?
def check_dn(self, value):
regexp = re.compile('[A-Za-z0-9.-]+=[^=]+')
for i in value.split(','):
if not regexp.match(i):
return False
return True
def service_upload_ldap_server_cert(self, context, encoded_bin):
return deferToThread(self._upload_ldap_server_cert, encoded_bin)
def _upload_ldap_server_cert(self, encoded_bin):
#TODO: translate opening errors into user understandable strings
decoded_content = decodeFileContent(encoded_bin)
tmpfile, tmpfilename = mkstemp()
try:
write(tmpfile, decoded_content)
except Exception:
unlink(tmpfilename)
raise
finally:
close(tmpfile)
#TODO: here, check the file
#FIXME: IOErrors to NuauthException
move(tmpfilename, LDAP_CERT_FILE)
fix_strict_perms('root', 'nuauth', LDAP_CERT_FILE)
self.config.setLdapCertPresent(True)
return "Ok"
def service_delete_ldap_server_cert(self, context):
return deferToThread(self._delete_ldap_server_cert)
def _delete_ldap_server_cert(self):
unlink(LDAP_CERT_FILE)
self.config.setLdapCertPresent(False)
return "Ok"
def service_upload_krb_keytab(self, context, encoded_bin):
return deferToThread(self._upload_krb_keytab, encoded_bin)
def _upload_krb_keytab(self, encoded_bin):
#TODO: translate opening errors into user understandable strings
decoded_content = decodeFileContent(encoded_bin)
tmpfile, tmpfilename = mkstemp(suffix='.keytab')
try:
write(tmpfile, decoded_content)
except Exception:
unlink(tmpfilename)
raise
finally:
close(tmpfile)
try:
user_info = parseKeytab(self, tmpfilename)
except Exception, err:
try:
unlink(tmpfilename)
except OSError:
pass
raise err
try:
move(tmpfilename, CACHE_KRB5_KEYTAB_FILENAME)
except IOError, err:
if not exists(CACHE_DIR):
raise NuauthException(NO_CACHE_DIR, "%s does not exist !" % CACHE_DIR)
raise err
fix_strict_perms('root', 'nuauth', CACHE_KRB5_KEYTAB_FILENAME)
return user_info
def checKerberosServerInDNS(self, kerberos_config):
ns_server = self.core.config_manager.get("resolv", "nameserver1")
return all(
boolean_dig(self, server=ns_server, query=host)
for host in (
kerberos_config.kdc,
kerberos_config.kerberos_domain
)
)
def service_testRADIUS(self, context, radius_server, user, password):
radius_server = RadiusServer.deserialize(radius_server)
ok, msg = radius_server.isValidWithMsg()
if not ok:
raise NuauthException(msg)
return radius_server.test(self, user, password)
def service_testLDAP(self, context, dc, base, uri, search_filter, password):
templates_variables = {'reqcert': 'allow'}
self.generate_configfile(templates_variables, (TEST_LDAPCONF,))
#example of generated command:
#ldapwhoami -x -w nupik \
# -D cn=admin,dc=edenwall,dc=com \
# -H ldap://tetram.inl.fr"
cmd = [LDAPSEARCH_BIN, "-x"]
cmdstr = '%s -x' % LDAPSEARCH_BIN
if password:
cmd.extend(('-w', password))
cmdstr += ' -w %%%password%%%'
if dc:
cmd.extend(('-D', dc))
cmdstr += ' -D %s' % dc
if base:
cmd.extend(('-b', base))
cmdstr += ' -b %s' % base
cmd.extend(('-H', uri))
cmdstr += ' -H %s' % uri
cmd.append(search_filter)
cmdstr += ' %s' % search_filter
env = {'LDAPCONF': TEST_LDAPCONF}
self.debug("env: %s" % unicode(env))
with NamedTemporaryFile(suffix='.ldapsearch.stdout') as stdout_file:
process, value = runCommand(
self,
cmd, cmdstr=cmdstr,
stdout=stdout_file, stderr=PIPE,
env=env
)
if value != 0:
info = process.stderr.read()
error_title = tr("ldapsearch encountered the following error:")
if not info:
info = ""
if value in LDAPSEARCH_RETURN_CODES:
msg = LDAPSEARCH_RETURN_CODES[value]
msg = "%s\n%s\n%s" % (error_title, msg, info)
raise NuauthException(
NUAUTH_INVALID_CONF,
"%s\n%s" % (error_title, info)
)
raise NuauthException(
NUAUTH_INVALID_CONF,
"%s\n%s" % (error_title, info)
)
stdout_file.seek(0)
return stdout_file.read()
def service_runtimeFiles(self, context):
files = {
'deleted': (
LDAP_CERT_FILE,
KRB5_KEYTAB_FILENAME
),
'added' : (
(LDAP_CERT_FILE, 'cert'),
(KRB5_KEYTAB_FILENAME, 'cert')
)
}
# Kerberos AD with HA: backup local sid
if self.should_copy_ad_info():
sambaRuntimeFiles(self, files)
return files
def service_runtimeFilesModified(self, context):
if self.should_copy_ad_info():
configureLocalSid(self)
sambaRuntimeFilesModified(self)
def should_copy_ad_info(self):
return EDENWALL and self.config.hasAD()
@modify_ext_conf
def service_copyPKI(self, context, pkiname, cname):
CERT_DEST = ETC_DIR + '/server.crt'
KEY_DEST = ETC_DIR + '/server.key'
CRL_DEST = ETC_DIR + '/server.crl'
CA_DEST = ETC_DIR + '/ca.crt'
types = {
'certificate': CERT_DEST,
'key': KEY_DEST,
'ca': CA_DEST,
'crl': CRL_DEST,
}
paths = []
def addPath(path):
paths.append(path)
def copyPKI(unused):
try:
for type_, path in (('certificate', paths[0]),
('key', paths[1]),
('ca', paths[2])):
dest = types[type_]
try:
with open(dest, 'w+b') as fd:
fd.write(open(path).read())
except IOError, err:
try:
self.error(context,
tr('Nuauth: ') +
tr('Error while copying certificates from an internal PKI: %s')
% err)
except Exception:
self.error(context,
tr('Nuauth: ') +
tr('Error while copying certificates from an internal PKI.'))
return
except IndexError:
return False
return True
component_context = Context.fromComponent(self)
defer = self.core.callService(component_context, 'nupki',
'getCertPath', pkiname, cname)
defer.addCallback(addPath)
defer.addCallback(lambda x: self.core.callService(component_context,
'nupki', 'getPrivateCertPath', pkiname, cname))
defer.addCallback(addPath)
defer.addCallback(lambda x: self.core.callService(component_context,
'nupki', 'getCACertPath', pkiname))
defer.addCallback(addPath)
defer.addErrback(self.writeError)
defer.addCallback(copyPKI)
return defer
def service_test_user(self, context, username):
nnd = self.config.org.protocol == NND and self.config.auth.protocol == SAME
return deferToThread(
test_user,
self,
username,
nnd=nnd
)
service_test_user.__doc__ = test_user.__doc__
def service_test_group(self, context, groupname):
nnd = self.config.org.protocol == NND
return deferToThread(
test_group,
self,
groupname,
nnd=nnd
)
service_test_group.__doc__ = test_group.__doc__
def service_test_auth(self, context, username, passwd):
nnd = self.config.org.protocol == NND and self.config.auth.protocol == SAME
return deferToThread(
test_auth,
self,
username,
passwd,
nnd=nnd
)
service_test_auth.__doc__ = test_auth.__doc__
def _update_adstatus(self):
self.adstatus = net_ads_testjoin(self)
if self.adstatus:
self.last_ad_info = ad_info(self)
self.ad_status_update_time = self.timestamp()
def _build_ad_info(self):
return {
"service version": 1,
"current status": self.adstatus,
"realm": self.last_ad_info.get("Realm", ""),
"time offset": self.last_ad_info.get("Server time offset", ""),
"update time": self.ad_status_update_time,
"parent server": self.last_ad_info.get("LDAP server name", ""),
}
@inlineCallbacks
def service_ad_info(self, context, wanted_version=1):
result = yield deferToThread(self.ad_info, wanted_version)
returnValue(result)
def ad_info(self, wanted_version):
self._update_adstatus()
return self._build_ad_info()
|
23,269 | 9637034ab19785fff599abbaad2ad320e827a636 | from sqlalchemy import create_engine
import requests,pymysql,time,random
import lxml.etree
import pandas as pd
user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36"
headers = {'user-agent': user_agent}
urls = list(f'https://movie.douban.com/subject/34462775/comments?start={page*20}&limit=20&sort=new_score&status=P' for page in range(5))
url = 'https://movie.douban.com/subject/34462775/comments?start=0&limit=20&sort=new_score&status=P'
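# Map Douban rating labels (力荐/推荐/还行/较差/很差) to numeric star values 5..1.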
n_star = {'力荐':5,'推荐':4,'还行':3,'较差':2,'很差':1}
def get(url):
response = requests.get(url,headers=headers)
return response.text
def data_processing(user,star,content):
df = pd.DataFrame({'user':user,'origin_star':star, 'content':content})
df['star'] = df['origin_star'].map(n_star)
df = df.fillna('0.0')
df = df.drop('origin_star',axis=1)
return df
def crawl_reviews(text):
selector = lxml.etree.HTML(text)
user = selector.xpath('//*[@class="comment-info"]/a/text()')
star = selector.xpath('//*[@class="comment-info"]/span[2]/@title')
content = selector.xpath('//*[@class="comment"]/p/span/text()')
return data_processing(user,star,content)
def insert_sql(dataframe):
engine = create_engine('mysql+pymysql://root:@127.0.0.1:3306/douban',encoding='utf-8')
pd.io.sql.to_sql(dataframe,'movie',con=engine,index=True,if_exists='append')
print('to mysql successfully!')
if __name__ == "__main__":
for url in urls:
df = crawl_reviews(get(url))
insert_sql(df)
time.sleep(random.choice([3,4,5,5,3]))
|
23,270 | 2872a2e42963086778eeeb18d5cbe7cb8d329d6e | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Asset(models.Model):
""" 所有资产的共有数据表 """
asset_type_choice = (
('server', 'server_device'),
('networkdevice', 'network_device'),
('storagedevice', 'storage_device'),
('securitydevice', 'security_device'),
('software', 'software_device'),
)
asset_status = (
(0, 'online'),
(1, 'offline'),
(2, 'unknown'),
(3, 'fault'),
(4, 'backup'),
)
asset_type = models.CharField(choices=asset_type_choice, max_length=64, default='server', verbose_name="asset_type")
name = models.CharField(max_length=64, unique=True, verbose_name="asset_name") # must be unique
sn = models.CharField(max_length=128, unique=True, verbose_name="asset_serial_number") # must be unique
business_unit = models.ForeignKey('BusinessUnit', null=True, blank=True, verbose_name='belonging_business_unit')
status = models.SmallIntegerField(choices=asset_status, default=0, verbose_name='device_condition')
manufacturer = models.ForeignKey('Manufacturer', null=True, blank=True, verbose_name='manufacturer')
manage_ip = models.GenericIPAddressField(null=True, blank=True, verbose_name='manage_ip')
tags = models.ManyToManyField('Tag', blank=True, verbose_name='tags')
admin = models.ForeignKey(User, null=True, blank=True, verbose_name='asset_admin', related_name='admin')
idc = models.ForeignKey('IDC', null=True, blank=True, verbose_name='idc')
contract = models.ForeignKey('Contract', null=True, blank=True, verbose_name='contract')
purchase_day = models.DateField(null=True, blank=True, verbose_name="purchase_day")
expire_day = models.DateField(null=True, blank=True, verbose_name="expire_day")
price = models.FloatField(null=True, blank=True, verbose_name="price")
approved_by = models.ForeignKey(User, null=True, blank=True, verbose_name='approver', related_name='approved_by')
memo = models.TextField(null=True, blank=True, verbose_name='comment')
c_time = models.DateTimeField(auto_now_add=True, verbose_name='create_date')
m_time = models.DateTimeField(auto_now=True, verbose_name='update_time')
def __str__(self):
return '<%s> %s' % (self.get_asset_type_display(), self.name)
class Meta:
verbose_name = 'asset_table'
verbose_name_plural = "asset_table"
ordering = ['-c_time']
class Server(models.Model):
"""服务器设备"""
sub_asset_type_choice = (
(0, 'pc_server'),
(1, 'blade_computer'),
(2, 'mini_computer'),
)
created_by_choice = (
('auto', 'auto_record'),
('manual', 'manual_record'),
)
asset = models.OneToOneField('Asset') # the crucial one-to-one link to Asset!
sub_asset_type = models.SmallIntegerField(choices=sub_asset_type_choice, default=0, verbose_name="server_type")
created_by = models.CharField(choices=created_by_choice, max_length=32, default='auto', verbose_name="create_type")
hosted_on = models.ForeignKey('self', related_name='hosted_on_server',
blank=True, null=True, verbose_name="host_machine") # field used only by virtual machines
model = models.CharField(max_length=128, null=True, blank=True, verbose_name='server_model')
raid_type = models.CharField(max_length=512, blank=True, null=True, verbose_name='raid_type')
os_type = models.CharField('os_type', max_length=64, blank=True, null=True)
os_distribution = models.CharField('os_distribution_version', max_length=64, blank=True, null=True)
os_release = models.CharField('os_release_version', max_length=64, blank=True, null=True)
def __str__(self):
return '%s--%s--%s <sn:%s>' % (self.asset.name, self.get_sub_asset_type_display(), self.model, self.asset.sn)
class Meta:
verbose_name = 'server'
verbose_name_plural = "server"
class SecurityDevice(models.Model):
"""安全设备"""
sub_asset_type_choice = (
(0, 'firewall'),
(1, 'intrusion_detect_system'),
(2, 'internet_gateway'),
(4, 'maintenance_system'),
)
asset = models.OneToOneField('Asset')
sub_asset_type = models.SmallIntegerField(choices=sub_asset_type_choice, default=0, verbose_name="security_device_type")
def __str__(self):
return self.asset.name + "--" + self.get_sub_asset_type_display() + " id:%s" % self.id
class Meta:
verbose_name = 'security_device'
verbose_name_plural = "security_device"
class StorageDevice(models.Model):
"""存储设备"""
sub_asset_type_choice = (
(0, 'disk_array'),
(1, 'network_storage'),
(2, 'tape_library'),
(4, 'tape_machine'),
)
asset = models.OneToOneField('Asset')
sub_asset_type = models.SmallIntegerField(choices=sub_asset_type_choice, default=0, verbose_name="storage_device_type")
def __str__(self):
return self.asset.name + "--" + self.get_sub_asset_type_display() + " id:%s" % self.id
class Meta:
verbose_name = 'storage_device'
verbose_name_plural = "storage_device"
class NetworkDevice(models.Model):
"""网络设备"""
sub_asset_type_choice = (
(0, 'router'),
(1, 'interchanger'),
(2, 'load_balancing'),
(4, 'vpn_device'),
)
asset = models.OneToOneField('Asset')
sub_asset_type = models.SmallIntegerField(choices=sub_asset_type_choice, default=0, verbose_name="network_device_type")
vlan_ip = models.GenericIPAddressField(blank=True, null=True, verbose_name="vlan_ip")
intranet_ip = models.GenericIPAddressField(blank=True, null=True, verbose_name="intranet_ip")
model = models.CharField(max_length=128, null=True, blank=True, verbose_name="network_device_model")
firmware = models.CharField(max_length=128, blank=True, null=True, verbose_name="device_firmware_version")
port_num = models.SmallIntegerField(null=True, blank=True, verbose_name="port_num")
device_detail = models.TextField(null=True, blank=True, verbose_name="device_detail")
def __str__(self):
return '%s--%s--%s <sn:%s>' % (self.asset.name, self.get_sub_asset_type_display(), self.model, self.asset.sn)
class Meta:
verbose_name = 'network_device'
verbose_name_plural = "network_device"
class Software(models.Model):
"""
Only paid (purchased) software is recorded here.
"""
sub_asset_type_choice = (
(0, 'operation_system'),
(1, 'office/development_software'),
(2, 'business_software'),
)
sub_asset_type = models.SmallIntegerField(choices=sub_asset_type_choice, default=0, verbose_name="software_type")
license_num = models.IntegerField(default=1, verbose_name="license_number")
version = models.CharField(max_length=64, unique=True, help_text='example: CentOS release 6.7 (Final)',
verbose_name='software/system_version')
def __str__(self):
return '%s--%s' % (self.get_sub_asset_type_display(), self.version)
class Meta:
verbose_name = 'software/system'
verbose_name_plural = "software/system"
class IDC(models.Model):
"""data center"""
name = models.CharField(max_length=64, unique=True, verbose_name="idc_name")
memo = models.CharField(max_length=128, blank=True, null=True, verbose_name='comment')
def __str__(self):
return self.name
class Meta:
verbose_name = 'idc'
verbose_name_plural = "idc"
class Manufacturer(models.Model):
"""厂商"""
name = models.CharField('manufacturer_name', max_length=64, unique=True)
telephone = models.CharField('support_telephone', max_length=30, blank=True, null=True)
memo = models.CharField('comment', max_length=128, blank=True, null=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'manufacturer'
verbose_name_plural = "manufacturer"
class BusinessUnit(models.Model):
"""业务线"""
parent_unit = models.ForeignKey('self', blank=True, null=True, related_name='parent_level')
name = models.CharField('business_name', max_length=64, unique=True)
memo = models.CharField('comment', max_length=64, blank=True, null=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'business_unit'
verbose_name_plural = "business_unit"
class Contract(models.Model):
"""合同"""
sn = models.CharField('contract_serial_number', max_length=128, unique=True)
name = models.CharField('contract_name', max_length=64)
memo = models.TextField('comment', blank=True, null=True)
price = models.IntegerField('contract_price')
detail = models.TextField('contract_detail', blank=True, null=True)
start_day = models.DateField('contract_start_day', blank=True, null=True)
end_day = models.DateField('contract_end_day', blank=True, null=True)
license_num = models.IntegerField('license_number', blank=True, null=True)
c_day = models.DateField('create_time', auto_now_add=True)
m_day = models.DateField('update_time', auto_now=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'contract'
verbose_name_plural = "contract"
class Tag(models.Model):
"""标签"""
name = models.CharField('tag_name', max_length=32, unique=True)
c_day = models.DateField('create_day', auto_now_add=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'tag'
verbose_name_plural = "tag"
class CPU(models.Model):
"""CPU组件"""
asset = models.OneToOneField('Asset') # 设备上的cpu肯定都是一样的,所以不需要建立多个cpu数据,一条就可以,因此使用一对一。
cpu_model = models.CharField('cpu_model', max_length=128, blank=True, null=True)
cpu_count = models.PositiveSmallIntegerField('cpu_count', default=1)
cpu_core_count = models.PositiveSmallIntegerField('cpu_core_count', default=1)
def __str__(self):
return self.asset.name + ": " + self.cpu_model
class Meta:
verbose_name = 'cpu'
verbose_name_plural = "cpu"
class RAM(models.Model):
"""内存组件"""
asset = models.ForeignKey('Asset') # 只能通过外键关联Asset。否则不能同时关联服务器、网络设备等等。
sn = models.CharField('ram_serial_number', max_length=128, blank=True, null=True)
model = models.CharField('ram_model', max_length=128, blank=True, null=True)
manufacturer = models.CharField('ram_manufacturer', max_length=128, blank=True, null=True)
slot = models.CharField('slot', max_length=64)
capacity = models.IntegerField('ram_capacity', blank=True, null=True)
def __str__(self):
return '%s: %s: %s: %s' % (self.asset.name, self.model, self.slot, self.capacity)
class Meta:
verbose_name = 'ram'
verbose_name_plural = "ram"
unique_together = ('asset', 'slot') # RAM modules within one asset must be unique per slot
class Disk(models.Model):
"""存储设备"""
disk_interface_type_choice = (
('SATA', 'SATA'),
('SAS', 'SAS'),
('SCSI', 'SCSI'),
('SSD', 'SSD'),
('unknown', 'unknown'),
)
asset = models.ForeignKey('Asset')
sn = models.CharField('disk_serial_number', max_length=128)
slot = models.CharField('slot_position', max_length=64, blank=True, null=True)
model = models.CharField('disk_model', max_length=128, blank=True, null=True)
manufacturer = models.CharField('disk_manufacturer', max_length=128, blank=True, null=True)
capacity = models.FloatField('disk_capacity', blank=True, null=True)
interface_type = models.CharField('interface_type', max_length=16, choices=disk_interface_type_choice, default='unknown')
def __str__(self):
return '%s: %s: %s: %sGB' % (self.asset.name, self.model, self.slot, self.capacity)
class Meta:
verbose_name = 'disk'
verbose_name_plural = "disk"
unique_together = ('asset', 'sn')
class NIC(models.Model):
"""network card"""
asset = models.ForeignKey('Asset') # note: this must be a ForeignKey
name = models.CharField('nic_name', max_length=64, blank=True, null=True)
model = models.CharField('nic_model', max_length=128)
mac = models.CharField('mac_address', max_length=64) # virtual machines may have identical MAC addresses
ip_address = models.GenericIPAddressField('ip_address', blank=True, null=True)
net_mask = models.CharField('mask', max_length=64, blank=True, null=True)
bonding = models.CharField('bonding_address', max_length=64, blank=True, null=True)
def __str__(self):
return '%s: %s: %s' % (self.asset.name, self.model, self.mac)
class Meta:
verbose_name = 'nic'
verbose_name_plural = "nic"
unique_together = ('asset', 'model', 'mac') # asset, model and mac must be unique together, to avoid errors in the special virtual-machine case
class EventLog(models.Model):
"""
Event log.
When a related object is deleted, the log entry must be kept rather than
deleted with it; hence on_delete=models.SET_NULL.
"""
name = models.CharField('event_name', max_length=128)
event_type_choice = (
(0, 'other'),
(1, 'hardware_alternation'),
(2, 'increased_asset'),
(3, 'asset_offline'),
(4, 'asset_online'),
(5, 'maintenance_routine'),
(6, 'business_online_update_alternation'),
)
asset = models.ForeignKey('Asset', blank=True, null=True, on_delete=models.SET_NULL) # present when the asset approval succeeded
new_asset = models.ForeignKey('NewAssetApprovalZone', blank=True, null=True, on_delete=models.SET_NULL) # present when the asset approval failed
event_type = models.SmallIntegerField('event_type', choices=event_type_choice, default=4)
component = models.CharField('event_component', max_length=256, blank=True, null=True)
detail = models.TextField('event_detail')
date = models.DateTimeField('event_time', auto_now_add=True)
user = models.ForeignKey(User, blank=True, null=True, verbose_name='event_executor', on_delete=models.SET_NULL) # no executor when asset data is updated automatically
memo = models.TextField('comment', blank=True, null=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'eventlog'
verbose_name_plural = "eventlog"
class NewAssetApprovalZone(models.Model):
"""新资产待审批区"""
sn = models.CharField('asset_serial_number', max_length=128, unique=True) # 此字段必填
asset_type_choice = (
('server', 'server'),
('networkdevice', 'networkdevice'),
('storagedevice', 'storagedevice'),
('securitydevice', 'securitydevice'),
('IDC', 'IDC'),
('software', 'software'),
)
asset_type = models.CharField(choices=asset_type_choice, default='server', max_length=64, blank=True, null=True,
verbose_name='asset_type')
manufacturer = models.CharField(max_length=64, blank=True, null=True, verbose_name='manufacturer')
model = models.CharField(max_length=128, blank=True, null=True, verbose_name='model')
ram_size = models.PositiveIntegerField(blank=True, null=True, verbose_name='ram_size')
cpu_model = models.CharField(max_length=128, blank=True, null=True, verbose_name='cpu_model')
cpu_count = models.PositiveSmallIntegerField(blank=True, null=True)
cpu_core_count = models.PositiveSmallIntegerField(blank=True, null=True)
os_distribution = models.CharField(max_length=64, blank=True, null=True)
os_type = models.CharField(max_length=64, blank=True, null=True)
os_release = models.CharField(max_length=64, blank=True, null=True)
data = models.TextField('asset_data') # this field is required
c_time = models.DateTimeField('create_time', auto_now_add=True)
m_time = models.DateTimeField('update_time', auto_now=True)
approved = models.BooleanField('approve', default=False)
def __str__(self):
return self.sn
class Meta:
verbose_name = 'new_asset_approval'
verbose_name_plural = "new_asset_approval"
ordering = ['-c_time']
|
23,271 | 5edc8f6d87ab591dd453cfedfedf7d0e83487f07 | '''
Created on May 9, 2014
@author: veb
'''
'''
problem 1.2
'''
import random, StringIO, time, binascii
def initial_data():
ceiling = 10 ** 7
i = 1
s = list()
se = set()
f = open('phone.txt', 'w')
f1 = open('phone_uniq.txt', 'w')
buf = StringIO.StringIO()
num = 1
while i <= ceiling:
x = random.randint(1, ceiling)
s.append(str(x) + '\n')
se.add(str(x) + '\n')
# if i % 10 ** (num - 1) == 0:
# # print len(s)
# buf.writelines(s)
# f.write(buf.getvalue())
# f.flush()
# buf.close()
# buf = StringIO.StringIO()
# s = list()
# print len(s)
# time.sleep(1)
i += 1
f.writelines(s)
f.flush()
f.close()
f1.writelines(se)
f1.flush()
f1.close()
# print type( 122121212134124545454545454546464646464612389012830182038102830128046123123123123+1)
def vector_sort():
'''
10,000,000 scale.
'''
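# Bitmap sort: each byte in vector_space covers 8 consecutive numbers,
# MSB first, so bit k (0-based from the MSB) of byte j marks the presence
# of number j*8 + k + 1. Duplicates collapse into one bit, so the output
# file ends up sorted and de-duplicated.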
out = open('ps.txt', 'w')
# time.sleep(100)
vector_space = [0] * 1024
tail_index = -1
with open('phone.txt', 'r') as fp :
for line in fp :
num = int(line.strip())
if num % 8 > 0:
index = num / 8
offset = 2 ** (8-num % 8)
# print bin(offset)
else:
index = (num-1) / 8
offset = 0b00000001
current_tail = len(vector_space) - 1
if current_tail < index :
vector_space.extend([0] * (index - current_tail + 1024))
src = vector_space[index]
dest = src | offset
vector_space[index] = dest
tail_index = tail_index if tail_index >= index else index
if len(vector_space) > tail_index:
vector_space = vector_space [:tail_index + 1]
index = 0
mo = [0b10000000, 0b01000000, 0b00100000, 0b00010000, 0b00001000, 0b00000100, 0b00000010, 0b00000001 ]
buf = StringIO.StringIO()
for e in vector_space:
count = 1
for x in mo :
if e & x > 0:
buf.write(str(index * 8 + count) + '\n')
count += 1
index += 1
out.write(buf.getvalue())
out.flush()
out.close()
begin = time.time()
#s = [1,2,3]
#print s[2:2]
initial_data()
vector_sort()
print time.time() - begin , 's'
# print 0b00000001, 0b11111111
|
23,272 | 12b7086f37d3d1851ab1f03643be25dcd97045af | from typing import Optional, cast, Tuple, Any, Dict
import attr
import torch
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel
from allenact.algorithms.onpolicy_sync.storage import RolloutStorage
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.misc import (
Memory,
ObservationType,
ActorCriticOutput,
DistributionType,
)
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.utils import spaces_utils as su
from allenact.utils.tensor_utils import batch_observations
@attr.s(kw_only=True)
class InferenceAgent:
actor_critic: ActorCriticModel = attr.ib()
rollout_storage: RolloutStorage = attr.ib()
device: torch.device = attr.ib()
sensor_preprocessor_graph: Optional[SensorPreprocessorGraph] = attr.ib()
steps_before_rollout_refresh: int = attr.ib(default=128)
memory: Optional[Memory] = attr.ib(default=None)
steps_taken_in_task: int = attr.ib(default=0)
last_action_flat: Optional = attr.ib(default=None)
has_initialized: Optional = attr.ib(default=False)
def __attrs_post_init__(self):
self.actor_critic.eval()
self.actor_critic.to(device=self.device)
if self.memory is not None:
self.memory.to(device=self.device)
if self.sensor_preprocessor_graph is not None:
self.sensor_preprocessor_graph.to(self.device)
self.rollout_storage.to(self.device)
self.rollout_storage.set_partition(index=0, num_parts=1)
@classmethod
def from_experiment_config(
cls,
exp_config: ExperimentConfig,
device: torch.device,
checkpoint_path: Optional[str] = None,
model_state_dict: Optional[Dict[str, Any]] = None,
mode: str = "test",
):
assert (
checkpoint_path is None or model_state_dict is None
), "Cannot have `checkpoint_path` and `model_state_dict` both non-None."
rollout_storage = exp_config.training_pipeline().rollout_storage
machine_params = exp_config.machine_params(mode)
if not isinstance(machine_params, MachineParams):
machine_params = MachineParams(**machine_params)
sensor_preprocessor_graph = machine_params.sensor_preprocessor_graph
actor_critic = cast(
ActorCriticModel,
exp_config.create_model(
sensor_preprocessor_graph=sensor_preprocessor_graph
),
)
if checkpoint_path is not None:
actor_critic.load_state_dict(
torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
)
elif model_state_dict is not None:
actor_critic.load_state_dict(
model_state_dict
if "model_state_dict" not in model_state_dict
else model_state_dict["model_state_dict"]
)
return cls(
actor_critic=actor_critic,
rollout_storage=rollout_storage,
device=device,
sensor_preprocessor_graph=sensor_preprocessor_graph,
)
def reset(self):
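# Drop all per-task state; the next act() call re-initializes the rollout
# storage and memory.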
if self.has_initialized:
self.rollout_storage.after_updates()
self.steps_taken_in_task = 0
self.memory = None
def act(self, observations: ObservationType):
# Batch of size 1
obs_batch = batch_observations([observations], device=self.device)
if self.sensor_preprocessor_graph is not None:
obs_batch = self.sensor_preprocessor_graph.get_observations(obs_batch)
if self.steps_taken_in_task == 0:
self.has_initialized = True
self.rollout_storage.initialize(
observations=obs_batch,
num_samplers=1,
recurrent_memory_specification=self.actor_critic.recurrent_memory_specification,
action_space=self.actor_critic.action_space,
)
self.rollout_storage.after_updates()
else:
dummy_val = torch.zeros((1, 1), device=self.device) # Unused dummy value
self.rollout_storage.add(
observations=obs_batch,
memory=self.memory,
actions=self.last_action_flat[0],
action_log_probs=dummy_val,
value_preds=dummy_val,
rewards=dummy_val,
masks=torch.ones(
(1, 1), device=self.device
), # Always == 1 as we're in a single task until `reset`
)
agent_input = self.rollout_storage.agent_input_for_next_step()
actor_critic_output, self.memory = cast(
Tuple[ActorCriticOutput[DistributionType], Optional[Memory]],
self.actor_critic(**agent_input),
)
action = actor_critic_output.distributions.sample()
self.last_action_flat = su.flatten(self.actor_critic.action_space, action)
self.steps_taken_in_task += 1
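# Every `steps_before_rollout_refresh` steps the rollout storage is reset
# via after_updates().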
if self.steps_taken_in_task % self.steps_before_rollout_refresh == 0:
self.rollout_storage.after_updates()
return su.action_list(self.actor_critic.action_space, self.last_action_flat)[0]
|
23,273 | c8412f7c456c3aa2d5488738ab4bad58dc07f241 | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""An API for the Critical Pāli Dictionary"""
import argparse
import configparser
import datetime
import json
import logging
import os.path
import re
import flask
from flask import request, current_app
import sqlalchemy
from sqlalchemy.sql import text
import flask_sqlalchemy
from werkzeug.routing import Map, Rule
LANG = 'pi-Latn-x-iso'
MAX_RESULTS = 100
re_integer_arg = re.compile (r'^[0-9]+$')
re_normalize_headword = re.compile (r'^[-\[\(√°~]*(?:<sup>\d+</sup>)?(.*?)[-°~\)\]]*$')
class MySQLEngine (object):
""" Database Interface """
def __init__ (self, **kwargs):
args = self.get_connection_params (kwargs)
self.url = 'mysql+pymysql://{user}:{password}@{host}:{port}/{database}'.format (**args)
logger.log (logging.INFO,
'MySQLEngine: Connecting to mysql+pymysql://{user}:password@{host}:{port}/{database}'.format (**args))
self.engine = sqlalchemy.create_engine (self.url + '?charset=utf8mb4&sql_mode=ANSI',
pool_recycle = 300)
def get_connection_params (self, kwargs = {}):
""" Get connection parameters from .my.cnf file. """
config = configparser.ConfigParser ()
if 'MYSQL_CONF' in kwargs:
config.read (('/etc/my.cnf', os.path.expanduser (kwargs['MYSQL_CONF'])))
else:
config.read (('/etc/my.cnf', os.path.expanduser ('~/.my.cnf')))
section = config[kwargs.get ('MYSQL_GROUP', 'mysql')]
from_my_cnf = {
'host' : section.get ('host', 'localhost').strip ('"'),
'port' : section.get ('port', '3306').strip ('"'),
'database' : section.get ('database', '').strip ('"'),
'user' : section.get ('user', '').strip ('"'),
'password' : section.get ('password', '').strip ('"'),
}
return from_my_cnf
def execute (conn, sql, parameters, debug_level = logging.DEBUG):
start_time = datetime.datetime.now ()
result = conn.execute (text (sql.strip ()), parameters)
logger.log (debug_level, '%d rows in %.3fs',
result.rowcount, (datetime.datetime.now () - start_time).total_seconds ())
return result
def clip (i, min_, max_):
return max (min (int (i), max_), min_)
def arg (name, default, regex, msg = None):
arg = request.args.get (name, default)
if not regex.match (arg):
if msg is None:
msg = 'Invalid %s parameter' % name
flask.abort (400, msg)
return arg
cpd_iso_trans = str.maketrans ('âêîôû', 'aeiou')
def normalize_iso (text):
"""Normalize to ISO 15919
CPD transliteration is almost ISO 15919, but uses uppercase for proper names
and 'â' instead of 'a' to signal a syncope 'a' + 'a'.
We have to replace all 'â's because they definitely do not conform to ISO.
We get away with serving uppercase letters in proper names because it is an
easy fix on the client's side.
"""
return text.translate (cpd_iso_trans)
def make_headword (row, lang = LANG):
""" row is: headword_id, text, article_id """
normalized = text = normalize_iso (row[1])
m = re_normalize_headword.match (normalized)
if m:
normalized = m.group (1).lower ()
return {
'articles_url' : 'v1/articles/%d' % row[2],
'headwords_url' : 'v1/headwords/%d' % row[0],
'lang' : lang,
'normalized_text' : normalized,
'text' : text,
}
def make_json_response (obj):
resp = flask.Response (json.dumps (obj, indent=2, sort_keys=True), mimetype='application/json')
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
def make_headwords_response (res, limit = MAX_RESULTS, lang = LANG):
return make_json_response ({
'limit' : limit,
'data' : [ make_headword (row, lang) for row in res ]
})
# need this before first @app.endpoint declaration
app = flask.Flask (__name__)
@app.endpoint ('info')
def info ():
""" Endpoint. The root of the application. """
info = {
'name' : app.config['APPLICATION_NAME'],
'short_name' : app.config['APPLICATION_SHORT_NAME'],
'main_page_url' : app.config['APPLICATION_MAIN_URL'],
# 'css_url' : app.config.get ('APPLICATION_CSS_URL', ''),
'css' : 'span.smalltext { font-size: smaller }',
'supported_langs_query' : [ LANG ],
}
return make_json_response (info)
@app.endpoint ('headwords')
def headwords ():
""" Endpoint. Retrieve a list of headword IDs.
This implements the search query and wordlist.
"""
q = request.args.get ('q')
fulltext = request.args.get ('fulltext')
offset = int (arg ('offset', '0', re_integer_arg))
limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)
where = ''
if (not q) and (not fulltext):
# Retrieve full list of headwords
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT id, webkeyword, no
FROM keyword
ORDER BY sortkeyword, n, no
LIMIT :limit
OFFSET :offset
""", { 'offset' : offset, 'limit' : limit })
return make_headwords_response (res, limit)
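# Translate user-facing wildcards into SQL LIKE syntax: '?' -> '_' and
# '*' -> '%' (e.g. q=dhamm* becomes the pattern "dhamm%"); '-' and literal
# '%' characters are stripped beforehand.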
if q:
q = q.replace ('-', '')
q = q.replace ('%', '')
q = q.replace ('?', '_')
q = q.replace ('*', '%')
where = "(keyword LIKE :q) AND"
if not fulltext:
# easy out
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT id, webkeyword, no
FROM keyword
WHERE keyword LIKE :q
ORDER BY sortkeyword, n, no
LIMIT :limit
OFFSET :offset
""", { 'q' : q, 'offset' : offset, 'limit' : limit })
return make_headwords_response (res, limit)
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT DISTINCT
k.id,
k.webkeyword COLLATE utf8mb4_bin AS webkeyword,
k.no
FROM keyword k,
article a
WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))
AND a.no = k.no
ORDER BY k.sortkeyword, k.n, k.no
LIMIT :limit
OFFSET :offset
""".format (where = where), { 'q' : q, 'fulltext' : fulltext,
'offset' : offset, 'limit' : limit })
return make_headwords_response (res, limit)
@app.endpoint ('headwords_id')
def headwords_id (_id):
""" Retrieve a headword. """
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, """
SELECT id, webkeyword, no
FROM keyword
WHERE id = :id
""", { 'id' : _id })
return make_headwords_response (res)
@app.endpoint ('headwords_id_context')
def headwords_id_context (_id):
""" Retrieve a list of headwords around a given headword. """
limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)
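# Fetch up to `limit` headwords sorted before the requested one and
# `limit` + 1 starting at it, then stitch them together in sort order.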
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, """
SELECT keyword, sortkeyword
FROM keyword
WHERE id = :id
""", { 'id' : _id })
keyword, sortkeyword = res.fetchone ()
res1 = execute (conn, """
SELECT id, webkeyword, no
FROM keyword
WHERE sortkeyword < :sortkeyword
ORDER BY sortkeyword DESC, n DESC, no DESC
LIMIT :limit
""", { 'sortkeyword' : sortkeyword, 'limit' : limit })
res2 = execute (conn, """
SELECT id, webkeyword, no
FROM keyword
WHERE sortkeyword >= :sortkeyword
ORDER BY sortkeyword, n, no
LIMIT :limit
""", { 'sortkeyword' : sortkeyword, 'limit' : limit + 1 })
res = []
for row in reversed (res1.fetchall ()):
res.append (row[:3])
for row in res2:
res.append (row[:3])
return make_headwords_response (res, limit)
def make_article (row, lang = LANG):
""" row is: article_id """
return {
'articles_url' : 'v1/articles/%d' % row[0],
}
def make_articles_response (res, limit = MAX_RESULTS, lang = LANG):
return make_json_response ({
'limit' : limit,
'data' : [ make_article (row, lang) for row in res ]
})
@app.endpoint ('articles')
def articles ():
""" Endpoint. Retrieve a list of articles. """
offset = int (arg ('offset', '0', re_integer_arg))
limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT no
FROM article
ORDER BY no
LIMIT :limit
OFFSET :offset
""", { 'offset' : offset, 'limit' : limit })
return make_articles_response (res, limit)
@app.endpoint ('articles_id')
def articles_id (_id = None):
""" Endpoint. Retrieve an article. """
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT no
FROM article
WHERE no = :id
""", { 'id' : _id })
return make_articles_response (res)
@app.endpoint ('articles_id_formats')
def articles_id_formats (_id):
""" Endpoint. Retrieve an article's available formats. """
canonical_url = app.config['APPLICATION_MAIN_URL'] + 'search?article_id='
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT webtext FROM article WHERE no=:no
""", { 'no' : _id })
return make_json_response ([
{
'mimetype' : 'text/x-html-literal',
'lang' : LANG,
'embeddable' : True,
'text' : normalize_iso ('<div>%s</div>' % res.fetchone ()[0]),
},
{
'mimetype' : 'text/html',
'lang' : LANG,
'canonical' : True,
'urls' : [ canonical_url + str (_id) ],
}
])
@app.endpoint ('articles_id_headwords')
def articles_id_headwords (_id):
""" Endpoint. Retrieve the list of headwords for an article. """
offset = int (arg ('offset', '0', re_integer_arg))
limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT id, webkeyword, no
FROM keyword
WHERE no = :id
ORDER BY sortkeyword
LIMIT :limit
OFFSET :offset
""", { 'id' : _id, 'offset' : offset, 'limit' : limit })
return make_headwords_response (res, limit)
#
# main
#
parser = argparse.ArgumentParser (description='A simple API for dictionaries')
parser.add_argument ('-v', '--verbose', dest='verbose', action='count',
help='increase output verbosity', default=0)
parser.add_argument ('-c', '--config-file', dest='config_file', action='append',
required=True, metavar='CONFIG_FILE',
help="a config file (repeat for more than one, later ones overwrite)")
args = parser.parse_args ()
args.start_time = datetime.datetime.now ()
LOG_LEVELS = {
0: logging.CRITICAL,
1: logging.ERROR,
2: logging.WARN,
3: logging.INFO,
4: logging.DEBUG
}
args.log_level = LOG_LEVELS.get (args.verbose + 1, logging.CRITICAL)
logging.basicConfig (format = '%(asctime)s - %(levelname)s - %(message)s')
logging.getLogger ('sqlalchemy.engine').setLevel (args.log_level)
logging.getLogger ('server').setLevel (args.log_level)
logger = logging.getLogger ('server')
for config_file in args.config_file:
app.config.from_pyfile (config_file)
app.config.dba = MySQLEngine (**app.config)
app.config['SQLALCHEMY_DATABASE_URI'] = app.config.dba.url
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['server_start_time'] = str (int (args.start_time.timestamp ()))
app.url_map = Map ([
Rule ('/v1', endpoint = 'info'),
Rule ('/v1/headwords', endpoint = 'headwords'),
Rule ('/v1/headwords/<int:_id>', endpoint = 'headwords_id'),
Rule ('/v1/headwords/<int:_id>/context', endpoint = 'headwords_id_context'),
Rule ('/v1/articles', endpoint = 'articles'),
Rule ('/v1/articles/<int:_id>', endpoint = 'articles_id'),
Rule ('/v1/articles/<int:_id>/formats', endpoint = 'articles_id_formats'),
Rule ('/v1/articles/<int:_id>/headwords', endpoint = 'articles_id_headwords'),
])
dba = flask_sqlalchemy.SQLAlchemy ()
dba.init_app (app)
port = app.config.get ('APPLICATION_PORT', 5000)
path = app.config.get ('APPLICATION_ROOT', '')
logger.log (logging.INFO, "'{name}' is now served from localhost:{port}{path}/v1".format (
name = app.config['APPLICATION_NAME'],
port = port,
path = path))
if __name__ == "__main__":
from werkzeug.serving import run_simple
if path == '':
run_simple ('localhost', port, app)
else:
from werkzeug.wsgi import DispatcherMiddleware
application = DispatcherMiddleware (flask.Flask ('dummy_app_for_root'), {
app.config['APPLICATION_ROOT'] : app,
})
run_simple ('localhost', port, application)
|
23,274 | 711398a2ac2d1959cb9a29833e88b5f26c7fcc3e | from employee import Employee
from manager import Manager
from developer import Developer
import datetime
# =============================================================================
print('\n')
print('-' * 100)
print('Employee Class Example')
print('-' * 100)
print('\n')
# Instantiating Instances
emp_1 = Employee('Jane', 'Doe', 30000)
# Using Alternate Constructor to Instantiate Instances
emp_2_str = 'John-Doe-20000'
emp_2 = Employee.from_string(emp_2_str)
print('---------- Employee 1 ----------')
print(emp_1.get_full_name())
print(emp_1.email)
print('Employee 1 Pay: ', emp_1.pay)
print('Employee 1 Raise Amount: ', emp_1.raise_amount)
emp_1.apply_raise()
print('Employee 1 New Pay: ', emp_1.pay)
print('\n')
print('---------- Employee 2 ----------')
print(emp_2.get_full_name())
print(emp_2.email)
print('Employee 2 Pay: ', emp_2.pay)
print('Employee 2 Raise Amount: ', emp_2.raise_amount)
emp_2.set_raise_amount(1.10)
print('Employee 2 New Raise Amount: ', emp_2.raise_amount)
emp_2.apply_raise()
print('Employee 2 New Pay: ', emp_2.pay)
print('\n')
print('Number of Employees Created: ', Employee.num_of_employees)
# =============================================================================
print('\n')
print('-' * 100)
print('Developer Sub Class Example')
print('-' * 100)
print('\n')
# Instantiating instances
dev_1 = Developer('Arin', 'Blue', 40000, 'Python')
print('---------- Developer 1 ----------')
print(dev_1.get_full_name())
print(dev_1.email)
print('Developer 1 Pay: ',dev_1.pay)
print('Developer 1 Raise Amount: ', dev_1.raise_amount)
dev_1.apply_raise()
print('Developer 1 New Pay: ', dev_1.pay)
print('\n')
# =============================================================================
print('\n')
print('-' * 100)
print('Manager Sub Class Example')
print('-' * 100)
print('\n')
# Instantiating instances
man_1 = Manager('Some', 'Manager', 80000, dev_1)
print('---------- Manager 1 ----------')
print(man_1.get_full_name())
print(man_1.email)
print('Manager 1 Pay: ', man_1.pay)
print('Manager 1 Raise Amount: ', man_1.raise_amount)
man_1.apply_raise()
print('Manager 1 New Pay: ', man_1.pay)
print('\n')
# =============================================================================
print('\n')
print('-' * 100)
print('Date Time Example')
print('-' * 100)
print('\n')
# Create date objects
# (Year-Month-Day)
# Friday
my_date = datetime.date(2018, 6, 8)
# Saturday
my_other_date = datetime.date(2018, 6, 9)
print('Is this day (Friday) a workday? ', Employee.is_workday(my_date))
print('Is this day (Saturday) a workday? ', Employee.is_workday(my_other_date))
# Helpful Tips ----------------------------------------------------------------
# Test if main has run
# if __name__ == '__main__':
# print('I executed!')
# Output class or instance info
# print(emp_1.__dict__)
# print(Employee.__dict__)
# Output class or instance info
# print(help(Employee))
|
23,275 | edc91f02622a327a9337640403abbb1837846b17 | #!/usr/bin/env python
from __future__ import print_function
from subprocess import Popen, PIPE
import argparse
import pprint
import json
import os
import re
def _update_file(fpath, content):
with open(fpath, 'w+') as output:
output.write(content)
def gen_capi(args):
"""Generate C API RST as string"""
if not args.header:
return ""
cmd = ["ctags", "-x", "--c-kinds=fpsgx", args.header]
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
if process.returncode:
return ""
titles = {
"nvm_geo": "Geometry",
"nvm_buf": "Buffer Allocation",
"nvm_dev": "Device Management",
"nvm_addr": "Addressing",
"nvm_cmd": "Raw Commands",
"nvm_vblk": "Virtual Block",
"nvm_bbt": "Bad-Block-Table"
}
docs = {}
lib = {}
for line in out.split("\n"):
parts = (" ".join(line.split())).split(" ")[:2]
if len(parts) < 2:
continue
name, kind = parts
ns = "_".join(name.split("_")[:2])
if ns not in lib:
lib[ns] = {}
if kind not in lib[ns]:
lib[ns][kind] = []
lib[ns][kind].append(name)
for ns in lib:
if "prototype" in lib[ns]:
ordering = [
"bbt_get", "bbt_set", "bbt_mark", "bbt_flush",
"addr_erase", "addr_read", "addr_write", "addr_check",
"addr_.*2",
"vblk_erase", "vblk_p?read", "vblk_p?write", "vblk_pad",
"lba_p?read", "lba_p?write",
"_alloc", "_fill", "_free", "_pr",
"_get_", "_set_"
]
ordered = []
for order in ordering:
for func in lib[ns]["prototype"]:
if re.search(order, func):
if func not in ordered:
ordered.append(func)
lib[ns]["prototype"] = list(
set(lib[ns]["prototype"]) -
set(ordered)
) + ordered
title = "%s - %s" % (ns, titles[ns]) if ns in titles else ns
rst = "\n".join([
".. _sec-capi-%s:" % ns, "",
title,
"=" * len(title),
"", ""
])
if "typedefs" in lib[ns]:
for typedef in lib[ns]["typedefs"]:
rst += "\n".join([
typedef,
"-" * len(typedef), "",
".. doxygentypedef:: %s" % typedef,
"", ""
])
for mangler in ["struct", "externvar"]:
if mangler in lib[ns]:
for struct in lib[ns][mangler]:
rst += "\n".join([
struct,
"-" * len(struct), "",
".. doxygenstruct:: %s" % struct,
" :members:",
"", ""
])
if "enum" in lib[ns]:
for enum in lib[ns]["enum"]:
rst += "\n".join([
enum,
"-" * len(enum), "",
".. doxygenenum:: %s" % enum,
"", ""
])
if "prototype" in lib[ns]:
for func in lib[ns]["prototype"]:
rst += "\n".join([
func,
"-" * len(func), "",
".. doxygenfunction:: %s" % func,
"", ""
])
docs[ns] = rst
return docs
def expand_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
if __name__ == "__main__":
PRSR = argparse.ArgumentParser(description='Generate C API docs')
PRSR.add_argument(
"path",
type=str,
help="Path to DIR containing C API RST src (also output dir)"
)
PRSR.add_argument(
"--header",
type=str,
required=True,
help="Path to liblightnvm.h"
)
ARGS = PRSR.parse_args()
if ARGS.path:
ARGS.path = expand_path(ARGS.path)
if ARGS.header:
ARGS.header = expand_path(ARGS.header)
try:
RST = gen_capi(ARGS)
for NS in RST:
_update_file(os.sep.join([ARGS.path, "%s.rst" % NS]), RST[NS])
#if RST:
# with open(ARGS.rst, "w") as RST_FD:
# RST_FD.write(RST)
except OSError as EXC:
print("FAILED: generating RST err(%s)" % EXC)
|
23,276 | d283e8c00f9d79b5a9bc049eec8f67066026fe24 | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
# %%
data = pd.read_csv('spam_ham_dataset.csv')
data = data.iloc[:,2:]
# ham/0 spam/1
# %%
data.head(5)
# %%
import matplotlib.pyplot as plt
plt.style.use('seaborn')
plt.figure(figsize=(6,4))
data['label_num'].value_counts().plot(kind='bar')
# %%
data['text'] = data['text'].str.lower()
data.head()
# %%
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize, RegexpTokenizer
# %%
stop_words=set(stopwords.words('english'))
# %%
stop_words.add('subject')
# %%
def text_process(text):
tokenizer = RegexpTokenizer('[a-z]+')
token = tokenizer.tokenize(text)
lemmatizer = WordNetLemmatizer()
token = [lemmatizer.lemmatize(w) for w in token if lemmatizer.lemmatize(w) not in stop_words]
return token
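# Hedged example (exact output depends on the installed NLTK data):
#   text_process("the cats are running fast") -> ['cat', 'running', 'fast']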
# %%
data['text'] = data['text'].apply(text_process)
# %%
data.info()
# %%
data.head()
# %%
X = data['text']
y = data['label_num']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train,y_test = train_test_split(X,y,train_size = 0.7)
# %%
train = pd.concat([X_train,y_train],axis=1)
test = pd.concat([X_test,y_test],axis = 1)
# %%
ham_train = train[train['label_num'] == 0] # ham (normal) emails
spam_train = train[train['label_num'] == 1] # spam emails
# %%
# sample 30 emails from each class to build the vocabulary
ham_train_part = ham_train['text'].sample(30, random_state=42)
spam_train_part = spam_train['text'].sample(30, random_state=42)
# %%
part_words = []
for text in pd.concat([ham_train_part,spam_train_part]):
part_words += text
# %%
part_words_set = set(part_words)
print(len(part_words_set))
# %%
import numpy as np
# join the ham and spam tokens back into space-separated strings, since CountVectorizer() expects whitespace-delimited text
train_part_texts = [' '.join(text) for text in np.concatenate((spam_train_part.values, ham_train_part.values))]
# join every training-set email back into a space-separated string
train_all_texts = [' '.join(text) for text in train['text']]
# join every test-set email back into a space-separated string
test_all_texts = [' '.join(text) for text in test['text']]
# %%
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
cv = CountVectorizer()
part_fit = cv.fit(train_part_texts) # fit the vocabulary on the sampled subset only
train_all_count = cv.transform(train_all_texts) # word counts for every training email
test_all_count = cv.transform(test_all_texts) # word counts for every test email
tfidf = TfidfTransformer()
train_tfidf_matrix = tfidf.fit_transform(train_all_count)
test_tfidf_matrix = tfidf.transform(test_all_count)  # reuse the idf weights fitted on the training set; refitting on test data would leak information
# %%
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB()
model.fit(train_tfidf_matrix, y_train)
model.score(test_tfidf_matrix,y_test)
# %%
|
23,277 | f5c66f72bf35fac70da38bc8e239f94b23e6861c | import pygame
import random
import A_algo_duplicate
pygame.init()
pygame.mixer.init()
# creating window
screen_width = 900
screen_height = 960
gameWindow = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption("snake game")
carImg = pygame.image.load('apple.png')
overImg = pygame.image.load('tenor.gif')
welcm = pygame.image.load('welcome_page.png')
green = (1, 50, 32)
# Game specific variables
start_on = True
def start_game():
pygame.mixer.music.load('media.io_background.wav')
pygame.mixer.music.play(20)
exit_game = False
game_over = False
snake_x = 5
snake_y = 5
snake_width = 20
FPS = 20
snake_mv = 'r'
food_pos = (random.randint(1, 43), random.randint(1, 43))
head = [[4, 5], [snake_x, snake_y]]
snake_len = 1
clock = pygame.time.Clock()
font = pygame.font.SysFont("Snake Chan", 29)
font2 = pygame.font.SysFont("Snake Chan", 45)
return exit_game, game_over, snake_x, snake_y, snake_width, FPS, snake_mv, food_pos, head, snake_len, clock, font, font2
exit_game, game_over, snake_x, snake_y, snake_width, FPS, snake_mv, food_pos, head, snake_len, clock, font, font2 = start_game()
def plot_snake(gameWindow, head, snake_width):
for i in head:
pygame.draw.rect(gameWindow, green, [i[0] * 20, i[1] * 20, snake_width, snake_width])
# pygame.draw.circle(gameWindow, green, [head[-1][0] + snake_width//2, head[-1][1] + snake_width//2], snake_width//2 + 1)
# if snake_len//3 < snake_width//2:
# for i in range(0, len(head)//3):
# pygame.draw.circle(gameWindow, green, [head[i][0] + snake_width//2, head[i][1] + snake_width//2], snake_width//2 + i - snake_len//3)
# for i in head[len(head)//3:-1]:
# pygame.draw.circle(gameWindow, green, [i[0] + snake_width//2, i[1] + snake_width//2], snake_width//2)
# else:
# for i in range(0, snake_width//2):
# pygame.draw.circle(gameWindow, green, [head[i][0] + snake_width//2, head[i][1] + snake_width//2], snake_width//2 - snake_width//2 + i)
# for i in head[snake_width//2:-1]:
# pygame.draw.circle(gameWindow, green, [i[0] + snake_width//2, i[1] + snake_width//2], snake_width//2)
if head[-1][0] == head[-2][0]:
pygame.draw.circle(gameWindow, (255, 255, 255), [head[-1][0] * 20 + snake_width//2 + 4, head[-1][1] * 20 + snake_width//2], 4)
pygame.draw.circle(gameWindow, (255, 255, 255), [head[-1][0] * 20 + snake_width//2 - 4, head[-1][1] * 20 + snake_width//2], 4)
else:
pygame.draw.circle(gameWindow, (255, 255, 255), [head[-1][0] * 20 + snake_width//2, head[-1][1] * 20 + snake_width//2 - 4], 4)
pygame.draw.circle(gameWindow, (255, 255, 255), [head[-1][0] * 20 + snake_width//2, head[-1][1] * 20 + snake_width//2 + 4], 4)
def put_text(text, color, x, y, font):
text_screen = font.render(text, True, color)
gameWindow.blit(text_screen, [x, y])
with open("high_score.txt", "r") as f:
high_score = int(f.read())
sound1 = pygame.mixer.Sound('bite.wav')
while not exit_game:
while start_on:
gameWindow.blit(welcm, (20, 20))
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
start_on = False
command, end_game = A_algo_duplicate.main_func(head, food_pos)
for i in command[::-1]:
snake_x = i[0]
snake_y = i[1]
if snake_x == food_pos[0] and snake_y == food_pos[1]:
snake_len += 1
# print(head)
# print("eaten", food_pos)
sound1.play()
head.append([snake_x, snake_y])
food_pos = (random.randint(1, 43), random.randint(1, 43))
while food_pos in head:
food_pos = (random.randint(1, 43), random.randint(1, 43))
# print("next", food_pos)
continue
else:
# if len(head) > snake_len:
head.append([snake_x, snake_y])
del head[0]
gameWindow.fill((255, 255, 255))
if (snake_len - 1) * 5 > high_score:
high_score = (snake_len - 1) * 5
if head[-1][0] >= 45 or head[-1][0] <= 0 or head[-1][1] >= 45 or head[-1][1] <= 0 or (head[-1] in head[:-1] and len(head) != 1) or end_game == True:
# print(head)
# x = input()
print(food_pos)
pygame.mixer.music.load('over.mp3')
pygame.mixer.music.play()
game_over = True
while game_over == True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
exit_game, game_over, snake_x, snake_y, snake_width, FPS, snake_mv, food_pos, head, snake_len, clock, font, font2 = start_game()
gameWindow.fill((255, 255, 255))
put_text('score : ' + str((snake_len - 1) * 5), green, 10, 910, font)
put_text('highest score : ' + str(high_score), green, 450, 910, font)
pygame.draw.line(gameWindow, (0, 0, 0), (0, 900), (900, 900), 5)
gameWindow.blit(carImg, (food_pos[0]*20, food_pos[1] * 20))
plot_snake(gameWindow, head, snake_width)
pygame.display.update()
clock.tick(FPS)
game_over = False
continue
else:
pygame.quit()
exit()
pygame.display.update()
clock.tick(FPS)
gameWindow.blit(overImg, (340, 180))
put_text('Game over', green, 300, 400, font2)
put_text('press ENTER to restart or any other key', green, 20, 500, font)
# break
put_text('score : ' + str((snake_len - 1) * 5), green, 10, 910, font)
put_text('highest score : ' + str(high_score), green, 450, 910, font)
with open("high_score.txt", "w") as f:
f.write(str(high_score))
pygame.draw.line(gameWindow, (0, 0, 0), (0, 900), (900, 900), 5)
gameWindow.blit(carImg, (food_pos[0]*20, food_pos[1] * 20))
plot_snake(gameWindow, head, snake_width)
pygame.display.update()
clock.tick(FPS)
pygame.quit()
exit()
|
23,278 | b6e4a3933bb9336e5d91e27d4e09233e656a2755 | import copy
from unittest.mock import patch
# NOTE: the module paths below (genome, config, neat, src.NeuralNetwork) are assumed;
# adjust them to the project layout if they differ.
from genome import GenomeFactory, Genome
from config import Config
from neat import compatibility_distance, sort_species, \
    calculate_num_excess_disjoint_genes, create_population
import src.NeuralNetwork as NeuralNetwork
@patch('src.genome.Config.config')
def test_compatibility_distance(mock_config):
from src.genome import GenomeFactory
from neat import compatibility_distance
mock_config['DefaultGenome']['constant_excess'] = 1
num_input = 1
num_output = 1
g = GenomeFactory.create_genome(num_input, num_output)
g1 = g.copy()
distance = compatibility_distance(g, g1)
assert (distance < 1)
assert (distance > -1)
g1.mutate_add_node()
g1.mutate_add_node()
g1.mutate_add_node()
distance = compatibility_distance(g, g1)
assert (distance > 1 or distance > -1)
def test_sort_species_multiple():
num_input = 3
num_output = 2
num_genomes = 5
Config.config['DefaultGenome']['compatibility_threshold'] = '0.1'
genomes = []
for i in range(num_genomes):
g = GenomeFactory.create_genome(num_input, num_output)
g.mutate_add_connection()
g.mutate_add_node()
g.mutate_add_node()
g.mutate_add_node()
genomes.append(g)
g1 = g.copy()
g1.mutate_add_node()
g1.mutate_add_connection()
g1.mutate_add_connection()
g1.mutate_add_connection()
g1.mutate_add_connection()
genomes.append(g1)
g2 = g.copy()
genomes.append(g2)
# random.seed(random.randint(0,255))
result = sort_species(genomes)
assert (len(result.keys()) > 1)
assert (len(result.keys()) <= num_genomes * 3)
def test_sort_species_single():
num_input = 3
num_output = 2
num_genomes = 5
Config.config['DefaultGenome']['compatibility_threshold'] = '100'
genomes = []
for i in range(num_genomes):
g = GenomeFactory.create_genome(num_input, num_output)
g.mutate_add_connection()
genomes.append(g)
g1 = g.copy()
g1.mutate_add_node()
genomes.append(g1)
g2 = g.copy()
genomes.append(g2)
# random.seed(random.randint(0,255))
result = sort_species(genomes)
assert (len(result.keys()) == 1)
def test_calculate_num_excess_disjoint_genes():
# Excess
genome1 = {1, 2, 3, 4, 5, 6, 7}
genome2 = {2, 3, 4}
excess, disjoint = calculate_num_excess_disjoint_genes(genome1, genome2)
assert (excess == 4)
assert (disjoint == 0)
# One on each end
genome1 = {2, 3, 4, 5}
genome2 = {1, 2, 3, 4}
excess, disjoint = calculate_num_excess_disjoint_genes(genome1, genome2)
assert (excess == 2)
assert (disjoint == 0)
# disjoint
genome1 = {1, 5}
genome2 = {1, 2, 3, 4, 5}
excess, disjoint = calculate_num_excess_disjoint_genes(genome1, genome2)
assert (excess == 0)
assert (disjoint == 3)
# disjoint and excess
genome1 = {1, 5, 6, 7, 8}
genome2 = {1, 2, 3, 4, 5}
excess, disjoint = calculate_num_excess_disjoint_genes(genome1, genome2)
assert (excess == 3)
assert (disjoint == 3)
def test_mutate_add_connection():
num_input = 3
num_output = 2
before = GenomeFactory.create_genome(num_input, num_output)
genome = copy.deepcopy(before)
genome.mutate_add_connection()
assert (len(before.connection_genes) < len(genome.connection_genes))
def test_mutate_add_node():
num_input = 3
num_output = 2
new_node_key = num_input + num_output + 1
new_connection_index = num_input * num_output + 1
before = GenomeFactory.create_genome(num_input, num_output)
genome = copy.deepcopy(before)
genome.mutate_add_node()
# find and make sure one node is disabled
loc = None
for index, gene in genome.connection_genes.items():
if not gene.enabled:
loc = index
break
# Test gene is turned off
assert (before.connection_genes[loc].enabled is True)
assert (genome.connection_genes[loc].enabled is False)
# Test new connections and nodes are present
assert (len(genome.connection_genes) > len(before.connection_genes))
assert (len(genome.connection_genes) == len(before.connection_genes) + 2)
assert (len(genome.node_genes) > len(before.node_genes))
assert (len(genome.node_genes) == len(before.node_genes) + 1)
# make sure new connections are added with correct values
new_connection_key = list(genome.connection_genes.keys())[new_connection_index - 1]
assert (genome.connection_genes[new_connection_key])
assert (genome.connection_genes[new_connection_key].enabled is True)
# Make sure old connection and new connections are hooked up appropriately
first_new_conn_in_node_key = genome.connection_genes[new_connection_key].in_node_key
first_new_conn_out_node_key = genome.connection_genes[new_connection_key].out_node_key
second_new_in_node_key = genome.connection_genes[new_connection_key + 1].in_node_key
second_new_out_node_key = genome.connection_genes[new_connection_key + 1].out_node_key
prev_in = before.connection_genes[loc].in_node_key
prev_out = before.connection_genes[loc].out_node_key
assert (first_new_conn_in_node_key == prev_in)
assert (first_new_conn_out_node_key == new_node_key)
assert (second_new_in_node_key == new_node_key)
assert (second_new_out_node_key == prev_out)
def create_disjoint_genomes():
genome1 = GenomeFactory().create_genome(2, 1)
genome1.fitness = 1
genome2 = GenomeFactory().create_genome(2, 1)
genome2.fitness = 3
Genome.mutate_add_node(genome2)
return genome1, genome2
def test_crossover():
genome1 = GenomeFactory.create_genome(2,1)
genome2 = GenomeFactory.create_genome(2,1)
genome2.mutate_add_node()
offspring = genome1.crossover(genome2)
assert (len(offspring.connection_genes.keys()) == 4)
assert (offspring.connection_genes[7] == genome2.connection_genes[7])
def test_create_genome():
tests = [(1, 1), (2, 1), (9, 9)]
for num_i, num_o in tests:
g1 = GenomeFactory.create_genome(num_i, num_o)
connection_count = len(g1.connection_genes)
print(f'Expected: {num_i * num_o}, Found: {connection_count}')
assert (connection_count == num_i * num_o)
def test_create_population():
length = '10'
Config.config['NEAT']['pop_size'] = length
g = GenomeFactory.create_genome(1, 1)
pop = create_population(g)
assert (len(pop) == int(length))
def test_create_network():
g = GenomeFactory.create_genome(4, 2)
net, _ = g.create_graphs()
assert (net)
def test_feedforward():
g1 = GenomeFactory.create_genome(2, 1)
x = [[0, 0], [0, 1], [1, 1], [1, 0]]
y = [[0], [1], [0], [1]]
result = NeuralNetwork.NeuralNetwork.feedforward(g1, x, y)
assert (len(result) == 4)
def test_find_layers():
g1 = GenomeFactory.create_genome(2, 1)
l = NeuralNetwork.NeuralNetwork.find_layers(g1)
assert (len(l) == 1)
g1.mutate_add_node()
l2 = NeuralNetwork.NeuralNetwork.find_layers(g1)
assert (len(l2) == 2)
g1.mutate_add_node()
g1.mutate_add_node()
g1.mutate_add_node()
g1.mutate_add_node()
g1.mutate_add_node()
g1.mutate_add_node()
l3 = NeuralNetwork.NeuralNetwork.find_layers(g1)
assert (len(l3) > 2)
|
23,279 | 44763b5584e456e0bad467b25e0d5466e7d7c37a | print("****Sharing on Twitter****")
import tweepy
import os
# Authenticate to Twitter
auth = tweepy.OAuthHandler("YOURTWITTERAPI", "YOURTWITTERAPI")
auth.set_access_token("YOURTWITTERAPI", "YOURTWITTERAPI")
# Create API object
api = tweepy.API(auth)
# Number of files in the directory
lista = os.listdir(".")
# Create a tweet
for i in range(len(lista)):
try:
api.update_with_media(str(i)+".jpg")
print("Shared : "+str(i)+".jpg")
except:
print("no image called "+str(i)+".jpg")
pass
|
23,280 | fa333a4c504fe1139e3dc4089a1b178fca58504b | from glob import glob
import os
os.chdir('lacto')
for fname in glob('*.jpg'):
os.rename(fname, fname.replace('mask', 'lacto_mask'))
os.chdir('../gardner')
for fname in glob('*.jpg'):
os.rename(fname, fname.replace('mask', 'gardner_mask'))
os.chdir('../bacte')
for fname in glob('*.jpg'):
os.rename(fname, fname.replace('mask', 'bacte_mask'))
os.chdir('../noise')
for fname in glob('*.jpg'):
os.rename(fname, fname.replace('mask', 'noise_mask'))
|
23,281 | 7a6726814204c4e676e145bdb69e9e7db0520839 | import os
import pathlib
import numpy as np
import tensorflow as tf
from data_reader import img_preprocess
image_size = 384
def representative_dataset():
global image_size
data_path = "/data2/competition/classification/represent_data/"
for dirpath, dirnames, filenames in os.walk(data_path):
for imgname in filenames:
image = tf.io.read_file(os.path.join(data_path, imgname))
image = tf.compat.v1.image.decode_jpeg(image)
image = img_preprocess(image, image_size, "per", False)
image = np.array(image)
print(os.path.join(data_path, imgname))
image = np.reshape(image, (1, image_size, image_size, 3))
yield [image.astype(np.float32)]
def representative_dataset_sample():
for _ in range(100):
data = np.random.rand(1, image_size, image_size, 3)
yield [data.astype(np.float32)]
def convert_from_save_model(model_save_path, _image_size=384, type="normal", tflite_save_path=None):
global image_size
image_size = _image_size
converter = tf.lite.TFLiteConverter.from_saved_model(model_save_path)
if type == "dynamic":
converter.optimizations = [tf.lite.Optimize.DEFAULT]
elif type == "float16":
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
elif type == "full_int":
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
tflite_model = converter.convert()
if tflite_save_path != None:
with open(tflite_save_path, 'wb') as f:
f.write(tflite_model)
return tflite_save_path
def convert_from_keras_model(model, _image_size=384, type="normal", tflite_save_path=None):
global image_size
image_size = _image_size
converter = tf.lite.TFLiteConverter.from_keras_model(model)
return _convert(converter,type,tflite_save_path)
def _convert(converter, type, tflite_save_path):
if type == "dynamic":
converter.optimizations = [tf.lite.Optimize.DEFAULT]
elif type == "float16":
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
elif type == "full_int":
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
tflite_model = converter.convert()
if tflite_save_path != None:
with open(tflite_save_path, 'wb') as f:
f.write(tflite_model)
return tflite_save_path
|
23,282 | 7c35f52f0c0e13d07d3a880e01da4de0ea37f081 | ##copy sheets
import ezsheets
ss1 = ezsheets.createSpreadsheet('First Spreadsheet')
ss2 = ezsheets.createSpreadsheet('Second Spreadsheet')
ss1[0].updateRow(1, ['Some', 'data', 'in', 'the', 'first', 'row'])
ss1[0].copyTo(ss2)
sss=ss2.sheetTitles
print(sss)
|
23,283 | cb3727ea852b2016c5a767e4b58f7c1ccf58a36a | # Generated by Django 4.0.5 on 2022-07-06 14:54
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cars', '0003_carmodel_auto_park'),
]
operations = [
migrations.AlterField(
model_name='carmodel',
name='brand',
field=models.CharField(max_length=100, validators=[django.core.validators.RegexValidator('^[a-zA-Z0-9_\\s]{3,100}$', 'only a-z A-Z 0-9 _ space min 3 max 100')]),
),
migrations.AlterField(
model_name='carmodel',
name='price',
field=models.IntegerField(validators=[django.core.validators.MinValueValidator(2000), django.core.validators.MaxValueValidator(1000000)]),
),
migrations.AlterField(
model_name='carmodel',
name='year',
field=models.IntegerField(validators=[django.core.validators.MinValueValidator(1990), django.core.validators.MaxValueValidator(2022)]),
),
]
|
23,284 | c6c139507ecc340b2e767e2bcabb493240d75b55 | import os
import shutil
import random
#Find the names of the file in the directory of alphabet
saved_path = os.getcwd()
fileList = os.listdir(r"C:\Sumanth\pythonprojects\Udacity\2\alphabet")
userMessage = raw_input("What message do you want to make? Enter a lowercase message only. You can use spaces and periods but no other punctuation")
alphabetArray = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z","."," "]
for eachCharacter in userMessage:
count = -1
characterCount = 0
print 'comparing ' + eachCharacter + ' from your input to',
for eachLetter in alphabetArray:
print eachLetter
count = count + 1
if eachLetter == eachCharacter:
imageSource = "C:\\Sumanth\\pythonprojects\\Udacity\\2\\alphabet\\" + fileList[count]
imageDest = "C:\\Sumanth\\pythonprojects\\Udacity\\2\\secretmessage\\ " + str(random.randrange(0,1000)) + alphabetArray[characterCount]
print 'File to be copied is ', imageSource
print 'It has been copied to ', imageDest
shutil.copy(imageSource,imageDest)
characterCount = characterCount + 1
break |
23,285 | bbc86cc43024b203d37581039e3678e66ef0e6f4 | # external imports
from sqlalchemy.ext.declarative import declared_attr
from flask_jsontools import JsonSerializableBase
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.inspection import inspect
# local imports
from nautilus import admin
from ..db import db
class _Meta(type):
"""
The base metaclass for the nautilus models. Currently, it's primary use is to
automatically register a model class with the admin after it is created.
"""
def __init__(self, name, bases, attributes, **kwds):
# create the super class
super().__init__(name, bases, attributes, **kwds)
# if the class is not a nautilus base class
if 'nautilus_base' not in attributes or not attributes['nautilus_base']:
# perform the necessary functions
self.onCreation()
return
class _MixedMeta(_Meta, type(db.Model)):
"""
This meta class mixes the sqlalchemy model meta class and the nautilus one.
"""
JsonBase = declarative_base(cls=(JsonSerializableBase,))
class BaseModel(db.Model, JsonBase, metaclass=_MixedMeta):
nautilus_base = True # necessary to prevent meta class behavior on this model
def __init__(self, **kwargs):
""" treat kwargs as attribute assignment """
# loop over the given kwargs
for key, value in kwargs.items():
# treat them like attribute assignments
setattr(self, key, value)
def _json(self):
# build a dictionary out of just the columns in the table
return {
column.name: getattr(self, column.name) \
for column in type(self).columns()
}
@classmethod
def onCreation(cls): pass
@classmethod
def primary_keys(cls):
return [key.name for key in inspect(cls).primary_key]
@classmethod
def requiredFields(cls):
return [key.name for key in inspect(cls).columns if not key.nullable]
@classmethod
def columns(cls):
return inspect(cls).columns
def primary_key(self):
return getattr(self, type(self).primary_keys()[0])
def save(self):
# add the entry to the db session
db.session.add(self)
# commit the entry
db.session.commit()
@declared_attr
def __tablename__(self):
return '{}_{}'.format(self.__module__.split('.')[-1], self.__name__.lower())
__abstract__ = True
__table_args__ = dict(mysql_charset='utf8')
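# Hedged sketch of a concrete model built on BaseModel (the field names are
# illustrative only, not part of nautilus); kept commented out so that importing
# this module does not register an extra table:
#
# class Ingredient(BaseModel):
#     id = db.Column(db.Integer, primary_key=True)
#     name = db.Column(db.Text)
#     calories = db.Column(db.Integer)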
|
23,286 | 037a0945da9485edf94c1f3c3e80cdcd3a9eba4c | from django import forms
from .models import Course
from django.forms import ModelForm
class CourseForm(ModelForm):
class Meta:
model = Course
fields = '__all__'
course_name = forms.CharField(
required = True,
max_length = 150,
widget = forms.TextInput(
attrs = {
'id' : 'course-name',
'class' : 'form-control',
'placeholder' : 'input your course name here',
'autocomplete' : 'off',
}
)
)
lecturer = forms.CharField(
required = True,
max_length = 150,
widget = forms.TextInput(
attrs = {
'id' : 'lecturer',
'class' : 'form-control',
'placeholder' : 'input your lecturer name here',
'autocomplete' : 'off',
}
)
)
credits = forms.IntegerField(
required = True,
widget = forms.NumberInput(
attrs = {
'id' : 'credits',
'class' : 'form-control',
'placeholder' : 'input the credits of your course here',
'autocomplete' : 'off',
}
)
)
description = forms.CharField(
required = True,
max_length = 250,
widget = forms.Textarea(
attrs = {
'id' : 'description',
'class' : 'form-control',
'placeholder' : 'input the description about your course here',
'autocomplete' : 'off',
}
)
)
TERM_CHOICES = (
('odd', 'odd'),
('even', 'even')
)
term = forms.ChoiceField(
required = True,
choices = TERM_CHOICES,
widget = forms.Select(
attrs = {
'id' : 'term',
'class' : 'form-control',
'placeholder' : 'input the terms of the course here',
'autocomplete' : 'off',
}
)
)
ACADEMIC_YEAR = (
('2018/2019', '2018/2019'),
('2019/2020', '2019/2020'),
('2020/2021', '2020/2021'),
('2021/2022', '2021/2022')
)
academic_year = forms.ChoiceField(
required = True,
choices = ACADEMIC_YEAR,
widget = forms.Select(
attrs = {
'id' : 'academic-year',
'class' : 'form-control',
'placeholder' : 'input the academic year of the course here',
'autocomplete' : 'off',
}
)
)
room = forms.CharField(
required = True,
max_length = 20,
widget = forms.TextInput(
attrs = {
'id' : 'room',
'class' : 'form-control',
'placeholder' : 'input the room number of the course here',
'autocomplete' : 'off',
}
)
)
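# Hedged usage sketch (view and template names are assumptions, not from this app):
#
# from django.shortcuts import render
#
# def create_course(request):
#     form = CourseForm(request.POST or None)
#     if request.method == 'POST' and form.is_valid():
#         form.save()
#     return render(request, 'courses/course_form.html', {'form': form})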
|
23,287 | a3624083cbe6e09b69301e01f202704574de57e0 | from flask import Blueprint
from configparser import ConfigParser, RawConfigParser
from flask import current_app
main = Blueprint('main', __name__)
config_object = ConfigParser()
config_object.read("config.ini")
api = config_object["URL_API"]
from . import routes, events
|
23,288 | eb8b8b512e0154383cb0ddd028679ed35682df18 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'pycnic'
SITENAME = 'The Pycnic'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'Europe/Berlin'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_DOMAIN = SITEURL
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
#FEED_ALL_ATOM = None
#CATEGORY_FEED_ATOM = None
#TRANSLATION_FEED_ATOM = None
#AUTHOR_FEED_ATOM = None
#AUTHOR_FEED_RSS = None
# Blogroll
LINKS = None
#LINKS = (
# ('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (
('atom feed', FEED_ALL_ATOM, 'rss'),
('github', 'http://github.com/pycnic'),
('docker', 'https://hub.docker.com/u/pycnic/'),
)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
DEFAULT_DATE = 'fs'
THEME = 'pelican-themes/pelican-bootstrap3'
PLUGIN_PATHS = ["pelican-plugins"]
PLUGINS = [
'liquid_tags.notebook',
'related_posts',
'series',
'category_order',
'render_math',
]
# pelican-bootstrap3 settings
JINJA_EXTENSIONS = ['jinja2.ext.i18n']
PLUGINS += ['i18n_subsites']
PLUGIN_PATHS = ["pelican-plugins"]
BOOTSTRAP_THEME = 'darkly'
SHOW_ARTICLE_AUTHOR = True
SHOW_ARTICLE_CATEGORY = True
SHOW_DATE_MODIFIED = True
GITHUB_USER = 'pycnic'
GITHUB_SHOW_USER_LINK = False
CC_LICENSE = 'CC-BY'
DISPLAY_TAGS_ON_SIDEBAR = True
PLUGINS += ['tag_cloud']
DISPLAY_RECENT_POSTS_ON_SIDEBAR = True
DISPLAY_ARTICLE_INFO_ON_INDEX = True
PYGMENTS_STYLE = 'solarizeddark'
SHOW_SERIES = True
DISPLAY_SERIES_ON_SIDEBAR = True
|
23,289 | 6aa0c3fc5a0e8b37abe0d8e5e6e0d5ca2b27aba9 | i = 1
x = 100
nbprime = 0
while True:
c=0;
for j in range(1, (i+1)):
a = i%j
if (a==0):
c = c+1
if (c==2):
print(nbprime,',', i)
nbprime = nbprime + 1
if nbprime >= x:
break
i=i+1 |
23,290 | 1b0a3298ff0f0a284315604aa73e354569f2fe25 | # Generated by Django 2.2.3 on 2019-07-10 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='lancamento',
name='tipo',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AlterField(
model_name='lancamento',
name='categoria',
field=models.IntegerField(),
),
]
|
23,291 | 683c731ce288b33e5f4ccad6ab3fe185b00957e7 | # Reverse of three characters in the given string
s = input()
# s = "abcdefghijk"
res = ""
for i in range(0, len(s), 3):
words = s[i : i+3]
# print(words)
newWords = words[ : : -1]
res = res +" "+ newWords
print(res) |
23,292 | c30c815ccdd9ddb8661bca0391ee8b7d48c2b823 |
extruder_cutter = '1/8_endmill'
extruder_material = 'delrin'
extruder_thickness = 12
cutter_rad=milling.tools[extruder_cutter]['diameter']/2
spacer_cutter = '1/8_endmill'
spacer_material = 'pvc'
spacer_thickness = 3
cover_cutter = '1/8_endmill'
cover_material = 'pvc'
cover_thickness = 3
motor = 'NEMA1.7'
width = 42.3
height = 13.7
corner_rad = 5
centre = V(0,0)
stepper_pos = V(0, (width/2)-(height/2))
plane = camcam.add_plane(Plane('plane', cutter=cover_cutter))
plane.add_layer(
'spacer',
material = spacer_material,
thickness = spacer_thickness,
z0 = 0)
plane.add_layer(
'foo',
material = spacer_material,
thickness = spacer_thickness,
z0 = 0)
plane.add_layer(
'extruder',
material = extruder_material,
thickness = extruder_thickness,
z0 = 0)
plane.add_layer(
'cover',
material = cover_material,
thickness = cover_thickness,
z0 = 0)
cover_border = RoundedRect(
stepper_pos + V(0, height/2),
width = width,
height = width+height,
rad = corner_rad,
centred = True)
stepper = Stepper(stepper_pos, motor, 'foo')
d = stepper.d
spacer_pos = centre
spacer_x = width/2
spacer_y = (height)/2
x_offset=12
spacer_border = Path(closed=True, side='out')
spacer_border.add_point(PIncurve(spacer_pos + V(-spacer_x, -spacer_y), radius=corner_rad, direction = 'CW'))
spacer_border.add_point(PIncurve(spacer_pos + V(-spacer_x, spacer_y), radius=corner_rad, direction = 'CW'))
spacer_border.add_point(PIncurve(spacer_pos + V( -x_offset, spacer_y), radius=-1.5, direction = 'CW'))
spacer_border.add_point(POutcurve(stepper_pos, radius=11, direction = 'CW'))
spacer_border.add_point(PIncurve(spacer_pos + V( x_offset, spacer_y), radius=0, direction = 'CW'))
spacer_border.add_point(PIncurve(spacer_pos + V( spacer_x, spacer_y), radius=corner_rad, direction = 'CW'))
spacer_border.add_point(PIncurve(spacer_pos + V( spacer_x, -spacer_y), radius=corner_rad, direction = 'CW'))
feed_offset=6
border = Path(closed=True, side='out')
border.add_point(PIncurve(spacer_pos + V(-spacer_x, spacer_y), radius=corner_rad, direction = 'CW'))
border.add_point(PIncurve(spacer_pos + V( spacer_x, spacer_y), radius=corner_rad, direction = 'CW'))
border.add_point(PIncurve(spacer_pos + V( spacer_x, -spacer_y), radius=corner_rad, direction = 'CW'))
border.add_point(PIncurve(spacer_pos + V( (feed_offset+1), -spacer_y), radius=corner_rad, direction = 'CW'))
border.add_point(PIncurve(spacer_pos + V( feed_offset, -spacer_y+0.2), radius=corner_rad, direction = 'CW'))
border.add_point(PIncurve(spacer_pos + V( (feed_offset-1), -spacer_y), radius=corner_rad, direction = 'CW'))
border.add_point(PIncurve(spacer_pos + V( (-feed_offset+1), -spacer_y), radius=corner_rad, direction = 'CW'))
border.add_point(PIncurve(spacer_pos + V( -feed_offset, -spacer_y+0.2), radius=corner_rad, direction = 'CW'))
border.add_point(PIncurve(spacer_pos + V( (-feed_offset-1), -spacer_y), radius=corner_rad, direction = 'CW'))
border.add_point(PIncurve(spacer_pos + V(-spacer_x, -spacer_y), radius=corner_rad, direction = 'CW'))
spacer = plane.add_path(
Part(name = 'space_part',
border = spacer_border,
layer = 'spacer',
cutter = spacer_cutter
)
)
spacer.add(Hole(stepper_pos+V(d['bolt_sep']/2, -d['bolt_sep']/2), rad=milling.bolts[d['bolt_size']]['clearance']/2))
spacer.add(Hole(stepper_pos+V(-d['bolt_sep']/2, -d['bolt_sep']/2), rad=milling.bolts[d['bolt_size']]['clearance']/2))
extruder = plane.add(
Part(name = 'extrued_part',
border = border,
layer = 'extruder',
cutter = extruder_cutter
)
)
extruder.add(Hole(stepper_pos+V(d['bolt_sep']/2, -d['bolt_sep']/2), rad=milling.bolts[d['bolt_size']]['clearance']/2))
extruder.add(Hole(stepper_pos+V(-d['bolt_sep']/2, -d['bolt_sep']/2), rad=milling.bolts[d['bolt_size']]['clearance']/2))
cover = plane.add(
Part(name = 'cover_part',
border = cover_border,
layer = 'cover',
cutter = cover_cutter
)
)
cover.add(Hole(stepper_pos+V(d['bolt_sep']/2, -d['bolt_sep']/2), rad=milling.bolts[d['bolt_size']]['clearance']/2))
cover.add(Hole(stepper_pos+V(-d['bolt_sep']/2, -d['bolt_sep']/2), rad=milling.bolts[d['bolt_size']]['clearance']/2))
cover.add(Hole(stepper_pos+V(d['bolt_sep']/2, d['bolt_sep']/2), rad=milling.bolts[d['bolt_size']]['clearance']/2))
cover.add(Hole(stepper_pos+V(-d['bolt_sep']/2, d['bolt_sep']/2), rad=milling.bolts[d['bolt_size']]['clearance']/2))
cover.add(Hole(stepper_pos, rad=d['shaft_diam']/2+1))
cover.add(Hole(stepper_pos+V(d['bolt_sep']/2, d['bolt_sep']/1.05), rad=milling.bolts[d['bolt_size']]['clearance']/2))
cover.add(Hole(stepper_pos+V(-d['bolt_sep']/2, d['bolt_sep']/1.05), rad=milling.bolts[d['bolt_size']]['clearance']/2))
|
23,293 | d5d7d725f7f6e005490c4b125781574ebd1defb4 | '''
findingWeightSquared.py
Requires Python 3.4+, numpy, and matplotlib
This file displays a plot showing the sum of errors squared for possible
adjustment weights.
'''
from ProjectFunctions import correlation, best_fit_line, find_range_adjustment_results
import matplotlib.pyplot as plt
import numpy as np
mpgData = 'data/auto-mpg.data.txt'
dataKey = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration', 'Model Year']
with open(mpgData) as f:
content = f.readlines()
data = [[float(x) for x in line.split()[0:8]] for line in content]
mpg = np.array([car[0] for car in data], dtype=np.float64)
cylinders = np.array([car[1] for car in data], dtype=np.float64)
displacement = np.array([car[2] for car in data], dtype=np.float64)
horsepower = np.array([car[3] for car in data], dtype=np.float64)
weight = np.array([car[4] for car in data], dtype=np.float64)
acceleration = np.array([car[5] for car in data], dtype=np.float64)
model_year = np.array([car[6] for car in data], dtype=np.float64)
origin = np.array([car[7] for car in data], dtype=np.float64)
corr_vector_to_mpg = [correlation(mpg, cylinders), correlation(mpg, displacement), correlation(mpg, horsepower), correlation(mpg, weight), correlation(mpg, acceleration), correlation(mpg, model_year)]
abs_corr_vector = [abs(x) for x in corr_vector_to_mpg]
weight_to_mpg_m, weight_to_mpg_b = best_fit_line(weight, mpg)
cylinder_to_mpg_m, cylinder_to_mpg_b = best_fit_line(cylinders, mpg)
displacement_to_mpg_m, displacement_to_mpg_b = best_fit_line(displacement, mpg)
horsepower_to_mpg_m, horsepower_to_mpg_b = best_fit_line(horsepower, mpg)
acceleration_to_mpg_m, acceleration_to_mpg_b = best_fit_line(acceleration, mpg)
model_year_to_mpg_m, model_year_to_mpg_b = best_fit_line(model_year, mpg)
mpg_equations = [(cylinder_to_mpg_m, cylinder_to_mpg_b), (displacement_to_mpg_m, displacement_to_mpg_b), (horsepower_to_mpg_m, horsepower_to_mpg_b), (weight_to_mpg_m, weight_to_mpg_b), (acceleration_to_mpg_m, acceleration_to_mpg_b), (model_year_to_mpg_m, model_year_to_mpg_b)]
possibilities = find_range_adjustment_results(data, mpg_equations, abs_corr_vector)
plt.scatter([_ for _ in range(-100,100)], possibilities, label='weight adjustment values', color='k', s=25, marker="o")
plt.xlabel('Adjustment Weight')
plt.ylabel('Sum of Errors Squared')
plt.show()
print('done.')
|
23,294 | dcf127c88af7b4ab93ddfef7ffe24abe663133bd | import re
import numpy as np
class Miscellaneous(object):
@staticmethod
def ConvertStrToBagOfWords(reg, reg_exclude, document_in_a_sentence):
# find all words match regular expression
document_word_list = re.findall(reg, document_in_a_sentence)
return document_word_list
@staticmethod
#input word_dict = {'word1':2, 'word2': 9}, converted to ndarray in order
def DictToNdarray(word_dict):
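# Hedged example: DictToNdarray({'word2': 9, 'word1': 2}) -> array([[2, 9]]) (values follow sorted-key order)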
sorted_value_list = [word_dict[x] for x in sorted(word_dict)]
word_array = np.array([sorted_value_list])
return word_array |
23,295 | 5209bc665d7122356034c53b988cc8d807fe9439 | """
Module to test the Course class.
"""
import unittest
import lib
class TestCourse(unittest.TestCase):
def setUp(self):
"Prepare for running a test"
self.course = lib.COURSE(0, "Test Course")
def test_course_to_str(self):
"Test the course name"
self.assertEqual(str(self.course), "Test Course")
def test_course_add_module(self):
"Test that adding a module only accepts a module object"
with self.assertRaises(TypeError):
self.course.add_module("Module String")
def test_course_list_modules(self):
"Test that the module list returns a list of modules"
self.assertTrue(isinstance(self.course.get_modules(), list))
|
23,296 | 9accf7af0b980c4685ae937a20d54b829b80dda0 |
import numpy as np
import tensorflow as tf
import pprint
import pickle
from tqdm import tqdm
from sklearn import(manifold, datasets, decomposition, ensemble, discriminant_analysis, random_projection)
from time import time
import matplotlib.pyplot as plt
from tensorflow.python.keras import backend as K
import re
row_size = 5000
genders= {'M': 'blue', 'F': 'red'}
accents = {'English': 'blue',
'Scottish': 'red',
'NorthernIrish': 'green',
'Irish': 'yellow',
'Indian': 'purple',
'Welsh': 'brown',
'American': 'orange',
'Canadian': 'black',
'SouthAfrican': 'cyan',
'Australian_Engl': 'magenta',
'NewZealand': 'pink'}
"""
layers = ["generator/downsc_conv0/Reshape_2:0",
"generator/downsc_conv1/Reshape_2:0",
"generator/downsc_conv2/Reshape_2:0",
"generator/downsc_conv2/Reshape_2:0",
"generator/bottleneck_conv/Reshape_2:0",
"generator/upsc_conv3/merge_1/concat:0",
"generator/upsc_conv2/merge_2/concat:0",
"generator/upsc_conv1/merge_3/concat:0",
"generator/upsc_conv0/merge_4/concat:0",
"generator/merge_5/add:0"]
"""
layers = ["generator/merge_5/add:0"]
layers = ["generator/upsc_conv0/lstm_9/transpose_1:0"]
layers = ["generator/upsc_conv0/merge_4/concat:0"]
print("loading...")
speakerInfo = np.loadtxt("../data/vctk/VCTK-Corpus/speaker-info.txt",
dtype={"names": ("ID", "AGE", "GENDER", "ACCENT", "REGION"),
"formats": ('|S15', '|S15', '|S15', '|S15', '|S20')})
speakerInfoDict = {}
for i in range(1, len(speakerInfo)):
row = speakerInfo[i]
speakerInfoDict[row[0]] = row
print("loaded speaker info")
ids = np.load("../data/vctk/multispeaker/ID_list") # this associates each audio sample with the id of its speaker
# i.e., the id on line 1 is the id of the sample in row 1 of the datafile
print("loaded id list")
data = pickle.load(open("../data/vctk/multispeaker/full-data-vctk-multispeaker-interp-val.4.16000.-1.8192.0.25", "rb"))
#data = np.array([[1, 2, 3, 4, 5, 6, 7]])
print("loaded data")
##acs = []
##for i in range(0, len(data)):
# acs.append(speakerInfoDict[str(ids[i])][3])
##from collections import Counter
##print Counter(acs)
##x = 1/0
maps = {}
used_ids = []
with tf.compat.v1.Session() as sess:
# setup session
gpu_options=tf.compat.v1.GPUOptions(allow_growth=True)
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
K.set_session(sess)
# load model
saver =tf.compat.v1.train.import_meta_graph('./full-snr-multispeaker_audiohybrid2.lr0.00300.1.g4.b32.d8192.r4.lr0.000300.1.g4.b32/model.ckpt-41761.meta')
graph = tf.compat.v1.get_default_graph()
saver.restore(sess, tf.train.latest_checkpoint('./full-snr-multispeaker_audiohybrid2.lr0.00300.1.g4.b32.d8192.r4.lr0.000300.1.g4.b32'))
#graph.clear_collection('losses')
for i in tqdm(list(range(0, len(data)))):
u = np.random.uniform()
if u > 1: continue
used_ids.append(ids[i])
#names = [n.name for n in tf.get_default_graph().as_graph_def().node if "Placeholder" in n.op]
X_in = graph.get_tensor_by_name("X:0")
alpha_in = graph.get_tensor_by_name("alpha:0")
x =np.reshape(data[i], (1, len(data[i]), 1))
feed_dict = {X_in:x, alpha_in: 0.1}
k_tensors = [n for n in graph.as_graph_def().node if 'keras_learning_phase' in n.name]
#assert len(k_tensors) <= 1
if k_tensors:
k_learning_phase = graph.get_tensor_by_name(k_tensors[0].name + ':0')
feed_dict[k_learning_phase] = False
# run op and add resulting activations to array
restored = [graph.get_tensor_by_name(layer) for layer in layers]
activations = sess.run(restored, feed_dict)
graph.clear_collection('losses')
for i in range(len(activations)):
a = activations[i]
shape = a.shape
frag_size = row_size // shape[2]
a = a[:, (shape[1]-frag_size)//2: (shape[1]+frag_size)//2, :]
a = np.reshape(a, (-1))
if layers[i] not in maps: maps[layers[i]] = []
maps[layers[i]].append(a)
print(used_ids)
for layer, acts in tqdm(iter(maps.items())):
acts = np.array(acts)
print(acts.shape)
name = re.sub("[/,:]", "_", layer)
np.save('lstm_merge_activations_' +name, acts)
# run t-SNE
tsne = manifold.TSNE(n_components=2, perplexity=30, init='pca', random_state=0)
X_tsne = tsne.fit_transform(acts)
# plot t-SNE
gs = []
for i in range(0, X_tsne.shape[0]):
gs.append(speakerInfoDict[str(used_ids[i])][2])
colors =[genders[x] for x in gs]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(X_tsne[:,0], X_tsne[:,1], c=colors)
handles =ax.get_legend_handles_labels()
pts = [plt.Line2D((0,1),(0,0), color = c, marker = 'o', linestyle = '') for c in list(genders.values())]
ax.legend(pts, list(genders.keys()), loc=4)
plt.title(name)
fig.savefig("lstm_merge_t-SNE_gender_" + name+".png")
plt.clf()
acs = []
for i in range(0, X_tsne.shape[0]):
acs.append(speakerInfoDict[str(used_ids[i])][3])
colors =[accents[x] for x in acs]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(X_tsne[:,0], X_tsne[:,1], c=colors)
handles =ax.get_legend_handles_labels()
pts = [plt.Line2D((0,1),(0,0), color = c, marker = 'o', linestyle = '') for c in list(accents.values())]
ax.legend(pts, list(accents.keys()), loc=4)
plt.title(name)
fig.savefig("lstm_merge_t-SNE_accent_" + name+".png")
plt.clf()
np.save('lstm_merge_used_ids', np.array(used_ids))
|
23,297 | c130c58c99764c5b1b389625d9600d174cb4ef2a | # -*- coding: utf-8 -*-
import json
import logging
import time
import requests
from requests import ReadTimeout
from odoo import api, fields, models
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
""" 钉钉部门功能模块 """
# 拓展部门员工
class HrEmployee(models.Model):
_inherit = 'hr.employee'
din_id = fields.Char(string='钉钉用户Id')
din_unionid = fields.Char(string='钉钉唯一标识')
din_jobnumber = fields.Char(string='钉钉员工工号')
din_hiredDate = fields.Date(string='入职时间')
din_sy_state = fields.Boolean(string=u'同步标识', default=False)
work_status = fields.Selection(string=u'工作状态', selection=[(1, '待入职'), (2, '试用期'), (3, '正式员工'), (4, '离职')])
# Push a new employee record to DingTalk
@api.multi
def create_ding_employee(self):
for res in self:
url = self.env['ali.dindin.system.conf'].search([('key', '=', 'user_create')]).value
token = self.env['ali.dindin.system.conf'].search([('key', '=', 'token')]).value
# collect the DingTalk department id (din_id)
department_list = list()
if res.department_id:
department_list.append(res.department_id.din_id)
else:
raise UserError("请选择员工部门!")
data = {
'name': res.name, # name
'department': department_list, # department
'position': res.job_title if res.job_title else '', # position
'mobile': res.mobile_phone if res.mobile_phone else '', # mobile phone
'tel': res.work_phone if res.work_phone else '', # desk phone
'workPlace': res.work_location if res.work_location else '', # office address
'remark': res.notes if res.notes else '', # notes
'email': res.work_email if res.work_email else '', # email
'jobnumber': res.din_jobnumber if res.din_jobnumber else '', # job number
}
headers = {'Content-Type': 'application/json'}
try:
result = requests.post(url="{}{}".format(url, token), headers=headers, data=json.dumps(data), timeout=10)
result = json.loads(result.text)
logging.info(result)
if result.get('errcode') == 0:
res.write({'din_id': result.get('userid')})
res.message_post(body=u"钉钉消息:员工信息已上传至钉钉", message_type='notification')
else:
raise UserError('上传钉钉系统时发生错误,详情为:{}'.format(result.get('errmsg')))
except ReadTimeout:
raise UserError("上传员工至钉钉超时!")
# Sync employee updates to DingTalk
@api.multi
def update_ding_employee(self):
"""修改员工时同步至钉钉"""
for res in self:
url = self.env['ali.dindin.system.conf'].search([('key', '=', 'user_update')]).value
token = self.env['ali.dindin.system.conf'].search([('key', '=', 'token')]).value
# collect the DingTalk department id (din_id)
department_list = list()
if not res.department_id:
raise UserError("请选择员工部门!")
department_list.append(res.department_id.din_id)  # build the department list before the payload; list.append() returns None
data = {
'userid': res.din_id, # userid
'name': res.name, # name
'department': department_list, # department
'position': res.job_title if res.job_title else '', # position
'mobile': res.mobile_phone if res.mobile_phone else '', # mobile phone
'tel': res.work_phone if res.work_phone else '', # desk phone
'workPlace': res.work_location if res.work_location else '', # office address
'remark': res.notes if res.notes else '', # notes
'email': res.work_email if res.work_email else '', # email
'jobnumber': res.din_jobnumber if res.din_jobnumber else '', # job number
}
headers = {'Content-Type': 'application/json'}
try:
result = requests.post(url="{}{}".format(url, token), headers=headers, data=json.dumps(data), timeout=30)
result = json.loads(result.text)
logging.info(result)
if result.get('errcode') == 0:
res.message_post(body=u"新的信息已同步更新至钉钉", message_type='notification')
else:
raise UserError('上传钉钉系统时发生错误,详情为:{}'.format(result.get('errmsg')))
except ReadTimeout:
raise UserError("上传员工至钉钉超时!")
# Override unlink so the matching DingTalk user is deleted as well
@api.multi
def unlink(self):
userid = self.din_id
super(HrEmployee, self).unlink()
din_delete_employee = self.env['ir.config_parameter'].sudo().get_param('ali_dindin.din_delete_employee')
if din_delete_employee:
self.delete_din_employee(userid)
return True
@api.model
def delete_din_employee(self, userid):
"""删除钉钉用户"""
url = self.env['ali.dindin.system.conf'].search([('key', '=', 'user_delete')]).value
token = self.env['ali.dindin.system.conf'].search([('key', '=', 'token')]).value
data = {
'userid': userid, # userid
}
try:
result = requests.get(url="{}{}".format(url, token), params=data, timeout=20)
result = json.loads(result.text)
logging.info("user_delete:{}".format(result))
if result.get('errcode') != 0:
raise UserError('删除钉钉用户时发生错误,详情为:{}'.format(result.get('errmsg')))
except ReadTimeout:
raise UserError("上传员工至钉钉超时!")
# Triggered by the "sync employees" button on the employee list and kanban views; callbacks reuse this method as well
@api.model
def synchronous_dingding_employee(self):
"""同步钉钉部门员工列表"""
logging.info("同步钉钉部门员工列表")
url = self.env['ali.dindin.system.conf'].search([('key', '=', 'user_listbypage')]).value
token = self.env['ali.dindin.system.conf'].search([('key', '=', 'token')]).value
# fetch all departments that already have a DingTalk id
departments = self.env['hr.department'].sudo().search([('din_id', '!=', '')])
for department in departments:
emp_offset = 0
emp_size = 100
result_state = dict()
while True:
logging.info(">>>开始获取{}部门的员工".format(department.name))
data = {
'access_token': token,
'department_id': department[0].din_id,
'offset': emp_offset,
'size': emp_size,
}
result_state = self.get_dingding_employees(department, url, data)
if result_state.get('has_more'):
emp_offset = emp_offset + 1
else:
break
if not result_state.get('state'):
return result_state
return {'state': True}
@api.model
def get_dingding_employees(self, department, url, data):
result = requests.get(url=url, params=data, timeout=15)
result = json.loads(result.text)
if result.get('errcode') == 0:
for user in result.get('userlist'):
data = {
'name': user.get('name'), # employee name
'din_id': user.get('userid'), # DingTalk user id
'din_unionid': user.get('unionid'), # DingTalk unionid (unique identifier)
'mobile_phone': user.get('mobile'), # mobile number
'work_phone': user.get('tel'), # extension number
'work_location': user.get('workPlace'), # office address
'notes': user.get('remark'), # notes
'job_title': user.get('position'), # position
'work_email': user.get('email'), # email
'din_jobnumber': user.get('jobnumber'), # job number
'department_id': department[0].id, # department
}
if user.get('hiredDate'):
time_stamp = self.get_time_stamp(user.get('hiredDate'))
data.update({
'din_hiredDate': time_stamp, # hire date
})
employee = self.env['hr.employee'].search([('din_id', '=', user.get('userid'))])
if employee:
employee.sudo().write(data)
else:
self.env['hr.employee'].sudo().create(data)
return {'state': True, 'has_more': result.get('hasMore')}
else:
logging.info(">>>获取部门员工失败,原因为:{}".format(result.get('errmsg')))
return {'state': False, 'msg': "获取部门员工失败,原因为:{}".format(result.get('errmsg'))}
@api.model
def get_time_stamp(self, timeNum):
"""
Convert a 13-digit (millisecond) timestamp into a formatted local time string
:param timeNum:
:return:
"""
timeStamp = float(timeNum / 1000)
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
return otherStyleTime
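# Hedged example: get_time_stamp(1546300800000) -> "2019-01-01 08:00:00" on a server
# in UTC+8; the exact string depends on the local timezone.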
# Unused, but kept because views created by the first release still reference this model
class DinDinSynchronousEmployee(models.TransientModel):
_name = 'dindin.synchronous.employee'
_description = "同步钉钉部门员工功能模型"
|
23,298 | 42b6ac4c9062b6f2aefeaeaade92ec486f8dfe7b | import argparse
from chess_cheater.predictor import Predictor
parser = argparse.ArgumentParser(description="Chess cheater")
parser.add_argument("pos_arg", type=str, help="the cheaters name")
if __name__ == "__main__":
args = parser.parse_args()
cheater = args.pos_arg
for prediction in Predictor().get_predictions(cheater):
print(
f"best move in game {prediction.game} vs {prediction.opponent}: {prediction.best_move}"
)
|
23,299 | ee6321cf819a02d1b465c117380633d88c32f350 | # pylint:disable=C0111,C0103
def movie_duration_buckets(db):
pass
def longest_movies_by_director(db, first_letter):
pass
def top_3_longest(db, first_letter):
pass
|