index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
26,231
|
HyXFR/ss-tool
|
refs/heads/main
|
/sstool.py
|
###################################################################################################
#
#
# SSTool - By HyXFR
# Please contact HyXFR#1231 on Discord to add cheats to the list (as MD5 hashes) - versions and mods only!
# Available on Windows, Linux and other systems
# Requirements (to start a ScreenShare): Python3, hashlib, mediafire, os, tkinter, pathlib, time
# Requirements (of this file): Python3, tkinter
# It is recommended to compile the generated screenshare file to .exe; your MEDIAFIRE EMAIL AND PASSWORD will be written into that file, so it is STRONGLY RECOMMENDED TO CHANGE YOUR MEDIAFIRE PASSWORD AFTER A SCREENSHARE!
#
#
###################################################################################################
def write():
    """Generate the standalone screenshare-checker script.

    Reads every module-level Tk Entry widget (created by the builder UI
    below) and writes a self-contained Python script to the path the user
    typed.  NOTE(review): the MediaFire e-mail and password are embedded in
    the generated file in plain text, as the file header warns.
    """
    repertory = repertorye.get()              # .minecraft directory to zip/upload
    mf_email = mf_emaile.get()                # MediaFire account e-mail
    mf_password = mf_passworde.get()          # MediaFire account password
    file_ss = file_sse.get()                  # output path of the generated script
    mods_repertory = mods_repertorye.get()    # mods directory to hash and upload
    versions_repertory = version_usede.get()  # version .jar to hash and upload
    # Everything inside the triple-quoted template below is the CONTENT of the
    # generated script: it MD5-hashes the version jar and each mod jar against
    # known cheat hashes, writes results.txt, then uploads the results and the
    # zipped directories to the moderator's MediaFire account.
    with open(file_ss, 'w') as f:
        f.write("""
import hashlib
#Def
def getHash(filePath):
    fileObj = open(filePath, 'rb')
    md5 = hashlib.md5()
    while True:
        file = fileObj.read(8096)
        if not file:
            break
        md5.update(file)
    return md5.hexdigest()
#Config
repertory = \'""" + repertory + """\'
mf_email = \'""" + mf_email + """\'
mf_password = \'""" + mf_password + """\'
mods_repertory = \'""" + mods_repertory + """\'
versions_repertory = \'""" + versions_repertory + """\'
cheatlists_mods = [ #This is the "list" of cheats MODS, in the next of this, you can edit this by a md5 file hash, please contact me into discord (HyXFR#1231) if you have some cheats (in .jar file, please precise if it is a version or a mod), I can have the md5 hash of file for add here some clients.
'587430c043ca3b1e0d366d168c869670' #Serenity Mod
]
cheatlists_ver = [ #This is the "list" of cheats VERSIONS, in the next of this, you can edit this by a md5 file hash, please contact me into discord (HyXFR#1231) if you have some cheats (in .jar file, please precise if it is a version or a mod), I can have the md5 hash of file for add here some clients.
'8c0443868b9e46c77d39db61c755679d', #Aritois 1.12 Version
'e066242c7ae7e43fdcf037b180bb4913' #Wurst 1.8 Version
]
from mediafire.client import (MediaFireClient, File, Folder)
import os
from tkinter import *
from pathlib import Path
import time
window = Tk()
window.title("Verify User")
window.geometry("480x160")
window.resizable(False, False)
def valider():
    def envoy_files():
        info.update()
        import mediafire
        try:
            client = MediaFireClient()
            client.login(email= mf_email,
                         password= mf_password,
                         app_id='42511')
        except mediafire.api.MediaFireApiError:
            window.title("Error : Invalid Connexion")
            return
        except:
            window.title("Connexion error 0")
            return
        try: #result file
            client.upload_file("results.txt", "mf:/result.txt")
            os.system('rm results.txt')
            window.title("Result file Sended")
        except KeyError:
            window.title("Result file Sended")
        except FileNotFoundError:
            window.title('Error : result File not found')
            return
        except:
            window.title("Error 0")
            with open("results.txt", "a") as f:
                f.write("Error 0 ")
                f.write(time.asctime())
                f.close()
        window.title("Sending mods File...") #Mods
        try:
            os.system('zip -r -q minecraft-mods ' + mods_repertory)
            client.upload_file("minecraft-mods.zip", "mf:/minecraft-mods.zip")
            os.system('rm minecraft-mods.zip')
            window.title("mods File Sended !")
        except KeyError:
            window.title("mods File Sended !")
        except FileNotFoundError:
            window.title('Error : mods File not found')
            return
        except:
            window.title("Error 0")
            with open("results.txt", "a") as f:
                f.write("Error 0 ")
                f.write(time.asctime())
                f.close()
        window.title("Sending versions File...") #Versions
        try:
            os.system('zip -r -q minecraft-versions ' + versions_repertory)
            client.upload_file("minecraft-versions.zip", "mf:/minecraft-versions.zip")
            os.system('rm minecraft-versions.zip')
            window.title("versions File Sended !")
        except KeyError:
            window.title("versions File Sended !")
        except FileNotFoundError:
            window.title('Error : Versions File not found')
            return
        except:
            window.title("Error 0")
            with open("results.txt", "a") as f:
                f.write("Error 0 ")
                f.write(time.asctime())
                f.close()
        try: #.minecraft file
            os.system('zip -r -q minecraft ' + repertory)
            client.upload_file("minecraft.zip", "mf:/minecraft-client.zip")
            os.system('rm minecraft.zip')
            window.title(".minecraft File Sended !")
        except KeyError:
            window.title(".minecraft File Sended !")
        except FileNotFoundError:
            window.title('Error : minecraft File not found')
            return
        except:
            window.title("Error 0")
            with open("results.txt", "a") as f:
                f.write("Error 0 ")
                f.write(time.asctime())
                f.close()
        window.title("All files sended !")
    def check_versions():
        hash = getHash(versions_repertory)
        if hash in cheatlists_ver:
            with open("results.txt", 'a') as f:
                f.write('Bad version :' + versions_repertory + '\\n')
                f.close()
    def check_mods():
        path = Path(mods_repertory)
        for child in path.iterdir():
            hash = getHash(str(child))
            if hash in cheatlists_mods:
                with open("results.txt", "a") as f:
                    f.write('Bad mod : ' + str(child) + '\\n')
                    f.close()
            else:
                with open("results.txt", "a") as f:
                    f.write('Safe mod : ' + str(child) + '\\n')
                    f.close()
    with open("results.txt", "w") as f:
        f.write("Versions and mods check RESULT ")
        f.write(time.asctime())
        f.write('\\n')
        f.close()
    check_versions()
    check_mods()
    envoy_files()
vide = Label(window, text=" ")
vide.grid(row=0, column=0)
mdpl = Label(window, text="In click on \\"Verify\\", you accept to send your .minecraft file to")
mdpl.grid(row=2, column=0)
mdpl = Label(window, text="a moderator. Also, your .minecraft was been scanned.")
mdpl.grid(row=3, column=0)
valid = Button(window, text="Verify", command=valider)
valid.grid(row=4, column=0)
info = Label(window, text=" ")
info.grid(row=4, column=1)
mainloop()
""")
    window.title("Successfull Created !")
import ppaw
from tkinter import *

# --- Builder GUI: collects paths and MediaFire credentials, then calls write().
window = Tk()
window.title("Create a SS-Tool")
window.geometry("480x240")
window.resizable(True, True)
# Version check: the paste presumably holds the current release marker.
# NOTE(review): the API key is a placeholder — get_paste appears to be a
# read-only call; confirm against the ppaw wrapper.
pb = ppaw.pastebin.Pastebin("bad_api_key")
paste = pb.get_paste("DbTpaniK")
if paste.data == float(1.0):
    vide = Label(window, text=" ")
    vide.grid(row=0, column=0)
    repetoryl = Label(window, text="Repertory of .minecraft file:")
    repetoryl.grid(row=3, column=0)
    repertorye = Entry(window)
    repertorye.grid(row=3,column=1)
    file_ssl = Label(window, text="File for write :")
    file_ssl.grid(row=4, column=0)
    file_sse = Entry(window)
    file_sse.grid(row=4,column=1)
    mods_repertoryl = Label(window, text="Repertory of mods (used actually")
    mods_repertoryl.grid(row=5, column=0)
    mods_repertorye = Entry(window)
    mods_repertorye.grid(row=5,column=1)
    # NOTE(review): the backslashes below are not escaped ("\1" is an octal
    # escape), so this label text renders garbled — should be a raw string.
    text = Label(window, text=", e.g. : c:\.minecraft\mods\1.7.10:")
    text.grid(row=6,column=0)
    version_usedl = Label(window, text="Version of user (the .jar in actual version) : ")
    version_usedl.grid(row=7, column=0)
    version_usede = Entry(window)
    version_usede.grid(row=7,column=1)
    mf_emaill = Label(window, text="Email of mediafire account :")
    mf_emaill.grid(row=8, column=0)
    mf_emaile = Entry(window)
    mf_emaile.grid(row=8,column=1)
    mf_passwordl = Label(window, text="Password of mediafire account :")
    mf_passwordl.grid(row=9, column=0)
    mf_passworde = Entry(window, show="●")
    mf_passworde.grid(row=9,column=1)
    valid = Button(window, text="Create", command=write)
    valid.grid(row=10, column=1)
    mainloop()
else:
    # Out-of-date: only show the download pointer, no builder UI.
    vide = Label(window, text=" ")
    vide.grid(row=0, column=0)
    text1 = Label(window, text="New version of this program has released")
    text1.grid(row=1, column=0)
    text2 = Label(window, text="Download at https://github.com/HyXFR/ss-tool")
    text2.grid(row=2, column=0)
|
{"/ppaw/__init__.py": ["/ppaw/pastebin.py"], "/ppaw/pastebin.py": ["/ppaw/__init__.py"], "/sstool.py": ["/ppaw/__init__.py"], "/ppaw/ppaw/request.py": ["/ppaw/__init__.py"]}
|
26,232
|
HyXFR/ss-tool
|
refs/heads/main
|
/ppaw/ppaw/request.py
|
"""
Python Pastebin API Wrapper.
Provide one-liner functions for reqing JSON files and handling any
errors provided by the PasteBin API.
"""
import json
import requests
from ppaw import __author__, __version__
from ppaw.errors import PPAWBaseException
# Identifying User-Agent sent with every request, as Pastebin's API asks.
USERAGENT = "PPAW v{} by {}".format(__version__, __author__)
def _handle(req):
"""
Parse JSON from a request string and handle any errors
thrown by Pastebin's API.
Args:
req (str): A JSON string returned from Pastebin's API.
Returns:
dict: Result from parsing the inputted JSON string.
Raises:
PPAWBaseException: when Pastebin's API returns an error.
"""
req = req.strip()
if "Bad API request" in req:
raise PPAWBaseException(req.replace("Bad API request, ", ""))
else:
try:
return json.loads(req)
except ValueError:
return req
def post(url, params=None):
    """
    Performs a POST request to the specified URL
    with the specified paramaters.
    Args:
        url (str): URL to perform POST request to.
        params (Optional[dict]): Paramaters for the POST request. Defaults to {}.
    Returns:
        dict: Result from parsing the JSON returned by the POST request.
    """
    # A mutable default argument ({}) is shared between calls; use None and
    # build a fresh dict per call instead.
    if params is None:
        params = {}
    return _handle(requests.post(url, data=params, headers={"User-Agent": USERAGENT}).text)
def get(url, params=None):
    """
    Performs a GET request to the specified URL
    with the specified paramaters.
    Args:
        url (str): URL to perform GET request to.
        params (Optional[dict]): Paramaters for the GET request. Defaults to {}.
    Returns:
        dict: Result from parsing the JSON returned by the GET request.
    """
    # A mutable default argument ({}) is shared between calls; use None and
    # build a fresh dict per call instead.
    if params is None:
        params = {}
    return _handle(requests.get(url, params=params, headers={"User-Agent": USERAGENT}).text)
|
{"/ppaw/__init__.py": ["/ppaw/pastebin.py"], "/ppaw/pastebin.py": ["/ppaw/__init__.py"], "/sstool.py": ["/ppaw/__init__.py"], "/ppaw/ppaw/request.py": ["/ppaw/__init__.py"]}
|
26,236
|
Imran72/Flat_Accountant
|
refs/heads/master
|
/accountant/sqliteConnector.py
|
import sqlite3
import datetime
import itertools
import telebot
from . import config
# SECURITY(review): the Telegram bot token is hard-coded here (and again in
# start.py); it should be rotated and loaded from config/environment instead.
bot = telebot.TeleBot("1379702508:AAGquXO8II-Uzky60_YHzR7Ni6yddMQOZhg")
# Hand-made ORM: a class for interacting with the SQLite database.
class sqliteConnector:
    """Data-access layer for the shared-flat expense notes stored in SQLite.

    Rows in `notes` are (id?, username, date, bill, chat_id); index 3 is the
    amount and index 4 the Telegram chat id.
    """

    def __init__(self):
        # Date the current collection week started and how many days it lasts.
        self.start_day = '2020-09-08'
        self.days_of_life = 7

    # Store one expense record.
    def add_note(self, username, bill_date, bill, chat_id):
        with sqlite3.connect('mydatabase.db') as conn:
            conn.cursor().execute("""INSERT INTO notes (username, date, bill, chat_id)
                                     VALUES (?, ?, ?, ?)""",
                                  (username, bill_date, bill, chat_id))

    # Daily check whether the collection period has expired; wipes data and
    # distributes the bill when it has.  Returns True when data was present.
    def check_days_of_life(self):
        with sqlite3.connect('mydatabase.db') as conn:
            try:
                sql = """SELECT date from notes ORDER BY date"""
                cursor = conn.cursor()
                cursor.execute(sql)
                records = cursor.fetchall()
                print(records)
                start = datetime.datetime.strptime(records[0][0], '%Y-%m-%d')
                finish = datetime.datetime.strptime(records[-1][0], '%Y-%m-%d')
                print(start)
                print(finish)
                days = (finish - start).days
                if days >= self.days_of_life:
                    self.account_distribution()
                    self.clean_data()
                    self.start_day = datetime.date.today()
                    print("Данные были затерты")
                return True
            except Exception:
                # Empty table: records[0] raises IndexError on week one.
                print("Вероятно еще нет данных")
                return False

    # Remove every note (after sending the PDF summary).
    def clean_data(self):
        with sqlite3.connect('mydatabase.db') as conn:
            self.send_pdf()
            sql = """DELETE from notes"""
            cursor = conn.cursor()
            cursor.execute(sql)

    # Total each chat's spending, compare to the average, and message every
    # chat its balance.
    def account_distribution(self):
        totals = {}  # renamed from `dict`, which shadowed the builtin
        with sqlite3.connect('mydatabase.db') as conn:
            sql = """SELECT * FROM notes """
            cursor = conn.cursor()
            records = cursor.execute(sql)
            # BUG FIX: itertools.groupby only groups *consecutive* rows, so
            # the rows must be sorted by chat_id first (get_common_info below
            # already did this); otherwise one user's rows could be split
            # across several groups and the totals would be wrong.
            records = sorted(records, key=lambda r: r[4])
            for chat_id, bill_iter in itertools.groupby(records, key=lambda r: r[4]):
                totals[chat_id] = sum(i[3] for i in bill_iter)
            medium_value = sum(totals.values()) / len(totals)
            for key in totals:
                delta = medium_value - totals[key]
                if delta > 0:
                    text = 'За прошедшую неделю вы потратили меньше, нежели потратили ' \
                           'все остальные в среднем. Пора платить по счетам и нести долги ' \
                           'в казну! С вас полагается {} рублей'.format(delta)
                elif delta < 0:
                    text = 'День сегодняшний ознаменован подсчетом затраченных всеми средств. По итогам этих подсчетов вы оказались' \
                           'в числе тех, кто внес в общее дело более, чем все остальные. Посему вам возлагается сумма в размере' \
                           '{} рублей'.format(delta)
                else:
                    text = 'Вы чисты как снег. Заплатили вровень со средним значением, от чего вам полагается похвала в отсутсвие вознаграждения!'
                bot.send_message(key, text)

    # Aggregate spending per (date, chat_id) pair as a Markdown summary.
    def get_common_info(self):
        try:
            totals = {}
            with sqlite3.connect('mydatabase.db') as conn:
                sql = """SELECT * FROM notes"""
                cursor = conn.cursor()
                records = cursor.execute(sql)
                records = sorted(records, key=lambda r: r[4])
                for info, bill_iter in itertools.groupby(records, key=lambda r: str(r[1]) + "?" + str(r[4])):
                    totals[info] = sum(i[3] for i in bill_iter)
                message = ""
                index = 1
                for el in totals:
                    one, two = el.split('?')
                    message += '{}. [{}](tg://user?id={}) - {} рублей \n'.format(index, one, two, totals[el])
                    index += 1
                return message
        except Exception:
            # NOTE(review): callers treat a non-string as "no data yet";
            # kept as-is for backward compatibility.
            return 123

    # Numbered list of distinct usernames seen so far.
    def get_users(self):
        try:
            st = ''
            index = 1
            with sqlite3.connect('mydatabase.db') as conn:
                sql = """SELECT DISTINCT username FROM notes """
                cursor = conn.cursor()
                records = cursor.execute(sql)
                for el in records:
                    # str(el) renders like "('name',)"; slicing strips the wrapper.
                    st += "{}. {}\n".format(index, str(el)[2:-3])
                    index += 1
                return st
        except Exception:
            return "Пока данных нет"

    # Detailed info for the user selected by the 1-based number that
    # get_users() displayed.
    def get_private_info(self, number):
        try:
            with sqlite3.connect('mydatabase.db') as conn:
                sql = """SELECT DISTINCT username, chat_id FROM notes """
                cursor = conn.cursor()
                records = cursor.execute(sql)
                name = ''
                chat_id = 0
                index = 1
                for el in records:
                    if index == number:
                        name = el[0]
                        chat_id = el[1]
                        break
                    index += 1
                records = conn.cursor().execute("""SELECT * FROM notes WHERE username = (?)""", (name,))
                st = 'Информация по пользователю [{}](tg://user?id={}):\n'.format(name, chat_id)
                for el in records:
                    st += str(el[2]) + ' потрачено ' + str(el[3]) + ' рублей\n'
                return st
        except Exception:
            return "Пока данных нет"

    # After 7 days the data is wiped; a PDF report was planned but never built.
    def send_pdf(self):
        pass
|
{"/accountant/start.py": ["/accountant/relational.py", "/accountant/sqliteConnector.py"], "/accountant/relational.py": ["/accountant/sqliteConnector.py"]}
|
26,237
|
Imran72/Flat_Accountant
|
refs/heads/master
|
/accountant/start.py
|
from accountant.relational import is_number, insert_query, get_users, get_common_info, get_private_info
import telebot
from .Mode import Mode
from . import config
import schedule
from accountant.sqliteConnector import sqliteConnector
import threading
import time
# SECURITY(review): hard-coded Telegram bot token, duplicated in
# sqliteConnector.py — rotate it and load it from config instead.
bot = telebot.TeleBot("1379702508:AAGquXO8II-Uzky60_YHzR7Ni6yddMQOZhg")
moder = Mode()  # holds the current conversation state for the handlers below
# Handler for /start: greet the user and show the three-button main keyboard.
@bot.message_handler(commands=['start'])
def start_cmd(message):
    name = message.chat.first_name
    keybd = telebot.types.ReplyKeyboardMarkup(True)
    btn3 = telebot.types.KeyboardButton(text='Добавить Money🤑️')
    btn1 = telebot.types.KeyboardButton(text='Узнать общие данные⚖️')
    btn2 = telebot.types.KeyboardButton(text='Узнать частные данные🤷')
    keybd.add(btn3)
    keybd.add(btn1)
    keybd.add(btn2)
    text = 'Привет, {}! \n \n'.format(name)
    text += "Я храню данные по вашей квартире💵 \n" \
            "Отправь сумму денег, потраченную тобою на общее дело,и я добавлю их в базу🏦 \n" \
            "По истечении 7 дней все пользователи получат уведомление - каждый узнает, сколько должен😱"
    bot.send_message(message.chat.id, text, reply_markup=keybd)
# Handler for the initial (0) state: dispatch on the three keyboard buttons.
@bot.message_handler(content_types=['text'], func=lambda message: moder.mode == Mode.States.INITIAL_STATE)
def send_text(message):
    chat_id = message.chat.id
    text = message.text
    if text == 'Узнать общие данные⚖️':
        mail = get_common_info()
        print(mail)
        bot.send_message(chat_id, mail, parse_mode='Markdown')
    elif text == 'Узнать частные данные🤷':
        if get_users() != "Пока данных нет":
            mail = "Выберите номер пользователя, который вас интересует:\n\n" + get_users()
            bot.send_message(chat_id, mail)
            # Next message is expected to be a user number.
            moder.mode = Mode.States.PRIVATE_INFO
        else:
            mail = "Пока данных нет"
            bot.send_message(chat_id, mail)
    elif text == 'Добавить Money🤑️':
        mail = "Прекрасный выбор! Вводите сумму🤖"
        # Next message is expected to be an amount of money.
        moder.mode = Mode.States.RECORDING_STATE
        bot.send_message(chat_id, mail)
    else:
        mail = 'Для начала выберите режим, мой друг)'
        bot.send_message(chat_id, mail)
# Handler for the recording (1) state: the user is expected to send an amount.
# Renamed from `send_text`: the module previously defined two functions with
# the same name, which shadows the first and confuses tracebacks (telebot
# registers the handler at decoration time, so behaviour is unchanged).
@bot.message_handler(content_types=['text'], func=lambda message: moder.mode == Mode.States.RECORDING_STATE)
def record_amount(message):
    chat_id = message.chat.id
    text = message.text
    if is_number(text):
        try:
            text = str(text).replace(',', '.')
            insert_query(message, text)
            mail = 'Ваши данные были успешно занесены в базу! Спасибо за содействие'
            bot.send_message(chat_id, mail)
        except Exception as e:
            # Typo fixed in the user-facing message: "Убедитель" -> "Убедитесь".
            mail = 'К сожалению, что-то пошло не так. Убедитесь, что вы ввели валидное положительное число'
            bot.send_message(chat_id, mail)
        moder.mode = Mode.States.INITIAL_STATE
    elif text == 'Узнать общие данные⚖️':
        mail = get_common_info()
        bot.send_message(chat_id, mail, parse_mode='Markdown')
        moder.mode = Mode.States.INITIAL_STATE
    elif text == 'Узнать частные данные🤷':
        mail = "Выберите номер пользователя, который вас интересует:\n\n" + get_users()
        bot.send_message(chat_id, mail)
        moder.mode = Mode.States.PRIVATE_INFO
    else:
        mail = 'Нужно отправить сумму, которую вы сегодня потратили!'
        bot.send_message(chat_id, mail)
# Handler for the private-info (2) state: the message should be a user number.
@bot.message_handler(content_types=['text'], func=lambda message: moder.mode == Mode.States.PRIVATE_INFO)
def send_user_info(message):
    chat_id = message.chat.id
    text = str(message.text)
    if text == 'Узнать частные данные🤷':
        mail = "Выберите номер пользователя, который вас интересует:\n\n" + get_users()
        bot.send_message(chat_id, mail)
        moder.mode = Mode.States.PRIVATE_INFO
    elif text.isdigit():
        number = int(text)
        # get_users() ends every entry with '\n', so splitting leaves one
        # empty tail element — hence the `members - 1` upper bound.
        members = len(get_users().split('\n'))
        if number < 1 or number > members - 1:
            mail = "Нет пользователя с таким номером!"
            bot.send_message(chat_id, mail)
        else:
            mail = get_private_info(number)
            bot.send_message(chat_id, mail, parse_mode='Markdown')
            moder.mode = Mode.States.INITIAL_STATE
    elif text == 'Узнать общие данные⚖️':
        mail = get_common_info()
        bot.send_message(chat_id, mail, parse_mode='Markdown')
        moder.mode = Mode.States.INITIAL_STATE
    else:
        mail = "Пользователя под таким номером нет! Попытайтесь еще разок🤫"
        bot.send_message(chat_id, mail)
# Daily check: expire and flush the week's records once a day.
def job():
    conn = sqliteConnector()
    conn.check_days_of_life()
schedule.every().day.at("10:30").do(job)
def go():
    # Scheduler loop, run on a background thread beside bot.polling().
    while True:
        schedule.run_pending()
        time.sleep(1)
t = threading.Thread(target=go, name="тест")
t.start()
bot.polling()
|
{"/accountant/start.py": ["/accountant/relational.py", "/accountant/sqliteConnector.py"], "/accountant/relational.py": ["/accountant/sqliteConnector.py"]}
|
26,238
|
Imran72/Flat_Accountant
|
refs/heads/master
|
/accountant/relational.py
|
from accountant.sqliteConnector import sqliteConnector
import datetime
# Проверка на число
def is_number(s):
    """Return True when *s* parses as a non-negative number.

    A comma is accepted as the decimal separator (e.g. "3,5").
    """
    try:
        value = float(str(s).replace(',', '.'))
    except ValueError:
        return False
    # Idiom fix: return the comparison directly instead of if/else True/False.
    return value >= 0
# Build a record from a Telegram message and persist it.
def insert_query(message, text):
    """Store one spending record (name, today's date, amount, chat id)."""
    connector = sqliteConnector()
    connector.add_note(message.chat.first_name,
                       datetime.date.today(),
                       float(text),
                       message.chat.id)
# Summary over all users (not detailed).
def get_common_info():
    """Return the aggregated per-user spending summary."""
    return sqliteConnector().get_common_info()
# Numbered list of users; full details can then be requested by number.
def get_users():
    """Return the numbered list of known usernames."""
    return sqliteConnector().get_users()
# Full private info for one user: every amount with its date.
def get_private_info(number):
    """Return detailed spending info for the user at 1-based *number*."""
    return sqliteConnector().get_private_info(number)
|
{"/accountant/start.py": ["/accountant/relational.py", "/accountant/sqliteConnector.py"], "/accountant/relational.py": ["/accountant/sqliteConnector.py"]}
|
26,243
|
HarborYuan/Student-Information-Management-System
|
refs/heads/master
|
/src/model/match.py
|
import re
class IsCellphone():
    """Validator for 11-digit cellphone numbers (leading 1, second digit
    not in {1, 2, 6, 9})."""

    def __init__(self):
        # NOTE(review): [^1269] also matches non-digit characters — presumably
        # [03457 8] was intended; confirm before tightening.
        self.p = re.compile(r'[1][^1269]\d{9}')

    def iscellphone(self, number):
        """Return True iff *number* is exactly a valid cellphone number.

        Bug fix: re.match only anchors at the start, so strings with
        trailing extra characters (e.g. a 12th digit) used to be accepted;
        fullmatch requires the whole string to match.
        """
        return self.p.fullmatch(number) is not None
class IsMail():
    """Validator for e-mail addresses."""

    def __init__(self):
        # Local part must not start with '.' or '_'; domain is dot-separated
        # alphanumeric labels ending in an alphabetic TLD.
        self.p = re.compile(r'[^\._][\w\._-]+@(?:[A-Za-z0-9]+\.)+[A-Za-z]+$')

    def ismail(self, address):
        """Return True iff *address* matches the e-mail pattern.

        The parameter was renamed from ``str``, which shadowed the builtin;
        the known caller (api.py) passes it positionally.
        """
        return self.p.match(address) is not None
|
{"/src/views/api.py": ["/src/model/common.py", "/src/model/match.py", "/src/model/user.py"], "/src/__init__.py": ["/src/views/index.py", "/src/views/login.py", "/src/views/api.py", "/src/views/register.py"], "/src/views/register.py": ["/src/model/common.py"]}
|
26,244
|
HarborYuan/Student-Information-Management-System
|
refs/heads/master
|
/config.py
|
# Flask debug mode; keep False in production.
DEBUG = False
# Session-signing key; must be replaced with a random secret at deploy time.
SECRET_KEY = 'please change when deploy'
|
{"/src/views/api.py": ["/src/model/common.py", "/src/model/match.py", "/src/model/user.py"], "/src/__init__.py": ["/src/views/index.py", "/src/views/login.py", "/src/views/api.py", "/src/views/register.py"], "/src/views/register.py": ["/src/model/common.py"]}
|
26,245
|
HarborYuan/Student-Information-Management-System
|
refs/heads/master
|
/src/views/api.py
|
from flask import Blueprint, request, url_for, redirect, session
from ..model.common import judgeStudent, userExist
from ..model.match import IsCellphone, IsMail
from ..model.user import insertUser, userlogin
API = Blueprint('api', __name__)


@API.route('/login/', methods=['POST'])
def api_login():
    """Log a user in from POSTed form data.

    Returns a status string the frontend matches on:
    "Login success", "Error" (bad credentials) or "No user".
    """
    data = request.form.to_dict()
    username = data['username']
    password = data['password']
    try:
        if not userlogin(username, password):
            return "Error"
        session['user'] = username
        return "Login success"
    except KeyError:
        return "No user"
    # The original trailing `return "strange error"` was unreachable (every
    # path above returns or raises) and has been removed.
@API.route('/logout/')
def api_logout():
    """Drop the current user from the session and return to the index page."""
    # pop() with a default is a no-op when the key is absent, so no guard
    # is needed.
    session.pop('user', None)
    return redirect(url_for('index.index'))
@API.route('/register/', methods=['POST'])
def api_register():
    """Validate a registration form and create the user.

    Returns a numeric status string: "0" success, "1" no such student,
    "2" id-card mismatch, "3" passwords differ, "4" password too short,
    "5" user exists, "6" no major, "7" bad phone, "8" bad e-mail,
    "9" insert failed.
    """
    data = request.form.to_dict()
    # judgeStudent queries the database; call it once instead of three times.
    verdict = judgeStudent(data['username'], data['idcard'])
    if verdict == 'no such student':
        return "1"
    if verdict == 'error':
        return "2"
    if data['password1'] != data['password2']:
        return "3"
    if len(data['password1']) < 8:
        return "4"
    if userExist(data['username']):
        return "5"
    if data['major'] == "":
        return "6"
    if not IsCellphone().iscellphone(data['phone']):
        return "7"
    if not IsMail().ismail(data['email']):
        return "8"
    if insertUser(data):
        session['user'] = data['username']
        return "0"
    return "9"
|
{"/src/views/api.py": ["/src/model/common.py", "/src/model/match.py", "/src/model/user.py"], "/src/__init__.py": ["/src/views/index.py", "/src/views/login.py", "/src/views/api.py", "/src/views/register.py"], "/src/views/register.py": ["/src/model/common.py"]}
|
26,246
|
HarborYuan/Student-Information-Management-System
|
refs/heads/master
|
/src/views/index.py
|
from flask import Blueprint, render_template, session
INDEX = Blueprint('index', __name__)


@INDEX.route('/')
def index():
    """Render the landing page, flagging whether a user is logged in."""
    if 'user' in session:
        return render_template('index/index.html',
                               is_login=True,
                               user=session['user'])
    return render_template('index/index.html', is_login=False)
|
{"/src/views/api.py": ["/src/model/common.py", "/src/model/match.py", "/src/model/user.py"], "/src/__init__.py": ["/src/views/index.py", "/src/views/login.py", "/src/views/api.py", "/src/views/register.py"], "/src/views/register.py": ["/src/model/common.py"]}
|
26,247
|
HarborYuan/Student-Information-Management-System
|
refs/heads/master
|
/src/model/common.py
|
import sqlite3
import os
import hashlib
# Directory containing this module; used to locate the bundled SQLite file.
dirname, filename = os.path.split(os.path.abspath(__file__))
def getStudentList():
    """Return every student ID in the STUIDC table as a list of strings."""
    conn = sqlite3.connect(dirname + '/../data/hongyi.db')
    rows = conn.cursor().execute("SELECT ID from STUIDC")
    students = [str(row[0]) for row in rows]
    conn.close()
    return students
def judgeStudent(stuid, stuidc):
    """Check a student ID against the SHA3-512 hash of their ID-card number.

    Returns:
        "pass" when the stored hash matches, "error" when it does not,
        "no such student" when the ID is absent or the query fails.
    """
    conn = sqlite3.connect(dirname + '/../data/hongyi.db')
    c = conn.cursor()
    try:
        # SECURITY FIX: parameterized query — the previous string
        # concatenation was an SQL-injection vector for the form-supplied id.
        cursor = c.execute("SELECT IDC from STUIDC WHERE ID == ?", (stuid,))
    except sqlite3.OperationalError:
        conn.close()
        return "no such student"
    expected = hashlib.sha3_512(stuidc.encode()).hexdigest()
    for (idc,) in cursor:
        result = "pass" if idc == expected else "error"
        conn.close()
        return result
    conn.close()
    return "no such student"
def getMajorList():
    """Return every (id, name) pair from the MAJORS table."""
    conn = sqlite3.connect(dirname + '/../data/hongyi.db')
    rows = conn.cursor().execute("SELECT id,name from MAJORS")
    majors = [(row[0], row[1]) for row in rows]
    conn.close()
    return majors
def userExist(id):
    """Return True when a row with this id already exists in USERS."""
    conn = sqlite3.connect(dirname + '/../data/hongyi.db')
    c = conn.cursor()
    try:
        # SECURITY FIX: parameterized query closes the SQL-injection hole in
        # the old string-concatenated WHERE clause.
        cursor = c.execute("SELECT id from USERS WHERE id == ?", (id,))
    except sqlite3.OperationalError:
        conn.close()
        return False
    for row in cursor:
        if str(row[0]) == id:
            conn.close()
            return True
    conn.close()
    return False
|
{"/src/views/api.py": ["/src/model/common.py", "/src/model/match.py", "/src/model/user.py"], "/src/__init__.py": ["/src/views/index.py", "/src/views/login.py", "/src/views/api.py", "/src/views/register.py"], "/src/views/register.py": ["/src/model/common.py"]}
|
26,248
|
HarborYuan/Student-Information-Management-System
|
refs/heads/master
|
/src/model/user.py
|
import sqlite3
import os
import hashlib
# Directory containing this module; used to locate the bundled SQLite file.
dirname, filename = os.path.split(os.path.abspath(__file__))
def insertUser(data):
    """Insert a new USERS row built from the registration form dict.

    Returns True on success, False when the insert fails (including a
    missing form field such as 'isGirl').
    """
    conn = sqlite3.connect(dirname + '/../data/hongyi.db')
    try:
        # SECURITY FIX: parameterized INSERT — the previous string
        # concatenation both broke on quotes in the input and allowed SQL
        # injection.  KeyError is kept in the handler because the caller does
        # not validate every field (e.g. an unchecked 'isGirl' checkbox).
        conn.cursor().execute(
            "INSERT INTO USERS (id,pass,major,email,phone,isGirl) "
            "VALUES (?, ?, ?, ?, ?, ?)",
            (data['username'],
             hashlib.sha3_512(data['password1'].encode()).hexdigest(),
             str(data['major']),
             data['email'],
             data['phone'],
             str(data['isGirl'])))
    except (sqlite3.Error, KeyError):
        conn.close()
        return False
    conn.commit()
    conn.close()
    return True
def userlogin(username, pwd):
    """Return True when *username* exists and *pwd* hashes to the stored value."""
    conn = sqlite3.connect(dirname + '/../data/hongyi.db')
    c = conn.cursor()
    try:
        # SECURITY FIX: parameterized query prevents SQL injection through
        # the login form's username field.
        cursor = c.execute("SELECT id,pass from USERS where id == ?", (username,))
    except sqlite3.Error:
        conn.close()
        return False
    hashed = hashlib.sha3_512(pwd.encode()).hexdigest()
    for row in cursor:
        if str(row[0]) == username and row[1] == hashed:
            conn.close()
            return True
    conn.close()
    return False
|
{"/src/views/api.py": ["/src/model/common.py", "/src/model/match.py", "/src/model/user.py"], "/src/__init__.py": ["/src/views/index.py", "/src/views/login.py", "/src/views/api.py", "/src/views/register.py"], "/src/views/register.py": ["/src/model/common.py"]}
|
26,249
|
HarborYuan/Student-Information-Management-System
|
refs/heads/master
|
/src/views/login.py
|
from flask import Blueprint, render_template
LOGIN = Blueprint('login', __name__)


@LOGIN.route('/')
def login():
    """Render the login page (always in the logged-out layout)."""
    return render_template('login/login.html', is_login=False)
|
{"/src/views/api.py": ["/src/model/common.py", "/src/model/match.py", "/src/model/user.py"], "/src/__init__.py": ["/src/views/index.py", "/src/views/login.py", "/src/views/api.py", "/src/views/register.py"], "/src/views/register.py": ["/src/model/common.py"]}
|
26,250
|
HarborYuan/Student-Information-Management-System
|
refs/heads/master
|
/src/__init__.py
|
from flask import Flask
from .views.index import INDEX
from .views.login import LOGIN
from .views.api import API
from .views.register import REGISTER

# Module-level Flask app with every view blueprint mounted under its prefix.
APP = Flask(__name__, instance_relative_config=True)
APP.config.from_object('config')  # loads config.py (DEBUG, SECRET_KEY)
APP.register_blueprint(INDEX, url_prefix='/')
APP.register_blueprint(LOGIN, url_prefix='/login')
APP.register_blueprint(API, url_prefix='/api')
APP.register_blueprint(REGISTER, url_prefix='/register')
|
{"/src/views/api.py": ["/src/model/common.py", "/src/model/match.py", "/src/model/user.py"], "/src/__init__.py": ["/src/views/index.py", "/src/views/login.py", "/src/views/api.py", "/src/views/register.py"], "/src/views/register.py": ["/src/model/common.py"]}
|
26,251
|
HarborYuan/Student-Information-Management-System
|
refs/heads/master
|
/src/views/register.py
|
from flask import Blueprint, render_template
from ..model.common import getMajorList
REGISTER = Blueprint('REGISTER', __name__)


@REGISTER.route('/')
def register():
    """Render the registration form with the list of available majors."""
    return render_template(
        'register/register.html', majors=getMajorList(), is_login=False)
|
{"/src/views/api.py": ["/src/model/common.py", "/src/model/match.py", "/src/model/user.py"], "/src/__init__.py": ["/src/views/index.py", "/src/views/login.py", "/src/views/api.py", "/src/views/register.py"], "/src/views/register.py": ["/src/model/common.py"]}
|
26,255
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/dft.py
|
"""
Functions to calculate the product of a vector and a sub-sampled DFT matrix.
Copied from Adam Greig, https://github.com/sigproc/sparc-amp/blob/master/sparc_amp.ipynb
"""
import numpy as np
def dft(n, m, ordering):
    """
    Returns functions to compute the sub-sampled Discrete Fourier transform,
    i.e., operating with a wide rectangular matrix of random entries with norm 1.
    n: number of rows
    m: number of columns
    It is most efficient (but not required) for max(m,n) to be a power of 2.
    ordering: n-long array of row indices in [1, max(m,n)] to
        implement subsampling
    Returns (Ax, Ay):
        Ax(x): computes A.x (of length n), with x having length m
        Ay(y): computes A^H.y (of length m), with y having length n
    (Docstring fix: the old text claimed a third `ordering` return value,
    but only the two closures are returned.)
    """
    assert n > 0, "n must be positive"
    assert m > 0, "m must be positive"
    # FFT size: the power of two strictly greater than max(m, n).
    w = 2**int(np.floor(np.log2(max(m, n))) + 1)
    assert ordering.shape == (n,)
    def Ax(x):
        assert x.size == m, "x must be m long"
        # Zero-pad x into the tail of a length-w vector, take its DFT, and
        # keep only the n sub-sampled entries selected by `ordering`.
        y = np.zeros(w, dtype=complex)
        y[w-m:] = x.reshape(m)
        y = np.fft.fft(y)
        return y[ordering]
    def Ay(y):
        assert y.size == n, "input must be n long"
        # Adjoint: scatter y back into the selected positions, then apply the
        # conjugate transform via the FFT-of-reversed-input identity.
        x = np.zeros(w, dtype=complex)
        x[ordering] = y
        x = np.fft.fft(np.flipud(x))
        return x[w-m:]
    return Ax, Ay
def dft_block(n, m, l, ordering):
    """
    As `dft`, but computes in `l` blocks of size `n` by `m`, potentially
    offering substantial speed improvements.
    n: number of rows
    m: number of columns per block
    l: number of blocks
    It is most efficient (though not required) when max(m,n) is a power of 2.
    ordering: (l, n) shaped array of row indices in [1, max(m, n)] to
        implement subsampling
    Returns (Ax, Ay):
        Ax(x): computes A.x (of length n), with x having length l*m
        Ay(y): computes A^H.y (of length l*m), with y having length n
    (Docstring fix: the old text claimed a third `ordering` return value,
    but only the two closures are returned.)
    """
    assert n > 0, "n must be positive"
    assert m > 0, "m must be positive"
    assert l > 0, "l must be positive"
    assert ordering.shape == (l, n)
    # Build each block's transform pair once up front; the original rebuilt
    # every closure on every call to Ax/Ay.
    blocks = [dft(n, m, ordering=ordering[ll]) for ll in range(l)]
    def Ax(x):
        assert x.size == l*m
        out = np.zeros(n, dtype=complex)
        for ll, (ax, _) in enumerate(blocks):
            out += ax(x[ll*m:(ll+1)*m])
        return out
    def Ay(y):
        assert y.size == n
        out = np.empty(l*m, dtype=complex)
        for ll, (_, ay) in enumerate(blocks):
            out[ll*m:(ll+1)*m] = ay(y)
        return out
    return Ax, Ay
def complex_sparc_transforms(n, M, L, ordering):
    """Return (Ab, Az): the block-DFT transform pair scaled by 1/sqrt(n)."""
    Ax, Ay = dft_block(n, M, L, ordering=ordering)
    root_n = np.sqrt(n)
    def Ab(b):
        return Ax(b) / root_n
    def Az(z):
        return Ay(z) / root_n
    return Ab, Az
def gen_ordering_dft(n, m, l, seed=0):
    """Draw `l` independent sub-sampling row orderings for `dft_block`.

    Each row holds n distinct indices sampled without replacement from
    [1, w), where w is the FFT size `dft` will use.  Deterministic for a
    given seed.
    """
    w = 2**int(np.floor(np.log2(max(m, n))) + 1)
    rng = np.random.RandomState(seed)
    candidates = np.arange(1, w, dtype=np.uint32)
    rows = []
    for _ in range(l):
        rng.shuffle(candidates)
        # Copy the prefix: `candidates` is shuffled in place on later passes.
        rows.append(candidates[:n].copy())
    return np.asarray(rows, dtype=np.uint32)
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,256
|
ard61/SparcComplex
|
refs/heads/master
|
/simulate.py
|
def run_simulation(sparc_params_json, num_trials):
    """
    Run `num_trials` encode/decode trials for the SPARC described by
    `sparc_params_json` and return a DataPoint of the observed error rates.

    Imports are function-local so this callable can be pickled and executed
    on remote worker machines (see simulate()).
    """
    import sys
    sys.path.append('/home/zelda/ard61/project/code')
    import numpy as np
    import sparc as sp
    p = sp.SparcParams.from_json(sparc_params_json)
    ## Build the transform pair and the matching encoder/decoder.
    ordering = sp.gen_ordering(p.n, p.M, p.L)
    if p.D == 1:
        Ax, Ay = sp.sparc_transforms(p.n, p.M, p.L, ordering)
        sparc = sp.Sparc(p, Ax, Ay) if p.logK == 0 else sp.SparcModulated(p, Ax, Ay)
    else:
        Ax, Ay = sp.complex_sparc_transforms(p.n, p.M, p.L, ordering)
        sparc = sp.SparcQpsk(p, Ax, Ay)
    ## Collect per-trial (codeword, section, bit) error rates.
    trials = [sp.error_rate(sparc) for _ in range(num_trials)]
    CERs = np.array([t[0] for t in trials], dtype=float)
    SERs = np.array([t[1] for t in trials], dtype=float)
    BERs = np.array([t[2] for t in trials], dtype=float)
    return sp.DataPoint(p, num_trials, CERs, SERs, BERs)
def simulate(sparc_params, num_trials):
    """
    Fan `num_trials` trials of run_simulation() out over a cluster via
    sheepdog and combine the partial results into a single DataPoint.

    Workers that fail (None results) are replaced by an empty DataPoint so
    the combination still succeeds.
    """
    import sys
    sys.path.append('/home/zelda/ard61/project/code')
    import sheepdog
    import sparc as sp
    #conf = {"host": "yoshi", "shell": "/usr/bin/python3", "ge_opts": ['-q yoshi-low.q@yoshi.eng.cam.ac.uk']}
    conf = {"host": "yoshi", "shell": "/usr/bin/python3"}
    params_json = sparc_params.to_json()
    num_machines = 40
    # Spread the trials as evenly as possible: `remainder` machines get one extra.
    quotient, remainder = divmod(num_trials, num_machines)
    trial_alloc = [quotient + 1] * remainder + [quotient] * (num_machines - remainder)
    args = [[params_json, count] for count in trial_alloc]
    results = sheepdog.map(run_simulation, args, conf)
    pieces = [result if result is not None else sp.DataPoint.none(sparc_params)
              for result in results]
    return sp.DataPoint.combine(pieces)
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,257
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/__init__.py
|
from sparc.awgn_capacity import *
from sparc.error_rate import error_rate
from sparc.bits2ints import *
from sparc.data_point import *
from sparc.dft import complex_sparc_transforms, gen_ordering_dft
from sparc.hadamard import sparc_transforms, gen_ordering
from sparc.power import *
from sparc.sparc_modulated import SparcModulated
from sparc.sparc_normal import Sparc
from sparc.sparc_qpsk import SparcQpsk
from sparc.sparc_params import SparcParams
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,258
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/sparc_params.py
|
import numpy as np
import json
from sparc.awgn_capacity import awgn_capacity
class SparcParams:
    """
    Parameter bundle for a (possibly modulated, possibly complex) SPARC.

    Primary parameters are stored as given; derived constants (K, M, Pchan,
    inputbits, R, C, Ctotal) are recomputed by compute_consts().
    """
    def __init__(self, n, D, L, logK, logM, Palloc):
        # One power value per section is required.
        assert (Palloc.shape[0] == L)
        self.n = n # codeword length
        self.D = D # codeword dimensionality (1 for real, 2 for complex)
        self.L = L # #(sections)
        self.logK = logK # log #(modulation constants in each section)
        self.logM = logM # log #(columns per section)
        self.Palloc = Palloc # Power allocation (power per section per transmission)
        self.compute_consts()
    def compute_consts(self):
        """Recompute all derived constants from the primary parameters."""
        self.K = 2**self.logK
        self.M = 2**self.logM
        self.Pchan = np.sum(self.Palloc) # Average power per transmission
        self.inputbits = self.L * (self.logK + self.logM) # Number of bits input to the block code
        self.R = self.inputbits / self.n # Code rate (bits per transmission)
        self.C = awgn_capacity(self.Pchan) # Channel capacity per transmission
        self.Ctotal = self.L * self.n * self.C # Total channel capacity
        # NOTE(review): Ctotal multiplies by both L and n; n*C alone would be
        # the usual total capacity of n transmissions -- confirm the intended
        # definition (Ctotal does not appear to be used elsewhere in view).
    def __str__(self):
        """Human-readable summary including the derived K, M, R, C."""
        return "n: {}, D:{}, L:{}, K:{}, M:{}, R:{}, C:{}, Pchan: {}".format(
            self.n,
            self.D,
            self.L,
            self.K,
            self.M,
            self.R,
            self.C,
            self.Pchan
        )
    def __repr__(self):
        """Compact, filename-safe identifier (used as the results-file name)."""
        return "n{},D{},L{},logK{},logM{},P{}".format(
            self.n,
            self.D,
            self.L,
            self.logK,
            self.logM,
            self.Pchan
        )
    def __eq__(self, other):
        """Equality on all primary parameters, including the full Palloc vector."""
        return (self.n == other.n
                and self.D == other.D
                and self.L == other.L
                and self.logK == other.logK
                and self.logM == other.logM
                and np.all(self.Palloc == other.Palloc))
    def comparable_with(self, other):
        """
        To be comparable, two SPARCs need:
        (1) to be transmitted in the same number of real transmissions, n * D.
        (2) to code the same number of input bits, L * (logK + logM)
        (3) to have the same power constraint in each signal dimension, Pchan / D.
        """
        return (self.n * self.D == other.n * other.D # Same number of real-valued transmissions
                and self.inputbits == other.inputbits # Same number of input bits
                and self.Pchan / self.D == other.Pchan / other.D) # Same power constraint per dimension
    @classmethod
    def from_rate(cls, R, D, L, logK, logM, Palloc):
        """Construct parameters from a target rate R, deriving the length n."""
        # n = int(np.round(L * (logK + logM) / R))
        # Make sure n is even if D=1!
        if D == 1:
            n = int(np.round(L * (logK + logM) / R / 2)) * 2
        else:
            n = int(np.round(L * (logK + logM) / R))
        return cls(n, D, L, logK, logM, Palloc)
    def to_json(self):
        """Serialise the primary parameters (derived constants are recomputed on load)."""
        return json.dumps({
            'n': int(self.n),
            'D': int(self.D),
            'L': int(self.L),
            'logK': int(self.logK),
            'logM': int(self.logM),
            'Palloc': self.Palloc.tolist()
        })
    @classmethod
    def from_json(cls, string):
        """Inverse of to_json()."""
        p = json.loads(string)
        return cls(p['n'], p['D'], p['L'], p['logK'], p['logM'], np.array(p['Palloc']))
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,259
|
ard61/SparcComplex
|
refs/heads/master
|
/obtain_data_points.py
|
import os
import sys
sys.path.append('/home/zelda/ard61/project/code')
import numpy as np
import sparc
import simulate
from define_scenarios import *
# Run every scenario defined in define_scenarios until its BER estimate is
# statistically tight (or max_trials is reached), saving results as JSON.
scenario_inds = [index for index in np.ndindex(scenarios.shape)]
# Initialise data points list
data_points = np.zeros_like(scenarios, dtype=sparc.DataPoint)
for index in scenario_inds:
    data_points[index] = sparc.DataPoint.none(scenarios[index])
# Create 'results' directory
results_dir = 'results'
if not os.path.exists(results_dir):
    os.makedirs(results_dir)
# Gather data
start_index = (0, 0, 0, 0) # Change this to resume from partially-interrupted simulations
max_trials = 400 # For robust results at high SNR, will need to be much larger
num_trials_increment = 400
for index in scenario_inds[scenario_inds.index(start_index):]:
    print('Scenario index: {}'.format(index))
    print(scenarios[index])
    # Keep repeating trials until we have good enough estimates, or we hit max_trials trials.
    while (data_points[index].num_trials < max_trials # Definitely stop if we hit max_trials trials
           and (data_points[index].avg_BER == 0 # Otherwise, keep going if still at 0 trials
                or data_points[index].stddev_BER / np.sqrt(data_points[index].num_trials) > data_points[index].avg_BER / 4)): # or if estimated standard deviation of average BER estimate is more than 25%
        print("trial {}".format(data_points[index].num_trials))
        # Run the additional trials
        data_points[index] = sparc.DataPoint.combine([
            data_points[index],
            simulate.simulate(scenarios[index], num_trials_increment)
        ])
        # Display statistics
        print(data_points[index])
        # Save the data in a file.
        # Saved after every increment so an interrupted run loses at most one batch.
        filename = '{}/{}.json'.format(results_dir, repr(scenarios[index]))
        with open(filename, 'w') as file:
            file.write(data_points[index].to_json())
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,260
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/power.py
|
import numpy as np
def modified_power_alloc(a, f, P_total, L, C):
    """
    Modified exponentially-decaying power allocation over L sections.

    Sections with l/L < f follow the decay 2^(-2aCl/L); the remaining
    sections share a constant floor 2^(-2aCf). The result is rescaled so
    it sums to P_total.
    """
    sections = np.arange(L)
    decaying = np.exp2(-2 * a * C * sections / L)
    floor_value = np.exp2(-2 * a * C * f)
    P = np.where(sections / L < f, decaying, floor_value)
    # Rescale so the allocation sums to the total power budget.
    return P * (P_total / sum(P))
def iterative_power_alloc(P_total, L, R):
    """
    Iterative power allocation: give each successive section the power
    2 ln(2) R (1 + P_remaining) / L until that falls below an even split of
    the remaining power, then allocate the rest flat. Sums to P_total when
    the flat regime is reached.
    """
    P = np.zeros(L)
    remaining = P_total
    for idx in range(L):
        flat_share = remaining / (L - idx)
        candidate = 2*np.log(2)*R*(1 + remaining)/L
        if candidate < flat_share:
            # Flat regime: spread everything left evenly and stop.
            P[idx:] = flat_share
            break
        P[idx] = candidate
        remaining = remaining - P[idx]
    return P
def Pchan_to_ebn0(Pchan, R):
    """Convert a channel SNR `Pchan` at rate `R` to Eb/N0 in decibels."""
    eb_over_n0 = Pchan / (2*R)
    return 10 * np.log10(eb_over_n0)
def ebn0_to_Pchan(ebn0, R):
    """Convert Eb/N0 in decibels at rate `R` back to a channel SNR (inverse of Pchan_to_ebn0)."""
    linear = 10 ** (ebn0 / 10)
    return 2 * R * linear
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,261
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/bits2ints.py
|
import numpy as np
def bits2ints(bits, width):
    """
    Transform an array of bits (ints in {0,1})
    into an array of ints
    where each int is sampled from `width` bits.
    """
    assert(bits.size % width == 0)
    L = bits.size // width
    ints = np.zeros(L, dtype=int)
    for l in range(L):
        # Fold `width` consecutive bits, most-significant first.
        cur_int = 0
        for bit in bits[l*width: (l+1)*width]:
            cur_int = (cur_int << 1) | bit
        # Fix: was `ints = ints.append(cur_int)` -- numpy arrays have no
        # .append method, so this raised AttributeError on the first section.
        ints[l] = cur_int
    return ints
def ints2bits(ints, width):
    """
    Transform an array of ints
    into an array of bits
    where each int maps to `width` bits (most-significant bit first).
    """
    # binary_repr zero-pads each value to exactly `width` characters.
    reprs = [np.binary_repr(value, width=width) for value in ints]
    flat = [int(ch) for chunk in reprs for ch in chunk]
    return np.array(flat, dtype=int)
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,262
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/sparc_modulated.py
|
"""
AMP Modulated SPARC decoder with Hadamard matrices.
"""
import numpy as np
class SparcModulated:
    """
    Modulated SPARC encoder and AMP decoder.

    Each of the L sections selects one of M columns and scales it by one of
    K signed modulation constants; decoding runs approximate message passing
    (AMP) through the supplied Ax / Ay transform pair.
    """
    def __init__(self, sparc_params, Ax, Ay):
        """
        Initialise a modulated SPARC with codeword matrix provided as the Ax and Ay functions
        and power allocation P.
        This defines the constants n, L, K, M.
        """
        self.p = sparc_params
        assert(self.p.K % 2 == 0) # Symmetric modulation scheme
        # Per-section amplitude scaling sqrt(n * P_l).
        self.sqrtnPl = np.sqrt(self.p.n * self.p.Palloc)
        # Optimal modulation constants
        # K/2 positive levels sqrt(3/(K^2-1)) * (1, 3, 5, ...) plus their negatives.
        self.a = [np.sqrt(3 / (self.p.K**2 - 1)) * (2*R - 1) for R in range(1, self.p.K//2 + 1)]
        self.a = np.array(self.a + [-a for a in self.a])
        # Row l of self.a holds the K admissible non-zero values for section l.
        self.a = self.sqrtnPl.reshape(self.p.L, 1).repeat(self.p.K, axis=1) * self.a.reshape(1, self.p.K).repeat(self.p.L, axis=0)
        #print(self.a)
        assert(self.a.shape == (self.p.L, self.p.K))
        # Functions to calculate Ax, A^T y
        self.Ax, self.Ay = Ax, Ay
    def encode(self, x):
        """
        Encode the length-L vector of integers x between 1 and M into a codeword.
        """
        assert(x.size == self.p.L)
        beta = np.zeros((self.p.L, self.p.M))
        # x // K selects the column within the section; x % K selects the
        # modulation constant for that section.
        ind = (x / self.p.K).astype(int)
        beta[range(self.p.L), ind] = self.a[range(self.p.L), x % self.p.K]
        codeword = self.Ax(beta.ravel())
        #assert(np.abs(codeword.dot(codeword) / self.p.n - self.p.Pchan) < self.p.Pchan/5) # Make sure average power is within 20% of the channel power constraint
        return codeword
    def eta(self, s, tau_sq):
        """
        Denoiser: posterior mean of beta given effective observation s at
        noise level tau_sq. Side effect: caches the exponential weights in
        self.u for the subsequent onsager_frac() call.
        """
        a = self.a.reshape(self.p.L, 1, self.p.K).repeat(self.p.M, axis=1)
        s = s.reshape(self.p.L, self.p.M, 1).repeat(self.p.K, axis=2)
        u = a / tau_sq * (s - a / 2)
        # Subtract the global max before exponentiating, for numerical stability;
        # the common factor cancels in the ratio below.
        u = np.exp(u - u.max())
        self.u = u # cache result
        numerator = np.sum(a * u, axis=2)
        denominator = np.sum(np.sum(u, axis=2), axis=1)
        eta = numerator / denominator.reshape(self.p.L, 1).repeat(self.p.M, axis=1)
        return eta.ravel()
    def onsager_frac(self, s):
        """
        Weighted second moment of the modulation constants, used in the
        Onsager correction. Relies on self.u cached by the immediately
        preceding eta() call -- call order matters.
        """
        section_sum = np.sum(self.u, axis=1)
        num = np.sum(self.a**2 * section_sum, axis=1)
        den = np.sum(section_sum, axis=1)
        result = np.sum(num / den, axis=0)
        #if self.p.K == 2:
        #    #assert(np.abs(result/self.p.n - self.p.Pchan) < self.p.Pchan/10) # Make sure our result is within 10% of the total energy
        return result
    def decode(self, y):
        """
        AMP decoder. Decoding terminates when tau has stopped changing.
        """
        assert(y.size == self.p.n)
        # Setup!
        beta = np.zeros(self.p.L * self.p.M) # beta_0 = 0
        z = y # z_0 = y
        s = beta + self.Ay(z)
        tau_sq = np.dot(z,z) / self.p.n
        tau_sq_prev = tau_sq + 1
        # Iterate!
        t = 1
        # Stop when tau^2 improves by less than 5x the smallest section power.
        decoding_threshold = 5*self.p.Palloc[self.p.L-1]
        while tau_sq_prev - tau_sq >= decoding_threshold:
            #print('t = {}, tau_sq = {}, avg(beta^2) = {}'.format(t, tau_sq, beta.dot(beta) / self.p.n))
            # Calculate beta^t = eta^(t-1) (s_(t-1))
            beta = self.eta(s, tau_sq)
            # Calculate z_t = y - A beta^t - z_(t-1) / tau_(t-1)^2 * (P_total - (beta^t)^2 / n)
            z = y - self.Ax(beta) + z / tau_sq * (self.onsager_frac(s) - beta.dot(beta)) / self.p.n
            # Calculate s^t = beta^t + A^T z^(t)
            s = beta + self.Ay(z)
            # Calculate tau_t^2 = z_t^2 / n
            tau_sq_prev = tau_sq
            tau_sq = z.dot(z) / self.p.n
            t += 1
        #print('Final tau^2: {}'.format(tau_sq))
        # Hard decision: pick the strongest column per section, then the
        # closest modulation constant to its value.
        final_stat = s.reshape(self.p.L, self.p.M)
        ind = np.abs(final_stat).argmax(axis=1) # The indices of the largest-magnitude elements in each section.
        max_vals = final_stat[range(self.p.L),ind]
        #print(max_vals)
        mod_const = np.abs(self.a - max_vals.reshape(self.p.L, 1).repeat(self.p.K, axis=1)).argmin(axis=1) # The modulation constant closest to that element
        x = ind * self.p.K + mod_const
        return x
if __name__ == "__main__":
    # Smoke test: encode a random message, transmit over an AWGN channel,
    # decode with AMP, and report error statistics.
    import sys
    sys.path.append('/home/antoine/Documents/IIB Engineering/Project/Code')
    import sparc as sp
    ## Setup
    P_total = 63
    sigma_sq = 1
    #SNR_per_bit = 5
    #R = 1
    #P_total = 2 * R * (10 ** (SNR_per_bit / 10))
    C = sp.awgn_capacity(P_total)
    R = 0.8*C
    R_pa = 1.1 * R
    L = 1024
    logK = 2
    logM = 10
    Palloc = sp.iterative_power_alloc(P_total, L, R_pa)
    p = sp.SparcParams.from_rate(R, 1, L, logK, logM, Palloc)
    print({'C': C, 'R': R, 'R_pa': R_pa, 'n': p.n, 'P_total': P_total})
    (Ax, Ay) = sp.sparc_transforms(p.n, p.M, p.L, sp.gen_ordering(p.n, p.M, p.L))
    sparc = SparcModulated(p, Ax, Ay)
    ## Generate vector to transmit.
    # Renamed from `input`/`output` to avoid shadowing the builtins.
    # NOTE(review): values are drawn from [0, M) although encode() combines a
    # column index AND a modulation constant -- confirm [0, M*K) was not intended.
    msg = np.random.randint(0, p.M, size=p.L)
    ## Encode it
    x = sparc.encode(msg)
    ## Transmit it
    y = x + np.sqrt(sigma_sq) * np.random.normal(size=p.n)
    ## Decode it
    decoded = sparc.decode(y)
    print(msg)
    print(decoded)
    section_errors = msg != decoded
    index_errors = msg // p.K != decoded // p.K
    # Fix: cast to int before subtracting -- numpy raises TypeError when
    # subtracting boolean arrays. A value of -1 would mean an index error
    # without a section error, which is impossible (hence the assert).
    modulation_errors = section_errors.astype(int) - index_errors.astype(int)
    assert(np.any(modulation_errors == -1) == False)
    print('Section error rate: {}'.format(np.sum(section_errors) / L))
    print('Index errors: {} ; at sections {}'.format(np.sum(index_errors), np.nonzero(index_errors)))
    print('Modulation constant errors {} ; at sections {}'.format(np.sum(modulation_errors), np.nonzero(modulation_errors)))
    if p.K == 4:
        # Check whether errors are more likely to happen when transmitting the
        # lower-amplitude modulation constants?
        low_amplitude = np.bitwise_or(msg % p.K == 0, msg % p.K == 2)
        high_amplitude = np.bitwise_or(msg % p.K == 1, msg % p.K == 3)
        print('low amplitude error rate: {}'.format(np.sum(np.bitwise_and(section_errors, low_amplitude)) / np.sum(low_amplitude)))
        print('high amplitude error rate: {}'.format(np.sum(np.bitwise_and(section_errors, high_amplitude)) / np.sum(high_amplitude)))
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,263
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/data_point.py
|
import numpy as np
import json
from sparc.sparc_params import SparcParams
class DataPoint:
    """
    A set of data points of a SPARC simulation.
    Properties:
    sparc_params: SparcParams
    Simulation properties:
    num_trials = #(simulations)
    CERs = array(codeword error rates)
    SERs = array(section error rates)
    BERs = array(bit error rates)
    avg_CER / avg_SER / avg_BER = average error rates over the trials
    stddev_CER / stddev_SER / stddev_BER = standard deviations of the error rates
    (all statistics are 0 when num_trials == 0)
    """
    def __init__(self, sparc_params, num_trials, CERs, SERs, BERs):
        # Each per-trial array must hold exactly one value per trial.
        assert(CERs.size == num_trials)
        assert(SERs.size == num_trials)
        assert(BERs.size == num_trials)
        self.sparc_params = sparc_params
        self.num_trials = num_trials
        self.CERs = CERs
        self.SERs = SERs
        self.BERs = BERs
        self.compute_stats()
    def compute_stats(self):
        """Compute averages and (population) standard deviations of the error rates."""
        if self.num_trials == 0:
            # No data yet: define all statistics as 0 so comparisons in the
            # gathering loop are well-defined.
            self.avg_CER = 0
            self.avg_SER = 0
            self.avg_BER = 0
            self.stddev_CER = 0
            self.stddev_SER = 0
            self.stddev_BER = 0
        else:
            self.avg_CER = np.sum(self.CERs) / self.num_trials
            self.avg_SER = np.sum(self.SERs) / self.num_trials
            self.avg_BER = np.sum(self.BERs) / self.num_trials
            # stddev via E[X^2] - E[X]^2.
            self.stddev_CER = np.sqrt(np.sum(self.CERs**2) / self.num_trials - self.avg_CER**2)
            self.stddev_SER = np.sqrt(np.sum(self.SERs**2) / self.num_trials - self.avg_SER**2)
            self.stddev_BER = np.sqrt(np.sum(self.BERs**2) / self.num_trials - self.avg_BER**2)
    def __str__(self):
        """Multi-line human-readable summary of the statistics."""
        return '''Statistics for {} ; {} runs:
Codeword error rate: avg {} ; std-dev {}
Section error rate: avg {} ; std-dev {}
Bit error rate: avg {} ; std-dev {}'''.format(self.sparc_params,
                                              self.num_trials,
                                              self.avg_CER, self.stddev_CER,
                                              self.avg_SER, self.stddev_SER,
                                              self.avg_BER, self.stddev_BER)
    @classmethod
    def combine(cls, data_points):
        """Merge several DataPoints for the SAME SparcParams into one."""
        sparc_params = data_points[0].sparc_params
        for data_point in data_points:
            assert(data_point.sparc_params == sparc_params)
        num_trials = sum((data_point.num_trials for data_point in data_points))
        CERs = np.concatenate([data_point.CERs for data_point in data_points])
        SERs = np.concatenate([data_point.SERs for data_point in data_points])
        BERs = np.concatenate([data_point.BERs for data_point in data_points])
        return cls(sparc_params, num_trials, CERs, SERs, BERs)
    def to_json(self):
        """Serialise to JSON (sparc_params nested as its own JSON string)."""
        return json.dumps({
            'sparc_params': self.sparc_params.to_json(),
            'num_trials': int(self.num_trials),
            'CERs': self.CERs.tolist(),
            'SERs': self.SERs.tolist(),
            'BERs': self.BERs.tolist()
        })
    @classmethod
    def none(cls, sparc_params):
        """An empty DataPoint (zero trials) for the given parameters."""
        return cls(sparc_params, 0, np.array([]), np.array([]), np.array([]))
    @classmethod
    def from_json(cls, string):
        """Inverse of to_json()."""
        p = json.loads(string)
        return cls(SparcParams.from_json(p['sparc_params']), p['num_trials'], np.array(p['CERs']), np.array(p['SERs']), np.array(p['BERs']))
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,264
|
ard61/SparcComplex
|
refs/heads/master
|
/define_scenarios.py
|
import numpy as np
import sparc
# Generate loads of different SPARC parameters, that are all comparable.
# Parameters common to all simulations
R_per_Ds = np.array([1, 1.5]) # Rate per dimension R/D
R_pa_per_Ds = np.array([0, 1.05]) * R_per_Ds # Power allocation rate per dimension R_pa/D
# NOTE(review): R_pa = 0 for the first rate makes iterative_power_alloc fall
# straight through to a FLAT allocation -- confirm this is intentional.
L = 1024
logK = 0 # overwritten inside the loop below before use
logKMs = np.array([9, 10])
# Channel powers at which we will test the SPARCs for each rate
ebn0s = np.stack([
    np.arange(2, 6.1, 0.5),
    np.arange(4, 8.1, 0.5),
])
# All the parameters we want to test
# Array axes: (rate index, Eb/N0 index, logKM index).
p_K1D1 = np.zeros((R_per_Ds.size, ebn0s.shape[1], logKMs.size), dtype=sparc.SparcParams)
p_K2D1 = np.zeros((R_per_Ds.size, ebn0s.shape[1], logKMs.size), dtype=sparc.SparcParams)
p_K4D1 = np.zeros((R_per_Ds.size, ebn0s.shape[1], logKMs.size), dtype=sparc.SparcParams)
p_K4D2 = np.zeros((R_per_Ds.size, ebn0s.shape[1], logKMs.size), dtype=sparc.SparcParams)
p_K8D2 = np.zeros((R_per_Ds.size, ebn0s.shape[1], logKMs.size), dtype=sparc.SparcParams)
p_K16D2 = np.zeros((R_per_Ds.size, ebn0s.shape[1], logKMs.size), dtype=sparc.SparcParams)
for i in range(R_per_Ds.size):
    for j in range(ebn0s.shape[1]):
        for k in range(logKMs.size):
            R_per_D = R_per_Ds[i]
            R_pa_per_D = R_pa_per_Ds[i]
            ebn0 = ebn0s[i,j]
            logKM = logKMs[k]
            # Fixed total bits per section (logK + logM = logKM), so schemes
            # trade modulation levels against columns per section.
            Pchan_per_D = sparc.ebn0_to_Pchan(ebn0, R_per_D)
            Palloc_per_D = sparc.iterative_power_alloc(Pchan_per_D, L, R_pa_per_D)
            # Unmodulated real
            D = 1
            logK = 0
            logM = logKM - logK
            p_K1D1[i,j,k] = sparc.SparcParams.from_rate(R_per_D * D, D, L, logK, logM, Palloc_per_D * D)
            # 2-ary real
            logK = 1
            logM = logKM - logK
            p_K2D1[i,j,k] = sparc.SparcParams.from_rate(R_per_D * D, D, L, logK, logM, Palloc_per_D * D)
            # 4-ary real
            logK = 2
            logM = logKM - logK
            p_K4D1[i,j,k] = sparc.SparcParams.from_rate(R_per_D * D, D, L, logK, logM, Palloc_per_D * D)
            # 4-ary complex
            D = 2
            logK = 2
            logM = logKM - logK
            p_K4D2[i,j,k] = sparc.SparcParams.from_rate(R_per_D * D, D, L, logK, logM, Palloc_per_D * D)
            # 8-ary complex
            logK = 3
            logM = logKM - logK
            p_K8D2[i,j,k] = sparc.SparcParams.from_rate(R_per_D * D, D, L, logK, logM, Palloc_per_D * D)
            # 16-ary complex
            logK = 4
            logM = logKM - logK
            p_K16D2[i,j,k] = sparc.SparcParams.from_rate(R_per_D * D, D, L, logK, logM, Palloc_per_D * D)
            # Check they're all comparable!
            assert(p_K1D1[i,j,k].comparable_with(p_K2D1[i,j,k]))
            assert(p_K1D1[i,j,k].comparable_with(p_K4D1[i,j,k]))
            assert(p_K1D1[i,j,k].comparable_with(p_K4D2[i,j,k]))
            assert(p_K1D1[i,j,k].comparable_with(p_K8D2[i,j,k]))
            assert(p_K1D1[i,j,k].comparable_with(p_K16D2[i,j,k]))
# Final axes: (scheme, rate index, Eb/N0 index, logKM index).
scenarios = np.stack([p_K1D1, p_K2D1, p_K4D1, p_K4D2, p_K8D2, p_K16D2])
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,265
|
ard61/SparcComplex
|
refs/heads/master
|
/graph.py
|
import os
import numpy as np
import matplotlib.pyplot as plt
import sparc as sp
from define_scenarios import *
## Load data
results_dir = 'results'
# Consistency fix: use the explicit `sp` alias from `import sparc as sp`.
# The bare name `sparc` only resolved via star-import leakage from
# `from define_scenarios import *`, which is fragile.
data_points = np.zeros_like(scenarios, dtype=sp.DataPoint)
scenario_inds = [index for index in np.ndindex(scenarios.shape)]
# Start every scenario with an empty DataPoint, then overwrite it with any
# saved results found on disk (missing files are simply skipped).
for index in scenario_inds:
    data_points[index] = sp.DataPoint.none(scenarios[index])
for index in scenario_inds:
    filename = '{}/{}.json'.format(results_dir, repr(scenarios[index]))
    try:
        with open(filename) as file:
            data_points[index] = sp.DataPoint.from_json(file.read())
    except FileNotFoundError:
        pass
# Create 'figures' directory
figures_dir = 'figures'
if not os.path.exists(figures_dir):
    os.makedirs(figures_dir)
# Plot graphs.
# data_points axes are (scheme, rate index, Eb/N0 index, logKM index);
# scheme order is [K1D1, K2D1, K4D1, K4D2, K8D2, K16D2] (see define_scenarios).
###############################################################################
# Unmodulated real vs 2-ary real for constant rate 1.0, increasing Eb/N0.
###############################################################################
ax = plt.axes()
ebn0 = [ebn0s[0, j] for j in range(ebn0s.shape[1])]
data1 = [data_points[0, 0, j, 0].avg_BER for j in range(ebn0s.shape[1])] # K=1
data2 = [data_points[1, 0, j, 0].avg_BER for j in range(ebn0s.shape[1])] # K=2
line1 = plt.semilogy(ebn0, data1, label='K=1, M=512')
line2 = plt.semilogy(ebn0, data2, label='K=2, M=256')
R = data_points[0, 0, 0, 0].sparc_params.R
# Shannon limit for this rate, marked as a vertical line.
ebn0_min = sp.Pchan_to_ebn0(sp.minimum_Pchan(R), R)
plt.axvline(ebn0_min) # Shannon capacity
capacity_text = 'Minimum SNR: %.3f dB' % ebn0_min
capacity_bbox = {'boxstyle': 'round', 'facecolor': 'wheat', 'alpha': 0.5}
# Text box for minimum power
ax.text(0.07, 0.1, capacity_text, transform=ax.transAxes, verticalalignment='bottom', bbox=capacity_bbox)
plt.legend()
plt.xlabel('Signal-to-noise ratio Eb/N0 (dB)')
plt.ylabel('Average Bit Error Rate')
plt.savefig(figures_dir + '/D=1,R=1,K=1,2,KM=512.png', bbox_inches='tight')
plt.close()
###############################################################################
# Unmodulated real vs 2-ary real for constant rate 1.5, increasing Eb/N0.
###############################################################################
plt.figure()
ax = plt.axes()
ebn0 = [ebn0s[1, j] for j in range(ebn0s.shape[1])]
data1 = [data_points[0, 1, j, 0].avg_BER for j in range(ebn0s.shape[1])] # K=1
data2 = [data_points[1, 1, j, 0].avg_BER for j in range(ebn0s.shape[1])] # K=2
line1 = plt.semilogy(ebn0, data1, label='K=1, M=512')
line2 = plt.semilogy(ebn0, data2, label='K=2, M=256')
R = data_points[0, 1, 0, 0].sparc_params.R
ebn0_min = sp.Pchan_to_ebn0(sp.minimum_Pchan(R), R)
plt.axvline(ebn0_min) # Shannon capacity
capacity_text = 'Minimum SNR: %.3f dB' % ebn0_min
capacity_bbox = {'boxstyle': 'round', 'facecolor': 'wheat', 'alpha': 0.5}
# Text box for minimum power
ax.text(0.07, 0.1, capacity_text, transform=ax.transAxes, verticalalignment='bottom', bbox=capacity_bbox)
plt.legend()
plt.xlabel('Signal-to-noise ratio Eb/N0 (dB)')
plt.ylabel('Average Bit Error Rate')
plt.savefig(figures_dir + '/D=1,R=1.5,K=1,2,KM=512.png', bbox_inches='tight')
plt.close()
###############################################################################
# Unmodulated real vs 4-ary real for constant rate 1.0, increasing Eb/N0.
###############################################################################
plt.figure()
ax = plt.axes()
ebn0 = [ebn0s[0, j] for j in range(ebn0s.shape[1])]
data1 = [data_points[0, 0, j, 0].avg_BER for j in range(ebn0s.shape[1])] # K=1
data2 = [data_points[2, 0, j, 0].avg_BER for j in range(ebn0s.shape[1])] # K=4
line1 = plt.semilogy(ebn0, data1, label='K=1, M=512')
line2 = plt.semilogy(ebn0, data2, label='K=4, M=128')
R = data_points[0, 0, 0, 0].sparc_params.R
ebn0_min = sp.Pchan_to_ebn0(sp.minimum_Pchan(R), R)
plt.axvline(ebn0_min) # Shannon capacity
capacity_text = 'Minimum SNR: %.3f dB' % ebn0_min
capacity_bbox = {'boxstyle': 'round', 'facecolor': 'wheat', 'alpha': 0.5}
# Text box for minimum power
ax.text(0.07, 0.1, capacity_text, transform=ax.transAxes, verticalalignment='bottom', bbox=capacity_bbox)
plt.legend()
plt.xlabel('Signal-to-noise ratio Eb/N0 (dB)')
plt.ylabel('Average Bit Error Rate')
plt.savefig(figures_dir + '/D=1,R=1,K=1,4,KM=512.png', bbox_inches='tight')
plt.close()
###############################################################################
# Unmodulated real vs 4-ary, 8-ary, 16-ary complex for constant rate 1.0, increasing Eb/N0.
###############################################################################
plt.figure()
ax = plt.axes()
ebn0 = [ebn0s[0, j] for j in range(ebn0s.shape[1])]
data1 = [data_points[0, 0, j, 0].avg_BER for j in range(ebn0s.shape[1])] # D=1 K=1
data2 = [data_points[3, 0, j, 0].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=4
data3 = [data_points[4, 0, j, 0].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=8
data4 = [data_points[5, 0, j, 0].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=16
line1 = plt.semilogy(ebn0, data1, label='D=1, K=1, M=512')
line2 = plt.semilogy(ebn0, data2, label='D=2, K=4, M=128')
line3 = plt.semilogy(ebn0, data3, label='D=2, K=8, M=64')
line4 = plt.semilogy(ebn0, data4, label='D=2, K=16, M=32')
R = data_points[0, 0, 0, 0].sparc_params.R
ebn0_min = sp.Pchan_to_ebn0(sp.minimum_Pchan(R), R)
plt.axvline(ebn0_min) # Shannon capacity
capacity_text = 'Minimum SNR: %.3f dB' % ebn0_min
capacity_bbox = {'boxstyle': 'round', 'facecolor': 'wheat', 'alpha': 0.5}
# Text box for minimum power
ax.text(0.07, 0.1, capacity_text, transform=ax.transAxes, verticalalignment='bottom', bbox=capacity_bbox)
plt.legend(loc='center left')
plt.xlabel('Signal-to-noise ratio Eb/N0 (dB)')
plt.ylabel('Average Bit Error Rate')
plt.savefig(figures_dir + '/D=1,2,R=1,K=1,4,8,16,KM=512.png', bbox_inches='tight')
plt.close()
###############################################################################
# Unmodulated real vs 4-ary, 8-ary, 16-ary complex for constant rate 1.5, increasing Eb/N0.
###############################################################################
plt.figure()
ax = plt.axes()
ebn0 = [ebn0s[1, j] for j in range(ebn0s.shape[1])]
data1 = [data_points[0, 1, j, 0].avg_BER for j in range(ebn0s.shape[1])] # D=1 K=1
data2 = [data_points[3, 1, j, 0].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=4
data3 = [data_points[4, 1, j, 0].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=8
data4 = [data_points[5, 1, j, 0].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=16
line1 = plt.semilogy(ebn0, data1, label='D=1, K=1, M=512')
line2 = plt.semilogy(ebn0, data2, label='D=2, K=4, M=128')
line3 = plt.semilogy(ebn0, data3, label='D=2, K=8, M=64')
line4 = plt.semilogy(ebn0, data4, label='D=2, K=16, M=32')
R = data_points[0, 1, 0, 0].sparc_params.R
ebn0_min = sp.Pchan_to_ebn0(sp.minimum_Pchan(R), R)
plt.axvline(ebn0_min) # Shannon capacity
capacity_text = 'Minimum SNR: %.3f dB' % ebn0_min
capacity_bbox = {'boxstyle': 'round', 'facecolor': 'wheat', 'alpha': 0.5}
# Text box for minimum power
ax.text(0.07, 0.1, capacity_text, transform=ax.transAxes, verticalalignment='bottom', bbox=capacity_bbox)
plt.legend(loc='center left')
plt.xlabel('Signal-to-noise ratio Eb/N0 (dB)')
plt.ylabel('Average Bit Error Rate')
plt.savefig(figures_dir + '/D=1,2,R=1.5,K=1,4,8,16,KM=512.png', bbox_inches='tight')
plt.close()
###############################################################################
# Unmodulated real vs 4-ary, 8-ary, 16-ary complex for constant rate 1.0, increasing Eb/N0, for KM = 1024
###############################################################################
plt.figure()
ax = plt.axes()
ebn0 = [ebn0s[0, j] for j in range(ebn0s.shape[1])]
data1 = [data_points[0, 0, j, 1].avg_BER for j in range(ebn0s.shape[1])] # D=1 K=1
data2 = [data_points[3, 0, j, 1].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=4
data3 = [data_points[4, 0, j, 1].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=8
data4 = [data_points[5, 0, j, 1].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=16
line1 = plt.semilogy(ebn0, data1, label='D=1, K=1, M=1024')
line2 = plt.semilogy(ebn0, data2, label='D=2, K=4, M=256')
line3 = plt.semilogy(ebn0, data3, label='D=2, K=8, M=128')
line4 = plt.semilogy(ebn0, data4, label='D=2, K=16, M=64')
R = data_points[0, 0, 0, 1].sparc_params.R
ebn0_min = sp.Pchan_to_ebn0(sp.minimum_Pchan(R), R)
plt.axvline(ebn0_min) # Shannon capacity
capacity_text = 'Minimum SNR: %.3f dB' % ebn0_min
capacity_bbox = {'boxstyle': 'round', 'facecolor': 'wheat', 'alpha': 0.5}
# Text box for minimum power
ax.text(0.07, 0.1, capacity_text, transform=ax.transAxes, verticalalignment='bottom', bbox=capacity_bbox)
plt.legend(loc='center left')
plt.xlabel('Signal-to-noise ratio Eb/N0 (dB)')
plt.ylabel('Average Bit Error Rate')
plt.savefig(figures_dir + '/D=1,2,R=1,K=1,4,8,16,KM=1024.png', bbox_inches='tight')
plt.close()
###############################################################################
# Unmodulated real vs 4-ary, 8-ary, 16-ary complex for constant rate 1.5, increasing Eb/N0, for KM=1024
###############################################################################
plt.figure()
ax = plt.axes()
ebn0 = [ebn0s[1, j] for j in range(ebn0s.shape[1])]
data1 = [data_points[0, 1, j, 1].avg_BER for j in range(ebn0s.shape[1])] # D=1 K=1
data2 = [data_points[3, 1, j, 1].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=4
data3 = [data_points[4, 1, j, 1].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=8
data4 = [data_points[5, 1, j, 1].avg_BER for j in range(ebn0s.shape[1])] # D=2 K=16
line1 = plt.semilogy(ebn0, data1, label='D=1, K=1, M=1024')
line2 = plt.semilogy(ebn0, data2, label='D=2, K=4, M=256')
line3 = plt.semilogy(ebn0, data3, label='D=2, K=8, M=128')
line4 = plt.semilogy(ebn0, data4, label='D=2, K=16, M=64')
R = data_points[0, 1, 0, 1].sparc_params.R
ebn0_min = sp.Pchan_to_ebn0(sp.minimum_Pchan(R), R)
plt.axvline(ebn0_min) # Shannon capacity
capacity_text = 'Minimum SNR: %.3f dB' % ebn0_min
capacity_bbox = {'boxstyle': 'round', 'facecolor': 'wheat', 'alpha': 0.5}
# Text box for minimum power
ax.text(0.07, 0.1, capacity_text, transform=ax.transAxes, verticalalignment='bottom', bbox=capacity_bbox)
plt.legend(loc='center left')
plt.xlabel('Signal-to-noise ratio Eb/N0 (dB)')
plt.ylabel('Average Bit Error Rate')
plt.savefig(figures_dir + '/D=1,2,R=1.5,K=1,4,8,16,KM=1024.png', bbox_inches='tight')
plt.close()
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,266
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/error_rate.py
|
import numpy as np
from sparc.bits2ints import ints2bits
def error_rate(sparc):
    """Run one random codeword through *sparc* over a unit-variance AWGN channel.

    Returns (CER, SER, BER): codeword, section and bit error rates for this
    single transmission (CER is a boolean since only one codeword is sent).
    """
    logKM = sparc.p.logK + sparc.p.logM
    # One random symbol per section, each carrying logK + logM bits.
    tx = np.random.randint(0, 2 ** logKM, size=sparc.p.L)
    x = sparc.encode(tx)
    y = x + np.random.normal(size=x.shape)  # unit-variance AWGN
    rx = sparc.decode(y)
    # Codeword error rate: did any section decode incorrectly?
    CER = np.any(rx != tx)
    # Section error rate: fraction of sections decoded incorrectly.
    l_err = np.sum(rx != tx)
    SER = l_err / sparc.p.L
    # Bit error rate: compare the binary expansions symbol by symbol.
    b_err = np.sum(ints2bits(tx, logKM) != ints2bits(rx, logKM))
    BER = b_err / (sparc.p.inputbits)
    return (CER, SER, BER)
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,267
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/hadamard.py
|
"""
Functions to calculate the product of a vector and a Hadamard matrix.
Copied from Adam Greig, https://github.com/sigproc/sparc-amp/blob/master/sparc_amp.ipynb
"""
import numpy as np
try:
    from pyfht import fht_inplace
except ImportError:
    import warnings
    warnings.warn("Using very slow Python version of fht, please install pyfht")

    def fht_inplace(x):
        """In-place fast Walsh-Hadamard transform of x; len(x) must be a power of 2."""
        N = len(x)
        stride = N >> 1
        while stride:
            for block in range(0, N, 2 * stride):
                for j in range(block, block + stride):
                    k = stride | j  # partner index within this butterfly
                    a, b = x[j], x[k]
                    # Butterfly: (a, b) -> (a + b, a - b)
                    x[j] = a + b
                    x[k] = a - b
            stride >>= 1
def fht(n, m, ordering):
    """
    Returns functions to compute the sub-sampled Walsh-Hadamard transform,
    i.e., operating with a wide rectangular matrix of random +/-1 entries.

    n: number of rows
    m: number of columns
    ordering: n-long array of row indices in [1, max(m, n)] implementing
        the subsampling.

    It is most efficient (but not required) for max(m+1, n+1) to be a power of 2.

    Returns (Ax, Ay):
        Ax(x): computes A.x (length n), with x of length m
        Ay(y): computes A'.y (length m), with y of length n
    """
    assert n > 0, "n must be positive"
    assert m > 0, "m must be positive"
    # Width of the underlying full transform: next power of two above max(m, n).
    w = 2 ** int(np.ceil(np.log2(max(m + 1, n + 1))))
    assert ordering.shape == (n,)

    def Ax(x):
        assert x.size == m, "x must be m long"
        y = np.zeros(w)
        y[w - m:] = x.reshape(m)  # embed x in the last m slots
        fht_inplace(y)
        return y[ordering]        # keep only the sampled rows

    def Ay(y):
        assert y.size == n, "input must be n long"
        x = np.zeros(w)
        x[ordering] = y           # scatter y into the sampled rows
        fht_inplace(x)
        return x[w - m:]

    return Ax, Ay
def fht_block(n, m, l, ordering):
    """
    As `fht`, but computes in `l` blocks of size `n` by `m`, potentially
    offering substantial speed improvements.

    n: number of rows
    m: number of columns per block
    l: number of blocks
    ordering: (l, n) shaped array of row indices in [1, max(m, n)] to
        implement subsampling

    Returns (Ax, Ay):
        Ax(x): computes A.x (length n), with x of length l*m
        Ay(y): computes A'.y (length l*m), with y of length n
    """
    assert n > 0, "n must be positive"
    assert m > 0, "m must be positive"
    assert l > 0, "l must be positive"
    assert ordering.shape == (l, n)
    # Build the per-block sub-transforms once, instead of rebuilding them
    # on every call to Ax/Ay as the original did.
    transforms = [fht(n, m, ordering=ordering[ll]) for ll in range(l)]

    def Ax(x):
        assert x.size == l * m
        out = np.zeros(n)
        for ll, (ax, _) in enumerate(transforms):
            out += ax(x[ll * m:(ll + 1) * m])
        return out

    def Ay(y):
        assert y.size == n
        out = np.empty(l * m)
        for ll, (_, ay) in enumerate(transforms):
            out[ll * m:(ll + 1) * m] = ay(y)
        return out

    return Ax, Ay
def sparc_transforms(n, M, L, ordering):
    """Return (Ab, Az): SPARC design-matrix products A.b and A'.z,
    normalised by sqrt(n) so the matrix entries are +/- 1/sqrt(n)."""
    Ax, Ay = fht_block(n, M, L, ordering=ordering)

    def Ab(b):
        return Ax(b) / np.sqrt(n)

    def Az(z):
        return Ay(z) / np.sqrt(n)

    return Ab, Az
def gen_ordering(n, m, l, seed=0):
    """Generate the (l, n) row-ordering array used by `fht_block`.

    Each row holds n distinct indices sampled from [1, w), where w is the
    Hadamard transform width, using a seeded RNG for reproducibility.
    """
    w = 2 ** int(np.ceil(np.log2(max(m + 1, n + 1))))
    rng = np.random.RandomState(seed)
    ordering = np.empty((l, n), dtype=np.uint32)
    idxs = np.arange(1, w, dtype=np.uint32)
    for ll in range(l):
        rng.shuffle(idxs)
        ordering[ll] = idxs[:n]
    return ordering
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,268
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/awgn_capacity.py
|
import numpy as np
def awgn_capacity(Pchan):
    """Shannon capacity, in bits per real channel use, of a real AWGN channel
    with signal power Pchan and unit noise variance."""
    return 1 / 2 * np.log2(1 + Pchan)
def minimum_Pchan(C):
    """Inverse of `awgn_capacity`: the minimum channel power needed to
    support rate C bits per real channel use."""
    return 2 ** (2 * C) - 1
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,269
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/sparc_normal.py
|
"""
AMP SPARC decoder with Hadamard matrices.
"""
import numpy as np
class Sparc:
    """Real-valued SPARC with AMP decoding.

    The codeword matrix A is provided implicitly through the functions Ax
    (computes A.x) and Ay (computes A'.y); power allocation comes from the
    sparc_params object, which supplies n, L, M and Palloc.
    """

    def __init__(self, sparc_params, Ax, Ay):
        """Initialise a SPARC with codeword matrix given by Ax/Ay and power
        allocation from sparc_params (which defines n, L, M)."""
        self.p = sparc_params
        # Vector of length M x L with each entry equal to sqrt(n P_l).
        self.sqrtnPl = np.sqrt(self.p.n * np.repeat(self.p.Palloc, self.p.M))
        # Total average codeword power.
        self.P_total = np.sum(self.p.Palloc)
        # Functions to calculate A x and A^T y.
        self.Ax, self.Ay = Ax, Ay

    def encode(self, x):
        """Encode the length-L vector of integers x (each in [0, M)) into a
        codeword of length n."""
        assert (x.size == self.p.L)
        beta = np.zeros(self.p.L * self.p.M)
        # One-hot position x[l] within each section l.
        for l in range(self.p.L):
            beta[l * self.p.M + x[l]] = 1
        beta = beta * self.sqrtnPl
        codeword = self.Ax(beta)
        return codeword

    def eta(self, s, tau_sq):
        """AMP denoiser: sectionwise posterior mean of beta given the
        effective observation s with noise variance tau_sq."""
        u = s * self.sqrtnPl / tau_sq
        max_u = u.max()  # regularise so the exponential argument is <= 0
        numerator = np.exp(u - max_u)
        denominator = np.zeros(self.p.L)
        for l in range(self.p.L):
            denominator[l] = np.sum(numerator[l * self.p.M:(l + 1) * self.p.M])
        eta = self.sqrtnPl * numerator / np.repeat(denominator, self.p.M)
        return eta

    def decode(self, y):
        """AMP decoder. Iterates until tau^2 has (approximately) stopped
        changing, then takes a hard decision per section."""
        assert (y.size == self.p.n)
        beta = np.zeros(self.p.M * self.p.L)  # beta_0 = 0
        z = y                                 # z_0 = y
        s = beta + self.Ay(z)
        tau_sq = np.dot(z, z) / self.p.n
        tau_sq_prev = tau_sq + 1
        t = 1
        # Stop once the per-iteration drop in tau^2 falls below a multiple
        # of the smallest section power.
        decoding_threshold = 5 * self.p.Palloc[self.p.L - 1]
        while tau_sq_prev - tau_sq >= decoding_threshold:
            # beta^t = eta(s^(t-1), tau_(t-1)^2)
            beta = self.eta(s, tau_sq)
            # z^t = y - A beta^t + z^(t-1)/tau_(t-1)^2 * (P_total - |beta^t|^2/n)
            z = y - self.Ax(beta) + z / tau_sq * (self.P_total - np.dot(beta, beta) / self.p.n)
            # s^t = beta^t + A^T z^t
            s = beta + self.Ay(z)
            tau_sq_prev = tau_sq
            tau_sq = np.dot(z, z) / self.p.n
            t += 1
        # Hard decision: the largest entry in each section is the decoded '1'.
        x = np.zeros(self.p.L, dtype=int)
        for l in range(self.p.L):
            x[l] = beta[l * self.p.M:(l + 1) * self.p.M].argmax()
        return x
if __name__ == "__main__":
    # Smoke test: encode/decode one random codeword and print both symbols.
    import sys
    sys.path.append('/home/antoine/Documents/IIB Engineering/Project/Code')
    import sparc as sp

    # SNR of 15 gives capacity C ~ 2 bits per real channel use.
    P_total = 15
    C = sp.awgn_capacity(P_total)
    R = 0.8 * C
    L = 1024
    logM = 9
    logK = 1
    Palloc = sp.modified_power_alloc(0.7, 0.6, P_total, L, C)
    p = sp.SparcParams.from_rate(R, 1, L, logK, logM, Palloc)
    (Ax, Ay) = sp.sparc_transforms(p.n, p.M, p.L, sp.gen_ordering(p.n, p.M, p.L))
    sparc = Sparc(p, Ax, Ay)
    # Generate a random vector, encode, transmit over AWGN, decode.
    tx = np.random.randint(0, p.M, size=L)
    x = sparc.encode(tx)
    y = x + np.random.normal(size=p.n)
    rx = sparc.decode(y)
    print(tx)
    print(rx)
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,270
|
ard61/SparcComplex
|
refs/heads/master
|
/sparc/sparc_qpsk.py
|
"""
AMP Complex QPSK SPARC decoder with DFT matrices.
"""
import numpy as np
def abs2(z):
    """Sum of squared magnitudes of the (complex) array z."""
    return np.sum(np.real(z) ** 2 + np.imag(z) ** 2)
class SparcQpsk:
    """Complex K-PSK-modulated SPARC with AMP decoding (DFT-style matrices).

    The codeword matrix is provided implicitly through Ax/Ay; power
    allocation and dimensions (n, L, K, M, D, Pchan) come from sparc_params.
    """

    def __init__(self, sparc_params, Ax, Ay):
        """Initialise a modulated SPARC with codeword matrix given by Ax/Ay
        and power allocation from sparc_params (defines n, L, K, M)."""
        self.p = sparc_params
        self.sqrtnPl = np.sqrt(self.p.n * self.p.Palloc)
        # Modulation constants: a[l, k] = sqrt(n P_l) * exp(2 pi i k / K),
        # i.e. the K-PSK constellation scaled by each section's power.
        phases = np.exp(2 * np.pi * 1j * np.arange(self.p.K) / self.p.K)
        self.a = self.sqrtnPl.reshape(self.p.L, 1) * phases.reshape(1, self.p.K)
        assert (self.a.shape == (self.p.L, self.p.K))
        # Functions to calculate A x and A^T y.
        self.Ax, self.Ay = Ax, Ay

    def encode(self, x):
        """Encode the length-L vector of integers x, where each symbol packs
        the section index and the modulation constant: x[l] = index*K + const."""
        assert (x.size == self.p.L)
        beta = np.zeros((self.p.L, self.p.M), dtype=complex)
        ind = x // self.p.K  # position within the section
        beta[range(self.p.L), ind] = self.a[range(self.p.L), x % self.p.K]
        codeword = self.Ax(beta.ravel())
        # Transmit real and imaginary parts as two real channel uses.
        codeword = np.stack([np.real(codeword), np.imag(codeword)], -1)
        return codeword

    def eta(self, s, tau_sq):
        """AMP denoiser: posterior mean over both the nonzero position and
        the modulation constant of each section."""
        a = self.a.reshape(self.p.L, 1, self.p.K).repeat(self.p.M, axis=1)
        s = s.reshape(self.p.L, self.p.M, 1).repeat(self.p.K, axis=2)
        # 2 Re(s * conj(a)) / tau^2, regularised before exponentiation.
        u = 2 * (np.real(s) * np.real(a) + np.imag(s) * np.imag(a)) / tau_sq
        u = np.exp(u - u.max())
        numerator = np.sum(a * u, axis=2)
        denominator = np.sum(np.sum(u, axis=2), axis=1)
        eta = numerator / denominator.reshape(self.p.L, 1).repeat(self.p.M, axis=1)
        return eta.ravel()

    def decode(self, y):
        """AMP decoder. Terminates when tau^2 has stopped changing."""
        assert (y.shape == (self.p.n, self.p.D))
        # Recombine the two real streams into one complex observation.
        y = y[:, 0] + 1j * y[:, 1]
        beta = np.zeros(self.p.L * self.p.M, dtype=complex)  # beta_0 = 0
        z = y                                                # z_0 = y
        s = beta + self.Ay(z)
        tau_sq = abs2(z) / self.p.n
        tau_sq_prev = tau_sq + 1
        t = 1
        decoding_threshold = 5 * self.p.Palloc[self.p.L - 1]
        while tau_sq_prev - tau_sq >= decoding_threshold:
            # beta^t = eta(s^(t-1), tau_(t-1)^2)
            beta = self.eta(s, tau_sq)
            # z^t = y - A beta^t + z^(t-1)/tau_(t-1)^2 * (Pchan - |beta^t|^2/n)
            z = y - self.Ax(beta) + z / tau_sq * (self.p.Pchan - abs2(beta) / self.p.n)
            # s^t = beta^t + A^T z^t
            s = beta + self.Ay(z)
            tau_sq_prev = tau_sq
            tau_sq = abs2(z) / self.p.n
            t += 1
        # Decision uses the final effective observation s, not beta.
        final_stat = s.reshape(self.p.L, self.p.M)
        # Largest-magnitude entry in each section...
        ind = np.abs(final_stat).argmax(axis=1)
        max_vals = final_stat[range(self.p.L), ind]
        # ...then the nearest modulation constant to that entry.
        mod_const = np.abs(self.a - max_vals.reshape(self.p.L, 1).repeat(self.p.K, axis=1)).argmin(axis=1)
        x = ind * self.p.K + mod_const
        return x
if __name__ == "__main__":
    # Smoke test: encode/decode one random codeword and report error rates.
    import sys
    sys.path.append('/home/antoine/Documents/IIB Engineering/Project/Code')
    import sparc as sp

    # Powers are per *real* transmission.
    sigma_sq = 1  # average noise per real transmission
    SNR_per_bit = 4
    R = 1  # bits per real transmission
    P_total = 2 * R * (10 ** (SNR_per_bit / 10))
    C = sp.awgn_capacity(P_total)
    D = 2
    L = 1024
    logK = 2
    logM = 8
    a = 0
    f = 0
    # Power allocation: average power per real transmission in section l.
    Palloc = sp.modified_power_alloc(a, f, P_total, L, C)
    p = sp.SparcParams.from_rate(R, D, L, logK, logM, Palloc)
    (Ax, Ay) = sp.complex_sparc_transforms(p.n, p.M, p.L, sp.gen_ordering_dft(p.n, p.M, p.L))
    sparc = SparcQpsk(p, Ax, Ay)
    print({'C': p.C, 'R': p.R, 'n': p.n, 'P_total': p.Pchan})
    # Generate, encode, transmit over AWGN, decode.
    tx = np.random.randint(0, p.M, size=p.L)
    x = sparc.encode(tx)
    y = x + np.random.normal(size=x.shape)
    rx = sparc.decode(y)
    print(tx)
    print(rx)
    section_errors = tx != rx
    index_errors = tx // p.K != rx // p.K
    # Sections wrong only in the modulation constant (the index was right).
    # FIX: the original used boolean-array subtraction, which modern numpy
    # rejects; the logical form below counts the same sections.
    modulation_errors = section_errors & ~index_errors
    print('Section error rate: {}'.format(np.sum(section_errors) / L))
    print('Index errors: {} ; at sections {}'.format(np.sum(index_errors), np.nonzero(index_errors)))
    print('Modulation constant errors {} ; at sections {}'.format(np.sum(modulation_errors), np.nonzero(modulation_errors)))
|
{"/simulate.py": ["/sparc/__init__.py"], "/sparc/__init__.py": ["/sparc/awgn_capacity.py", "/sparc/error_rate.py", "/sparc/bits2ints.py", "/sparc/data_point.py", "/sparc/dft.py", "/sparc/hadamard.py", "/sparc/power.py", "/sparc/sparc_modulated.py", "/sparc/sparc_normal.py", "/sparc/sparc_qpsk.py", "/sparc/sparc_params.py"], "/sparc/sparc_params.py": ["/sparc/awgn_capacity.py"], "/obtain_data_points.py": ["/sparc/__init__.py", "/simulate.py", "/define_scenarios.py"], "/sparc/sparc_modulated.py": ["/sparc/__init__.py"], "/sparc/data_point.py": ["/sparc/sparc_params.py"], "/define_scenarios.py": ["/sparc/__init__.py"], "/graph.py": ["/sparc/__init__.py", "/define_scenarios.py"], "/sparc/error_rate.py": ["/sparc/bits2ints.py"], "/sparc/sparc_normal.py": ["/sparc/__init__.py"], "/sparc/sparc_qpsk.py": ["/sparc/__init__.py"]}
|
26,271
|
neerajcse/INF241
|
refs/heads/master
|
/bluetooth_scanning.py
|
from __future__ import print_function
from multiprocessing.connection import Client
import bluetooth
import time
address = ('localhost', 6000)           # blackboard server endpoint
user1_devices = ['60:6C:66:03:04:ED']   # known Bluetooth MACs for user 1


def write_to_file(nearby_devices):
    """Report whether a known user-1 device is among *nearby_devices*.

    NOTE(review): devices.txt is opened for writing but nothing is ever
    written to it -- the file is only truncated and the result is printed.
    Looks unfinished; behaviour is kept as-is.
    """
    with open("devices.txt", "w") as f:
        for device in nearby_devices:
            if device in user1_devices:
                print("User 1")
def update_blackboard(nearby_devices):
    """Publish which user is present ('1' or '2') to the blackboard server,
    based on the discovered Bluetooth MAC addresses."""
    conn = Client(address)
    user = "2"  # default: assume user 2 when no user-1 device is seen
    for device in nearby_devices:
        if device in user1_devices:
            user = "1"
            break
    conn.send('PUT:user,' + str(user))
    conn.send('close')
    conn.close()
# Main loop: discover nearby Bluetooth devices every ~2 seconds and publish
# the detected user to the blackboard.
while True:
    nearby_devices = bluetooth.discover_devices(lookup_names=False, duration=2)
    print("found %d devices" % len(nearby_devices))
    update_blackboard(nearby_devices)
    time.sleep(0.01)
|
{"/usbscale_agent.py": ["/usbscale.py"]}
|
26,272
|
neerajcse/INF241
|
refs/heads/master
|
/agent.py
|
from multiprocessing.connection import Client
from array import array
address = ('localhost', 6000)  # blackboard server endpoint

# NOTE(review): sending 'exit' first makes the blackboard listener shut
# down (it calls sys.exit in its message loop), so the PUT/GET below can
# never be served -- this looks like leftover test code. Behaviour kept.
conn = Client(address)
conn.send('exit')
conn.send('PUT:a,5')
conn.send('GET:a')
msg = conn.recv()
print(msg)
conn.send('close')
conn.close()
|
{"/usbscale_agent.py": ["/usbscale.py"]}
|
26,273
|
neerajcse/INF241
|
refs/heads/master
|
/handdetector.py
|
# import the necessary packages
import imutils
import numpy as np
import argparse
import cv2
import time
# construct the argument parse and parse the arguments
# Construct the argument parser and parse the arguments.
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
args = vars(ap.parse_args())

# Upper and lower boundaries of the HSV pixel intensities considered 'skin'.
lower = np.array([0, 48, 80], dtype="uint8")
upper = np.array([20, 255, 255], dtype="uint8")

current_channel = 0
try:
    # Keep looping over frames, alternating camera channels on failure.
    while True:
        camera = cv2.VideoCapture(current_channel)
        (grabbed, frame) = camera.read()
        if not grabbed:
            # No frame on this channel: try the other one.
            # FIX: release the capture before switching -- the original
            # leaked it on this path.
            camera.release()
            current_channel = (current_channel + 1) % 2
            continue
        # Resize, convert to HSV and keep only pixels inside the skin range.
        frame = imutils.resize(frame, width=400)
        converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        skinMask = cv2.inRange(converted, lower, upper)
        # Erosions/dilations with an elliptical kernel to clean the mask.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
        skinMask = cv2.erode(skinMask, kernel, iterations=2)
        skinMask = cv2.dilate(skinMask, kernel, iterations=2)
        # Blur to remove noise, then apply the mask to the frame.
        skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
        skin = cv2.bitwise_and(frame, frame, mask=skinMask)
        # Persist the masked frame once per second.
        cv2.imwrite("/home/neeraj/Dropbox/Camera Uploads/my_img.png", skin)
        time.sleep(1)
        camera.release()
except KeyboardInterrupt:
    camera.release()
cv2.destroyAllWindows()
|
{"/usbscale_agent.py": ["/usbscale.py"]}
|
26,274
|
neerajcse/INF241
|
refs/heads/master
|
/detect_calories.py
|
import sys
import time
from barcode_scanner import BarcodeScanner
from usbscale import Scale
import requests
from multiprocessing.connection import Client
address = ('localhost', 6000)  # blackboard server endpoint


def get_data_from_dashboard(key):
    """Fetch the value stored under *key* on the blackboard server,
    using a fresh connection per request."""
    conn = Client(address)
    conn.send('GET:' + str(key))
    data = conn.recv()
    conn.send('close')
    conn.close()
    return data


def get_weight_from_blackboard():
    """Latest scale weight published on the blackboard."""
    return get_data_from_dashboard('weight')


def get_user_from_blackboard():
    """Identifier of the user currently detected."""
    return get_data_from_dashboard("user")
if __name__ == "__main__":
    # Main loop: read barcodes, look up the current weight and user on the
    # blackboard, and report the trio to the calorie service.
    scanner = BarcodeScanner()
    # Old endpoint: "http://dweet.io/dweet/for/inf241_barcode_reader?barcode={0}&weight={1}"
    url = "http://powerful-forest-7649.herokuapp.com/calories?barcode={0}&weight={1}&user={2}"
    try:
        while 1:
            # FIX: the original used Python-2-only print statements; the
            # parenthesized single-argument form works in both 2 and 3.
            print('reading next')
            barcode = scanner.read_next_barcode()
            weight = get_weight_from_blackboard()
            user = get_user_from_blackboard()
            print("Barcode : " + str(barcode))
            print("Weight : " + str(weight))
            print("User : " + str(user))
            requests.get(url.format(barcode, weight, user))
    except KeyboardInterrupt:
        print('Exiting...')
        scanner.cleanup()
        sys.exit(0)
|
{"/usbscale_agent.py": ["/usbscale.py"]}
|
26,275
|
neerajcse/INF241
|
refs/heads/master
|
/food_rest.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 31 12:33:46 2015
@author: swanand
"""
import os
import dweepy
import requests
import json
from flask import Flask
from pymongo import MongoClient
from urlparse import urlparse
app = Flask(__name__)
MONGO_URL = os.environ.get('MONGOLAB_URI')
@app.route('/')
def hello_world():
    """Health-check endpoint."""
    return 'Hello World!'
@app.route('/calories', methods=['GET'])
def getCaloriesForBarcode():
    """Look up a product on OpenFoodFacts and publish its calorie info.

    NOTE(review): the product barcode is hard-coded in the URL and the
    request's query parameters are ignored -- presumably unfinished.
    TODO confirm the intended behaviour against the caller.
    """
    url = 'http://world.openfoodfacts.org/api/v0/product/737628064502.json'
    data = requests.get(url)
    info = dumpToDweet(data.json())
    dumpToMongo(data.json())
    # FIX: the original double-encoded the payload (json.dumps of an
    # already-dumped JSON string); encode the object exactly once.
    return json.dumps(info, indent=4)
def dumpToMongo(data):
    """Upsert the product's calorie info into MongoDB and return the record."""
    info = {'user': '1',
            'genericName': data['product']['generic_name'],
            'code': data['code'],
            'calories': data['product']['nutriments']['energy']}
    if MONGO_URL:
        client = MongoClient(MONGO_URL)
        # Database name is the path component of the Mongo URL.
        db = client[urlparse(MONGO_URL).path[1:]]
    else:
        client = MongoClient()
        db = client['calnagger']
    conColl = db['consumption']
    conColl.update({'user': '1'}, {'$set': info}, True)  # upsert=True
    # FIX: return the record already built instead of rebuilding an
    # identical dict literal as the original did.
    return info
def dumpToDweet(data):
    """Publish the product's calorie info to dweet.io and return the record."""
    info = {'user': '1',
            'genericName': data['product']['generic_name'],
            'code': data['code'],
            'calories': data['product']['nutriments']['energy']}
    dweepy.dweet_for('decisive-train', info)
    # FIX: return the record already built instead of rebuilding an
    # identical dict literal as the original did.
    return info
if __name__ == '__main__':
    # Bind to the platform-provided port (e.g. Heroku), defaulting to 5000.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)
|
{"/usbscale_agent.py": ["/usbscale.py"]}
|
26,276
|
neerajcse/INF241
|
refs/heads/master
|
/barcode_scanner.py
|
import sys
import time
class BarcodeScanner():
    """Reads barcodes from a USB HID scanner device.

    NOTE(review): Python 2 code -- `ord(c)` on items of a byte buffer only
    works when iteration yields 1-char strings, not ints as in Python 3.
    """

    def __init__(self, channel="/dev/hidraw0"):
        # Raw HID report stream from the scanner device.
        self.fp = open(channel, 'rb')

    def read_next_barcode(self):
        """Block until a complete barcode has been scanned, then return it.

        HID key usage IDs 30..39 are the digit keys '1'..'9','0'; 40 is
        Enter, which terminates the barcode.
        """
        barcode = ''
        while True:
            if barcode == '':
                time.sleep(0.1)  # idle politely while nothing is being scanned
            buffer = self.fp.read(8)  # one 8-byte HID report
            for c in buffer:
                num_val = ord(c)
                if num_val == 40:  # Enter: barcode complete
                    return barcode
                elif 29 < num_val < 40:
                    # Map usage IDs 30..39 onto digits 1..9,0.
                    barcode += str((num_val + 1) % 10)

    def cleanup(self):
        """Close the device file handle."""
        self.fp.close()
|
{"/usbscale_agent.py": ["/usbscale.py"]}
|
26,277
|
neerajcse/INF241
|
refs/heads/master
|
/usbscale.py
|
#!/usr/bin/python
import os
import fcntl
import struct
import time
"""
Class which takes care of calibration and sampled weight of an item put
on the usb scale.
"""
class Scale(object):
"""
Object for a weighting scale.
Classes using this object should only use one method i.e., to get weight of an item put on the scale.
@link{get_weight} can be used to get the weight.
"""
def __init__(self, dev="/dev/usb/hiddev0"):
self.fd = os.open(dev, os.O_RDONLY)
self.dev = dev
self.calibrated = False
self.name, self.base_small, self.base_large = self.calibrate()
self.scale_factor = 9.61
"""
Takes an initial reading and sets that as the relative 0.
"""
def calibrate(self):
smalls = []
larges = []
for _ in range(5):
name, small, large = self.read_hid_usb()
smalls.append(small)
larges.append(large)
base_small = max(smalls)
base_large = max(larges)
print "Calibrated..."
print base_small, base_large
self.calibrate = True
return name, base_small, base_large
""" Gets one sample of weight. """
def get_weight(self):
scale_factor = self.scale_factor
base_large = self.base_large
base_small = self.base_small
name, small, large = self.read_hid_usb()
print small, large
if large < base_large + 1:
large = 0
else:
large = ((large - (base_large + 1)) * 94) + ((256 - base_small) / scale_factor)
if large == 0:
if small >= base_small:
small = (small - base_small) / scale_factor
else:
small = 0
else:
small = (small - base_small) / scale_factor
final = large + small
return final
""" Gets @link{self.samples} for weighing and returns the one that has max confidence level. """
def get_sampled_weight(self):
sample = []
for _ in range(5):
time.sleep(1)
sample.append(self.get_weight())
return max(sample)
""" Reads the usb channel for the scale and converts bytes into readable primitives. """
def read_hid_usb(self):
def _IOC(iodir, iotype, ionr, iosize):
return (iodir << 30) | (iotype << 8) | (ionr << 0) | (iosize << 16)
def HIDIOCGNAME(len):
return _IOC(2, ord("H"), 6, len)
name = fcntl.ioctl(self.fd, HIDIOCGNAME(100), " "*100).split("\0",1)[0]
hiddev_event_fmt = "Ii"
ev = []
for _ in range(8):
ev.append(struct.unpack(
hiddev_event_fmt,
os.read(self.fd, struct.calcsize(hiddev_event_fmt))))
input_large = ev[6][1]
input_small = ev[7][1]
return name, input_small % 256, input_large % 256
def cleanup(self):
os.close(self.fd)
if __name__ == "__main__":
    # Smoke test: print one sampled weight reading.
    scale = Scale()
    print(scale.get_sampled_weight())
|
{"/usbscale_agent.py": ["/usbscale.py"]}
|
26,278
|
neerajcse/INF241
|
refs/heads/master
|
/usbscale_agent.py
|
from multiprocessing.connection import Client
from array import array
from usbscale import Scale
import time
address = ('localhost', 6000)  # blackboard server endpoint


def send_weight_to_blackboard(weight):
    """Publish one weight reading to the blackboard over a fresh connection."""
    conn = Client(address)
    print("Sending weight :" + str(weight))
    conn.send('PUT:weight,' + str(weight))
    conn.send('close')
    conn.close()
if __name__ == "__main__":
    scale = Scale()
    try:
        # Keep a sliding window of the last 6 readings and publish the max,
        # which smooths out transient dips while an item settles.
        history = list()
        while True:
            weight = scale.get_weight()
            history.append(weight)
            if len(history) > 6:
                del history[0]
            max_weight = max(history)
            send_weight_to_blackboard(max_weight)
            time.sleep(0.1)
    except KeyboardInterrupt:
        print("Exiting...")
        scale.cleanup()
|
{"/usbscale_agent.py": ["/usbscale.py"]}
|
26,279
|
neerajcse/INF241
|
refs/heads/master
|
/blackboard.py
|
from multiprocessing.connection import Listener
from array import array
import sys
address = ('localhost', 6000)
listener = Listener(address)
key_store = {}  # the shared blackboard: plain key -> value strings

try:
    while True:
        try:
            print("Listening for connections...")
            conn = listener.accept()
            print('connection accepted from', listener.last_accepted)
            # Serve this client until it sends 'close' (or 'exit' to stop
            # the whole server).
            while True:
                msg = conn.recv()
                if msg == 'close':
                    conn.close()
                    break
                if msg == 'exit':
                    listener.close()
                    sys.exit(0)
                if msg.startswith("GET:"):
                    # "GET:key" -> reply with the stored value ('' if absent).
                    key = msg.split(":")[1].split(",")[0]
                    conn.send(key_store.get(key, ""))
                if msg.startswith("PUT:"):
                    # "PUT:key,value" -> store the value.
                    key, value = msg.split(":")[1].split(",")
                    key_store[key] = value
        except KeyboardInterrupt:
            print("Exiting...")
            break
except KeyboardInterrupt:
    print("Exiting///")
listener.close()
|
{"/usbscale_agent.py": ["/usbscale.py"]}
|
26,315
|
vetu11/QuienVienebot_old
|
refs/heads/master
|
/lista.py
|
# coding=utf-8
# Archivo: lista
# Descipción: describe la clase Lista, que es una lista de amigos.
from uuid import uuid4
from time import time
class Lista:
    """A list of friends; its methods are used to change each friend's vote."""

    def __init__(self, **kwargs):
        get = kwargs.get
        self.text = get("text")
        # A fresh UUID string is generated when the caller supplies no id.
        self.id = get("id", str(uuid4()))
        self.from_user_id = get("from_user_id")
        # Creation timestamp; defaults to "now".
        self.date = get("date", time())
        self.list = []
|
{"/handlers.py": ["/lang/__init__.py"], "/list_manager.py": ["/lista.py"]}
|
26,316
|
vetu11/QuienVienebot_old
|
refs/heads/master
|
/handlers.py
|
# coding=utf-8
# Archivo: handlers
# Descripción: Aquí se declararán los handlers a las distintas llamadas de la API.
from lang import get_lang
from telegram import ParseMode, InlineKeyboardButton, InlineKeyboardMarkup
def generic_message(bot, update, text_code):
    """Reply to any message with the canned text for *text_code*, localized
    to the sender's language, with no extra decoration."""
    lang = get_lang(update.effective_user.language_code)
    reply = lang.get_text(text_code)
    update.effective_message.reply_text(reply, parse_mode=ParseMode.MARKDOWN)
# --- COMANDOS GENERICOS ---
def start(bot, update):
    """Handler for the "/start" command."""
    generic_message(bot, update, text_code="start")
def help(bot, update):
    """Handler for the "/help" command (name shadows the builtin, but it is
    registered by this exact name in bot.py)."""
    generic_message(bot, update, text_code="help")
def more(bot, update):
    """Handler for the "/more" command."""
    generic_message(bot, update, text_code="more")
def donate(bot, update):
    """Handler for "/donate": show the donation message with an inline
    keyboard to adjust the amount.

    Bug fix: the original assigned to `user_data["donacion"]`, but this
    handler is registered without `pass_user_data=True`, so `user_data`
    was undefined and the command raised NameError.  The default amount
    is now a plain local; the callback handlers that would adjust it are
    not wired up in this bot anyway.
    """
    donacion = 5  # default donation amount, in EUR
    text = "Gracias de antemano por considerar donar. Soy un único desarrollador y me dedico a hacer bots gra" \
           "tuitos mayormente. Las donaciones iran a cubrir los gastos del servidor como primera prioridad y" \
           " como segunda las horas gastadas delante del ordenador. Puedes donar la cantidad que quieras usan" \
           "do las flechas.\n\nTambién acepto donaciones en ETH `0xa1B41eD75Da5d053793168D1B4F28610779E8a7c`"
    keyboard = [[InlineKeyboardButton("❤ donar %s€ ❤" % donacion, callback_data="donate")],
                [InlineKeyboardButton("⏬", callback_data="don*LLL"),
                 InlineKeyboardButton("⬇️", callback_data="don*LL"),
                 InlineKeyboardButton("🔽", callback_data="don*L"),
                 InlineKeyboardButton("🔼", callback_data="don*G"),
                 InlineKeyboardButton("⬆️", callback_data="don*GG"),
                 InlineKeyboardButton("⏫", callback_data="don*GGG")]]
    update.message.reply_text(text,
                              reply_markup=InlineKeyboardMarkup(keyboard),
                              parse_mode=ParseMode.MARKDOWN)
# --- COMANDOS DEL BOT ---
|
{"/handlers.py": ["/lang/__init__.py"], "/list_manager.py": ["/lista.py"]}
|
26,317
|
vetu11/QuienVienebot_old
|
refs/heads/master
|
/list_manager.py
|
# coding=utf-8
# Archivo: list_manager
# Autor: Ventura Pérez García - vetu@pm.me - github.com/vetu11
# Fecha última edición: 8/10
# Descripción: Describe la clase que se encargará de manejar las listas.
import json
from lista import Lista
class ListManager:
    """Loads, holds and persists every Lista, keyed by its id."""

    def __init__(self):
        self.lists = {}
        with open("lists.json") as f:
            crude_lists = json.load(f)
        # Rebuild Lista objects from their serialized dicts.
        for crude in crude_lists:
            self.lists[crude["id"]] = Lista(**crude)

    def guardar(self):
        """Persist every list back to lists.json.

        Bug fix: the original iterated over `self.lists` directly, which
        yields the *key* strings, and `vars()` on a str raises TypeError.
        We must iterate over the stored objects (`.values()`).
        """
        crude_lists = [vars(lista) for lista in self.lists.values()]
        with open("lists.json", "w") as f:
            json.dump(crude_lists, f, indent=2, ensure_ascii=False)
|
{"/handlers.py": ["/lang/__init__.py"], "/list_manager.py": ["/lista.py"]}
|
26,318
|
vetu11/QuienVienebot_old
|
refs/heads/master
|
/lang/__init__.py
|
# coding=utf-8
# Modulo: lang
# Descripción: este módulo incluye los textos para los mensajes en todos los idiomas disponibles.
from lang import get_lang
|
{"/handlers.py": ["/lang/__init__.py"], "/list_manager.py": ["/lista.py"]}
|
26,319
|
vetu11/QuienVienebot_old
|
refs/heads/master
|
/lang/lang.py
|
# coding=utf-8
# Archivo: lang
# Descripción: Descripción de la clase Lang y otras funciones útiles, que se usarán para obtener los textos de los
# mensajes del bot.
import json
# Cache of already-constructed Lang instances, keyed by language code.
_LANGS_INICIADOS = {}
# Language codes that actually have a lang/<code>.json file.
_IDIOMAS_DISPONIBLES = ["ES-es"]
# Bidirectional synonym map between short and long codes.
_SINONIMOS_IDIOMAS = {"es": "ES-es",
                      "ES-es": "es"}

class Lang:
    """A single language: loads its JSON text table and serves texts."""

    def __init__(self, lang_code="ES-es"):
        # Bug fix: the original used `is` / `is not` against these
        # containers, testing identity instead of membership, so every
        # code silently fell through to the default branch.  Resolve the
        # synonym first, then validate availability.
        lang_code = _SINONIMOS_IDIOMAS.get(lang_code, lang_code)
        if lang_code not in _IDIOMAS_DISPONIBLES:
            lang_code = "ES-es"
        with open("lang/%s.json" % lang_code) as f:
            self.texts = json.load(f)

    def get_text(self, text_code, *args, **kwargs):
        """Return the text for *text_code*, with *args*/*kwargs* formatted in.

        Falls back to the raw template when format arguments are missing.
        Bug fix: the original `except IndexError or KeyError` evaluates to
        `except IndexError`, so a missing keyword argument (KeyError from
        str.format) escaped instead of triggering the fallback.
        Unknown codes return the "not_found" text.
        """
        if text_code in self.texts:
            try:
                return self.texts[text_code].format(*args, **kwargs)
            except (IndexError, KeyError):
                return self.texts[text_code]
        return self.texts["not_found"].format(failed_text_code=text_code)
def get_lang(lang_code):
    """Return the (cached) Lang instance for *lang_code*, creating and
    memoizing it on first request."""
    cached = _LANGS_INICIADOS.get(lang_code)
    if cached is None:
        cached = Lang(lang_code)
        _LANGS_INICIADOS[lang_code] = cached
    return cached
|
{"/handlers.py": ["/lang/__init__.py"], "/list_manager.py": ["/lista.py"]}
|
26,320
|
vetu11/QuienVienebot_old
|
refs/heads/master
|
/user.py
|
# coding=utf-8
# Archivo: user
# Autor: vetu11
# Fecha última edición: 1/10/2018
# Descripción: Se define la clase User, que contiene las descripciónes minimas por parte de Telegram, y los datos
# adicionales generados por el bot.
class User:
    """A Telegram user: the minimal fields Telegram provides plus the
    extra data generated by the bot (full names)."""

    def __init__(self, **kwargs):
        self.first_name = kwargs.get("first_name")
        self.last_name = kwargs.get("last_name")
        self.username = kwargs.get("username")
        self.id = kwargs.get("id")
        self.language_code = kwargs.get("language_code", "ES-es")
        self.full_name = kwargs.get("full_name")
        self.full_name_simple = kwargs.get("full_name_simple")
        assert self.first_name is not None, "Error al crear el usuario: first_name es None"
        assert self.id is not None, "Error al crear el usuario: id es None"
        # Check types.  Bug fix: the original wrote
        # `if not isinstance(self.first_name, str) and isinstance(id, int)`,
        # testing the *builtin* `id` function (never an int) with inverted
        # logic, so the TypeError could never fire.
        if not (isinstance(self.first_name, str) and isinstance(self.id, int)):
            raise TypeError
        if self.full_name is None:
            self.create_full_name()

    def create_full_name(self):
        """Build self.full_name and self.full_name_simple."""
        assert self.first_name is not None, "self.first_name es None"
        if self.last_name is None:
            self.full_name_simple = self.first_name
        else:
            self.full_name_simple = self.first_name + " " + self.last_name
        if self.username is None:
            self.full_name = self.full_name_simple
        else:
            # Markdown link: the display name points at the username.
            self.full_name = "[%s](%s)" % (self.full_name_simple, self.username)

    def guardar(self):
        """Write the user's data to a file (not implemented yet)."""
        pass
|
{"/handlers.py": ["/lang/__init__.py"], "/list_manager.py": ["/lista.py"]}
|
26,321
|
vetu11/QuienVienebot_old
|
refs/heads/master
|
/bot.py
|
# coding=utf-8
# Archivo: bot
# Descripción: el bot se ejecutará desde este archivo. Aquí se asignarán las funciones handler del archivo handlers.py
# a una llamada de la API.
import logging
import handlers as h
from telegram.ext import Updater, InlineQueryHandler, ChosenInlineResultHandler, CallbackQueryHandler,\
CommandHandler, MessageHandler, Filters, PreCheckoutQueryHandler
from bot_tokens import BOT_TOKEN
# Console logger
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def stop_bot(updater):
    """Signal the updater to stop polling and log the shutdown."""
    logger.info("Apagando bot...")
    updater.stop()
    logger.info("Bot apagado")
def main():
    """Wire the command handlers, start polling, then run a minimal stdin
    console.  NOTE: this file is Python 2 (`raw_input`, `print` statement).
    """
    updater = Updater(BOT_TOKEN)
    a = updater.dispatcher.add_handler
    # Handler registration
    a(CommandHandler('start', h.start))
    a(CommandHandler('help', h.help))
    a(CommandHandler('more', h.more))
    a(CommandHandler('donate', h.donate))
    # Start the bot: begin polling Telegram for updates.
    updater.start_polling()
    # CONSOLE: blocking stdin loop; only the "stop" command is recognized.
    while True:
        inp = raw_input("")
        if inp:
            input_c = inp.split()[0]
            args = inp.split()[1:]
            # Re-join the remaining words (currently unused by any command).
            strig = ""
            for e in args:
                strig = strig + " " + e
            if input_c == "stop":
                stop_bot(updater)
                break
            else:
                print "Comando desconocido"
if __name__ == '__main__':
    main()
|
{"/handlers.py": ["/lang/__init__.py"], "/list_manager.py": ["/lista.py"]}
|
26,322
|
vetu11/QuienVienebot_old
|
refs/heads/master
|
/old_bot.py
|
# coding=utf-8
import logging, json, time, re
from telegram.ext import Updater, InlineQueryHandler, ChosenInlineResultHandler, CallbackQueryHandler,\
CommandHandler, MessageHandler, Filters, PreCheckoutQueryHandler
from telegram import InlineQueryResultArticle, ParseMode, InputTextMessageContent, InlineKeyboardButton,\
InlineKeyboardMarkup, LabeledPrice
from bot_tokens import BOT_TOKEN, PAYMENT_PROVIDER_TOKEN
from uuid import uuid4
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Precompiled suffix matchers for /start deep-link payloads ("<id>_g" etc.).
g_search = re.compile("_g")
dk_search = re.compile("_dk")
ng_search = re.compile("_ng")
# When True, deep links point at the test bot instead of the production bot.
DEBUG = False
class FriendList:
    """A shareable attendance list: a question text plus the users who have
    voted "si" / "puede" / "no" on it.  NOTE: this module is Python 2."""

    def __init__(self, text, id, from_user_id, list=None, **kargs):
        # text: the question shown in bold; id: the inline-result uuid;
        # from_user_id: Telegram id of the creator.
        self.text = text
        self.id = id
        self.from_user_id = from_user_id
        # Creation / last-activity timestamp; defaults to "now".
        self.date = kargs.get("date", time.time())
        self.list = []
        if list:
            # Rehydrate serialized BotUser dicts.
            for bot_user in list:
                self.list.append(BotUser(json=bot_user))

    def text_message_and_keyboard(self):
        """Build the Markdown message body plus its inline keyboard.

        Shows at most 5 names per section and collapses the remainder into
        a "*+N*" line; the final per-section counts decide whether the
        keyboard gains the deep-link count buttons (see make_keyboard).
        """
        going, dont_know, not_going = self.get_three_lists()
        g_count = dk_count = ng_count = 0
        msg = "*%s*" % self.text
        if going:
            msg += "\n\n*Dicen que vienen:*"
            # NOTE: pop() consumes from the end, so names render in reverse
            # of their join order.
            while g_count < 5 and going:
                user = going.pop()
                msg += "\n%s" % user.full_name
                g_count += 1
            if g_count == 5 and going:
                msg += "\n*+%s*" % len(going)
                g_count = g_count + len(going)
        if dont_know:
            msg += "\n\n*Dicen que puede que vengan:*"
            while dk_count < 5 and dont_know:
                user = dont_know.pop()
                msg += "\n%s" % user.full_name
                dk_count += 1
            if dk_count == 5 and dont_know:
                msg += "\n*+%s*" % len(dont_know)
                dk_count = dk_count + len(dont_know)
        if not_going:
            msg += "\n\n*Dicen que no vienen:*"
            while ng_count < 5 and not_going:
                user = not_going.pop()
                msg += "\n%s" % user.full_name
                ng_count += 1
            if ng_count == 5 and not_going:
                msg += "\n*+%s*" % len(not_going)
                ng_count = ng_count + len(not_going)
        return msg, self.make_keyboard(g_count, dk_count, ng_count)

    def make_keyboard(self, g_count, dk_count, ng_count):
        """Vote + admin rows; when any section overflowed 5 names, insert a
        middle row of count buttons deep-linking into the bot."""
        keyboard = [[InlineKeyboardButton("Yo voy", callback_data="si*%s" % self.id),
                     InlineKeyboardButton("Puede", callback_data="puede*%s" % self.id),
                     InlineKeyboardButton("Yo no voy", callback_data="no*%s" % self.id)],
                    [InlineKeyboardButton("CERRAR ✋️", callback_data="close*%s" % self.id),
                     InlineKeyboardButton("Reenviar", switch_inline_query="id*%s" % self.id)]]
        if g_count > 5 or dk_count > 5 or ng_count > 5:
            if not DEBUG:
                url = "t.me/QuienVienebot?start=%s" % self.id
            else:
                url = "t.me/vetutestbot?start=%s" % self.id
            keyboard.insert(1, [InlineKeyboardButton("(%s)" % g_count, url=url + "_g"),
                                InlineKeyboardButton("(%s)" % dk_count, url=url + "_dk"),
                                InlineKeyboardButton("(%s)" % ng_count, url=url + "_ng")])
        return InlineKeyboardMarkup(keyboard)

    def add_user(self, usuario):
        """Record *usuario*'s vote.  Pressing the same position twice removes
        the vote (toggle); a different position replaces the previous one."""
        user_id = usuario.t_id
        already_in = -1
        for user in self.list:
            if user.t_id == user_id:
                already_in = self.list.index(user)
        if already_in >= 0:
            if self.list[already_in].position == usuario.position:
                self.list.pop(already_in)
            else:
                self.list.pop(already_in)
                self.list.append(usuario)
        else:
            self.list.append(usuario)
        # Any vote counts as activity and postpones the one-week expiry.
        self.date = time.time()

    def to_json(self):
        """Serialize to a plain dict (users serialized recursively)."""
        u_list = []
        for u in self.list:
            u_list.append(u.to_json())
        j_son = {"text":self.text,
                 "id":self.id,
                 "from_user_id":self.from_user_id,
                 "date":self.date,
                 "list":u_list}
        return j_son

    def get_going(self):
        """Users whose position is "si"."""
        list = []
        for persona in self.list:
            if persona.position == "si":
                list.append(persona)
        return list

    def get_dont_know(self):
        """Users whose position is "puede"."""
        list = []
        for persona in self.list:
            if persona.position == "puede":
                list.append(persona)
        return list

    def get_not_going(self):
        """Users whose position is "no"."""
        list = []
        for persona in self.list:
            if persona.position == "no":
                list.append(persona)
        return list

    def get_three_lists(self):
        """Split voters into (going, dont_know, not_going) in one pass;
        anything that is neither "si" nor "puede" counts as not going."""
        list_going = []
        list_dont_know = []
        list_not_going = []
        for persona in self.list:
            if persona.position == "si":
                list_going.append(persona)
            elif persona.position == "puede":
                list_dont_know.append(persona)
            else:
                list_not_going.append(persona)
        return list_going, list_dont_know, list_not_going
class BotUser:
    """One participant: Telegram id, display name, username and their
    current position ("si" / "puede" / "no")."""

    def __init__(self, json=None, t_id=None, full_name=None, username=None, position=None):
        if json:
            # Rehydrate from a previously serialized dict.
            self.t_id = json["t_id"]
            self.full_name = json["full_name"]
            self.username = json["username"]
            self.position = json["position"]
        else:
            self.t_id = t_id
            self.full_name = full_name
            self.username = username
            self.position = position

    def to_json(self):
        """Serialize to a plain dict (inverse of the json= constructor path)."""
        return {"t_id": self.t_id,
                "full_name": self.full_name,
                "username": self.username,
                "position": self.position}
def make_buttons(uuid):
    """Build the default keyboard for a freshly created list: a vote row
    plus a settings/forward row, all carrying the list's uuid."""
    vote_row = [InlineKeyboardButton("Yo voy", callback_data="si*%s" % uuid),
                InlineKeyboardButton("Puede", callback_data="puede*%s" % uuid),
                InlineKeyboardButton("Yo no voy", callback_data="no*%s" % uuid)]
    admin_row = [InlineKeyboardButton("Ajustes ⚙️️", callback_data="close*%s" % uuid),
                 InlineKeyboardButton("Reenviar", switch_inline_query="id*%s" % uuid)]
    return InlineKeyboardMarkup([vote_row, admin_row])
# Load the persisted lists once at import time and rebuild FriendList
# objects from the raw JSON records.  Bug fix: the original opened the
# file without ever closing it; the with-statement releases the handle.
with open("bot_list_list.json") as f:
    l_l = json.load(f)
friend_list_list = []
for f_list in l_l:
    friend_list_list.append(FriendList(text=f_list["text"],
                                       id=f_list["id"],
                                       list=f_list["list"],
                                       from_user_id=f_list["from_user_id"],
                                       date=f_list["date"]))
del f, l_l
def search_lists_by_user(t_user_id):
    """Prune lists older than one week, then return the surviving lists
    created by *t_user_id*."""
    global friend_list_list
    cutoff = time.time() - 604800  # one week, in seconds
    friend_list_list = [fl for fl in friend_list_list if fl.date >= cutoff]
    return [fl for fl in friend_list_list if fl.from_user_id == t_user_id]
def search_list_by_id(list_id):
    """Return the FriendList whose id equals *list_id*, or None when absent."""
    return next((fl for fl in friend_list_list if fl.id == list_id), None)
# BOT FUNCTIONS
def stopBot(updater):
    """Signal the updater to stop polling and log the shutdown."""
    logger.info("Apagando bot...")
    updater.stop()
    logger.info("Bot apagado")
def get_id_from_start_command(args):
    """Split a /start deep-link payload like "<list_id>_g" into
    (list_id, order), where order is "g", "dk" or "ng".

    Bug fix: the original ended in a bare ``raise`` with no active
    exception, which produces a confusing ``RuntimeError: No active
    exception to re-raise``; an explicit ValueError is raised instead.
    """
    assert len(args) == 1, "Hay más de un argumento"
    payload = args[0]
    if g_search.search(payload):
        return payload.replace("_g", ""), "g"
    if dk_search.search(payload):
        return payload.replace("_dk", ""), "dk"
    if ng_search.search(payload):
        return payload.replace("_ng", ""), "ng"
    raise ValueError("unrecognized start parameter: %s" % payload)
def start_command(bot, update, args, user_data):
    """Handler for /start.

    With a deep-link argument it either shows the full attendee section of
    a list ("<id>_g" / "_dk" / "_ng") or starts the donation flow
    ("donacion"); with no args it sends the welcome text.
    """
    if args:
        if args[0] != "donacion":
            try:
                list_id, orden = get_id_from_start_command(args)
                lista = search_list_by_id(list_id)
                msg = "*%s*\n\n" % lista.text
                if orden == "g":
                    lista_personas = lista.get_going()
                    if lista_personas:
                        msg += "*Dicen que vienen:*"
                    else:
                        msg += "*Nadie ha dicho que viene D:*"
                elif orden == "dk":
                    lista_personas = lista.get_dont_know()
                    if lista_personas:
                        msg += "*Dicen que puede que vengan:*"
                    else:
                        msg += "*Nadie ha dicho que puede que venga*"
                else:
                    lista_personas = lista.get_not_going()
                    if lista_personas:
                        msg += "*Dicen que no viene:*"
                    else:
                        msg += "*Nadie ha dicho que no viene :D*"
                if lista_personas:
                    for persona in lista_personas:
                        msg += "\n%s" % persona.full_name
                update.message.reply_text(msg, parse_mode=ParseMode.MARKDOWN)
                return
            finally:
                # NOTE(review): `finally: pass` does NOT swallow exceptions;
                # a malformed payload or vanished list (lista is None) still
                # propagates.  Presumably `except: pass` was intended --
                # confirm before relying on this.
                pass
        else:
            # Donation deep link: same message/keyboard as the donate flow.
            user_data["donacion"] = 5
            text = "Gracias de antemano por considerar donar. Soy un único desarrollador y me dedico a hacer bots gra" \
                   "tuitos mayormente. Las donaciones iran a cubrir los gastos del servidor como primera prioridad y" \
                   " como segunda las horas gastadas delante del ordenador. Puedes donar la cantidad que quieras usan" \
                   "do las flechas.\n\nTambién acepto donaciones en ETH `0xa1B41eD75Da5d053793168D1B4F28610779E8a7c`"
            keyboard = [[InlineKeyboardButton("❤ donar %s€ ❤" % user_data["donacion"], callback_data="donate")],
                        [InlineKeyboardButton("⏬", callback_data="don*LLL"),
                         InlineKeyboardButton("⬇️", callback_data="don*LL"),
                         InlineKeyboardButton("🔽", callback_data="don*L"),
                         InlineKeyboardButton("🔼", callback_data="don*G"),
                         InlineKeyboardButton("⬆️", callback_data="don*GG"),
                         InlineKeyboardButton("⏫", callback_data="don*GGG")]]
            update.message.reply_text(text,
                                      reply_markup=InlineKeyboardMarkup(keyboard),
                                      parse_mode=ParseMode.MARKDOWN)
            return
    # Plain /start: welcome message.
    msg = "¡Hola! Bienvenido a la versión 0.2 de @QuienVienebot!\n\nEste bot es un bot _inline_, esto quiere decir " \
          "que para usarme no hace falta ni que uses este chat! Para empezar escribe en cualquier chat *@QuienViene" \
          "bot ¿Quien viene a casa de Marta?*, por ejemplo.\n¡Gracias por usarme!"
    update.message.reply_text(text=msg,
                              parse_mode=ParseMode.MARKDOWN)
def delete_list(id=None, index=None):
    """Placeholder: deletion by id or by index is not implemented yet."""
    pass
def empty_query(bot, update):
    """Answer an empty inline query with the user's own existing lists, or
    a hint article when they have none; always offers the donation PM
    button."""
    user_matches = search_lists_by_user(update.inline_query.from_user.id)
    results = []
    if user_matches:
        for list_match in user_matches:
            message, keyboard = list_match.text_message_and_keyboard()
            results.append(InlineQueryResultArticle(id=list_match.id,
                                                    title=list_match.text,
                                                    description="Pulsa para enviar esta cita ya creada",
                                                    reply_markup=keyboard,
                                                    input_message_content=InputTextMessageContent(message_text=message,
                                                                                                  parse_mode=ParseMode.MARKDOWN,
                                                                                                  disable_web_page_preview=True)))
    else:
        msg_text = "*No tienes ninguna cita creada*\n" \
                   "Para crear una cita escribe en cualquier chat: _@QuienVienebot ¿Quién viene a...?_"
        results = [InlineQueryResultArticle(id="sin_cita",
                                            title="No tienes ninguna cita creada",
                                            description="sigue escribiendo para crear una",
                                            input_message_content=InputTextMessageContent(message_text=msg_text,
                                                                                          parse_mode=ParseMode.MARKDOWN,
                                                                                          disable_web_page_preview=True))]
    # cache_time=0: results are personal and change with every vote.
    update.inline_query.answer(results,
                               is_personal=True,
                               cache_time=0,
                               switch_pm_text="Se aceptan donaciones ❤️",
                               switch_pm_parameter="donacion")
def full_query(bot, update):
    """Answer a non-empty inline query.

    "id*<uuid>" re-sends an existing list (or a not-found notice when it
    was closed or expired); any other text offers to create a new list
    whose fresh uuid doubles as the inline result id.
    """
    query_split = update.inline_query.query.split("*")
    if query_split[0] == "id":
        list_id = query_split[1]
        lista = search_list_by_id(list_id)
        if lista:
            message, keyboard = lista.text_message_and_keyboard()
            results = [InlineQueryResultArticle(id=list_id,
                                                title=lista.text,
                                                description="Pulsa para enviar esta lista ya creada",
                                                reply_markup=keyboard,
                                                input_message_content=InputTextMessageContent(
                                                    message_text=message,
                                                    parse_mode=ParseMode.MARKDOWN,
                                                    disable_web_page_preview=True))]
        else:
            msg_text = "*La lista que solicitas no se encuentra*\nPuede que haya sido cerrada o que haya caducado. Pa" \
                       "ra crear una nueva escribe en cualquier chat @QuienVienebot."
            results = [InlineQueryResultArticle(id="sin_cita",
                                                title="No existe ninguna cita con esa ID",
                                                description="puede que haya sido cerrada o que haya caducado, crea una nueva",
                                                input_message_content=InputTextMessageContent(message_text=msg_text,
                                                                                              parse_mode=ParseMode.MARKDOWN,
                                                                                              disable_web_page_preview=True))]
    else:
        # New list: chosen_result later stores this uuid as the list id.
        list_id = uuid4()
        results = [InlineQueryResultArticle(id=list_id,
                                            title=update.inline_query.query,
                                            description="Pulsa para enviar",
                                            reply_markup=make_buttons(list_id),
                                            input_message_content=InputTextMessageContent(message_text="*%s*" % update.inline_query.query,
                                                                                          parse_mode=ParseMode.MARKDOWN,
                                                                                          disable_web_page_preview=True))]
    update.inline_query.answer(results,
                               is_personal=True,
                               cache_time=0,
                               switch_pm_text="Se aceptan donaciones ❤️",
                               switch_pm_parameter="donacion")
def inline_query(bot, update):
    """Dispatch an inline query: empty queries list the user's own lists,
    anything else creates or re-sends a list."""
    handler = full_query if update.inline_query.query else empty_query
    handler(bot, update)
def chosen_result(bot, update):
    """Register a new FriendList when the user sent a brand-new inline
    result; re-sends (queries carrying "id*") and empty queries are ignored."""
    chosen = update.chosen_inline_result
    if not chosen.query or "id*" in chosen.query:
        return
    friend_list_list.append(FriendList(text=chosen.query,
                                       id=chosen.result_id,
                                       from_user_id=chosen.from_user.id))
def callback_query_exception(bot, update):
    """Fallback for any callback button without a dedicated handler."""
    update.callback_query.answer("Proximamente...")
def add_me_to_list(bot, update):
    """Handler for the voy / puede / no voy vote buttons: toggle or set the
    pressing user's vote on the target list and redraw the inline message
    in place."""
    telegram_user = update.callback_query.from_user
    if telegram_user.last_name:
        full_name = telegram_user.first_name + " " + telegram_user.last_name
    else:
        full_name = telegram_user.first_name
    # callback_data is "<position>*<list_id>".
    position = update.callback_query.data.split("*")[0]
    list_id = update.callback_query.data.split("*")[1]
    list = search_list_by_id(list_id)
    if list:
        usuario = BotUser(t_id=telegram_user.id,
                          full_name=full_name,
                          username=telegram_user.username,
                          position=position)
        list.add_user(usuario)
        message, keyboard = list.text_message_and_keyboard()
        bot.edit_message_text(text=message,
                              parse_mode=ParseMode.MARKDOWN,
                              disable_web_page_preview=True,
                              reply_markup=keyboard,
                              inline_message_id=update.callback_query.inline_message_id)
        update.callback_query.answer("okay 👍")
    else:
        # Unknown/expired list: strip the keyboard so it cannot be pressed again.
        update.callback_query.answer("⚠️ Parece que esa lista no existe", show_alert=True)
        bot.edit_message_reply_markup(inline_message_id=update.callback_query.inline_message_id,
                                      reply_markup=(InlineKeyboardMarkup([[]])))
def close_list(bot, update):
    """Close (delete) a list when its creator presses CERRAR; anyone else
    gets a warning and the message is left untouched."""
    global friend_list_list
    list_id = update.callback_query.data.split("*")[1]
    n = None
    # for/else: n keeps the matching index when the loop breaks; it is
    # reset to None when the loop finishes without finding the list.
    for n in range(len(friend_list_list)):
        if friend_list_list[n].id == list_id:
            break
    else:
        n = None
    if n is not None:
        if friend_list_list[n].from_user_id == update.callback_query.from_user.id:
            friend_list_list.pop(n)
            update.callback_query.answer("okay, lista cerrada 👍")
            bot.edit_message_reply_markup(inline_message_id=update.callback_query.inline_message_id,
                                          reply_markup=(InlineKeyboardMarkup([[]])))
        else:
            update.callback_query.answer("⚠️ No has creado esta lista")
    else:
        # Already gone (expired or closed elsewhere): strip the keyboard.
        update.callback_query.answer("⚠️ parece que esa lista no existe")
        bot.edit_message_reply_markup(inline_message_id=update.callback_query.inline_message_id,
                                      reply_markup=(InlineKeyboardMarkup([[]])))
def change_donation_quantity(bot, update, user_data):
    """Handle the arrow buttons under the donation message: adjust the
    per-user amount and redraw the keyboard with the new total."""
    if "donacion" not in user_data:
        user_data["donacion"] = 5
    s = update.callback_query.data.split("*")
    # callback_data is "don*<letters>"; "G"=up, "L"=down, and the letter
    # count picks the magnitude: 1/5/25 EUR (5 ** (count - 1)).
    change = 5 ** (s[1].count("G") - 1) if "G" in s[1] else -(5 ** (s[1].count("L") - 1))
    user_data["donacion"] += change
    # Amount never drops below 1 EUR.
    if user_data["donacion"] < 1:
        user_data["donacion"] = 1
    keyboard = [[InlineKeyboardButton("❤ donar %s€ ❤" % user_data["donacion"], callback_data="donate")],
                [InlineKeyboardButton("⏬", callback_data="don*LLL"),
                 InlineKeyboardButton("⬇️", callback_data="don*LL"),
                 InlineKeyboardButton("🔽", callback_data="don*L"),
                 InlineKeyboardButton("🔼", callback_data="don*G"),
                 InlineKeyboardButton("⬆️", callback_data="don*GG"),
                 InlineKeyboardButton("⏫", callback_data="don*GGG")]]
    update.effective_message.edit_reply_markup(reply_markup=InlineKeyboardMarkup(keyboard))
    update.callback_query.answer()
def donate(bot, update, user_data):
    """Send a Telegram invoice for the currently selected donation amount
    (user_data["donacion"], in EUR) and remove the donation keyboard."""
    title = "Donación"
    description = "Gracias por aportar a este proyecto. Usa el botón pagar para proceder al pago."
    # LabeledPrice takes the amount in the currency's smallest unit (cents).
    prices = [LabeledPrice("Donación", user_data["donacion"] * 100)]
    bot.send_invoice(chat_id=update.effective_chat.id,
                     title=title,
                     description=description,
                     payload="donacion_completada",
                     provider_token=PAYMENT_PROVIDER_TOKEN,
                     start_parameter="donacion",
                     currency="EUR",
                     prices=prices)
    update.effective_message.edit_reply_markup(reply_markup=InlineKeyboardMarkup([[]]))
def aprove_transaction(bot, update):
    """Pre-checkout hook: approve the payment only when the invoice payload
    is the one this bot issued; otherwise refuse and ask the user to retry."""
    query = update.pre_checkout_query
    # check the payload, is this from your bot?
    if query.invoice_payload == 'donacion_completada':
        bot.answer_pre_checkout_query(pre_checkout_query_id=query.id, ok=True)
        return
    # answer False pre_checkout_query
    bot.answer_pre_checkout_query(pre_checkout_query_id=query.id, ok=False,
                                  error_message="Algo ha fallado, vuelve a intentarlo por favor.")
def completed_donation(bot, update):
    """After a successful payment: thank the donor and notify the admin chat."""
    update.effective_message.reply_text("Muchisimas gracias por donar!! ❤️❤️❤️")
    # 254234845 is presumably the developer's chat id -- confirm.
    bot.send_message(254234845, "%s ha donado!" % update.effective_chat.id)
def error(bot, update, error):
    """Dispatcher error handler: log the update that caused the error.
    NOTE: the third parameter shadows the function's own name."""
    logger.warning('Update "%s" caused error "%s"' % (update, error))
def main():
    """Wire every handler, start polling, then run a minimal stdin console
    that accepts only "stop".  NOTE: Python 2 (`raw_input`, `print`
    statement)."""
    updater = Updater(BOT_TOKEN)
    dispatcher = updater.dispatcher
    dispatcher.add_handler(InlineQueryHandler(inline_query))
    dispatcher.add_handler(ChosenInlineResultHandler(chosen_result))
    # Vote buttons share one handler; the pattern matches the position prefix.
    dispatcher.add_handler(CallbackQueryHandler(add_me_to_list, pattern="(si\*)|(puede\*)|(no\*)"))
    dispatcher.add_handler(CallbackQueryHandler(close_list, pattern="close\*"))
    dispatcher.add_handler(CallbackQueryHandler(change_donation_quantity, pattern=r"don\*", pass_user_data=True))
    dispatcher.add_handler(CallbackQueryHandler(donate, pattern=r"donate$", pass_user_data=True))
    # Catch-all for any other callback button.
    dispatcher.add_handler(CallbackQueryHandler(callback_query_exception))
    dispatcher.add_handler(CommandHandler("start", start_command, pass_args=True, pass_user_data=True))
    dispatcher.add_handler(MessageHandler(filters=Filters.successful_payment, callback=completed_donation))
    dispatcher.add_handler(PreCheckoutQueryHandler(aprove_transaction))
    dispatcher.add_error_handler(error)
    updater.start_polling()
    # CONSOLE: blocking stdin loop.
    while True:
        inp = raw_input("")
        if inp:
            input_c = inp.split()[0]
            args = inp.split()[1:]
            # Re-join the remaining words (currently unused by any command).
            strig = ""
            for e in args:
                strig = strig + " " + e
            if input_c == "stop":
                stopBot(updater)
                break
            else:
                print "Comando desconocido"
if __name__ == '__main__':
    main()
# Persist every list back to disk once main() has returned.
with open("bot_list_list.json", "w") as f:
    serialized = [f_list.to_json() for f_list in friend_list_list]
    json.dump(serialized, f, indent=2)
|
{"/handlers.py": ["/lang/__init__.py"], "/list_manager.py": ["/lista.py"]}
|
26,323
|
leohakim/FastAPI-GraphQL
|
refs/heads/main
|
/data.py
|
# In-memory sample data served by both the REST and the GraphQL front end.
employee_data = [
    {"id": 1001, "firstName": "John", "lastName": "Doe", "age": 28},
    {"id": 1002, "firstName": "Bob", "lastName": "McBobby", "age": 45},
]
product_data = [
    {"id": 2011, "name": "Tofu", "price": 12.7},
    {"id": 2012, "name": "Chocolate", "price": 18.2},
    {"id": 2013, "name": "Pepper Sauce", "price": 23.3},
]
supplier_data = [
    {"id": 4101, "name": "Tokyo Sweet", "address": "7-11 Akimai Mushi-shi", "country": "Japan", "phone": "(03) 3555-5011 "},
    {"id": 4102, "name": "New England", "address": "85 King's Street", "country": "United Kingdom", "phone": "(171) 555-2222 "},
]

def get_employee(id):
    """Return the employees matching *id*; a falsy id returns all of them."""
    if not id:
        return employee_data
    return [e for e in employee_data if e['id'] == id]

def get_product(id):
    """Return the products matching *id*; a falsy id returns all of them."""
    if not id:
        return product_data
    return [p for p in product_data if p['id'] == id]

def get_supplier(id):
    """Return the suppliers matching *id*; a falsy id returns all of them."""
    if not id:
        return supplier_data
    return [s for s in supplier_data if s['id'] == id]
|
{"/graphqlapp.py": ["/data.py"], "/restapp.py": ["/data.py"]}
|
26,324
|
leohakim/FastAPI-GraphQL
|
refs/heads/main
|
/graphqlapp.py
|
from ariadne import ObjectType, QueryType, gql, make_executable_schema, load_schema_from_path
from ariadne.asgi import GraphQL
import data
# SDL schema lives next to the app; ariadne parses it at import time.
type_defs = load_schema_from_path("schema.graphql")
# Map resolver functions to Query fields using QueryType
query = QueryType()
# Resolvers are simple python functions
@query.field("employee")
def resolve_employee(_, info, id=0):
    """Resolve Query.employee(id); id=0 returns every employee."""
    return data.get_employee(id)
@query.field("product")
def resolve_product(_, info, id=0):
    """Resolve Query.product(id); id=0 returns every product."""
    return data.get_product(id)
@query.field("supplier")
def resolve_supplier(_, info, id=0):
    """Resolve Query.supplier(id); id=0 returns every supplier."""
    return data.get_supplier(id)
# Map resolver functions to custom type fields using ObjectType
employee = ObjectType("Employee")
@employee.field("fullName")
def resolve_person_fullname(person, *_):
    """Computed Employee.fullName = "<firstName> <lastName>"."""
    return "%s %s" % (person["firstName"], person["lastName"])
# Create executable GraphQL schema
schema = make_executable_schema(type_defs, query, employee)
# Create an ASGI app using the schema, running in debug mode
app = GraphQL(schema, debug=True)
|
{"/graphqlapp.py": ["/data.py"], "/restapp.py": ["/data.py"]}
|
26,325
|
leohakim/FastAPI-GraphQL
|
refs/heads/main
|
/restapp.py
|
from fastapi import FastAPI
import data
app = FastAPI()

@app.get("/employee")
async def employee(id: int = 0):
    """Return employee records with a computed fullName field.

    Bug fix: the original mutated the shared dicts inside
    data.employee_data, permanently injecting 'fullName' into the
    module-level sample data on every request (also visible to the
    GraphQL app); shallow copies are decorated instead.
    """
    employees = [dict(e) for e in data.get_employee(id)]
    for employee in employees:
        employee['fullName'] = f"{employee['firstName']} {employee['lastName']}"
    return employees

@app.get("/product")
async def product(id: int = 0):
    """Return the product(s) with *id*; id=0 returns all products."""
    return data.get_product(id)

@app.get("/supplier")
async def supplier(id: int = 0):
    """Return the supplier(s) with *id*; id=0 returns all suppliers."""
    return data.get_supplier(id)
|
{"/graphqlapp.py": ["/data.py"], "/restapp.py": ["/data.py"]}
|
26,326
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/stops/management/commands/importdump_cxx.py
|
'''
Created on Dec 18, 2011
Import a JSON dump
@author: Joel Haasnoot
@author: Stefan de Konink
'''
import time, codecs, csv
import simplejson as json
from django.contrib.gis.geos import *
from django import db
from django.core.management.base import BaseCommand
from haltes.stops.models import UserStop, StopAttribute, Source, SourceAttribute
from haltes.stops import admin # Needed to track reversion
from haltes.utils import file
import reversion
class Command(BaseCommand):
    """Management command: import a Connexxion CSV dump into the stop
    models.  NOTE: Python 2 / legacy Django (`print` statement, `unicode`,
    `transaction.commit_on_success`)."""

    def handle(self, *args, **options):
        """Read the CSV file named in args[0] and upsert stops plus their
        attributes, wrapping each data row in a transaction and a
        reversion revision."""
        t0 = time.time()
        f = open(args[0], mode='r') #codecs.open(args[0], encoding='utf-8', mode='r')
        reader = file.UnicodeCsvReader(f, 'utf-8', dialect=csv.excel)
        # Get or create our source
        source, created = Source.objects.get_or_create(source_id=u'cxx', defaults={u'name': "Connexxion Website"})
        keymap = False
        # Loop over stops; the first row is the header and becomes the keymap.
        for row in reader:
            if not keymap:
                keymap = row
            else:
                with db.transaction.commit_on_success():
                    with reversion.create_revision():
                        # Strip the ", <stop name>" suffix to get the bare city.
                        common_city = row[4].replace(', '+row[7], '')
                        # Coordinates arrive as integers in 1e-7 degrees;
                        # convert to lon/lat WGS84 (srid 4326).
                        pnt = Point(int(row[8])/10000000.0,
                                    int(row[6])/10000000.0, srid=4326)
                        s, created = UserStop.objects.get_or_create(tpc=row[1],
                                        defaults={u'common_name' : row[7], u'common_city' : common_city, 'point' : pnt })
                        self.get_create_update(StopAttribute, {'stop' : s, 'key' : u"Zone"}, {'value' : row[2]})
                        # Store every raw column as a per-source attribute.
                        i = 0
                        for agency_attr in row:
                            self.get_create_update(SourceAttribute, {'stop' : s, 'source' : source, 'key' : unicode(keymap[i]).capitalize()}, {'value' : row[i]} )
                            i += 1
                        reversion.set_comment(u"Connexxion Import")
        f.close()
        print "Executed in "+str(time.time()-t0)+ " seconds"

    def get_create_update(self, model, get_kwargs, update_values):
        ''' This helper function makes a simple one line update possible '''
        # get_or_create on the lookup kwargs, then overwrite and save.
        sa, created = model.objects.get_or_create(**get_kwargs);
        for (key, value) in update_values.items():
            setattr(sa, key, value)
        sa.save()
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,327
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/stops/views.py
|
from models import UserStop, BaseStop
from forms import SearchForm
from django.shortcuts import render
from django.db.models import Count
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.decorators.cache import cache_page
import reversion
from django.http import Http404
def home(request):
    """Render the landing page with an empty search form."""
    context = {'form': SearchForm()}
    return render(request, 'stops/home.html', context)
def search(request, term = None):
    """Search stops by a term taken from either the POST form or the URL
    segment (*term* wins when both are present).

    Results are paginated 25 per page; a non-integer page falls back to
    the first page, an out-of-range one to the last.
    """
    form = search_term = result_list = results = None
    if request.POST:
        form = SearchForm(request.POST)
        if form.is_valid():
            search_term = form.cleaned_data['terms']
    if term is not None:
        # Note, we might need to do some cleanup here
        search_term = term
    if search_term is not None:
        # Get results
        result_list = BaseStop.search(search_term)
        paginator = Paginator(result_list, 25)
        page = request.GET.get('page', 1)
        try:
            results = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            results = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver last page of results.
            results = paginator.page(paginator.num_pages)
    if form is None:
        form = SearchForm()
    return render(request, 'stops/results.html', { 'results' : results, 'term' : search_term, 'form' : form })
# Cache this frontpage often, it's very slow
@cache_page(60 * 15)
def cities(request):
    """List every city having stops of stop_type=2, each annotated with its
    stop count; response cached for 15 minutes."""
    cities = BaseStop.objects.filter(stop_type=2).values('common_city').annotate(count=Count('common_city')).order_by('common_city')
    return render(request, 'stops/cities.html', { 'cities' : cities})
def city_stops(request, city):
    """List all stops of stop_type=2 in *city* (case-insensitive match),
    ordered alphabetically by name."""
    stops = (BaseStop.objects
             .filter(common_city__iexact=city, stop_type=2)
             .order_by('common_name'))
    return render(request, 'stops/stops.html', {'stops': stops})
#def stop(request, stop_id=None, tpc=None):
# if stop_id is not None:
# stop = UserStop.objects.get(id=stop_id)
# else:
# stop = UserStop.objects.get(tpc=tpc)
# if stop is None:
# return Http404
# return render(request, 'stops/stop.html', { 'stop' : stop, 'history' : reversion.get_for_object(stop)})
def stop_json(request, stop_id):
    """Render a single UserStop through the JSON template."""
    stop = UserStop.objects.get(id=stop_id)
    return render(request, 'stops/stop_json.html', {'stop': stop})
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,328
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/tools/rewrite.py
|
'''
Created on Jan 29, 2012
Import a JSON dump and convert it to a CSV column based file
@author: Joel Haasnoot
'''
import csv, codecs, cStringIO
import simplejson as json
class DictUnicodeWriter(object):
    """A csv.DictWriter wrapper that writes unicode rows to a byte stream.
    NOTE: Python 2 (`cStringIO`, byte/unicode round-trip)."""

    def __init__(self, f, fieldnames, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.DictWriter(self.queue, fieldnames, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, D):
        # py2 csv only handles bytes: encode every value before writing.
        self.writer.writerow({k:v.encode("utf-8") for k,v in D.items()})
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        for D in rows:
            self.writerow(D)

    def writeheader(self):
        # Header goes through the underlying DictWriter straight into the queue.
        self.writer.writeheader()
# Read the raw dump; a JSON object is embedded between '>{' and '}<' markers.
f = codecs.open('cxx', encoding='utf-8', mode='r')
content = f.read()
f.close()
# Cut the JSON payload out of the surrounding markup and restore its braces.
content = "{"+content.split('>{')[1].split('}<')[0]+"}"
# parse_int=str keeps numeric codes as strings so leading zeros survive.
j = json.loads(content, encoding='utf-8', parse_int=str)
import csv
f = open('cxx.csv', mode='wb')
# Column-oriented input: HALTELIST maps each field name to a list of values.
keys = j['HALTELIST'].keys()
cxxWriter = DictUnicodeWriter(f, map(unicode.lower, keys))
cxxWriter.writeheader();
# NOTE(review): iteration starts at index 1, skipping record 0 — presumably a
# header/dummy entry in the dump; confirm against the source format.
for i in range(1, len(j['HALTELIST']['ID'])):
    row = {}
    for key in keys:
        # Normalise booleans and None into the CSV's string conventions.
        if j['HALTELIST'][key][i] == True:
            j['HALTELIST'][key][i] = "1"
        elif j['HALTELIST'][key][i] == False:
            j['HALTELIST'][key][i] = "0"
        elif j['HALTELIST'][key][i] is None:
            j['HALTELIST'][key][i] = ''
        row[key.lower()] = j['HALTELIST'][key][i]
    cxxWriter.writerow(row)
f.close()
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,329
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/utils/geo.py
|
from django.contrib.gis.geos import Point
from django.contrib.gis.gdal import OGRGeometry, SpatialReference
def transform_rd(point):
    """Convert a point from the Dutch RD (Rijksdriehoek) projection to WGS84.

    Please note this returns an OGRGeometry, not a Point.
    """
    rd_proj4 = '+proj=sterea +lat_0=52.15616055555555 +lon_0=5.38763888888889 +k=0.9999079 +x_0=155000 +y_0=463000 +ellps=bessel +units=m +towgs84=565.2369,50.0087,465.658,-0.406857330322398,0.350732676542563,-1.8703473836068,4.0812 +no_defs no_defs <>'
    geometry = OGRGeometry(point.wkt, SpatialReference(rd_proj4))
    geometry.transform(4326)  # EPSG:4326 == WGS84
    return geometry
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,330
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/stops/management/commands/importkv1.py
|
'''
Import a KV1 dump in TSV format
@author: Joel Haasnoot
'''
import csv, codecs, logging
from optparse import make_option
from haltes.utils import file, geo
from haltes.stops import admin # Needed for reversion
from haltes.stops.models import UserStop, BaseStop, StopAttribute, Source, SourceAttribute
from django.core.management.base import BaseCommand
from django.contrib.gis.geos import *
from django import db
import reversion
from os.path import exists, join
from os import listdir
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--delimiter',
dest='delimiter',
help='Delimiter to use between fields'),
)
def handle(self, *args, **options):
if (len(args) < 2):
return self.do_help()
# Resolve all filenames
usrstop_filename = None
usrstar_filename = None
point_filename = None
for filename in listdir(args[0]):
if usrstop_filename is None and filename.lower().startswith('usrstop'):
usrstop_filename = filename
if usrstar_filename is None and filename.lower().startswith('usrstar'):
usrstar_filename = filename
if point_filename is None and filename.lower().startswith('point'):
point_filename = filename
mapping = "usrstop - %s, usrstar - %s and point - %s" % (usrstop_filename, usrstar_filename, point_filename)
if usrstop_filename is None or usrstar_filename is None or point_filename is None:
return "Couldn't find all 3 required files (%s)" % mapping
else:
print "Using: %s" % mapping
# Retrieve source - used for encoding
source = Source.objects.get(source_id=str(args[1]).lower())
if source is None:
return "Couldn't find the specified source - specify a data source"
# 2. Read files
# Create mapping
csv.register_dialect('kv1', quotechar='"',
delimiter=(options['delimiter'] if options['delimiter'] is not None else '|'),
doublequote=False, lineterminator='\n', quoting=csv.QUOTE_NONE)
# Read files
print "Using %s as encoding" % source.encoding
f1 = codecs.open(join(args[0], usrstop_filename), mode='rU')
if 'dataownercode' not in f1.readline().lower():
return "Huston, we have no headers!\n"
f1.close()
stops = file.UnicodeDictReader(codecs.open(join(args[0], usrstop_filename), mode='rU'), source.encoding, dialect=csv.get_dialect('kv1'))
point_rows = file.UnicodeDictReader(codecs.open(join(args[0], point_filename), mode='rU'), source.encoding, dialect=csv.get_dialect('kv1'))
stoparea_rows = file.UnicodeDictReader(codecs.open(join(args[0], usrstar_filename), mode='rU'), source.encoding, dialect=csv.get_dialect('kv1'))
# Do some translation
points = { point['PointCode'] : point for point in point_rows }
areas = { area['UserStopAreaCode'] : area for area in stoparea_rows }
for stop in stops:
with db.transaction.commit_on_success():
with reversion.create_revision():
if stop['TimingPointCode'] is '':
print "Huston, TPC was none, falling back to USC"
stop['TimingPointCode'] = stop['UserStopCode']
if stop['TimingPointCode'] is None:
return "We had no TPC or USC - import halted"
# Figure out our location
stop_location = points[stop['UserStopCode']]
pnt = geo.transform_rd(Point(int(stop_location['LocationX_EW']), int(stop_location['LocationY_NS']), srid=28992))
s, created = UserStop.objects.get_or_create(tpc=stop['TimingPointCode'],
defaults={u'common_name' : stop['Name'].replace(stop['Town']+', ', ''),
u'common_city' : stop['Town'],
'point' : pnt.wkt })
# Check for stop areas
if stop['UserStopAreaCode'] is not None and stop['UserStopAreaCode'] in areas:
s.parent = self.get_create_star(areas[stop['UserStopAreaCode']], source)
s.save()
# Save as much as the original data as possible
self.create_source_attr(s, source, 'id', stop['TimingPointCode'])
self.create_source_attr(s, source, 'GetIn', stop['GetIn'])
self.create_source_attr(s, source, 'GetOut', stop['GetOut'])
self.create_source_attr(s, source, 'Name', stop['Name'])
self.create_source_attr(s, source, 'Town', stop['Town'])
self.create_source_attr(s, source, 'UserStopAreaCode', stop['UserStopAreaCode'])
self.create_source_attr(s, source, 'MinimalStopTime', stop['MinimalStopTime'])
self.create_source_attr(s, source, 'UserStopType', stop['UserStopType'])
self.create_source_attr(s, source, 'latitude', int(stop_location['LocationX_EW']))
self.create_source_attr(s, source, 'longitude', int(stop_location['LocationY_NS']))
reversion.set_comment("KV1 Import")
def create_source_attr(self, stop, source, key, value):
self.get_create_update(SourceAttribute, {'stop' : stop, 'source' : source, 'key' : key}, {'value' : value} )
def get_create_update(self, model, get_kwargs, update_values):
''' This helper function makes a simple one line update possible '''
sa, created = model.objects.get_or_create(**get_kwargs);
for (key, value) in update_values.items():
setattr(sa, key, value)
sa.save()
''' Get or create a stop area to be a parent '''
def get_create_star(self, stop_area, source):
attr = BaseStop.objects.filter(sourceattribute__value=stop_area['UserStopAreaCode'], stop_type=2)
if attr:
# We're going to assume there's only one of you
#print "Using an existing star %s" % attr[0]
sa = attr[0]
else:
# Create the stop area, it doesn't exist
#print "Creating new star %s" % stop_area['Name']
sa = BaseStop(common_name=stop_area['Name'].replace(stop_area['Town']+', ', ''),
common_city=stop_area['Town'],
stop_type=2)
sa.save()
# Make sure the attribute is created
self.get_create_update(SourceAttribute, {'stop' : sa, 'source' : source, 'key' : u'UserStopAreaCode'}, {'value' : stop_area['UserStopAreaCode']} )
return sa
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,331
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/urls.py
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib.gis import admin
from django.views.generic.detail import DetailView
from stops.models import BaseStop, UserStop
# Auto-register every app's ModelAdmin classes with the admin site
admin.autodiscover()
urlpatterns = patterns('',
    url(r'^$', 'stops.views.home', name='home'),
    url(r'^cities/?$', 'stops.views.cities', name='cities'),
    # The search term embedded in the URL is optional
    url(r'^search/(?P<term>[\w]+)?/?$', 'stops.views.search', name='search'),
    # \w .,-/\(\)\'\`]
    url(r'^stops/(?P<city>.+)/?$', 'stops.views.city_stops', name='city_stops'),
    # Generic detail views: lookup by primary key, or by TimingPointCode
    url(r'^stop/(?P<pk>[\d]+)/?$', DetailView.as_view(model=BaseStop), name='stop'),
    url(r'^stop/tpc/(?P<tpc>[\d]{0,8})/?$', DetailView.as_view(model=UserStop, slug_field='tpc', slug_url_kwarg='tpc'), name='stop_tpc'),
    url(r'^stop/(?P<stop_id>[\w]+)/json/?$', 'stops.views.stop_json', name='stop_json'),
    (r'^admin/', include(admin.site.urls)),
)
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,332
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/stops/admin.py
|
from django.contrib.gis import admin
from models import UserStop, BaseStop, Agency, Source, StopAttribute, SourceAttribute
import reversion
class StopAttributeInline(admin.TabularInline):
    """Edit a stop's free-form attributes inline on the stop admin page."""
    model = StopAttribute
class SourceAttributeInline(admin.TabularInline):
    """Edit a stop's raw imported source attributes inline on the stop admin page."""
    model = SourceAttribute
class StopAdmin(admin.OSMGeoAdmin, reversion.VersionAdmin):
    """Map-enabled (OpenStreetMap widget), version-tracked admin for stops."""
    # Show stop/source attributes inline on the change page
    inlines = [
        StopAttributeInline,
        SourceAttributeInline
    ]
    list_display = ('__unicode__', 'stop_type')
    list_filter = ('stop_type',)
    search_fields = ['common_name','common_city']
class UserStopAdmin(StopAdmin):
    """Admin for physical stops, with an action to group a selection under one
    logical parent stop."""
    list_filter = ()
    list_display = ('__unicode__', 'has_parent')
    actions = ['merge_stops']

    def has_parent(self, obj):
        """Whether this physical stop is already attached to a logical parent."""
        return (obj.parent is not None)
    has_parent.short_description = "Has parent?"

    def merge_stops(self, request, queryset):
        """Admin action: create one logical BaseStop and attach every selected
        stop to it as a child."""
        # Bug fix: the city must come from common_city of the first selected
        # stop. The original read queryset[1].common_name -- the wrong field of
        # the wrong element, and an IndexError when only one stop was selected.
        first = queryset[0]
        base_stop = BaseStop(common_name=first.common_name, common_city=first.common_city)
        # NOTE(review): stop_type is left at its default (1, physical); a
        # logical parent is arguably stop_type=2 -- confirm intended behavior.
        base_stop.save()
        for obj in queryset:
            obj.parent = base_stop
            obj.save()
        self.message_user(request, "%s added as logical parent stop" % base_stop)
    merge_stops.short_description = "Add stops to same logical stop"
class AgencyAdmin(admin.ModelAdmin):
    """Plain admin for transport agencies."""
    model = Agency
class SourceAdmin(admin.ModelAdmin):
    """Plain admin for import data sources."""
    model = Source
'''
Please note, registering these models has the side-effect of registering them for
django-reversion and keeping track of revisions. Please think twice before removing
'''
admin.site.register(BaseStop, StopAdmin)
admin.site.register(UserStop, UserStopAdmin)
admin.site.register(Agency, AgencyAdmin)
admin.site.register(Source, SourceAdmin)
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,333
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/utils/file.py
|
import codecs
import csv
def UnicodeDictReader(str_data, encoding, **kwargs):
    """Generator wrapping csv.DictReader so keys and values come back as unicode.

    For 'ascii' sources the values are passed through undecoded.
    """
    reader = csv.DictReader(str_data, **kwargs)
    # Decode each header name once, up front, instead of per row
    decoded_keys = dict((field, field.decode(encoding)) for field in reader.fieldnames)
    for record in reader:
        yield dict((decoded_keys[key], (value if encoding == 'ascii' else unicode(value, encoding)))
                   for key, value in record.iteritems())
def UnicodeCsvReader(str_data, encoding, **kwargs):
    """Generator yielding csv rows with every cell decoded to unicode."""
    for raw_row in csv.reader(str_data, **kwargs):
        yield [unicode(cell, encoding) for cell in raw_row]
def open_file_list(filename, delimeter=',', cr='\n'):
    """Open a utf-8 file, split it by line and return a list of field lists.

    The trailing entry produced by the file's final line terminator is dropped
    ([:-1]), so files are expected to end with *cr*.
    (The 'delimeter' spelling is kept for backward compatibility with callers.)
    """
    # Bug fix: the original leaked the file handle; use a context manager.
    with codecs.open(filename, encoding='utf-8', mode='r') as f:
        contents = f.read()
    return [line.split(delimeter) for line in contents.split(cr)[:-1]]
def open_file_dict(filename, key_column=None):
    """Read a '|'-delimited utf-8 file into a dict of rows.

    When *key_column* is None rows are keyed by their 0-based line index;
    otherwise by the value in that column. The trailing empty entry after the
    final newline is dropped.
    """
    # Bug fixes: the original leaked the file handle (no close) and left a
    # debug 'print output' in place; both removed.
    output = {}
    with codecs.open(filename, encoding='utf-8', mode='r') as f:
        for i, line in enumerate(f.read().split('\n')[:-1]):
            row = line.split('|')
            if key_column is None:
                output[i] = row
            else:
                output[row[key_column]] = row
    return output
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,334
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/stops/forms.py
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
class SearchForm(forms.Form):
    """Single-field search form, rendered with django-crispy-forms."""
    terms = forms.CharField(max_length=100, label="", widget=forms.TextInput(attrs={'placeholder' : 'Typ een zoekterm'}))

    def __init__(self, *args, **kwargs):
        # Fix: initialise the base Form first and do not return a value from
        # __init__ (the original returned super().__init__'s result, an
        # anti-pattern that masks errors and confuses readers).
        super(SearchForm, self).__init__(*args, **kwargs)
        # crispy-forms rendering configuration
        self.helper = FormHelper()
        self.helper.form_action = 'search'
        self.helper.form_class = 'nice'
        self.helper.add_input(Submit('submit', 'Zoek'))
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,335
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/stops/models.py
|
from django.db import models
from django.contrib.gis.db import models as gis_models
import reversion
class Agency(models.Model):
    """A transport operator that runs one or more routes."""
    agency_id = models.CharField(max_length=10)  # external identifier
    name = models.CharField(max_length=100)
    url = models.CharField(max_length=100)
    tz = models.CharField(max_length=25)  # time zone name
    def __unicode__(self):
        return self.name
class Source(models.Model):
    """A data source that stops were imported from (e.g. GOVI, KV1 dumps)."""
    source_id = models.CharField(max_length=5)
    name = models.CharField(max_length=100)
    # Character encoding of this source's dump files, used by the importers
    encoding = models.CharField(max_length=10, blank=True, default='utf-8')
    def __unicode__(self):
        return u"%s - %s" % (self.source_id, self.name)
class BaseStop(models.Model):
    """Common base for stops.

    stop_type 1 is a physical stop; 2 is a logical stop that groups one or
    more physical stops.
    """
    common_name = models.CharField(max_length=100)
    common_city = models.CharField(max_length=50)
    stop_type = models.SmallIntegerField(choices=[(1,"Physical stop"), (2, "Logical stop")], default=1)
    def __unicode__(self):
        return u"%s, %s" % (self.common_city, self.common_name)
    @staticmethod
    def search(terms):
        # Case-insensitive substring match on name or city; logical stops only
        return BaseStop.objects.filter(models.Q(common_name__icontains=terms) | models.Q(common_city__icontains=terms)).filter(stop_type=2)
class UserStop(BaseStop, gis_models.Model):
    """A physical stop with a geographic location and a TimingPointCode."""
    tpc = models.CharField(max_length=16, unique=True) #May change
    point = gis_models.PointField()
    objects = gis_models.GeoManager()
    ''' A physical stop denotes a physical location where a transport vehicle stops. A logical stop is composed of
    one or more physical stops (typically two, one for each direction'''
    # The logical (stop_type=2) BaseStop this physical stop belongs to, if any
    parent = models.ForeignKey("BaseStop", blank=True, null=True, related_name="parent")
class StopAttribute(models.Model):
    """Free-form key/value attribute attached to a stop."""
    stop = models.ForeignKey(BaseStop)
    key = models.CharField(max_length=20)
    value = models.CharField(max_length=256)
    class Meta:
        # One value per key per stop
        unique_together = (("stop", "key"),)
    def __unicode__(self):
        return u"%s: %s" % (self.stop, self.key)
class SourceAttribute(models.Model):
    """Raw key/value attribute of a stop as delivered by a specific Source."""
    stop = models.ForeignKey(BaseStop)
    source = models.ForeignKey(Source)
    key = models.CharField(max_length=20)
    value = models.CharField(max_length=256)
    class Meta:
        # NOTE(review): the constraint omits 'source', so two different sources
        # cannot both store the same key for one stop -- confirm intentional.
        unique_together = (("stop", "key"),)
    def __unicode__(self):
        return u"%s - %s: %s" % (self.source.name, self.stop, self.key)
class Route(models.Model):
    # A public-transport line with its endpoints and operating agencies.
    ''' Line / Lijnnummer (1, A, 4s, 122s, etc)'''
    common_code = models.CharField(max_length=5)
    ''' Destination / Eindbestemming (Station Arnhem, Velp Broekhuizerweg, Het Duifje)'''
    common_destination = models.CharField(max_length=100)
    origin = models.ForeignKey(BaseStop, related_name="origin")
    destination = models.ForeignKey(BaseStop, related_name="destination")
    ''' Collection of agencies that operate this route '''
    agencies=models.ManyToManyField(Agency)
    def __unicode__(self):
        return u"Lijn %s - %s" % (self.common_code, self.common_destination)
class Trip(models.Model):
    """A single run of a Route."""
    trip_id = models.CharField(max_length=10)  # external trip identifier
    route = models.ForeignKey(Route)
class TripSegment(gis_models.Model):
    """The geometry of a trip between two consecutive stops."""
    trip = models.ForeignKey(Trip)
    ''' These names chosen because from is a protected keyword and start/end seems silly without _stop '''
    from_stop = models.ForeignKey(BaseStop, related_name="from_stop")
    to_stop = models.ForeignKey(BaseStop, related_name="to_stop")
    ''' Line of points between these two stops'''
    line = gis_models.LineStringField()
    objects = gis_models.GeoManager()
    def __unicode__(self):
        return u"%s - %s" % (self.from_stop, self.to_stop)
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,336
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/stops/management/commands/importgovi.py
|
'''
Import a GOVI stop dump (semicolon-delimited CSV)
@author: Joel Haasnoot
'''
import csv, codecs
from django.core.management.base import BaseCommand
from django.contrib.gis.geos import Point
from django.contrib.auth.models import User
from haltes.utils import file, geo
from haltes.stops.models import BaseStop, UserStop, StopAttribute, Source, SourceAttribute
from haltes.stops import admin # Needed to track reversion
import reversion
class Command(BaseCommand):
    """Import a GOVI stop dump (semicolon-delimited CSV) into UserStop rows."""
    def handle(self, *args, **options):
        # args[0]: path to the GOVI CSV export
        if (len(args) < 1):
            return
        csv.register_dialect('quotescolon', quotechar='"', delimiter=';', doublequote=False, lineterminator='\n', quoting=csv.QUOTE_NONE)
        f = codecs.open(args[0], mode='rU')
        stops = file.UnicodeDictReader(f, 'utf-8', dialect=csv.get_dialect('quotescolon'))
        # One reversion revision covers the entire import
        with reversion.create_revision():
            source, created = Source.objects.get_or_create(source_id=u'govi', defaults={u'name': "GOVI"})
            for stop in stops:
                # Names are sometimes stored as "City, Stop name"; split those
                split = unicode(stop['TimingPointName']).split(',')
                if len(split) > 1:
                    city = split[0]
                    name = split[1].lstrip()
                else:
                    city = stop['TimingPointTown'].capitalize()
                    name = stop['TimingPointName']
                # Coordinates are Dutch RD (EPSG:28992); converted to WGS84
                point = geo.transform_rd(Point(x=int(stop['LocationX_EW']), y=int(stop['LocationY_NS']), srid=28992))
                s, created = UserStop.objects.get_or_create(tpc=stop[u"TimingPointCode"],
                        defaults={u'common_name' : name, u'common_city' : city, 'point' : point.wkt})
                # Get or create our source
                # Keep every raw source column as a SourceAttribute.
                # NOTE(review): str.capitalize() also lowercases the remainder
                # ('TimingPointCode' -> 'Timingpointcode') -- confirm that this
                # is the intended attribute-key format.
                for attr in stop.keys():
                    self.get_create_update(SourceAttribute, {'stop' : s, 'source' : source, 'key' : attr.capitalize()}, {'value' : stop[attr]} )
            reversion.set_comment(u"GOVI Import")
        f.close()
    def get_create_update(self, model, get_kwargs, update_values):
        ''' This helper function makes a simple one line update possible '''
        sa, created = model.objects.get_or_create(**get_kwargs);
        for (key, value) in update_values.items():
            setattr(sa, key, value)
        sa.save()
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,337
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/stops/migrations/0002_auto__add_field_source_encoding.py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the Source.encoding field."""
    def forwards(self, orm):
        # Adding field 'Source.encoding'
        db.add_column('stops_source', 'encoding', self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Source.encoding'
        db.delete_column('stops_source', 'encoding')
    # Frozen ORM state, auto-generated by South -- do not edit by hand.
    models = {
        'stops.agency': {
            'Meta': {'object_name': 'Agency'},
            'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'tz': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'stops.basestop': {
            'Meta': {'object_name': 'BaseStop'},
            'common_city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'common_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'stop_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
        },
        'stops.route': {
            'Meta': {'object_name': 'Route'},
            'agencies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['stops.Agency']", 'symmetrical': 'False'}),
            'common_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'common_destination': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'destination': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'destination'", 'to': "orm['stops.BaseStop']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'origin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'origin'", 'to': "orm['stops.BaseStop']"})
        },
        'stops.source': {
            'Meta': {'object_name': 'Source'},
            'encoding': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'source_id': ('django.db.models.fields.CharField', [], {'max_length': '5'})
        },
        'stops.sourceattribute': {
            'Meta': {'unique_together': "(('stop', 'key'),)", 'object_name': 'SourceAttribute'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stops.Source']"}),
            'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stops.BaseStop']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'stops.stopattribute': {
            'Meta': {'unique_together': "(('stop', 'key'),)", 'object_name': 'StopAttribute'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stops.BaseStop']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'stops.trip': {
            'Meta': {'object_name': 'Trip'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stops.Route']"}),
            'trip_id': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        'stops.tripsegment': {
            'Meta': {'object_name': 'TripSegment'},
            'from_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_stop'", 'to': "orm['stops.BaseStop']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line': ('django.contrib.gis.db.models.fields.LineStringField', [], {}),
            'to_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_stop'", 'to': "orm['stops.BaseStop']"}),
            'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stops.Trip']"})
        },
        'stops.userstop': {
            'Meta': {'object_name': 'UserStop', '_ormbases': ['stops.BaseStop']},
            'basestop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['stops.BaseStop']", 'unique': 'True', 'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent'", 'null': 'True', 'to': "orm['stops.BaseStop']"}),
            'point': ('django.contrib.gis.db.models.fields.PointField', [], {}),
            'tpc': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'})
        }
    }
    complete_apps = ['stops']
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,338
|
StichtingOpenGeo-Museum/haltebeheer
|
refs/heads/master
|
/haltes/stops/migrations/0001_initial.py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: initial creation of all stops tables."""
    def forwards(self, orm):
        # Adding model 'Agency'
        db.create_table('stops_agency', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('agency_id', self.gf('django.db.models.fields.CharField')(max_length=10)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('url', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('tz', self.gf('django.db.models.fields.CharField')(max_length=25)),
        ))
        db.send_create_signal('stops', ['Agency'])
        # Adding model 'Source'
        db.create_table('stops_source', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('source_id', self.gf('django.db.models.fields.CharField')(max_length=5)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ))
        db.send_create_signal('stops', ['Source'])
        # Adding model 'BaseStop'
        db.create_table('stops_basestop', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('common_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('common_city', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('stop_type', self.gf('django.db.models.fields.SmallIntegerField')(default=1)),
        ))
        db.send_create_signal('stops', ['BaseStop'])
        # Adding model 'UserStop' (multi-table child of BaseStop)
        db.create_table('stops_userstop', (
            ('basestop_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['stops.BaseStop'], unique=True, primary_key=True)),
            ('tpc', self.gf('django.db.models.fields.CharField')(unique=True, max_length=16)),
            ('point', self.gf('django.contrib.gis.db.models.fields.PointField')()),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='parent', null=True, to=orm['stops.BaseStop'])),
        ))
        db.send_create_signal('stops', ['UserStop'])
        # Adding model 'StopAttribute'
        db.create_table('stops_stopattribute', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('stop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['stops.BaseStop'])),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('value', self.gf('django.db.models.fields.CharField')(max_length=256)),
        ))
        db.send_create_signal('stops', ['StopAttribute'])
        # Adding unique constraint on 'StopAttribute', fields ['stop', 'key']
        db.create_unique('stops_stopattribute', ['stop_id', 'key'])
        # Adding model 'SourceAttribute'
        db.create_table('stops_sourceattribute', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('stop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['stops.BaseStop'])),
            ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['stops.Source'])),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('value', self.gf('django.db.models.fields.CharField')(max_length=256)),
        ))
        db.send_create_signal('stops', ['SourceAttribute'])
        # Adding unique constraint on 'SourceAttribute', fields ['stop', 'key']
        db.create_unique('stops_sourceattribute', ['stop_id', 'key'])
        # Adding model 'Route'
        db.create_table('stops_route', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('common_code', self.gf('django.db.models.fields.CharField')(max_length=5)),
            ('common_destination', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('origin', self.gf('django.db.models.fields.related.ForeignKey')(related_name='origin', to=orm['stops.BaseStop'])),
            ('destination', self.gf('django.db.models.fields.related.ForeignKey')(related_name='destination', to=orm['stops.BaseStop'])),
        ))
        db.send_create_signal('stops', ['Route'])
        # Adding M2M table for field agencies on 'Route'
        db.create_table('stops_route_agencies', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('route', models.ForeignKey(orm['stops.route'], null=False)),
            ('agency', models.ForeignKey(orm['stops.agency'], null=False))
        ))
        db.create_unique('stops_route_agencies', ['route_id', 'agency_id'])
        # Adding model 'Trip'
        db.create_table('stops_trip', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('trip_id', self.gf('django.db.models.fields.CharField')(max_length=10)),
            ('route', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['stops.Route'])),
        ))
        db.send_create_signal('stops', ['Trip'])
        # Adding model 'TripSegment'
        db.create_table('stops_tripsegment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('trip', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['stops.Trip'])),
            ('from_stop', self.gf('django.db.models.fields.related.ForeignKey')(related_name='from_stop', to=orm['stops.BaseStop'])),
            ('to_stop', self.gf('django.db.models.fields.related.ForeignKey')(related_name='to_stop', to=orm['stops.BaseStop'])),
            ('line', self.gf('django.contrib.gis.db.models.fields.LineStringField')()),
        ))
        db.send_create_signal('stops', ['TripSegment'])
    def backwards(self, orm):
        # Constraints must be removed before the tables that carry them.
        # Removing unique constraint on 'SourceAttribute', fields ['stop', 'key']
        db.delete_unique('stops_sourceattribute', ['stop_id', 'key'])
        # Removing unique constraint on 'StopAttribute', fields ['stop', 'key']
        db.delete_unique('stops_stopattribute', ['stop_id', 'key'])
        # Deleting model 'Agency'
        db.delete_table('stops_agency')
        # Deleting model 'Source'
        db.delete_table('stops_source')
        # Deleting model 'BaseStop'
        db.delete_table('stops_basestop')
        # Deleting model 'UserStop'
        db.delete_table('stops_userstop')
        # Deleting model 'StopAttribute'
        db.delete_table('stops_stopattribute')
        # Deleting model 'SourceAttribute'
        db.delete_table('stops_sourceattribute')
        # Deleting model 'Route'
        db.delete_table('stops_route')
        # Removing M2M table for field agencies on 'Route'
        db.delete_table('stops_route_agencies')
        # Deleting model 'Trip'
        db.delete_table('stops_trip')
        # Deleting model 'TripSegment'
        db.delete_table('stops_tripsegment')
    # Frozen ORM state, auto-generated by South -- do not edit by hand.
    models = {
        'stops.agency': {
            'Meta': {'object_name': 'Agency'},
            'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'tz': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'stops.basestop': {
            'Meta': {'object_name': 'BaseStop'},
            'common_city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'common_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'stop_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
        },
        'stops.route': {
            'Meta': {'object_name': 'Route'},
            'agencies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['stops.Agency']", 'symmetrical': 'False'}),
            'common_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'common_destination': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'destination': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'destination'", 'to': "orm['stops.BaseStop']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'origin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'origin'", 'to': "orm['stops.BaseStop']"})
        },
        'stops.source': {
            'Meta': {'object_name': 'Source'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'source_id': ('django.db.models.fields.CharField', [], {'max_length': '5'})
        },
        'stops.sourceattribute': {
            'Meta': {'unique_together': "(('stop', 'key'),)", 'object_name': 'SourceAttribute'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stops.Source']"}),
            'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stops.BaseStop']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'stops.stopattribute': {
            'Meta': {'unique_together': "(('stop', 'key'),)", 'object_name': 'StopAttribute'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stops.BaseStop']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'stops.trip': {
            'Meta': {'object_name': 'Trip'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stops.Route']"}),
            'trip_id': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        'stops.tripsegment': {
            'Meta': {'object_name': 'TripSegment'},
            'from_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_stop'", 'to': "orm['stops.BaseStop']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line': ('django.contrib.gis.db.models.fields.LineStringField', [], {}),
            'to_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_stop'", 'to': "orm['stops.BaseStop']"}),
            'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stops.Trip']"})
        },
        'stops.userstop': {
            'Meta': {'object_name': 'UserStop', '_ormbases': ['stops.BaseStop']},
            'basestop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['stops.BaseStop']", 'unique': 'True', 'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent'", 'null': 'True', 'to': "orm['stops.BaseStop']"}),
            'point': ('django.contrib.gis.db.models.fields.PointField', [], {}),
            'tpc': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'})
        }
    }
    complete_apps = ['stops']
|
{"/haltes/stops/management/commands/importgovi.py": ["/haltes/stops/models.py"]}
|
26,351
|
sregmi/word-count
|
refs/heads/master
|
/test_count_letters.py
|
import unittest
from main import count_letters
class TestCountLetter(unittest.TestCase):
    """Unit tests for main.count_letters."""

    def test_count_letters(self):
        # Element [0] of the result is the total word count, as a string.
        result = count_letters('hello world!')
        self.assertEqual('2', result[0])
|
{"/test_count_letters.py": ["/main.py"]}
|
26,352
|
sregmi/word-count
|
refs/heads/master
|
/main.py
|
from flask import Flask, render_template
from flask_wtf import Form
from wtforms import TextAreaField
from wtforms.validators import DataRequired
from collections import Counter
import re
import operator
import enchant
app = Flask(__name__)
app.config['SECRET_KEY'] = 's3cr3tk3y'
class MyForm(Form):
    # Single-field form holding the text to analyse; DataRequired rejects empty input.
    input_text = TextAreaField('Insert your text here', validators=[DataRequired()])
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the input form; on a valid POST, show word-count results.

    results is [total_count_str, word_counts, valid_word_counts] as
    returned by count_letters.
    """
    form = MyForm()
    if form.validate_on_submit():
        results = count_letters(form.input_text.data)
        return render_template(
            'main.html',
            total=results[0],
            words=results[1],
            valid_words=results[2],
            valid_word_count=len(results[2])
        )
    # GET, or invalid form: show the input page again.
    return render_template('index.html', form=form)
def count_letters(input_string):
    """Count the words in *input_string*.

    A "word" is any whitespace-separated token containing at least one
    letter, trimmed of non-alphanumeric characters at both ends and
    lowercased.  See the README for more information.

    Returns a 3-element list:
      [0] total number of words, as a string,
      [1] (word, count) pairs for all words, most frequent first,
      [2] (word, count) pairs for dictionary-valid words only, most frequent first.
    """
    tokens = input_string.split()
    # Keep only tokens that contain at least one letter.
    has_letter = re.compile('.*[A-Za-z].*')
    lettered = [token for token in tokens if has_letter.match(token)]
    # Trim non-alphanumeric characters from both ends and normalise case.
    trim_pattern = "^[^a-zA-Z0-9\\s]+|[^a-zA-Z0-9\\s]+$"
    words = [re.sub(trim_pattern, "", token.lower()) for token in lettered]
    checker = enchant.Dict("en_US")
    real_words = [word for word in words if checker.check(word)]
    # Counter.most_common() returns (item, count) pairs sorted by count
    # descending - the same as sorted(items, key=itemgetter(1), reverse=True).
    word_count = Counter(words).most_common()
    valid_word_count = Counter(real_words).most_common()
    return [str(len(words)), word_count, valid_word_count]
if __name__ == '__main__':
    # Development entry point; debug=True enables the reloader and debugger.
    app.run(debug=True)
|
{"/test_count_letters.py": ["/main.py"]}
|
26,591
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/xola_deputy/global_config.py
|
"""Configuration module for logging"""
import logging
from os import mkdir, getcwd
import csv
import threading
import time
import configparser
LOG_DIR = "./logs/"  # directory where rotating log files are written
FILE_NAME_MAPPING = "mapping.csv"  # CSV mapping of titles / experience ids / areas
DELIMITER = ","  # field delimiter of the mapping CSV
CONFIG_FILE_NAME = getcwd() + '/Settings.ini'  # absolute path of the INI settings file
config = configparser.ConfigParser()
config.read(CONFIG_FILE_NAME)
# Create the log directory once at import time; an existing directory is fine.
try:
    mkdir(LOG_DIR)
except OSError:
    print("Logs directory exists.")
else:
    print("Successfully created the logs directory")
# Logging configuration consumed by logging.config.dictConfig (see logger.py,
# which also appends the configured handler name to root['handlers']).
LOG_CONFIG = dict(
    version=1,
    formatters={
        # Short format for console output.
        'simple':
        {
            'format': '[%(asctime)s] [%(levelname)s] - : %(message)s.',
            'datefmt': '%H:%M:%S',
        },
        # Verbose format (line number, logger name) for the log file.
        'detailed':
        {
            'format': '[%(asctime)s] [%(levelname)s] - Line: %(lineno)d '
                      '- %(name)s - : %(message)s.',
            'datefmt': '%d/%m/%y - %H:%M:%S',
        },
    },
    handlers={
        # Console handler: plain format, INFO and above.
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
            'level': logging.INFO,
        },
        # Daily-rotating file handler writing into LOG_DIR.
        'file':
        {
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'formatter': 'detailed',
            'level': logging.INFO,
            'filename': LOG_DIR + 'logfile',
            'when': 'midnight',
        },
    },
    root={
        'handlers': ['file', ],
        'level': logging.INFO,
    },
)
def compare_mapping(compare_value, key):
    """Look up a row of the mapping CSV by column value.

    :param compare_value: value to compare against the selected column
    :param key: one of 'title', 'experience' or 'area' - selects the column
    :return: the matching row as a dict, or False when the key is unknown
        or no row matches
    """
    key_value = {
        'title': "Possible Area Nicknames in Production",
        'experience': "experience_id",
        'area': "Area"
    }
    heading = key_value.get(key)
    if heading is None:
        return False
    # newline='' is the documented way to open files for the csv module.
    with open(FILE_NAME_MAPPING, newline='') as r_file:
        file_reader = csv.DictReader(r_file, delimiter=DELIMITER)
        for exp_dict in file_reader:
            if exp_dict[heading] == compare_value:
                return exp_dict
    return False
def treade_notification_deamon(func, sec=0, minutes=0, hours=0):
    """Call *func* forever, sleeping the given interval between calls.

    Runs as the body of a daemon thread (see create_tread); never returns.

    :param func: zero-argument callable to invoke periodically
    :param sec: seconds part of the interval
    :param minutes: minutes part of the interval
    :param hours: hours part of the interval
    """
    # The interval never changes, so compute it once instead of every loop.
    sleep_time = sec + (minutes * 60) + (hours * 3600)
    while True:
        time.sleep(sleep_time)
        func()
def create_tread(func):
    """Spawn a daemon thread that re-runs *func* every 23 hours."""
    worker = threading.Thread(
        target=treade_notification_deamon,
        kwargs={"func": func, "hours": 23},
    )
    worker.daemon = True
    worker.start()
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,592
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/xola_deputy/xola_deputy.py
|
"""Main script which start all logic. Here we have 2 webhooks,
and process date from request from XOLA and DEPUTY"""
from flask import Flask, request, Response
from xola_client import XolaClient
from deputy_client import DeputyClient
from logger import LoggerClient
from google_sheets_client import GoogleSheetsClient
from global_config import create_tread
app = Flask(__name__)
# Build the shared logger first; every client below logs through it.
logging = LoggerClient()
logging.settings_init()
logging = logging.get_logger()
# Module-level singletons used by the route handlers below.
xola = XolaClient(logging)
xola.init_settings()
deputy = DeputyClient(logging)
deputy.init_settings()
sheets = GoogleSheetsClient(deputy, logging)
sheets.init_settings()
@app.route("/xola", methods=['POST'])
def xola_deputy_run():
"""Take response from xola notification about order.
Create parameters for deputy POST request with data from XOLA.
Post new shift with this parameters , and check which employee free.
:return: code 200 is all good, code 500 - arose problem
"""
params, number_shifts, mapping = xola.start(request.json)
if params is False:
return Response(status=500)
for _ in range(number_shifts):
# first time we created shift in open block
id_shift, date_shift = deputy.process_data_from_new_shift(params)
if date_shift is False:
return Response(status=500)
date_shift = xola.convert_time(date_shift)
id_location = mapping["Area"]
title = mapping['Possible Area Nicknames in Production']
unavailable_employee = deputy.process_people_unavailability(
date_shift, id_location)[0] # check who have a work
if unavailable_employee is False:
return Response(status=500)
id_employee = deputy.get_people_availability(
id_shift, unavailable_employee)
params.update({
"intRosterId": id_shift,
"intRosterEmployee": id_employee
})
is_good = deputy.process_data_from_new_shift(
params) # post shift for employee
if is_good is False:
return Response(status=500)
sheets.change_cells(
params["intStartTimestamp"],
params["intEndTimestamp"],
title)
logging.info("Successfully post shift, sheets ")
name_of_employee = deputy.get_employee_name(id_employee)
if xola.verification_guides_for_event(name_of_employee) is False:
return Response(status=500)
logging.info("Successfully post guides")
return Response(status=200)
@app.route("/delete_employee", methods=['POST'])
def deputy_delete():
"""we change all list in google sheet,when in deputy delete employee"""
sheets.change_all_spread()
logging.info("Successfully change sheet")
return Response(status=200)
@app.route("/insert_employee", methods=['POST'])
def deputy_insert():
"""we change all list in google sheet,when in deputy insert employee"""
sheets.change_all_spread()
logging.info("Successfully change sheet")
return Response(status=200)
@app.route("/unvial_employee", methods=['POST'])
def deputy_unvial():
"""get all day off from deputy,change specific list,and make minus 1 to cells"""
list_of_unvial = deputy.get_employee_unavail()
if list_of_unvial is False:
return Response(status=500)
title_to_change = set()
for unvial_time in list_of_unvial:
title_to_change.add(unvial_time[2])
for title in title_to_change:
sheets.change_specific_spread(title)
for unvial_time in list_of_unvial:
sheets.change_cells(unvial_time[0], unvial_time[1], unvial_time[2])
logging.info("Successfully change sheet")
return Response(status=200)
if __name__ == '__main__':
    # Subscribing twice is not fatal; just log and continue.
    if xola.subscribe_to_webhook() is False:
        logging.warning("Can not subscribe to webhook")
    deputy.subscribe_to_webhooks()
    sheets.change_all_spread()
    # Refresh all sheets periodically from a background daemon thread.
    create_tread(sheets.change_all_spread)
    app.run(host="0.0.0.0", port=5000)
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,593
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/xola_deputy/xola_client.py
|
""""Have class XolaCLient,which connect to Xola API and get/post data from here.
Also process data from xola-webhook for parameters to deputy post request"""
from datetime import datetime
from math import ceil
import json
import requests
from global_config import compare_mapping, config
HTTP_CREATED = 201  # HTTP status: resource created
HTTP_CONFLICT = 409  # HTTP status: conflict with an existing resource
class XolaClient():
    """Client for the XOLA API.

    Handles webhook subscription, event data retrieval and guide
    assignment.  Instances are configured via init_settings() after
    construction.
    """

    __url = "https://xola.com/api/"
    # Per-instance state, populated by init_settings() and the handlers below.
    _event_id, __headers = "", ""
    _seller_id, __public_url = "", ""
    __x_api_key, __user_id = "", ""

    def __init__(self, logger):
        self.log = logger

    def init_settings(self):
        """Parse the Settings.ini file and set up the XOLA API credentials."""
        self.__x_api_key, self.__user_id = config['XOLA']['x_api_key'], config['XOLA']['user_id']
        self.__public_url = config['URL']['public_url']
        self.__headers = {
            'X-API-KEY': self.__x_api_key,
        }

    def post_request_subscribe_to_webhook(self, event_name):
        """POST a webhook subscription pointing at our public /xola route.

        :param event_name: XOLA event to subscribe to (e.g. "order.create")
        :return: HTTP status code, or None when the request itself failed
        """
        url = self.__url + "users/" + self.__user_id + "/hooks"
        param = {"eventName": event_name, "url": self.__public_url + "/xola"}
        json_mylist = json.dumps(param)
        data = f"{json_mylist}"
        try:
            response = requests.post(
                url=url, headers=self.__headers, data=data)
            return response.status_code
        except requests.RequestException as ex:
            self.log.error(
                "Unable to send post request to XOLA",
                exc_info=ex)

    def subscribe_to_webhook(self, event_name="order.create"):
        """Subscribe to a XOLA webhook ("order.create" by default).

        :param event_name: XOLA event name to subscribe to
        :return: True on success, False otherwise (including when the
            webhook is already subscribed)
        """
        status_code = self.post_request_subscribe_to_webhook(event_name)
        if status_code != HTTP_CREATED:
            if status_code == HTTP_CONFLICT:
                self.log.warning("Webhook already subscribe")
                return False
            self.log.error("Subscription failed " +
                           "response.status_code = " +
                           str(status_code))
            return False
        return True

    def _get_data_from_event(self,):
        """Fetch all data for the event stored in self._event_id.

        :return: response in json format with all fields, or None on failure
        """
        url = self.__url + "events/" + self._event_id
        try:
            response = requests.get(
                url=url, headers=self.__headers)
            return response.json()
        except requests.RequestException as ex:
            self.log.error(
                "Unable to send get request to XOLA",
                exc_info=ex)

    def take_params_from_responce(self, request):
        """Build Deputy shift parameters from a XOLA webhook payload.

        :param request: webhook payload in json format
        :return: (params dict for Deputy, number of shifts, mapping row)
        :raises ValueError: when the experience id is missing from the mapping
        """
        self._event_id = request["data"]["items"][0]["event"]["id"]
        response = self._get_data_from_event()
        time_start = self.convert_time(response["start"])
        time_end = self.convert_time(response["end"])
        experience_id = response["experience"]["id"]
        # all tickets reserved for this one event
        ticket_count = response["quantity"]["reserved"]
        self._seller_id = response["seller"]["id"]
        mapping = compare_mapping(experience_id, "experience")
        if mapping is False:
            self.log.error("Can not find experience in json file")
            raise ValueError
        shift_count = int(mapping["Shifts logic"])
        params = {
            "intStartTimestamp": time_start,
            "intEndTimestamp": time_end,
            # we need plus 2,because it is specific validation from deputy post
            # request
            "intOpunitId": int(mapping["Area"]) + 2,
        }
        number_shifts = self.calculation_of_employee(shift_count, ticket_count)
        return params, number_shifts, mapping

    def start(self, request):
        """Entry point: parse the webhook payload, guarding against bad JSON.

        :return: (params, number_shifts, mapping) or (False, False, False)
        """
        try:
            params, number_shifts, title = self.take_params_from_responce(
                request)
            return params, number_shifts, title
        except (ValueError, TypeError):
            self.log.error("Bad JSON data")
            return False, False, False

    def get_list_of_guids(self):
        """List all guides of the current seller.

        :return: list of (name, id) tuples, or False on a non-200 response
        """
        response = self._get_request_list_guides()
        if response.status_code != 200:
            return False
        list_guides = []
        for guide_name in response.json()["data"]:
            guides = (guide_name["name"], guide_name["id"])
            list_guides.append(guides)
        return list_guides

    def _get_request_list_guides(self):
        """GET the seller's guides; returns the raw response, None on failure."""
        url = self.__url + "sellers/" + self._seller_id + "/guides"
        try:
            response = requests.get(
                url=url, headers=self.__headers)
            return response
        except requests.RequestException as ex:
            self.log.error("Unable to send get request to XOLA", exc_info=ex)

    def take_guide_id(self, name_of_employee):
        """Map a Deputy employee name to a XOLA guide id.

        :param name_of_employee: employee name from Deputy
        :return: guide id, or False when the guide cannot be found
        """
        list_guides = self.get_list_of_guids()
        if list_guides is False:
            return False
        guide_id = ""
        for guide in list_guides:
            if guide[0] == name_of_employee:
                guide_id = guide[1]
        if guide_id == "":
            self.log.warning("Can not find employee in xola guides")
            return False
        return guide_id

    def _post_guides_for_event(self, guide_id):
        """POST a guide assignment for the current event; returns status code."""
        url = self.__url + "events/" + self._event_id + "/guides"
        param = {"id": {"id": guide_id}}
        json_mylist = json.dumps(param)
        data = f"{json_mylist}"
        try:
            response = requests.post(
                url=url, headers=self.__headers, data=data)
            return response.status_code
        except requests.RequestException as ex:
            self.log.error("Unable to send get request to XOLA", exc_info=ex)

    def verification_guides_for_event(self, name_of_employee):
        """Assign the named employee as a guide on the current event.

        :param name_of_employee: employee name from Deputy
        :return: True on success, False on any failure
        """
        guide_id = self.take_guide_id(name_of_employee)
        if guide_id is False:
            return False
        status_code = self._post_guides_for_event(guide_id)
        # Bug fix: the conflict case must be checked before the generic
        # "not created" case - previously 409 hit the `!= HTTP_CREATED`
        # branch first, making this message unreachable.
        if status_code == HTTP_CONFLICT:
            self.log.error(
                "The guide is already assigned to an overlapping event.")
            return False
        if status_code != HTTP_CREATED:
            self.log.error("Can not assigned guides ")
            return False
        self.log.info("Update successfully sent to XOLA")
        return True

    @staticmethod
    def calculation_of_employee(shift_count, ticket_count):
        """
        :param shift_count: max number of tickets one person can handle
        :param ticket_count: all tickets reserved for the event
        :return: how many new shifts have to be created
        """
        if shift_count == 1:  # no need to divide tickets between guides
            return 1
        if ticket_count < shift_count:  # everything fits into a single shift
            return 1
        return ceil(ticket_count / shift_count)

    @staticmethod
    def convert_time(time):
        """Convert an ISO-format timestamp to a "%Y-%m-%d" date string.

        :param time: datetime in ISO format
        :return: date string
        """
        str_date = datetime.fromisoformat(time).strftime("%Y-%m-%d")
        return str_date
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,594
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/xola_deputy/logger.py
|
"""Init logger parameters"""
import configparser
import logging
from logging.config import dictConfig
from global_config import CONFIG_FILE_NAME, LOG_CONFIG
DEFAULT_NAME_FLASK_LOGGER = 'werkzeuq'
class LoggerClient():
    """Builds the application-wide logger from LOG_CONFIG and Settings.ini."""

    # Root logger; replaced after dictConfig runs in logger_settings().
    logger = logging.getLogger()
    # Extra handler name ('console' or 'file') read from Settings.ini.
    logmode = "console"

    def settings_init(self):
        """Read the [LOG] section of Settings.ini to pick the handler."""
        config_console = configparser.ConfigParser()
        config_console.read(CONFIG_FILE_NAME)
        self.logmode = config_console["LOG"]["log_mode"]

    def logger_settings(self):
        """Apply LOG_CONFIG and silence Flask's request logger."""
        # Guard the append so repeated calls do not accumulate duplicate
        # handler names in the shared LOG_CONFIG dict.
        if self.logmode not in LOG_CONFIG['root']['handlers']:
            LOG_CONFIG['root']['handlers'].append(self.logmode)
        flask_log = logging.getLogger(DEFAULT_NAME_FLASK_LOGGER)
        flask_log.setLevel(logging.ERROR)
        dictConfig(LOG_CONFIG)
        self.logger = logging.getLogger()

    def get_logger(self):
        """Initialise logger settings.

        :return: configured root logger
        """
        self.logger_settings()
        return self.logger
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,595
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/xola_deputy/tests/test_deputy_client.py
|
"""File with test for deputy_client file and class DeputyClient """
import pytest
import json
import requests
from unittest.mock import patch
from xola_deputy.deputy_client import DeputyClient
from xola_deputy.logger import LoggerClient
# Shared test doubles: one logger and one DeputyClient used by every test below.
logging = LoggerClient().get_logger()
deputy = DeputyClient(logging)
@pytest.fixture()
def get_shift_json():
    """Canned JSON mimicking Deputy's response to posting a new shift
    (tests/data_from_post_new_shift_deputy.json)."""
    with open("tests/data_from_post_new_shift_deputy.json", "r") as file:
        request = json.load(file)
        return request


@pytest.fixture()
def get_unvial_json():
    """Canned JSON mimicking Deputy's roster/unavailability response
    (tests/data_from_unvial_deputy.json)."""
    with open("tests/data_from_unvial_deputy.json", "r") as file:
        request = json.load(file)
        return request


@pytest.fixture()
def get_recomendation_json():
    """Canned JSON mimicking Deputy's shift recommendation response
    (tests/data_recomendation_deputy.json)."""
    with open("tests/data_recomendation_deputy.json", "r") as file:
        request = json.load(file)
        return request


@pytest.fixture()
def get_dayoff_json():
    """Canned JSON mimicking Deputy's employee day-off response
    (tests/data_unvial_employee_deputy.json)."""
    with open("tests/data_unvial_employee_deputy.json", "r") as file:
        request = json.load(file)
        return request


@pytest.fixture()
def get_employee_json():
    """Canned JSON mimicking Deputy's employee-info response
    (tests/data_emplloyee_info_deputy.json)."""
    with open("tests/data_emplloyee_info_deputy.json", "r") as file:
        request = json.load(file)
        return request
def test_process_data_from_new_shift(get_shift_json):
    """Mock _post_new_shift and check process_data_from_new_shift's result."""
    with patch.object(DeputyClient, '_post_new_shift') as mock_func:
        mock_func.return_value = get_shift_json
        params = {
            "intStartTimestamp": 1234,
            "intEndTimestamp": 1243,
            "intOpunitId": 3,
        }
        assert deputy.process_data_from_new_shift(params)
    # NOTE(review): return_value=KeyError makes the mock return the KeyError
    # class itself; the method then returns (False, False), which is a truthy
    # tuple, so this assert passes regardless - confirm the intended check.
    with patch.object(DeputyClient, '_post_new_shift', return_value=KeyError):
        params = {
            "intStartTimestamp": 1234,
            "intEndTimestamp": 1243,
            "intOpunitId": 3,
        }
        assert deputy.process_data_from_new_shift(params)
def test_process_people_unavailability(get_unvial_json):
    """Mock _get_people_unavailability: valid data is truthy, bad data is False."""
    with patch.object(DeputyClient, '_get_people_unavailability') as mock_func:
        mock_func.return_value = get_unvial_json
        assert deputy.process_people_unavailability("2020-12-17", "1")
    # Returning the ValueError class forces the error path inside the method.
    with patch.object(DeputyClient, '_get_people_unavailability', return_value=ValueError):
        assert deputy.process_people_unavailability("2020-12-17", "1") == False
def test_get_people_availability(get_recomendation_json):
    """Mock _get_recomendation and check get_people_availability's branches."""
    # No unavailable employees: any trained recommendation is acceptable.
    with patch.object(DeputyClient, '_get_recomendation') as mock_func:
        mock_func.return_value = get_recomendation_json
        test_unvi = []
        assert deputy.get_people_availability("49", test_unvi)
    # Some employees busy: a different employee must be picked.
    with patch.object(DeputyClient, '_get_recomendation') as mock_func:
        mock_func.return_value = get_recomendation_json
        test_unvi = [1, 2, 3]
        assert deputy.get_people_availability("49", test_unvi)
    # Empty "trained" section still yields a result.
    with patch.object(DeputyClient, '_get_recomendation') as mock_func:
        get_recomendation_json["trained"] = {}
        mock_func.return_value = get_recomendation_json
        test_unvi = [1, 2, 3]
        assert deputy.get_people_availability("49", test_unvi)
    # NOTE(review): this block sets return_value=IndexError via patch.object
    # but then assigns to the *previous* mock_func, which is no longer active
    # - confirm the intended setup.
    with patch.object(DeputyClient, '_get_recomendation', return_value=IndexError):
        mock_func.return_value = get_recomendation_json
        test_unvi = [1, 2, 3]
        assert deputy.get_people_availability("49", test_unvi) == False
def test_check_all_job_employee():
    """check_all_job_employee picks employee 7 from this list.

    7 appears only once here - presumably the employee with the fewest
    assignments is returned; verify against DeputyClient's implementation.
    """
    unavailable_employee = [1, 2, 2, 3, 4, 5, 6, 3, 4, 5, 6, 7]
    assert deputy.check_all_job_employee(unavailable_employee) == 7
def test_update_params_for_post_deputy_style():
    """The deputy-style serializer must produce a string payload."""
    test_param = {"test": 1}
    data = deputy.update_params_for_post_deputy_style(test_param)
    # isinstance is the idiomatic type check (and accepts str subclasses).
    assert isinstance(data, str)
def test_get_employee_unavail(get_dayoff_json):
    """Mock _get_request_for_employee_unvail and _get_area_for_employee."""
    # Empty response -> no unavailability -> False.
    with patch.object(DeputyClient, '_get_request_for_employee_unvail') as mock_func:
        mock_func.return_value = []
        assert deputy.get_employee_unavail() == False
    # Day-off data with a resolvable area -> truthy result.
    with patch.object(DeputyClient, '_get_request_for_employee_unvail') as mock_func:
        mock_func.return_value = get_dayoff_json
        with patch.object(DeputyClient, '_get_area_for_employee', return_value="Aliens"):
            assert deputy.get_employee_unavail()
    # Area lookup failure propagates as False.
    with patch.object(DeputyClient, '_get_request_for_employee_unvail') as mock_func:
        mock_func.return_value = get_dayoff_json
        with patch.object(DeputyClient, '_get_area_for_employee', return_value=False):
            assert deputy.get_employee_unavail() == False
def test_subscribe_to_webhooks():
    """subscribe_to_webhooks succeeds only when verification passes and the
    webhook POST returns 200."""
    with patch.object(DeputyClient, '_verification_webhooks', return_value=False):
        assert deputy.subscribe_to_webhooks() == False
    # 201 from post_params_for_webhook is treated as a failure here.
    with patch.object(DeputyClient, '_verification_webhooks', return_value=True):
        with patch.object(DeputyClient, 'post_params_for_webhook', return_value=201):
            assert deputy.subscribe_to_webhooks() == False
    with patch.object(DeputyClient, '_verification_webhooks', return_value=True):
        with patch.object(DeputyClient, 'post_params_for_webhook', return_value=200):
            assert deputy.subscribe_to_webhooks() == True
def test_get_area_for_employee(get_employee_json):
    """_get_area_for_employee resolves an area from the employee-info JSON."""
    with patch.object(DeputyClient, '_get_request_employee') as mock_func:
        mock_func.return_value = get_employee_json
        assert deputy._get_area_for_employee("1")
# The tests below exercise the raw request helpers without a network or valid
# credentials; each is expected to fail with requests.RequestException.
@pytest.mark.xfail(raises=requests.RequestException)
def test_post_new_shift():
    params = {
        "intStartTimestamp": 1234,
        "intEndTimestamp": 1243,
        "intOpunitId": 3,
    }
    deputy._post_new_shift(params)


@pytest.mark.xfail(raises=requests.RequestException)
def test_get_people_unavailability():
    deputy._get_people_unavailability("2020-12-12", "1")


@pytest.mark.xfail(raises=requests.RequestException)
def test_get_recomendation():
    deputy._get_recomendation("1")


@pytest.mark.xfail(raises=requests.RequestException)
def test_get_employee_name():
    deputy.get_employee_name("1")


@pytest.mark.xfail(raises=requests.RequestException)
def test_get_request_for_employee_unvail():
    deputy._get_request_for_employee_unvail()


@pytest.mark.xfail(raises=requests.RequestException)
def test_get_request_employee():
    deputy._get_request_employee("1")


@pytest.mark.xfail(raises=requests.RequestException)
def test_verification_webhooks():
    deputy._verification_webhooks("1")


@pytest.mark.xfail(raises=requests.RequestException)
def test_post_params_for_webhook():
    deputy.post_params_for_webhook("Aliens", "url")


@pytest.mark.xfail(raises=requests.RequestException)
def test_get_number_of_employee():
    deputy.get_number_of_employee("1")
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,596
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/xola_deputy/tests/test_global_config.py
|
from xola_deputy.global_config import compare_mapping
def test_compare_mapping():
    """compare_mapping returns a row dict for known values, False otherwise.

    Relies on the repository's mapping.csv containing these sample rows.
    """
    assert compare_mapping("Asylum", "title")
    assert compare_mapping("5b649f7ec581e1c1738b463f", "experience")
    assert compare_mapping("1", "area")
    assert compare_mapping("111111", "experience") == False
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,597
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/xola_deputy/google_sheets_client.py
|
"""Google class for process data from deputy and post data to google sheets"""
from datetime import datetime
from spreadsheet_api import SpreadsheetAPI
from global_config import compare_mapping, config
START_TIME = " 6:00"  # time of the first cell in the google sheets grid
END_TIME = " 23:30"  # time of the last cell in the google sheets grid
DELAY = 1800  # google sheets cells cover 30-minute slots, expressed in seconds
ONE_DAY_UNIX = 86400  # 1 day in seconds
DELAY_ALL_DAY = 63000  # seconds between the first and last cell of one day
DAYS = 30  # how many days ahead the sheets are recalculated
class GoogleSheetsClient():
    """Mediator class: processes availability data from Deputy and posts it
    to google sheets."""

    # Filled in by init_settings().
    __spreadsheet_id = ""
    sheets_api = ""

    def __init__(self, deputy, logger):
        self.deputy = deputy
        self.logger = logger

    def init_settings(self):
        """Parse the Settings.ini file and set up the google sheets connection."""
        self.__spreadsheet_id = config['GOOGLE']['spreadsheet_id']
        self.sheets_api = SpreadsheetAPI(self.__spreadsheet_id)

    def change_sheets(self, count_of_days, id_location):
        """
        Take shifts from Deputy and subtract unavailable employees from each
        cell of the google sheets grid.
        :param count_of_days: how many days we process
        :param id_location: location id in Deputy
        :return: list with the available-employee count for every
            time/date cell in google sheets
        """
        date_start, date_end, date_shift = self.date_time()
        number_of_employee = self.deputy.get_number_of_employee(id_location)
        list_of_free_employee = []
        number_of_days = 0
        while number_of_days < count_of_days:
            # Seed every 30-minute cell of the day with the full head count.
            while date_start <= date_end:
                list_of_free_employee.append([date_start, number_of_employee])
                date_start = date_start + DELAY
            # Subtract employees already rostered on this day.
            time_of_shits = self.deputy.process_people_unavailability(
                date_shift, id_location)[1]
            self.calculation_unavailability_count(
                time_of_shits, list_of_free_employee)
            # Advance the window to the same START/END times of the next day.
            date_start, date_end = (date_end - DELAY_ALL_DAY) + \
                ONE_DAY_UNIX, date_end + ONE_DAY_UNIX
            date_shift = datetime.fromtimestamp(
                date_start).strftime('%Y-%m-%d')
            number_of_days += 1
        return list_of_free_employee

    def change_cells(self, start_time, end_time, title):
        """
        Change cell values by -1 in the given list.
        :param start_time: unix time where the change starts
        :param end_time: unix time where the change ends
        :param title: title of the list in google sheets
        """
        # Snap both ends onto the 30-minute grid before updating.
        start_time = self._convert_for_30_minutes(start_time, False)
        end_time = self._convert_for_30_minutes(end_time)
        self.sheets_api.change_multiple_ranges_by_value(
            title, start_time, end_time, -1)

    def change_all_spread(self):
        """
        Recompute the values of every list in google sheets.
        """
        title_of_all_lists = self._get_all_title()
        for title in title_of_all_lists:
            # Lists without a mapping entry are skipped.
            if self.change_specific_spread(title) is False:
                continue

    def change_specific_spread(self, title):
        """
        Recompute the cell values of one specific list.
        :param title: name of the list
        :return: False when the title has no mapping entry, True otherwise
        """
        location_id = compare_mapping(title, "title")
        if location_id is False:
            return False
        range_of_values = self.change_sheets(DAYS, location_id["Area"])
        self.sheets_api.change_availability_multiple_ranges(
            title, range_of_values)
        return True

    def _get_all_title(self):
        """
        Get the titles of all lists in the google sheets document.
        :return: list of title strings
        """
        all_settings_sheets = self.sheets_api.sheets
        title_of_all_lists = []
        for lists in all_settings_sheets:
            title_of_all_lists.append(lists['properties']['title'])
        return title_of_all_lists

    @staticmethod
    def _convert_for_30_minutes(time, key=True):
        """
        The google sheets lists have 30-minute cells, so a shift that does
        not start on :00 or :30 is snapped to the grid
        (e.g. start shift at 13:32, modification = 13:30).
        :param time: unix time
        :param key: True to round up (add a cell), False to round down
        """
        mod = time % DELAY
        time = time - mod
        if mod != 0:
            # NOTE(review): with key=False this subtracts a further 30 minutes
            # after already rounding down (13:32 -> 13:00, not the 13:30 the
            # docstring example suggests) - confirm this is intentional.
            if key:
                time += DELAY
            else:
                time -= DELAY
        return time

    @staticmethod
    def calculation_unavailability_count(time_of_shits, list_of_free_employee):
        """
        Subtract each shift interval from the per-cell availability counts.
        Mutates both arguments in place.
        :param time_of_shits: list of [start, end] unix times, one per shift
        :param list_of_free_employee: list of [cell_time, free_count] pairs
        :return: list_of_free_employee after the subtraction
        """
        for time_end_start in time_of_shits:
            while time_end_start[0] <= time_end_start[1]:
                for number in list_of_free_employee:
                    if number[0] == time_end_start[0]:
                        number[1] -= 1
                time_end_start[0] += DELAY
        return list_of_free_employee

    @staticmethod
    def date_time():
        """
        Take today's date and transform it into unix times and a date string.
        :return: 3 values: unix time (seconds) of today's START_TIME,
            unix time of today's END_TIME, and today as year-month-day
        """
        date_shift = datetime.now().strftime("%d-%m-%Y")
        str_date = date_shift + START_TIME
        data_unix_start = int(
            datetime.strptime(
                str_date,
                "%d-%m-%Y %H:%M").timestamp())
        str_date = date_shift + END_TIME
        data_unix_end = int(
            datetime.strptime(
                str_date,
                "%d-%m-%Y %H:%M").timestamp())
        date_shift = datetime.now().strftime("%Y-%m-%d")
        return data_unix_start, data_unix_end, date_shift
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,598
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/xola_deputy/deputy_client.py
|
"""Have class DeputyCLient,which connect to deputy API and get/post data from here"""
import json
from collections import Counter
import requests
from global_config import compare_mapping, config
HTTP_SUCCESS = 200
class DeputyClient():
""""Connect to Deputy API"""
__headers = {}
__url = ""
__public_url = ""
def __init__(self, logger):
self.log = logger
    def init_settings(self):
        """Read Deputy credentials from Settings.ini and build the auth
        header, base URL and public callback URL."""
        deputy_access_token, deputy_id = config["DEPUTY"]["deputy_access_token"], \
            config["DEPUTY"]["deputy_id"]
        self.__headers = {
            'Authorization': 'OAuth ' + deputy_access_token,
        }
        self.__url = 'https://' + deputy_id + '/api/v1/'
        self.__public_url = config['URL']['public_url']
def _post_new_shift(self, params_for_deputy):
"""
Make post request to deputy roster ,with specific params, which we take from arguments.
If response will be successful, return data in json format from response.
:param params_for_deputy: parameters to post request
:return: json response
"""
data = self.update_params_for_post_deputy_style(params_for_deputy)
url = self.__url + 'supervise/roster/'
try:
response = requests.post(
url=url, headers=self.__headers, data=data)
return response.json()
except requests.RequestException as ex:
self.log.error(
"Unable to send post request to DEPUTY",
exc_info=ex)
def process_data_from_new_shift(self, params_for_deputy=None):
"""
Make request, get data in json format.
Process data, and return id new shift witch we created, and
date this shift in format "year-month-day"
:param params_for_deputy: parameters to post request
:return: id new shift, date new shift
"""
try:
response = self._post_new_shift(params_for_deputy)
return str(response["Id"]), response["Date"]
except (TypeError, KeyError):
self.log.warning(
"Can not find availability employee",)
return False, False
def _get_people_unavailability(self, date, id_location):
"""
Make get request to deputy roster ,with specific params, which we take from arguments.
If response will be successful, return data in json format from response.
:param date: date in format "year-month-day" where find all shift
:param id_location: id location where find shifts
:return: json response
"""
url = self.__url + 'supervise/roster/' + date + "/" + id_location
try:
response = requests.get(url=url, headers=self.__headers, )
return response.json()
except requests.RequestException as ex:
self.log.error(
"Unable to send post request to DEPUTY",
exc_info=ex)
def process_people_unavailability(self, date, id_location):
"""
Make get request . Process data from response, get time of all shift,
and employee who work on this shift
:param date: date in format "year-month-day" where find all shift
:param id_location: id location where find shifts
:return: list of id employee,who have shift , list of tuple with shift time
"""
unavailable_employee = []
unavailable_time = []
try:
response = self._get_people_unavailability(date, id_location)
for shift in response:
unavailable_time.append([shift["StartTime"], shift["EndTime"]])
if shift["_DPMetaData"]["EmployeeInfo"]:
unavailable_employee.append(
str(shift["_DPMetaData"]["EmployeeInfo"]["Id"]))
return unavailable_employee, unavailable_time
except (ValueError, TypeError):
self.log.error("Bad JSON data")
return False
def _get_recomendation(self, shift_id):
"""
Make get request to deputy roster ,with specific params, which we take from arguments.
If response will be successful, return data in json format from response.
:param shift_id: id empty shift
:return: json response
"""
url = self.__url + 'supervise/getrecommendation/' + shift_id
try:
response = requests.get(url=url, headers=self.__headers, )
return response.json()
except requests.RequestException as ex:
self.log.error(
"Unable to send post request to DEPUTY",
exc_info=ex)
def get_people_availability(self, shift_id, unavi_employee):
"""
Make request,which return recommendation about employee and shift.
Then we process data and get ids recommendation employee,
and exclude ids unavailability employee(day-off). From arguments take list of
unavailability employee( have a shift ) . We compare which employee
have a less work and return his id.
:param shift_id: id empty shift
:param unavi_employee: list of ids employee who have shift
:return: id free employeee
"""
try:
response = self._get_recomendation(shift_id)
free_employee = list(response["trained"])
unavilability_employee = list(response["unavailable"])
free_employee = list(
set(free_employee) -
set(unavilability_employee))
if not unavi_employee: # check if we have all employee free
return free_employee[0]
# do we have people who don`t work?
employees = list(set(free_employee) - set(unavi_employee))
if not employees: # no, all people have a work
return self.check_all_job_employee(unavi_employee)
return employees[0]
except (TypeError, IndexError):
self.log.warning("Not Available employee")
return False
def get_employee_name(self, id_employee):
"""
Make get request to deputy roster ,with specific params, which we take from arguments.
If response will be successful, return data in json format from response.Process data
and return Name of employee, and status code requests
:param id_employee: id employee, we wanna get name
:return: name of employee , status code
"""
url = self.__url + 'supervise/employee/' + id_employee
try:
response = requests.get(url=url, headers=self.__headers, )
return response.json()["DisplayName"], response.status_code
except KeyError:
self.log.warning("Can not find employee with transferred id")
return False
except requests.RequestException as ex:
self.log.error(
"Unable to send post request to DEPUTY",
exc_info=ex)
return False
def _get_request_for_employee_unvail(self):
"""
Make get request to deputy roster to take data about employee who assigned day off
If response will be successful, return data in json format from response.
:return: json response
"""
url = self.__url + 'supervise/unavail/'
try:
response = requests.get(url=url, headers=self.__headers, )
return response.json()
except requests.RequestException as ex:
self.log.error(
"Unable to send post request to DEPUTY",
exc_info=ex)
def get_employee_unavail(self):
"""
Process data from request.Get time of day-off, and compare if employee with
area where his work
:return: list with employee have unavailability
"""
response = self._get_request_for_employee_unvail()
if not response:
return False
list_of_em = []
for employee in response:
employee_id = str(employee["Employee"])
title = self._get_area_for_employee(employee_id)
if title is False:
return False
list_of_em.append(
(employee["StartTime"], employee["EndTime"], title))
return list_of_em
def post_params_for_webhook(self, topic, address):
"""
make post request to deputy for webhook
:param topic: what we waiting
:param address: where data come
:return: status code of response
"""
params_for_webhooks = {
"Topic": topic,
"Enabled": 1,
"Type": "URL",
"Address": self.__public_url + address
}
url = self.__url + 'resource/Webhook'
data = self.update_params_for_post_deputy_style(params_for_webhooks)
try:
response = requests.post(url=url, headers=self.__headers, data=data)
return response.status_code
except requests.RequestException as ex:
self.log.error(
"Unable to send post request to DEPUTY",
exc_info=ex)
def subscribe_to_webhooks(self):
"""
make 3 post request for subscribe webhooks
:return: true if all good
"""
data_for_webhooks = [
("Employee.Delete",
"/delete_employee"),
("Employee.Insert",
"/insert_employee"),
("EmployeeAvailability.Insert",
"/unvial_employee")]
if self._verification_webhooks(data_for_webhooks) is False:
self.log.warning("You already subscribe to webhooks")
return False
for data in data_for_webhooks:
if self.post_params_for_webhook(data[0], data[1]) != HTTP_SUCCESS:
return False
return True
def _verification_webhooks(self, data_for_webhooks):
"""
make verification , if user already subscribe to webhooks
:param data_for_webhooks: list of tuple with topic webhook
:return: bool, false already subscribe , true make post request
"""
url = self.__url + 'resource/Webhook'
try:
response = requests.get(url=url, headers=self.__headers, )
for webhook in response.json():
for data in data_for_webhooks:
if webhook["Topic"] == data[0]:
return False
return True
except requests.RequestException as ex:
self.log.error(
"Unable to send post request to DEPUTY",
exc_info=ex)
def get_number_of_employee(self, id_location):
"""make GET request to DEPUTY,for all employee
:return: count of employee
"""
url = self.__url + 'supervise/employee'
try:
response = requests.get(url=url, headers=self.__headers, )
count = 0
for employee in response.json():
if employee["Company"] == int(id_location):
count += 1
return count
except requests.RequestException as ex:
self.log.error(
"Unable to send post request to DEPUTY",
exc_info=ex)
def _get_request_employee(self, employee_id):
"""
make GET request to DEPUTY,for get info about employee
:param employee_id:
:return: count of employee
"""
try:
url = self.__url + 'supervise/employee/' + employee_id
response = requests.get(url=url, headers=self.__headers, )
return response.json()
except requests.RequestException as ex:
self.log.error(
"Unable to send post request to DEPUTY",
exc_info=ex)
def _get_area_for_employee(self, employee_id):
"""
Make get request to deputy roster.
Compare id location with title for google sheets
"""
response = self._get_request_employee(employee_id)
area_id = str(response["Company"])
title = compare_mapping(area_id, "area")
if title is False:
return False
return title["Possible Area Nicknames in Production"]
@staticmethod
def update_params_for_post_deputy_style(params):
"""
take python dict and change it for json stringify format
:param params: python dict
:return: json stringify
"""
json_mylist = json.dumps(params)
data = f"{json_mylist}"
return data
@staticmethod
def check_all_job_employee(unavailable_employee):
"""
create dictionary where take e less worked employee
:param unavailable_employee:
:return: employee who have a less work
"""
job_employees = Counter()
for job in unavailable_employee:
job_employees[job] += 1
return (job_employees.most_common()[-1])[0]
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,599
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/xola_deputy/spreadsheet_api.py
|
# pylint: disable=E1101
"""
This module describe class for working with Spreadsheet API
"""
import pickle
import os.path
from datetime import datetime
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
TOKEN_PATH = 'token.pickle'  # cached OAuth credentials (pickled by this module)
CREDENTIALS_PATH = 'credentials.json'  # Google client-secrets file
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
MINUTES_TIME = 60  # seconds per minute; used for time-slot stepping
class SpreadsheetAPI:
    """
    A class used for work with a google spreadsheet
    ...
    Attributes
    ----------
    spreadsheet_id : str
        id of the google spreadsheet with which we will work
    spreadsheet_service : googleapiclient.discovery.Resource
        google spreadsheets service object
    Methods
    -------
    get_availability(sheet_title, date)
        Take the title of a sheet and a date (unix seconds)
        and return the value from the appropriate cell
    change_availability(self, sheet_title, date, value)
        Change the value in the appropriate cell
    add_sheet(title)
        create a new sheet with the provided title
    get_sheet_by_title(title)
        return the sheet object, creating the sheet if it does not exist
    _get_cell_indexes_by_timestamp(date)
        convert a date to string labels and look up the cell indexes
    _create_range_name(sheet_title, row_id, column_id)
        create the A1-style range name used by the google api
    _read_data(range_name)
        Read data from the spreadsheet using a range name
    _update_data(range_name, value)
        Update the value of a spreadsheet cell using a range name
    """
    def __init__(self, spreadsheet_id):
        """
        Parameters
        ----------
        spreadsheet_id : str
            Id of google spreadsheet
        ----------
        Looks for a cached token at TOKEN_PATH; if the token doesn't exist
        or is expired the user must authenticate with Google services.
        Afterwards builds the spreadsheets service object for the provided
        id and caches the first sheet's header row (time labels) and first
        column (date labels).
        """
        creds = None
        # The file token.pickle stores the user's access and refresh tokens, and is
        # created automatically when the authorization flow completes for the first
        # time.
        if os.path.exists(TOKEN_PATH):
            with open(TOKEN_PATH, 'rb') as token:
                creds = pickle.load(token)
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    CREDENTIALS_PATH, SCOPES)
                creds = flow.run_local_server(port=8000)
            # Save the credentials for the next run
            with open(TOKEN_PATH, 'wb') as token:
                pickle.dump(creds, token)
        self.spreadsheet_service = build(
            'sheets', 'v4', credentials=creds).spreadsheets()
        self.spreadsheet_id = spreadsheet_id
        # Read the whole first sheet once: row 1 holds the time labels,
        # column A holds the date labels.
        values = self._read_data(
            self.spreadsheet_service.get(
                spreadsheetId=self.spreadsheet_id).execute().get(
                'sheets',
                '')[0]['properties']['title'] +
            '!A1:ZZZ1000')
        self.time_values = values[0]
        self.date_values = [i[0] for i in values]
    @property
    def sheets(self):
        """
        Property
        Return all sheet objects in the spreadsheet
        """
        return self.spreadsheet_service.get(
            spreadsheetId=self.spreadsheet_id
        ).execute().get('sheets', '')
    def get_availability(self, sheet_title, date):
        """
        Parameters
        ----------
        sheet_title : str
            Title of sheet
        date : int
            Date in unix format
        ----------
        Converts the unix date into the sheet's time/date labels, locates
        the matching cell in the sheet with the provided title and returns
        its value.
        Return the cell value if the date is valid; 0 when the cell is
        empty; None when no cell matches the date.
        """
        time_index, date_index = self._get_cell_indexes_by_timestamp(date)
        range_name = self._create_range_name(
            sheet_title, time_index, date_index)
        if range_name is None:
            return None
        result = self._read_data(range_name)
        if result is None:
            return 0
        return result[0][0]
    def change_availability(self, sheet_title, date, value):
        """
        Parameters
        ----------
        sheet_title : str
            Title of sheet
        date : int
            Date in unix format
        value : str
            Value which will be inserted into the sheet
        ----------
        Locates the cell for the given date and writes the new value to it.
        Return the update result if the date is valid, otherwise None
        """
        time_index, date_index = self._get_cell_indexes_by_timestamp(date)
        range_name = self._create_range_name(
            sheet_title, time_index, date_index)
        if range_name is None:
            return None
        return self._update_data(range_name, value)
    def change_availability_by_value(self, sheet_title, date, value):
        """
        Parameters
        ----------
        sheet_title : str
            Title of sheet
        date : int
            Date in unix format
        value : int
            Add this delta to the current cell value
        ----------
        Reads the current cell value, adds the delta, and writes it back.
        Return the update result if the date is valid; None when the
        current cell value is not an integer.
        """
        try:
            old_value = int(self.get_availability(sheet_title, date))
            new_value = old_value + value
        except ValueError:
            return None
        return self.change_availability(sheet_title, date, new_value)
    def add_sheet(self, title):
        """
        Parameters
        ----------
        title : str
            Title of sheet
        ----------
        Try to add a new sheet to the spreadsheet.
        Return the API result if the sheet was added, or None if a sheet
        with this title already exists (the API raises HttpError).
        """
        try:
            result = self.spreadsheet_service.batchUpdate(
                spreadsheetId=self.spreadsheet_id,
                body={
                    "requests": [
                        {
                            "addSheet": {
                                "properties": {
                                    "title": title,
                                }
                            }
                        }
                    ]
                }).execute()
            return result
        except HttpError:
            return None
    def get_sheet_by_title(self, title):
        """
        Parameters
        ----------
        title : str
            Title of sheet
        ----------
        Look for a sheet with the provided title in the spreadsheet.
        If the sheet doesn't exist, create a new sheet with that title.
        """
        for sheet in self.sheets:
            if sheet['properties']['title'] == title:
                result = sheet
                break
        else:
            # No break: the title was not found, so create the sheet.
            result = self.add_sheet(title)
        return result
    def _get_cell_indexes_by_timestamp(self, date):
        """
        Parameters
        ----------
        date: int
            Time in unix format
        ----------
        Convert the time into the sheet's date and time label strings and
        search the cached header row/column for them.
        Return 1-based (time index, date index); 0 means "not found".
        NOTE(review): the "%-d" / "%-I" strftime codes are glibc-specific
        and fail on Windows — confirm the deployment platform.
        """
        spreadsheet_date = datetime.fromtimestamp(
            date).strftime('%A, %B %-d, %Y')
        spreadsheet_time = datetime.fromtimestamp(date).strftime('%-I:%M %p')
        time_index, date_index = 0, 0
        for index, time in enumerate(self.time_values, 1):
            if time == spreadsheet_time:
                time_index = index
                break
        for index, date_tmp in enumerate(self.date_values, 1):
            if date_tmp == spreadsheet_date:
                date_index = index
                break
        return time_index, date_index
    @staticmethod
    def _create_range_name(sheet_title, row_id, column_id):
        """
        Parameters
        ----------
        sheet_title : str
            Title of sheet
        row_id: int
            Integer index converted to the A1 column letters
        column_id: int
            Integer index used as the A1 row number
        ----------
        Create the A1-style range name of a cell ("Title!B7").
        Return the range name string, or None when row_id is 0 (not found).
        NOTE(review): despite the names, row_id becomes the column letters
        and column_id the row number — verify against callers.
        """
        letter = ''
        # Convert the 1-based index into column letters (A..Z, AA..).
        while row_id > 0:
            temp = (row_id - 1) % 26
            letter = chr(temp + 65) + letter
            row_id = (row_id - temp - 1) // 26
        if not letter:
            return None
        return sheet_title + '!' + letter + str(column_id)
    def _read_data(self, range_name):
        """
        Parameters
        ----------
        range_name : str
            Google spreadsheet range name in str format
        ----------
        Read data from the spreadsheet using the provided range_name.
        Return the result in format [[value]] (None for an empty range).
        """
        result = self.spreadsheet_service.values().get(
            spreadsheetId=self.spreadsheet_id, range=range_name).execute()
        rows = result.get('values')
        return rows
    def _update_data(self, range_name, value):
        """
        Parameters
        ----------
        range_name : str
            Google spreadsheet range name in str format
        value : str
            Insert this value into the cell
        ----------
        Insert the value into the cell addressed by range_name.
        Return the response's 'values' entry (may be None).
        """
        value_body = {
            "values": [[value]]
        }
        result = self.spreadsheet_service.values().update(
            spreadsheetId=self.spreadsheet_id,
            range=range_name,
            valueInputOption='RAW',
            body=value_body).execute()
        rows = result.get('values')
        return rows
    def change_multiple_ranges_by_value(
            self, sheet_title, time_start, time_end, value):
        """
        Parameters
        ----------
        sheet_title : str
            Title of sheet
        time_start : int
            Time of the first cell to change, unix format
        time_end : int
            Time of the last cell to change, unix format
        value : int
            Delta added to every cell in the range
        ----------
        Walk the window in 30-minute steps, read all affected cells in one
        batch, add the delta to each, and write them back in one batch.
        """
        range_names = []
        while time_start <= time_end:
            time_index, date_index = self._get_cell_indexes_by_timestamp(
                time_start)
            range_name_tmp = self._create_range_name(
                sheet_title, time_index, date_index)
            if range_name_tmp:
                range_names.append(range_name_tmp)
            time_start += 30 * MINUTES_TIME  # advance one 30-minute slot
        result = self.spreadsheet_service.values().batchGet(
            spreadsheetId=self.spreadsheet_id, ranges=range_names).execute()
        ranges = result.get('valueRanges', [])
        spreadsheet_request_values = []
        for tmp_range in ranges:
            # Empty cells come back without a 'values' key; treat them as 0.
            if 'values' in tmp_range:
                tmp_range_value = int(tmp_range['values'][0][0])
            else:
                tmp_range_value = 0
            spreadsheet_request_values.append(
                {
                    "range": tmp_range['range'],
                    "values": [[tmp_range_value + value]]
                }
            )
        self._batch_update_data(spreadsheet_request_values)
    def change_availability_multiple_ranges(self, sheet_title, data_list):
        """
        Parameters
        ----------
        sheet_title : str
            Title of sheet
        data_list: list
            List contains pairs of date and value for a cell
            Example: [[1607414400, 1], [1607416200, 2]]
        ----------
        Create a range name for each date in data_list (skipping dates that
        do not map to a cell), then write all values in a single batch
        request. Always returns True.
        """
        spreadsheet_request_values = []
        for data_item in data_list:
            date = data_item[0]
            value = data_item[1]
            time_index, date_index = self._get_cell_indexes_by_timestamp(date)
            range_name = self._create_range_name(
                sheet_title, time_index, date_index)
            if range_name is None:
                continue
            spreadsheet_request_values.append(
                {
                    "range": range_name,
                    "values": [[value]]
                }
            )
        self._batch_update_data(spreadsheet_request_values)
        return True
    def _batch_update_data(self, request_values):
        """
        Parameters
        ----------
        request_values : list of dict
            list of dictionaries, each containing a range name and value
        ----------
        Send a batchUpdate request to the google spreadsheet API to update
        the batch of data.
        NOTE(review): a batchUpdate response carries 'responses', not
        'values', so the returned rows is likely always None — confirm.
        """
        body = {
            'valueInputOption': 'RAW',
            'data': request_values
        }
        result = self.spreadsheet_service.values().batchUpdate(
            spreadsheetId=self.spreadsheet_id, body=body).execute()
        rows = result.get('values')
        return rows
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,600
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/xola_deputy/tests/test_google_sheets_client.py
|
import pytest
from unittest.mock import patch, Mock
from xola_deputy.deputy_client import DeputyClient
from xola_deputy.logger import LoggerClient
from xola_deputy.google_sheets_client import GoogleSheetsClient
# Module-level objects under test, shared by every test function below.
# NOTE(review): the name `logging` shadows the stdlib logging module.
logging = LoggerClient().get_logger()
deputy = DeputyClient(logging)
sheets = GoogleSheetsClient(deputy,logging)
def test_date_time():
    """date_time() must return a truthy (non-empty) result."""
    result = sheets.date_time()
    assert result
def test_calculation_unavailability_count():
    """Smoke-test the unavailability calculation on two sample shifts."""
    shift_windows = [[1608641235, 1608641735], [1608641238, 1608641738]]
    free_slots = [[1608609600, 4], [1608611400, 4], [1608613200, 4]]
    assert sheets.calculation_unavailability_count(shift_windows, free_slots)
def test_change_sheets():
    """change_sheets succeeds when its collaborators are stubbed out."""
    stubbed_shifts = [[1608641235, 1608641735], [1608641238, 1608641738]]
    with patch.object(DeputyClient, 'get_number_of_employee', return_value=4), \
         patch.object(DeputyClient, 'process_people_unavailability',
                      return_value=stubbed_shifts), \
         patch.object(GoogleSheetsClient, 'calculation_unavailability_count',
                      return_value=True):
        assert sheets.change_sheets(3, 1)
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,601
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/setup.py
|
"""This script creates a config file for the main script.
This should be run once at the very beginning before
the main script run (zola_deputy.py).
Then it should be run every time you want to reconfigure the script."""
import sys
import argparse
import configparser
from os import path
def create_parser():
    """Build the command-line parser for the setup script.

    :return: argparse.ArgumentParser accepting the Xola/Deputy credentials,
        public URL, spreadsheet id, and logging options (the last two have
        defaults).
    """
    parser = argparse.ArgumentParser()
    # Required-by-usage credential/identifier flags (no defaults).
    flag_pairs = [
        ('-xak', '--x_api_key'),
        ('-ui', '--user_id'),
        ('-dat', '--deputy_access_token'),
        ('-did', '--deputy_id'),
        ('-url', '--url'),
        ('-spid', '--spreadsheet_id'),
    ]
    for short_flag, long_flag in flag_pairs:
        parser.add_argument(short_flag, long_flag)
    # Logging options fall back to sensible defaults.
    parser.add_argument('-logmode', '--logmode', default='file')
    parser.add_argument('-logpath', '--logpath', default='./logs')
    return parser
def initialize_variables():
    """Read console parameters and persist them to xola_deputy/Settings.ini.

    Exits with a message when the script was started without arguments.
    """
    config = configparser.ConfigParser()
    namespace = create_parser().parse_args(sys.argv[1:])
    if len(sys.argv) == 1:
        sys.exit(
            "Please check if you run script with parameters . Script is terminated")
    # Section -> option -> value mapping; insertion order matches the
    # original file layout, so the written ini is identical.
    sections = {
        'XOLA': {'x_api_key': namespace.x_api_key,
                 'user_id': namespace.user_id},
        'DEPUTY': {'deputy_access_token': namespace.deputy_access_token,
                   'deputy_id': namespace.deputy_id},
        'LOG': {'log_path': namespace.logpath,
                'log_mode': namespace.logmode},
        'URL': {'public_url': namespace.url},
        'GOOGLE': {'spreadsheet_id': namespace.spreadsheet_id},
    }
    for section_name, options in sections.items():
        config.add_section(section_name)
        for option_name, option_value in options.items():
            config[section_name][option_name] = option_value
    with open('xola_deputy/Settings.ini', 'w') as configfile:  # save
        config.write(configfile)
if __name__ == '__main__':
    # Re-running with an existing config asks before overwriting it;
    # any answer other than "y" aborts.
    if path.isfile('xola_deputy/Settings.ini'):
        key = input("Do you really wanna change your settings?(y/n) ")
        if key == "y":
            initialize_variables()
        else:
            sys.exit("Script is terminated")
    else:
        initialize_variables()
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,602
|
COXIT-CO/xola_deputy
|
refs/heads/main
|
/xola_deputy/tests/test_xola_client.py
|
import pytest
import json
import requests
from unittest.mock import patch, Mock
from xola_deputy.xola_client import XolaClient
from xola_deputy.logger import LoggerClient
# Module-level client under test, shared by every test function below.
# NOTE(review): the name `logging` shadows the stdlib logging module.
logging = LoggerClient().get_logger()
xola = XolaClient(logging)
@pytest.fixture()
def get_event_json():
    """Sample Xola event payload loaded from the test data directory."""
    with open("tests/data_event_from_xola.json", "r") as data_file:
        return json.load(data_file)
@pytest.fixture()
def get_order_json():
    """Sample Xola order response loaded from the test data directory."""
    with open("tests/test_response_data.json", "r") as data_file:
        return json.load(data_file)
@pytest.fixture()
def get_guide_json():
    """Sample Xola guide list loaded from the test data directory."""
    with open("tests/data_list_of_guide_xola.json", "r") as data_file:
        return json.load(data_file)
def test_subscribe_to_webhook():
    """Only HTTP 201 from the subscribe call counts as success."""
    for status_code, expected in [(200, False), (409, False), (201, True)]:
        with patch.object(XolaClient, 'post_request_subscribe_to_webhook',
                          return_value=status_code):
            assert xola.subscribe_to_webhook() == expected
def test_calculation_of_employee():
    """Pin the expected employee counts for representative inputs."""
    expectations = [((1, 10), 1), ((20, 10), 1), ((25, 26), 2), ((20, 99), 5)]
    for call_args, expected in expectations:
        assert xola.calculation_of_employee(*call_args) == expected
def test_convert_time():
    """convert_time keeps only the date part of an ISO timestamp."""
    samples = {
        "2020-12-07T12:55:40+00:00": "2020-12-07",
        "2020-12-01T10:14:40+00:00": "2020-12-01",
    }
    for raw, expected in samples.items():
        assert xola.convert_time(raw) == expected
def test_take_params_from_responce(get_order_json, get_event_json):
    """Parsing an order yields a (dict, int, dict) triple."""
    with patch.object(XolaClient, '_get_data_from_event',
                      return_value=get_event_json):
        params, number_shifts, mapping = \
            xola.take_params_from_responce(get_order_json)
    assert type(params) is dict
    assert type(number_shifts) is int
    assert type(mapping) is dict
def test_start(get_order_json):
    """start() degrades to (False, False, False) when parsing fails and
    passes a successful parse through untouched."""
    scenarios = [
        (TypeError, lambda triple: triple == (False, False, False)),
        ((True, True, True), lambda triple: all(v != False for v in triple)),
    ]
    for fake_parse_result, accept in scenarios:
        with patch.object(XolaClient, 'take_params_from_responce',
                          return_value=fake_parse_result):
            assert accept(xola.start(get_order_json))
def test_get_list_of_guids(get_guide_json):
    """A non-200 reply yields False; a 200 reply yields a list."""
    checks = [
        (400, lambda result: result == False),
        (200, lambda result: type(result) is list),
    ]
    for status_code, accept in checks:
        fake_response = Mock(spec=requests.models.Response)
        fake_response.json.return_value = get_guide_json
        fake_response.status_code = status_code
        with patch.object(XolaClient, '_get_request_list_guides',
                          return_value=fake_response):
            assert accept(xola.get_list_of_guids())
def test_take_guide_id():
    """Known guide names resolve to an id string; unknown names give False."""
    known_guides = [('Maaiz - GM Test', '5b5f8e90c481e1e54e8b4571'),
                    ('Taylor Test', '5b60e68a332e75c36a8b45c1')]
    with patch.object(XolaClient, 'get_list_of_guids',
                      return_value=known_guides):
        assert type(xola.take_guide_id('Maaiz - GM Test')) is str
        assert xola.take_guide_id('Maria Test') == False
def test_verification_guides_for_event():
    """Only HTTP 201 from the guide post means the guide was assigned."""
    for status_code, expected in [(200, False), (409, False), (201, True)]:
        with patch.object(XolaClient, '_post_guides_for_event',
                          return_value=status_code), \
             patch.object(XolaClient, 'take_guide_id', return_value=True):
            assert xola.verification_guides_for_event('Adam Brodner') == expected
@patch('requests.post')
def test_post_request_subscribe_to_webhook(mock_post):
    """A network failure must be swallowed by the client, not raised."""
    mock_post.side_effect = requests.RequestException
    xola.post_request_subscribe_to_webhook("order")
@patch('requests.get')
def test__get_data_from_event(mock_get):
    """A network failure in _get_data_from_event must not propagate."""
    mock_get.side_effect = requests.RequestException
    xola._get_data_from_event()
@patch('requests.get')
def test_get_request_list_guides(mock_get):
    """A network failure in _get_request_list_guides must not propagate."""
    mock_get.side_effect = requests.RequestException
    xola._get_request_list_guides()
@patch('requests.post')
def test_post_guides_for_event(mock_post):
    """A network failure in _post_guides_for_event must not propagate."""
    mock_post.side_effect = requests.RequestException
    xola._post_guides_for_event("1")
|
{"/xola_deputy/tests/test_deputy_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py"], "/xola_deputy/tests/test_global_config.py": ["/xola_deputy/global_config.py"], "/xola_deputy/tests/test_google_sheets_client.py": ["/xola_deputy/deputy_client.py", "/xola_deputy/logger.py", "/xola_deputy/google_sheets_client.py"], "/xola_deputy/tests/test_xola_client.py": ["/xola_deputy/xola_client.py", "/xola_deputy/logger.py"]}
|
26,609
|
tacticalhombre/CarND-Vehicle-Detection
|
refs/heads/master
|
/visualize_for_writeup.py
|
import matplotlib.image as mpimg
import numpy as np
import cv2
import glob
import pickle
import glob
import collections
from scipy.ndimage.measurements import label
from moviepy.editor import VideoFileClip
from search_classify import *
from pipeline import *
from random import randint
class VisualizeProject:
    """Produce the figures used in the project write-up (saved under ./examples).

    Wraps a Pipeline instance and exposes one method per write-up figure:
    input samples, HOG features, sliding-window hits, detections and heatmaps.
    """

    def __init__(self):
        print('VisualizeProject object created ...')
        self.pipeline = Pipeline()
        self.cars = []
        self.notcars = []
        fns = self.pipeline.get_image_files()
        self.cars = fns['cars']
        self.notcars = fns['notcars']

    def visualize_input_images(self):
        """Save a side-by-side figure of one random car and one random non-car image."""
        # random.randint is inclusive at both ends; the original upper bound of
        # len(...) could index one past the end and raise IndexError.
        rc = randint(0, len(self.cars) - 1)
        print('Car', rc)
        rnc = randint(0, len(self.notcars) - 1)
        print('Not car', rnc)
        print('Example of a car image:', self.cars[rc])
        print('Example of a non-car image:', self.notcars[rnc])
        car_img = mpimg.imread(self.cars[rc])
        notcar_img = mpimg.imread(self.notcars[rnc])
        fig = plt.figure()
        plt.subplot(121)
        plt.imshow(car_img)
        plt.title('Car')
        plt.subplot(122)
        plt.imshow(notcar_img)
        plt.title('Not car')
        fig.tight_layout()
        #plt.show()
        fig.savefig('./examples/vis-car_not_car.png')

    def visualize_hog(self):
        """Save HOG visualizations of one random car / non-car pair in several color spaces."""
        rc = randint(0, len(self.cars) - 1)
        print('Car', rc)
        rnc = randint(0, len(self.notcars) - 1)
        print('Not car', rnc)
        car_img = mpimg.imread(self.cars[rc])
        notcar_img = mpimg.imread(self.notcars[rnc])
        colorspaces = ['RGB', 'HSV', 'LUV', 'HLS', 'YCrCb']
        for color_space in colorspaces:
            print('Extracting hog -', color_space)
            self.display_hog(car_img, color_space, 'Car')
            self.display_hog(notcar_img, color_space, 'Not-Car')

    def display_hog(self, input_img, color_space, title):
        """Render per-channel HOG images of input_img in color_space and save the figure."""
        orient = 9  # HOG orientations
        pix_per_cell = 8  # HOG pixels per cell
        cell_per_block = 2  # HOG cells per block
        scale = 1
        img_tosearch = input_img.astype(np.float32) / 255
        conv = 'RGB2' + color_space
        ctrans_tosearch = convert_color(img_tosearch, conv=conv)
        if scale != 1:
            imshape = ctrans_tosearch.shape
            # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
            ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1] / scale), int(imshape[0] / scale)))
        ch1 = ctrans_tosearch[:, :, 0]
        ch2 = ctrans_tosearch[:, :, 1]
        ch3 = ctrans_tosearch[:, :, 2]
        # Window-geometry bookkeeping kept from the sliding-window search code
        # (not used for a single-image visualization but retained for reference).
        nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
        nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
        nfeat_per_block = orient * cell_per_block ** 2
        # 64 was the original sampling rate, with 8 cells and 8 pix per cell
        window = 64
        nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
        cells_per_step = 2  # Instead of overlap, define how many cells to step
        nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
        nysteps = (nyblocks - nblocks_per_window) // cells_per_step
        # Compute individual channel HOG features (with visualization images).
        f1, hog_img1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, vis=True, feature_vec=False)
        f2, hog_img2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, vis=True, feature_vec=False)
        f3, hog_img3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, vis=True, feature_vec=False)
        fig = plt.figure()
        fig.set_figheight(5)
        fig.set_figwidth(20)
        plt.subplot(141)
        plt.imshow(input_img)
        plt.title(title)
        plt.subplot(142)
        plt.imshow(hog_img1)
        plt.title('Ch1 - ' + color_space)
        plt.subplot(143)
        plt.imshow(hog_img2)
        plt.title('Ch2 - ' + color_space)
        plt.subplot(144)
        plt.imshow(hog_img3)
        plt.title('Ch3 - ' + color_space)
        fig.tight_layout()
        #plt.show()
        fn = './examples/vis-hog-' + color_space + '-' + title + '.png'
        print(fn)
        fig.savefig(fn)

    def visualize_search_windows(self, image_file):
        """Run the slide_window/search_windows pass on image_file and show the hits."""
        image = mpimg.imread(image_file)
        draw_image = np.copy(image)
        # BUG FIX: get_param_dict is defined on Pipeline, not on this class.
        dist_pickle = self.pipeline.get_param_dict("svc_pickle_RGB.p")
        svc = dist_pickle["svc"]
        X_scaler = dist_pickle["scaler"]
        orient = dist_pickle["orient"]
        pix_per_cell = dist_pickle["pix_per_cell"]
        cell_per_block = dist_pickle["cell_per_block"]
        spatial_size = dist_pickle["spatial_size"]
        hist_bins = dist_pickle["hist_bins"]
        y_start_stop = [None, None]
        color_space = 'HLS'
        hog_channel = 'ALL'
        spatial_feat = True  # Spatial features on or off
        hist_feat = True  # Histogram features on or off
        hog_feat = True  # HOG features on or off
        # Training data came from .png images (scaled 0-1 by mpimg) while the
        # searched image is a .jpg (0-255), so rescale to match.
        image = image.astype(np.float32) / 255
        windows = slide_window(image, x_start_stop=[None, None], y_start_stop=y_start_stop,
                               xy_window=(96, 96), xy_overlap=(0.5, 0.5))
        hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space,
                                     spatial_size=spatial_size, hist_bins=hist_bins,
                                     orient=orient, pix_per_cell=pix_per_cell,
                                     cell_per_block=cell_per_block,
                                     hog_channel=hog_channel, spatial_feat=spatial_feat,
                                     hist_feat=hist_feat, hog_feat=hog_feat)
        window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6)
        plt.imshow(window_img)
        plt.show()

    def visualize_identify_cars(self):
        """Run find_cars on a test image with each trained color-space model and save figures."""
        image_file = './test_images/test6.jpg'
        image = mpimg.imread(image_file)
        colorspaces = ['RGB', 'HLS', 'HSV', 'YCrCb']
        for color_space in colorspaces:
            dist_pickle = self.pipeline.get_param_dict("svc_pickle_" + color_space + ".p")
            svc = dist_pickle["svc"]
            X_scaler = dist_pickle["scaler"]
            orient = dist_pickle["orient"]
            pix_per_cell = dist_pickle["pix_per_cell"]
            cell_per_block = dist_pickle["cell_per_block"]
            spatial_size = dist_pickle["spatial_size"]
            hist_bins = dist_pickle["hist_bins"]
            test_accuracy = dist_pickle["test_accuracy"]
            color_space = dist_pickle["color_space"]
            print('Using:', orient, 'orientations', pix_per_cell,
                  'pixels per cell and', cell_per_block, 'cells per block')
            print('color_space:', color_space, ' accuracy =', test_accuracy)
            # Search only the road band of the frame.
            ystart = 400
            ystop = 656
            scale = 1.5
            boxes = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins, color_space)
            draw_img = np.copy(image)
            for box in boxes:
                cv2.rectangle(draw_img, box[0], box[1], (0, 0, 255), 6)
            fig = plt.figure()
            plt.imshow(draw_img)
            plt.title('Detected Cars - ' + color_space)
            fig.savefig('./examples/vis-detectcars-' + color_space + '.png')

    def visualize_on_test_images(self):
        """Draw raw (pre-heatmap) detections on every ./test_images/*.jpg in one figure."""
        test_image_files = glob.glob('./test_images/*.jpg')
        fig = plt.figure()
        x = 1
        for image_file in test_image_files:
            print(image_file, x)
            image = mpimg.imread(image_file)
            # np.float was removed in NumPy 1.24; the builtin float is equivalent.
            heat = np.zeros_like(image[:, :, 0]).astype(float)
            draw_image = np.copy(image)
            boxes = self.pipeline.detect_cars(image)
            for box in boxes:
                cv2.rectangle(draw_image, box[0], box[1], (0, 0, 255), 6)
            plt.subplot(3, 2, x)
            plt.imshow(draw_image)
            plt.title(image_file)
            x = x + 1
        fig.tight_layout()
        fig.savefig('./examples/vis-test-images.png')

    def visualize_heatmap(self, heat, box_list, image):
        """Show final (heat-thresholded) detections next to their heat map."""
        # Add heat to each box in box list.
        heat = add_heat(heat, box_list)
        # Threshold removes boxes seen fewer than 2 times (false positives).
        heat = apply_threshold(heat, 2)
        heatmap = np.clip(heat, 0, 255)
        # Find final boxes from the heatmap using the label function.
        labels = label(heatmap)
        draw_img = draw_labeled_bboxes(np.copy(image), labels)
        fig = plt.figure()
        plt.subplot(121)
        plt.imshow(draw_img)
        plt.title('Car Positions')
        plt.subplot(122)
        plt.imshow(heatmap, cmap='hot')
        plt.title('Heat Map')
        fig.tight_layout()
        plt.show()

    def visualize_heat(self):
        """Run detection + heatmap visualization over every test image."""
        test_image_files = glob.glob('./test_images/*.jpg')
        for image_file in test_image_files:
            image = mpimg.imread(image_file)
            heat = np.zeros_like(image[:, :, 0]).astype(float)
            bbox_list = self.pipeline.detect_cars(image)
            self.visualize_heatmap(heat, bbox_list, image)
if __name__ == "__main__":
    # Pick the figure to produce; enable the other helpers by uncommenting
    # the corresponding call.
    visualizer = VisualizeProject()
    # visualizer.visualize_input_images()
    # visualizer.visualize_hog()
    # visualizer.visualize_identify_cars()
    # visualizer.visualize_on_test_images()
    visualizer.visualize_heat()
|
{"/visualize_for_writeup.py": ["/pipeline.py"]}
|
26,610
|
tacticalhombre/CarND-Vehicle-Detection
|
refs/heads/master
|
/pipeline.py
|
import matplotlib.image as mpimg
import numpy as np
import cv2
import glob
import pickle
import glob
import collections
from scipy.ndimage.measurements import label
from moviepy.editor import VideoFileClip
from search_classify import *
#from hog_subsample import *
class Pipeline:
    """Vehicle-detection pipeline: train per-colorspace SVC models, detect
    cars per frame, smooth detections over a rolling heatmap window, and
    render an annotated output video."""

    def __init__(self):
        print('Pipeline object created ...')
        self.lastN_bbox = []
        self.frame = 1  # 1-based frame counter (used by the debug figures)
        # Rolling window of the 10 most recent heatmaps (temporal smoothing).
        self.heatmaps = collections.deque(maxlen=10)
        self.pickle_cache = {}  # pickle filename -> loaded parameter dict

    def get_image_files(self):
        """Return {'cars': [...], 'notcars': [...]} lists of training image paths."""
        fn_cars = glob.glob('./data/vehicles/*/*.png')
        fn_notcars = glob.glob('./data/non-vehicles/*/*.png')
        print('Number of car images:', len(fn_cars))
        print('Number of non-car images:', len(fn_notcars))
        return {'cars': fn_cars, 'notcars': fn_notcars}

    def train_classifier(self):
        """Train one LinearSVC per color space and pickle model + feature params."""
        fns = self.get_image_files()
        cars = fns['cars']
        notcars = fns['notcars']
        colorspaces = ['RGB', 'HSV', 'LUV', 'HLS', 'YUV', 'YCrCb']
        orient = 9  # HOG orientations
        pix_per_cell = 8  # HOG pixels per cell
        cell_per_block = 2  # HOG cells per block
        hog_channel = 'ALL'  # Can be 0, 1, 2, or "ALL"
        spatial_size = (16, 16)  # Spatial binning dimensions
        hist_bins = 16  # Number of histogram bins
        spatial_feat = True  # Spatial features on or off
        hist_feat = True  # Histogram features on or off
        hog_feat = True  # HOG features on or off
        for color_space in colorspaces:
            print('Extracting features for color_space', color_space)
            car_features = extract_features(cars, color_space=color_space,
                                            spatial_size=spatial_size, hist_bins=hist_bins,
                                            orient=orient, pix_per_cell=pix_per_cell,
                                            cell_per_block=cell_per_block,
                                            hog_channel=hog_channel, spatial_feat=spatial_feat,
                                            hist_feat=hist_feat, hog_feat=hog_feat)
            notcar_features = extract_features(notcars, color_space=color_space,
                                               spatial_size=spatial_size, hist_bins=hist_bins,
                                               orient=orient, pix_per_cell=pix_per_cell,
                                               cell_per_block=cell_per_block,
                                               hog_channel=hog_channel, spatial_feat=spatial_feat,
                                               hist_feat=hist_feat, hog_feat=hog_feat)
            X = np.vstack((car_features, notcar_features)).astype(np.float64)
            # Fit a per-column scaler and normalize the feature matrix.
            X_scaler = StandardScaler().fit(X)
            scaled_X = X_scaler.transform(X)
            # Labels: 1 for cars, 0 for non-cars.
            y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
            # Split into randomized training and test sets.
            rand_state = np.random.randint(0, 100)
            X_train, X_test, y_train, y_test = train_test_split(
                scaled_X, y, test_size=0.2, random_state=rand_state)
            print('Using:', orient, 'orientations', pix_per_cell,
                  'pixels per cell and', cell_per_block, 'cells per block')
            print('Feature vector length:', len(X_train[0]))
            svc = LinearSVC()
            t = time.time()
            svc.fit(X_train, y_train)
            t2 = time.time()
            print(round(t2 - t, 2), 'Seconds to train SVC...')
            test_accuracy = round(svc.score(X_test, y_test), 4)
            print('Test Accuracy of SVC = ', test_accuracy)
            # Persist the model with every parameter needed at detection time.
            dist_pickle = {}
            dist_pickle["svc"] = svc
            dist_pickle["scaler"] = X_scaler
            dist_pickle["orient"] = orient
            dist_pickle["pix_per_cell"] = pix_per_cell
            dist_pickle["cell_per_block"] = cell_per_block
            dist_pickle["spatial_size"] = spatial_size
            dist_pickle["hist_bins"] = hist_bins
            dist_pickle["test_accuracy"] = test_accuracy
            dist_pickle["color_space"] = color_space
            pickle_file = 'svc_pickle_' + color_space + '.p'
            # Close the file handle deterministically (original leaked it).
            with open(pickle_file, 'wb') as f:
                pickle.dump(dist_pickle, f)

    def get_param_dict(self, pickle_file):
        """Load (and memoize) the pickled model/params dict for pickle_file."""
        if pickle_file not in self.pickle_cache:
            # Close the file handle deterministically (original leaked it).
            with open(pickle_file, "rb") as f:
                self.pickle_cache[pickle_file] = pickle.load(f)
        return self.pickle_cache[pickle_file]

    def detect_cars(self, img):
        """Run find_cars at several scales over the road band; return all boxes."""
        ystart = 400  # search only below the horizon
        ystop = 656
        scales = [1.0, 1.5, 1.75, 2.0]
        color_space = 'YCrCb'
        dist_pickle = self.get_param_dict("svc_pickle_" + color_space + ".p")
        svc = dist_pickle["svc"]
        X_scaler = dist_pickle["scaler"]
        orient = dist_pickle["orient"]
        pix_per_cell = dist_pickle["pix_per_cell"]
        cell_per_block = dist_pickle["cell_per_block"]
        spatial_size = dist_pickle["spatial_size"]
        hist_bins = dist_pickle["hist_bins"]
        test_accuracy = dist_pickle["test_accuracy"]
        color_space = dist_pickle["color_space"]
        bbox_list = []
        for scale in scales:
            boxes = find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins, color_space)
            bbox_list.extend(boxes)
        return bbox_list

    def process_image(self, image):
        """Per-frame video callback: detect cars, accumulate heat over the
        rolling window, threshold, and return the frame with smoothed boxes."""
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        heat = np.zeros_like(image[:, :, 0]).astype(float)
        # BUG FIX: use our own detector instead of the module-level `pipeline`
        # global, which only exists when this file is run as a script.
        bbox_list = self.detect_cars(image)
        # Add heat to each detection and drop boxes seen fewer than 2 times.
        heat = add_heat(heat, bbox_list)
        heat = apply_threshold(heat, 2)
        self.heatmaps.append(heat)
        # Once the window is full, threshold the summed heat again so only
        # detections that persist across frames survive.
        if len(self.heatmaps) == 10:
            heat = sum(list(self.heatmaps))
            heat = apply_threshold(heat, 5)
        heatmap = np.clip(heat, 0, 255)
        labels = label(heatmap)
        draw_img = draw_labeled_bboxes(np.copy(image), labels)
        # (Per-frame debug figures were removed; see visualize_for_writeup.py.)
        self.frame = self.frame + 1
        return draw_img

    def create_output_video(self):
        """Annotate ./project_video.mp4 frame by frame into ./my-project_video.mp4."""
        input_video_file = './project_video.mp4'
        output_video_file = './my-project_video.mp4'
        # To debug on a short segment:
        # clip1 = VideoFileClip(input_video_file).subclip(21, 31)
        clip1 = VideoFileClip(input_video_file)
        proj_clip = clip1.fl_image(self.process_image)  # expects color images
        proj_clip.write_videofile(output_video_file, audio=False)
if __name__ == "__main__":
    # Build the pipeline and render the annotated output video.
    # Keep the name `pipeline`: Pipeline.process_image has relied on this
    # module-level binding.
    pipeline = Pipeline()
    # pipeline.train_classifier()  # uncomment to (re)train the per-colorspace models
    pipeline.create_output_video()
|
{"/visualize_for_writeup.py": ["/pipeline.py"]}
|
26,642
|
tianzhaotju/Deeplive
|
refs/heads/master
|
/BBA.py
|
class Algorithm:
    """Buffer-Based Adaptation (BBA): choose the next bitrate level purely
    from the most recent playback-buffer occupancy."""

    def __init__(self):
        # Last observed buffer occupancy.
        self.buffer_size = 0

    def Initial(self, model_name):
        """Rule-based controller: nothing to load."""
        return None

    def run(self, time, S_time_interval, S_send_data_size, S_chunk_len, S_rebuf, S_buffer_size, S_play_time_len,
            S_end_delay, S_decision_flag, S_buffer_flag, S_cdn_flag, S_skip_time, end_of_video, cdn_newest_id,
            download_id, cdn_has_frame, IntialVars, start_avgbw):
        """Map the newest buffer sample onto (bit_rate, target_buffer, latency_limit)."""
        level = S_buffer_size[-1]
        self.buffer_size = level
        reservoir, cushion = 0.5, 1.5
        if level < reservoir:
            quality = 0
        elif reservoir + cushion <= level < cushion + cushion:
            quality = 2
        elif level >= cushion + cushion:
            quality = 3
        else:
            quality = 1
        # Fixed playback targets for this controller.
        return quality, 3, 3
|
{"/test.py": ["/BBA.py", "/RBA.py", "/DYNAMIC.py", "/Pensieve.py"]}
|
26,643
|
tianzhaotju/Deeplive
|
refs/heads/master
|
/DYNAMIC.py
|
import math
import numpy as np
BIT_RATE = [500.0,850.0,1200.0,1850.0]  # bitrate ladder — presumably kbps; TODO confirm against simulator
MAX_STORE = 100  # number of most-recent history samples fed to the controller
TARGET_BUFFER = 1  # target buffer level returned to the simulator
latency_limit = 4  # latency cap returned to the simulator — unit unverified here
class BBA():
    """Buffer-based rate selection: quality depends only on current buffer occupancy."""

    def __init__(self):
        # Last observed buffer occupancy.
        self.buffer_size = 0

    def get_quality(self, segment):
        """Return a bitrate index (0-3) from the newest 'buffer' sample in segment."""
        level = segment['buffer'][-1]
        self.buffer_size = level
        reservoir = 0.5
        cushion = 1.5
        if level < reservoir:
            return 0
        if reservoir + cushion <= level < cushion + cushion:
            return 2
        if level >= cushion + cushion:
            return 3
        return 1

    def get_first_quality(self, segment):
        """Always start a session at the lowest quality."""
        return 0
class RBA:
    """Rate-based selection: pick the highest bitrate that fits the predicted throughput."""

    def __init__(self):
        self.buffer_size = 0
        self.p_rb = 1  # safety multiplier applied to the throughput estimate

    def get_quality(self, segment):
        """Return the largest bitrate index whose rate fits the scaled prediction."""
        budget = self.predict_throughput(segment['throughputHistory'], 0.8) * self.p_rb
        choice = 0
        for idx, rate in enumerate(BIT_RATE):
            if budget >= rate:
                choice = idx
        return choice

    def predict_throughput(self, throughputHistory, alpha):
        """Bias-corrected exponential smoothing over the throughput history;
        returns the last smoothed value."""
        if alpha < 0 or alpha > 1:
            print("Invalid input!")
            # Fall back to the span-based smoothing factor.
            alpha = 2 / (len(throughputHistory) + 1)
        smoothed = [0] * len(throughputHistory)
        for k in range(1, len(throughputHistory)):
            correction = 1 - pow(alpha, k)
            smoothed[k] = (alpha * smoothed[k - 1] + (1 - alpha) * throughputHistory[k]) / correction
        return smoothed[-1]

    def get_first_quality(self, segment):
        """First decision uses the same rate-based rule."""
        return self.get_quality(segment)
class Dynamic():
    """Hybrid controller that switches between buffer-based (BBA) and
    rate-based (RBA) decisions around a buffer threshold."""

    def __init__(self):
        self.bba = BBA()
        self.tput = RBA()
        self.is_buffer_based = False  # start in throughput mode
        self.low_buffer_threshold = 1

    def get_quality(self, segment):
        """Query both controllers, possibly flip modes, and return the active choice."""
        occupancy = segment['buffer'][-1]
        buffer_choice = self.bba.get_quality(segment)
        rate_choice = self.tput.get_quality(segment)
        if self.is_buffer_based:
            # Buffer draining and BBA is the more conservative pick: go rate-based.
            if occupancy < self.low_buffer_threshold and buffer_choice < rate_choice:
                self.is_buffer_based = False
        elif occupancy > self.low_buffer_threshold and buffer_choice >= rate_choice:
            # Healthy buffer and BBA at least matches RBA: go buffer-based.
            self.is_buffer_based = True
        return buffer_choice if self.is_buffer_based else rate_choice

    def get_first_quality(self, segment):
        """Delegate the first decision to whichever controller is active."""
        active = self.bba if self.is_buffer_based else self.tput
        return active.get_first_quality(segment)
class Algorithm:
    """Adapter exposing the Dynamic (BBA/RBA hybrid) controller through the
    simulator's Algorithm interface."""

    def __init__(self):
        self.dynamic = Dynamic()
        self.is_first = True  # NOTE(review): set but never read in this file
        self.next_throughput = 0
        self.next_latency = 0

    def Initial(self, model_name):
        # Rule-based controller: no model to load; just reset decision state.
        self.last_bit_rate = 0

    def run(self, time, S_time_interval, S_send_data_size, S_chunk_len, S_rebuf, S_buffer_size, S_play_time_len,
            S_end_delay, S_decision_flag, S_buffer_flag, S_cdn_flag, S_skip_time, end_of_video, cdn_newest_id,
            download_id, cdn_has_frame, IntialVars, start_avgbw):
        """Return (bit_rate, target_buffer, latency_limit) for the next chunk.

        When start_avgbw != -1 this is treated as the first decision: only
        that measured startup bandwidth seeds the throughput history.
        Otherwise the last MAX_STORE trace samples are converted to a
        throughput history and handed to the Dynamic controller.
        """
        bit_rate = 0
        target_buffer = TARGET_BUFFER
        throughputHistory = []
        if start_avgbw != -1:
            # First decision: history holds only the measured startup bandwidth.
            throughputHistory.append(start_avgbw)
            segment = {}
            segment['throughputHistory'] = throughputHistory
            return self.dynamic.get_first_quality(segment), target_buffer, latency_limit
        # NOTE(review): if len(S_send_data_size) < MAX_STORE this range starts
        # negative and indexes from the end of the list — confirm the harness
        # always supplies at least MAX_STORE samples.
        for i in range(len(S_send_data_size) - MAX_STORE, len((S_send_data_size))):
            send_data_size = S_send_data_size[i]
            time_interval = S_time_interval[i]
            bw = 0
            if time_interval != 0:
                # bytes over seconds, scaled by 1/1000 — presumably kB/s; TODO confirm unit
                bw = (send_data_size / time_interval) / 1000
            throughputHistory.append(bw)
        segment = {}
        segment['buffer'] = np.array(S_buffer_size[-MAX_STORE:])
        segment['time'] = np.array(S_time_interval[-MAX_STORE:])
        # NOTE(review): 'latency' here is a difference of consecutive buffer
        # samples, not a delay measurement — verify this matches the intent.
        segment['latency'] = np.array(S_buffer_size[-MAX_STORE - 1:-1]) - np.array(S_buffer_size[-MAX_STORE:])
        segment['throughputHistory'] = np.array(throughputHistory)
        bit_rate = self.dynamic.get_quality(segment=segment)
        target_buffer = TARGET_BUFFER
        return bit_rate, target_buffer, latency_limit
|
{"/test.py": ["/BBA.py", "/RBA.py", "/DYNAMIC.py", "/Pensieve.py"]}
|
26,644
|
tianzhaotju/Deeplive
|
refs/heads/master
|
/Pensieve.py
|
import numpy as np
import tensorflow as tf
import tflearn
import a3c
GAMMA = 0.99  # discount factor (training-time constant; unused in this inference file)
ENTROPY_WEIGHT = 0.5
ENTROPY_EPS = 1e-6
S_INFO = 5  # bit_rate, buffer_size, now_chunk_size, bandwidth_measurement(throughput and time)
S_LEN = 50  # take how many frames in the past
A_DIM = 64  # 4 bitrates x 4 target buffers x 4 latency limits (see Algorithm.Initial)
M_IN_K = 1000.0
ACTOR_LR_RATE = 0.0001
CRITIC_LR_RATE = 0.001
NUM_AGENTS = 6
TRAIN_SEQ_LEN = 100  # take as a train batch
MODEL_SAVE_INTERVAL = 100
VIDEO_BIT_RATE = [500.0,850.0,1200.0,1850.0]  # Kbps
BUFFER_NORM_FACTOR = 10.0
DEFAULT_QUALITY = 1  # default video quality without agent
RANDOM_SEED = 42
RAND_RANGE = 1000  # granularity used when sampling from the action probabilities
NN_MODEL = None  # Initial() builds its own checkpoint name; this is unused
class Algorithm:
    """Pensieve-style ABR controller: a trained A3C actor maps the recent
    streaming state onto a joint (bitrate, target buffer, latency limit) action."""

    def __init__(self):
        n = 0  # kept from the original implementation (unused)
        # Option lists whose cartesian product forms the discrete action space.
        self.BITRATE = [0, 1, 2, 3]
        self.TARGET_BUFFER = [0, 1, 2, 3]
        self.LATENCY_LIMIT = [1, 2, 3, 4]
        self.ACTION_SAPCE = []
        self.sess = tf.Session()
        self.actor = a3c.ActorNetwork(self.sess,
                                      state_dim=[S_INFO, S_LEN], action_dim=A_DIM,
                                      learning_rate=ACTOR_LR_RATE)
        self.critic = a3c.CriticNetwork(self.sess,
                                        state_dim=[S_INFO, S_LEN],
                                        learning_rate=CRITIC_LR_RATE)
        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver()

    def Initial(self, model_name):
        """Restore the trained checkpoint and enumerate the action space."""
        checkpoint = model_name + "nn_model_ep_00000.ckpt"
        if checkpoint is not None:
            self.saver.restore(self.sess, checkpoint)
        # Enumerate every (bitrate, target_buffer, latency_limit) combination.
        self.ACTION_SAPCE.extend([i, j, k]
                                 for i in self.BITRATE
                                 for j in self.TARGET_BUFFER
                                 for k in self.LATENCY_LIMIT)

    def run(self, time, S_time_interval, S_send_data_size, S_chunk_len, S_rebuf, S_buffer_size, S_play_time_len,
            S_end_delay, S_decision_flag, S_buffer_flag, S_cdn_flag, S_skip_time, end_of_video, cdn_newest_id,
            download_id, cdn_has_frame, abr_init, start_avgbw):
        """Build the normalized state, query the actor, and decode the sampled action."""
        target_buffer = 1
        latency_limit = 4
        tail = len(S_time_interval) - S_LEN
        # State rows (each scaled to comparable magnitude), concatenated in
        # S_INFO order: buffer, sent bytes, inter-arrival time, delay, rebuffer.
        state = []
        state.extend(v * 0.1 for v in S_buffer_size[tail:])
        state.extend(v * 0.00001 for v in S_send_data_size[tail:])
        state.extend(v * 10 for v in S_time_interval[tail:])
        state.extend(v * 0.1 for v in S_end_delay[tail:])
        state.extend(S_rebuf[tail:])
        action_prob = self.actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))
        action_cumsum = np.cumsum(action_prob)
        # Sample from the policy via the discretized inverse-CDF trick.
        threshold = np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)
        action = (action_cumsum > threshold).argmax()
        bit_rate, target_buffer, latency_limit = self.ACTION_SAPCE[action]
        return bit_rate, target_buffer, latency_limit
def main():
    """Smoke-test: load a trained actor checkpoint and sample one bitrate decision."""
    session = tf.Session()
    actor = a3c.ActorNetwork(session,
                             state_dim=[S_INFO, S_LEN], action_dim=A_DIM,
                             learning_rate=ACTOR_LR_RATE)
    session.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    checkpoint = "./Pensieve_models/nn_model_ep_3000.ckpt"
    saver.restore(session, checkpoint)
    # All-zero state: just verify the restored actor produces a decision.
    state = [np.zeros((S_INFO, S_LEN))]
    action_prob = actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))
    action_cumsum = np.cumsum(action_prob)
    bit_rate = (action_cumsum > np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)).argmax()
    target_buffer = 1
    print(bit_rate)
# Entry point for the checkpoint smoke-test above.
if __name__ == "__main__":
    main()
|
{"/test.py": ["/BBA.py", "/RBA.py", "/DYNAMIC.py", "/Pensieve.py"]}
|
26,645
|
tianzhaotju/Deeplive
|
refs/heads/master
|
/picture/CDF_QoE.py
|
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
# Per-algorithm QoE result files (CSV; the QoE value is read from column index 2).
f_BBA = './result/QoE/QoE_al/BBA.csv'
f_RBA = './result/QoE/QoE_al/RBA.csv'
f_DYNAMIC = './result/QoE/QoE_al/DYNAMIC.csv'
f_Pensieve = './result/QoE/QoE_al/Pensieve.csv'
f_PDDQN = './result/QoE/QoE_al/PDDQN.csv'
f_Offline_optimal = './result/QoE/QoE_al/Offline optimal.csv'
def init(F):
    """Read per-session QoE values (3rd CSV column) from file F and return the
    (x, y) points of their empirical CDF over [0, 2.5] using 1000 bins."""
    values = []
    with open(F, encoding='utf-8') as fh:
        rows = np.loadtxt(fh, str, delimiter=",")
        for row in rows:
            values.append(float(row[2]))
    values = np.array(values)
    print(max(values))
    # Relative frequency histogram, then cumulative sum -> empirical CDF.
    res = stats.relfreq(values, numbins=1000, defaultreallimits=(0, 2.5))
    x = res.lowerlimit + np.linspace(0, res.binsize * res.frequency.size, res.frequency.size)
    y = np.cumsum(res.frequency)
    return x, y
# Build the empirical QoE CDF of every algorithm and plot them together.
RBA_x, RBA_y = init(f_RBA)
BBA_x, BBA_y = init(f_BBA)
DYNAMIC_x, DYNAMIC_y = init(f_DYNAMIC)
Pensieve_x, Pensieve_y = init(f_Pensieve)
PDDQN_x, PDDQN_y = init(f_PDDQN)
Offline_optimal_x, Offline_optimal_y = init(f_Offline_optimal)
# Baselines dashed, PDDQN-R drawn solid for emphasis.
plt.plot(RBA_x, RBA_y, color='magenta', linestyle='--', label='RBA')
plt.plot(BBA_x, BBA_y, color='darkviolet', linestyle='--', label='BBA')
plt.plot(DYNAMIC_x, DYNAMIC_y, color='orange', linestyle='--', label='DYNAMIC')
plt.plot(Pensieve_x, Pensieve_y, color='springgreen', linestyle='--', label='Pensieve')
plt.plot(PDDQN_x, PDDQN_y, color='red', linestyle='-', label='PDDQN-R')
plt.plot(Offline_optimal_x, Offline_optimal_y, color='grey', linestyle='--', label='Offline optimal')
plt.xlabel("Average QoE")
plt.ylabel("CDF")
plt.legend()
# BUG FIX: plt.savefig('') raises ValueError (empty filename placeholder);
# save the figure to a real path before showing it.
plt.savefig('CDF_QoE.png')
plt.show()
|
{"/test.py": ["/BBA.py", "/RBA.py", "/DYNAMIC.py", "/Pensieve.py"]}
|
26,646
|
tianzhaotju/Deeplive
|
refs/heads/master
|
/train/pensieve-b/train.py
|
import os
import logging
import multiprocessing as mp
import fixed_env as env
import load_trace as load_trace
import matplotlib.pyplot as plt
import time as time_package
import tensorflow as tf
import numpy as np
import a3c
import csv
S_INFO = 5  # state rows: bit_rate, buffer_size, now_chunk_size, bandwidth_measurement(throughput and time)
S_LEN = 50  # take how many frames in the past
M_IN_K = 1000.0
ACTOR_LR_RATE = 0.0001
CRITIC_LR_RATE = 0.001
NUM_AGENTS = 8  # number of parallel worker processes
NUM_EPOCH = 300  # full passes over the trace set before a worker stops
TRAIN_SEQ_LEN = 100  # take as a train batch
MODEL_SAVE_INTERVAL = 1000  # checkpoint every N coordinator epochs
VIDEO_BIT_RATE = [500.0,850.0,1200.0,1850.0]  # Kbps
BUFFER_NORM_FACTOR = 10.0
DEFAULT_QUALITY = 1  # default video quality without agent
RANDOM_SEED = 42
RAND_RANGE = 1000  # granularity used when sampling from action probabilities
SUMMARY_DIR = './results'
LOG_FILE = './results/log'
TEST_LOG_FOLDER = './test_results/'
TRAIN_TRACES = './dataset/network_trace/fixed/'
VIDEO_TRACES = './dataset/video_trace/sports/frame_trace_'
# NN_MODEL = './results/pretrain_linear_reward.ckpt'
NN_MODEL = None  # checkpoint path to restore, or None to train from scratch
BIT_RATE = [500.0,850.0,1200.0,1850.0]  # kpbs
BITRATE = [0,1,2,3]  # bitrate-level choices
TARGET_BUFFER = [0,1,2,3]  # target-buffer choices
LATENCY_LIMIT = [1,2,3,4]  # latency-limit choices
ACTION_SAPCE = []
# The action space is the cartesian product of the three option lists above.
# ("SAPCE" is a long-standing typo kept because the rest of the code uses it.)
for i in BITRATE:
    for j in TARGET_BUFFER:
        for k in LATENCY_LIMIT:
            action_apace = []
            action_apace.append(i)
            action_apace.append(j)
            action_apace.append(k)
            ACTION_SAPCE.append(action_apace)
A_DIM = len(ACTION_SAPCE)  # 4 * 4 * 4 = 64 discrete actions
# NOTE(review): opening QoE.csv at import time is a module side effect; the
# agent processes append per-epoch rewards through this shared writer.
out = open("QoE.csv", "w", newline="")
w = csv.writer(out)
def central_agent(net_params_queues, exp_queues):
    """Coordinator process for synchronous A3C training.

    Repeatedly broadcasts the current actor/critic parameters to all
    NUM_AGENTS workers, gathers one experience batch from each, applies the
    resulting gradients, logs averages, and periodically checkpoints.

    net_params_queues -- one queue per agent; coordinator puts [actor, critic] params
    exp_queues        -- one queue per agent; workers put [s, a, r, terminal, info]
    Runs forever; the process is ended externally.
    """
    assert len(net_params_queues) == NUM_AGENTS
    assert len(exp_queues) == NUM_AGENTS
    logging.basicConfig(filename=LOG_FILE + '_central',
                        filemode='w',
                        level=logging.INFO)
    with tf.Session() as sess, open(LOG_FILE + '_test', 'wb') as test_log_file:
        actor = a3c.ActorNetwork(sess,
                                 state_dim=[S_INFO, S_LEN], action_dim=A_DIM,
                                 learning_rate=ACTOR_LR_RATE)
        critic = a3c.CriticNetwork(sess,
                                   state_dim=[S_INFO, S_LEN],
                                   learning_rate=CRITIC_LR_RATE)
        summary_ops, summary_vars = a3c.build_summaries()
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)  # training monitor
        saver = tf.train.Saver(max_to_keep=50)  # save neural net parameters
        # Restore neural net parameters if a checkpoint path was configured.
        nn_model = NN_MODEL
        if nn_model is not None:  # nn_model is the path to the checkpoint file
            saver.restore(sess, nn_model)
            print("Model restored.")
        epoch = 0
        # Assemble experiences from agents and compute the gradients.
        while True:
            # Synchronize the network parameters of the worker agents.
            actor_net_params = actor.get_network_params()
            critic_net_params = critic.get_network_params()
            for i in range(NUM_AGENTS):
                net_params_queues[i].put([actor_net_params, critic_net_params])
            # Note: this is the synchronous version of parallel training,
            # which is easier to understand and probe.  The framework can be
            # fairly easily modified to support asynchronous training; see
            # https://arxiv.org/abs/1602.01783 and https://arxiv.org/abs/1106.5730
            # for lock-free SGD practices.
            # Accumulators for average reward / td loss over this round.
            total_batch_len = 0.0
            total_reward = 0.0
            total_td_loss = 0.0
            total_entropy = 0.0
            total_agents = 0.0
            # Assemble experiences from the agents.
            actor_gradient_batch = []
            critic_gradient_batch = []
            for i in range(NUM_AGENTS):
                s_batch, a_batch, r_batch, terminal, info = exp_queues[i].get()
                actor_gradient, critic_gradient, td_batch = \
                    a3c.compute_gradients(
                        s_batch=np.stack(s_batch, axis=0),
                        a_batch=np.vstack(a_batch),
                        r_batch=np.vstack(r_batch),
                        terminal=terminal, actor=actor, critic=critic)
                actor_gradient_batch.append(actor_gradient)
                critic_gradient_batch.append(critic_gradient)
                total_reward += np.sum(r_batch)
                total_td_loss += np.sum(td_batch)
                total_batch_len += len(r_batch)
                total_agents += 1.0
                total_entropy += np.sum(info['entropy'])
            # Apply the per-agent gradients (aggregating them into one summed
            # gradient first was tried and left out; applying each is equivalent
            # for plain gradient descent).
            assert NUM_AGENTS == len(actor_gradient_batch)
            assert len(actor_gradient_batch) == len(critic_gradient_batch)
            for i in range(len(actor_gradient_batch)):
                actor.apply_gradients(actor_gradient_batch[i])
                critic.apply_gradients(critic_gradient_batch[i])
            # Log training information.
            epoch += 1
            avg_reward = total_reward / total_agents
            avg_td_loss = total_td_loss / total_batch_len
            avg_entropy = total_entropy / total_batch_len
            logging.info('Epoch: ' + str(epoch) +
                         ' TD_loss: ' + str(avg_td_loss) +
                         ' Avg_reward: ' + str(avg_reward) +
                         ' Avg_entropy: ' + str(avg_entropy))
            summary_str = sess.run(summary_ops, feed_dict={
                summary_vars[0]: avg_td_loss,
                summary_vars[1]: avg_reward,
                summary_vars[2]: avg_entropy
            })
            writer.add_summary(summary_str, epoch)
            writer.flush()
            if epoch % MODEL_SAVE_INTERVAL == 0:
                # Save the neural net parameters to disk.
                save_path = saver.save(sess, SUMMARY_DIR + "/nn_model_ep_" +
                                       str(epoch) + ".ckpt")
                logging.info("Model saved in file: " + save_path)
def agent(agent_id, all_cooked_time, all_cooked_bw, all_file_names, video_size_file, net_params_queue, exp_queue):
    """Worker process: stream simulated video under the current policy,
    collect (state, action, reward) batches, and ship them to the coordinator.

    agent_id         -- worker index (also seeds the simulator)
    all_cooked_*     -- pre-loaded network traces
    all_file_names   -- trace names; one full pass over them == one epoch
    video_size_file  -- prefix of the per-bitrate video frame traces
    net_params_queue -- receives [actor, critic] params from the coordinator
    exp_queue        -- sends experience batches to the coordinator
    """
    net_env = env.Environment(all_cooked_time=all_cooked_time,
                              all_cooked_bw=all_cooked_bw,
                              random_seed=agent_id,
                              VIDEO_SIZE_FILE=video_size_file,
                              Debug=False)
    with tf.Session() as sess, open(LOG_FILE + '_agent_' + str(agent_id), 'wb') as log_file:
        actor = a3c.ActorNetwork(sess,
                                 state_dim=[S_INFO, S_LEN], action_dim=A_DIM,
                                 learning_rate=ACTOR_LR_RATE)
        critic = a3c.CriticNetwork(sess,
                                   state_dim=[S_INFO, S_LEN],
                                   learning_rate=CRITIC_LR_RATE)
        # Initial synchronization of the network parameters from the coordinator.
        actor_net_params, critic_net_params = net_params_queue.get()
        actor.set_network_params(actor_net_params)
        critic.set_network_params(critic_net_params)
        bit_rate = DEFAULT_QUALITY
        target_buffer = DEFAULT_QUALITY
        latency_limit = 4
        index = 1
        action_vec = np.zeros(A_DIM)
        action_vec[index] = 1
        s_batch = [np.zeros((S_INFO, S_LEN))]
        a_batch = [action_vec]
        r_batch = []
        entropy_record = []
        video_count = 0
        reward_all_sum = 0
        reward_all = 0
        reward = 0
        switch_num = 0  # NOTE(review): reset below but never incremented/read
        # QoE reward weights.
        SMOOTH_PENALTY = 0.02
        REBUF_PENALTY = 1.5
        LANTENCY_PENALTY = 0.005
        BITRATE_REWARD = 0.001
        SKIP_PENALTY = 0.5
        epoch = 0
        n = 0  # completed passes over the full trace set
        state = np.array(s_batch[-1], copy=True)
        frame_time_len = 0.04  # seconds per video frame
        last_bit_rate = DEFAULT_QUALITY
        while True:  # experience video streaming forever
            # The action is from the last decision; this keeps the framework
            # similar to a real player loop.
            time, time_interval, send_data_size, chunk_len, \
            rebuf, buffer_size, play_time_len, end_delay, \
            cdn_newest_id, download_id, cdn_has_frame, skip_frame_time_len, decision_flag, \
            buffer_flag, cdn_flag, skip_flag, end_of_video = net_env.get_video_frame(bit_rate, target_buffer,
                                                                                     latency_limit)
            # # Alternative QoE setting (delay-dependent latency penalty):
            # if end_delay <= 1.0:
            #     LANTENCY_PENALTY = 0.005
            # else:
            #     LANTENCY_PENALTY = 0.01
            reward_frame = 0
            epoch += 1
            # Per-frame QoE: bitrate utility minus rebuffer/latency/skip penalties.
            if not cdn_flag:
                reward_frame = frame_time_len * float(BIT_RATE[
                    bit_rate]) * BITRATE_REWARD - REBUF_PENALTY * rebuf - LANTENCY_PENALTY * end_delay - SKIP_PENALTY * skip_frame_time_len
            else:
                reward_frame = -(REBUF_PENALTY * rebuf)
            reward += reward_frame
            # Dequeue the oldest history column (shift the state window left).
            state = np.roll(state, -1, axis=1)
            # These should be S_INFO number of terms.
            state[0, -1] = buffer_size * 0.1
            state[1, -1] = send_data_size * 0.00001
            state[2, -1] = time_interval * 10  # kilo byte / ms — TODO confirm unit
            state[3, -1] = end_delay * 0.1  # 10 sec
            state[4, -1] = rebuf  # mega byte — TODO confirm unit
            if decision_flag and not end_of_video:
                # Chunk boundary: add the smoothness penalty, close out the
                # reward for this decision, and sample the next action.
                reward_frame = -1 * SMOOTH_PENALTY * (abs(BIT_RATE[bit_rate] - BIT_RATE[last_bit_rate]) / 1000)
                reward += reward_frame
                last_bit_rate = bit_rate
                r_batch.append(reward)
                reward = 0
                # Compute the action probability vector and sample from it.
                action_prob = actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))
                action_cumsum = np.cumsum(action_prob)
                temp = np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)
                index = (action_cumsum > temp).argmax()
                bit_rate = ACTION_SAPCE[index][0]
                target_buffer = ACTION_SAPCE[index][1]
                latency_limit = ACTION_SAPCE[index][2]
                # Note: we need to discretize the probability into 1/RAND_RANGE
                # steps because there is an intrinsic discrepancy in passing
                # single state and batch states.
                entropy_record.append(a3c.compute_entropy(action_prob[0]))
                # Report experience to the coordinator once a batch is full.
                if len(r_batch) >= TRAIN_SEQ_LEN:
                    exp_queue.put([s_batch[1:],  # ignore the first chunk
                                   a_batch[1:],  # since we don't have the
                                   r_batch[1:],  # control over it
                                   end_of_video,
                                   {'entropy': entropy_record}])
                    # Synchronize the network parameters from the coordinator.
                    actor_net_params, critic_net_params = net_params_queue.get()
                    actor.set_network_params(actor_net_params)
                    critic.set_network_params(critic_net_params)
                    del s_batch[:]
                    del a_batch[:]
                    del r_batch[:]
                    del entropy_record[:]
                s_batch.append(state)
                action_vec = np.zeros(A_DIM)
                action_vec[index] = 1
                a_batch.append(action_vec)
            reward_all += reward_frame
            # Store the state and action into batches at video boundaries.
            if end_of_video:
                r_batch.append(reward)
                reward_all_sum += reward_all / 20
                video_count += 1
                if video_count >= len(all_file_names):
                    # Finished one pass over every trace: log, persist, reset.
                    n += 1
                    video_count = 0
                    print(n, "agent_id ", agent_id, "reward_all_sum:", reward_all_sum)
                    w.writerow([n, reward_all_sum])
                    out.flush()
                    reward_all_sum = 0
                    net_env = env.Environment(all_cooked_time=all_cooked_time,
                                              all_cooked_bw=all_cooked_bw,
                                              random_seed=epoch,
                                              VIDEO_SIZE_FILE=video_size_file,
                                              Debug=False)
                    if n == NUM_EPOCH:
                        break
                reward_all = 0
                reward = 0
                switch_num = 0
                bit_rate = DEFAULT_QUALITY  # use the default action here
                target_buffer = DEFAULT_QUALITY
                action_vec = np.zeros(A_DIM)
                action_vec[bit_rate] = 1
                s_batch.append(np.zeros((S_INFO, S_LEN)))
                a_batch.append(action_vec)
def main():
    """Spawn the central coordinator plus NUM_AGENTS worker processes and train A3C."""
    np.random.seed(RANDOM_SEED)
    assert len(ACTION_SAPCE) == A_DIM
    # make sure the summary/result directory exists before workers write to it
    if not os.path.exists(SUMMARY_DIR):
        os.makedirs(SUMMARY_DIR)
    # one pair of single-slot queues per agent: parameters down, experience up
    net_params_queues = [mp.Queue(1) for _ in range(NUM_AGENTS)]
    exp_queues = [mp.Queue(1) for _ in range(NUM_AGENTS)]
    # processes (not threads) are used to sidestep the Python GIL
    coordinator = mp.Process(target=central_agent,
                             args=(net_params_queues, exp_queues))
    coordinator.start()
    all_cooked_time, all_cooked_bw, all_file_names = load_trace.load_trace(TRAIN_TRACES)
    workers = [
        mp.Process(target=agent,
                   args=(agent_id, all_cooked_time, all_cooked_bw, all_file_names,
                         VIDEO_TRACES, net_params_queues[agent_id],
                         exp_queues[agent_id]))
        for agent_id in range(NUM_AGENTS)
    ]
    for worker in workers:
        worker.start()
    # block until the coordinator decides training is done
    coordinator.join()
# Script entry point: start distributed training when run directly.
if __name__ == '__main__':
    main()
|
{"/test.py": ["/BBA.py", "/RBA.py", "/DYNAMIC.py", "/Pensieve.py"]}
|
26,647
|
tianzhaotju/Deeplive
|
refs/heads/master
|
/PDDQN_.py
|
import random
import numpy as np
from collections import deque
from keras.layers import Dense
from keras.optimizers import Adam
from keras import backend as K
from keras.layers import Input
from keras.models import Model
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Conv1D
from keras.layers import Conv2D
from keras.layers import Reshape
from keras.layers import Flatten
BIT_RATE = [500.0,850.0,1200.0,1850.0]
class DQNAgent:
    """Double-DQN agent with a Conv1D Q-network and a periodically synced target net.
    NOTE(review): epsilon, epsilon_min and epsilon_decay are all 0.0, so act()
    is purely greedy and replay() never changes epsilon — exploration is
    effectively disabled (evaluation-style configuration); confirm intended.
    """
    def __init__(self, state_size, action_size):
        # flattened state vector length and number of discrete actions
        self.state_size = state_size
        self.action_size = action_size
        # replay buffer of (state, action, reward, next_state, done) tuples
        self.memory = deque(maxlen=200000)
        self.gamma = 0.95  # discount rate
        self.epsilon = 0.0  # exploration rate
        self.epsilon_min = 0.0
        self.epsilon_decay = 0.0
        self.learning_rate = 0.0001
        # online network and target network share the same architecture
        self.model = self._build_model()
        self.target_model = self._build_model()
        self.update_target_model()
    def _huber_loss(self, y_true, y_pred, clip_delta=1.0):
        """Huber loss: quadratic for |error| <= clip_delta, linear beyond it.
        NOTE(review): the local names are swapped relative to convention —
        `squared_loss` is the quadratic branch and `quadratic_loss` is the
        linear-tail branch — but the computed value is still the Huber loss.
        """
        error = y_true - y_pred
        cond = K.abs(error) <= clip_delta
        squared_loss = 0.5 * K.square(error)
        quadratic_loss = 0.5 * K.square(clip_delta) + clip_delta * (K.abs(error) - clip_delta)
        return K.mean(tf.where(cond, squared_loss, quadratic_loss))
    def _build_model(self):
        # Neural Net for Deep-Q learning Model
        # input is reshaped to 50 time steps x 5 features before the Conv1D
        model = Sequential()
        # model.add(Dense(128,input_dim=self.state_size, activation='relu'))
        model.add(Reshape((50,5),input_shape=(self.state_size,)))
        model.add(Conv1D(5,kernel_size=4, activation='relu'))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dense(128, activation='relu'))
        # NOTE(review): 'relu' on the output layer clamps Q-estimates at >= 0
        model.add(Dense(self.action_size, activation='relu'))
        model.compile(loss=self._huber_loss,
                      optimizer=Adam(lr=self.learning_rate))
        return model
    def update_target_model(self):
        # copy weights from model to target_model
        self.target_model.set_weights(self.model.get_weights())
    def remember(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))
    def act(self, state):
        """Select an action (greedy here, since epsilon == 0)."""
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])  # returns action
    def replay(self, batch_size):
        """Fit the online net on a random minibatch using the double-DQN target:
        the online net picks the argmax action, the target net evaluates it."""
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = self.model.predict(state)
            if done:
                target[0][action] = reward
            else:
                a = self.model.predict(next_state)[0]
                t = self.target_model.predict(next_state)[0]
                # target[0][action] = reward + self.gamma * np.amax(t)
                target[0][action] = reward + self.gamma * t[np.argmax(a)]
            self.model.fit(state, target, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon -= self.epsilon_decay
    def load(self, name):
        """Load model weights from *name* (HDF5 file)."""
        self.model.load_weights(name)
    def save(self, name):
        """Save model weights to *name* (HDF5 file)."""
        self.model.save_weights(name)
class Algorithm:
    """PDDQN-based ABR controller: maps a history window of player statistics
    to a (bit_rate, target_buffer, latency_limit) decision via a trained DQN."""
    def __init__(self):
        # fill your init vars
        # network input/output sizes: 5 signals * 50 frames -> 64 joint actions
        self.state_size = 250
        self.action_size = 64
        self.history_len = 50
        # discrete option indices for each decision dimension
        self.BITRATE = [0, 1, 2, 3]
        self.TARGET_BUFFER = [0, 1, 2, 3]
        self.LATENCY_LIMIT = [1, 2, 3, 4]
        self.ACTION_SAPCE = []
        self.agent = DQNAgent(self.state_size, self.action_size)
    # Intial
    def Initial(self, model_name):
        """Load the pretrained weights and (re)build the flat action space.
        Rebuilding (instead of appending) keeps repeated calls idempotent —
        the original appended duplicates if Initial() was called twice.
        """
        # name = "save/16.h5"
        name = str(model_name + "100.h5")
        self.agent.load(name)
        self.ACTION_SAPCE = [
            [i, j, k]
            for i in self.BITRATE
            for j in self.TARGET_BUFFER
            for k in self.LATENCY_LIMIT
        ]
    # Define your al
    def run(self, time, S_time_interval, S_send_data_size, S_chunk_len, S_rebuf, S_buffer_size, S_play_time_len,
            S_end_delay, S_decision_flag, S_buffer_flag, S_cdn_flag, S_skip_time, end_of_video, cdn_newest_id,
            download_id, cdn_has_frame, IntialVars, start_avgbw):
        """Build the scaled state vector from the last `history_len` frames and
        return the greedy action decoded into (bit_rate, target_buffer, latency_limit)."""
        state = []
        length = len(S_time_interval)
        history_len = self.history_len
        # each signal is rescaled to a comparable magnitude for the network
        for i in S_buffer_size[length - history_len:]:
            state.append(i * 0.1)
        for i in S_send_data_size[length - history_len:]:
            state.append(i * 0.00001)
        for i in S_time_interval[length - history_len:]:
            state.append(i * 10)
        for i in S_end_delay[length - history_len:]:
            state.append(i * 0.1)
        for i in S_rebuf[length - history_len:]:
            state.append(i)
        state = np.reshape(state, [1, self.state_size])
        # print(state)
        action = self.agent.act(state)
        bit_rate = self.ACTION_SAPCE[action][0]
        target_buffer = self.ACTION_SAPCE[action][1]
        latency_limit = self.ACTION_SAPCE[action][2]
        return bit_rate, target_buffer, latency_limit
|
{"/test.py": ["/BBA.py", "/RBA.py", "/DYNAMIC.py", "/Pensieve.py"]}
|
26,648
|
tianzhaotju/Deeplive
|
refs/heads/master
|
/test.py
|
import fixed_env as env
import load_trace as load_trace
import os
import time as tm
import csv
import tensorflow as tf
def test(user_id):
    """Run one full evaluation pass of the selected ABR algorithm.

    Simulates playback of every network trace in the chosen dataset, logging
    per-trace QoE reward and decision latency to a CSV file, and returns
    [mean reward per trace, mean algorithm run time per trace].
    NOTE(review): `user_id` is unused inside the function.
    """
    #1 Algorithm Setting: RBA, BBA, DYNAMIC, PDDQN, Pensieve
    ABR_NAME = 'Pensieve'
    #2 QoE Setting: ar, al, hd, b, max
    QoE = 'b'
    #3 Network Dataset: high, medium, low, fixed
    NETWORK_TRACE = 'fixed'
    #4 Video Dataset: AsianCup_China_Uzbekistan, Fengtimo_2018_11_3, YYF_2018_08_12
    VIDEO_TRACE = 'AsianCup_China_Uzbekistan'
    # late imports keep only the selected algorithm's dependencies loaded
    if ABR_NAME == 'BBA':
        import BBA as ABR
    if ABR_NAME == 'RBA':
        import RBA as ABR
    if ABR_NAME == 'DYNAMIC':
        import DYNAMIC as ABR
    if ABR_NAME == 'PDDQN':
        import PDDQN_R as ABR
    if ABR_NAME == 'Pensieve':
        import Pensieve as ABR
    # QoE weights, overwritten per profile below
    SMOOTH_PENALTY = 0
    REBUF_PENALTY = 0.0
    LANTENCY_PENALTY = 0.0
    SKIP_PENALTY = 0.0
    BITRATE_REWARD = 0.0
    if QoE == 'al':
        SMOOTH_PENALTY = 0.01
        REBUF_PENALTY = 1.5
        LANTENCY_PENALTY = 0.01
        BITRATE_REWARD = 0.001
        SKIP_PENALTY = 1
    if QoE == 'ar':
        SMOOTH_PENALTY = 0.0
        REBUF_PENALTY = 3
        LANTENCY_PENALTY = 0.0
        BITRATE_REWARD = 0.001
        SKIP_PENALTY = 0.0
    if QoE == 'b':
        SMOOTH_PENALTY = 0.02
        REBUF_PENALTY = 1.5
        LANTENCY_PENALTY = 0.005
        BITRATE_REWARD = 0.001
        SKIP_PENALTY = 0.5
    if QoE == 'hd':
        SMOOTH_PENALTY = 0.0
        REBUF_PENALTY = 0.5
        LANTENCY_PENALTY = 0.0
        BITRATE_REWARD = 0.001
        SKIP_PENALTY = 0.0
    # the 'max' profile omits the algorithm name from the result file name
    if QoE == 'max':
        SMOOTH_PENALTY = 0
        REBUF_PENALTY = 0.0
        LANTENCY_PENALTY = 0.0
        SKIP_PENALTY = 0.0
        BITRATE_REWARD = 0.001
        FILE_NAME = './'+'result/'+QoE+'_'+NETWORK_TRACE+'_'+VIDEO_TRACE+'.csv'
    else:
        FILE_NAME = './'+'result/'+ABR_NAME+'_'+QoE+'_'+NETWORK_TRACE+'_'+VIDEO_TRACE+'.csv'
    out = open(FILE_NAME,'w',newline='')
    w = csv.writer(out)
    DEBUG = False
    LOG_FILE_PATH = './log/'
    # create result directory
    if not os.path.exists(LOG_FILE_PATH):
        os.makedirs(LOG_FILE_PATH)
    # -- End Configuration --
    network_trace_dir = './dataset/new_network_trace/' + NETWORK_TRACE + '/'
    video_trace_prefix = './dataset/video_trace/' + VIDEO_TRACE + '/frame_trace_'
    # load the trace
    all_cooked_time, all_cooked_bw, all_file_names = load_trace.load_trace(network_trace_dir)
    # bootstrap bandwidth estimate: mean of the first 10 samples of trace 0
    start_avgbw = (sum(all_cooked_bw[0][0:10])/10) *1000
    # random_seed
    random_seed = 2
    count = 0
    trace_count = 1
    FPS = 25
    frame_time_len = 0.04
    reward_all_sum = 0
    run_time = 0
    net_env = env.Environment(all_cooked_time=all_cooked_time,
                              all_cooked_bw=all_cooked_bw,
                              random_seed=random_seed,
                              logfile_path=LOG_FILE_PATH,
                              VIDEO_SIZE_FILE=video_trace_prefix,
                              Debug=DEBUG)
    abr = ABR.Algorithm()
    # model checkpoint id passed to the algorithm's Initial()
    model_name = 20000
    model_name+=1000
    abr_init = abr.Initial(str(model_name))
    BIT_RATE = [500.0, 850.0, 1200.0, 1850.0]  # kpbs
    TARGET_BUFFER = [0.5,0.75,1,1.25]  # seconds
    # ABR setting
    RESEVOIR = 0.5
    CUSHION = 2
    cnt = 0
    # defalut setting
    last_bit_rate = 0
    bit_rate = 0
    target_buffer = 0
    latency_limit = 4
    # reward setting
    reward_frame = 0
    reward_all = 0
    # past_info setting: sliding windows over the last 200 frames
    past_frame_num = 200
    S_time_interval = [0] * past_frame_num
    S_send_data_size = [0] * past_frame_num
    S_chunk_len = [0] * past_frame_num
    S_rebuf = [0] * past_frame_num
    S_buffer_size = [0] * past_frame_num
    S_end_delay = [0] * past_frame_num
    S_chunk_size = [0] * past_frame_num
    S_play_time_len = [0] * past_frame_num
    S_decision_flag = [0] * past_frame_num
    S_buffer_flag = [0] * past_frame_num
    S_cdn_flag = [0] * past_frame_num
    S_skip_time = [0] * past_frame_num
    # params setting
    call_time_sum = 0
    while True:
        reward_frame = 0
        # advance the simulator by one frame
        time, time_interval, send_data_size, chunk_len, \
        rebuf, buffer_size, play_time_len, end_delay, \
        cdn_newest_id, download_id, cdn_has_frame, skip_frame_time_len, decision_flag, \
        buffer_flag, cdn_flag, skip_flag, end_of_video = net_env.get_video_frame(bit_rate, target_buffer, latency_limit)
        # S_info is sequential order
        S_time_interval.pop(0)
        S_send_data_size.pop(0)
        S_chunk_len.pop(0)
        S_buffer_size.pop(0)
        S_rebuf.pop(0)
        S_end_delay.pop(0)
        S_play_time_len.pop(0)
        S_decision_flag.pop(0)
        S_buffer_flag.pop(0)
        S_cdn_flag.pop(0)
        S_skip_time.pop(0)
        S_time_interval.append(time_interval)
        S_send_data_size.append(send_data_size)
        S_chunk_len.append(chunk_len)
        S_buffer_size.append(buffer_size)
        S_rebuf.append(rebuf)
        S_end_delay.append(end_delay)
        S_play_time_len.append(play_time_len)
        S_decision_flag.append(decision_flag)
        S_buffer_flag.append(buffer_flag)
        S_cdn_flag.append(cdn_flag)
        S_skip_time.append(skip_frame_time_len)
        # QOE setting
        # if end_delay <= 1.0:
        #     LANTENCY_PENALTY = 0.005
        # else:
        #     LANTENCY_PENALTY = 0.01
        # per-frame QoE: bitrate reward minus rebuffer/latency/skip penalties
        if not cdn_flag:
            reward_frame = frame_time_len * float(BIT_RATE[
                bit_rate]) * BITRATE_REWARD - REBUF_PENALTY * rebuf - LANTENCY_PENALTY * end_delay - SKIP_PENALTY * skip_frame_time_len
        else:
            reward_frame = -(REBUF_PENALTY * rebuf)
        # on chunk boundaries: smoothness penalty, then ask the ABR for a new decision
        if decision_flag or end_of_video:
            reward_frame += -1 * SMOOTH_PENALTY * (abs(BIT_RATE[bit_rate] - BIT_RATE[last_bit_rate]) / 1000)
            last_bit_rate = bit_rate
            # ----------------- Your Algorithm ---------------------
            cnt += 1
            timestamp_start = tm.time()
            bit_rate, target_buffer, latency_limit = abr.run(time,
                                                             S_time_interval,
                                                             S_send_data_size,
                                                             S_chunk_len,
                                                             S_rebuf,
                                                             S_buffer_size,
                                                             S_play_time_len,
                                                             S_end_delay,
                                                             S_decision_flag,
                                                             S_buffer_flag,
                                                             S_cdn_flag,
                                                             S_skip_time,
                                                             end_of_video,
                                                             cdn_newest_id,
                                                             download_id,
                                                             cdn_has_frame,
                                                             abr_init,
                                                             start_avgbw)
            # the bootstrap bandwidth is only valid for the very first decision
            start_avgbw = -1
            timestamp_end = tm.time()
            call_time_sum += timestamp_end - timestamp_start
            # -------------------- End --------------------------------
        if end_of_video:
            # print("network traceID, network_reward, avg_running_time", trace_count, reward_all, call_time_sum / cnt)
            w.writerow([trace_count,reward_all,(call_time_sum / cnt) * 1000])
            reward_all_sum += reward_all
            run_time += call_time_sum / cnt
            if trace_count >= len(all_file_names):
                break
            trace_count += 1
            cnt = 0
            call_time_sum = 0
            last_bit_rate = 0
            reward_all = 0
            bit_rate = 0
            target_buffer = 0
            # reset the history windows for the next trace
            # NOTE(review): S_skip_time and latency_limit are NOT reset here,
            # so state leaks across traces — confirm whether that is intended.
            S_time_interval = [0] * past_frame_num
            S_send_data_size = [0] * past_frame_num
            S_chunk_len = [0] * past_frame_num
            S_rebuf = [0] * past_frame_num
            S_buffer_size = [0] * past_frame_num
            S_end_delay = [0] * past_frame_num
            S_chunk_size = [0] * past_frame_num
            S_play_time_len = [0] * past_frame_num
            S_decision_flag = [0] * past_frame_num
            S_buffer_flag = [0] * past_frame_num
            S_cdn_flag = [0] * past_frame_num
        reward_all += reward_frame
    return [reward_all_sum / trace_count, run_time / trace_count]
# Repeatedly evaluate the ABR pipeline, resetting the TF graph between runs.
for _ in range(100):
    tf.reset_default_graph()
    print(test("aaa"))
|
{"/test.py": ["/BBA.py", "/RBA.py", "/DYNAMIC.py", "/Pensieve.py"]}
|
26,649
|
tianzhaotju/Deeplive
|
refs/heads/master
|
/train/Dynamic/Dynamic.py
|
import math
import numpy as np
# Candidate bitrates in kbps, history window length (frames), and the buffer
# level (seconds) the controller steers towards.
BIT_RATE = [500.0, 1200.0]
MAX_STORE = 100
TARGET_BUFFER = 2
# class Ewma():
#
# def __init__(self, segment):
#
# self.throughput = None
# self.latency = None
# self.segment_time = 1
# self.half_life = [8000, 3000]
# self.latency_half_life = [h / self.segment_time for h in self.half_life]
# self.ThroughputHistory = segment['ThroughputHistory']
# self.throughput = [0] * len(self.half_life)
# self.weight_throughput = 0
# self.latency = [0] * len(self.half_life)
# self.weight_latency = 0
#
# def push(self, time, tput, lat):
#
# for i in range(len(self.half_life)):
# alpha = math.pow(0.5, time / self.half_life[i])
# self.throughput[i] = alpha * self.throughput[i] + (1 - alpha) * tput
# alpha = math.pow(0.5, 1 / self.latency_half_life[i])
# self.latency[i] = alpha * self.latency[i] + (1 - alpha) * lat
#
# self.weight_throughput += time
# self.weight_latency += 1
#
# tput = None
# lat = None
# for i in range(len(self.half_life)):
# zero_factor = 1 - math.pow(0.5, self.weight_throughput / self.half_life[i])
# t = self.throughput[i] / zero_factor
# tput = t if tput == None else min(tput, t) # conservative case is min
# zero_factor = 1 - math.pow(0.5, self.weight_latency / self.latency_half_life[i])
# l = self.latency[i] / zero_factor
# lat = l if lat == None else max(lat, l) # conservative case is max
# self.throughput = tput
# self.latency = lat
#
# return self.throughput,self.latency
class SlidingWindow():
    """Throughput/latency estimator averaging over fixed-size history windows.

    With several window sizes the estimate is conservative: the minimum mean
    throughput and the maximum mean latency across all windows.
    """
    def __init__(self, window_size=None):
        # window_size: optional list of window lengths (frames); defaults to
        # [MAX_STORE], preserving the original behaviour.
        self.window_size = [MAX_STORE] if window_size is None else window_size
        self.throughput = None
        self.latency = None
    def get_next(self, segment):
        """Return (throughput, latency) estimated from segment['throughput']
        and segment['latency'] histories (sequences of samples)."""
        tput = None
        lat = None
        for ws in self.window_size:
            sample = segment['throughput'][-ws:]
            t = sum(sample) / len(sample)
            # conservative case for throughput is the minimum across windows
            tput = t if tput is None else min(tput, t)
            sample = segment['latency'][-ws:]
            l = sum(sample) / len(sample)
            # conservative case for latency is the maximum across windows
            lat = l if lat is None else max(lat, l)
        self.throughput = tput
        self.latency = lat
        return self.throughput, self.latency
class Bola():
    """BOLA buffer-based bitrate selection (Lyapunov-style utility maximisation)."""
    def __init__(self):
        # log-utilities normalised so the lowest bitrate has utility 0
        self.utility_offset = -math.log(BIT_RATE[0])
        self.utilities = [math.log(b) + self.utility_offset for b in BIT_RATE]
        self.segment_time = 1  # s
        self.gp = 5
        self.buffer_size = TARGET_BUFFER  # s
        self.abr_osc = True
        # Vp maps buffer headroom into the utility domain
        self.Vp = (self.buffer_size - self.segment_time) / (self.utilities[-1] + self.gp)
        self.last_seek_index = 0  # TODO
        self.last_quality = 0
        self.slidingWindow = SlidingWindow()
    def quality_from_buffer(self, segment):
        """Pick the quality maximising the BOLA objective for the buffer level.
        NOTE(review): this reads segment['buffer'] wholesale; the Dynamic
        caller stores a numpy *array* there, which would make `s > score`
        ambiguous — confirm whether a scalar level is expected here.
        """
        level = segment['buffer']
        quality = 0
        score = None
        for q in range(len(BIT_RATE)):
            s = ((self.Vp * (self.utilities[q] + self.gp) - level) / BIT_RATE[q])
            # `is None` instead of `== None`: identity test is well-defined
            # even when s/score are numpy values
            if score is None or s > score:
                quality = q
                score = s
        return quality
    def get_quality(self, segment):
        """Buffer-based choice, capped by a throughput estimate to damp oscillation."""
        quality = self.quality_from_buffer(segment)
        throughput, latency = self.slidingWindow.get_next(segment)
        if quality > self.last_quality:
            quality_t = self.quality_from_throughput(throughput, latency)
            if self.last_quality > quality_t and quality > quality_t:
                quality = self.last_quality
            elif quality > quality_t:
                if not self.abr_osc:
                    quality = quality_t + 1
                else:
                    quality = quality_t
        self.last_quality = quality
        return quality
    def quality_from_throughput(self, tput, lat):
        """Highest quality whose download still fits within one segment duration."""
        p = self.segment_time
        quality = 0
        while (quality + 1 < len(BIT_RATE) and lat + p * BIT_RATE[quality + 1] / tput <= p):
            quality += 1
        return quality
    def get_first_quality(self):
        # always start at the lowest quality
        return 0
class ThroughputRule():
    """Pure throughput-based bitrate rule with a safety margin."""
    def __init__(self):
        self.segment_time = 1  # s
        self.safety_factor = 0.9
        self.slidingWindow = SlidingWindow()
    def get_quality(self, segment):
        """Choose the highest sustainable quality given the discounted throughput."""
        throughput, latency = self.slidingWindow.get_next(segment)
        return self.quality_from_throughput(throughput * self.safety_factor, latency)
    def quality_from_throughput(self, tput, lat):
        """Highest quality whose download still fits within one segment duration."""
        p = self.segment_time
        quality = 0
        for candidate in range(1, len(BIT_RATE)):
            if lat + p * BIT_RATE[candidate] / tput > p:
                break
            quality = candidate
        return quality
    def get_first_quality(self):
        # always start from the lowest quality
        return 0
class Dynamic():
    """Hybrid controller switching between BOLA (buffer) and the throughput rule."""
    def __init__(self):
        self.bola = Bola()
        self.tput = ThroughputRule()
        self.is_bola = False
        self.low_buffer_threshold = 1
    def get_quality(self, segment):
        """Evaluate both rules, update the active-rule flag, return the active pick."""
        level = segment['buffer'][-1]
        bola_choice = self.bola.get_quality(segment)
        tput_choice = self.tput.get_quality(segment)
        if self.is_bola and level < self.low_buffer_threshold and bola_choice < tput_choice:
            # buffer is low and BOLA is the more conservative -> trust throughput
            self.is_bola = False
        elif not self.is_bola and level > self.low_buffer_threshold and bola_choice >= tput_choice:
            # buffer is healthy and BOLA is at least as aggressive -> use BOLA
            self.is_bola = True
        return bola_choice if self.is_bola else tput_choice
    def get_first_quality(self):
        """Delegate the start-up quality to whichever rule is currently active."""
        active = self.bola if self.is_bola else self.tput
        return active.get_first_quality()
class Algorithm:
    """Adapter exposing the Dynamic (BOLA + throughput) controller to the runner."""
    def __init__(self):
        # fill your init vars
        self.dynamic = Dynamic()
        self.is_first = True  # still waiting for a full MAX_STORE history window
        self.next_throughput = 0
        self.next_latency = 0
    def Initial(self):
        """Reset per-session state (last selected bitrate)."""
        self.last_bit_rate = 0
    # Define your al
    def run(self, S_time_interval, S_send_data_size, S_frame_time_len, S_frame_type, S_buffer_size, S_end_delay,
            rebuf_time, cdn_has_frame, cdn_flag, buffer_flag):
        """Return the bitrate decision for the next chunk.

        NOTE(review): the warm-up path returns a single quality value while the
        steady-state path returns a (bit_rate, target_buffer) tuple — callers
        must cope with both arities; confirm this is intended.
        NOTE(review): the throughput division below assumes no zeros remain in
        the last MAX_STORE entries of S_time_interval after warm-up.
        """
        if(self.is_first):
            # leave warm-up once the history window is fully populated
            if S_time_interval[-MAX_STORE] != 0:
                self.is_first = False
            return self.dynamic.get_first_quality()
        segment = {}
        segment['buffer'] = np.array(S_buffer_size[-MAX_STORE:])
        segment['latency'] = np.array(S_end_delay[-MAX_STORE:])
        segment['throughput'] = np.array(S_send_data_size[-MAX_STORE:])/np.array(S_time_interval[-MAX_STORE:])
        bit_rate = self.dynamic.get_quality(segment=segment)
        target_buffer = TARGET_BUFFER
        return bit_rate, target_buffer
|
{"/test.py": ["/BBA.py", "/RBA.py", "/DYNAMIC.py", "/Pensieve.py"]}
|
26,650
|
tianzhaotju/Deeplive
|
refs/heads/master
|
/RBA.py
|
import time as tm
BIT_RATE = [500.0,850.0,1200.0,1850.0]
class Algorithm:
    """Rate-based ABR: pick the highest bitrate below an EWMA throughput estimate."""
    def __init__(self):
        # fill your init vars
        self.buffer_size = 0
        self.p_rb = 1  # safety multiplier applied to the predicted bandwidth
    # Intial
    def Initial(self, model_name):
        """Return the initial per-session variables (empty throughput history)."""
        return {'throughputHistory': []}
    def run(self, time, S_time_interval, S_send_data_size, S_chunk_len, S_rebuf, S_buffer_size, S_play_time_len,
            S_end_delay, S_decision_flag, S_buffer_flag, S_cdn_flag, S_skip_time, end_of_video, cdn_newest_id,
            download_id, cdn_has_frame, IntialVars, start_avgbw):
        """Return (bit_rate, target_buffer, latency_limit) for the next chunk."""
        # record your params
        target_buffer = 1
        bit_rate = 0
        if start_avgbw != -1:
            # bootstrap: a single pre-measured average bandwidth sample
            throughputHistory = [start_avgbw]
        else:
            # derive per-frame bandwidth samples; idle gaps contribute zeros
            throughputHistory = []
            for sent, elapsed in zip(S_send_data_size, S_time_interval):
                sample = (sent / elapsed) / 1000 if elapsed != 0 else 0
                throughputHistory.append(sample)
        bandwidth = self.predict_throughput(throughputHistory, 0.8)
        tempBitrate = bandwidth * self.p_rb
        # highest bitrate level not exceeding the scaled prediction
        for level, rate in enumerate(BIT_RATE):
            if tempBitrate >= rate:
                bit_rate = level
        latency_limit = 4
        return bit_rate, target_buffer, latency_limit
    def predict_throughput(self, throughputHistory, alpha):
        """Bias-corrected exponential moving average; returns the last estimate."""
        if alpha < 0 or alpha > 1:
            print("Invalid input!")
            # fall back to the standard EMA smoothing factor for this length
            alpha = 2 / (len(throughputHistory) + 1)
        predict = [0] * len(throughputHistory)
        for i in range(1, len(throughputHistory)):
            factor = 1 - pow(alpha, i)  # bias-correction denominator
            predict[i] = (alpha * predict[i - 1] + (1 - alpha) * throughputHistory[i]) / factor
        return predict[-1]
|
{"/test.py": ["/BBA.py", "/RBA.py", "/DYNAMIC.py", "/Pensieve.py"]}
|
26,651
|
tianzhaotju/Deeplive
|
refs/heads/master
|
/train/DQN-b - no R/train2.py
|
import LiveStreamingEnv.final_fixed_env as env
import LiveStreamingEnv.load_trace as load_trace
import matplotlib.pyplot as plt
import time as time_package
import numpy as np
import ABR
from ddqn2 import DQNAgent
# Discrete target-buffer action space: 0.1 s .. 1.8 s in 0.1 s steps.
ACTION_SAPCE = []
for j in range(1,19):
    ACTION_SAPCE.append(j/10)
# Network input length (history_len frames x 6 signals) and output size.
STATE_SIZE = 200 + 40
ACTION_SIZE = 20
BATCH_SIZE = 32
history_len = 40
# NOTE(review): `done` is never set True anywhere, so every stored transition
# is treated as non-terminal — confirm intended.
done = False
# Single shared agent reused across all training epochs.
agent = DQNAgent(STATE_SIZE,ACTION_SIZE)
def train(epoch,train_trace):
    """Run one training epoch of the DQN over every trace in *train_trace*.

    Saves the model as save/<epoch>.h5 after the last trace and returns the
    accumulated (scaled) reward over all traces.
    NOTE(review): inside the decision branch `reward += reward_frame` re-adds
    the full frame reward that was already added just above, double-counting
    the base term on decision frames — confirm intended.
    """
    # path setting
    TRAIN_TRACES = train_trace
    video_size_file = './video_trace/frame_trace_'  # video trace path setting,
    LogFile_Path = "./log/"  # log file trace path setting,
    # load the trace
    all_cooked_time, all_cooked_bw, all_file_names = load_trace.load_trace(TRAIN_TRACES)
    # random_seed
    random_seed = 2
    video_count = 0
    frame_time_len = 0.04
    reward_all_sum = 0
    # init the environment
    net_env = env.Environment(all_cooked_time=all_cooked_time,
                              all_cooked_bw=all_cooked_bw,
                              random_seed=random_seed,
                              logfile_path=LogFile_Path,
                              VIDEO_SIZE_FILE=video_size_file,
                              Debug=False)
    BIT_RATE = [500.0, 1200.0]  # kpbs
    # ABR setting
    cnt = 0
    # defalut setting
    bit_rate = 0
    target_buffer = 1
    # QOE setting
    reward_frame = 0
    reward_all = 0
    reward = 0
    SMOOTH_PENALTY = 0.02
    REBUF_PENALTY = 1.5
    LANTENCY_PENALTY = 0.005
    switch_num = 0
    rebuf_time = 0
    buffer_flag = 0
    cdn_flag = 0
    # sliding histories over the last 1000 frames
    S_time_interval = [0] * 1000
    S_send_data_size = [0] * 1000
    S_frame_type = [0] * 1000
    S_frame_time_len = [0] * 1000
    S_buffer_size = [0] * 1000
    S_end_delay = [0] * 1000
    S_rebuf = [0] * 1000
    S_real_quality = [0] * 1000
    cdn_has_frame = [0] * 1000
    flag = False
    n = 0
    mark = 0
    marks = 0
    while True:
        # periodic experience-replay training step
        if len(agent.memory) > BATCH_SIZE and cnt % 1000 == 0:
            agent.replay(BATCH_SIZE)
        reward_frame = 0
        time, time_interval, send_data_size, frame_time_len, rebuf, buffer_size, end_delay, cdn_newest_id, downlaod_id, cdn_has_frame, decision_flag, real_quality, buffer_flag, switch, cdn_flag, end_of_video = net_env.get_video_frame(
            bit_rate, target_buffer)
        cnt += 1
        switch_num += switch
        S_time_interval.append(time_interval)
        S_time_interval.pop(0)
        S_buffer_size.append(buffer_size)
        S_buffer_size.pop(0)
        S_send_data_size.append(send_data_size)
        S_send_data_size.pop(0)
        S_frame_time_len.append(frame_time_len)
        S_frame_time_len.pop(0)
        S_end_delay.append(end_delay)
        S_end_delay.pop(0)
        S_rebuf.append(rebuf)
        S_rebuf.pop(0)
        S_real_quality.append(real_quality)
        S_real_quality.pop(0)
        if decision_flag:
            S_frame_type.append(1)
            S_frame_type.pop(0)
        else:
            S_frame_type.append(0)
            S_frame_type.pop(0)
        rebuf_time += rebuf
        n+=1
        # per-frame QoE: bitrate reward minus rebuffer and latency penalties
        if not cdn_flag:
            reward_frame = frame_time_len * float(
                BIT_RATE[bit_rate]) / 1000 - REBUF_PENALTY * rebuf - LANTENCY_PENALTY * end_delay
        else:
            reward_frame = -(REBUF_PENALTY * rebuf)
        reward += reward_frame
        # decision frames: add smoothness penalty, store experience, pick action
        if decision_flag and not end_of_video:
            reward_frame += -(switch_num) * SMOOTH_PENALTY * (1200 - 500) / 1000
            reward += reward_frame
            length = len(S_buffer_size)
            if flag:
                # build the next-state vector from the last history_len frames
                next_state = []
                for i in S_buffer_size[length-history_len:]:
                    next_state.append(i*0.1)
                for i in S_send_data_size[length-history_len:]:
                    next_state.append(i*0.00001)
                for i in S_time_interval[length-history_len:]:
                    next_state.append(i*10)
                for i in S_end_delay[length-history_len:]:
                    next_state.append(i*0.1)
                # for i in S_frame_time_len[length-history_len:]:
                #     next_state.append(i*10)
                for i in S_rebuf[length-history_len:]:
                    next_state.append(i)
                for i in S_real_quality[length-history_len:]:
                    next_state.append(i)
                marks += 1
                # only keep transitions whose chunk spanned enough frames
                if(n>=history_len-10):
                    next_state = np.reshape(next_state, [1, STATE_SIZE])
                    agent.remember(state, action, reward-2, next_state, done)
                    reward = 0
                else:
                    mark += 1
            n = 0
            flag = True
            # build the current-state vector for the new decision
            state = []
            for i in S_buffer_size[length-history_len:]:
                state.append(i*0.1)
            for i in S_send_data_size[length-history_len:]:
                state.append(i*0.00001)
            for i in S_time_interval[length-history_len:]:
                state.append(i*10)
            for i in S_end_delay[length-history_len:]:
                state.append(i*0.1)
            # for i in S_frame_time_len[length-history_len:]:
            #     state.append(i*10)
            for i in S_rebuf[length-history_len:]:
                state.append(i)
            for i in S_real_quality[length-history_len:]:
                state.append(i)
            state = np.reshape(state, [1, STATE_SIZE])
            action = agent.act(state)
            bit_rate = action[0]
            target_buffer = ACTION_SAPCE[action[1]]
            switch_num = 0
            rebuf_time = 0
        reward_all += reward_frame
        if end_of_video:
            agent.update_target_model()
            # Narrow the range of results
            print("video count", video_count, reward_all,mark,marks)
            reward_all_sum += reward_all / 100
            video_count += 1
            if video_count >= len(all_file_names):
                agent.save("save/"+str(epoch)+".h5")
                break
            # reset per-trace state
            # NOTE(review): S_rebuf and S_real_quality are NOT reset here,
            # so history leaks across traces — confirm intended.
            reward_all = 0
            bit_rate = 0
            target_buffer = 1.5
            S_time_interval = [0] * 1000
            S_send_data_size = [0] * 1000
            S_frame_type = [0] * 1000
            S_frame_time_len = [0] * 1000
            S_buffer_size = [0] * 1000
            S_end_delay = [0] * 1000
            cdn_has_frame = [0] * 1000
            rebuf_time = 0
            buffer_flag = 0
            cdn_flag = 0
            reward = 0
            flag = False
            n = 0
            mark = 0
            marks = 0
            switch_num = 0
    return reward_all_sum
def main():
    """Run up to 999 training epochs, printing each epoch's summary reward."""
    for epoch in range(1, 1000):
        a = train(epoch, './network_trace500/')
        print(str(epoch) + " : " + str(a))
# Script entry point: kick off the training loop when executed directly.
if __name__ == "__main__":
    main()
|
{"/test.py": ["/BBA.py", "/RBA.py", "/DYNAMIC.py", "/Pensieve.py"]}
|
26,652
|
tianzhaotju/Deeplive
|
refs/heads/master
|
/train/DQN-ar/train.py
|
import fixed_env as env
import load_trace as load_trace
import matplotlib.pyplot as plt
import time as time_package
import numpy as np
import ABR
from ddqn import DQNAgent
# Discrete option indices for each decision dimension.
BITRATE = [0,1,2,3]
TARGET_BUFFER = [0,1,2,3]
LATENCY_LIMIT = [1,2,3,4]
# Flat joint action space: every (bitrate, target_buffer, latency_limit) triple.
ACTION_SAPCE = []
for i in BITRATE:
    for j in TARGET_BUFFER:
        for k in LATENCY_LIMIT:
            action_apace = []
            action_apace.append(i)
            action_apace.append(j)
            action_apace.append(k)
            ACTION_SAPCE.append(action_apace)
# Network input length (history_len frames x 5 signals) and output size (4*4*4).
STATE_SIZE = 250
ACTION_SIZE = len(BITRATE)*len(TARGET_BUFFER)*len(LATENCY_LIMIT)
BATCH_SIZE = 32
history_len = 50
# NOTE(review): `done` is never set True, so all transitions are non-terminal.
done = False
# Single shared agent reused across all training epochs.
agent = DQNAgent(STATE_SIZE,ACTION_SIZE)
def train(epoch,train_trace):
    """Run one training epoch of the DQN over every trace in *train_trace*.

    Saves the model as save/<epoch>.h5 after the last trace and returns the
    accumulated (scaled) reward over all traces.
    """
    # path setting
    TRAIN_TRACES = train_trace
    video_size_file = './dataset/video_trace/sports/frame_trace_'  # video trace path setting,
    LogFile_Path = "./log/"  # log file trace path setting,
    # load the trace
    all_cooked_time, all_cooked_bw, all_file_names = load_trace.load_trace(TRAIN_TRACES)
    # random_seed
    random_seed = 2
    video_count = 0
    frame_time_len = 0.04
    reward_all_sum = 0
    # init the environment
    net_env = env.Environment(all_cooked_time=all_cooked_time,
                              all_cooked_bw=all_cooked_bw,
                              random_seed=random_seed,
                              logfile_path=LogFile_Path,
                              VIDEO_SIZE_FILE=video_size_file,
                              Debug=False)
    BIT_RATE = [500.0,850.0,1200.0,1850.0]  # kpbs
    # ABR setting
    cnt = 0
    # defalut setting
    bit_rate = 0
    last_bit_rate = 0
    target_buffer = 1
    latency_limit = 7
    # QOE setting ('ar' profile weights)
    reward_frame = 0
    reward_all = 0
    reward = 0
    SMOOTH_PENALTY = 0.0
    REBUF_PENALTY = 3
    LANTENCY_PENALTY = 0.0
    BITRATE_REWARD = 0.001
    SKIP_PENALTY = 0.0
    switch_num = 0
    rebuf_time = 0
    buffer_flag = 0
    cdn_flag = 0
    # sliding histories over the last 100 frames
    S_time_interval = [0] * 100
    S_send_data_size = [0] * 100
    S_buffer_size = [0] * 100
    S_end_delay = [0] * 100
    S_rebuf = [0] * 100
    flag = False
    n = 0
    mark = 0
    marks = 0
    while True:
        # periodic experience-replay training step
        if len(agent.memory) > BATCH_SIZE and cnt % 1000 == 0:
            agent.replay(BATCH_SIZE)
        reward_frame = 0
        time, time_interval, send_data_size, chunk_len, \
        rebuf, buffer_size, play_time_len, end_delay, \
        cdn_newest_id, download_id, cdn_has_frame, skip_frame_time_len, decision_flag, \
        buffer_flag, cdn_flag, skip_flag, end_of_video = net_env.get_video_frame(bit_rate, target_buffer, latency_limit)
        cnt += 1
        S_time_interval.append(time_interval)
        S_time_interval.pop(0)
        S_buffer_size.append(buffer_size)
        S_buffer_size.pop(0)
        S_send_data_size.append(send_data_size)
        S_send_data_size.pop(0)
        S_end_delay.append(end_delay)
        S_end_delay.pop(0)
        S_rebuf.append(rebuf)
        S_rebuf.pop(0)
        # # QOE setting
        # if end_delay <= 1.0:
        #     LANTENCY_PENALTY = 0.005
        # else:
        #     LANTENCY_PENALTY = 0.01
        # per-frame QoE: bitrate reward minus rebuffer/latency/skip penalties
        if not cdn_flag:
            reward_frame = frame_time_len * float(BIT_RATE[
                bit_rate]) * BITRATE_REWARD - REBUF_PENALTY * rebuf - LANTENCY_PENALTY * end_delay - SKIP_PENALTY * skip_frame_time_len
        else:
            reward_frame = -(REBUF_PENALTY * rebuf)
        rebuf_time += rebuf
        n+=1
        reward += reward_frame
        # decision frames: smoothness penalty, store experience, pick new action
        if decision_flag and not end_of_video:
            # NOTE: reassignment (not +=), so reward_frame now only carries the
            # smoothness term from here on.
            reward_frame = -1 * SMOOTH_PENALTY * (abs(BIT_RATE[bit_rate] - BIT_RATE[last_bit_rate]) / 1000)
            last_bit_rate = bit_rate
            reward += reward_frame
            length = len(S_buffer_size)
            if flag:
                # build the next-state vector from the last history_len frames
                next_state = []
                for i in S_buffer_size[length-history_len:]:
                    next_state.append(i*0.1)
                for i in S_send_data_size[length-history_len:]:
                    next_state.append(i*0.00001)
                for i in S_time_interval[length-history_len:]:
                    next_state.append(i*10)
                for i in S_end_delay[length-history_len:]:
                    next_state.append(i*0.1)
                for i in S_rebuf[length-history_len:]:
                    next_state.append(i)
                marks += 1
                # only keep transitions whose chunk spanned enough frames
                if(n>=history_len-40):
                    next_state = np.reshape(next_state, [1,STATE_SIZE])
                    agent.remember(state, action, reward, next_state, done)
                    reward = 0
                else:
                    mark += 1
            n = 0
            flag = True
            # build the current-state vector for the new decision
            state = []
            for i in S_buffer_size[length-history_len:]:
                state.append(i*0.1)
            for i in S_send_data_size[length-history_len:]:
                state.append(i*0.00001)
            for i in S_time_interval[length-history_len:]:
                state.append(i*10)
            for i in S_end_delay[length-history_len:]:
                state.append(i*0.1)
            for i in S_rebuf[length-history_len:]:
                state.append(i)
            state = np.reshape(state, [1,STATE_SIZE])
            action = agent.act(state)
            # decode the flat action index into its three components
            bit_rate = ACTION_SAPCE[action][0]
            target_buffer = ACTION_SAPCE[action][1]
            latency_limit = ACTION_SAPCE[action][2]
            switch_num = 0
            rebuf_time = 0
        reward_all += reward_frame
        if end_of_video:
            agent.update_target_model()
            # Narrow the range of results
            print("video count", video_count, reward_all,mark,marks)
            reward_all_sum += reward_all / 20
            video_count += 1
            if video_count >= len(all_file_names):
                agent.save("save/"+str(epoch)+".h5")
                break
            # reset per-trace state
            # NOTE(review): last_bit_rate and latency_limit are NOT reset here,
            # so they leak into the next trace — confirm intended.
            reward_all = 0
            bit_rate = 0
            target_buffer = 1
            S_time_interval = [0] * 100
            S_send_data_size = [0] * 100
            S_buffer_size = [0] * 100
            S_end_delay = [0] * 100
            S_rebuf = [0] * 100
            rebuf_time = 0
            buffer_flag = 0
            cdn_flag = 0
            reward = 0
            flag = False
            n = 0
            mark = 0
            marks = 0
    return reward_all_sum
import csv
def main():
    """Train for 100 epochs, logging each epoch's summary reward to QoE.csv."""
    out = open("QoE.csv", 'w', newline='')
    writer = csv.writer(out)
    for epoch in range(1, 101):
        a = train(epoch, './dataset/network_trace/fixed/')
        print(str(epoch) + " : " + str(a))
        writer.writerow([epoch, a])
        # flush so progress survives a mid-run crash
        out.flush()
# Script entry point: kick off the training loop when executed directly.
if __name__ == "__main__":
    main()
|
{"/test.py": ["/BBA.py", "/RBA.py", "/DYNAMIC.py", "/Pensieve.py"]}
|
26,653
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/utils.py
|
import calendar
import time
def create_time_stamp(date_string, formatting="%Y-%m-%d %H:%M:%S"):
    """
    Sometimes Poloniex will return a date as a string. This converts it to
    a timestamp.
    Args:
        date_string: Date string returned by poloniex
        formatting: The default format poloniex returns the date_string in
    Returns:
        UNIX timestamp
    """
    parsed = time.strptime(date_string, formatting)
    # timegm interprets the struct_time as UTC (no local-timezone shift)
    return calendar.timegm(parsed)
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,654
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/poloniex.py
|
import argparse
import textwrap
from collections import OrderedDict
import time
import sys
# Fail fast on Python 2: the codebase is Python-3-only.
# (Message grammar fixed: "has move to" -> "has moved to".)
if sys.version_info[0] < 3:
    raise Exception("This project has moved to using python 3. If you would like to use the python 2 snapshot, checkout"
                    " the v1.0.0 tag. `git checkout v1.0.0`")
import analyzer
def main():
    """CLI entry point: dispatch the requested analyzer action, optionally in a loop."""
    # action name -> handler + help text, ordered for the epilog listing
    actions = OrderedDict([
        ("GetOverview", {
            'function': analyzer.get_overview,
            'help': 'Returns overall balance and percentage earned/lost',
        }),
        ("GetDetailedOverview", {
            'function': analyzer.get_detailed_overview,
            'help': 'Returns detailed overall balance and percentage earned/lost',
        }),
        ("CalculateFees", {
            'function': analyzer.calculate_fees,
            'help': 'Returns the total amount in fees',
        }),
        ("GetLendingHistory", {
            'function': analyzer.get_lending_history,
            'help': 'Returns your total lending interest and fees.',
        }),
        ("GetChangeOverTime", {
            'function': analyzer.get_change_over_time,
            'help': 'Public function: Returns percent change over a series of time periods for currencies exceeding a volume threshold'
        })
    ])
    parser = argparse.ArgumentParser(
        description="This analyzes information from your Poloniex account",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('-a', '--action', help='Script action (see below).',
                        default='', required=True)
    parser.add_argument('-l', '--loop', help='Run every n seconds',
                        default='', required=False)
    # build the epilog: each action name followed by its wrapped help text
    parser.epilog = "script actions/tasks:"
    for action in actions:
        parser.epilog += "\n  {}".format(action)
        line_length = 80
        indents = 8
        for line in textwrap.wrap(actions[action]['help'],
                                  line_length - indents):
            parser.epilog += "\n    {}".format(line)
    args = parser.parse_args()
    # NOTE(review): `args.action is None` can never be true here (required=True
    # and the `not in` test already rejects None) — the second check is redundant.
    if args.action not in actions or args.action is None:
        parser.print_help()
        print(args.action)
        return
    if not args.loop:
        actions[args.action]['function']()
    else:
        # repeat the chosen action every --loop seconds until interrupted
        while True:
            actions[args.action]['function']()
            time.sleep(int(args.loop))
# CLI entry point guard.
if __name__ == '__main__':
    main()
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,655
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/poloniex_apis/api_models/trade_history.py
|
from poloniex_apis import trading_api
class TradeHistory:
    """Wraps the account-wide trade history returned by the trading API."""

    def __init__(self):
        # Maps currency pair -> list of trade dicts (each with at least
        # "fee" and "total" string fields).
        self.history = trading_api.return_trade_history()

    def get_all_fees(self):
        """Return total fees paid per currency pair (fee rate * trade total)."""
        return {
            pair: sum(float(trade["fee"]) * float(trade["total"])
                      for trade in trades)
            for pair, trades in self.history.items()
        }
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,656
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/dev_utils.py
|
"""
Utility methods for development
"""
import json
def dict_to_file(dict_input, filename):
    """Serialize *dict_input* to *filename* as pretty-printed, key-sorted JSON."""
    serialized = json.dumps(dict_input, indent=4, sort_keys=True)
    with open(filename, 'w') as out:
        out.write(serialized)
def file_to_dict(filename):
    """Load and return the JSON object stored in *filename*."""
    with open(filename, 'r') as source:
        return json.load(source)
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,657
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/poloniex_apis/api_models/ticker_price.py
|
from poloniex_apis import public_api
class TickerData:
    """Caches the public ticker and looks up last-trade prices per market."""

    def __init__(self):
        # Snapshot of the full ticker (market -> price fields) at construction.
        self.ticker_price = public_api.return_ticker()

    def get_price(self, symbol):
        """Return the last trade price for *symbol*, or 0 when unknown."""
        try:
            last_price = self.ticker_price[symbol]["last"]
        except KeyError:
            # Unknown market (or missing "last" field) is treated as price 0.
            return 0
        return float(last_price)
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,658
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/poloniex_apis/public_api.py
|
"""
Public API for Poloniex
Poloniex's Public API. Not all public api methods are implemented and will
probably not be added unless it will actually be used.
"""
import json
from urllib.request import Request
from urllib.request import urlopen
import dev_utils
import settings
api_url = "https://poloniex.com/public"
def return_usd_btc():
    """Return the last USDT_BTC trade price as a float."""
    return float(return_ticker()["USDT_BTC"]["last"])
def return_ticker():
    """Return ticker data for every market (or mocked data when enabled)."""
    if settings.MOCK_API_RESPONSE:
        # Development shortcut: serve a canned response from disk.
        return dev_utils.file_to_dict("return_ticker.txt")
    return _call_public_api("{}?command=returnTicker".format(api_url))
def return_24_hour_volume():
    """Return the 24-hour volume for every market and currency."""
    return _call_public_api("{}?command=return24hVolume".format(api_url))
def return_chart_data(period, currency_pair, start, end=9999999999):
    """
    Returns the candlestick chart data.
    :param period: (candlestick period in seconds; valid values are 300, 900, 1800, 7200, 14400, and 86400)
    :param currency_pair: The currency pair e.g. BTC_XMR
    :param start: UNIX Timestamp for start date
    :param end: UNIX Timestamp for end date (defaults far in the future so
        the full history is returned)
    """
    # Bug fix: the query parameter was mangled to "¤cyPair" — the "&curren"
    # of "&currencyPair" had been collapsed into the HTML currency-sign
    # entity, so the API never received the currencyPair argument.
    url = ("{api_url}?command=returnChartData&currencyPair={currency_pair}"
           "&start={start}&end={end}&period={period}").format(
        api_url=api_url, currency_pair=currency_pair, start=start, end=end,
        period=period)
    return _call_public_api(url)
def _call_public_api(url):
    """
    Perform a GET request against the public API.
    :param url: (str) Fully-built public API url
    :return: (dict) Decoded JSON response
    """
    # Fix: the response object was never closed; a context manager releases
    # the underlying connection even if decoding raises.
    with urlopen(Request(url)) as response:
        return json.loads(response.read().decode('utf8'))
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,659
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/poloniex_apis/api_key_secret_util.py
|
import configparser
def get_api_key():
    """
    Returns a Poloniex API key from the config file.
    :return: (str) the 'key' entry of the [ApiKeys] section in api_keys.ini
    :raises: FileNotFoundError if api_keys.ini does not exist
    """
    config = configparser.ConfigParser()
    # Bug fix: the open() handle passed to read_file() was never closed.
    # A context manager releases it while keeping the fail-loudly-if-missing
    # behavior of read_file() (config.read() would silently ignore a missing
    # file).
    with open("api_keys.ini") as config_file:
        config.read_file(config_file)
    return config.get("ApiKeys", "key")
def get_api_secret():
    """
    Returns a Poloniex API secret from the config file, encoded to bytes so
    it can be used directly as an HMAC key.
    """
    config = configparser.ConfigParser()
    config.read("api_keys.ini")
    return config.get("ApiKeys", "secret").encode('utf-8')
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,660
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/poloniex_apis/api_models/lending_history.py
|
import time
from poloniex_apis import trading_api
from utils import create_time_stamp
BITCOIN_GENESIS_BLOCK_DATE = "1231006505"
class LendingHistory:
    """Fetches and holds the account's complete lending history."""

    def __init__(self):
        # Full list of lending entries, paged in from the trading API.
        self.history = self._get_all_lending_history()

    def _get_all_lending_history(self):
        """Page backwards through the API until all lending history is collected.

        Each request uses the close time of the oldest entry received so far
        as the new end bound; a returned segment of length 1 is taken to mean
        only the already-seen boundary entry came back, i.e. nothing older
        remains.
        NOTE(review): assumes the account has at least one lending entry —
        an empty first response would raise IndexError on history[-1];
        confirm with callers.
        """
        current_timestamp = time.time()
        # Start from the Bitcoin genesis block date so no entry can predate it.
        lending_history = trading_api.return_lending_history(BITCOIN_GENESIS_BLOCK_DATE, current_timestamp)
        did_not_recieve_all_lending_history = True
        while did_not_recieve_all_lending_history:
            # Oldest close time seen so far becomes the next paging bound.
            returned_end_time = create_time_stamp(lending_history[-1]['close'])
            lending_history_segment = trading_api.return_lending_history(BITCOIN_GENESIS_BLOCK_DATE, returned_end_time)
            if len(lending_history_segment) == 1:
                did_not_recieve_all_lending_history = False
            else:
                lending_history += lending_history_segment
        return lending_history
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,661
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/poloniex_apis/api_models/balances.py
|
from poloniex_apis import trading_api
class Balances:
    """Wraps the account's complete balances from the trading API."""

    def __init__(self):
        # Maps currency -> balance fields (e.g. available, onOrders, btcValue).
        self.all_balances = trading_api.return_complete_balances()

    def get_btc_total(self):
        """Print and return the account's total value expressed in BTC."""
        total_btc = sum(
            float(entry['btcValue'])
            for entry in self._get_active_balances().values()
        )
        print("----------Current Balances----------")
        print("Total BTC={}".format(total_btc))
        return total_btc

    def _get_active_balances(self):
        """Return only currencies where at least one balance field is non-zero."""
        return {
            currency: entry
            for currency, entry in self.all_balances.items()
            if any(float(value) > 0 for value in entry.values())
        }
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,662
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/poloniex_apis/trading_api.py
|
"""
Poloniex's Trading API. Not all trading api methods are implemented and will
probably not be added unless it will actually be used. In order for these API
methods to work, an API key and secret must be configured. Not all methods need
the "Trading Enabled" option on their API key.
"""
import hashlib
import hmac
import json
import time
from urllib.error import HTTPError
from urllib.request import urlopen
from urllib.request import Request
import sys
import dev_utils
import settings
from .api_key_secret_util import get_api_key, get_api_secret
api_url = "https://poloniex.com/tradingApi"
class InvalidKeySecretError(Exception):
    """
    Exception raised for an invalid API key/secret pair.
    Raised when the API response body reports "Invalid API key/secret pair.".
    """
    pass
class TradingApiError(Exception):
    """
    Exception raised for a general TradingApi error.
    Carries the "error" message taken from the API response body.
    """
    pass
def return_complete_balances():
    """Return all account balances, including on-order amounts and BTC value."""
    return _call_trading_api(_build_body(command="returnCompleteBalances"))
def return_deposits_withdrawals():
    """Return every deposit and withdrawal from epoch 0 up to now."""
    body = _build_body(
        command="returnDepositsWithdrawals",
        parameters={'start': '0', 'end': time.time()},
    )
    return _call_trading_api(body)
def return_trade_history():
    """Return the full trade history for all pairs (or mocked data)."""
    if settings.MOCK_API_RESPONSE:
        # Development shortcut: serve a canned response from disk.
        return dev_utils.file_to_dict("return_trade_history.txt")
    body = _build_body(
        command="returnTradeHistory",
        parameters={
            'currencyPair': 'all',
            'start': '0',
            'end': time.time(),
        },
    )
    return _call_trading_api(body)
def return_lending_history(start, end):
    """
    Args:
        start: start time
        end: end time
    Returns: json of lending history between designated times
    """
    body = _build_body(
        command="returnLendingHistory",
        parameters={'start': start, 'end': end},
    )
    return _call_trading_api(body)
def _sign_header(post_body):
    """Sign *post_body* with the API secret using HMAC-SHA512 (hex digest)."""
    digest = hmac.new(get_api_secret(),
                      bytes(post_body, encoding='utf-8'),
                      hashlib.sha512)
    return digest.hexdigest()
def _call_trading_api(post_body):
    """
    Calls the Poloniex Trading API.
    The Poloniex trading API requires two headers with the api key, and a
    signed POST body signed with the secret.
    :param post_body: (str) POST parameters
    :return: (dict) Response
    :raises: InvalidKeySecretError
    :raises: TradingApiError
    """
    request = Request(api_url)
    request.add_header("Key", get_api_key())
    request.add_header("Sign", _sign_header(post_body))
    request.data = bytes(post_body, encoding='utf-8')
    try:
        response = urlopen(request)
    except HTTPError as err:
        if err.code == 422:
            print("HTTP Error 422. Use a new API key/secret. From the Poloniex API doc:\n"
                  "    Additionally, all queries must include a 'nonce' POST parameter.\n"
                  "    The nonce parameter is an integer which must always be greater \n"
                  "    than the previous nonce used.\n\n"
                  "If you have used another script or the api directly, the nonce value\n"
                  "is persistent may be greater than what this script is setting. This \n"
                  "script uses the Epoch time to determine the nonce.")
            sys.exit(0)
        if err.code == 403:
            print("HTTP Error 403. Are your api keys correct?")
            sys.exit(0)
        # Bug fix: any other HTTP error previously fell through to
        # response.read() with 'response' unbound, masking the real failure
        # behind a NameError. Re-raise the original HTTPError instead.
        raise
    decoded_response = response.read().decode('utf8')
    response_dict = json.loads(decoded_response)
    if "error" in response_dict:
        if response_dict["error"] == "Invalid API key/secret pair.":
            raise InvalidKeySecretError
        else:
            raise TradingApiError(response_dict["error"])
    return response_dict
def _build_body(command, parameters=None):
"""
Builds the body for the trading api. Api methods are specified by the
'command' POST parameter. Additionally, each query must have the 'nonce'
POST parameter which requires a greater int than the previous call.
:type parameters: (dict) Extra parameters
:param command: (str) API method
:return: (str) POST body
"""
body = "command={}".format(command)
nonce_int = int(time.time() * 100)
body += "&nonce={}".format(nonce_int)
if parameters is not None:
for key, value in parameters.items():
body += "&{}={}".format(key, value)
return body
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.